author     Kalle Valo <kvalo@qca.qualcomm.com>    2011-12-16 21:10:39 +0200
committer  Kalle Valo <kvalo@qca.qualcomm.com>    2011-12-16 21:10:39 +0200
commit     7e95e365d5399647a41e10059e4b09826b82d78b (patch)
tree       305c9968798adae3d9484657339fa39d2a5fdaac /drivers
parent     3ca9d1fc9aa64077645a26c396de9399b49ea226 (diff)
parent     5bd5e9a6ae5137a61d0b5c277eac61892d89fc4f (diff)
Merge remote branch 'wireless-next/master' into ath6kl-next
Conflicts:
	drivers/net/wireless/ath/ath6kl/init.c
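For reference, a minimal sketch of how a merge like this is typically reproduced and its conflict resolved by hand; the remote and branch names are taken from the commit message above, while the exact commands the maintainer ran are not recorded here:

    # assumption: a remote named 'wireless-next' is already configured and fetched
    git checkout ath6kl-next
    git merge wireless-next/master        # stops, reporting a conflict in ath6kl/init.c
    $EDITOR drivers/net/wireless/ath/ath6kl/init.c   # resolve the conflict markers manually
    git add drivers/net/wireless/ath/ath6kl/init.c   # mark the file as resolved
    git commit                            # record the merge commit shown above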
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/evxface.c1
-rw-r--r--drivers/acpi/acpica/evxfevnt.c1
-rw-r--r--drivers/acpi/acpica/evxfgpe.c1
-rw-r--r--drivers/acpi/acpica/evxfregn.c1
-rw-r--r--drivers/acpi/acpica/hwregs.c11
-rw-r--r--drivers/acpi/acpica/hwsleep.c1
-rw-r--r--drivers/acpi/acpica/hwtimer.c1
-rw-r--r--drivers/acpi/acpica/hwxface.c1
-rw-r--r--drivers/acpi/acpica/nsxfeval.c1
-rw-r--r--drivers/acpi/acpica/nsxfname.c1
-rw-r--r--drivers/acpi/acpica/nsxfobj.c1
-rw-r--r--drivers/acpi/acpica/rsxface.c1
-rw-r--r--drivers/acpi/acpica/tbxface.c1
-rw-r--r--drivers/acpi/acpica/utdebug.c1
-rw-r--r--drivers/acpi/acpica/utdecode.c1
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utxface.c1
-rw-r--r--drivers/acpi/acpica/utxferror.c1
-rw-r--r--drivers/acpi/apei/erst.c12
-rw-r--r--drivers/acpi/atomicio.c4
-rw-r--r--drivers/acpi/blacklist.c1
-rw-r--r--drivers/acpi/bus.c8
-rw-r--r--drivers/acpi/debugfs.c1
-rw-r--r--drivers/acpi/ec_sys.c1
-rw-r--r--drivers/acpi/event.c1
-rw-r--r--drivers/acpi/glue.c1
-rw-r--r--drivers/acpi/osl.c3
-rw-r--r--drivers/acpi/proc.c1
-rw-r--r--drivers/acpi/processor_core.c1
-rw-r--r--drivers/acpi/processor_driver.c20
-rw-r--r--drivers/acpi/processor_idle.c254
-rw-r--r--drivers/acpi/sbshc.c1
-rw-r--r--drivers/acpi/scan.c3
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/acpi/sysfs.c14
-rw-r--r--drivers/acpi/video_detect.c1
-rw-r--r--drivers/amba/bus.c68
-rw-r--r--drivers/ata/Kconfig3
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c3
-rw-r--r--drivers/ata/ahci_platform.c55
-rw-r--r--drivers/ata/ata_piix.c153
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-eh.c18
-rw-r--r--drivers/ata/libata-pmp.c8
-rw-r--r--drivers/ata/libata-scsi.c43
-rw-r--r--drivers/ata/libata-sff.c118
-rw-r--r--drivers/ata/pata_ali.c2
-rw-r--r--drivers/ata/pata_arasan_cf.c6
-rw-r--r--drivers/ata/pata_artop.c107
-rw-r--r--drivers/ata/pata_at91.c7
-rw-r--r--drivers/ata/pata_atiixp.c35
-rw-r--r--drivers/ata/pata_cmd64x.c44
-rw-r--r--drivers/ata/pata_cs5535.c16
-rw-r--r--drivers/ata/pata_efar.c22
-rw-r--r--drivers/ata/pata_hpt366.c35
-rw-r--r--drivers/ata/pata_it8213.c24
-rw-r--r--drivers/ata/pata_legacy.c129
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/ata/pata_of_platform.c8
-rw-r--r--drivers/ata/pata_pdc2027x.c30
-rw-r--r--drivers/ata/pata_qdi.c366
-rw-r--r--drivers/ata/pata_rdc.c16
-rw-r--r--drivers/ata/pata_sc1200.c14
-rw-r--r--drivers/ata/pata_scc.c14
-rw-r--r--drivers/ata/pata_serverworks.c127
-rw-r--r--drivers/ata/pata_sil680.c94
-rw-r--r--drivers/ata/pata_sis.c154
-rw-r--r--drivers/ata/pata_sl82c105.c37
-rw-r--r--drivers/ata/pata_via.c49
-rw-r--r--drivers/ata/sata_mv.c16
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/dma-coherent.c1
-rw-r--r--drivers/base/dma-mapping.c1
-rw-r--r--drivers/base/hypervisor.c1
-rw-r--r--drivers/base/power/common.c2
-rw-r--r--drivers/base/power/generic_ops.c1
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/base/power/opp.c2
-rw-r--r--drivers/base/power/qos.c1
-rw-r--r--drivers/base/power/runtime.c24
-rw-r--r--drivers/base/power/sysfs.c1
-rw-r--r--drivers/base/power/trace.c1
-rw-r--r--drivers/base/power/wakeup.c1
-rw-r--r--drivers/base/regmap/regcache.c1
-rw-r--r--drivers/base/transport_class.c1
-rw-r--r--drivers/bcma/bcma_private.h3
-rw-r--r--drivers/bcma/core.c1
-rw-r--r--drivers/bcma/driver_chipcommon.c1
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c1
-rw-r--r--drivers/bcma/driver_pci.c1
-rw-r--r--drivers/bcma/host_pci.c70
-rw-r--r--drivers/bcma/main.c17
-rw-r--r--drivers/bcma/sprom.c61
-rw-r--r--drivers/block/aoe/aoeblk.c15
-rw-r--r--drivers/block/aoe/aoechr.c1
-rw-r--r--drivers/block/brd.c4
-rw-r--r--drivers/block/cciss.c76
-rw-r--r--drivers/block/cciss.h1
-rw-r--r--drivers/block/cpqarray.c2
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_req.c8
-rw-r--r--drivers/block/loop.c251
-rw-r--r--drivers/block/nbd.c69
-rw-r--r--drivers/block/pktcdvd.c11
-rw-r--r--drivers/block/ps3disk.c1
-rw-r--r--drivers/block/ps3vram.c7
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/umem.c4
-rw-r--r--drivers/block/virtio_blk.c31
-rw-r--r--drivers/block/xen-blkback/blkback.c130
-rw-r--r--drivers/block/xen-blkback/common.h103
-rw-r--r--drivers/block/xen-blkback/xenbus.c130
-rw-r--r--drivers/block/xen-blkfront.c123
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c16
-rw-r--r--drivers/bluetooth/btusb.c3
-rw-r--r--drivers/bluetooth/btwilink.c1
-rw-r--r--drivers/bluetooth/hci_vhci.c8
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/agp/hp-agp.c6
-rw-r--r--drivers/char/agp/intel-gtt.c29
-rw-r--r--drivers/char/hw_random/Kconfig28
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/atmel-rng.c158
-rw-r--r--drivers/char/hw_random/virtio-rng.c1
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/ps3flash.c1
-rw-r--r--drivers/char/ramoops.c1
-rw-r--r--drivers/char/random.c23
-rw-r--r--drivers/char/ttyprintk.c3
-rw-r--r--drivers/char/virtio_console.c121
-rw-r--r--drivers/clocksource/Kconfig15
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c106
-rw-r--r--drivers/clocksource/sh_cmt.c1
-rw-r--r--drivers/clocksource/sh_mtu2.c1
-rw-r--r--drivers/clocksource/sh_tmu.c1
-rw-r--r--drivers/cpufreq/cpufreq_stats.c1
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c40
-rw-r--r--drivers/cpufreq/e_powersaver.c135
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c129
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c1
-rw-r--r--drivers/cpuidle/cpuidle.c87
-rw-r--r--drivers/cpuidle/driver.c25
-rw-r--r--drivers/cpuidle/governors/ladder.c43
-rw-r--r--drivers/cpuidle/governors/menu.c30
-rw-r--r--drivers/cpuidle/sysfs.c22
-rw-r--r--drivers/crypto/Kconfig3
-rw-r--r--drivers/crypto/hifn_795x.c6
-rw-r--r--drivers/crypto/mv_cesa.c1
-rw-r--r--drivers/crypto/n2_core.c4
-rw-r--r--drivers/crypto/padlock-aes.c4
-rw-r--r--drivers/crypto/picoxcell_crypto.c121
-rw-r--r--drivers/crypto/talitos.c18
-rw-r--r--drivers/dca/dca-core.c1
-rw-r--r--drivers/dca/dca-sysfs.c1
-rw-r--r--drivers/dma/Kconfig3
-rw-r--r--drivers/dma/amba-pl08x.c640
-rw-r--r--drivers/dma/at_hdmac.c164
-rw-r--r--drivers/dma/at_hdmac_regs.h24
-rw-r--r--drivers/dma/dmatest.c23
-rw-r--r--drivers/dma/dw_dmac.c5
-rw-r--r--drivers/dma/ep93xx_dma.c1
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c49
-rw-r--r--drivers/dma/intel_mid_dma.c10
-rw-r--r--drivers/dma/ipu/ipu_idmac.c66
-rw-r--r--drivers/dma/mpc512x_dma.c1
-rw-r--r--drivers/dma/mxs-dma.c45
-rw-r--r--drivers/dma/pch_dma.c7
-rw-r--r--drivers/dma/pl330.c231
-rw-r--r--drivers/dma/shdma.c129
-rw-r--r--drivers/dma/shdma.h7
-rw-r--r--drivers/dma/ste_dma40.c1
-rw-r--r--drivers/dma/timb_dma.c5
-rw-r--r--drivers/edac/Kconfig16
-rw-r--r--drivers/edac/Makefile2
-rw-r--r--drivers/edac/amd64_edac.c37
-rw-r--r--drivers/edac/cpc925_edac.c67
-rw-r--r--drivers/edac/edac_core.h350
-rw-r--r--drivers/edac/edac_mce.c61
-rw-r--r--drivers/edac/i7300_edac.c51
-rw-r--r--drivers/edac/i7core_edac.c415
-rw-r--r--drivers/edac/mce_amd.c46
-rw-r--r--drivers/edac/mce_amd.h6
-rw-r--r--drivers/edac/mce_amd_inj.c1
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/sb_edac.c1893
-rw-r--r--drivers/firewire/core-iso.c1
-rw-r--r--drivers/firewire/core-transaction.c4
-rw-r--r--drivers/firewire/net.c15
-rw-r--r--drivers/firewire/ohci.c255
-rw-r--r--drivers/firewire/sbp2.c260
-rw-r--r--drivers/firmware/edd.c6
-rw-r--r--drivers/firmware/efivars.c19
-rw-r--r--drivers/firmware/google/gsmi.c1
-rw-r--r--drivers/gpio/Kconfig38
-rw-r--r--drivers/gpio/Makefile14
-rw-r--r--drivers/gpio/gpio-74x164.c1
-rw-r--r--drivers/gpio/gpio-davinci.c455
-rw-r--r--drivers/gpio/gpio-ep93xx.c23
-rw-r--r--drivers/gpio/gpio-exynos4.c385
-rw-r--r--drivers/gpio/gpio-ks8695.c319
-rw-r--r--drivers/gpio/gpio-langwell.c27
-rw-r--r--drivers/gpio/gpio-lpc32xx.c446
-rw-r--r--drivers/gpio/gpio-mc33880.c1
-rw-r--r--drivers/gpio/gpio-mcp23s08.c1
-rw-r--r--drivers/gpio/gpio-ml-ioh.c255
-rw-r--r--drivers/gpio/gpio-mpc5200.c1
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c398
-rw-r--r--drivers/gpio/gpio-mxc.c19
-rw-r--r--drivers/gpio/gpio-mxs.c3
-rw-r--r--drivers/gpio/gpio-nomadik.c130
-rw-r--r--drivers/gpio/gpio-omap.c61
-rw-r--r--drivers/gpio/gpio-pca953x.c11
-rw-r--r--drivers/gpio/gpio-pcf857x.c1
-rw-r--r--drivers/gpio/gpio-pch.c251
-rw-r--r--drivers/gpio/gpio-pl061.c31
-rw-r--r--drivers/gpio/gpio-plat-samsung.c205
-rw-r--r--drivers/gpio/gpio-pxa.c338
-rw-r--r--drivers/gpio/gpio-s5pc100.c354
-rw-r--r--drivers/gpio/gpio-s5pv210.c287
-rw-r--r--drivers/gpio/gpio-sa1100.c63
-rw-r--r--drivers/gpio/gpio-samsung.c2712
-rw-r--r--drivers/gpio/gpio-tegra.c166
-rw-r--r--drivers/gpio/gpio-tnetv107x.c205
-rw-r--r--drivers/gpio/gpio-u300.c1190
-rw-r--r--drivers/gpio/gpio-xilinx.c1
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/ati_pcigart.c1
-rw-r--r--drivers/gpu/drm/drm_buffer.c1
-rw-r--r--drivers/gpu/drm/drm_bufs.c1
-rw-r--r--drivers/gpu/drm/drm_cache.c1
-rw-r--r--drivers/gpu/drm/drm_crtc.c13
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c18
-rw-r--r--drivers/gpu/drm/drm_debugfs.c18
-rw-r--r--drivers/gpu/drm/drm_dma.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c172
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c1
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c88
-rw-r--r--drivers/gpu/drm/drm_hashtab.c1
-rw-r--r--drivers/gpu/drm/drm_ioc32.c1
-rw-r--r--drivers/gpu/drm/drm_irq.c23
-rw-r--r--drivers/gpu/drm/drm_memory.c1
-rw-r--r--drivers/gpu/drm/drm_mm.c1
-rw-r--r--drivers/gpu/drm/drm_modes.c1
-rw-r--r--drivers/gpu/drm/drm_pci.c1
-rw-r--r--drivers/gpu/drm/drm_platform.c1
-rw-r--r--drivers/gpu/drm/drm_proc.c4
-rw-r--r--drivers/gpu/drm/drm_sman.c1
-rw-r--r--drivers/gpu/drm/drm_sysfs.c1
-rw-r--r--drivers/gpu/drm/drm_usb.c1
-rw-r--r--drivers/gpu/drm/drm_vm.c1
-rw-r--r--drivers/gpu/drm/exynos/Kconfig20
-rw-r--r--drivers/gpu/drm/exynos/Makefile11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c110
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h53
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c293
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.h34
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c272
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c381
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h38
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c244
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h254
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c271
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h45
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c265
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h37
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c456
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h37
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c811
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c415
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h107
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c4
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c6
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c14
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c44
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c49
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c68
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h103
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c115
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c30
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c15
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c42
-rw-r--r--drivers/gpu/drm/i915/i915_mem.c14
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h111
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c8
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h46
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c23
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h22
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c20
-rw-r--r--drivers/gpu/drm/i915/intel_display.c762
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c615
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h21
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c33
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c9
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c90
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c146
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c27
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c369
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h16
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c250
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h558
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c58
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c2
-rw-r--r--drivers/gpu/drm/mga/mga_warp.c1
-rw-r--r--drivers/gpu/drm/nouveau/Makefile9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c169
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c297
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c963
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c284
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c89
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c118
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c100
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c66
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c223
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c46
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c84
-rw-r--r--drivers/gpu/drm/nouveau/nv31_mpeg.c (renamed from drivers/gpu/drm/nouveau/nv40_mpeg.c)91
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c348
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c76
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c31
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c118
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c42
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c6
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c402
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc8
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c155
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c1473
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c1
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c62
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c22
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2369
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c122
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c364
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c32
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h42
-rw-r--r--drivers/gpu/drm/radeon/ni.c47
-rw-r--r--drivers/gpu/drm/radeon/r100.c157
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h110
-rw-r--r--drivers/gpu/drm/radeon/r300.c21
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c262
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c24
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c361
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c41
-rw-r--r--drivers/gpu/drm/radeon/r600d.h22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h214
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c122
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c247
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c114
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c2151
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c75
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c78
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h42
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c16
-rw-r--r--drivers/gpu/drm/radeon/rs400.c8
-rw-r--r--drivers/gpu/drm/radeon/rs600.c20
-rw-r--r--drivers/gpu/drm/radeon/rv770.c88
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig9
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h259
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_escape.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_overlay.h22
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h304
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c73
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c322
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c279
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h229
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c870
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c76
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c1135
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h113
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c156
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c46
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c273
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c187
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1174
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h50
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c310
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c171
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c188
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c1035
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c537
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c5
-rw-r--r--drivers/hid/hid-apple.c12
-rw-r--r--drivers/hid/hid-axff.c1
-rw-r--r--drivers/hid/hid-core.c7
-rw-r--r--drivers/hid/hid-debug.c1
-rw-r--r--drivers/hid/hid-dr.c1
-rw-r--r--drivers/hid/hid-emsff.c1
-rw-r--r--drivers/hid/hid-gaff.c1
-rw-r--r--drivers/hid/hid-holtekff.c1
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-multitouch.c47
-rw-r--r--drivers/hid/hid-picolcd.c1
-rw-r--r--drivers/hid/hid-pl.c1
-rw-r--r--drivers/hid/hid-roccat-common.c1
-rw-r--r--drivers/hid/hid-roccat.c19
-rw-r--r--drivers/hid/hid-sjoy.c1
-rw-r--r--drivers/hid/hid-tmff.c1
-rw-r--r--drivers/hid/hid-zpff.c1
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hwmon/Kconfig20
-rw-r--r--drivers/hwmon/ad7414.c7
-rw-r--r--drivers/hwmon/ad7418.c27
-rw-r--r--drivers/hwmon/ads1015.c21
-rw-r--r--drivers/hwmon/ads7828.c12
-rw-r--r--drivers/hwmon/asb100.c10
-rw-r--r--drivers/hwmon/coretemp.c31
-rw-r--r--drivers/hwmon/ds1621.c24
-rw-r--r--drivers/hwmon/ds620.c42
-rw-r--r--drivers/hwmon/gl518sm.c4
-rw-r--r--drivers/hwmon/gl520sm.c4
-rw-r--r--drivers/hwmon/hwmon.c32
-rw-r--r--drivers/hwmon/ibmaem.c107
-rw-r--r--drivers/hwmon/jc42.c52
-rw-r--r--drivers/hwmon/lm73.c30
-rw-r--r--drivers/hwmon/lm75.c9
-rw-r--r--drivers/hwmon/lm77.c4
-rw-r--r--drivers/hwmon/lm90.c146
-rw-r--r--drivers/hwmon/lm92.c26
-rw-r--r--drivers/hwmon/max16065.c4
-rw-r--r--drivers/hwmon/mc13783-adc.c12
-rw-r--r--drivers/hwmon/sht21.c26
-rw-r--r--drivers/hwmon/smm665.c15
-rw-r--r--drivers/hwmon/smsc47b397.c13
-rw-r--r--drivers/hwmon/tmp102.c44
-rw-r--r--drivers/hwmon/ultra45_env.c1
-rw-r--r--drivers/hwmon/w83627ehf.c213
-rw-r--r--drivers/hwmon/w83781d.c10
-rw-r--r--drivers/hwspinlock/Kconfig27
-rw-r--r--drivers/hwspinlock/Makefile1
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c204
-rw-r--r--drivers/hwspinlock/hwspinlock_internal.h40
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c127
-rw-r--r--drivers/hwspinlock/u8500_hsem.c197
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c24
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c6
-rw-r--r--drivers/i2c/busses/Kconfig21
-rw-r--r--drivers/i2c/busses/Makefile5
-rw-r--r--drivers/i2c/busses/i2c-au1550.c282
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c (renamed from drivers/i2c/busses/i2c-designware.c)382
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h105
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c392
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c215
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c270
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c46
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c91
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c164
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c130
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c3
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c3
-rw-r--r--drivers/i2c/busses/scx200_acb.c6
-rw-r--r--drivers/i2c/i2c-boardinfo.c1
-rw-r--r--drivers/ide/Kconfig6
-rw-r--r--drivers/ide/at91_ide.c2
-rw-r--r--drivers/ide/au1xxx-ide.c46
-rw-r--r--drivers/ide/buddha.c1
-rw-r--r--drivers/ide/cmd640.c1
-rw-r--r--drivers/ide/ide-acpi.c1
-rw-r--r--drivers/ide/ide-atapi.c1
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-disk_proc.c1
-rw-r--r--drivers/ide/ide-dma-sff.c1
-rw-r--r--drivers/ide/ide-dma.c1
-rw-r--r--drivers/ide/ide-eh.c1
-rw-r--r--drivers/ide/ide-floppy.c1
-rw-r--r--drivers/ide/ide-floppy_proc.c1
-rw-r--r--drivers/ide/ide-io-std.c1
-rw-r--r--drivers/ide/ide-ioctls.c1
-rw-r--r--drivers/ide/ide-legacy.c1
-rw-r--r--drivers/ide/ide-lib.c1
-rw-r--r--drivers/ide/ide-pnp.c1
-rw-r--r--drivers/ide/ide-tape.c1
-rw-r--r--drivers/ide/ide-taskfile.c1
-rw-r--r--drivers/ide/ide-xfer-mode.c1
-rw-r--r--drivers/ide/macide.c1
-rw-r--r--drivers/ide/pmac.c1
-rw-r--r--drivers/ide/q40ide.c1
-rw-r--r--drivers/ide/setup-pci.c1
-rw-r--r--drivers/ide/tc86c001.c1
-rw-r--r--drivers/idle/intel_idle.c131
-rw-r--r--drivers/infiniband/core/addr.c1
-rw-r--r--drivers/infiniband/core/cm.c64
-rw-r--r--drivers/infiniband/core/cm_msgs.h32
-rw-r--r--drivers/infiniband/core/cma.c68
-rw-r--r--drivers/infiniband/core/fmr_pool.c1
-rw-r--r--drivers/infiniband/core/iwcm.c1
-rw-r--r--drivers/infiniband/core/mad.c4
-rw-r--r--drivers/infiniband/core/multicast.c1
-rw-r--r--drivers/infiniband/core/netlink.c1
-rw-r--r--drivers/infiniband/core/packer.c1
-rw-r--r--drivers/infiniband/core/sysfs.c27
-rw-r--r--drivers/infiniband/core/ucm.c2
-rw-r--r--drivers/infiniband/core/ucma.c8
-rw-r--r--drivers/infiniband/core/ud_header.c1
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/core/user_mad.c5
-rw-r--r--drivers/infiniband/core/uverbs.h18
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c703
-rw-r--r--drivers/infiniband/core/uverbs_main.c30
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c1
-rw-r--r--drivers/infiniband/core/verbs.c377
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2_intr.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c10
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c469
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c41
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h23
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c42
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_srq.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c106
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h13
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c131
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c5
-rw-r--r--drivers/infiniband/hw/nes/Makefile2
-rw-r--r--drivers/infiniband/hw/nes/nes.c8
-rw-r--r--drivers/infiniband/hw/nes/nes.h17
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c1121
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h75
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c99
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h35
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c1162
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.h97
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c53
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h12
-rw-r--r--drivers/infiniband/hw/qib/qib.h15
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c21
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c136
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c90
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c46
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_srq.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c37
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c92
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c31
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c33
-rw-r--r--drivers/input/Kconfig2
-rw-r--r--drivers/input/ff-core.c11
-rw-r--r--drivers/input/input-compat.c1
-rw-r--r--drivers/input/input-mt.c2
-rw-r--r--drivers/input/input-polldev.c7
-rw-r--r--drivers/input/joystick/as5011.c1
-rw-r--r--drivers/input/keyboard/Kconfig4
-rw-r--r--drivers/input/keyboard/adp5588-keys.c2
-rw-r--r--drivers/input/keyboard/adp5589-keys.c609
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c24
-rw-r--r--drivers/input/keyboard/imx_keypad.c46
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c2
-rw-r--r--drivers/input/keyboard/lm8323.c2
-rw-r--r--drivers/input/keyboard/matrix_keypad.c1
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c1
-rw-r--r--drivers/input/keyboard/omap-keypad.c2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c2
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c2
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c85
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c1
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c2
-rw-r--r--drivers/input/misc/Kconfig71
-rw-r--r--drivers/input/misc/Makefile6
-rw-r--r--drivers/input/misc/ad714x-i2c.c4
-rw-r--r--drivers/input/misc/ad714x.c1
-rw-r--r--drivers/input/misc/adxl34x.c1
-rw-r--r--drivers/input/misc/ati_remote2.c1
-rw-r--r--drivers/input/misc/bma150.c691
-rw-r--r--drivers/input/misc/cma3000_d0x.c1
-rw-r--r--drivers/input/misc/dm355evm_keys.c1
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c2
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c282
-rw-r--r--drivers/input/misc/mma8450.c20
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c296
-rw-r--r--drivers/input/misc/twl6040-vibra.c23
-rw-r--r--drivers/input/mouse/alps.c52
-rw-r--r--drivers/input/mouse/elantech.c734
-rw-r--r--drivers/input/mouse/elantech.h57
-rw-r--r--drivers/input/mouse/hgpk.c84
-rw-r--r--drivers/input/mouse/hgpk.h11
-rw-r--r--drivers/input/mouse/lifebook.c6
-rw-r--r--drivers/input/mouse/logips2pp.c16
-rw-r--r--drivers/input/mouse/psmouse-base.c67
-rw-r--r--drivers/input/mouse/psmouse.h25
-rw-r--r--drivers/input/mouse/pxa930_trkball.c2
-rw-r--r--drivers/input/mouse/sentelic.c13
-rw-r--r--drivers/input/mouse/synaptics.c610
-rw-r--r--drivers/input/mouse/synaptics.h27
-rw-r--r--drivers/input/mouse/synaptics_i2c.c4
-rw-r--r--drivers/input/serio/serio_raw.c215
-rw-r--r--drivers/input/sparse-keymap.c1
-rw-r--r--drivers/input/tablet/wacom.h10
-rw-r--r--drivers/input/tablet/wacom_sys.c356
-rw-r--r--drivers/input/tablet/wacom_wac.c48
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ad7877.c1
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c6
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c1
-rw-r--r--drivers/input/touchscreen/ad7879.c1
-rw-r--r--drivers/input/touchscreen/ads7846.c1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c21
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c2
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c1
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c4
-rw-r--r--drivers/input/touchscreen/hp680_ts_input.c2
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c2
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c2
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c34
-rw-r--r--drivers/input/touchscreen/penmount.c199
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c2
-rw-r--r--drivers/input/touchscreen/tsc2007.c200
-rw-r--r--drivers/input/touchscreen/tsc40.c184
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c2
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c29
-rw-r--r--drivers/iommu/Kconfig19
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/intel-iommu.c3
-rw-r--r--drivers/iommu/iommu.c114
-rw-r--r--drivers/iommu/msm_iommu.c9
-rw-r--r--drivers/iommu/omap-iommu-debug.c419
-rw-r--r--drivers/iommu/omap-iommu.c1245
-rw-r--r--drivers/iommu/omap-iovmm.c743
-rw-r--r--drivers/isdn/Kconfig2
-rw-r--r--drivers/isdn/capi/kcapi_proc.c1
-rw-r--r--drivers/isdn/gigaset/asyncdata.c1
-rw-r--r--drivers/isdn/gigaset/capi.c1
-rw-r--r--drivers/isdn/gigaset/dummyll.c1
-rw-r--r--drivers/isdn/gigaset/ev-layer.c1
-rw-r--r--drivers/isdn/gigaset/i4l.c1
-rw-r--r--drivers/isdn/gigaset/interface.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c1
-rw-r--r--drivers/isdn/hisax/callc.c4
-rw-r--r--drivers/isdn/hisax/hisax.h4
-rw-r--r--drivers/isdn/hisax/isdnl1.h2
-rw-r--r--drivers/isdn/hisax/isdnl3.c2
-rw-r--r--drivers/isdn/hisax/l3dss1.c6
-rw-r--r--drivers/isdn/hisax/st5481_d.c4
-rw-r--r--drivers/isdn/i4l/isdn_tty.c1
-rw-r--r--drivers/isdn/mISDN/clock.c1
-rw-r--r--drivers/isdn/mISDN/dsp_audio.c1
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c1
-rw-r--r--drivers/isdn/mISDN/socket.c1
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/dell-led.c1
-rw-r--r--drivers/leds/led-class.c7
-rw-r--r--drivers/leds/led-triggers.c5
-rw-r--r--drivers/leds/leds-88pm860x.c1
-rw-r--r--drivers/leds/leds-alix2.c239
-rw-r--r--drivers/leds/leds-asic3.c36
-rw-r--r--drivers/leds/leds-atmel-pwm.c1
-rw-r--r--drivers/leds/leds-cobalt-raq.c1
-rw-r--r--drivers/leds/leds-fsg.c1
-rw-r--r--drivers/leds/leds-gpio.c3
-rw-r--r--drivers/leds/leds-lm3530.c4
-rw-r--r--drivers/leds/leds-locomo.c1
-rw-r--r--drivers/leds/leds-lp5521.c24
-rw-r--r--drivers/leds/leds-lt3593.c1
-rw-r--r--drivers/leds/leds-mc13783.c60
-rw-r--r--drivers/leds/leds-net48xx.c1
-rw-r--r--drivers/leds/leds-net5501.c1
-rw-r--r--drivers/leds/leds-ns2.c1
-rw-r--r--drivers/leds/leds-renesas-tpu.c357
-rw-r--r--drivers/leds/leds-s3c24xx.c1
-rw-r--r--drivers/leds/leds-wm831x-status.c1
-rw-r--r--drivers/leds/leds-wm8350.c1
-rw-r--r--drivers/leds/leds-wrap.c1
-rw-r--r--drivers/lguest/core.c16
-rw-r--r--drivers/lguest/lguest_device.c1
-rw-r--r--drivers/lguest/lguest_user.c1
-rw-r--r--drivers/macintosh/via-macii.c2
-rw-r--r--drivers/macintosh/via-maciisi.c4
-rw-r--r--drivers/md/Kconfig36
-rw-r--r--drivers/md/Makefile4
-rw-r--r--drivers/md/dm-bufio.c1700
-rw-r--r--drivers/md/dm-bufio.h112
-rw-r--r--drivers/md/dm-exception-store.c1
-rw-r--r--drivers/md/dm-ioctl.c11
-rw-r--r--drivers/md/dm-kcopyd.c31
-rw-r--r--drivers/md/dm-log-userspace-base.c38
-rw-r--r--drivers/md/dm-path-selector.c1
-rw-r--r--drivers/md/dm-raid.c49
-rw-r--r--drivers/md/dm-round-robin.c1
-rw-r--r--drivers/md/dm-service-time.c1
-rw-r--r--drivers/md/dm-snap-persistent.c1
-rw-r--r--drivers/md/dm-snap-transient.c1
-rw-r--r--drivers/md/dm-table.c73
-rw-r--r--drivers/md/dm-thin-metadata.c1391
-rw-r--r--drivers/md/dm-thin-metadata.h156
-rw-r--r--drivers/md/dm-thin.c2428
-rw-r--r--drivers/md/dm-uevent.c1
-rw-r--r--drivers/md/dm.c46
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/faulty.c15
-rw-r--r--drivers/md/linear.c18
-rw-r--r--drivers/md/md.c13
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c9
-rw-r--r--drivers/md/persistent-data/Kconfig8
-rw-r--r--drivers/md/persistent-data/Makefile11
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c620
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h123
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h137
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c566
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c244
-rw-r--r--drivers/md/persistent-data/dm-btree.c805
-rw-r--r--drivers/md/persistent-data/dm-btree.h145
-rw-r--r--drivers/md/persistent-data/dm-persistent-data-internal.h19
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.c438
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.h26
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c705
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h126
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c335
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.h25
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c596
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.h33
-rw-r--r--drivers/md/persistent-data/dm-space-map.h134
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c400
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h130
-rw-r--r--drivers/md/raid0.c23
-rw-r--r--drivers/md/raid1.c10
-rw-r--r--drivers/md/raid10.c22
-rw-r--r--drivers/md/raid5.c25
-rw-r--r--drivers/media/common/saa7146_core.c75
-rw-r--r--drivers/media/common/saa7146_fops.c119
-rw-r--r--drivers/media/common/saa7146_hlp.c15
-rw-r--r--drivers/media/common/saa7146_i2c.c60
-rw-r--r--drivers/media/common/saa7146_vbi.c48
-rw-r--r--drivers/media/common/saa7146_video.c184
-rw-r--r--drivers/media/common/tuners/Makefile4
-rw-r--r--drivers/media/common/tuners/mt20xx.c24
-rw-r--r--drivers/media/common/tuners/mxl5005s.c22
-rw-r--r--drivers/media/common/tuners/tda18212.c31
-rw-r--r--drivers/media/common/tuners/tda18271-common.c32
-rw-r--r--drivers/media/common/tuners/tda18271-fe.c2
-rw-r--r--drivers/media/common/tuners/tda18271-priv.h39
-rw-r--r--drivers/media/common/tuners/tda827x.c8
-rw-r--r--drivers/media/common/tuners/tuner-types.c1
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c18
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h1
-rw-r--r--drivers/media/dvb/b2c2/Makefile4
-rw-r--r--drivers/media/dvb/bt8xx/Makefile8
-rw-r--r--drivers/media/dvb/ddbridge/Makefile8
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c43
-rw-r--r--drivers/media/dvb/dm1105/Makefile2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c95
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h1
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig28
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile16
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c4
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-fe.c2
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c5
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c70
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c337
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.h1
-rw-r--r--drivers/media/dvb/dvb-usb/au6610.c9
-rw-r--r--drivers/media/dvb/dvb-usb/az6027.c26
-rw-r--r--drivers/media/dvb/dvb-usb/ce6230.c9
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c5
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c142
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c99
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c377
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c27
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c31
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c3
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c16
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c14
-rw-r--r--drivers/media/dvb/dvb-usb/dtv5100.c11
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-dvb.c153
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-init.c41
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c28
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h37
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c115
-rw-r--r--drivers/media/dvb/dvb-usb/ec168.c9
-rw-r--r--drivers/media/dvb/dvb-usb/friio.c7
-rw-r--r--drivers/media/dvb/dvb-usb/gl861.c9
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk-fe.c17
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c5
-rw-r--r--drivers/media/dvb/dvb-usb/it913x.c702
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c60
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c58
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-demod.c614
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-demod.h55
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-gpio.c763
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-gpio.h56
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-i2c.c850
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-i2c.h35
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-phy.c343
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-phy.h53
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-reg.h179
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-tuner.c476
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-tuner.h89
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf.c1086
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf.h158
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c4
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c13
-rw-r--r--drivers/media/dvb/dvb-usb/pctv452e.c1079
-rw-r--r--drivers/media/dvb/dvb-usb/technisat-usb2.c28
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.c407
-rw-r--r--drivers/media/dvb/dvb-usb/umt-010.c8
-rw-r--r--drivers/media/dvb/dvb-usb/usb-urb.c4
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c5
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c5
-rw-r--r--drivers/media/dvb/frontends/Kconfig30
-rw-r--r--drivers/media/dvb/frontends/Makefile8
-rw-r--r--drivers/media/dvb/frontends/a8293.c184
-rw-r--r--drivers/media/dvb/frontends/a8293.h (renamed from drivers/media/common/tuners/tda18212_priv.h)39
-rw-r--r--drivers/media/dvb/frontends/cxd2820r.h9
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_c.c1
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c80
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_priv.h1
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_t.c1
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_t2.c1
-rw-r--r--drivers/media/dvb/frontends/dib0070.c37
-rw-r--r--drivers/media/dvb/frontends/dib0090.c70
-rw-r--r--drivers/media/dvb/frontends/dib7000m.c27
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c34
-rw-r--r--drivers/media/dvb/frontends/dib8000.c72
-rw-r--r--drivers/media/dvb/frontends/dib9000.c167
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.c77
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.h1
-rw-r--r--drivers/media/dvb/frontends/drxd_hard.c24
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c10
-rw-r--r--drivers/media/dvb/frontends/it913x-fe-priv.h336
-rw-r--r--drivers/media/dvb/frontends/it913x-fe.c839
-rw-r--r--drivers/media/dvb/frontends/it913x-fe.h196
-rw-r--r--drivers/media/dvb/frontends/lnbp22.c148
-rw-r--r--drivers/media/dvb/frontends/lnbp22.h57
-rw-r--r--drivers/media/dvb/frontends/stb0899_algo.c3
-rw-r--r--drivers/media/dvb/frontends/stb0899_drv.c6
-rw-r--r--drivers/media/dvb/frontends/stv0288.c29
-rw-r--r--drivers/media/dvb/frontends/stv090x.c35
-rw-r--r--drivers/media/dvb/frontends/tda10048.c37
-rw-r--r--drivers/media/dvb/frontends/tda10048.h8
-rw-r--r--drivers/media/dvb/frontends/tda10071.c1269
-rw-r--r--drivers/media/dvb/frontends/tda10071.h81
-rw-r--r--drivers/media/dvb/frontends/tda10071_priv.h122
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.c4
-rw-r--r--drivers/media/dvb/mantis/Makefile2
-rw-r--r--drivers/media/dvb/mantis/hopper_cards.c6
-rw-r--r--drivers/media/dvb/mantis/mantis_cards.c6
-rw-r--r--drivers/media/dvb/mantis/mantis_common.h5
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.c92
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c1
-rw-r--r--drivers/media/dvb/ngene/Makefile8
-rw-r--r--drivers/media/dvb/pluto2/Makefile2
-rw-r--r--drivers/media/dvb/pt1/Makefile2
-rw-r--r--drivers/media/dvb/siano/Makefile4
-rw-r--r--drivers/media/dvb/siano/sms-cards.c1
-rw-r--r--drivers/media/dvb/siano/smsendian.c1
-rw-r--r--drivers/media/dvb/siano/smssdio.c1
-rw-r--r--drivers/media/dvb/siano/smsusb.c1
-rw-r--r--drivers/media/dvb/ttpci/Makefile4
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c32
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c47
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c1
-rw-r--r--drivers/media/dvb/ttpci/budget-core.c2
-rw-r--r--drivers/media/dvb/ttpci/budget.h1
-rw-r--r--drivers/media/dvb/ttpci/ttpci-eeprom.c29
-rw-r--r--drivers/media/dvb/ttpci/ttpci-eeprom.h1
-rw-r--r--drivers/media/dvb/ttusb-budget/Makefile2
-rw-r--r--drivers/media/dvb/ttusb-dec/Makefile2
-rw-r--r--drivers/media/media-device.c1
-rw-r--r--drivers/media/radio/Kconfig1
-rw-r--r--drivers/media/radio/Makefile2
-rw-r--r--drivers/media/radio/radio-si4713.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c4
-rw-r--r--drivers/media/radio/radio-timb.c1
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/radio/si4713-i2c.c1
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c8
-rw-r--r--drivers/media/rc/Kconfig23
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c (renamed from drivers/input/misc/ati_remote.c)301
-rw-r--r--drivers/media/rc/ene_ir.c73
-rw-r--r--drivers/media/rc/ene_ir.h19
-rw-r--r--drivers/media/rc/imon.c36
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c1
-rw-r--r--drivers/media/rc/ir-lirc-codec.c10
-rw-r--r--drivers/media/rc/ir-nec-decoder.c1
-rw-r--r--drivers/media/rc/ir-raw.c2
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c1
-rw-r--r--drivers/media/rc/ir-rc5-sz-decoder.c1
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c1
-rw-r--r--drivers/media/rc/ir-sony-decoder.c1
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c1
-rw-r--r--drivers/media/rc/keymaps/rc-alink-dtu-m.c1
-rw-r--r--drivers/media/rc/keymaps/rc-anysee.c1
-rw-r--r--drivers/media/rc/keymaps/rc-apac-viewcomp.c1
-rw-r--r--drivers/media/rc/keymaps/rc-asus-pc39.c1
-rw-r--r--drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c1
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c104
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-a16d.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-cardbus.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-dvbt.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avertv-303.c1
-rw-r--r--drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c1
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c1
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c1
-rw-r--r--drivers/media/rc/keymaps/rc-budget-ci-old.c1
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy-1400.c1
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c1
-rw-r--r--drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c1
-rw-r--r--drivers/media/rc/keymaps/rc-digittrade.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dm1105-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c1
-rw-r--r--drivers/media/rc/keymaps/rc-em-terratec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c1
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv.c1
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-evga-indtube.c1
-rw-r--r--drivers/media/rc/keymaps/rc-eztv.c1
-rw-r--r--drivers/media/rc/keymaps/rc-flydvb.c1
-rw-r--r--drivers/media/rc/keymaps/rc-flyvideo.c1
-rw-r--r--drivers/media/rc/keymaps/rc-fusionhdtv-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-gadmei-rm008z.c1
-rw-r--r--drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-gotview7135.c1
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c1
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-imon-pad.c1
-rw-r--r--drivers/media/rc/keymaps/rc-iodata-bctv7e.c1
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c1
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-315u.c1
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c1
-rw-r--r--drivers/media/rc/keymaps/rc-leadtek-y04g0051.c1
-rw-r--r--drivers/media/rc/keymaps/rc-lirc.c1
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c1
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c1
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c117
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-ii.c1
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-iii.c1
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c1
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere.c1
-rw-r--r--drivers/media/rc/keymaps/rc-nebula.c1
-rw-r--r--drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c1
-rw-r--r--drivers/media/rc/keymaps/rc-norwood.c1
-rw-r--r--drivers/media/rc/keymaps/rc-npgtech.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pctv-sedna.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-color.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-grey.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c2
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-002t.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-mk12.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview.c1
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c1
-rw-r--r--drivers/media/rc/keymaps/rc-proteus-2309.c1
-rw-r--r--drivers/media/rc/keymaps/rc-purpletv.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pv951.c1
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c1
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c107
-rw-r--r--drivers/media/rc/keymaps/rc-streamzap.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tbs-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim-2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tevii-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c1
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand.c1
-rw-r--r--drivers/media/rc/keymaps/rc-trekstor.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tt-1500.c1
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c1
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-m1f.c1
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-s350.c1
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-tv-pvr.c1
-rw-r--r--drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c1
-rw-r--r--drivers/media/rc/keymaps/rc-winfast.c1
-rw-r--r--drivers/media/rc/mceusb.c410
-rw-r--r--drivers/media/rc/rc-core-priv.h14
-rw-r--r--drivers/media/rc/rc-main.c30
-rw-r--r--drivers/media/rc/redrat3.c7
-rw-r--r--drivers/media/rc/winbond-cir.c6
-rw-r--r--drivers/media/video/Kconfig61
-rw-r--r--drivers/media/video/Makefile11
-rw-r--r--drivers/media/video/adp1653.c21
-rw-r--r--drivers/media/video/adv7175.c62
-rw-r--r--drivers/media/video/ak881x.c1
-rw-r--r--drivers/media/video/atmel-isi.c164
-rw-r--r--drivers/media/video/au0828/Makefile8
-rw-r--r--drivers/media/video/bt819.c2
-rw-r--r--drivers/media/video/bt8xx/Makefile6
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c242
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c294
-rw-r--r--drivers/media/video/bt8xx/bttv-gpio.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c56
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c37
-rw-r--r--drivers/media/video/bt8xx/bttv-risc.c25
-rw-r--r--drivers/media/video/bt8xx/bttv-vbi.c9
-rw-r--r--drivers/media/video/bt8xx/bttvp.h18
-rw-r--r--drivers/media/video/cpia2/cpia2_usb.c1
-rw-r--r--drivers/media/video/cx18/Makefile6
-rw-r--r--drivers/media/video/cx18/cx18-driver.c2
-rw-r--r--drivers/media/video/cx18/cx18-driver.h5
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c18
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/video/cx18/cx18-streams.c13
-rw-r--r--drivers/media/video/cx231xx/Makefile10
-rw-r--r--drivers/media/video/cx23885/Kconfig2
-rw-r--r--drivers/media/video/cx23885/Makefile12
-rw-r--r--drivers/media/video/cx23885/cx23885-alsa.c535
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c55
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c99
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-reg.h3
-rw-r--r--drivers/media/video/cx23885/cx23885-vbi.c72
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c358
-rw-r--r--drivers/media/video/cx23885/cx23885.h56
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c12
-rw-r--r--drivers/media/video/cx25821/Kconfig (renamed from drivers/staging/cx25821/Kconfig)0
-rw-r--r--drivers/media/video/cx25821/Makefile (renamed from drivers/staging/cx25821/Makefile)0
-rw-r--r--drivers/media/video/cx25821/cx25821-alsa.c (renamed from drivers/staging/cx25821/cx25821-alsa.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-audio-upstream.c (renamed from drivers/staging/cx25821/cx25821-audio-upstream.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-audio-upstream.h (renamed from drivers/staging/cx25821/cx25821-audio-upstream.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-audio.h (renamed from drivers/staging/cx25821/cx25821-audio.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-biffuncs.h (renamed from drivers/staging/cx25821/cx25821-biffuncs.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-cards.c (renamed from drivers/staging/cx25821/cx25821-cards.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-core.c (renamed from drivers/staging/cx25821/cx25821-core.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-gpio.c (renamed from drivers/staging/cx25821/cx25821-gpio.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-i2c.c (renamed from drivers/staging/cx25821/cx25821-i2c.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-defines.h (renamed from drivers/staging/cx25821/cx25821-medusa-defines.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-reg.h (renamed from drivers/staging/cx25821/cx25821-medusa-reg.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-video.c (renamed from drivers/staging/cx25821/cx25821-medusa-video.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-video.h (renamed from drivers/staging/cx25821/cx25821-medusa-video.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-reg.h (renamed from drivers/staging/cx25821/cx25821-reg.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-sram.h (renamed from drivers/staging/cx25821/cx25821-sram.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream-ch2.c (renamed from drivers/staging/cx25821/cx25821-video-upstream-ch2.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream-ch2.h (renamed from drivers/staging/cx25821/cx25821-video-upstream-ch2.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream.c (renamed from drivers/staging/cx25821/cx25821-video-upstream.c)0
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream.h (renamed from drivers/staging/cx25821/cx25821-video-upstream.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821-video.c (renamed from drivers/staging/cx25821/cx25821-video.c)2
-rw-r--r--drivers/media/video/cx25821/cx25821-video.h (renamed from drivers/staging/cx25821/cx25821-video.h)0
-rw-r--r--drivers/media/video/cx25821/cx25821.h (renamed from drivers/staging/cx25821/cx25821.h)3
-rw-r--r--drivers/media/video/cx25840/Makefile2
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c10
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c19
-rw-r--r--drivers/media/video/cx25840/cx25840-ir.c13
-rw-r--r--drivers/media/video/cx88/Makefile8
-rw-r--r--drivers/media/video/cx88/cx88-core.c3
-rw-r--r--drivers/media/video/cx88/cx88-video.c2
-rw-r--r--drivers/media/video/davinci/dm355_ccdc.c1
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc.c1
-rw-r--r--drivers/media/video/davinci/vpbe_display.c1
-rw-r--r--drivers/media/video/davinci/vpbe_osd.c2
-rw-r--r--drivers/media/video/em28xx/Kconfig2
-rw-r--r--drivers/media/video/em28xx/Makefile8
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c157
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c45
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c117
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c58
-rw-r--r--drivers/media/video/em28xx/em28xx.h3
-rw-r--r--drivers/media/video/et61x251/et61x251.h66
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c2
-rw-r--r--drivers/media/video/et61x251/et61x251_tas5130d1b.c2
-rw-r--r--drivers/media/video/gspca/Kconfig10
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/benq.c31
-rw-r--r--drivers/media/video/gspca/conex.c6
-rw-r--r--drivers/media/video/gspca/cpia1.c7
-rw-r--r--drivers/media/video/gspca/etoms.c6
-rw-r--r--drivers/media/video/gspca/finepix.c8
-rw-r--r--drivers/media/video/gspca/gl860/Makefile2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c8
-rw-r--r--drivers/media/video/gspca/gspca.c287
-rw-r--r--drivers/media/video/gspca/gspca.h22
-rw-r--r--drivers/media/video/gspca/jeilinj.c20
-rw-r--r--drivers/media/video/gspca/kinect.c41
-rw-r--r--drivers/media/video/gspca/konica.c16
-rw-r--r--drivers/media/video/gspca/m5602/Makefile2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c9
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.c28
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.c21
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.c19
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.c21
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c35
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.c30
-rw-r--r--drivers/media/video/gspca/mars.c6
-rw-r--r--drivers/media/video/gspca/mr97310a.c24
-rw-r--r--drivers/media/video/gspca/nw80x.c9
-rw-r--r--drivers/media/video/gspca/ov519.c41
-rw-r--r--drivers/media/video/gspca/ov534.c12
-rw-r--r--drivers/media/video/gspca/ov534_9.c516
-rw-r--r--drivers/media/video/gspca/pac207.c14
-rw-r--r--drivers/media/video/gspca/pac7302.c15
-rw-r--r--drivers/media/video/gspca/pac7311.c15
-rw-r--r--drivers/media/video/gspca/se401.c46
-rw-r--r--drivers/media/video/gspca/sn9c2028.c14
-rw-r--r--drivers/media/video/gspca/sn9c20x.c76
-rw-r--r--drivers/media/video/gspca/sonixj.c45
-rw-r--r--drivers/media/video/gspca/spca1528.c34
-rw-r--r--drivers/media/video/gspca/spca500.c6
-rw-r--r--drivers/media/video/gspca/spca501.c4
-rw-r--r--drivers/media/video/gspca/spca505.c8
-rw-r--r--drivers/media/video/gspca/spca508.c6
-rw-r--r--drivers/media/video/gspca/spca561.c4
-rw-r--r--drivers/media/video/gspca/sq905.c17
-rw-r--r--drivers/media/video/gspca/sq905c.c10
-rw-r--r--drivers/media/video/gspca/sq930x.c21
-rw-r--r--drivers/media/video/gspca/stk014.c16
-rw-r--r--drivers/media/video/gspca/stv0680.c6
-rw-r--r--drivers/media/video/gspca/stv06xx/Makefile2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c18
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h6
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c10
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c4
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.c4
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c32
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h56
-rw-r--r--drivers/media/video/gspca/sunplus.c10
-rw-r--r--drivers/media/video/gspca/t613.c12
-rw-r--r--drivers/media/video/gspca/topro.c4989
-rw-r--r--drivers/media/video/gspca/vc032x.c13
-rw-r--r--drivers/media/video/gspca/vicam.c12
-rw-r--r--drivers/media/video/gspca/w996Xcf.c8
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c14
-rw-r--r--drivers/media/video/gspca/zc3xx.c15
-rw-r--r--drivers/media/video/hdpvr/Makefile4
-rw-r--r--drivers/media/video/hdpvr/hdpvr-i2c.c1
-rw-r--r--drivers/media/video/hexium_gemini.c45
-rw-r--r--drivers/media/video/hexium_orion.c39
-rw-r--r--drivers/media/video/imx074.c55
-rw-r--r--drivers/media/video/ivtv/Makefile8
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c15
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c7
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c15
-rw-r--r--drivers/media/video/marvell-ccic/mmp-driver.c1
-rw-r--r--drivers/media/video/mem2mem_testdev.c21
-rw-r--r--drivers/media/video/msp3400-driver.c20
-rw-r--r--drivers/media/video/msp3400-driver.h2
-rw-r--r--drivers/media/video/msp3400-kthreads.c86
-rw-r--r--drivers/media/video/mt9m001.c329
-rw-r--r--drivers/media/video/mt9m111.c266
-rw-r--r--drivers/media/video/mt9p031.c964
-rw-r--r--drivers/media/video/mt9t001.c836
-rw-r--r--drivers/media/video/mt9t031.c348
-rw-r--r--drivers/media/video/mt9t112.c269
-rw-r--r--drivers/media/video/mt9v011.c1
-rw-r--r--drivers/media/video/mt9v022.c448
-rw-r--r--drivers/media/video/mt9v032.c1
-rw-r--r--drivers/media/video/mx1_camera.c71
-rw-r--r--drivers/media/video/mx2_camera.c78
-rw-r--r--drivers/media/video/mx3_camera.c361
-rw-r--r--drivers/media/video/mxb.c81
-rw-r--r--drivers/media/video/noon010pc30.c264
-rw-r--r--drivers/media/video/omap/omap_vout.c28
-rw-r--r--drivers/media/video/omap1_camera.c62
-rw-r--r--drivers/media/video/omap24xxcam.c1
-rw-r--r--drivers/media/video/omap3isp/Makefile4
-rw-r--r--drivers/media/video/omap3isp/isp.c54
-rw-r--r--drivers/media/video/omap3isp/isp.h90
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c122
-rw-r--r--drivers/media/video/omap3isp/ispccp2.c129
-rw-r--r--drivers/media/video/omap3isp/ispcsi2.c91
-rw-r--r--drivers/media/video/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/video/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/video/omap3isp/isphist.c2
-rw-r--r--drivers/media/video/omap3isp/isppreview.c419
-rw-r--r--drivers/media/video/omap3isp/isppreview.h9
-rw-r--r--drivers/media/video/omap3isp/ispqueue.c4
-rw-r--r--drivers/media/video/omap3isp/ispreg.h3
-rw-r--r--drivers/media/video/omap3isp/ispresizer.c104
-rw-r--r--drivers/media/video/omap3isp/ispstat.c63
-rw-r--r--drivers/media/video/omap3isp/ispstat.h2
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c37
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h1
-rw-r--r--drivers/media/video/ov2640.c178
-rw-r--r--drivers/media/video/ov5642.c288
-rw-r--r--drivers/media/video/ov6650.c505
-rw-r--r--drivers/media/video/ov772x.c198
-rw-r--r--drivers/media/video/ov9640.c186
-rw-r--r--drivers/media/video/ov9640.h4
-rw-r--r--drivers/media/video/ov9740.c151
-rw-r--r--drivers/media/video/pvrusb2/Makefile8
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.c1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c8
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h3
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/video/pwc/pwc-if.c8
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c136
-rw-r--r--drivers/media/video/pxa_camera.c140
-rw-r--r--drivers/media/video/rj54n1cb0c.c224
-rw-r--r--drivers/media/video/s5k6aa.c1681
-rw-r--r--drivers/media/video/s5p-fimc/Makefile2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c1466
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c1138
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h221
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c858
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.h118
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c90
-rw-r--r--drivers/media/video/s5p-fimc/mipi-csis.c90
-rw-r--r--drivers/media/video/s5p-fimc/regs-fimc.h9
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c15
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c27
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c43
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.c14
-rw-r--r--drivers/media/video/s5p-tv/Kconfig2
-rw-r--r--drivers/media/video/s5p-tv/hdmi_drv.c15
-rw-r--r--drivers/media/video/s5p-tv/mixer.h2
-rw-r--r--drivers/media/video/s5p-tv/mixer_grp_layer.c2
-rw-r--r--drivers/media/video/s5p-tv/mixer_reg.c11
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c26
-rw-r--r--drivers/media/video/s5p-tv/mixer_vp_layer.c4
-rw-r--r--drivers/media/video/s5p-tv/regs-hdmi.h4
-rw-r--r--drivers/media/video/s5p-tv/regs-mixer.h1
-rw-r--r--drivers/media/video/s5p-tv/sdo_drv.c1
-rw-r--r--drivers/media/video/saa7115.c53
-rw-r--r--drivers/media/video/saa7134/Makefile8
-rw-r--r--drivers/media/video/saa7134/saa7134.h12
-rw-r--r--drivers/media/video/saa7164/Makefile10
-rw-r--r--drivers/media/video/saa7164/saa7164-cards.c128
-rw-r--r--drivers/media/video/saa7164/saa7164-dvb.c2
-rw-r--r--drivers/media/video/saa7164/saa7164.h2
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c497
-rw-r--r--drivers/media/video/sh_mobile_csi2.c133
-rw-r--r--drivers/media/video/sh_vou.c1
-rw-r--r--drivers/media/video/soc_camera.c273
-rw-r--r--drivers/media/video/soc_camera_platform.c45
-rw-r--r--drivers/media/video/soc_mediabus.c33
-rw-r--r--drivers/media/video/sr030pc30.c7
-rw-r--r--drivers/media/video/stk-webcam.c29
-rw-r--r--drivers/media/video/tcm825x.c1
-rw-r--r--drivers/media/video/timblogiw.c1
-rw-r--r--drivers/media/video/tlg2300/Makefile8
-rw-r--r--drivers/media/video/tm6000/Kconfig (renamed from drivers/staging/tm6000/Kconfig)0
-rw-r--r--drivers/media/video/tm6000/Makefile (renamed from drivers/staging/tm6000/Makefile)0
-rw-r--r--drivers/media/video/tm6000/tm6000-alsa.c (renamed from drivers/staging/tm6000/tm6000-alsa.c)9
-rw-r--r--drivers/media/video/tm6000/tm6000-cards.c (renamed from drivers/staging/tm6000/tm6000-cards.c)44
-rw-r--r--drivers/media/video/tm6000/tm6000-core.c (renamed from drivers/staging/tm6000/tm6000-core.c)108
-rw-r--r--drivers/media/video/tm6000/tm6000-dvb.c (renamed from drivers/staging/tm6000/tm6000-dvb.c)18
-rw-r--r--drivers/media/video/tm6000/tm6000-i2c.c (renamed from drivers/staging/tm6000/tm6000-i2c.c)21
-rw-r--r--drivers/media/video/tm6000/tm6000-input.c (renamed from drivers/staging/tm6000/tm6000-input.c)2
-rw-r--r--drivers/media/video/tm6000/tm6000-regs.h (renamed from drivers/staging/tm6000/tm6000-regs.h)6
-rw-r--r--drivers/media/video/tm6000/tm6000-stds.c659
-rw-r--r--drivers/media/video/tm6000/tm6000-usb-isoc.h (renamed from drivers/staging/tm6000/tm6000-usb-isoc.h)2
-rw-r--r--drivers/media/video/tm6000/tm6000-video.c (renamed from drivers/staging/tm6000/tm6000-video.c)122
-rw-r--r--drivers/media/video/tm6000/tm6000.h (renamed from drivers/staging/tm6000/tm6000.h)15
-rw-r--r--drivers/media/video/tvaudio.c9
-rw-r--r--drivers/media/video/tvp514x.c1
-rw-r--r--drivers/media/video/tvp5150.c1
-rw-r--r--drivers/media/video/tvp5150_reg.h17
-rw-r--r--drivers/media/video/tvp7002.c15
-rw-r--r--drivers/media/video/tw9910.c268
-rw-r--r--drivers/media/video/usbvision/Makefile4
-rw-r--r--drivers/media/video/usbvision/usbvision-cards.c1
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c6
-rw-r--r--drivers/media/video/uvc/uvc_driver.c13
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c54
-rw-r--r--drivers/media/video/uvc/uvc_video.c17
-rw-r--r--drivers/media/video/uvc/uvcvideo.h104
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c76
-rw-r--r--drivers/media/video/v4l2-ctrls.c111
-rw-r--r--drivers/media/video/v4l2-device.c37
-rw-r--r--drivers/media/video/v4l2-event.c11
-rw-r--r--drivers/media/video/v4l2-fh.c1
-rw-r--r--drivers/media/video/v4l2-int-device.c1
-rw-r--r--drivers/media/video/v4l2-ioctl.c559
-rw-r--r--drivers/media/video/v4l2-mem2mem.c18
-rw-r--r--drivers/media/video/v4l2-subdev.c20
-rw-r--r--drivers/media/video/videobuf2-core.c596
-rw-r--r--drivers/media/video/videobuf2-dma-contig.c16
-rw-r--r--drivers/media/video/videobuf2-dma-sg.c6
-rw-r--r--drivers/media/video/videobuf2-memops.c6
-rw-r--r--drivers/media/video/vivi.c27
-rw-r--r--drivers/media/video/vpx3220.c2
-rw-r--r--drivers/media/video/zr364xx.c3
-rw-r--r--drivers/memstick/core/memstick.c1
-rw-r--r--drivers/memstick/core/mspro_block.c1
-rw-r--r--drivers/memstick/host/jmb38x_ms.c1
-rw-r--r--drivers/memstick/host/tifm_ms.c1
-rw-r--r--drivers/message/fusion/mptbase.c92
-rw-r--r--drivers/message/fusion/mptbase.h72
-rw-r--r--drivers/message/fusion/mptsas.c47
-rw-r--r--drivers/message/fusion/mptscsih.c18
-rw-r--r--drivers/message/fusion/mptscsih.h1
-rw-r--r--drivers/message/i2o/pci.c1
-rw-r--r--drivers/mfd/Kconfig64
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/aat2870-core.c2
-rw-r--r--drivers/mfd/ab3100-core.c3
-rw-r--r--drivers/mfd/ab3550-core.c1380
-rw-r--r--drivers/mfd/ab5500-core.c1440
-rw-r--r--drivers/mfd/ab5500-core.h87
-rw-r--r--drivers/mfd/ab5500-debugfs.c807
-rw-r--r--drivers/mfd/ab5500-debugfs.h22
-rw-r--r--drivers/mfd/ab8500-core.c33
-rw-r--r--drivers/mfd/ab8500-debugfs.c1
-rw-r--r--drivers/mfd/ab8500-gpadc.c56
-rw-r--r--drivers/mfd/ab8500-sysctrl.c1
-rw-r--r--drivers/mfd/abx500-core.c1
-rw-r--r--drivers/mfd/asic3.c27
-rw-r--r--drivers/mfd/da903x.c2
-rw-r--r--drivers/mfd/db5500-prcmu.c29
-rw-r--r--drivers/mfd/db8500-prcmu-regs.h166
-rw-r--r--drivers/mfd/db8500-prcmu.c629
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h (renamed from drivers/mfd/db5500-prcmu-regs.h)105
-rw-r--r--drivers/mfd/dm355evm_msp.c1
-rw-r--r--drivers/mfd/intel_msic.c502
-rw-r--r--drivers/mfd/jz4740-adc.c2
-rw-r--r--drivers/mfd/max8997.c28
-rw-r--r--drivers/mfd/mc13xxx-core.c116
-rw-r--r--drivers/mfd/menelaus.c4
-rw-r--r--drivers/mfd/mfd-core.c1
-rw-r--r--drivers/mfd/pcf50633-core.c114
-rw-r--r--drivers/mfd/pcf50633-irq.c1
-rw-r--r--drivers/mfd/tc3589x.c2
-rw-r--r--drivers/mfd/timberdale.c9
-rw-r--r--drivers/mfd/tmio_core.c1
-rw-r--r--drivers/mfd/tps65912-core.c6
-rw-r--r--drivers/mfd/twl-core.c3
-rw-r--r--drivers/mfd/twl4030-irq.c342
-rw-r--r--drivers/mfd/twl4030-madc.c22
-rw-r--r--drivers/mfd/twl6030-irq.c76
-rw-r--r--drivers/mfd/twl6040-core.c67
-rw-r--r--drivers/mfd/wl1273-core.c1
-rw-r--r--drivers/mfd/wm831x-irq.c24
-rw-r--r--drivers/mfd/wm8400-core.c1
-rw-r--r--drivers/mfd/wm8994-core.c80
-rw-r--r--drivers/misc/Kconfig3
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ab8500-pwm.c1
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c1
-rw-r--r--drivers/misc/altera-stapl/Kconfig (renamed from drivers/staging/altera-stapl/Kconfig)2
-rw-r--r--drivers/misc/altera-stapl/Makefile3
-rw-r--r--drivers/misc/altera-stapl/altera-comp.c (renamed from drivers/staging/altera-stapl/altera-comp.c)0
-rw-r--r--drivers/misc/altera-stapl/altera-exprt.h (renamed from drivers/staging/altera-stapl/altera-exprt.h)0
-rw-r--r--drivers/misc/altera-stapl/altera-jtag.c (renamed from drivers/staging/altera-stapl/altera-jtag.c)2
-rw-r--r--drivers/misc/altera-stapl/altera-jtag.h (renamed from drivers/staging/altera-stapl/altera-jtag.h)0
-rw-r--r--drivers/misc/altera-stapl/altera-lpt.c (renamed from drivers/staging/altera-stapl/altera-lpt.c)0
-rw-r--r--drivers/misc/altera-stapl/altera.c (renamed from drivers/staging/altera-stapl/altera.c)2
-rw-r--r--drivers/misc/atmel-ssc.c1
-rw-r--r--drivers/misc/atmel_tclib.c1
-rw-r--r--drivers/misc/bh1780gli.c1
-rw-r--r--drivers/misc/fsa9480.c3
-rw-r--r--drivers/misc/kgdbts.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c389
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h9
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c45
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c2
-rw-r--r--drivers/misc/sgi-gru/grukservices.c1
-rw-r--r--drivers/misc/ti-st/st_kim.c1
-rw-r--r--drivers/misc/tifm_7xx1.c1
-rw-r--r--drivers/misc/tifm_core.c1
-rw-r--r--drivers/misc/vmw_balloon.c4
-rw-r--r--drivers/mmc/card/block.c310
-rw-r--r--drivers/mmc/card/mmc_test.c65
-rw-r--r--drivers/mmc/card/queue.c8
-rw-r--r--drivers/mmc/card/sdio_uart.c10
-rw-r--r--drivers/mmc/core/bus.c8
-rw-r--r--drivers/mmc/core/core.c426
-rw-r--r--drivers/mmc/core/core.h1
-rw-r--r--drivers/mmc/core/debugfs.c29
-rw-r--r--drivers/mmc/core/host.c12
-rw-r--r--drivers/mmc/core/mmc.c288
-rw-r--r--drivers/mmc/core/mmc_ops.c40
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/quirks.c12
-rw-r--r--drivers/mmc/core/sd.c50
-rw-r--r--drivers/mmc/core/sd_ops.c9
-rw-r--r--drivers/mmc/core/sdio.c11
-rw-r--r--drivers/mmc/core/sdio_bus.c3
-rw-r--r--drivers/mmc/core/sdio_cis.c4
-rw-r--r--drivers/mmc/core/sdio_io.c1
-rw-r--r--drivers/mmc/core/sdio_irq.c7
-rw-r--r--drivers/mmc/core/sdio_ops.c9
-rw-r--r--drivers/mmc/host/Kconfig18
-rw-r--r--drivers/mmc/host/at91_mci.c6
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h220
-rw-r--r--drivers/mmc/host/atmel-mci.c823
-rw-r--r--drivers/mmc/host/au1xmmc.c98
-rw-r--r--drivers/mmc/host/davinci_mmc.c13
-rw-r--r--drivers/mmc/host/dw_mmc.c104
-rw-r--r--drivers/mmc/host/dw_mmc.h15
-rw-r--r--drivers/mmc/host/imxmmc.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mmc/host/mmci.c25
-rw-r--r--drivers/mmc/host/msm_sdcc.c86
-rw-r--r--drivers/mmc/host/msm_sdcc.h6
-rw-r--r--drivers/mmc/host/mvsdio.c14
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/mmc/host/mxs-mmc.c1
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c32
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/s3cmci.c10
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c88
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c19
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c1
-rw-r--r--drivers/mmc/host/sdhci-pci.c268
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c5
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci-s3c.c34
-rw-r--r--drivers/mmc/host/sdhci-spear.c3
-rw-r--r--drivers/mmc/host/sdhci-tegra.c60
-rw-r--r--drivers/mmc/host/sdhci.c358
-rw-r--r--drivers/mmc/host/sdhci.h7
-rw-r--r--drivers/mmc/host/sdricoh_cs.c1
-rw-r--r--drivers/mmc/host/sh_mmcif.c21
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c99
-rw-r--r--drivers/mmc/host/tifm_sd.c19
-rw-r--r--drivers/mmc/host/tmio_mmc.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.h7
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c151
-rw-r--r--drivers/mmc/host/via-sdmmc.c3
-rw-r--r--drivers/mmc/host/wbsd.c22
-rw-r--r--drivers/mtd/Kconfig21
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/afs.c4
-rw-r--r--drivers/mtd/ar7part.c3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c31
-rw-r--r--drivers/mtd/chips/fwh_lock.h3
-rw-r--r--drivers/mtd/chips/jedec_probe.c34
-rw-r--r--drivers/mtd/cmdlinepart.c8
-rw-r--r--drivers/mtd/devices/Kconfig13
-rw-r--r--drivers/mtd/devices/Makefile3
-rw-r--r--drivers/mtd/devices/doc2000.c17
-rw-r--r--drivers/mtd/devices/doc2001.c11
-rw-r--r--drivers/mtd/devices/doc2001plus.c11
-rw-r--r--drivers/mtd/devices/docecc.c2
-rw-r--r--drivers/mtd/devices/docg3.c1114
-rw-r--r--drivers/mtd/devices/docg3.h297
-rw-r--r--drivers/mtd/devices/docprobe.c5
-rw-r--r--drivers/mtd/devices/lart.c18
-rw-r--r--drivers/mtd/devices/m25p80.c92
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c91
-rw-r--r--drivers/mtd/devices/sst25l.c42
-rw-r--r--drivers/mtd/ftl.c40
-rw-r--r--drivers/mtd/inftlcore.c69
-rw-r--r--drivers/mtd/inftlmount.c116
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c1
-rw-r--r--drivers/mtd/maps/Kconfig26
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c1
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c16
-rw-r--r--drivers/mtd/maps/ceiva.c341
-rw-r--r--drivers/mtd/maps/dc21285.c9
-rw-r--r--drivers/mtd/maps/edb7312.c134
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c16
-rw-r--r--drivers/mtd/maps/h720x-flash.c23
-rw-r--r--drivers/mtd/maps/impa7.c28
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c7
-rw-r--r--drivers/mtd/maps/ixp2000.c11
-rw-r--r--drivers/mtd/maps/ixp4xx.c29
-rw-r--r--drivers/mtd/maps/lantiq-flash.c19
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c24
-rw-r--r--drivers/mtd/maps/pcmciamtd.c124
-rw-r--r--drivers/mtd/maps/physmap.c38
-rw-r--r--drivers/mtd/maps/physmap_of.c80
-rw-r--r--drivers/mtd/maps/plat-ram.c23
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c20
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c24
-rw-r--r--drivers/mtd/maps/sa1100-flash.c30
-rw-r--r--drivers/mtd/maps/solutionengine.c30
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c33
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdblock.c18
-rw-r--r--drivers/mtd/mtdblock_ro.c1
-rw-r--r--drivers/mtd/mtdchar.c164
-rw-r--r--drivers/mtd/mtdconcat.c10
-rw-r--r--drivers/mtd/mtdcore.c70
-rw-r--r--drivers/mtd/mtdcore.h3
-rw-r--r--drivers/mtd/mtdoops.c2
-rw-r--r--drivers/mtd/mtdpart.c62
-rw-r--r--drivers/mtd/mtdsuper.c21
-rw-r--r--drivers/mtd/mtdswap.c31
-rw-r--r--drivers/mtd/nand/Kconfig31
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/ams-delta.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c74
-rw-r--r--drivers/mtd/nand/au1550nd.c35
-rw-r--r--drivers/mtd/nand/autcpu12.c4
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c57
-rw-r--r--drivers/mtd/nand/cafe_nand.c22
-rw-r--r--drivers/mtd/nand/cmx270_nand.c24
-rw-r--r--drivers/mtd/nand/cs553x_nand.c15
-rw-r--r--drivers/mtd/nand/davinci_nand.c39
-rw-r--r--drivers/mtd/nand/denali.c6
-rw-r--r--drivers/mtd/nand/diskonchip.c9
-rw-r--r--drivers/mtd/nand/edb7312.c203
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c75
-rw-r--r--drivers/mtd/nand/fsl_upm.c16
-rw-r--r--drivers/mtd/nand/fsmc_nand.c77
-rw-r--r--drivers/mtd/nand/gpmi-nand/Makefile3
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h84
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c1057
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c1619
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h273
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-regs.h172
-rw-r--r--drivers/mtd/nand/h1910.c19
-rw-r--r--drivers/mtd/nand/jz4740_nand.c18
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c22
-rw-r--r--drivers/mtd/nand/mxc_nand.c37
-rw-r--r--drivers/mtd/nand/nand_base.c1109
-rw-r--r--drivers/mtd/nand/nand_bbt.c693
-rw-r--r--drivers/mtd/nand/nand_bch.c4
-rw-r--r--drivers/mtd/nand/nand_ecc.c10
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/ndfc.c22
-rw-r--r--drivers/mtd/nand/nomadik_nand.c1
-rw-r--r--drivers/mtd/nand/nuc900_nand.c1
-rw-r--r--drivers/mtd/nand/omap2.c23
-rw-r--r--drivers/mtd/nand/orion_nand.c16
-rw-r--r--drivers/mtd/nand/pasemi_nand.c3
-rw-r--r--drivers/mtd/nand/plat_nand.c25
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c47
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c471
-rw-r--r--drivers/mtd/nand/r852.c6
-rw-r--r--drivers/mtd/nand/rtc_from4.c5
-rw-r--r--drivers/mtd/nand/s3c2410.c27
-rw-r--r--drivers/mtd/nand/sharpsl.c13
-rw-r--r--drivers/mtd/nand/sm_common.c3
-rw-r--r--drivers/mtd/nand/socrates_nand.c28
-rw-r--r--drivers/mtd/nand/tmio_nand.c17
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c8
-rw-r--r--drivers/mtd/nftlcore.c37
-rw-r--r--drivers/mtd/nftlmount.c26
-rw-r--r--drivers/mtd/ofpart.c112
-rw-r--r--drivers/mtd/onenand/generic.c14
-rw-r--r--drivers/mtd/onenand/omap2.c18
-rw-r--r--drivers/mtd/onenand/onenand_base.c114
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c9
-rw-r--r--drivers/mtd/onenand/samsung.c13
-rw-r--r--drivers/mtd/redboot.c17
-rw-r--r--drivers/mtd/rfd_ftl.c1
-rw-r--r--drivers/mtd/sm_ftl.c26
-rw-r--r--drivers/mtd/ssfdc.c46
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c33
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c37
-rw-r--r--drivers/mtd/tests/mtd_readtest.c13
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c17
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c11
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c17
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c11
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/io.c24
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/scan.c4
-rw-r--r--drivers/mtd/ubi/vmt.c1
-rw-r--r--drivers/mtd/ubi/vtbl.c2
-rw-r--r--drivers/net/bonding/bond_main.c37
-rw-r--r--drivers/net/bonding/bond_procfs.c13
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c48
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h2
-rw-r--r--drivers/net/ethernet/apple/Kconfig12
-rw-r--r--drivers/net/ethernet/apple/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c195
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h21
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig14
-rw-r--r--drivers/net/ethernet/cirrus/Makefile1
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c (renamed from drivers/net/ethernet/apple/mac89x0.c)0
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c67
-rw-r--r--drivers/net/ethernet/ethoc.c1
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/i825xx/3c505.c6
-rw-r--r--drivers/net/ethernet/intel/Kconfig6
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c21
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c88
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c1
-rw-r--r--drivers/net/ethernet/realtek/Kconfig12
-rw-r--r--drivers/net/ethernet/realtek/Makefile1
-rw-r--r--drivers/net/ethernet/sfc/efx.c10
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c3
-rw-r--r--drivers/net/ethernet/sfc/rx.c1
-rw-r--r--drivers/net/ethernet/silan/Kconfig33
-rw-r--r--drivers/net/ethernet/silan/Makefile5
-rw-r--r--drivers/net/ethernet/silan/sc92031.c (renamed from drivers/net/ethernet/realtek/sc92031.c)0
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig9
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h6
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c1
-rw-r--r--drivers/net/irda/Kconfig2
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/realtek.c1
-rw-r--r--drivers/net/rionet.c4
-rw-r--r--drivers/net/usb/lg-vl600.c1
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c1
-rw-r--r--drivers/net/wimax/i2400m/fw.c1
-rw-r--r--drivers/net/wimax/i2400m/netdev.c1
-rw-r--r--drivers/net/wimax/i2400m/rx.c2
-rw-r--r--drivers/net/wimax/i2400m/sdio.c1
-rw-r--r--drivers/net/wimax/i2400m/tx.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath.h11
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c91
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h569
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c287
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c27
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c217
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h124
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c370
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c81
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c75
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c222
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c853
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c143
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c230
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h59
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h22
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig22
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c1464
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h102
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h31
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c215
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h57
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c221
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h213
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c220
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c427
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h306
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c251
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/ath/debug.c1
-rw-r--r--drivers/net/wireless/ath/hw.c1
-rw-r--r--drivers/net/wireless/ath/key.c1
-rw-r--r--drivers/net/wireless/ath/regd.c78
-rw-r--r--drivers/net/wireless/b43/b43.h12
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43/pcmcia.c1
-rw-r--r--drivers/net/wireless/b43/phy_n.c312
-rw-r--r--drivers/net/wireless/b43/phy_n.h8
-rw-r--r--drivers/net/wireless/b43/radio_2056.c25
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c77
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h31
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h16
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h25
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c146
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c214
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c85
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c322
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c74
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c1239
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h148
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c289
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c208
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c749
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h10
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c76
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c231
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c69
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c84
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c270
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c50
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h54
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c1
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c505
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c3977
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c995
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2751
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h626
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.c)613
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c746
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6536
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2860
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2421
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h1309
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig43
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile24
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h (renamed from drivers/net/wireless/iwlegacy/iwl-commands.h)1134
-rw-r--r--drivers/net/wireless/iwlegacy/common.c5706
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3424
-rw-r--r--drivers/net/wireless/iwlegacy/csr.h (renamed from drivers/net/wireless/iwlegacy/iwl-csr.h)93
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c1411
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h187
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h291
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c63
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h32
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c996
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c2741
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h308
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h75
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h811
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c73
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1194
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2871
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c215
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1378
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2183
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2661
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1313
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1364
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c42
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h210
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c553
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h196
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c205
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c281
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c549
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c541
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c658
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4016
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3281
-rw-r--r--drivers/net/wireless/iwlegacy/prph.h (renamed from drivers/net/wireless/iwlegacy/iwl-prph.h)133
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c72
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h99
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c (renamed from drivers/net/wireless/iwlwifi/iwl-sv-open.c)251
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-ucode.c)229
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c1
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas/cmd.c1
-rw-r--r--drivers/net/wireless/libertas/debugfs.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas/main.c2
-rw-r--r--drivers/net/wireless/libertas/rx.c1
-rw-r--r--drivers/net/wireless/libertas/tx.c1
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c1
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c22
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c197
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h1
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c9
-rw-r--r--drivers/net/wireless/mwifiex/init.c40
-rw-r--r--drivers/net/wireless/mwifiex/main.c13
-rw-r--r--drivers/net/wireless/mwifiex/main.h18
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c20
-rw-r--r--drivers/net/wireless/mwifiex/scan.c19
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c22
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c23
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c5
-rw-r--r--drivers/net/wireless/orinoco/fw.c1
-rw-r--r--drivers/net/wireless/p54/eeprom.c1
-rw-r--r--drivers/net/wireless/p54/fwio.c1
-rw-r--r--drivers/net/wireless/p54/main.c1
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c6
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c5
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c335
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/rndis_wlan.c86
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c10
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c37
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c63
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/main.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c56
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h13
-rw-r--r--drivers/net/wireless/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig10
-rw-r--r--drivers/net/wireless/wl12xx/Makefile3
-rw-r--r--drivers/net/wireless/wl12xx/acx.c172
-rw-r--r--drivers/net/wireless/wl12xx/acx.h91
-rw-r--r--drivers/net/wireless/wl12xx/boot.c16
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c368
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h50
-rw-r--r--drivers/net/wireless/wl12xx/conf.h4
-rw-r--r--drivers/net/wireless/wl12xx/debug.h101
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c157
-rw-r--r--drivers/net/wireless/wl12xx/event.c214
-rw-r--r--drivers/net/wireless/wl12xx/event.h3
-rw-r--r--drivers/net/wireless/wl12xx/init.c491
-rw-r--r--drivers/net/wireless/wl12xx/init.h8
-rw-r--r--drivers/net/wireless/wl12xx/io.c12
-rw-r--r--drivers/net/wireless/wl12xx/io.h23
-rw-r--r--drivers/net/wireless/wl12xx/main.c1966
-rw-r--r--drivers/net/wireless/wl12xx/ps.c56
-rw-r--r--drivers/net/wireless/wl12xx/ps.h9
-rw-r--r--drivers/net/wireless/wl12xx/reg.h2
-rw-r--r--drivers/net/wireless/wl12xx/rx.c38
-rw-r--r--drivers/net/wireless/wl12xx/scan.c102
-rw-r--r--drivers/net/wireless/wl12xx/scan.h8
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c259
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c543
-rw-r--r--drivers/net/wireless/wl12xx/spi.c215
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c77
-rw-r--r--drivers/net/wireless/wl12xx/tx.c382
-rw-r--r--drivers/net/wireless/wl12xx/tx.h11
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h384
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_platform_data.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/xen-netback/common.h11
-rw-r--r--drivers/net/xen-netback/netback.c80
-rw-r--r--drivers/nfc/nfcwilink.c1
-rw-r--r--drivers/nfc/pn533.c170
-rw-r--r--drivers/of/base.c234
-rw-r--r--drivers/of/fdt.c13
-rw-r--r--drivers/of/irq.c121
-rw-r--r--drivers/of/of_mdio.c1
-rw-r--r--drivers/of/of_net.c1
-rw-r--r--drivers/of/of_pci.c1
-rw-r--r--drivers/of/of_pci_irq.c1
-rw-r--r--drivers/of/of_spi.c1
-rw-r--r--drivers/of/pdt.c8
-rw-r--r--drivers/of/platform.c28
-rw-r--r--drivers/parisc/ccio-dma.c1
-rw-r--r--drivers/parisc/sba_iommu.c1
-rw-r--r--drivers/pci/Kconfig37
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/ats.c439
-rw-r--r--drivers/pci/hotplug-pci.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c109
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c1
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c1
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c1
-rw-r--r--drivers/pci/htirq.c1
-rw-r--r--drivers/pci/ioapic.c1
-rw-r--r--drivers/pci/iov.c143
-rw-r--r--drivers/pci/irq.c1
-rw-r--r--drivers/pci/msi.c1
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-sysfs.c1
-rw-r--r--drivers/pci/pci.c59
-rw-r--r--drivers/pci/pcie/pme.c9
-rw-r--r--drivers/pci/probe.c68
-rw-r--r--drivers/pci/quirks.c112
-rw-r--r--drivers/pci/rom.c1
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/pci/setup-res.c1
-rw-r--r--drivers/pci/slot.c1
-rw-r--r--drivers/pci/vpd.c1
-rw-r--r--drivers/pcmcia/db1xxx_ss.c1
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c1
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c1
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c1
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x2xx.c3
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c2
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/core.c1
-rw-r--r--drivers/platform/x86/Kconfig21
-rw-r--r--drivers/platform/x86/acer-wmi.c488
-rw-r--r--drivers/platform/x86/asus-laptop.c378
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c84
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/hp_accel.c15
-rw-r--r--drivers/platform/x86/ideapad-laptop.c251
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c1
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c8
-rw-r--r--drivers/platform/x86/msi-wmi.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c107
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/topstar-laptop.c1
-rw-r--r--drivers/platform/x86/toshiba_acpi.c641
-rw-r--r--drivers/platform/x86/wmi.c7
-rw-r--r--drivers/pnp/pnpacpi/core.c1
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c62
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/ds2780_battery.c86
-rw-r--r--drivers/power/max17042_battery.c1
-rw-r--r--drivers/power/max8903_charger.c1
-rw-r--r--drivers/power/max8997_charger.c1
-rw-r--r--drivers/power/max8998_charger.c1
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/pps/clients/Kconfig9
-rw-r--r--drivers/pps/clients/Makefile1
-rw-r--r--drivers/pps/clients/pps-gpio.c227
-rw-r--r--drivers/pps/clients/pps-ktimer.c12
-rw-r--r--drivers/pps/clients/pps_parport.c9
-rw-r--r--drivers/pps/kapi.c20
-rw-r--r--drivers/ps3/ps3-vuart.c2
-rw-r--r--drivers/ps3/ps3stor_lib.c3
-rw-r--r--drivers/ps3/sys-manager-core.c1
-rw-r--r--drivers/rapidio/Kconfig6
-rw-r--r--drivers/rapidio/Makefile1
-rw-r--r--drivers/rapidio/devices/Kconfig10
-rw-r--r--drivers/rapidio/devices/Makefile5
-rw-r--r--drivers/rapidio/devices/tsi721.c2360
-rw-r--r--drivers/rapidio/devices/tsi721.h766
-rw-r--r--drivers/rapidio/rio-scan.c6
-rw-r--r--drivers/rapidio/switches/idt_gen2.c1
-rw-r--r--drivers/regulator/88pm8607.c3
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/aat2870-regulator.c1
-rw-r--r--drivers/regulator/ab8500.c1
-rw-r--r--drivers/regulator/bq24022.c1
-rw-r--r--drivers/regulator/core.c108
-rw-r--r--drivers/regulator/da903x.c1
-rw-r--r--drivers/regulator/db8500-prcmu.c3
-rw-r--r--drivers/regulator/dummy.c1
-rw-r--r--drivers/regulator/fixed.c1
-rw-r--r--drivers/regulator/gpio-regulator.c358
-rw-r--r--drivers/regulator/lp3971.c1
-rw-r--r--drivers/regulator/lp3972.c1
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8925-regulator.c1
-rw-r--r--drivers/regulator/max8952.c3
-rw-r--r--drivers/regulator/max8997.c1
-rw-r--r--drivers/regulator/mc13783-regulator.c7
-rw-r--r--drivers/regulator/mc13892-regulator.c1
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c1
-rw-r--r--drivers/regulator/tps65023-regulator.c183
-rw-r--r--drivers/regulator/tps6507x-regulator.c8
-rw-r--r--drivers/regulator/tps6586x-regulator.c33
-rw-r--r--drivers/regulator/tps65912-regulator.c2
-rw-r--r--drivers/regulator/userspace-consumer.c1
-rw-r--r--drivers/regulator/virtual.c1
-rw-r--r--drivers/regulator/wm8400-regulator.c1
-rw-r--r--drivers/regulator/wm8994-regulator.c13
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/class.c32
-rw-r--r--drivers/rtc/interface.c1
-rw-r--r--drivers/rtc/rtc-dm355evm.c1
-rw-r--r--drivers/rtc/rtc-ds1305.c1
-rw-r--r--drivers/rtc/rtc-ds1307.c27
-rw-r--r--drivers/rtc/rtc-ds1511.c1
-rw-r--r--drivers/rtc/rtc-ds1553.c1
-rw-r--r--drivers/rtc/rtc-ds1672.c1
-rw-r--r--drivers/rtc/rtc-ds1742.c1
-rw-r--r--drivers/rtc/rtc-em3027.c1
-rw-r--r--drivers/rtc/rtc-isl12022.c1
-rw-r--r--drivers/rtc/rtc-mc13xxx.c6
-rw-r--r--drivers/rtc/rtc-mrst.c19
-rw-r--r--drivers/rtc/rtc-mv.c1
-rw-r--r--drivers/rtc/rtc-pcf2123.c1
-rw-r--r--drivers/rtc/rtc-pcf8563.c1
-rw-r--r--drivers/rtc/rtc-rs5c348.c1
-rw-r--r--drivers/rtc/rtc-rs5c372.c1
-rw-r--r--drivers/rtc/rtc-stk17ta8.c1
-rw-r--r--drivers/rtc/rtc-tx4939.c1
-rw-r--r--drivers/rtc/rtc-x1205.c1
-rw-r--r--drivers/s390/block/dasd.c19
-rw-r--r--drivers/s390/block/dasd_eckd.c45
-rw-r--r--drivers/s390/block/dasd_fba.c1
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/s390/block/xpram.c5
-rw-r--r--drivers/s390/char/con3215.c3
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/raw3270.c3
-rw-r--r--drivers/s390/char/sclp_cmd.c4
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c1
-rw-r--r--drivers/s390/char/sclp_quiesce.c3
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c1
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/vmcp.c1
-rw-r--r--drivers/s390/char/vmur.c4
-rw-r--r--drivers/s390/char/zcore.c21
-rw-r--r--drivers/s390/cio/ccwgroup.c333
-rw-r--r--drivers/s390/cio/ccwreq.c23
-rw-r--r--drivers/s390/cio/chp.c2
-rw-r--r--drivers/s390/cio/chsc_sch.c5
-rw-r--r--drivers/s390/cio/cio.c19
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c13
-rw-r--r--drivers/s390/cio/device.h13
-rw-r--r--drivers/s390/cio/io_sch.h2
-rw-r--r--drivers/s390/cio/qdio.h40
-rw-r--r--drivers/s390/cio/qdio_debug.c12
-rw-r--r--drivers/s390/cio/qdio_main.c105
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c52
-rw-r--r--drivers/s390/kvm/kvm_virtio.c9
-rw-r--r--drivers/s390/net/claw.c3
-rw-r--r--drivers/s390/net/ctcm_main.c3
-rw-r--r--drivers/s390/net/ctcm_sysfs.c2
-rw-r--r--drivers/s390/net/lcs.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c1
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c37
-rw-r--r--drivers/s390/scsi/zfcp_def.h7
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c80
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c59
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h66
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c16
-rw-r--r--drivers/scsi/Kconfig28
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c1
-rw-r--r--drivers/scsi/a3000.c1
-rw-r--r--drivers/scsi/aacraid/aachba.c1
-rw-r--r--drivers/scsi/aacraid/linit.c7
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c1
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c20
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c60
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c113
-rw-r--r--drivers/scsi/be2iscsi/be_main.h5
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c1
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c49
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c226
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c69
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c23
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c80
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c20
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c20
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c57
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h1
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c72
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c109
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c1
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c1
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c7
-rw-r--r--drivers/scsi/fcoe/fcoe.c116
-rw-r--r--drivers/scsi/fcoe/fcoe.h11
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c101
-rw-r--r--drivers/scsi/gvp11.c1
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/hpsa.c234
-rw-r--r--drivers/scsi/hpsa.h10
-rw-r--r--drivers/scsi/hpsa_cmd.h5
-rw-r--r--drivers/scsi/ipr.c33
-rw-r--r--drivers/scsi/ipr.h5
-rw-r--r--drivers/scsi/isci/host.c92
-rw-r--r--drivers/scsi/isci/host.h15
-rw-r--r--drivers/scsi/isci/init.c5
-rw-r--r--drivers/scsi/isci/isci.h2
-rw-r--r--drivers/scsi/isci/phy.c11
-rw-r--r--drivers/scsi/isci/port.c148
-rw-r--r--drivers/scsi/isci/port.h6
-rw-r--r--drivers/scsi/isci/port_config.c2
-rw-r--r--drivers/scsi/isci/probe_roms.h4
-rw-r--r--drivers/scsi/isci/registers.h122
-rw-r--r--drivers/scsi/isci/remote_device.c109
-rw-r--r--drivers/scsi/isci/remote_device.h14
-rw-r--r--drivers/scsi/isci/request.c432
-rw-r--r--drivers/scsi/isci/request.h37
-rw-r--r--drivers/scsi/isci/sas.h2
-rw-r--r--drivers/scsi/isci/task.c721
-rw-r--r--drivers/scsi/isci/task.h54
-rw-r--r--drivers/scsi/iscsi_tcp.c84
-rw-r--r--drivers/scsi/libfc/fc_disc.c1
-rw-r--r--drivers/scsi/libfc/fc_elsct.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c19
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libfc/fc_libfc.c1
-rw-r--r--drivers/scsi/libfc/fc_lport.c101
-rw-r--r--drivers/scsi/libfc/fc_npiv.c1
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libiscsi.c10
-rw-r--r--drivers/scsi/libiscsi_tcp.c1
-rw-r--r--drivers/scsi/libsas/sas_discover.c13
-rw-r--r--drivers/scsi/libsas/sas_expander.c37
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c104
-rw-r--r--drivers/scsi/libsas/sas_init.c43
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c146
-rw-r--r--drivers/scsi/libsas/sas_task.c1
-rw-r--r--drivers/scsi/libsrp.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c80
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c182
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h300
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c288
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c80
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c245
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c460
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c16
-rw-r--r--drivers/scsi/mac53c94.c1
-rw-r--r--drivers/scsi/mac_esp.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h22
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c239
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c26
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c193
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h19
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h11
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h153
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h113
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c694
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h72
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c67
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c37
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c636
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c2
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c35
-rw-r--r--drivers/scsi/mvsas/mv_defs.h2
-rw-r--r--drivers/scsi/mvsas/mv_init.c23
-rw-r--r--drivers/scsi/mvsas/mv_sas.c79
-rw-r--r--drivers/scsi/mvsas/mv_sas.h4
-rw-r--r--drivers/scsi/mvumi.c2018
-rw-r--r--drivers/scsi/mvumi.h505
-rw-r--r--drivers/scsi/osd/osd_initiator.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c73
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c93
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/ps3rom.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c111
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c152
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h42
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h18
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c127
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c970
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h255
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c46
-rw-r--r--drivers/scsi/qla4xxx/Kconfig1
-rw-r--r--drivers/scsi/qla4xxx/Makefile2
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c76
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c513
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.h19
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h214
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h198
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h71
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c852
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c68
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c83
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c824
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c21
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c17
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2321
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi_error.c12
-rw-r--r--drivers/scsi/scsi_lib.c10
-rw-r--r--drivers/scsi/scsi_lib_dma.c1
-rw-r--r--drivers/scsi/scsi_netlink.c1
-rw-r--r--drivers/scsi/scsi_pm.c1
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c38
-rw-r--r--drivers/scsi/scsi_tgt_if.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c742
-rw-r--r--drivers/scsi/scsi_transport_sas.c10
-rw-r--r--drivers/scsi/sd.c19
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/sr_ioctl.c1
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/sfi/sfi_core.h3
-rw-r--r--drivers/sh/Makefile8
-rw-r--r--drivers/sh/clk/core.c107
-rw-r--r--drivers/sh/intc/chip.c11
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/dynamic.c1
-rw-r--r--drivers/sh/intc/userimask.c1
-rw-r--r--drivers/sh/intc/virq.c1
-rw-r--r--drivers/sh/maple/maple.c1
-rw-r--r--drivers/sh/pfc.c29
-rw-r--r--drivers/sh/pm_runtime.c65
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-altera.c14
-rw-r--r--drivers/spi/spi-ath79.c13
-rw-r--r--drivers/spi/spi-atmel.c20
-rw-r--r--drivers/spi/spi-au1550.c1
-rw-r--r--drivers/spi/spi-bfin-sport.c13
-rw-r--r--drivers/spi/spi-bfin5xx.c2
-rw-r--r--drivers/spi/spi-bitbang.c1
-rw-r--r--drivers/spi/spi-butterfly.c1
-rw-r--r--drivers/spi/spi-coldfire-qspi.c16
-rw-r--r--drivers/spi/spi-davinci.c20
-rw-r--r--drivers/spi/spi-dw-mid.c9
-rw-r--r--drivers/spi/spi-dw-mmio.c15
-rw-r--r--drivers/spi/spi-dw-pci.c1
-rw-r--r--drivers/spi/spi-dw.c63
-rw-r--r--drivers/spi/spi-dw.h97
-rw-r--r--drivers/spi/spi-ep93xx.c21
-rw-r--r--drivers/spi/spi-fsl-espi.c13
-rw-r--r--drivers/spi/spi-gpio.c22
-rw-r--r--drivers/spi/spi-imx.c14
-rw-r--r--drivers/spi/spi-mpc512x-psc.c13
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c13
-rw-r--r--drivers/spi/spi-mpc52xx.c14
-rw-r--r--drivers/spi/spi-nuc900.c14
-rw-r--r--drivers/spi/spi-oc-tiny.c14
-rw-r--r--drivers/spi/spi-omap-uwire.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c7
-rw-r--r--drivers/spi/spi-orion.c1
-rw-r--r--drivers/spi/spi-pl022.c238
-rw-r--r--drivers/spi/spi-ppc4xx.c15
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c1
-rw-r--r--drivers/spi/spi-s3c24xx.c22
-rw-r--r--drivers/spi/spi-s3c64xx.c175
-rw-r--r--drivers/spi/spi-sh-msiof.c16
-rw-r--r--drivers/spi/spi-sh-sci.c14
-rw-r--r--drivers/spi/spi-sh.c15
-rw-r--r--drivers/spi/spi-stmp.c13
-rw-r--r--drivers/spi/spi-tegra.c17
-rw-r--r--drivers/spi/spi-ti-ssp.c14
-rw-r--r--drivers/spi/spi-tle62x0.c1
-rw-r--r--drivers/spi/spi-txx9.c1
-rw-r--r--drivers/spi/spi-xilinx.c13
-rw-r--r--drivers/spi/spi.c1
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/ssb/driver_chipcommon.c1
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c1
-rw-r--r--drivers/ssb/driver_gige.c1
-rw-r--r--drivers/ssb/driver_pcicore.c9
-rw-r--r--drivers/ssb/embedded.c1
-rw-r--r--drivers/ssb/main.c1
-rw-r--r--drivers/ssb/pci.c23
-rw-r--r--drivers/ssb/pcihost_wrapper.c1
-rw-r--r--drivers/staging/Kconfig22
-rw-r--r--drivers/staging/Makefile10
-rw-r--r--drivers/staging/altera-stapl/Makefile3
-rw-r--r--drivers/staging/altera-stapl/altera.h49
-rw-r--r--drivers/staging/cx25821/README6
-rw-r--r--drivers/staging/cxt1e1/linux.c1
-rw-r--r--drivers/staging/gma500/intel_i2c.c1
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c1
-rw-r--r--drivers/staging/gma500/mdfld_output.c3
-rw-r--r--drivers/staging/gma500/mid_bios.c1
-rw-r--r--drivers/staging/gma500/mrst_hdmi_i2c.c1
-rw-r--r--drivers/staging/gma500/psb_drv.c1
-rw-r--r--drivers/staging/hv/Makefile1
-rw-r--r--drivers/staging/hv/hv_timesource.c101
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c1
-rw-r--r--drivers/staging/iio/accel/adis16201_trigger.c1
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c1
-rw-r--r--drivers/staging/iio/accel/adis16203_trigger.c1
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c1
-rw-r--r--drivers/staging/iio/accel/adis16204_trigger.c1
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c1
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c1
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c1
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c1
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c1
-rw-r--r--drivers/staging/iio/adc/ad7793.c1
-rw-r--r--drivers/staging/iio/dac/ad5686.c1
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c1
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c1
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c1
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c1
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c1
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c1
-rw-r--r--drivers/staging/iio/industrialio-buffer.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_trigger.c1
-rw-r--r--drivers/staging/iio/trigger.h3
-rw-r--r--drivers/staging/intel_sst/intel_sst.c1
-rw-r--r--drivers/staging/intel_sst/intel_sst_drv_interface.c1
-rw-r--r--drivers/staging/line6/audio.c1
-rw-r--r--drivers/staging/media/Kconfig37
-rw-r--r--drivers/staging/media/Makefile7
-rw-r--r--drivers/staging/media/as102/Kconfig7
-rw-r--r--drivers/staging/media/as102/Makefile6
-rw-r--r--drivers/staging/media/as102/as102_drv.c351
-rw-r--r--drivers/staging/media/as102/as102_drv.h141
-rw-r--r--drivers/staging/media/as102/as102_fe.c603
-rw-r--r--drivers/staging/media/as102/as102_fw.c251
-rw-r--r--drivers/staging/media/as102/as102_fw.h42
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.c478
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.h59
-rw-r--r--drivers/staging/media/as102/as10x_cmd.c452
-rw-r--r--drivers/staging/media/as102/as10x_cmd.h540
-rw-r--r--drivers/staging/media/as102/as10x_cmd_cfg.c215
-rw-r--r--drivers/staging/media/as102/as10x_cmd_stream.c223
-rw-r--r--drivers/staging/media/as102/as10x_handle.h58
-rw-r--r--drivers/staging/media/as102/as10x_types.h198
-rw-r--r--drivers/staging/media/cxd2099/Kconfig (renamed from drivers/staging/cxd2099/Kconfig)0
-rw-r--r--drivers/staging/media/cxd2099/Makefile (renamed from drivers/staging/cxd2099/Makefile)0
-rw-r--r--drivers/staging/media/cxd2099/TODO (renamed from drivers/staging/cxd2099/TODO)0
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c (renamed from drivers/staging/cxd2099/cxd2099.c)0
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.h (renamed from drivers/staging/cxd2099/cxd2099.h)0
-rw-r--r--drivers/staging/media/dt3155v4l/Kconfig (renamed from drivers/staging/dt3155v4l/Kconfig)0
-rw-r--r--drivers/staging/media/dt3155v4l/Makefile (renamed from drivers/staging/dt3155v4l/Makefile)0
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c (renamed from drivers/staging/dt3155v4l/dt3155v4l.c)4
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.h (renamed from drivers/staging/dt3155v4l/dt3155v4l.h)0
-rw-r--r--drivers/staging/media/easycap/Kconfig (renamed from drivers/staging/easycap/Kconfig)0
-rw-r--r--drivers/staging/media/easycap/Makefile (renamed from drivers/staging/easycap/Makefile)0
-rw-r--r--drivers/staging/media/easycap/README (renamed from drivers/staging/easycap/README)0
-rw-r--r--drivers/staging/media/easycap/easycap.h (renamed from drivers/staging/easycap/easycap.h)0
-rw-r--r--drivers/staging/media/easycap/easycap_ioctl.c (renamed from drivers/staging/easycap/easycap_ioctl.c)0
-rw-r--r--drivers/staging/media/easycap/easycap_low.c (renamed from drivers/staging/easycap/easycap_low.c)0
-rw-r--r--drivers/staging/media/easycap/easycap_main.c (renamed from drivers/staging/easycap/easycap_main.c)0
-rw-r--r--drivers/staging/media/easycap/easycap_settings.c (renamed from drivers/staging/easycap/easycap_settings.c)0
-rw-r--r--drivers/staging/media/easycap/easycap_sound.c (renamed from drivers/staging/easycap/easycap_sound.c)0
-rw-r--r--drivers/staging/media/easycap/easycap_testcard.c (renamed from drivers/staging/easycap/easycap_testcard.c)0
-rw-r--r--drivers/staging/media/go7007/Kconfig (renamed from drivers/staging/go7007/Kconfig)0
-rw-r--r--drivers/staging/media/go7007/Makefile (renamed from drivers/staging/go7007/Makefile)0
-rw-r--r--drivers/staging/media/go7007/README (renamed from drivers/staging/go7007/README)0
-rw-r--r--drivers/staging/media/go7007/go7007-driver.c (renamed from drivers/staging/go7007/go7007-driver.c)0
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c (renamed from drivers/staging/go7007/go7007-fw.c)0
-rw-r--r--drivers/staging/media/go7007/go7007-i2c.c (renamed from drivers/staging/go7007/go7007-i2c.c)0
-rw-r--r--drivers/staging/media/go7007/go7007-priv.h (renamed from drivers/staging/go7007/go7007-priv.h)0
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c (renamed from drivers/staging/go7007/go7007-usb.c)0
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c (renamed from drivers/staging/go7007/go7007-v4l2.c)0
-rw-r--r--drivers/staging/media/go7007/go7007.h (renamed from drivers/staging/go7007/go7007.h)0
-rw-r--r--drivers/staging/media/go7007/go7007.txt (renamed from drivers/staging/go7007/go7007.txt)0
-rw-r--r--drivers/staging/media/go7007/s2250-board.c (renamed from drivers/staging/go7007/s2250-board.c)0
-rw-r--r--drivers/staging/media/go7007/s2250-loader.c (renamed from drivers/staging/go7007/s2250-loader.c)0
-rw-r--r--drivers/staging/media/go7007/s2250-loader.h (renamed from drivers/staging/go7007/s2250-loader.h)0
-rw-r--r--drivers/staging/media/go7007/saa7134-go7007.c (renamed from drivers/staging/go7007/saa7134-go7007.c)0
-rw-r--r--drivers/staging/media/go7007/snd-go7007.c (renamed from drivers/staging/go7007/snd-go7007.c)0
-rw-r--r--drivers/staging/media/go7007/wis-i2c.h (renamed from drivers/staging/go7007/wis-i2c.h)0
-rw-r--r--drivers/staging/media/go7007/wis-ov7640.c (renamed from drivers/staging/go7007/wis-ov7640.c)0
-rw-r--r--drivers/staging/media/go7007/wis-saa7113.c (renamed from drivers/staging/go7007/wis-saa7113.c)0
-rw-r--r--drivers/staging/media/go7007/wis-saa7115.c (renamed from drivers/staging/go7007/wis-saa7115.c)0
-rw-r--r--drivers/staging/media/go7007/wis-sony-tuner.c (renamed from drivers/staging/go7007/wis-sony-tuner.c)0
-rw-r--r--drivers/staging/media/go7007/wis-tw2804.c (renamed from drivers/staging/go7007/wis-tw2804.c)0
-rw-r--r--drivers/staging/media/go7007/wis-tw9903.c (renamed from drivers/staging/go7007/wis-tw9903.c)0
-rw-r--r--drivers/staging/media/go7007/wis-uda1342.c (renamed from drivers/staging/go7007/wis-uda1342.c)0
-rw-r--r--drivers/staging/media/lirc/Kconfig (renamed from drivers/staging/lirc/Kconfig)0
-rw-r--r--drivers/staging/media/lirc/Makefile (renamed from drivers/staging/lirc/Makefile)0
-rw-r--r--drivers/staging/media/lirc/TODO (renamed from drivers/staging/lirc/TODO)0
-rw-r--r--drivers/staging/media/lirc/TODO.lirc_zilog (renamed from drivers/staging/lirc/TODO.lirc_zilog)0
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c (renamed from drivers/staging/lirc/lirc_bt829.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_ene0100.h (renamed from drivers/staging/lirc/lirc_ene0100.h)0
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c (renamed from drivers/staging/lirc/lirc_igorplugusb.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c (renamed from drivers/staging/lirc/lirc_imon.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c (renamed from drivers/staging/lirc/lirc_parallel.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.h (renamed from drivers/staging/lirc/lirc_parallel.h)0
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c (renamed from drivers/staging/lirc/lirc_sasem.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c (renamed from drivers/staging/lirc/lirc_serial.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c (renamed from drivers/staging/lirc/lirc_sir.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_ttusbir.c (renamed from drivers/staging/lirc/lirc_ttusbir.c)0
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c (renamed from drivers/staging/lirc/lirc_zilog.c)0
-rw-r--r--drivers/staging/media/solo6x10/Kconfig (renamed from drivers/staging/solo6x10/Kconfig)0
-rw-r--r--drivers/staging/media/solo6x10/Makefile (renamed from drivers/staging/solo6x10/Makefile)0
-rw-r--r--drivers/staging/media/solo6x10/TODO (renamed from drivers/staging/solo6x10/TODO)0
-rw-r--r--drivers/staging/media/solo6x10/core.c (renamed from drivers/staging/solo6x10/core.c)0
-rw-r--r--drivers/staging/media/solo6x10/disp.c (renamed from drivers/staging/solo6x10/disp.c)0
-rw-r--r--drivers/staging/media/solo6x10/enc.c (renamed from drivers/staging/solo6x10/enc.c)0
-rw-r--r--drivers/staging/media/solo6x10/g723.c (renamed from drivers/staging/solo6x10/g723.c)1
-rw-r--r--drivers/staging/media/solo6x10/gpio.c (renamed from drivers/staging/solo6x10/gpio.c)0
-rw-r--r--drivers/staging/media/solo6x10/i2c.c (renamed from drivers/staging/solo6x10/i2c.c)0
-rw-r--r--drivers/staging/media/solo6x10/jpeg.h (renamed from drivers/staging/solo6x10/jpeg.h)0
-rw-r--r--drivers/staging/media/solo6x10/offsets.h (renamed from drivers/staging/solo6x10/offsets.h)0
-rw-r--r--drivers/staging/media/solo6x10/osd-font.h (renamed from drivers/staging/solo6x10/osd-font.h)0
-rw-r--r--drivers/staging/media/solo6x10/p2m.c (renamed from drivers/staging/solo6x10/p2m.c)0
-rw-r--r--drivers/staging/media/solo6x10/registers.h (renamed from drivers/staging/solo6x10/registers.h)0
-rw-r--r--drivers/staging/media/solo6x10/solo6x10.h (renamed from drivers/staging/solo6x10/solo6x10.h)0
-rw-r--r--drivers/staging/media/solo6x10/tw28.c (renamed from drivers/staging/solo6x10/tw28.c)0
-rw-r--r--drivers/staging/media/solo6x10/tw28.h (renamed from drivers/staging/solo6x10/tw28.h)0
-rw-r--r--drivers/staging/media/solo6x10/v4l2-enc.c (renamed from drivers/staging/solo6x10/v4l2-enc.c)0
-rw-r--r--drivers/staging/media/solo6x10/v4l2.c (renamed from drivers/staging/solo6x10/v4l2.c)0
-rw-r--r--drivers/staging/pohmelfs/inode.c2
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c1
-rw-r--r--drivers/staging/spectra/lld_mtd.c6
-rw-r--r--drivers/staging/tm6000/CARDLIST16
-rw-r--r--drivers/staging/tm6000/README22
-rw-r--r--drivers/staging/tm6000/TODO8
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c679
-rw-r--r--drivers/staging/usbip/usbip_common.c1
-rw-r--r--drivers/staging/usbip/usbip_event.c1
-rw-r--r--drivers/staging/winbond/wbusb.c1
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c1
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c25
-rw-r--r--drivers/staging/zram/zram_drv.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c1
-rw-r--r--drivers/target/loopback/tcm_loop.c23
-rw-r--r--drivers/target/target_core_alua.c12
-rw-r--r--drivers/target/target_core_alua.h4
-rw-r--r--drivers/target/target_core_cdb.c216
-rw-r--r--drivers/target/target_core_cdb.h14
-rw-r--r--drivers/target/target_core_device.c15
-rw-r--r--drivers/target/target_core_fabric_lib.c1
-rw-r--r--drivers/target/target_core_file.c1
-rw-r--r--drivers/target/target_core_hba.c1
-rw-r--r--drivers/target/target_core_iblock.c1
-rw-r--r--drivers/target/target_core_pr.c349
-rw-r--r--drivers/target/target_core_pr.h7
-rw-r--r--drivers/target/target_core_pscsi.c3
-rw-r--r--drivers/target/target_core_tmr.c24
-rw-r--r--drivers/target/target_core_tpg.c1
-rw-r--r--drivers/target/target_core_transport.c393
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h2
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c3
-rw-r--r--drivers/thermal/thermal_sys.c4
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/hvc/Kconfig9
-rw-r--r--drivers/tty/hvc/Makefile1
-rw-r--r--drivers/tty/hvc/hvc_console.c7
-rw-r--r--drivers/tty/hvc/hvc_iseries.c1
-rw-r--r--drivers/tty/hvc/hvc_opal.c425
-rw-r--r--drivers/tty/hvc/hvc_vio.c1
-rw-r--r--drivers/tty/hvc/hvcs.c6
-rw-r--r--drivers/tty/hvc/hvsi_lib.c4
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/serial/8250.c23
-rw-r--r--drivers/tty/serial/altera_jtaguart.c5
-rw-r--r--drivers/tty/serial/altera_uart.c4
-rw-r--r--drivers/tty/serial/amba-pl010.c14
-rw-r--r--drivers/tty/serial/amba-pl011.c14
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/imx.c11
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/tty/serial/kgdboc.c1
-rw-r--r--drivers/tty/serial/max3100.c1
-rw-r--r--drivers/tty/serial/max3107-aava.c1
-rw-r--r--drivers/tty/serial/max3107.c1
-rw-r--r--drivers/tty/serial/msm_serial.c30
-rw-r--r--drivers/tty/serial/nwpserial.c1
-rw-r--r--drivers/tty/serial/of_serial.c12
-rw-r--r--drivers/tty/serial/sh-sci.c44
-rw-r--r--drivers/tty/serial/timbuart.c1
-rw-r--r--drivers/tty/serial/uartlite.c4
-rw-r--r--drivers/tty/serial/xilinx_uartps.c1
-rw-r--r--drivers/tty/vt/vc_screen.c1
-rw-r--r--drivers/uio/uio_pdrv.c1
-rw-r--r--drivers/uio/uio_pdrv_genirq.c1
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c1
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/notify.c1
-rw-r--r--drivers/usb/gadget/cdc2.c1
-rw-r--r--drivers/usb/gadget/composite.c1
-rw-r--r--drivers/usb/gadget/dbgp.c1
-rw-r--r--drivers/usb/gadget/f_fs.c1
-rw-r--r--drivers/usb/gadget/f_obex.c1
-rw-r--r--drivers/usb/gadget/f_sourcesink.c1
-rw-r--r--drivers/usb/gadget/file_storage.c1
-rw-r--r--drivers/usb/gadget/fusb300_udc.c1
-rw-r--r--drivers/usb/gadget/gmidi.c1
-rw-r--r--drivers/usb/gadget/u_serial.c1
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/alchemy-common.c337
-rw-r--r--drivers/usb/host/ehci-au1xxx.c77
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c1
-rw-r--r--drivers/usb/host/isp1760-if.c1
-rw-r--r--drivers/usb/host/ohci-at91.c239
-rw-r--r--drivers/usb/host/ohci-au1xxx.c110
-rw-r--r--drivers/usb/host/ohci-pnx4008.c2
-rw-r--r--drivers/usb/host/pci-quirks.c1
-rw-r--r--drivers/usb/host/whci/debug.c1
-rw-r--r--drivers/usb/host/whci/hcd.c1
-rw-r--r--drivers/usb/host/xhci-hub.c1
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/mon/mon_bin.c1
-rw-r--r--drivers/usb/mon/mon_stat.c1
-rw-r--r--drivers/usb/mon/mon_text.c1
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_debugfs.c6
-rw-r--r--drivers/usb/otg/gpio_vbus.c1
-rw-r--r--drivers/usb/otg/otg.c1
-rw-r--r--drivers/usb/otg/ulpi.c1
-rw-r--r--drivers/usb/serial/aircable.c1
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/option_ms.c1
-rw-r--r--drivers/usb/storage/protocol.c1
-rw-r--r--drivers/usb/storage/sierra_ms.c1
-rw-r--r--drivers/usb/storage/transport.c1
-rw-r--r--drivers/usb/storage/uas.c1
-rw-r--r--drivers/usb/wusbcore/devconnect.c1
-rw-r--r--drivers/usb/wusbcore/mmc.c1
-rw-r--r--drivers/usb/wusbcore/rh.c1
-rw-r--r--drivers/usb/wusbcore/security.c1
-rw-r--r--drivers/usb/wusbcore/wa-hc.c1
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c1
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1
-rw-r--r--drivers/uwb/est.c1
-rw-r--r--drivers/uwb/i1480/dfu/dfu.c1
-rw-r--r--drivers/uwb/ie.c1
-rw-r--r--drivers/uwb/lc-dev.c2
-rw-r--r--drivers/uwb/lc-rc.c1
-rw-r--r--drivers/uwb/neh.c1
-rw-r--r--drivers/uwb/pal.c1
-rw-r--r--drivers/uwb/radio.c1
-rw-r--r--drivers/uwb/reset.c1
-rw-r--r--drivers/uwb/rsv.c1
-rw-r--r--drivers/uwb/scan.c1
-rw-r--r--drivers/uwb/umc-bus.c1
-rw-r--r--drivers/uwb/umc-dev.c1
-rw-r--r--drivers/uwb/umc-drv.c1
-rw-r--r--drivers/uwb/whci.c1
-rw-r--r--drivers/video/68328fb.c4
-rw-r--r--drivers/video/Kconfig38
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/acornfb.c5
-rw-r--r--drivers/video/amba-clcd.c9
-rw-r--r--drivers/video/arkfb.c9
-rw-r--r--drivers/video/atmel_lcdfb.c18
-rw-r--r--drivers/video/aty/radeon_base.c10
-rw-r--r--drivers/video/au1100fb.c181
-rw-r--r--drivers/video/au1100fb.h6
-rw-r--r--drivers/video/au1200fb.c299
-rw-r--r--drivers/video/backlight/88pm860x_bl.c1
-rw-r--r--drivers/video/backlight/adp5520_bl.c1
-rw-r--r--drivers/video/backlight/adp8860_bl.c1
-rw-r--r--drivers/video/backlight/adp8870_bl.c3
-rw-r--r--drivers/video/backlight/ams369fg06.c1
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/ep93xx_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c4
-rw-r--r--drivers/video/backlight/l4f00242t03.c71
-rw-r--r--drivers/video/backlight/ld9040.c1
-rw-r--r--drivers/video/backlight/lms283gf05.c1
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/s6e63m0.c1
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/bf54x-lq043fb.c2
-rw-r--r--drivers/video/bfin-lq035q1-fb.c2
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c2
-rw-r--r--drivers/video/bfin_adv7393fb.c2
-rw-r--r--drivers/video/carminefb.c7
-rw-r--r--drivers/video/cobalt_lcdfb.c1
-rw-r--r--drivers/video/controlfb.c2
-rw-r--r--drivers/video/da8xx-fb.c186
-rw-r--r--drivers/video/ep93xx-fb.c1
-rw-r--r--drivers/video/fb-puv3.c4
-rw-r--r--drivers/video/fb_ddc.c3
-rw-r--r--drivers/video/fb_defio.c3
-rw-r--r--drivers/video/fb_notify.c1
-rw-r--r--drivers/video/fbmem.c3
-rw-r--r--drivers/video/fbmon.c21
-rw-r--r--drivers/video/fbsysfs.c3
-rw-r--r--drivers/video/fsl-diu-fb.c992
-rw-r--r--drivers/video/g364fb.c5
-rw-r--r--drivers/video/grvga.c579
-rw-r--r--drivers/video/gxt4500.c4
-rw-r--r--drivers/video/hgafb.c4
-rw-r--r--drivers/video/imsttfb.c2
-rw-r--r--drivers/video/intelfb/intelfbhw.c6
-rw-r--r--drivers/video/mb862xx/mb862xx-i2c.c3
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c7
-rw-r--r--drivers/video/mbx/mbxfb.c6
-rw-r--r--drivers/video/modedb.c444
-rw-r--r--drivers/video/msm/mddi.c2
-rw-r--r--drivers/video/msm/mdp.c7
-rw-r--r--drivers/video/mx3fb.c19
-rw-r--r--drivers/video/mxsfb.c1
-rw-r--r--drivers/video/neofb.c4
-rw-r--r--drivers/video/nuc900fb.c3
-rw-r--r--drivers/video/omap/Kconfig29
-rw-r--r--drivers/video/omap/Makefile8
-rw-r--r--drivers/video/omap/lcd_2430sdp.c203
-rw-r--r--drivers/video/omap/lcd_apollon.c136
-rw-r--r--drivers/video/omap/lcd_h3.c2
-rw-r--r--drivers/video/omap/lcd_h4.c117
-rw-r--r--drivers/video/omap/lcd_inn1610.c2
-rw-r--r--drivers/video/omap/lcd_ldp.c201
-rw-r--r--drivers/video/omap/lcd_mipid.c1
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c130
-rw-r--r--drivers/video/omap/lcd_omap3evm.c193
-rw-r--r--drivers/video/omap/lcd_osk.c2
-rw-r--r--drivers/video/omap/lcd_overo.c180
-rw-r--r--drivers/video/omap/lcd_palmtt.c2
-rw-r--r--drivers/video/omap/omapfb_main.c1
-rw-r--r--drivers/video/omap2/displays/Kconfig28
-rw-r--r--drivers/video/omap2/displays/Makefile3
-rw-r--r--drivers/video/omap2/displays/panel-dvi.c363
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c113
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c747
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c594
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.h288
-rw-r--r--drivers/video/omap2/displays/panel-taal.c125
-rw-r--r--drivers/video/omap2/dss/Kconfig2
-rw-r--r--drivers/video/omap2/dss/Makefile2
-rw-r--r--drivers/video/omap2/dss/core.c4
-rw-r--r--drivers/video/omap2/dss/dispc.c1701
-rw-r--r--drivers/video/omap2/dss/dispc.h57
-rw-r--r--drivers/video/omap2/dss/display.c31
-rw-r--r--drivers/video/omap2/dss/dpi.c29
-rw-r--r--drivers/video/omap2/dss/dsi.c930
-rw-r--r--drivers/video/omap2/dss/dss.c19
-rw-r--r--drivers/video/omap2/dss/dss.h156
-rw-r--r--drivers/video/omap2/dss/dss_features.c130
-rw-r--r--drivers/video/omap2/dss/dss_features.h17
-rw-r--r--drivers/video/omap2/dss/hdmi.c1260
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c (renamed from drivers/video/omap2/dss/hdmi_omap4_panel.c)68
-rw-r--r--drivers/video/omap2/dss/manager.c191
-rw-r--r--drivers/video/omap2/dss/overlay.c122
-rw-r--r--drivers/video/omap2/dss/rfbi.c46
-rw-r--r--drivers/video/omap2/dss/sdi.c20
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h138
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c1239
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h (renamed from drivers/video/omap2/dss/hdmi.h)400
-rw-r--r--drivers/video/omap2/dss/venc.c27
-rw-r--r--drivers/video/omap2/omapfb/Kconfig2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c1
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c134
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c6
-rw-r--r--drivers/video/platinumfb.c4
-rw-r--r--drivers/video/pm2fb.c6
-rw-r--r--drivers/video/pm3fb.c6
-rw-r--r--drivers/video/pnx4008/sdum.c2
-rw-r--r--drivers/video/ps3fb.c2
-rw-r--r--drivers/video/pxa3xx-gcu.c4
-rw-r--r--drivers/video/pxafb.c12
-rw-r--r--drivers/video/s3c-fb.c117
-rw-r--r--drivers/video/s3c2410fb.c3
-rw-r--r--drivers/video/s3fb.c9
-rw-r--r--drivers/video/sa1100fb.c3
-rw-r--r--drivers/video/savage/savagefb_accel.c1
-rw-r--r--drivers/video/savage/savagefb_driver.c16
-rw-r--r--drivers/video/sh_mipi_dsi.c1
-rw-r--r--drivers/video/sh_mobile_hdmi.c47
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c1163
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h12
-rw-r--r--drivers/video/sh_mobile_meram.c208
-rw-r--r--drivers/video/sh_mobile_meram.h41
-rw-r--r--drivers/video/sis/sis_main.c30
-rw-r--r--drivers/video/skeletonfb.c2
-rw-r--r--drivers/video/sm501fb.c6
-rw-r--r--drivers/video/smscufx.c1994
-rw-r--r--drivers/video/tmiofb.c2
-rw-r--r--drivers/video/tridentfb.c4
-rw-r--r--drivers/video/udlfb.c187
-rw-r--r--drivers/video/valkyriefb.c2
-rw-r--r--drivers/video/vfb.c4
-rw-r--r--drivers/video/vga16fb.c2
-rw-r--r--drivers/video/via/dvi.c34
-rw-r--r--drivers/video/via/dvi.h3
-rw-r--r--drivers/video/via/global.c2
-rw-r--r--drivers/video/via/global.h2
-rw-r--r--drivers/video/via/hw.c544
-rw-r--r--drivers/video/via/hw.h285
-rw-r--r--drivers/video/via/lcd.c53
-rw-r--r--drivers/video/via/lcd.h7
-rw-r--r--drivers/video/via/share.h23
-rw-r--r--drivers/video/via/via-core.c2
-rw-r--r--drivers/video/via/via-gpio.c1
-rw-r--r--drivers/video/via/via_modesetting.c104
-rw-r--r--drivers/video/via/via_modesetting.h23
-rw-r--r--drivers/video/via/viafbdev.c182
-rw-r--r--drivers/video/via/viamode.c60
-rw-r--r--drivers/video/via/viamode.h4
-rw-r--r--drivers/video/vt8500lcdfb.c6
-rw-r--r--drivers/video/vt8623fb.c9
-rw-r--r--drivers/video/w100fb.c1
-rw-r--r--drivers/video/xilinxfb.c1
-rw-r--r--drivers/virt/fsl_hypervisor.c1
-rw-r--r--drivers/virtio/Kconfig11
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c1
-rw-r--r--drivers/virtio/virtio_balloon.c1
-rw-r--r--drivers/virtio/virtio_mmio.c479
-rw-r--r--drivers/virtio/virtio_pci.c21
-rw-r--r--drivers/virtio/virtio_ring.c1
-rw-r--r--drivers/w1/slaves/w1_ds2760.c48
-rw-r--r--drivers/w1/slaves/w1_ds2780.c96
-rw-r--r--drivers/w1/slaves/w1_ds2780.h2
-rw-r--r--drivers/w1/w1_family.c1
-rw-r--r--drivers/w1/w1_int.c3
-rw-r--r--drivers/w1/w1_io.c5
-rw-r--r--drivers/watchdog/Kconfig8
-rw-r--r--drivers/watchdog/coh901327_wdt.c2
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c19
-rw-r--r--drivers/watchdog/mpcore_wdt.c3
-rw-r--r--drivers/watchdog/octeon-wdt-main.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c176
-rw-r--r--drivers/watchdog/sb_wdog.c4
-rw-r--r--drivers/watchdog/sc520_wdt.c2
-rw-r--r--drivers/watchdog/stmp3xxx_wdt.c1
-rw-r--r--drivers/watchdog/w83627hf_wdt.c33
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/watchdog/wm831x_wdt.c318
-rw-r--r--drivers/xen/balloon.c14
-rw-r--r--drivers/xen/events.c9
-rw-r--r--drivers/xen/gntdev.c3
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/manage.c1
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/swiotlb-xen.c1
-rw-r--r--drivers/xen/xen-balloon.c17
-rw-r--r--drivers/xen/xen-pciback/conf_space.c1
-rw-r--r--drivers/xen/xenbus/xenbus_client.c7
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h3
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c1
-rw-r--r--drivers/zorro/proc.c1
3020 files changed, 197384 insertions, 94166 deletions
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index e1141402dbe..f4f523bf593 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index c57b5c707a7..20516e59947 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 52aaff3df56..f06a3ee356b 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 00cd95692a9..aee887e3ca5 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 55accb7018b..cc70f3fdcdd 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -269,16 +269,17 @@ acpi_status acpi_hw_clear_acpi_status(void)
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+
+ if (ACPI_FAILURE(status))
+ goto exit;
/* Clear the GPE Bits in all GPE registers in all GPE blocks */
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+exit:
return_ACPI_STATUS(status);
}
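
The point of the hwregs.c hunk above: the hardware lock is now dropped unconditionally right after the PM1 status write, so the failure path can fall through to a bare exit label and the GPE walk runs without the lock held. A userspace sketch of that control flow, with a pthread mutex standing in for acpi_gbl_hardware_lock and all helper names invented for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the ACPI register helpers; they always succeed here. */
static int write_fixed_status(void) { return 0; }
static int clear_gpe_blocks(void)   { return 0; }

static int clear_acpi_status(void)
{
        int status;

        pthread_mutex_lock(&hw_lock);
        status = write_fixed_status();
        pthread_mutex_unlock(&hw_lock);   /* released before the error check */

        if (status)
                goto exit;

        /* The GPE walk now runs without the hardware lock held. */
        status = clear_gpe_blocks();
exit:
        return status;
}

int main(void)
{
        printf("status = %d\n", clear_acpi_status());
        return 0;
}
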
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 2ac28bbe882..d52da307365 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -46,6 +46,7 @@
#include "accommon.h"
#include "actables.h"
#include <linux/tboot.h>
+#include <linux/module.h>
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwsleep")
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 9c8eb71a12f..50d21c40b5c 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index f75f81ad15c..c2793a82f12 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index c53f0040e49..e7f016d1b22 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 3fd4526f3db..83bf9302430 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index db7660f8b86..57e6d825ed8 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 2ff657a28f2..fe86b37b16c 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4b7085dfc68..e7d13f5d3f2 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a9bcd816dc2..a1f8d7509e6 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 97cb36f85ce..8b087e2d64f 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 833a38a9c90..ffba0a39c3e 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -43,6 +43,7 @@
#define DEFINE_ACPI_GLOBALS
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 98ad125e14f..420ebfe08c7 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 916ae097c43..8d0245ec431 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2ca59dc69f7..127408069ca 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -933,7 +933,7 @@ static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
struct timespec *time, struct pstore_info *psi);
-static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
struct pstore_info *psi);
@@ -1040,11 +1040,12 @@ out:
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
(erst_info.buf - sizeof(*rcd));
+ int ret;
memset(rcd, 0, sizeof(*rcd));
memcpy(rcd->hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
@@ -1079,9 +1080,10 @@ static u64 erst_writer(enum pstore_type_id type, unsigned int part,
}
rcd->sec_hdr.section_severity = CPER_SEV_FATAL;
- erst_write(&rcd->hdr);
+ ret = erst_write(&rcd->hdr);
+ *id = rcd->hdr.record_id;
- return rcd->hdr.record_id;
+ return ret;
}
static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -1165,7 +1167,7 @@ static int __init erst_init(void)
goto err_release_erange;
buf = kmalloc(erst_erange.size, GFP_KERNEL);
- mutex_init(&erst_info.buf_mutex);
+ spin_lock_init(&erst_info.buf_lock);
if (buf) {
erst_info.buf = buf + sizeof(struct cper_pstore_record);
erst_info.bufsize = erst_erange.size -
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 7489b89c300..cfc0cc10af3 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -24,7 +24,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
{
struct acpi_iomap *map;
- map = __acpi_find_iomap(paddr, size);
+ map = __acpi_find_iomap(paddr, size/8);
if (map)
return map->vaddr + (paddr - map->paddr);
else
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index af308d03f49..cb9629638de 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -28,7 +28,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 437ddbf0c49..9ecec98bc76 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -911,10 +911,7 @@ void __init acpi_early_init(void)
}
#endif
- status =
- acpi_enable_subsystem(~
- (ACPI_NO_HARDWARE_INIT |
- ACPI_NO_ACPI_ENABLE));
+ status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
goto error0;
@@ -935,8 +932,7 @@ static int __init acpi_bus_init(void)
acpi_os_initialize1();
- status =
- acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE);
+ status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
"Unable to start the ACPI Interpreter\n");
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 182a9fc3635..b55d6a20dc0 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -2,6 +2,7 @@
* debugfs.c - ACPI debugfs interface to userspace.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 22f918bacd3..6c47ae9793a 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/debugfs.h>
+#include <linux/module.h>
#include "internal.h"
MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 85d90899380..1442737cede 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -7,6 +7,7 @@
*/
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/poll.h>
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 7c47ed55e52..29a4a5c8ee0 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -6,6 +6,7 @@
*
* This file is released under the GPLv2.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/device.h>
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index fa32f584229..f31c5c5f1b7 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -80,7 +80,8 @@ static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
-static struct workqueue_struct *kacpi_hotplug_wq;
+struct workqueue_struct *kacpi_hotplug_wq;
+EXPORT_SYMBOL(kacpi_hotplug_wq);
struct acpi_res_list {
resource_size_t start;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index f5f986991b5..251c7b6273a 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -1,5 +1,6 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <asm/uaccess.h>
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 02d2a4c9084..3a0428e8435 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -7,6 +7,7 @@
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
+#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/slab.h>
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index a4e0f1ba604..9d7bc9f6b6c 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
if (action == CPU_ONLINE && pr) {
acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_cst_has_changed(pr);
+ acpi_processor_hotplug(pr);
acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
@@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
acpi_processor_get_throttling_info(pr);
acpi_processor_get_limit_info(pr);
-
- if (cpuidle_get_driver() == &acpi_idle_driver)
+ if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr, device);
pr->cdev = thermal_cooling_device_register("Processor", device,
@@ -800,17 +799,9 @@ static int __init acpi_processor_init(void)
memset(&errata, 0, sizeof(errata));
- if (!cpuidle_register_driver(&acpi_idle_driver)) {
- printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
- acpi_idle_driver.name);
- } else {
- printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
- cpuidle_get_driver()->name);
- }
-
result = acpi_bus_register_driver(&acpi_processor_driver);
if (result < 0)
- goto out_cpuidle;
+ return result;
acpi_processor_install_hotplug_notify();
@@ -821,11 +812,6 @@ static int __init acpi_processor_init(void)
acpi_processor_throttling_init();
return 0;
-
-out_cpuidle:
- cpuidle_unregister_driver(&acpi_idle_driver);
-
- return result;
}
static void __exit acpi_processor_exit(void)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 9b88f9828d8..0e8e2de2ed3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
/*
* Suspend / resume control
*/
-static int acpi_idle_suspend;
static u32 saved_bm_rld;
static void acpi_idle_bm_rld_save(void)
@@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void)
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
- if (acpi_idle_suspend == 1)
- return 0;
-
acpi_idle_bm_rld_save();
- acpi_idle_suspend = 1;
return 0;
}
int acpi_processor_resume(struct acpi_device * device)
{
- if (acpi_idle_suspend == 0)
- return 0;
-
acpi_idle_bm_rld_restore();
- acpi_idle_suspend = 0;
return 0;
}
@@ -741,66 +732,65 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
/**
* acpi_idle_enter_c1 - enters an ACPI C1 state-type
* @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing cpuidle state info
+ * @index: index of target state
*
* This is equivalent to the HALT instruction.
*/
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
- struct cpuidle_state *state)
+ struct cpuidle_driver *drv, int index)
{
ktime_t kt1, kt2;
s64 idle_time;
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+ struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+ struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
pr = __this_cpu_read(processors);
+ dev->last_residency = 0;
if (unlikely(!pr))
- return 0;
+ return -EINVAL;
local_irq_disable();
- /* Do not access any ACPI IO ports in suspend path */
- if (acpi_idle_suspend) {
- local_irq_enable();
- cpu_relax();
- return 0;
- }
-
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
idle_time = ktime_to_us(ktime_sub(kt2, kt1));
+ /* Update device last_residency */
+ dev->last_residency = (int)idle_time;
+
local_irq_enable();
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
- return idle_time;
+ return index;
}
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver with cpuidle state information
+ * @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
- struct cpuidle_state *state)
+ struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+ struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+ struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
pr = __this_cpu_read(processors);
+ dev->last_residency = 0;
if (unlikely(!pr))
- return 0;
-
- if (acpi_idle_suspend)
- return(acpi_idle_enter_c1(dev, state));
+ return -EINVAL;
local_irq_disable();
@@ -815,7 +805,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
- return 0;
+ return -EINVAL;
}
}
@@ -837,6 +827,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
+ /* Update device last_residency */
+ dev->last_residency = (int)idle_time;
+
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(idle_time_ns);
@@ -848,7 +841,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
- return idle_time;
+ return index;
}
static int c3_cpu_count;
@@ -857,37 +850,37 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing state data
+ * @index: the index of suggested state
*
* If BM is detected, the deepest non-C3 idle state is entered instead.
*/
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
- struct cpuidle_state *state)
+ struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+ struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+ struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
pr = __this_cpu_read(processors);
+ dev->last_residency = 0;
if (unlikely(!pr))
- return 0;
-
- if (acpi_idle_suspend)
- return(acpi_idle_enter_c1(dev, state));
+ return -EINVAL;
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
- if (dev->safe_state) {
- dev->last_state = dev->safe_state;
- return dev->safe_state->enter(dev, dev->safe_state);
+ if (drv->safe_state_index >= 0) {
+ return drv->states[drv->safe_state_index].enter(dev,
+ drv, drv->safe_state_index);
} else {
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
- return 0;
+ return -EINVAL;
}
}
@@ -904,7 +897,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
- return 0;
+ return -EINVAL;
}
}
@@ -954,6 +947,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
+ /* Update device last_residency */
+ dev->last_residency = (int)idle_time;
+
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(idle_time_ns);
@@ -965,7 +961,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
- return idle_time;
+ return index;
}
struct cpuidle_driver acpi_idle_driver = {
@@ -974,14 +970,16 @@ struct cpuidle_driver acpi_idle_driver = {
};
/**
- * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
+ * device i.e. per-cpu data
+ *
* @pr: the ACPI processor
*/
-static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
- struct cpuidle_state *state;
+ struct cpuidle_state_usage *state_usage;
struct cpuidle_device *dev = &pr->power.dev;
if (!pr->flags.power_setup_done)
@@ -992,9 +990,62 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
}
dev->cpu = pr->id;
+
+ if (max_cstate == 0)
+ max_cstate = 1;
+
+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+ cx = &pr->power.states[i];
+ state_usage = &dev->states_usage[count];
+
+ if (!cx->valid)
+ continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ continue;
+#endif
+
+ cpuidle_set_statedata(state_usage, cx);
+
+ count++;
+ if (count == CPUIDLE_STATE_MAX)
+ break;
+ }
+
+ dev->state_count = count;
+
+ if (!count)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
+ * global state data i.e. idle routines
+ *
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+{
+ int i, count = CPUIDLE_DRIVER_STATE_START;
+ struct acpi_processor_cx *cx;
+ struct cpuidle_state *state;
+ struct cpuidle_driver *drv = &acpi_idle_driver;
+
+ if (!pr->flags.power_setup_done)
+ return -EINVAL;
+
+ if (pr->flags.power == 0)
+ return -EINVAL;
+
+ drv->safe_state_index = -1;
for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
- dev->states[i].name[0] = '\0';
- dev->states[i].desc[0] = '\0';
+ drv->states[i].name[0] = '\0';
+ drv->states[i].desc[0] = '\0';
}
if (max_cstate == 0)
@@ -1002,7 +1053,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
- state = &dev->states[count];
if (!cx->valid)
continue;
@@ -1013,8 +1063,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
continue;
#endif
- cpuidle_set_statedata(state, cx);
+ state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
@@ -1027,13 +1077,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_c1;
- dev->safe_state = state;
+ drv->safe_state_index = count;
break;
case ACPI_STATE_C2:
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_simple;
- dev->safe_state = state;
+ drv->safe_state_index = count;
break;
case ACPI_STATE_C3:
@@ -1049,7 +1099,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
break;
}
- dev->state_count = count;
+ drv->state_count = count;
if (!count)
return -EINVAL;
@@ -1057,7 +1107,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
return 0;
}
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+int acpi_processor_hotplug(struct acpi_processor *pr)
{
int ret = 0;
@@ -1078,7 +1128,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
cpuidle_disable_device(&pr->power.dev);
acpi_processor_get_power_info(pr);
if (pr->flags.power) {
- acpi_processor_setup_cpuidle(pr);
+ acpi_processor_setup_cpuidle_cx(pr);
ret = cpuidle_enable_device(&pr->power.dev);
}
cpuidle_resume_and_unlock();
@@ -1086,10 +1136,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
return ret;
}
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+ int cpu;
+ struct acpi_processor *_pr;
+
+ if (disabled_by_idle_boot_param())
+ return 0;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (nocst)
+ return -ENODEV;
+
+ if (!pr->flags.power_setup_done)
+ return -ENODEV;
+
+ /*
+ * FIXME: Design the ACPI notification to make it once per
+ * system instead of once per-cpu. This condition is a hack
+ * to make the code that updates C-States be called once.
+ */
+
+ if (smp_processor_id() == 0 &&
+ cpuidle_get_driver() == &acpi_idle_driver) {
+
+ cpuidle_pause_and_lock();
+ /* Protect against cpu-hotplug */
+ get_online_cpus();
+
+ /* Disable all cpuidle devices */
+ for_each_online_cpu(cpu) {
+ _pr = per_cpu(processors, cpu);
+ if (!_pr || !_pr->flags.power_setup_done)
+ continue;
+ cpuidle_disable_device(&_pr->power.dev);
+ }
+
+ /* Populate Updated C-state information */
+ acpi_processor_setup_cpuidle_states(pr);
+
+ /* Enable all cpuidle devices */
+ for_each_online_cpu(cpu) {
+ _pr = per_cpu(processors, cpu);
+ if (!_pr || !_pr->flags.power_setup_done)
+ continue;
+ acpi_processor_get_power_info(_pr);
+ if (_pr->flags.power) {
+ acpi_processor_setup_cpuidle_cx(_pr);
+ cpuidle_enable_device(&_pr->power.dev);
+ }
+ }
+ put_online_cpus();
+ cpuidle_resume_and_unlock();
+ }
+
+ return 0;
+}
+
+static int acpi_processor_registered;
+
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
struct acpi_device *device)
{
acpi_status status = 0;
+ int retval;
static int first_run;
if (disabled_by_idle_boot_param())
@@ -1126,9 +1238,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
* platforms that only support C1.
*/
if (pr->flags.power) {
- acpi_processor_setup_cpuidle(pr);
- if (cpuidle_register_device(&pr->power.dev))
- return -EIO;
+ /* Register acpi_idle_driver if not already registered */
+ if (!acpi_processor_registered) {
+ acpi_processor_setup_cpuidle_states(pr);
+ retval = cpuidle_register_driver(&acpi_idle_driver);
+ if (retval)
+ return retval;
+ printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+ acpi_idle_driver.name);
+ }
+ /* Register per-cpu cpuidle_device. Cpuidle driver
+ * must already be registered before registering device
+ */
+ acpi_processor_setup_cpuidle_cx(pr);
+ retval = cpuidle_register_device(&pr->power.dev);
+ if (retval) {
+ if (acpi_processor_registered == 0)
+ cpuidle_unregister_driver(&acpi_idle_driver);
+ return retval;
+ }
+ acpi_processor_registered++;
}
return 0;
}
@@ -1139,8 +1268,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
if (disabled_by_idle_boot_param())
return 0;
- cpuidle_unregister_device(&pr->power.dev);
- pr->flags.power_setup_done = 0;
+ if (pr->flags.power) {
+ cpuidle_unregister_device(&pr->power.dev);
+ acpi_processor_registered--;
+ if (acpi_processor_registered == 0)
+ cpuidle_unregister_driver(&acpi_idle_driver);
+ }
+ pr->flags.power_setup_done = 0;
return 0;
}
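The processor_idle.c hunks above register acpi_idle_driver once and then attach a per-CPU cpuidle_device, dropping the driver again when the last device goes away. A minimal sketch of that registration order, assuming the 3.2-era cpuidle API; the example_* names are illustrative and not taken from the patch:

	#include <linux/module.h>
	#include <linux/cpuidle.h>

	static struct cpuidle_driver example_idle_driver = {
		.name  = "example_idle",
		.owner = THIS_MODULE,
	};

	static int example_registered;	/* counts registered per-CPU devices */

	static int example_power_init(struct cpuidle_device *dev)
	{
		int ret;

		/* The driver itself is registered only once ... */
		if (!example_registered) {
			ret = cpuidle_register_driver(&example_idle_driver);
			if (ret)
				return ret;
		}

		/* ... then every CPU registers its own cpuidle_device. */
		ret = cpuidle_register_device(dev);
		if (ret) {
			if (!example_registered)
				cpuidle_unregister_driver(&example_idle_driver);
			return ret;
		}

		example_registered++;
		return 0;
	}

	static void example_power_exit(struct cpuidle_device *dev)
	{
		cpuidle_unregister_device(dev);
		if (--example_registered == 0)
			cpuidle_unregister_driver(&example_idle_driver);
	}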
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index f8be23b6c12..f8d2a472795 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "sbshc.h"
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 449c556274c..8ab80bafe3f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1062,13 +1062,12 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id)
if (!id)
return;
- id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
+ id->id = kstrdup(dev_id, GFP_KERNEL);
if (!id->id) {
kfree(id);
return;
}
- strcpy(id->id, dev_id);
list_add_tail(&id->list, &device->pnp.ids);
}
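The scan.c hunk replaces an open-coded kmalloc(strlen() + 1) plus strcpy() with kstrdup(), which allocates and copies in one call. A small equivalence sketch with illustrative function names:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Open-coded form that the hunk removes */
	static char *copy_id_old(const char *dev_id)
	{
		char *id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);

		if (id)
			strcpy(id, dev_id);
		return id;
	}

	/* kstrdup() form that the hunk adds: same result, fewer steps */
	static char *copy_id_new(const char *dev_id)
	{
		return kstrdup(dev_id, GFP_KERNEL);
	}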
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 0e46faef1d3..6d9a3ab58db 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -398,6 +398,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB17FX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-SR11M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index c538d0ef10f..9f66181c814 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -706,11 +706,23 @@ static void __exit interrupt_stats_exit(void)
return;
}
+static ssize_t
+acpi_show_profile(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
+}
+
+static const struct device_attribute pm_profile_attr =
+ __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
+
int __init acpi_sysfs_init(void)
{
int result;
result = acpi_tables_sysfs_init();
-
+ if (result)
+ return result;
+ result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
return result;
}
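The sysfs.c hunk exposes acpi_gbl_FADT.preferred_profile as a read-only attribute on acpi_kobj; since acpi_kobj maps to /sys/firmware/acpi, the file should appear as /sys/firmware/acpi/pm_profile (path inferred from that mapping, not stated in the patch). A small userspace sketch that reads it:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/firmware/acpi/pm_profile", "r");
		int profile;

		if (!f || fscanf(f, "%d", &profile) != 1) {
			perror("pm_profile");
			return 1;
		}
		fclose(f);

		/* Values follow the FADT Preferred_PM_Profile field,
		   e.g. 1 = Desktop, 2 = Mobile, 3 = Workstation. */
		printf("ACPI preferred PM profile: %d\n", profile);
		return 0;
	}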
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 5af3479714f..f3f0fe7e255 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -33,6 +33,7 @@
*
*/
+#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/pci.h>
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index d74926e0939..bd230e80113 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -365,6 +365,40 @@ static int amba_pm_restore_noirq(struct device *dev)
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
+ * enable/disable the bus clock at runtime PM suspend/resume as this
+ * does not result in loss of context. However, disabling vcore power
+ * would do, so we leave that to the driver.
+ */
+static int amba_pm_runtime_suspend(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret = pm_generic_runtime_suspend(dev);
+
+ if (ret == 0 && dev->driver)
+ clk_disable(pcdev->pclk);
+
+ return ret;
+}
+
+static int amba_pm_runtime_resume(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret;
+
+ if (dev->driver) {
+ ret = clk_enable(pcdev->pclk);
+ /* Failure is probably fatal to the system, but... */
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+#endif
+
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
@@ -383,8 +417,8 @@ static const struct dev_pm_ops amba_pm = {
.poweroff_noirq = amba_pm_poweroff_noirq,
.restore_noirq = amba_pm_restore_noirq,
SET_RUNTIME_PM_OPS(
- pm_generic_runtime_suspend,
- pm_generic_runtime_resume,
+ amba_pm_runtime_suspend,
+ amba_pm_runtime_resume,
pm_generic_runtime_idle
)
};
@@ -426,9 +460,17 @@ static int amba_get_enable_pclk(struct amba_device *pcdev)
if (IS_ERR(pclk))
return PTR_ERR(pclk);
+ ret = clk_prepare(pclk);
+ if (ret) {
+ clk_put(pclk);
+ return ret;
+ }
+
ret = clk_enable(pclk);
- if (ret)
+ if (ret) {
+ clk_unprepare(pclk);
clk_put(pclk);
+ }
return ret;
}
@@ -438,6 +480,7 @@ static void amba_put_disable_pclk(struct amba_device *pcdev)
struct clk *pclk = pcdev->pclk;
clk_disable(pclk);
+ clk_unprepare(pclk);
clk_put(pclk);
}
@@ -494,10 +537,18 @@ static int amba_probe(struct device *dev)
if (ret)
break;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
ret = pcdrv->probe(pcdev, id);
if (ret == 0)
break;
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
amba_put_disable_pclk(pcdev);
amba_put_disable_vcore(pcdev);
} while (0);
@@ -509,7 +560,16 @@ static int amba_remove(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = drv->remove(pcdev);
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ ret = drv->remove(pcdev);
+ pm_runtime_put_noidle(dev);
+
+ /* Undo the runtime PM settings in amba_probe() */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
amba_put_disable_vcore(pcdev);
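The amba/bus.c hunks add the clk_prepare()/clk_unprepare() steps required by the common clock framework around the existing clk_enable()/clk_disable() calls, and gate the bus clock from the runtime PM hooks. A minimal sketch of the prepare/enable pairing the patch follows; the example_* names are illustrative:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/device.h>

	static int example_get_enable_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk = clk_get(dev, NULL);
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare(clk);		/* may sleep */
		if (ret)
			goto err_put;

		ret = clk_enable(clk);		/* atomic-safe once prepared */
		if (ret)
			goto err_unprepare;

		*out = clk;
		return 0;

	err_unprepare:
		clk_unprepare(clk);
	err_put:
		clk_put(clk);
		return ret;
	}

	static void example_put_disable_clk(struct clk *clk)
	{
		clk_disable(clk);
		clk_unprepare(clk);
		clk_put(clk);
	}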
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5987e0ba8c2..6bdedd7cca2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -820,7 +820,7 @@ config PATA_PLATFORM
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
- depends on PATA_PLATFORM && PPC_OF
+ depends on PATA_PLATFORM && OF
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware
@@ -831,6 +831,7 @@ config PATA_OF_PLATFORM
config PATA_QDI
tristate "QDI VLB PATA support"
depends on ISA
+ select PATA_LEGACY
help
Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 9550d691fd1..6ece5b7231a 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -88,7 +88,6 @@ obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
-obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fb7b90b0592..cf26222a93c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -390,6 +390,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ /* Asmedia */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */
+
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 6fef1fa75c5..ec555951176 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,41 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+enum ahci_type {
+ AHCI, /* standard platform ahci */
+ IMX53_AHCI, /* ahci on i.mx53 */
+};
+
+static struct platform_device_id ahci_devtype[] = {
+ {
+ .name = "ahci",
+ .driver_data = AHCI,
+ }, {
+ .name = "imx53-ahci",
+ .driver_data = IMX53_AHCI,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, ahci_devtype);
+
+
+static const struct ata_port_info ahci_port_info[] = {
+ /* by features */
+ [AHCI] = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+ [IMX53_AHCI] = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_pmp_retry_srst_ops,
+ },
+};
+
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT("ahci_platform"),
};
@@ -30,13 +65,9 @@ static struct scsi_host_template ahci_platform_sht = {
static int __init ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ahci_platform_data *pdata = dev->platform_data;
- struct ata_port_info pi = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
- };
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct ata_port_info pi = ahci_port_info[id->driver_data];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
struct ata_host *host;
@@ -160,7 +191,7 @@ err0:
static int __devexit ahci_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ahci_platform_data *pdata = dev->platform_data;
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
@@ -171,12 +202,20 @@ static int __devexit ahci_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "calxeda,hb-ahci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
static struct platform_driver ahci_driver = {
.remove = __devexit_p(ahci_remove),
.driver = {
.name = "ahci",
.owner = THIS_MODULE,
+ .of_match_table = ahci_of_match,
},
+ .id_table = ahci_devtype,
};
static int __init ahci_init(void)
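With the id_table above, the platform device name selects the ata_port_info entry through driver_data, so an i.MX53 board only needs to register its device as "imx53-ahci" to pick up ahci_pmp_retry_srst_ops. A hedged sketch of such board code; the resource values are placeholders, not real i.MX53 addresses:

	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/err.h>

	static struct resource example_ahci_resources[] = {
		{
			.start = 0x10000000,	/* placeholder MMIO base */
			.end   = 0x10000fff,
			.flags = IORESOURCE_MEM,
		}, {
			.start = 42,		/* placeholder IRQ */
			.end   = 42,
			.flags = IORESOURCE_IRQ,
		},
	};

	static int __init example_board_add_ahci(void)
	{
		struct platform_device *pdev;

		/* "imx53-ahci" matches the IMX53_AHCI id_table entry above */
		pdev = platform_device_register_simple("imx53-ahci", -1,
				example_ahci_resources,
				ARRAY_SIZE(example_ahci_resources));

		return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
	}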
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 43107e9415d..69ac373c72a 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -113,6 +113,8 @@ enum {
PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
+ PIIX_FLAG_PIO16 = (1 << 30), /* support 16-bit PIO only */
+
PIIX_80C_PRI = (1 << 5) | (1 << 4),
PIIX_80C_SEC = (1 << 7) | (1 << 6),
@@ -147,6 +149,7 @@ enum piix_controller_ids {
ich8m_apple_sata, /* locks up on second port enable */
tolapai_sata,
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
+ ich8_sata_snb,
};
struct piix_map_db {
@@ -177,6 +180,7 @@ static int piix_sidpr_scr_write(struct ata_link *link,
static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
unsigned hints);
static bool piix_irq_check(struct ata_port *ap);
+static int piix_port_start(struct ata_port *ap);
#ifdef CONFIG_PM
static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -298,21 +302,21 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (CPT) */
- { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (CPT) */
- { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PBG) */
- { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (PBG) */
{ 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Panther Point) */
- { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Panther Point) */
- { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Panther Point) */
{ 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Panther Point) */
@@ -338,6 +342,7 @@ static struct scsi_host_template piix_sht = {
static struct ata_port_operations piix_sata_ops = {
.inherits = &ata_bmdma32_port_ops,
.sff_irq_check = piix_irq_check,
+ .port_start = piix_port_start,
};
static struct ata_port_operations piix_pata_ops = {
@@ -478,6 +483,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_2port_sata] = &ich8_2port_map_db,
[ich8m_apple_sata] = &ich8m_apple_map_db,
[tolapai_sata] = &tolapai_map_db,
+ [ich8_sata_snb] = &ich8_map_db,
};
static struct ata_port_info piix_port_info[] = {
@@ -606,6 +612,19 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_vmw_ops,
},
+ /*
+ * Some Sandy Bridge chipsets have broken 32-bit PIO support,
+ * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
+ */
+ [ich8_sata_snb] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
};
static struct pci_bits piix_enable_bits[] = {
@@ -649,6 +668,14 @@ static const struct ich_laptop ich_laptop[] = {
{ 0, }
};
+static int piix_port_start(struct ata_port *ap)
+{
+ if (!(ap->flags & PIIX_FLAG_PIO16))
+ ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+
+ return ata_bmdma_port_start(ap);
+}
+
/**
* ich_pata_cable_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
@@ -704,22 +731,11 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
static DEFINE_SPINLOCK(piix_lock);
-/**
- * piix_set_piomode - Initialize host controller PATA PIO timings
- * @ap: Port whose timings we are configuring
- * @adev: um
- *
- * Set PIO mode for device, in host controller PCI config space.
- *
- * LOCKING:
- * None (inherited from caller).
- */
-
-static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void piix_set_timings(struct ata_port *ap, struct ata_device *adev,
+ u8 pio)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
- unsigned int pio = adev->pio_mode - XFER_PIO_0;
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
@@ -744,10 +760,16 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE enable */
-
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
+ /*
+ * If the drive's MWDMA mode is faster than its fastest PIO mode,
+ * we must force the PIO cycles down to PIO0.
+ */
+ if (adev->pio_mode < XFER_PIO_0 + pio)
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
spin_lock_irqsave(&piix_lock, flags);
@@ -759,8 +781,6 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
if (is_slave) {
/* clear TIME1|IE1|PPE1|DTE1 */
master_data &= 0xff0f;
- /* Enable SITRE (separate slave timing register) */
- master_data |= 0x4000;
/* enable PPE1, IE1 and TIME1 as needed */
master_data |= (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
@@ -778,6 +798,9 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
+
+ /* Enable SITRE (separate slave timing register) */
+ master_data |= 0x4000;
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
@@ -795,6 +818,22 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/**
+ * piix_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
* do_pata_set_dmamode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
@@ -810,31 +849,20 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
- u8 master_port = ap->port_no ? 0x42 : 0x40;
- u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
u8 udma_enable = 0;
- static const /* ISP RTC */
- u8 timings[][2] = { { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
- spin_lock_irqsave(&piix_lock, flags);
-
- pci_read_config_word(dev, master_port, &master_data);
- if (ap->udma_mask)
- pci_read_config_byte(dev, 0x48, &udma_enable);
-
if (speed >= XFER_UDMA_0) {
- unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+ unsigned int udma = speed - XFER_UDMA_0;
u16 udma_timing;
u16 ideconf;
int u_clock, u_speed;
+ spin_lock_irqsave(&piix_lock, flags);
+
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
/*
* UDMA is handled by a combination of clock switching and
* selection of dividers
@@ -867,56 +895,21 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
performance (WR_PingPong_En) */
pci_write_config_word(dev, 0x54, ideconf);
}
+
+ pci_write_config_byte(dev, 0x48, udma_enable);
+
+ spin_unlock_irqrestore(&piix_lock, flags);
} else {
- /*
- * MWDMA is driven by the PIO timings. We must also enable
- * IORDY unconditionally along with TIME1. PPE has already
- * been set when the PIO timing was set.
- */
- unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
- unsigned int control;
- u8 slave_data;
+ /* MWDMA is driven by the PIO timings. */
+ unsigned int mwdma = speed - XFER_MW_DMA_0;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
- control = 3; /* IORDY|TIME1 */
-
- /* If the drive MWDMA is faster than it can do PIO then
- we must force PIO into PIO0 */
-
- if (adev->pio_mode < needed_pio[mwdma])
- /* Enable DMA timing only */
- control |= 8; /* PIO cycles in PIO0 */
-
- if (adev->devno) { /* Slave */
- master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
- master_data |= control << 4;
- pci_read_config_byte(dev, 0x44, &slave_data);
- slave_data &= (ap->port_no ? 0x0f : 0xf0);
- /* Load the matching timing */
- slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
- pci_write_config_byte(dev, 0x44, slave_data);
- } else { /* Master */
- master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
- and master timing bits */
- master_data |= control;
- master_data |=
- (timings[pio][0] << 12) |
- (timings[pio][1] << 8);
- }
-
- if (ap->udma_mask)
- udma_enable &= ~(1 << devid);
-
- pci_write_config_word(dev, master_port, master_data);
+ /* XFER_PIO_0 is never used currently */
+ piix_set_timings(ap, adev, pio);
}
- /* Don't scribble on 0x48 if the controller does not support UDMA */
- if (ap->udma_mask)
- pci_write_config_byte(dev, 0x48, udma_enable);
-
- spin_unlock_irqrestore(&piix_lock, flags);
}
/**
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4a3a5ae7bb4..c04ad68cb60 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2938,7 +2938,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
memset(&p, 0, sizeof(p));
- if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
+ if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
if (speed <= XFER_PIO_2)
p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
else if ((speed <= XFER_PIO_4) ||
@@ -6713,6 +6713,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ed16fbedaab..a9b28203800 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/blkdev.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -2532,8 +2533,7 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
return reset(link, classes, deadline);
}
-static int ata_eh_followup_srst_needed(struct ata_link *link,
- int rc, const unsigned int *classes)
+static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
return 0;
@@ -2726,7 +2726,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
/* perform follow-up SRST if necessary */
if (reset == hardreset &&
- ata_eh_followup_srst_needed(link, rc, classes)) {
+ ata_eh_followup_srst_needed(link, rc)) {
reset = softreset;
if (!reset) {
@@ -2883,7 +2883,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
sata_scr_read(link, SCR_STATUS, &sstatus))
rc = -ERESTART;
- if (rc == -ERESTART || try >= max_tries) {
+ if (try >= max_tries) {
/*
* Thaw host port even if reset failed, so that the port
* can be retried on the next phy event. This risks
@@ -2909,6 +2909,16 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_eh_acquire(ap);
}
+ /*
+ * While disks spin up behind a PMP, some controllers fail to send SRST.
+ * They need to be reset - as well as the PMP - before retrying.
+ */
+ if (rc == -ERESTART) {
+ if (ata_is_host_link(link))
+ ata_eh_thaw_port(ap);
+ goto out;
+ }
+
if (try == max_tries - 1) {
sata_down_spd_limit(link, 0);
if (slave)
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 3eb2b816eb2..21b80c555c6 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include "libata.h"
@@ -388,12 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
- /* Class code report is unreliable and SRST
- * times out under certain configurations.
- */
+ /* Class code report is unreliable. */
if (link->pmp < 5)
- link->flags |= ATA_LFLAG_NO_SRST |
- ATA_LFLAG_ASSUME_ATA;
+ link->flags |= ATA_LFLAG_ASSUME_ATA;
/* port 5 is for SEMB device and it doesn't like SRST */
if (link->pmp == 5)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 46d087f0860..2a5412e7e9c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
@@ -1215,25 +1216,19 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
}
/**
- * ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
+ * @ap: ATA port to which the device change the queue depth
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
* @reason: calling context
*
- * This is libata standard hostt->change_queue_depth callback.
- * SCSI will call into this callback when user tries to set queue
- * depth via sysfs.
+ * libsas and libata have different approaches for associating a sdev to
+ * its ata_port.
*
- * LOCKING:
- * SCSI layer (we don't care)
- *
- * RETURNS:
- * Newly configured queue depth.
*/
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
- int reason)
+int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth, int reason)
{
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
unsigned long flags;
@@ -1269,6 +1264,30 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
}
/**
+ * ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ * @sdev: SCSI device to configure queue depth for
+ * @queue_depth: new queue depth
+ * @reason: calling context
+ *
+ * This is libata standard hostt->change_queue_depth callback.
+ * SCSI will call into this callback when user tries to set queue
+ * depth via sysfs.
+ *
+ * LOCKING:
+ * SCSI layer (we don't care)
+ *
+ * RETURNS:
+ * Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+ int reason)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+
+ return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
+}
+
+/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
*
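The libata-scsi.c split exports __ata_change_queue_depth() so that transports which do not resolve the ata_port via ata_shost_to_port() (libsas, per the new comment) can reuse the same queue-depth logic. A sketch of such a wrapper; my_sdev_to_ata_port() is a hypothetical lookup helper, not a real API:

	#include <linux/libata.h>
	#include <scsi/scsi_device.h>

	/* Hypothetical transport-specific lookup of the owning ata_port */
	struct ata_port *my_sdev_to_ata_port(struct scsi_device *sdev);

	static int my_change_queue_depth(struct scsi_device *sdev,
					 int queue_depth, int reason)
	{
		struct ata_port *ap = my_sdev_to_ata_port(sdev);

		return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
	}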
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index c24127dd6ef..63d53277d6a 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
@@ -569,7 +570,7 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
/* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
- unsigned char pad[2];
+ unsigned char pad[2] = { };
/* Point buf to the tail of buffer */
buf += buflen - 1;
@@ -628,7 +629,7 @@ unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
/* Transfer trailing bytes, if any */
if (unlikely(slop)) {
- unsigned char pad[4];
+ unsigned char pad[4] = { };
/* Point buf to the tail of buffer */
buf += buflen - slop;
@@ -678,7 +679,7 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
unsigned int consumed;
local_irq_save(flags);
- consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+ consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
local_irq_restore(flags);
return consumed;
@@ -2507,31 +2508,10 @@ static const struct ata_port_info *ata_sff_find_valid_pi(
return NULL;
}
-/**
- * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
- * @pdev: Controller to be initialized
- * @ppi: array of port_info, must be enough for two ports
- * @sht: scsi_host_template to use when registering the host
- * @host_priv: host private_data
- * @hflag: host flags
- *
- * This is a helper function which can be called from a driver's
- * xxx_init_one() probe function if the hardware uses traditional
- * IDE taskfile registers and is PIO only.
- *
- * ASSUMPTION:
- * Nobody makes a single channel controller that appears solely as
- * the secondary legacy port on PCI.
- *
- * LOCKING:
- * Inherited from PCI layer (may sleep).
- *
- * RETURNS:
- * Zero on success, negative on errno-based value on error.
- */
-int ata_pci_sff_init_one(struct pci_dev *pdev,
- const struct ata_port_info * const *ppi,
- struct scsi_host_template *sht, void *host_priv, int hflag)
+static int ata_pci_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const *ppi,
+ struct scsi_host_template *sht, void *host_priv,
+ int hflags, bool bmdma)
{
struct device *dev = &pdev->dev;
const struct ata_port_info *pi;
@@ -2553,14 +2533,22 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
if (rc)
goto out;
- /* prepare and activate SFF host */
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (bmdma)
+ /* prepare and activate BMDMA host */
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+ else
+ /* prepare and activate SFF host */
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
if (rc)
goto out;
host->private_data = host_priv;
- host->flags |= hflag;
+ host->flags |= hflags;
- rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
+ if (bmdma) {
+ pci_set_master(pdev);
+ rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
+ } else
+ rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
devres_remove_group(&pdev->dev, NULL);
@@ -2569,6 +2557,35 @@ out:
return rc;
}
+
+/**
+ * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ * @hflag: host flags
+ *
+ * This is a helper function which can be called from a driver's
+ * xxx_init_one() probe function if the hardware uses traditional
+ * IDE taskfile registers and is PIO only.
+ *
+ * ASSUMPTION:
+ * Nobody makes a single channel controller that appears solely as
+ * the secondary legacy port on PCI.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno-based value on error.
+ */
+int ata_pci_sff_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const *ppi,
+ struct scsi_host_template *sht, void *host_priv, int hflag)
+{
+ return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
+}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
#endif /* CONFIG_PCI */
@@ -3286,42 +3303,7 @@ int ata_pci_bmdma_init_one(struct pci_dev *pdev,
struct scsi_host_template *sht, void *host_priv,
int hflags)
{
- struct device *dev = &pdev->dev;
- const struct ata_port_info *pi;
- struct ata_host *host = NULL;
- int rc;
-
- DPRINTK("ENTER\n");
-
- pi = ata_sff_find_valid_pi(ppi);
- if (!pi) {
- dev_err(&pdev->dev, "no valid port_info specified\n");
- return -EINVAL;
- }
-
- if (!devres_open_group(dev, NULL, GFP_KERNEL))
- return -ENOMEM;
-
- rc = pcim_enable_device(pdev);
- if (rc)
- goto out;
-
- /* prepare and activate BMDMA host */
- rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
- if (rc)
- goto out;
- host->private_data = host_priv;
- host->flags |= hflags;
-
- pci_set_master(pdev);
- rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
- out:
- if (rc == 0)
- devres_remove_group(&pdev->dev, NULL);
- else
- devres_release_group(&pdev->dev, NULL);
-
- return rc;
+ return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
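After this refactor both ata_pci_sff_init_one() and ata_pci_bmdma_init_one() funnel into the shared ata_pci_init_one(), differing only in the bmdma flag. A minimal sketch of a BMDMA-capable driver probe using the helper; the example_* names and transfer modes are illustrative:

	#include <linux/pci.h>
	#include <linux/libata.h>

	static struct scsi_host_template example_sht = {
		ATA_BMDMA_SHT("pata_example"),
	};

	static const struct ata_port_info example_port_info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ata_bmdma_port_ops,
	};

	static int example_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
	{
		const struct ata_port_info *ppi[] = { &example_port_info, NULL };

		/* Ends up in ata_pci_init_one(..., bmdma = true) */
		return ata_pci_bmdma_init_one(pdev, ppi, &example_sht, NULL, 0);
	}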
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index cadd67998ba..61da0694aec 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -56,7 +56,7 @@ static const struct dmi_system_id cable_dmi_table[] = {
},
},
{
- .ident = "Toshiba Satelite S1800-814",
+ .ident = "Toshiba Satellite S1800-814",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 719bb73a73e..e8574bba3ee 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -922,8 +922,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int arasan_cf_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = dev_get_drvdata(dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
if (acdev->dma_chan) {
@@ -937,8 +936,7 @@ static int arasan_cf_suspend(struct device *dev)
static int arasan_cf_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = dev_get_drvdata(dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
cf_init(acdev);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 78a93b69095..4b8b22efc00 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -2,7 +2,7 @@
* pata_artop.c - ARTOP ATA controller driver
*
* (C) 2006 Red Hat
- * (C) 2007 Bartlomiej Zolnierkiewicz
+ * (C) 2007,2011 Bartlomiej Zolnierkiewicz
*
* Based in part on drivers/ide/pci/aec62xx.c
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
@@ -28,7 +28,7 @@
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
-#define DRV_VERSION "0.4.5"
+#define DRV_VERSION "0.4.6"
/*
* The ARTOP has 33 Mhz and "over clocked" timing tables. Until we
@@ -39,31 +39,15 @@
static int clock = 0;
-static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- const struct pci_bits artop_enable_bits[] = {
- { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
- { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
- };
-
- if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
- return -ENOENT;
-
- return ata_sff_prereset(link, deadline);
-}
-
/**
- * artop6260_pre_reset - check for 40/80 pin
+ * artop62x0_pre_reset - probe begin
* @link: link
* @deadline: deadline jiffies for the operation
*
- * The ARTOP hardware reports the cable detect bits in register 0x49.
* Nothing complicated needed here.
*/
-static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
+static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits artop_enable_bits[] = {
{ 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
@@ -73,7 +57,7 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- /* Odd numbered device ids are the units with enable bits (the -R cards) */
+ /* Odd numbered device ids are the units with enable bits. */
if ((pdev->device & 1) &&
!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
@@ -317,7 +301,7 @@ static struct ata_port_operations artop6210_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
- .prereset = artop6210_pre_reset,
+ .prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
@@ -326,9 +310,36 @@ static struct ata_port_operations artop6260_ops = {
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
- .prereset = artop6260_pre_reset,
+ .prereset = artop62x0_pre_reset,
};
+static void atp8xx_fixup(struct pci_dev *pdev)
+{
+ if (pdev->device == 0x0005)
+ /* BIOS may have left us in UDMA, clear it before libata probe */
+ pci_write_config_byte(pdev, 0x54, 0);
+ else if (pdev->device == 0x0008 || pdev->device == 0x0009) {
+ u8 reg;
+
+ /* Mac systems come up with some registers not set the way we
+ will need them */
+
+ /* Clear reset & test bits */
+ pci_read_config_byte(pdev, 0x49, &reg);
+ pci_write_config_byte(pdev, 0x49, reg & ~0x30);
+
+ /* PCI latency must be > 0x80 for burst mode, tweak it
+ * if required.
+ */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
+ if (reg <= 0x80)
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
+
+ /* Enable IRQ output and burst mode */
+ pci_read_config_byte(pdev, 0x4a, &reg);
+ pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
+ }
+}
/**
* artop_init_one - Register ARTOP ATA PCI device with kernel services
@@ -383,42 +394,22 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- if (id->driver_data == 0) { /* 6210 variant */
+ if (id->driver_data == 0) /* 6210 variant */
ppi[0] = &info_6210;
- /* BIOS may have left us in UDMA, clear it before libata probe */
- pci_write_config_byte(pdev, 0x54, 0);
- }
else if (id->driver_data == 1) /* 6260 */
ppi[0] = &info_626x;
else if (id->driver_data == 2) { /* 6280 or 6280 + fast */
unsigned long io = pci_resource_start(pdev, 4);
- u8 reg;
ppi[0] = &info_628x;
if (inb(io) & 0x10)
ppi[0] = &info_628x_fast;
- /* Mac systems come up with some registers not set as we
- will need them */
-
- /* Clear reset & test bits */
- pci_read_config_byte(pdev, 0x49, &reg);
- pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
-
- /* PCI latency must be > 0x80 for burst mode, tweak it
- * if required.
- */
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
- if (reg <= 0x80)
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
-
- /* Enable IRQ output and burst mode */
- pci_read_config_byte(pdev, 0x4a, &reg);
- pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
-
}
BUG_ON(ppi[0] == NULL);
+ atp8xx_fixup(pdev);
+
return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
@@ -432,11 +423,32 @@ static const struct pci_device_id artop_pci_tbl[] = {
{ } /* terminate list */
};
+#ifdef CONFIG_PM
+static int atp8xx_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ atp8xx_fixup(pdev);
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static struct pci_driver artop_pci_driver = {
.name = DRV_NAME,
.id_table = artop_pci_tbl,
.probe = artop_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = atp8xx_reinit_one,
+#endif
};
static int __init artop_init(void)
@@ -452,9 +464,8 @@ static void __exit artop_exit(void)
module_init(artop_init);
module_exit(artop_exit);
-MODULE_AUTHOR("Alan Cox");
+MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 960c7257139..a76f24a8e5d 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -30,7 +30,7 @@
#include <mach/at91sam9_smc.h>
#include <mach/board.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#define DRV_NAME "pata_at91"
#define DRV_VERSION "0.3"
@@ -414,10 +414,13 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
host->private_data = info;
- return ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
+ ret = ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
irq ? ata_sff_interrupt : NULL,
irq_flags, &pata_at91_sht);
+ if (!ret)
+ return 0;
+
err_put:
clk_put(info->mck);
return ret;
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 43755616dc5..be1aa1486d3 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -49,6 +49,31 @@ static int atiixp_cable_detect(struct ata_port *ap)
static DEFINE_SPINLOCK(atiixp_lock);
/**
+ * atiixp_prereset - perform reset handling
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Reset handling that checks the enable bits to see which ports
+ * are active.
+ */
+
+static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits atiixp_enable_bits[] = {
+ { 0x48, 1, 0x01, 0x00 },
+ { 0x48, 1, 0x08, 0x00 }
+ };
+
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
* atiixp_set_pio_timing - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
@@ -221,6 +246,7 @@ static struct ata_port_operations atiixp_port_ops = {
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
+ .prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
@@ -235,16 +261,7 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
.udma_mask = ATA_UDMA5,
.port_ops = &atiixp_port_ops
};
- static const struct pci_bits atiixp_enable_bits[] = {
- { 0x48, 1, 0x01, 0x00 },
- { 0x48, 1, 0x08, 0x00 }
- };
const struct ata_port_info *ppi[] = { &info, &info };
- int i;
-
- for (i = 0; i < 2; i++)
- if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
- ppi[i] = &ata_dummy_port_info;
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 7bafc16cf5e..e1fb39a74ce 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -82,7 +82,7 @@ static int cmd648_cable_detect(struct ata_port *ap)
}
/**
- * cmd64x_set_piomode - set PIO and MWDMA timing
+ * cmd64x_set_timing - set PIO and MWDMA timing
* @ap: ATA interface
* @adev: ATA device
* @mode: mode
@@ -288,6 +288,22 @@ static struct ata_port_operations cmd648_port_ops = {
.cable_detect = cmd648_cable_detect,
};
+static void cmd64x_fixup(struct pci_dev *pdev)
+{
+ u8 mrdmode;
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+ pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+ mrdmode &= ~0x30; /* IRQ set up */
+ mrdmode |= 0x02; /* Memory read line enable */
+ pci_write_config_byte(pdev, MRDMODE, mrdmode);
+
+ /* PPC specific fixup copied from old driver */
+#ifdef CONFIG_PPC
+ pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+}
+
static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info cmd_info[6] = {
@@ -336,7 +352,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
&cmd_info[id->driver_data],
NULL
};
- u8 mrdmode, reg;
+ u8 reg;
int rc;
struct pci_dev *bridge = pdev->bus->self;
/* mobility split bridges don't report enabled ports correctly */
@@ -368,11 +384,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
cntrl_ch0_ok = 0;
}
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
- pci_read_config_byte(pdev, MRDMODE, &mrdmode);
- mrdmode &= ~ 0x30; /* IRQ set up */
- mrdmode |= 0x02; /* Memory read line enable */
- pci_write_config_byte(pdev, MRDMODE, mrdmode);
+ cmd64x_fixup(pdev);
/* check for enabled ports */
pci_read_config_byte(pdev, CNTRL, &reg);
@@ -388,13 +400,6 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[1] = &ata_dummy_port_info;
}
- /* Force PIO 0 here.. */
-
- /* PPC specific fixup copied from old driver */
-#ifdef CONFIG_PPC
- pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
-#endif
-
return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
}
@@ -402,21 +407,14 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
static int cmd64x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
- u8 mrdmode;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
- pci_read_config_byte(pdev, MRDMODE, &mrdmode);
- mrdmode &= ~ 0x30; /* IRQ set up */
- mrdmode |= 0x02; /* Memory read line enable */
- pci_write_config_byte(pdev, MRDMODE, mrdmode);
-#ifdef CONFIG_PPC
- pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
-#endif
+ cmd64x_fixup(pdev);
+
ata_host_resume(host);
return 0;
}
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 03a93186aa1..a0b4640125a 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -38,7 +38,7 @@
#include <linux/libata.h>
#include <asm/msr.h>
-#define DRV_NAME "cs5535"
+#define DRV_NAME "pata_cs5535"
#define DRV_VERSION "0.2.12"
/*
@@ -67,8 +67,6 @@
#define CS5535_CABLE_DETECT 0x48
-#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL)==0x00009172 )
-
/**
* cs5535_cable_detect - detect cable type
* @ap: Port to detect on
@@ -188,16 +186,6 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
- u32 timings, dummy;
-
- /* Check the BIOS set the initial timing clock. If not set the
- timings for PIO0 */
- rdmsr(ATAC_CH0D0_PIO, timings, dummy);
- if (CS5535_BAD_PIO(timings))
- wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0);
- rdmsr(ATAC_CH0D1_PIO, timings, dummy);
- if (CS5535_BAD_PIO(timings))
- wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
}
@@ -230,7 +218,7 @@ static void __exit cs5535_exit(void)
}
MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch");
-MODULE_DESCRIPTION("low-level driver for the NS/AMD 5530");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5535);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index aca47e4e29e..f0243ed206f 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(efar_lock);
/**
* efar_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
- * @adev: um
+ * @adev: Device to program
*
* Set PIO mode for device, in host controller PCI config space.
*
@@ -85,9 +85,9 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
- unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
+ unsigned int master_port = ap->port_no ? 0x42 : 0x40;
unsigned long flags;
- u16 idetm_data;
+ u16 master_data;
u8 udma_enable;
int control = 0;
@@ -113,20 +113,20 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
spin_lock_irqsave(&efar_lock, flags);
- pci_read_config_word(dev, idetm_port, &idetm_data);
+ pci_read_config_word(dev, master_port, &master_data);
/* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
- idetm_data &= 0xCCF0;
- idetm_data |= control;
- idetm_data |= (timings[pio][0] << 12) |
+ master_data &= 0xCCF0;
+ master_data |= control;
+ master_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
} else {
int shift = 4 * ap->port_no;
u8 slave_data;
- idetm_data &= 0xFF0F;
- idetm_data |= (control << 4);
+ master_data &= 0xFF0F;
+ master_data |= (control << 4);
/* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
@@ -135,8 +135,8 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_byte(dev, 0x44, slave_data);
}
- idetm_data |= 0x4000; /* Ensure SITRE is set */
- pci_write_config_word(dev, idetm_port, idetm_data);
+ master_data |= 0x4000; /* Ensure SITRE is set */
+ pci_write_config_word(dev, master_port, master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 6c77d68dbd0..42cffd38910 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -111,6 +111,28 @@ static const struct hpt_clock hpt366_25[] = {
{ 0, 0x01208585 }
};
+/**
+ * hpt36x_find_mode - find the hpt36x timing
+ * @ap: ATA port
+ * @speed: transfer mode
+ *
+ * Return the 32bit register programming information for this channel
+ * that matches the speed provided.
+ */
+
+static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
+{
+ struct hpt_clock *clocks = ap->host->private_data;
+
+ while (clocks->xfer_mode) {
+ if (clocks->xfer_mode == speed)
+ return clocks->timing;
+ clocks++;
+ }
+ BUG();
+ return 0xffffffffU; /* silence compiler warning */
+}
+
static const char * const bad_ata33[] = {
"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
"Maxtor 90845U3", "Maxtor 90650U2",
@@ -210,10 +232,9 @@ static int hpt36x_cable_detect(struct ata_port *ap)
static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
- struct hpt_clock *clocks = ap->host->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr = 0x40 + 4 * adev->devno;
- u32 mask, reg;
+ u32 mask, reg, t;
/* determine timing mask and find matching clock entry */
if (mode < XFER_MW_DMA_0)
@@ -223,13 +244,7 @@ static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
else
mask = 0x30070000;
- while (clocks->xfer_mode) {
- if (clocks->xfer_mode == mode)
- break;
- clocks++;
- }
- if (!clocks->xfer_mode)
- BUG();
+ t = hpt36x_find_mode(ap, mode);
/*
* Combine new mode bits with old config bits and disable
@@ -237,7 +252,7 @@ static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
* problems handling I/O errors later.
*/
pci_read_config_dword(pdev, addr, &reg);
- reg = ((reg & ~mask) | (clocks->timing & mask)) & ~0xc0000000;
+ reg = ((reg & ~mask) | (t & mask)) & ~0xc0000000;
pci_write_config_dword(pdev, addr, reg);
}
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 998af0e629b..cf9164d79f1 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -76,8 +76,8 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
- unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
- u16 idetm_data;
+ unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+ u16 master_data;
int control = 0;
/*
@@ -100,19 +100,19 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
if (adev->class != ATA_DEV_ATA)
control |= 4; /* PPE */
- pci_read_config_word(dev, idetm_port, &idetm_data);
+ pci_read_config_word(dev, master_port, &master_data);
/* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
- idetm_data &= 0xCCF0;
- idetm_data |= control;
- idetm_data |= (timings[pio][0] << 12) |
+ master_data &= 0xCCF0;
+ master_data |= control;
+ master_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
} else {
u8 slave_data;
- idetm_data &= 0xFF0F;
- idetm_data |= (control << 4);
+ master_data &= 0xFF0F;
+ master_data |= (control << 4);
/* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
@@ -121,8 +121,8 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_byte(dev, 0x44, slave_data);
}
- idetm_data |= 0x4000; /* Ensure SITRE is set */
- pci_write_config_word(dev, idetm_port, idetm_data);
+ master_data |= 0x4000; /* Ensure SITRE is set */
+ pci_write_config_word(dev, master_port, master_data);
}
/**
@@ -163,7 +163,7 @@ static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev)
/* Clocks follow the PIIX style */
u_speed = min(2 - (udma & 1), udma);
- if (udma == 5)
+ if (udma > 4)
u_clock = 0x1000; /* 100Mhz */
else if (udma > 2)
u_clock = 1; /* 66Mhz */
@@ -262,7 +262,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA4, /* FIXME: want UDMA 100? */
+ .udma_mask = ATA_UDMA6,
.port_ops = &it8213_ops,
};
/* Current IT8213 stuff is single port */
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index d960f8e9e8b..35aca7d1a3e 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -79,15 +79,6 @@ static int all;
module_param(all, int, 0444);
MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI(0=off, 1=on)");
-struct legacy_data {
- unsigned long timing;
- u8 clock[2];
- u8 last;
- int fast;
- struct platform_device *platform_dev;
-
-};
-
enum controller {
BIOS = 0,
SNOOP = 1,
@@ -104,6 +95,14 @@ enum controller {
UNKNOWN = -1
};
+struct legacy_data {
+ unsigned long timing;
+ u8 clock[2];
+ u8 last;
+ int fast;
+ enum controller type;
+ struct platform_device *platform_dev;
+};
struct legacy_probe {
unsigned char *name;
@@ -137,11 +136,17 @@ static int ht6560a; /* HT 6560A on primary 1, second 2, both 3 */
static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
-static int qdi; /* Set to probe QDI controllers */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
+/* Set to probe QDI controllers */
+#ifdef CONFIG_PATA_QDI_MODULE
+static int qdi = 1;
+#else
+static int qdi;
+#endif
+
#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
static int winbond = 1; /* Set to probe Winbond controllers,
give I/O port if non standard */
@@ -631,40 +636,20 @@ static struct ata_port_operations opti82c46x_port_ops = {
.qc_issue = opti82c46x_qc_issue,
};
-static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct legacy_data *ld_qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- if (ld_qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
-
- ld_qdi->clock[adev->devno] = timing;
-
- outb(timing, ld_qdi->timing);
-}
-
/**
- * qdi6580dp_set_piomode - PIO setup for dual channel
+ * qdi65x0_set_piomode - PIO setup for QDI65x0
* @ap: Port
* @adev: Device
*
+ * In single channel mode the 6580 has one clock per device and we can
+ * avoid the requirement to clock switch. We also have to load the timing
+ * into the right clock according to whether we are master or slave.
+ *
* In dual channel mode the 6580 has one clock per channel and we have
* to software clockswitch in qc_issue.
*/
-static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void qdi65x0_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
struct legacy_data *ld_qdi = ap->host->private_data;
@@ -682,47 +667,15 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
recovery = 15 - clamp_val(t.recover, 0, 15);
}
timing = (recovery << 4) | active | 0x08;
-
ld_qdi->clock[adev->devno] = timing;
- outb(timing, ld_qdi->timing + 2 * ap->port_no);
- /* Clear the FIFO */
- if (adev->class != ATA_DEV_ATA)
- outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3);
-}
-
-/**
- * qdi6580_set_piomode - PIO setup for single channel
- * @ap: Port
- * @adev: Device
- *
- * In single channel mode the 6580 has one clock per device and we can
- * avoid the requirement to clock switch. We also have to load the timing
- * into the right clock according to whether we are master or slave.
- */
-
-static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct legacy_data *ld_qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+ if (ld_qdi->type == QDI6580)
+ outb(timing, ld_qdi->timing + 2 * adev->devno);
+ else
+ outb(timing, ld_qdi->timing + 2 * ap->port_no);
- if (ld_qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
- ld_qdi->clock[adev->devno] = timing;
- outb(timing, ld_qdi->timing + 2 * adev->devno);
/* Clear the FIFO */
- if (adev->class != ATA_DEV_ATA)
+ if (ld_qdi->type != QDI6500 && adev->class != ATA_DEV_ATA)
outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3);
}
@@ -789,20 +742,20 @@ static int qdi_port(struct platform_device *dev,
static struct ata_port_operations qdi6500_port_ops = {
.inherits = &legacy_base_port_ops,
- .set_piomode = qdi6500_set_piomode,
+ .set_piomode = qdi65x0_set_piomode,
.qc_issue = qdi_qc_issue,
.sff_data_xfer = vlb32_data_xfer,
};
static struct ata_port_operations qdi6580_port_ops = {
.inherits = &legacy_base_port_ops,
- .set_piomode = qdi6580_set_piomode,
+ .set_piomode = qdi65x0_set_piomode,
.sff_data_xfer = vlb32_data_xfer,
};
static struct ata_port_operations qdi6580dp_port_ops = {
.inherits = &legacy_base_port_ops,
- .set_piomode = qdi6580dp_set_piomode,
+ .set_piomode = qdi65x0_set_piomode,
.qc_issue = qdi_qc_issue,
.sff_data_xfer = vlb32_data_xfer,
};
@@ -879,29 +832,29 @@ static struct ata_port_operations winbond_port_ops = {
};
static struct legacy_controller controllers[] = {
- {"BIOS", &legacy_port_ops, 0x1F,
+ {"BIOS", &legacy_port_ops, ATA_PIO4,
ATA_FLAG_NO_IORDY, 0, NULL },
- {"Snooping", &simple_port_ops, 0x1F,
+ {"Snooping", &simple_port_ops, ATA_PIO4,
0, 0, NULL },
- {"PDC20230", &pdc20230_port_ops, 0x7,
+ {"PDC20230", &pdc20230_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY,
ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, NULL },
- {"HT6560A", &ht6560a_port_ops, 0x07,
+ {"HT6560A", &ht6560a_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY, 0, NULL },
- {"HT6560B", &ht6560b_port_ops, 0x1F,
+ {"HT6560B", &ht6560b_port_ops, ATA_PIO4,
ATA_FLAG_NO_IORDY, 0, NULL },
- {"OPTI82C611A", &opti82c611a_port_ops, 0x0F,
+ {"OPTI82C611A", &opti82c611a_port_ops, ATA_PIO3,
0, 0, NULL },
- {"OPTI82C46X", &opti82c46x_port_ops, 0x0F,
+ {"OPTI82C46X", &opti82c46x_port_ops, ATA_PIO3,
0, 0, NULL },
- {"QDI6500", &qdi6500_port_ops, 0x07,
+ {"QDI6500", &qdi6500_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY,
ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
- {"QDI6580", &qdi6580_port_ops, 0x1F,
+ {"QDI6580", &qdi6580_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
- {"QDI6580DP", &qdi6580dp_port_ops, 0x1F,
+ {"QDI6580DP", &qdi6580dp_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
- {"W83759A", &winbond_port_ops, 0x1F,
+ {"W83759A", &winbond_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,
winbond_port }
};
@@ -1022,6 +975,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
if (!io_addr || !ctrl_addr)
goto fail;
+ ld->type = probe->type;
if (controller->setup)
if (controller->setup(pdev, probe, ld) < 0)
goto fail;
@@ -1306,6 +1260,7 @@ MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_qdi");
MODULE_ALIAS("pata_winbond");
module_param(probe_all, int, 0);
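
The pata_legacy.c hunks above fold the three QDI set_piomode variants into a single qdi65x0_set_piomode() that picks the output register by controller type, and replace the raw pio_mask values in the controller table with the ATA_PIOx constants. A minimal standalone sketch of the timing-byte packing those functions share, with the clamp ranges copied from the hunk above (the helper name and the sample numbers are illustrative, not part of the driver):

/*
 * Sketch only: how qdi65x0_set_piomode() above turns the cycle counts
 * returned by ata_timing_compute() into the QDI timing byte.
 */
#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

static unsigned char qdi_pack_timing(int act_clks, int rec_clks, int fast)
{
	int active, recovery;

	if (fast) {		/* VLB bus faster than 33 MHz */
		active   = 8  - CLAMP(act_clks, 1, 8);
		recovery = 18 - CLAMP(rec_clks, 3, 18);
	} else {
		active   = 9  - CLAMP(act_clks, 2, 9);
		recovery = 15 - CLAMP(rec_clks, 0, 15);
	}
	/* recovery in bits 7:4, active in bits 2:0, bit 3 always set */
	return (recovery << 4) | active | 0x08;
}

int main(void)
{
	/* PIO4-like counts at the 30.3 ns clock the driver passes in */
	printf("timing byte: 0x%02x\n", qdi_pack_timing(3, 1, 0));
	return 0;
}

With the non-fast clamps and active/recover counts of 3 and 1 clocks, the sketch yields 0xEE: recovery 14 in the high nibble, active 6 plus the always-set bit 3 in the low nibble.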
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 2fcac511d39..3e1746314f2 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -780,7 +780,7 @@ mpc52xx_ata_probe(struct platform_device *op)
}
task_irq = bcom_get_task_irq(dmatsk);
- ret = request_irq(task_irq, &mpc52xx_ata_task_irq, IRQF_DISABLED,
+ ret = request_irq(task_irq, &mpc52xx_ata_task_irq, 0,
"ATA task", priv);
if (ret) {
dev_err(&op->dev, "error requesting DMA IRQ\n");
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index f3054009bd2..2a472c5bb7d 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/ata_platform.h>
@@ -50,18 +52,18 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
}
ret = of_irq_to_resource(dn, 0, &irq_res);
- if (ret == NO_IRQ)
+ if (!ret)
irq_res.start = irq_res.end = 0;
else
irq_res.flags = 0;
prop = of_get_property(dn, "reg-shift", NULL);
if (prop)
- reg_shift = *prop;
+ reg_shift = be32_to_cpup(prop);
prop = of_get_property(dn, "pio-mode", NULL);
if (prop) {
- pio_mode = *prop;
+ pio_mode = be32_to_cpup(prop);
if (pio_mode > 6) {
dev_err(&ofdev->dev, "invalid pio-mode\n");
return -EINVAL;
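
The pata_of_platform.c change above replaces raw dereferences of the "reg-shift" and "pio-mode" properties with be32_to_cpup(), since device-tree cells are stored big-endian. A minimal sketch of an equivalent, endian-safe way to read the same optional properties with of_property_read_u32() (the helper name is illustrative; 'dn' stands for the device node used in the probe routine):

/* Sketch only: endian-safe reading of the optional DT properties above. */
#include <linux/errno.h>
#include <linux/of.h>

static int pata_of_read_props(struct device_node *dn, u32 *reg_shift,
			      u32 *pio_mode)
{
	/* Both properties are optional; default to 0 when absent. */
	if (of_property_read_u32(dn, "reg-shift", reg_shift))
		*reg_shift = 0;
	if (of_property_read_u32(dn, "pio-mode", pio_mode))
		*pio_mode = 0;

	return (*pio_mode > 6) ? -EINVAL : 0;
}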
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index b1511f38b0e..7d63f24179c 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -63,6 +63,7 @@ enum {
};
static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int pdc2027x_reinit_one(struct pci_dev *pdev);
static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -126,6 +127,10 @@ static struct pci_driver pdc2027x_pci_driver = {
.id_table = pdc2027x_pci_tbl,
.probe = pdc2027x_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = pdc2027x_reinit_one,
+#endif
};
static struct scsi_host_template pdc2027x_sht = {
@@ -754,6 +759,31 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
IRQF_SHARED, &pdc2027x_sht);
}
+#ifdef CONFIG_PM
+static int pdc2027x_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ unsigned int board_idx;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->device == PCI_DEVICE_ID_PROMISE_20268 ||
+ pdev->device == PCI_DEVICE_ID_PROMISE_20270)
+ board_idx = PDC_UDMA_100;
+ else
+ board_idx = PDC_UDMA_133;
+
+ if (pdc_hardware_init(host, board_idx))
+ return -EIO;
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
/**
* pdc2027x_init - Called after this module is loaded into the kernel.
*/
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
deleted file mode 100644
index 45879dc6fa4..00000000000
--- a/drivers/ata/pata_qdi.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * pata_qdi.c - QDI VLB ATA controllers
- * (C) 2006 Red Hat
- *
- * This driver mostly exists as a proof of concept for non PCI devices under
- * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
- * useful.
- *
- * Tuning code written from the documentation at
- * http://www.ryston.cz/petr/vlb/qd6500.html
- * http://www.ryston.cz/petr/vlb/qd6580.html
- *
- * Probe code based on drivers/ide/legacy/qd65xx.c
- * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
- * Samuel Thibault <samuel.thibault@ens-lyon.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_qdi"
-#define DRV_VERSION "0.3.1"
-
-#define NR_HOST 4 /* Two 6580s */
-
-struct qdi_data {
- unsigned long timing;
- u8 clock[2];
- u8 last;
- int fast;
- struct platform_device *platform_dev;
-
-};
-
-static struct ata_host *qdi_host[NR_HOST];
-static struct qdi_data qdi_data[NR_HOST];
-static int nr_qdi_host;
-
-#ifdef MODULE
-static int probe_qdi = 1;
-#else
-static int probe_qdi;
-#endif
-
-static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct qdi_data *qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- if (qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
-
- qdi->clock[adev->devno] = timing;
-
- outb(timing, qdi->timing);
-}
-
-static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct qdi_data *qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- if (qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
-
- qdi->clock[adev->devno] = timing;
-
- outb(timing, qdi->timing);
-
- /* Clear the FIFO */
- if (adev->class != ATA_DEV_ATA)
- outb(0x5F, (qdi->timing & 0xFFF0) + 3);
-}
-
-/**
- * qdi_qc_issue - command issue
- * @qc: command pending
- *
- * Called when the libata layer is about to issue a command. We wrap
- * this interface so that we can load the correct ATA timings.
- */
-
-static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ata_device *adev = qc->dev;
- struct qdi_data *qdi = ap->host->private_data;
-
- if (qdi->clock[adev->devno] != qdi->last) {
- if (adev->pio_mode) {
- qdi->last = qdi->clock[adev->devno];
- outb(qdi->clock[adev->devno], qdi->timing);
- }
- }
- return ata_sff_qc_issue(qc);
-}
-
-static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
- unsigned int buflen, int rw)
-{
- if (ata_id_has_dword_io(dev->id)) {
- struct ata_port *ap = dev->link->ap;
- int slop = buflen & 3;
-
- if (rw == READ)
- ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
- else
- iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
- if (unlikely(slop)) {
- __le32 pad;
- if (rw == READ) {
- pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- memcpy(buf + buflen - slop, &pad, slop);
- } else {
- memcpy(&pad, buf + buflen - slop, slop);
- iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- }
- buflen += 4 - slop;
- }
- } else
- buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
- return buflen;
-}
-
-static struct scsi_host_template qdi_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations qdi6500_port_ops = {
- .inherits = &ata_sff_port_ops,
- .qc_issue = qdi_qc_issue,
- .sff_data_xfer = qdi_data_xfer,
- .cable_detect = ata_cable_40wire,
- .set_piomode = qdi6500_set_piomode,
-};
-
-static struct ata_port_operations qdi6580_port_ops = {
- .inherits = &qdi6500_port_ops,
- .set_piomode = qdi6580_set_piomode,
-};
-
-/**
- * qdi_init_one - attach a qdi interface
- * @type: Type to display
- * @io: I/O port start
- * @irq: interrupt line
- * @fast: True if on a > 33Mhz VLB
- *
- * Register an ISA bus IDE interface. Such interfaces are PIO and we
- * assume do not support IRQ sharing.
- */
-
-static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
-{
- unsigned long ctl = io + 0x206;
- struct platform_device *pdev;
- struct ata_host *host;
- struct ata_port *ap;
- void __iomem *io_addr, *ctl_addr;
- int ret;
-
- /*
- * Fill in a probe structure first of all
- */
-
- pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- ret = -ENOMEM;
- io_addr = devm_ioport_map(&pdev->dev, io, 8);
- ctl_addr = devm_ioport_map(&pdev->dev, ctl, 1);
- if (!io_addr || !ctl_addr)
- goto fail;
-
- ret = -ENOMEM;
- host = ata_host_alloc(&pdev->dev, 1);
- if (!host)
- goto fail;
- ap = host->ports[0];
-
- if (type == 6580) {
- ap->ops = &qdi6580_port_ops;
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
- } else {
- ap->ops = &qdi6500_port_ops;
- ap->pio_mask = ATA_PIO2; /* Actually PIO3 !IORDY is possible */
- ap->flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
- }
-
- ap->ioaddr.cmd_addr = io_addr;
- ap->ioaddr.altstatus_addr = ctl_addr;
- ap->ioaddr.ctl_addr = ctl_addr;
- ata_sff_std_ports(&ap->ioaddr);
-
- ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl);
-
- /*
- * Hook in a private data structure per channel
- */
- ap->private_data = &qdi_data[nr_qdi_host];
-
- qdi_data[nr_qdi_host].timing = port;
- qdi_data[nr_qdi_host].fast = fast;
- qdi_data[nr_qdi_host].platform_dev = pdev;
-
- printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
-
- /* activate */
- ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, &qdi_sht);
- if (ret)
- goto fail;
-
- qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
- return 0;
-
- fail:
- platform_device_unregister(pdev);
- return ret;
-}
-
-/**
- * qdi_init - attach qdi interfaces
- *
- * Attach qdi IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int qdi_init(void)
-{
- unsigned long flags;
- static const unsigned long qd_port[2] = { 0x30, 0xB0 };
- static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
- static const int ide_irq[2] = { 14, 15 };
-
- int ct = 0;
- int i;
-
- if (probe_qdi == 0)
- return -ENODEV;
-
- /*
- * Check each possible QD65xx base address
- */
-
- for (i = 0; i < 2; i++) {
- unsigned long port = qd_port[i];
- u8 r, res;
-
-
- if (request_region(port, 2, "pata_qdi")) {
- /* Check for a card */
- local_irq_save(flags);
- r = inb_p(port);
- outb_p(0x19, port);
- res = inb_p(port);
- outb_p(r, port);
- local_irq_restore(flags);
-
- /* Fail */
- if (res == 0x19)
- {
- release_region(port, 2);
- continue;
- }
-
- /* Passes the presence test */
- r = inb_p(port + 1); /* Check port agrees with port set */
- if ((r & 2) >> 1 != i) {
- release_region(port, 2);
- continue;
- }
-
- /* Check card type */
- if ((r & 0xF0) == 0xC0) {
- /* QD6500: single channel */
- if (r & 8) {
- /* Disabled ? */
- release_region(port, 2);
- continue;
- }
- if (qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
- ct++;
- }
- if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
- /* QD6580: dual channel */
- if (!request_region(port + 2 , 2, "pata_qdi"))
- {
- release_region(port, 2);
- continue;
- }
- res = inb(port + 3);
- if (res & 1) {
- /* Single channel mode */
- if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
- ct++;
- } else {
- /* Dual channel mode */
- if (qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04) == 0)
- ct++;
- if (qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04) == 0)
- ct++;
- }
- }
- }
- }
- if (ct != 0)
- return 0;
- return -ENODEV;
-}
-
-static __exit void qdi_exit(void)
-{
- int i;
-
- for (i = 0; i < nr_qdi_host; i++) {
- ata_host_detach(qdi_host[i]);
- /* Free the control resource. The 6580 dual channel has the resources
- * claimed as a pair of 2 byte resources so we need no special cases...
- */
- release_region(qdi_data[i].timing, 2);
- platform_device_unregister(qdi_data[i].platform_dev);
- }
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for qdi ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(qdi_init);
-module_exit(qdi_exit);
-
-module_param(probe_qdi, int, 0);
-
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 4d318f86ae8..e6a2dd7809c 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -86,6 +86,8 @@ static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
return ata_sff_prereset(link, deadline);
}
+static DEFINE_SPINLOCK(rdc_lock);
+
/**
* rdc_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
@@ -101,6 +103,7 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned long flags;
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
@@ -124,6 +127,8 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
+ spin_lock_irqsave(&rdc_lock, flags);
+
/* PIO configuration clears DTE unconditionally. It will be
* programmed in set_dmamode which is guaranteed to be called
* after set_piomode if any DMA mode is available.
@@ -161,6 +166,8 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
+
+ spin_unlock_irqrestore(&rdc_lock, flags);
}
/**
@@ -177,6 +184,7 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned long flags;
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
@@ -190,6 +198,8 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{ 2, 1 },
{ 2, 3 }, };
+ spin_lock_irqsave(&rdc_lock, flags);
+
pci_read_config_word(dev, master_port, &master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
@@ -271,6 +281,8 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_byte(dev, 0x48, udma_enable);
+
+ spin_unlock_irqrestore(&rdc_lock, flags);
}
static struct ata_port_operations rdc_pata_ops = {
@@ -375,6 +387,10 @@ static struct pci_driver rdc_pci_driver = {
.id_table = rdc_pci_tbl,
.probe = rdc_init_one,
.remove = rdc_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
};
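
The pata_rdc.c hunks above introduce rdc_lock so that the PIO and DMA timing updates, which read-modify-write PCI config registers shared by both ports, cannot interleave. A minimal sketch of that locking pattern (the lock and helper names are illustrative, not part of the driver):

/* Sketch only: serialized read-modify-write of a shared config register. */
#include <linux/pci.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cfg_lock);

static void rmw_config_byte(struct pci_dev *pdev, int where, u8 clear, u8 set)
{
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(&cfg_lock, flags);	/* keep the other port out */
	pci_read_config_byte(pdev, where, &val);
	val = (val & ~clear) | set;
	pci_write_config_byte(pdev, where, val);
	spin_unlock_irqrestore(&cfg_lock, flags);
}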
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index e2c18257adf..c0e603a84f7 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -38,7 +38,7 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
-#define DRV_NAME "sc1200"
+#define DRV_NAME "pata_sc1200"
#define DRV_VERSION "0.2.6"
#define SC1200_REV_A 0x00
@@ -86,10 +86,14 @@ static int sc1200_clock(void)
static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 pio_timings[4][5] = {
- {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, // format0 33Mhz
- {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, // format1, 33Mhz
- {0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, // format1, 48Mhz
- {0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131} // format1, 66Mhz
+ /* format0, 33Mhz */
+ { 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 },
+ /* format1, 33Mhz */
+ { 0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010 },
+ /* format1, 48Mhz */
+ { 0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021 },
+ /* format1, 66Mhz */
+ { 0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index eb748e32714..e265f835c95 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -826,18 +826,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
}
/**
- * scc_pata_prereset - prepare for reset
- * @ap: ATA port to be reset
- * @deadline: deadline jiffies for the operation
- */
-
-static int scc_pata_prereset(struct ata_link *link, unsigned long deadline)
-{
- link->ap->cbl = ATA_CBL_PATA80;
- return ata_sff_prereset(link, deadline);
-}
-
-/**
* scc_postreset - standard postreset callback
* @ap: the target ata_port
* @classes: classes of attached devices
@@ -946,7 +934,7 @@ static struct ata_port_operations scc_pata_ops = {
.bmdma_status = scc_bmdma_status,
.sff_data_xfer = scc_data_xfer,
- .prereset = scc_pata_prereset,
+ .cable_detect = ata_cable_80wire,
.softreset = scc_softreset,
.postreset = scc_postreset,
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 86dd714e3e1..71eaf385e97 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -58,31 +58,15 @@ static const char *csb_bad_ata100[] = {
};
/**
- * dell_cable - Dell serverworks cable detection
+ * oem_cable - Dell/Sun serverworks cable detection
* @ap: ATA port to do cable detect
*
- * Dell hide the 40/80 pin select for their interfaces in the top two
- * bits of the subsystem ID.
+ * Dell PowerEdge and Sun Cobalt 'Alpine' hide the 40/80 pin select
+ * for their interfaces in the top two bits of the subsystem ID.
*/
-static int dell_cable(struct ata_port *ap) {
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-
- if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
- return ATA_CBL_PATA80;
- return ATA_CBL_PATA40;
-}
-
-/**
- * sun_cable - Sun Cobalt 'Alpine' cable detection
- * @ap: ATA port to do cable select
- *
- * Cobalt CSB5 IDE hides the 40/80pin in the top two bits of the
- * subsystem ID the same as dell. We could use one function but we may
- * need to extend the Dell one in future
- */
-
-static int sun_cable(struct ata_port *ap) {
+static int oem_cable(struct ata_port *ap)
+{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
@@ -90,49 +74,21 @@ static int sun_cable(struct ata_port *ap) {
return ATA_CBL_PATA40;
}
-/**
- * osb4_cable - OSB4 cable detect
- * @ap: ATA port to check
- *
- * The OSB4 isn't UDMA66 capable so this is easy
- */
-
-static int osb4_cable(struct ata_port *ap) {
- return ATA_CBL_PATA40;
-}
-
-/**
- * csb_cable - CSB5/6 cable detect
- * @ap: ATA port to check
- *
- * Serverworks default arrangement is to use the drive side detection
- * only.
- */
-
-static int csb_cable(struct ata_port *ap) {
- return ATA_CBL_PATA_UNK;
-}
-
struct sv_cable_table {
int device;
int subvendor;
int (*cable_detect)(struct ata_port *ap);
};
-/*
- * Note that we don't copy the old serverworks code because the old
- * code contains obvious mistakes
- */
-
static struct sv_cable_table cable_detect[] = {
- { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable },
- { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, osb4_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
- { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, oem_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, oem_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, oem_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, ata_cable_40wire },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, ata_cable_unknown },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, ata_cable_unknown },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, ata_cable_unknown },
+ { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, ata_cable_unknown },
{ }
};
@@ -393,6 +349,31 @@ static void serverworks_fixup_ht1000(struct pci_dev *pdev)
pci_write_config_byte(pdev, 0x5A, btr);
}
+static int serverworks_fixup(struct pci_dev *pdev)
+{
+ int rc = 0;
+
+ /* Force master latency timer to 64 PCI clocks */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
+ rc = serverworks_fixup_osb4(pdev);
+ break;
+ case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+ ata_pci_bmdma_clear_simplex(pdev);
+ /* fall through */
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+ rc = serverworks_fixup_csb(pdev);
+ break;
+ case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+ serverworks_fixup_ht1000(pdev);
+ break;
+ }
+
+ return rc;
+}
static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -430,13 +411,12 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
if (rc)
return rc;
- /* Force master latency timer to 64 PCI clocks */
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+ rc = serverworks_fixup(pdev);
/* OSB4 : South Bridge and IDE */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
/* Select non UDMA capable OSB4 if we can't do fixups */
- if ( serverworks_fixup_osb4(pdev) < 0)
+ if (rc < 0)
ppi[0] = &info[1];
}
/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
@@ -446,19 +426,13 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
/* If the returned btr is the newer revision then
select the right info block */
- if (serverworks_fixup_csb(pdev) == 3)
+ if (rc == 3)
ppi[0] = &info[3];
/* Is this the 3rd channel CSB6 IDE ? */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
ppi[1] = &ata_dummy_port_info;
}
- /* setup HT1000E */
- else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
- serverworks_fixup_ht1000(pdev);
-
- if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
- ata_pci_bmdma_clear_simplex(pdev);
return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
}
@@ -473,24 +447,7 @@ static int serverworks_reinit_one(struct pci_dev *pdev)
if (rc)
return rc;
- /* Force master latency timer to 64 PCI clocks */
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
-
- switch (pdev->device) {
- case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
- serverworks_fixup_osb4(pdev);
- break;
- case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
- ata_pci_bmdma_clear_simplex(pdev);
- /* fall through */
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
- serverworks_fixup_csb(pdev);
- break;
- case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
- serverworks_fixup_ht1000(pdev);
- break;
- }
+ (void)serverworks_fixup(pdev);
ata_host_resume(host);
return 0;
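
The pata_serverworks.c changes above merge the Dell and Sun cable detectors into oem_cable(), switch the generic table entries to libata's ata_cable_40wire/ata_cable_unknown helpers, and share the per-chip fixups between probe and resume via serverworks_fixup(). A minimal sketch of the subsystem-ID decoding oem_cable() relies on (illustrative helper, not part of the driver):

/* Sketch only: port 0 checks bit 14 of the PCI subsystem device ID,
 * port 1 checks bit 15; a set bit means an 80-wire cable. */
static int oem_cable_is_80wire(unsigned short subsystem_device, int port_no)
{
	return (subsystem_device & (1u << (port_no + 14))) != 0;
}

For example, a subsystem device ID of 0xC000 reports an 80-wire cable on both ports.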
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 31f759b0ab7..b92eacf8dd3 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -38,11 +38,12 @@
/**
* sil680_selreg - return register base
- * @hwif: interface
+ * @ap: ATA interface
* @r: config offset
*
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question
+ * Turn a config register offset into the right address in PCI space
+ * to access the control register in question.
+ *
* Thankfully this is a configuration operation so isn't performance
* critical.
*/
@@ -56,12 +57,12 @@ static unsigned long sil680_selreg(struct ata_port *ap, int r)
/**
* sil680_seldev - return register base
- * @hwif: interface
+ * @ap: ATA interface
* @r: config offset
*
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question
- * including accounting for the unit shift.
+ * Turn a config register offset into the right address in PCI space
+ * to access the control register in question including accounting for
+ * the unit shift.
*/
static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
@@ -81,7 +82,8 @@ static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev,
* space for us.
*/
-static int sil680_cable_detect(struct ata_port *ap) {
+static int sil680_cable_detect(struct ata_port *ap)
+{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long addr = sil680_selreg(ap, 0);
u8 ata66;
@@ -93,7 +95,7 @@ static int sil680_cable_detect(struct ata_port *ap) {
}
/**
- * sil680_set_piomode - set initial PIO mode data
+ * sil680_set_piomode - set PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
@@ -104,8 +106,12 @@ static int sil680_cable_detect(struct ata_port *ap) {
static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 };
- static u16 speed_t[5] = { 0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1 };
+ static const u16 speed_p[5] = {
+ 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1
+ };
+ static const u16 speed_t[5] = {
+ 0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1
+ };
unsigned long tfaddr = sil680_selreg(ap, 0x02);
unsigned long addr = sil680_seldev(ap, adev, 0x04);
@@ -140,22 +146,23 @@ static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/**
- * sil680_set_dmamode - set initial DMA mode data
+ * sil680_set_dmamode - set DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
- * Program the MWDMA/UDMA modes for the sil680 k
- * chipset. The MWDMA mode values are pulled from a lookup table
+ * Program the MWDMA/UDMA modes for the sil680 chipset.
+ *
+ * The MWDMA mode values are pulled from a lookup table
* while the chipset uses mode number for UDMA.
*/
static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- static u8 ultra_table[2][7] = {
+ static const u8 ultra_table[2][7] = {
{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */
{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133MHz */
};
- static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
+ static const u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long ma = sil680_seldev(ap, adev, 0x08);
@@ -175,7 +182,7 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
mode &= ~(0x03 << port_shift);
/* Extract scsc */
- scsc = (scsc & 0x30) ? 1: 0;
+ scsc = (scsc & 0x30) ? 1 : 0;
if (adev->dma_mode >= XFER_UDMA_0) {
multi = 0x10C1;
@@ -248,7 +255,7 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
{
u8 tmpbyte = 0;
- /* FIXME: double check */
+ /* FIXME: double check */
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
pdev->revision ? 1 : 255);
@@ -266,22 +273,22 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
*try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
#endif
- switch(tmpbyte & 0x30) {
- case 0x00:
- /* 133 clock attempt to force it on */
- pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
- break;
- case 0x30:
- /* if clocking is disabled */
- /* 133 clock attempt to force it on */
- pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
- break;
- case 0x10:
- /* 133 already */
- break;
- case 0x20:
- /* BIOS set PCI x2 clocking */
- break;
+ switch (tmpbyte & 0x30) {
+ case 0x00:
+ /* 133 clock attempt to force it on */
+ pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
+ break;
+ case 0x30:
+ /* if clocking is disabled */
+ /* 133 clock attempt to force it on */
+ pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
+ break;
+ case 0x10:
+ /* 133 already */
+ break;
+ case 0x20:
+ /* BIOS set PCI x2 clocking */
+ break;
}
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
@@ -299,12 +306,19 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
pci_write_config_dword(pdev, 0xB8, 0x43924392);
pci_write_config_dword(pdev, 0xBC, 0x40094009);
- switch(tmpbyte & 0x30) {
- case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n");break;
- case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n");break;
- case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
- /* This last case is _NOT_ ok */
- case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
+ switch (tmpbyte & 0x30) {
+ case 0x00:
+ printk(KERN_INFO "sil680: 100MHz clock.\n");
+ break;
+ case 0x10:
+ printk(KERN_INFO "sil680: 133MHz clock.\n");
+ break;
+ case 0x20:
+ printk(KERN_INFO "sil680: Using PCI clock.\n");
+ break;
+ /* This last case is _NOT_ ok */
+ case 0x30:
+ printk(KERN_ERR "sil680: Clock disabled ?\n");
}
return tmpbyte & 0x30;
}
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 533f2aefab8..b0edc7de7b2 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -55,7 +55,7 @@ static const struct sis_laptop sis_laptop[] = {
/* devid, subvendor, subdev */
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
- { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
+ { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
/* end marker */
{ 0, }
};
@@ -76,7 +76,7 @@ static int sis_short_ata40(struct pci_dev *dev)
}
/**
- * sis_old_port_base - return PCI configuration base for dev
+ * sis_old_port_base - return PCI configuration base for dev
* @adev: device
*
* Returns the base of the PCI configuration registers for this port
@@ -85,11 +85,34 @@ static int sis_short_ata40(struct pci_dev *dev)
static int sis_old_port_base(struct ata_device *adev)
{
- return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
+ return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
}
/**
- * sis_133_cable_detect - check for 40/80 pin
+ * sis_port_base - return PCI configuration base for dev
+ * @adev: device
+ *
+ * Returns the base of the PCI configuration registers for this port
+ * number.
+ */
+
+static int sis_port_base(struct ata_device *adev)
+{
+ struct ata_port *ap = adev->link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = 0x40;
+ u32 reg54;
+
+ /* If bit 30 is set then the registers are mapped at 0x70 not 0x40 */
+ pci_read_config_dword(pdev, 0x54, &reg54);
+ if (reg54 & 0x40000000)
+ port = 0x70;
+
+ return port + (8 * ap->port_no) + (4 * adev->devno);
+}
+
+/**
+ * sis_133_cable_detect - check for 40/80 pin
* @ap: Port
* @deadline: deadline jiffies for the operation
*
@@ -110,7 +133,7 @@ static int sis_133_cable_detect(struct ata_port *ap)
}
/**
- * sis_66_cable_detect - check for 40/80 pin
+ * sis_66_cable_detect - check for 40/80 pin
* @ap: Port
*
* Perform cable detection on the UDMA66, UDMA100 and early UDMA133
@@ -132,7 +155,7 @@ static int sis_66_cable_detect(struct ata_port *ap)
/**
- * sis_pre_reset - probe begin
+ * sis_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
@@ -160,7 +183,7 @@ static int sis_pre_reset(struct ata_link *link, unsigned long deadline)
/**
- * sis_set_fifo - Set RWP fifo bits for this device
+ * sis_set_fifo - Set RWP fifo bits for this device
* @ap: Port
* @adev: Device
*
@@ -203,13 +226,13 @@ static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = sis_old_port_base(adev);
u8 t1, t2;
int speed = adev->pio_mode - XFER_PIO_0;
- const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
- const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
+ static const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
+ static const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
sis_set_fifo(ap, adev);
@@ -240,11 +263,11 @@ static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = sis_old_port_base(adev);
int speed = adev->pio_mode - XFER_PIO_0;
- const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
+ static const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
sis_set_fifo(ap, adev);
@@ -265,20 +288,19 @@ static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int port = 0x40;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port;
u32 t1;
- u32 reg54;
int speed = adev->pio_mode - XFER_PIO_0;
- const u32 timing133[] = {
+ static const u32 timing133[] = {
0x28269000, /* Recovery << 24 | Act << 16 | Ini << 12 */
0x0C266000,
0x04263000,
0x0C0A3000,
0x05093000
};
- const u32 timing100[] = {
+ static const u32 timing100[] = {
0x1E1C6000, /* Recovery << 24 | Act << 16 | Ini << 12 */
0x091C4000,
0x031C2000,
@@ -288,12 +310,7 @@ static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
sis_set_fifo(ap, adev);
- /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */
- pci_read_config_dword(pdev, 0x54, &reg54);
- if (reg54 & 0x40000000)
- port = 0x70;
- port += 8 * ap->port_no + 4 * adev->devno;
-
+ port = sis_port_base(adev);
pci_read_config_dword(pdev, port, &t1);
t1 &= 0xC0C00FFF; /* Mask out timing */
@@ -319,13 +336,13 @@ static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u16 timing;
- const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
- const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
+ static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+ static const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
pci_read_config_word(pdev, drive_pci, &timing);
@@ -358,14 +375,14 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u16 timing;
/* MWDMA 0-2 and UDMA 0-5 */
- const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
- const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
+ static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+ static const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
pci_read_config_word(pdev, drive_pci, &timing);
@@ -397,12 +414,12 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u8 timing;
- const u8 udma_bits[] = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
+ static const u8 udma_bits[] = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
pci_read_config_byte(pdev, drive_pci + 1, &timing);
@@ -431,7 +448,7 @@ static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u8 timing;
@@ -464,32 +481,34 @@ static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *a
static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int speed = adev->dma_mode - XFER_MW_DMA_0;
- int port = 0x40;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port;
u32 t1;
- u32 reg54;
-
- /* bits 4- cycle time 8 - cvs time */
- static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
- static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
-
- /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */
- pci_read_config_dword(pdev, 0x54, &reg54);
- if (reg54 & 0x40000000)
- port = 0x70;
- port += (8 * ap->port_no) + (4 * adev->devno);
+ port = sis_port_base(adev);
pci_read_config_dword(pdev, port, &t1);
if (adev->dma_mode < XFER_UDMA_0) {
+ /* Recovery << 24 | Act << 16 | Ini << 12, like PIO modes */
+ static const u32 timing_u100[] = { 0x19154000, 0x06072000, 0x04062000 };
+ static const u32 timing_u133[] = { 0x221C6000, 0x0C0A3000, 0x05093000 };
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+
+ t1 &= 0xC0C00FFF;
+ /* disable UDMA */
t1 &= ~0x00000004;
- /* FIXME: need data sheet to add MWDMA here. Also lacking on
- ide/pci driver */
+ if (t1 & 0x08)
+ t1 |= timing_u133[speed];
+ else
+ t1 |= timing_u100[speed];
} else {
- speed = adev->dma_mode - XFER_UDMA_0;
- /* if & 8 no UDMA133 - need info for ... */
+ /* bits 4- cycle time 8 - cvs time */
+ static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
+ static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
+ int speed = adev->dma_mode - XFER_UDMA_0;
+
t1 &= ~0x00000FF0;
+ /* enable UDMA */
t1 |= 0x00000004;
if (t1 & 0x08)
t1 |= timing_u133[speed];
@@ -499,6 +518,27 @@ static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_dword(pdev, port, t1);
}
+/**
+ * sis_133_mode_filter - mode selection filter
+ * @adev: ATA device
+ *
+ * Block UDMA6 on devices that do not support it.
+ */
+
+static unsigned long sis_133_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+ struct ata_port *ap = adev->link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = sis_port_base(adev);
+ u32 t1;
+
+ pci_read_config_dword(pdev, port, &t1);
+ /* if ATA133 is disabled, mask it out */
+ if (!(t1 & 0x08))
+ mask &= ~(0xC0 << ATA_SHIFT_UDMA);
+ return mask;
+}
+
static struct scsi_host_template sis_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -520,6 +560,7 @@ static struct ata_port_operations sis_133_ops = {
.set_piomode = sis_133_set_piomode,
.set_dmamode = sis_133_set_dmamode,
.cable_detect = sis_133_cable_detect,
+ .mode_filter = sis_133_mode_filter,
};
static struct ata_port_operations sis_133_early_ops = {
@@ -588,7 +629,7 @@ static const struct ata_port_info sis_info100_early = {
static const struct ata_port_info sis_info133 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
- /* No MWDMA */
+ .mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sis_133_ops,
};
@@ -669,7 +710,7 @@ static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
* @pdev: PCI device to register
* @ent: Entry in sis_pci_tbl matching with @pdev
*
- * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
@@ -769,17 +810,20 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
switch(trueid) {
case 0x5518: /* SIS 962/963 */
+ dev_info(&pdev->dev,
+ "SiS 962/963 MuTIOL IDE UDMA133 controller\n");
chipset = &sis133;
if ((idemisc & 0x40000000) == 0) {
pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
- printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n");
+ dev_info(&pdev->dev,
+ "Switching to 5513 register mapping\n");
}
break;
case 0x0180: /* SIS 965/965L */
- chipset = &sis133;
+ chipset = &sis133;
break;
case 0x1180: /* SIS 966/966L */
- chipset = &sis133;
+ chipset = &sis133;
break;
}
}
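
The pata_sis.c changes above add sis_port_base() so the 0x40-versus-0x70 register mapping is computed in one place, teach sis_133_set_dmamode() to program MWDMA timings (and set .mwdma_mask accordingly), and add sis_133_mode_filter() to mask out UDMA6 when the chip has ATA133 disabled. A minimal sketch of the offset arithmetic sis_port_base() encodes (illustrative helper, not part of the driver):

/* Sketch only: per-device config offset, 8 bytes per port, 4 per device,
 * based at 0x70 instead of 0x40 when bit 30 of config dword 0x54 is set. */
static int sis_cfg_base(int mapped_at_0x70, int port_no, int devno)
{
	int base = mapped_at_0x70 ? 0x70 : 0x40;

	return base + (8 * port_no) + (4 * devno);
}

With the 0x70 mapping enabled, the secondary-port master device therefore sits at 0x78, which is the dword sis_133_set_piomode(), sis_133_set_dmamode() and sis_133_mode_filter() all operate on.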
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index c06ce8ced56..24cf200dd1c 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -1,6 +1,7 @@
/*
* pata_sl82c105.c - SL82C105 PATA for new ATA layer
* (C) 2005 Red Hat Inc
+ * (C) 2011 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/sl82c105.c
* SL82C105/Winbond 553 IDE driver
@@ -289,6 +290,14 @@ static int sl82c105_bridge_revision(struct pci_dev *pdev)
return bridge->revision;
}
+static void sl82c105_fixup(struct pci_dev *pdev)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, 0x40, &val);
+ val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
+ pci_write_config_dword(pdev, 0x40, val);
+}
static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
@@ -306,7 +315,6 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
/* for now use only the first port */
const struct ata_port_info *ppi[] = { &info_early,
NULL };
- u32 val;
int rev;
int rc;
@@ -325,13 +333,28 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
else
ppi[0] = &info_dma;
- pci_read_config_dword(dev, 0x40, &val);
- val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
- pci_write_config_dword(dev, 0x40, val);
+ sl82c105_fixup(dev);
return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}
+#ifdef CONFIG_PM
+static int sl82c105_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ sl82c105_fixup(pdev);
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static const struct pci_device_id sl82c105[] = {
{ PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
@@ -342,7 +365,11 @@ static struct pci_driver sl82c105_pci_driver = {
.name = DRV_NAME,
.id_table = sl82c105,
.probe = sl82c105_init_one,
- .remove = ata_pci_remove_one
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = sl82c105_reinit_one,
+#endif
};
static int __init sl82c105_init(void)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 8e9f5048a10..255f336cd7e 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -509,6 +509,27 @@ static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
}
}
+static void via_fixup(struct pci_dev *pdev, const struct via_isa_bridge *config)
+{
+ u32 timing;
+
+ /* Initialise the FIFO for the enabled channels. */
+ via_config_fifo(pdev, config->flags);
+
+ if (config->udma_mask == ATA_UDMA4) {
+ /* The 66 MHz devices require we enable the clock */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing |= 0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ }
+ if (config->flags & VIA_BAD_CLK66) {
+ /* Disable the 66MHz clock on problem devices */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing &= ~0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ }
+}
+
/**
* via_init_one - discovery callback
* @pdev: PCI device
@@ -570,7 +591,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct pci_dev *isa;
const struct via_isa_bridge *config;
u8 enable;
- u32 timing;
unsigned long flags = id->driver_data;
int rc;
@@ -609,9 +629,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- /* Initialise the FIFO for the enabled channels. */
- via_config_fifo(pdev, config->flags);
-
/* Clock set up */
switch (config->udma_mask) {
case 0x00:
@@ -637,12 +654,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- if (config->flags & VIA_BAD_CLK66) {
- /* Disable the 66MHz clock on problem devices */
- pci_read_config_dword(pdev, 0x50, &timing);
- timing &= ~0x80008;
- pci_write_config_dword(pdev, 0x50, timing);
- }
+ via_fixup(pdev, config);
/* We have established the device type, now fire it up */
return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
@@ -661,29 +673,14 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
static int via_reinit_one(struct pci_dev *pdev)
{
- u32 timing;
struct ata_host *host = dev_get_drvdata(&pdev->dev);
- const struct via_isa_bridge *config = host->private_data;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
- via_config_fifo(pdev, config->flags);
-
- if (config->udma_mask == ATA_UDMA4) {
- /* The 66 MHz devices require we enable the clock */
- pci_read_config_dword(pdev, 0x50, &timing);
- timing |= 0x80008;
- pci_write_config_dword(pdev, 0x50, timing);
- }
- if (config->flags & VIA_BAD_CLK66) {
- /* Disable the 66MHz clock on problem devices */
- pci_read_config_dword(pdev, 0x50, &timing);
- timing &= ~0x80008;
- pci_write_config_dword(pdev, 0x50, timing);
- }
+ via_fixup(pdev, host->private_data);
ata_host_resume(host);
return 0;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 4b6b2090784..0b8b8b488ee 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4087,8 +4087,11 @@ static int mv_platform_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "slots %u ports %d\n",
(unsigned)MV_MAX_Q_DEPTH, host->n_ports);
- return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
- IRQF_SHARED, &mv6_sht);
+ rc = ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
+ IRQF_SHARED, &mv6_sht);
+ if (!rc)
+ return 0;
+
err:
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
@@ -4110,8 +4113,7 @@ err:
*/
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
struct mv_host_priv *hpriv = host->private_data;
#endif
@@ -4129,7 +4131,7 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
if (host)
return ata_host_suspend(host, state);
else
@@ -4138,7 +4140,7 @@ static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
static int mv_platform_resume(struct platform_device *pdev)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
int ret;
if (host) {
@@ -4353,7 +4355,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 55470f337e5..1e9140626a8 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -268,7 +268,7 @@ union sil24_cmd_block {
struct sil24_atapi_block atapi;
};
-static struct sil24_cerr_info {
+static const struct sil24_cerr_info {
unsigned int err_mask, action;
const char *desc;
} sil24_cerr_db[] = {
@@ -1019,7 +1019,7 @@ static void sil24_error_intr(struct ata_port *ap)
/* deal with command error */
if (irq_stat & PORT_IRQ_ERROR) {
- struct sil24_cerr_info *ci = NULL;
+ const struct sil24_cerr_info *ci = NULL;
unsigned int err_mask = 0, action = 0;
u32 context, cerr;
int pmp;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 447d9c05fb5..95ec435f0eb 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = {
};
MODULE_AUTHOR("Uwe Koziolek");
-MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller");
+MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/base/base.h b/drivers/base/base.h
index a34dca0ad04..21c1b96c34c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,3 +1,4 @@
+#include <linux/notifier.h>
/**
* struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index f369e279598..bb0025c510b 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
struct dma_coherent_mem {
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 763d59c1eb6..6f3676f1559 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -8,6 +8,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/gfp.h>
/*
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
index 6428cba3aad..4f8b741f461 100644
--- a/drivers/base/hypervisor.c
+++ b/drivers/base/hypervisor.c
@@ -10,6 +10,7 @@
#include <linux/kobject.h>
#include <linux/device.h>
+#include <linux/export.h>
#include "base.h"
struct kobject *hypervisor_kobj;
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 29820c39618..4af7c1cbf90 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_clock.h>
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 9508df71274..265a0ee3b49 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -8,6 +8,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/export.h>
#ifdef CONFIG_PM_RUNTIME
/**
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 59f8ab23548..7fa098464da 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -19,6 +19,7 @@
#include <linux/device.h>
#include <linux/kallsyms.h>
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 434a6c01167..95706fa24c7 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
struct device_opp *dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
+ return ERR_CAST(dev_opp); /* matching type */
return &dev_opp->head;
}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 91e06141738..30a94eadc20 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
+#include <linux/export.h>
static DEFINE_MUTEX(dev_pm_qos_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6bb3aafa85e..8c78443bca8 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -8,6 +8,7 @@
*/
#include <linux/sched.h>
+#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"
@@ -29,13 +30,10 @@ static int rpm_suspend(struct device *dev, int rpmflags);
void update_pm_runtime_accounting(struct device *dev)
{
unsigned long now = jiffies;
- int delta;
+ unsigned long delta;
delta = now - dev->power.accounting_timestamp;
- if (delta < 0)
- delta = 0;
-
dev->power.accounting_timestamp = now;
if (dev->power.disable_depth > 0)
@@ -296,6 +294,9 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
* the callback was running then carry it out, otherwise send an idle
* notification for its parent (if the suspend succeeded and both
* ignore_children of parent->power and irq_safe of dev->power are not set).
+ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+ * flag is set and the next autosuspend-delay expiration time is in the
+ * future, schedule another autosuspend attempt.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -416,10 +417,21 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval) {
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
- if (retval == -EAGAIN || retval == -EBUSY)
+ if (retval == -EAGAIN || retval == -EBUSY) {
dev->power.runtime_error = 0;
- else
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
pm_runtime_cancel_pending(dev);
+ }
wake_up_all(&dev->power.wait_queue);
goto out;
}
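
The runtime.c hunk above makes rpm_suspend() retry an autosuspend when the callback fails with -EAGAIN or -EBUSY and a new autosuspend expiration lies in the future. A minimal sketch of the driver-side callback this is aimed at (my_hw_busy() is a made-up helper for illustration; the stub just keeps the sketch self-contained):

/* Sketch only: a ->runtime_suspend() that refuses while busy but refreshes
 * last_busy, so the core re-arms the autosuspend timer instead of giving up. */
#include <linux/errno.h>
#include <linux/pm_runtime.h>

static bool my_hw_busy(struct device *dev)
{
	return false;			/* illustrative stub */
}

static int my_runtime_suspend(struct device *dev)
{
	if (my_hw_busy(dev)) {
		pm_runtime_mark_last_busy(dev);	/* new expiration in the future */
		return -EBUSY;			/* core reschedules if RPM_AUTO */
	}
	/* ... quiesce the hardware ... */
	return 0;
}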
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 17b7934f31c..adf41be0ea6 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/string.h>
+#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index af10abecb99..d94a1f5121c 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -8,6 +8,7 @@
*/
#include <linux/resume-trace.h>
+#include <linux/export.h>
#include <linux/rtc.h>
#include <asm/rtc.h>
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 14ee07e9cc4..caf995fb774 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
+#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index afcfef83826..666f6f5011d 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -11,6 +11,7 @@
*/
#include <linux/slab.h>
+#include <linux/export.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 84997efdb23..f6c453c3816 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -27,6 +27,7 @@
* transport class is framed entirely in terms of generic devices to
* allow it to be used by any physical HBA in the system.
*/
+#include <linux/export.h>
#include <linux/attribute_container.h>
#include <linux/transport_class.h>
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 30a3085d335..fda56bde36b 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -18,6 +18,9 @@ void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 189a97b51be..893f6e0c759 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -6,6 +6,7 @@
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
bool bcma_core_is_enabled(struct bcma_device *core)
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 47cce9d6963..e9f1b3fd252 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -9,6 +9,7 @@
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 2968d809d49..800163c8c2e 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -9,6 +9,7 @@
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 81f3d0a4b85..4fde6254f04 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -9,6 +9,7 @@
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
/**************************************************
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index ac4bc626c14..443b83a2fd7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
+#include <linux/module.h>
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
@@ -20,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
pr_debug("Switched to core: 0x%X\n", core->id.id);
}
-static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+/* Provides access to the requested core. Returns base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
+ switch (core->id.id) {
+ case BCMA_CORE_CHIPCOMMON:
+ return 3 * BCMA_CORE_SIZE;
+ case BCMA_CORE_PCIE:
+ return 2 * BCMA_CORE_SIZE;
+ }
+
if (core->bus->mapped_core != core)
bcma_host_pci_switch_core(core);
+ return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread8(core->bus->mmio + offset);
}
static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread16(core->bus->mmio + offset);
}
static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread32(core->bus->mmio + offset);
}
static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
u8 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite8(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
u16 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite16(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
u32 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite32(value, core->bus->mmio + offset);
}
@@ -223,6 +234,41 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
+#ifdef CONFIG_PM
+static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ /* Host specific */
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ return 0;
+}
+
+static int bcma_host_pci_resume(struct pci_dev *dev)
+{
+ struct bcma_bus *bus = pci_get_drvdata(dev);
+ int err;
+
+ /* Host specific */
+ pci_set_power_state(dev, 0);
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+ pci_restore_state(dev);
+
+ /* Bus specific */
+ err = bcma_bus_resume(bus);
+ if (err)
+ return err;
+
+ return 0;
+}
+#else /* CONFIG_PM */
+# define bcma_host_pci_suspend NULL
+# define bcma_host_pci_resume NULL
+#endif /* CONFIG_PM */
+
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
@@ -238,6 +284,8 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
+ .suspend = bcma_host_pci_suspend,
+ .resume = bcma_host_pci_resume,
};
int __init bcma_host_pci_init(void)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 8c09c3e547c..10f92b371e5 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -6,6 +6,7 @@
*/
#include "bcma_private.h"
+#include <linux/module.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
@@ -239,6 +240,22 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
return 0;
}
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ /* Init CC core */
+ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ if (core) {
+ bus->drv_cc.setup_done = false;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+
+ return 0;
+}
+#endif
+
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index d7292390d23..6f230fb087c 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -129,6 +129,9 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
u16 v;
int i;
+ bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
+ SSB_SPROM_REVISION_REV;
+
for (i = 0; i < 3; i++) {
v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -136,12 +139,70 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
+ bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
+ bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
+ bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
+ bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
+
+ bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
+ bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
+ bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
+ bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
+
+ bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
+ bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
+ bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
+ bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
+
+ bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
+ bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
+ bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
+ bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
+
bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
+
+ bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
+
+ bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
}
int bcma_sprom_get(struct bcma_bus *bus)
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 528f6318ded..321de7b6c44 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -15,6 +15,7 @@
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/export.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
@@ -159,7 +160,7 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-static int
+static void
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
struct sk_buff_head queue;
@@ -172,25 +173,25 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
if (bio == NULL) {
printk(KERN_ERR "aoe: bio is NULL\n");
BUG();
- return 0;
+ return;
}
d = bio->bi_bdev->bd_disk->private_data;
if (d == NULL) {
printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
BUG();
bio_endio(bio, -ENXIO);
- return 0;
+ return;
} else if (bio->bi_io_vec == NULL) {
printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
BUG();
bio_endio(bio, -ENXIO);
- return 0;
+ return;
}
buf = mempool_alloc(d->bufpool, GFP_NOIO);
if (buf == NULL) {
printk(KERN_INFO "aoe: buf allocation failure\n");
bio_endio(bio, -ENOMEM);
- return 0;
+ return;
}
memset(buf, 0, sizeof(*buf));
INIT_LIST_HEAD(&buf->bufs);
@@ -211,7 +212,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
bio_endio(bio, -ENXIO);
- return 0;
+ return;
}
list_add_tail(&buf->bufs, &d->bufq);
@@ -222,8 +223,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
spin_unlock_irqrestore(&d->lock, flags);
aoenet_xmit(&queue);
-
- return 0;
}
static int
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 146296ca496..5f8e39c43ae 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
#include "aoe.h"
enum {
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index dba1c32e1dd..d22119d49e5 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -323,7 +323,7 @@ out:
return err;
}
-static int brd_make_request(struct request_queue *q, struct bio *bio)
+static void brd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
@@ -359,8 +359,6 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
out:
bio_endio(bio, err);
-
- return 0;
}
#ifdef CONFIG_BLK_DEV_XIP
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8f4ef656a1a..486f94ef24d 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -68,6 +68,10 @@ static int cciss_tape_cmds = 6;
module_param(cciss_tape_cmds, int, 0644);
MODULE_PARM_DESC(cciss_tape_cmds,
"number of commands to allocate for tape devices (default: 6)");
+static int cciss_simple_mode;
+module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_simple_mode,
+ "Use 'simple mode' rather than 'performant mode'");
static DEFINE_MUTEX(cciss_mutex);
static struct proc_dir_entry *proc_cciss;
@@ -176,6 +180,7 @@ static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
unsigned int block_size, InquiryData_struct *inq_buff,
drive_info_struct *drv);
static void __devinit cciss_interrupt_mode(ctlr_info_t *);
+static int __devinit cciss_enter_simple_mode(struct ctlr_info *h);
static void start_io(ctlr_info_t *h);
static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
__u8 page_code, unsigned char scsi3addr[],
@@ -388,7 +393,7 @@ static void cciss_seq_show_header(struct seq_file *seq)
h->product_name,
(unsigned long)h->board_id,
h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
- h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT],
+ h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
h->num_luns,
h->Qdepth, h->commands_outstanding,
h->maxQsinceinit, h->max_outstanding, h->maxSG);
@@ -636,6 +641,18 @@ static ssize_t host_store_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static ssize_t host_show_transport_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ctlr_info *h = to_hba(dev);
+
+ return snprintf(buf, 20, "%s\n",
+ h->transMethod & CFGTBL_Trans_Performant ?
+ "performant" : "simple");
+}
+static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);
+
static ssize_t dev_show_unique_id(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -808,6 +825,7 @@ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
static struct attribute *cciss_host_attrs[] = {
&dev_attr_rescan.attr,
&dev_attr_resettable.attr,
+ &dev_attr_transport_mode.attr,
NULL
};
@@ -3984,6 +4002,9 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
{
__u32 trans_support;
+ if (cciss_simple_mode)
+ return;
+
dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
/* Attempt to put controller into performant mode if supported */
/* Does board support performant mode? */
@@ -4081,7 +4102,7 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- h->intr[PERF_MODE_INT] = h->pdev->irq;
+ h->intr[h->intr_mode] = h->pdev->irq;
return;
}
@@ -4341,6 +4362,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
}
cciss_enable_scsi_prefetch(h);
cciss_p600_dma_prefetch_quirk(h);
+ err = cciss_enter_simple_mode(h);
+ if (err)
+ goto err_out_free_res;
cciss_put_controller_into_performant_mode(h);
return 0;
@@ -4533,6 +4557,13 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= PCI_D0;
pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+ /*
+ * The P600 requires a small delay when changing states.
+ * Otherwise we may think the board did not reset and we bail.
+ * This is for kdump only and is particular to the P600.
+ */
+ msleep(500);
}
return 0;
}
@@ -4843,20 +4874,20 @@ static int cciss_request_irq(ctlr_info_t *h,
irqreturn_t (*intxhandler)(int, void *))
{
if (h->msix_vector || h->msi_vector) {
- if (!request_irq(h->intr[PERF_MODE_INT], msixhandler,
+ if (!request_irq(h->intr[h->intr_mode], msixhandler,
IRQF_DISABLED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
- " for %s\n", h->intr[PERF_MODE_INT],
+ " for %s\n", h->intr[h->intr_mode],
h->devname);
return -1;
}
- if (!request_irq(h->intr[PERF_MODE_INT], intxhandler,
+ if (!request_irq(h->intr[h->intr_mode], intxhandler,
IRQF_DISABLED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
- h->intr[PERF_MODE_INT], h->devname);
+ h->intr[h->intr_mode], h->devname);
return -1;
}
@@ -4887,7 +4918,7 @@ static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
{
int ctlr = h->ctlr;
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
@@ -4953,6 +4984,7 @@ reinit_after_soft_reset:
h = hba[i];
h->pdev = pdev;
h->busy_initializing = 1;
+ h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->cmpQ);
INIT_LIST_HEAD(&h->reqQ);
mutex_init(&h->busy_shutting_down);
@@ -5009,7 +5041,7 @@ reinit_after_soft_reset:
dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
h->devname, pdev->device, pci_name(pdev),
- h->intr[PERF_MODE_INT], dac ? "" : " not");
+ h->intr[h->intr_mode], dac ? "" : " not");
if (cciss_allocate_cmd_pool(h))
goto clean4;
@@ -5056,7 +5088,7 @@ reinit_after_soft_reset:
spin_lock_irqsave(&h->lock, flags);
h->access.set_intr_mask(h, CCISS_INTR_OFF);
spin_unlock_irqrestore(&h->lock, flags);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
rc = cciss_request_irq(h, cciss_msix_discard_completions,
cciss_intx_discard_completions);
if (rc) {
@@ -5133,7 +5165,7 @@ clean4:
cciss_free_cmd_pool(h);
cciss_free_scatterlists(h);
cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
clean2:
unregister_blkdev(h->major, h->devname);
clean1:
@@ -5172,9 +5204,31 @@ static void cciss_shutdown(struct pci_dev *pdev)
if (return_code != IO_OK)
dev_warn(&h->pdev->dev, "Error flushing cache\n");
h->access.set_intr_mask(h, CCISS_INTR_OFF);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
+}
+
+static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
+{
+ u32 trans_support;
+
+ trans_support = readl(&(h->cfgtable->TransportSupport));
+ if (!(trans_support & SIMPLE_MODE))
+ return -ENOTSUPP;
+
+ h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ cciss_wait_for_mode_change_ack(h);
+ print_cfg_table(h);
+ if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+ dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+ return -ENODEV;
+ }
+ h->transMethod = CFGTBL_Trans_Simple;
+ return 0;
}
+
static void __devexit cciss_remove_one(struct pci_dev *pdev)
{
ctlr_info_t *h;
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index c049548e68b..7fda30e4a24 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -92,6 +92,7 @@ struct ctlr_info
unsigned int intr[4];
unsigned int msix_vector;
unsigned int msi_vector;
+ int intr_mode;
int cciss_max_sectors;
BYTE cciss_read;
BYTE cciss_write;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b2fceb53e80..9125bbeacd4 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -620,6 +620,7 @@ static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
}
vendor_id = pdev->vendor;
device_id = pdev->device;
+ revision = pdev->revision;
irq = pdev->irq;
for(i=0; i<6; i++)
@@ -632,7 +633,6 @@ static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
}
pci_read_config_word(pdev, PCI_COMMAND, &command);
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 1706d60b8c9..9cf20355cee 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1506,7 +1506,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev);
extern int proc_details;
/* drbd_req */
-extern int drbd_make_request(struct request_queue *q, struct bio *bio);
+extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3424d675b76..4a0f314086e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1073,7 +1073,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
return 0;
}
-int drbd_make_request(struct request_queue *q, struct bio *bio)
+void drbd_make_request(struct request_queue *q, struct bio *bio)
{
unsigned int s_enr, e_enr;
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
@@ -1081,7 +1081,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
bio_endio(bio, -EPERM);
- return 0;
+ return;
}
start_time = jiffies;
@@ -1100,7 +1100,8 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
if (likely(s_enr == e_enr)) {
inc_ap_bio(mdev, 1);
- return drbd_make_request_common(mdev, bio, start_time);
+ drbd_make_request_common(mdev, bio, start_time);
+ return;
}
/* can this bio be split generically?
@@ -1148,7 +1149,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
bio_pair_release(bp);
}
- return 0;
}
/* This is called by bio_add_page(). With this function we reduce
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4720c7ade0a..3d806820280 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -76,6 +76,8 @@
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+
#include <asm/uaccess.h>
static DEFINE_IDR(loop_index_idr);
@@ -203,74 +205,6 @@ lo_do_transfer(struct loop_device *lo, int cmd,
}
/**
- * do_lo_send_aops - helper for writing data to a loop device
- *
- * This is the fast version for backing filesystems which implement the address
- * space operations write_begin and write_end.
- */
-static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
- loff_t pos, struct page *unused)
-{
- struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
- struct address_space *mapping = file->f_mapping;
- pgoff_t index;
- unsigned offset, bv_offs;
- int len, ret;
-
- mutex_lock(&mapping->host->i_mutex);
- index = pos >> PAGE_CACHE_SHIFT;
- offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
- bv_offs = bvec->bv_offset;
- len = bvec->bv_len;
- while (len > 0) {
- sector_t IV;
- unsigned size, copied;
- int transfer_result;
- struct page *page;
- void *fsdata;
-
- IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
- size = PAGE_CACHE_SIZE - offset;
- if (size > len)
- size = len;
-
- ret = pagecache_write_begin(file, mapping, pos, size, 0,
- &page, &fsdata);
- if (ret)
- goto fail;
-
- file_update_time(file);
-
- transfer_result = lo_do_transfer(lo, WRITE, page, offset,
- bvec->bv_page, bv_offs, size, IV);
- copied = size;
- if (unlikely(transfer_result))
- copied = 0;
-
- ret = pagecache_write_end(file, mapping, pos, size, copied,
- page, fsdata);
- if (ret < 0 || ret != copied)
- goto fail;
-
- if (unlikely(transfer_result))
- goto fail;
-
- bv_offs += copied;
- len -= copied;
- offset = 0;
- index++;
- pos += copied;
- }
- ret = 0;
-out:
- mutex_unlock(&mapping->host->i_mutex);
- return ret;
-fail:
- ret = -1;
- goto out;
-}
-
-/**
* __do_lo_send_write - helper for writing data to a loop device
*
* This helper just factors out common code between do_lo_send_direct_write()
@@ -297,10 +231,8 @@ static int __do_lo_send_write(struct file *file,
/**
* do_lo_send_direct_write - helper for writing data to a loop device
*
- * This is the fast, non-transforming version for backing filesystems which do
- * not implement the address space operations write_begin and write_end.
- * It uses the write file operation which should be present on all writeable
- * filesystems.
+ * This is the fast, non-transforming version that does not need double
+ * buffering.
*/
static int do_lo_send_direct_write(struct loop_device *lo,
struct bio_vec *bvec, loff_t pos, struct page *page)
@@ -316,15 +248,9 @@ static int do_lo_send_direct_write(struct loop_device *lo,
/**
* do_lo_send_write - helper for writing data to a loop device
*
- * This is the slow, transforming version for filesystems which do not
- * implement the address space operations write_begin and write_end. It
- * uses the write file operation which should be present on all writeable
- * filesystems.
- *
- * Using fops->write is slower than using aops->{prepare,commit}_write in the
- * transforming case because we need to double buffer the data as we cannot do
- * the transformations in place as we do not have direct access to the
- * destination pages of the backing file.
+ * This is the slow, transforming version that needs to double buffer the
+ * data as it cannot do the transformations in place without having direct
+ * access to the destination pages of the backing file.
*/
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
loff_t pos, struct page *page)
@@ -350,17 +276,16 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
struct page *page = NULL;
int i, ret = 0;
- do_lo_send = do_lo_send_aops;
- if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
+ if (lo->transfer != transfer_none) {
+ page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+ if (unlikely(!page))
+ goto fail;
+ kmap(page);
+ do_lo_send = do_lo_send_write;
+ } else {
do_lo_send = do_lo_send_direct_write;
- if (lo->transfer != transfer_none) {
- page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page))
- goto fail;
- kmap(page);
- do_lo_send = do_lo_send_write;
- }
}
+
bio_for_each_segment(bvec, bio, i) {
ret = do_lo_send(lo, bvec, pos, page);
if (ret < 0)
@@ -484,6 +409,29 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
}
}
+ /*
+ * We use punch hole to reclaim the free space used by the
+ * image a.k.a. discard. However we do not support discard if
+ * encryption is enabled, because it may give an attacker
+ * useful information.
+ */
+ if (bio->bi_rw & REQ_DISCARD) {
+ struct file *file = lo->lo_backing_file;
+ int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+
+ if ((!file->f_op->fallocate) ||
+ lo->lo_encrypt_key_size) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ ret = file->f_op->fallocate(file, mode, pos,
+ bio->bi_size);
+ if (unlikely(ret && ret != -EINVAL &&
+ ret != -EOPNOTSUPP))
+ ret = -EIO;
+ goto out;
+ }
+
ret = lo_send(lo, bio, pos);
if ((bio->bi_rw & REQ_FUA) && !ret) {
@@ -514,7 +462,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
return bio_list_pop(&lo->lo_bio_list);
}
-static int loop_make_request(struct request_queue *q, struct bio *old_bio)
+static void loop_make_request(struct request_queue *q, struct bio *old_bio)
{
struct loop_device *lo = q->queuedata;
int rw = bio_rw(old_bio);
@@ -532,12 +480,11 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
loop_add_bio(lo, old_bio);
wake_up(&lo->lo_event);
spin_unlock_irq(&lo->lo_lock);
- return 0;
+ return;
out:
spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio);
- return 0;
}
struct switch_request {
@@ -700,7 +647,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_putf;
fput(old_file);
- if (max_part > 0)
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN)
ioctl_by_bdev(bdev, BLKRRPART, 0);
return 0;
@@ -777,16 +724,25 @@ static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
return sprintf(buf, "%s\n", autoclear ? "1" : "0");
}
+static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
+{
+ int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
+
+ return sprintf(buf, "%s\n", partscan ? "1" : "0");
+}
+
LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
+LOOP_ATTR_RO(partscan);
static struct attribute *loop_attrs[] = {
&loop_attr_backing_file.attr,
&loop_attr_offset.attr,
&loop_attr_sizelimit.attr,
&loop_attr_autoclear.attr,
+ &loop_attr_partscan.attr,
NULL,
};
@@ -807,6 +763,35 @@ static void loop_sysfs_exit(struct loop_device *lo)
&loop_attribute_group);
}
+static void loop_config_discard(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *inode = file->f_mapping->host;
+ struct request_queue *q = lo->lo_queue;
+
+ /*
+ * We use punch hole to reclaim the free space used by the
+ * image a.k.a. discard. However we do not support discard if
+ * encryption is enabled, because it may give an attacker
+ * useful information.
+ */
+ if ((!file->f_op->fallocate) ||
+ lo->lo_encrypt_key_size) {
+ q->limits.discard_granularity = 0;
+ q->limits.discard_alignment = 0;
+ q->limits.max_discard_sectors = 0;
+ q->limits.discard_zeroes_data = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ return;
+ }
+
+ q->limits.discard_granularity = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.max_discard_sectors = UINT_MAX >> 9;
+ q->limits.discard_zeroes_data = 1;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+}
+
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
@@ -849,35 +834,23 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mapping = file->f_mapping;
inode = mapping->host;
- if (!(file->f_mode & FMODE_WRITE))
- lo_flags |= LO_FLAGS_READ_ONLY;
-
error = -EINVAL;
- if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- const struct address_space_operations *aops = mapping->a_ops;
-
- if (aops->write_begin)
- lo_flags |= LO_FLAGS_USE_AOPS;
- if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
- lo_flags |= LO_FLAGS_READ_ONLY;
+ if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+ goto out_putf;
- lo_blocksize = S_ISBLK(inode->i_mode) ?
- inode->i_bdev->bd_block_size : PAGE_SIZE;
+ if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
+ !file->f_op->write)
+ lo_flags |= LO_FLAGS_READ_ONLY;
- error = 0;
- } else {
- goto out_putf;
- }
+ lo_blocksize = S_ISBLK(inode->i_mode) ?
+ inode->i_bdev->bd_block_size : PAGE_SIZE;
+ error = -EFBIG;
size = get_loop_size(lo, file);
-
- if ((loff_t)(sector_t)size != size) {
- error = -EFBIG;
+ if ((loff_t)(sector_t)size != size)
goto out_putf;
- }
- if (!(mode & FMODE_WRITE))
- lo_flags |= LO_FLAGS_READ_ONLY;
+ error = 0;
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
@@ -919,7 +892,9 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
}
lo->lo_state = Lo_bound;
wake_up_process(lo->lo_thread);
- if (max_part > 0)
+ if (part_shift)
+ lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN)
ioctl_by_bdev(bdev, BLKRRPART, 0);
return 0;
@@ -980,10 +955,11 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
return err;
}
-static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
+static int loop_clr_fd(struct loop_device *lo)
{
struct file *filp = lo->lo_backing_file;
gfp_t gfp = lo->old_gfp_mask;
+ struct block_device *bdev = lo->lo_device;
if (lo->lo_state != Lo_bound)
return -ENXIO;
@@ -1012,7 +988,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
lo->lo_encrypt_key_size = 0;
- lo->lo_flags = 0;
lo->lo_thread = NULL;
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
@@ -1030,8 +1005,11 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- if (max_part > 0 && bdev)
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
ioctl_by_bdev(bdev, BLKRRPART, 0);
+ lo->lo_flags = 0;
+ if (!part_shift)
+ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
mutex_unlock(&lo->lo_ctl_mutex);
/*
* Need not hold lo_ctl_mutex to fput backing file.
@@ -1085,6 +1063,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (figure_loop_size(lo))
return -EFBIG;
}
+ loop_config_discard(lo);
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
@@ -1100,6 +1079,13 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
(info->lo_flags & LO_FLAGS_AUTOCLEAR))
lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
+ if ((info->lo_flags & LO_FLAGS_PARTSCAN) &&
+ !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
+ lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
+ ioctl_by_bdev(lo->lo_device, BLKRRPART, 0);
+ }
+
lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
lo->lo_init[0] = info->lo_init[0];
lo->lo_init[1] = info->lo_init[1];
@@ -1293,7 +1279,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
break;
case LOOP_CLR_FD:
/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
- err = loop_clr_fd(lo, bdev);
+ err = loop_clr_fd(lo);
if (!err)
goto out_unlocked;
break;
@@ -1513,7 +1499,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
- err = loop_clr_fd(lo, NULL);
+ err = loop_clr_fd(lo);
if (!err)
goto out_unlocked;
} else {
@@ -1635,6 +1621,27 @@ static int loop_add(struct loop_device **l, int i)
if (!disk)
goto out_free_queue;
+ /*
+ * Disable partition scanning by default. The in-kernel partition
+ * scanning can be requested individually per-device during its
+ * setup. Userspace can always add and remove partitions from all
+ * devices. The needed partition minors are allocated from the
+ * extended minor space; the main loop device numbers will continue
+ * to match the loop minors, regardless of the number of partitions
+ * used.
+ *
+ * If max_part is given, partition scanning is globally enabled for
+ * all loop devices. The minors for the main loop devices will be
+ * multiples of max_part.
+ *
+ * Note: Global-for-all-devices, set-only-at-init, read-only module
+ * parameters like 'max_loop' and 'max_part' make things needlessly
+ * complicated, are too static, inflexible and may surprise
+ * userspace tools. Parameters like this in general should be avoided.
+ */
+ if (!part_shift)
+ disk->flags |= GENHD_FL_NO_PART_SCAN;
+ disk->flags |= GENHD_FL_EXT_DEVT;
mutex_init(&lo->lo_ctl_mutex);
lo->lo_number = i;
lo->lo_thread = NULL;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index f533f3375e2..c3f0ee16594 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -127,8 +127,7 @@ static void sock_shutdown(struct nbd_device *lo, int lock)
if (lock)
mutex_lock(&lo->tx_lock);
if (lo->sock) {
- printk(KERN_WARNING "%s: shutting down socket\n",
- lo->disk->disk_name);
+ dev_warn(disk_to_dev(lo->disk), "shutting down socket\n");
kernel_sock_shutdown(lo->sock, SHUT_RDWR);
lo->sock = NULL;
}
@@ -158,8 +157,9 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
sigset_t blocked, oldset;
if (unlikely(!sock)) {
- printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
- lo->disk->disk_name, (send ? "send" : "recv"));
+ dev_err(disk_to_dev(lo->disk),
+ "Attempted %s on closed socket in sock_xmit\n",
+ (send ? "send" : "recv"));
return -EINVAL;
}
@@ -250,8 +250,8 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
result = sock_xmit(lo, 1, &request, sizeof(request),
(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
- printk(KERN_ERR "%s: Send control failed (result %d)\n",
- lo->disk->disk_name, result);
+ dev_err(disk_to_dev(lo->disk),
+ "Send control failed (result %d)\n", result);
goto error_out;
}
@@ -270,8 +270,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
lo->disk->disk_name, req, bvec->bv_len);
result = sock_send_bvec(lo, bvec, flags);
if (result <= 0) {
- printk(KERN_ERR "%s: Send data failed (result %d)\n",
- lo->disk->disk_name, result);
+ dev_err(disk_to_dev(lo->disk),
+ "Send data failed (result %d)\n",
+ result);
goto error_out;
}
}
@@ -328,14 +329,13 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
reply.magic = 0;
result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
if (result <= 0) {
- printk(KERN_ERR "%s: Receive control failed (result %d)\n",
- lo->disk->disk_name, result);
+ dev_err(disk_to_dev(lo->disk),
+ "Receive control failed (result %d)\n", result);
goto harderror;
}
if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
- printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
- lo->disk->disk_name,
+ dev_err(disk_to_dev(lo->disk), "Wrong magic (0x%lx)\n",
(unsigned long)ntohl(reply.magic));
result = -EPROTO;
goto harderror;
@@ -347,15 +347,15 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
if (result != -ENOENT)
goto harderror;
- printk(KERN_ERR "%s: Unexpected reply (%p)\n",
- lo->disk->disk_name, reply.handle);
+ dev_err(disk_to_dev(lo->disk), "Unexpected reply (%p)\n",
+ reply.handle);
result = -EBADR;
goto harderror;
}
if (ntohl(reply.error)) {
- printk(KERN_ERR "%s: Other side returned error (%d)\n",
- lo->disk->disk_name, ntohl(reply.error));
+ dev_err(disk_to_dev(lo->disk), "Other side returned error (%d)\n",
+ ntohl(reply.error));
req->errors++;
return req;
}
@@ -369,8 +369,8 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
rq_for_each_segment(bvec, req, iter) {
result = sock_recv_bvec(lo, bvec);
if (result <= 0) {
- printk(KERN_ERR "%s: Receive data failed (result %d)\n",
- lo->disk->disk_name, result);
+ dev_err(disk_to_dev(lo->disk), "Receive data failed (result %d)\n",
+ result);
req->errors++;
return req;
}
@@ -405,10 +405,10 @@ static int nbd_do_it(struct nbd_device *lo)
BUG_ON(lo->magic != LO_MAGIC);
- lo->pid = current->pid;
- ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
+ lo->pid = task_pid_nr(current);
+ ret = device_create_file(disk_to_dev(lo->disk), &pid_attr);
if (ret) {
- printk(KERN_ERR "nbd: sysfs_create_file failed!");
+ dev_err(disk_to_dev(lo->disk), "device_create_file failed!\n");
lo->pid = 0;
return ret;
}
@@ -416,7 +416,7 @@ static int nbd_do_it(struct nbd_device *lo)
while ((req = nbd_read_stat(lo)) != NULL)
nbd_end_request(req);
- sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
+ device_remove_file(disk_to_dev(lo->disk), &pid_attr);
lo->pid = 0;
return 0;
}
@@ -457,8 +457,8 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
if (rq_data_dir(req) == WRITE) {
nbd_cmd(req) = NBD_CMD_WRITE;
if (lo->flags & NBD_READ_ONLY) {
- printk(KERN_ERR "%s: Write on read-only\n",
- lo->disk->disk_name);
+ dev_err(disk_to_dev(lo->disk),
+ "Write on read-only\n");
goto error_out;
}
}
@@ -468,16 +468,15 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
mutex_lock(&lo->tx_lock);
if (unlikely(!lo->sock)) {
mutex_unlock(&lo->tx_lock);
- printk(KERN_ERR "%s: Attempted send on closed socket\n",
- lo->disk->disk_name);
+ dev_err(disk_to_dev(lo->disk),
+ "Attempted send on closed socket\n");
goto error_out;
}
lo->active_req = req;
if (nbd_send_req(lo, req) != 0) {
- printk(KERN_ERR "%s: Request send failed\n",
- lo->disk->disk_name);
+ dev_err(disk_to_dev(lo->disk), "Request send failed\n");
req->errors++;
nbd_end_request(req);
} else {
@@ -549,8 +548,8 @@ static void do_nbd_request(struct request_queue *q)
BUG_ON(lo->magic != LO_MAGIC);
if (unlikely(!lo->sock)) {
- printk(KERN_ERR "%s: Attempted send on closed socket\n",
- lo->disk->disk_name);
+ dev_err(disk_to_dev(lo->disk),
+ "Attempted send on closed socket\n");
req->errors++;
nbd_end_request(req);
spin_lock_irq(q->queue_lock);
@@ -576,7 +575,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
case NBD_DISCONNECT: {
struct request sreq;
- printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+ dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n");
blk_rq_init(NULL, &sreq);
sreq.cmd_type = REQ_TYPE_SPECIAL;
@@ -674,7 +673,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
file = lo->file;
lo->file = NULL;
nbd_clear_que(lo);
- printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
+ dev_warn(disk_to_dev(lo->disk), "queue cleared\n");
if (file)
fput(file);
lo->bytesize = 0;
@@ -694,8 +693,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
return 0;
case NBD_PRINT_DEBUG:
- printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
- bdev->bd_disk->disk_name,
+ dev_info(disk_to_dev(lo->disk),
+ "next = %p, prev = %p, head = %p\n",
lo->queue_head.next, lo->queue_head.prev,
&lo->queue_head);
return 0;
@@ -745,7 +744,7 @@ static int __init nbd_init(void)
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
if (max_part < 0) {
- printk(KERN_CRIT "nbd: max_part must be >= 0\n");
+ printk(KERN_ERR "nbd: max_part must be >= 0\n");
return -EINVAL;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e133f094ab0..a63b0a2b780 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2444,7 +2444,7 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
pkt_bio_finished(pd);
}
-static int pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@@ -2473,7 +2473,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio->bi_size >> 9;
pkt_queue_bio(pd, cloned_bio);
- return 0;
+ return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2509,7 +2509,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
pkt_make_request(q, &bp->bio1);
pkt_make_request(q, &bp->bio2);
bio_pair_release(bp);
- return 0;
+ return;
}
}
@@ -2533,7 +2533,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
}
spin_unlock(&pkt->lock);
spin_unlock(&pd->cdrw.active_list_lock);
- return 0;
+ return;
} else {
blocked_bio = 1;
}
@@ -2584,10 +2584,9 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
*/
wake_up(&pd->wqueue);
}
- return 0;
+ return;
end_io:
bio_io_error(bio);
- return 0;
}
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 8e1ce2e2916..da0abc1838c 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -21,6 +21,7 @@
#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b3bdb8af89c..f58cdcfb305 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -10,6 +10,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -596,7 +597,7 @@ out:
return next;
}
-static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
{
struct ps3_system_bus_device *dev = q->queuedata;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -610,13 +611,11 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
spin_unlock_irq(&priv->lock);
if (busy)
- return 0;
+ return;
do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);
-
- return 0;
}
static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index fe3c3249cec..65cc424359b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -260,7 +260,7 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt,
kref_init(&rbdc->kref);
INIT_LIST_HEAD(&rbdc->node);
- rbdc->client = ceph_create_client(opt, rbdc);
+ rbdc->client = ceph_create_client(opt, rbdc, 0, 0);
if (IS_ERR(rbdc->client))
goto out_rbdc;
opt = NULL; /* Now rbdc->client is responsible for opt */
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 031ca720d92..aa2712060bf 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,7 +513,7 @@ static void process_page(unsigned long data)
}
}
-static int mm_make_request(struct request_queue *q, struct bio *bio)
+static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
@@ -525,7 +525,7 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
card->biotail = &bio->bi_next;
spin_unlock_irq(&card->lock);
- return 0;
+ return;
}
static irqreturn_t mm_interrupt(int irq, void *__card)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 079c08808d8..4d0b70adf5f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -3,15 +3,19 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
+#include <linux/idr.h>
#define PART_BITS 4
-static int major, index;
+static int major;
+static DEFINE_IDA(vd_index_ida);
+
struct workqueue_struct *virtblk_wq;
struct virtio_blk
@@ -35,6 +39,9 @@ struct virtio_blk
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
+ /* Ida index - used to track minor number allocations. */
+ int index;
+
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[/*sg_elems*/];
};
@@ -276,6 +283,11 @@ static int index_to_minor(int index)
return index << PART_BITS;
}
+static int minor_to_index(int minor)
+{
+ return minor >> PART_BITS;
+}
+
static ssize_t virtblk_serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -341,14 +353,17 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
struct request_queue *q;
- int err;
+ int err, index;
u64 cap;
u32 v, blk_size, sg_elems, opt_io_size;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
- if (index_to_minor(index) >= 1 << MINORBITS)
- return -ENOSPC;
+ err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
+ GFP_KERNEL);
+ if (err < 0)
+ goto out;
+ index = err;
/* We need to know how many segments before we allocate. */
err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
@@ -365,7 +380,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
if (!vblk) {
err = -ENOMEM;
- goto out;
+ goto out_free_index;
}
INIT_LIST_HEAD(&vblk->reqs);
@@ -421,7 +436,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
vblk->disk->driverfs_dev = &vdev->dev;
- index++;
+ vblk->index = index;
/* configure queue flush support */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
@@ -516,6 +531,8 @@ out_free_vq:
vdev->config->del_vqs(vdev);
out_free_vblk:
kfree(vblk);
+out_free_index:
+ ida_simple_remove(&vd_index_ida, index);
out:
return err;
}
@@ -523,6 +540,7 @@ out:
static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ int index = vblk->index;
flush_work(&vblk->config_work);
@@ -538,6 +556,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
mempool_destroy(vblk->pool);
vdev->config->del_vqs(vdev);
kfree(vblk);
+ ida_simple_remove(&vd_index_ida, index);
}
static const struct virtio_device_id id_table[] = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 1540792b1e5..15ec4db194d 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,6 +39,9 @@
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/loop.h>
+#include <linux/falloc.h>
+#include <linux/fs.h>
#include <xen/events.h>
#include <xen/page.h>
@@ -258,13 +261,16 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
static void print_stats(struct xen_blkif *blkif)
{
- pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n",
+ pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
+ " | ds %4d\n",
current->comm, blkif->st_oo_req,
- blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
+ blkif->st_rd_req, blkif->st_wr_req,
+ blkif->st_f_req, blkif->st_ds_req);
blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
blkif->st_rd_req = 0;
blkif->st_wr_req = 0;
blkif->st_oo_req = 0;
+ blkif->st_ds_req = 0;
}
int xen_blkif_schedule(void *arg)
@@ -410,6 +416,59 @@ static int xen_blkbk_map(struct blkif_request *req,
return ret;
}
+static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
+{
+ int err = 0;
+ int status = BLKIF_RSP_OKAY;
+ struct block_device *bdev = blkif->vbd.bdev;
+
+ if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
+ /* just forward the discard request */
+ err = blkdev_issue_discard(bdev,
+ req->u.discard.sector_number,
+ req->u.discard.nr_sectors,
+ GFP_KERNEL, 0);
+ else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
+ /* punch a hole in the backing file */
+ struct loop_device *lo = bdev->bd_disk->private_data;
+ struct file *file = lo->lo_backing_file;
+
+ if (file->f_op->fallocate)
+ err = file->f_op->fallocate(file,
+ FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+ req->u.discard.sector_number << 9,
+ req->u.discard.nr_sectors << 9);
+ else
+ err = -EOPNOTSUPP;
+ } else
+ err = -EOPNOTSUPP;
+
+ if (err == -EOPNOTSUPP) {
+ pr_debug(DRV_PFX "discard op failed, not supported\n");
+ status = BLKIF_RSP_EOPNOTSUPP;
+ } else if (err)
+ status = BLKIF_RSP_ERROR;
+
+ make_response(blkif, req->id, req->operation, status);
+}
+
+static void xen_blk_drain_io(struct xen_blkif *blkif)
+{
+ atomic_set(&blkif->drain, 1);
+ do {
+ /* The initial value is one, and one refcnt is taken at the
+ * start of the xen_blkif_schedule thread. */
+ if (atomic_read(&blkif->refcnt) <= 2)
+ break;
+ wait_for_completion_interruptible_timeout(
+ &blkif->drain_complete, HZ);
+
+ if (!atomic_read(&blkif->drain))
+ break;
+ } while (!kthread_should_stop());
+ atomic_set(&blkif->drain, 0);
+}
+
/*
* Completion callback on the bio's. Called as bh->b_end_io()
*/
@@ -422,6 +481,11 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
+ } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
+ (error == -EOPNOTSUPP)) {
+ pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+ xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
+ pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if (error) {
pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
" error=%d\n", error);
@@ -438,6 +502,10 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
make_response(pending_req->blkif, pending_req->id,
pending_req->operation, pending_req->status);
xen_blkif_put(pending_req->blkif);
+ if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
+ if (atomic_read(&pending_req->blkif->drain))
+ complete(&pending_req->blkif->drain_complete);
+ }
free_req(pending_req);
}
}
@@ -532,7 +600,6 @@ do_block_io_op(struct xen_blkif *blkif)
return more_to_do;
}
-
/*
* Transmutation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlying storage.
@@ -549,6 +616,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
int i, nbio = 0;
int operation;
struct blk_plug plug;
+ bool drain = false;
switch (req->operation) {
case BLKIF_OP_READ:
@@ -559,11 +627,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
blkif->st_wr_req++;
operation = WRITE_ODIRECT;
break;
+ case BLKIF_OP_WRITE_BARRIER:
+ drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
blkif->st_f_req++;
operation = WRITE_FLUSH;
break;
- case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_DISCARD:
+ blkif->st_ds_req++;
+ operation = REQ_DISCARD;
+ break;
default:
operation = 0; /* make gcc happy */
goto fail_response;
@@ -572,7 +645,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
/* Check that the number of segments is sane. */
nseg = req->nr_segments;
- if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+ if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
+ operation != REQ_DISCARD) ||
unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
nseg);
@@ -621,16 +695,25 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
}
}
+ /* Wait on all outstanding I/O's and, once they have completed,
+ * issue the WRITE_FLUSH.
+ */
+ if (drain)
+ xen_blk_drain_io(pending_req->blkif);
+
/*
* If we have failed at this point, we need to undo the M2P override,
* set gnttab_set_unmap_op on all of the grant references and perform
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
- if (xen_blkbk_map(req, pending_req, seg))
+ if (operation != REQ_DISCARD && xen_blkbk_map(req, pending_req, seg))
goto fail_flush;
- /* This corresponding xen_blkif_put is done in __end_block_io_op */
+ /*
+ * This corresponding xen_blkif_put is done in __end_block_io_op, or
+ * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
+ */
xen_blkif_get(blkif);
for (i = 0; i < nseg; i++) {
@@ -654,18 +737,25 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
preq.sector_number += seg[i].nsec;
}
- /* This will be hit if the operation was a flush. */
+ /* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation != WRITE_FLUSH);
+ BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
- bio = bio_alloc(GFP_KERNEL, 0);
- if (unlikely(bio == NULL))
- goto fail_put_bio;
+ if (operation == WRITE_FLUSH) {
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (unlikely(bio == NULL))
+ goto fail_put_bio;
- biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
- bio->bi_private = pending_req;
- bio->bi_end_io = end_block_io_op;
+ biolist[nbio++] = bio;
+ bio->bi_bdev = preq.bdev;
+ bio->bi_private = pending_req;
+ bio->bi_end_io = end_block_io_op;
+ } else if (operation == REQ_DISCARD) {
+ xen_blk_discard(blkif, req);
+ xen_blkif_put(blkif);
+ free_req(pending_req);
+ return 0;
+ }
}
/*
@@ -685,7 +775,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
if (operation == READ)
blkif->st_rd_sect += preq.nr_sects;
- else if (operation == WRITE || operation == WRITE_FLUSH)
+ else if (operation & WRITE)
blkif->st_wr_sect += preq.nr_sects;
return 0;
@@ -765,9 +855,9 @@ static int __init xen_blkif_init(void)
mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) *
+ blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
xen_blkif_reqs, GFP_KERNEL);
- blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
+ blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
mmap_pages, GFP_KERNEL);
blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
mmap_pages, GFP_KERNEL);
@@ -790,8 +880,6 @@ static int __init xen_blkif_init(void)
if (rc)
goto failed_init;
- memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs));
-
INIT_LIST_HEAD(&blkbk->pending_free);
spin_lock_init(&blkbk->pending_free_lock);
init_waitqueue_head(&blkbk->pending_free_wq);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index c4bd34063ec..dfb1b3a43a5 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -62,13 +62,26 @@ struct blkif_common_response {
/* i386 protocol version */
#pragma pack(push, 4)
+
+struct blkif_x86_32_request_rw {
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_32_request_discard {
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ uint64_t nr_sectors;
+};
+
struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
uint64_t id; /* private guest value, echoed in resp */
- blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ union {
+ struct blkif_x86_32_request_rw rw;
+ struct blkif_x86_32_request_discard discard;
+ } u;
};
struct blkif_x86_32_response {
uint64_t id; /* copied from request */
@@ -78,13 +91,26 @@ struct blkif_x86_32_response {
#pragma pack(pop)
/* x86_64 protocol version */
+
+struct blkif_x86_64_request_rw {
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_64_request_discard {
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ uint64_t nr_sectors;
+};
+
struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
uint64_t __attribute__((__aligned__(8))) id;
- blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ union {
+ struct blkif_x86_64_request_rw rw;
+ struct blkif_x86_64_request_discard discard;
+ } u;
};
struct blkif_x86_64_response {
uint64_t __attribute__((__aligned__(8))) id;
@@ -112,6 +138,11 @@ enum blkif_protocol {
BLKIF_PROTOCOL_X86_64 = 3,
};
+enum blkif_backend_type {
+ BLKIF_BACKEND_PHY = 1,
+ BLKIF_BACKEND_FILE = 2,
+};
+
struct xen_vbd {
/* What the domain refers to this vbd as. */
blkif_vdev_t handle;
@@ -137,8 +168,9 @@ struct xen_blkif {
unsigned int irq;
/* Comms information. */
enum blkif_protocol blk_protocol;
+ enum blkif_backend_type blk_backend_type;
union blkif_back_rings blk_rings;
- struct vm_struct *blk_ring_area;
+ void *blk_ring;
/* The VBD attached to this interface. */
struct xen_vbd vbd;
/* Back pointer to the backend_info. */
@@ -148,6 +180,9 @@ struct xen_blkif {
atomic_t refcnt;
wait_queue_head_t wq;
+ /* for barrier (drain) requests */
+ struct completion drain_complete;
+ atomic_t drain;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
@@ -158,13 +193,11 @@ struct xen_blkif {
int st_wr_req;
int st_oo_req;
int st_f_req;
+ int st_ds_req;
int st_rd_sect;
int st_wr_sect;
wait_queue_head_t waiting_to_free;
-
- grant_handle_t shmem_handle;
- grant_ref_t shmem_ref;
};
@@ -181,7 +214,7 @@ struct xen_blkif {
struct phys_req {
unsigned short dev;
- unsigned short nr_sects;
+ blkif_sector_t nr_sects;
struct block_device *bdev;
blkif_sector_t sector_number;
};
@@ -195,6 +228,8 @@ int xen_blkif_schedule(void *arg);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
+int xen_blkbk_barrier(struct xenbus_transaction xbt,
+ struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
@@ -205,12 +240,25 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
dst->nr_segments = src->nr_segments;
dst->handle = src->handle;
dst->id = src->id;
- dst->u.rw.sector_number = src->sector_number;
- barrier();
- if (n > dst->nr_segments)
- n = dst->nr_segments;
- for (i = 0; i < n; i++)
- dst->u.rw.seg[i] = src->seg[i];
+ switch (src->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ barrier();
+ if (n > dst->nr_segments)
+ n = dst->nr_segments;
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+ default:
+ break;
+ }
}
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
@@ -221,12 +269,25 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
dst->nr_segments = src->nr_segments;
dst->handle = src->handle;
dst->id = src->id;
- dst->u.rw.sector_number = src->sector_number;
- barrier();
- if (n > dst->nr_segments)
- n = dst->nr_segments;
- for (i = 0; i < n; i++)
- dst->u.rw.seg[i] = src->seg[i];
+ switch (src->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ barrier();
+ if (n > dst->nr_segments)
+ n = dst->nr_segments;
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+ default:
+ break;
+ }
}
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
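
Since the 32-bit and 64-bit request structures are a fixed guest ABI, it can be worth pinning the new union layouts at compile time. The following is an illustrative sketch only, not something the patch adds; it merely asserts that the discard variant does not grow a request beyond the read/write variant, so the ring slot size is unchanged:

/* Illustrative sketch: ABI guards one could call once from an init path. */
static void blkif_check_abi_example(void)
{
	BUILD_BUG_ON(sizeof(struct blkif_x86_32_request_discard) >
		     sizeof(struct blkif_x86_32_request_rw));
	BUILD_BUG_ON(sizeof(struct blkif_x86_64_request_discard) >
		     sizeof(struct blkif_x86_64_request_rw));
}
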
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 5fd2010f7d2..f759ad4584c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -114,44 +114,14 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
spin_lock_init(&blkif->blk_ring_lock);
atomic_set(&blkif->refcnt, 1);
init_waitqueue_head(&blkif->wq);
+ init_completion(&blkif->drain_complete);
+ atomic_set(&blkif->drain, 0);
blkif->st_print = jiffies;
init_waitqueue_head(&blkif->waiting_to_free);
return blkif;
}
-static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
-{
- struct gnttab_map_grant_ref op;
-
- gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
- GNTMAP_host_map, shared_page, blkif->domid);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status) {
- DPRINTK("Grant table operation failure !\n");
- return op.status;
- }
-
- blkif->shmem_ref = shared_page;
- blkif->shmem_handle = op.handle;
-
- return 0;
-}
-
-static void unmap_frontend_page(struct xen_blkif *blkif)
-{
- struct gnttab_unmap_grant_ref op;
-
- gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
- GNTMAP_host_map, blkif->shmem_handle);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
-}
-
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
unsigned int evtchn)
{
@@ -161,35 +131,29 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
if (blkif->irq)
return 0;
- blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
- if (!blkif->blk_ring_area)
- return -ENOMEM;
-
- err = map_frontend_page(blkif, shared_page);
- if (err) {
- free_vm_area(blkif->blk_ring_area);
+ err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
+ if (err < 0)
return err;
- }
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
struct blkif_sring *sring;
- sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
+ sring = (struct blkif_sring *)blkif->blk_ring;
BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
struct blkif_x86_32_sring *sring_x86_32;
- sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
+ sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
break;
}
case BLKIF_PROTOCOL_X86_64:
{
struct blkif_x86_64_sring *sring_x86_64;
- sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
+ sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
break;
}
@@ -201,8 +165,7 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
xen_blkif_be_int, 0,
"blkif-backend", blkif);
if (err < 0) {
- unmap_frontend_page(blkif);
- free_vm_area(blkif->blk_ring_area);
+ xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
blkif->blk_rings.common.sring = NULL;
return err;
}
@@ -228,8 +191,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
}
if (blkif->blk_rings.common.sring) {
- unmap_frontend_page(blkif);
- free_vm_area(blkif->blk_ring_area);
+ xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
blkif->blk_rings.common.sring = NULL;
}
}
@@ -272,6 +234,7 @@ VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req);
+VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
@@ -280,6 +243,7 @@ static struct attribute *xen_vbdstat_attrs[] = {
&dev_attr_rd_req.attr,
&dev_attr_wr_req.attr,
&dev_attr_f_req.attr,
+ &dev_attr_ds_req.attr,
&dev_attr_rd_sect.attr,
&dev_attr_wr_sect.attr,
NULL
@@ -419,6 +383,73 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
return err;
}
+int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
+{
+ struct xenbus_device *dev = be->dev;
+ struct xen_blkif *blkif = be->blkif;
+ char *type;
+ int err;
+ int state = 0;
+
+ type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
+ if (!IS_ERR(type)) {
+ if (strncmp(type, "file", 4) == 0) {
+ state = 1;
+ blkif->blk_backend_type = BLKIF_BACKEND_FILE;
+ }
+ if (strncmp(type, "phy", 3) == 0) {
+ struct block_device *bdev = be->blkif->vbd.bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ if (blk_queue_discard(q)) {
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-granularity", "%u",
+ q->limits.discard_granularity);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "writing discard-granularity");
+ goto kfree;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-alignment", "%u",
+ q->limits.discard_alignment);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "writing discard-alignment");
+ goto kfree;
+ }
+ state = 1;
+ blkif->blk_backend_type = BLKIF_BACKEND_PHY;
+ }
+ }
+ } else {
+ err = PTR_ERR(type);
+ xenbus_dev_fatal(dev, err, "reading type");
+ goto out;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-discard",
+ "%d", state);
+ if (err)
+ xenbus_dev_fatal(dev, err, "writing feature-discard");
+kfree:
+ kfree(type);
+out:
+ return err;
+}
+int xen_blkbk_barrier(struct xenbus_transaction xbt,
+ struct backend_info *be, int state)
+{
+ struct xenbus_device *dev = be->dev;
+ int err;
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
+ "%d", state);
+ if (err)
+ xenbus_dev_fatal(dev, err, "writing feature-barrier");
+
+ return err;
+}
+
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures, and watch the store waiting for the hotplug scripts to tell us
@@ -650,6 +681,11 @@ again:
if (err)
goto abort;
+ err = xen_blkbk_discard(xbt, be);
+
+	/* If we can't advertise it, that is OK. */
+ err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
if (err) {
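
xen_blkbk_discard() and xen_blkbk_barrier() are meant to be called with an open xenbus transaction, as the "again:" label in the connect path above suggests. A minimal sketch of that standard retry pattern, with illustrative names:

/* Illustrative sketch of the usual xenbus transaction retry loop. */
static void advertise_features_example(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return;

	/* As in the connect path: failing to advertise these is OK. */
	xen_blkbk_discard(xbt, be);
	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;	/* the store changed under us; retry */
}
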
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9ea8c2576c7..7b2ec590841 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,6 +98,9 @@ struct blkfront_info
unsigned long shadow_free;
unsigned int feature_flush;
unsigned int flush_op;
+ unsigned int feature_discard;
+ unsigned int discard_granularity;
+ unsigned int discard_alignment;
int is_ready;
};
@@ -302,29 +305,36 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = info->flush_op;
}
- ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
- BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ if (unlikely(req->cmd_flags & REQ_DISCARD)) {
+ /* id, sector_number and handle are set above. */
+ ring_req->operation = BLKIF_OP_DISCARD;
+ ring_req->nr_segments = 0;
+ ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+ } else {
+ ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+ BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
- for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
- buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
- fsect = sg->offset >> 9;
- lsect = fsect + (sg->length >> 9) - 1;
- /* install a grant reference. */
- ref = gnttab_claim_grant_reference(&gref_head);
- BUG_ON(ref == -ENOSPC);
+ for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+ buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
+ fsect = sg->offset >> 9;
+ lsect = fsect + (sg->length >> 9) - 1;
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
- gnttab_grant_foreign_access_ref(
- ref,
- info->xbdev->otherend_id,
- buffer_mfn,
- rq_data_dir(req) );
-
- info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
- ring_req->u.rw.seg[i] =
- (struct blkif_request_segment) {
- .gref = ref,
- .first_sect = fsect,
- .last_sect = lsect };
+ gnttab_grant_foreign_access_ref(
+ ref,
+ info->xbdev->otherend_id,
+ buffer_mfn,
+ rq_data_dir(req));
+
+ info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+ ring_req->u.rw.seg[i] =
+ (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect };
+ }
}
info->ring.req_prod_pvt++;
@@ -370,7 +380,9 @@ static void do_blkif_request(struct request_queue *rq)
blk_start_request(req);
- if (req->cmd_type != REQ_TYPE_FS) {
+ if ((req->cmd_type != REQ_TYPE_FS) ||
+ ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
+ !info->flush_op)) {
__blk_end_request_all(req, -EIO);
continue;
}
@@ -399,6 +411,7 @@ wait:
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
struct request_queue *rq;
+ struct blkfront_info *info = gd->private_data;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
if (rq == NULL)
@@ -406,6 +419,13 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+ if (info->feature_discard) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+ blk_queue_max_discard_sectors(rq, get_capacity(gd));
+ rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_alignment = info->discard_alignment;
+ }
+
/* Hard sector size and max sectors impersonate the equiv. hardware. */
blk_queue_logical_block_size(rq, sector_size);
blk_queue_max_hw_sectors(rq, 512);
@@ -722,6 +742,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
+ case BLKIF_OP_DISCARD:
+ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ struct request_queue *rq = info->rq;
+ printk(KERN_WARNING "blkfront: %s: discard op failed\n",
+ info->gd->disk_name);
+ error = -EOPNOTSUPP;
+ info->feature_discard = 0;
+ queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+ }
+ __blk_end_request_all(req, error);
+ break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
@@ -1098,6 +1129,33 @@ blkfront_closing(struct blkfront_info *info)
bdput(bdev);
}
+static void blkfront_setup_discard(struct blkfront_info *info)
+{
+ int err;
+ char *type;
+ unsigned int discard_granularity;
+ unsigned int discard_alignment;
+
+ type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
+ if (IS_ERR(type))
+ return;
+
+ if (strncmp(type, "phy", 3) == 0) {
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "discard-granularity", "%u", &discard_granularity,
+ "discard-alignment", "%u", &discard_alignment,
+ NULL);
+ if (!err) {
+ info->feature_discard = 1;
+ info->discard_granularity = discard_granularity;
+ info->discard_alignment = discard_alignment;
+ }
+ } else if (strncmp(type, "file", 4) == 0)
+ info->feature_discard = 1;
+
+ kfree(type);
+}
+
/*
 * Invoked when the backend is finally 'ready' (and has produced
* the details about the physical device - #sectors, size, etc).
@@ -1108,7 +1166,7 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long sector_size;
unsigned int binfo;
int err;
- int barrier, flush;
+ int barrier, flush, discard;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1178,7 +1236,14 @@ static void blkfront_connect(struct blkfront_info *info)
info->feature_flush = REQ_FLUSH;
info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
}
-
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-discard", "%d", &discard,
+ NULL);
+
+ if (!err && discard)
+ blkfront_setup_discard(info);
+
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
@@ -1385,6 +1450,8 @@ static struct xenbus_driver blkfront = {
static int __init xlblk_init(void)
{
+ int ret;
+
if (!xen_domain())
return -ENODEV;
@@ -1394,7 +1461,13 @@ static int __init xlblk_init(void)
return -ENODEV;
}
- return xenbus_register_frontend(&blkfront);
+ ret = xenbus_register_frontend(&blkfront);
+ if (ret) {
+ unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+ return ret;
+ }
+
+ return 0;
}
module_init(xlblk_init);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c2..5ccf142ef0b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 548d1d9e4dd..a88a78c8616 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -18,6 +18,8 @@
* this warranty disclaimer.
**/
+#include <linux/module.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index c827d737cce..27b74b0d547 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -23,6 +23,7 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -64,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@@ -91,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -102,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
@@ -1075,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
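
The new SD8797 entry reuses the shared btmrvl_reg_87xx register map by pointing driver_data at its own descriptor. A hedged sketch of how an SDIO probe typically recovers that descriptor; the real btmrvl probe does much more (firmware download, HCI registration, and so on):

/* Illustrative sketch only. */
static int btmrvl_sdio_probe_sketch(struct sdio_func *func,
				    const struct sdio_device_id *id)
{
	const struct btmrvl_sdio_device *data =
		(const struct btmrvl_sdio_device *)id->driver_data;

	/* data->firmware selects the SD8787 vs SD8797 image,
	 * data->reg points at the shared btmrvl_reg_87xx map. */
	pr_info("btmrvl_sdio: firmware %s\n", data->firmware);
	return 0;
}
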
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2bd87d45f1c..ea5ad1cbbd3 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -785,9 +785,8 @@ skip_waking:
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 04d353f58d7..b5f83b44a0c 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -29,6 +29,7 @@
#include <net/bluetooth/hci.h>
#include <linux/ti_wilink_st.h>
+#include <linux/module.h>
/* Bluetooth Driver Version */
#define VERSION "1.0"
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 2e302a11ab5..2ed6ab1c6e1 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -41,6 +41,8 @@
#define VERSION "1.3"
+static bool amp;
+
struct vhci_data {
struct hci_dev *hdev;
@@ -239,6 +241,9 @@ static int vhci_open(struct inode *inode, struct file *file)
hdev->bus = HCI_VIRTUAL;
hdev->driver_data = data;
+ if (amp)
+ hdev->dev_type = HCI_AMP;
+
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
@@ -303,6 +308,9 @@ static void __exit vhci_exit(void)
module_init(vhci_init);
module_exit(vhci_exit);
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 423fd56bf61..43643033a3a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -298,7 +298,7 @@ if RTC_LIB=n
config RTC
tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
- && !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN
+ && !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN && !UML
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -346,7 +346,7 @@ config JS_RTC
config GEN_RTC
tristate "Generic /dev/rtc emulation"
- depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN
+ depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -490,7 +490,7 @@ config SCx200_GPIO
config PC8736x_GPIO
tristate "NatSemi PC8736x GPIO Support"
- depends on X86_32
+ depends on X86_32 && !UML
default SCx200_GPIO # mostly N
select NSC_GPIO # needed for support routines
help
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 056b289a1e8..3695773ce7c 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -336,7 +336,8 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
off_t j, io_pg_start;
int io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
@@ -380,7 +381,8 @@ hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
struct _hp_private *hp = &hp_private;
int i, io_pg_start, io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 2774ac1086d..c92424ca1a5 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -923,6 +923,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
if (intel_private.clear_fake_agp) {
int start = intel_private.base.stolen_size / PAGE_SIZE;
int end = intel_private.base.gtt_mappable_entries;
@@ -985,6 +988,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
intel_gtt_clear_range(pg_start, mem->page_count);
if (intel_private.base.needs_dmar) {
@@ -1177,6 +1183,26 @@ static void gen6_cleanup(void)
{
}
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_idle_maps(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ const unsigned short gpu_devid = intel_private.pcidev->device;
+ extern int intel_iommu_gfx_mapped;
+
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ intel_iommu_gfx_mapped)
+ return 1;
+#endif
+ return 0;
+}
+
static int i9xx_setup(void)
{
u32 reg_addr;
@@ -1211,6 +1237,9 @@ static int i9xx_setup(void)
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
+ if (needs_idle_maps())
+ intel_private.base.do_idle_maps = 1;
+
intel_i9xx_setup_flush();
return 0;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1d2ebc7a494..0689bf6b018 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -60,6 +60,19 @@ config HW_RANDOM_AMD
If unsure, say Y.
+config HW_RANDOM_ATMEL
+ tristate "Atmel Random Number Generator support"
+ depends on HW_RANDOM && ARCH_AT91SAM9G45
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Atmel AT91 devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
depends on HW_RANDOM && X86_32 && PCI
@@ -222,3 +235,18 @@ config HW_RANDOM_PPC4XX
module will be called ppc4xx-rng.
If unsure, say N.
+
+config UML_RANDOM
+ depends on UML
+ tristate "Hardware random number generator"
+ help
+ This option enables UML's "hardware" random number generator. It
+ attaches itself to the host's /dev/random, supplying as much entropy
+ as the host has, rather than the small amount the UML gets from its
+ own drivers. It registers itself as a standard hardware random number
+ generator, major 10, minor 183, and the canonical device name is
+ /dev/hwrng.
+ The way to make use of this is to install the rng-tools package
+ (check your distro, or download from
+ http://sourceforge.net/projects/gkernel/). rngd periodically reads
+ /dev/hwrng and injects the entropy into /dev/random.
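
Both this UML driver and the Atmel driver below register with the hwrng core, which exposes the character device described above (/dev/hwrng, major 10, minor 183). A small userspace sketch of reading it directly, which is roughly what rngd does on a larger scale:

/* Illustrative userspace sketch: read a few bytes from /dev/hwrng. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf)) != sizeof(buf)) {
		perror("hwrng");
		return 1;
	}
	printf("first byte: %02x\n", buf[0]);
	close(fd);
	return 0;
}
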
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index c88f244c8a7..b2ff5265a99 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -7,6 +7,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
new file mode 100644
index 00000000000..241df2e76ab
--- /dev/null
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2011 Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#define TRNG_CR 0x00
+#define TRNG_ISR 0x1c
+#define TRNG_ODATA 0x50
+
+#define TRNG_KEY 0x524e4700 /* RNG */
+
+struct atmel_trng {
+ struct clk *clk;
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
+ u32 *data = buf;
+
+ /* data ready? */
+ if (readl(trng->base + TRNG_ODATA) & 1) {
+ *data = readl(trng->base + TRNG_ODATA);
+ return 4;
+ } else
+ return 0;
+}
+
+static int atmel_trng_probe(struct platform_device *pdev)
+{
+ struct atmel_trng *trng;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
+
+ trng->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!trng->base)
+ return -EBUSY;
+
+ trng->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(trng->clk))
+ return PTR_ERR(trng->clk);
+
+ ret = clk_enable(trng->clk);
+ if (ret)
+ goto err_enable;
+
+ writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+ trng->rng.name = pdev->name;
+ trng->rng.read = atmel_trng_read;
+
+ ret = hwrng_register(&trng->rng);
+ if (ret)
+ goto err_register;
+
+ platform_set_drvdata(pdev, trng);
+
+ return 0;
+
+err_register:
+ clk_disable(trng->clk);
+err_enable:
+ clk_put(trng->clk);
+
+ return ret;
+}
+
+static int __devexit atmel_trng_remove(struct platform_device *pdev)
+{
+ struct atmel_trng *trng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&trng->rng);
+
+ writel(TRNG_KEY, trng->base + TRNG_CR);
+ clk_disable(trng->clk);
+ clk_put(trng->clk);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atmel_trng_suspend(struct device *dev)
+{
+ struct atmel_trng *trng = dev_get_drvdata(dev);
+
+ clk_disable(trng->clk);
+
+ return 0;
+}
+
+static int atmel_trng_resume(struct device *dev)
+{
+ struct atmel_trng *trng = dev_get_drvdata(dev);
+
+ return clk_enable(trng->clk);
+}
+
+static const struct dev_pm_ops atmel_trng_pm_ops = {
+ .suspend = atmel_trng_suspend,
+ .resume = atmel_trng_resume,
+};
+#endif /* CONFIG_PM */
+
+static struct platform_driver atmel_trng_driver = {
+ .probe = atmel_trng_probe,
+ .remove = __devexit_p(atmel_trng_remove),
+ .driver = {
+ .name = "atmel-trng",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &atmel_trng_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init atmel_trng_init(void)
+{
+ return platform_driver_register(&atmel_trng_driver);
+}
+module_init(atmel_trng_init);
+
+static void __exit atmel_trng_exit(void)
+{
+ platform_driver_unregister(&atmel_trng_driver);
+}
+module_exit(atmel_trng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_DESCRIPTION("Atmel true random number generator driver");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 75f1cbd61c1..fd699ccecf5 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_rng.h>
+#include <linux/module.h>
static struct virtqueue *vq;
static unsigned int data_avail;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 8fc04b4f311..14517903371 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -26,6 +26,7 @@
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
+#include <linux/export.h>
#include <asm/uaccess.h>
#include <asm/io.h>
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index d0c57c2e290..6abdde4da2b 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -22,6 +22,7 @@
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 810aff9e750..7c7f42a1f88 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/kmsg_dump.h>
#include <linux/time.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c35a785005b..63e19ba56bb 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -932,7 +932,21 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
*/
void get_random_bytes(void *buf, int nbytes)
{
- extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+ char *p = buf;
+
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min(nbytes, (int)sizeof(unsigned long));
+
+ if (!arch_get_random_long(&v))
+ break;
+
+		memcpy(p, &v, chunk);
+ p += chunk;
+ nbytes -= chunk;
+ }
+
+ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
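
arch_get_random_long() comes from the architectural RNG hooks (RDRAND on x86); on architectures without such hardware it is, to the best of my recollection, a stub that returns 0, so the loop above falls straight through to the entropy pool. Roughly:

/* Rough shape of the fallback, recalled from the same series, not
 * taken from this diff. */
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>	/* e.g. x86 RDRAND-backed helpers */
#else
static inline int arch_get_random_long(unsigned long *v)
{
	return 0;	/* no hardware RNG: caller uses the pool instead */
}
#endif
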
@@ -1318,9 +1332,14 @@ late_initcall(random_int_secret_init);
DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
- __u32 *hash = get_cpu_var(get_random_int_hash);
+ __u32 *hash;
unsigned int ret;
+ if (arch_get_random_int(&ret))
+ return ret;
+
+ hash = get_cpu_var(get_random_int_hash);
+
hash[0] += current->pid + jiffies + get_cycles();
md5_transform(hash, random_int_secret);
ret = hash[0];
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index a1f68af4ccf..eedd5474850 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
+#include <linux/export.h>
struct ttyprintk_port {
struct tty_port port;
@@ -170,7 +171,7 @@ static const struct tty_operations ttyprintk_ops = {
.ioctl = tpk_ioctl,
};
-struct tty_port_operations null_ops = { };
+static struct tty_port_operations null_ops = { };
static struct tty_driver *ttyprintk_driver;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fb68b129537..8e3c46d67cb 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -19,8 +19,10 @@
*/
#include <linux/cdev.h>
#include <linux/debugfs.h>
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -32,6 +34,7 @@
#include <linux/virtio_console.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/module.h>
#include "../tty/hvc/hvc_console.h"
/*
@@ -73,6 +76,7 @@ struct ports_driver_data {
static struct ports_driver_data pdrvdata;
DEFINE_SPINLOCK(pdrvdata_lock);
+DECLARE_COMPLETION(early_console_added);
/* This struct holds information that's relevant only for console ports */
struct console {
@@ -151,6 +155,10 @@ struct ports_device {
int chr_major;
};
+struct port_stats {
+ unsigned long bytes_sent, bytes_received, bytes_discarded;
+};
+
/* This struct holds the per-port data */
struct port {
/* Next port in the list, head is in the ports_device */
@@ -179,6 +187,13 @@ struct port {
struct dentry *debugfs_file;
/*
+ * Keep count of the bytes sent, received and discarded for
+ * this port for accounting and debugging purposes. These
+ * counts are not reset across port open / close events.
+ */
+ struct port_stats stats;
+
+ /*
* The entries in this struct will be valid if this port is
* hooked up to an hvc console
*/
@@ -347,17 +362,19 @@ fail:
}
/* Callers should take appropriate locks */
-static void *get_inbuf(struct port *port)
+static struct port_buffer *get_inbuf(struct port *port)
{
struct port_buffer *buf;
- struct virtqueue *vq;
unsigned int len;
- vq = port->in_vq;
- buf = virtqueue_get_buf(vq, &len);
+ if (port->inbuf)
+ return port->inbuf;
+
+ buf = virtqueue_get_buf(port->in_vq, &len);
if (buf) {
buf->len = len;
buf->offset = 0;
+ port->stats.bytes_received += len;
}
return buf;
}
@@ -384,32 +401,27 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
static void discard_port_data(struct port *port)
{
struct port_buffer *buf;
- struct virtqueue *vq;
- unsigned int len;
- int ret;
+ unsigned int err;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
- vq = port->in_vq;
- if (port->inbuf)
- buf = port->inbuf;
- else
- buf = virtqueue_get_buf(vq, &len);
+ buf = get_inbuf(port);
- ret = 0;
+ err = 0;
while (buf) {
- if (add_inbuf(vq, buf) < 0) {
- ret++;
+ port->stats.bytes_discarded += buf->len - buf->offset;
+ if (add_inbuf(port->in_vq, buf) < 0) {
+ err++;
free_buf(buf);
}
- buf = virtqueue_get_buf(vq, &len);
+ port->inbuf = NULL;
+ buf = get_inbuf(port);
}
- port->inbuf = NULL;
- if (ret)
+ if (err)
dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
- ret);
+ err);
}
static bool port_has_data(struct port *port)
@@ -417,18 +429,12 @@ static bool port_has_data(struct port *port)
unsigned long flags;
bool ret;
+ ret = false;
spin_lock_irqsave(&port->inbuf_lock, flags);
- if (port->inbuf) {
- ret = true;
- goto out;
- }
port->inbuf = get_inbuf(port);
- if (port->inbuf) {
+ if (port->inbuf)
ret = true;
- goto out;
- }
- ret = false;
-out:
+
spin_unlock_irqrestore(&port->inbuf_lock, flags);
return ret;
}
@@ -529,6 +535,8 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
cpu_relax();
done:
spin_unlock_irqrestore(&port->outvq_lock, flags);
+
+ port->stats.bytes_sent += in_count;
/*
* We're expected to return the amount of data we wrote -- all
* of it
@@ -633,8 +641,8 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- ret = wait_event_interruptible(port->waitqueue,
- !will_read_block(port));
+ ret = wait_event_freezable(port->waitqueue,
+ !will_read_block(port));
if (ret < 0)
return ret;
}
@@ -677,8 +685,8 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
if (nonblock)
return -EAGAIN;
- ret = wait_event_interruptible(port->waitqueue,
- !will_write_block(port));
+ ret = wait_event_freezable(port->waitqueue,
+ !will_write_block(port));
if (ret < 0)
return ret;
}
@@ -1059,6 +1067,14 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"outvq_full: %d\n", port->outvq_full);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "bytes_sent: %lu\n", port->stats.bytes_sent);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "bytes_received: %lu\n",
+ port->stats.bytes_received);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "bytes_discarded: %lu\n",
+ port->stats.bytes_discarded);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"is_console: %s\n",
is_console_port(port) ? "yes" : "no");
out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -1143,6 +1159,7 @@ static int add_port(struct ports_device *portdev, u32 id)
port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
port->host_connected = port->guest_connected = false;
+ port->stats = (struct port_stats) { 0 };
port->outvq_full = false;
@@ -1352,6 +1369,7 @@ static void handle_control_message(struct ports_device *portdev,
break;
init_port_console(port);
+ complete(&early_console_added);
/*
* Could remove the port here in case init fails - but
* have to notify the host first.
@@ -1394,6 +1412,13 @@ static void handle_control_message(struct ports_device *portdev,
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
+ * If we woke up after hibernation, we can get this
+ * again. Skip it in that case.
+ */
+ if (port->name)
+ break;
+
+ /*
* Skip the size of the header and the cpkt to get the size
* of the name that was sent
*/
@@ -1481,8 +1506,7 @@ static void in_intr(struct virtqueue *vq)
return;
spin_lock_irqsave(&port->inbuf_lock, flags);
- if (!port->inbuf)
- port->inbuf = get_inbuf(port);
+ port->inbuf = get_inbuf(port);
/*
* Don't queue up data when port is closed. This condition
@@ -1563,7 +1587,7 @@ static int init_vqs(struct ports_device *portdev)
portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
GFP_KERNEL);
if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
- !portdev->out_vqs) {
+ !portdev->out_vqs) {
err = -ENOMEM;
goto free;
}
@@ -1648,6 +1672,10 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
struct ports_device *portdev;
int err;
bool multiport;
+ bool early = early_put_chars != NULL;
+
+	/* Make sure early_put_chars is read now */
+ barrier();
portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
if (!portdev) {
@@ -1675,13 +1703,11 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
multiport = false;
portdev->config.max_nr_ports = 1;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
+ if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+ offsetof(struct virtio_console_config,
+ max_nr_ports),
+ &portdev->config.max_nr_ports) == 0)
multiport = true;
- vdev->config->get(vdev, offsetof(struct virtio_console_config,
- max_nr_ports),
- &portdev->config.max_nr_ports,
- sizeof(portdev->config.max_nr_ports));
- }
err = init_vqs(portdev);
if (err < 0) {
@@ -1719,6 +1745,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
VIRTIO_CONSOLE_DEVICE_READY, 1);
+
+ /*
+ * If there was an early virtio console, assume that there are no
+ * other consoles. We need to wait until the hvc_alloc matches the
+ * hvc_instantiate, otherwise tty_open will complain, resulting in
+ * a "Warning: unable to open an initial console" boot failure.
+ * Without multiport this is done in add_port above. With multiport
+ * this might take some host<->guest communication - thus we have to
+ * wait.
+ */
+ if (multiport && early)
+ wait_for_completion(&early_console_added);
+
return 0;
free_vqs:
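
The early-console handling added to this file hinges on a completion: the control-message handler signals it once the console port is set up, and probe waits for it when an early console was in use. Stripped of the virtio details, the pattern is just this (illustrative names):

/* Minimal, self-contained sketch of the completion handshake above. */
static DECLARE_COMPLETION(console_ready_example);

static void control_path_example(void)
{
	/* ... console port initialised ... */
	complete(&console_ready_example);
}

static void probe_path_example(bool multiport, bool early)
{
	if (multiport && early)
		wait_for_completion(&console_ready_example);
}
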
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 34e9c4f8892..999d6a03e43 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -15,3 +15,18 @@ config CLKSRC_MMIO
config DW_APB_TIMER
bool
+
+config CLKSRC_DBX500_PRCMU
+ bool "Clocksource PRCMU Timer"
+ depends on UX500_SOC_DB5500 || UX500_SOC_DB8500
+ default y
+ help
+	  Use the always-on PRCMU timer as the clocksource
+
+config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
+ bool "Clocksource PRCMU Timer sched_clock"
+ depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
+ select HAVE_SCHED_CLOCK
+ default y
+ help
+	  Use the always-on PRCMU timer as sched_clock
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 85ad1646a7b..8d81a1d3265 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
+obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
\ No newline at end of file
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
new file mode 100644
index 00000000000..59feefe0e3e
--- /dev/null
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson
+ * Author: Sundar Iyer for ST-Ericsson
+ * sched_clock implementation is based on:
+ * plat-nomadik/timer.c Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * DBx500-PRCMU Timer
+ * The PRCMU has 5 timers which are available in an always-on
+ * power domain. We use the Timer 4 for our always-on clock
+ * source on DB8500 and Timer 3 on DB5500.
+ */
+#include <linux/clockchips.h>
+#include <linux/clksrc-dbx500-prcmu.h>
+
+#include <asm/sched_clock.h>
+
+#include <mach/setup.h>
+#include <mach/hardware.h>
+
+#define RATE_32K 32768
+
+#define TIMER_MODE_CONTINOUS 0x1
+#define TIMER_DOWNCOUNT_VAL 0xffffffff
+
+#define PRCMU_TIMER_REF 0
+#define PRCMU_TIMER_DOWNCOUNT 0x4
+#define PRCMU_TIMER_MODE 0x8
+
+#define SCHED_CLOCK_MIN_WRAP 131072 /* 2^32 / 32768 */
+
+static void __iomem *clksrc_dbx500_timer_base;
+
+static cycle_t clksrc_dbx500_prcmu_read(struct clocksource *cs)
+{
+ u32 count, count2;
+
+ do {
+ count = readl(clksrc_dbx500_timer_base +
+ PRCMU_TIMER_DOWNCOUNT);
+ count2 = readl(clksrc_dbx500_timer_base +
+ PRCMU_TIMER_DOWNCOUNT);
+ } while (count2 != count);
+
+ /* Negate because the timer is a decrementing counter */
+ return ~count;
+}
+
+static struct clocksource clocksource_dbx500_prcmu = {
+ .name = "dbx500-prcmu-timer",
+ .rating = 300,
+ .read = clksrc_dbx500_prcmu_read,
+ .shift = 10,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
+static DEFINE_CLOCK_DATA(cd);
+
+unsigned long long notrace sched_clock(void)
+{
+ u32 cyc;
+
+ if (unlikely(!clksrc_dbx500_timer_base))
+ return 0;
+
+ cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
+
+ return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+static void notrace clksrc_dbx500_prcmu_update_sched_clock(void)
+{
+ u32 cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+#endif
+
+void __init clksrc_dbx500_prcmu_init(void __iomem *base)
+{
+ clksrc_dbx500_timer_base = base;
+
+ /*
+	 * The A9 subsystem expects the timer to be configured as
+	 * a continuously looping timer.
+	 * The PRCMU should configure it, but if for some reason it
+	 * does not, we do it here.
+ */
+ if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
+ TIMER_MODE_CONTINOUS) {
+ writel(TIMER_MODE_CONTINOUS,
+ clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
+ writel(TIMER_DOWNCOUNT_VAL,
+ clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
+ }
+#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
+ init_sched_clock(&cd, clksrc_dbx500_prcmu_update_sched_clock,
+ 32, RATE_32K);
+#endif
+ clocksource_calc_mult_shift(&clocksource_dbx500_prcmu,
+ RATE_32K, SCHED_CLOCK_MIN_WRAP);
+ clocksource_register(&clocksource_dbx500_prcmu);
+}
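
clocksource_calc_mult_shift() picks a mult/shift pair for the 32 kHz rate such that cycle deltas convert to nanoseconds without overflow for at least SCHED_CLOCK_MIN_WRAP seconds. The conversion the clocksource core then performs is essentially:

/* Illustrative only: how cycles become nanoseconds with mult/shift. */
static inline u64 cyc_to_ns_example(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}
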
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32a77becc09..ca09bc421dd 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -31,6 +31,7 @@
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct sh_cmt_priv {
void __iomem *mapbase;
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 40630cb9823..db8d5955bad 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -30,6 +30,7 @@
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct sh_mtu2_priv {
void __iomem *mapbase;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 80813576861..079e96ad44e 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -31,6 +31,7 @@
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct sh_tmu_priv {
void __iomem *mapbase;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index faf7c521784..c5072a91e84 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -15,6 +15,7 @@
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
+#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index d90456a809f..f5002015d82 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -12,30 +12,35 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/mfd/db8500-prcmu.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <mach/id.h>
static struct cpufreq_frequency_table freq_table[] = {
[0] = {
.index = 0,
- .frequency = 300000,
+ .frequency = 200000,
},
[1] = {
.index = 1,
- .frequency = 600000,
+ .frequency = 300000,
},
[2] = {
- /* Used for MAX_OPP, if available */
.index = 2,
- .frequency = CPUFREQ_TABLE_END,
+ .frequency = 600000,
},
[3] = {
+ /* Used for MAX_OPP, if available */
.index = 3,
.frequency = CPUFREQ_TABLE_END,
},
+ [4] = {
+ .index = 4,
+ .frequency = CPUFREQ_TABLE_END,
+ },
};
static enum arm_opp idx2opp[] = {
+ ARM_EXTCLK,
ARM_50_OPP,
ARM_100_OPP,
ARM_MAX_OPP
@@ -72,13 +77,13 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = freq_table[idx].frequency;
- freqs.cpu = policy->cpu;
if (freqs.old == freqs.new)
return 0;
/* pre-change notification */
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* request the PRCM unit for opp change */
if (prcmu_set_arm_opp(idx2opp[idx])) {
@@ -87,7 +92,8 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
}
/* post change notification */
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return 0;
}
@@ -103,17 +109,19 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
{
- int res;
- int i;
+ int i, res;
BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
- if (cpu_is_u8500v2() && !prcmu_is_u8400()) {
- freq_table[0].frequency = 400000;
- freq_table[1].frequency = 800000;
+ if (!prcmu_is_u8400()) {
+ freq_table[1].frequency = 400000;
+ freq_table[2].frequency = 800000;
if (prcmu_has_arm_maxopp())
- freq_table[2].frequency = 1000000;
+ freq_table[3].frequency = 1000000;
}
+	pr_info("db8500-cpufreq: Available frequencies:\n");
+	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+		pr_info("  %d MHz\n", freq_table[i].frequency / 1000);
/* get policy fields based on the table */
res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
@@ -127,10 +135,6 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
policy->cur = db8500_cpufreq_getspeed(policy->cpu);
-
- for (i = 0; freq_table[i].frequency != policy->cur; i++)
- ;
-
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/*
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 35a257dd4bb..4bd6815d317 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -19,6 +19,11 @@
#include <asm/msr.h>
#include <asm/tsc.h>
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#endif
+
#define EPS_BRAND_C7M 0
#define EPS_BRAND_C7 1
#define EPS_BRAND_EDEN 2
@@ -27,11 +32,59 @@
struct eps_cpu_data {
u32 fsb;
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+ u32 bios_limit;
+#endif
struct cpufreq_frequency_table freq_table[];
};
static struct eps_cpu_data *eps_cpu[NR_CPUS];
+/* Module parameters */
+static int freq_failsafe_off;
+static int voltage_failsafe_off;
+static int set_max_voltage;
+
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+static int ignore_acpi_limit;
+
+static struct acpi_processor_performance *eps_acpi_cpu_perf;
+
+/* Minimum necessary to get acpi_processor_get_bios_limit() working */
+static int eps_acpi_init(void)
+{
+ eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
+ if (!eps_acpi_cpu_perf)
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&eps_acpi_cpu_perf->shared_cpu_map,
+ GFP_KERNEL)) {
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ return -ENOMEM;
+ }
+
+ if (acpi_processor_register_performance(eps_acpi_cpu_perf, 0)) {
+ free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ return -EIO;
+ }
+ return 0;
+}
+
+static int eps_acpi_exit(struct cpufreq_policy *policy)
+{
+ if (eps_acpi_cpu_perf) {
+ acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+ free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ }
+ return 0;
+}
+#endif
static unsigned int eps_get(unsigned int cpu)
{
@@ -164,6 +217,9 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
int k, step, voltage;
int ret;
int states;
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+ unsigned int limit;
+#endif
if (policy->cpu != 0)
return -ENODEV;
@@ -244,11 +300,62 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -EINVAL;
if (current_voltage > 0x1f || max_voltage > 0x1f)
return -EINVAL;
- if (max_voltage < min_voltage)
+ if (max_voltage < min_voltage
+ || current_voltage < min_voltage
+ || current_voltage > max_voltage)
return -EINVAL;
+ /* Check for systems using underclocked CPU */
+ if (!freq_failsafe_off && max_multiplier != current_multiplier) {
+		printk(KERN_INFO "eps: Your processor is running at a different "
+			"frequency than its maximum. Aborting.\n");
+ printk(KERN_INFO "eps: You can use freq_failsafe_off option "
+ "to disable this check.\n");
+ return -EINVAL;
+ }
+ if (!voltage_failsafe_off && max_voltage != current_voltage) {
+		printk(KERN_INFO "eps: Your processor is running at a different "
+			"voltage than its maximum. Aborting.\n");
+ printk(KERN_INFO "eps: You can use voltage_failsafe_off "
+ "option to disable this check.\n");
+ return -EINVAL;
+ }
+
/* Calc FSB speed */
fsb = cpu_khz / current_multiplier;
+
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+ /* Check for ACPI processor speed limit */
+ if (!ignore_acpi_limit && !eps_acpi_init()) {
+ if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
+ printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n",
+ limit/1000000,
+ (limit%1000000)/10000);
+ eps_acpi_exit(policy);
+ /* Check if max_multiplier is in BIOS limits */
+ if (limit && max_multiplier * fsb > limit) {
+ printk(KERN_INFO "eps: Aborting.\n");
+ return -EINVAL;
+ }
+ }
+ }
+#endif
+
+	/* Allow the user to set a lower maximum voltage than that reported
+	 * by the processor */
+ if (brand == EPS_BRAND_C7M && set_max_voltage) {
+ u32 v;
+
+ /* Change mV to something hardware can use */
+ v = (set_max_voltage - 700) / 16;
+ /* Check if voltage is within limits */
+ if (v >= min_voltage && v <= max_voltage) {
+ printk(KERN_INFO "eps: Setting %dmV as maximum.\n",
+ v * 16 + 700);
+ max_voltage = v;
+ }
+ }
+
/* Calc number of p-states supported */
if (brand == EPS_BRAND_C7M)
states = max_multiplier - min_multiplier + 1;
@@ -265,6 +372,9 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Copy basic values */
centaur->fsb = fsb;
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+ centaur->bios_limit = limit;
+#endif
/* Fill frequency and MSR value table */
f_table = &centaur->freq_table[0];
@@ -303,17 +413,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
static int eps_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- struct eps_cpu_data *centaur;
- u32 lo, hi;
- if (eps_cpu[cpu] == NULL)
- return -ENODEV;
- centaur = eps_cpu[cpu];
-
- /* Get max frequency */
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- /* Set max frequency */
- eps_set_state(centaur, cpu, hi & 0xffff);
/* Bye */
cpufreq_frequency_table_put_attr(policy->cpu);
kfree(eps_cpu[cpu]);
@@ -359,6 +459,19 @@ static void __exit eps_exit(void)
cpufreq_unregister_driver(&eps_driver);
}
+/* Allow the user to overclock the machine or to switch to a higher frequency
+ * after unloading the module */
+module_param(freq_failsafe_off, int, 0644);
+MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
+module_param(voltage_failsafe_off, int, 0644);
+MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+module_param(ignore_acpi_limit, int, 0644);
+MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
+#endif
+module_param(set_max_voltage, int, 0644);
+MODULE_PARM_DESC(set_max_voltage, "Set maximum CPU voltage (mV) C7-M only");
+
MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>");
MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index b7c3a84c4cf..ab9741fab92 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
@@ -36,6 +38,10 @@ static struct regulator *int_regulator;
static struct cpufreq_freqs freqs;
static unsigned int memtype;
+static unsigned int locking_frequency;
+static bool frequency_locked;
+static DEFINE_MUTEX(cpufreq_lock);
+
enum exynos4_memory_type {
DDR2 = 4,
LPDDR2,
@@ -405,22 +411,32 @@ static int exynos4_target(struct cpufreq_policy *policy,
{
unsigned int index, old_index;
unsigned int arm_volt, int_volt;
+ int err = -EINVAL;
freqs.old = exynos4_getspeed(policy->cpu);
+ mutex_lock(&cpufreq_lock);
+
+ if (frequency_locked && target_freq != locking_frequency) {
+ err = -EAGAIN;
+ goto out;
+ }
+
if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
freqs.old, relation, &old_index))
- return -EINVAL;
+ goto out;
if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
target_freq, relation, &index))
- return -EINVAL;
+ goto out;
+
+ err = 0;
freqs.new = exynos4_freq_table[index].frequency;
freqs.cpu = policy->cpu;
if (freqs.new == freqs.old)
- return 0;
+ goto out;
/* get the voltage value */
arm_volt = exynos4_volt_table[index].arm_volt;
@@ -447,10 +463,16 @@ static int exynos4_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- return 0;
+out:
+ mutex_unlock(&cpufreq_lock);
+ return err;
}
#ifdef CONFIG_PM
+/*
+ * These suspend/resume callbacks are used as syscore_ops; at that
+ * stage it is already too late to set regulator voltages.
+ */
static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
@@ -462,8 +484,82 @@ static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
}
#endif
+/**
+ * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
+ * context
+ * @notifier: registered notifier block
+ * @pm_event: PM_SUSPEND_PREPARE or PM_POST_SUSPEND
+ * @v: unused
+ *
+ * While frequency_locked == true, target() ignores every frequency but
+ * locking_frequency. The locking_frequency value is the initial frequency,
+ * which is set by the bootloader. In order to eliminate possible
+ * inconsistency in clock values, we save and restore frequencies during
+ * suspend and resume and block CPUFREQ activities. Note that the standard
+ * suspend/resume cannot be used as they are too deep (syscore_ops) for
+ * regulator actions.
+ */
+static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *v)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+ static unsigned int saved_frequency;
+ unsigned int temp;
+
+ mutex_lock(&cpufreq_lock);
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ if (frequency_locked)
+ goto out;
+ frequency_locked = true;
+
+ if (locking_frequency) {
+ saved_frequency = exynos4_getspeed(0);
+
+ mutex_unlock(&cpufreq_lock);
+ exynos4_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+ }
+
+ break;
+ case PM_POST_SUSPEND:
+
+ if (saved_frequency) {
+ /*
+ * While frequency_locked, only locking_frequency
+ * is valid for target(). In order to use
+ * saved_frequency while keeping frequency_locked,
+ * we temporarily overwrite locking_frequency.
+ */
+ temp = locking_frequency;
+ locking_frequency = saved_frequency;
+
+ mutex_unlock(&cpufreq_lock);
+ exynos4_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+
+ locking_frequency = temp;
+ }
+
+ frequency_locked = false;
+ break;
+ }
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos4_cpufreq_nb = {
+ .notifier_call = exynos4_cpufreq_pm_notifier,
+};
+
static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ int ret;
+
policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
@@ -479,16 +575,35 @@ static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
*/
cpumask_setall(policy->cpus);
- return cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
+ if (ret)
+ return ret;
+
+ cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
}
+static struct freq_attr *exynos4_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
static struct cpufreq_driver exynos4_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos4_verify_speed,
.target = exynos4_target,
.get = exynos4_getspeed,
.init = exynos4_cpufreq_cpu_init,
+ .exit = exynos4_cpufreq_cpu_exit,
.name = "exynos4_cpufreq",
+ .attr = exynos4_cpufreq_attr,
#ifdef CONFIG_PM
.suspend = exynos4_cpufreq_suspend,
.resume = exynos4_cpufreq_resume,
@@ -501,6 +616,8 @@ static int __init exynos4_cpufreq_init(void)
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
+ locking_frequency = exynos4_getspeed(0);
+
moutcore = clk_get(NULL, "moutcore");
if (IS_ERR(moutcore))
goto out;
@@ -540,6 +657,8 @@ static int __init exynos4_cpufreq_init(void)
printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
}
+ register_pm_notifier(&exynos4_cpufreq_nb);
+
return cpufreq_register_driver(&exynos4_driver);
out:
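A minimal standalone model of the suspend-time gating added above: while frequency_locked is set, only locking_frequency is accepted and any other request gets -EAGAIN. The mutex, the notifier plumbing and the real cpufreq calls are deliberately left out, and all frequency values are invented.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int locking_frequency = 1200000; /* boot freq, kHz (example) */
    static unsigned int current_frequency = 1200000;
    static bool frequency_locked;

    /* Gate mirroring the new check at the top of exynos4_target(). */
    static int model_target(unsigned int target_khz)
    {
            if (frequency_locked && target_khz != locking_frequency)
                    return -EAGAIN;         /* refuse scaling while suspending */
            current_frequency = target_khz;
            return 0;
    }

    int main(void)
    {
            model_target(800000);           /* normal DVFS request succeeds */
            frequency_locked = true;        /* as done on PM_SUSPEND_PREPARE */
            printf("locked, other freq : %d\n", model_target(1000000));
            printf("locked, locking freq: %d\n", model_target(locking_frequency));
            frequency_locked = false;       /* as done on PM_POST_SUSPEND */
            printf("current: %u kHz\n", current_frequency);
            return 0;
    }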
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index b8d1d205e1e..3475f65aeec 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
static struct clk *armclk;
static struct regulator *vddarm;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0df01411009..06ce2680d00 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -17,6 +17,7 @@
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
+#include <linux/module.h>
#include <trace/events/power.h>
#include "cpuidle.h"
@@ -61,8 +62,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+ struct cpuidle_driver *drv = cpuidle_get_driver();
struct cpuidle_state *target_state;
- int next_state;
+ int next_state, entered_state;
if (off)
return -ENODEV;
@@ -83,45 +85,36 @@ int cpuidle_idle_call(void)
hrtimer_peek_ahead_timers();
#endif
- /*
- * Call the device's prepare function before calling the
- * governor's select function. ->prepare gives the device's
- * cpuidle driver a chance to update any dynamic information
- * of its cpuidle states for the current idle period, e.g.
- * state availability, latencies, residencies, etc.
- */
- if (dev->prepare)
- dev->prepare(dev);
-
/* ask the governor for the next state */
- next_state = cpuidle_curr_governor->select(dev);
+ next_state = cpuidle_curr_governor->select(drv, dev);
if (need_resched()) {
local_irq_enable();
return 0;
}
- target_state = &dev->states[next_state];
-
- /* enter the state and update stats */
- dev->last_state = target_state;
+ target_state = &drv->states[next_state];
trace_power_start(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle(next_state, dev->cpu);
- dev->last_residency = target_state->enter(dev, target_state);
+ entered_state = target_state->enter(dev, drv, next_state);
trace_power_end(dev->cpu);
trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
- if (dev->last_state)
- target_state = dev->last_state;
-
- target_state->time += (unsigned long long)dev->last_residency;
- target_state->usage++;
+ if (entered_state >= 0) {
+ /* Update cpuidle counters */
+ /* This can be moved to within driver enter routine
+ * but that results in multiple copies of same code.
+ */
+ dev->states_usage[entered_state].time +=
+ (unsigned long long)dev->last_residency;
+ dev->states_usage[entered_state].usage++;
+ }
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
- cpuidle_curr_governor->reflect(dev);
+ cpuidle_curr_governor->reflect(dev, entered_state);
return 0;
}
@@ -172,11 +165,11 @@ void cpuidle_resume_and_unlock(void)
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+static int poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
ktime_t t1, t2;
s64 diff;
- int ret;
t1 = ktime_get();
local_irq_enable();
@@ -188,15 +181,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
if (diff > INT_MAX)
diff = INT_MAX;
- ret = (int) diff;
- return ret;
+ dev->last_residency = (int) diff;
+
+ return index;
}
-static void poll_idle_init(struct cpuidle_device *dev)
+static void poll_idle_init(struct cpuidle_driver *drv)
{
- struct cpuidle_state *state = &dev->states[0];
-
- cpuidle_set_statedata(state, NULL);
+ struct cpuidle_state *state = &drv->states[0];
snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
@@ -207,7 +199,7 @@ static void poll_idle_init(struct cpuidle_device *dev)
state->enter = poll_idle;
}
#else
-static void poll_idle_init(struct cpuidle_device *dev) {}
+static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
/**
@@ -234,21 +226,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return ret;
}
- poll_idle_init(dev);
+ poll_idle_init(cpuidle_get_driver());
if ((ret = cpuidle_add_state_sysfs(dev)))
return ret;
if (cpuidle_curr_governor->enable &&
- (ret = cpuidle_curr_governor->enable(dev)))
+ (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
goto fail_sysfs;
for (i = 0; i < dev->state_count; i++) {
- dev->states[i].usage = 0;
- dev->states[i].time = 0;
+ dev->states_usage[i].usage = 0;
+ dev->states_usage[i].time = 0;
}
dev->last_residency = 0;
- dev->last_state = NULL;
smp_wmb();
@@ -282,7 +273,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
dev->enabled = 0;
if (cpuidle_curr_governor->disable)
- cpuidle_curr_governor->disable(dev);
+ cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
cpuidle_remove_state_sysfs(dev);
enabled_devices--;
@@ -310,26 +301,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
init_completion(&dev->kobj_unregister);
- /*
- * cpuidle driver should set the dev->power_specified bit
- * before registering the device if the driver provides
- * power_usage numbers.
- *
- * For those devices whose ->power_specified is not set,
- * we fill in power_usage with decreasing values as the
- * cpuidle code has an implicit assumption that state Cn
- * uses less power than C(n-1).
- *
- * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
- * an power value of -1. So we use -2, -3, etc, for other
- * c-states.
- */
- if (!dev->power_specified) {
- int i;
- for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
- dev->states[i].power_usage = -1 - i;
- }
-
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
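The core of the cpuidle.c change is that ->enter() now returns the index of the state actually entered and writes the residency into dev->last_residency, so the counters are charged to that state rather than to the governor's pick. A small self-contained sketch of just that bookkeeping, with a fake enter routine and made-up numbers:

    #include <stdio.h>

    struct state_usage {
            unsigned long long time;
            unsigned long long usage;
    };

    static struct state_usage states_usage[4];
    static int last_residency;

    /* Fake ->enter(): pretend the hardware demoted the request to C1. */
    static int fake_enter(int requested_index)
    {
            (void)requested_index;
            last_residency = 150;   /* microseconds actually idle (made up) */
            return 1;               /* index of the state really entered */
    }

    int main(void)
    {
            int next_state = 3;                     /* governor's pick */
            int entered_state = fake_enter(next_state);

            if (entered_state >= 0) {
                    states_usage[entered_state].time += last_residency;
                    states_usage[entered_state].usage++;
            }
            printf("C%d: usage=%llu time=%llu\n", entered_state,
                   states_usage[entered_state].usage,
                   states_usage[entered_state].time);
            return 0;
    }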
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3f7e3cedd13..284d7af5a9c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -17,6 +17,30 @@
static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
+static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+ int i;
+ /*
+ * cpuidle driver should set the drv->power_specified bit
+ * before registering if the driver provides
+ * power_usage numbers.
+ *
+ * If power_specified is not set,
+ * we fill in power_usage with decreasing values as the
+ * cpuidle code has an implicit assumption that state Cn
+ * uses less power than C(n-1).
+ *
+ * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
+ * an power value of -1. So we use -2, -3, etc, for other
+ * c-states.
+ */
+ if (!drv->power_specified) {
+ for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+ drv->states[i].power_usage = -1 - i;
+ }
+}
+
+
/**
* cpuidle_register_driver - registers a driver
* @drv: the driver
@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
spin_unlock(&cpuidle_driver_lock);
return -EBUSY;
}
+ __cpuidle_register_driver(drv);
cpuidle_curr_driver = drv;
spin_unlock(&cpuidle_driver_lock);
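The helper moved into driver.c fills in default power_usage values at driver registration instead of device registration. A standalone illustration of the fill loop, using an example state count and assuming CPUIDLE_DRIVER_STATE_START == 1:

    #include <stdio.h>

    #define CPUIDLE_DRIVER_STATE_START 1    /* example: 1 when a poll state exists */

    int main(void)
    {
            int power_usage[5] = { -1 };    /* the poll state is already -1 */
            int state_count = 5;
            int i;

            for (i = CPUIDLE_DRIVER_STATE_START; i < state_count; i++)
                    power_usage[i] = -1 - i;        /* -2, -3, -4, -5 */

            for (i = 0; i < state_count; i++)
                    printf("C%d power_usage=%d\n", i, power_usage[i]);
            return 0;
    }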
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index f62fde21e96..b6a09ea859b 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/jiffies.h>
#include <asm/io.h>
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
/**
* ladder_select_state - selects the next state to enter
+ * @drv: cpuidle driver
* @dev: the CPU
*/
-static int ladder_select_state(struct cpuidle_device *dev)
+static int ladder_select_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
{
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
last_state = &ldev->states[last_idx];
- if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
- last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
+ if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
+ last_residency = cpuidle_get_last_residency(dev) - \
+ drv->states[last_idx].exit_latency;
+ }
else
last_residency = last_state->threshold.promotion_time + 1;
/* consider promotion */
- if (last_idx < dev->state_count - 1 &&
+ if (last_idx < drv->state_count - 1 &&
last_residency > last_state->threshold.promotion_time &&
- dev->states[last_idx + 1].exit_latency <= latency_req) {
+ drv->states[last_idx + 1].exit_latency <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
/* consider demotion */
if (last_idx > CPUIDLE_DRIVER_STATE_START &&
- dev->states[last_idx].exit_latency > latency_req) {
+ drv->states[last_idx].exit_latency > latency_req) {
int i;
for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
- if (dev->states[i].exit_latency <= latency_req)
+ if (drv->states[i].exit_latency <= latency_req)
break;
}
ladder_do_selection(ldev, last_idx, i);
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
/**
* ladder_enable_device - setup for the governor
+ * @drv: cpuidle driver
* @dev: the CPU
*/
-static int ladder_enable_device(struct cpuidle_device *dev)
+static int ladder_enable_device(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
{
int i;
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev)
ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
- for (i = 0; i < dev->state_count; i++) {
- state = &dev->states[i];
+ for (i = 0; i < drv->state_count; i++) {
+ state = &drv->states[i];
lstate = &ldev->states[i];
lstate->stats.promotion_count = 0;
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
lstate->threshold.promotion_count = PROMOTION_COUNT;
lstate->threshold.demotion_count = DEMOTION_COUNT;
- if (i < dev->state_count - 1)
+ if (i < drv->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency;
if (i > 0)
lstate->threshold.demotion_time = state->exit_latency;
@@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev)
return 0;
}
+/**
+ * ladder_reflect - update the correct last_state_idx
+ * @dev: the CPU
+ * @index: the index of the state actually entered
+ */
+static void ladder_reflect(struct cpuidle_device *dev, int index)
+{
+ struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+ if (index > 0)
+ ldev->last_state_idx = index;
+}
+
static struct cpuidle_governor ladder_governor = {
.name = "ladder",
.rating = 10,
.enable = ladder_enable_device,
.select = ladder_select_state,
+ .reflect = ladder_reflect,
.owner = THIS_MODULE,
};
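With the states now read from the driver, the ladder decision itself is unchanged: promote when the measured residency exceeds the promotion threshold and the next state's exit latency still meets the PM QoS limit; demote when the current state's exit latency does not. A compact model of that rule, with the promotion/demotion counters omitted and all numbers invented:

    #include <stdio.h>

    struct demo_state { unsigned int exit_latency; };      /* us */

    int main(void)
    {
            struct demo_state states[] = { {0}, {2}, {10}, {80} };
            int state_count = 4, last_idx = 1;
            unsigned int latency_req = 50;          /* PM QoS limit, us */
            unsigned int last_residency = 500;      /* measured idle time, us */
            unsigned int promotion_time = 100;      /* promotion threshold, us */

            if (last_idx < state_count - 1 &&
                last_residency > promotion_time &&
                states[last_idx + 1].exit_latency <= latency_req)
                    last_idx++;                     /* promote */
            else if (states[last_idx].exit_latency > latency_req)
                    last_idx--;                     /* demote */

            printf("next state index: %d\n", last_idx);     /* prints 2 */
            return 0;
    }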
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 3600f1955e4..ad0952601ae 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,6 +19,7 @@
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
+#include <linux/module.h>
#define BUCKETS 12
#define INTERVALS 8
@@ -182,7 +183,7 @@ static inline int performance_multiplier(void)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
-static void menu_update(struct cpuidle_device *dev);
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
@@ -228,9 +229,10 @@ static void detect_repeating_patterns(struct menu_device *data)
/**
* menu_select - selects the next idle state to enter
+ * @drv: cpuidle driver containing state data
* @dev: the CPU
*/
-static int menu_select(struct cpuidle_device *dev)
+static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -240,7 +242,7 @@ static int menu_select(struct cpuidle_device *dev)
struct timespec t;
if (data->needs_update) {
- menu_update(dev);
+ menu_update(drv, dev);
data->needs_update = 0;
}
@@ -285,11 +287,9 @@ static int menu_select(struct cpuidle_device *dev)
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
- for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
- struct cpuidle_state *s = &dev->states[i];
+ for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
- if (s->flags & CPUIDLE_FLAG_IGNORE)
- continue;
if (s->target_residency > data->predicted_us)
continue;
if (s->exit_latency > latency_req)
@@ -310,26 +310,30 @@ static int menu_select(struct cpuidle_device *dev)
/**
* menu_reflect - records that data structures need update
* @dev: the CPU
+ * @index: the index of the state actually entered
*
* NOTE: it's important to be fast here because this operation will add to
* the overall exit latency.
*/
-static void menu_reflect(struct cpuidle_device *dev)
+static void menu_reflect(struct cpuidle_device *dev, int index)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
- data->needs_update = 1;
+ data->last_state_idx = index;
+ if (index >= 0)
+ data->needs_update = 1;
}
/**
* menu_update - attempts to guess what happened after entry
+ * @drv: cpuidle driver containing state data
* @dev: the CPU
*/
-static void menu_update(struct cpuidle_device *dev)
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
unsigned int last_idle_us = cpuidle_get_last_residency(dev);
- struct cpuidle_state *target = &dev->states[last_idx];
+ struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
u64 new_factor;
@@ -383,9 +387,11 @@ static void menu_update(struct cpuidle_device *dev)
/**
* menu_enable_device - scans a CPU's states and does setup
+ * @drv: cpuidle driver
* @dev: the CPU
*/
-static int menu_enable_device(struct cpuidle_device *dev)
+static int menu_enable_device(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
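After the change, menu_select() walks drv->states[] instead of dev->states[], but the selection criterion is the same: keep the deepest state whose target_residency fits the predicted idle time and whose exit_latency fits the latency requirement. A trimmed standalone model of that loop; the power_usage comparison and the performance multiplier are left out and the state data is fabricated:

    #include <stdio.h>

    struct demo_state {
            unsigned int target_residency;  /* us */
            unsigned int exit_latency;      /* us */
    };

    int main(void)
    {
            struct demo_state states[] = {
                    { 0, 0 }, { 2, 2 }, { 80, 10 }, { 800, 150 },
            };
            int state_count = 4, i, choice = 0;
            unsigned int predicted_us = 300, latency_req = 100;

            for (i = 1; i < state_count; i++) {
                    if (states[i].target_residency > predicted_us)
                            continue;
                    if (states[i].exit_latency > latency_req)
                            continue;
                    choice = i;
            }
            printf("selected C%d\n", choice);       /* prints C2 */
            return 0;
    }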
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index be7917ec40c..1e756e160dc 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = {
struct cpuidle_state_attr {
struct attribute attr;
- ssize_t (*show)(struct cpuidle_state *, char *);
+ ssize_t (*show)(struct cpuidle_state *, \
+ struct cpuidle_state_usage *, char *);
ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
};
@@ -224,19 +225,22 @@ struct cpuidle_state_attr {
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
#define define_show_state_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, char *buf) \
{ \
return sprintf(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, char *buf) \
{ \
- return sprintf(buf, "%llu\n", state->_name);\
+ return sprintf(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, char *buf) \
{ \
if (state->_name[0] == '\0')\
return sprintf(buf, "<null>\n");\
@@ -269,16 +273,18 @@ static struct attribute *cpuidle_state_default_attrs[] = {
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
+#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject * kobj,
struct attribute * attr ,char * buf)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
+ struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
if (cattr->show)
- ret = cattr->show(state, buf);
+ ret = cattr->show(state, state_usage, buf);
return ret;
}
@@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
+ struct cpuidle_driver *drv = cpuidle_get_driver();
/* state statistics */
for (i = 0; i < device->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
- kobj->state = &device->states[i];
+ kobj->state = &drv->states[i];
+ kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
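The sysfs change splits what a show callback reads: static attributes still come from the driver-owned cpuidle_state, while the usage and time counters come from the device-owned cpuidle_state_usage. A rough sketch of that split with trimmed stand-in structs and invented values:

    #include <stdio.h>

    struct demo_state { const char *name; unsigned int exit_latency; };
    struct demo_state_usage { unsigned long long usage, time; };

    /* Counter attributes read only the per-device usage struct. */
    static int show_usage(const struct demo_state *s,
                          const struct demo_state_usage *su, char *buf)
    {
            (void)s;        /* static state data unused for counters */
            return sprintf(buf, "%llu\n", su->usage);
    }

    int main(void)
    {
            struct demo_state c1 = { "C1", 2 };
            struct demo_state_usage c1_usage = { 42, 12345 };
            char buf[32];

            show_usage(&c1, &c1_usage, buf);
            printf("%s usage: %s", c1.name, buf);
            return 0;
    }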
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e0b25de1e33..6d16b4b0d7a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -200,6 +200,7 @@ config CRYPTO_DEV_HIFN_795X
select CRYPTO_BLKCIPHER
select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
depends on PCI
+ depends on !ARCH_DMA_ADDR_T_64BIT
help
This option allows you to have support for HIFN 795x crypto adapters.
@@ -266,7 +267,7 @@ config CRYPTO_DEV_OMAP_AES
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
- depends on ARCH_PICOXCELL
+ depends on ARCH_PICOXCELL && HAVE_CLK
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_ALGAPI
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a84250a5dd5..fe765f49de5 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2744,10 +2744,8 @@ static int __init hifn_init(void)
unsigned int freq;
int err;
- if (sizeof(dma_addr_t) > 4) {
- printk(KERN_INFO "HIFN supports only 32-bit addresses.\n");
- return -EINVAL;
- }
+ /* HIFN supports only 32-bit addresses */
+ BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
if (strncmp(hifn_pll_ref, "ext", 3) &&
strncmp(hifn_pll_ref, "pci", 3)) {
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 3cf303ee3fe..5c6f56f2144 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index d0183ddb307..8944dabc0e3 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1006,9 +1006,9 @@ static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags);
+out:
put_cpu();
-out:
n2_chunk_complete(req, NULL);
return err;
}
@@ -1096,9 +1096,9 @@ static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags);
+out:
put_cpu();
-out:
n2_chunk_complete(req, err ? NULL : final_iv_addr);
return err;
}
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index db33d300aa2..29b9469f837 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -508,10 +508,8 @@ static int __init padlock_init(void)
int ret;
struct cpuinfo_x86 *c = &cpu_data(0);
- if (!cpu_has_xcrypt) {
- printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
+ if (!cpu_has_xcrypt)
return -ENODEV;
- }
if (!cpu_has_xcrypt_enabled) {
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 230b5b8cda1..a2b553eabbd 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
@@ -1241,8 +1242,8 @@ static void spacc_spacc_complete(unsigned long data)
spin_unlock_irqrestore(&engine->hw_lock, flags);
list_for_each_entry_safe(req, tmp, &completed, list) {
- req->complete(req);
list_del(&req->list);
+ req->complete(req);
}
}
@@ -1657,10 +1658,33 @@ static struct spacc_alg l2_engine_algs[] = {
},
};
-static int __devinit spacc_probe(struct platform_device *pdev,
- unsigned max_ctxs, size_t cipher_pg_sz,
- size_t hash_pg_sz, size_t fifo_sz,
- struct spacc_alg *algs, size_t num_algs)
+#ifdef CONFIG_OF
+static const struct of_device_id spacc_of_id_table[] = {
+ { .compatible = "picochip,spacc-ipsec" },
+ { .compatible = "picochip,spacc-l2" },
+ {}
+};
+#else /* CONFIG_OF */
+#define spacc_of_id_table NULL
+#endif /* CONFIG_OF */
+
+static bool spacc_is_compatible(struct platform_device *pdev,
+ const char *spacc_type)
+{
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+
+ if (platid && !strcmp(platid->name, spacc_type))
+ return true;
+
+#ifdef CONFIG_OF
+ if (of_device_is_compatible(pdev->dev.of_node, spacc_type))
+ return true;
+#endif /* CONFIG_OF */
+
+ return false;
+}
+
+static int __devinit spacc_probe(struct platform_device *pdev)
{
int i, err, ret = -EINVAL;
struct resource *mem, *irq;
@@ -1669,13 +1693,25 @@ static int __devinit spacc_probe(struct platform_device *pdev,
if (!engine)
return -ENOMEM;
- engine->max_ctxs = max_ctxs;
- engine->cipher_pg_sz = cipher_pg_sz;
- engine->hash_pg_sz = hash_pg_sz;
- engine->fifo_sz = fifo_sz;
- engine->algs = algs;
- engine->num_algs = num_algs;
- engine->name = dev_name(&pdev->dev);
+ if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) {
+ engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
+ engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
+ engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
+ engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
+ engine->algs = ipsec_engine_algs;
+ engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
+ } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
+ engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
+ engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
+ engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
+ engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
+ engine->algs = l2_engine_algs;
+ engine->num_algs = ARRAY_SIZE(l2_engine_algs);
+ } else {
+ return -EINVAL;
+ }
+
+ engine->name = dev_name(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -1711,7 +1747,7 @@ static int __devinit spacc_probe(struct platform_device *pdev,
spin_lock_init(&engine->hw_lock);
- engine->clk = clk_get(&pdev->dev, NULL);
+ engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
@@ -1800,72 +1836,33 @@ static int __devexit spacc_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit ipsec_probe(struct platform_device *pdev)
-{
- return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
- SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
- SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
- SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
- ARRAY_SIZE(ipsec_engine_algs));
-}
-
-static struct platform_driver ipsec_driver = {
- .probe = ipsec_probe,
- .remove = __devexit_p(spacc_remove),
- .driver = {
- .name = "picoxcell-ipsec",
-#ifdef CONFIG_PM
- .pm = &spacc_pm_ops,
-#endif /* CONFIG_PM */
- },
+static const struct platform_device_id spacc_id_table[] = {
+ { "picochip,spacc-ipsec", },
+ { "picochip,spacc-l2", },
};
-static int __devinit l2_probe(struct platform_device *pdev)
-{
- return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
- SPACC_CRYPTO_L2_CIPHER_PG_SZ,
- SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
- l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
-}
-
-static struct platform_driver l2_driver = {
- .probe = l2_probe,
+static struct platform_driver spacc_driver = {
+ .probe = spacc_probe,
.remove = __devexit_p(spacc_remove),
.driver = {
- .name = "picoxcell-l2",
+ .name = "picochip,spacc",
#ifdef CONFIG_PM
.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
+ .of_match_table = spacc_of_id_table,
},
+ .id_table = spacc_id_table,
};
static int __init spacc_init(void)
{
- int ret = platform_driver_register(&ipsec_driver);
- if (ret) {
- pr_err("failed to register ipsec spacc driver");
- goto out;
- }
-
- ret = platform_driver_register(&l2_driver);
- if (ret) {
- pr_err("failed to register l2 spacc driver");
- goto l2_failed;
- }
-
- return 0;
-
-l2_failed:
- platform_driver_unregister(&ipsec_driver);
-out:
- return ret;
+ return platform_driver_register(&spacc_driver);
}
module_init(spacc_init);
static void __exit spacc_exit(void)
{
- platform_driver_unregister(&ipsec_driver);
- platform_driver_unregister(&l2_driver);
+ platform_driver_unregister(&spacc_driver);
}
module_exit(spacc_exit);
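The two platform drivers collapse into one probe that derives the engine geometry from the matched id/compatible string. A userspace model of that dispatch; the strings match the tables above, but the numeric sizes only stand in for the SPACC_CRYPTO_* macros and are not the real values:

    #include <stdio.h>
    #include <string.h>

    struct engine_cfg { unsigned int max_ctxs, fifo_sz; };

    static int pick_engine(const char *name, struct engine_cfg *cfg)
    {
            if (!strcmp(name, "picochip,spacc-ipsec")) {
                    cfg->max_ctxs = 32;     /* stand-in for SPACC_CRYPTO_IPSEC_MAX_CTXS */
                    cfg->fifo_sz = 32;      /* stand-in for SPACC_CRYPTO_IPSEC_FIFO_SZ */
            } else if (!strcmp(name, "picochip,spacc-l2")) {
                    cfg->max_ctxs = 128;    /* stand-in for SPACC_CRYPTO_L2_MAX_CTXS */
                    cfg->fifo_sz = 8;       /* stand-in for SPACC_CRYPTO_L2_FIFO_SZ */
            } else {
                    return -1;              /* mirrors the -EINVAL path in spacc_probe() */
            }
            return 0;
    }

    int main(void)
    {
            struct engine_cfg cfg;

            if (!pick_engine("picochip,spacc-l2", &cfg))
                    printf("l2 engine: %u contexts, fifo %u\n",
                           cfg.max_ctxs, cfg.fifo_sz);
            return 0;
    }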
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 8a0bb417aa1..dbe76b5df9c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -416,7 +416,7 @@ static void talitos_done(unsigned long data)
/*
* locate current (offending) descriptor
*/
-static struct talitos_desc *current_desc(struct device *dev, int ch)
+static u32 current_desc_hdr(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int tail = priv->chan[ch].tail;
@@ -428,23 +428,25 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
tail = (tail + 1) & (priv->fifo_len - 1);
if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n");
- return NULL;
+ return 0;
}
}
- return priv->chan[ch].fifo[tail].desc;
+ return priv->chan[ch].fifo[tail].desc->hdr;
}
/*
* user diagnostics; report root cause of error based on execution unit status
*/
-static void report_eu_error(struct device *dev, int ch,
- struct talitos_desc *desc)
+static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int i;
- switch (desc->hdr & DESC_HDR_SEL0_MASK) {
+ if (!desc_hdr)
+ desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
+
+ switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
dev_err(dev, "AFEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_AFEUISR),
@@ -488,7 +490,7 @@ static void report_eu_error(struct device *dev, int ch,
break;
}
- switch (desc->hdr & DESC_HDR_SEL1_MASK) {
+ switch (desc_hdr & DESC_HDR_SEL1_MASK) {
case DESC_HDR_SEL1_MDEUA:
case DESC_HDR_SEL1_MDEUB:
dev_err(dev, "MDEUISR 0x%08x_%08x\n",
@@ -550,7 +552,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_IEU)
dev_err(dev, "invalid execution unit error\n");
if (v_lo & TALITOS_CCPSR_LO_EU)
- report_eu_error(dev, ch, current_desc(dev, ch));
+ report_eu_error(dev, ch, current_desc_hdr(dev, ch));
if (v_lo & TALITOS_CCPSR_LO_GB)
dev_err(dev, "gather boundary error\n");
if (v_lo & TALITOS_CCPSR_LO_GRL)
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 25ec0bb0519..bc6f5faa1e9 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,6 +28,7 @@
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DCA_VERSION "1.12.1"
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 5e8f335e6f6..591b6597c00 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/dca.h>
#include <linux/gfp.h>
+#include <linux/export.h>
static struct class *dca_class;
static struct idr dca_idr;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2e3b3d38c46..ab8f469f5cf 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
config PL330_DMA
tristate "DMA API Driver for PL330"
select DMA_ENGINE
- depends on PL330
+ depends on ARM_AMBA
+ select PL330
help
Select if your platform has one or more PL330 DMACs.
You need to provide platform specific settings via
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index be21e3f138a..b7cbd1ab1db 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,32 +66,29 @@
* after the final transfer signalled by LBREQ or LSREQ. The DMAC
* will then move to the next LLI entry.
*
- * Only the former works sanely with scatter lists, so we only implement
- * the DMAC flow control method. However, peripherals which use the LBREQ
- * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
- * these hardware restrictions prevents them from using scatter DMA.
- *
* Global TODO:
* - Break out common code from arch/arm/mach-s3c64xx and share
*/
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
-
+#include <linux/slab.h>
#include <asm/hardware/pl080.h>
#define DRIVER_NAME "pl08xdmac"
+static struct amba_driver pl08x_amba_driver;
+
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
@@ -126,7 +123,8 @@ struct pl08x_lli {
* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
* @pool_ctr: counter of LLIs in the pool
- * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
+ * fetches
* @mem_buses: set to indicate memory transfers on AHB2.
* @lock: a spinlock for this struct
*/
@@ -149,14 +147,6 @@ struct pl08x_driver_data {
* PL08X specific defines
*/
-/*
- * Memory boundaries: the manual for PL08x says that the controller
- * cannot read past a 1KiB boundary, so these defines are used to
- * create transfer LLIs that do not cross such boundaries.
- */
-#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
-#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
-
/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000
@@ -272,7 +262,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
writel(val, ch->base + PL080_CH_CONFIG);
}
-
/*
* pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
* clears any pending interrupt status. This should not be used for
@@ -363,7 +352,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
if (!list_empty(&plchan->pend_list)) {
struct pl08x_txd *txdi;
list_for_each_entry(txdi, &plchan->pend_list, node) {
- bytes += txdi->len;
+ struct pl08x_sg *dsg;
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
}
}
@@ -407,6 +398,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
return NULL;
}
+ pm_runtime_get_sync(&pl08x->adev->dev);
return ch;
}
@@ -420,6 +412,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
/* Stop the channel and clear its interrupts */
pl08x_terminate_phy_chan(pl08x, ch);
+ pm_runtime_put(&pl08x->adev->dev);
+
/* Mark it as free */
ch->serving = NULL;
spin_unlock_irqrestore(&ch->lock, flags);
@@ -499,36 +493,30 @@ struct pl08x_lli_build_data {
};
/*
- * Autoselect a master bus to use for the transfer this prefers the
- * destination bus if both available if fixed address on one bus the
- * other will be chosen
+ * Autoselect a master bus to use for the transfer. The slave bus is the one
+ * chosen as the victim when src & dst are not similarly aligned, i.e. if,
+ * after aligning the master's address to the transfer width (by sending a
+ * few bytes byte by byte), the slave is still not aligned, its width is
+ * reduced to BYTE.
+ * - prefers the destination bus if both are available
+ * - prefers the bus with a fixed address (i.e. the peripheral)
*/
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
if (!(cctl & PL080_CONTROL_DST_INCR)) {
- *mbus = &bd->srcbus;
- *sbus = &bd->dstbus;
- } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
+ } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
} else {
- if (bd->dstbus.buswidth == 4) {
+ if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
- } else if (bd->srcbus.buswidth == 4) {
- *mbus = &bd->srcbus;
- *sbus = &bd->dstbus;
- } else if (bd->dstbus.buswidth == 2) {
- *mbus = &bd->dstbus;
- *sbus = &bd->srcbus;
- } else if (bd->srcbus.buswidth == 2) {
+ } else {
*mbus = &bd->srcbus;
*sbus = &bd->dstbus;
- } else {
- /* bd->srcbus.buswidth == 1 */
- *mbus = &bd->dstbus;
- *sbus = &bd->srcbus;
}
}
}
@@ -547,7 +535,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
llis_va[num_llis].cctl = cctl;
llis_va[num_llis].src = bd->srcbus.addr;
llis_va[num_llis].dst = bd->dstbus.addr;
- llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+ llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
+ sizeof(struct pl08x_lli);
llis_va[num_llis].lli |= bd->lli_bus;
if (cctl & PL080_CONTROL_SRC_INCR)
@@ -560,16 +549,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
bd->remainder -= len;
}
-/*
- * Return number of bytes to fill to boundary, or len.
- * This calculation works for any value of addr.
- */
-static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
+static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
+ u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
- size_t boundary_len = PL08X_BOUNDARY_SIZE -
- (addr & (PL08X_BOUNDARY_SIZE - 1));
-
- return min(boundary_len, len);
+ *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
+ pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+ (*total_bytes) += len;
}
/*
@@ -583,13 +568,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
struct pl08x_bus_data *mbus, *sbus;
struct pl08x_lli_build_data bd;
int num_llis = 0;
- u32 cctl;
- size_t max_bytes_per_lli;
- size_t total_bytes = 0;
+ u32 cctl, early_bytes = 0;
+ size_t max_bytes_per_lli, total_bytes;
struct pl08x_lli *llis_va;
+ struct pl08x_sg *dsg;
- txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
- &txd->llis_bus);
+ txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
if (!txd->llis_va) {
dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
return 0;
@@ -597,13 +581,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x->pool_ctr++;
- /* Get the default CCTL */
- cctl = txd->cctl;
-
bd.txd = txd;
- bd.srcbus.addr = txd->src_addr;
- bd.dstbus.addr = txd->dst_addr;
bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
+ cctl = txd->cctl;
/* Find maximum width of the source bus */
bd.srcbus.maxwidth =
@@ -615,215 +595,179 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
PL080_CONTROL_DWIDTH_SHIFT);
- /* Set up the bus widths to the maximum */
- bd.srcbus.buswidth = bd.srcbus.maxwidth;
- bd.dstbus.buswidth = bd.dstbus.maxwidth;
+ list_for_each_entry(dsg, &txd->dsg_list, node) {
+ total_bytes = 0;
+ cctl = txd->cctl;
- /*
- * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
- */
- max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
- PL080_CONTROL_TRANSFER_SIZE_MASK;
+ bd.srcbus.addr = dsg->src_addr;
+ bd.dstbus.addr = dsg->dst_addr;
+ bd.remainder = dsg->len;
+ bd.srcbus.buswidth = bd.srcbus.maxwidth;
+ bd.dstbus.buswidth = bd.dstbus.maxwidth;
- /* We need to count this down to zero */
- bd.remainder = txd->len;
+ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
- /*
- * Choose bus to align to
- * - prefers destination bus if both available
- * - if fixed address on one bus chooses other
- */
- pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
-
- dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
- bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
- bd.srcbus.buswidth,
- bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
- bd.dstbus.buswidth,
- bd.remainder, max_bytes_per_lli);
- dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
- mbus == &bd.srcbus ? "src" : "dst",
- sbus == &bd.srcbus ? "src" : "dst");
-
- if (txd->len < mbus->buswidth) {
- /* Less than a bus width available - send as single bytes */
- while (bd.remainder) {
- dev_vdbg(&pl08x->adev->dev,
- "%s single byte LLIs for a transfer of "
- "less than a bus width (remain 0x%08x)\n",
- __func__, bd.remainder);
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
- total_bytes++;
- }
- } else {
- /* Make one byte LLIs until master bus is aligned */
- while ((mbus->addr) % (mbus->buswidth)) {
- dev_vdbg(&pl08x->adev->dev,
- "%s adjustment lli for less than bus width "
- "(remain 0x%08x)\n",
- __func__, bd.remainder);
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
- total_bytes++;
- }
+ dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
+ bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ bd.srcbus.buswidth,
+ bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ bd.dstbus.buswidth,
+ bd.remainder);
+ dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+ mbus == &bd.srcbus ? "src" : "dst",
+ sbus == &bd.srcbus ? "src" : "dst");
/*
- * Master now aligned
- * - if slave is not then we must set its width down
+ * Zero length is only allowed if all these requirements are
+ * met:
+ * - flow controller is peripheral.
+ * - src.addr is aligned to src.width
+ * - dst.addr is aligned to dst.width
+ *
+ * sg_len == 1 should be true, as there can be two cases here:
+ *
+ * - Memory addresses are contiguous and are not scattered.
+ *   Here, only one sg will be passed by the client driver, with the
+ *   memory address and zero length. We pass this to the controller
+ *   and, after the transfer, it will receive the last burst request
+ *   from the peripheral and the transfer finishes.
+ *
+ * - Memory addresses are scattered and not contiguous.
+ *   Since the DMA controller doesn't know when an LLI's transfer is
+ *   over, it can't load the next LLI. So in this case there has to
+ *   be an assumption that only one LLI is supported. Thus, we can't
+ *   have scattered addresses.
*/
- if (sbus->addr % sbus->buswidth) {
- dev_dbg(&pl08x->adev->dev,
- "%s set down bus width to one byte\n",
- __func__);
+ if (!bd.remainder) {
+ u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+ PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+ (fc <= PL080_FLOW_SRC2DST_SRC))) {
+ dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+ __func__);
+ return 0;
+ }
+
+ if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+     (bd.dstbus.addr % bd.dstbus.buswidth)) {
+ dev_err(&pl08x->adev->dev,
+ "%s src & dst address must be aligned to src"
+ " & dst width if peripheral is flow controller",
+ __func__);
+ return 0;
+ }
- sbus->buswidth = 1;
+ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+ bd.dstbus.buswidth, 0);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+ break;
}
/*
- * Make largest possible LLIs until less than one bus
- * width left
+ * Send byte by byte for following cases
+ * - Less than a bus width available
+ * - until master bus is aligned
*/
- while (bd.remainder > (mbus->buswidth - 1)) {
- size_t lli_len, target_len, tsize, odd_bytes;
+ if (bd.remainder < mbus->buswidth)
+ early_bytes = bd.remainder;
+ else if ((mbus->addr) % (mbus->buswidth)) {
+ early_bytes = mbus->buswidth - (mbus->addr) %
+ (mbus->buswidth);
+ if ((bd.remainder - early_bytes) < mbus->buswidth)
+ early_bytes = bd.remainder;
+ }
+ if (early_bytes) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s byte width LLIs (remain 0x%08x)\n",
+ __func__, bd.remainder);
+ prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+ &total_bytes);
+ }
+
+ if (bd.remainder) {
/*
- * If enough left try to send max possible,
- * otherwise try to send the remainder
+ * Master now aligned
+ * - if slave is not then we must set its width down
*/
- target_len = min(bd.remainder, max_bytes_per_lli);
+ if (sbus->addr % sbus->buswidth) {
+ dev_dbg(&pl08x->adev->dev,
+ "%s set down bus width to one byte\n",
+ __func__);
+
+ sbus->buswidth = 1;
+ }
/*
- * Set bus lengths for incrementing buses to the
- * number of bytes which fill to next memory boundary,
- * limiting on the target length calculated above.
+ * Bytes transferred = tsize * src width, not
+ * MIN(buswidths)
*/
- if (cctl & PL080_CONTROL_SRC_INCR)
- bd.srcbus.fill_bytes =
- pl08x_pre_boundary(bd.srcbus.addr,
- target_len);
- else
- bd.srcbus.fill_bytes = target_len;
-
- if (cctl & PL080_CONTROL_DST_INCR)
- bd.dstbus.fill_bytes =
- pl08x_pre_boundary(bd.dstbus.addr,
- target_len);
- else
- bd.dstbus.fill_bytes = target_len;
-
- /* Find the nearest */
- lli_len = min(bd.srcbus.fill_bytes,
- bd.dstbus.fill_bytes);
-
- BUG_ON(lli_len > bd.remainder);
-
- if (lli_len <= 0) {
- dev_err(&pl08x->adev->dev,
- "%s lli_len is %zu, <= 0\n",
- __func__, lli_len);
- return 0;
- }
+ max_bytes_per_lli = bd.srcbus.buswidth *
+ PL080_CONTROL_TRANSFER_SIZE_MASK;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s max bytes per lli = %zu\n",
+ __func__, max_bytes_per_lli);
+
+ /*
+ * Make largest possible LLIs until less than one bus
+ * width left
+ */
+ while (bd.remainder > (mbus->buswidth - 1)) {
+ size_t lli_len, tsize, width;
- if (lli_len == target_len) {
- /*
- * Can send what we wanted.
- * Maintain alignment
- */
- lli_len = (lli_len/mbus->buswidth) *
- mbus->buswidth;
- odd_bytes = 0;
- } else {
/*
- * So now we know how many bytes to transfer
- * to get to the nearest boundary. The next
- * LLI will past the boundary. However, we
- * may be working to a boundary on the slave
- * bus. We need to ensure the master stays
- * aligned, and that we are working in
- * multiples of the bus widths.
+ * If enough left try to send max possible,
+ * otherwise try to send the remainder
*/
- odd_bytes = lli_len % mbus->buswidth;
- lli_len -= odd_bytes;
-
- }
+ lli_len = min(bd.remainder, max_bytes_per_lli);
- if (lli_len) {
/*
- * Check against minimum bus alignment:
- * Calculate actual transfer size in relation
- * to bus width an get a maximum remainder of
- * the smallest bus width - 1
+ * Check against maximum bus alignment:
+ * Calculate the actual transfer size in relation to the
+ * bus width and get a maximum remainder of the highest
+ * bus width - 1
*/
- /* FIXME: use round_down()? */
- tsize = lli_len / min(mbus->buswidth,
- sbus->buswidth);
- lli_len = tsize * min(mbus->buswidth,
- sbus->buswidth);
-
- if (target_len != lli_len) {
- dev_vdbg(&pl08x->adev->dev,
- "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
- __func__, target_len, lli_len, txd->len);
- }
-
- cctl = pl08x_cctl_bits(cctl,
- bd.srcbus.buswidth,
- bd.dstbus.buswidth,
- tsize);
+ width = max(mbus->buswidth, sbus->buswidth);
+ lli_len = (lli_len / width) * width;
+ tsize = lli_len / bd.srcbus.buswidth;
dev_vdbg(&pl08x->adev->dev,
- "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+ "%s fill lli with single lli chunk of "
+ "size 0x%08zx (remainder 0x%08zx)\n",
__func__, lli_len, bd.remainder);
+
+ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+ bd.dstbus.buswidth, tsize);
pl08x_fill_lli_for_desc(&bd, num_llis++,
- lli_len, cctl);
+ lli_len, cctl);
total_bytes += lli_len;
}
-
- if (odd_bytes) {
- /*
- * Creep past the boundary, maintaining
- * master alignment
- */
- int j;
- for (j = 0; (j < mbus->buswidth)
- && (bd.remainder); j++) {
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- dev_vdbg(&pl08x->adev->dev,
- "%s align with boundary, single byte (remain 0x%08zx)\n",
- __func__, bd.remainder);
- pl08x_fill_lli_for_desc(&bd,
- num_llis++, 1, cctl);
- total_bytes++;
- }
+ /*
+ * Send any odd bytes
+ */
+ if (bd.remainder) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s align with boundary, send odd bytes (remain %zu)\n",
+ __func__, bd.remainder);
+ prep_byte_width_lli(&bd, &cctl, bd.remainder,
+ num_llis++, &total_bytes);
}
}
- /*
- * Send any odd bytes
- */
- while (bd.remainder) {
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- dev_vdbg(&pl08x->adev->dev,
- "%s align with boundary, single odd byte (remain %zu)\n",
- __func__, bd.remainder);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
- total_bytes++;
+ if (total_bytes != dsg->len) {
+ dev_err(&pl08x->adev->dev,
+ "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
+ __func__, total_bytes, dsg->len);
+ return 0;
}
- }
- if (total_bytes != txd->len) {
- dev_err(&pl08x->adev->dev,
- "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
- __func__, total_bytes, txd->len);
- return 0;
- }
- if (num_llis >= MAX_NUM_TSFR_LLIS) {
- dev_err(&pl08x->adev->dev,
- "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
- __func__, (u32) MAX_NUM_TSFR_LLIS);
- return 0;
+ if (num_llis >= MAX_NUM_TSFR_LLIS) {
+ dev_err(&pl08x->adev->dev,
+ "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+ __func__, (u32) MAX_NUM_TSFR_LLIS);
+ return 0;
+ }
}
llis_va = txd->llis_va;
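The rewritten sizing logic above computes each LLI from the source bus width: bytes per LLI are tsize * src width, capped by the controller's transfer-count field, and the length is rounded down to the wider of the two bus widths so both stay aligned. A standalone walk-through of that arithmetic with example widths; the 0xfff mask is used here as a stand-in for PL080_CONTROL_TRANSFER_SIZE_MASK (the PL080's 12-bit transfer-count field):

    #include <stdio.h>

    #define PL080_CONTROL_TRANSFER_SIZE_MASK 0xfff  /* assumed 12-bit field */

    int main(void)
    {
            size_t remainder = 10000;       /* bytes left in this sg entry */
            size_t src_width = 4;
            size_t mbus_width = 4, sbus_width = 2;  /* after master alignment */

            size_t max_bytes_per_lli = src_width * PL080_CONTROL_TRANSFER_SIZE_MASK;
            size_t lli_len = remainder < max_bytes_per_lli ?
                             remainder : max_bytes_per_lli;
            size_t width = mbus_width > sbus_width ? mbus_width : sbus_width;

            lli_len = (lli_len / width) * width;    /* keep both buses aligned */
            printf("lli_len=%zu tsize=%zu\n", lli_len, lli_len / src_width);
            return 0;
    }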
@@ -856,11 +800,19 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
+ struct pl08x_sg *dsg, *_dsg;
+
/* Free the LLI */
- dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
+ if (txd->llis_va)
+ dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
pl08x->pool_ctr--;
+ list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+ list_del(&dsg->node);
+ kfree(dsg);
+ }
+
kfree(txd);
}
@@ -917,9 +869,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
* need, but for slaves the physical signals may be muxed!
* Can the platform allow us to use this channel?
*/
- if (plchan->slave &&
- ch->signal < 0 &&
- pl08x->pd->get_signal) {
+ if (plchan->slave && pl08x->pd->get_signal) {
ret = pl08x->pd->get_signal(plchan);
if (ret < 0) {
dev_dbg(&pl08x->adev->dev,
@@ -1008,10 +958,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
* If slaves are relying on interrupts to signal completion this function
* must not be called with interrupts disabled.
*/
-static enum dma_status
-pl08x_dma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
dma_cookie_t last_used;
@@ -1253,7 +1201,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
if (!num_llis) {
- kfree(txd);
+ spin_lock_irqsave(&plchan->lock, flags);
+ pl08x_free_txd(pl08x, txd);
+ spin_unlock_irqrestore(&plchan->lock, flags);
return -EINVAL;
}
@@ -1301,13 +1251,14 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
unsigned long flags)
{
- struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (txd) {
dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
txd->tx.flags = flags;
txd->tx.tx_submit = pl08x_tx_submit;
INIT_LIST_HEAD(&txd->node);
+ INIT_LIST_HEAD(&txd->dsg_list);
/* Always enable error and terminal interrupts */
txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1326,6 +1277,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
+ struct pl08x_sg *dsg;
int ret;
txd = pl08x_get_txd(plchan, flags);
@@ -1335,10 +1287,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return NULL;
}
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
+ __func__);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
txd->direction = DMA_NONE;
- txd->src_addr = src;
- txd->dst_addr = dest;
- txd->len = len;
+ dsg->src_addr = src;
+ dsg->dst_addr = dest;
+ dsg->len = len;
/* Set platform data for m2m */
txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1367,19 +1328,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- int ret;
-
- /*
- * Current implementation ASSUMES only one sg
- */
- if (sg_len != 1) {
- dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
- __func__);
- BUG();
- }
+ struct pl08x_sg *dsg;
+ struct scatterlist *sg;
+ dma_addr_t slave_addr;
+ int ret, tmp;
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sgl->length, plchan->name);
+ __func__, sgl->length, plchan->name);
txd = pl08x_get_txd(plchan, flags);
if (!txd) {
@@ -1398,24 +1353,49 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
* channel target address dynamically at runtime.
*/
txd->direction = direction;
- txd->len = sgl->length;
if (direction == DMA_TO_DEVICE) {
- txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
txd->cctl = plchan->dst_cctl;
- txd->src_addr = sgl->dma_address;
- txd->dst_addr = plchan->dst_addr;
+ slave_addr = plchan->dst_addr;
} else if (direction == DMA_FROM_DEVICE) {
- txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
txd->cctl = plchan->src_cctl;
- txd->src_addr = plchan->src_addr;
- txd->dst_addr = sgl->dma_address;
+ slave_addr = plchan->src_addr;
} else {
+ pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
+ if (plchan->cd->device_fc)
+ tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+ PL080_FLOW_PER2MEM_PER;
+ else
+ tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+ PL080_FLOW_PER2MEM;
+
+ txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+
+ for_each_sg(sgl, sg, sg_len, tmp) {
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
+ __func__);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = sg_dma_len(sg);
+ if (direction == DMA_TO_DEVICE) {
+ dsg->src_addr = sg_phys(sg);
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = sg_phys(sg);
+ }
+ }
+
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
@@ -1489,9 +1469,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_dma_chan *plchan;
char *name = chan_id;
+ /* Reject channels for devices not bound to this driver */
+ if (chan->device->dev->driver != &pl08x_amba_driver.drv)
+ return false;
+
+ plchan = to_pl08x_chan(chan);
+
/* Check that the channel is not taken! */
if (!strcmp(plchan->name, name))
return true;
@@ -1507,34 +1493,34 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
*/
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
- u32 val;
-
- val = readl(pl08x->base + PL080_CONFIG);
- val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
- /* We implicitly clear bit 1 and that means little-endian mode */
- val |= PL080_CONFIG_ENABLE;
- writel(val, pl08x->base + PL080_CONFIG);
+ writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
struct device *dev = txd->tx.chan->device->dev;
+ struct pl08x_sg *dsg;
if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(dev, txd->src_addr, txd->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, txd->src_addr, txd->len,
- DMA_TO_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ }
}
if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(dev, txd->dst_addr, txd->len,
- DMA_FROM_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
else
- dma_unmap_page(dev, txd->dst_addr, txd->len,
- DMA_FROM_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
}
}
@@ -1589,8 +1575,8 @@ static void pl08x_tasklet(unsigned long data)
*/
list_for_each_entry(waiting, &pl08x->memcpy.channels,
chan.device_node) {
- if (waiting->state == PL08X_CHAN_WAITING &&
- waiting->waiting != NULL) {
+ if (waiting->state == PL08X_CHAN_WAITING &&
+ waiting->waiting != NULL) {
int ret;
/* This should REALLY not fail now */
@@ -1630,38 +1616,40 @@ static void pl08x_tasklet(unsigned long data)
static irqreturn_t pl08x_irq(int irq, void *dev)
{
struct pl08x_driver_data *pl08x = dev;
- u32 mask = 0;
- u32 val;
- int i;
-
- val = readl(pl08x->base + PL080_ERR_STATUS);
- if (val) {
- /* An error interrupt (on one or more channels) */
- dev_err(&pl08x->adev->dev,
- "%s error interrupt, register value 0x%08x\n",
- __func__, val);
- /*
- * Simply clear ALL PL08X error interrupts,
- * regardless of channel and cause
- * FIXME: should be 0x00000003 on PL081 really.
- */
- writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+ u32 mask = 0, err, tc, i;
+
+ /* Check and clear the ERR and TC interrupt status */
+ err = readl(pl08x->base + PL080_ERR_STATUS);
+ if (err) {
+ dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
+ __func__, err);
+ writel(err, pl08x->base + PL080_ERR_CLEAR);
}
- val = readl(pl08x->base + PL080_INT_STATUS);
+ tc = readl(pl08x->base + PL080_INT_STATUS);
+ if (tc)
+ writel(tc, pl08x->base + PL080_TC_CLEAR);
+
+ if (!err && !tc)
+ return IRQ_NONE;
+
for (i = 0; i < pl08x->vd->channels; i++) {
- if ((1 << i) & val) {
+ if (((1 << i) & err) || ((1 << i) & tc)) {
/* Locate physical channel */
struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
struct pl08x_dma_chan *plchan = phychan->serving;
+ if (!plchan) {
+ dev_err(&pl08x->adev->dev,
+ "%s Error TC interrupt on unused channel: 0x%08x\n",
+ __func__, i);
+ continue;
+ }
+
/* Schedule tasklet on this channel */
tasklet_schedule(&plchan->tasklet);
-
mask |= (1 << i);
}
}
- /* Clear only the terminal interrupts on channels we processed */
- writel(mask, pl08x->base + PL080_TC_CLEAR);
return mask ? IRQ_HANDLED : IRQ_NONE;
}
@@ -1685,9 +1673,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
* Make a local wrapper to hold required data
*/
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
- struct dma_device *dmadev,
- unsigned int channels,
- bool slave)
+ struct dma_device *dmadev, unsigned int channels, bool slave)
{
struct pl08x_dma_chan *chan;
int i;
@@ -1700,7 +1686,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
* to cope with that situation.
*/
for (i = 0; i < channels; i++) {
- chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan) {
dev_err(&pl08x->adev->dev,
"%s no memory for channel\n", __func__);
@@ -1728,7 +1714,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
kfree(chan);
continue;
}
- dev_info(&pl08x->adev->dev,
+ dev_dbg(&pl08x->adev->dev,
"initialize virtual channel \"%s\"\n",
chan->name);
@@ -1837,9 +1823,9 @@ static const struct file_operations pl08x_debugfs_operations = {
static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
/* Expose a simple debugfs interface to view all clocks */
- (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
- NULL, pl08x,
- &pl08x_debugfs_operations);
+ (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
+ S_IFREG | S_IRUGO, NULL, pl08x,
+ &pl08x_debugfs_operations);
}
#else
@@ -1860,12 +1846,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
/* Create the driver state holder */
- pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+ pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
if (!pl08x) {
ret = -ENOMEM;
goto out_no_pl08x;
}
+ pm_runtime_set_active(&adev->dev);
+ pm_runtime_enable(&adev->dev);
+
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
@@ -1939,7 +1928,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Initialize physical channels */
- pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+ pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL);
if (!pl08x->phy_chans) {
dev_err(&adev->dev, "%s failed to allocate "
@@ -1956,9 +1945,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&ch->lock);
ch->serving = NULL;
ch->signal = -1;
- dev_info(&adev->dev,
- "physical channel %d is %s\n", i,
- pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+ dev_dbg(&adev->dev, "physical channel %d is %s\n",
+ i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
}
/* Register as many memcpy channels as there are physical channels */
@@ -1974,8 +1962,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Register slave channels */
ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
- pl08x->pd->num_slave_channels,
- true);
+ pl08x->pd->num_slave_channels, true);
if (ret <= 0) {
dev_warn(&pl08x->adev->dev,
"%s failed to enumerate slave channels - %d\n",
@@ -2005,6 +1992,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
amba_part(adev), amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
+
+ pm_runtime_put(&adev->dev);
return 0;
out_no_slave_reg:
@@ -2023,6 +2012,9 @@ out_no_ioremap:
dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
+ pm_runtime_put(&adev->dev);
+ pm_runtime_disable(&adev->dev);
+
kfree(pl08x);
out_no_pl08x:
amba_release_regions(adev);
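The reworked pl08x_prep_slave_sg() above drops the old single-sg assumption and builds a list of per-segment transfers instead. Below is a minimal sketch of that pattern, with a hypothetical demo_sg structure standing in for the driver's struct pl08x_sg and sg_dma_address() used where the hunk above uses sg_phys():

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct demo_sg {
	dma_addr_t	src_addr;
	dma_addr_t	dst_addr;
	size_t		len;
	struct list_head node;
};

static int demo_build_sg_list(struct scatterlist *sgl, unsigned int sg_len,
			      dma_addr_t slave_addr, bool to_device,
			      struct list_head *dsg_list)
{
	struct scatterlist *sg;
	struct demo_sg *dsg;
	int i;

	for_each_sg(sgl, sg, sg_len, i) {
		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
		if (!dsg)
			return -ENOMEM;	/* caller frees entries already queued */

		dsg->len = sg_dma_len(sg);
		if (to_device) {
			/* memory-to-peripheral: the source walks the sg list */
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			/* peripheral-to-memory: the destination walks the sg list */
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
		list_add_tail(&dsg->node, dsg_list);
	}
	return 0;
}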
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483eac7b3..fcfa0a8b5c5 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
struct at_desc *desc, *_desc;
struct at_desc *ret = NULL;
+ unsigned long flags;
unsigned int i = 0;
LIST_HEAD(tmp_list);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
i++;
if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
dev_dbg(chan2dev(&atchan->chan_common),
"desc %p not ACKed\n", desc);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(&atchan->chan_common),
"scanned %u descriptors on freelist\n", i);
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
if (!ret) {
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
if (ret) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated++;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
dev_err(chan2dev(&atchan->chan_common),
"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
if (desc) {
struct at_desc *child;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
}
@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers,
* no need to replay callback function while stopping */
- if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+ if (!atc_chan_is_cyclic(atchan)) {
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+ unsigned long flags;
- spin_lock(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
- else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ else if (atc_chan_is_cyclic(atchan))
atc_handle_cyclic(atchan);
else
atc_advance_work(atchan);
- spin_unlock(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
struct at_desc *desc = txd_to_at_desc(tx);
struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
cookie = atc_assign_cookie(atchan, desc);
if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &atchan->queue);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return cookie;
}
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
+ unsigned long flags;
LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
if (cmd == DMA_PAUSE) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
set_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_RESUME) {
- if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+ if (!atc_chan_is_paused(atchan))
return 0;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) {
struct at_desc *desc, *_desc;
/*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations.
*/
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
/* disabling channel: must also remove suspend state */
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
return -ENXIO;
}
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
+ unsigned long flags;
enum dma_status ret;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
last_complete = atchan->completed_cookie;
last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
if (ret != DMA_SUCCESS)
dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
else
dma_set_tx_state(txstate, last_complete, last_used, 0);
- if (test_bit(ATC_IS_PAUSED, &atchan->status))
+ if (atc_chan_is_paused(atchan))
ret = DMA_PAUSED;
dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
/* Not needed for cyclic transfers */
- if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ if (atc_chan_is_cyclic(atchan))
return;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc;
struct at_dma_slave *atslave;
+ unsigned long flags;
int i;
u32 cfg;
LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i;
list_splice(&tmp_list, &atchan->free_list);
atchan->completed_cookie = chan->cookie = 1;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */
channel_writel(atchan, CFG, cfg);
@@ -1260,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
atchan->chan_common.cookie = atchan->completed_cookie = 1;
- atchan->chan_common.chan_id = i;
list_add_tail(&atchan->chan_common.device_node,
&atdma->dma_common.channels);
@@ -1293,22 +1300,20 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-
- if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+ /* controller can do slave DMA: can trigger cyclic transfers */
+ dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
- dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control;
+ }
dma_writel(atdma, EN, AT_DMA_ENABLE);
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- atdma->dma_common.chancnt);
+ pdata->nr_channels);
dma_async_device_register(&atdma->dma_common);
@@ -1377,27 +1382,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
clk_disable(atdma->clk);
}
+static int at_dma_prepare(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ /* wait for transaction completion (except in cyclic case) */
+ if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+ struct dma_chan *chan = &atchan->chan_common;
+
+ /* The channel should already have been paused by its user;
+ * pause it here anyway if that has not been done. */
+ if (!atc_chan_is_paused(atchan)) {
+ dev_warn(chan2dev(chan),
+ "cyclic channel not paused, should be done by channel user\n");
+ atc_control(chan, DMA_PAUSE, 0);
+ }
+
+ /* now preserve additional data for cyclic operations */
+ /* next descriptor address in the cyclic list */
+ atchan->save_dscr = channel_readl(atchan, DSCR);
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
- at_dma_off(platform_get_drvdata(pdev));
+ /* preserve data */
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ if (atc_chan_is_cyclic(atchan))
+ atc_suspend_cyclic(atchan);
+ atchan->save_cfg = channel_readl(atchan, CFG);
+ }
+ atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+ /* disable DMA controller */
+ at_dma_off(atdma);
clk_disable(atdma->clk);
return 0;
}
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+
+ /* restore channel status for cyclic descriptors list:
+ * next descriptor in the cyclic list at the time of suspend */
+ channel_writel(atchan, SADDR, 0);
+ channel_writel(atchan, DADDR, 0);
+ channel_writel(atchan, CTRLA, 0);
+ channel_writel(atchan, CTRLB, 0);
+ channel_writel(atchan, DSCR, atchan->save_dscr);
+ dma_writel(atdma, CHER, atchan->mask);
+
+ /* The pause status must be cleared by the channel user;
+ * we cannot take the initiative to do it here. */
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+ /* bring back DMA controller */
clk_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+ /* clear any pending interrupt */
+ while (dma_readl(atdma, EBCISR))
+ cpu_relax();
+
+ /* restore saved data */
+ dma_writel(atdma, EBCIER, atdma->save_imr);
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ channel_writel(atchan, CFG, atchan->save_cfg);
+ if (atc_chan_is_cyclic(atchan))
+ atc_resume_cyclic(atchan);
+ }
return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
+ .prepare = at_dma_prepare,
.suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq,
};
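The at_hdmac.c hunks above convert every spin_lock_bh()/spin_unlock_bh() pair on the channel lock to the irqsave variants, so the lock may also be taken from hard-interrupt context. A minimal sketch of the pattern, with a hypothetical demo_chan standing in for struct at_dma_chan:

#include <linux/spinlock.h>

struct demo_chan {
	spinlock_t	lock;
	int		descs_allocated;
};

static void demo_account_desc(struct demo_chan *dc)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() is safe from any context, including hard IRQ,
	 * whereas the _bh variant only masks softirqs and must not be used
	 * when an interrupt handler can contend for the same lock.
	 */
	spin_lock_irqsave(&dc->lock, flags);
	dc->descs_allocated++;
	spin_unlock_irqrestore(&dc->lock, flags);
}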
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 087dbf1dd39..aa4c9aebab7 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@ enum atc_status {
* @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, preserve next descriptor address in
+ * the cyclic list on suspend/resume cycle
* @lock: serializes enqueue/dequeue operations to descriptors lists
* @completed_cookie: identifier for the most recently completed operation
* @active_list: list of descriptors dmaengine is being running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
u8 mask;
unsigned long status;
struct tasklet_struct tasklet;
+ u32 save_cfg;
+ u32 save_dscr;
spinlock_t lock;
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
* @chan_common: common dmaengine dma_device object members
* @ch_regs: memory mapped register base
* @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels availlable in a mask
* @dma_desc_pool: base of DMA descriptor region (DMA address)
* @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
struct dma_device dma_common;
void __iomem *regs;
struct clk *clk;
+ u32 save_imr;
u8 all_chan_mask;
@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
return !!(dma_readl(atdma, CHSR) & atchan->mask);
}
+/**
+ * atc_chan_is_paused - test channel pause/resume status
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
+{
+ return test_bit(ATC_IS_PAUSED, &atchan->status);
+}
+
+/**
+ * atc_chan_is_cyclic - test if given channel has cyclic property set
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
+{
+ return test_bit(ATC_IS_CYCLIC, &atchan->status);
+}
/**
* set_desc_eol - set end-of-link to descriptor so it will end transfer
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 765f5ff2230..eb1d8641cf5 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
@@ -251,6 +252,7 @@ static int dmatest_func(void *data)
int i;
thread_name = current->comm;
+ set_freezable_with_signal();
ret = -ENOMEM;
@@ -305,7 +307,8 @@ static int dmatest_func(void *data)
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
struct completion cmp;
- unsigned long tmo = msecs_to_jiffies(timeout);
+ unsigned long start, tmo, end = 0 /* compiler... */;
+ bool reload = true;
u8 align = 0;
total_tests++;
@@ -404,7 +407,17 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- tmo = wait_for_completion_timeout(&cmp, tmo);
+ do {
+ start = jiffies;
+ if (reload)
+ end = start + msecs_to_jiffies(timeout);
+ else if (end <= start)
+ end = start + 1;
+ tmo = wait_for_completion_interruptible_timeout(&cmp,
+ end - start);
+ reload = try_to_freeze();
+ } while (tmo == -ERESTARTSYS);
+
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (tmo == 0) {
@@ -477,6 +490,8 @@ err_srcs:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret);
+ /* terminate all transfers on specified channels */
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
list_del(&thread->node);
kfree(thread);
}
+
+ /* terminate all transfers on specified channels */
+ dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+
kfree(dtc);
}
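The dmatest change above makes the test thread freezable and restarts an interrupted wait after a freeze/thaw cycle, reloading the timeout budget only when the thread was actually frozen. A sketch of that wait loop as a hypothetical helper (demo_* names are illustrative):

#include <linux/completion.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>

static long demo_wait_freezable(struct completion *cmp, unsigned int timeout_ms)
{
	unsigned long start, end = 0;
	bool reload = true;
	long tmo;

	do {
		start = jiffies;
		if (reload)
			end = start + msecs_to_jiffies(timeout_ms);
		else if (time_before_eq(end, start))
			end = start + 1;	/* never pass a zero timeout */
		tmo = wait_for_completion_interruptible_timeout(cmp, end - start);
		reload = try_to_freeze();	/* true if we were frozen */
	} while (tmo == -ERESTARTSYS);		/* interrupted: wait again */

	return tmo;	/* 0 on timeout, remaining jiffies on completion */
}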
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 4d180ca9a1d..9bfd6d36071 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev)
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
INIT_LIST_HEAD(&dw->dma.channels);
- for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
dwc->chan.device = &dw->dma;
dwc->chan.cookie = dwc->completed = 1;
- dwc->chan.chan_id = i;
if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
list_add_tail(&dwc->chan.device_node,
&dw->dma.channels);
@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
- dev_name(&pdev->dev), dw->dma.chancnt);
+ dev_name(&pdev->dev), pdata->nr_channels);
dma_async_device_register(&dw->dma);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5d7a49bd7c2..b47e2b803fa 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index d99f71c356b..4be55f9bb6c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -23,6 +24,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
+#include <linux/module.h>
#include <asm/irq.h>
#include <mach/dma-v1.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7bd7e98548c..f993955a640 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -34,6 +35,7 @@
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/module.h>
#include <asm/irq.h>
#include <mach/sdma.h>
@@ -318,6 +320,7 @@ struct sdma_engine {
dma_addr_t context_phys;
struct dma_device dma_device;
struct clk *clk;
+ struct mutex channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
};
@@ -415,11 +418,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
dma_addr_t buf_phys;
int ret;
+ mutex_lock(&sdma->channel_0_lock);
+
buf_virt = dma_alloc_coherent(NULL,
size,
&buf_phys, GFP_KERNEL);
- if (!buf_virt)
- return -ENOMEM;
+ if (!buf_virt) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
bd0->mode.command = C0_SETPM;
bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -433,6 +440,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
dma_free_coherent(NULL, size, buf_virt, buf_phys);
+err_out:
+ mutex_unlock(&sdma->channel_0_lock);
+
return ret;
}
@@ -656,6 +666,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+ mutex_lock(&sdma->channel_0_lock);
+
memset(context, 0, sizeof(*context));
context->channel_state.pc = load_address;
@@ -676,6 +688,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
ret = sdma_run_channel(&sdma->channel[0]);
+ mutex_unlock(&sdma->channel_0_lock);
+
return ret;
}
@@ -1131,18 +1145,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
saddr_arr[i] = addr_arr[i];
}
-static int __init sdma_get_firmware(struct sdma_engine *sdma,
- const char *fw_name)
+static void sdma_load_firmware(const struct firmware *fw, void *context)
{
- const struct firmware *fw;
+ struct sdma_engine *sdma = context;
const struct sdma_firmware_header *header;
- int ret;
const struct sdma_script_start_addrs *addr;
unsigned short *ram_code;
- ret = request_firmware(&fw, fw_name, sdma->dev);
- if (ret)
- return ret;
+ if (!fw) {
+ dev_err(sdma->dev, "firmware not found\n");
+ return;
+ }
if (fw->size < sizeof(*header))
goto err_firmware;
@@ -1172,6 +1185,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
err_firmware:
release_firmware(fw);
+}
+
+static int __init sdma_get_firmware(struct sdma_engine *sdma,
+ const char *fw_name)
+{
+ int ret;
+
+ ret = request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_HOTPLUG, fw_name, sdma->dev,
+ GFP_KERNEL, sdma, sdma_load_firmware);
return ret;
}
@@ -1269,11 +1292,14 @@ static int __init sdma_probe(struct platform_device *pdev)
struct sdma_platform_data *pdata = pdev->dev.platform_data;
int i;
struct sdma_engine *sdma;
+ s32 *saddr_arr;
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
return -ENOMEM;
+ mutex_init(&sdma->channel_0_lock);
+
sdma->dev = &pdev->dev;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1310,6 +1336,11 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_alloc;
}
+ /* initially no scripts available */
+ saddr_arr = (s32 *)sdma->script_addrs;
+ for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+ saddr_arr[i] = -EINVAL;
+
if (of_id)
pdev->id_entry = of_id->data;
sdma->devtype = pdev->id_entry->driver_data;
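The imx-sdma change above switches from the blocking request_firmware() to the asynchronous request_firmware_nowait(), so probe no longer stalls when the firmware image is not yet available. A minimal sketch of that calling pattern, with hypothetical demo_* names:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

/* Runs later, from a workqueue, once userspace has (or has not) provided
 * the firmware image. */
static void demo_fw_cont(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware not found\n");
		return;
	}
	/* ... validate and upload fw->data / fw->size here ... */
	release_firmware(fw);
}

static int demo_fw_load(struct device *dev, const char *name)
{
	/* Returns immediately; the continuation does the real work. */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       name, dev, GFP_KERNEL, dev,
				       demo_fw_cont);
}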
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 8a3fdd87db9..19a0c64d45d 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
+#include <linux/module.h>
#define MAX_CHAN 4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"
@@ -115,16 +116,15 @@ DMAC1 interrupt Functions*/
/**
* dmac1_mask_periphral_intr - mask the periphral interrupt
- * @midc: dma channel for which masking is required
+ * @mid: dma device for which masking is required
*
* Masks the DMA periphral interrupt
* this is valid for DMAC1 family controllers only
* This controller should have periphral mask registers already mapped
*/
-static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
+static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
u32 pimr;
- struct middma_device *mid = to_middma_device(midc->chan.device);
if (mid->pimr_mask) {
pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
@@ -184,7 +184,6 @@ static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
/*Check LPE PISR, make sure fwd is disabled*/
- dmac1_mask_periphral_intr(midc);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
@@ -1114,7 +1113,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
midch->chan.device = &dma->common;
midch->chan.cookie = 1;
- midch->chan.chan_id = i;
midch->ch_id = dma->chan_base + i;
pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
@@ -1150,7 +1148,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
dma->common.dev = &pdev->dev;
- dma->common.chancnt = dma->max_chan;
dma->common.device_alloc_chan_resources =
intel_mid_dma_alloc_chan_resources;
@@ -1350,6 +1347,7 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
if (device->ch[i].in_use)
return -EAGAIN;
}
+ dmac1_mask_periphral_intr(device);
device->state = SUSPENDED;
pci_save_state(pci);
pci_disable_device(pci);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 6815905a772..0e5ef33f90a 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <mach/ipu.h>
@@ -1307,6 +1308,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
callback = descnew->txd.callback;
callback_param = descnew->txd.callback_param;
+ list_del_init(&descnew->list);
spin_unlock(&ichan->lock);
if (callback)
callback(callback_param);
@@ -1428,39 +1430,58 @@ static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
+ struct ipu *ipu = to_ipu(idmac);
+ struct list_head *list, *tmp;
unsigned long flags;
int i;
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ switch (cmd) {
+ case DMA_PAUSE:
+ spin_lock_irqsave(&ipu->lock, flags);
+ ipu_ic_disable_task(ipu, chan->chan_id);
- ipu_disable_channel(idmac, ichan,
- ichan->status >= IPU_CHANNEL_ENABLED);
+ /* Return all descriptors into "prepared" state */
+ list_for_each_safe(list, tmp, &ichan->queue)
+ list_del_init(list);
- tasklet_disable(&to_ipu(idmac)->tasklet);
+ ichan->sg[0] = NULL;
+ ichan->sg[1] = NULL;
- /* ichan->queue is modified in ISR, have to spinlock */
- spin_lock_irqsave(&ichan->lock, flags);
- list_splice_init(&ichan->queue, &ichan->free_list);
+ spin_unlock_irqrestore(&ipu->lock, flags);
- if (ichan->desc)
- for (i = 0; i < ichan->n_tx_desc; i++) {
- struct idmac_tx_desc *desc = ichan->desc + i;
- if (list_empty(&desc->list))
- /* Descriptor was prepared, but not submitted */
- list_add(&desc->list, &ichan->free_list);
+ ichan->status = IPU_CHANNEL_INITIALIZED;
+ break;
+ case DMA_TERMINATE_ALL:
+ ipu_disable_channel(idmac, ichan,
+ ichan->status >= IPU_CHANNEL_ENABLED);
- async_tx_clear_ack(&desc->txd);
- }
+ tasklet_disable(&ipu->tasklet);
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
- spin_unlock_irqrestore(&ichan->lock, flags);
+ /* ichan->queue is modified in ISR, have to spinlock */
+ spin_lock_irqsave(&ichan->lock, flags);
+ list_splice_init(&ichan->queue, &ichan->free_list);
- tasklet_enable(&to_ipu(idmac)->tasklet);
+ if (ichan->desc)
+ for (i = 0; i < ichan->n_tx_desc; i++) {
+ struct idmac_tx_desc *desc = ichan->desc + i;
+ if (list_empty(&desc->list))
+ /* Descriptor was prepared, but not submitted */
+ list_add(&desc->list, &ichan->free_list);
- ichan->status = IPU_CHANNEL_INITIALIZED;
+ async_tx_clear_ack(&desc->txd);
+ }
+
+ ichan->sg[0] = NULL;
+ ichan->sg[1] = NULL;
+ spin_unlock_irqrestore(&ichan->lock, flags);
+
+ tasklet_enable(&ipu->tasklet);
+
+ ichan->status = IPU_CHANNEL_INITIALIZED;
+ break;
+ default:
+ return -ENOSYS;
+ }
return 0;
}
@@ -1663,7 +1684,6 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
struct idmac_channel *ichan = ipu->channel + i;
idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
- idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
}
dma_async_device_unregister(&idmac->dma);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index b9bae94f201..8ba4edc6185 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
mchan = &mdma->channels[i];
mchan->chan.device = dma;
- mchan->chan.chan_id = i;
mchan->chan.cookie = 1;
mchan->completed_cookie = mchan->chan.cookie;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index be641cbd36f..b4588bdd98b 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -130,6 +130,23 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
+static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+ int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
+
+ /* enable apbh channel clock */
+ if (dma_is_apbh()) {
+ if (apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+ }
+}
+
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
+ /* clkgate needs to be enabled before writing other registers */
+ mxs_dma_clkgate(mxs_chan, 1);
+
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
- /* enable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
- }
-
/* write 1 to SEMA to kick off the channel */
writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int chan_id = mxs_chan->chan.chan_id;
-
/* disable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
- }
+ mxs_dma_clkgate(mxs_chan, 0);
mxs_chan->status = DMA_SUCCESS;
}
@@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto err_clk;
+ /* clkgate needs to be enabled for reset to finish */
+ mxs_dma_clkgate(mxs_chan, 1);
mxs_dma_reset_chan(mxs_chan);
+ mxs_dma_clkgate(mxs_chan, 0);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1ac8d4b580b..a6d0e3dbed0 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -60,7 +60,7 @@
#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
#define DMA_DESC_FOLLOW_WITH_IRQ 0x3
-#define MAX_CHAN_NR 8
+#define MAX_CHAN_NR 12
#define DMA_MASK_CTL0_MODE 0x33333333
#define DMA_MASK_CTL2_MODE 0x00003333
@@ -872,8 +872,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
int i;
nr_channels = id->driver_data;
- pd = kzalloc(sizeof(struct pch_dma)+
- sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
@@ -926,7 +925,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
}
pd->dma.dev = &pdev->dev;
- pd->dma.chancnt = nr_channels;
INIT_LIST_HEAD(&pd->dma.channels);
@@ -935,7 +933,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
pd_chan->chan.device = &pd->dma;
pd_chan->chan.cookie = 1;
- pd_chan->chan.chan_id = i;
pd_chan->membase = &regs->desc[i];
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 00eee59e8b3..571041477ab 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -17,6 +17,8 @@
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
#define NR_DEFAULT_DESC 16
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
* NULL if the channel is available to be acquired.
*/
void *pl330_chid;
+
+ /* For D-to-M and M-to-D channels */
+ int burst_sz; /* the peripheral fifo width */
+ int burst_len; /* the number of burst */
+ dma_addr_t fifo_addr;
+
+ /* for cyclic capability */
+ bool cyclic;
};
struct dma_pl330_dmac {
@@ -83,6 +93,8 @@ struct dma_pl330_dmac {
/* Peripheral channels connected to this DMAC */
struct dma_pl330_chan *peripherals; /* keep at end */
+
+ struct clk *clk;
};
struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
+static inline void handle_cyclic_desc_list(struct list_head *list)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch;
+ unsigned long flags;
+
+ if (list_empty(list))
+ return;
+
+ list_for_each_entry(desc, list, node) {
+ dma_async_tx_callback callback;
+
+ /* Change status to reload it */
+ desc->status = PREP;
+ pch = desc->pchan;
+ callback = desc->txd.callback;
+ if (callback)
+ callback(desc->txd.callback_param);
+ }
+
+ spin_lock_irqsave(&pch->lock, flags);
+ list_splice_tail_init(list, &pch->work_list);
+ spin_unlock_irqrestore(&pch->lock, flags);
+}
+
static inline void fill_queue(struct dma_pl330_chan *pch)
{
struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@ static void pl330_tasklet(unsigned long data)
spin_unlock_irqrestore(&pch->lock, flags);
- free_desc_list(&list);
+ if (pch->cyclic)
+ handle_cyclic_desc_list(&list);
+ else
+ free_desc_list(&list);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&pch->lock, flags);
pch->completed = chan->cookie = 1;
+ pch->cyclic = false;
pch->pl330_chid = pl330_request_channel(&pdmac->pif);
if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_desc *desc;
+ struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct dma_slave_config *slave_config;
+ LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
-
- spin_lock_irqsave(&pch->lock, flags);
-
- /* FLUSH the PL330 Channel thread */
- pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&pch->lock, flags);
- /* Mark all desc done */
- list_for_each_entry(desc, &pch->work_list, node)
- desc->status = DONE;
+ /* FLUSH the PL330 Channel thread */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
- spin_unlock_irqrestore(&pch->lock, flags);
+ /* Mark all desc done */
+ list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
+ desc->status = DONE;
+ pch->completed = desc->txd.cookie;
+ list_move_tail(&desc->node, &list);
+ }
- pl330_tasklet((unsigned long) pch);
+ list_splice_tail_init(&list, &pdmac->desc_pool);
+ spin_unlock_irqrestore(&pch->lock, flags);
+ break;
+ case DMA_SLAVE_CONFIG:
+ slave_config = (struct dma_slave_config *)arg;
+
+ if (slave_config->direction == DMA_TO_DEVICE) {
+ if (slave_config->dst_addr)
+ pch->fifo_addr = slave_config->dst_addr;
+ if (slave_config->dst_addr_width)
+ pch->burst_sz = __ffs(slave_config->dst_addr_width);
+ if (slave_config->dst_maxburst)
+ pch->burst_len = slave_config->dst_maxburst;
+ } else if (slave_config->direction == DMA_FROM_DEVICE) {
+ if (slave_config->src_addr)
+ pch->fifo_addr = slave_config->src_addr;
+ if (slave_config->src_addr_width)
+ pch->burst_sz = __ffs(slave_config->src_addr_width);
+ if (slave_config->src_maxburst)
+ pch->burst_len = slave_config->src_maxburst;
+ }
+ break;
+ default:
+ dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+ return -ENXIO;
+ }
return 0;
}
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
pl330_release_channel(pch->pl330_chid);
pch->pl330_chid = NULL;
+ if (pch->cyclic)
+ list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+
spin_unlock_irqrestore(&pch->lock, flags);
}
@@ -453,7 +524,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
if (peri) {
desc->req.rqtype = peri->rqtype;
- desc->req.peri = peri->peri_id;
+ desc->req.peri = pch->chan.chan_id;
} else {
desc->req.rqtype = MEMTOMEM;
desc->req.peri = 0;
@@ -524,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
return burst_len;
}
+static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ dma_addr_t dst;
+ dma_addr_t src;
+
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 0;
+ src = dma_addr;
+ dst = pch->fifo_addr;
+ break;
+ case DMA_FROM_DEVICE:
+ desc->rqcfg.src_inc = 0;
+ desc->rqcfg.dst_inc = 1;
+ src = pch->fifo_addr;
+ dst = dma_addr;
+ break;
+ default:
+ dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ desc->rqcfg.brst_size = pch->burst_sz;
+ desc->rqcfg.brst_len = 1;
+
+ pch->cyclic = true;
+
+ fill_px(&desc->px, dst, src, period_len);
+
+ return &desc->txd;
+}
+
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
dma_addr_t src, size_t len, unsigned long flags)
@@ -579,7 +695,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_peri *peri = chan->private;
struct scatterlist *sg;
unsigned long flags;
- int i, burst_size;
+ int i;
dma_addr_t addr;
if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- addr = peri->fifo_addr;
- burst_size = peri->burst_sz;
+ addr = pch->fifo_addr;
first = NULL;
@@ -644,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
- desc->rqcfg.brst_size = burst_size;
+ desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
}
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
goto probe_err1;
}
+ pdmac->clk = clk_get(&adev->dev, "dma");
+ if (IS_ERR(pdmac->clk)) {
+ dev_err(&adev->dev, "Cannot get operation clock.\n");
+ ret = -EINVAL;
+ goto probe_err1;
+ }
+
+ amba_set_drvdata(adev, pdmac);
+
+#ifdef CONFIG_PM_RUNTIME
+ /* to use the runtime PM helper functions */
+ pm_runtime_enable(&adev->dev);
+
+ /* enable the power domain */
+ if (pm_runtime_get_sync(&adev->dev)) {
+ dev_err(&adev->dev, "failed to get runtime pm\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+#else
+ /* enable dma clk */
+ clk_enable(pdmac->clk);
+#endif
+
irq = adev->irq[0];
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
case MEMTODEV:
case DEVTOMEM:
dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, pd->cap_mask);
break;
default:
dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -747,11 +887,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.device = pd;
- pch->chan.chan_id = i;
pch->dmac = pdmac;
/* Add the channel to the DMAC list */
- pd->chancnt++;
list_add_tail(&pch->chan.device_node, &pd->channels);
}
@@ -760,6 +898,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
pd->device_free_chan_resources = pl330_free_chan_resources;
pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+ pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
pd->device_tx_status = pl330_tx_status;
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
@@ -771,8 +910,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
goto probe_err4;
}
- amba_set_drvdata(adev, pdmac);
-
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
dev_info(&adev->dev,
@@ -833,6 +970,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_put(&adev->dev);
+ pm_runtime_disable(&adev->dev);
+#else
+ clk_disable(pdmac->clk);
+#endif
+
kfree(pdmac);
return 0;
@@ -846,10 +990,49 @@ static struct amba_id pl330_ids[] = {
{ 0, 0 },
};
+#ifdef CONFIG_PM_RUNTIME
+static int pl330_runtime_suspend(struct device *dev)
+{
+ struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+ if (!pdmac) {
+ dev_err(dev, "failed to get dmac\n");
+ return -ENODEV;
+ }
+
+ clk_disable(pdmac->clk);
+
+ return 0;
+}
+
+static int pl330_runtime_resume(struct device *dev)
+{
+ struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+ if (!pdmac) {
+ dev_err(dev, "failed to get dmac\n");
+ return -ENODEV;
+ }
+
+ clk_enable(pdmac->clk);
+
+ return 0;
+}
+#else
+#define pl330_runtime_suspend NULL
+#define pl330_runtime_resume NULL
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops pl330_pm_ops = {
+ .runtime_suspend = pl330_runtime_suspend,
+ .runtime_resume = pl330_runtime_resume,
+};
+
static struct amba_driver pl330_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "dma-pl330",
+ .pm = &pl330_pm_ops,
},
.id_table = pl330_ids,
.probe = pl330_probe,
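The pl330_control() rework above adds DMA_SLAVE_CONFIG handling: the channel now records the FIFO address, burst size (as __ffs() of the bus width) and burst length from a struct dma_slave_config. A hedged sketch of how a client driver could supply that configuration (demo_* names and the chosen width/burst values are illustrative):

#include <linux/dmaengine.h>

static int demo_cfg_tx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* Reaches the driver's device_control() as DMA_SLAVE_CONFIG. */
	return dmaengine_slave_config(chan, &cfg);
}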
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 7f49235d14b..81809c2b46a 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -259,14 +259,23 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
return 0;
}
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+ struct sh_dmae_slave *param = tx->chan->private;
dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
+ bool power_up;
+
+ spin_lock_irq(&sh_chan->desc_lock);
- spin_lock_bh(&sh_chan->desc_lock);
+ if (list_empty(&sh_chan->ld_queue))
+ power_up = true;
+ else
+ power_up = false;
cookie = sh_chan->common.cookie;
cookie++;
@@ -302,7 +311,38 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
tx->cookie, &last->async_tx, sh_chan->id,
desc->hw.sar, desc->hw.tcr, desc->hw.dar);
- spin_unlock_bh(&sh_chan->desc_lock);
+ if (power_up) {
+ sh_chan->pm_state = DMAE_PM_BUSY;
+
+ pm_runtime_get(sh_chan->dev);
+
+ spin_unlock_irq(&sh_chan->desc_lock);
+
+ pm_runtime_barrier(sh_chan->dev);
+
+ spin_lock_irq(&sh_chan->desc_lock);
+
+ /* Have we been reset, while waiting? */
+ if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
+ dev_dbg(sh_chan->dev, "Bring up channel %d\n",
+ sh_chan->id);
+ if (param) {
+ const struct sh_dmae_slave_config *cfg =
+ param->config;
+
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+
+ if (sh_chan->pm_state == DMAE_PM_PENDING)
+ sh_chan_xfer_ld_queue(sh_chan);
+ sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+ }
+ }
+
+ spin_unlock_irq(&sh_chan->desc_lock);
return cookie;
}
@@ -346,8 +386,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
struct sh_dmae_slave *param = chan->private;
int ret;
- pm_runtime_get_sync(sh_chan->dev);
-
/*
* This relies on the guarantee from dmaengine that alloc_chan_resources
* never runs concurrently with itself or free_chan_resources.
@@ -367,31 +405,20 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
}
param->config = cfg;
-
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
- } else {
- dmae_init(sh_chan);
}
- spin_lock_bh(&sh_chan->desc_lock);
while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&sh_chan->desc_lock);
desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
- if (!desc) {
- spin_lock_bh(&sh_chan->desc_lock);
+ if (!desc)
break;
- }
dma_async_tx_descriptor_init(&desc->async_tx,
&sh_chan->common);
desc->async_tx.tx_submit = sh_dmae_tx_submit;
desc->mark = DESC_IDLE;
- spin_lock_bh(&sh_chan->desc_lock);
list_add(&desc->node, &sh_chan->ld_free);
sh_chan->descs_allocated++;
}
- spin_unlock_bh(&sh_chan->desc_lock);
if (!sh_chan->descs_allocated) {
ret = -ENOMEM;
@@ -405,7 +432,7 @@ edescalloc:
clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
- pm_runtime_put(sh_chan->dev);
+ chan->private = NULL;
return ret;
}
@@ -417,7 +444,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
struct sh_desc *desc, *_desc;
LIST_HEAD(list);
- int descs = sh_chan->descs_allocated;
/* Protect against ISR */
spin_lock_irq(&sh_chan->desc_lock);
@@ -437,15 +463,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
chan->private = NULL;
}
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irq(&sh_chan->desc_lock);
list_splice_init(&sh_chan->ld_free, &list);
sh_chan->descs_allocated = 0;
- spin_unlock_bh(&sh_chan->desc_lock);
-
- if (descs > 0)
- pm_runtime_put(sh_chan->dev);
+ spin_unlock_irq(&sh_chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &list, node)
kfree(desc);
@@ -534,6 +557,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
LIST_HEAD(tx_list);
int chunks = 0;
+ unsigned long irq_flags;
int i;
if (!sg_len)
@@ -544,7 +568,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
(SH_DMA_TCR_MAX + 1);
/* Have to lock the whole loop to protect against concurrent release */
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
/*
* Chaining:
@@ -590,7 +614,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
/* Put them back on the free list, so, they don't get lost */
list_splice_tail(&tx_list, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
return &first->async_tx;
@@ -599,7 +623,7 @@ err_get_desc:
new->mark = DESC_IDLE;
list_splice(&tx_list, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
return NULL;
}
@@ -661,6 +685,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ unsigned long flags;
/* Only supports DMA_TERMINATE_ALL */
if (cmd != DMA_TERMINATE_ALL)
@@ -669,7 +694,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (!chan)
return -EINVAL;
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
dmae_halt(sh_chan);
if (!list_empty(&sh_chan->ld_queue)) {
@@ -678,9 +703,8 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct sh_desc, node);
desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
sh_chan->xmit_shift;
-
}
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
sh_dmae_chan_ld_cleanup(sh_chan, true);
@@ -695,8 +719,9 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
dma_cookie_t cookie = 0;
dma_async_tx_callback callback = NULL;
void *param = NULL;
+ unsigned long flags;
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -762,7 +787,13 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
async_tx_test_ack(&desc->async_tx)) || all) {
/* Remove from ld_queue list */
desc->mark = DESC_IDLE;
+
list_move(&desc->node, &sh_chan->ld_free);
+
+ if (list_empty(&sh_chan->ld_queue)) {
+ dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+ pm_runtime_put(sh_chan->dev);
+ }
}
}
@@ -773,7 +804,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
*/
sh_chan->completed_cookie = sh_chan->common.cookie;
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
if (callback)
callback(param);
@@ -792,14 +823,14 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
;
}
+/* Called under spin_lock_irq(&sh_chan->desc_lock) */
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
struct sh_desc *desc;
- spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
if (dmae_is_busy(sh_chan))
- goto sh_chan_xfer_ld_queue_end;
+ return;
/* Find the first not transferred descriptor */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -812,15 +843,18 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
dmae_start(sh_chan);
break;
}
-
-sh_chan_xfer_ld_queue_end:
- spin_unlock_bh(&sh_chan->desc_lock);
}
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
- sh_chan_xfer_ld_queue(sh_chan);
+
+ spin_lock_irq(&sh_chan->desc_lock);
+ if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
+ sh_chan_xfer_ld_queue(sh_chan);
+ else
+ sh_chan->pm_state = DMAE_PM_PENDING;
+ spin_unlock_irq(&sh_chan->desc_lock);
}
static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
@@ -831,6 +865,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status status;
+ unsigned long flags;
sh_dmae_chan_ld_cleanup(sh_chan, false);
@@ -841,7 +876,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
BUG_ON(last_complete < 0);
dma_set_tx_state(txstate, last_complete, last_used, 0);
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
status = dma_async_is_complete(cookie, last_complete, last_used);
@@ -859,7 +894,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
}
}
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
return status;
}
@@ -912,6 +947,12 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
list_splice_init(&sh_chan->ld_queue, &dl);
+ if (!list_empty(&dl)) {
+ dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+ pm_runtime_put(sh_chan->dev);
+ }
+ sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
spin_unlock(&sh_chan->desc_lock);
/* Complete all */
@@ -952,7 +993,7 @@ static void dmae_do_tasklet(unsigned long data)
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
- spin_lock(&sh_chan->desc_lock);
+ spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
((desc->direction == DMA_FROM_DEVICE &&
@@ -965,10 +1006,10 @@ static void dmae_do_tasklet(unsigned long data)
break;
}
}
- spin_unlock(&sh_chan->desc_lock);
-
/* Next desc */
sh_chan_xfer_ld_queue(sh_chan);
+ spin_unlock_irq(&sh_chan->desc_lock);
+
sh_dmae_chan_ld_cleanup(sh_chan, false);
}
@@ -1036,7 +1077,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
return -ENOMEM;
}
- /* copy struct dma_device */
+ new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
+ /* reference struct dma_device */
new_sh_chan->common.device = &shdev->common;
new_sh_chan->dev = shdev->common.dev;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index dc56576f9fd..2b55a276dc5 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -23,6 +23,12 @@
struct device;
+enum dmae_pm_state {
+ DMAE_PM_ESTABLISHED,
+ DMAE_PM_BUSY,
+ DMAE_PM_PENDING,
+};
+
struct sh_dmae_chan {
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
@@ -38,6 +44,7 @@ struct sh_dmae_chan {
u32 __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
+ enum dmae_pm_state pm_state;
};
struct sh_dmae_device {
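The pm_state field added to struct sh_dmae_chan above turns the channel's runtime-PM handling into a small state machine, which the shdma.c hunks earlier in this patch consult under desc_lock. A minimal sketch of that gating, based on those hunks (the resume-side handling of DMAE_PM_PENDING is not visible in this diff, so that transition is an assumption):

    /* Minimal sketch of the DMAE_PM_* gating shown in the shdma.c hunks. */
    static void issue_pending_sketch(struct sh_dmae_chan *sh_chan)
    {
        spin_lock_irq(&sh_chan->desc_lock);
        if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
            sh_chan_xfer_ld_queue(sh_chan);      /* channel powered: start now */
        else
            sh_chan->pm_state = DMAE_PM_PENDING; /* assumed: PM resume restarts the queue */
        spin_unlock_irq(&sh_chan->desc_lock);
    }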
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 467e4dcb20a..13259cad0ce 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index f69f90a6187..a4a398f2ef6 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -753,7 +753,7 @@ static int __devinit td_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&td->dma.channels);
- for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct timb_dma_chan *td_chan = &td->channels[i];
struct timb_dma_platform_data_channel *pchan =
pdata->channels + i;
@@ -762,12 +762,11 @@ static int __devinit td_probe(struct platform_device *pdev)
if ((i % 2) == pchan->rx) {
dev_err(&pdev->dev, "Wrong channel configuration\n");
err = -EINVAL;
- goto err_tasklet_kill;
+ goto err_free_irq;
}
td_chan->chan.device = &td->dma;
td_chan->chan.cookie = 1;
- td_chan->chan.chan_id = i;
spin_lock_init(&td_chan->lock);
INIT_LIST_HEAD(&td_chan->active_list);
INIT_LIST_HEAD(&td_chan->queue);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index af1a17d42bd..5948a2194f5 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -41,7 +41,7 @@ config EDAC_DEBUG
config EDAC_DECODE_MCE
tristate "Decode MCEs in human-readable form (only on AMD for now)"
- depends on CPU_SUP_AMD && X86_MCE
+ depends on CPU_SUP_AMD && X86_MCE_AMD
default y
---help---
Enable this option if you want to decode Machine Check Exceptions
@@ -71,9 +71,6 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
-config EDAC_MCE
- bool
-
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h"
depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
@@ -173,8 +170,7 @@ config EDAC_I5400
config EDAC_I7CORE
tristate "Intel i7 Core (Nehalem) processors"
- depends on EDAC_MM_EDAC && PCI && X86
- select EDAC_MCE
+ depends on EDAC_MM_EDAC && PCI && X86 && X86_MCE_INTEL
help
Support for error detection and correction the Intel
i7 Core (Nehalem) Integrated Memory Controller that exists on
@@ -216,6 +212,14 @@ config EDAC_I7300
Support for error detection and correction the Intel
Clarksboro MCH (Intel 7300 chipset).
+config EDAC_SBRIDGE
+ tristate "Intel Sandy-Bridge Integrated MC"
+ depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+ depends on EXPERIMENTAL
+ help
+ Support for error detection and correction the Intel
+ Sandy Bridge Integrated Memory Controller.
+
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx)
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 3e239133e29..196a63dd37c 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -8,7 +8,6 @@
obj-$(CONFIG_EDAC) := edac_stub.o
obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
-obj-$(CONFIG_EDAC_MCE) += edac_mce.o
edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
edac_core-y += edac_module.o edac_device_sysfs.o
@@ -29,6 +28,7 @@ obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
+obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9a8bebcf6b1..c9eee6d33e9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -114,10 +114,22 @@ static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
+/*
+ * Select DCT to which PCI cfg accesses are routed
+ */
+static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
+{
+ u32 reg = 0;
+
+ amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+ reg &= 0xfffffffe;
+ reg |= dct;
+ amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+}
+
static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
const char *func)
{
- u32 reg = 0;
u8 dct = 0;
if (addr >= 0x140 && addr <= 0x1a0) {
@@ -125,10 +137,7 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
addr -= 0x100;
}
- amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
- reg &= 0xfffffffe;
- reg |= dct;
- amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+ f15h_select_dct(pvt, dct);
return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
@@ -198,6 +207,10 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
if (boot_cpu_data.x86 == 0xf)
min_scrubrate = 0x0;
+ /* F15h Erratum #505 */
+ if (boot_cpu_data.x86 == 0x15)
+ f15h_select_dct(pvt, 0);
+
return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
@@ -207,6 +220,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
u32 scrubval = 0;
int i, retval = -EINVAL;
+ /* F15h Erratum #505 */
+ if (boot_cpu_data.x86 == 0x15)
+ f15h_select_dct(pvt, 0);
+
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
scrubval = scrubval & 0x001F;
@@ -751,10 +768,10 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
-static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
+static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
u8 bit;
- enum dev_type edac_cap = EDAC_FLAG_NONE;
+ unsigned long edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
? 19
@@ -1953,11 +1970,9 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
amd64_handle_ue(mci, m);
}
-void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
+void amd64_decode_bus_error(int node_id, struct mce *m)
{
- struct mem_ctl_info *mci = mcis[node_id];
-
- __amd64_decode_bus_error(mci, m);
+ __amd64_decode_bus_error(mcis[node_id], m);
}
/*
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a687a0d1696..a774c0ddaf5 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -90,6 +90,7 @@ enum apimask_bits {
ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
};
+#define APIMASK_ADI(n) CPC925_BIT(((n)+1))
/************************************************************
* Processor Interface Exception Register (APIEXCP)
@@ -581,16 +582,73 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
}
/******************** CPU err device********************************/
+static u32 cpc925_cpu_mask_disabled(void)
+{
+ struct device_node *cpus;
+ struct device_node *cpunode = NULL;
+ static u32 mask = 0;
+
+ /* use cached value if available */
+ if (mask != 0)
+ return mask;
+
+ mask = APIMASK_ADI0 | APIMASK_ADI1;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (cpus == NULL) {
+ cpc925_printk(KERN_DEBUG, "No /cpus node !\n");
+ return 0;
+ }
+
+ while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) {
+ const u32 *reg = of_get_property(cpunode, "reg", NULL);
+
+ if (strcmp(cpunode->type, "cpu")) {
+ cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name);
+ continue;
+ }
+
+ if (reg == NULL || *reg > 2) {
+ cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name);
+ continue;
+ }
+
+ mask &= ~APIMASK_ADI(*reg);
+ }
+
+ if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
+ /* We assume that each CPU sits on its own PI and that
+ * for present CPUs the reg property equals the PI
+ * interface id */
+ cpc925_printk(KERN_WARNING,
+ "Assuming PI id is equal to CPU MPIC id!\n");
+ }
+
+ of_node_put(cpunode);
+ of_node_put(cpus);
+
+ return mask;
+}
+
/* Enable CPU Errors detection */
static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
{
u32 apimask;
+ u32 cpumask;
apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
- if ((apimask & CPU_MASK_ENABLE) == 0) {
- apimask |= CPU_MASK_ENABLE;
- __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
+
+ cpumask = cpc925_cpu_mask_disabled();
+ if (apimask & cpumask) {
+ cpc925_printk(KERN_WARNING, "CPU(s) not present, "
+ "but enabled in APIMASK, disabling\n");
+ apimask &= ~cpumask;
}
+
+ if ((apimask & CPU_MASK_ENABLE) == 0)
+ apimask |= CPU_MASK_ENABLE;
+
+ __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
}
/* Disable CPU Errors detection */
@@ -622,6 +680,9 @@ static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
if ((apiexcp & CPU_EXCP_DETECTED) == 0)
return;
+ if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0)
+ return;
+
apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
"Processor Interface register dump:\n");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 55b8278bb17..fe90cd4a7eb 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -34,11 +34,10 @@
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/workqueue.h>
+#include <linux/edac.h>
-#define EDAC_MC_LABEL_LEN 31
#define EDAC_DEVICE_NAME_LEN 31
#define EDAC_ATTRIB_VALUE_LEN 15
-#define MC_PROC_NAME_MAX_LEN 7
#if PAGE_SHIFT < 20
#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
@@ -101,353 +100,6 @@ extern int edac_debug_level;
#define edac_dev_name(dev) (dev)->dev_name
-/* memory devices */
-enum dev_type {
- DEV_UNKNOWN = 0,
- DEV_X1,
- DEV_X2,
- DEV_X4,
- DEV_X8,
- DEV_X16,
- DEV_X32, /* Do these parts exist? */
- DEV_X64 /* Do these parts exist? */
-};
-
-#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
-#define DEV_FLAG_X1 BIT(DEV_X1)
-#define DEV_FLAG_X2 BIT(DEV_X2)
-#define DEV_FLAG_X4 BIT(DEV_X4)
-#define DEV_FLAG_X8 BIT(DEV_X8)
-#define DEV_FLAG_X16 BIT(DEV_X16)
-#define DEV_FLAG_X32 BIT(DEV_X32)
-#define DEV_FLAG_X64 BIT(DEV_X64)
-
-/* memory types */
-enum mem_type {
- MEM_EMPTY = 0, /* Empty csrow */
- MEM_RESERVED, /* Reserved csrow type */
- MEM_UNKNOWN, /* Unknown csrow type */
- MEM_FPM, /* Fast page mode */
- MEM_EDO, /* Extended data out */
- MEM_BEDO, /* Burst Extended data out */
- MEM_SDR, /* Single data rate SDRAM */
- MEM_RDR, /* Registered single data rate SDRAM */
- MEM_DDR, /* Double data rate SDRAM */
- MEM_RDDR, /* Registered Double data rate SDRAM */
- MEM_RMBS, /* Rambus DRAM */
- MEM_DDR2, /* DDR2 RAM */
- MEM_FB_DDR2, /* fully buffered DDR2 */
- MEM_RDDR2, /* Registered DDR2 RAM */
- MEM_XDR, /* Rambus XDR */
- MEM_DDR3, /* DDR3 RAM */
- MEM_RDDR3, /* Registered DDR3 RAM */
-};
-
-#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
-#define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
-#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
-#define MEM_FLAG_FPM BIT(MEM_FPM)
-#define MEM_FLAG_EDO BIT(MEM_EDO)
-#define MEM_FLAG_BEDO BIT(MEM_BEDO)
-#define MEM_FLAG_SDR BIT(MEM_SDR)
-#define MEM_FLAG_RDR BIT(MEM_RDR)
-#define MEM_FLAG_DDR BIT(MEM_DDR)
-#define MEM_FLAG_RDDR BIT(MEM_RDDR)
-#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
-#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
-#define MEM_FLAG_XDR BIT(MEM_XDR)
-#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
-#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-
-/* chipset Error Detection and Correction capabilities and mode */
-enum edac_type {
- EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
- EDAC_NONE, /* Doesn't support ECC */
- EDAC_RESERVED, /* Reserved ECC type */
- EDAC_PARITY, /* Detects parity errors */
- EDAC_EC, /* Error Checking - no correction */
- EDAC_SECDED, /* Single bit error correction, Double detection */
- EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
- EDAC_S4ECD4ED, /* Chipkill x4 devices */
- EDAC_S8ECD8ED, /* Chipkill x8 devices */
- EDAC_S16ECD16ED, /* Chipkill x16 devices */
-};
-
-#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
-#define EDAC_FLAG_NONE BIT(EDAC_NONE)
-#define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
-#define EDAC_FLAG_EC BIT(EDAC_EC)
-#define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
-#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
-#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
-#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
-#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
-
-/* scrubbing capabilities */
-enum scrub_type {
- SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
- SCRUB_NONE, /* No scrubber */
- SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
- SCRUB_SW_SRC, /* Software scrub only errors */
- SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
- SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
- SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
- SCRUB_HW_SRC, /* Hardware scrub only errors */
- SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
- SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
-};
-
-#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
-#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC)
-#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
-#define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_SCRUB_TUNABLE)
-#define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG)
-#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC)
-#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
-#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
-
-/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
-
-/* EDAC internal operation states */
-#define OP_ALLOC 0x100
-#define OP_RUNNING_POLL 0x201
-#define OP_RUNNING_INTERRUPT 0x202
-#define OP_RUNNING_POLL_INTR 0x203
-#define OP_OFFLINE 0x300
-
-/*
- * There are several things to be aware of that aren't at all obvious:
- *
- *
- * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
- *
- * These are some of the many terms that are thrown about that don't always
- * mean what people think they mean (Inconceivable!). In the interest of
- * creating a common ground for discussion, terms and their definitions
- * will be established.
- *
- * Memory devices: The individual chip on a memory stick. These devices
- * commonly output 4 and 8 bits each. Grouping several
- * of these in parallel provides 64 bits which is common
- * for a memory stick.
- *
- * Memory Stick: A printed circuit board that aggregates multiple
- * memory devices in parallel. This is the atomic
- * memory component that is purchaseable by Joe consumer
- * and loaded into a memory socket.
- *
- * Socket: A physical connector on the motherboard that accepts
- * a single memory stick.
- *
- * Channel: Set of memory devices on a memory stick that must be
- * grouped in parallel with one or more additional
- * channels from other memory sticks. This parallel
- * grouping of the output from multiple channels are
- * necessary for the smallest granularity of memory access.
- * Some memory controllers are capable of single channel -
- * which means that memory sticks can be loaded
- * individually. Other memory controllers are only
- * capable of dual channel - which means that memory
- * sticks must be loaded as pairs (see "socket set").
- *
- * Chip-select row: All of the memory devices that are selected together.
- * for a single, minimum grain of memory access.
- * This selects all of the parallel memory devices across
- * all of the parallel channels. Common chip-select rows
- * for single channel are 64 bits, for dual channel 128
- * bits.
- *
- * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
- * Motherboards commonly drive two chip-select pins to
- * a memory stick. A single-ranked stick, will occupy
- * only one of those rows. The other will be unused.
- *
- * Double-Ranked stick: A double-ranked stick has two chip-select rows which
- * access different sets of memory devices. The two
- * rows cannot be accessed concurrently.
- *
- * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
- * A double-sided stick has two chip-select rows which
- * access different sets of memory devices. The two
- * rows cannot be accessed concurrently. "Double-sided"
- * is irrespective of the memory devices being mounted
- * on both sides of the memory stick.
- *
- * Socket set: All of the memory sticks that are required for
- * a single memory access or all of the memory sticks
- * spanned by a chip-select row. A single socket set
- * has two chip-select rows and if double-sided sticks
- * are used these will occupy those chip-select rows.
- *
- * Bank: This term is avoided because it is unclear when
- * needing to distinguish between chip-select rows and
- * socket sets.
- *
- * Controller pages:
- *
- * Physical pages:
- *
- * Virtual pages:
- *
- *
- * STRUCTURE ORGANIZATION AND CHOICES
- *
- *
- *
- * PS - I enjoyed writing all that about as much as you enjoyed reading it.
- */
-
-struct channel_info {
- int chan_idx; /* channel index */
- u32 ce_count; /* Correctable Errors for this CHANNEL */
- char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
- struct csrow_info *csrow; /* the parent */
-};
-
-struct csrow_info {
- unsigned long first_page; /* first page number in dimm */
- unsigned long last_page; /* last page number in dimm */
- unsigned long page_mask; /* used for interleaving -
- * 0UL for non intlv
- */
- u32 nr_pages; /* number of pages in csrow */
- u32 grain; /* granularity of reported error in bytes */
- int csrow_idx; /* the chip-select row */
- enum dev_type dtype; /* memory device type */
- u32 ue_count; /* Uncorrectable Errors for this csrow */
- u32 ce_count; /* Correctable Errors for this csrow */
- enum mem_type mtype; /* memory csrow type */
- enum edac_type edac_mode; /* EDAC mode for this csrow */
- struct mem_ctl_info *mci; /* the parent */
-
- struct kobject kobj; /* sysfs kobject for this csrow */
-
- /* channel information for this csrow */
- u32 nr_channels;
- struct channel_info *channels;
-};
-
-struct mcidev_sysfs_group {
- const char *name; /* group name */
- const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
-};
-
-struct mcidev_sysfs_group_kobj {
- struct list_head list; /* list for all instances within a mc */
-
- struct kobject kobj; /* kobj for the group */
-
- const struct mcidev_sysfs_group *grp; /* group description table */
- struct mem_ctl_info *mci; /* the parent */
-};
-
-/* mcidev_sysfs_attribute structure
- * used for driver sysfs attributes and in mem_ctl_info
- * sysfs top level entries
- */
-struct mcidev_sysfs_attribute {
- /* It should use either attr or grp */
- struct attribute attr;
- const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
-
- /* Ops for show/store values at the attribute - not used on group */
- ssize_t (*show)(struct mem_ctl_info *,char *);
- ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
-};
-
-/* MEMORY controller information structure
- */
-struct mem_ctl_info {
- struct list_head link; /* for global list of mem_ctl_info structs */
-
- struct module *owner; /* Module owner of this control struct */
-
- unsigned long mtype_cap; /* memory types supported by mc */
- unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
- unsigned long edac_cap; /* configuration capabilities - this is
- * closely related to edac_ctl_cap. The
- * difference is that the controller may be
- * capable of s4ecd4ed which would be listed
- * in edac_ctl_cap, but if channels aren't
- * capable of s4ecd4ed then the edac_cap would
- * not have that capability.
- */
- unsigned long scrub_cap; /* chipset scrub capabilities */
- enum scrub_type scrub_mode; /* current scrub mode */
-
- /* Translates sdram memory scrub rate given in bytes/sec to the
- internal representation and configures whatever else needs
- to be configured.
- */
- int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);
-
- /* Get the current sdram memory scrub rate from the internal
- representation and converts it to the closest matching
- bandwidth in bytes/sec.
- */
- int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
-
-
- /* pointer to edac checking routine */
- void (*edac_check) (struct mem_ctl_info * mci);
-
- /*
- * Remaps memory pages: controller pages to physical pages.
- * For most MC's, this will be NULL.
- */
- /* FIXME - why not send the phys page to begin with? */
- unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
- unsigned long page);
- int mc_idx;
- int nr_csrows;
- struct csrow_info *csrows;
- /*
- * FIXME - what about controllers on other busses? - IDs must be
- * unique. dev pointer should be sufficiently unique, but
- * BUS:SLOT.FUNC numbers may not be unique.
- */
- struct device *dev;
- const char *mod_name;
- const char *mod_ver;
- const char *ctl_name;
- const char *dev_name;
- char proc_name[MC_PROC_NAME_MAX_LEN + 1];
- void *pvt_info;
- u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
- u32 ce_noinfo_count; /* Correctable Errors w/o info */
- u32 ue_count; /* Total Uncorrectable Errors for this MC */
- u32 ce_count; /* Total Correctable Errors for this MC */
- unsigned long start_time; /* mci load start time (in jiffies) */
-
- struct completion complete;
-
- /* edac sysfs device control */
- struct kobject edac_mci_kobj;
-
- /* list for all grp instances within a mc */
- struct list_head grp_kobj_list;
-
- /* Additional top controller level attributes, but specified
- * by the low level driver.
- *
- * Set by the low level driver to provide attributes at the
- * controller level, same level as 'ue_count' and 'ce_count' above.
- * An array of structures, NULL terminated
- *
- * If attributes are desired, then set to array of attributes
- * If no attributes are desired, leave NULL
- */
- const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
-
- /* work struct for this MC */
- struct delayed_work work;
-
- /* the internal state of this controller instance */
- int op_state;
-};
-
/*
* The following are the structures to provide for a generic
* or abstract 'edac_device'. This set of structures and the
diff --git a/drivers/edac/edac_mce.c b/drivers/edac/edac_mce.c
deleted file mode 100644
index 9ccdc5b140e..00000000000
--- a/drivers/edac/edac_mce.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Provides edac interface to mcelog events
- *
- * This file may be distributed under the terms of the
- * GNU General Public License version 2.
- *
- * Copyright (c) 2009 by:
- * Mauro Carvalho Chehab <mchehab@redhat.com>
- *
- * Red Hat Inc. http://www.redhat.com
- */
-
-#include <linux/module.h>
-#include <linux/edac_mce.h>
-#include <asm/mce.h>
-
-int edac_mce_enabled;
-EXPORT_SYMBOL_GPL(edac_mce_enabled);
-
-
-/*
- * Extension interface
- */
-
-static LIST_HEAD(edac_mce_list);
-static DEFINE_MUTEX(edac_mce_lock);
-
-int edac_mce_register(struct edac_mce *edac_mce)
-{
- mutex_lock(&edac_mce_lock);
- list_add_tail(&edac_mce->list, &edac_mce_list);
- mutex_unlock(&edac_mce_lock);
- return 0;
-}
-EXPORT_SYMBOL(edac_mce_register);
-
-void edac_mce_unregister(struct edac_mce *edac_mce)
-{
- mutex_lock(&edac_mce_lock);
- list_del(&edac_mce->list);
- mutex_unlock(&edac_mce_lock);
-}
-EXPORT_SYMBOL(edac_mce_unregister);
-
-int edac_mce_parse(struct mce *mce)
-{
- struct edac_mce *edac_mce;
-
- list_for_each_entry(edac_mce, &edac_mce_list, list) {
- if (edac_mce->check_error(edac_mce->priv, mce))
- return 1;
- }
-
- /* Nobody queued the error */
- return 0;
-}
-EXPORT_SYMBOL_GPL(edac_mce_parse);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
-MODULE_DESCRIPTION("EDAC Driver for mcelog captured errors");
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index a76fe8366b6..6104dba380b 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -372,7 +372,7 @@ static const char *get_err_from_table(const char *table[], int size, int pos)
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
- u32 errnum, value;
+ u32 errnum, error_reg;
unsigned long errors;
const char *specific;
bool is_fatal;
@@ -381,9 +381,9 @@ static void i7300_process_error_global(struct mem_ctl_info *mci)
/* read in the 1st FATAL error register */
pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_HI, &value);
- if (unlikely(value)) {
- errors = value;
+ FERR_GLOBAL_HI, &error_reg);
+ if (unlikely(error_reg)) {
+ errors = error_reg;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_global_hi_name));
specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
@@ -391,15 +391,15 @@ static void i7300_process_error_global(struct mem_ctl_info *mci)
/* Clear the error bit */
pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_HI, value);
+ FERR_GLOBAL_HI, error_reg);
goto error_global;
}
pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_LO, &value);
- if (unlikely(value)) {
- errors = value;
+ FERR_GLOBAL_LO, &error_reg);
+ if (unlikely(error_reg)) {
+ errors = error_reg;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_global_lo_name));
specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
@@ -407,7 +407,7 @@ static void i7300_process_error_global(struct mem_ctl_info *mci)
/* Clear the error bit */
pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_LO, value);
+ FERR_GLOBAL_LO, error_reg);
goto error_global;
}
@@ -427,7 +427,7 @@ error_global:
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
- u32 errnum, value;
+ u32 errnum, value, error_reg;
u16 val16;
unsigned branch, channel, bank, rank, cas, ras;
u32 syndrome;
@@ -440,14 +440,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
/* read in the 1st FATAL error register */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_FAT_FBD, &value);
- if (unlikely(value & FERR_FAT_FBD_ERR_MASK)) {
- errors = value & FERR_FAT_FBD_ERR_MASK ;
+ FERR_FAT_FBD, &error_reg);
+ if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
+ errors = error_reg & FERR_FAT_FBD_ERR_MASK ;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_fat_fbd_name));
specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
+ branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
- branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0;
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
NRECMEMA, &val16);
bank = NRECMEMA_BANK(val16);
@@ -455,11 +455,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
NRECMEMB, &value);
-
is_wr = NRECMEMB_IS_WR(value);
cas = NRECMEMB_CAS(value);
ras = NRECMEMB_RAS(value);
+ /* Clean the error register */
+ pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
+ FERR_FAT_FBD, error_reg);
+
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
"FATAL (Branch=%d DRAM-Bank=%d %s "
"RAS=%d CAS=%d Err=0x%lx (%s))",
@@ -476,21 +479,17 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
/* read in the 1st NON-FATAL error register */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
- FERR_NF_FBD, &value);
- if (unlikely(value & FERR_NF_FBD_ERR_MASK)) {
- errors = value & FERR_NF_FBD_ERR_MASK;
+ FERR_NF_FBD, &error_reg);
+ if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
+ errors = error_reg & FERR_NF_FBD_ERR_MASK;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_nf_fbd_name));
specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
-
- /* Clear the error bit */
- pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
- FERR_GLOBAL_LO, value);
+ branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
REDMEMA, &syndrome);
- branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0;
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
RECMEMA, &val16);
bank = RECMEMA_BANK(val16);
@@ -498,18 +497,20 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
RECMEMB, &value);
-
is_wr = RECMEMB_IS_WR(value);
cas = RECMEMB_CAS(value);
ras = RECMEMB_RAS(value);
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
REDMEMB, &value);
-
channel = (branch << 1);
if (IS_SECOND_CH(value))
channel++;
+ /* Clear the error bit */
+ pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
+ FERR_NF_FBD, error_reg);
+
/* Form out message */
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
"Corrected error (Branch=%d, Channel %d), "
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index f6cf448d69b..70ad8923f1d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -31,11 +31,13 @@
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
-#include <linux/edac_mce.h>
#include <linux/smp.h>
+#include <asm/mce.h>
#include <asm/processor.h>
+#include <asm/div64.h>
#include "edac_core.h"
@@ -78,6 +80,8 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/* OFFSETS for Device 0 Function 0 */
#define MC_CFG_CONTROL 0x90
+ #define MC_CFG_UNLOCK 0x02
+ #define MC_CFG_LOCK 0x00
/* OFFSETS for Device 3 Function 0 */
@@ -98,6 +102,15 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
#define DIMM0_COR_ERR(r) ((r) & 0x7fff)
/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
+#define MC_SSRCONTROL 0x48
+ #define SSR_MODE_DISABLE 0x00
+ #define SSR_MODE_ENABLE 0x01
+ #define SSR_MODE_MASK 0x03
+
+#define MC_SCRUB_CONTROL 0x4c
+ #define STARTSCRUB (1 << 24)
+ #define SCRUBINTERVAL_MASK 0xffffff
+
#define MC_COR_ECC_CNT_0 0x80
#define MC_COR_ECC_CNT_1 0x84
#define MC_COR_ECC_CNT_2 0x88
@@ -253,10 +266,7 @@ struct i7core_pvt {
unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
- unsigned int is_registered;
-
- /* mcelog glue */
- struct edac_mce edac_mce;
+ bool is_registered, enable_scrub;
/* Fifo double buffers */
struct mce mce_entry[MCE_LOG_LEN];
@@ -268,6 +278,9 @@ struct i7core_pvt {
/* Count indicator to show errors not got */
unsigned mce_overrun;
+ /* DCLK Frequency used for computing scrub rate */
+ int dclk_freq;
+
/* Struct to control EDAC polling */
struct edac_pci_ctl_info *i7core_pci;
};
@@ -281,8 +294,7 @@ static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
/* Memory controller */
{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
-
- /* Exists only for RDIMM */
+ /* Exists only for RDIMM */
{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
@@ -303,6 +315,16 @@ static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
+
+ /* Generic Non-core registers */
+ /*
+ * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
+ * On Xeon 55xx, however, it has a different id (8086:2c40), so
+ * the probing code needs to test for the other address in case
+ * this one fails.
+ */
+ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
+
};
static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
@@ -319,6 +341,12 @@ static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
+
+ /*
+ * This PCI device has an alternate address on some
+ * processors, such as the Core i7 860.
+ */
+ { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
};
static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
@@ -346,6 +374,10 @@ static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
+
+ /* Generic Non-core registers */
+ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
+
};
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
@@ -714,6 +746,10 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
csr->edac_mode = mode;
csr->mtype = mtype;
+ snprintf(csr->channels[0].label,
+ sizeof(csr->channels[0].label),
+ "CPU#%uChannel#%u_DIMM#%u",
+ pvt->i7core_dev->socket, i, j);
csrow++;
}
@@ -731,7 +767,7 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
debugf1("\t\t%#x\t%#x\t%#x\n",
(value[j] >> 27) & 0x1,
(value[j] >> 24) & 0x7,
- (value[j] && ((1 << 24) - 1)));
+ (value[j] & ((1 << 24) - 1)));
}
return 0;
@@ -1324,6 +1360,20 @@ static int i7core_get_onedevice(struct pci_dev **prev,
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
dev_descr->dev_id, *prev);
+ /*
+ * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
+ * are at addr 8086:2c40 instead of 8086:2c41, so we need
+ * to probe for the alternate address in case of failure
+ */
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
+ *prev);
+
if (!pdev) {
if (*prev) {
*prev = pdev;
@@ -1444,8 +1494,10 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
struct i7core_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
int i, func, slot;
+ char *family;
- pvt->is_registered = 0;
+ pvt->is_registered = false;
+ pvt->enable_scrub = false;
for (i = 0; i < i7core_dev->n_devs; i++) {
pdev = i7core_dev->pdev[i];
if (!pdev)
@@ -1461,9 +1513,37 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
if (unlikely(func > MAX_CHAN_FUNC))
goto error;
pvt->pci_ch[slot - 4][func] = pdev;
- } else if (!slot && !func)
+ } else if (!slot && !func) {
pvt->pci_noncore = pdev;
- else
+
+ /* Detect the processor family */
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_I7_NONCORE:
+ family = "Xeon 35xx/ i7core";
+ pvt->enable_scrub = false;
+ break;
+ case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
+ family = "i7-800/i5-700";
+ pvt->enable_scrub = false;
+ break;
+ case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
+ family = "Xeon 34xx";
+ pvt->enable_scrub = false;
+ break;
+ case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
+ family = "Xeon 55xx";
+ pvt->enable_scrub = true;
+ break;
+ case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
+ family = "Xeon 56xx / i7-900";
+ pvt->enable_scrub = true;
+ break;
+ default:
+ family = "unknown";
+ pvt->enable_scrub = false;
+ }
+ debugf0("Detected a processor type %s\n", family);
+ } else
goto error;
debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
@@ -1472,7 +1552,7 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
if (PCI_SLOT(pdev->devfn) == 3 &&
PCI_FUNC(pdev->devfn) == 2)
- pvt->is_registered = 1;
+ pvt->is_registered = true;
}
return 0;
@@ -1826,33 +1906,43 @@ check_ce_error:
* WARNING: As this routine should be called at NMI time, extra care should
* be taken to avoid deadlocks, and to be as fast as possible.
*/
-static int i7core_mce_check_error(void *priv, struct mce *mce)
+static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ void *data)
{
- struct mem_ctl_info *mci = priv;
- struct i7core_pvt *pvt = mci->pvt_info;
+ struct mce *mce = (struct mce *)data;
+ struct i7core_dev *i7_dev;
+ struct mem_ctl_info *mci;
+ struct i7core_pvt *pvt;
+
+ i7_dev = get_i7core_dev(mce->socketid);
+ if (!i7_dev)
+ return NOTIFY_BAD;
+
+ mci = i7_dev->mci;
+ pvt = mci->pvt_info;
/*
* Just let mcelog handle it if the error is
* outside the memory controller
*/
if (((mce->status & 0xffff) >> 7) != 1)
- return 0;
+ return NOTIFY_DONE;
/* Bank 8 registers are the only ones that we know how to handle */
if (mce->bank != 8)
- return 0;
+ return NOTIFY_DONE;
#ifdef CONFIG_SMP
/* Only handle if it is the right mc controller */
- if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
- return 0;
+ if (mce->socketid != pvt->i7core_dev->socket)
+ return NOTIFY_DONE;
#endif
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
smp_wmb();
pvt->mce_overrun++;
- return 0;
+ return NOTIFY_DONE;
}
/* Copy memory error at the ringbuffer */
@@ -1865,7 +1955,240 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
i7core_check_error(mci);
/* Advise mcelog that the errors were handled */
- return 1;
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block i7_mce_dec = {
+ .notifier_call = i7core_mce_check_error,
+};
+
+struct memdev_dmi_entry {
+ u8 type;
+ u8 length;
+ u16 handle;
+ u16 phys_mem_array_handle;
+ u16 mem_err_info_handle;
+ u16 total_width;
+ u16 data_width;
+ u16 size;
+ u8 form;
+ u8 device_set;
+ u8 device_locator;
+ u8 bank_locator;
+ u8 memory_type;
+ u16 type_detail;
+ u16 speed;
+ u8 manufacturer;
+ u8 serial_number;
+ u8 asset_tag;
+ u8 part_number;
+ u8 attributes;
+ u32 extended_size;
+ u16 conf_mem_clk_speed;
+} __attribute__((__packed__));
+
+
+/*
+ * Decode the DRAM Clock Frequency, be paranoid, make sure that all
+ * memory devices show the same speed, and if they don't then consider
+ * all speeds to be invalid.
+ */
+static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
+{
+ int *dclk_freq = _dclk_freq;
+ u16 dmi_mem_clk_speed;
+
+ if (*dclk_freq == -1)
+ return;
+
+ if (dh->type == DMI_ENTRY_MEM_DEVICE) {
+ struct memdev_dmi_entry *memdev_dmi_entry =
+ (struct memdev_dmi_entry *)dh;
+ unsigned long conf_mem_clk_speed_offset =
+ (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
+ (unsigned long)&memdev_dmi_entry->type;
+ unsigned long speed_offset =
+ (unsigned long)&memdev_dmi_entry->speed -
+ (unsigned long)&memdev_dmi_entry->type;
+
+ /* Check that a DIMM is present */
+ if (memdev_dmi_entry->size == 0)
+ return;
+
+ /*
+ * Pick the configured speed if it's available, otherwise
+ * pick the DIMM speed, or we don't have a speed.
+ */
+ if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
+ dmi_mem_clk_speed =
+ memdev_dmi_entry->conf_mem_clk_speed;
+ } else if (memdev_dmi_entry->length > speed_offset) {
+ dmi_mem_clk_speed = memdev_dmi_entry->speed;
+ } else {
+ *dclk_freq = -1;
+ return;
+ }
+
+ if (*dclk_freq == 0) {
+ /* First pass, speed was 0 */
+ if (dmi_mem_clk_speed > 0) {
+ /* Set speed if a valid speed is read */
+ *dclk_freq = dmi_mem_clk_speed;
+ } else {
+ /* Otherwise we don't have a valid speed */
+ *dclk_freq = -1;
+ }
+ } else if (*dclk_freq > 0 &&
+ *dclk_freq != dmi_mem_clk_speed) {
+ /*
+ * If we have a speed, check that all DIMMs are the same
+ * speed; otherwise mark the speed as invalid.
+ */
+ *dclk_freq = -1;
+ }
+ }
+}
+
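The pointer arithmetic in decode_dclk() above computes each field's byte offset from the start of the SMBIOS memory-device entry so it can be compared against the entry length reported by the BIOS; older BIOSes ship shorter entries, and reading past the declared length would return garbage. Since type is the first member, the computation is equivalent to offsetof(); a sketch of the same bounds check written that way (illustration only):

    if (memdev_dmi_entry->length > offsetof(struct memdev_dmi_entry,
                                            conf_mem_clk_speed))
        dmi_mem_clk_speed = memdev_dmi_entry->conf_mem_clk_speed;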
+/*
+ * The default DCLK frequency is used as a fallback if we
+ * fail to find anything reliable in the DMI. The value
+ * is taken straight from the datasheet.
+ */
+#define DEFAULT_DCLK_FREQ 800
+
+static int get_dclk_freq(void)
+{
+ int dclk_freq = 0;
+
+ dmi_walk(decode_dclk, (void *)&dclk_freq);
+
+ if (dclk_freq < 1)
+ return DEFAULT_DCLK_FREQ;
+
+ return dclk_freq;
+}
+
+/*
+ * set_sdram_scrub_rate This routine sets the byte/sec bandwidth scrub rate
+ * in hardware according to the SCRUBINTERVAL formula
+ * found in the datasheet.
+ */
+static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ u32 dw_scrub;
+ u32 dw_ssr;
+
+ /* Get data from the MC register, function 2 */
+ pdev = pvt->pci_mcr[2];
+ if (!pdev)
+ return -ENODEV;
+
+ pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
+
+ if (new_bw == 0) {
+ /* Prepare to disable patrol scrub */
+ dw_scrub &= ~STARTSCRUB;
+ /* Stop the patrol scrub engine */
+ write_and_test(pdev, MC_SCRUB_CONTROL,
+ dw_scrub & ~SCRUBINTERVAL_MASK);
+
+ /* Get current status of scrub rate and set bit to disable */
+ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
+ dw_ssr &= ~SSR_MODE_MASK;
+ dw_ssr |= SSR_MODE_DISABLE;
+ } else {
+ const int cache_line_size = 64;
+ const u32 freq_dclk_mhz = pvt->dclk_freq;
+ unsigned long long scrub_interval;
+ /*
+ * Translate the desired scrub rate to a register value and
+ * program the corresponding register value.
+ */
+ scrub_interval = (unsigned long long)freq_dclk_mhz *
+ cache_line_size * 1000000;
+ do_div(scrub_interval, new_bw);
+
+ if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
+ return -EINVAL;
+
+ dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
+
+ /* Start the patrol scrub engine */
+ pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
+ STARTSCRUB | dw_scrub);
+
+ /* Get current status of scrub rate and set bit to enable */
+ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
+ dw_ssr &= ~SSR_MODE_MASK;
+ dw_ssr |= SSR_MODE_ENABLE;
+ }
+ /* Disable or enable scrubbing */
+ pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
+
+ return new_bw;
+}
+
+/*
+ * get_sdram_scrub_rate This routine converts the current scrub rate value
+ * into byte/sec bandwidth according to the
+ * SCRUBINTERVAL formula found in the datasheet.
+ */
+static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ const u32 cache_line_size = 64;
+ const u32 freq_dclk_mhz = pvt->dclk_freq;
+ unsigned long long scrub_rate;
+ u32 scrubval;
+
+ /* Get data from the MC register, function 2 */
+ pdev = pvt->pci_mcr[2];
+ if (!pdev)
+ return -ENODEV;
+
+ /* Get current scrub control data */
+ pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
+
+ /* Mask highest 8-bits to 0 */
+ scrubval &= SCRUBINTERVAL_MASK;
+ if (!scrubval)
+ return 0;
+
+ /* Calculate scrub rate value into byte/sec bandwidth */
+ scrub_rate = (unsigned long long)freq_dclk_mhz *
+ 1000000 * cache_line_size;
+ do_div(scrub_rate, scrubval);
+ return (int)scrub_rate;
+}
+
+static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 pci_lock;
+
+ /* Unlock writes to pci registers */
+ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
+ pci_lock &= ~0x3;
+ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
+ pci_lock | MC_CFG_UNLOCK);
+
+ mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
+ mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
+}
+
+static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 pci_lock;
+
+ /* Lock writes to pci registers */
+ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
+ pci_lock &= ~0x3;
+ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
+ pci_lock | MC_CFG_LOCK);
}
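As a quick worked example of the SCRUBINTERVAL conversion above, assuming the DEFAULT_DCLK_FREQ of 800 MHz and the 64-byte cache line used by the driver: asking set_sdram_scrub_rate() for 10,000,000 bytes/sec gives

    scrub_interval = 800 * 64 * 1,000,000 / 10,000,000 = 5,120 dclk cycles,

which fits easily in the 24-bit SCRUBINTERVAL field (maximum 0xffffff). Reading it back, get_sdram_scrub_rate() computes 800,000,000 * 64 / 5,120 = 10,000,000 bytes/sec, so the round trip is exact here; the slowest programmable rate, scrubval == 0xffffff, works out to roughly 3 KB/s.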
static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
@@ -1874,7 +2197,8 @@ static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
&pvt->i7core_dev->pdev[0]->dev,
EDAC_MOD_STR);
if (unlikely(!pvt->i7core_pci))
- pr_warn("Unable to setup PCI error report via EDAC\n");
+ i7core_printk(KERN_WARNING,
+ "Unable to setup PCI error report via EDAC\n");
}
static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
@@ -1906,8 +2230,11 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &i7core_dev->pdev[0]->dev);
- /* Disable MCE NMI handler */
- edac_mce_unregister(&pvt->edac_mce);
+ /* Disable scrubrate setting */
+ if (pvt->enable_scrub)
+ disable_sdram_scrub_setting(mci);
+
+ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
@@ -1979,6 +2306,10 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* Set the function pointer to an actual operation function */
mci->edac_check = i7core_check_error;
+ /* Enable scrubrate setting */
+ if (pvt->enable_scrub)
+ enable_sdram_scrub_setting(mci);
+
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
debugf0("MC: " __FILE__
@@ -2002,21 +2333,13 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* allocating generic PCI control info */
i7core_pci_ctl_create(pvt);
- /* Registers on edac_mce in order to receive memory errors */
- pvt->edac_mce.priv = mci;
- pvt->edac_mce.check_error = i7core_mce_check_error;
- rc = edac_mce_register(&pvt->edac_mce);
- if (unlikely(rc < 0)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mce_register()\n", __func__);
- goto fail1;
- }
+ /* DCLK for scrub rate setting */
+ pvt->dclk_freq = get_dclk_freq();
+
+ atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
return 0;
-fail1:
- i7core_pci_ctl_release(pvt);
- edac_mc_del_mc(mci->dev);
fail0:
kfree(mci->ctl_name);
edac_mc_free(mci);
@@ -2035,7 +2358,7 @@ fail0:
static int __devinit i7core_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int rc;
+ int rc, count = 0;
struct i7core_dev *i7core_dev;
/* get the pci devices we want to reserve for our use */
@@ -2055,12 +2378,28 @@ static int __devinit i7core_probe(struct pci_dev *pdev,
goto fail0;
list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ count++;
rc = i7core_register_mci(i7core_dev);
if (unlikely(rc < 0))
goto fail1;
}
- i7core_printk(KERN_INFO, "Driver loaded.\n");
+ /*
+ * Nehalem-EX uses a different memory controller. However, as the
+ * memory controller is not visible on some Nehalem/Nehalem-EP, we
+ * need to probe indirectly via an X58 PCI device. The same devices
+ * are found on (some) Nehalem-EX. So, on those machines, the
+ * probe routine needs to return -ENODEV, as the actual Memory
+ * Controller registers won't be detected.
+ */
+ if (!count) {
+ rc = -ENODEV;
+ goto fail1;
+ }
+
+ i7core_printk(KERN_INFO,
+ "Driver loaded, %d memory controller(s) found.\n",
+ count);
mutex_unlock(&i7core_edac_lock);
return 0;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 795cfbc0bf5..d0864d9c38a 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -9,7 +9,7 @@ static u8 xec_mask = 0xf;
static u8 nb_err_cpumask = 0xf;
static bool report_gart_errors;
-static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg);
+static void (*nb_bus_decoder)(int node_id, struct mce *m);
void amd_report_gart_errors(bool v)
{
@@ -17,13 +17,13 @@ void amd_report_gart_errors(bool v)
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);
-void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32))
+void amd_register_ecc_decoder(void (*f)(int, struct mce *))
{
nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
-void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32))
+void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
{
if (nb_bus_decoder) {
WARN_ON(nb_bus_decoder != f);
@@ -592,31 +592,14 @@ static bool nb_noop_mce(u16 ec, u8 xec)
return false;
}
-void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
+void amd_decode_nb_mce(struct mce *m)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, 0x1f);
- u32 nbsh = (u32)(m->status >> 32);
- int core = -1;
-
- pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);
-
- /* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
- if (c->x86 == 0x10 && c->x86_model > 7) {
- if (nbsh & NBSH_ERR_CPU_VAL)
- core = nbsh & nb_err_cpumask;
- } else {
- u8 assoc_cpus = nbsh & nb_err_cpumask;
-
- if (assoc_cpus > 0)
- core = fls(assoc_cpus) - 1;
- }
+ int node_id = amd_get_nb_id(m->extcpu);
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, 0x1f);
- if (core >= 0)
- pr_cont(", core %d): ", core);
- else
- pr_cont("): ");
+ pr_emerg(HW_ERR "Northbridge Error (node %d): ", node_id);
switch (xec) {
case 0x2:
@@ -648,7 +631,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
- nb_bus_decoder(node_id, m, nbcfg);
+ nb_bus_decoder(node_id, m);
return;
@@ -764,13 +747,13 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
struct mce *m = (struct mce *)data;
struct cpuinfo_x86 *c = &boot_cpu_data;
- int node, ecc;
+ int ecc;
if (amd_filter_mce(m))
return NOTIFY_STOP;
- pr_emerg(HW_ERR "MC%d_STATUS[%s|%s|%s|%s|%s",
- m->bank,
+ pr_emerg(HW_ERR "CPU:%d\tMC%d_STATUS[%s|%s|%s|%s|%s",
+ m->extcpu, m->bank,
((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
((m->status & MCI_STATUS_UC) ? "UE" : "CE"),
((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
@@ -789,6 +772,8 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
pr_cont("]: 0x%016llx\n", m->status);
+ if (m->status & MCI_STATUS_ADDRV)
+ pr_emerg(HW_ERR "\tMC%d_ADDR: 0x%016llx\n", m->bank, m->addr);
switch (m->bank) {
case 0:
@@ -811,8 +796,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
break;
case 4:
- node = amd_get_nb_id(m->extcpu);
- amd_decode_nb_mce(node, m, 0);
+ amd_decode_nb_mce(m);
break;
case 5:
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 795a3206acf..0106747e240 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -86,9 +86,9 @@ struct amd_decoder_ops {
};
void amd_report_gart_errors(bool);
-void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
-void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32));
-void amd_decode_nb_mce(int, struct mce *, u32);
+void amd_register_ecc_decoder(void (*f)(int, struct mce *));
+void amd_unregister_ecc_decoder(void (*f)(int, struct mce *));
+void amd_decode_nb_mce(struct mce *);
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data);
#endif /* _EDAC_MCE_AMD_H */
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index a4987e03f59..73c3e26a0bc 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -13,6 +13,7 @@
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/edac.h>
+#include <linux/module.h>
#include <asm/mce.h>
#include "mce_amd.h"
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 0de7d877089..38400963e24 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -205,7 +205,7 @@ static struct platform_driver ppc4xx_edac_driver = {
.remove = ppc4xx_edac_remove,
.driver = {
.owner = THIS_MODULE,
- .name = PPC4XX_EDAC_MODULE_NAME
+ .name = PPC4XX_EDAC_MODULE_NAME,
.of_match_table = ppc4xx_edac_match,
},
};
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
new file mode 100644
index 00000000000..7a402bfbee7
--- /dev/null
+++ b/drivers/edac/sb_edac.c
@@ -0,0 +1,1893 @@
+/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
+ *
+ * This driver supports the memory controllers found on the Intel
+ * Sandy Bridge processor family.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2 only.
+ *
+ * Copyright (c) 2011 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+/* Static vars */
+static LIST_HEAD(sbridge_edac_list);
+static DEFINE_MUTEX(sbridge_edac_lock);
+static int probed;
+
+/*
+ * Alter this version for the module when modifications are made
+ */
+#define SBRIDGE_REVISION " Ver: 1.0.0 "
+#define EDAC_MOD_STR "sbridge_edac"
+
+/*
+ * Debug macros
+ */
+#define sbridge_printk(level, fmt, arg...) \
+ edac_printk(level, "sbridge", fmt, ##arg)
+
+#define sbridge_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+ (((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))
+
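A quick worked example of the helper above: GET_BITFIELD(0x3cf4, 4, 7) expands to ((0x3cf4 & 0xf0) >> 4), i.e. 0xf. The SAD, TAD and RIR extraction macros that follow are all thin wrappers around this.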
+/*
+ * sbridge Memory Controller Registers
+ */
+
+/*
+ * FIXME: For now, let's order by device function, as it makes the
+ * driver's development process easier. This table should be
+ * moved to pci_id.h when submitted upstream.
+ */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
+
+ /*
+ * Currently unused, but will be needed in future
+ * implementations, as they hold the error counters.
+ */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
+
+/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
+static const u32 dram_rule[] = {
+ 0x80, 0x88, 0x90, 0x98, 0xa0,
+ 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
+};
+#define MAX_SAD ARRAY_SIZE(dram_rule)
+
+#define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
+#define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
+#define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1)
+#define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
+
+static char *get_dram_attr(u32 reg)
+{
+ switch(DRAM_ATTR(reg)) {
+ case 0:
+ return "DRAM";
+ case 1:
+ return "MMCFG";
+ case 2:
+ return "NXM";
+ default:
+ return "unknown";
+ }
+}
+
+static const u32 interleave_list[] = {
+ 0x84, 0x8c, 0x94, 0x9c, 0xa4,
+ 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
+};
+#define MAX_INTERLEAVE ARRAY_SIZE(interleave_list)
+
+#define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2)
+#define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5)
+#define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10)
+#define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13)
+#define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18)
+#define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21)
+#define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26)
+#define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29)
+
+static inline int sad_pkg(u32 reg, int interleave)
+{
+ switch (interleave) {
+ case 0:
+ return SAD_PKG0(reg);
+ case 1:
+ return SAD_PKG1(reg);
+ case 2:
+ return SAD_PKG2(reg);
+ case 3:
+ return SAD_PKG3(reg);
+ case 4:
+ return SAD_PKG4(reg);
+ case 5:
+ return SAD_PKG5(reg);
+ case 6:
+ return SAD_PKG6(reg);
+ case 7:
+ return SAD_PKG7(reg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Devices 12 Function 7 */
+
+#define TOLM 0x80
+#define TOHM 0x84
+
+#define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
+#define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
+
+/* Device 13 Function 6 */
+
+#define SAD_TARGET 0xf0
+
+#define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
+
+#define SAD_CONTROL 0xf4
+
+#define NODE_ID(reg) GET_BITFIELD(reg, 0, 2)
+
+/* Device 14 function 0 */
+
+static const u32 tad_dram_rule[] = {
+ 0x40, 0x44, 0x48, 0x4c,
+ 0x50, 0x54, 0x58, 0x5c,
+ 0x60, 0x64, 0x68, 0x6c,
+};
+#define MAX_TAD ARRAY_SIZE(tad_dram_rule)
+
+#define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
+#define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
+#define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
+#define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
+#define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
+#define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
+#define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
+
+/* Device 15, function 0 */
+
+#define MCMTR 0x7c
+
+#define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
+#define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
+#define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
+
+/* Device 15, function 1 */
+
+#define RASENABLES 0xac
+#define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
+
+/* Device 15, functions 2-5 */
+
+static const int mtr_regs[] = {
+ 0x80, 0x84, 0x88,
+};
+
+#define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
+#define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
+#define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
+#define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
+#define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
+
+static const u32 tad_ch_nilv_offset[] = {
+ 0x90, 0x94, 0x98, 0x9c,
+ 0xa0, 0xa4, 0xa8, 0xac,
+ 0xb0, 0xb4, 0xb8, 0xbc,
+};
+#define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
+#define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
+
+static const u32 rir_way_limit[] = {
+ 0x108, 0x10c, 0x110, 0x114, 0x118,
+};
+#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
+
+#define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
+#define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
+#define RIR_LIMIT(reg)		((GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff)
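+/*
+ * Worked example (illustrative): a rir_way_limit value whose bits 10:1 are
+ * 0x7 yields RIR_LIMIT() == 0xffffffff, an inclusive limit at 4 GB - 1,
+ * since RIR limits are kept in 512 MB granularity.
+ */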
+
+#define MAX_RIR_WAY 8
+
+static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
+ { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
+ { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
+ { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
+ { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
+ { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
+};
+
+#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
+#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
+
+/* Device 16, functions 2-7 */
+
+/*
+ * FIXME: Implement the error count reads directly
+ */
+
+static const u32 correrrcnt[] = {
+ 0x104, 0x108, 0x10c, 0x110,
+};
+
+#define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
+#define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
+#define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
+#define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
+
+static const u32 correrrthrsld[] = {
+ 0x11c, 0x120, 0x124, 0x128,
+};
+
+#define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
+#define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
+
+
+/* Device 17, function 0 */
+
+#define RANK_CFG_A 0x0328
+
+#define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11)
+
+/*
+ * sbridge structs
+ */
+
+#define NUM_CHANNELS 4
+#define MAX_DIMMS 3 /* Max DIMMS per channel */
+
+struct sbridge_info {
+ u32 mcmtr;
+};
+
+struct sbridge_channel {
+ u32 ranks;
+ u32 dimms;
+};
+
+struct pci_id_descr {
+ int dev;
+ int func;
+ int dev_id;
+ int optional;
+};
+
+struct pci_id_table {
+ const struct pci_id_descr *descr;
+ int n_devs;
+};
+
+struct sbridge_dev {
+ struct list_head list;
+ u8 bus, mc;
+ u8 node_id, source_id;
+ struct pci_dev **pdev;
+ int n_devs;
+ struct mem_ctl_info *mci;
+};
+
+struct sbridge_pvt {
+ struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
+ struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0;
+ struct pci_dev *pci_br;
+ struct pci_dev *pci_tad[NUM_CHANNELS];
+
+ struct sbridge_dev *sbridge_dev;
+
+ struct sbridge_info info;
+ struct sbridge_channel channel[NUM_CHANNELS];
+
+ int csrow_map[NUM_CHANNELS][MAX_DIMMS];
+
+ /* Memory type detection */
+ bool is_mirrored, is_lockstep, is_close_pg;
+
+ /* FIFO double buffers */
+ struct mce mce_entry[MCE_LOG_LEN];
+ struct mce mce_outentry[MCE_LOG_LEN];
+
+ /* FIFO in/out counters */
+ unsigned mce_in, mce_out;
+
+ /* Count of errors lost because the FIFO was full */
+ unsigned mce_overrun;
+
+ /* Memory description */
+ u64 tolm, tohm;
+};
+
+#define PCI_DESCR(device, function, device_id) \
+ .dev = (device), \
+ .func = (function), \
+ .dev_id = (device_id)
+
+static const struct pci_id_descr pci_dev_descr_sbridge[] = {
+ /* Processor Home Agent */
+ { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0) },
+
+ /* Memory controller */
+ { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA) },
+ { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS) },
+ { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0) },
+ { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1) },
+ { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2) },
+ { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3) },
+ { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO) },
+
+ /* System Address Decoder */
+ { PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0) },
+ { PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1) },
+
+ /* Broadcast Registers */
+ { PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR) },
+};
+
+#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+ {0,} /* 0 terminated list. */
+};
+
+/*
+ * pci_device_id table for which devices we are looking for
+ */
+static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
+ {0,} /* 0 terminated list. */
+};
+
+
+/****************************************************************************
+ Ancillary status routines
+ ****************************************************************************/
+
+static inline int numrank(u32 mtr)
+{
+ int ranks = (1 << RANK_CNT_BITS(mtr));
+
+ if (ranks > 4) {
+ debugf0("Invalid number of ranks: %d (max = 4) raw value = %x (%04x)",
+ ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
+ return -EINVAL;
+ }
+
+ return ranks;
+}
+
+static inline int numrow(u32 mtr)
+{
+ int rows = (RANK_WIDTH_BITS(mtr) + 12);
+
+ if (rows < 13 || rows > 18) {
+ debugf0("Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)",
+ rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
+ return -EINVAL;
+ }
+
+ return 1 << rows;
+}
+
+static inline int numcol(u32 mtr)
+{
+ int cols = (COL_WIDTH_BITS(mtr) + 10);
+
+ if (cols > 12) {
+ debugf0("Invalid number of cols: %d (max = 4) raw value = %x (%04x)",
+ cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
+ return -EINVAL;
+ }
+
+ return 1 << cols;
+}
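+/*
+ * For example (hypothetical MTR value): RANK_CNT_BITS == 1, RANK_WIDTH_BITS
+ * == 2 and COL_WIDTH_BITS == 0 describe a DIMM with 2 ranks, 1 << 14 rows
+ * and 1 << 10 columns, per the helpers above.
+ */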
+
+static struct sbridge_dev *get_sbridge_dev(u8 bus)
+{
+ struct sbridge_dev *sbridge_dev;
+
+ list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
+ if (sbridge_dev->bus == bus)
+ return sbridge_dev;
+ }
+
+ return NULL;
+}
+
+static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
+ const struct pci_id_table *table)
+{
+ struct sbridge_dev *sbridge_dev;
+
+ sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
+ if (!sbridge_dev)
+ return NULL;
+
+ sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
+ GFP_KERNEL);
+ if (!sbridge_dev->pdev) {
+ kfree(sbridge_dev);
+ return NULL;
+ }
+
+ sbridge_dev->bus = bus;
+ sbridge_dev->n_devs = table->n_devs;
+ list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
+
+ return sbridge_dev;
+}
+
+static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
+{
+ list_del(&sbridge_dev->list);
+ kfree(sbridge_dev->pdev);
+ kfree(sbridge_dev);
+}
+
+/****************************************************************************
+ Memory check routines
+ ****************************************************************************/
+static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
+ unsigned func)
+{
+ struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus);
+ int i;
+
+ if (!sbridge_dev)
+ return NULL;
+
+ for (i = 0; i < sbridge_dev->n_devs; i++) {
+ if (!sbridge_dev->pdev[i])
+ continue;
+
+ if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
+ PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
+ debugf1("Associated %02x.%02x.%d with %p\n",
+ bus, slot, func, sbridge_dev->pdev[i]);
+ return sbridge_dev->pdev[i];
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * sbridge_get_active_channels() - gets the number of channels and csrows
+ * @bus: Device bus
+ * @channels: Number of channels that will be returned
+ * @csrows: Number of csrows found
+ *
+ * Since the EDAC core needs to know in advance the number of available
+ * channels and csrows in order to allocate memory for them, two similar
+ * steps are needed. This first step counts the csrows/channels present on
+ * one socket, identified by the associated PCI bus, so that the mci
+ * components can be properly sized.
+ * Note: one csrow is one DIMM.
+ */
+static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
+ unsigned *csrows)
+{
+ struct pci_dev *pdev = NULL;
+ int i, j;
+ u32 mcmtr;
+
+ *channels = 0;
+ *csrows = 0;
+
+ pdev = get_pdev_slot_func(bus, 15, 0);
+ if (!pdev) {
+ sbridge_printk(KERN_ERR, "Couldn't find PCI device "
+ "%2x.%02d.%d!!!\n",
+ bus, 15, 0);
+ return -ENODEV;
+ }
+
+ pci_read_config_dword(pdev, MCMTR, &mcmtr);
+ if (!IS_ECC_ENABLED(mcmtr)) {
+ sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ u32 mtr;
+
+ /* Device 15 functions 2 - 5 */
+ pdev = get_pdev_slot_func(bus, 15, 2 + i);
+ if (!pdev) {
+ sbridge_printk(KERN_ERR, "Couldn't find PCI device "
+ "%2x.%02d.%d!!!\n",
+ bus, 15, 2 + i);
+ return -ENODEV;
+ }
+ (*channels)++;
+
+ for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+ pci_read_config_dword(pdev, mtr_regs[j], &mtr);
+ debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
+ if (IS_DIMM_PRESENT(mtr))
+ (*csrows)++;
+ }
+ }
+
+ debugf0("Number of active channels: %d, number of active dimms: %d\n",
+ *channels, *csrows);
+
+ return 0;
+}
+
+static int get_dimm_config(const struct mem_ctl_info *mci)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ struct csrow_info *csr;
+ int i, j, banks, ranks, rows, cols, size, npages;
+ int csrow = 0;
+ unsigned long last_page = 0;
+ u32 reg;
+ enum edac_type mode;
+ enum mem_type mtype;
+
+ pci_read_config_dword(pvt->pci_br, SAD_TARGET, &reg);
+ pvt->sbridge_dev->source_id = SOURCE_ID(reg);
+
+ pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
+ pvt->sbridge_dev->node_id = NODE_ID(reg);
+ debugf0("mc#%d: Node ID: %d, source ID: %d\n",
+ pvt->sbridge_dev->mc,
+ pvt->sbridge_dev->node_id,
+ pvt->sbridge_dev->source_id);
+
+ pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
+ if (IS_MIRROR_ENABLED(reg)) {
+ debugf0("Memory mirror is enabled\n");
+ pvt->is_mirrored = true;
+ } else {
+ debugf0("Memory mirror is disabled\n");
+ pvt->is_mirrored = false;
+ }
+
+ pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
+ if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
+ debugf0("Lockstep is enabled\n");
+ mode = EDAC_S8ECD8ED;
+ pvt->is_lockstep = true;
+ } else {
+ debugf0("Lockstep is disabled\n");
+ mode = EDAC_S4ECD4ED;
+ pvt->is_lockstep = false;
+ }
+ if (IS_CLOSE_PG(pvt->info.mcmtr)) {
+ debugf0("address map is on closed page mode\n");
+ pvt->is_close_pg = true;
+ } else {
+ debugf0("address map is on open page mode\n");
+ pvt->is_close_pg = false;
+ }
+
+ pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+ if (IS_RDIMM_ENABLED(reg)) {
+ /* FIXME: Can also be LRDIMM */
+ debugf0("Memory is registered\n");
+ mtype = MEM_RDDR3;
+ } else {
+ debugf0("Memory is unregistered\n");
+ mtype = MEM_DDR3;
+ }
+
+ /* On all supported DDR3 DIMM types, there are 8 banks available */
+ banks = 8;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ u32 mtr;
+
+ for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+ pci_read_config_dword(pvt->pci_tad[i],
+ mtr_regs[j], &mtr);
+ debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
+ if (IS_DIMM_PRESENT(mtr)) {
+ pvt->channel[i].dimms++;
+
+ ranks = numrank(mtr);
+ rows = numrow(mtr);
+ cols = numcol(mtr);
+
+ /* DDR3 has 8 I/O banks */
+ size = (rows * cols * banks * ranks) >> (20 - 3);
+ npages = MiB_TO_PAGES(size);
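+ /*
+ * Worked example with hypothetical numbers: 1 << 14 rows, 1 << 10
+ * cols, 8 banks and 2 ranks of 8-byte wide accesses give
+ * (2^14 * 2^10 * 8 * 2) >> (20 - 3) = 2048 MB for this DIMM.
+ */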
+
+ debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ pvt->sbridge_dev->mc, i, j,
+ size, npages,
+ banks, ranks, rows, cols);
+ csr = &mci->csrows[csrow];
+
+ csr->first_page = last_page;
+ csr->last_page = last_page + npages - 1;
+ csr->page_mask = 0UL; /* Unused */
+ csr->nr_pages = npages;
+ csr->grain = 32;
+ csr->csrow_idx = csrow;
+ csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+ csr->ce_count = 0;
+ csr->ue_count = 0;
+ csr->mtype = mtype;
+ csr->edac_mode = mode;
+ csr->nr_channels = 1;
+ csr->channels[0].chan_idx = i;
+ csr->channels[0].ce_count = 0;
+ pvt->csrow_map[i][j] = csrow;
+ snprintf(csr->channels[0].label,
+ sizeof(csr->channels[0].label),
+ "CPU_SrcID#%u_Channel#%u_DIMM#%u",
+ pvt->sbridge_dev->source_id, i, j);
+ last_page += npages;
+ csrow++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void get_memory_layout(const struct mem_ctl_info *mci)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ int i, j, k, n_sads, n_tads, sad_interl;
+ u32 reg;
+ u64 limit, prv = 0;
+ u64 tmp_mb;
+ u32 rir_way;
+
+ /*
+ * Step 1) Get TOLM/TOHM ranges
+ */
+
+ /* Address range is 32:28 */
+ pci_read_config_dword(pvt->pci_sad1, TOLM,
+ &reg);
+ pvt->tolm = GET_TOLM(reg);
+ tmp_mb = (1 + pvt->tolm) >> 20;
+
+ debugf0("TOLM: %Lu.%03Lu GB (0x%016Lx)\n",
+ tmp_mb / 1000, tmp_mb % 1000, (u64)pvt->tolm);
+
+ /* Address range is already 45:25 */
+ pci_read_config_dword(pvt->pci_sad1, TOHM,
+ &reg);
+ pvt->tohm = GET_TOHM(reg);
+ tmp_mb = (1 + pvt->tohm) >> 20;
+
+ debugf0("TOHM: %Lu.%03Lu GB (0x%016Lx)",
+ tmp_mb / 1000, tmp_mb % 1000, (u64)pvt->tohm);
+
+ /*
+ * Step 2) Get SAD range and SAD Interleave list
+ * TAD registers contain the interleave wayness. However, it
+ * seems simpler to just discover it indirectly, with the
+ * algorithm below.
+ */
+ prv = 0;
+ for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
+ /* SAD_LIMIT Address range is 45:26 */
+ pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
+ &reg);
+ limit = SAD_LIMIT(reg);
+
+ if (!DRAM_RULE_ENABLE(reg))
+ continue;
+
+ if (limit <= prv)
+ break;
+
+ tmp_mb = (limit + 1) >> 20;
+ debugf0("SAD#%d %s up to %Lu.%03Lu GB (0x%016Lx) %s reg=0x%08x\n",
+ n_sads,
+ get_dram_attr(reg),
+ tmp_mb / 1000, tmp_mb % 1000,
+ ((u64)tmp_mb) << 20L,
+ INTERLEAVE_MODE(reg) ? "Interleave: 8:6" : "Interleave: [8:6]XOR[18:16]",
+ reg);
+ prv = limit;
+
+ pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
+ &reg);
+ sad_interl = sad_pkg(reg, 0);
+ for (j = 0; j < 8; j++) {
+ if (j > 0 && sad_interl == sad_pkg(reg, j))
+ break;
+
+ debugf0("SAD#%d, interleave #%d: %d\n",
+ n_sads, j, sad_pkg(reg, j));
+ }
+ }
+
+ /*
+ * Step 3) Get TAD range
+ */
+ prv = 0;
+ for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
+ pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
+ &reg);
+ limit = TAD_LIMIT(reg);
+ if (limit <= prv)
+ break;
+ tmp_mb = (limit + 1) >> 20;
+
+ debugf0("TAD#%d: up to %Lu.%03Lu GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+ n_tads, tmp_mb / 1000, tmp_mb % 1000,
+ ((u64)tmp_mb) << 20L,
+ (u32)TAD_SOCK(reg),
+ (u32)TAD_CH(reg),
+ (u32)TAD_TGT0(reg),
+ (u32)TAD_TGT1(reg),
+ (u32)TAD_TGT2(reg),
+ (u32)TAD_TGT3(reg),
+ reg);
+ prv = tmp_mb;
+ }
+
+ /*
+ * Step 4) Get TAD offsets, per channel
+ */
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!pvt->channel[i].dimms)
+ continue;
+ for (j = 0; j < n_tads; j++) {
+ pci_read_config_dword(pvt->pci_tad[i],
+ tad_ch_nilv_offset[j],
+ &reg);
+ tmp_mb = TAD_OFFSET(reg) >> 20;
+ debugf0("TAD CH#%d, offset #%d: %Lu.%03Lu GB (0x%016Lx), reg=0x%08x\n",
+ i, j,
+ tmp_mb / 1000, tmp_mb % 1000,
+ ((u64)tmp_mb) << 20L,
+ reg);
+ }
+ }
+
+ /*
+ * Step 5) Get RIR wayness/limit, per channel
+ */
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!pvt->channel[i].dimms)
+ continue;
+ for (j = 0; j < MAX_RIR_RANGES; j++) {
+ pci_read_config_dword(pvt->pci_tad[i],
+ rir_way_limit[j],
+ &reg);
+
+ if (!IS_RIR_VALID(reg))
+ continue;
+
+ tmp_mb = RIR_LIMIT(reg) >> 20;
+ rir_way = 1 << RIR_WAY(reg);
+ debugf0("CH#%d RIR#%d, limit: %Lu.%03Lu GB (0x%016Lx), way: %d, reg=0x%08x\n",
+ i, j,
+ tmp_mb / 1000, tmp_mb % 1000,
+ ((u64)tmp_mb) << 20L,
+ rir_way,
+ reg);
+
+ for (k = 0; k < rir_way; k++) {
+ pci_read_config_dword(pvt->pci_tad[i],
+ rir_offset[j][k],
+ &reg);
+ tmp_mb = RIR_OFFSET(reg) << 6;
+
+ debugf0("CH#%d RIR#%d INTL#%d, offset %Lu.%03Lu GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
+ i, j, k,
+ tmp_mb / 1000, tmp_mb % 1000,
+ ((u64)tmp_mb) << 20L,
+ (u32)RIR_RNK_TGT(reg),
+ reg);
+ }
+ }
+ }
+}
+
+struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
+{
+ struct sbridge_dev *sbridge_dev;
+
+ list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
+ if (sbridge_dev->node_id == node_id)
+ return sbridge_dev->mci;
+ }
+ return NULL;
+}
+
+static int get_memory_error_data(struct mem_ctl_info *mci,
+ u64 addr,
+ u8 *socket,
+ long *channel_mask,
+ u8 *rank,
+ char *area_type)
+{
+ struct mem_ctl_info *new_mci;
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ char msg[256];
+ int n_rir, n_sads, n_tads, sad_way, sck_xch;
+ int sad_interl, idx, base_ch;
+ int interleave_mode;
+ unsigned sad_interleave[MAX_INTERLEAVE];
+ u32 reg;
+ u8 ch_way, sck_way;
+ u32 tad_offset;
+ u32 rir_way;
+ u64 ch_addr, offset, limit, prv = 0;
+
+
+ /*
+ * Step 0) Check if the address is at special memory ranges
+ * The check bellow is probably enough to fill all cases where
+ * the error is not inside a memory, except for the legacy
+ * range (e. g. VGA addresses). It is unlikely, however, that the
+ * memory controller would generate an error on that range.
+ */
+ if ((addr > (u64) pvt->tolm) && (addr < (1L << 32))) {
+ sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ if (addr >= (u64)pvt->tohm) {
+ sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+
+ /*
+ * Step 1) Get socket
+ */
+ for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
+ pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
+ &reg);
+
+ if (!DRAM_RULE_ENABLE(reg))
+ continue;
+
+ limit = SAD_LIMIT(reg);
+ if (limit <= prv) {
+ sprintf(msg, "Can't discover the memory socket");
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ if (addr <= limit)
+ break;
+ prv = limit;
+ }
+ if (n_sads == MAX_SAD) {
+ sprintf(msg, "Can't discover the memory socket");
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ area_type = get_dram_attr(reg);
+ interleave_mode = INTERLEAVE_MODE(reg);
+
+ pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
+ &reg);
+ sad_interl = sad_pkg(reg, 0);
+ for (sad_way = 0; sad_way < 8; sad_way++) {
+ if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
+ break;
+ sad_interleave[sad_way] = sad_pkg(reg, sad_way);
+ debugf0("SAD interleave #%d: %d\n",
+ sad_way, sad_interleave[sad_way]);
+ }
+ debugf0("mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
+ pvt->sbridge_dev->mc,
+ n_sads,
+ addr,
+ limit,
+ sad_way + 7,
+ INTERLEAVE_MODE(reg) ? "" : "XOR[18:16]");
+ if (interleave_mode)
+ idx = ((addr >> 6) ^ (addr >> 16)) & 7;
+ else
+ idx = (addr >> 6) & 7;
+ switch (sad_way) {
+ case 1:
+ idx = 0;
+ break;
+ case 2:
+ idx = idx & 1;
+ break;
+ case 4:
+ idx = idx & 3;
+ break;
+ case 8:
+ break;
+ default:
+ sprintf(msg, "Can't discover socket interleave");
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
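+ /*
+ * For instance, with interleave_mode == 0, an address whose bits 8:6
+ * are 0b101 gives idx = 5; with sad_way == 4 this is masked down to
+ * idx = 1, so the error is attributed to sad_interleave[1].
+ */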
+ *socket = sad_interleave[idx];
+ debugf0("SAD interleave index: %d (wayness %d) = CPU socket %d\n",
+ idx, sad_way, *socket);
+
+ /*
+ * Move to the proper node structure, in order to access the
+ * right PCI registers
+ */
+ new_mci = get_mci_for_node_id(*socket);
+ if (!new_mci) {
+ sprintf(msg, "Struct for socket #%u wasn't initialized",
+ *socket);
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ mci = new_mci;
+ pvt = mci->pvt_info;
+
+ /*
+ * Step 2) Get memory channel
+ */
+ prv = 0;
+ for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
+ pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
+ &reg);
+ limit = TAD_LIMIT(reg);
+ if (limit <= prv) {
+ sprintf(msg, "Can't discover the memory channel");
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ if (addr <= limit)
+ break;
+ prv = limit;
+ }
+ ch_way = TAD_CH(reg) + 1;
+ sck_way = TAD_SOCK(reg) + 1;
+ /*
+ * FIXME: Is it right to always use channel 0 for offsets?
+ */
+ pci_read_config_dword(pvt->pci_tad[0],
+ tad_ch_nilv_offset[n_tads],
+ &tad_offset);
+
+ if (ch_way == 3)
+ idx = addr >> 6;
+ else
+ idx = addr >> (6 + sck_way);
+ idx = idx % ch_way;
+
+ /*
+ * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
+ */
+ switch (idx) {
+ case 0:
+ base_ch = TAD_TGT0(reg);
+ break;
+ case 1:
+ base_ch = TAD_TGT1(reg);
+ break;
+ case 2:
+ base_ch = TAD_TGT2(reg);
+ break;
+ case 3:
+ base_ch = TAD_TGT3(reg);
+ break;
+ default:
+ sprintf(msg, "Can't discover the TAD target");
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ *channel_mask = 1 << base_ch;
+
+ if (pvt->is_mirrored) {
+ *channel_mask |= 1 << ((base_ch + 2) % 4);
+ switch (ch_way) {
+ case 2:
+ case 4:
+ sck_xch = 1 << sck_way * (ch_way >> 1);
+ break;
+ default:
+ sprintf(msg, "Invalid mirror set. Can't decode addr");
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ } else
+ sck_xch = (1 << sck_way) * ch_way;
+
+ if (pvt->is_lockstep)
+ *channel_mask |= 1 << ((base_ch + 1) % 4);
+
+ offset = TAD_OFFSET(tad_offset);
+
+ debugf0("TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
+ n_tads,
+ addr,
+ limit,
+ (u32)TAD_SOCK(reg),
+ ch_way,
+ offset,
+ idx,
+ base_ch,
+ *channel_mask);
+
+ /* Calculate channel address */
+ /* Remove the TAD offset */
+
+ if (offset > addr) {
+ sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
+ offset, addr);
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ addr -= offset;
+ /* Store the low bits [0:6] of the addr */
+ ch_addr = addr & 0x7f;
+ /* Remove socket wayness and remove 6 bits */
+ addr >>= 6;
+ addr /= sck_xch;
+#if 0
+ /* Divide by channel way */
+ addr = addr / ch_way;
+#endif
+ /* Recover the last 6 bits */
+ ch_addr |= addr << 6;
+
+ /*
+ * Step 3) Decode rank
+ */
+ for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
+ pci_read_config_dword(pvt->pci_tad[base_ch],
+ rir_way_limit[n_rir],
+ &reg);
+
+ if (!IS_RIR_VALID(reg))
+ continue;
+
+ limit = RIR_LIMIT(reg);
+
+ debugf0("RIR#%d, limit: %Lu.%03Lu GB (0x%016Lx), way: %d\n",
+ n_rir,
+ (limit >> 20) / 1000, (limit >> 20) % 1000,
+ limit,
+ 1 << RIR_WAY(reg));
+ if (ch_addr <= limit)
+ break;
+ }
+ if (n_rir == MAX_RIR_RANGES) {
+ sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
+ ch_addr);
+ edac_mc_handle_ce_no_info(mci, msg);
+ return -EINVAL;
+ }
+ rir_way = RIR_WAY(reg);
+ if (pvt->is_close_pg)
+ idx = (ch_addr >> 6);
+ else
+ idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
+ idx %= 1 << rir_way;
+
+ pci_read_config_dword(pvt->pci_tad[base_ch],
+ rir_offset[n_rir][idx],
+ &reg);
+ *rank = RIR_RNK_TGT(reg);
+
+ debugf0("RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
+ n_rir,
+ ch_addr,
+ limit,
+ rir_way,
+ idx);
+
+ return 0;
+}
+
+/****************************************************************************
+ Device initialization routines: put/get, init/exit
+ ****************************************************************************/
+
+/*
+ * sbridge_put_all_devices 'put' all the devices that we have
+ * reserved via 'get'
+ */
+static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
+{
+ int i;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+ for (i = 0; i < sbridge_dev->n_devs; i++) {
+ struct pci_dev *pdev = sbridge_dev->pdev[i];
+ if (!pdev)
+ continue;
+ debugf0("Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ pci_dev_put(pdev);
+ }
+}
+
+static void sbridge_put_all_devices(void)
+{
+ struct sbridge_dev *sbridge_dev, *tmp;
+
+ list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
+ sbridge_put_devices(sbridge_dev);
+ free_sbridge_dev(sbridge_dev);
+ }
+}
+
+/*
+ * sbridge_get_all_devices Find and perform 'get' operation on the MCH's
+ * device/functions we want to reference for this driver
+ *
+ * Need to 'get' every device listed in pci_dev_descr_sbridge[]
+ */
+static int sbridge_get_onedevice(struct pci_dev **prev,
+ u8 *num_mc,
+ const struct pci_id_table *table,
+ const unsigned devno)
+{
+ struct sbridge_dev *sbridge_dev;
+ const struct pci_id_descr *dev_descr = &table->descr[devno];
+
+ struct pci_dev *pdev = NULL;
+ u8 bus = 0;
+
+ sbridge_printk(KERN_INFO,
+ "Seeking for: dev %02x.%d PCI ID %04x:%04x\n",
+ dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ dev_descr->dev_id, *prev);
+
+ if (!pdev) {
+ if (*prev) {
+ *prev = pdev;
+ return 0;
+ }
+
+ if (dev_descr->optional)
+ return 0;
+
+ if (devno == 0)
+ return -ENODEV;
+
+ sbridge_printk(KERN_INFO,
+ "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
+ dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ /* End of list, leave */
+ return -ENODEV;
+ }
+ bus = pdev->bus->number;
+
+ sbridge_dev = get_sbridge_dev(bus);
+ if (!sbridge_dev) {
+ sbridge_dev = alloc_sbridge_dev(bus, table);
+ if (!sbridge_dev) {
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+ (*num_mc)++;
+ }
+
+ if (sbridge_dev->pdev[devno]) {
+ sbridge_printk(KERN_ERR,
+ "Duplicated device for "
+ "dev %02x:%d.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ sbridge_dev->pdev[devno] = pdev;
+
+ /* Sanity check */
+ if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
+ PCI_FUNC(pdev->devfn) != dev_descr->func)) {
+ sbridge_printk(KERN_ERR,
+ "Device PCI ID %04x:%04x "
+ "has dev %02x:%d.%d instead of dev %02x:%02x.%d\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
+ bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ bus, dev_descr->dev, dev_descr->func);
+ return -ENODEV;
+ }
+
+ /* Be sure that the device is enabled */
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ sbridge_printk(KERN_ERR,
+ "Couldn't enable "
+ "dev %02x:%d.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ return -ENODEV;
+ }
+
+ debugf0("Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev,
+ dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ /*
+ * As stated in drivers/pci/search.c, the reference count for
+ * @from is always decremented if it is not %NULL. So, as we keep
+ * iterating until a NULL is returned, we need to take an extra
+ * reference on the device
+ */
+ pci_dev_get(pdev);
+
+ *prev = pdev;
+
+ return 0;
+}
+
+static int sbridge_get_all_devices(u8 *num_mc)
+{
+ int i, rc;
+ struct pci_dev *pdev = NULL;
+ const struct pci_id_table *table = pci_dev_descr_sbridge_table;
+
+ while (table && table->descr) {
+ for (i = 0; i < table->n_devs; i++) {
+ pdev = NULL;
+ do {
+ rc = sbridge_get_onedevice(&pdev, num_mc,
+ table, i);
+ if (rc < 0) {
+ if (i == 0) {
+ i = table->n_devs;
+ break;
+ }
+ sbridge_put_all_devices();
+ return -ENODEV;
+ }
+ } while (pdev);
+ }
+ table++;
+ }
+
+ return 0;
+}
+
+static int mci_bind_devs(struct mem_ctl_info *mci,
+ struct sbridge_dev *sbridge_dev)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ int i, func, slot;
+
+ for (i = 0; i < sbridge_dev->n_devs; i++) {
+ pdev = sbridge_dev->pdev[i];
+ if (!pdev)
+ continue;
+ slot = PCI_SLOT(pdev->devfn);
+ func = PCI_FUNC(pdev->devfn);
+ switch (slot) {
+ case 12:
+ switch (func) {
+ case 6:
+ pvt->pci_sad0 = pdev;
+ break;
+ case 7:
+ pvt->pci_sad1 = pdev;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 13:
+ switch (func) {
+ case 6:
+ pvt->pci_br = pdev;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 14:
+ switch (func) {
+ case 0:
+ pvt->pci_ha0 = pdev;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 15:
+ switch (func) {
+ case 0:
+ pvt->pci_ta = pdev;
+ break;
+ case 1:
+ pvt->pci_ras = pdev;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ pvt->pci_tad[func - 2] = pdev;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 17:
+ switch (func) {
+ case 0:
+ pvt->pci_ddrio = pdev;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ debugf0("Associated PCI %02x.%02d.%d with dev = %p\n",
+ sbridge_dev->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev);
+ }
+
+ /* Check if everything was registered */
+ if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
+ !pvt->pci_tad || !pvt->pci_ras || !pvt->pci_ta ||
+ !pvt->pci_ddrio)
+ goto enodev;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!pvt->pci_tad[i])
+ goto enodev;
+ }
+ return 0;
+
+enodev:
+ sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
+ return -ENODEV;
+
+error:
+ sbridge_printk(KERN_ERR, "Device %d, function %d "
+ "is out of the expected range\n",
+ slot, func);
+ return -EINVAL;
+}
+
+/****************************************************************************
+ Error check routines
+ ****************************************************************************/
+
+/*
+ * While Sandy Bridge has error count registers, the SMI BIOS reads values
+ * from them and resets the counters, so they are not reliable for the OS
+ * to read. We therefore have no option but to trust whatever the MCE is
+ * telling us about the errors.
+ */
+static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+ const struct mce *m)
+{
+ struct mem_ctl_info *new_mci;
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ char *type, *optype, *msg, *recoverable_msg;
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+ bool recoverable = GET_BITFIELD(m->status, 56, 56);
+ u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+ u32 mscod = GET_BITFIELD(m->status, 16, 31);
+ u32 errcode = GET_BITFIELD(m->status, 0, 15);
+ u32 channel = GET_BITFIELD(m->status, 0, 3);
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ long channel_mask, first_channel;
+ u8 rank, socket;
+ int csrow, rc, dimm;
+ char *area_type = "Unknown";
+
+ if (ripv)
+ type = "NON_FATAL";
+ else
+ type = "FATAL";
+
+ /*
+ * According to Table 15-9 of the Intel Architecture spec vol 3A,
+ * memory errors should fit in this mask:
+ * 000f 0000 1mmm cccc (binary)
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ * If the mask doesn't match, report an error to the parsing logic
+ */
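+ /*
+ * For example, a (hypothetical) errcode of 0x0091 matches the mask:
+ * bit 7 is set, mmm = 001 (memory read) and cccc = 0001, i.e. a read
+ * error reported on channel 1.
+ */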
+ if ((errcode & 0xef80) != 0x80) {
+ optype = "Can't parse: it is not a memory error";
+ } else {
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request";
+ break;
+ case 1:
+ optype = "memory read";
+ break;
+ case 2:
+ optype = "memory write";
+ break;
+ case 3:
+ optype = "addr/cmd";
+ break;
+ case 4:
+ optype = "memory scrubbing";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+ }
+
+ rc = get_memory_error_data(mci, m->addr, &socket,
+ &channel_mask, &rank, area_type);
+ if (rc < 0)
+ return;
+ new_mci = get_mci_for_node_id(socket);
+ if (!new_mci) {
+ edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
+ return;
+ }
+ mci = new_mci;
+ pvt = mci->pvt_info;
+
+ first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
+
+ if (rank < 4)
+ dimm = 0;
+ else if (rank < 8)
+ dimm = 1;
+ else
+ dimm = 2;
+
+ csrow = pvt->csrow_map[first_channel][dimm];
+
+ if (uncorrected_error && recoverable)
+ recoverable_msg = " recoverable";
+ else
+ recoverable_msg = "";
+
+ /*
+ * FIXME: What should we do with "channel" information on mcelog?
+ * Probably, we can just discard it, as the channel information
+ * comes from the get_memory_error_data() address decoding
+ */
+ msg = kasprintf(GFP_ATOMIC,
+ "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
+ "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
+ core_err_cnt,
+ area_type,
+ optype,
+ type,
+ recoverable_msg,
+ overflow ? "OVERFLOW" : "",
+ m->cpu,
+ mscod, errcode,
+ channel, /* 1111b means not specified */
+ (long long) m->addr,
+ socket,
+ first_channel, /* This is the real channel on SB */
+ channel_mask,
+ rank);
+
+ debugf0("%s", msg);
+
+ /* Call the helper to output message */
+ if (uncorrected_error)
+ edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
+ else
+ edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
+
+ kfree(msg);
+}
+
+/*
+ * sbridge_check_error Retrieve and process errors reported by the
+ * hardware. Called by the Core module.
+ */
+static void sbridge_check_error(struct mem_ctl_info *mci)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ int i;
+ unsigned count = 0;
+ struct mce *m;
+
+ /*
+ * MCE first step: Copy all mce errors into a temporary buffer
+ * We use double buffering here, to reduce the risk of
+ * losing an error.
+ */
+ smp_rmb();
+ count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
+ % MCE_LOG_LEN;
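+ /*
+ * e.g. (assuming MCE_LOG_LEN == 32) with mce_out == 3 and
+ * mce_in == 30, the expression above yields (3 + 32 - 30) % 32 = 5
+ * entries pending in the ring buffer.
+ */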
+ if (!count)
+ return;
+
+ m = pvt->mce_outentry;
+ if (pvt->mce_in + count > MCE_LOG_LEN) {
+ unsigned l = MCE_LOG_LEN - pvt->mce_in;
+
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
+ smp_wmb();
+ pvt->mce_in = 0;
+ count -= l;
+ m += l;
+ }
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
+ smp_wmb();
+ pvt->mce_in += count;
+
+ smp_rmb();
+ if (pvt->mce_overrun) {
+ sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
+ pvt->mce_overrun);
+ smp_wmb();
+ pvt->mce_overrun = 0;
+ }
+
+ /*
+ * MCE second step: parse errors and display
+ */
+ for (i = 0; i < count; i++)
+ sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
+}
+
+/*
+ * sbridge_mce_check_error Replicates mcelog routine to get errors
+ * This routine simply queues mcelog errors, and
+ * returns. The error itself should be handled later
+ * by sbridge_check_error.
+ * WARNING: As this routine should be called at NMI time, extra care should
+ * be taken to avoid deadlocks, and to be as fast as possible.
+ */
+static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ struct mem_ctl_info *mci;
+ struct sbridge_pvt *pvt;
+
+ mci = get_mci_for_node_id(mce->socketid);
+ if (!mci)
+ return NOTIFY_BAD;
+ pvt = mci->pvt_info;
+
+ /*
+ * Just let mcelog handle it if the error is
+ * outside the memory controller. A memory error
+ * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
+ * Bit 12 has a special meaning.
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ printk("sbridge: HANDLING MCE MEMORY ERROR\n");
+
+ printk("CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
+ mce->extcpu, mce->mcgstatus, mce->bank, mce->status);
+ printk("TSC %llx ", mce->tsc);
+ printk("ADDR %llx ", mce->addr);
+ printk("MISC %llx ", mce->misc);
+
+ printk("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+#ifdef CONFIG_SMP
+ /* Only handle if it is the right mc controller */
+ if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
+ return NOTIFY_DONE;
+#endif
+
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+ smp_wmb();
+ pvt->mce_overrun++;
+ return NOTIFY_DONE;
+ }
+
+ /* Copy memory error at the ringbuffer */
+ memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
+ smp_wmb();
+ pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
+
+ /* Handle fatal errors immediately */
+ if (mce->mcgstatus & 1)
+ sbridge_check_error(mci);
+
+ /* Advise mcelog that the error was handled */
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block sbridge_mce_dec = {
+ .notifier_call = sbridge_mce_check_error,
+};
+
+/****************************************************************************
+ EDAC register/unregister logic
+ ****************************************************************************/
+
+static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
+{
+ struct mem_ctl_info *mci = sbridge_dev->mci;
+ struct sbridge_pvt *pvt;
+
+ if (unlikely(!mci || !mci->pvt_info)) {
+ debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
+ __func__, &sbridge_dev->pdev[0]->dev);
+
+ sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
+ return;
+ }
+
+ pvt = mci->pvt_info;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &sbridge_dev->pdev[0]->dev);
+
+ atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
+ &sbridge_mce_dec);
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->dev);
+
+ debugf1("%s: free mci struct\n", mci->ctl_name);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ sbridge_dev->mci = NULL;
+}
+
+static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
+{
+ struct mem_ctl_info *mci;
+ struct sbridge_pvt *pvt;
+ int rc, channels, csrows;
+
+ /* Check the number of active and not disabled channels */
+ rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
+ if (unlikely(rc < 0))
+ return rc;
+
+ /* allocate a new MC control structure */
+ mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
+ if (unlikely(!mci))
+ return -ENOMEM;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &sbridge_dev->pdev[0]->dev);
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+
+ /* Associate sbridge_dev and mci for future usage */
+ pvt->sbridge_dev = sbridge_dev;
+ sbridge_dev->mci = mci;
+
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "sbridge_edac.c";
+ mci->mod_ver = SBRIDGE_REVISION;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
+ mci->dev_name = pci_name(sbridge_dev->pdev[0]);
+ mci->ctl_page_to_phys = NULL;
+
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = sbridge_check_error;
+
+ /* Store pci devices at mci for faster access */
+ rc = mci_bind_devs(mci, sbridge_dev);
+ if (unlikely(rc < 0))
+ goto fail0;
+
+ /* Get dimm basic config and the memory layout */
+ get_dimm_config(mci);
+ get_memory_layout(mci);
+
+ /* record ptr to the generic device */
+ mci->dev = &sbridge_dev->pdev[0]->dev;
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (unlikely(edac_mc_add_mc(mci))) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ rc = -EINVAL;
+ goto fail0;
+ }
+
+ atomic_notifier_chain_register(&x86_mce_decoder_chain,
+ &sbridge_mce_dec);
+ return 0;
+
+fail0:
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ sbridge_dev->mci = NULL;
+ return rc;
+}
+
+/*
+ * sbridge_probe Probe for ONE instance of device to see if it is
+ * present.
+ * return:
+ * 0 if a device was found
+ * < 0 for error code
+ */
+
+static int __devinit sbridge_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+ u8 mc, num_mc = 0;
+ struct sbridge_dev *sbridge_dev;
+
+ /* get the pci devices we want to reserve for our use */
+ mutex_lock(&sbridge_edac_lock);
+
+ /*
+ * All memory controllers are allocated at the first pass.
+ */
+ if (unlikely(probed >= 1)) {
+ mutex_unlock(&sbridge_edac_lock);
+ return -ENODEV;
+ }
+ probed++;
+
+ rc = sbridge_get_all_devices(&num_mc);
+ if (unlikely(rc < 0))
+ goto fail0;
+ mc = 0;
+
+ list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
+ debugf0("Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc);
+ sbridge_dev->mc = mc++;
+ rc = sbridge_register_mci(sbridge_dev);
+ if (unlikely(rc < 0))
+ goto fail1;
+ }
+
+ sbridge_printk(KERN_INFO, "Driver loaded.\n");
+
+ mutex_unlock(&sbridge_edac_lock);
+ return 0;
+
+fail1:
+ list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
+ sbridge_unregister_mci(sbridge_dev);
+
+ sbridge_put_all_devices();
+fail0:
+ mutex_unlock(&sbridge_edac_lock);
+ return rc;
+}
+
+/*
+ * sbridge_remove destructor for one instance of device
+ *
+ */
+static void __devexit sbridge_remove(struct pci_dev *pdev)
+{
+ struct sbridge_dev *sbridge_dev;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ /*
+ * there is a problem here: the pdev value for removal will be wrong, since
+ * it will point to the X58 register used to detect that the machine
+ * is a Nehalem or upper design. However, due to the way several PCI
+ * devices are grouped together to provide MC functionality, we need
+ * to use a different method for releasing the devices
+ */
+
+ mutex_lock(&sbridge_edac_lock);
+
+ if (unlikely(!probed)) {
+ mutex_unlock(&sbridge_edac_lock);
+ return;
+ }
+
+ list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
+ sbridge_unregister_mci(sbridge_dev);
+
+ /* Release PCI resources */
+ sbridge_put_all_devices();
+
+ probed--;
+
+ mutex_unlock(&sbridge_edac_lock);
+}
+
+MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
+
+/*
+ * sbridge_driver pci_driver structure for this module
+ *
+ */
+static struct pci_driver sbridge_driver = {
+ .name = "sbridge_edac",
+ .probe = sbridge_probe,
+ .remove = __devexit_p(sbridge_remove),
+ .id_table = sbridge_pci_tbl,
+};
+
+/*
+ * sbridge_init Module entry function
+ * Try to initialize this module for its devices
+ */
+static int __init sbridge_init(void)
+{
+ int pci_rc;
+
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&sbridge_driver);
+
+ if (pci_rc >= 0)
+ return 0;
+
+ sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+
+ return pci_rc;
+}
+
+/*
+ * sbridge_exit() Module exit function
+ * Unregister the driver
+ */
+static void __exit sbridge_exit(void)
+{
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&sbridge_driver);
+}
+
+module_init(sbridge_init);
+module_exit(sbridge_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge memory controllers - "
+ SBRIDGE_REVISION);
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 57c3973093a..0f90e007187 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/byteorder.h>
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 334b82a3542..855ab3f5936 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1046,8 +1046,8 @@ static void update_split_timeout(struct fw_card *card)
cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
- cycles = max(cycles, 800u); /* minimum as per the spec */
- cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
+ /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
+ cycles = clamp(cycles, 800u, 3u * 8000u);
card->split_timeout_cycles = cycles;
card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 03a7a85d042..a20f45b1e7e 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -502,11 +502,7 @@ static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
{
max_rec = min(max_rec, speed + 8);
- max_rec = min(max_rec, 0xbU); /* <= 4096 */
- if (max_rec < 8) {
- fw_notify("max_rec %x out of range\n", max_rec);
- max_rec = 8;
- }
+ max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */
return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}
@@ -1125,17 +1121,12 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
unsigned u;
if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
- /* outside OHCI posted write area? */
- static const struct fw_address_region region = {
- .start = 0xffff00000000ULL,
- .end = CSR_REGISTER_BASE,
- };
-
dev->handler.length = 4096;
dev->handler.address_callback = fwnet_receive_packet;
dev->handler.callback_data = dev;
- retval = fw_core_add_address_handler(&dev->handler, &region);
+ retval = fw_core_add_address_handler(&dev->handler,
+ &fw_high_memory_region);
if (retval < 0)
goto failed_initial;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index fd7170a9ad2..6628feaa762 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -42,6 +42,7 @@
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -125,6 +126,7 @@ struct context {
struct fw_ohci *ohci;
u32 regs;
int total_allocation;
+ u32 current_bus;
bool running;
bool flushing;
@@ -226,7 +228,7 @@ struct fw_ohci {
__le32 *self_id_cpu;
dma_addr_t self_id_bus;
- struct tasklet_struct bus_reset_tasklet;
+ struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
@@ -263,6 +265,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
+#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define QUIRK_CYCLE_TIMER 1
@@ -270,6 +274,7 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_BE_HEADERS 4
#define QUIRK_NO_1394A 8
#define QUIRK_NO_MSI 16
+#define QUIRK_TI_SLLZ059 32
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -299,6 +304,12 @@ static const struct {
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
+ QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
+
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
+ QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
+
{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -315,6 +326,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
+ ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -859,7 +871,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_tasklet).
+ * at a slightly incorrect time (in bus_reset_work).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1046,6 +1058,7 @@ static void context_tasklet(unsigned long data)
address = le32_to_cpu(last->branch_address);
z = address & 0xf;
address &= ~0xf;
+ ctx->current_bus = address;
/* If the branch address points to a buffer outside of the
* current buffer, advance to the next buffer. */
@@ -1713,9 +1726,94 @@ static u32 update_bus_time(struct fw_ohci *ohci)
return ohci->bus_time | cycle_time_seconds;
}
-static void bus_reset_tasklet(unsigned long data)
+static int get_status_for_port(struct fw_ohci *ohci, int port_index)
+{
+ int reg;
+
+ mutex_lock(&ohci->phy_reg_mutex);
+ reg = write_phy_reg(ohci, 7, port_index);
+ if (reg >= 0)
+ reg = read_phy_reg(ohci, 8);
+ mutex_unlock(&ohci->phy_reg_mutex);
+ if (reg < 0)
+ return reg;
+
+ switch (reg & 0x0f) {
+ case 0x06:
+ return 2; /* is child node (connected to parent node) */
+ case 0x0e:
+ return 3; /* is parent node (connected to child node) */
+ }
+ return 1; /* not connected */
+}
+
+static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
+ int self_id_count)
+{
+ int i;
+ u32 entry;
+
+ for (i = 0; i < self_id_count; i++) {
+ entry = ohci->self_id_buffer[i];
+ if ((self_id & 0xff000000) == (entry & 0xff000000))
+ return -1;
+ if ((self_id & 0xff000000) < (entry & 0xff000000))
+ return i;
+ }
+ return i;
+}
+
+/*
+ * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
+ * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
+ * Construct the selfID from phy register contents.
+ * FIXME: How to determine the selfID.i flag?
+ */
+static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
+{
+ int reg, i, pos, status;
+ /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
+ u32 self_id = 0x8040c800;
+
+ reg = reg_read(ohci, OHCI1394_NodeID);
+ if (!(reg & OHCI1394_NodeID_idValid)) {
+ fw_notify("node ID not valid, new bus reset in progress\n");
+ return -EBUSY;
+ }
+ self_id |= ((reg & 0x3f) << 24); /* phy ID */
+
+ reg = ohci_read_phy_reg(&ohci->card, 4);
+ if (reg < 0)
+ return reg;
+ self_id |= ((reg & 0x07) << 8); /* power class */
+
+ reg = ohci_read_phy_reg(&ohci->card, 1);
+ if (reg < 0)
+ return reg;
+ self_id |= ((reg & 0x3f) << 16); /* gap count */
+
+ for (i = 0; i < 3; i++) {
+ status = get_status_for_port(ohci, i);
+ if (status < 0)
+ return status;
+ self_id |= ((status & 0x3) << (6 - (i * 2)));
+ }
+
+ pos = get_self_id_pos(ohci, self_id, self_id_count);
+ if (pos >= 0) {
+ memmove(&(ohci->self_id_buffer[pos+1]),
+ &(ohci->self_id_buffer[pos]),
+ (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
+ ohci->self_id_buffer[pos] = self_id;
+ self_id_count++;
+ }
+ return self_id_count;
+}
+
+static void bus_reset_work(struct work_struct *work)
{
- struct fw_ohci *ohci = (struct fw_ohci *)data;
+ struct fw_ohci *ohci =
+ container_of(work, struct fw_ohci, bus_reset_work);
int self_id_count, i, j, reg;
int generation, new_generation;
unsigned long flags;
@@ -1753,21 +1851,50 @@ static void bus_reset_tasklet(unsigned long data)
* bit extra to get the actual number of self IDs.
*/
self_id_count = (reg >> 3) & 0xff;
- if (self_id_count == 0 || self_id_count > 252) {
+
+ if (self_id_count > 252) {
fw_notify("inconsistent self IDs\n");
return;
}
+
generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
- fw_notify("inconsistent self IDs\n");
- return;
+ /*
+ * If the invalid data looks like a cycle start packet,
+ * it's likely to be the result of the cycle master
+ * having a wrong gap count. In this case, the self IDs
+ * so far are valid and should be processed so that the
+ * bus manager can then correct the gap count.
+ */
+ if (cond_le32_to_cpu(ohci->self_id_cpu[i])
+ == 0xffff008f) {
+ fw_notify("ignoring spurious self IDs\n");
+ self_id_count = j;
+ break;
+ } else {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
}
ohci->self_id_buffer[j] =
cond_le32_to_cpu(ohci->self_id_cpu[i]);
}
+
+ if (ohci->quirks & QUIRK_TI_SLLZ059) {
+ self_id_count = find_and_insert_self_id(ohci, self_id_count);
+ if (self_id_count < 0) {
+ fw_notify("could not construct local self ID\n");
+ return;
+ }
+ }
+
+ if (self_id_count == 0) {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
rmb();
/*
@@ -1887,7 +2014,7 @@ static irqreturn_t irq_handler(int irq, void *data)
log_irqs(event);
if (event & OHCI1394_selfIDComplete)
- tasklet_schedule(&ohci->bus_reset_tasklet);
+ queue_work(fw_workqueue, &ohci->bus_reset_work);
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
@@ -1934,7 +2061,8 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_read(ohci, OHCI1394_PostedWriteAddressLo);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
- fw_error("PCI posted write error\n");
+ if (printk_ratelimit())
+ fw_error("PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
@@ -2048,6 +2176,28 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci)
return 0;
}
+static int probe_tsb41ba3d(struct fw_ohci *ohci)
+{
+ /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
+ static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
+ int reg, i;
+
+ reg = read_phy_reg(ohci, 2);
+ if (reg < 0)
+ return reg;
+ if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
+ return 0;
+
+ for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
+ reg = read_paged_phy_reg(ohci, 1, i + 10);
+ if (reg < 0)
+ return reg;
+ if (reg != id[i])
+ return 0;
+ }
+ return 1;
+}
+
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
@@ -2085,6 +2235,16 @@ static int ohci_enable(struct fw_card *card,
return -EIO;
}
+ if (ohci->quirks & QUIRK_TI_SLLZ059) {
+ ret = probe_tsb41ba3d(ohci);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ fw_notify("local TSB41BA3D phy\n");
+ else
+ ohci->quirks &= ~QUIRK_TI_SLLZ059;
+ }
+
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);
@@ -2260,7 +2420,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_tasklet).
+ * ohci->next_config_rom to NULL (see bus_reset_work).
*/
next_config_rom =
@@ -2539,6 +2699,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
struct iso_context *ctx =
container_of(context, struct iso_context, context);
struct descriptor *pd;
+ u32 buffer_dma;
__le32 *ir_header;
void *p;
@@ -2549,6 +2710,16 @@ static int handle_ir_packet_per_buffer(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
+ d++;
+ buffer_dma = le32_to_cpu(d->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(d->req_count),
+ DMA_FROM_DEVICE);
+ }
+
p = last + 1;
copy_iso_headers(ctx, p);
@@ -2571,11 +2742,19 @@ static int handle_ir_buffer_fill(struct context *context,
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
+ u32 buffer_dma;
if (!last->transfer_status)
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ buffer_dma = le32_to_cpu(last->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(last->req_count),
+ DMA_FROM_DEVICE);
+
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
ctx->base.callback.mc(&ctx->base,
le32_to_cpu(last->data_address) +
@@ -2586,6 +2765,43 @@ static int handle_ir_buffer_fill(struct context *context,
return 1;
}
+static inline void sync_it_packet_for_cpu(struct context *context,
+ struct descriptor *pd)
+{
+ __le16 control;
+ u32 buffer_dma;
+
+ /* only packets beginning with OUTPUT_MORE* have data buffers */
+ if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+ return;
+
+ /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
+ pd += 2;
+
+ /*
+ * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
+ * data buffer is in the context program's coherent page and must not
+ * be synced.
+ */
+ if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
+ (context->current_bus & PAGE_MASK)) {
+ if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+ return;
+ pd++;
+ }
+
+ do {
+ buffer_dma = le32_to_cpu(pd->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(pd->req_count),
+ DMA_TO_DEVICE);
+ control = pd->control;
+ pd++;
+ } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
+}
+
static int handle_it_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -2602,6 +2818,8 @@ static int handle_it_packet(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ sync_it_packet_for_cpu(context, d);
+
i = ctx->header_length;
if (i + 4 < PAGE_SIZE) {
/* Present this value as big-endian to match the receive code */
@@ -2971,6 +3189,10 @@ static int queue_iso_transmit(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
pd[i].data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+ page_bus, offset, length,
+ DMA_TO_DEVICE);
+
payload_index += length;
}
@@ -2995,6 +3217,7 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
+ struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
@@ -3049,6 +3272,10 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
pd->data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(device, page_bus,
+ offset, length,
+ DMA_FROM_DEVICE);
+
offset = (offset + length) & ~PAGE_MASK;
rest -= length;
if (offset == 0)
@@ -3108,6 +3335,10 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
d->data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+ page_bus, offset, length,
+ DMA_FROM_DEVICE);
+
rest -= length;
offset = 0;
page++;
@@ -3239,8 +3470,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- tasklet_init(&ohci->bus_reset_tasklet,
- bus_reset_tasklet, (unsigned long)ohci);
+ INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
err = pci_request_region(dev, 0, ohci_driver_name);
if (err) {
@@ -3382,6 +3612,7 @@ static void pci_remove(struct pci_dev *dev)
ohci = pci_get_drvdata(dev);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
flush_writes(ohci);
+ cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
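The bus-reset path is converted from a tasklet to a regular work item, so teardown now needs the cancel_work_sync() added above. A condensed sketch of the same lifecycle under assumed names (my_dev, reset_work_fn and the probe/irq/remove helpers are placeholders, not this driver's code):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct reset_work;	/* replaces the old tasklet */
};

static void reset_work_fn(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, reset_work);

	/* handle the event in process context; may sleep, unlike a tasklet */
	(void)d;
}

static void my_probe_init(struct my_dev *d)
{
	INIT_WORK(&d->reset_work, reset_work_fn);
}

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_dev *d = data;

	queue_work(system_wq, &d->reset_work);	/* was tasklet_schedule() */
	return IRQ_HANDLED;
}

static void my_remove(struct my_dev *d)
{
	cancel_work_sync(&d->reset_work);	/* nothing left in flight */
}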
/*
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 17cef864506..68375bc3aef 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -154,12 +154,16 @@ struct sbp2_logical_unit {
bool blocked;
};
+static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
+{
+ queue_delayed_work(fw_workqueue, &lu->work, delay);
+}
+
/*
* We create one struct sbp2_target per IEEE 1212 Unit Directory
* and one struct Scsi_Host per sbp2_target.
*/
struct sbp2_target {
- struct kref kref;
struct fw_unit *unit;
const char *bus_id;
struct list_head lu_list;
@@ -772,71 +776,6 @@ static int sbp2_lun2int(u16 lun)
return scsilun_to_int(&eight_bytes_lun);
}
-static void sbp2_release_target(struct kref *kref)
-{
- struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
- struct sbp2_logical_unit *lu, *next;
- struct Scsi_Host *shost =
- container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- struct scsi_device *sdev;
- struct fw_device *device = target_device(tgt);
-
- /* prevent deadlocks */
- sbp2_unblock(tgt);
-
- list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
- sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
- if (sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- }
- if (lu->login_id != INVALID_LOGIN_ID) {
- int generation, node_id;
- /*
- * tgt->node_id may be obsolete here if we failed
- * during initial login or after a bus reset where
- * the topology changed.
- */
- generation = device->generation;
- smp_rmb(); /* node_id vs. generation */
- node_id = device->node_id;
- sbp2_send_management_orb(lu, node_id, generation,
- SBP2_LOGOUT_REQUEST,
- lu->login_id, NULL);
- }
- fw_core_remove_address_handler(&lu->address_handler);
- list_del(&lu->link);
- kfree(lu);
- }
- scsi_remove_host(shost);
- fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
-
- fw_unit_put(tgt->unit);
- scsi_host_put(shost);
- fw_device_put(device);
-}
-
-static void sbp2_target_get(struct sbp2_target *tgt)
-{
- kref_get(&tgt->kref);
-}
-
-static void sbp2_target_put(struct sbp2_target *tgt)
-{
- kref_put(&tgt->kref, sbp2_release_target);
-}
-
-/*
- * Always get the target's kref when scheduling work on one its units.
- * Each workqueue job is responsible to call sbp2_target_put() upon return.
- */
-static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
-{
- sbp2_target_get(lu->tgt);
- if (!queue_delayed_work(fw_workqueue, &lu->work, delay))
- sbp2_target_put(lu->tgt);
-}
-
/*
* Write retransmit retry values into the BUSY_TIMEOUT register.
* - The single-phase retry protocol is supported by all SBP-2 devices, but the
@@ -877,7 +816,7 @@ static void sbp2_login(struct work_struct *work)
int generation, node_id, local_node_id;
if (fw_device_is_shutdown(device))
- goto out;
+ return;
generation = device->generation;
smp_rmb(); /* node IDs must not be older than generation */
@@ -899,7 +838,7 @@ static void sbp2_login(struct work_struct *work)
/* Let any waiting I/O fail from now on. */
sbp2_unblock(lu->tgt);
}
- goto out;
+ return;
}
tgt->node_id = node_id;
@@ -925,7 +864,8 @@ static void sbp2_login(struct work_struct *work)
if (lu->has_sdev) {
sbp2_cancel_orbs(lu);
sbp2_conditionally_unblock(lu);
- goto out;
+
+ return;
}
if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
@@ -957,7 +897,8 @@ static void sbp2_login(struct work_struct *work)
lu->has_sdev = true;
scsi_device_put(sdev);
sbp2_allow_block(lu);
- goto out;
+
+ return;
out_logout_login:
smp_rmb(); /* generation may have changed */
@@ -971,8 +912,57 @@ static void sbp2_login(struct work_struct *work)
* lu->work already. Reset the work from reconnect to login.
*/
PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- out:
- sbp2_target_put(tgt);
+}
+
+static void sbp2_reconnect(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu =
+ container_of(work, struct sbp2_logical_unit, work.work);
+ struct sbp2_target *tgt = lu->tgt;
+ struct fw_device *device = target_device(tgt);
+ int generation, node_id, local_node_id;
+
+ if (fw_device_is_shutdown(device))
+ return;
+
+ generation = device->generation;
+ smp_rmb(); /* node IDs must not be older than generation */
+ node_id = device->node_id;
+ local_node_id = device->card->node_id;
+
+ if (sbp2_send_management_orb(lu, node_id, generation,
+ SBP2_RECONNECT_REQUEST,
+ lu->login_id, NULL) < 0) {
+ /*
+ * If reconnect was impossible even though we are in the
+ * current generation, fall back and try to log in again.
+ *
+ * We could check for "Function rejected" status, but
+ * looking at the bus generation is simpler and more general.
+ */
+ smp_rmb(); /* get current card generation */
+ if (generation == device->card->generation ||
+ lu->retries++ >= 5) {
+ fw_error("%s: failed to reconnect\n", tgt->bus_id);
+ lu->retries = 0;
+ PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ }
+ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
+ return;
+ }
+
+ tgt->node_id = node_id;
+ tgt->address_high = local_node_id << 16;
+ smp_wmb(); /* node IDs must not be older than generation */
+ lu->generation = generation;
+
+ fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
+ tgt->bus_id, lu->lun, lu->retries);
+
+ sbp2_agent_reset(lu);
+ sbp2_cancel_orbs(lu);
+ sbp2_conditionally_unblock(lu);
}
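Both the login and reconnect paths depend on the ordering rule repeated in the comments: a node ID must never be paired with an older bus generation. A schematic of that publisher/reader pairing, using generic placeholder fields rather than the sbp2 structures:

#include <asm/barrier.h>

struct bus_state {
	int generation;
	int node_id;
};

/* updater: make node_id visible before the generation that validates it */
static void publish(struct bus_state *s, int gen, int node)
{
	s->node_id = node;
	smp_wmb();			/* node_id before generation */
	s->generation = gen;
}

/* reader: snapshot generation first, then node_id */
static void snapshot(struct bus_state *s, int *gen, int *node)
{
	*gen = s->generation;
	smp_rmb();			/* pairs with smp_wmb() above */
	*node = s->node_id;
}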
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -1120,6 +1110,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
}
static struct scsi_host_template scsi_driver_template;
+static int sbp2_remove(struct device *dev);
static int sbp2_probe(struct device *dev)
{
@@ -1141,7 +1132,6 @@ static int sbp2_probe(struct device *dev)
tgt = (struct sbp2_target *)shost->hostdata;
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
- kref_init(&tgt->kref);
INIT_LIST_HEAD(&tgt->lu_list);
tgt->bus_id = dev_name(&unit->device);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
@@ -1154,9 +1144,6 @@ static int sbp2_probe(struct device *dev)
if (scsi_add_host(shost, &unit->device) < 0)
goto fail_shost_put;
- fw_device_get(device);
- fw_unit_get(unit);
-
/* implicit directory ID */
tgt->directory_id = ((unit->directory - device->config_rom) * 4
+ CSR_CONFIG_ROM) & 0xffffff;
@@ -1166,7 +1153,7 @@ static int sbp2_probe(struct device *dev)
if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
&firmware_revision) < 0)
- goto fail_tgt_put;
+ goto fail_remove;
sbp2_clamp_management_orb_timeout(tgt);
sbp2_init_workarounds(tgt, model, firmware_revision);
@@ -1177,16 +1164,17 @@ static int sbp2_probe(struct device *dev)
* specifies the max payload size as 2 ^ (max_payload + 2), so
* if we set this to max_speed + 7, we get the right value.
*/
- tgt->max_payload = min(device->max_speed + 7, 10U);
- tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
+ tgt->max_payload = min3(device->max_speed + 7, 10U,
+ device->card->max_receive - 1);
/* Do the login in a workqueue so we can easily reschedule retries. */
list_for_each_entry(lu, &tgt->lu_list, link)
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
return 0;
- fail_tgt_put:
- sbp2_target_put(tgt);
+ fail_remove:
+ sbp2_remove(dev);
return -ENOMEM;
fail_shost_put:
@@ -1194,71 +1182,6 @@ static int sbp2_probe(struct device *dev)
return -ENOMEM;
}
-static int sbp2_remove(struct device *dev)
-{
- struct fw_unit *unit = fw_unit(dev);
- struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
- struct sbp2_logical_unit *lu;
-
- list_for_each_entry(lu, &tgt->lu_list, link)
- cancel_delayed_work_sync(&lu->work);
-
- sbp2_target_put(tgt);
- return 0;
-}
-
-static void sbp2_reconnect(struct work_struct *work)
-{
- struct sbp2_logical_unit *lu =
- container_of(work, struct sbp2_logical_unit, work.work);
- struct sbp2_target *tgt = lu->tgt;
- struct fw_device *device = target_device(tgt);
- int generation, node_id, local_node_id;
-
- if (fw_device_is_shutdown(device))
- goto out;
-
- generation = device->generation;
- smp_rmb(); /* node IDs must not be older than generation */
- node_id = device->node_id;
- local_node_id = device->card->node_id;
-
- if (sbp2_send_management_orb(lu, node_id, generation,
- SBP2_RECONNECT_REQUEST,
- lu->login_id, NULL) < 0) {
- /*
- * If reconnect was impossible even though we are in the
- * current generation, fall back and try to log in again.
- *
- * We could check for "Function rejected" status, but
- * looking at the bus generation as simpler and more general.
- */
- smp_rmb(); /* get current card generation */
- if (generation == device->card->generation ||
- lu->retries++ >= 5) {
- fw_error("%s: failed to reconnect\n", tgt->bus_id);
- lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- }
- sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
- goto out;
- }
-
- tgt->node_id = node_id;
- tgt->address_high = local_node_id << 16;
- smp_wmb(); /* node IDs must not be older than generation */
- lu->generation = generation;
-
- fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
- tgt->bus_id, lu->lun, lu->retries);
-
- sbp2_agent_reset(lu);
- sbp2_cancel_orbs(lu);
- sbp2_conditionally_unblock(lu);
- out:
- sbp2_target_put(tgt);
-}
-
static void sbp2_update(struct fw_unit *unit)
{
struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
@@ -1277,6 +1200,51 @@ static void sbp2_update(struct fw_unit *unit)
}
}
+static int sbp2_remove(struct device *dev)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct fw_device *device = fw_parent_device(unit);
+ struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
+ struct sbp2_logical_unit *lu, *next;
+ struct Scsi_Host *shost =
+ container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ struct scsi_device *sdev;
+
+ /* prevent deadlocks */
+ sbp2_unblock(tgt);
+
+ list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+ cancel_delayed_work_sync(&lu->work);
+ sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ if (lu->login_id != INVALID_LOGIN_ID) {
+ int generation, node_id;
+ /*
+ * tgt->node_id may be obsolete here if we failed
+ * during initial login or after a bus reset where
+ * the topology changed.
+ */
+ generation = device->generation;
+ smp_rmb(); /* node_id vs. generation */
+ node_id = device->node_id;
+ sbp2_send_management_orb(lu, node_id, generation,
+ SBP2_LOGOUT_REQUEST,
+ lu->login_id, NULL);
+ }
+ fw_core_remove_address_handler(&lu->address_handler);
+ list_del(&lu->link);
+ kfree(lu);
+ }
+ scsi_remove_host(shost);
+ fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
+
+ scsi_host_put(shost);
+ return 0;
+}
+
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index f1b7f659d3c..e2295766580 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -151,7 +151,8 @@ edd_show_host_bus(struct edd_device *edev, char *buf)
p += scnprintf(p, left, "\tbase_address: %x\n",
info->params.interface_path.isa.base_address);
} else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
- !strncmp(info->params.host_bus_type, "PCI", 3)) {
+ !strncmp(info->params.host_bus_type, "PCI", 3) ||
+ !strncmp(info->params.host_bus_type, "XPRS", 4)) {
p += scnprintf(p, left,
"\t%02x:%02x.%d channel: %u\n",
info->params.interface_path.pci.bus,
@@ -159,7 +160,6 @@ edd_show_host_bus(struct edd_device *edev, char *buf)
info->params.interface_path.pci.function,
info->params.interface_path.pci.channel);
} else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
- !strncmp(info->params.host_bus_type, "XPRS", 4) ||
!strncmp(info->params.host_bus_type, "HTPT", 4)) {
p += scnprintf(p, left,
"\tTBD: %llx\n",
@@ -668,7 +668,7 @@ edd_get_pci_dev(struct edd_device *edev)
{
struct edd_info *info = edd_dev_get_info(edev);
- if (edd_dev_is_type(edev, "PCI")) {
+ if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
return pci_get_bus_and_slot(info->params.interface_path.pci.bus,
PCI_DEVFN(info->params.interface_path.pci.slot,
info->params.interface_path.pci.
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index eb80b549ed8..8370f72d87f 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -490,8 +490,8 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
return 0;
}
-static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
- size_t size, struct pstore_info *psi)
+static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ unsigned int part, size_t size, struct pstore_info *psi)
{
char name[DUMP_NAME_LEN];
char stub_name[DUMP_NAME_LEN];
@@ -499,7 +499,7 @@ static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
struct efivar_entry *entry, *found = NULL;
- int i;
+ int i, ret = 0;
sprintf(stub_name, "dump-type%u-%u-", type, part);
sprintf(name, "%s%lu", stub_name, get_seconds());
@@ -548,18 +548,19 @@ static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
efivar_unregister(found);
if (size)
- efivar_create_sysfs_entry(efivars,
+ ret = efivar_create_sysfs_entry(efivars,
utf16_strsize(efi_name,
DUMP_NAME_LEN * 2),
efi_name, &vendor);
- return part;
+ *id = part;
+ return ret;
};
static int efi_pstore_erase(enum pstore_type_id type, u64 id,
struct pstore_info *psi)
{
- efi_pstore_write(type, id, 0, psi);
+ efi_pstore_write(type, &id, (unsigned int)id, 0, psi);
return 0;
}
@@ -580,8 +581,8 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
return -1;
}
-static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
- size_t size, struct pstore_info *psi)
+static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ unsigned int part, size_t size, struct pstore_info *psi)
{
return 0;
}
@@ -978,7 +979,7 @@ int register_efivars(struct efivars *efivars,
if (efivars->efi_pstore_info.buf) {
efivars->efi_pstore_info.bufsize = 1024;
efivars->efi_pstore_info.data = efivars;
- mutex_init(&efivars->efi_pstore_info.buf_mutex);
+ spin_lock_init(&efivars->efi_pstore_info.buf_lock);
pstore_register(&efivars->efi_pstore_info);
}
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index aa83de9db1b..c4e7c59d1c6 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -27,6 +27,7 @@
#include <linux/kdebug.h>
#include <linux/reboot.h>
#include <linux/efi.h>
+#include <linux/module.h>
#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d539efd96d4..8482a23887d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -95,14 +95,18 @@ config GPIO_EP93XX
depends on ARCH_EP93XX
select GPIO_GENERIC
-config GPIO_EXYNOS4
- def_bool y
- depends on CPU_EXYNOS4210
-
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
+config GPIO_MPC8XXX
+ bool "MPC512x/MPC8xxx GPIO support"
+ depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
+ FSL_SOC_BOOKE || PPC_86xx
+ help
+ Say Y here if you're going to use hardware that connects to the
+ MPC512x/831x/834x/837x/8572/8610 GPIOs.
+
config GPIO_MSM_V1
tristate "Qualcomm MSM GPIO v1"
depends on GPIOLIB && ARCH_MSM
@@ -131,18 +135,6 @@ config GPIO_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
-config GPIO_PLAT_SAMSUNG
- def_bool y
- depends on SAMSUNG_GPIOLIB_4BIT
-
-config GPIO_S5PC100
- def_bool y
- depends on CPU_S5PC100
-
-config GPIO_S5PV210
- def_bool y
- depends on CPU_S5PV210
-
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
@@ -178,9 +170,18 @@ config GPIO_SCH
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
+config GPIO_U300
+ bool "ST-Ericsson U300 COH 901 335/571 GPIO"
+ depends on GPIOLIB && ARCH_U300
+ help
+ Say yes here to support GPIO interface on ST-Ericsson U300.
+ The names of the two IP block variants supported are
+ COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
+ ports of 8 GPIO pins each.
+
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on MFD_SUPPORT && PCI
+ depends on PCI
select MFD_CORE
select MFD_VX855
help
@@ -388,6 +389,7 @@ config GPIO_LANGWELL
config GPIO_PCH
tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
depends on PCI && X86
+ select GENERIC_IRQ_CHIP
help
This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
which is an IOH(Input/Output Hub) for x86 embedded processor.
@@ -402,6 +404,7 @@ config GPIO_PCH
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on PCI
+ select GENERIC_IRQ_CHIP
help
ML7213 is companion chip for Intel Atom E6xx series.
This driver can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/Output
@@ -417,7 +420,6 @@ config GPIO_TIMBERDALE
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
depends on PCI
- select MFD_SUPPORT
select MFD_CORE
select MFD_RDC321X
help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 9588948c96f..dbcb0bcfd8d 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -14,11 +14,13 @@ obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
+obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
-obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
+obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
+obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
@@ -27,6 +29,7 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
+obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
@@ -37,18 +40,17 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
+obj-$(CONFIG_PLAT_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
-
-obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
-obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
-obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
-
+obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
+obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
+obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index ff525c0958d..a31ad6f5d91 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -15,6 +15,7 @@
#include <linux/spi/74x164.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct gen_74x164_chip {
struct spi_device *spi;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
new file mode 100644
index 00000000000..df0d59570a8
--- /dev/null
+++ b/drivers/gpio/gpio-davinci.c
@@ -0,0 +1,455 @@
+/*
+ * TI DaVinci GPIO Support
+ *
+ * Copyright (c) 2006-2007 David Brownell
+ * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/mach/irq.h>
+
+struct davinci_gpio_regs {
+ u32 dir;
+ u32 out_data;
+ u32 set_data;
+ u32 clr_data;
+ u32 in_data;
+ u32 set_rising;
+ u32 clr_rising;
+ u32 set_falling;
+ u32 clr_falling;
+ u32 intstat;
+};
+
+#define chip2controller(chip) \
+ container_of(chip, struct davinci_gpio_controller, chip)
+
+static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
+static void __iomem *gpio_base;
+
+static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio)
+{
+ void __iomem *ptr;
+
+ if (gpio < 32 * 1)
+ ptr = gpio_base + 0x10;
+ else if (gpio < 32 * 2)
+ ptr = gpio_base + 0x38;
+ else if (gpio < 32 * 3)
+ ptr = gpio_base + 0x60;
+ else if (gpio < 32 * 4)
+ ptr = gpio_base + 0x88;
+ else if (gpio < 32 * 5)
+ ptr = gpio_base + 0xb0;
+ else
+ ptr = NULL;
+ return ptr;
+}
+
+static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
+{
+ struct davinci_gpio_regs __iomem *g;
+
+ g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);
+
+ return g;
+}
+
+static int __init davinci_gpio_irq_setup(void);
+
+/*--------------------------------------------------------------------------*/
+
+/* board setup code *MUST* set up pinmux and enable the GPIO clock. */
+static inline int __davinci_direction(struct gpio_chip *chip,
+ unsigned offset, bool out, int value)
+{
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
+ unsigned long flags;
+ u32 temp;
+ u32 mask = 1 << offset;
+
+ spin_lock_irqsave(&d->lock, flags);
+ temp = __raw_readl(&g->dir);
+ if (out) {
+ temp &= ~mask;
+ __raw_writel(mask, value ? &g->set_data : &g->clr_data);
+ } else {
+ temp |= mask;
+ }
+ __raw_writel(temp, &g->dir);
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ return 0;
+}
+
+static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ return __davinci_direction(chip, offset, false, 0);
+}
+
+static int
+davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ return __davinci_direction(chip, offset, true, value);
+}
+
+/*
+ * Read the pin's value (works even if it's set up as output);
+ * returns zero/nonzero.
+ *
+ * Note that changes are synched to the GPIO clock, so reading values back
+ * right after you've set them may give old values.
+ */
+static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
+
+ return (1 << offset) & __raw_readl(&g->in_data);
+}
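The note above matters to consumers: because levels are synchronized to the GPIO module clock, a value written through gpio_set_value() may not show up in an immediate gpio_get_value(). A consumer-side sketch of that caveat (the pin number and label are arbitrary examples):

#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>

static int sketch_toggle(unsigned gpio)		/* e.g. a board-specific pin */
{
	int err = gpio_request(gpio, "sketch");

	if (err)
		return err;

	gpio_direction_output(gpio, 0);
	gpio_set_value(gpio, 1);
	/*
	 * The new level is latched on the GPIO clock, so reading straight
	 * back may still return the old value; wait briefly first.
	 */
	udelay(1);
	pr_info("pin %u now reads %d\n", gpio, gpio_get_value(gpio));

	gpio_free(gpio);
	return 0;
}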
+
+/*
+ * Assuming the pin is muxed as a gpio output, set its output value.
+ */
+static void
+davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
+
+ __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
+}
+
+static int __init davinci_gpio_setup(void)
+{
+ int i, base;
+ unsigned ngpio;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct davinci_gpio_regs *regs;
+
+ if (soc_info->gpio_type != GPIO_TYPE_DAVINCI)
+ return 0;
+
+ /*
+ * The gpio banks conceptually expose a segmented bitmap,
+ * and "ngpio" is one more than the largest zero-based
+ * bit index that's valid.
+ */
+ ngpio = soc_info->gpio_num;
+ if (ngpio == 0) {
+ pr_err("GPIO setup: how many GPIOs?\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(DAVINCI_N_GPIO < ngpio))
+ ngpio = DAVINCI_N_GPIO;
+
+ gpio_base = ioremap(soc_info->gpio_base, SZ_4K);
+ if (WARN_ON(!gpio_base))
+ return -ENOMEM;
+
+ for (i = 0, base = 0; base < ngpio; i++, base += 32) {
+ chips[i].chip.label = "DaVinci";
+
+ chips[i].chip.direction_input = davinci_direction_in;
+ chips[i].chip.get = davinci_gpio_get;
+ chips[i].chip.direction_output = davinci_direction_out;
+ chips[i].chip.set = davinci_gpio_set;
+
+ chips[i].chip.base = base;
+ chips[i].chip.ngpio = ngpio - base;
+ if (chips[i].chip.ngpio > 32)
+ chips[i].chip.ngpio = 32;
+
+ spin_lock_init(&chips[i].lock);
+
+ regs = gpio2regs(base);
+ chips[i].regs = regs;
+ chips[i].set_data = &regs->set_data;
+ chips[i].clr_data = &regs->clr_data;
+ chips[i].in_data = &regs->in_data;
+
+ gpiochip_add(&chips[i].chip);
+ }
+
+ soc_info->gpio_ctlrs = chips;
+ soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
+
+ davinci_gpio_irq_setup();
+ return 0;
+}
+pure_initcall(davinci_gpio_setup);
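The setup loop above carves the SoC's GPIO range into 32-line banks, with the last chip taking whatever remains. A small worked example, assuming a hypothetical gpio_num of 71:

#include <linux/kernel.h>

static void sketch_bank_split(void)
{
	unsigned int ngpio = 71, base, i;	/* 71 is a made-up example */

	for (i = 0, base = 0; base < ngpio; i++, base += 32)
		pr_info("chip %u: gpios %u..%u (ngpio %u)\n",
			i, base, min(base + 31, ngpio - 1),
			min(32U, ngpio - base));

	/* three chips of 32, 32 and 7 lines; gpio_ctlrs_num = 3 */
	pr_info("gpio_ctlrs_num = %u\n", DIV_ROUND_UP(ngpio, 32));
}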
+
+/*--------------------------------------------------------------------------*/
+/*
+ * We expect irqs will normally be set up as input pins, but they can also be
+ * used as output pins ... which is convenient for testing.
+ *
+ * NOTE: The first few GPIOs also have direct INTC hookups in addition
+ * to their GPIOBNK0 irq, with a bit less overhead.
+ *
+ * All those INTC hookups (direct, plus several IRQ banks) can also
+ * serve as EDMA event triggers.
+ */
+
+static void gpio_irq_disable(struct irq_data *d)
+{
+ struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+ u32 mask = (u32) irq_data_get_irq_handler_data(d);
+
+ __raw_writel(mask, &g->clr_falling);
+ __raw_writel(mask, &g->clr_rising);
+}
+
+static void gpio_irq_enable(struct irq_data *d)
+{
+ struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+ u32 mask = (u32) irq_data_get_irq_handler_data(d);
+ unsigned status = irqd_get_trigger_type(d);
+
+ status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
+ if (!status)
+ status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
+
+ if (status & IRQ_TYPE_EDGE_FALLING)
+ __raw_writel(mask, &g->set_falling);
+ if (status & IRQ_TYPE_EDGE_RISING)
+ __raw_writel(mask, &g->set_rising);
+}
+
+static int gpio_irq_type(struct irq_data *d, unsigned trigger)
+{
+ if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip gpio_irqchip = {
+ .name = "GPIO",
+ .irq_enable = gpio_irq_enable,
+ .irq_disable = gpio_irq_disable,
+ .irq_set_type = gpio_irq_type,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static void
+gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct davinci_gpio_regs __iomem *g;
+ u32 mask = 0xffff;
+ struct davinci_gpio_controller *d;
+
+ d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
+ g = (struct davinci_gpio_regs __iomem *)d->regs;
+
+ /* we only care about one bank */
+ if (irq & 1)
+ mask <<= 16;
+
+ /* temporarily mask (level sensitive) parent IRQ */
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+ while (1) {
+ u32 status;
+ int n;
+ int res;
+
+ /* ack any irqs */
+ status = __raw_readl(&g->intstat) & mask;
+ if (!status)
+ break;
+ __raw_writel(status, &g->intstat);
+
+ /* now demux them to the right lowlevel handler */
+ n = d->irq_base;
+ if (irq & 1) {
+ n += 16;
+ status >>= 16;
+ }
+
+ while (status) {
+ res = ffs(status);
+ n += res;
+ generic_handle_irq(n - 1);
+ status >>= res;
+ }
+ }
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ /* now it may re-trigger */
+}
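The demux loop above walks the pending mask with ffs(), dispatching the lowest set bit and shifting it away on each pass. A stand-alone walk of the same kind, with a worked result in the trailing comment:

#include <linux/bitops.h>
#include <linux/kernel.h>

static void sketch_demux(u32 status, unsigned int irq_base)
{
	int n = irq_base;
	int res;

	while (status) {
		res = ffs(status);	/* 1-based index of the lowest set bit */
		n += res;
		pr_info("would dispatch irq %d\n", n - 1);
		status >>= res;
	}
}
/* sketch_demux(0x12, 100) reports irq 101 (bit 1) and then irq 104 (bit 4). */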
+
+static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_gpio_controller *d = chip2controller(chip);
+
+ if (d->irq_base >= 0)
+ return d->irq_base + offset;
+ else
+ return -ENODEV;
+}
+
+static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+ /* NOTE: we assume for now that only irqs in the first gpio_chip
+ * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
+ */
+ if (offset < soc_info->gpio_unbanked)
+ return soc_info->gpio_irq + offset;
+ else
+ return -ENODEV;
+}
+
+static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger)
+{
+ struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+ u32 mask = (u32) irq_data_get_irq_handler_data(d);
+
+ if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ return -EINVAL;
+
+ __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
+ ? &g->set_falling : &g->clr_falling);
+ __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
+ ? &g->set_rising : &g->clr_rising);
+
+ return 0;
+}
+
+/*
+ * NOTE: for suspend/resume, probably best to make a platform_device with
+ * suspend_late/resume_early calls hooking into results of the set_wake()
+ * calls ... so if no gpios are wakeup events the clock can be disabled,
+ * with outputs left at previously set levels, and so that VDD3P3V.IOPWDN0
+ * (dm6446) can be set appropriately for GPIOV33 pins.
+ */
+
+static int __init davinci_gpio_irq_setup(void)
+{
+ unsigned gpio, irq, bank;
+ struct clk *clk;
+ u32 binten = 0;
+ unsigned ngpio, bank_irq;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct davinci_gpio_regs __iomem *g;
+
+ ngpio = soc_info->gpio_num;
+
+ bank_irq = soc_info->gpio_irq;
+ if (bank_irq == 0) {
+ printk(KERN_ERR "Don't know first GPIO bank IRQ.\n");
+ return -EINVAL;
+ }
+
+ clk = clk_get(NULL, "gpio");
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "Error %ld getting gpio clock?\n",
+ PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ clk_enable(clk);
+
+ /* Arrange gpio_to_irq() support, handling either direct IRQs or
+ * banked IRQs. Having GPIOs in the first GPIO bank use direct
+ * IRQs, while the others use banked IRQs, would need some setup
+ * tweaks to recognize hardware which can do that.
+ */
+ for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
+ chips[bank].chip.to_irq = gpio_to_irq_banked;
+ chips[bank].irq_base = soc_info->gpio_unbanked
+ ? -EINVAL
+ : (soc_info->intc_irq_num + gpio);
+ }
+
+ /*
+ * AINTC can handle direct/unbanked IRQs for GPIOs, with the GPIO
+ * controller only handling trigger modes. We currently assume no
+ * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
+ */
+ if (soc_info->gpio_unbanked) {
+ static struct irq_chip gpio_irqchip_unbanked;
+
+ /* pass "bank 0" GPIO IRQs to AINTC */
+ chips[0].chip.to_irq = gpio_to_irq_unbanked;
+ binten = BIT(0);
+
+ /* AINTC handles mask/unmask; GPIO handles triggering */
+ irq = bank_irq;
+ gpio_irqchip_unbanked = *irq_get_chip(irq);
+ gpio_irqchip_unbanked.name = "GPIO-AINTC";
+ gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked;
+
+ /* default trigger: both edges */
+ g = gpio2regs(0);
+ __raw_writel(~0, &g->set_falling);
+ __raw_writel(~0, &g->set_rising);
+
+ /* set the direct IRQs up to use that irqchip */
+ for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
+ irq_set_chip(irq, &gpio_irqchip_unbanked);
+ irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
+ irq_set_chip_data(irq, (__force void *)g);
+ irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
+ }
+
+ goto done;
+ }
+
+ /*
+ * Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
+ * then chain through our own handler.
+ */
+ for (gpio = 0, irq = gpio_to_irq(0), bank = 0;
+ gpio < ngpio;
+ bank++, bank_irq++) {
+ unsigned i;
+
+ /* disabled by default, enabled only as needed */
+ g = gpio2regs(gpio);
+ __raw_writel(~0, &g->clr_falling);
+ __raw_writel(~0, &g->clr_rising);
+
+ /* set up all irqs in this bank */
+ irq_set_chained_handler(bank_irq, gpio_irq_handler);
+
+ /*
+ * Each chip handles 32 gpios, and each irq bank consists of 16
+ * gpio irqs. Pass the irq bank's corresponding controller to
+ * the chained irq handler.
+ */
+ irq_set_handler_data(bank_irq, &chips[gpio / 32]);
+
+ for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
+ irq_set_chip(irq, &gpio_irqchip);
+ irq_set_chip_data(irq, (__force void *)g);
+ irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
+ irq_set_handler(irq, handle_simple_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
+ binten |= BIT(bank);
+ }
+
+done:
+ /* BINTEN -- per-bank interrupt enable. genirq would also let these
+ * bits be set/cleared dynamically.
+ */
+ __raw_writel(binten, gpio_base + 0x08);
+
+ printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
+
+ return 0;
+}
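From a board driver's point of view the banked/unbanked split configured above is transparent: gpio_to_irq() hands back whichever Linux IRQ was wired up, and the normal request_irq() flow applies. A sketch under assumed names (pin, handler and label are placeholders):

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t sketch_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int sketch_hook_gpio_irq(unsigned gpio)	/* e.g. a button line */
{
	int irq, err;

	err = gpio_request_one(gpio, GPIOF_IN, "sketch-button");
	if (err)
		return err;

	irq = gpio_to_irq(gpio);	/* banked or direct, resolved here */
	if (irq < 0) {
		gpio_free(gpio);
		return irq;
	}

	return request_irq(irq, sketch_handler, IRQF_TRIGGER_FALLING,
			   "sketch-button", NULL);
}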
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 72fb9c66532..1c0fc3756cb 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
@@ -23,6 +24,9 @@
#include <linux/basic_mmio_gpio.h>
#include <mach/hardware.h>
+#include <mach/gpio-ep93xx.h>
+
+#define irq_to_gpio(irq) ((irq) - gpio_to_irq(0))
struct ep93xx_gpio {
void __iomem *mmio_base;
@@ -307,6 +311,21 @@ static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
return 0;
}
+/*
+ * Map GPIO A0..A7 (0..7) to irq 64..71,
+ * B0..B7 (7..15) to irq 72..79, and
+ * F0..F7 (16..24) to irq 80..87.
+ */
+static int ep93xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ if (gpio > EP93XX_GPIO_LINE_MAX_IRQ)
+ return -EINVAL;
+
+ return 64 + gpio;
+}
+
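Under this mapping the IRQ is simply 64 + gpio: B3 (gpio 11) yields irq 75 and F7 (gpio 23) yields irq 87, while anything above EP93XX_GPIO_LINE_MAX_IRQ returns -EINVAL.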
static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
void __iomem *mmio_base, struct ep93xx_gpio_bank *bank)
{
@@ -321,8 +340,10 @@ static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
bgc->gc.label = bank->label;
bgc->gc.base = bank->base;
- if (bank->has_debounce)
+ if (bank->has_debounce) {
bgc->gc.set_debounce = ep93xx_gpio_set_debounce;
+ bgc->gc.to_irq = ep93xx_gpio_to_irq;
+ }
return gpiochip_add(&bgc->gc);
}
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
deleted file mode 100644
index d24b337cf1a..00000000000
--- a/drivers/gpio/gpio-exynos4.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * EXYNOS4 - GPIOlib support
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <mach/map.h>
-
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg.h>
-#include <plat/gpio-cfg-helpers.h>
-
-int s3c_gpio_setpull_exynos4(struct s3c_gpio_chip *chip,
- unsigned int off, s3c_gpio_pull_t pull)
-{
- if (pull == S3C_GPIO_PULL_UP)
- pull = 3;
-
- return s3c_gpio_setpull_updown(chip, off, pull);
-}
-
-s3c_gpio_pull_t s3c_gpio_getpull_exynos4(struct s3c_gpio_chip *chip,
- unsigned int off)
-{
- s3c_gpio_pull_t pull;
-
- pull = s3c_gpio_getpull_updown(chip, off);
- if (pull == 3)
- pull = S3C_GPIO_PULL_UP;
-
- return pull;
-}
-
-static struct s3c_gpio_cfg gpio_cfg = {
- .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
- .set_pull = s3c_gpio_setpull_exynos4,
- .get_pull = s3c_gpio_getpull_exynos4,
-};
-
-static struct s3c_gpio_cfg gpio_cfg_noint = {
- .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
- .set_pull = s3c_gpio_setpull_exynos4,
- .get_pull = s3c_gpio_getpull_exynos4,
-};
-
-/*
- * Following are the gpio banks in v310.
- *
- * The 'config' member when left to NULL, is initialized to the default
- * structure gpio_cfg in the init function below.
- *
- * The 'base' member is also initialized in the init function below.
- * Note: The initialization of 'base' member of s3c_gpio_chip structure
- * uses the above macro and depends on the banks being listed in order here.
- */
-static struct s3c_gpio_chip exynos4_gpio_part1_4bit[] = {
- {
- .chip = {
- .base = EXYNOS4_GPA0(0),
- .ngpio = EXYNOS4_GPIO_A0_NR,
- .label = "GPA0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPA1(0),
- .ngpio = EXYNOS4_GPIO_A1_NR,
- .label = "GPA1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPB(0),
- .ngpio = EXYNOS4_GPIO_B_NR,
- .label = "GPB",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPC0(0),
- .ngpio = EXYNOS4_GPIO_C0_NR,
- .label = "GPC0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPC1(0),
- .ngpio = EXYNOS4_GPIO_C1_NR,
- .label = "GPC1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPD0(0),
- .ngpio = EXYNOS4_GPIO_D0_NR,
- .label = "GPD0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPD1(0),
- .ngpio = EXYNOS4_GPIO_D1_NR,
- .label = "GPD1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPE0(0),
- .ngpio = EXYNOS4_GPIO_E0_NR,
- .label = "GPE0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPE1(0),
- .ngpio = EXYNOS4_GPIO_E1_NR,
- .label = "GPE1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPE2(0),
- .ngpio = EXYNOS4_GPIO_E2_NR,
- .label = "GPE2",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPE3(0),
- .ngpio = EXYNOS4_GPIO_E3_NR,
- .label = "GPE3",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPE4(0),
- .ngpio = EXYNOS4_GPIO_E4_NR,
- .label = "GPE4",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPF0(0),
- .ngpio = EXYNOS4_GPIO_F0_NR,
- .label = "GPF0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPF1(0),
- .ngpio = EXYNOS4_GPIO_F1_NR,
- .label = "GPF1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPF2(0),
- .ngpio = EXYNOS4_GPIO_F2_NR,
- .label = "GPF2",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPF3(0),
- .ngpio = EXYNOS4_GPIO_F3_NR,
- .label = "GPF3",
- },
- },
-};
-
-static struct s3c_gpio_chip exynos4_gpio_part2_4bit[] = {
- {
- .chip = {
- .base = EXYNOS4_GPJ0(0),
- .ngpio = EXYNOS4_GPIO_J0_NR,
- .label = "GPJ0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPJ1(0),
- .ngpio = EXYNOS4_GPIO_J1_NR,
- .label = "GPJ1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPK0(0),
- .ngpio = EXYNOS4_GPIO_K0_NR,
- .label = "GPK0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPK1(0),
- .ngpio = EXYNOS4_GPIO_K1_NR,
- .label = "GPK1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPK2(0),
- .ngpio = EXYNOS4_GPIO_K2_NR,
- .label = "GPK2",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPK3(0),
- .ngpio = EXYNOS4_GPIO_K3_NR,
- .label = "GPK3",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPL0(0),
- .ngpio = EXYNOS4_GPIO_L0_NR,
- .label = "GPL0",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPL1(0),
- .ngpio = EXYNOS4_GPIO_L1_NR,
- .label = "GPL1",
- },
- }, {
- .chip = {
- .base = EXYNOS4_GPL2(0),
- .ngpio = EXYNOS4_GPIO_L2_NR,
- .label = "GPL2",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = EXYNOS4_GPY0(0),
- .ngpio = EXYNOS4_GPIO_Y0_NR,
- .label = "GPY0",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = EXYNOS4_GPY1(0),
- .ngpio = EXYNOS4_GPIO_Y1_NR,
- .label = "GPY1",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = EXYNOS4_GPY2(0),
- .ngpio = EXYNOS4_GPIO_Y2_NR,
- .label = "GPY2",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = EXYNOS4_GPY3(0),
- .ngpio = EXYNOS4_GPIO_Y3_NR,
- .label = "GPY3",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = EXYNOS4_GPY4(0),
- .ngpio = EXYNOS4_GPIO_Y4_NR,
- .label = "GPY4",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = EXYNOS4_GPY5(0),
- .ngpio = EXYNOS4_GPIO_Y5_NR,
- .label = "GPY5",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = EXYNOS4_GPY6(0),
- .ngpio = EXYNOS4_GPIO_Y6_NR,
- .label = "GPY6",
- },
- }, {
- .base = (S5P_VA_GPIO2 + 0xC00),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(0),
- .chip = {
- .base = EXYNOS4_GPX0(0),
- .ngpio = EXYNOS4_GPIO_X0_NR,
- .label = "GPX0",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO2 + 0xC20),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(8),
- .chip = {
- .base = EXYNOS4_GPX1(0),
- .ngpio = EXYNOS4_GPIO_X1_NR,
- .label = "GPX1",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO2 + 0xC40),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(16),
- .chip = {
- .base = EXYNOS4_GPX2(0),
- .ngpio = EXYNOS4_GPIO_X2_NR,
- .label = "GPX2",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO2 + 0xC60),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(24),
- .chip = {
- .base = EXYNOS4_GPX3(0),
- .ngpio = EXYNOS4_GPIO_X3_NR,
- .label = "GPX3",
- .to_irq = samsung_gpiolib_to_irq,
- },
- },
-};
-
-static struct s3c_gpio_chip exynos4_gpio_part3_4bit[] = {
- {
- .chip = {
- .base = EXYNOS4_GPZ(0),
- .ngpio = EXYNOS4_GPIO_Z_NR,
- .label = "GPZ",
- },
- },
-};
-
-static __init int exynos4_gpiolib_init(void)
-{
- struct s3c_gpio_chip *chip;
- int i;
- int group = 0;
- int nr_chips;
-
- /* GPIO part 1 */
-
- chip = exynos4_gpio_part1_4bit;
- nr_chips = ARRAY_SIZE(exynos4_gpio_part1_4bit);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (chip->config == NULL) {
- chip->config = &gpio_cfg;
- /* Assign the GPIO interrupt group */
- chip->group = group++;
- }
- if (chip->base == NULL)
- chip->base = S5P_VA_GPIO1 + (i) * 0x20;
- }
-
- samsung_gpiolib_add_4bit_chips(exynos4_gpio_part1_4bit, nr_chips);
-
- /* GPIO part 2 */
-
- chip = exynos4_gpio_part2_4bit;
- nr_chips = ARRAY_SIZE(exynos4_gpio_part2_4bit);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (chip->config == NULL) {
- chip->config = &gpio_cfg;
- /* Assign the GPIO interrupt group */
- chip->group = group++;
- }
- if (chip->base == NULL)
- chip->base = S5P_VA_GPIO2 + (i) * 0x20;
- }
-
- samsung_gpiolib_add_4bit_chips(exynos4_gpio_part2_4bit, nr_chips);
-
- /* GPIO part 3 */
-
- chip = exynos4_gpio_part3_4bit;
- nr_chips = ARRAY_SIZE(exynos4_gpio_part3_4bit);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (chip->config == NULL) {
- chip->config = &gpio_cfg;
- /* Assign the GPIO interrupt group */
- chip->group = group++;
- }
- if (chip->base == NULL)
- chip->base = S5P_VA_GPIO3 + (i) * 0x20;
- }
-
- samsung_gpiolib_add_4bit_chips(exynos4_gpio_part3_4bit, nr_chips);
- s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
- s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
-
- return 0;
-}
-core_initcall(exynos4_gpiolib_init);
diff --git a/drivers/gpio/gpio-ks8695.c b/drivers/gpio/gpio-ks8695.c
new file mode 100644
index 00000000000..a3ac66ea364
--- /dev/null
+++ b/drivers/gpio/gpio-ks8695.c
@@ -0,0 +1,319 @@
+/*
+ * arch/arm/mach-ks8695/gpio.c
+ *
+ * Copyright (C) 2006 Andrew Victor
+ * Updated to GPIOLIB, Copyright 2008 Simtec Electronics
+ * Daniel Silverstone <dsilvers@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/mach/irq.h>
+
+#include <mach/regs-gpio.h>
+#include <mach/gpio-ks8695.h>
+
+/*
+ * Configure a GPIO line for either GPIO function, or its internal
+ * function (Interrupt, Timer, etc).
+ */
+static void ks8695_gpio_mode(unsigned int pin, short gpio)
+{
+ unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
+ unsigned long x, flags;
+
+ if (pin > KS8695_GPIO_5) /* only GPIO 0..5 have internal functions */
+ return;
+
+ local_irq_save(flags);
+
+ x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
+ if (gpio) /* GPIO: set bit to 0 */
+ x &= ~enable[pin];
+ else /* Internal function: set bit to 1 */
+ x |= enable[pin];
+ __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPC);
+
+ local_irq_restore(flags);
+}
+
+
+static unsigned short gpio_irq[] = { KS8695_IRQ_EXTERN0, KS8695_IRQ_EXTERN1, KS8695_IRQ_EXTERN2, KS8695_IRQ_EXTERN3 };
+
+/*
+ * Configure GPIO pin as external interrupt source.
+ */
+int ks8695_gpio_interrupt(unsigned int pin, unsigned int type)
+{
+ unsigned long x, flags;
+
+ if (pin > KS8695_GPIO_3) /* only GPIO 0..3 can generate IRQ */
+ return -EINVAL;
+
+ local_irq_save(flags);
+
+ /* set pin as input */
+ x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
+ x &= ~IOPM(pin);
+ __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
+
+ local_irq_restore(flags);
+
+ /* Set IRQ triggering type */
+ irq_set_irq_type(gpio_irq[pin], type);
+
+ /* enable interrupt mode */
+ ks8695_gpio_mode(pin, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(ks8695_gpio_interrupt);
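ks8695_gpio_interrupt() only switches the pin to its external-interrupt function and programs the trigger; the caller still has to claim the resulting Linux IRQ. A sketch of that pairing (the handler and label are assumptions; KS8695_GPIO_2 comes from mach/gpio-ks8695.h):

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <mach/gpio-ks8695.h>

static irqreturn_t sketch_ext_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int sketch_use_extint(void)
{
	int irq, err;

	/* route GPIO 2 to its external-interrupt function, falling edge */
	err = ks8695_gpio_interrupt(KS8695_GPIO_2, IRQ_TYPE_EDGE_FALLING);
	if (err)
		return err;

	irq = gpio_to_irq(KS8695_GPIO_2);	/* resolves to KS8695_IRQ_EXTERN2 */
	return request_irq(irq, sketch_ext_handler, 0, "sketch-ext2", NULL);
}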
+
+
+
+/* .... Generic GPIO interface .............................................. */
+
+/*
+ * Configure the GPIO line as an input.
+ */
+static int ks8695_gpio_direction_input(struct gpio_chip *gc, unsigned int pin)
+{
+ unsigned long x, flags;
+
+ if (pin > KS8695_GPIO_15)
+ return -EINVAL;
+
+ /* set pin to GPIO mode */
+ ks8695_gpio_mode(pin, 1);
+
+ local_irq_save(flags);
+
+ /* set pin as input */
+ x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
+ x &= ~IOPM(pin);
+ __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+/*
+ * Configure the GPIO line as an output, with default state.
+ */
+static int ks8695_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int pin, int state)
+{
+ unsigned long x, flags;
+
+ if (pin > KS8695_GPIO_15)
+ return -EINVAL;
+
+ /* set pin to GPIO mode */
+ ks8695_gpio_mode(pin, 1);
+
+ local_irq_save(flags);
+
+ /* set line state */
+ x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
+ if (state)
+ x |= IOPD(pin);
+ else
+ x &= ~IOPD(pin);
+ __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
+
+ /* set pin as output */
+ x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
+ x |= IOPM(pin);
+ __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+/*
+ * Set the state of an output GPIO line.
+ */
+static void ks8695_gpio_set_value(struct gpio_chip *gc,
+ unsigned int pin, int state)
+{
+ unsigned long x, flags;
+
+ if (pin > KS8695_GPIO_15)
+ return;
+
+ local_irq_save(flags);
+
+ /* set output line state */
+ x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
+ if (state)
+ x |= IOPD(pin);
+ else
+ x &= ~IOPD(pin);
+ __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
+
+ local_irq_restore(flags);
+}
+
+
+/*
+ * Read the state of a GPIO line.
+ */
+static int ks8695_gpio_get_value(struct gpio_chip *gc, unsigned int pin)
+{
+ unsigned long x;
+
+ if (pin > KS8695_GPIO_15)
+ return -EINVAL;
+
+ x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
+ return (x & IOPD(pin)) != 0;
+}
+
+
+/*
+ * Map GPIO line to IRQ number.
+ */
+static int ks8695_gpio_to_irq(struct gpio_chip *gc, unsigned int pin)
+{
+ if (pin > KS8695_GPIO_3) /* only GPIO 0..3 can generate IRQ */
+ return -EINVAL;
+
+ return gpio_irq[pin];
+}
+
+/*
+ * Map IRQ number to GPIO line.
+ */
+int irq_to_gpio(unsigned int irq)
+{
+ if ((irq < KS8695_IRQ_EXTERN0) || (irq > KS8695_IRQ_EXTERN3))
+ return -EINVAL;
+
+ return (irq - KS8695_IRQ_EXTERN0);
+}
+EXPORT_SYMBOL(irq_to_gpio);
+
+/* GPIOLIB interface */
+
+static struct gpio_chip ks8695_gpio_chip = {
+ .label = "KS8695",
+ .direction_input = ks8695_gpio_direction_input,
+ .direction_output = ks8695_gpio_direction_output,
+ .get = ks8695_gpio_get_value,
+ .set = ks8695_gpio_set_value,
+ .to_irq = ks8695_gpio_to_irq,
+ .base = 0,
+ .ngpio = 16,
+ .can_sleep = 0,
+};
+
+/* Register the GPIOs */
+void ks8695_register_gpios(void)
+{
+ if (gpiochip_add(&ks8695_gpio_chip))
+ printk(KERN_ERR "Unable to register core GPIOs\n");
+}
+
+/* .... Debug interface ..................................................... */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int ks8695_gpio_show(struct seq_file *s, void *unused)
+{
+ unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
+ unsigned int intmask[] = { IOPC_IOEINT0TM, IOPC_IOEINT1TM, IOPC_IOEINT2TM, IOPC_IOEINT3TM };
+ unsigned long mode, ctrl, data;
+ int i;
+
+ mode = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
+ ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
+ data = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
+
+ seq_printf(s, "Pin\tI/O\tFunction\tState\n\n");
+
+ for (i = KS8695_GPIO_0; i <= KS8695_GPIO_15 ; i++) {
+ seq_printf(s, "%i:\t", i);
+
+ seq_printf(s, "%s\t", (mode & IOPM(i)) ? "Output" : "Input");
+
+ if (i <= KS8695_GPIO_3) {
+ if (ctrl & enable[i]) {
+ seq_printf(s, "EXT%i ", i);
+
+ switch ((ctrl & intmask[i]) >> (4 * i)) {
+ case IOPC_TM_LOW:
+ seq_printf(s, "(Low)"); break;
+ case IOPC_TM_HIGH:
+ seq_printf(s, "(High)"); break;
+ case IOPC_TM_RISING:
+ seq_printf(s, "(Rising)"); break;
+ case IOPC_TM_FALLING:
+ seq_printf(s, "(Falling)"); break;
+ case IOPC_TM_EDGE:
+ seq_printf(s, "(Edges)"); break;
+ }
+ }
+ else
+ seq_printf(s, "GPIO\t");
+ }
+ else if (i <= KS8695_GPIO_5) {
+ if (ctrl & enable[i])
+ seq_printf(s, "TOUT%i\t", i - KS8695_GPIO_4);
+ else
+ seq_printf(s, "GPIO\t");
+ }
+ else
+ seq_printf(s, "GPIO\t");
+
+ seq_printf(s, "\t");
+
+ seq_printf(s, "%i\n", (data & IOPD(i)) ? 1 : 0);
+ }
+ return 0;
+}
+
+static int ks8695_gpio_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ks8695_gpio_show, NULL);
+}
+
+static const struct file_operations ks8695_gpio_operations = {
+ .open = ks8695_gpio_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init ks8695_gpio_debugfs_init(void)
+{
+ /* /sys/kernel/debug/ks8695_gpio */
+ (void) debugfs_create_file("ks8695_gpio", S_IFREG | S_IRUGO, NULL, NULL, &ks8695_gpio_operations);
+ return 0;
+}
+postcore_initcall(ks8695_gpio_debugfs_init);
+
+#endif
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index d2eb57c60e0..00692e89ef8 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -59,6 +59,7 @@ enum GPIO_REG {
GRER, /* rising edge detect */
GFER, /* falling edge detect */
GEDR, /* edge detect result */
+ GAFR, /* alt function */
};
struct lnw_gpio {
@@ -81,6 +82,31 @@ static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
return ptr;
}
+static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
+ enum GPIO_REG reg_type)
+{
+ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ unsigned nreg = chip->ngpio / 32;
+ u8 reg = offset / 16;
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+ return ptr;
+}
+
+static int lnw_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
+ u32 value = readl(gafr);
+ int shift = (offset % 16) << 1, af = (value >> shift) & 3;
+
+ if (af) {
+ value &= ~(3 << shift);
+ writel(value, gafr);
+ }
+ return 0;
+}
+
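Each GAFR word packs sixteen 2-bit alternate-function fields, and a field value of 0 selects plain GPIO, which is what lnw_gpio_request() forces. A generic sketch of the same 2-bit field handling, with a made-up register value in the trailing comment:

#include <linux/kernel.h>

/* clear the 2-bit alt-function field for 'offset' within a 16-pin word */
static u32 sketch_clear_af(u32 gafr, unsigned offset)
{
	int shift = (offset % 16) << 1;		/* 2 bits per pin */
	int af = (gafr >> shift) & 3;		/* 0 means plain GPIO */

	if (af)
		gafr &= ~(3 << shift);
	return gafr;
}
/* sketch_clear_af(0x30, 2) returns 0: pin 2 drops its alt function. */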
static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
{
void __iomem *gplr = gpio_reg(chip, offset, GPLR);
@@ -321,6 +347,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->reg_base = base;
lnw->irq_base = irq_base;
lnw->chip.label = dev_name(&pdev->dev);
+ lnw->chip.request = lnw_gpio_request;
lnw->chip.direction_input = lnw_gpio_direction_input;
lnw->chip.direction_output = lnw_gpio_direction_output;
lnw->chip.get = lnw_gpio_get;
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
new file mode 100644
index 00000000000..5b6948081f8
--- /dev/null
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -0,0 +1,446 @@
+/*
+ * arch/arm/mach-lpc32xx/gpiolib.c
+ *
+ * Author: Kevin Wells <kevin.wells@nxp.com>
+ *
+ * Copyright (C) 2010 NXP Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+
+#include <mach/hardware.h>
+#include <mach/platform.h>
+#include <mach/gpio-lpc32xx.h>
+
+#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
+#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
+#define LPC32XX_GPIO_P3_OUTP_CLR _GPREG(0x008)
+#define LPC32XX_GPIO_P3_OUTP_STATE _GPREG(0x00C)
+#define LPC32XX_GPIO_P2_DIR_SET _GPREG(0x010)
+#define LPC32XX_GPIO_P2_DIR_CLR _GPREG(0x014)
+#define LPC32XX_GPIO_P2_DIR_STATE _GPREG(0x018)
+#define LPC32XX_GPIO_P2_INP_STATE _GPREG(0x01C)
+#define LPC32XX_GPIO_P2_OUTP_SET _GPREG(0x020)
+#define LPC32XX_GPIO_P2_OUTP_CLR _GPREG(0x024)
+#define LPC32XX_GPIO_P2_MUX_SET _GPREG(0x028)
+#define LPC32XX_GPIO_P2_MUX_CLR _GPREG(0x02C)
+#define LPC32XX_GPIO_P2_MUX_STATE _GPREG(0x030)
+#define LPC32XX_GPIO_P0_INP_STATE _GPREG(0x040)
+#define LPC32XX_GPIO_P0_OUTP_SET _GPREG(0x044)
+#define LPC32XX_GPIO_P0_OUTP_CLR _GPREG(0x048)
+#define LPC32XX_GPIO_P0_OUTP_STATE _GPREG(0x04C)
+#define LPC32XX_GPIO_P0_DIR_SET _GPREG(0x050)
+#define LPC32XX_GPIO_P0_DIR_CLR _GPREG(0x054)
+#define LPC32XX_GPIO_P0_DIR_STATE _GPREG(0x058)
+#define LPC32XX_GPIO_P1_INP_STATE _GPREG(0x060)
+#define LPC32XX_GPIO_P1_OUTP_SET _GPREG(0x064)
+#define LPC32XX_GPIO_P1_OUTP_CLR _GPREG(0x068)
+#define LPC32XX_GPIO_P1_OUTP_STATE _GPREG(0x06C)
+#define LPC32XX_GPIO_P1_DIR_SET _GPREG(0x070)
+#define LPC32XX_GPIO_P1_DIR_CLR _GPREG(0x074)
+#define LPC32XX_GPIO_P1_DIR_STATE _GPREG(0x078)
+
+#define GPIO012_PIN_TO_BIT(x) (1 << (x))
+#define GPIO3_PIN_TO_BIT(x) (1 << ((x) + 25))
+#define GPO3_PIN_TO_BIT(x) (1 << (x))
+#define GPIO012_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
+#define GPIO3_PIN_IN_SHIFT(x) ((x) == 5 ? 24 : 10 + (x))
+#define GPIO3_PIN_IN_SEL(x, y) ((x) >> GPIO3_PIN_IN_SHIFT(y))
+#define GPIO3_PIN5_IN_SEL(x) (((x) >> 24) & 1)
+#define GPI3_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
+
+struct gpio_regs {
+ void __iomem *inp_state;
+ void __iomem *outp_set;
+ void __iomem *outp_clr;
+ void __iomem *dir_set;
+ void __iomem *dir_clr;
+};
+
+/*
+ * GPIO names
+ */
+static const char *gpio_p0_names[LPC32XX_GPIO_P0_MAX] = {
+ "p0.0", "p0.1", "p0.2", "p0.3",
+ "p0.4", "p0.5", "p0.6", "p0.7"
+};
+
+static const char *gpio_p1_names[LPC32XX_GPIO_P1_MAX] = {
+ "p1.0", "p1.1", "p1.2", "p1.3",
+ "p1.4", "p1.5", "p1.6", "p1.7",
+ "p1.8", "p1.9", "p1.10", "p1.11",
+ "p1.12", "p1.13", "p1.14", "p1.15",
+ "p1.16", "p1.17", "p1.18", "p1.19",
+ "p1.20", "p1.21", "p1.22", "p1.23",
+};
+
+static const char *gpio_p2_names[LPC32XX_GPIO_P2_MAX] = {
+ "p2.0", "p2.1", "p2.2", "p2.3",
+ "p2.4", "p2.5", "p2.6", "p2.7",
+ "p2.8", "p2.9", "p2.10", "p2.11",
+ "p2.12"
+};
+
+static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = {
+ "gpi000", "gpio01", "gpio02", "gpio03",
+ "gpio04", "gpio05"
+};
+
+static const char *gpi_p3_names[LPC32XX_GPI_P3_MAX] = {
+ "gpi00", "gpi01", "gpi02", "gpi03",
+ "gpi04", "gpi05", "gpi06", "gpi07",
+ "gpi08", "gpi09", NULL, NULL,
+ NULL, NULL, NULL, "gpi15",
+ "gpi16", "gpi17", "gpi18", "gpi19",
+ "gpi20", "gpi21", "gpi22", "gpi23",
+ "gpi24", "gpi25", "gpi26", "gpi27"
+};
+
+static const char *gpo_p3_names[LPC32XX_GPO_P3_MAX] = {
+ "gpo00", "gpo01", "gpo02", "gpo03",
+ "gpo04", "gpo05", "gpo06", "gpo07",
+ "gpo08", "gpo09", "gpo10", "gpo11",
+ "gpo12", "gpo13", "gpo14", "gpo15",
+ "gpo16", "gpo17", "gpo18", "gpo19",
+ "gpo20", "gpo21", "gpo22", "gpo23"
+};
+
+static struct gpio_regs gpio_grp_regs_p0 = {
+ .inp_state = LPC32XX_GPIO_P0_INP_STATE,
+ .outp_set = LPC32XX_GPIO_P0_OUTP_SET,
+ .outp_clr = LPC32XX_GPIO_P0_OUTP_CLR,
+ .dir_set = LPC32XX_GPIO_P0_DIR_SET,
+ .dir_clr = LPC32XX_GPIO_P0_DIR_CLR,
+};
+
+static struct gpio_regs gpio_grp_regs_p1 = {
+ .inp_state = LPC32XX_GPIO_P1_INP_STATE,
+ .outp_set = LPC32XX_GPIO_P1_OUTP_SET,
+ .outp_clr = LPC32XX_GPIO_P1_OUTP_CLR,
+ .dir_set = LPC32XX_GPIO_P1_DIR_SET,
+ .dir_clr = LPC32XX_GPIO_P1_DIR_CLR,
+};
+
+static struct gpio_regs gpio_grp_regs_p2 = {
+ .inp_state = LPC32XX_GPIO_P2_INP_STATE,
+ .outp_set = LPC32XX_GPIO_P2_OUTP_SET,
+ .outp_clr = LPC32XX_GPIO_P2_OUTP_CLR,
+ .dir_set = LPC32XX_GPIO_P2_DIR_SET,
+ .dir_clr = LPC32XX_GPIO_P2_DIR_CLR,
+};
+
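+/*
+ * Note: gpio_grp_regs_p3 below points dir_set/dir_clr at the P2
+ * direction registers. The P3 GPIO direction bits appear to live in
+ * the upper bits of the P2 direction registers (see GPIO3_PIN_TO_BIT),
+ * so reusing LPC32XX_GPIO_P2_DIR_SET/CLR there is deliberate.
+ */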
+static struct gpio_regs gpio_grp_regs_p3 = {
+ .inp_state = LPC32XX_GPIO_P3_INP_STATE,
+ .outp_set = LPC32XX_GPIO_P3_OUTP_SET,
+ .outp_clr = LPC32XX_GPIO_P3_OUTP_CLR,
+ .dir_set = LPC32XX_GPIO_P2_DIR_SET,
+ .dir_clr = LPC32XX_GPIO_P2_DIR_CLR,
+};
+
+struct lpc32xx_gpio_chip {
+ struct gpio_chip chip;
+ struct gpio_regs *gpio_grp;
+};
+
+static inline struct lpc32xx_gpio_chip *to_lpc32xx_gpio(
+ struct gpio_chip *gpc)
+{
+ return container_of(gpc, struct lpc32xx_gpio_chip, chip);
+}
+
+static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group,
+ unsigned pin, int input)
+{
+ if (input)
+ __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ group->gpio_grp->dir_clr);
+ else
+ __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ group->gpio_grp->dir_set);
+}
+
+static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group,
+ unsigned pin, int input)
+{
+ u32 u = GPIO3_PIN_TO_BIT(pin);
+
+ if (input)
+ __raw_writel(u, group->gpio_grp->dir_clr);
+ else
+ __raw_writel(u, group->gpio_grp->dir_set);
+}
+
+static void __set_gpio_level_p012(struct lpc32xx_gpio_chip *group,
+ unsigned pin, int high)
+{
+ if (high)
+ __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ group->gpio_grp->outp_set);
+ else
+ __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ group->gpio_grp->outp_clr);
+}
+
+static void __set_gpio_level_p3(struct lpc32xx_gpio_chip *group,
+ unsigned pin, int high)
+{
+ u32 u = GPIO3_PIN_TO_BIT(pin);
+
+ if (high)
+ __raw_writel(u, group->gpio_grp->outp_set);
+ else
+ __raw_writel(u, group->gpio_grp->outp_clr);
+}
+
+static void __set_gpo_level_p3(struct lpc32xx_gpio_chip *group,
+ unsigned pin, int high)
+{
+ if (high)
+ __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set);
+ else
+ __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr);
+}
+
+static int __get_gpio_state_p012(struct lpc32xx_gpio_chip *group,
+ unsigned pin)
+{
+ return GPIO012_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state),
+ pin);
+}
+
+static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group,
+ unsigned pin)
+{
+ int state = __raw_readl(group->gpio_grp->inp_state);
+
+ /*
+ * P3 GPIO pin input mapping is not contiguous: GPIOP3-0..4 are mapped
+ * to bits 10..14, while GPIOP3-5 is mapped to bit 24.
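+ * For example, GPIOP3-2 is read back from bit 12. Also note that
+ * GPIO3_PIN_IN_SEL() returns the raw shifted word rather than a 0/1
+ * value, so the result should only be tested for zero vs non-zero.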
+ */
+ return GPIO3_PIN_IN_SEL(state, pin);
+}
+
+static int __get_gpi_state_p3(struct lpc32xx_gpio_chip *group,
+ unsigned pin)
+{
+ return GPI3_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state), pin);
+}
+
+/*
+ * GENERIC_GPIO primitives.
+ */
+static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip,
+ unsigned pin)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpio_dir_p012(group, pin, 1);
+
+ return 0;
+}
+
+static int lpc32xx_gpio_dir_input_p3(struct gpio_chip *chip,
+ unsigned pin)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpio_dir_p3(group, pin, 1);
+
+ return 0;
+}
+
+static int lpc32xx_gpio_dir_in_always(struct gpio_chip *chip,
+ unsigned pin)
+{
+ return 0;
+}
+
+static int lpc32xx_gpio_get_value_p012(struct gpio_chip *chip, unsigned pin)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ return __get_gpio_state_p012(group, pin);
+}
+
+static int lpc32xx_gpio_get_value_p3(struct gpio_chip *chip, unsigned pin)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ return __get_gpio_state_p3(group, pin);
+}
+
+static int lpc32xx_gpi_get_value(struct gpio_chip *chip, unsigned pin)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ return __get_gpi_state_p3(group, pin);
+}
+
+static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpio_dir_p012(group, pin, 0);
+
+ return 0;
+}
+
+static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpio_dir_p3(group, pin, 0);
+
+ return 0;
+}
+
+static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ return 0;
+}
+
+static void lpc32xx_gpio_set_value_p012(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpio_level_p012(group, pin, value);
+}
+
+static void lpc32xx_gpio_set_value_p3(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpio_level_p3(group, pin, value);
+}
+
+static void lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpo_level_p3(group, pin, value);
+}
+
+static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
+{
+ if (pin < chip->ngpio)
+ return 0;
+
+ return -EINVAL;
+}
+
+static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
+ {
+ .chip = {
+ .label = "gpio_p0",
+ .direction_input = lpc32xx_gpio_dir_input_p012,
+ .get = lpc32xx_gpio_get_value_p012,
+ .direction_output = lpc32xx_gpio_dir_output_p012,
+ .set = lpc32xx_gpio_set_value_p012,
+ .request = lpc32xx_gpio_request,
+ .base = LPC32XX_GPIO_P0_GRP,
+ .ngpio = LPC32XX_GPIO_P0_MAX,
+ .names = gpio_p0_names,
+ .can_sleep = 0,
+ },
+ .gpio_grp = &gpio_grp_regs_p0,
+ },
+ {
+ .chip = {
+ .label = "gpio_p1",
+ .direction_input = lpc32xx_gpio_dir_input_p012,
+ .get = lpc32xx_gpio_get_value_p012,
+ .direction_output = lpc32xx_gpio_dir_output_p012,
+ .set = lpc32xx_gpio_set_value_p012,
+ .request = lpc32xx_gpio_request,
+ .base = LPC32XX_GPIO_P1_GRP,
+ .ngpio = LPC32XX_GPIO_P1_MAX,
+ .names = gpio_p1_names,
+ .can_sleep = 0,
+ },
+ .gpio_grp = &gpio_grp_regs_p1,
+ },
+ {
+ .chip = {
+ .label = "gpio_p2",
+ .direction_input = lpc32xx_gpio_dir_input_p012,
+ .get = lpc32xx_gpio_get_value_p012,
+ .direction_output = lpc32xx_gpio_dir_output_p012,
+ .set = lpc32xx_gpio_set_value_p012,
+ .request = lpc32xx_gpio_request,
+ .base = LPC32XX_GPIO_P2_GRP,
+ .ngpio = LPC32XX_GPIO_P2_MAX,
+ .names = gpio_p2_names,
+ .can_sleep = 0,
+ },
+ .gpio_grp = &gpio_grp_regs_p2,
+ },
+ {
+ .chip = {
+ .label = "gpio_p3",
+ .direction_input = lpc32xx_gpio_dir_input_p3,
+ .get = lpc32xx_gpio_get_value_p3,
+ .direction_output = lpc32xx_gpio_dir_output_p3,
+ .set = lpc32xx_gpio_set_value_p3,
+ .request = lpc32xx_gpio_request,
+ .base = LPC32XX_GPIO_P3_GRP,
+ .ngpio = LPC32XX_GPIO_P3_MAX,
+ .names = gpio_p3_names,
+ .can_sleep = 0,
+ },
+ .gpio_grp = &gpio_grp_regs_p3,
+ },
+ {
+ .chip = {
+ .label = "gpi_p3",
+ .direction_input = lpc32xx_gpio_dir_in_always,
+ .get = lpc32xx_gpi_get_value,
+ .request = lpc32xx_gpio_request,
+ .base = LPC32XX_GPI_P3_GRP,
+ .ngpio = LPC32XX_GPI_P3_MAX,
+ .names = gpi_p3_names,
+ .can_sleep = 0,
+ },
+ .gpio_grp = &gpio_grp_regs_p3,
+ },
+ {
+ .chip = {
+ .label = "gpo_p3",
+ .direction_output = lpc32xx_gpio_dir_out_always,
+ .set = lpc32xx_gpo_set_value,
+ .request = lpc32xx_gpio_request,
+ .base = LPC32XX_GPO_P3_GRP,
+ .ngpio = LPC32XX_GPO_P3_MAX,
+ .names = gpo_p3_names,
+ .can_sleep = 0,
+ },
+ .gpio_grp = &gpio_grp_regs_p3,
+ },
+};
+
+void __init lpc32xx_gpio_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++)
+ gpiochip_add(&lpc32xx_gpiochip[i].chip);
+}
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index b3b4652e89e..2de57ce5feb 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -26,6 +26,7 @@
#include <linux/spi/mc33880.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRIVER_NAME "mc33880"
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 1ef46e6c2a2..c5d83a8a91c 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index a9016f56ed7..ea8e7386925 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -14,10 +14,22 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#define IOH_EDGE_FALLING 0
+#define IOH_EDGE_RISING BIT(0)
+#define IOH_LEVEL_L BIT(1)
+#define IOH_LEVEL_H (BIT(0) | BIT(1))
+#define IOH_EDGE_BOTH BIT(2)
+#define IOH_IM_MASK (BIT(0) | BIT(1) | BIT(2))
+
+#define IOH_IRQ_BASE 0
#define PCI_VENDOR_ID_ROHM 0x10DB
@@ -46,12 +58,22 @@ struct ioh_regs {
/**
* struct ioh_gpio_reg_data - The register store data.
+ * @ien_reg: To store contents of interrupt enable register.
+ * @imask_reg: To store contents of interrupt mask register.
* @po_reg: To store contents of PO register.
* @pm_reg: To store contents of PM register.
+ * @im0_reg: To store contents of interrupt mode register 0.
+ * @im1_reg: To store contents of interrupt mode register 1.
+ * @use_sel_reg: To store contents of the GPIO_USE_SEL0~3 registers.
*/
struct ioh_gpio_reg_data {
+ u32 ien_reg;
+ u32 imask_reg;
u32 po_reg;
u32 pm_reg;
+ u32 im0_reg;
+ u32 im1_reg;
+ u32 use_sel_reg;
};
/**
@@ -62,7 +84,11 @@ struct ioh_gpio_reg_data {
* @gpio: Data for GPIO infrastructure.
* @ioh_gpio_reg: Memory mapped Register data is saved here
* when suspend.
+ * @gpio_use_sel: Save GPIO_USE_SEL1~4 register for PM
* @ch: Indicate GPIO channel
+ * @irq_base: Base IRQ number assigned to this chip
+ * @spinlock: Protects register access from interrupt
+ * context (ioh_irq_type) and the PM callbacks
*/
struct ioh_gpio {
void __iomem *base;
@@ -70,8 +96,11 @@ struct ioh_gpio {
struct device *dev;
struct gpio_chip gpio;
struct ioh_gpio_reg_data ioh_gpio_reg;
+ u32 gpio_use_sel;
struct mutex lock;
int ch;
+ int irq_base;
+ spinlock_t spinlock;
};
static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
@@ -145,8 +174,25 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
*/
static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
{
- chip->ioh_gpio_reg.po_reg = ioread32(&chip->reg->regs[chip->ch].po);
- chip->ioh_gpio_reg.pm_reg = ioread32(&chip->reg->regs[chip->ch].pm);
+ int i;
+
+ for (i = 0; i < 8; i++, chip++) {
+ chip->ioh_gpio_reg.po_reg =
+ ioread32(&chip->reg->regs[chip->ch].po);
+ chip->ioh_gpio_reg.pm_reg =
+ ioread32(&chip->reg->regs[chip->ch].pm);
+ chip->ioh_gpio_reg.ien_reg =
+ ioread32(&chip->reg->regs[chip->ch].ien);
+ chip->ioh_gpio_reg.imask_reg =
+ ioread32(&chip->reg->regs[chip->ch].imask);
+ chip->ioh_gpio_reg.im0_reg =
+ ioread32(&chip->reg->regs[chip->ch].im_0);
+ chip->ioh_gpio_reg.im1_reg =
+ ioread32(&chip->reg->regs[chip->ch].im_1);
+ if (i < 4)
+ chip->ioh_gpio_reg.use_sel_reg =
+ ioread32(&chip->reg->ioh_sel_reg[i]);
+ }
}
/*
@@ -154,13 +200,34 @@ static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
*/
static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
{
- /* to store contents of PO register */
- iowrite32(chip->ioh_gpio_reg.po_reg, &chip->reg->regs[chip->ch].po);
- /* to store contents of PM register */
- iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm);
+ int i;
+
+ for (i = 0; i < 8; i++, chip++) {
+ iowrite32(chip->ioh_gpio_reg.po_reg,
+ &chip->reg->regs[chip->ch].po);
+ iowrite32(chip->ioh_gpio_reg.pm_reg,
+ &chip->reg->regs[chip->ch].pm);
+ iowrite32(chip->ioh_gpio_reg.ien_reg,
+ &chip->reg->regs[chip->ch].ien);
+ iowrite32(chip->ioh_gpio_reg.imask_reg,
+ &chip->reg->regs[chip->ch].imask);
+ iowrite32(chip->ioh_gpio_reg.im0_reg,
+ &chip->reg->regs[chip->ch].im_0);
+ iowrite32(chip->ioh_gpio_reg.im1_reg,
+ &chip->reg->regs[chip->ch].im_1);
+ if (i < 4)
+ iowrite32(chip->ioh_gpio_reg.use_sel_reg,
+ &chip->reg->ioh_sel_reg[i]);
+ }
}
#endif
+static int ioh_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
+ return chip->irq_base + offset;
+}
+
static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
{
struct gpio_chip *gpio = &chip->gpio;
@@ -175,16 +242,148 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->base = -1;
gpio->ngpio = num_port;
gpio->can_sleep = 0;
+ gpio->to_irq = ioh_gpio_to_irq;
+}
+
+static int ioh_irq_type(struct irq_data *d, unsigned int type)
+{
+ u32 im;
+ u32 *im_reg;
+ u32 ien;
+ u32 im_pos;
+ int ch;
+ unsigned long flags;
+ u32 val;
+ int irq = d->irq;
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+
+ ch = irq - chip->irq_base;
+ if (irq <= chip->irq_base + 7) {
+ im_reg = &chip->reg->regs[chip->ch].im_0;
+ im_pos = ch;
+ } else {
+ im_reg = &chip->reg->regs[chip->ch].im_1;
+ im_pos = ch - 8;
+ }
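+ /*
+ * im_0 and im_1 each hold one 4-bit interrupt mode field per pin;
+ * im_0 covers the first eight pins and im_1 the remaining ones,
+ * hence the split on ch above.
+ */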
+ dev_dbg(chip->dev, "%s:irq=%d type=%d ch=%d pos=%d\n",
+ __func__, irq, type, ch, im_pos);
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ val = IOH_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = IOH_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val = IOH_EDGE_BOTH;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = IOH_LEVEL_H;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = IOH_LEVEL_L;
+ break;
+ case IRQ_TYPE_PROBE:
+ goto end;
+ default:
+ dev_warn(chip->dev, "%s: unknown type(%d)\n",
+ __func__, type);
+ goto end;
+ }
+
+ /* Set interrupt mode */
+ im = ioread32(im_reg) & ~(IOH_IM_MASK << (im_pos * 4));
+ iowrite32(im | (val << (im_pos * 4)), im_reg);
+
+ /* iclr */
+ iowrite32(BIT(ch), &chip->reg->regs[chip->ch].iclr);
+
+ /* IMASKCLR */
+ iowrite32(BIT(ch), &chip->reg->regs[chip->ch].imaskclr);
+
+ /* Enable interrupt */
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ iowrite32(ien | BIT(ch), &chip->reg->regs[chip->ch].ien);
+end:
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
+}
+
+static void ioh_irq_unmask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+
+ iowrite32(1 << (d->irq - chip->irq_base),
+ &chip->reg->regs[chip->ch].imaskclr);
+}
+
+static void ioh_irq_mask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+
+ iowrite32(1 << (d->irq - chip->irq_base),
+ &chip->reg->regs[chip->ch].imask);
+}
+
+static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
+{
+ struct ioh_gpio *chip = dev_id;
+ u32 reg_val;
+ int i, j;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < 8; i++) {
+ reg_val = ioread32(&chip->reg->regs[i].istatus);
+ for (j = 0; j < num_ports[i]; j++) {
+ if (reg_val & BIT(j)) {
+ dev_dbg(chip->dev,
+ "%s:[%d]:irq=%d status=0x%x\n",
+ __func__, j, irq, reg_val);
+ iowrite32(BIT(j),
+ &chip->reg->regs[chip->ch].iclr);
+ generic_handle_irq(chip->irq_base + j);
+ ret = IRQ_HANDLED;
+ }
+ }
+ }
+ return ret;
+}
+
+static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
+ unsigned int irq_start, unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("ioh_gpio", 1, irq_start, chip->base,
+ handle_simple_irq);
+ gc->private = chip;
+ ct = gc->chip_types;
+
+ ct->chip.irq_mask = ioh_irq_mask;
+ ct->chip.irq_unmask = ioh_irq_unmask;
+ ct->chip.irq_set_type = ioh_irq_type;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
- int i;
+ int i, j;
struct ioh_gpio *chip;
void __iomem *base;
void __iomem *chip_save;
+ int irq_base;
ret = pci_enable_device(pdev);
if (ret) {
@@ -228,10 +427,41 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
}
chip = chip_save;
+ for (j = 0; j < 8; j++, chip++) {
+ irq_base = irq_alloc_descs(-1, IOH_IRQ_BASE, num_ports[j],
+ NUMA_NO_NODE);
+ if (irq_base < 0) {
+ dev_warn(&pdev->dev,
+ "ml_ioh_gpio: Failed to get IRQ base num\n");
+ chip->irq_base = -1;
+ goto err_irq_alloc_descs;
+ }
+ chip->irq_base = irq_base;
+ ioh_gpio_alloc_generic_chip(chip, irq_base, num_ports[j]);
+ }
+
+ chip = chip_save;
+ ret = request_irq(pdev->irq, ioh_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "%s request_irq failed\n", __func__);
+ goto err_request_irq;
+ }
+
pci_set_drvdata(pdev, chip);
return 0;
+err_request_irq:
+ chip = chip_save;
+err_irq_alloc_descs:
+ while (--j >= 0) {
+ chip--;
+ irq_free_descs(chip->irq_base, num_ports[j]);
+ }
+
+ chip = chip_save;
err_gpiochip_add:
while (--i >= 0) {
chip--;
@@ -264,7 +494,11 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
void __iomem *chip_save;
chip_save = chip;
+
+ free_irq(pdev->irq, chip);
+
for (i = 0; i < 8; i++, chip++) {
+ irq_free_descs(chip->irq_base, num_ports[i]);
err = gpiochip_remove(&chip->gpio);
if (err)
dev_err(&pdev->dev, "Failed gpiochip_remove\n");
@@ -282,9 +516,11 @@ static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
{
s32 ret;
struct ioh_gpio *chip = pci_get_drvdata(pdev);
+ unsigned long flags;
+ spin_lock_irqsave(&chip->spinlock, flags);
ioh_gpio_save_reg_conf(chip);
- ioh_gpio_restore_reg_conf(chip);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
ret = pci_save_state(pdev);
if (ret) {
@@ -304,6 +540,7 @@ static int ioh_gpio_resume(struct pci_dev *pdev)
{
s32 ret;
struct ioh_gpio *chip = pci_get_drvdata(pdev);
+ unsigned long flags;
ret = pci_enable_wake(pdev, PCI_D0, 0);
@@ -315,9 +552,11 @@ static int ioh_gpio_resume(struct pci_dev *pdev)
}
pci_restore_state(pdev);
+ spin_lock_irqsave(&chip->spinlock, flags);
iowrite32(0x01, &chip->reg->srst);
iowrite32(0x00, &chip->reg->srst);
ioh_gpio_restore_reg_conf(chip);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 52d3ed20810..2c7cef367fc 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -23,6 +23,7 @@
#include <linux/of_gpio.h>
#include <linux/io.h>
#include <linux/of_platform.h>
+#include <linux/module.h>
#include <asm/gpio.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
new file mode 100644
index 00000000000..ec3fcf0a7e1
--- /dev/null
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -0,0 +1,398 @@
+/*
+ * GPIOs on MPC512x/8349/8572/8610 and compatible
+ *
+ * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#define MPC8XXX_GPIO_PINS 32
+
+#define GPIO_DIR 0x00
+#define GPIO_ODR 0x04
+#define GPIO_DAT 0x08
+#define GPIO_IER 0x0c
+#define GPIO_IMR 0x10
+#define GPIO_ICR 0x14
+#define GPIO_ICR2 0x18
+
+struct mpc8xxx_gpio_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ /*
+ * shadowed data register to be able to clear/set output pins in
+ * open drain mode safely
+ */
+ u32 data;
+ struct irq_host *irq;
+ void *of_dev_id_data;
+};
+
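+/*
+ * The GPIO registers on these SoCs number bits big-endian style: bit 0
+ * is the most significant bit, so GPIO 0 corresponds to mask 0x80000000
+ * and GPIO 31 to 0x00000001.
+ */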
+static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
+{
+ return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio);
+}
+
+static inline struct mpc8xxx_gpio_chip *
+to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm)
+{
+ return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc);
+}
+
+static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+
+ mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT);
+}
+
+/* Work around the GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs
+ * defined as output cannot be determined by reading the GPDAT register,
+ * so we use the shadow data register instead. The status of input pins
+ * is determined by reading the GPDAT register.
+ */
+static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ u32 val;
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+
+ val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR);
+
+ return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio);
+}
+
+static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+
+ return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio);
+}
+
+static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ if (val)
+ mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio);
+ else
+ mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio);
+
+ out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+
+ return 0;
+}
+
+static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+ unsigned long flags;
+
+ mpc8xxx_gpio_set(gc, gpio, val);
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+
+ return 0;
+}
+
+static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+
+ if (mpc8xxx_gc->irq && offset < MPC8XXX_GPIO_PINS)
+ return irq_create_mapping(mpc8xxx_gc->irq, offset);
+ else
+ return -ENXIO;
+}
+
+static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned int mask;
+
+ mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR);
+ if (mask)
+ generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
+ 32 - ffs(mask)));
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void mpc8xxx_irq_unmask(struct irq_data *d)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_mask(struct irq_data *d)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_ack(struct irq_data *d)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+
+ out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+}
+
+static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long flags;
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ setbits32(mm->regs + GPIO_ICR,
+ mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrbits32(mm->regs + GPIO_ICR,
+ mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long gpio = irqd_to_hwirq(d);
+ void __iomem *reg;
+ unsigned int shift;
+ unsigned long flags;
+
+ if (gpio < 16) {
+ reg = mm->regs + GPIO_ICR;
+ shift = (15 - gpio) * 2;
+ } else {
+ reg = mm->regs + GPIO_ICR2;
+ shift = (15 - (gpio % 16)) * 2;
+ }
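+ /*
+ * Each GPIO has a two-bit sense field, sixteen fields per register
+ * with GPIO 0 in the most significant pair: GPIOs 0..15 live in ICR
+ * and 16..31 in ICR2, which is what the shift above selects.
+ */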
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrsetbits_be32(reg, 3 << shift, 2 << shift);
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrsetbits_be32(reg, 3 << shift, 1 << shift);
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrbits32(reg, 3 << shift);
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct irq_chip mpc8xxx_irq_chip = {
+ .name = "mpc8xxx-gpio",
+ .irq_unmask = mpc8xxx_irq_unmask,
+ .irq_mask = mpc8xxx_irq_mask,
+ .irq_ack = mpc8xxx_irq_ack,
+ .irq_set_type = mpc8xxx_irq_set_type,
+};
+
+static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
+
+ if (mpc8xxx_gc->of_dev_id_data)
+ mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
+
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_flags)
+
+{
+ /* interrupt sense values coming from the device tree equal either
+ * EDGE_FALLING or EDGE_BOTH
+ */
+ *out_hwirq = intspec[0];
+ *out_flags = intspec[1];
+
+ return 0;
+}
+
+static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
+ .map = mpc8xxx_gpio_irq_map,
+ .xlate = mpc8xxx_gpio_irq_xlate,
+};
+
+static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+ { .compatible = "fsl,mpc8349-gpio", },
+ { .compatible = "fsl,mpc8572-gpio", },
+ { .compatible = "fsl,mpc8610-gpio", },
+ { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+ { .compatible = "fsl,pq3-gpio", },
+ { .compatible = "fsl,qoriq-gpio", },
+ {}
+};
+
+static void __init mpc8xxx_add_controller(struct device_node *np)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc;
+ const struct of_device_id *id;
+ unsigned hwirq;
+ int ret;
+
+ mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL);
+ if (!mpc8xxx_gc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&mpc8xxx_gc->lock);
+
+ mm_gc = &mpc8xxx_gc->mm_gc;
+ gc = &mm_gc->gc;
+
+ mm_gc->save_regs = mpc8xxx_gpio_save_regs;
+ gc->ngpio = MPC8XXX_GPIO_PINS;
+ gc->direction_input = mpc8xxx_gpio_dir_in;
+ gc->direction_output = mpc8xxx_gpio_dir_out;
+ if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
+ gc->get = mpc8572_gpio_get;
+ else
+ gc->get = mpc8xxx_gpio_get;
+ gc->set = mpc8xxx_gpio_set;
+ gc->to_irq = mpc8xxx_gpio_to_irq;
+
+ ret = of_mm_gpiochip_add(np, mm_gc);
+ if (ret)
+ goto err;
+
+ hwirq = irq_of_parse_and_map(np, 0);
+ if (hwirq == NO_IRQ)
+ goto skip_irq;
+
+ mpc8xxx_gc->irq =
+ irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS,
+ &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS);
+ if (!mpc8xxx_gc->irq)
+ goto skip_irq;
+
+ id = of_match_node(mpc8xxx_gpio_ids, np);
+ if (id)
+ mpc8xxx_gc->of_dev_id_data = id->data;
+
+ mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
+
+ /* ack and mask all irqs */
+ out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
+ out_be32(mm_gc->regs + GPIO_IMR, 0);
+
+ irq_set_handler_data(hwirq, mpc8xxx_gc);
+ irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
+
+skip_irq:
+ return;
+
+err:
+ pr_err("%s: registration failed with status %d\n",
+ np->full_name, ret);
+ kfree(mpc8xxx_gc);
+
+ return;
+}
+
+static int __init mpc8xxx_add_gpiochips(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, mpc8xxx_gpio_ids)
+ mpc8xxx_add_controller(np);
+
+ return 0;
+}
+arch_initcall(mpc8xxx_add_gpiochips);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4340acae3bd..e7914763457 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -29,7 +29,11 @@
#include <linux/basic_mmio_gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/module.h>
#include <asm-generic/bug.h>
+#include <asm/mach/irq.h>
+
+#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
@@ -232,10 +236,15 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
u32 irq_stat;
struct mxc_gpio_port *port = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+
+ chained_irq_enter(chip, desc);
irq_stat = readl(port->base + GPIO_ISR) & readl(port->base + GPIO_IMR);
mxc_gpio_irq_handler(port, irq_stat);
+
+ chained_irq_exit(chip, desc);
}
/* MX2 has one interrupt *for all* gpio ports */
@@ -337,6 +346,15 @@ static void __devinit mxc_gpio_get_hw(struct platform_device *pdev)
mxc_gpio_hwtype = hwtype;
}
+static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ struct mxc_gpio_port *port =
+ container_of(bgc, struct mxc_gpio_port, bgc);
+
+ return port->virtual_irq_start + offset;
+}
+
static int __devinit mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -403,6 +421,7 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_iounmap;
+ port->bgc.gc.to_irq = mxc_gpio_to_irq;
port->bgc.gc.base = pdev->id * 32;
port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index af55a8577c2..385c58e8405 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -28,6 +28,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>
+#include <linux/module.h>
#include <mach/mxs.h>
#define MXS_SET 0x4
@@ -49,6 +50,8 @@
#define GPIO_INT_LEV_MASK (1 << 0)
#define GPIO_INT_POL_MASK (1 << 1)
+#define irq_to_gpio(irq) ((irq) - MXS_GPIO_IRQ_START)
+
struct mxs_gpio_port {
void __iomem *base;
int id;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 2c212c732d7..1ebedfb6d46 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -27,8 +27,9 @@
#include <asm/mach/irq.h>
#include <plat/pincfg.h>
+#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
/*
* The GPIO module in the Nomadik family of Systems-on-Chip is an
@@ -58,7 +59,6 @@ struct nmk_gpio_chip {
u32 rwimsc;
u32 fwimsc;
u32 slpm;
- u32 enabled;
u32 pull_up;
};
@@ -276,6 +276,8 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
if (!chip)
break;
+ clk_enable(chip->clk);
+
slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
writel(temp, chip->addr + NMK_GPIO_SLPC);
}
@@ -292,6 +294,8 @@ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
break;
writel(slpm[i], chip->addr + NMK_GPIO_SLPC);
+
+ clk_disable(chip->clk);
}
}
@@ -336,10 +340,12 @@ static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
break;
}
+ clk_enable(nmk_chip->clk);
spin_lock(&nmk_chip->lock);
__nmk_config_pin(nmk_chip, pin - nmk_chip->chip.base,
cfgs[i], sleep, glitch ? slpm : NULL);
spin_unlock(&nmk_chip->lock);
+ clk_disable(nmk_chip->clk);
}
if (glitch)
@@ -424,6 +430,7 @@ int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
if (!nmk_chip)
return -EINVAL;
+ clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
@@ -431,6 +438,7 @@ int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
spin_unlock(&nmk_chip->lock);
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+ clk_disable(nmk_chip->clk);
return 0;
}
@@ -457,9 +465,11 @@ int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull)
if (!nmk_chip)
return -EINVAL;
+ clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_chip->lock, flags);
__nmk_gpio_set_pull(nmk_chip, gpio - nmk_chip->chip.base, pull);
spin_unlock_irqrestore(&nmk_chip->lock, flags);
+ clk_disable(nmk_chip->clk);
return 0;
}
@@ -483,9 +493,11 @@ int nmk_gpio_set_mode(int gpio, int gpio_mode)
if (!nmk_chip)
return -EINVAL;
+ clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_chip->lock, flags);
__nmk_gpio_set_mode(nmk_chip, gpio - nmk_chip->chip.base, gpio_mode);
spin_unlock_irqrestore(&nmk_chip->lock, flags);
+ clk_disable(nmk_chip->clk);
return 0;
}
@@ -502,9 +514,13 @@ int nmk_gpio_get_mode(int gpio)
bit = 1 << (gpio - nmk_chip->chip.base);
+ clk_enable(nmk_chip->clk);
+
afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit;
bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit;
+ clk_disable(nmk_chip->clk);
+
return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
}
EXPORT_SYMBOL(nmk_gpio_get_mode);
@@ -525,7 +541,10 @@ static void nmk_gpio_irq_ack(struct irq_data *d)
nmk_chip = irq_data_get_irq_chip_data(d);
if (!nmk_chip)
return;
+
+ clk_enable(nmk_chip->clk);
writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
+ clk_disable(nmk_chip->clk);
}
enum nmk_gpio_irq_type {
@@ -586,11 +605,7 @@ static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
if (!nmk_chip)
return -EINVAL;
- if (enable)
- nmk_chip->enabled |= bitmask;
- else
- nmk_chip->enabled &= ~bitmask;
-
+ clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
@@ -601,6 +616,7 @@ static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
spin_unlock(&nmk_chip->lock);
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+ clk_disable(nmk_chip->clk);
return 0;
}
@@ -628,10 +644,11 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return -EINVAL;
bitmask = nmk_gpio_get_bitmask(gpio);
+ clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
- if (!(nmk_chip->enabled & bitmask))
+ if (irqd_irq_disabled(d))
__nmk_gpio_set_wake(nmk_chip, gpio, on);
if (on)
@@ -641,13 +658,15 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
spin_unlock(&nmk_chip->lock);
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+ clk_disable(nmk_chip->clk);
return 0;
}
static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- bool enabled, wake = irqd_is_wakeup_set(d);
+ bool enabled = !irqd_irq_disabled(d);
+ bool wake = irqd_is_wakeup_set(d);
int gpio;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
@@ -664,8 +683,7 @@ static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
if (type & IRQ_TYPE_LEVEL_LOW)
return -EINVAL;
- enabled = nmk_chip->enabled & bitmask;
-
+ clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_chip->lock, flags);
if (enabled)
@@ -689,10 +707,28 @@ static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
__nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, true);
spin_unlock_irqrestore(&nmk_chip->lock, flags);
+ clk_disable(nmk_chip->clk);
return 0;
}
+static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
+{
+ struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
+
+ clk_enable(nmk_chip->clk);
+ nmk_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void nmk_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
+
+ nmk_gpio_irq_mask(d);
+ clk_disable(nmk_chip->clk);
+}
+
static struct irq_chip nmk_gpio_irq_chip = {
.name = "Nomadik-GPIO",
.irq_ack = nmk_gpio_irq_ack,
@@ -700,6 +736,8 @@ static struct irq_chip nmk_gpio_irq_chip = {
.irq_unmask = nmk_gpio_irq_unmask,
.irq_set_type = nmk_gpio_irq_set_type,
.irq_set_wake = nmk_gpio_irq_set_wake,
+ .irq_startup = nmk_gpio_irq_startup,
+ .irq_shutdown = nmk_gpio_irq_shutdown,
};
static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
@@ -726,7 +764,11 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
- u32 status = readl(nmk_chip->addr + NMK_GPIO_IS);
+ u32 status;
+
+ clk_enable(nmk_chip->clk);
+ status = readl(nmk_chip->addr + NMK_GPIO_IS);
+ clk_disable(nmk_chip->clk);
__nmk_gpio_irq_handler(irq, desc, status);
}
@@ -772,7 +814,12 @@ static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
struct nmk_gpio_chip *nmk_chip =
container_of(chip, struct nmk_gpio_chip, chip);
+ clk_enable(nmk_chip->clk);
+
writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+
+ clk_disable(nmk_chip->clk);
+
return 0;
}
@@ -781,8 +828,15 @@ static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
struct nmk_gpio_chip *nmk_chip =
container_of(chip, struct nmk_gpio_chip, chip);
u32 bit = 1 << offset;
+ int value;
+
+ clk_enable(nmk_chip->clk);
- return (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
+ value = (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
+
+ clk_disable(nmk_chip->clk);
+
+ return value;
}
static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
@@ -791,7 +845,11 @@ static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
struct nmk_gpio_chip *nmk_chip =
container_of(chip, struct nmk_gpio_chip, chip);
+ clk_enable(nmk_chip->clk);
+
__nmk_gpio_set_output(nmk_chip, offset, val);
+
+ clk_disable(nmk_chip->clk);
}
static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
@@ -800,8 +858,12 @@ static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
struct nmk_gpio_chip *nmk_chip =
container_of(chip, struct nmk_gpio_chip, chip);
+ clk_enable(nmk_chip->clk);
+
__nmk_gpio_make_output(nmk_chip, offset, val);
+ clk_disable(nmk_chip->clk);
+
return 0;
}
@@ -832,6 +894,8 @@ static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
[NMK_GPIO_ALT_C] = "altC",
};
+ clk_enable(nmk_chip->clk);
+
for (i = 0; i < chip->ngpio; i++, gpio++) {
const char *label = gpiochip_is_requested(chip, i);
bool pull;
@@ -876,6 +940,8 @@ static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, "\n");
}
+
+ clk_disable(nmk_chip->clk);
}
#else
@@ -893,6 +959,34 @@ static struct gpio_chip nmk_gpio_template = {
.can_sleep = 0,
};
+void nmk_gpio_clocks_enable(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ continue;
+
+ clk_enable(chip->clk);
+ }
+}
+
+void nmk_gpio_clocks_disable(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ continue;
+
+ clk_disable(chip->clk);
+ }
+}
+
/*
* Called from the suspend/resume path to only keep the real wakeup interrupts
* (those that have had set_irq_wake() called on them) as wakeup interrupts,
@@ -912,6 +1006,8 @@ void nmk_gpio_wakeups_suspend(void)
if (!chip)
break;
+ clk_enable(chip->clk);
+
chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC);
chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC);
@@ -926,6 +1022,8 @@ void nmk_gpio_wakeups_suspend(void)
/* 0 -> wakeup enable */
writel(~chip->real_wake, chip->addr + NMK_GPIO_SLPC);
}
+
+ clk_disable(chip->clk);
}
}
@@ -939,11 +1037,15 @@ void nmk_gpio_wakeups_resume(void)
if (!chip)
break;
+ clk_enable(chip->clk);
+
writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
if (chip->sleepmode)
writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
+
+ clk_disable(chip->clk);
}
}
@@ -1010,8 +1112,6 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
goto out_release;
}
- clk_enable(clk);
-
nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
if (!nmk_chip) {
ret = -ENOMEM;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 118ec12d2d5..0b056297917 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -25,7 +25,7 @@
#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <asm/mach/irq.h>
struct gpio_bank {
@@ -148,13 +148,17 @@ static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}
-#define MOD_REG_BIT(reg, bit_mask, set) \
-do { \
- int l = __raw_readl(base + reg); \
- if (set) l |= bit_mask; \
- else l &= ~bit_mask; \
- __raw_writel(l, base + reg); \
-} while(0)
+static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
+{
+ int l = __raw_readl(base + reg);
+
+ if (set)
+ l |= mask;
+ else
+ l &= ~mask;
+
+ __raw_writel(l, base + reg);
+}
/**
* _set_gpio_debounce - low level gpio debounce time
@@ -210,28 +214,28 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
u32 gpio_bit = 1 << gpio;
if (cpu_is_omap44xx()) {
- MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_LOW);
- MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_HIGH);
- MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
- MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
+ _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ _gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ _gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
} else {
- MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_LOW);
- MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_HIGH);
- MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
- MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
+ _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ _gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ _gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
}
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
if (cpu_is_omap44xx()) {
- MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
- trigger != 0);
+ _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit,
+ trigger != 0);
} else {
/*
* GPIO wakeup request can only be generated on edge
@@ -1086,6 +1090,11 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
handle_simple_irq);
+ if (!gc) {
+ dev_err(bank->dev, "Memory alloc failed for gc\n");
+ return;
+ }
+
ct = gc->chip_types;
/* NOTE: No ack required, reading IRQ status clears it. */
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0550dcb8581..147df8ae79d 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -596,9 +596,6 @@ static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
/* set platform specific polarity inversion */
ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
- if (ret)
- goto out;
- return 0;
out:
return ret;
}
@@ -640,7 +637,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
int irq_base=0, invert=0;
- int ret = 0;
+ int ret;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
@@ -673,10 +670,10 @@ static int __devinit pca953x_probe(struct i2c_client *client,
pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
if (chip->chip_type == PCA953X_TYPE)
- device_pca953x_init(chip, invert);
- else if (chip->chip_type == PCA957X_TYPE)
- device_pca957x_init(chip, invert);
+ ret = device_pca953x_init(chip, invert);
else
+ ret = device_pca957x_init(chip, invert);
+ if (ret)
goto out_failed;
ret = pca953x_irq_setup(chip, id, irq_base);
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 7369fdda92b..3e1f1ecd07b 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -23,6 +23,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
+#include <linux/module.h>
static const struct i2c_device_id pcf857x_id[] = {
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 36919e77c49..a6008e123d0 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -14,12 +14,21 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
-#define PCH_GPIO_ALL_PINS 0xfff /* Mask for GPIO pins 0 to 11 */
-#define GPIO_NUM_PINS 12 /* Specifies number of GPIO PINS GPIO0-GPIO11 */
+#define PCH_EDGE_FALLING 0
+#define PCH_EDGE_RISING BIT(0)
+#define PCH_LEVEL_L BIT(1)
+#define PCH_LEVEL_H (BIT(0) | BIT(1))
+#define PCH_EDGE_BOTH BIT(2)
+#define PCH_IM_MASK (BIT(0) | BIT(1) | BIT(2))
+
+#define PCH_IRQ_BASE 24
struct pch_regs {
u32 ien;
@@ -33,18 +42,43 @@ struct pch_regs {
u32 pm;
u32 im0;
u32 im1;
- u32 reserved[4];
+ u32 reserved[3];
+ u32 gpio_use_sel;
u32 reset;
};
+enum pch_type_t {
+ INTEL_EG20T_PCH,
+ OKISEMI_ML7223m_IOH, /* OKISEMI ML7223 IOH PCIe Bus-m */
+ OKISEMI_ML7223n_IOH /* OKISEMI ML7223 IOH PCIe Bus-n */
+};
+
+/* Specifies number of GPIO PINS */
+static int gpio_pins[] = {
+ [INTEL_EG20T_PCH] = 12,
+ [OKISEMI_ML7223m_IOH] = 8,
+ [OKISEMI_ML7223n_IOH] = 8,
+};
+
/**
* struct pch_gpio_reg_data - The register store data.
+ * @ien_reg: To store contents of IEN register.
+ * @imask_reg: To store contents of IMASK register.
* @po_reg: To store contents of PO register.
* @pm_reg: To store contents of PM register.
+ * @im0_reg: To store contents of IM0 register.
+ * @im1_reg: To store contents of IM1 register.
+ * @gpio_use_sel_reg: To store contents of GPIO_USE_SEL register.
+ * (Only ML7223 Bus-n)
*/
struct pch_gpio_reg_data {
+ u32 ien_reg;
+ u32 imask_reg;
u32 po_reg;
u32 pm_reg;
+ u32 im0_reg;
+ u32 im1_reg;
+ u32 gpio_use_sel_reg;
};
/**
@@ -55,6 +89,12 @@ struct pch_gpio_reg_data {
* @gpio: Data for GPIO infrastructure.
* @pch_gpio_reg: Memory mapped Register data is saved here
* when suspend.
+ * @lock: Used for register access protection
+ * @irq_base: Base IRQ number assigned to this chip
+ * @ioh: IOH ID
+ * @spinlock: Protects register access from interrupt
+ * context (pch_irq_mask, pch_irq_unmask and
+ * pch_irq_type)
*/
struct pch_gpio {
void __iomem *base;
@@ -63,6 +103,9 @@ struct pch_gpio {
struct gpio_chip gpio;
struct pch_gpio_reg_data pch_gpio_reg;
struct mutex lock;
+ int irq_base;
+ enum pch_type_t ioh;
+ spinlock_t spinlock;
};
static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
@@ -96,7 +139,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
u32 reg_val;
mutex_lock(&chip->lock);
- pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS;
+ pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
pm |= (1 << nr);
iowrite32(pm, &chip->reg->pm);
@@ -118,7 +161,7 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
u32 pm;
mutex_lock(&chip->lock);
- pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS; /*bits 0-11*/
+ pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
pm &= ~(1 << nr);
iowrite32(pm, &chip->reg->pm);
mutex_unlock(&chip->lock);
@@ -131,8 +174,16 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
*/
static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
{
+ chip->pch_gpio_reg.ien_reg = ioread32(&chip->reg->ien);
+ chip->pch_gpio_reg.imask_reg = ioread32(&chip->reg->imask);
chip->pch_gpio_reg.po_reg = ioread32(&chip->reg->po);
chip->pch_gpio_reg.pm_reg = ioread32(&chip->reg->pm);
+ chip->pch_gpio_reg.im0_reg = ioread32(&chip->reg->im0);
+ if (chip->ioh == INTEL_EG20T_PCH)
+ chip->pch_gpio_reg.im1_reg = ioread32(&chip->reg->im1);
+ if (chip->ioh == OKISEMI_ML7223n_IOH)
+ chip->pch_gpio_reg.gpio_use_sel_reg =
+ ioread32(&chip->reg->gpio_use_sel);
}
/*
@@ -140,10 +191,24 @@ static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
*/
static void pch_gpio_restore_reg_conf(struct pch_gpio *chip)
{
+ iowrite32(chip->pch_gpio_reg.ien_reg, &chip->reg->ien);
+ iowrite32(chip->pch_gpio_reg.imask_reg, &chip->reg->imask);
/* to store contents of PO register */
iowrite32(chip->pch_gpio_reg.po_reg, &chip->reg->po);
/* to store contents of PM register */
iowrite32(chip->pch_gpio_reg.pm_reg, &chip->reg->pm);
+ iowrite32(chip->pch_gpio_reg.im0_reg, &chip->reg->im0);
+ if (chip->ioh == INTEL_EG20T_PCH)
+ iowrite32(chip->pch_gpio_reg.im1_reg, &chip->reg->im1);
+ if (chip->ioh == OKISEMI_ML7223n_IOH)
+ iowrite32(chip->pch_gpio_reg.gpio_use_sel_reg,
+ &chip->reg->gpio_use_sel);
+}
+
+static int pch_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+ return chip->irq_base + offset;
}
static void pch_gpio_setup(struct pch_gpio *chip)
@@ -158,8 +223,132 @@ static void pch_gpio_setup(struct pch_gpio *chip)
gpio->set = pch_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
- gpio->ngpio = GPIO_NUM_PINS;
+ gpio->ngpio = gpio_pins[chip->ioh];
gpio->can_sleep = 0;
+ gpio->to_irq = pch_gpio_to_irq;
+}
+
+static int pch_irq_type(struct irq_data *d, unsigned int type)
+{
+ u32 im;
+ u32 *im_reg;
+ u32 ien;
+ u32 im_pos;
+ int ch;
+ unsigned long flags;
+ u32 val;
+ int irq = d->irq;
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct pch_gpio *chip = gc->private;
+
+ ch = irq - chip->irq_base;
+ if (irq <= chip->irq_base + 7) {
+ im_reg = &chip->reg->im0;
+ im_pos = ch;
+ } else {
+ im_reg = &chip->reg->im1;
+ im_pos = ch - 8;
+ }
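+ /*
+ * im0 and im1 pack one 4-bit interrupt mode field per pin; im0
+ * covers pins 0-7 and im1 the pins above that, hence the split
+ * on ch above.
+ */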
+ dev_dbg(chip->dev, "%s:irq=%d type=%d ch=%d pos=%d\n",
+ __func__, irq, type, ch, im_pos);
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ val = PCH_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = PCH_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val = PCH_EDGE_BOTH;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = PCH_LEVEL_H;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = PCH_LEVEL_L;
+ break;
+ case IRQ_TYPE_PROBE:
+ goto end;
+ default:
+ dev_warn(chip->dev, "%s: unknown type(%d)\n",
+ __func__, type);
+ goto end;
+ }
+
+ /* Set interrupt mode */
+ im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
+ iowrite32(im | (val << (im_pos * 4)), im_reg);
+
+ /* iclr */
+ iowrite32(BIT(ch), &chip->reg->iclr);
+
+ /* IMASKCLR */
+ iowrite32(BIT(ch), &chip->reg->imaskclr);
+
+ /* Enable interrupt */
+ ien = ioread32(&chip->reg->ien);
+ iowrite32(ien | BIT(ch), &chip->reg->ien);
+end:
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
+}
+
+static void pch_irq_unmask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct pch_gpio *chip = gc->private;
+
+ iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imaskclr);
+}
+
+static void pch_irq_mask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct pch_gpio *chip = gc->private;
+
+ iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
+}
+
+static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
+{
+ struct pch_gpio *chip = dev_id;
+ u32 reg_val = ioread32(&chip->reg->istatus);
+ int i;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < gpio_pins[chip->ioh]; i++) {
+ if (reg_val & BIT(i)) {
+ dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n",
+ __func__, i, irq, reg_val);
+ iowrite32(BIT(i), &chip->reg->iclr);
+ generic_handle_irq(chip->irq_base + i);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
+ unsigned int irq_start, unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("pch_gpio", 1, irq_start, chip->base,
+ handle_simple_irq);
+ gc->private = chip;
+ ct = gc->chip_types;
+
+ ct->chip.irq_mask = pch_irq_mask;
+ ct->chip.irq_unmask = pch_irq_unmask;
+ ct->chip.irq_set_type = pch_irq_type;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
static int __devinit pch_gpio_probe(struct pci_dev *pdev,
@@ -167,6 +356,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
{
s32 ret;
struct pch_gpio *chip;
+ int irq_base;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -192,6 +382,13 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
goto err_iomap;
}
+ if (pdev->device == 0x8803)
+ chip->ioh = INTEL_EG20T_PCH;
+ else if (pdev->device == 0x8014)
+ chip->ioh = OKISEMI_ML7223m_IOH;
+ else if (pdev->device == 0x8043)
+ chip->ioh = OKISEMI_ML7223n_IOH;
+
chip->reg = chip->base;
pci_set_drvdata(pdev, chip);
mutex_init(&chip->lock);
@@ -202,8 +399,36 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
goto err_gpiochip_add;
}
+ irq_base = irq_alloc_descs(-1, 0, gpio_pins[chip->ioh], NUMA_NO_NODE);
+ if (irq_base < 0) {
+ dev_warn(&pdev->dev, "PCH gpio: Failed to get IRQ base num\n");
+ chip->irq_base = -1;
+ goto end;
+ }
+ chip->irq_base = irq_base;
+
+ ret = request_irq(pdev->irq, pch_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "%s request_irq failed\n", __func__);
+ goto err_request_irq;
+ }
+
+ pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
+
+ /* Initialize interrupt ien register */
+ iowrite32(0, &chip->reg->ien);
+end:
return 0;
+err_request_irq:
+ irq_free_descs(irq_base, gpio_pins[chip->ioh]);
+
+ ret = gpiochip_remove(&chip->gpio);
+ if (ret)
+ dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
+
err_gpiochip_add:
pci_iounmap(pdev, chip->base);
@@ -224,6 +449,12 @@ static void __devexit pch_gpio_remove(struct pci_dev *pdev)
int err;
struct pch_gpio *chip = pci_get_drvdata(pdev);
+ if (chip->irq_base != -1) {
+ free_irq(pdev->irq, chip);
+
+ irq_free_descs(chip->irq_base, gpio_pins[chip->ioh]);
+ }
+
err = gpiochip_remove(&chip->gpio);
if (err)
dev_err(&pdev->dev, "Failed gpiochip_remove\n");
@@ -239,9 +470,11 @@ static int pch_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
{
s32 ret;
struct pch_gpio *chip = pci_get_drvdata(pdev);
+ unsigned long flags;
+ spin_lock_irqsave(&chip->spinlock, flags);
pch_gpio_save_reg_conf(chip);
- pch_gpio_restore_reg_conf(chip);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
ret = pci_save_state(pdev);
if (ret) {
@@ -261,6 +494,7 @@ static int pch_gpio_resume(struct pci_dev *pdev)
{
s32 ret;
struct pch_gpio *chip = pci_get_drvdata(pdev);
+ unsigned long flags;
ret = pci_enable_wake(pdev, PCI_D0, 0);
@@ -272,9 +506,11 @@ static int pch_gpio_resume(struct pci_dev *pdev)
}
pci_restore_state(pdev);
+ spin_lock_irqsave(&chip->spinlock, flags);
iowrite32(0x01, &chip->reg->reset);
iowrite32(0x00, &chip->reg->reset);
pch_gpio_restore_reg_conf(chip);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
@@ -287,6 +523,7 @@ static int pch_gpio_resume(struct pci_dev *pdev)
static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 2c5a18f32bf..093c90bd3c1 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -118,7 +118,7 @@ static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
- if (chip->irq_base == (unsigned) -1)
+ if (chip->irq_base == NO_IRQ)
return -EINVAL;
return chip->irq_base + offset;
@@ -246,6 +246,18 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
if (chip == NULL)
return -ENOMEM;
+ pdata = dev->dev.platform_data;
+ if (pdata) {
+ chip->gc.base = pdata->gpio_base;
+ chip->irq_base = pdata->irq_base;
+ } else if (dev->dev.of_node) {
+ chip->gc.base = -1;
+ chip->irq_base = NO_IRQ;
+ } else {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
if (!request_mem_region(dev->res.start,
resource_size(&dev->res), "pl061")) {
ret = -EBUSY;
@@ -267,14 +279,11 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
chip->gc.get = pl061_get_value;
chip->gc.set = pl061_set_value;
chip->gc.to_irq = pl061_to_irq;
- chip->gc.base = pdata->gpio_base;
chip->gc.ngpio = PL061_GPIO_NR;
chip->gc.label = dev_name(&dev->dev);
chip->gc.dev = &dev->dev;
chip->gc.owner = THIS_MODULE;
- chip->irq_base = pdata->irq_base;
-
ret = gpiochip_add(&chip->gc);
if (ret)
goto iounmap;
@@ -283,7 +292,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
* irq_chip support
*/
- if (chip->irq_base == (unsigned) -1)
+ if (chip->irq_base == NO_IRQ)
return 0;
writeb(0, chip->base + GPIOIE); /* disable irqs */
@@ -307,11 +316,13 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
list_add(&chip->list, chip_list);
for (i = 0; i < PL061_GPIO_NR; i++) {
- if (pdata->directions & (1 << i))
- pl061_direction_output(&chip->gc, i,
- pdata->values & (1 << i));
- else
- pl061_direction_input(&chip->gc, i);
+ if (pdata) {
+ if (pdata->directions & (1 << i))
+ pl061_direction_output(&chip->gc, i,
+ pdata->values & (1 << i));
+ else
+ pl061_direction_input(&chip->gc, i);
+ }
irq_set_chip_and_handler(i + chip->irq_base, &pl061_irqchip,
handle_simple_irq);
diff --git a/drivers/gpio/gpio-plat-samsung.c b/drivers/gpio/gpio-plat-samsung.c
deleted file mode 100644
index ef67f1952a7..00000000000
--- a/drivers/gpio/gpio-plat-samsung.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * SAMSUNG - GPIOlib support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg.h>
-#include <plat/gpio-cfg-helpers.h>
-
-#ifndef DEBUG_GPIO
-#define gpio_dbg(x...) do { } while (0)
-#else
-#define gpio_dbg(x...) printk(KERN_DEBUG x)
-#endif
-
-/* The samsung_gpiolib_4bit routines are to control the gpio banks where
- * the gpio configuration register (GPxCON) has 4 bits per GPIO, as the
- * following example:
- *
- * base + 0x00: Control register, 4 bits per gpio
- * gpio n: 4 bits starting at (4*n)
- * 0000 = input, 0001 = output, others mean special-function
- * base + 0x04: Data register, 1 bit per gpio
- * bit n: data bit n
- *
- * Note, since the data register is one bit per gpio and is at base + 0x4
- * we can use s3c_gpiolib_get and s3c_gpiolib_set to change the state of
- * the output.
-*/
-
-static int samsung_gpiolib_4bit_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
- void __iomem *base = ourchip->base;
- unsigned long con;
-
- con = __raw_readl(base + GPIOCON_OFF);
- con &= ~(0xf << con_4bit_shift(offset));
- __raw_writel(con, base + GPIOCON_OFF);
-
- gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
-
- return 0;
-}
-
-static int samsung_gpiolib_4bit_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
- void __iomem *base = ourchip->base;
- unsigned long con;
- unsigned long dat;
-
- con = __raw_readl(base + GPIOCON_OFF);
- con &= ~(0xf << con_4bit_shift(offset));
- con |= 0x1 << con_4bit_shift(offset);
-
- dat = __raw_readl(base + GPIODAT_OFF);
-
- if (value)
- dat |= 1 << offset;
- else
- dat &= ~(1 << offset);
-
- __raw_writel(dat, base + GPIODAT_OFF);
- __raw_writel(con, base + GPIOCON_OFF);
- __raw_writel(dat, base + GPIODAT_OFF);
-
- gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
-
- return 0;
-}
-
-/* The next set of routines are for the case where the GPIO configuration
- * registers are 4 bits per GPIO but there is more than one register (the
- * bank has more than 8 GPIOs.
- *
- * This case is the similar to the 4 bit case, but the registers are as
- * follows:
- *
- * base + 0x00: Control register, 4 bits per gpio (lower 8 GPIOs)
- * gpio n: 4 bits starting at (4*n)
- * 0000 = input, 0001 = output, others mean special-function
- * base + 0x04: Control register, 4 bits per gpio (up to 8 additions GPIOs)
- * gpio n: 4 bits starting at (4*n)
- * 0000 = input, 0001 = output, others mean special-function
- * base + 0x08: Data register, 1 bit per gpio
- * bit n: data bit n
- *
- * To allow us to use the s3c_gpiolib_get and s3c_gpiolib_set routines we
- * store the 'base + 0x4' address so that these routines see the data
- * register at ourchip->base + 0x04.
- */
-
-static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
- void __iomem *base = ourchip->base;
- void __iomem *regcon = base;
- unsigned long con;
-
- if (offset > 7)
- offset -= 8;
- else
- regcon -= 4;
-
- con = __raw_readl(regcon);
- con &= ~(0xf << con_4bit_shift(offset));
- __raw_writel(con, regcon);
-
- gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);
-
- return 0;
-}
-
-static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
- void __iomem *base = ourchip->base;
- void __iomem *regcon = base;
- unsigned long con;
- unsigned long dat;
- unsigned con_offset = offset;
-
- if (con_offset > 7)
- con_offset -= 8;
- else
- regcon -= 4;
-
- con = __raw_readl(regcon);
- con &= ~(0xf << con_4bit_shift(con_offset));
- con |= 0x1 << con_4bit_shift(con_offset);
-
- dat = __raw_readl(base + GPIODAT_OFF);
-
- if (value)
- dat |= 1 << offset;
- else
- dat &= ~(1 << offset);
-
- __raw_writel(dat, base + GPIODAT_OFF);
- __raw_writel(con, regcon);
- __raw_writel(dat, base + GPIODAT_OFF);
-
- gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
-
- return 0;
-}
-
-void __init samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip)
-{
- chip->chip.direction_input = samsung_gpiolib_4bit_input;
- chip->chip.direction_output = samsung_gpiolib_4bit_output;
- chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
-}
-
-void __init samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip)
-{
- chip->chip.direction_input = samsung_gpiolib_4bit2_input;
- chip->chip.direction_output = samsung_gpiolib_4bit2_output;
- chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
-}
-
-void __init samsung_gpiolib_add_4bit_chips(struct s3c_gpio_chip *chip,
- int nr_chips)
-{
- for (; nr_chips > 0; nr_chips--, chip++) {
- samsung_gpiolib_add_4bit(chip);
- s3c_gpiolib_add(chip);
- }
-}
-
-void __init samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
- int nr_chips)
-{
- for (; nr_chips > 0; nr_chips--, chip++) {
- samsung_gpiolib_add_4bit2(chip);
- s3c_gpiolib_add(chip);
- }
-}
-
-void __init samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
- int nr_chips)
-{
- for (; nr_chips > 0; nr_chips--, chip++)
- s3c_gpiolib_add(chip);
-}
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
new file mode 100644
index 00000000000..ee137712f9d
--- /dev/null
+++ b/drivers/gpio/gpio-pxa.c
@@ -0,0 +1,338 @@
+/*
+ * linux/arch/arm/plat-pxa/gpio.c
+ *
+ * Generic PXA GPIO handling
+ *
+ * Author: Nicolas Pitre
+ * Created: Jun 15, 2001
+ * Copyright: MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/syscore_ops.h>
+#include <linux/slab.h>
+
+#include <mach/gpio-pxa.h>
+
+int pxa_last_gpio;
+
+struct pxa_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *regbase;
+ char label[10];
+
+ unsigned long irq_mask;
+ unsigned long irq_edge_rise;
+ unsigned long irq_edge_fall;
+
+#ifdef CONFIG_PM
+ unsigned long saved_gplr;
+ unsigned long saved_gpdr;
+ unsigned long saved_grer;
+ unsigned long saved_gfer;
+#endif
+};
+
+static DEFINE_SPINLOCK(gpio_lock);
+static struct pxa_gpio_chip *pxa_gpio_chips;
+
+#define for_each_gpio_chip(i, c) \
+ for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
+
+static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
+{
+ return container_of(c, struct pxa_gpio_chip, chip)->regbase;
+}
+
+static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
+{
+ return &pxa_gpio_chips[gpio_to_bank(gpio)];
+}
+
+static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *base = gpio_chip_base(chip);
+ uint32_t value, mask = 1 << offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ value = __raw_readl(base + GPDR_OFFSET);
+ if (__gpio_is_inverted(chip->base + offset))
+ value |= mask;
+ else
+ value &= ~mask;
+ __raw_writel(value, base + GPDR_OFFSET);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return 0;
+}
+
+static int pxa_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ void __iomem *base = gpio_chip_base(chip);
+ uint32_t tmp, mask = 1 << offset;
+ unsigned long flags;
+
+ __raw_writel(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ tmp = __raw_readl(base + GPDR_OFFSET);
+ if (__gpio_is_inverted(chip->base + offset))
+ tmp &= ~mask;
+ else
+ tmp |= mask;
+ __raw_writel(tmp, base + GPDR_OFFSET);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return 0;
+}
+
+static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return __raw_readl(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
+}
+
+static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ __raw_writel(1 << offset, gpio_chip_base(chip) +
+ (value ? GPSR_OFFSET : GPCR_OFFSET));
+}
+
+static int __init pxa_init_gpio_chip(int gpio_end)
+{
+ int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
+ struct pxa_gpio_chip *chips;
+
+ chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
+ if (chips == NULL) {
+ pr_err("%s: failed to allocate GPIO chips\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
+ struct gpio_chip *c = &chips[i].chip;
+
+ sprintf(chips[i].label, "gpio-%d", i);
+ chips[i].regbase = GPIO_BANK(i);
+
+ c->base = gpio;
+ c->label = chips[i].label;
+
+ c->direction_input = pxa_gpio_direction_input;
+ c->direction_output = pxa_gpio_direction_output;
+ c->get = pxa_gpio_get;
+ c->set = pxa_gpio_set;
+
+ /* number of GPIOs on last bank may be less than 32 */
+ c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
+ gpiochip_add(c);
+ }
+ pxa_gpio_chips = chips;
+ return 0;
+}
+
+/* Update only those GRERx and GFERx edge detection register bits if those
+ * bits are set in c->irq_mask
+ */
+static inline void update_edge_detect(struct pxa_gpio_chip *c)
+{
+ uint32_t grer, gfer;
+
+ grer = __raw_readl(c->regbase + GRER_OFFSET) & ~c->irq_mask;
+ gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~c->irq_mask;
+ grer |= c->irq_edge_rise & c->irq_mask;
+ gfer |= c->irq_edge_fall & c->irq_mask;
+ __raw_writel(grer, c->regbase + GRER_OFFSET);
+ __raw_writel(gfer, c->regbase + GFER_OFFSET);
+}
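/*
 * Worked example (editorial aside, not part of the patch): with
 * irq_mask = 0x0000000c, irq_edge_rise = 0x00000004 and a current GRER of
 * 0xffff0000, update_edge_detect() computes
 *
 *   grer  = 0xffff0000 & ~0x0000000c = 0xffff0000
 *   grer |= 0x00000004 &  0x0000000c          -> 0xffff0004
 *
 * so only the edge-detect bits of the masked GPIOs (2 and 3) are rewritten;
 * everything outside irq_mask is left exactly as it was.
 */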
+
+static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct pxa_gpio_chip *c;
+ int gpio = irq_to_gpio(d->irq);
+ unsigned long gpdr, mask = GPIO_bit(gpio);
+
+ c = gpio_to_pxachip(gpio);
+
+ if (type == IRQ_TYPE_PROBE) {
+ /* Don't mess with enabled GPIOs using preconfigured edges or
+ * GPIOs set to alternate function or to output during probe
+ */
+ if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
+ return 0;
+
+ if (__gpio_is_occupied(gpio))
+ return 0;
+
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ }
+
+ gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
+
+ if (__gpio_is_inverted(gpio))
+ __raw_writel(gpdr | mask, c->regbase + GPDR_OFFSET);
+ else
+ __raw_writel(gpdr & ~mask, c->regbase + GPDR_OFFSET);
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ c->irq_edge_rise |= mask;
+ else
+ c->irq_edge_rise &= ~mask;
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ c->irq_edge_fall |= mask;
+ else
+ c->irq_edge_fall &= ~mask;
+
+ update_edge_detect(c);
+
+ pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
+ ((type & IRQ_TYPE_EDGE_RISING) ? " rising" : ""),
+ ((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
+ return 0;
+}
+
+static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct pxa_gpio_chip *c;
+ int loop, gpio, gpio_base, n;
+ unsigned long gedr;
+
+ do {
+ loop = 0;
+ for_each_gpio_chip(gpio, c) {
+ gpio_base = c->chip.base;
+
+ gedr = __raw_readl(c->regbase + GEDR_OFFSET);
+ gedr = gedr & c->irq_mask;
+ __raw_writel(gedr, c->regbase + GEDR_OFFSET);
+
+ n = find_first_bit(&gedr, BITS_PER_LONG);
+ while (n < BITS_PER_LONG) {
+ loop = 1;
+
+ generic_handle_irq(gpio_to_irq(gpio_base + n));
+ n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
+ }
+ }
+ } while (loop);
+}
+
+static void pxa_ack_muxed_gpio(struct irq_data *d)
+{
+ int gpio = irq_to_gpio(d->irq);
+ struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
+
+ __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
+}
+
+static void pxa_mask_muxed_gpio(struct irq_data *d)
+{
+ int gpio = irq_to_gpio(d->irq);
+ struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
+ uint32_t grer, gfer;
+
+ c->irq_mask &= ~GPIO_bit(gpio);
+
+ grer = __raw_readl(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
+ gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
+ __raw_writel(grer, c->regbase + GRER_OFFSET);
+ __raw_writel(gfer, c->regbase + GFER_OFFSET);
+}
+
+static void pxa_unmask_muxed_gpio(struct irq_data *d)
+{
+ int gpio = irq_to_gpio(d->irq);
+ struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
+
+ c->irq_mask |= GPIO_bit(gpio);
+ update_edge_detect(c);
+}
+
+static struct irq_chip pxa_muxed_gpio_chip = {
+ .name = "GPIO",
+ .irq_ack = pxa_ack_muxed_gpio,
+ .irq_mask = pxa_mask_muxed_gpio,
+ .irq_unmask = pxa_unmask_muxed_gpio,
+ .irq_set_type = pxa_gpio_irq_type,
+};
+
+void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
+{
+ struct pxa_gpio_chip *c;
+ int gpio, irq;
+
+ pxa_last_gpio = end;
+
+ /* Initialize GPIO chips */
+ pxa_init_gpio_chip(end);
+
+ /* clear all GPIO edge detects */
+ for_each_gpio_chip(gpio, c) {
+ __raw_writel(0, c->regbase + GFER_OFFSET);
+ __raw_writel(0, c->regbase + GRER_OFFSET);
+ __raw_writel(~0, c->regbase + GEDR_OFFSET);
+ }
+
+ for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ /* Install handler for GPIO>=2 edge detect interrupts */
+ irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
+ pxa_muxed_gpio_chip.irq_set_wake = fn;
+}
+
+#ifdef CONFIG_PM
+static int pxa_gpio_suspend(void)
+{
+ struct pxa_gpio_chip *c;
+ int gpio;
+
+ for_each_gpio_chip(gpio, c) {
+ c->saved_gplr = __raw_readl(c->regbase + GPLR_OFFSET);
+ c->saved_gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
+ c->saved_grer = __raw_readl(c->regbase + GRER_OFFSET);
+ c->saved_gfer = __raw_readl(c->regbase + GFER_OFFSET);
+
+ /* Clear GPIO transition detect bits */
+ __raw_writel(0xffffffff, c->regbase + GEDR_OFFSET);
+ }
+ return 0;
+}
+
+static void pxa_gpio_resume(void)
+{
+ struct pxa_gpio_chip *c;
+ int gpio;
+
+ for_each_gpio_chip(gpio, c) {
+ /* restore level with set/clear */
+ __raw_writel( c->saved_gplr, c->regbase + GPSR_OFFSET);
+ __raw_writel(~c->saved_gplr, c->regbase + GPCR_OFFSET);
+
+ __raw_writel(c->saved_grer, c->regbase + GRER_OFFSET);
+ __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET);
+ __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET);
+ }
+}
+#else
+#define pxa_gpio_suspend NULL
+#define pxa_gpio_resume NULL
+#endif
+
+struct syscore_ops pxa_gpio_syscore_ops = {
+ .suspend = pxa_gpio_suspend,
+ .resume = pxa_gpio_resume,
+};
diff --git a/drivers/gpio/gpio-s5pc100.c b/drivers/gpio/gpio-s5pc100.c
deleted file mode 100644
index 7f87b0c76e0..00000000000
--- a/drivers/gpio/gpio-s5pc100.c
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * S5PC100 - GPIOlib support
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright 2009 Samsung Electronics Co
- * Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <mach/map.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg.h>
-#include <plat/gpio-cfg-helpers.h>
-
-/* S5PC100 GPIO bank summary:
- *
- * Bank GPIOs Style INT Type
- * A0 8 4Bit GPIO_INT0
- * A1 5 4Bit GPIO_INT1
- * B 8 4Bit GPIO_INT2
- * C 5 4Bit GPIO_INT3
- * D 7 4Bit GPIO_INT4
- * E0 8 4Bit GPIO_INT5
- * E1 6 4Bit GPIO_INT6
- * F0 8 4Bit GPIO_INT7
- * F1 8 4Bit GPIO_INT8
- * F2 8 4Bit GPIO_INT9
- * F3 4 4Bit GPIO_INT10
- * G0 8 4Bit GPIO_INT11
- * G1 3 4Bit GPIO_INT12
- * G2 7 4Bit GPIO_INT13
- * G3 7 4Bit GPIO_INT14
- * H0 8 4Bit WKUP_INT
- * H1 8 4Bit WKUP_INT
- * H2 8 4Bit WKUP_INT
- * H3 8 4Bit WKUP_INT
- * I 8 4Bit GPIO_INT15
- * J0 8 4Bit GPIO_INT16
- * J1 5 4Bit GPIO_INT17
- * J2 8 4Bit GPIO_INT18
- * J3 8 4Bit GPIO_INT19
- * J4 4 4Bit GPIO_INT20
- * K0 8 4Bit None
- * K1 6 4Bit None
- * K2 8 4Bit None
- * K3 8 4Bit None
- * L0 8 4Bit None
- * L1 8 4Bit None
- * L2 8 4Bit None
- * L3 8 4Bit None
- */
-
-static struct s3c_gpio_cfg gpio_cfg = {
- .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
- .set_pull = s3c_gpio_setpull_updown,
- .get_pull = s3c_gpio_getpull_updown,
-};
-
-static struct s3c_gpio_cfg gpio_cfg_eint = {
- .cfg_eint = 0xf,
- .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
- .set_pull = s3c_gpio_setpull_updown,
- .get_pull = s3c_gpio_getpull_updown,
-};
-
-static struct s3c_gpio_cfg gpio_cfg_noint = {
- .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
- .set_pull = s3c_gpio_setpull_updown,
- .get_pull = s3c_gpio_getpull_updown,
-};
-
-/*
- * GPIO bank's base address given the index of the bank in the
- * list of all gpio banks.
- */
-#define S5PC100_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
-
-/*
- * Following are the gpio banks in S5PC100.
- *
- * The 'config' member when left to NULL, is initialized to the default
- * structure gpio_cfg in the init function below.
- *
- * The 'base' member is also initialized in the init function below.
- * Note: The initialization of 'base' member of s3c_gpio_chip structure
- * uses the above macro and depends on the banks being listed in order here.
- */
-static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
- {
- .chip = {
- .base = S5PC100_GPA0(0),
- .ngpio = S5PC100_GPIO_A0_NR,
- .label = "GPA0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPA1(0),
- .ngpio = S5PC100_GPIO_A1_NR,
- .label = "GPA1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPB(0),
- .ngpio = S5PC100_GPIO_B_NR,
- .label = "GPB",
- },
- }, {
- .chip = {
- .base = S5PC100_GPC(0),
- .ngpio = S5PC100_GPIO_C_NR,
- .label = "GPC",
- },
- }, {
- .chip = {
- .base = S5PC100_GPD(0),
- .ngpio = S5PC100_GPIO_D_NR,
- .label = "GPD",
- },
- }, {
- .chip = {
- .base = S5PC100_GPE0(0),
- .ngpio = S5PC100_GPIO_E0_NR,
- .label = "GPE0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPE1(0),
- .ngpio = S5PC100_GPIO_E1_NR,
- .label = "GPE1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF0(0),
- .ngpio = S5PC100_GPIO_F0_NR,
- .label = "GPF0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF1(0),
- .ngpio = S5PC100_GPIO_F1_NR,
- .label = "GPF1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF2(0),
- .ngpio = S5PC100_GPIO_F2_NR,
- .label = "GPF2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF3(0),
- .ngpio = S5PC100_GPIO_F3_NR,
- .label = "GPF3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG0(0),
- .ngpio = S5PC100_GPIO_G0_NR,
- .label = "GPG0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG1(0),
- .ngpio = S5PC100_GPIO_G1_NR,
- .label = "GPG1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG2(0),
- .ngpio = S5PC100_GPIO_G2_NR,
- .label = "GPG2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG3(0),
- .ngpio = S5PC100_GPIO_G3_NR,
- .label = "GPG3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPI(0),
- .ngpio = S5PC100_GPIO_I_NR,
- .label = "GPI",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ0(0),
- .ngpio = S5PC100_GPIO_J0_NR,
- .label = "GPJ0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ1(0),
- .ngpio = S5PC100_GPIO_J1_NR,
- .label = "GPJ1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ2(0),
- .ngpio = S5PC100_GPIO_J2_NR,
- .label = "GPJ2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ3(0),
- .ngpio = S5PC100_GPIO_J3_NR,
- .label = "GPJ3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ4(0),
- .ngpio = S5PC100_GPIO_J4_NR,
- .label = "GPJ4",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPK0(0),
- .ngpio = S5PC100_GPIO_K0_NR,
- .label = "GPK0",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPK1(0),
- .ngpio = S5PC100_GPIO_K1_NR,
- .label = "GPK1",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPK2(0),
- .ngpio = S5PC100_GPIO_K2_NR,
- .label = "GPK2",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPK3(0),
- .ngpio = S5PC100_GPIO_K3_NR,
- .label = "GPK3",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPL0(0),
- .ngpio = S5PC100_GPIO_L0_NR,
- .label = "GPL0",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPL1(0),
- .ngpio = S5PC100_GPIO_L1_NR,
- .label = "GPL1",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPL2(0),
- .ngpio = S5PC100_GPIO_L2_NR,
- .label = "GPL2",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPL3(0),
- .ngpio = S5PC100_GPIO_L3_NR,
- .label = "GPL3",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PC100_GPL4(0),
- .ngpio = S5PC100_GPIO_L4_NR,
- .label = "GPL4",
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC00),
- .config = &gpio_cfg_eint,
- .irq_base = IRQ_EINT(0),
- .chip = {
- .base = S5PC100_GPH0(0),
- .ngpio = S5PC100_GPIO_H0_NR,
- .label = "GPH0",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC20),
- .config = &gpio_cfg_eint,
- .irq_base = IRQ_EINT(8),
- .chip = {
- .base = S5PC100_GPH1(0),
- .ngpio = S5PC100_GPIO_H1_NR,
- .label = "GPH1",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC40),
- .config = &gpio_cfg_eint,
- .irq_base = IRQ_EINT(16),
- .chip = {
- .base = S5PC100_GPH2(0),
- .ngpio = S5PC100_GPIO_H2_NR,
- .label = "GPH2",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC60),
- .config = &gpio_cfg_eint,
- .irq_base = IRQ_EINT(24),
- .chip = {
- .base = S5PC100_GPH3(0),
- .ngpio = S5PC100_GPIO_H3_NR,
- .label = "GPH3",
- .to_irq = samsung_gpiolib_to_irq,
- },
- },
-};
-
-static __init int s5pc100_gpiolib_init(void)
-{
- struct s3c_gpio_chip *chip = s5pc100_gpio_chips;
- int nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
- int gpioint_group = 0;
- int i;
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (chip->config == NULL) {
- chip->config = &gpio_cfg;
- chip->group = gpioint_group++;
- }
- if (chip->base == NULL)
- chip->base = S5PC100_BANK_BASE(i);
- }
-
- samsung_gpiolib_add_4bit_chips(s5pc100_gpio_chips, nr_chips);
- s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
-
- return 0;
-}
-core_initcall(s5pc100_gpiolib_init);
diff --git a/drivers/gpio/gpio-s5pv210.c b/drivers/gpio/gpio-s5pv210.c
deleted file mode 100644
index eb12f1602de..00000000000
--- a/drivers/gpio/gpio-s5pv210.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * S5PV210 - GPIOlib support
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg.h>
-#include <plat/gpio-cfg-helpers.h>
-#include <mach/map.h>
-
-static struct s3c_gpio_cfg gpio_cfg = {
- .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
- .set_pull = s3c_gpio_setpull_updown,
- .get_pull = s3c_gpio_getpull_updown,
-};
-
-static struct s3c_gpio_cfg gpio_cfg_noint = {
- .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
- .set_pull = s3c_gpio_setpull_updown,
- .get_pull = s3c_gpio_getpull_updown,
-};
-
-/* GPIO bank's base address given the index of the bank in the
- * list of all gpio banks.
- */
-#define S5PV210_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
-
-/*
- * Following are the gpio banks in v210.
- *
- * The 'config' member when left to NULL, is initialized to the default
- * structure gpio_cfg in the init function below.
- *
- * The 'base' member is also initialized in the init function below.
- * Note: The initialization of 'base' member of s3c_gpio_chip structure
- * uses the above macro and depends on the banks being listed in order here.
- */
-static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
- {
- .chip = {
- .base = S5PV210_GPA0(0),
- .ngpio = S5PV210_GPIO_A0_NR,
- .label = "GPA0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPA1(0),
- .ngpio = S5PV210_GPIO_A1_NR,
- .label = "GPA1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPB(0),
- .ngpio = S5PV210_GPIO_B_NR,
- .label = "GPB",
- },
- }, {
- .chip = {
- .base = S5PV210_GPC0(0),
- .ngpio = S5PV210_GPIO_C0_NR,
- .label = "GPC0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPC1(0),
- .ngpio = S5PV210_GPIO_C1_NR,
- .label = "GPC1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPD0(0),
- .ngpio = S5PV210_GPIO_D0_NR,
- .label = "GPD0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPD1(0),
- .ngpio = S5PV210_GPIO_D1_NR,
- .label = "GPD1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPE0(0),
- .ngpio = S5PV210_GPIO_E0_NR,
- .label = "GPE0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPE1(0),
- .ngpio = S5PV210_GPIO_E1_NR,
- .label = "GPE1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF0(0),
- .ngpio = S5PV210_GPIO_F0_NR,
- .label = "GPF0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF1(0),
- .ngpio = S5PV210_GPIO_F1_NR,
- .label = "GPF1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF2(0),
- .ngpio = S5PV210_GPIO_F2_NR,
- .label = "GPF2",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF3(0),
- .ngpio = S5PV210_GPIO_F3_NR,
- .label = "GPF3",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG0(0),
- .ngpio = S5PV210_GPIO_G0_NR,
- .label = "GPG0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG1(0),
- .ngpio = S5PV210_GPIO_G1_NR,
- .label = "GPG1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG2(0),
- .ngpio = S5PV210_GPIO_G2_NR,
- .label = "GPG2",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG3(0),
- .ngpio = S5PV210_GPIO_G3_NR,
- .label = "GPG3",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PV210_GPI(0),
- .ngpio = S5PV210_GPIO_I_NR,
- .label = "GPI",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ0(0),
- .ngpio = S5PV210_GPIO_J0_NR,
- .label = "GPJ0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ1(0),
- .ngpio = S5PV210_GPIO_J1_NR,
- .label = "GPJ1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ2(0),
- .ngpio = S5PV210_GPIO_J2_NR,
- .label = "GPJ2",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ3(0),
- .ngpio = S5PV210_GPIO_J3_NR,
- .label = "GPJ3",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ4(0),
- .ngpio = S5PV210_GPIO_J4_NR,
- .label = "GPJ4",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PV210_MP01(0),
- .ngpio = S5PV210_GPIO_MP01_NR,
- .label = "MP01",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PV210_MP02(0),
- .ngpio = S5PV210_GPIO_MP02_NR,
- .label = "MP02",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PV210_MP03(0),
- .ngpio = S5PV210_GPIO_MP03_NR,
- .label = "MP03",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PV210_MP04(0),
- .ngpio = S5PV210_GPIO_MP04_NR,
- .label = "MP04",
- },
- }, {
- .config = &gpio_cfg_noint,
- .chip = {
- .base = S5PV210_MP05(0),
- .ngpio = S5PV210_GPIO_MP05_NR,
- .label = "MP05",
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC00),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(0),
- .chip = {
- .base = S5PV210_GPH0(0),
- .ngpio = S5PV210_GPIO_H0_NR,
- .label = "GPH0",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC20),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(8),
- .chip = {
- .base = S5PV210_GPH1(0),
- .ngpio = S5PV210_GPIO_H1_NR,
- .label = "GPH1",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC40),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(16),
- .chip = {
- .base = S5PV210_GPH2(0),
- .ngpio = S5PV210_GPIO_H2_NR,
- .label = "GPH2",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC60),
- .config = &gpio_cfg_noint,
- .irq_base = IRQ_EINT(24),
- .chip = {
- .base = S5PV210_GPH3(0),
- .ngpio = S5PV210_GPIO_H3_NR,
- .label = "GPH3",
- .to_irq = samsung_gpiolib_to_irq,
- },
- },
-};
-
-static __init int s5pv210_gpiolib_init(void)
-{
- struct s3c_gpio_chip *chip = s5pv210_gpio_4bit;
- int nr_chips = ARRAY_SIZE(s5pv210_gpio_4bit);
- int gpioint_group = 0;
- int i = 0;
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (chip->config == NULL) {
- chip->config = &gpio_cfg;
- chip->group = gpioint_group++;
- }
- if (chip->base == NULL)
- chip->base = S5PV210_BANK_BASE(i);
- }
-
- samsung_gpiolib_add_4bit_chips(s5pv210_gpio_4bit, nr_chips);
- s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
-
- return 0;
-}
-core_initcall(s5pv210_gpiolib_init);
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
new file mode 100644
index 00000000000..b6c1f6d8064
--- /dev/null
+++ b/drivers/gpio/gpio-sa1100.c
@@ -0,0 +1,63 @@
+/*
+ * linux/arch/arm/mach-sa1100/gpio.c
+ *
+ * Generic SA-1100 GPIO handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <mach/hardware.h>
+
+static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return GPLR & GPIO_GPIO(offset);
+}
+
+static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ if (value)
+ GPSR = GPIO_GPIO(offset);
+ else
+ GPCR = GPIO_GPIO(offset);
+}
+
+static int sa1100_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ GPDR &= ~GPIO_GPIO(offset);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sa1100_gpio_set(chip, offset, value);
+ GPDR |= GPIO_GPIO(offset);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static struct gpio_chip sa1100_gpio_chip = {
+ .label = "gpio",
+ .direction_input = sa1100_direction_input,
+ .direction_output = sa1100_direction_output,
+ .set = sa1100_gpio_set,
+ .get = sa1100_gpio_get,
+ .base = 0,
+ .ngpio = GPIO_MAX + 1,
+};
+
+void __init sa1100_init_gpio(void)
+{
+ gpiochip_add(&sa1100_gpio_chip);
+}
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
new file mode 100644
index 00000000000..86625185271
--- /dev/null
+++ b/drivers/gpio/gpio-samsung.c
@@ -0,0 +1,2712 @@
+/*
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * SAMSUNG - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
+#include <linux/ioport.h>
+
+#include <asm/irq.h>
+
+#include <mach/hardware.h>
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+#include <plat/gpio-fns.h>
+#include <plat/pm.h>
+
+#ifndef DEBUG_GPIO
+#define gpio_dbg(x...) do { } while (0)
+#else
+#define gpio_dbg(x...) printk(KERN_DEBUG x)
+#endif
+
+int samsung_gpio_setpull_updown(struct samsung_gpio_chip *chip,
+ unsigned int off, samsung_gpio_pull_t pull)
+{
+ void __iomem *reg = chip->base + 0x08;
+ int shift = off * 2;
+ u32 pup;
+
+ pup = __raw_readl(reg);
+ pup &= ~(3 << shift);
+ pup |= pull << shift;
+ __raw_writel(pup, reg);
+
+ return 0;
+}
+
+samsung_gpio_pull_t samsung_gpio_getpull_updown(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ void __iomem *reg = chip->base + 0x08;
+ int shift = off * 2;
+ u32 pup = __raw_readl(reg);
+
+ pup >>= shift;
+ pup &= 0x3;
+
+ return (__force samsung_gpio_pull_t)pup;
+}
+
+int s3c2443_gpio_setpull(struct samsung_gpio_chip *chip,
+ unsigned int off, samsung_gpio_pull_t pull)
+{
+ switch (pull) {
+ case S3C_GPIO_PULL_NONE:
+ pull = 0x01;
+ break;
+ case S3C_GPIO_PULL_UP:
+ pull = 0x00;
+ break;
+ case S3C_GPIO_PULL_DOWN:
+ pull = 0x02;
+ break;
+ }
+ return samsung_gpio_setpull_updown(chip, off, pull);
+}
+
+samsung_gpio_pull_t s3c2443_gpio_getpull(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ samsung_gpio_pull_t pull;
+
+ pull = samsung_gpio_getpull_updown(chip, off);
+
+ switch (pull) {
+ case 0x00:
+ pull = S3C_GPIO_PULL_UP;
+ break;
+ case 0x01:
+ case 0x03:
+ pull = S3C_GPIO_PULL_NONE;
+ break;
+ case 0x02:
+ pull = S3C_GPIO_PULL_DOWN;
+ break;
+ }
+
+ return pull;
+}
+
+static int s3c24xx_gpio_setpull_1(struct samsung_gpio_chip *chip,
+ unsigned int off, samsung_gpio_pull_t pull,
+ samsung_gpio_pull_t updown)
+{
+ void __iomem *reg = chip->base + 0x08;
+ u32 pup = __raw_readl(reg);
+
+ if (pull == updown)
+ pup &= ~(1 << off);
+ else if (pull == S3C_GPIO_PULL_NONE)
+ pup |= (1 << off);
+ else
+ return -EINVAL;
+
+ __raw_writel(pup, reg);
+ return 0;
+}
+
+static samsung_gpio_pull_t s3c24xx_gpio_getpull_1(struct samsung_gpio_chip *chip,
+ unsigned int off,
+ samsung_gpio_pull_t updown)
+{
+ void __iomem *reg = chip->base + 0x08;
+ u32 pup = __raw_readl(reg);
+
+ pup &= (1 << off);
+ return pup ? S3C_GPIO_PULL_NONE : updown;
+}
+
+samsung_gpio_pull_t s3c24xx_gpio_getpull_1up(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ return s3c24xx_gpio_getpull_1(chip, off, S3C_GPIO_PULL_UP);
+}
+
+int s3c24xx_gpio_setpull_1up(struct samsung_gpio_chip *chip,
+ unsigned int off, samsung_gpio_pull_t pull)
+{
+ return s3c24xx_gpio_setpull_1(chip, off, pull, S3C_GPIO_PULL_UP);
+}
+
+samsung_gpio_pull_t s3c24xx_gpio_getpull_1down(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ return s3c24xx_gpio_getpull_1(chip, off, S3C_GPIO_PULL_DOWN);
+}
+
+int s3c24xx_gpio_setpull_1down(struct samsung_gpio_chip *chip,
+ unsigned int off, samsung_gpio_pull_t pull)
+{
+ return s3c24xx_gpio_setpull_1(chip, off, pull, S3C_GPIO_PULL_DOWN);
+}
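/*
 * Illustrative summary (editorial aside, not part of the patch): in the
 * one-bit pull scheme implemented above, a cleared bit selects the bank's
 * fixed resistor (pull-up or pull-down, depending on the bank) and a set
 * bit disables it. For a pull-up bank and off = 5:
 *
 *   S3C_GPIO_PULL_UP   -> pup &= ~(1 << 5)   resistor enabled
 *   S3C_GPIO_PULL_NONE -> pup |=  (1 << 5)   resistor disabled
 *   S3C_GPIO_PULL_DOWN -> -EINVAL            not available on this bank
 */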
+
+static int exynos4_gpio_setpull(struct samsung_gpio_chip *chip,
+ unsigned int off, samsung_gpio_pull_t pull)
+{
+ if (pull == S3C_GPIO_PULL_UP)
+ pull = 3;
+
+ return samsung_gpio_setpull_updown(chip, off, pull);
+}
+
+static samsung_gpio_pull_t exynos4_gpio_getpull(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ samsung_gpio_pull_t pull;
+
+ pull = samsung_gpio_getpull_updown(chip, off);
+
+ if (pull == 3)
+ pull = S3C_GPIO_PULL_UP;
+
+ return pull;
+}
+
+/*
+ * samsung_gpio_setcfg_2bit - Samsung 2bit style GPIO configuration.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ * @cfg: The configuration value to set.
+ *
+ * This helper deals with the GPIO cases where the control register
+ * has two bits of configuration per gpio, which have the following
+ * functions:
+ * 00 = input
+ * 01 = output
+ * 1x = special function
+ */
+
+static int samsung_gpio_setcfg_2bit(struct samsung_gpio_chip *chip,
+ unsigned int off, unsigned int cfg)
+{
+ void __iomem *reg = chip->base;
+ unsigned int shift = off * 2;
+ u32 con;
+
+ if (samsung_gpio_is_cfg_special(cfg)) {
+ cfg &= 0xf;
+ if (cfg > 3)
+ return -EINVAL;
+
+ cfg <<= shift;
+ }
+
+ con = __raw_readl(reg);
+ con &= ~(0x3 << shift);
+ con |= cfg;
+ __raw_writel(con, reg);
+
+ return 0;
+}
+
+/*
+ * samsung_gpio_getcfg_2bit - Samsung 2bit style GPIO configuration read.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of samsung_gpio_setcfg_2bit(). Will return a value which
+ * could be directly passed back to samsung_gpio_setcfg_2bit(), from the
+ * S3C_GPIO_SPECIAL() macro.
+ */
+
+static unsigned int samsung_gpio_getcfg_2bit(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ u32 con;
+
+ con = __raw_readl(chip->base);
+ con >>= off * 2;
+ con &= 3;
+
+ /* this conversion works for IN and OUT as well as special mode */
+ return S3C_GPIO_SPECIAL(con);
+}
+
+/*
+ * samsung_gpio_setcfg_4bit - Samsung 4bit single register GPIO config.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ * @cfg: The configuration value to set.
+ *
+ * This helper deals with the GPIO cases where the control register has 4 bits
+ * of control per GPIO, generally in the form of:
+ * 0000 = Input
+ * 0001 = Output
+ * others = Special functions (dependent on bank)
+ *
+ * Note: since this code also deals with the case where there are two control
+ * registers instead of one, we do not need a separate set of functions for
+ * each case.
+ */
+
+static int samsung_gpio_setcfg_4bit(struct samsung_gpio_chip *chip,
+ unsigned int off, unsigned int cfg)
+{
+ void __iomem *reg = chip->base;
+ unsigned int shift = (off & 7) * 4;
+ u32 con;
+
+ if (off < 8 && chip->chip.ngpio > 8)
+ reg -= 4;
+
+ if (samsung_gpio_is_cfg_special(cfg)) {
+ cfg &= 0xf;
+ cfg <<= shift;
+ }
+
+ con = __raw_readl(reg);
+ con &= ~(0xf << shift);
+ con |= cfg;
+ __raw_writel(con, reg);
+
+ return 0;
+}
+
+/*
+ * samsung_gpio_getcfg_4bit - Samsung 4bit single register GPIO config read.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of samsung_gpio_setcfg_4bit(), turning a gpio configuration
+ * register setting into a value the software can use, such as could be passed
+ * to samsung_gpio_setcfg_4bit().
+ *
+ * @sa samsung_gpio_getcfg_2bit
+ */
+
+static unsigned samsung_gpio_getcfg_4bit(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ void __iomem *reg = chip->base;
+ unsigned int shift = (off & 7) * 4;
+ u32 con;
+
+ if (off < 8 && chip->chip.ngpio > 8)
+ reg -= 4;
+
+ con = __raw_readl(reg);
+ con >>= shift;
+ con &= 0xf;
+
+ /* this conversion works for IN and OUT as well as special mode */
+ return S3C_GPIO_SPECIAL(con);
+}
+
+#ifdef CONFIG_PLAT_S3C24XX
+/*
+ * s3c24xx_gpio_setcfg_abank - S3C24XX style GPIO configuration (Bank A)
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ * @cfg: The configuration value to set.
+ *
+ * This helper deals with the GPIO cases where the control register
+ * has one bit of configuration for the gpio, where setting the bit
+ * means the pin is in special function mode and unset means output.
+ */
+
+static int s3c24xx_gpio_setcfg_abank(struct samsung_gpio_chip *chip,
+ unsigned int off, unsigned int cfg)
+{
+ void __iomem *reg = chip->base;
+ unsigned int shift = off;
+ u32 con;
+
+ if (samsung_gpio_is_cfg_special(cfg)) {
+ cfg &= 0xf;
+
+ /* Map output to 0, and SFN2 to 1 */
+ cfg -= 1;
+ if (cfg > 1)
+ return -EINVAL;
+
+ cfg <<= shift;
+ }
+
+ con = __raw_readl(reg);
+ con &= ~(0x1 << shift);
+ con |= cfg;
+ __raw_writel(con, reg);
+
+ return 0;
+}
+
+/*
+ * s3c24xx_gpio_getcfg_abank - S3C24XX style GPIO configuration read (Bank A)
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of s3c24xx_gpio_setcfg_abank(), turning a GPIO register setting
+ * into a usable GPIO configuration value.
+ *
+ * @sa samsung_gpio_getcfg_2bit
+ * @sa samsung_gpio_getcfg_4bit
+ */
+
+static unsigned s3c24xx_gpio_getcfg_abank(struct samsung_gpio_chip *chip,
+ unsigned int off)
+{
+ u32 con;
+
+ con = __raw_readl(chip->base);
+ con >>= off;
+ con &= 1;
+ con++;
+
+ return S3C_GPIO_SFN(con);
+}
+#endif
+
+#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
+static int s5p64x0_gpio_setcfg_rbank(struct samsung_gpio_chip *chip,
+ unsigned int off, unsigned int cfg)
+{
+ void __iomem *reg = chip->base;
+ unsigned int shift;
+ u32 con;
+
+ switch (off) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ shift = (off & 7) * 4;
+ reg -= 4;
+ break;
+ case 6:
+ shift = ((off + 1) & 7) * 4;
+ reg -= 4;
+ default:
+ shift = ((off + 1) & 7) * 4;
+ break;
+ }
+
+ if (samsung_gpio_is_cfg_special(cfg)) {
+ cfg &= 0xf;
+ cfg <<= shift;
+ }
+
+ con = __raw_readl(reg);
+ con &= ~(0xf << shift);
+ con |= cfg;
+ __raw_writel(con, reg);
+
+ return 0;
+}
+#endif
+
+static void __init samsung_gpiolib_set_cfg(struct samsung_gpio_cfg *chipcfg,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chipcfg++) {
+ if (!chipcfg->set_config)
+ chipcfg->set_config = samsung_gpio_setcfg_4bit;
+ if (!chipcfg->get_config)
+ chipcfg->get_config = samsung_gpio_getcfg_4bit;
+ if (!chipcfg->set_pull)
+ chipcfg->set_pull = samsung_gpio_setpull_updown;
+ if (!chipcfg->get_pull)
+ chipcfg->get_pull = samsung_gpio_getpull_updown;
+ }
+}
+
+struct samsung_gpio_cfg s3c24xx_gpiocfg_default = {
+ .set_config = samsung_gpio_setcfg_2bit,
+ .get_config = samsung_gpio_getcfg_2bit,
+};
+
+#ifdef CONFIG_PLAT_S3C24XX
+static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
+ .set_config = s3c24xx_gpio_setcfg_abank,
+ .get_config = s3c24xx_gpio_getcfg_abank,
+};
+#endif
+
+static struct samsung_gpio_cfg exynos4_gpio_cfg = {
+ .set_pull = exynos4_gpio_setpull,
+ .get_pull = exynos4_gpio_getpull,
+ .set_config = samsung_gpio_setcfg_4bit,
+ .get_config = samsung_gpio_getcfg_4bit,
+};
+
+#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
+static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
+ .cfg_eint = 0x3,
+ .set_config = s5p64x0_gpio_setcfg_rbank,
+ .get_config = samsung_gpio_getcfg_4bit,
+ .set_pull = samsung_gpio_setpull_updown,
+ .get_pull = samsung_gpio_getpull_updown,
+};
+#endif
+
+static struct samsung_gpio_cfg samsung_gpio_cfgs[] = {
+ {
+ .cfg_eint = 0x0,
+ }, {
+ .cfg_eint = 0x3,
+ }, {
+ .cfg_eint = 0x7,
+ }, {
+ .cfg_eint = 0xF,
+ }, {
+ .cfg_eint = 0x0,
+ .set_config = samsung_gpio_setcfg_2bit,
+ .get_config = samsung_gpio_getcfg_2bit,
+ }, {
+ .cfg_eint = 0x2,
+ .set_config = samsung_gpio_setcfg_2bit,
+ .get_config = samsung_gpio_getcfg_2bit,
+ }, {
+ .cfg_eint = 0x3,
+ .set_config = samsung_gpio_setcfg_2bit,
+ .get_config = samsung_gpio_getcfg_2bit,
+ }, {
+ .set_config = samsung_gpio_setcfg_2bit,
+ .get_config = samsung_gpio_getcfg_2bit,
+ }, {
+ .set_pull = exynos4_gpio_setpull,
+ .get_pull = exynos4_gpio_getpull,
+ }, {
+ .cfg_eint = 0x3,
+ .set_pull = exynos4_gpio_setpull,
+ .get_pull = exynos4_gpio_getpull,
+ }
+};
+
+/*
+ * Default routines for controlling GPIO, based on the original S3C24XX
+ * GPIO functions which deal with the case where each gpio bank of the
+ * chip is as following:
+ *
+ * base + 0x00: Control register, 2 bits per gpio
+ * gpio n: 2 bits starting at (2*n)
+ * 00 = input, 01 = output, others mean special-function
+ * base + 0x04: Data register, 1 bit per gpio
+ * bit n: data bit n
+*/
+
+static int samsung_gpiolib_2bit_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long flags;
+ unsigned long con;
+
+ samsung_gpio_lock(ourchip, flags);
+
+ con = __raw_readl(base + 0x00);
+ con &= ~(3 << (offset * 2));
+
+ __raw_writel(con, base + 0x00);
+
+ samsung_gpio_unlock(ourchip, flags);
+ return 0;
+}
+
+static int samsung_gpiolib_2bit_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long flags;
+ unsigned long dat;
+ unsigned long con;
+
+ samsung_gpio_lock(ourchip, flags);
+
+ dat = __raw_readl(base + 0x04);
+ dat &= ~(1 << offset);
+ if (value)
+ dat |= 1 << offset;
+ __raw_writel(dat, base + 0x04);
+
+ con = __raw_readl(base + 0x00);
+ con &= ~(3 << (offset * 2));
+ con |= 1 << (offset * 2);
+
+ __raw_writel(con, base + 0x00);
+ __raw_writel(dat, base + 0x04);
+
+ samsung_gpio_unlock(ourchip, flags);
+ return 0;
+}
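/*
 * Worked example (editorial aside, not part of the patch): in the 2-bit
 * scheme described above, GPIO offset 3 owns CON bits [7:6], so switching
 * it to output performs
 *
 *   con &= ~(3 << 6);    clear bits 7:6
 *   con |=  (1 << 6);    01 = output
 *
 * which is exactly what samsung_gpiolib_2bit_output() does for offset = 3.
 */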
+
+/*
+ * The samsung_gpiolib_4bit routines are to control the gpio banks where
+ * the gpio configuration register (GPxCON) has 4 bits per GPIO, as the
+ * following example:
+ *
+ * base + 0x00: Control register, 4 bits per gpio
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x04: Data register, 1 bit per gpio
+ * bit n: data bit n
+ *
+ * Note, since the data register is one bit per gpio and is at base + 0x4
+ * we can use samsung_gpiolib_get and samsung_gpiolib_set to change the
+ * state of the output.
+ */
+
+static int samsung_gpiolib_4bit_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long con;
+
+ con = __raw_readl(base + GPIOCON_OFF);
+ con &= ~(0xf << con_4bit_shift(offset));
+ __raw_writel(con, base + GPIOCON_OFF);
+
+ gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
+
+ return 0;
+}
+
+static int samsung_gpiolib_4bit_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long con;
+ unsigned long dat;
+
+ con = __raw_readl(base + GPIOCON_OFF);
+ con &= ~(0xf << con_4bit_shift(offset));
+ con |= 0x1 << con_4bit_shift(offset);
+
+ dat = __raw_readl(base + GPIODAT_OFF);
+
+ if (value)
+ dat |= 1 << offset;
+ else
+ dat &= ~(1 << offset);
+
+ __raw_writel(dat, base + GPIODAT_OFF);
+ __raw_writel(con, base + GPIOCON_OFF);
+ __raw_writel(dat, base + GPIODAT_OFF);
+
+ gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+
+ return 0;
+}
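/*
 * Worked example (editorial aside, not part of the patch; it assumes
 * con_4bit_shift(off) expands to off * 4 for a single-register bank, as
 * the layout described above implies): GPIO offset 5 owns CON bits
 * [23:20], so configuring it as output performs
 *
 *   con &= ~(0xf << 20);
 *   con |=  (0x1 << 20);
 *
 * matching samsung_gpiolib_4bit_output() for offset = 5.
 */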
+
+/*
+ * The next set of routines are for the case where the GPIO configuration
+ * registers are 4 bits per GPIO but there is more than one register (the
+ * bank has more than 8 GPIOs).
+ *
+ * This case is similar to the 4-bit case, but the registers are as
+ * follows:
+ *
+ * base + 0x00: Control register, 4 bits per gpio (lower 8 GPIOs)
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x04: Control register, 4 bits per gpio (up to 8 additional GPIOs)
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x08: Data register, 1 bit per gpio
+ * bit n: data bit n
+ *
+ * To allow us to use the samsung_gpiolib_get and samsung_gpiolib_set
+ * routines we store the 'base + 0x4' address so that these routines see
+ * the data register at ourchip->base + 0x04.
+ */
+
+static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+
+ if (offset > 7)
+ offset -= 8;
+ else
+ regcon -= 4;
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(offset));
+ __raw_writel(con, regcon);
+
+ gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);
+
+ return 0;
+}
+
+static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+ unsigned long dat;
+ unsigned con_offset = offset;
+
+ if (con_offset > 7)
+ con_offset -= 8;
+ else
+ regcon -= 4;
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(con_offset));
+ con |= 0x1 << con_4bit_shift(con_offset);
+
+ dat = __raw_readl(base + GPIODAT_OFF);
+
+ if (value)
+ dat |= 1 << offset;
+ else
+ dat &= ~(1 << offset);
+
+ __raw_writel(dat, base + GPIODAT_OFF);
+ __raw_writel(con, regcon);
+ __raw_writel(dat, base + GPIODAT_OFF);
+
+ gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+
+ return 0;
+}
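/*
 * Illustrative note (editorial aside, not part of the patch): because
 * chip->base is stored pointing at the bank's second control register,
 * the two-register helpers above reach CON0 with "regcon -= 4" for
 * offsets 0..7 and use regcon as-is (CON1) for offsets 8 and up, e.g.
 *
 *   offset 3  -> regcon = base - 4 (CON0), field at bits [15:12]
 *   offset 10 -> regcon = base     (CON1), field at bits [11:8]
 */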
+
+#ifdef CONFIG_PLAT_S3C24XX
+/* The next set of routines are for the case of s3c24xx bank a */
+
+static int s3c24xx_gpiolib_banka_input(struct gpio_chip *chip, unsigned offset)
+{
+ return -EINVAL;
+}
+
+static int s3c24xx_gpiolib_banka_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long flags;
+ unsigned long dat;
+ unsigned long con;
+
+ local_irq_save(flags);
+
+ con = __raw_readl(base + 0x00);
+ dat = __raw_readl(base + 0x04);
+
+ dat &= ~(1 << offset);
+ if (value)
+ dat |= 1 << offset;
+
+ __raw_writel(dat, base + 0x04);
+
+ con &= ~(1 << offset);
+
+ __raw_writel(con, base + 0x00);
+ __raw_writel(dat, base + 0x04);
+
+ local_irq_restore(flags);
+ return 0;
+}
+#endif
+
+/* The next set of routines are for the case of s5p64x0 bank r */
+
+static int s5p64x0_gpiolib_rbank_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+ unsigned long flags;
+
+ switch (offset) {
+ case 6:
+ offset += 1;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ regcon -= 4;
+ break;
+ default:
+ offset -= 7;
+ break;
+ }
+
+ samsung_gpio_lock(ourchip, flags);
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(offset));
+ __raw_writel(con, regcon);
+
+ samsung_gpio_unlock(ourchip, flags);
+
+ return 0;
+}
+
+static int s5p64x0_gpiolib_rbank_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+ unsigned long dat;
+ unsigned long flags;
+ unsigned con_offset = offset;
+
+ switch (con_offset) {
+ case 6:
+ con_offset += 1;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ regcon -= 4;
+ break;
+ default:
+ con_offset -= 7;
+ break;
+ }
+
+ samsung_gpio_lock(ourchip, flags);
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(con_offset));
+ con |= 0x1 << con_4bit_shift(con_offset);
+
+ dat = __raw_readl(base + GPIODAT_OFF);
+ if (value)
+ dat |= 1 << offset;
+ else
+ dat &= ~(1 << offset);
+
+ __raw_writel(con, regcon);
+ __raw_writel(dat, base + GPIODAT_OFF);
+
+ samsung_gpio_unlock(ourchip, flags);
+
+ return 0;
+}
+
+static void samsung_gpiolib_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long flags;
+ unsigned long dat;
+
+ samsung_gpio_lock(ourchip, flags);
+
+ dat = __raw_readl(base + 0x04);
+ dat &= ~(1 << offset);
+ if (value)
+ dat |= 1 << offset;
+ __raw_writel(dat, base + 0x04);
+
+ samsung_gpio_unlock(ourchip, flags);
+}
+
+static int samsung_gpiolib_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
+ unsigned long val;
+
+ val = __raw_readl(ourchip->base + 0x04);
+ val >>= offset;
+ val &= 1;
+
+ return val;
+}
+
+/*
+ * CONFIG_S3C_GPIO_TRACK enables the tracking of the s3c specific gpios
+ * for use with the configuration calls, and other parts of the s3c gpiolib
+ * support code.
+ *
+ * Not all s3c support code will need this, as some configurations of cpu
+ * may only support one or two different configuration options and have an
+ * easy gpio to samsung_gpio_chip mapping function. If this is the case, then
+ * the machine support file should provide its own samsung_gpiolib_getchip()
+ * and any other necessary functions.
+ */
+
+#ifdef CONFIG_S3C_GPIO_TRACK
+struct samsung_gpio_chip *s3c_gpios[S3C_GPIO_END];
+
+static __init void s3c_gpiolib_track(struct samsung_gpio_chip *chip)
+{
+ unsigned int gpn;
+ int i;
+
+ gpn = chip->chip.base;
+ for (i = 0; i < chip->chip.ngpio; i++, gpn++) {
+ BUG_ON(gpn >= ARRAY_SIZE(s3c_gpios));
+ s3c_gpios[gpn] = chip;
+ }
+}
+#endif /* CONFIG_S3C_GPIO_TRACK */
+
+/*
+ * samsung_gpiolib_add() - add the Samsung gpio_chip.
+ * @chip: The chip to register
+ *
+ * This is a wrapper to gpiochip_add() that takes our specific gpio chip
+ * information and makes the necessary alterations for the platform and
+ * notes the information for use with the configuration systems and any
+ * other parts of the system.
+ */
+
+static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip)
+{
+ struct gpio_chip *gc = &chip->chip;
+ int ret;
+
+ BUG_ON(!chip->base);
+ BUG_ON(!gc->label);
+ BUG_ON(!gc->ngpio);
+
+ spin_lock_init(&chip->lock);
+
+ if (!gc->direction_input)
+ gc->direction_input = samsung_gpiolib_2bit_input;
+ if (!gc->direction_output)
+ gc->direction_output = samsung_gpiolib_2bit_output;
+ if (!gc->set)
+ gc->set = samsung_gpiolib_set;
+ if (!gc->get)
+ gc->get = samsung_gpiolib_get;
+
+#ifdef CONFIG_PM
+ if (chip->pm != NULL) {
+ if (!chip->pm->save || !chip->pm->resume)
+ printk(KERN_ERR "gpio: %s has missing PM functions\n",
+ gc->label);
+ } else
+ printk(KERN_ERR "gpio: %s has no PM function\n", gc->label);
+#endif
+
+ /* gpiochip_add() prints its own failure message on error. */
+ ret = gpiochip_add(gc);
+ if (ret >= 0)
+ s3c_gpiolib_track(chip);
+}
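/*
 * Minimal usage sketch (editorial aside, not part of the patch; the base
 * address, label and pin count below are invented): a bank descriptor only
 * needs its register base, label and ngpio filled in before
 * samsung_gpiolib_add() supplies the default 2-bit accessors and registers
 * it with gpiolib.
 */
static struct samsung_gpio_chip example_bank = {
	.base = (void __iomem *)0xf0000000,	/* hypothetical static mapping */
	.chip = {
		.label	= "GPEXAMPLE",
		.ngpio	= 8,
	},
};
/* ... later, from an __init path: samsung_gpiolib_add(&example_bank); */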
+
+static void __init s3c24xx_gpiolib_add_chips(struct samsung_gpio_chip *chip,
+ int nr_chips, void __iomem *base)
+{
+ int i;
+ struct gpio_chip *gc = &chip->chip;
+
+ for (i = 0 ; i < nr_chips; i++, chip++) {
+ /* skip banks not present on SoC */
+ if (chip->chip.base >= S3C_GPIO_END)
+ continue;
+
+ if (!chip->config)
+ chip->config = &s3c24xx_gpiocfg_default;
+ if (!chip->pm)
+ chip->pm = __gpio_pm(&samsung_gpio_pm_2bit);
+ if ((base != NULL) && (chip->base == NULL))
+ chip->base = base + ((i) * 0x10);
+
+ if (!gc->direction_input)
+ gc->direction_input = samsung_gpiolib_2bit_input;
+ if (!gc->direction_output)
+ gc->direction_output = samsung_gpiolib_2bit_output;
+
+ samsung_gpiolib_add(chip);
+ }
+}
+
+static void __init samsung_gpiolib_add_2bit_chips(struct samsung_gpio_chip *chip,
+ int nr_chips, void __iomem *base,
+ unsigned int offset)
+{
+ int i;
+
+ for (i = 0 ; i < nr_chips; i++, chip++) {
+ chip->chip.direction_input = samsung_gpiolib_2bit_input;
+ chip->chip.direction_output = samsung_gpiolib_2bit_output;
+
+ if (!chip->config)
+ chip->config = &samsung_gpio_cfgs[7];
+ if (!chip->pm)
+ chip->pm = __gpio_pm(&samsung_gpio_pm_2bit);
+ if ((base != NULL) && (chip->base == NULL))
+ chip->base = base + ((i) * offset);
+
+ samsung_gpiolib_add(chip);
+ }
+}
+
+/*
+ * samsung_gpiolib_add_4bit_chips - 4bit single register GPIO config.
+ * @chip: The gpio chip that is being configured.
+ * @nr_chips: The number of chips (gpio ports) for the GPIO being configured.
+ *
+ * This helper deals with the GPIO cases where the control register has 4 bits
+ * of control per GPIO, generally in the form of:
+ *	0000 = Input
+ *	0001 = Output
+ *	others = Special functions (dependent on bank)
+ *
+ * Note: since the code already deals with the case where there are two
+ * control registers instead of one, we do not need a wholly separate set of
+ * functions (beyond samsung_gpiolib_add_4bit2_chips() below) for each case.
+ */
+
+static void __init samsung_gpiolib_add_4bit_chips(struct samsung_gpio_chip *chip,
+ int nr_chips, void __iomem *base)
+{
+ int i;
+
+ for (i = 0 ; i < nr_chips; i++, chip++) {
+ chip->chip.direction_input = samsung_gpiolib_4bit_input;
+ chip->chip.direction_output = samsung_gpiolib_4bit_output;
+
+ if (!chip->config)
+ chip->config = &samsung_gpio_cfgs[2];
+ if (!chip->pm)
+ chip->pm = __gpio_pm(&samsung_gpio_pm_4bit);
+ if ((base != NULL) && (chip->base == NULL))
+ chip->base = base + ((i) * 0x20);
+
+ samsung_gpiolib_add(chip);
+ }
+}
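/*
 * Editorial sketch (not part of the patch): with the 4-bit encoding described
 * above, each pin on a single-register bank owns one nibble of the CON
 * register. A hypothetical helper that only switches a pin between input
 * (0000) and output (0001) could look like this; the driver's real
 * samsung_gpiolib_4bit_input/output helpers referenced above also update the
 * data register on the output path.
 */
static int example_4bit_set_direction(void __iomem *con, unsigned int offset,
				      bool output)
{
	u32 val = __raw_readl(con);

	val &= ~(0xf << (offset * 4));		/* clear the pin's nibble */
	if (output)
		val |= 0x1 << (offset * 4);	/* 0001 = output, 0000 = input */
	__raw_writel(val, con);

	return 0;
}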
+
+static void __init samsung_gpiolib_add_4bit2_chips(struct samsung_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++) {
+ chip->chip.direction_input = samsung_gpiolib_4bit2_input;
+ chip->chip.direction_output = samsung_gpiolib_4bit2_output;
+
+ if (!chip->config)
+ chip->config = &samsung_gpio_cfgs[2];
+ if (!chip->pm)
+ chip->pm = __gpio_pm(&samsung_gpio_pm_4bit);
+
+ samsung_gpiolib_add(chip);
+ }
+}
+
+static void __init s5p64x0_gpiolib_add_rbank(struct samsung_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++) {
+ chip->chip.direction_input = s5p64x0_gpiolib_rbank_input;
+ chip->chip.direction_output = s5p64x0_gpiolib_rbank_output;
+
+ if (!chip->pm)
+ chip->pm = __gpio_pm(&samsung_gpio_pm_4bit);
+
+ samsung_gpiolib_add(chip);
+ }
+}
+
+int samsung_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct samsung_gpio_chip *samsung_chip = container_of(chip, struct samsung_gpio_chip, chip);
+
+ return samsung_chip->irq_base + offset;
+}
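/*
 * Editorial note: banks using this helper set .irq_base and .to_irq in their
 * samsung_gpio_chip entries, so the mapping is a plain offset. For example,
 * with the GPIOG entry below (.irq_base = IRQ_EINT8),
 * gpio_to_irq(S3C2410_GPG(3)) resolves to IRQ_EINT8 + 3.
 */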
+
+#ifdef CONFIG_PLAT_S3C24XX
+static int s3c24xx_gpiolib_fbank_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset < 4)
+ return IRQ_EINT0 + offset;
+
+ if (offset < 8)
+ return IRQ_EINT4 + offset - 4;
+
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_PLAT_S3C64XX
+static int s3c64xx_gpiolib_mbank_to_irq(struct gpio_chip *chip, unsigned pin)
+{
+ return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
+}
+
+static int s3c64xx_gpiolib_lbank_to_irq(struct gpio_chip *chip, unsigned pin)
+{
+ return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
+}
+#endif
+
+struct samsung_gpio_chip s3c24xx_gpios[] = {
+#ifdef CONFIG_PLAT_S3C24XX
+ {
+ .config = &s3c24xx_gpiocfg_banka,
+ .chip = {
+ .base = S3C2410_GPA(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOA",
+ .ngpio = 24,
+ .direction_input = s3c24xx_gpiolib_banka_input,
+ .direction_output = s3c24xx_gpiolib_banka_output,
+ },
+ }, {
+ .chip = {
+ .base = S3C2410_GPB(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOB",
+ .ngpio = 16,
+ },
+ }, {
+ .chip = {
+ .base = S3C2410_GPC(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOC",
+ .ngpio = 16,
+ },
+ }, {
+ .chip = {
+ .base = S3C2410_GPD(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOD",
+ .ngpio = 16,
+ },
+ }, {
+ .chip = {
+ .base = S3C2410_GPE(0),
+ .label = "GPIOE",
+ .owner = THIS_MODULE,
+ .ngpio = 16,
+ },
+ }, {
+ .chip = {
+ .base = S3C2410_GPF(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOF",
+ .ngpio = 8,
+ .to_irq = s3c24xx_gpiolib_fbank_to_irq,
+ },
+ }, {
+ .irq_base = IRQ_EINT8,
+ .chip = {
+ .base = S3C2410_GPG(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOG",
+ .ngpio = 16,
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .chip = {
+ .base = S3C2410_GPH(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOH",
+ .ngpio = 11,
+ },
+ },
+	/* GPIOs for the S3C2443 and later devices. */
+ {
+ .base = S3C2440_GPJCON,
+ .chip = {
+ .base = S3C2410_GPJ(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOJ",
+ .ngpio = 16,
+ },
+ }, {
+ .base = S3C2443_GPKCON,
+ .chip = {
+ .base = S3C2410_GPK(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOK",
+ .ngpio = 16,
+ },
+ }, {
+ .base = S3C2443_GPLCON,
+ .chip = {
+ .base = S3C2410_GPL(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOL",
+ .ngpio = 15,
+ },
+ }, {
+ .base = S3C2443_GPMCON,
+ .chip = {
+ .base = S3C2410_GPM(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOM",
+ .ngpio = 2,
+ },
+ },
+#endif
+};
+
+/*
+ * S3C64XX GPIO bank summary:
+ *
+ * Bank GPIOs Style SlpCon ExtInt Group
+ * A 8 4Bit Yes 1
+ * B 7 4Bit Yes 1
+ * C 8 4Bit Yes 2
+ * D 5 4Bit Yes 3
+ * E 5 4Bit Yes None
+ * F 16 2Bit Yes 4 [1]
+ * G 7 4Bit Yes 5
+ * H 10 4Bit[2] Yes 6
+ * I 16 2Bit Yes None
+ * J 12 2Bit Yes None
+ * K 16 4Bit[2] No None
+ * L 15 4Bit[2] No None
+ * M 6 4Bit No IRQ_EINT
+ * N 16 2Bit No IRQ_EINT
+ * O 16 2Bit Yes 7
+ * P 15 2Bit Yes 8
+ * Q 9 2Bit Yes 9
+ *
+ * [1] BANKF pins 14,15 do not form part of the external interrupt sources
+ * [2] BANK has two control registers, GPxCON0 and GPxCON1
+ */
+
+static struct samsung_gpio_chip s3c64xx_gpios_4bit[] = {
+#ifdef CONFIG_PLAT_S3C64XX
+ {
+ .chip = {
+ .base = S3C64XX_GPA(0),
+ .ngpio = S3C64XX_GPIO_A_NR,
+ .label = "GPA",
+ },
+ }, {
+ .chip = {
+ .base = S3C64XX_GPB(0),
+ .ngpio = S3C64XX_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S3C64XX_GPC(0),
+ .ngpio = S3C64XX_GPIO_C_NR,
+ .label = "GPC",
+ },
+ }, {
+ .chip = {
+ .base = S3C64XX_GPD(0),
+ .ngpio = S3C64XX_GPIO_D_NR,
+ .label = "GPD",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[0],
+ .chip = {
+ .base = S3C64XX_GPE(0),
+ .ngpio = S3C64XX_GPIO_E_NR,
+ .label = "GPE",
+ },
+ }, {
+ .base = S3C64XX_GPG_BASE,
+ .chip = {
+ .base = S3C64XX_GPG(0),
+ .ngpio = S3C64XX_GPIO_G_NR,
+ .label = "GPG",
+ },
+ }, {
+ .base = S3C64XX_GPM_BASE,
+ .config = &samsung_gpio_cfgs[1],
+ .chip = {
+ .base = S3C64XX_GPM(0),
+ .ngpio = S3C64XX_GPIO_M_NR,
+ .label = "GPM",
+ .to_irq = s3c64xx_gpiolib_mbank_to_irq,
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s3c64xx_gpios_4bit2[] = {
+#ifdef CONFIG_PLAT_S3C64XX
+ {
+ .base = S3C64XX_GPH_BASE + 0x4,
+ .chip = {
+ .base = S3C64XX_GPH(0),
+ .ngpio = S3C64XX_GPIO_H_NR,
+ .label = "GPH",
+ },
+ }, {
+ .base = S3C64XX_GPK_BASE + 0x4,
+ .config = &samsung_gpio_cfgs[0],
+ .chip = {
+ .base = S3C64XX_GPK(0),
+ .ngpio = S3C64XX_GPIO_K_NR,
+ .label = "GPK",
+ },
+ }, {
+ .base = S3C64XX_GPL_BASE + 0x4,
+ .config = &samsung_gpio_cfgs[1],
+ .chip = {
+ .base = S3C64XX_GPL(0),
+ .ngpio = S3C64XX_GPIO_L_NR,
+ .label = "GPL",
+ .to_irq = s3c64xx_gpiolib_lbank_to_irq,
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s3c64xx_gpios_2bit[] = {
+#ifdef CONFIG_PLAT_S3C64XX
+ {
+ .base = S3C64XX_GPF_BASE,
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S3C64XX_GPF(0),
+ .ngpio = S3C64XX_GPIO_F_NR,
+ .label = "GPF",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[7],
+ .chip = {
+ .base = S3C64XX_GPI(0),
+ .ngpio = S3C64XX_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[7],
+ .chip = {
+ .base = S3C64XX_GPJ(0),
+ .ngpio = S3C64XX_GPIO_J_NR,
+ .label = "GPJ",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S3C64XX_GPO(0),
+ .ngpio = S3C64XX_GPIO_O_NR,
+ .label = "GPO",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S3C64XX_GPP(0),
+ .ngpio = S3C64XX_GPIO_P_NR,
+ .label = "GPP",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S3C64XX_GPQ(0),
+ .ngpio = S3C64XX_GPIO_Q_NR,
+ .label = "GPQ",
+ },
+ }, {
+ .base = S3C64XX_GPN_BASE,
+ .irq_base = IRQ_EINT(0),
+ .config = &samsung_gpio_cfgs[5],
+ .chip = {
+ .base = S3C64XX_GPN(0),
+ .ngpio = S3C64XX_GPIO_N_NR,
+ .label = "GPN",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+#endif
+};
+
+/*
+ * S5P6440 GPIO bank summary:
+ *
+ * Bank GPIOs Style SlpCon ExtInt Group
+ * A 6 4Bit Yes 1
+ * B 7 4Bit Yes 1
+ * C 8 4Bit Yes 2
+ * F 2 2Bit Yes 4 [1]
+ * G 7 4Bit Yes 5
+ * H 10 4Bit[2] Yes 6
+ * I 16 2Bit Yes None
+ * J 12 2Bit Yes None
+ * N 16 2Bit No IRQ_EINT
+ * P 8 2Bit Yes 8
+ * R 15 4Bit[2] Yes 8
+ */
+
+static struct samsung_gpio_chip s5p6440_gpios_4bit[] = {
+#ifdef CONFIG_CPU_S5P6440
+ {
+ .chip = {
+ .base = S5P6440_GPA(0),
+ .ngpio = S5P6440_GPIO_A_NR,
+ .label = "GPA",
+ },
+ }, {
+ .chip = {
+ .base = S5P6440_GPB(0),
+ .ngpio = S5P6440_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5P6440_GPC(0),
+ .ngpio = S5P6440_GPIO_C_NR,
+ .label = "GPC",
+ },
+ }, {
+ .base = S5P64X0_GPG_BASE,
+ .chip = {
+ .base = S5P6440_GPG(0),
+ .ngpio = S5P6440_GPIO_G_NR,
+ .label = "GPG",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s5p6440_gpios_4bit2[] = {
+#ifdef CONFIG_CPU_S5P6440
+ {
+ .base = S5P64X0_GPH_BASE + 0x4,
+ .chip = {
+ .base = S5P6440_GPH(0),
+ .ngpio = S5P6440_GPIO_H_NR,
+ .label = "GPH",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s5p6440_gpios_rbank[] = {
+#ifdef CONFIG_CPU_S5P6440
+ {
+ .base = S5P64X0_GPR_BASE + 0x4,
+ .config = &s5p64x0_gpio_cfg_rbank,
+ .chip = {
+ .base = S5P6440_GPR(0),
+ .ngpio = S5P6440_GPIO_R_NR,
+ .label = "GPR",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s5p6440_gpios_2bit[] = {
+#ifdef CONFIG_CPU_S5P6440
+ {
+ .base = S5P64X0_GPF_BASE,
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S5P6440_GPF(0),
+ .ngpio = S5P6440_GPIO_F_NR,
+ .label = "GPF",
+ },
+ }, {
+ .base = S5P64X0_GPI_BASE,
+ .config = &samsung_gpio_cfgs[4],
+ .chip = {
+ .base = S5P6440_GPI(0),
+ .ngpio = S5P6440_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .base = S5P64X0_GPJ_BASE,
+ .config = &samsung_gpio_cfgs[4],
+ .chip = {
+ .base = S5P6440_GPJ(0),
+ .ngpio = S5P6440_GPIO_J_NR,
+ .label = "GPJ",
+ },
+ }, {
+ .base = S5P64X0_GPN_BASE,
+ .config = &samsung_gpio_cfgs[5],
+ .chip = {
+ .base = S5P6440_GPN(0),
+ .ngpio = S5P6440_GPIO_N_NR,
+ .label = "GPN",
+ },
+ }, {
+ .base = S5P64X0_GPP_BASE,
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S5P6440_GPP(0),
+ .ngpio = S5P6440_GPIO_P_NR,
+ .label = "GPP",
+ },
+ },
+#endif
+};
+
+/*
+ * S5P6450 GPIO bank summary:
+ *
+ * Bank GPIOs Style SlpCon ExtInt Group
+ * A 6 4Bit Yes 1
+ * B 7 4Bit Yes 1
+ * C 8 4Bit Yes 2
+ * D 8 4Bit Yes None
+ * F 2 2Bit Yes None
+ * G 14 4Bit[2] Yes 5
+ * H 10 4Bit[2] Yes 6
+ * I 16 2Bit Yes None
+ * J 12 2Bit Yes None
+ * K 5 4Bit Yes None
+ * N 16 2Bit No IRQ_EINT
+ * P 11 2Bit Yes 8
+ * Q 14 2Bit Yes None
+ * R 15 4Bit[2] Yes None
+ * S 8 2Bit Yes None
+ *
+ * [1] BANKF pins 14,15 do not form part of the external interrupt sources
+ * [2] BANK has two control registers, GPxCON0 and GPxCON1
+ */
+
+static struct samsung_gpio_chip s5p6450_gpios_4bit[] = {
+#ifdef CONFIG_CPU_S5P6450
+ {
+ .chip = {
+ .base = S5P6450_GPA(0),
+ .ngpio = S5P6450_GPIO_A_NR,
+ .label = "GPA",
+ },
+ }, {
+ .chip = {
+ .base = S5P6450_GPB(0),
+ .ngpio = S5P6450_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5P6450_GPC(0),
+ .ngpio = S5P6450_GPIO_C_NR,
+ .label = "GPC",
+ },
+ }, {
+ .chip = {
+ .base = S5P6450_GPD(0),
+ .ngpio = S5P6450_GPIO_D_NR,
+ .label = "GPD",
+ },
+ }, {
+ .base = S5P6450_GPK_BASE,
+ .chip = {
+ .base = S5P6450_GPK(0),
+ .ngpio = S5P6450_GPIO_K_NR,
+ .label = "GPK",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s5p6450_gpios_4bit2[] = {
+#ifdef CONFIG_CPU_S5P6450
+ {
+ .base = S5P64X0_GPG_BASE + 0x4,
+ .chip = {
+ .base = S5P6450_GPG(0),
+ .ngpio = S5P6450_GPIO_G_NR,
+ .label = "GPG",
+ },
+ }, {
+ .base = S5P64X0_GPH_BASE + 0x4,
+ .chip = {
+ .base = S5P6450_GPH(0),
+ .ngpio = S5P6450_GPIO_H_NR,
+ .label = "GPH",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s5p6450_gpios_rbank[] = {
+#ifdef CONFIG_CPU_S5P6450
+ {
+ .base = S5P64X0_GPR_BASE + 0x4,
+ .config = &s5p64x0_gpio_cfg_rbank,
+ .chip = {
+ .base = S5P6450_GPR(0),
+ .ngpio = S5P6450_GPIO_R_NR,
+ .label = "GPR",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip s5p6450_gpios_2bit[] = {
+#ifdef CONFIG_CPU_S5P6450
+ {
+ .base = S5P64X0_GPF_BASE,
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S5P6450_GPF(0),
+ .ngpio = S5P6450_GPIO_F_NR,
+ .label = "GPF",
+ },
+ }, {
+ .base = S5P64X0_GPI_BASE,
+ .config = &samsung_gpio_cfgs[4],
+ .chip = {
+ .base = S5P6450_GPI(0),
+ .ngpio = S5P6450_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .base = S5P64X0_GPJ_BASE,
+ .config = &samsung_gpio_cfgs[4],
+ .chip = {
+ .base = S5P6450_GPJ(0),
+ .ngpio = S5P6450_GPIO_J_NR,
+ .label = "GPJ",
+ },
+ }, {
+ .base = S5P64X0_GPN_BASE,
+ .config = &samsung_gpio_cfgs[5],
+ .chip = {
+ .base = S5P6450_GPN(0),
+ .ngpio = S5P6450_GPIO_N_NR,
+ .label = "GPN",
+ },
+ }, {
+ .base = S5P64X0_GPP_BASE,
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S5P6450_GPP(0),
+ .ngpio = S5P6450_GPIO_P_NR,
+ .label = "GPP",
+ },
+ }, {
+ .base = S5P6450_GPQ_BASE,
+ .config = &samsung_gpio_cfgs[5],
+ .chip = {
+ .base = S5P6450_GPQ(0),
+ .ngpio = S5P6450_GPIO_Q_NR,
+ .label = "GPQ",
+ },
+ }, {
+ .base = S5P6450_GPS_BASE,
+ .config = &samsung_gpio_cfgs[6],
+ .chip = {
+ .base = S5P6450_GPS(0),
+ .ngpio = S5P6450_GPIO_S_NR,
+ .label = "GPS",
+ },
+ },
+#endif
+};
+
+/*
+ * S5PC100 GPIO bank summary:
+ *
+ * Bank GPIOs Style INT Type
+ * A0 8 4Bit GPIO_INT0
+ * A1 5 4Bit GPIO_INT1
+ * B 8 4Bit GPIO_INT2
+ * C 5 4Bit GPIO_INT3
+ * D 7 4Bit GPIO_INT4
+ * E0 8 4Bit GPIO_INT5
+ * E1 6 4Bit GPIO_INT6
+ * F0 8 4Bit GPIO_INT7
+ * F1 8 4Bit GPIO_INT8
+ * F2 8 4Bit GPIO_INT9
+ * F3 4 4Bit GPIO_INT10
+ * G0 8 4Bit GPIO_INT11
+ * G1 3 4Bit GPIO_INT12
+ * G2 7 4Bit GPIO_INT13
+ * G3 7 4Bit GPIO_INT14
+ * H0 8 4Bit WKUP_INT
+ * H1 8 4Bit WKUP_INT
+ * H2 8 4Bit WKUP_INT
+ * H3 8 4Bit WKUP_INT
+ * I 8 4Bit GPIO_INT15
+ * J0 8 4Bit GPIO_INT16
+ * J1 5 4Bit GPIO_INT17
+ * J2 8 4Bit GPIO_INT18
+ * J3 8 4Bit GPIO_INT19
+ * J4 4 4Bit GPIO_INT20
+ * K0 8 4Bit None
+ * K1 6 4Bit None
+ * K2 8 4Bit None
+ * K3 8 4Bit None
+ * L0 8 4Bit None
+ * L1 8 4Bit None
+ * L2 8 4Bit None
+ * L3 8 4Bit None
+ */
+
+static struct samsung_gpio_chip s5pc100_gpios_4bit[] = {
+#ifdef CONFIG_CPU_S5PC100
+ {
+ .chip = {
+ .base = S5PC100_GPA0(0),
+ .ngpio = S5PC100_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPA1(0),
+ .ngpio = S5PC100_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPB(0),
+ .ngpio = S5PC100_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPC(0),
+ .ngpio = S5PC100_GPIO_C_NR,
+ .label = "GPC",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPD(0),
+ .ngpio = S5PC100_GPIO_D_NR,
+ .label = "GPD",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPE0(0),
+ .ngpio = S5PC100_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPE1(0),
+ .ngpio = S5PC100_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF0(0),
+ .ngpio = S5PC100_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF1(0),
+ .ngpio = S5PC100_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF2(0),
+ .ngpio = S5PC100_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF3(0),
+ .ngpio = S5PC100_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG0(0),
+ .ngpio = S5PC100_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG1(0),
+ .ngpio = S5PC100_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG2(0),
+ .ngpio = S5PC100_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG3(0),
+ .ngpio = S5PC100_GPIO_G3_NR,
+ .label = "GPG3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPI(0),
+ .ngpio = S5PC100_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ0(0),
+ .ngpio = S5PC100_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ1(0),
+ .ngpio = S5PC100_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ2(0),
+ .ngpio = S5PC100_GPIO_J2_NR,
+ .label = "GPJ2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ3(0),
+ .ngpio = S5PC100_GPIO_J3_NR,
+ .label = "GPJ3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ4(0),
+ .ngpio = S5PC100_GPIO_J4_NR,
+ .label = "GPJ4",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPK0(0),
+ .ngpio = S5PC100_GPIO_K0_NR,
+ .label = "GPK0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPK1(0),
+ .ngpio = S5PC100_GPIO_K1_NR,
+ .label = "GPK1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPK2(0),
+ .ngpio = S5PC100_GPIO_K2_NR,
+ .label = "GPK2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPK3(0),
+ .ngpio = S5PC100_GPIO_K3_NR,
+ .label = "GPK3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPL0(0),
+ .ngpio = S5PC100_GPIO_L0_NR,
+ .label = "GPL0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPL1(0),
+ .ngpio = S5PC100_GPIO_L1_NR,
+ .label = "GPL1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPL2(0),
+ .ngpio = S5PC100_GPIO_L2_NR,
+ .label = "GPL2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPL3(0),
+ .ngpio = S5PC100_GPIO_L3_NR,
+ .label = "GPL3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPL4(0),
+ .ngpio = S5PC100_GPIO_L4_NR,
+ .label = "GPL4",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PC100_GPH0(0),
+ .ngpio = S5PC100_GPIO_H0_NR,
+ .label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PC100_GPH1(0),
+ .ngpio = S5PC100_GPIO_H1_NR,
+ .label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PC100_GPH2(0),
+ .ngpio = S5PC100_GPIO_H2_NR,
+ .label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PC100_GPH3(0),
+ .ngpio = S5PC100_GPIO_H3_NR,
+ .label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+#endif
+};
+
+/*
+ * The following are the gpio banks in S5PV210/S5PC110.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * structure samsung_gpio_cfgs[3] in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: the initialization of the 'base' member of the samsung_gpio_chip
+ * structure uses the above macro and depends on the banks being listed in
+ * order here.
+ */
+
+static struct samsung_gpio_chip s5pv210_gpios_4bit[] = {
+#ifdef CONFIG_CPU_S5PV210
+ {
+ .chip = {
+ .base = S5PV210_GPA0(0),
+ .ngpio = S5PV210_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPA1(0),
+ .ngpio = S5PV210_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPB(0),
+ .ngpio = S5PV210_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC0(0),
+ .ngpio = S5PV210_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC1(0),
+ .ngpio = S5PV210_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD0(0),
+ .ngpio = S5PV210_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD1(0),
+ .ngpio = S5PV210_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE0(0),
+ .ngpio = S5PV210_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE1(0),
+ .ngpio = S5PV210_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF0(0),
+ .ngpio = S5PV210_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF1(0),
+ .ngpio = S5PV210_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF2(0),
+ .ngpio = S5PV210_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF3(0),
+ .ngpio = S5PV210_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG0(0),
+ .ngpio = S5PV210_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG1(0),
+ .ngpio = S5PV210_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG2(0),
+ .ngpio = S5PV210_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG3(0),
+ .ngpio = S5PV210_GPIO_G3_NR,
+ .label = "GPG3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPI(0),
+ .ngpio = S5PV210_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ0(0),
+ .ngpio = S5PV210_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ1(0),
+ .ngpio = S5PV210_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ2(0),
+ .ngpio = S5PV210_GPIO_J2_NR,
+ .label = "GPJ2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ3(0),
+ .ngpio = S5PV210_GPIO_J3_NR,
+ .label = "GPJ3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ4(0),
+ .ngpio = S5PV210_GPIO_J4_NR,
+ .label = "GPJ4",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_MP01(0),
+ .ngpio = S5PV210_GPIO_MP01_NR,
+ .label = "MP01",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_MP02(0),
+ .ngpio = S5PV210_GPIO_MP02_NR,
+ .label = "MP02",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_MP03(0),
+ .ngpio = S5PV210_GPIO_MP03_NR,
+ .label = "MP03",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_MP04(0),
+ .ngpio = S5PV210_GPIO_MP04_NR,
+ .label = "MP04",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_MP05(0),
+ .ngpio = S5PV210_GPIO_MP05_NR,
+ .label = "MP05",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PV210_GPH0(0),
+ .ngpio = S5PV210_GPIO_H0_NR,
+ .label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PV210_GPH1(0),
+ .ngpio = S5PV210_GPIO_H1_NR,
+ .label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PV210_GPH2(0),
+ .ngpio = S5PV210_GPIO_H2_NR,
+ .label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PV210_GPH3(0),
+ .ngpio = S5PV210_GPIO_H3_NR,
+ .label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+#endif
+};
+
+/*
+ * The following are the gpio banks in EXYNOS4210.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * structure samsung_gpio_cfgs[3] in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: the initialization of the 'base' member of the samsung_gpio_chip
+ * structure uses the above macro and depends on the banks being listed in
+ * order here.
+ */
+
+static struct samsung_gpio_chip exynos4_gpios_1[] = {
+#ifdef CONFIG_ARCH_EXYNOS4
+ {
+ .chip = {
+ .base = EXYNOS4_GPA0(0),
+ .ngpio = EXYNOS4_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPA1(0),
+ .ngpio = EXYNOS4_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPB(0),
+ .ngpio = EXYNOS4_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPC0(0),
+ .ngpio = EXYNOS4_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPC1(0),
+ .ngpio = EXYNOS4_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPD0(0),
+ .ngpio = EXYNOS4_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPD1(0),
+ .ngpio = EXYNOS4_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE0(0),
+ .ngpio = EXYNOS4_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE1(0),
+ .ngpio = EXYNOS4_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE2(0),
+ .ngpio = EXYNOS4_GPIO_E2_NR,
+ .label = "GPE2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE3(0),
+ .ngpio = EXYNOS4_GPIO_E3_NR,
+ .label = "GPE3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE4(0),
+ .ngpio = EXYNOS4_GPIO_E4_NR,
+ .label = "GPE4",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF0(0),
+ .ngpio = EXYNOS4_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF1(0),
+ .ngpio = EXYNOS4_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF2(0),
+ .ngpio = EXYNOS4_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF3(0),
+ .ngpio = EXYNOS4_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip exynos4_gpios_2[] = {
+#ifdef CONFIG_ARCH_EXYNOS4
+ {
+ .chip = {
+ .base = EXYNOS4_GPJ0(0),
+ .ngpio = EXYNOS4_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPJ1(0),
+ .ngpio = EXYNOS4_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK0(0),
+ .ngpio = EXYNOS4_GPIO_K0_NR,
+ .label = "GPK0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK1(0),
+ .ngpio = EXYNOS4_GPIO_K1_NR,
+ .label = "GPK1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK2(0),
+ .ngpio = EXYNOS4_GPIO_K2_NR,
+ .label = "GPK2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK3(0),
+ .ngpio = EXYNOS4_GPIO_K3_NR,
+ .label = "GPK3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL0(0),
+ .ngpio = EXYNOS4_GPIO_L0_NR,
+ .label = "GPL0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL1(0),
+ .ngpio = EXYNOS4_GPIO_L1_NR,
+ .label = "GPL1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL2(0),
+ .ngpio = EXYNOS4_GPIO_L2_NR,
+ .label = "GPL2",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[8],
+ .chip = {
+ .base = EXYNOS4_GPY0(0),
+ .ngpio = EXYNOS4_GPIO_Y0_NR,
+ .label = "GPY0",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[8],
+ .chip = {
+ .base = EXYNOS4_GPY1(0),
+ .ngpio = EXYNOS4_GPIO_Y1_NR,
+ .label = "GPY1",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[8],
+ .chip = {
+ .base = EXYNOS4_GPY2(0),
+ .ngpio = EXYNOS4_GPIO_Y2_NR,
+ .label = "GPY2",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[8],
+ .chip = {
+ .base = EXYNOS4_GPY3(0),
+ .ngpio = EXYNOS4_GPIO_Y3_NR,
+ .label = "GPY3",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[8],
+ .chip = {
+ .base = EXYNOS4_GPY4(0),
+ .ngpio = EXYNOS4_GPIO_Y4_NR,
+ .label = "GPY4",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[8],
+ .chip = {
+ .base = EXYNOS4_GPY5(0),
+ .ngpio = EXYNOS4_GPIO_Y5_NR,
+ .label = "GPY5",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[8],
+ .chip = {
+ .base = EXYNOS4_GPY6(0),
+ .ngpio = EXYNOS4_GPIO_Y6_NR,
+ .label = "GPY6",
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC00),
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = EXYNOS4_GPX0(0),
+ .ngpio = EXYNOS4_GPIO_X0_NR,
+ .label = "GPX0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC20),
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = EXYNOS4_GPX1(0),
+ .ngpio = EXYNOS4_GPIO_X1_NR,
+ .label = "GPX1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC40),
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = EXYNOS4_GPX2(0),
+ .ngpio = EXYNOS4_GPIO_X2_NR,
+ .label = "GPX2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC60),
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = EXYNOS4_GPX3(0),
+ .ngpio = EXYNOS4_GPIO_X3_NR,
+ .label = "GPX3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip exynos4_gpios_3[] = {
+#ifdef CONFIG_ARCH_EXYNOS4
+ {
+ .chip = {
+ .base = EXYNOS4_GPZ(0),
+ .ngpio = EXYNOS4_GPIO_Z_NR,
+ .label = "GPZ",
+ },
+ },
+#endif
+};
+
+/* TODO: cleanup soc_is_* */
+static __init int samsung_gpiolib_init(void)
+{
+ struct samsung_gpio_chip *chip;
+ int i, nr_chips;
+ int group = 0;
+
+ samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
+
+ if (soc_is_s3c24xx()) {
+ s3c24xx_gpiolib_add_chips(s3c24xx_gpios,
+ ARRAY_SIZE(s3c24xx_gpios), S3C24XX_VA_GPIO);
+ } else if (soc_is_s3c64xx()) {
+ samsung_gpiolib_add_2bit_chips(s3c64xx_gpios_2bit,
+ ARRAY_SIZE(s3c64xx_gpios_2bit),
+ S3C64XX_VA_GPIO + 0xE0, 0x20);
+ samsung_gpiolib_add_4bit_chips(s3c64xx_gpios_4bit,
+ ARRAY_SIZE(s3c64xx_gpios_4bit),
+ S3C64XX_VA_GPIO);
+ samsung_gpiolib_add_4bit2_chips(s3c64xx_gpios_4bit2,
+ ARRAY_SIZE(s3c64xx_gpios_4bit2));
+ } else if (soc_is_s5p6440()) {
+ samsung_gpiolib_add_2bit_chips(s5p6440_gpios_2bit,
+ ARRAY_SIZE(s5p6440_gpios_2bit), NULL, 0x0);
+ samsung_gpiolib_add_4bit_chips(s5p6440_gpios_4bit,
+ ARRAY_SIZE(s5p6440_gpios_4bit), S5P_VA_GPIO);
+ samsung_gpiolib_add_4bit2_chips(s5p6440_gpios_4bit2,
+ ARRAY_SIZE(s5p6440_gpios_4bit2));
+ s5p64x0_gpiolib_add_rbank(s5p6440_gpios_rbank,
+ ARRAY_SIZE(s5p6440_gpios_rbank));
+ } else if (soc_is_s5p6450()) {
+ samsung_gpiolib_add_2bit_chips(s5p6450_gpios_2bit,
+ ARRAY_SIZE(s5p6450_gpios_2bit), NULL, 0x0);
+ samsung_gpiolib_add_4bit_chips(s5p6450_gpios_4bit,
+ ARRAY_SIZE(s5p6450_gpios_4bit), S5P_VA_GPIO);
+ samsung_gpiolib_add_4bit2_chips(s5p6450_gpios_4bit2,
+ ARRAY_SIZE(s5p6450_gpios_4bit2));
+ s5p64x0_gpiolib_add_rbank(s5p6450_gpios_rbank,
+ ARRAY_SIZE(s5p6450_gpios_rbank));
+ } else if (soc_is_s5pc100()) {
+ group = 0;
+ chip = s5pc100_gpios_4bit;
+ nr_chips = ARRAY_SIZE(s5pc100_gpios_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &samsung_gpio_cfgs[3];
+ chip->group = group++;
+ }
+ }
+ samsung_gpiolib_add_4bit_chips(s5pc100_gpios_4bit, nr_chips, S5P_VA_GPIO);
+#if defined(CONFIG_CPU_S5PC100) && defined(CONFIG_S5P_GPIO_INT)
+ s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
+#endif
+ } else if (soc_is_s5pv210()) {
+ group = 0;
+ chip = s5pv210_gpios_4bit;
+ nr_chips = ARRAY_SIZE(s5pv210_gpios_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &samsung_gpio_cfgs[3];
+ chip->group = group++;
+ }
+ }
+ samsung_gpiolib_add_4bit_chips(s5pv210_gpios_4bit, nr_chips, S5P_VA_GPIO);
+#if defined(CONFIG_CPU_S5PV210) && defined(CONFIG_S5P_GPIO_INT)
+ s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
+#endif
+ } else if (soc_is_exynos4210()) {
+ group = 0;
+
+ /* gpio part1 */
+ chip = exynos4_gpios_1;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_1);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos4_gpio_cfg;
+ chip->group = group++;
+ }
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_1, nr_chips, S5P_VA_GPIO1);
+
+ /* gpio part2 */
+ chip = exynos4_gpios_2;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_2);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos4_gpio_cfg;
+ chip->group = group++;
+ }
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_2, nr_chips, S5P_VA_GPIO2);
+
+ /* gpio part3 */
+ chip = exynos4_gpios_3;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_3);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos4_gpio_cfg;
+ chip->group = group++;
+ }
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_3, nr_chips, S5P_VA_GPIO3);
+
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_S5P_GPIO_INT)
+ s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
+ s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
+#endif
+ } else {
+ WARN(1, "Unknown SoC in gpio-samsung, no GPIOs added\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+core_initcall(samsung_gpiolib_init);
+
+int s3c_gpio_cfgpin(unsigned int pin, unsigned int config)
+{
+ struct samsung_gpio_chip *chip = samsung_gpiolib_getchip(pin);
+ unsigned long flags;
+ int offset;
+ int ret;
+
+ if (!chip)
+ return -EINVAL;
+
+ offset = pin - chip->chip.base;
+
+ samsung_gpio_lock(chip, flags);
+ ret = samsung_gpio_do_setcfg(chip, offset, config);
+ samsung_gpio_unlock(chip, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c_gpio_cfgpin);
+
+int s3c_gpio_cfgpin_range(unsigned int start, unsigned int nr,
+ unsigned int cfg)
+{
+ int ret;
+
+ for (; nr > 0; nr--, start++) {
+ ret = s3c_gpio_cfgpin(start, cfg);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(s3c_gpio_cfgpin_range);
+
+int s3c_gpio_cfgall_range(unsigned int start, unsigned int nr,
+ unsigned int cfg, samsung_gpio_pull_t pull)
+{
+ int ret;
+
+ for (; nr > 0; nr--, start++) {
+ s3c_gpio_setpull(start, pull);
+ ret = s3c_gpio_cfgpin(start, cfg);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(s3c_gpio_cfgall_range);
+
+unsigned s3c_gpio_getcfg(unsigned int pin)
+{
+ struct samsung_gpio_chip *chip = samsung_gpiolib_getchip(pin);
+ unsigned long flags;
+ unsigned ret = 0;
+ int offset;
+
+ if (chip) {
+ offset = pin - chip->chip.base;
+
+ samsung_gpio_lock(chip, flags);
+ ret = samsung_gpio_do_getcfg(chip, offset);
+ samsung_gpio_unlock(chip, flags);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c_gpio_getcfg);
+
+int s3c_gpio_setpull(unsigned int pin, samsung_gpio_pull_t pull)
+{
+ struct samsung_gpio_chip *chip = samsung_gpiolib_getchip(pin);
+ unsigned long flags;
+ int offset, ret;
+
+ if (!chip)
+ return -EINVAL;
+
+ offset = pin - chip->chip.base;
+
+ samsung_gpio_lock(chip, flags);
+ ret = samsung_gpio_do_setpull(chip, offset, pull);
+ samsung_gpio_unlock(chip, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c_gpio_setpull);
+
+samsung_gpio_pull_t s3c_gpio_getpull(unsigned int pin)
+{
+ struct samsung_gpio_chip *chip = samsung_gpiolib_getchip(pin);
+ unsigned long flags;
+ int offset;
+ u32 pup = 0;
+
+ if (chip) {
+ offset = pin - chip->chip.base;
+
+ samsung_gpio_lock(chip, flags);
+ pup = samsung_gpio_do_getpull(chip, offset);
+ samsung_gpio_unlock(chip, flags);
+ }
+
+ return (__force samsung_gpio_pull_t)pup;
+}
+EXPORT_SYMBOL(s3c_gpio_getpull);
+
+/* gpiolib wrappers until these are totally eliminated */
+
+void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
+{
+ int ret;
+
+ WARN_ON(to); /* should be none of these left */
+
+ if (!to) {
+ /* if pull is enabled, try first with up, and if that
+ * fails, try using down */
+
+ ret = s3c_gpio_setpull(pin, S3C_GPIO_PULL_UP);
+ if (ret)
+ s3c_gpio_setpull(pin, S3C_GPIO_PULL_DOWN);
+ } else {
+ s3c_gpio_setpull(pin, S3C_GPIO_PULL_NONE);
+ }
+}
+EXPORT_SYMBOL(s3c2410_gpio_pullup);
+
+void s3c2410_gpio_setpin(unsigned int pin, unsigned int to)
+{
+ /* do this via gpiolib until all users removed */
+
+ gpio_request(pin, "temporary");
+ gpio_set_value(pin, to);
+ gpio_free(pin);
+}
+EXPORT_SYMBOL(s3c2410_gpio_setpin);
+
+unsigned int s3c2410_gpio_getpin(unsigned int pin)
+{
+ struct samsung_gpio_chip *chip = samsung_gpiolib_getchip(pin);
+ unsigned long offs = pin - chip->chip.base;
+
+ return __raw_readl(chip->base + 0x04) & (1 << offs);
+}
+EXPORT_SYMBOL(s3c2410_gpio_getpin);
+
+#ifdef CONFIG_S5P_GPIO_DRVSTR
+s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
+{
+ struct samsung_gpio_chip *chip = samsung_gpiolib_getchip(pin);
+ unsigned int off;
+ void __iomem *reg;
+ int shift;
+ u32 drvstr;
+
+ if (!chip)
+ return -EINVAL;
+
+ off = pin - chip->chip.base;
+ shift = off * 2;
+ reg = chip->base + 0x0C;
+
+ drvstr = __raw_readl(reg);
+ drvstr = drvstr >> shift;
+ drvstr &= 0x3;
+
+ return (__force s5p_gpio_drvstr_t)drvstr;
+}
+EXPORT_SYMBOL(s5p_gpio_get_drvstr);
+
+int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
+{
+ struct samsung_gpio_chip *chip = samsung_gpiolib_getchip(pin);
+ unsigned int off;
+ void __iomem *reg;
+ int shift;
+ u32 tmp;
+
+ if (!chip)
+ return -EINVAL;
+
+ off = pin - chip->chip.base;
+ shift = off * 2;
+ reg = chip->base + 0x0C;
+
+ tmp = __raw_readl(reg);
+ tmp &= ~(0x3 << shift);
+ tmp |= drvstr << shift;
+
+ __raw_writel(tmp, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(s5p_gpio_set_drvstr);
+#endif /* CONFIG_S5P_GPIO_DRVSTR */
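/*
 * Editorial note: both drive-strength helpers above treat the register at
 * chip->base + 0x0C as a packed array of 2-bit fields, one per pin. For a pin
 * at offset 5 within its bank, shift = 10, so its strength setting occupies
 * bits 11:10 of that register.
 */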
+
+#ifdef CONFIG_PLAT_S3C24XX
+unsigned int s3c2410_modify_misccr(unsigned int clear, unsigned int change)
+{
+ unsigned long flags;
+ unsigned long misccr;
+
+ local_irq_save(flags);
+ misccr = __raw_readl(S3C24XX_MISCCR);
+ misccr &= ~clear;
+ misccr ^= change;
+ __raw_writel(misccr, S3C24XX_MISCCR);
+ local_irq_restore(flags);
+
+ return misccr;
+}
+EXPORT_SYMBOL(s3c2410_modify_misccr);
+#endif
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 747eb40e8af..61044c889f7 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -20,13 +20,15 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <asm/mach/irq.h>
+#include <mach/gpio-tegra.h>
#include <mach/iomap.h>
#include <mach/suspend.h>
@@ -34,9 +36,7 @@
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
#define GPIO_BIT(x) ((x) & 0x7)
-#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \
- GPIO_BANK(x) * 0x80 + \
- GPIO_PORT(x) * 4)
+#define GPIO_REG(x) (GPIO_BANK(x) * 0x80 + GPIO_PORT(x) * 4)
#define GPIO_CNF(x) (GPIO_REG(x) + 0x00)
#define GPIO_OE(x) (GPIO_REG(x) + 0x10)
@@ -75,15 +75,18 @@ struct tegra_gpio_bank {
};
-static struct tegra_gpio_bank tegra_gpio_banks[] = {
- {.bank = 0, .irq = INT_GPIO1},
- {.bank = 1, .irq = INT_GPIO2},
- {.bank = 2, .irq = INT_GPIO3},
- {.bank = 3, .irq = INT_GPIO4},
- {.bank = 4, .irq = INT_GPIO5},
- {.bank = 5, .irq = INT_GPIO6},
- {.bank = 6, .irq = INT_GPIO7},
-};
+static void __iomem *regs;
+static struct tegra_gpio_bank tegra_gpio_banks[7];
+
+static inline void tegra_gpio_writel(u32 val, u32 reg)
+{
+ __raw_writel(val, regs + reg);
+}
+
+static inline u32 tegra_gpio_readl(u32 reg)
+{
+ return __raw_readl(regs + reg);
+}
static int tegra_gpio_compose(int bank, int port, int bit)
{
@@ -97,7 +100,7 @@ static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
val = 0x100 << GPIO_BIT(gpio);
if (value)
val |= 1 << GPIO_BIT(gpio);
- __raw_writel(val, reg);
+ tegra_gpio_writel(val, reg);
}
void tegra_gpio_enable(int gpio)
@@ -117,7 +120,7 @@ static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return (__raw_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
+ return (tegra_gpio_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
}
static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -134,7 +137,10 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-
+static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return TEGRA_GPIO_TO_IRQ(offset);
+}
static struct gpio_chip tegra_gpio_chip = {
.label = "tegra-gpio",
@@ -142,6 +148,7 @@ static struct gpio_chip tegra_gpio_chip = {
.get = tegra_gpio_get,
.direction_output = tegra_gpio_direction_output,
.set = tegra_gpio_set,
+ .to_irq = tegra_gpio_to_irq,
.base = 0,
.ngpio = TEGRA_NR_GPIOS,
};
@@ -150,7 +157,7 @@ static void tegra_gpio_irq_ack(struct irq_data *d)
{
int gpio = d->irq - INT_GPIO_BASE;
- __raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
+ tegra_gpio_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
}
static void tegra_gpio_irq_mask(struct irq_data *d)
@@ -203,10 +210,10 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
spin_lock_irqsave(&bank->lvl_lock[port], flags);
- val = __raw_readl(GPIO_INT_LVL(gpio));
+ val = tegra_gpio_readl(GPIO_INT_LVL(gpio));
val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
val |= lvl_type << GPIO_BIT(gpio);
- __raw_writel(val, GPIO_INT_LVL(gpio));
+ tegra_gpio_writel(val, GPIO_INT_LVL(gpio));
spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
@@ -232,12 +239,12 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
for (port = 0; port < 4; port++) {
int gpio = tegra_gpio_compose(bank->bank, port, 0);
- unsigned long sta = __raw_readl(GPIO_INT_STA(gpio)) &
- __raw_readl(GPIO_INT_ENB(gpio));
- u32 lvl = __raw_readl(GPIO_INT_LVL(gpio));
+ unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) &
+ tegra_gpio_readl(GPIO_INT_ENB(gpio));
+ u32 lvl = tegra_gpio_readl(GPIO_INT_LVL(gpio));
for_each_set_bit(pin, &sta, 8) {
- __raw_writel(1 << pin, GPIO_INT_CLR(gpio));
+ tegra_gpio_writel(1 << pin, GPIO_INT_CLR(gpio));
/* if gpio is edge triggered, clear condition
 * before executing the handler so that we don't
@@ -271,11 +278,11 @@ void tegra_gpio_resume(void)
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
unsigned int gpio = (b<<5) | (p<<3);
- __raw_writel(bank->cnf[p], GPIO_CNF(gpio));
- __raw_writel(bank->out[p], GPIO_OUT(gpio));
- __raw_writel(bank->oe[p], GPIO_OE(gpio));
- __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
- __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
+ tegra_gpio_writel(bank->cnf[p], GPIO_CNF(gpio));
+ tegra_gpio_writel(bank->out[p], GPIO_OUT(gpio));
+ tegra_gpio_writel(bank->oe[p], GPIO_OE(gpio));
+ tegra_gpio_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
+ tegra_gpio_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
}
}
@@ -294,11 +301,11 @@ void tegra_gpio_suspend(void)
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
unsigned int gpio = (b<<5) | (p<<3);
- bank->cnf[p] = __raw_readl(GPIO_CNF(gpio));
- bank->out[p] = __raw_readl(GPIO_OUT(gpio));
- bank->oe[p] = __raw_readl(GPIO_OE(gpio));
- bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio));
- bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio));
+ bank->cnf[p] = tegra_gpio_readl(GPIO_CNF(gpio));
+ bank->out[p] = tegra_gpio_readl(GPIO_OUT(gpio));
+ bank->oe[p] = tegra_gpio_readl(GPIO_OE(gpio));
+ bank->int_enb[p] = tegra_gpio_readl(GPIO_INT_ENB(gpio));
+ bank->int_lvl[p] = tegra_gpio_readl(GPIO_INT_LVL(gpio));
}
}
local_irq_restore(flags);
@@ -328,38 +335,69 @@ static struct irq_chip tegra_gpio_irq_chip = {
*/
static struct lock_class_key gpio_lock_class;
-static int __init tegra_gpio_init(void)
+static int __devinit tegra_gpio_probe(struct platform_device *pdev)
{
+ struct resource *res;
struct tegra_gpio_bank *bank;
+ int gpio;
int i;
int j;
+ for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!res) {
+ dev_err(&pdev->dev, "Missing IRQ resource\n");
+ return -ENODEV;
+ }
+
+ bank = &tegra_gpio_banks[i];
+ bank->bank = i;
+ bank->irq = res->start;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Missing MEM resource\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "Couldn't request MEM resource\n");
+ return -ENODEV;
+ }
+
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "Couldn't ioremap regs\n");
+ return -ENODEV;
+ }
+
for (i = 0; i < 7; i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
- __raw_writel(0x00, GPIO_INT_ENB(gpio));
+ tegra_gpio_writel(0x00, GPIO_INT_ENB(gpio));
}
}
#ifdef CONFIG_OF_GPIO
- /*
- * This isn't ideal, but it gets things hooked up until this
- * driver is converted into a platform_device
- */
- tegra_gpio_chip.of_node = of_find_compatible_node(NULL, NULL,
- "nvidia,tegra20-gpio");
-#endif /* CONFIG_OF_GPIO */
+ tegra_gpio_chip.of_node = pdev->dev.of_node;
+#endif
gpiochip_add(&tegra_gpio_chip);
- for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
- bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))];
+ for (gpio = 0; gpio < TEGRA_NR_GPIOS; gpio++) {
+ int irq = TEGRA_GPIO_TO_IRQ(gpio);
+ /* No validity check; all Tegra GPIOs are valid IRQs */
- irq_set_lockdep_class(i, &gpio_lock_class);
- irq_set_chip_data(i, bank);
- irq_set_chip_and_handler(i, &tegra_gpio_irq_chip,
+ bank = &tegra_gpio_banks[GPIO_BANK(gpio)];
+
+ irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_chip_data(irq, bank);
+ irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
handle_simple_irq);
- set_irq_flags(i, IRQF_VALID);
+ set_irq_flags(irq, IRQF_VALID);
}
for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
@@ -375,6 +413,24 @@ static int __init tegra_gpio_init(void)
return 0;
}
+static struct of_device_id tegra_gpio_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-gpio", },
+ { },
+};
+
+static struct platform_driver tegra_gpio_driver = {
+ .driver = {
+ .name = "tegra-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_gpio_of_match,
+ },
+ .probe = tegra_gpio_probe,
+};
+
+static int __init tegra_gpio_init(void)
+{
+ return platform_driver_register(&tegra_gpio_driver);
+}
postcore_initcall(tegra_gpio_init);
void __init tegra_gpio_config(struct tegra_gpio_table *table, int num)
@@ -407,13 +463,13 @@ static int dbg_gpio_show(struct seq_file *s, void *unused)
seq_printf(s,
"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
i, j,
- __raw_readl(GPIO_CNF(gpio)),
- __raw_readl(GPIO_OE(gpio)),
- __raw_readl(GPIO_OUT(gpio)),
- __raw_readl(GPIO_IN(gpio)),
- __raw_readl(GPIO_INT_STA(gpio)),
- __raw_readl(GPIO_INT_ENB(gpio)),
- __raw_readl(GPIO_INT_LVL(gpio)));
+ tegra_gpio_readl(GPIO_CNF(gpio)),
+ tegra_gpio_readl(GPIO_OE(gpio)),
+ tegra_gpio_readl(GPIO_OUT(gpio)),
+ tegra_gpio_readl(GPIO_IN(gpio)),
+ tegra_gpio_readl(GPIO_INT_STA(gpio)),
+ tegra_gpio_readl(GPIO_INT_ENB(gpio)),
+ tegra_gpio_readl(GPIO_INT_LVL(gpio)));
}
}
return 0;
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c
new file mode 100644
index 00000000000..3fa3e2867e1
--- /dev/null
+++ b/drivers/gpio/gpio-tnetv107x.c
@@ -0,0 +1,205 @@
+/*
+ * Texas Instruments TNETV107X GPIO Controller
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+
+#include <mach/common.h>
+#include <mach/tnetv107x.h>
+
+struct tnetv107x_gpio_regs {
+ u32 idver;
+ u32 data_in[3];
+ u32 data_out[3];
+ u32 direction[3];
+ u32 enable[3];
+};
+
+#define gpio_reg_index(gpio) ((gpio) >> 5)
+#define gpio_reg_bit(gpio) BIT((gpio) & 0x1f)
+
+#define gpio_reg_rmw(reg, mask, val) \
+ __raw_writel((__raw_readl(reg) & ~(mask)) | (val), (reg))
+
+#define gpio_reg_set_bit(reg, gpio) \
+ gpio_reg_rmw((reg) + gpio_reg_index(gpio), 0, gpio_reg_bit(gpio))
+
+#define gpio_reg_clear_bit(reg, gpio) \
+ gpio_reg_rmw((reg) + gpio_reg_index(gpio), gpio_reg_bit(gpio), 0)
+
+#define gpio_reg_get_bit(reg, gpio) \
+ (__raw_readl((reg) + gpio_reg_index(gpio)) & gpio_reg_bit(gpio))
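/*
 * Editorial note: the data/direction/enable members above are arrays of
 * 32-bit words, so gpio_reg_index() selects the word and gpio_reg_bit() the
 * bit within it. For gpio 37, for instance,
 * gpio_reg_set_bit(regs->data_out, 37) sets bit 5 of data_out[1].
 */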
+
+#define chip2controller(chip) \
+ container_of(chip, struct davinci_gpio_controller, chip)
+
+#define TNETV107X_GPIO_CTLRS DIV_ROUND_UP(TNETV107X_N_GPIO, 32)
+
+static struct davinci_gpio_controller chips[TNETV107X_GPIO_CTLRS];
+
+static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_gpio_controller *ctlr = chip2controller(chip);
+ struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
+ unsigned gpio = chip->base + offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ gpio_reg_set_bit(regs->enable, gpio);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return 0;
+}
+
+static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_gpio_controller *ctlr = chip2controller(chip);
+ struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
+ unsigned gpio = chip->base + offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ gpio_reg_clear_bit(regs->enable, gpio);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+}
+
+static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_gpio_controller *ctlr = chip2controller(chip);
+ struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
+ unsigned gpio = chip->base + offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ gpio_reg_set_bit(regs->direction, gpio);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return 0;
+}
+
+static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct davinci_gpio_controller *ctlr = chip2controller(chip);
+ struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
+ unsigned gpio = chip->base + offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ if (value)
+ gpio_reg_set_bit(regs->data_out, gpio);
+ else
+ gpio_reg_clear_bit(regs->data_out, gpio);
+
+ gpio_reg_clear_bit(regs->direction, gpio);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return 0;
+}
+
+static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_gpio_controller *ctlr = chip2controller(chip);
+ struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
+ unsigned gpio = chip->base + offset;
+ int ret;
+
+ ret = gpio_reg_get_bit(regs->data_in, gpio);
+
+ return ret ? 1 : 0;
+}
+
+static void tnetv107x_gpio_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct davinci_gpio_controller *ctlr = chip2controller(chip);
+ struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
+ unsigned gpio = chip->base + offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ if (value)
+ gpio_reg_set_bit(regs->data_out, gpio);
+ else
+ gpio_reg_clear_bit(regs->data_out, gpio);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+}
+
+static int __init tnetv107x_gpio_setup(void)
+{
+ int i, base;
+ unsigned ngpio;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct tnetv107x_gpio_regs *regs;
+ struct davinci_gpio_controller *ctlr;
+
+ if (soc_info->gpio_type != GPIO_TYPE_TNETV107X)
+ return 0;
+
+ ngpio = soc_info->gpio_num;
+ if (ngpio == 0) {
+ pr_err("GPIO setup: how many GPIOs?\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(TNETV107X_N_GPIO < ngpio))
+ ngpio = TNETV107X_N_GPIO;
+
+ regs = ioremap(soc_info->gpio_base, SZ_4K);
+ if (WARN_ON(!regs))
+ return -EINVAL;
+
+ for (i = 0, base = 0; base < ngpio; i++, base += 32) {
+ ctlr = &chips[i];
+
+ ctlr->chip.label = "tnetv107x";
+ ctlr->chip.can_sleep = 0;
+ ctlr->chip.base = base;
+ ctlr->chip.ngpio = ngpio - base;
+ if (ctlr->chip.ngpio > 32)
+ ctlr->chip.ngpio = 32;
+
+ ctlr->chip.request = tnetv107x_gpio_request;
+ ctlr->chip.free = tnetv107x_gpio_free;
+ ctlr->chip.direction_input = tnetv107x_gpio_dir_in;
+ ctlr->chip.get = tnetv107x_gpio_get;
+ ctlr->chip.direction_output = tnetv107x_gpio_dir_out;
+ ctlr->chip.set = tnetv107x_gpio_set;
+
+ spin_lock_init(&ctlr->lock);
+
+ ctlr->regs = regs;
+ ctlr->set_data = &regs->data_out[i];
+ ctlr->clr_data = &regs->data_out[i];
+ ctlr->in_data = &regs->data_in[i];
+
+ gpiochip_add(&ctlr->chip);
+ }
+
+ soc_info->gpio_ctlrs = chips;
+ soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
+ return 0;
+}
+pure_initcall(tnetv107x_gpio_setup);
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
index 53e8255cb0b..4035778852b 100644
--- a/drivers/gpio/gpio-u300.c
+++ b/drivers/gpio/gpio-u300.c
@@ -1,18 +1,17 @@
/*
* U300 GPIO module.
*
- * Copyright (C) 2007-2009 ST-Ericsson AB
+ * Copyright (C) 2007-2011 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
 * This driver can handle either of the two basic GPIO cores
* available in the U300 platforms:
* COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
* COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
- * Notice that you also have inline macros in <asm-arch/gpio.h>
- * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Linus Walleij <linus.walleij@linaro.org>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
*/
#include <linux/module.h>
+#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -21,677 +20,898 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <mach/gpio-u300.h>
-/* Reference to GPIO block clock */
-static struct clk *clk;
+/*
+ * Bias modes for U300 GPIOs
+ *
+ * GPIO_U300_CONFIG_BIAS_UNKNOWN: this bias mode is not known to us
+ * GPIO_U300_CONFIG_BIAS_FLOAT: no specific bias, the GPIO will float or state
+ * is not controlled by software
+ * GPIO_U300_CONFIG_BIAS_PULL_UP: the GPIO will be pulled up (usually with high
+ * impedance to VDD)
+ */
+#define GPIO_U300_CONFIG_BIAS_UNKNOWN 0x1000
+#define GPIO_U300_CONFIG_BIAS_FLOAT 0x1001
+#define GPIO_U300_CONFIG_BIAS_PULL_UP 0x1002
-/* Memory resource */
-static struct resource *memres;
-static void __iomem *virtbase;
-static struct device *gpiodev;
+/*
+ * Drive modes for U300 GPIOs (output)
+ *
+ * GPIO_U300_CONFIG_DRIVE_PUSH_PULL: the GPIO will be driven actively high and
+ * low, this is the most typical case and is typically achieved with two
+ * active transistors on the output
+ * GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN: the GPIO will be driven with open drain
+ * (open collector) which means it is usually wired with other output
+ * ports which are then pulled up with an external resistor
+ * GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE: the GPIO will be driven with open drain
+ * (open emitter) which is the same as open drain mutatis mutandis but
+ * pulled to ground
+ */
+#define GPIO_U300_CONFIG_DRIVE_PUSH_PULL 0x2000
+#define GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN 0x2001
+#define GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE 0x2002
+
+/*
+ * Register definitions for COH 901 335 variant
+ */
+#define U300_335_PORT_STRIDE (0x1C)
+/* Port X Pin Data Register 32bit, this is both input and output (R/W) */
+#define U300_335_PXPDIR (0x00)
+#define U300_335_PXPDOR (0x00)
+/* Port X Pin Config Register 32bit (R/W) */
+#define U300_335_PXPCR (0x04)
+/* This register layout is the same in both blocks */
+#define U300_GPIO_PXPCR_ALL_PINS_MODE_MASK (0x0000FFFFUL)
+#define U300_GPIO_PXPCR_PIN_MODE_MASK (0x00000003UL)
+#define U300_GPIO_PXPCR_PIN_MODE_SHIFT (0x00000002UL)
+#define U300_GPIO_PXPCR_PIN_MODE_INPUT (0x00000000UL)
+#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL (0x00000001UL)
+#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN (0x00000002UL)
+#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE (0x00000003UL)
+/* Port X Interrupt Event Register 32bit (R/W) */
+#define U300_335_PXIEV (0x08)
+/* Port X Interrupt Enable Register 32bit (R/W) */
+#define U300_335_PXIEN (0x0C)
+/* Port X Interrupt Force Register 32bit (R/W) */
+#define U300_335_PXIFR (0x10)
+/* Port X Interrupt Config Register 32bit (R/W) */
+#define U300_335_PXICR (0x14)
+/* This register layout is the same in both blocks */
+#define U300_GPIO_PXICR_ALL_IRQ_CONFIG_MASK (0x000000FFUL)
+#define U300_GPIO_PXICR_IRQ_CONFIG_MASK (0x00000001UL)
+#define U300_GPIO_PXICR_IRQ_CONFIG_FALLING_EDGE (0x00000000UL)
+#define U300_GPIO_PXICR_IRQ_CONFIG_RISING_EDGE (0x00000001UL)
+/* Port X Pull-up Enable Register 32bit (R/W) */
+#define U300_335_PXPER (0x18)
+/* This register layout is the same in both blocks */
+#define U300_GPIO_PXPER_ALL_PULL_UP_DISABLE_MASK (0x000000FFUL)
+#define U300_GPIO_PXPER_PULL_UP_DISABLE (0x00000001UL)
+/* Control Register 32bit (R/W) */
+#define U300_335_CR (0x54)
+#define U300_335_CR_BLOCK_CLOCK_ENABLE (0x00000001UL)
+
+/*
+ * Register definitions for COH 901 571 / 3 variant
+ */
+#define U300_571_PORT_STRIDE (0x30)
+/*
+ * Control Register 32bit (R/W)
+ * bits 15-9 (mask 0x0000FE00) contain the number of cores; 8*cores
+ * gives the number of GPIO pins.
+ * bits 8-2 (mask 0x000001FC) contain the core version ID.
+ */
+#define U300_571_CR (0x00)
+#define U300_571_CR_SYNC_SEL_ENABLE (0x00000002UL)
+#define U300_571_CR_BLOCK_CLKRQ_ENABLE (0x00000001UL)
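/*
 * Editorial sketch (not part of the patch): decoding the COH 901 571/3
 * control register fields described above. The mask and shift values are
 * taken from the comment; the helper name is hypothetical.
 */
static inline unsigned int u300_571_cr_to_ngpio(u32 cr)
{
	unsigned int ncores = (cr & 0x0000FE00) >> 9;	/* bits 15-9 */

	return ncores * 8;	/* 8 GPIO pins per core */
}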
+/*
+ * These registers have the same layout and function as the corresponding
+ * COH 901 335 registers, just at different offset.
+ */
+#define U300_571_PXPDIR (0x04)
+#define U300_571_PXPDOR (0x08)
+#define U300_571_PXPCR (0x0C)
+#define U300_571_PXPER (0x10)
+#define U300_571_PXIEV (0x14)
+#define U300_571_PXIEN (0x18)
+#define U300_571_PXIFR (0x1C)
+#define U300_571_PXICR (0x20)
+
+/* 8 bits per port, no version has more than 7 ports */
+#define U300_GPIO_PINS_PER_PORT 8
+#define U300_GPIO_MAX (U300_GPIO_PINS_PER_PORT * 7)
+
+struct u300_gpio {
+ struct gpio_chip chip;
+ struct list_head port_list;
+ struct clk *clk;
+ struct resource *memres;
+ void __iomem *base;
+ struct device *dev;
+ int irq_base;
+ u32 stride;
+ /* Register offsets */
+ u32 pcr;
+ u32 dor;
+ u32 dir;
+ u32 per;
+ u32 icr;
+ u32 ien;
+ u32 iev;
+};
struct u300_gpio_port {
- const char *name;
+ struct list_head node;
+ struct u300_gpio *gpio;
+ char name[8];
int irq;
int number;
+ u8 toggle_edge_mode;
};
+/*
+ * Macro to expand to read a specific register found in the "gpio"
+ * struct. It requires the struct u300_gpio *gpio variable to exist in
+ * its context. It calculates the port offset from the given pin
+ * offset, multiplies it by the port stride and adds the register offset
+ * so it provides a pointer to the desired register.
+ */
+#define U300_PIN_REG(pin, reg) \
+ (gpio->base + (pin >> 3) * gpio->stride + gpio->reg)
-static struct u300_gpio_port gpio_ports[] = {
- {
- .name = "gpio0",
- .number = 0,
- },
- {
- .name = "gpio1",
- .number = 1,
- },
- {
- .name = "gpio2",
- .number = 2,
- },
-#ifdef U300_COH901571_3
- {
- .name = "gpio3",
- .number = 3,
- },
- {
- .name = "gpio4",
- .number = 4,
- },
-#ifdef CONFIG_MACH_U300_BS335
- {
- .name = "gpio5",
- .number = 5,
- },
- {
- .name = "gpio6",
- .number = 6,
- },
-#endif
-#endif
+/*
+ * Provides a bitmask for a specific gpio pin inside an 8-bit GPIO
+ * register.
+ */
+#define U300_PIN_BIT(pin) \
+ (1 << (pin & 0x07))
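Taken together, U300_PIN_REG() and U300_PIN_BIT() turn a flat pin number into a register address and a bit mask. A stand-alone sketch of the same arithmetic, assuming the COH 901 571/3 port stride (0x30), its PXPCR offset (0x0C) and a hypothetical pin 19; nothing here is driver code, it only mirrors the macros:

#include <stdio.h>

/* Sketch only: mirrors the U300_PIN_REG()/U300_PIN_BIT() arithmetic */
int main(void)
{
	unsigned int pin = 19;                 /* hypothetical pin number */
	unsigned int port = pin >> 3;          /* 19 / 8 = port 2 */
	unsigned int pcr = port * 0x30 + 0x0C; /* PXPCR of port 2 = 0x6C */
	unsigned int bit = 1 << (pin & 0x07);  /* bit 3 -> mask 0x08 */

	printf("pin %u: port %u, PCR offset 0x%02x, bit mask 0x%02x\n",
	       pin, port, pcr, bit);
	return 0;
}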
+struct u300_gpio_confdata {
+ u16 bias_mode;
+ bool output;
+ int outval;
};
+/* BS335 has seven ports of 8 bits each = GPIO pins 0..55 */
+#define BS335_GPIO_NUM_PORTS 7
+/* BS365 has five ports of 8 bits each = GPIO pins 0..39 */
+#define BS365_GPIO_NUM_PORTS 5
-#ifdef U300_COH901571_3
+#define U300_FLOATING_INPUT { \
+ .bias_mode = GPIO_U300_CONFIG_BIAS_FLOAT, \
+ .output = false, \
+}
-/* Default input value */
-#define DEFAULT_OUTPUT_LOW 0
-#define DEFAULT_OUTPUT_HIGH 1
+#define U300_PULL_UP_INPUT { \
+ .bias_mode = GPIO_U300_CONFIG_BIAS_PULL_UP, \
+ .output = false, \
+}
-/* GPIO Pull-Up status */
-#define DISABLE_PULL_UP 0
-#define ENABLE_PULL_UP 1
+#define U300_OUTPUT_LOW { \
+ .output = true, \
+ .outval = 0, \
+}
-#define GPIO_NOT_USED 0
-#define GPIO_IN 1
-#define GPIO_OUT 2
+#define U300_OUTPUT_HIGH { \
+ .output = true, \
+ .outval = 1, \
+}
-struct u300_gpio_configuration_data {
- unsigned char pin_usage;
- unsigned char default_output_value;
- unsigned char pull_up;
-};
/* Initial configuration */
-const struct u300_gpio_configuration_data
-u300_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
-#ifdef CONFIG_MACH_U300_BS335
+static const struct u300_gpio_confdata __initdata
+bs335_gpio_config[BS335_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
/* Port 0, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_HIGH,
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
},
/* Port 1, pins 0-7 */
{
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_PULL_UP_INPUT,
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_HIGH,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
},
/* Port 2, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_LOW,
+ U300_PULL_UP_INPUT,
+ U300_OUTPUT_LOW,
+ U300_PULL_UP_INPUT,
},
/* Port 3, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_PULL_UP_INPUT,
+ U300_OUTPUT_LOW,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
},
/* Port 4, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
},
/* Port 5, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
},
/* Port 6, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
}
-#endif
+};
-#ifdef CONFIG_MACH_U300_BS365
+static const struct u300_gpio_confdata __initdata
+bs365_gpio_config[BS365_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
/* Port 0, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_LOW,
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_PULL_UP_INPUT,
+ U300_FLOATING_INPUT,
},
/* Port 1, pins 0-7 */
{
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ U300_OUTPUT_LOW,
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_LOW,
+ U300_FLOATING_INPUT,
+ U300_FLOATING_INPUT,
+ U300_OUTPUT_HIGH,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
},
/* Port 2, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ U300_FLOATING_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
},
/* Port 3, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
},
/* Port 4, pins 0-7 */
{
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
+ U300_PULL_UP_INPUT,
/* These 4 pins don't exist on DB3210 */
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
- {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
+ U300_OUTPUT_LOW,
}
-#endif
};
-#endif
-
-/* No users == we can power down GPIO */
-static int gpio_users;
-
-struct gpio_struct {
- int (*callback)(void *);
- void *data;
- int users;
-};
-
-static struct gpio_struct gpio_pin[U300_GPIO_MAX];
-
-/*
- * Let drivers register callback in order to get notified when there is
- * an interrupt on the gpio pin
+/**
+ * to_u300_gpio() - get the pointer to u300_gpio
+ * @chip: the gpio chip member of the structure u300_gpio
*/
-int gpio_register_callback(unsigned gpio, int (*func)(void *arg), void *data)
+static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
{
- if (gpio_pin[gpio].callback)
- dev_warn(gpiodev, "%s: WARNING: callback already "
- "registered for gpio pin#%d\n", __func__, gpio);
- gpio_pin[gpio].callback = func;
- gpio_pin[gpio].data = data;
-
- return 0;
+ return container_of(chip, struct u300_gpio, chip);
}
-EXPORT_SYMBOL(gpio_register_callback);
-int gpio_unregister_callback(unsigned gpio)
+static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- if (!gpio_pin[gpio].callback)
- dev_warn(gpiodev, "%s: WARNING: callback already "
- "unregistered for gpio pin#%d\n", __func__, gpio);
- gpio_pin[gpio].callback = NULL;
- gpio_pin[gpio].data = NULL;
+ struct u300_gpio *gpio = to_u300_gpio(chip);
- return 0;
+ return readl(U300_PIN_REG(offset, dir)) & U300_PIN_BIT(offset);
}
-EXPORT_SYMBOL(gpio_unregister_callback);
-/* Non-zero means valid */
-int gpio_is_valid(int number)
+static void u300_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- if (number >= 0 &&
- number < (U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT))
- return 1;
- return 0;
-}
-EXPORT_SYMBOL(gpio_is_valid);
+ struct u300_gpio *gpio = to_u300_gpio(chip);
+ unsigned long flags;
+ u32 val;
-int gpio_request(unsigned gpio, const char *label)
-{
- if (gpio_pin[gpio].users)
- return -EINVAL;
- else
- gpio_pin[gpio].users++;
+ local_irq_save(flags);
- gpio_users++;
+ val = readl(U300_PIN_REG(offset, dor));
+ if (value)
+ writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
+ else
+ writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
- return 0;
+ local_irq_restore(flags);
}
-EXPORT_SYMBOL(gpio_request);
-void gpio_free(unsigned gpio)
+static int u300_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- gpio_users--;
- gpio_pin[gpio].users--;
- if (unlikely(gpio_pin[gpio].users < 0)) {
- dev_warn(gpiodev, "warning: gpio#%d release mismatch\n",
- gpio);
- gpio_pin[gpio].users = 0;
- }
-
- return;
-}
-EXPORT_SYMBOL(gpio_free);
+ struct u300_gpio *gpio = to_u300_gpio(chip);
+ unsigned long flags;
+ u32 val;
-/* This returns zero or nonzero */
-int gpio_get_value(unsigned gpio)
-{
- return readl(virtbase + U300_GPIO_PXPDIR +
- PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING) & (1 << (gpio & 0x07));
+ local_irq_save(flags);
+ val = readl(U300_PIN_REG(offset, pcr));
+ /* Mask out this pin, note 2 bits per setting */
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
+ writel(val, U300_PIN_REG(offset, pcr));
+ local_irq_restore(flags);
+ return 0;
}
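The PCR packs one two-bit mode field per pin, which is why the code above shifts by (offset & 0x07) << 1. A minimal helper sketching that field update on a plain 32-bit value; the mask 0x3 and the push/pull value 0x1 mirror the defines earlier in this patch, everything else is illustrative:

/* Sketch: update the 2-bit mode field for one pin inside a PCR word */
static unsigned int pcr_set_mode(unsigned int pcr, unsigned int pin,
				 unsigned int mode)
{
	unsigned int shift = (pin & 0x07) << 1;	/* 2 bits per pin */

	pcr &= ~(0x3U << shift);		/* clear the old mode */
	pcr |= (mode & 0x3U) << shift;		/* e.g. 0x1 = push/pull output */
	return pcr;
}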
-EXPORT_SYMBOL(gpio_get_value);
-/*
- * We hope that the compiler will optimize away the unused branch
- * in case "value" is a constant
- */
-void gpio_set_value(unsigned gpio, int value)
+static int u300_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
{
- u32 val;
+ struct u300_gpio *gpio = to_u300_gpio(chip);
unsigned long flags;
+ u32 oldmode;
+ u32 val;
local_irq_save(flags);
- if (value) {
- /* set */
- val = readl(virtbase + U300_GPIO_PXPDOR +
- PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING)
- & (1 << (gpio & 0x07));
- writel(val | (1 << (gpio & 0x07)), virtbase +
- U300_GPIO_PXPDOR +
- PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
- } else {
- /* clear */
- val = readl(virtbase + U300_GPIO_PXPDOR +
- PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING)
- & (1 << (gpio & 0x07));
- writel(val & ~(1 << (gpio & 0x07)), virtbase +
- U300_GPIO_PXPDOR +
- PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ val = readl(U300_PIN_REG(offset, pcr));
+ /*
+ * Drive mode must be set by the special mode set function; default
+ * to push/pull mode if no mode has been selected.
+ */
+ oldmode = val & (U300_GPIO_PXPCR_PIN_MODE_MASK <<
+ ((offset & 0x07) << 1));
+ /* mode = 0 means input, else some mode is already set */
+ if (oldmode == 0) {
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK <<
+ ((offset & 0x07) << 1));
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
+ << ((offset & 0x07) << 1));
+ writel(val, U300_PIN_REG(offset, pcr));
}
+ u300_gpio_set(chip, offset, value);
local_irq_restore(flags);
+ return 0;
}
-EXPORT_SYMBOL(gpio_set_value);
-int gpio_direction_input(unsigned gpio)
+static int u300_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
+ struct u300_gpio *gpio = to_u300_gpio(chip);
+ int retirq = gpio->irq_base + offset;
+
+ dev_dbg(gpio->dev, "request IRQ for GPIO %d, return %d\n", offset,
+ retirq);
+ return retirq;
+}
+
+static int u300_gpio_config(struct gpio_chip *chip, unsigned offset,
+ u16 param, unsigned long *data)
+{
+ struct u300_gpio *gpio = to_u300_gpio(chip);
unsigned long flags;
u32 val;
- if (gpio > U300_GPIO_MAX)
- return -EINVAL;
-
local_irq_save(flags);
- val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- /* Mask out this pin*/
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
- /* This is not needed since it sets the bits to zero.*/
- /* val |= (U300_GPIO_PXPCR_PIN_MODE_INPUT << (gpio*2)); */
- writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
+ switch (param) {
+ case GPIO_U300_CONFIG_BIAS_UNKNOWN:
+ case GPIO_U300_CONFIG_BIAS_FLOAT:
+ val = readl(U300_PIN_REG(offset, per));
+ writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
+ break;
+ case GPIO_U300_CONFIG_BIAS_PULL_UP:
+ val = readl(U300_PIN_REG(offset, per));
+ writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
+ break;
+ case GPIO_U300_CONFIG_DRIVE_PUSH_PULL:
+ val = readl(U300_PIN_REG(offset, pcr));
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
+ << ((offset & 0x07) << 1));
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
+ << ((offset & 0x07) << 1));
+ writel(val, U300_PIN_REG(offset, pcr));
+ break;
+ case GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN:
+ val = readl(U300_PIN_REG(offset, pcr));
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
+ << ((offset & 0x07) << 1));
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN
+ << ((offset & 0x07) << 1));
+ writel(val, U300_PIN_REG(offset, pcr));
+ break;
+ case GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE:
+ val = readl(U300_PIN_REG(offset, pcr));
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
+ << ((offset & 0x07) << 1));
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE
+ << ((offset & 0x07) << 1));
+ writel(val, U300_PIN_REG(offset, pcr));
+ break;
+ default:
+ local_irq_restore(flags);
+ dev_err(gpio->dev, "illegal configuration requested\n");
+ return -EINVAL;
+ }
local_irq_restore(flags);
return 0;
}
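u300_gpio_config() is the hook for picking bias and drive modes per pin; since it and u300_gpio_set() are static, any caller has to live in this file. A hedged sketch of such a caller; the function name and the idea of forcing open drain before driving the line low are illustrative only:

/* Sketch: switch a pin to open drain before driving it low */
static void example_set_open_drain(struct gpio_chip *chip, unsigned offset)
{
	u300_gpio_config(chip, offset, GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN, NULL);
	u300_gpio_set(chip, offset, 0);
}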
-EXPORT_SYMBOL(gpio_direction_input);
-int gpio_direction_output(unsigned gpio, int value)
+static struct gpio_chip u300_gpio_chip = {
+ .label = "u300-gpio-chip",
+ .owner = THIS_MODULE,
+ .get = u300_gpio_get,
+ .set = u300_gpio_set,
+ .direction_input = u300_gpio_direction_input,
+ .direction_output = u300_gpio_direction_output,
+ .to_irq = u300_gpio_to_irq,
+};
+
+static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
{
- unsigned long flags;
u32 val;
- if (gpio > U300_GPIO_MAX)
- return -EINVAL;
-
- local_irq_save(flags);
- val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- /* Mask out this pin */
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
- /*
- * FIXME: configure for push/pull, open drain or open source per pin
- * in setup. The current driver will only support push/pull.
- */
- val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
- << ((gpio & 0x07) << 1));
- writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- gpio_set_value(gpio, value);
- local_irq_restore(flags);
- return 0;
+ val = readl(U300_PIN_REG(offset, icr));
+ /* Set mode depending on state */
+ if (u300_gpio_get(&gpio->chip, offset)) {
+ /* High now, let's trigger on falling edge next then */
+ writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+ dev_dbg(gpio->dev, "next IRQ on falling edge on pin %d\n",
+ offset);
+ } else {
+ /* Low now, let's trigger on rising edge next then */
+ writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+ dev_dbg(gpio->dev, "next IRQ on rising edge on pin %d\n",
+ offset);
+ }
}
-EXPORT_SYMBOL(gpio_direction_output);
-/*
- * Enable an IRQ, edge is rising edge (!= 0) or falling edge (==0).
- */
-void enable_irq_on_gpio_pin(unsigned gpio, int edge)
+static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
{
+ struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct u300_gpio *gpio = port->gpio;
+ int offset = d->irq - gpio->irq_base;
u32 val;
- unsigned long flags;
- local_irq_save(flags);
- val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- val |= (1 << (gpio & 0x07));
- writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- val = readl(virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- if (edge)
- val |= (1 << (gpio & 0x07));
- else
- val &= ~(1 << (gpio & 0x07));
- writel(val, virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- local_irq_restore(flags);
+ if ((trigger & IRQF_TRIGGER_RISING) &&
+ (trigger & IRQF_TRIGGER_FALLING)) {
+ /*
+ * The GPIO block can only trigger on falling OR rising edges,
+ * not both. So we need to toggle the mode whenever the pin
+ * goes from one state to the other with a special state flag
+ */
+ dev_dbg(gpio->dev,
+ "trigger on both rising and falling edge on pin %d\n",
+ offset);
+ port->toggle_edge_mode |= U300_PIN_BIT(offset);
+ u300_toggle_trigger(gpio, offset);
+ } else if (trigger & IRQF_TRIGGER_RISING) {
+ dev_dbg(gpio->dev, "trigger on rising edge on pin %d\n",
+ offset);
+ val = readl(U300_PIN_REG(offset, icr));
+ writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+ port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
+ } else if (trigger & IRQF_TRIGGER_FALLING) {
+ dev_dbg(gpio->dev, "trigger on falling edge on pin %d\n",
+ offset);
+ val = readl(U300_PIN_REG(offset, icr));
+ writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+ port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
+ }
+
+ return 0;
}
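From a consumer's point of view the edge emulation is invisible: a driver simply requests both edges on the Linux IRQ returned by gpio_to_irq(). A sketch of that usage, assuming <linux/gpio.h> and <linux/interrupt.h>; everything named example_* is hypothetical:

/* Sketch: a driver asking for both edges on a GPIO line */
static irqreturn_t example_gpio_isr(int irq, void *dev_id)
{
	/* the edge that fired is not reported; read the pin if needed */
	return IRQ_HANDLED;
}

static int example_request_both_edges(unsigned gpio_line, void *dev_id)
{
	int irq = gpio_to_irq(gpio_line);	/* ends up in u300_gpio_to_irq() */

	return request_irq(irq, example_gpio_isr,
			   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			   "example-both-edges", dev_id);
}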
-EXPORT_SYMBOL(enable_irq_on_gpio_pin);
-void disable_irq_on_gpio_pin(unsigned gpio)
+static void u300_gpio_irq_enable(struct irq_data *d)
{
+ struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct u300_gpio *gpio = port->gpio;
+ int offset = d->irq - gpio->irq_base;
u32 val;
unsigned long flags;
local_irq_save(flags);
- val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- val &= ~(1 << (gpio & 0x07));
- writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
+ val = readl(U300_PIN_REG(offset, ien));
+ writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
local_irq_restore(flags);
}
-EXPORT_SYMBOL(disable_irq_on_gpio_pin);
-/* Enable (value == 0) or disable (value == 1) internal pullup */
-void gpio_pullup(unsigned gpio, int value)
+static void u300_gpio_irq_disable(struct irq_data *d)
{
+ struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
+ struct u300_gpio *gpio = port->gpio;
+ int offset = d->irq - gpio->irq_base;
u32 val;
unsigned long flags;
local_irq_save(flags);
- if (value) {
- val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- writel(val | (1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
- PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
- } else {
- val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
- U300_GPIO_PORTX_SPACING);
- writel(val & ~(1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
- PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
- }
+ val = readl(U300_PIN_REG(offset, ien));
+ writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
local_irq_restore(flags);
}
-EXPORT_SYMBOL(gpio_pullup);
-static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+static struct irq_chip u300_gpio_irqchip = {
+ .name = "u300-gpio-irqchip",
+ .irq_enable = u300_gpio_irq_enable,
+ .irq_disable = u300_gpio_irq_disable,
+ .irq_set_type = u300_gpio_irq_type,
+
+};
+
+static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct u300_gpio_port *port = dev_id;
- u32 val;
- int pin;
+ struct u300_gpio_port *port = irq_get_handler_data(irq);
+ struct u300_gpio *gpio = port->gpio;
+ int pinoffset = port->number << 3; /* first pin of this port */
+ unsigned long val;
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
/* Read event register */
- val = readl(virtbase + U300_GPIO_PXIEV + port->number *
- U300_GPIO_PORTX_SPACING);
- /* Mask with enable register */
- val &= readl(virtbase + U300_GPIO_PXIEV + port->number *
- U300_GPIO_PORTX_SPACING);
+ val = readl(U300_PIN_REG(pinoffset, iev));
/* Mask relevant bits */
- val &= U300_GPIO_PXIEV_ALL_IRQ_EVENT_MASK;
+ val &= 0xFFU; /* 8 bits per port */
/* ACK IRQ (clear event) */
- writel(val, virtbase + U300_GPIO_PXIEV + port->number *
- U300_GPIO_PORTX_SPACING);
- /* Print message */
- while (val != 0) {
- unsigned gpio;
-
- pin = __ffs(val);
- /* mask off this pin */
- val &= ~(1 << pin);
- gpio = (port->number << 3) + pin;
-
- if (gpio_pin[gpio].callback)
- (void)gpio_pin[gpio].callback(gpio_pin[gpio].data);
- else
- dev_dbg(gpiodev, "stray GPIO IRQ on line %d\n",
- gpio);
+ writel(val, U300_PIN_REG(pinoffset, iev));
+
+ /* Call IRQ handler */
+ if (val != 0) {
+ int irqoffset;
+
+ for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
+ int pin_irq = gpio->irq_base + (port->number << 3)
+ + irqoffset;
+ int offset = pinoffset + irqoffset;
+
+ dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
+ pin_irq, offset);
+ generic_handle_irq(pin_irq);
+ /*
+ * Triggering IRQ on both rising and falling edge
+ * has to be emulated by toggling the trigger after each IRQ
+ */
+ if (port->toggle_edge_mode & U300_PIN_BIT(offset))
+ u300_toggle_trigger(gpio, offset);
+ }
}
- return IRQ_HANDLED;
+
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
-static void gpio_set_initial_values(void)
+static void __init u300_gpio_init_pin(struct u300_gpio *gpio,
+ int offset,
+ const struct u300_gpio_confdata *conf)
{
-#ifdef U300_COH901571_3
- int i, j;
- unsigned long flags;
- u32 val;
+ /* Set mode: input or output */
+ if (conf->output) {
+ u300_gpio_direction_output(&gpio->chip, offset, conf->outval);
- /* Write default values to all pins */
- for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
- val = 0;
- for (j = 0; j < 8; j++)
- val |= (u32) (u300_gpio_config[i][j].default_output_value != DEFAULT_OUTPUT_LOW) << j;
- local_irq_save(flags);
- writel(val, virtbase + U300_GPIO_PXPDOR + i * U300_GPIO_PORTX_SPACING);
- local_irq_restore(flags);
+ /* Deactivate bias mode for output */
+ u300_gpio_config(&gpio->chip, offset,
+ GPIO_U300_CONFIG_BIAS_FLOAT,
+ NULL);
+
+ /* Set drive mode for output */
+ u300_gpio_config(&gpio->chip, offset,
+ GPIO_U300_CONFIG_DRIVE_PUSH_PULL, NULL);
+
+ dev_dbg(gpio->dev, "set up pin %d as output, value: %d\n",
+ offset, conf->outval);
+ } else {
+ u300_gpio_direction_input(&gpio->chip, offset);
+
+ /* Always set output low on input pins */
+ u300_gpio_set(&gpio->chip, offset, 0);
+
+ /* Set bias mode for input */
+ u300_gpio_config(&gpio->chip, offset, conf->bias_mode, NULL);
+
+ dev_dbg(gpio->dev, "set up pin %d as input, bias: %04x\n",
+ offset, conf->bias_mode);
}
+}
- /*
- * Put all pins that are set to either 'GPIO_OUT' or 'GPIO_NOT_USED'
- * to output and 'GPIO_IN' to input for each port. And initialize
- * default value on outputs.
- */
- for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
- for (j = 0; j < U300_GPIO_PINS_PER_PORT; j++) {
- local_irq_save(flags);
- val = readl(virtbase + U300_GPIO_PXPCR +
- i * U300_GPIO_PORTX_SPACING);
- /* Mask out this pin */
- val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << (j << 1));
-
- if (u300_gpio_config[i][j].pin_usage != GPIO_IN)
- val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL << (j << 1));
- writel(val, virtbase + U300_GPIO_PXPCR +
- i * U300_GPIO_PORTX_SPACING);
- local_irq_restore(flags);
+static void __init u300_gpio_init_coh901571(struct u300_gpio *gpio,
+ struct u300_gpio_platform *plat)
+{
+ int i, j;
+
+ /* Write default config and values to all pins */
+ for (i = 0; i < plat->ports; i++) {
+ for (j = 0; j < 8; j++) {
+ const struct u300_gpio_confdata *conf;
+ int offset = (i*8) + j;
+
+ if (plat->variant == U300_GPIO_COH901571_3_BS335)
+ conf = &bs335_gpio_config[i][j];
+ else if (plat->variant == U300_GPIO_COH901571_3_BS365)
+ conf = &bs365_gpio_config[i][j];
+ else
+ break;
+
+ u300_gpio_init_pin(gpio, offset, conf);
}
}
+}
- /* Enable or disable the internal pull-ups in the GPIO ASIC block */
- for (i = 0; i < U300_GPIO_MAX; i++) {
- val = 0;
- for (j = 0; j < 8; j++)
- val |= (u32)((u300_gpio_config[i][j].pull_up == DISABLE_PULL_UP) << j);
- local_irq_save(flags);
- writel(val, virtbase + U300_GPIO_PXPER + i * U300_GPIO_PORTX_SPACING);
- local_irq_restore(flags);
+static inline void u300_gpio_free_ports(struct u300_gpio *gpio)
+{
+ struct u300_gpio_port *port;
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &gpio->port_list) {
+ port = list_entry(p, struct u300_gpio_port, node);
+ list_del(&port->node);
+ free_irq(port->irq, port);
+ kfree(port);
}
-#endif
}
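For reference, the same teardown can be written with the type-safe list iterator; this is only an equivalent sketch of the loop above, not a proposed change:

/* Sketch: equivalent port teardown using list_for_each_entry_safe() */
struct u300_gpio_port *port, *tmp;

list_for_each_entry_safe(port, tmp, &gpio->port_list, node) {
	list_del(&port->node);
	free_irq(port->irq, port);
	kfree(port);
}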
-static int __init gpio_probe(struct platform_device *pdev)
+static int __init u300_gpio_probe(struct platform_device *pdev)
{
- u32 val;
+ struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
+ struct u300_gpio *gpio;
int err = 0;
+ int portno;
+ u32 val;
+ u32 ifr;
int i;
- int num_irqs;
- gpiodev = &pdev->dev;
- memset(gpio_pin, 0, sizeof(gpio_pin));
+ gpio = kzalloc(sizeof(struct u300_gpio), GFP_KERNEL);
+ if (gpio == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ gpio->chip = u300_gpio_chip;
+ gpio->chip.ngpio = plat->ports * U300_GPIO_PINS_PER_PORT;
+ gpio->irq_base = plat->gpio_irq_base;
+ gpio->chip.dev = &pdev->dev;
+ gpio->chip.base = plat->gpio_base;
+ gpio->dev = &pdev->dev;
/* Get GPIO clock */
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- dev_err(gpiodev, "could not get GPIO clock\n");
+ gpio->clk = clk_get(gpio->dev, NULL);
+ if (IS_ERR(gpio->clk)) {
+ err = PTR_ERR(gpio->clk);
+ dev_err(gpio->dev, "could not get GPIO clock\n");
goto err_no_clk;
}
- err = clk_enable(clk);
+ err = clk_enable(gpio->clk);
if (err) {
- dev_err(gpiodev, "could not enable GPIO clock\n");
+ dev_err(gpio->dev, "could not enable GPIO clock\n");
goto err_no_clk_enable;
}
- memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!memres)
+ gpio->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!gpio->memres) {
+ dev_err(gpio->dev, "could not get GPIO memory resource\n");
+ err = -ENODEV;
goto err_no_resource;
+ }
- if (!request_mem_region(memres->start, resource_size(memres),
+ if (!request_mem_region(gpio->memres->start,
+ resource_size(gpio->memres),
"GPIO Controller")) {
err = -ENODEV;
goto err_no_ioregion;
}
- virtbase = ioremap(memres->start, resource_size(memres));
- if (!virtbase) {
+ gpio->base = ioremap(gpio->memres->start, resource_size(gpio->memres));
+ if (!gpio->base) {
err = -ENOMEM;
goto err_no_ioremap;
}
- dev_info(gpiodev, "remapped 0x%08x to %p\n",
- memres->start, virtbase);
-
-#ifdef U300_COH901335
- dev_info(gpiodev, "initializing GPIO Controller COH 901 335\n");
- /* Turn on the GPIO block */
- writel(U300_GPIO_CR_BLOCK_CLOCK_ENABLE, virtbase + U300_GPIO_CR);
-#endif
-
-#ifdef U300_COH901571_3
- dev_info(gpiodev, "initializing GPIO Controller COH 901 571/3\n");
- val = readl(virtbase + U300_GPIO_CR);
- dev_info(gpiodev, "COH901571/3 block version: %d, " \
- "number of cores: %d\n",
- ((val & 0x0000FE00) >> 9),
- ((val & 0x000001FC) >> 2));
- writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE, virtbase + U300_GPIO_CR);
-#endif
-
- gpio_set_initial_values();
-
- for (num_irqs = 0 ; num_irqs < U300_GPIO_NUM_PORTS; num_irqs++) {
-
- gpio_ports[num_irqs].irq =
- platform_get_irq_byname(pdev,
- gpio_ports[num_irqs].name);
-
- err = request_irq(gpio_ports[num_irqs].irq,
- gpio_irq_handler, IRQF_DISABLED,
- gpio_ports[num_irqs].name,
- &gpio_ports[num_irqs]);
- if (err) {
- dev_err(gpiodev, "cannot allocate IRQ for %s!\n",
- gpio_ports[num_irqs].name);
- goto err_no_irq;
+
+ if (plat->variant == U300_GPIO_COH901335) {
+ dev_info(gpio->dev,
+ "initializing GPIO Controller COH 901 335\n");
+ gpio->stride = U300_335_PORT_STRIDE;
+ gpio->pcr = U300_335_PXPCR;
+ gpio->dor = U300_335_PXPDOR;
+ gpio->dir = U300_335_PXPDIR;
+ gpio->per = U300_335_PXPER;
+ gpio->icr = U300_335_PXICR;
+ gpio->ien = U300_335_PXIEN;
+ gpio->iev = U300_335_PXIEV;
+ ifr = U300_335_PXIFR;
+
+ /* Turn on the GPIO block */
+ writel(U300_335_CR_BLOCK_CLOCK_ENABLE,
+ gpio->base + U300_335_CR);
+ } else if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
+ plat->variant == U300_GPIO_COH901571_3_BS365) {
+ dev_info(gpio->dev,
+ "initializing GPIO Controller COH 901 571/3\n");
+ gpio->stride = U300_571_PORT_STRIDE;
+ gpio->pcr = U300_571_PXPCR;
+ gpio->dor = U300_571_PXPDOR;
+ gpio->dir = U300_571_PXPDIR;
+ gpio->per = U300_571_PXPER;
+ gpio->icr = U300_571_PXICR;
+ gpio->ien = U300_571_PXIEN;
+ gpio->iev = U300_571_PXIEV;
+ ifr = U300_571_PXIFR;
+
+ val = readl(gpio->base + U300_571_CR);
+ dev_info(gpio->dev, "COH901571/3 block version: %d, " \
+ "number of cores: %d totalling %d pins\n",
+ ((val & 0x000001FC) >> 2),
+ ((val & 0x0000FE00) >> 9),
+ ((val & 0x0000FE00) >> 9) * 8);
+ writel(U300_571_CR_BLOCK_CLKRQ_ENABLE,
+ gpio->base + U300_571_CR);
+ u300_gpio_init_coh901571(gpio, plat);
+ } else {
+ dev_err(gpio->dev, "unknown block variant\n");
+ err = -ENODEV;
+ goto err_unknown_variant;
+ }
+
+ /* Add each port with its IRQ separately */
+ INIT_LIST_HEAD(&gpio->port_list);
+ for (portno = 0 ; portno < plat->ports; portno++) {
+ struct u300_gpio_port *port =
+ kmalloc(sizeof(struct u300_gpio_port), GFP_KERNEL);
+
+ if (!port) {
+ dev_err(gpio->dev, "out of memory\n");
+ err = -ENOMEM;
+ goto err_no_port;
}
- /* Turns off PortX_irq_force */
- writel(0x0, virtbase + U300_GPIO_PXIFR +
- num_irqs * U300_GPIO_PORTX_SPACING);
+
+ snprintf(port->name, 8, "gpio%d", portno);
+ port->number = portno;
+ port->gpio = gpio;
+
+ port->irq = platform_get_irq_byname(pdev,
+ port->name);
+
+ dev_dbg(gpio->dev, "register IRQ %d for %s\n", port->irq,
+ port->name);
+
+ irq_set_chained_handler(port->irq, u300_gpio_irq_handler);
+ irq_set_handler_data(port->irq, port);
+
+ /* For each GPIO pin set the unique IRQ handler */
+ for (i = 0; i < U300_GPIO_PINS_PER_PORT; i++) {
+ int irqno = gpio->irq_base + (portno << 3) + i;
+
+ dev_dbg(gpio->dev, "handler for IRQ %d on %s\n",
+ irqno, port->name);
+ irq_set_chip_and_handler(irqno, &u300_gpio_irqchip,
+ handle_simple_irq);
+ set_irq_flags(irqno, IRQF_VALID);
+ irq_set_chip_data(irqno, port);
+ }
+
+ /* Turns off irq force (test register) for this port */
+ writel(0x0, gpio->base + portno * gpio->stride + ifr);
+
+ list_add_tail(&port->node, &gpio->port_list);
}
+ dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
+
+ err = gpiochip_add(&gpio->chip);
+ if (err) {
+ dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
+ goto err_no_chip;
+ }
+
+ platform_set_drvdata(pdev, gpio);
return 0;
- err_no_irq:
- for (i = 0; i < num_irqs; i++)
- free_irq(gpio_ports[i].irq, &gpio_ports[i]);
- iounmap(virtbase);
- err_no_ioremap:
- release_mem_region(memres->start, resource_size(memres));
- err_no_ioregion:
- err_no_resource:
- clk_disable(clk);
- err_no_clk_enable:
- clk_put(clk);
- err_no_clk:
- dev_info(gpiodev, "module ERROR:%d\n", err);
+err_no_chip:
+err_no_port:
+ u300_gpio_free_ports(gpio);
+err_unknown_variant:
+ iounmap(gpio->base);
+err_no_ioremap:
+ release_mem_region(gpio->memres->start, resource_size(gpio->memres));
+err_no_ioregion:
+err_no_resource:
+ clk_disable(gpio->clk);
+err_no_clk_enable:
+ clk_put(gpio->clk);
+err_no_clk:
+ kfree(gpio);
+ dev_info(&pdev->dev, "module ERROR:%d\n", err);
return err;
}
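The probe path is driven entirely by struct u300_gpio_platform. A hedged sketch of the platform data a board file would register; the field names are the ones dereferenced above, while IRQ_U300_GPIO_BASE and the numeric values are placeholders:

/* Sketch: platform data as consumed by u300_gpio_probe(); values are made up */
static struct u300_gpio_platform u300_gpio_plat = {
	.variant	= U300_GPIO_COH901571_3_BS335,
	.ports		= BS335_GPIO_NUM_PORTS,	/* 7 ports * 8 pins */
	.gpio_base	= 0,			/* first GPIO number */
	.gpio_irq_base	= IRQ_U300_GPIO_BASE,	/* hypothetical IRQ base define */
};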
-static int __exit gpio_remove(struct platform_device *pdev)
+static int __exit u300_gpio_remove(struct platform_device *pdev)
{
- int i;
+ struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
+ struct u300_gpio *gpio = platform_get_drvdata(pdev);
+ int err;
/* Turn off the GPIO block */
- writel(0x00000000U, virtbase + U300_GPIO_CR);
- for (i = 0 ; i < U300_GPIO_NUM_PORTS; i++)
- free_irq(gpio_ports[i].irq, &gpio_ports[i]);
- iounmap(virtbase);
- release_mem_region(memres->start, resource_size(memres));
- clk_disable(clk);
- clk_put(clk);
+ if (plat->variant == U300_GPIO_COH901335)
+ writel(0x00000000U, gpio->base + U300_335_CR);
+ if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
+ plat->variant == U300_GPIO_COH901571_3_BS365)
+ writel(0x00000000U, gpio->base + U300_571_CR);
+
+ err = gpiochip_remove(&gpio->chip);
+ if (err < 0) {
+ dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err);
+ return err;
+ }
+ u300_gpio_free_ports(gpio);
+ iounmap(gpio->base);
+ release_mem_region(gpio->memres->start,
+ resource_size(gpio->memres));
+ clk_disable(gpio->clk);
+ clk_put(gpio->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(gpio);
return 0;
}
-static struct platform_driver gpio_driver = {
+static struct platform_driver u300_gpio_driver = {
.driver = {
.name = "u300-gpio",
},
- .remove = __exit_p(gpio_remove),
+ .remove = __exit_p(u300_gpio_remove),
};
static int __init u300_gpio_init(void)
{
- return platform_driver_probe(&gpio_driver, gpio_probe);
+ return platform_driver_probe(&u300_gpio_driver, u300_gpio_probe);
}
static void __exit u300_gpio_exit(void)
{
- platform_driver_unregister(&gpio_driver);
+ platform_driver_unregister(&u300_gpio_driver);
}
arch_initcall(u300_gpio_init);
module_exit(u300_gpio_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-
-#ifdef U300_COH901571_3
-MODULE_DESCRIPTION("ST-Ericsson AB COH 901 571/3 GPIO driver");
-#endif
-
-#ifdef U300_COH901335
-MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335 GPIO driver");
-#endif
-
+MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335/COH 901 571/3 GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 846fbd5e31b..0ce6ac9898b 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b493663c7ba..1368826ef28 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,7 +9,6 @@ menuconfig DRM
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
- select SLOW_WORK
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -96,6 +95,7 @@ config DRM_I915
select FB_CFB_IMAGEBLIT
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
@@ -158,3 +158,7 @@ config DRM_SAVAGE
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
+
+source "drivers/gpu/drm/exynos/Kconfig"
+
+source "drivers/gpu/drm/vmwgfx/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 89cf05a72d1..c0496f66070 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -35,4 +35,5 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 1c364924220..9afe495c12c 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -31,6 +31,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include "drmP.h"
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 529a0dbe9fc..08ccefedb32 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -32,6 +32,7 @@
* Pauli Nieminen <suokkos-at-gmail-dot-com>
*/
+#include <linux/export.h>
#include "drm_buffer.h"
/**
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 61e1ef90d4e..30372f7b2d4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -36,6 +36,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
+#include <linux/export.h>
#include <asm/shmparam.h>
#include "drmP.h"
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0e3bd5b54b7..592865381c6 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,6 +28,7 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
+#include <linux/export.h>
#include "drmP.h"
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fe738f05309..405c63b9d53 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -31,6 +31,7 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
@@ -162,6 +163,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -170,6 +172,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
+ { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
char *drm_get_encoder_name(struct drm_encoder *encoder)
@@ -463,8 +466,10 @@ void drm_connector_init(struct drm_device *dev,
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
- drm_connector_attach_property(connector,
- dev->mode_config.edid_property, 0);
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ drm_connector_attach_property(connector,
+ dev->mode_config.edid_property,
+ 0);
drm_connector_attach_property(connector,
dev->mode_config.dpms_property, 0);
@@ -2113,8 +2118,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_blob_list);
- if (name)
+ if (name) {
strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ }
list_add_tail(&property->head, &dev->mode_config.property_list);
return property;
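The added termination matters because strncpy() leaves the destination without a NUL byte when the source fills the whole buffer; the pattern in isolation:

char buf[DRM_PROP_NAME_LEN];

strncpy(buf, name, DRM_PROP_NAME_LEN);
buf[DRM_PROP_NAME_LEN - 1] = '\0';	/* strncpy() adds no NUL if name was too long */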
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f88a9b2c977..3969f7553fe 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -29,6 +29,9 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -372,11 +375,13 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
goto done;
}
}
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
@@ -479,6 +484,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_mode_set save_set;
int ret = 0;
int i;
@@ -551,6 +557,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
save_connectors[count++] = *connector;
}
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->fb != set->fb) {
@@ -716,6 +728,12 @@ fail:
*connector = save_connectors[count++];
}
+ /* Try to restore the config */
+ if (mode_changed &&
+ !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+ save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d2668a5087..1c7a1c0d3ed 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -33,6 +33,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drmP.h"
#if defined(CONFIG_DEBUG_FS)
@@ -107,11 +108,8 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
root, tmp, &drm_debugfs_fops);
if (!ent) {
- char name[64];
- strncpy(name, root->d_name.name,
- min(root->d_name.len, 64U));
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
- name, files[i].name);
+ root->d_name.name, files[i].name);
kfree(tmp);
ret = -1;
goto fail;
@@ -120,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
tmp->minor = minor;
tmp->dent = ent;
tmp->info_ent = &files[i];
- list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&tmp->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
}
return 0;
@@ -148,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
char name[64];
int ret;
- INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+ INIT_LIST_HEAD(&minor->debugfs_list);
+ mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
if (!minor->debugfs_root) {
@@ -194,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
struct drm_info_node *tmp;
int i;
+ mutex_lock(&minor->debugfs_lock);
for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+ list_for_each_safe(pos, q, &minor->debugfs_list) {
tmp = list_entry(pos, struct drm_info_node, list);
if (tmp->info_ent == &files[i]) {
debugfs_remove(tmp->dent);
@@ -204,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
}
}
}
+ mutex_unlock(&minor->debugfs_lock);
return 0;
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 252cbd74df0..cfb4e333ec0 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -33,6 +33,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include "drmP.h"
/**
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 93a112d45c1..40c187c60f4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -48,6 +48,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm_core.h"
@@ -124,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
@@ -438,6 +439,8 @@ long drm_ioctl(struct file *filp,
goto err_i1;
}
}
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
}
if (cmd & IOC_IN) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7425e5c9bd7..3e927ce7557 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
@@ -1319,6 +1320,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VENDOR_BLOCK 0x03
+#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
/**
@@ -1347,6 +1349,176 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+{
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+
+ connector->dvi_dual = db[6] & 1;
+ connector->max_tmds_clock = db[7] * 5;
+
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ connector->video_latency[0] = db[9];
+ connector->audio_latency[0] = db[10];
+ connector->video_latency[1] = db[11];
+ connector->audio_latency[1] = db[12];
+
+ DRM_LOG_KMS("HDMI: DVI dual %d, "
+ "max TMDS clock %d, "
+ "latency present %d %d, "
+ "video latency %d %d, "
+ "audio latency %d %d\n",
+ connector->dvi_dual,
+ connector->max_tmds_clock,
+ (int) connector->latency_present[0],
+ (int) connector->latency_present[1],
+ connector->video_latency[0],
+ connector->video_latency[1],
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+}
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+ if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+ *(u8 **)data = t->data.other_data.data.str.str;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+ uint8_t *eld = connector->eld;
+ u8 *cea;
+ u8 *name;
+ u8 *db;
+ int sad_count = 0;
+ int mnl;
+ int dbl;
+
+ memset(eld, 0, sizeof(connector->eld));
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+ return;
+ }
+
+ name = NULL;
+ drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ for (mnl = 0; name && mnl < 13; mnl++) {
+ if (name[mnl] == 0x0a)
+ break;
+ eld[20 + mnl] = name[mnl];
+ }
+ eld[4] = (cea[1] << 5) | mnl;
+ DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+ eld[0] = 2 << 3; /* ELD version: 2 */
+
+ eld[16] = edid->mfg_id[0];
+ eld[17] = edid->mfg_id[1];
+ eld[18] = edid->prod_code[0];
+ eld[19] = edid->prod_code[1];
+
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+
+ switch ((db[0] & 0xe0) >> 5) {
+ case AUDIO_BLOCK: /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK: /* Speaker Allocation Data Block */
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
+ }
+ eld[5] |= sad_count << 4;
+ eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+EXPORT_SYMBOL(drm_edid_to_eld);
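A hedged sketch of how a KMS driver would typically call this from its connector ->get_modes() hook, right after fetching and registering the EDID. Only the drm_* helpers are real; the surrounding function is illustrative and assumes <linux/slab.h> for kfree():

/* Sketch: build the ELD as part of probing a connector's modes */
static int example_connector_get_modes(struct drm_connector *connector,
				       struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);
	int count = 0;

	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		drm_edid_to_eld(connector, edid);	/* fill connector->eld */
		kfree(edid);
	}
	return count;
}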
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ int a, v;
+
+ if (!connector->latency_present[0])
+ return 0;
+ if (!connector->latency_present[1])
+ i = 0;
+
+ a = connector->audio_latency[i];
+ v = connector->video_latency[i];
+
+ /*
+ * HDMI/DP sink doesn't support audio or video?
+ */
+ if (a == 255 || v == 255)
+ return 0;
+
+ /*
+ * Convert raw EDID values to milliseconds.
+ * Treat unknown latency as 0ms.
+ */
+ if (a)
+ a = min(2 * (a - 1), 500);
+ if (v)
+ v = min(2 * (v - 1), 500);
+
+ return max(v - a, 0);
+}
+EXPORT_SYMBOL(drm_av_sync_delay);
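Worked example of the conversion above: raw latency bytes a = 20 and v = 40 decode to 2 * (20 - 1) = 38 ms of audio latency and 2 * (40 - 1) = 78 ms of video latency, so the helper returns 78 - 38 = 40 ms of additional audio delay for the caller to program.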
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder just changed display mode
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder && connector->eld[0])
+ return connector;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_select_eld);
+
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* @edid: monitor EDID information
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index d62c064fbaa..fb943551060 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/module.h>
+
#include "drm_encoder_slave.h"
/**
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f7c6854eb4d..80fe39d98b0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -31,6 +31,7 @@
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/fb.h>
+#include <linux/module.h>
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 2ec7d48fc4a..4911e1d1dcf 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -37,6 +37,7 @@
#include "drmP.h"
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/module.h>
/* from BKL pushdown: note that nothing else serializes idr_find() */
DEFINE_MUTEX(drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 186d62eb063..396e60ce811 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -285,6 +285,94 @@ again:
}
EXPORT_SYMBOL(drm_gem_handle_create);
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ */
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list = &obj->map_list;
+
+ drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+}
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret = 0;
+
+ /* Set the object up for mmap'ing */
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = obj->size;
+ map->handle = obj;
+
+ /* Get a DRM GEM mmap offset allocated... */
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ obj->size / PAGE_SIZE, 0, 0);
+
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ ret = -ENOSPC;
+ goto out_free_list;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ obj->size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ DRM_ERROR("failed to add to map hash\n");
+ goto out_free_mm;
+ }
+
+ return 0;
+
+out_free_mm:
+ drm_mm_put_block(list->file_offset_node);
+out_free_list:
+ kfree(list->map);
+ list->map = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
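A hedged sketch of the usual caller, a driver's map-offset ioctl: look the object up, make sure the fake offset exists, and hand it back shifted into bytes. Error handling is trimmed; example_dumb_map_offset is illustrative, while the drm_gem_* calls and map_list fields are the ones used above:

/* Sketch: hand a GEM object's fake mmap offset back to user space */
static int example_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
				   uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->map_list.map)
		ret = drm_gem_create_mmap_offset(obj);
	if (!ret)
		*offset = (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}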
+
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index e3a75688f3c..68dc8744b63 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -36,6 +36,7 @@
#include "drm_hashtab.h"
#include <linux/hash.h>
#include <linux/slab.h>
+#include <linux/export.h>
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 4a058c7af6c..ddd70db45f7 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -29,6 +29,7 @@
*/
#include <linux/compat.h>
#include <linux/ratelimit.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm_core.h"
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3830e9e478c..68b756253f9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
+#include <linux/export.h>
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, crtc, count) ( \
@@ -406,13 +407,16 @@ int drm_irq_uninstall(struct drm_device *dev)
/*
* Wake up any waiters so they don't hang.
*/
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ DRM_WAKEUP(&dev->vbl_queue[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (!irq_enabled)
return -EINVAL;
@@ -1124,6 +1128,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
trace_drm_vblank_event_delivered(current->pid, pipe,
vblwait->request.sequence);
} else {
+ /* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
vblwait->reply.sequence = vblwait->request.sequence;
}
@@ -1204,8 +1209,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
- if (flags & _DRM_VBLANK_EVENT)
+ if (flags & _DRM_VBLANK_EVENT) {
+ /* must hold on to the vblank ref until the event fires
+ * drm_vblank_put will be called asynchronously
+ */
return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+ }
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1<<23)) {
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index c9b805000a1..c8b6b66d428 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -34,6 +34,7 @@
*/
#include <linux/highmem.h>
+#include <linux/export.h>
#include "drmP.h"
/**
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 959186cbf32..961fb54f426 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -45,6 +45,7 @@
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#define MM_UNUSED_TARGET 4
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ad74fb4dc54..fb8e46b4e8b 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/list_sort.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index b6a19cb07ca..d4d10b7880c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -39,6 +39,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include "drmP.h"
/**********************************************************************/
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 2a8b6265ad3..ae9db5e2b27 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -25,6 +25,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include "drmP.h"
/**
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 9e5b07efebb..fff87221f9e 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -39,6 +39,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drmP.h"
/***************************************************
@@ -95,7 +96,6 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
struct drm_device *dev = minor->dev;
struct proc_dir_entry *ent;
struct drm_info_node *tmp;
- char name[64];
int i, ret;
for (i = 0; i < count; i++) {
@@ -118,7 +118,7 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
&drm_proc_fops, tmp);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, files[i].name);
+ root->name, files[i].name);
list_del(&tmp->list);
kfree(tmp);
ret = -1;
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 34664587a74..cebce45f442 100644
--- a/drivers/gpu/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
@@ -36,6 +36,7 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
+#include <linux/export.h>
#include "drm_sman.h"
struct drm_owner_item {
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2eee8e016b3..0f9ef9bf673 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -16,6 +16,7 @@
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/err.h>
+#include <linux/export.h>
#include "drm_sysfs.h"
#include "drm_core.h"
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 206d2300d87..445003f4dc9 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -1,5 +1,6 @@
#include "drmP.h"
#include <linux/usb.h>
+#include <linux/export.h>
#ifdef CONFIG_USB
int drm_get_usb_dev(struct usb_interface *interface,
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 5db96d45fc7..8c03eaf4144 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -34,6 +34,7 @@
*/
#include "drmP.h"
+#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
new file mode 100644
index 00000000000..847466aab43
--- /dev/null
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -0,0 +1,20 @@
+config DRM_EXYNOS
+ tristate "DRM Support for Samsung SoC EXYNOS Series"
+ depends on DRM && PLAT_SAMSUNG
+ default n
+ select DRM_KMS_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ help
+ Choose this option if you have a Samsung SoC EXYNOS chipset.
+ If M is selected the module will be called exynosdrm.
+
+config DRM_EXYNOS_FIMD
+ tristate "Exynos DRM FIMD"
+ depends on DRM_EXYNOS
+ default n
+ help
+ Choose this option if you want to use Exynos FIMD for DRM.
+ If M is selected, the module will be called exynos_drm_fimd.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
new file mode 100644
index 00000000000..0496d3ff268
--- /dev/null
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
+exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
+ exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
+ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+
+obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
+obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
new file mode 100644
index 00000000000..6f8afea94fc
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -0,0 +1,110 @@
+/* exynos_drm_buf.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_buf.h"
+
+static DEFINE_MUTEX(exynos_drm_buf_lock);
+
+static int lowlevel_buffer_allocate(struct drm_device *dev,
+ struct exynos_drm_buf_entry *entry)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
+ (dma_addr_t *)&entry->paddr, GFP_KERNEL);
+ if (!entry->paddr) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
+ }
+
+ DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
+ (unsigned int)entry->vaddr, entry->paddr, entry->size);
+
+ return 0;
+}
+
+static void lowlevel_buffer_deallocate(struct drm_device *dev,
+ struct exynos_drm_buf_entry *entry)
+{
+ DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+ if (entry->paddr && entry->vaddr && entry->size)
+ dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
+ entry->paddr);
+ else
+ DRM_DEBUG_KMS("entry data is null.\n");
+}
+
+struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+ unsigned int size)
+{
+ struct exynos_drm_buf_entry *entry;
+
+ DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ entry->size = size;
+
+ /*
+ * allocate a memory region of the requested size and store the memory
+ * information in the vaddr and paddr fields of an entry object.
+ */
+ if (lowlevel_buffer_allocate(dev, entry) < 0) {
+ kfree(entry);
+ entry = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return entry;
+}
+
+void exynos_drm_buf_destroy(struct drm_device *dev,
+ struct exynos_drm_buf_entry *entry)
+{
+ DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+ if (!entry) {
+ DRM_DEBUG_KMS("entry is null.\n");
+ return;
+ }
+
+ lowlevel_buffer_deallocate(dev, entry);
+
+ kfree(entry);
+ entry = NULL;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
new file mode 100644
index 00000000000..045d59eab01
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -0,0 +1,53 @@
+/* exynos_drm_buf.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_BUF_H_
+#define _EXYNOS_DRM_BUF_H_
+
+/*
+ * exynos drm buffer entry structure.
+ *
+ * @paddr: physical address of allocated memory.
+ * @vaddr: kernel virtual address of allocated memory.
+ * @size: size of allocated memory.
+ */
+struct exynos_drm_buf_entry {
+ dma_addr_t paddr;
+ void __iomem *vaddr;
+ unsigned int size;
+};
+
+/* allocate physical memory. */
+struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+ unsigned int size);
+
+/* get physical memory information of a drm framebuffer. */
+struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+
+/* remove allocated physical memory. */
+void exynos_drm_buf_destroy(struct drm_device *dev,
+ struct exynos_drm_buf_entry *entry);
+
+#endif
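For illustration, a short sketch (not part of the patch) of how the allocation interface declared above is meant to be used; the function name and the SZ_1M size are arbitrary example values:

static int example_alloc_buffer(struct drm_device *dev)
{
        struct exynos_drm_buf_entry *entry;

        /* allocate a physically contiguous buffer of 1 MiB */
        entry = exynos_drm_buf_create(dev, SZ_1M);
        if (IS_ERR(entry))
                return PTR_ERR(entry);

        /* entry->paddr is usable for DMA, entry->vaddr for CPU access */

        exynos_drm_buf_destroy(dev, entry);
        return 0;
}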
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
new file mode 100644
index 00000000000..985d9e76872
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+#define MAX_EDID 256
+#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
+ drm_connector)
+
+struct exynos_drm_connector {
+ struct drm_connector drm_connector;
+};
+
+/* convert exynos_video_timings to drm_display_mode */
+static inline void
+convert_to_display_mode(struct drm_display_mode *mode,
+ struct fb_videomode *timing)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mode->clock = timing->pixclock / 1000;
+
+ mode->hdisplay = timing->xres;
+ mode->hsync_start = mode->hdisplay + timing->left_margin;
+ mode->hsync_end = mode->hsync_start + timing->hsync_len;
+ mode->htotal = mode->hsync_end + timing->right_margin;
+
+ mode->vdisplay = timing->yres;
+ mode->vsync_start = mode->vdisplay + timing->upper_margin;
+ mode->vsync_end = mode->vsync_start + timing->vsync_len;
+ mode->vtotal = mode->vsync_end + timing->lower_margin;
+}
+
+/* convert drm_display_mode to exynos_video_timings */
+static inline void
+convert_to_video_timing(struct fb_videomode *timing,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ memset(timing, 0, sizeof(*timing));
+
+ timing->pixclock = mode->clock * 1000;
+ timing->refresh = mode->vrefresh;
+
+ timing->xres = mode->hdisplay;
+ timing->left_margin = mode->hsync_start - mode->hdisplay;
+ timing->hsync_len = mode->hsync_end - mode->hsync_start;
+ timing->right_margin = mode->htotal - mode->hsync_end;
+
+ timing->yres = mode->vdisplay;
+ timing->upper_margin = mode->vsync_start - mode->vdisplay;
+ timing->vsync_len = mode->vsync_end - mode->vsync_start;
+ timing->lower_margin = mode->vtotal - mode->vsync_end;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ timing->vmode = FB_VMODE_INTERLACED;
+ else
+ timing->vmode = FB_VMODE_NONINTERLACED;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ timing->vmode |= FB_VMODE_DOUBLE;
+}
+
+static int exynos_drm_connector_get_modes(struct drm_connector *connector)
+{
+ struct exynos_drm_manager *manager =
+ exynos_drm_get_manager(connector->encoder);
+ struct exynos_drm_display *display = manager->display;
+ unsigned int count;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!display) {
+ DRM_DEBUG_KMS("display is null.\n");
+ return 0;
+ }
+
+ /*
+ * if get_edid() exists, the hdmi side's get_edid() callback is
+ * called to read edid data over the i2c interface; otherwise the
+ * timing is taken from the FIMD driver (display controller).
+ *
+ * Note: for an lcd panel, count is always 1 on success because
+ * the panel exposes only one mode.
+ */
+ if (display->get_edid) {
+ int ret;
+ void *edid;
+
+ edid = kzalloc(MAX_EDID, GFP_KERNEL);
+ if (!edid) {
+ DRM_ERROR("failed to allocate edid\n");
+ return 0;
+ }
+
+ ret = display->get_edid(manager->dev, connector,
+ edid, MAX_EDID);
+ if (ret < 0) {
+ DRM_ERROR("failed to get edid data.\n");
+ kfree(edid);
+ edid = NULL;
+ return 0;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ kfree(connector->display_info.raw_edid);
+ connector->display_info.raw_edid = edid;
+ } else {
+ struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ struct fb_videomode *timing;
+
+ if (display->get_timing)
+ timing = display->get_timing(manager->dev);
+ else {
+ drm_mode_destroy(connector->dev, mode);
+ return 0;
+ }
+
+ convert_to_display_mode(mode, timing);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ count = 1;
+ }
+
+ return count;
+}
+
+static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct exynos_drm_manager *manager =
+ exynos_drm_get_manager(connector->encoder);
+ struct exynos_drm_display *display = manager->display;
+ struct fb_videomode timing;
+ int ret = MODE_BAD;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ convert_to_video_timing(&timing, mode);
+
+ if (display && display->check_timing)
+ if (!display->check_timing(manager->dev, (void *)&timing))
+ ret = MODE_OK;
+
+ return ret;
+}
+
+struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return connector->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
+ .get_modes = exynos_drm_connector_get_modes,
+ .mode_valid = exynos_drm_connector_mode_valid,
+ .best_encoder = exynos_drm_best_encoder,
+};
+
+/* get detection status of display device. */
+static enum drm_connector_status
+exynos_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct exynos_drm_manager *manager =
+ exynos_drm_get_manager(connector->encoder);
+ struct exynos_drm_display *display = manager->display;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (display && display->is_connected) {
+ if (display->is_connected(manager->dev))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+ }
+
+ return status;
+}
+
+static void exynos_drm_connector_destroy(struct drm_connector *connector)
+{
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(exynos_connector);
+}
+
+static struct drm_connector_funcs exynos_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = exynos_drm_connector_detect,
+ .destroy = exynos_drm_connector_destroy,
+};
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct exynos_drm_connector *exynos_connector;
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct drm_connector *connector;
+ int type;
+ int err;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
+ if (!exynos_connector) {
+ DRM_ERROR("failed to allocate connector\n");
+ return NULL;
+ }
+
+ connector = &exynos_connector->drm_connector;
+
+ switch (manager->display->type) {
+ case EXYNOS_DISPLAY_TYPE_HDMI:
+ type = DRM_MODE_CONNECTOR_HDMIA;
+ break;
+ default:
+ type = DRM_MODE_CONNECTOR_Unknown;
+ break;
+ }
+
+ drm_connector_init(dev, connector, &exynos_connector_funcs, type);
+ drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
+
+ err = drm_sysfs_connector_add(connector);
+ if (err)
+ goto err_connector;
+
+ connector->encoder = encoder;
+ err = drm_mode_connector_attach_encoder(connector, encoder);
+ if (err) {
+ DRM_ERROR("failed to attach a connector to a encoder\n");
+ goto err_sysfs;
+ }
+
+ DRM_DEBUG_KMS("connector has been created\n");
+
+ return connector;
+
+err_sysfs:
+ drm_sysfs_connector_remove(connector);
+err_connector:
+ drm_connector_cleanup(connector);
+ kfree(exynos_connector);
+ return NULL;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Connector Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
new file mode 100644
index 00000000000..1c7b2b5b579
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_CONNECTOR_H_
+#define _EXYNOS_DRM_CONNECTOR_H_
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+ struct drm_encoder *encoder);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
new file mode 100644
index 00000000000..661a03571d0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -0,0 +1,272 @@
+/* exynos_drm_core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
+#include "exynos_drm_fbdev.h"
+
+static DEFINE_MUTEX(exynos_drm_mutex);
+static LIST_HEAD(exynos_drm_subdrv_list);
+static struct drm_device *drm_dev;
+
+static int exynos_drm_subdrv_probe(struct drm_device *dev,
+ struct exynos_drm_subdrv *subdrv)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ if (subdrv->probe) {
+ int ret;
+
+ /*
+ * this probe callback is called by the sub driver once all of its
+ * resources, such as clock, irq and register map, have been set up,
+ * or by load() of the exynos drm driver.
+ *
+ * Note that this driver is designed with modularization in mind.
+ */
+ ret = subdrv->probe(dev, subdrv->manager.dev);
+ if (ret)
+ return ret;
+ }
+
+ /* create and initialize an encoder for this sub driver. */
+ encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
+ (1 << MAX_CRTC) - 1);
+ if (!encoder) {
+ DRM_ERROR("failed to create encoder\n");
+ return -EFAULT;
+ }
+
+ /*
+ * create and initialize a connector for this sub driver and
+ * attach the encoder created above to the connector.
+ */
+ connector = exynos_drm_connector_create(dev, encoder);
+ if (!connector) {
+ DRM_ERROR("failed to create connector\n");
+ encoder->funcs->destroy(encoder);
+ return -EFAULT;
+ }
+
+ subdrv->encoder = encoder;
+ subdrv->connector = connector;
+
+ return 0;
+}
+
+static void exynos_drm_subdrv_remove(struct drm_device *dev,
+ struct exynos_drm_subdrv *subdrv)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ if (subdrv->remove)
+ subdrv->remove(dev);
+
+ if (subdrv->encoder) {
+ struct drm_encoder *encoder = subdrv->encoder;
+ encoder->funcs->destroy(encoder);
+ subdrv->encoder = NULL;
+ }
+
+ if (subdrv->connector) {
+ struct drm_connector *connector = subdrv->connector;
+ connector->funcs->destroy(connector);
+ subdrv->connector = NULL;
+ }
+}
+
+int exynos_drm_device_register(struct drm_device *dev)
+{
+ struct exynos_drm_subdrv *subdrv, *n;
+ int err;
+
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ if (!dev)
+ return -EINVAL;
+
+ if (drm_dev) {
+ DRM_ERROR("Already drm device were registered\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&exynos_drm_mutex);
+ list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
+ err = exynos_drm_subdrv_probe(dev, subdrv);
+ if (err) {
+ DRM_DEBUG("exynos drm subdrv probe failed.\n");
+ list_del(&subdrv->list);
+ }
+ }
+
+ drm_dev = dev;
+ mutex_unlock(&exynos_drm_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_register);
+
+int exynos_drm_device_unregister(struct drm_device *dev)
+{
+ struct exynos_drm_subdrv *subdrv;
+
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ if (!dev || dev != drm_dev) {
+ WARN(1, "Unexpected drm device unregister!\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&exynos_drm_mutex);
+ list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
+ exynos_drm_subdrv_remove(dev, subdrv);
+
+ drm_dev = NULL;
+ mutex_unlock(&exynos_drm_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
+
+static int exynos_drm_mode_group_reinit(struct drm_device *dev)
+{
+ struct drm_mode_group *group = &dev->primary->mode_group;
+ uint32_t *id_list = group->id_list;
+ int ret;
+
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ ret = drm_mode_group_init_legacy_group(dev, group);
+ if (ret < 0)
+ return ret;
+
+ kfree(id_list);
+ return 0;
+}
+
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
+{
+ int err;
+
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ if (!subdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_mutex);
+ if (drm_dev) {
+ err = exynos_drm_subdrv_probe(drm_dev, subdrv);
+ if (err) {
+ DRM_ERROR("failed to probe exynos drm subdrv\n");
+ mutex_unlock(&exynos_drm_mutex);
+ return err;
+ }
+
+ /*
+ * if a specific driver such as the fimd or hdmi driver calls
+ * exynos_drm_subdrv_register() after drm_load(), the fb helper
+ * must be re-initialized and re-configured.
+ */
+ err = exynos_drm_fbdev_reinit(drm_dev);
+ if (err) {
+ DRM_ERROR("failed to reinitialize exynos drm fbdev\n");
+ exynos_drm_subdrv_remove(drm_dev, subdrv);
+ mutex_unlock(&exynos_drm_mutex);
+ return err;
+ }
+
+ err = exynos_drm_mode_group_reinit(drm_dev);
+ if (err) {
+ DRM_ERROR("failed to reinitialize mode group\n");
+ exynos_drm_fbdev_fini(drm_dev);
+ exynos_drm_subdrv_remove(drm_dev, subdrv);
+ mutex_unlock(&exynos_drm_mutex);
+ return err;
+ }
+ }
+
+ subdrv->drm_dev = drm_dev;
+
+ list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
+ mutex_unlock(&exynos_drm_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
+
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
+{
+ int ret = -EFAULT;
+
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ if (!subdrv) {
+ DRM_DEBUG("Unexpected exynos drm subdrv unregister!\n");
+ return ret;
+ }
+
+ mutex_lock(&exynos_drm_mutex);
+ if (drm_dev) {
+ exynos_drm_subdrv_remove(drm_dev, subdrv);
+ list_del(&subdrv->list);
+
+ /*
+ * the fb helper must be updated once a sub driver is released,
+ * to re-configure the crtc and connector and to set up the
+ * drm framebuffer again.
+ */
+ ret = exynos_drm_fbdev_reinit(drm_dev);
+ if (ret < 0) {
+ DRM_ERROR("failed fb helper reinit.\n");
+ goto fail;
+ }
+
+ ret = exynos_drm_mode_group_reinit(drm_dev);
+ if (ret < 0) {
+ DRM_ERROR("failed drm mode group reinit.\n");
+ goto fail;
+ }
+ }
+
+fail:
+ mutex_unlock(&exynos_drm_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Core Driver");
+MODULE_LICENSE("GPL");
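To make the registration flow above concrete, here is a sketch, not taken from this patch, of how a hardware-specific sub driver such as FIMD is expected to hook into this core: it fills in its manager and calls exynos_drm_subdrv_register() once its own resources (clock, irq, register map) are ready. Every name prefixed with example_ is an assumption for illustration:

static int example_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
        /* hardware-specific setup that needs the drm device goes here */
        return 0;
}

static struct exynos_drm_subdrv example_subdrv = {
        .probe = example_subdrv_probe,
        /* .manager.ops and .manager.display would point at real callbacks */
};

static int example_register(struct device *dev)
{
        example_subdrv.manager.dev = dev;

        /* the core creates an encoder and connector for this sub driver */
        return exynos_drm_subdrv_register(&example_subdrv);
}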
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
new file mode 100644
index 00000000000..9337e5e2dbb
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -0,0 +1,381 @@
+/* exynos_drm_crtc.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_buf.h"
+
+#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
+ drm_crtc)
+
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+/*
+ * Exynos specific crtc structure.
+ *
+ * @drm_crtc: crtc object.
+ * @overlay: contains information common to the display controller and hdmi;
+ * the contents of this overlay object are copied to the sub driver side.
+ * @pipe: a crtc index assigned at load() when a new crtc object is created.
+ * the crtc object is stored in the private->crtc array so that the crtc
+ * corresponding to this pipe can be looked up from private->crtc when an
+ * irq interrupt occurs. this pipe is used because the drm framework does
+ * not support multiple irqs yet; the pipe value identifies the crtc on
+ * which the current hardware interrupt occurred.
+ */
+struct exynos_drm_crtc {
+ struct drm_crtc drm_crtc;
+ struct exynos_drm_overlay overlay;
+ unsigned int pipe;
+};
+
+static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_overlay *overlay = &exynos_crtc->overlay;
+
+ exynos_drm_fn_encoder(crtc, overlay,
+ exynos_drm_encoder_crtc_mode_set);
+ exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+}
+
+static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos)
+{
+ struct exynos_drm_buf_entry *entry;
+ unsigned int actual_w;
+ unsigned int actual_h;
+
+ entry = exynos_drm_fb_get_buf(fb);
+ if (!entry) {
+ DRM_LOG_KMS("entry is null.\n");
+ return -EFAULT;
+ }
+
+ overlay->paddr = entry->paddr;
+ overlay->vaddr = entry->vaddr;
+
+ DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+ (unsigned long)overlay->vaddr,
+ (unsigned long)overlay->paddr);
+
+ actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
+ actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
+
+ /* set drm framebuffer data. */
+ overlay->fb_x = pos->fb_x;
+ overlay->fb_y = pos->fb_y;
+ overlay->fb_width = fb->width;
+ overlay->fb_height = fb->height;
+ overlay->bpp = fb->bits_per_pixel;
+ overlay->pitch = fb->pitch;
+
+ /* set overlay range to be displayed. */
+ overlay->crtc_x = pos->crtc_x;
+ overlay->crtc_y = pos->crtc_y;
+ overlay->crtc_width = actual_w;
+ overlay->crtc_height = actual_h;
+
+ /* set drm mode data. */
+ overlay->mode_width = mode->hdisplay;
+ overlay->mode_height = mode->vdisplay;
+ overlay->refresh = mode->vrefresh;
+ overlay->scan_flag = mode->flags;
+
+ DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
+ overlay->crtc_x, overlay->crtc_y,
+ overlay->crtc_width, overlay->crtc_height);
+
+ return 0;
+}
+
+static int exynos_drm_crtc_update(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc;
+ struct exynos_drm_overlay *overlay;
+ struct exynos_drm_crtc_pos pos;
+ struct drm_display_mode *mode = &crtc->mode;
+ struct drm_framebuffer *fb = crtc->fb;
+
+ if (!mode || !fb)
+ return -EINVAL;
+
+ exynos_crtc = to_exynos_crtc(crtc);
+ overlay = &exynos_crtc->overlay;
+
+ memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
+
+ /* the offset of the framebuffer to be displayed. */
+ pos.fb_x = crtc->x;
+ pos.fb_y = crtc->y;
+
+ /* OSD position to be displayed. */
+ pos.crtc_x = 0;
+ pos.crtc_y = 0;
+ pos.crtc_w = fb->width - crtc->x;
+ pos.crtc_h = fb->height - crtc->y;
+
+ return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
+}
+
+static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO */
+}
+
+static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* drm framework doesn't check NULL. */
+}
+
+static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* drm framework doesn't check NULL. */
+}
+
+static bool
+exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* drm framework doesn't check NULL */
+ return true;
+}
+
+static int
+exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mode = adjusted_mode;
+
+ return exynos_drm_crtc_update(crtc);
+}
+
+static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = exynos_drm_crtc_update(crtc);
+ if (ret)
+ return ret;
+
+ exynos_drm_crtc_apply(crtc);
+
+ return ret;
+}
+
+static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+ /* drm framework doesn't check NULL */
+}
+
+static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+ .dpms = exynos_drm_crtc_dpms,
+ .prepare = exynos_drm_crtc_prepare,
+ .commit = exynos_drm_crtc_commit,
+ .mode_fixup = exynos_drm_crtc_mode_fixup,
+ .mode_set = exynos_drm_crtc_mode_set,
+ .mode_set_base = exynos_drm_crtc_mode_set_base,
+ .load_lut = exynos_drm_crtc_load_lut,
+};
+
+static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_framebuffer *old_fb = crtc->fb;
+ int ret = -EINVAL;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (event) {
+ /*
+ * the pipe from userspace is always 0, so set the pipe number
+ * of the current owner on the event.
+ */
+ event->pipe = exynos_crtc->pipe;
+
+ list_add_tail(&event->base.link,
+ &dev_priv->pageflip_event_list);
+
+ ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter\n");
+ list_del(&event->base.link);
+
+ goto out;
+ }
+
+ crtc->fb = fb;
+ ret = exynos_drm_crtc_update(crtc);
+ if (ret) {
+ crtc->fb = old_fb;
+ drm_vblank_put(dev, exynos_crtc->pipe);
+ list_del(&event->base.link);
+
+ goto out;
+ }
+
+ /*
+ * the buffer-related values of the drm framebuffer to be
+ * applied must be set here, because these values are first
+ * written to shadow registers and then latched into the
+ * real registers during the vsync front porch period.
+ */
+ exynos_drm_crtc_apply(crtc);
+ }
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_private *private = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ private->crtc[exynos_crtc->pipe] = NULL;
+
+ drm_crtc_cleanup(crtc);
+ kfree(exynos_crtc);
+}
+
+static struct drm_crtc_funcs exynos_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = exynos_drm_crtc_page_flip,
+ .destroy = exynos_drm_crtc_destroy,
+};
+
+struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ return &exynos_crtc->overlay;
+}
+
+int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
+{
+ struct exynos_drm_crtc *exynos_crtc;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
+ if (!exynos_crtc) {
+ DRM_ERROR("failed to allocate exynos crtc\n");
+ return -ENOMEM;
+ }
+
+ exynos_crtc->pipe = nr;
+ crtc = &exynos_crtc->drm_crtc;
+
+ private->crtc[nr] = crtc;
+
+ drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
+ drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
+
+ return 0;
+}
+
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+ exynos_drm_enable_vblank);
+
+ return 0;
+}
+
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+ exynos_drm_disable_vblank);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM CRTC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
new file mode 100644
index 00000000000..c584042d6d2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -0,0 +1,38 @@
+/* exynos_drm_crtc.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_CRTC_H_
+#define _EXYNOS_DRM_CRTC_H_
+
+struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
+ struct drm_crtc *crtc);
+int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
new file mode 100644
index 00000000000..83810cbe3c1
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+
+#define DRIVER_NAME "exynos-drm"
+#define DRIVER_DESC "Samsung SoC DRM"
+#define DRIVER_DATE "20110530"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct exynos_drm_private *private;
+ int ret;
+ int nr;
+
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
+ if (!private) {
+ DRM_ERROR("failed to allocate private\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&private->pageflip_event_list);
+ dev->dev_private = (void *)private;
+
+ drm_mode_config_init(dev);
+
+ exynos_drm_mode_config_init(dev);
+
+ /*
+ * EXYNOS4 supports up to two CRTCs, and each crtc can be used
+ * independently of the other.
+ */
+ for (nr = 0; nr < MAX_CRTC; nr++) {
+ ret = exynos_drm_crtc_create(dev, nr);
+ if (ret)
+ goto err_crtc;
+ }
+
+ ret = drm_vblank_init(dev, MAX_CRTC);
+ if (ret)
+ goto err_crtc;
+
+ /*
+ * probe the sub drivers, such as the display controller and hdmi
+ * driver, that were registered at probe() time of the platform
+ * driver, and create an encoder and connector for each of them.
+ */
+ ret = exynos_drm_device_register(dev);
+ if (ret)
+ goto err_vblank;
+
+ /*
+ * create and configure fb helper and also exynos specific
+ * fbdev object.
+ */
+ ret = exynos_drm_fbdev_init(dev);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm fbdev\n");
+ goto err_drm_device;
+ }
+
+ return 0;
+
+err_drm_device:
+ exynos_drm_device_unregister(dev);
+err_vblank:
+ drm_vblank_cleanup(dev);
+err_crtc:
+ drm_mode_config_cleanup(dev);
+ kfree(private);
+
+ return ret;
+}
+
+static int exynos_drm_unload(struct drm_device *dev)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ exynos_drm_fbdev_fini(dev);
+ exynos_drm_device_unregister(dev);
+ drm_vblank_cleanup(dev);
+ drm_mode_config_cleanup(dev);
+ kfree(dev->dev_private);
+
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+static void exynos_drm_preclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+
+ /*
+ * drm framework frees all events at release time,
+ * so private event list should be cleared.
+ */
+ if (!list_empty(&dev_priv->pageflip_event_list))
+ INIT_LIST_HEAD(&dev_priv->pageflip_event_list);
+}
+
+static void exynos_drm_lastclose(struct drm_device *dev)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ exynos_drm_fbdev_restore_mode(dev);
+}
+
+static struct vm_operations_struct exynos_drm_gem_vm_ops = {
+ .fault = exynos_drm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static struct drm_ioctl_desc exynos_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
+ exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
+ DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
+ exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+};
+
+static struct drm_driver exynos_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
+ DRIVER_MODESET | DRIVER_GEM,
+ .load = exynos_drm_load,
+ .unload = exynos_drm_unload,
+ .preclose = exynos_drm_preclose,
+ .lastclose = exynos_drm_lastclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = exynos_drm_crtc_enable_vblank,
+ .disable_vblank = exynos_drm_crtc_disable_vblank,
+ .gem_init_object = exynos_drm_gem_init_object,
+ .gem_free_object = exynos_drm_gem_free_object,
+ .gem_vm_ops = &exynos_drm_gem_vm_ops,
+ .dumb_create = exynos_drm_gem_dumb_create,
+ .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
+ .dumb_destroy = exynos_drm_gem_dumb_destroy,
+ .ioctls = exynos_ioctls,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = exynos_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int exynos_drm_platform_probe(struct platform_device *pdev)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
+
+ return drm_platform_init(&exynos_drm_driver, pdev);
+}
+
+static int exynos_drm_platform_remove(struct platform_device *pdev)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ drm_platform_exit(&exynos_drm_driver, pdev);
+
+ return 0;
+}
+
+static struct platform_driver exynos_drm_platform_driver = {
+ .probe = exynos_drm_platform_probe,
+ .remove = __devexit_p(exynos_drm_platform_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init exynos_drm_init(void)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ return platform_driver_register(&exynos_drm_platform_driver);
+}
+
+static void __exit exynos_drm_exit(void)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ platform_driver_unregister(&exynos_drm_platform_driver);
+}
+
+module_init(exynos_drm_init);
+module_exit(exynos_drm_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Driver");
+MODULE_LICENSE("GPL");
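Note that the code above only registers a platform driver; a matching platform device named "exynos-drm" has to be provided elsewhere, typically in board setup code, for probe() to run. A hedged sketch of what that could look like; the variable name is made up:

static struct platform_device example_exynos_drm_device = {
        .name = "exynos-drm",
        .id   = -1,
};

/* in board setup code: platform_device_register(&example_exynos_drm_device); */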
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
new file mode 100644
index 00000000000..c03683f2ae7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -0,0 +1,254 @@
+/* exynos_drm_drv.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_DRV_H_
+#define _EXYNOS_DRM_DRV_H_
+
+#include "drm.h"
+
+#define MAX_CRTC 2
+
+struct drm_device;
+struct exynos_drm_overlay;
+struct drm_connector;
+
+/* this enumerates display type. */
+enum exynos_drm_output_type {
+ EXYNOS_DISPLAY_TYPE_NONE,
+ /* RGB or CPU Interface. */
+ EXYNOS_DISPLAY_TYPE_LCD,
+ /* HDMI Interface. */
+ EXYNOS_DISPLAY_TYPE_HDMI,
+};
+
+/*
+ * Exynos drm overlay ops structure.
+ *
+ * @mode_set: copy drm overlay info to hw specific overlay info.
+ * @commit: apply hardware specific overlay data to registers.
+ * @disable: disable hardware specific overlay.
+ */
+struct exynos_drm_overlay_ops {
+ void (*mode_set)(struct device *subdrv_dev,
+ struct exynos_drm_overlay *overlay);
+ void (*commit)(struct device *subdrv_dev);
+ void (*disable)(struct device *subdrv_dev);
+};
+
+/*
+ * Exynos drm common overlay structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed.
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed.
+ * - the unit is screen coordinates.
+ * @fb_width: width of a framebuffer.
+ * @fb_height: height of a framebuffer.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_width: window width to be displayed (hardware screen).
+ * @crtc_height: window height to be displayed (hardware screen).
+ * @mode_width: width of screen mode.
+ * @mode_height: height of screen mode.
+ * @refresh: refresh rate.
+ * @scan_flag: interlace or progressive way.
+ * (it could be DRM_MODE_FLAG_*)
+ * @bpp: pixel size (in bits).
+ * @paddr: bus (dma-accessible) physical memory address of this overlay;
+ * this memory is physically contiguous.
+ * @vaddr: virtual memory address of this overlay.
+ * @default_win: a window to be enabled.
+ * @color_key: color key on or off.
+ * @index_color: if using color key feature then this value would be used
+ * as index color.
+ * @local_path: in case of lcd type, local path mode on or off.
+ * @transparency: transparency on or off.
+ * @activated: activated or not.
+ *
+ * this structure is common to exynos SoC and its contents would be copied
+ * to hardware specific overlay info.
+ */
+struct exynos_drm_overlay {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_width;
+ unsigned int crtc_height;
+ unsigned int mode_width;
+ unsigned int mode_height;
+ unsigned int refresh;
+ unsigned int scan_flag;
+ unsigned int bpp;
+ unsigned int pitch;
+ dma_addr_t paddr;
+ void __iomem *vaddr;
+
+ bool default_win;
+ bool color_key;
+ unsigned int index_color;
+ bool local_path;
+ bool transparency;
+ bool activated;
+};
+
+/*
+ * Exynos DRM Display Structure.
+ * - this structure is common to analog tv, digital tv and lcd panel.
+ *
+ * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
+ * @is_connected: check whether the display is connected or not.
+ * @get_edid: get edid modes from display driver.
+ * @get_timing: get timing object from display driver.
+ * @check_timing: check if timing is valid or not.
+ * @power_on: turn the display device on or off.
+ */
+struct exynos_drm_display {
+ enum exynos_drm_output_type type;
+ bool (*is_connected)(struct device *dev);
+ int (*get_edid)(struct device *dev, struct drm_connector *connector,
+ u8 *edid, int len);
+ void *(*get_timing)(struct device *dev);
+ int (*check_timing)(struct device *dev, void *timing);
+ int (*power_on)(struct device *dev, int mode);
+};
+
+/*
+ * Exynos drm manager ops
+ *
+ * @mode_set: convert drm_display_mode to hw specific display mode and
+ * would be called by encoder->mode_set().
+ * @commit: set current hw specific display mode to hw.
+ * @enable_vblank: specific driver callback for enabling vblank interrupt.
+ * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ */
+struct exynos_drm_manager_ops {
+ void (*mode_set)(struct device *subdrv_dev, void *mode);
+ void (*commit)(struct device *subdrv_dev);
+ int (*enable_vblank)(struct device *subdrv_dev);
+ void (*disable_vblank)(struct device *subdrv_dev);
+};
+
+/*
+ * Exynos drm common manager structure.
+ *
+ * @dev: pointer to device object for subdrv device driver.
+ * sub drivers such as display controller or hdmi driver,
+ * have their own device object.
+ * @ops: pointer to manager callbacks for this exynos drm sub driver.
+ * these callbacks should be set by specific drivers such as the fimd
+ * or hdmi driver and are used to control hardware global registers.
+ * @overlay_ops: pointer to overlay callbacks for this exynos drm sub driver.
+ * these callbacks should be set by specific drivers such as the fimd
+ * or hdmi driver and are used to control hardware overlay registers.
+ * @display: pointer to display callbacks for this exynos drm sub driver.
+ * these callbacks should be set by specific drivers such as the fimd
+ * or hdmi driver and are used to control display devices such as
+ * analog tv, digital tv and lcd panel and also to get timing data for them.
+ */
+struct exynos_drm_manager {
+ struct device *dev;
+ int pipe;
+ struct exynos_drm_manager_ops *ops;
+ struct exynos_drm_overlay_ops *overlay_ops;
+ struct exynos_drm_display *display;
+};
+
+/*
+ * Exynos drm private structure.
+ */
+struct exynos_drm_private {
+ struct drm_fb_helper *fb_helper;
+
+ /* list head for new event to be added. */
+ struct list_head pageflip_event_list;
+
+ /*
+ * created crtc objects are stored in this array, which is used to
+ * determine which crtc requested the vblank.
+ */
+ struct drm_crtc *crtc[MAX_CRTC];
+};
+
+/*
+ * Exynos drm sub driver structure.
+ *
+ * @list: sub driver has its own list object to register to exynos drm driver.
+ * @drm_dev: pointer to drm_device and this pointer would be set
+ * when sub driver calls exynos_drm_subdrv_register().
+ * @probe: this callback would be called by exynos drm driver after
+ * subdrv is registered to it.
+ * @remove: this callback is used to release resources created
+ * by probe callback.
+ * @manager: each subdrv has its own manager to control its hardware
+ * appropriately; hardware drawing is accessed through this manager.
+ * @encoder: encoder object owned by this sub driver.
+ * @connector: connector object owned by this sub driver.
+ */
+struct exynos_drm_subdrv {
+ struct list_head list;
+ struct drm_device *drm_dev;
+
+ int (*probe)(struct drm_device *drm_dev, struct device *dev);
+ void (*remove)(struct drm_device *dev);
+
+ struct exynos_drm_manager manager;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+};
+
+/*
+ * this function calls the probe callbacks registered in the sub driver list,
+ * creates their encoders and connectors and then sets the drm_device object
+ * to the global one.
+ */
+int exynos_drm_device_register(struct drm_device *dev);
+/*
+ * this function calls the remove callbacks registered in the sub driver list
+ * and destroys their encoders and connectors.
+ */
+int exynos_drm_device_unregister(struct drm_device *dev);
+
+/*
+ * this function is called by sub drivers such as the display controller or
+ * hdmi driver to register their sub driver object with the exynos drm driver.
+ * when a sub driver is registered, its probe callback is called, its encoder
+ * and connector are created, and then the fb helper and drm mode group are
+ * re-initialized.
+ */
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
+
+/*
+ * this function removes the subdrv from the exynos drm driver's sub driver
+ * list; the fb helper and drm mode group are then re-initialized.
+ */
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
+
+#endif
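
For context, a sub driver is expected to fill in an exynos_drm_subdrv and hand it to exynos_drm_subdrv_register() from its own probe routine. The sketch below only illustrates that flow with hypothetical names and empty callback tables; the FIMD driver later in this patch follows the same pattern:

#include <linux/platform_device.h>
#include "exynos_drm_drv.h"

static struct exynos_drm_manager_ops example_manager_ops;
static struct exynos_drm_overlay_ops example_overlay_ops;
static struct exynos_drm_display example_display = {
	.type	= EXYNOS_DISPLAY_TYPE_LCD,
};

static int example_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	/* per-device setup, run once the exynos drm core adopts this subdrv */
	return 0;
}

static struct exynos_drm_subdrv example_subdrv = {
	.probe	= example_subdrv_probe,
};

static int example_platform_probe(struct platform_device *pdev)
{
	/* bind the manager to this device before handing it to the core */
	example_subdrv.manager.pipe		= -1;
	example_subdrv.manager.dev		= &pdev->dev;
	example_subdrv.manager.ops		= &example_manager_ops;
	example_subdrv.manager.overlay_ops	= &example_overlay_ops;
	example_subdrv.manager.display		= &example_display;

	return exynos_drm_subdrv_register(&example_subdrv);
}
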
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
new file mode 100644
index 00000000000..7cf6fa86a67
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -0,0 +1,271 @@
+/* exynos_drm_encoder.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+
+#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
+ drm_encoder)
+
+/*
+ * exynos specific encoder structure.
+ *
+ * @drm_encoder: encoder object.
+ * @manager: each encoder has its own manager to control its hardware;
+ * hardware drawing is accessed through this manager.
+ */
+struct exynos_drm_encoder {
+ struct drm_encoder drm_encoder;
+ struct exynos_drm_manager *manager;
+};
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+
+ DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ struct exynos_drm_display *display = manager->display;
+
+ if (display && display->power_on)
+ display->power_on(manager->dev, mode);
+ }
+ }
+}
+
+static bool
+exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* drm framework doesn't check NULL. */
+
+ return true;
+}
+
+static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev,
+ encoder->crtc);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mode = adjusted_mode;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ if (manager_ops && manager_ops->mode_set)
+ manager_ops->mode_set(manager->dev, mode);
+
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
+ }
+ }
+}
+
+static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* drm framework doesn't check NULL. */
+}
+
+static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
+{
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (manager_ops && manager_ops->commit)
+ manager_ops->commit(manager->dev);
+
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev);
+}
+
+static struct drm_crtc *
+exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+ return encoder->crtc;
+}
+
+static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
+ .dpms = exynos_drm_encoder_dpms,
+ .mode_fixup = exynos_drm_encoder_mode_fixup,
+ .mode_set = exynos_drm_encoder_mode_set,
+ .prepare = exynos_drm_encoder_prepare,
+ .commit = exynos_drm_encoder_commit,
+ .get_crtc = exynos_drm_encoder_get_crtc,
+};
+
+static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct exynos_drm_encoder *exynos_encoder =
+ to_exynos_encoder(encoder);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_encoder->manager->pipe = -1;
+
+ drm_encoder_cleanup(encoder);
+ encoder->dev->mode_config.num_encoder--;
+ kfree(exynos_encoder);
+}
+
+static struct drm_encoder_funcs exynos_encoder_funcs = {
+ .destroy = exynos_drm_encoder_destroy,
+};
+
+struct drm_encoder *
+exynos_drm_encoder_create(struct drm_device *dev,
+ struct exynos_drm_manager *manager,
+ unsigned int possible_crtcs)
+{
+ struct drm_encoder *encoder;
+ struct exynos_drm_encoder *exynos_encoder;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!manager || !possible_crtcs)
+ return NULL;
+
+ if (!manager->dev)
+ return NULL;
+
+ exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
+ if (!exynos_encoder) {
+ DRM_ERROR("failed to allocate encoder\n");
+ return NULL;
+ }
+
+ exynos_encoder->manager = manager;
+ encoder = &exynos_encoder->drm_encoder;
+ encoder->possible_crtcs = possible_crtcs;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
+
+ DRM_DEBUG_KMS("encoder has been created\n");
+
+ return encoder;
+}
+
+struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder)
+{
+ return to_exynos_encoder(encoder)->manager;
+}
+
+void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
+ void (*fn)(struct drm_encoder *, void *))
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ fn(encoder, data);
+ }
+}
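
A minimal sketch (not part of the patch; the wrapper name is hypothetical) of how the crtc side is expected to drive the helper above, for example to enable the vblank interrupt on every encoder attached to a crtc, with the pipe number travelling through the opaque data pointer:

static void example_crtc_enable_vblank(struct drm_crtc *crtc, int pipe)
{
	/* exynos_drm_enable_vblank() reads the pipe back via *(int *)data */
	exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_enable_vblank);
}
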
+
+void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ int crtc = *(int *)data;
+
+ if (manager->pipe == -1)
+ manager->pipe = crtc;
+
+ if (manager_ops->enable_vblank)
+ manager_ops->enable_vblank(manager->dev);
+}
+
+void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ int crtc = *(int *)data;
+
+ if (manager->pipe == -1)
+ manager->pipe = crtc;
+
+ if (manager_ops->disable_vblank)
+ manager_ops->disable_vblank(manager->dev);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+ overlay_ops->commit(manager->dev);
+}
+
+void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ struct exynos_drm_overlay *overlay = data;
+
+ overlay_ops->mode_set(manager->dev, overlay);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Encoder Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
new file mode 100644
index 00000000000..5ecd645d06a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_ENCODER_H_
+#define _EXYNOS_DRM_ENCODER_H_
+
+struct exynos_drm_manager;
+
+struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
+ struct exynos_drm_manager *mgr,
+ unsigned int possible_crtcs);
+struct exynos_drm_manager *
+exynos_drm_get_manager(struct drm_encoder *encoder);
+void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
+ void (*fn)(struct drm_encoder *, void *));
+void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
new file mode 100644
index 00000000000..48d29cfd524
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -0,0 +1,265 @@
+/* exynos_drm_fb.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_fb.h"
+#include "exynos_drm_buf.h"
+#include "exynos_drm_gem.h"
+
+#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
+
+/*
+ * exynos specific framebuffer structure.
+ *
+ * @fb: drm framebuffer object.
+ * @exynos_gem_obj: exynos specific gem object containing a gem object.
+ * @entry: pointer to the exynos drm buffer entry object.
+ * - contains only the information about the physically contiguous memory
+ * region allocated at default framebuffer creation.
+ */
+struct exynos_drm_fb {
+ struct drm_framebuffer fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_buf_entry *entry;
+};
+
+static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ drm_framebuffer_cleanup(fb);
+
+ /*
+ * the default framebuffer has no gem object, so its buffer
+ * should be released here.
+ */
+ if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
+ exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+
+ kfree(exynos_fb);
+ exynos_fb = NULL;
+}
+
+static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return drm_gem_handle_create(file_priv,
+ &exynos_fb->exynos_gem_obj->base, handle);
+}
+
+static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO */
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
+ .destroy = exynos_drm_fb_destroy,
+ .create_handle = exynos_drm_fb_create_handle,
+ .dirty = exynos_drm_fb_dirty,
+};
+
+static struct drm_framebuffer *
+exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct exynos_drm_fb *exynos_fb;
+ struct drm_framebuffer *fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+ struct drm_gem_object *obj;
+ unsigned int size;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mode_cmd->pitch = max(mode_cmd->pitch,
+ mode_cmd->width * (mode_cmd->bpp >> 3));
+
+ DRM_LOG_KMS("drm fb create(%dx%d)\n",
+ mode_cmd->width, mode_cmd->height);
+
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb) {
+ DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fb = &exynos_fb->fb;
+ ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("failed to initialize framebuffer.\n");
+ goto err_init;
+ }
+
+ DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+
+ size = mode_cmd->pitch * mode_cmd->height;
+
+ /*
+ * mode_cmd->handle can be NULL at boot time or on user request.
+ * if it is NULL, a new buffer or gem object is allocated.
+ */
+ if (!mode_cmd->handle) {
+ if (!file_priv) {
+ struct exynos_drm_buf_entry *entry;
+
+ /*
+ * if file_priv is NULL, only a buffer is allocated and
+ * it is used for the default framebuffer.
+ */
+ entry = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry);
+ goto err_buffer;
+ }
+
+ exynos_fb->entry = entry;
+
+ DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
+ (unsigned long)entry->paddr, size);
+
+ goto out;
+ } else {
+ exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
+ size,
+ &mode_cmd->handle);
+ if (IS_ERR(exynos_gem_obj)) {
+ ret = PTR_ERR(exynos_gem_obj);
+ goto err_buffer;
+ }
+ }
+ } else {
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -ENOENT;
+ goto err_buffer;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ /*
+ * at this point an exynos_gem_obj was obtained from either a handle
+ * lookup or a new creation; only the default framebuffer path above
+ * skips this, so its exynos_gem_obj stays NULL and it owns only a
+ * buffer object, not a gem object.
+ */
+ exynos_fb->entry = exynos_gem_obj->entry;
+
+ DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+ (unsigned long)exynos_fb->entry->paddr, size,
+ (unsigned int)&exynos_gem_obj->base);
+
+out:
+ exynos_fb->exynos_gem_obj = exynos_gem_obj;
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ return fb;
+
+err_buffer:
+ drm_framebuffer_cleanup(fb);
+
+err_init:
+ kfree(exynos_fb);
+
+ return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+}
+
+struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ struct exynos_drm_buf_entry *entry;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ entry = exynos_fb->entry;
+ if (!entry)
+ return NULL;
+
+ DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+ (unsigned long)entry->vaddr,
+ (unsigned long)entry->paddr);
+
+ return entry;
+}
+
+static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+ .fb_create = exynos_drm_fb_create,
+};
+
+void exynos_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * set the default maximum width and height (4096x4096).
+ * these values are used to check the framebuffer size limit
+ * in drm_mode_addfb().
+ */
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+
+ dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
+}
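
For orientation, the top-level exynos drm driver (not part of this hunk) is the expected caller of the helper above. A minimal, hypothetical sketch of the load-time wiring assumed here:

static int example_drm_load(struct drm_device *dev, unsigned long flags)
{
	/*
	 * core mode config setup, then install the exynos fb_create hook
	 * and the 4096x4096 size limits configured above
	 */
	drm_mode_config_init(dev);
	exynos_drm_mode_config_init(dev);

	return 0;
}
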
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM FB Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
new file mode 100644
index 00000000000..eb35931d302
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FB_H_
+#define _EXYNOS_DRM_FB_H
+
+struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd *mode_cmd);
+
+void exynos_drm_mode_config_init(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
new file mode 100644
index 00000000000..1f4b3d1a771
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -0,0 +1,456 @@
+/* exynos_drm_fbdev.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_buf.h"
+
+#define MAX_CONNECTOR 4
+#define PREFERRED_BPP 32
+
+#define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\
+ drm_fb_helper)
+
+struct exynos_drm_fbdev {
+ struct drm_fb_helper drm_fb_helper;
+ struct drm_framebuffer *fb;
+};
+
+static int exynos_drm_fbdev_set_par(struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+
+ switch (var->bits_per_pixel) {
+ case 32:
+ case 24:
+ case 18:
+ case 16:
+ case 12:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+ case 1:
+ info->fix.visual = FB_VISUAL_MONO01;
+ break;
+ default:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ }
+
+ info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8;
+
+ return drm_fb_helper_set_par(info);
+}
+
+
+static struct fb_ops exynos_drm_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = exynos_drm_fbdev_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+ struct drm_framebuffer *fb,
+ unsigned int fb_width,
+ unsigned int fb_height)
+{
+ struct fb_info *fbi = helper->fbdev;
+ struct drm_device *dev = helper->dev;
+ struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
+ struct exynos_drm_buf_entry *entry;
+ unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+ unsigned long offset;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_fb->fb = fb;
+
+ drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+
+ entry = exynos_drm_fb_get_buf(fb);
+ if (!entry) {
+ DRM_LOG_KMS("entry is null.\n");
+ return -EFAULT;
+ }
+
+ offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
+ offset += fbi->var.yoffset * fb->pitch;
+
+ dev->mode_config.fb_base = entry->paddr;
+ fbi->screen_base = entry->vaddr + offset;
+ fbi->fix.smem_start = entry->paddr + offset;
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+}
+
+static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct fb_info *fbi;
+ struct drm_mode_fb_cmd mode_cmd = { 0 };
+ struct platform_device *pdev = dev->platformdev;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbi = framebuffer_alloc(0, &pdev->dev);
+ if (!fbi) {
+ DRM_ERROR("failed to allocate fb info.\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
+ if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+ DRM_ERROR("failed to create drm framebuffer.\n");
+ ret = PTR_ERR(exynos_fbdev->fb);
+ goto out;
+ }
+
+ helper->fb = exynos_fbdev->fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &exynos_drm_fb_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("failed to allocate cmap.\n");
+ goto out;
+ }
+
+ ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
+ sizes->fb_height);
+ if (ret < 0)
+ fb_dealloc_cmap(&fbi->cmap);
+
+/*
+ * on failure, all resources allocated above are released by
+ * drm_mode_config_cleanup(), since drm_load() is called before
+ * any specific driver such as the fimd or hdmi driver.
+ */
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static bool
+exynos_drm_fbdev_is_samefb(struct drm_framebuffer *fb,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ if (fb->width != sizes->surface_width)
+ return false;
+ if (fb->height != sizes->surface_height)
+ return false;
+ if (fb->bits_per_pixel != sizes->surface_bpp)
+ return false;
+ if (fb->depth != sizes->surface_depth)
+ return false;
+
+ return true;
+}
+
+static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = helper->dev;
+ struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+ struct drm_framebuffer *fb = exynos_fbdev->fb;
+ struct drm_mode_fb_cmd mode_cmd = { 0 };
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (helper->fb != fb) {
+ DRM_ERROR("drm framebuffer is different\n");
+ return -EINVAL;
+ }
+
+ if (exynos_drm_fbdev_is_samefb(fb, sizes))
+ return 0;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
+
+ if (fb->funcs->destroy)
+ fb->funcs->destroy(fb);
+
+ exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
+ if (IS_ERR(exynos_fbdev->fb)) {
+ DRM_ERROR("failed to allocate fb.\n");
+ return PTR_ERR(exynos_fbdev->fb);
+ }
+
+ helper->fb = exynos_fbdev->fb;
+ return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
+ sizes->fb_height);
+}
+
+static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!helper->fb) {
+ ret = exynos_drm_fbdev_create(helper, sizes);
+ if (ret < 0) {
+ DRM_ERROR("failed to create fbdev.\n");
+ return ret;
+ }
+
+ /*
+ * fb_helper expects a return value greater than 0 on success,
+ * because register_framebuffer() then needs to be called.
+ */
+ ret = 1;
+ } else {
+ ret = exynos_drm_fbdev_recreate(helper, sizes);
+ if (ret < 0) {
+ DRM_ERROR("failed to reconfigure fbdev\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
+ .fb_probe = exynos_drm_fbdev_probe,
+};
+
+int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+ struct exynos_drm_fbdev *fbdev;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+ unsigned int num_crtc;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ return 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ DRM_ERROR("failed to allocate drm fbdev.\n");
+ return -ENOMEM;
+ }
+
+ private->fb_helper = helper = &fbdev->drm_fb_helper;
+ helper->funcs = &exynos_drm_fb_helper_funcs;
+
+ num_crtc = dev->mode_config.num_crtc;
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize drm fb helper.\n");
+ goto err_init;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm_fb_helper_connector.\n");
+ goto err_setup;
+
+ }
+
+ ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+ if (ret < 0) {
+ DRM_ERROR("failed to set up hw configuration.\n");
+ goto err_setup;
+ }
+
+ return 0;
+
+err_setup:
+ drm_fb_helper_fini(helper);
+
+err_init:
+ private->fb_helper = NULL;
+ kfree(fbdev);
+
+ return ret;
+}
+
+static void exynos_drm_fbdev_destroy(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper)
+{
+ struct drm_framebuffer *fb;
+
+ /* release drm framebuffer and real buffer */
+ if (fb_helper->fb && fb_helper->fb->funcs) {
+ fb = fb_helper->fb;
+ if (fb && fb->funcs->destroy)
+ fb->funcs->destroy(fb);
+ }
+
+ /* release linux framebuffer */
+ if (fb_helper->fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fb_helper->fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ drm_fb_helper_fini(fb_helper);
+}
+
+void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_fbdev *fbdev;
+
+ if (!private || !private->fb_helper)
+ return;
+
+ fbdev = to_exynos_fbdev(private->fb_helper);
+
+ exynos_drm_fbdev_destroy(dev, private->fb_helper);
+ kfree(fbdev);
+ private->fb_helper = NULL;
+}
+
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ if (!private || !private->fb_helper)
+ return;
+
+ drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+}
+
+int exynos_drm_fbdev_reinit(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ if (!private)
+ return -EINVAL;
+
+ /*
+ * if all sub drivers have been unloaded then num_connector is 0,
+ * so at this point the framebuffers should also be destroyed.
+ */
+ if (!dev->mode_config.num_connector) {
+ exynos_drm_fbdev_fini(dev);
+ return 0;
+ }
+
+ fb_helper = private->fb_helper;
+
+ if (fb_helper) {
+ drm_fb_helper_fini(fb_helper);
+
+ ret = drm_fb_helper_init(dev, fb_helper,
+ dev->mode_config.num_crtc, MAX_CONNECTOR);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize drm fb helper\n");
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_ERROR("failed to add fb helper to connectors\n");
+ goto err;
+ }
+
+ ret = drm_fb_helper_initial_config(fb_helper, PREFERRED_BPP);
+ if (ret < 0) {
+ DRM_ERROR("failed to set up hw configuration.\n");
+ goto err;
+ }
+ } else {
+ /*
+ * if drm_load() failed when it was called prior to the specific
+ * drivers, fb_helper must be NULL, so this function should be
+ * called again to re-initialize and re-configure the fb helper.
+ * in other words, this path means the function has been called
+ * by a specific driver.
+ */
+ ret = exynos_drm_fbdev_init(dev);
+ }
+
+ return ret;
+
+err:
+ /*
+ * if drm_load() failed when it was called prior to the specific
+ * drivers, fb_helper must be NULL, so check it.
+ */
+ if (fb_helper)
+ drm_fb_helper_fini(fb_helper);
+
+ return ret;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM FBDEV Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
new file mode 100644
index 00000000000..ccfce8a1a45
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FBDEV_H_
+#define _EXYNOS_DRM_FBDEV_H_
+
+int exynos_drm_fbdev_init(struct drm_device *dev);
+int exynos_drm_fbdev_reinit(struct drm_device *dev);
+void exynos_drm_fbdev_fini(struct drm_device *dev);
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
new file mode 100644
index 00000000000..4659c88cdd9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -0,0 +1,811 @@
+/* exynos_drm_fimd.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <drm/exynos_drm.h>
+#include <plat/regs-fb-v4.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_crtc.h"
+
+/*
+ * FIMD stands for Fully Interactive Mobile Display. As a display
+ * controller, it transfers content drawn in memory to an LCD panel
+ * through display interfaces such as the RGB or CPU interface.
+ */
+
+/* position control register for hardware window 0, 2 ~ 4.*/
+#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
+#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
+/* size control register for hardware window 0. */
+#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08)
+/* alpha control register for hardware window 1 ~ 4. */
+#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16)
+/* size control register for hardware window 1 ~ 4. */
+#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
+
+#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
+#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
+
+/* color key control register for hardware window 1 ~ 4. */
+#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8))
+/* color key value register for hardware window 1 ~ 4. */
+#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8))
+
+/* FIMD has totally five hardware windows. */
+#define WINDOWS_NR 5
+
+#define get_fimd_context(dev) platform_get_drvdata(to_platform_device(dev))
+
+struct fimd_win_data {
+ unsigned int offset_x;
+ unsigned int offset_y;
+ unsigned int ovl_width;
+ unsigned int ovl_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int bpp;
+ dma_addr_t paddr;
+ void __iomem *vaddr;
+ unsigned int buf_offsize;
+ unsigned int line_size; /* bytes */
+};
+
+struct fimd_context {
+ struct exynos_drm_subdrv subdrv;
+ int irq;
+ struct drm_crtc *crtc;
+ struct clk *bus_clk;
+ struct clk *lcd_clk;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct fimd_win_data win_data[WINDOWS_NR];
+ unsigned int clkdiv;
+ unsigned int default_win;
+ unsigned long irq_flags;
+ u32 vidcon0;
+ u32 vidcon1;
+
+ struct fb_videomode *timing;
+};
+
+static bool fimd_display_is_connected(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO. */
+
+ return true;
+}
+
+static void *fimd_get_timing(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return ctx->timing;
+}
+
+static int fimd_check_timing(struct device *dev, void *timing)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO. */
+
+ return 0;
+}
+
+static int fimd_display_power_on(struct device *dev, int mode)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO. */
+
+ return 0;
+}
+
+static struct exynos_drm_display fimd_display = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .is_connected = fimd_display_is_connected,
+ .get_timing = fimd_get_timing,
+ .check_timing = fimd_check_timing,
+ .power_on = fimd_display_power_on,
+};
+
+static void fimd_commit(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fb_videomode *timing = ctx->timing;
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* setup polarity values from machine code. */
+ writel(ctx->vidcon1, ctx->regs + VIDCON1);
+
+ /* setup vertical timing values. */
+ val = VIDTCON0_VBPD(timing->upper_margin - 1) |
+ VIDTCON0_VFPD(timing->lower_margin - 1) |
+ VIDTCON0_VSPW(timing->vsync_len - 1);
+ writel(val, ctx->regs + VIDTCON0);
+
+ /* setup horizontal timing values. */
+ val = VIDTCON1_HBPD(timing->left_margin - 1) |
+ VIDTCON1_HFPD(timing->right_margin - 1) |
+ VIDTCON1_HSPW(timing->hsync_len - 1);
+ writel(val, ctx->regs + VIDTCON1);
+
+ /* setup horizontal and vertical display size. */
+ val = VIDTCON2_LINEVAL(timing->yres - 1) |
+ VIDTCON2_HOZVAL(timing->xres - 1);
+ writel(val, ctx->regs + VIDTCON2);
+
+ /* setup clock source, clock divider, enable dma. */
+ val = ctx->vidcon0;
+ val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+ if (ctx->clkdiv > 1)
+ val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
+ else
+ val &= ~VIDCON0_CLKDIR; /* 1:1 clock */
+
+ /*
+ * register fields with the '_F' suffix are updated at vsync
+ * (i.e. when dma starts)
+ */
+ val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
+ writel(val, ctx->regs + VIDCON0);
+}
+
+static int fimd_enable_vblank(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!test_and_set_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+ val |= VIDINTCON0_INT_ENABLE;
+ val |= VIDINTCON0_INT_FRAME;
+
+ val &= ~VIDINTCON0_FRAMESEL0_MASK;
+ val |= VIDINTCON0_FRAMESEL0_VSYNC;
+ val &= ~VIDINTCON0_FRAMESEL1_MASK;
+ val |= VIDINTCON0_FRAMESEL1_NONE;
+
+ writel(val, ctx->regs + VIDINTCON0);
+ }
+
+ return 0;
+}
+
+static void fimd_disable_vblank(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+ val &= ~VIDINTCON0_INT_FRAME;
+ val &= ~VIDINTCON0_INT_ENABLE;
+
+ writel(val, ctx->regs + VIDINTCON0);
+ }
+}
+
+static struct exynos_drm_manager_ops fimd_manager_ops = {
+ .commit = fimd_commit,
+ .enable_vblank = fimd_enable_vblank,
+ .disable_vblank = fimd_disable_vblank,
+};
+
+static void fimd_win_mode_set(struct device *dev,
+ struct exynos_drm_overlay *overlay)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ unsigned long offset;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!overlay) {
+ dev_err(dev, "overlay is NULL\n");
+ return;
+ }
+
+ offset = overlay->fb_x * (overlay->bpp >> 3);
+ offset += overlay->fb_y * overlay->pitch;
+
+ DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+
+ win_data = &ctx->win_data[ctx->default_win];
+
+ win_data->offset_x = overlay->crtc_x;
+ win_data->offset_y = overlay->crtc_y;
+ win_data->ovl_width = overlay->crtc_width;
+ win_data->ovl_height = overlay->crtc_height;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+ win_data->paddr = overlay->paddr + offset;
+ win_data->vaddr = overlay->vaddr + offset;
+ win_data->bpp = overlay->bpp;
+ win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+ (overlay->bpp >> 3);
+ win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
+
+ DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+ win_data->offset_x, win_data->offset_y);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+ DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
+ (unsigned long)win_data->paddr,
+ (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+ overlay->fb_width, overlay->crtc_width);
+}
+
+static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data = &ctx->win_data[win];
+ unsigned long val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ val = WINCONx_ENWIN;
+
+ switch (win_data->bpp) {
+ case 1:
+ val |= WINCON0_BPPMODE_1BPP;
+ val |= WINCONx_BITSWP;
+ val |= WINCONx_BURSTLEN_4WORD;
+ break;
+ case 2:
+ val |= WINCON0_BPPMODE_2BPP;
+ val |= WINCONx_BITSWP;
+ val |= WINCONx_BURSTLEN_8WORD;
+ break;
+ case 4:
+ val |= WINCON0_BPPMODE_4BPP;
+ val |= WINCONx_BITSWP;
+ val |= WINCONx_BURSTLEN_8WORD;
+ break;
+ case 8:
+ val |= WINCON0_BPPMODE_8BPP_PALETTE;
+ val |= WINCONx_BURSTLEN_8WORD;
+ val |= WINCONx_BYTSWP;
+ break;
+ case 16:
+ val |= WINCON0_BPPMODE_16BPP_565;
+ val |= WINCONx_HAWSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case 24:
+ val |= WINCON0_BPPMODE_24BPP_888;
+ val |= WINCONx_WSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case 32:
+ val |= WINCON1_BPPMODE_28BPP_A4888
+ | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+ val |= WINCONx_WSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ default:
+ DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
+
+ val |= WINCON0_BPPMODE_24BPP_888;
+ val |= WINCONx_WSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ }
+
+ DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+
+ writel(val, ctx->regs + WINCON(win));
+}
+
+static void fimd_win_set_colkey(struct device *dev, unsigned int win)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ unsigned int keycon0 = 0, keycon1 = 0;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
+ WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
+
+ keycon1 = WxKEYCON1_COLVAL(0xffffffff);
+
+ writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
+ writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+}
+
+static void fimd_win_commit(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int win = ctx->default_win;
+ unsigned long val, alpha, size;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (win < 0 || win > WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ /*
+ * the SHADOWCON register controls when shadowed registers take effect.
+ *
+ * for example, if dma starts while only the width value of a register
+ * pair has been set, the fimd hardware could malfunction. with window
+ * protection enabled, the register fields with the '_F' suffix are not
+ * updated at vsync but only once the window is unprotected again.
+ */
+
+ /* protect windows */
+ val = readl(ctx->regs + SHADOWCON);
+ val |= SHADOWCON_WINx_PROTECT(win);
+ writel(val, ctx->regs + SHADOWCON);
+
+ /* buffer start address */
+ val = win_data->paddr;
+ writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
+
+ /* buffer end address */
+ size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+ val = win_data->paddr + size;
+ writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
+
+ DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
+ (unsigned long)win_data->paddr, val, size);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+
+ /* buffer size */
+ val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+ writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
+
+ /* OSD position */
+ val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+ writel(val, ctx->regs + VIDOSD_A(win));
+
+ val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
+ win_data->ovl_width - 1) |
+ VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
+ win_data->ovl_height - 1);
+ writel(val, ctx->regs + VIDOSD_B(win));
+
+ DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ win_data->offset_x, win_data->offset_y,
+ win_data->offset_x + win_data->ovl_width - 1,
+ win_data->offset_y + win_data->ovl_height - 1);
+
+ /* hardware window 0 doesn't support alpha channel. */
+ if (win != 0) {
+ /* OSD alpha */
+ alpha = VIDISD14C_ALPHA1_R(0xf) |
+ VIDISD14C_ALPHA1_G(0xf) |
+ VIDISD14C_ALPHA1_B(0xf);
+
+ writel(alpha, ctx->regs + VIDOSD_C(win));
+ }
+
+ /* OSD size */
+ if (win != 3 && win != 4) {
+ u32 offset = VIDOSD_D(win);
+ if (win == 0)
+ offset = VIDOSD_C_SIZE_W0;
+ val = win_data->ovl_width * win_data->ovl_height;
+ writel(val, ctx->regs + offset);
+
+ DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
+ }
+
+ fimd_win_set_pixfmt(dev, win);
+
+ /* hardware window 0 doesn't support color key. */
+ if (win != 0)
+ fimd_win_set_colkey(dev, win);
+
+ /* Enable DMA channel and unprotect windows */
+ val = readl(ctx->regs + SHADOWCON);
+ val |= SHADOWCON_CHx_ENABLE(win);
+ val &= ~SHADOWCON_WINx_PROTECT(win);
+ writel(val, ctx->regs + SHADOWCON);
+}
+
+static void fimd_win_disable(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int win = ctx->default_win;
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (win < 0 || win > WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ /* protect windows */
+ val = readl(ctx->regs + SHADOWCON);
+ val |= SHADOWCON_WINx_PROTECT(win);
+ writel(val, ctx->regs + SHADOWCON);
+
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val &= ~WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
+ /* unprotect windows */
+ val = readl(ctx->regs + SHADOWCON);
+ val &= ~SHADOWCON_CHx_ENABLE(win);
+ val &= ~SHADOWCON_WINx_PROTECT(win);
+ writel(val, ctx->regs + SHADOWCON);
+}
+
+static struct exynos_drm_overlay_ops fimd_overlay_ops = {
+ .mode_set = fimd_win_mode_set,
+ .commit = fimd_win_commit,
+ .disable = fimd_win_disable,
+};
+
+static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+ struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ bool is_checked = false;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+ /* if the event's pipe isn't the same as this crtc, ignore it. */
+ if (crtc != e->pipe)
+ continue;
+
+ is_checked = true;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ if (is_checked)
+ drm_vblank_put(drm_dev, crtc);
+
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
+{
+ struct fimd_context *ctx = (struct fimd_context *)dev_id;
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct drm_device *drm_dev = subdrv->drm_dev;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+ u32 val;
+
+ val = readl(ctx->regs + VIDINTCON1);
+
+ if (val & VIDINTCON1_INT_FRAME)
+ /* VSYNC interrupt */
+ writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+
+ drm_handle_vblank(drm_dev, manager->pipe);
+ fimd_finish_pageflip(drm_dev, manager->pipe);
+
+ return IRQ_HANDLED;
+}
+
+static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * enable drm irq mode.
+ * - with irq_enabled = 1, we can use the vblank feature.
+ *
+ * note that we don't use the drm irq handler but the specific
+ * driver's own handler instead, because the drm framework
+ * supports only one irq handler.
+ */
+ drm_dev->irq_enabled = 1;
+
+ /*
+ * with vblank_disable_allowed = 1, the vblank interrupt is disabled
+ * by the drm timer once the current process gives up ownership of
+ * the vblank event (i.e. drm_vblank_put() has been called).
+ */
+ drm_dev->vblank_disable_allowed = 1;
+
+ return 0;
+}
+
+static void fimd_subdrv_remove(struct drm_device *drm_dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO. */
+}
+
+static int fimd_calc_clkdiv(struct fimd_context *ctx,
+ struct fb_videomode *timing)
+{
+ unsigned long clk = clk_get_rate(ctx->lcd_clk);
+ u32 retrace;
+ u32 clkdiv;
+ u32 best_framerate = 0;
+ u32 framerate;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ retrace = timing->left_margin + timing->hsync_len +
+ timing->right_margin + timing->xres;
+ retrace *= timing->upper_margin + timing->vsync_len +
+ timing->lower_margin + timing->yres;
+
+ /* default framerate is 60Hz */
+ if (!timing->refresh)
+ timing->refresh = 60;
+
+ clk /= retrace;
+
+ for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
+ int tmp;
+
+ /* get best framerate */
+ framerate = clk / clkdiv;
+ tmp = timing->refresh - framerate;
+ if (tmp < 0) {
+ best_framerate = framerate;
+ continue;
+ } else {
+ if (!best_framerate)
+ best_framerate = framerate;
+ else if (tmp < (best_framerate - framerate))
+ best_framerate = framerate;
+ break;
+ }
+ }
+
+ return clkdiv;
+}
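
As a worked example with hypothetical numbers: with sclk_fimd at 160 MHz and a 1056x525 total raster (554,400 pixel clocks per frame), the divider-1 rate is 160,000,000 / 554,400 ≈ 288 frames/s. For a requested refresh of 60 Hz the loop walks clkdiv = 1..4 (288, 144, 96 and 72 fps, all above the target) and stops at clkdiv = 5, the first divider whose rate (57 fps) does not exceed the refresh, so fimd_probe() ends up programming a 160 MHz / 5 = 32 MHz pixel clock.
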
+
+static void fimd_clear_win(struct fimd_context *ctx, int win)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ writel(0, ctx->regs + WINCON(win));
+ writel(0, ctx->regs + VIDOSD_A(win));
+ writel(0, ctx->regs + VIDOSD_B(win));
+ writel(0, ctx->regs + VIDOSD_C(win));
+
+ if (win == 1 || win == 2)
+ writel(0, ctx->regs + VIDOSD_D(win));
+
+ val = readl(ctx->regs + SHADOWCON);
+ val &= ~SHADOWCON_WINx_PROTECT(win);
+ writel(val, ctx->regs + SHADOWCON);
+}
+
+static int __devinit fimd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimd_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ struct exynos_drm_fimd_pdata *pdata;
+ struct fb_videomode *timing;
+ struct resource *res;
+ int win;
+ int ret = -EINVAL;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data specified\n");
+ return -EINVAL;
+ }
+
+ timing = &pdata->timing;
+ if (!timing) {
+ dev_err(dev, "timing is null.\n");
+ return -EINVAL;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->bus_clk = clk_get(dev, "fimd");
+ if (IS_ERR(ctx->bus_clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ ret = PTR_ERR(ctx->bus_clk);
+ goto err_clk_get;
+ }
+
+ clk_enable(ctx->bus_clk);
+
+ ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+ if (IS_ERR(ctx->lcd_clk)) {
+ dev_err(dev, "failed to get lcd clock\n");
+ ret = PTR_ERR(ctx->lcd_clk);
+ goto err_bus_clk;
+ }
+
+ clk_enable(ctx->lcd_clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to find registers\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to claim register region\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = ioremap(res->start, resource_size(res));
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers\n");
+ ret = -ENXIO;
+ goto err_req_region_io;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "irq request failed.\n");
+ goto err_req_region_irq;
+ }
+
+ ctx->irq = res->start;
+
+ for (win = 0; win < WINDOWS_NR; win++)
+ fimd_clear_win(ctx, win);
+
+ ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed.\n");
+ goto err_req_irq;
+ }
+
+ ctx->clkdiv = fimd_calc_clkdiv(ctx, timing);
+ ctx->vidcon0 = pdata->vidcon0;
+ ctx->vidcon1 = pdata->vidcon1;
+ ctx->default_win = pdata->default_win;
+ ctx->timing = timing;
+
+ timing->pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
+
+ DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
+ timing->pixclock, ctx->clkdiv);
+
+ subdrv = &ctx->subdrv;
+
+ subdrv->probe = fimd_subdrv_probe;
+ subdrv->remove = fimd_subdrv_remove;
+ subdrv->manager.pipe = -1;
+ subdrv->manager.ops = &fimd_manager_ops;
+ subdrv->manager.overlay_ops = &fimd_overlay_ops;
+ subdrv->manager.display = &fimd_display;
+ subdrv->manager.dev = dev;
+
+ platform_set_drvdata(pdev, ctx);
+ exynos_drm_subdrv_register(subdrv);
+
+ return 0;
+
+err_req_irq:
+err_req_region_irq:
+ iounmap(ctx->regs);
+
+err_req_region_io:
+ release_resource(ctx->regs_res);
+ kfree(ctx->regs_res);
+
+err_clk:
+ clk_disable(ctx->lcd_clk);
+ clk_put(ctx->lcd_clk);
+
+err_bus_clk:
+ clk_disable(ctx->bus_clk);
+ clk_put(ctx->bus_clk);
+
+err_clk_get:
+ kfree(ctx);
+ return ret;
+}
+
+static int __devexit fimd_remove(struct platform_device *pdev)
+{
+ struct fimd_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+ clk_disable(ctx->lcd_clk);
+ clk_disable(ctx->bus_clk);
+ clk_put(ctx->lcd_clk);
+ clk_put(ctx->bus_clk);
+
+ iounmap(ctx->regs);
+ release_resource(ctx->regs_res);
+ kfree(ctx->regs_res);
+ free_irq(ctx->irq, ctx);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+static struct platform_driver fimd_driver = {
+ .probe = fimd_probe,
+ .remove = __devexit_p(fimd_remove),
+ .driver = {
+ .name = "exynos4-fb",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init fimd_init(void)
+{
+ return platform_driver_register(&fimd_driver);
+}
+
+static void __exit fimd_exit(void)
+{
+ platform_driver_unregister(&fimd_driver);
+}
+
+module_init(fimd_init);
+module_exit(fimd_exit);
+
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM FIMD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
new file mode 100644
index 00000000000..a8e7a88906e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -0,0 +1,415 @@
+/* exynos_drm_gem.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_buf.h"
+
+static unsigned int convert_to_vm_err_msg(int msg)
+{
+ unsigned int out_msg;
+
+ switch (msg) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ out_msg = VM_FAULT_NOPAGE;
+ break;
+
+ case -ENOMEM:
+ out_msg = VM_FAULT_OOM;
+ break;
+
+ default:
+ out_msg = VM_FAULT_SIGBUS;
+ break;
+ }
+
+ return out_msg;
+}
+
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
+ struct drm_device *dev, unsigned int size,
+ unsigned int *handle)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_buf_entry *entry;
+ struct drm_gem_object *obj;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ size = roundup(size, PAGE_SIZE);
+
+ exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
+ if (!exynos_gem_obj) {
+ DRM_ERROR("failed to allocate exynos gem object.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate the new buffer object and memory region. */
+ entry = exynos_drm_buf_create(dev, size);
+ if (!entry) {
+ kfree(exynos_gem_obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ exynos_gem_obj->entry = entry;
+
+ obj = &exynos_gem_obj->base;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret < 0) {
+ DRM_ERROR("failed to initailize gem object.\n");
+ goto err_obj_init;
+ }
+
+ DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret < 0) {
+ DRM_ERROR("failed to allocate mmap offset.\n");
+ goto err_create_mmap_offset;
+ }
+
+ /*
+ * allocate an id in the idr table where the obj is registered,
+ * and the handle holds that id for userspace to use.
+ */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ goto err_handle_create;
+
+ DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return exynos_gem_obj;
+
+err_handle_create:
+ drm_gem_free_mmap_offset(obj);
+
+err_create_mmap_offset:
+ drm_gem_object_release(obj);
+
+err_obj_init:
+ exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
+
+ kfree(exynos_gem_obj);
+
+ return ERR_PTR(ret);
+}
+
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_gem_create *args = data;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+
+ DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+
+ exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
+ &args->handle);
+ if (IS_ERR(exynos_gem_obj))
+ return PTR_ERR(exynos_gem_obj);
+
+ return 0;
+}
+
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_gem_map_off *args = data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
+ args->handle, (unsigned long)args->offset);
+
+ if (!(dev->driver->driver_features & DRIVER_GEM)) {
+ DRM_ERROR("does not support GEM.\n");
+ return -ENODEV;
+ }
+
+ return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
+}
+
+static int exynos_drm_gem_mmap_buffer(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = filp->private_data;
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct exynos_drm_buf_entry *entry;
+ unsigned long pfn, vm_size;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ vma->vm_flags |= (VM_IO | VM_RESERVED);
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_file = filp;
+
+ vm_size = vma->vm_end - vma->vm_start;
+ /*
+ * an entry contains information on the physically contiguous memory
+ * allocated at user request or at framebuffer creation.
+ */
+ entry = exynos_gem_obj->entry;
+
+ /* check if user-requested size is valid. */
+ if (vm_size > entry->size)
+ return -EINVAL;
+
+ /*
+ * get the page frame number of the physical memory to be
+ * mapped to user space.
+ */
+ pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+
+ DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+
+ if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
+ vma->vm_page_prot)) {
+ DRM_ERROR("failed to remap pfn range.\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static const struct file_operations exynos_drm_gem_fops = {
+ .mmap = exynos_drm_gem_mmap_buffer,
+};
+
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ unsigned int addr;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!(dev->driver->driver_features & DRIVER_GEM)) {
+ DRM_ERROR("does not support GEM.\n");
+ return -ENODEV;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return -EINVAL;
+ }
+
+ obj->filp->f_op = &exynos_drm_gem_fops;
+ obj->filp->private_data = obj;
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+ up_write(&current->mm->mmap_sem);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ if (IS_ERR((void *)addr))
+ return PTR_ERR((void *)addr);
+
+ args->mapped = addr;
+
+ DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
+
+ return 0;
+}
+
+int exynos_drm_gem_init_object(struct drm_gem_object *obj)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return 0;
+}
+
+void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ DRM_DEBUG_KMS("handle count = %d\n",
+ atomic_read(&gem_obj->handle_count));
+
+ if (gem_obj->map_list.map)
+ drm_gem_free_mmap_offset(gem_obj);
+
+ /* release file pointer of the gem object. */
+ drm_gem_object_release(gem_obj);
+
+ exynos_gem_obj = to_exynos_gem_obj(gem_obj);
+
+ exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+
+ kfree(exynos_gem_obj);
+}
+
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * allocate memory to be used for the framebuffer.
+ * - this callback is called by a user application
+ * with the DRM_IOCTL_MODE_CREATE_DUMB command.
+ */
+
+ args->pitch = args->width * args->bpp >> 3;
+ args->size = args->pitch * args->height;
+
+ exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
+ &args->handle);
+ if (IS_ERR(exynos_gem_obj))
+ return PTR_ERR(exynos_gem_obj);
+
+ return 0;
+}
+
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle, uint64_t *offset)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * get the offset of the memory allocated for a drm framebuffer.
+ * - this callback is called by a user application
+ * with the DRM_IOCTL_MODE_MAP_DUMB command.
+ */
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ *offset = get_gem_mmap_offset(&exynos_gem_obj->base);
+
+ drm_gem_object_unreference(obj);
+
+ DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *dev = obj->dev;
+ unsigned long pfn;
+ pgoff_t page_offset;
+ int ret;
+
+ page_offset = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ mutex_lock(&dev->struct_mutex);
+
+ pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+
+ ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return convert_to_vm_err_msg(ret);
+}
+
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* set vm_area_struct. */
+ ret = drm_gem_mmap(filp, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ return ret;
+}
+
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev, unsigned int handle)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * obj->refcount and obj->handle_count are decreased and,
+ * once both reach 0, exynos_drm_gem_free_object()
+ * is called through the free callback to release resources.
+ */
+ ret = drm_gem_handle_delete(file_priv, handle);
+ if (ret < 0) {
+ DRM_ERROR("failed to delete drm_gem_handle.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
new file mode 100644
index 00000000000..e5fc0148277
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -0,0 +1,107 @@
+/* exynos_drm_gem.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GEM_H_
+#define _EXYNOS_DRM_GEM_H_
+
+#define to_exynos_gem_obj(x) container_of(x,\
+ struct exynos_drm_gem_obj, base)
+
+/*
+ * exynos drm buffer structure.
+ *
+ * @base: a gem object.
+ * - a new handle to this gem object is created
+ * by drm_gem_handle_create().
+ * @entry: pointer to exynos drm buffer entry object.
+ * - contains the information on the physically
+ * contiguous memory region allocated at user request
+ * or at framebuffer creation.
+ *
+ * P.S. this object is transferred to user space as kms_bo.handle so
+ * that user space can access the buffer through kms_bo.handle.
+ */
+struct exynos_drm_gem_obj {
+ struct drm_gem_object base;
+ struct exynos_drm_buf_entry *entry;
+};
+
+/* create a new buffer and get a new gem handle. */
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
+ struct drm_device *dev, unsigned int size,
+ unsigned int *handle);
+
+/*
+ * request gem object creation and buffer allocation with the size
+ * calculated from framebuffer information such as width,
+ * height and bpp.
+ */
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* get buffer offset to map to user space. */
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* unmap a buffer from user space. */
+int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* initialize gem object. */
+int exynos_drm_gem_init_object(struct drm_gem_object *obj);
+
+/* free gem object. */
+void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for drm framebuffer. */
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev, struct drm_mode_create_dumb *args);
+
+/* map memory region for drm framebuffer to user space. */
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle, uint64_t *offset);
+
+/* page fault handler that maps the faulting (virtual) address to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/*
+ * mmap the physically contiguous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* set vm_flags; vm attributes can also be changed here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/*
+ * destroy the allocated memory region.
+ * - the gem handle and the physical memory region pointed to by the
+ * gem object are released by drm_gem_handle_delete().
+ */
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev, unsigned int handle);
+
+#endif
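These hooks are consumed by the DRM core through struct drm_driver; the actual wiring lives in exynos_drm_drv.c elsewhere in this series, but its general shape, sketched here with a hypothetical foo_ prefix and only fields that exist in struct drm_driver of this kernel generation, is roughly:

static struct vm_operations_struct foo_gem_vm_ops = {
	.fault = exynos_drm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver foo_drm_driver = {
	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
	/* GEM object lifetime */
	.gem_init_object	= exynos_drm_gem_init_object,
	.gem_free_object	= exynos_drm_gem_free_object,
	/* dumb-buffer interface used by generic KMS clients */
	.dumb_create		= exynos_drm_gem_dumb_create,
	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
	.dumb_destroy		= exynos_drm_gem_dumb_destroy,
	/* faults on the GEM mmap offset land in exynos_drm_gem_fault() */
	.gem_vm_ops		= &foo_gem_vm_ops,
	/* exynos_drm_gem_mmap() is installed as the fops .mmap handler,
	 * and the create/map_offset/mmap ioctls go into the ioctl table. */
};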
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 08792a740f1..07d55df6623 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/module.h>
+
#include "ch7006_priv.h"
/* DRM encoder functions */
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 0b6773290c0..b7d45ab4ba6 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "drm_encoder_slave.h"
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 6f98d059f68..d4266bdf6fb 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -30,6 +30,8 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index d3e8c540f77..1ca799a1e1f 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -227,7 +227,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
"slave %d.\n",
- val, adapter->name,dvo->slave_addr);
+ val, adapter->name, dvo->slave_addr);
goto fail;
}
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 7eaa94e4ff0..4a036600e80 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -111,7 +111,7 @@ static char *ch7xxx_get_id(uint8_t vid)
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
@@ -303,7 +303,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
uint8_t val;
- if ((i % 8) == 0 )
+ if ((i % 8) == 0)
DRM_LOG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
DRM_LOG_KMS("%02X ", val);
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index a12ed9414cc..04f2893d5e3 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -344,8 +344,8 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
(adjusted_mode->vdisplay - 1)) >> 2;
- ivch_write (dvo, VR42, x_ratio);
- ivch_write (dvo, VR41, y_ratio);
+ ivch_write(dvo, VR42, x_ratio);
+ ivch_write(dvo, VR41, y_ratio);
} else {
vr01 &= ~VR01_PANEL_FIT_ENABLE;
vr40 &= ~VR40_CLOCK_GATING_ENABLE;
@@ -410,7 +410,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
}
}
-struct intel_dvo_dev_ops ivch_ops= {
+struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
.mode_valid = ivch_mode_valid,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e4b4091df94..a0b13a6f619 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -104,7 +104,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
- struct sil164_priv *sil= dvo->dev_priv;
+ struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 8ab2855bb54..aa2cd3ec54a 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -56,7 +56,7 @@
#define TFP410_CTL_2_MDI (1<<0)
#define TFP410_CTL_3 0x0A
-#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
#define TFP410_CTL_3_DK (1<<5)
#define TFP410_CTL_3_DKEN (1<<4)
#define TFP410_CTL_3_CTL_MASK (0x7<<1)
@@ -225,12 +225,12 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- /* As long as the basics are set up, since we don't have clock dependencies
- * in the mode setup, we can just leave the registers alone and everything
- * will work fine.
- */
- /* don't do much */
- return;
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+ return;
}
/* set the tfp410 power state */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3c395a59da3..4f40f1ce1d8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -29,6 +29,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
@@ -98,12 +99,12 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
- switch (obj->tiling_mode) {
- default:
- case I915_TILING_NONE: return " ";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
- }
+ switch (obj->tiling_mode) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
}
static const char *cache_level_str(int type)
@@ -217,7 +218,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
++mappable_count; \
} \
} \
-} while(0)
+} while (0)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
@@ -1293,12 +1294,12 @@ i915_wedged_read(struct file *filp,
char buf[80];
int len;
- len = snprintf(buf, sizeof (buf),
+ len = snprintf(buf, sizeof(buf),
"wedged : %d\n",
atomic_read(&dev_priv->mm.wedged));
- if (len > sizeof (buf))
- len = sizeof (buf);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
@@ -1314,7 +1315,7 @@ i915_wedged_write(struct file *filp,
int val = 1;
if (cnt > 0) {
- if (cnt > sizeof (buf) - 1)
+ if (cnt > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, ubuf, cnt))
@@ -1357,11 +1358,11 @@ i915_max_freq_read(struct file *filp,
char buf[80];
int len;
- len = snprintf(buf, sizeof (buf),
+ len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->max_delay * 50);
- if (len > sizeof (buf))
- len = sizeof (buf);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
@@ -1378,7 +1379,7 @@ i915_max_freq_write(struct file *filp,
int val = 1;
if (cnt > 0) {
- if (cnt > sizeof (buf) - 1)
+ if (cnt > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, ubuf, cnt))
@@ -1432,12 +1433,12 @@ i915_cache_sharing_read(struct file *filp,
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
mutex_unlock(&dev_priv->dev->struct_mutex);
- len = snprintf(buf, sizeof (buf),
+ len = snprintf(buf, sizeof(buf),
"%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
GEN6_MBC_SNPCR_SHIFT);
- if (len > sizeof (buf))
- len = sizeof (buf);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
@@ -1455,7 +1456,7 @@ i915_cache_sharing_write(struct file *filp,
int val = 1;
if (cnt > 0) {
- if (cnt > sizeof (buf) - 1)
+ if (cnt > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, ubuf, cnt))
@@ -1505,7 +1506,10 @@ drm_add_fake_info_node(struct drm_minor *minor,
node->minor = minor;
node->dent = ent;
node->info_ent = (void *) key;
- list_add(&node->list, &minor->debugfs_nodes.list);
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8a3942c4f09..a9533c54c93 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,6 +41,7 @@
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <acpi/video.h>
static void i915_write_hws_pga(struct drm_device *dev)
@@ -884,7 +885,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
return -1;
@@ -1730,10 +1731,10 @@ static DEFINE_SPINLOCK(mchdev_lock);
*/
unsigned long i915_read_mch_val(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
@@ -1744,9 +1745,9 @@ unsigned long i915_read_mch_val(void)
ret = chipset_val + graphics_val;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
@@ -1757,10 +1758,10 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
*/
bool i915_gpu_raise(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -1771,9 +1772,9 @@ bool i915_gpu_raise(void)
dev_priv->max_delay--;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
@@ -1785,10 +1786,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
*/
bool i915_gpu_lower(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -1799,9 +1800,9 @@ bool i915_gpu_lower(void)
dev_priv->max_delay++;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
@@ -1812,10 +1813,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = false;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
@@ -1823,9 +1824,9 @@ bool i915_gpu_busy(void)
ret = dev_priv->busy;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
@@ -1837,10 +1838,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
*/
bool i915_gpu_turbo_disable(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -1853,9 +1854,9 @@ bool i915_gpu_turbo_disable(void)
ret = false;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
@@ -1948,7 +1949,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- dev_priv->mm.gtt_mapping =
+ dev_priv->mm.gtt_mapping =
io_mapping_create_wc(dev->agp->base, agp_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
@@ -2035,7 +2036,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
- if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ if (IS_IVYBRIDGE(dev))
+ dev_priv->num_pipe = 3;
+ else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f07e4252b70..e9c2cfe45da 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,6 +35,7 @@
#include "intel_drv.h"
#include <linux/console.h>
+#include <linux/module.h>
#include "drm_crtc_helper.h"
static int i915_modeset __read_mostly = -1;
@@ -79,11 +80,11 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
-unsigned int i915_panel_use_ssc __read_mostly = 1;
+unsigned int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
- "(default: true)");
+ "(default: auto from VBT)");
int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
@@ -294,7 +295,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-void intel_detect_pch (struct drm_device *dev)
+void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pch;
@@ -377,7 +378,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) {
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
@@ -471,6 +472,9 @@ static int i915_drm_thaw(struct drm_device *dev)
error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
+
drm_mode_config_reset(dev);
drm_irq_install(dev);
@@ -770,12 +774,12 @@ static int i915_pm_poweroff(struct device *dev)
}
static const struct dev_pm_ops i915_pm_ops = {
- .suspend = i915_pm_suspend,
- .resume = i915_pm_resume,
- .freeze = i915_pm_freeze,
- .thaw = i915_pm_thaw,
- .poweroff = i915_pm_poweroff,
- .restore = i915_pm_resume,
+ .suspend = i915_pm_suspend,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore = i915_pm_resume,
};
static struct vm_operations_struct i915_gem_vm_ops = {
@@ -785,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = {
};
static struct drm_driver driver = {
- /* don't use mtrr's here, the Xserver or user space app should
- * deal with them for intel hardware.
+ /* Don't use MTRRs here; the Xserver or userspace app should
+ * deal with them for Intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
@@ -895,3 +899,43 @@ module_exit(i915_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ (((dev_priv)->info->gen >= 6) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
+
+#define __i915_read(x, y) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ u##x val = 0; \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ gen6_gt_force_wake_get(dev_priv); \
+ val = read##y(dev_priv->regs + reg); \
+ gen6_gt_force_wake_put(dev_priv); \
+ } else { \
+ val = read##y(dev_priv->regs + reg); \
+ } \
+ trace_i915_reg_rw(false, reg, val, sizeof(val)); \
+ return val; \
+}
+
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
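Spelled out, the 32-bit instance generated by __i915_read(32, l) above is equivalent to the following; the point of the change visible in the i915_drv.h hunk further down is that this force-wake dance now lives in one out-of-line copy per access width instead of being inlined at every call site:

u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		/* hold the GT awake across the MMIO read on gen6+ */
		gen6_gt_force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
	trace_i915_reg_rw(false, reg, val, sizeof(val));
	return val;
}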
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7916bd97d5c..06a37f4fd74 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -139,7 +139,6 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 i2c_pin;
- u8 i2c_speed;
u8 ddc_pin;
};
@@ -209,6 +208,8 @@ struct drm_i915_display_funcs {
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*write_eld)(struct drm_connector *connector,
+ struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
@@ -226,26 +227,26 @@ struct drm_i915_display_funcs {
struct intel_device_info {
u8 gen;
- u8 is_mobile : 1;
- u8 is_i85x : 1;
- u8 is_i915g : 1;
- u8 is_i945gm : 1;
- u8 is_g33 : 1;
- u8 need_gfx_hws : 1;
- u8 is_g4x : 1;
- u8 is_pineview : 1;
- u8 is_broadwater : 1;
- u8 is_crestline : 1;
- u8 is_ivybridge : 1;
- u8 has_fbc : 1;
- u8 has_pipe_cxsr : 1;
- u8 has_hotplug : 1;
- u8 cursor_needs_physical : 1;
- u8 has_overlay : 1;
- u8 overlay_needs_physical : 1;
- u8 supports_tv : 1;
- u8 has_bsd_ring : 1;
- u8 has_blt_ring : 1;
+ u8 is_mobile:1;
+ u8 is_i85x:1;
+ u8 is_i915g:1;
+ u8 is_i945gm:1;
+ u8 is_g33:1;
+ u8 need_gfx_hws:1;
+ u8 is_g4x:1;
+ u8 is_pineview:1;
+ u8 is_broadwater:1;
+ u8 is_crestline:1;
+ u8 is_ivybridge:1;
+ u8 has_fbc:1;
+ u8 has_pipe_cxsr:1;
+ u8 has_hotplug:1;
+ u8 cursor_needs_physical:1;
+ u8 has_overlay:1;
+ u8 overlay_needs_physical:1;
+ u8 supports_tv:1;
+ u8 has_bsd_ring:1;
+ u8 has_blt_ring:1;
};
enum no_fbc_reason {
@@ -347,7 +348,6 @@ typedef struct drm_i915_private {
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
- struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -357,6 +357,7 @@ typedef struct drm_i915_private {
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
int lvds_ssc_freq;
struct {
int rate;
@@ -672,10 +673,9 @@ typedef struct drm_i915_private {
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
- int panel_t3, panel_t12;
- struct drm_crtc *plane_to_crtc_mapping[2];
- struct drm_crtc *pipe_to_crtc_mapping[2];
+ struct drm_crtc *plane_to_crtc_mapping[3];
+ struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
bool flip_pending_is_done;
@@ -759,19 +759,19 @@ struct drm_i915_gem_object {
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
*/
- unsigned int active : 1;
+ unsigned int active:1;
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
- unsigned int dirty : 1;
+ unsigned int dirty:1;
/**
* This is set if the object has been written to since the last
* GPU flush.
*/
- unsigned int pending_gpu_write : 1;
+ unsigned int pending_gpu_write:1;
/**
* Fence register bits (if any) for this object. Will be set
@@ -780,18 +780,18 @@ struct drm_i915_gem_object {
*
* Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
- signed int fence_reg : 5;
+ signed int fence_reg:5;
/**
* Advice: are the backing pages purgeable?
*/
- unsigned int madv : 2;
+ unsigned int madv:2;
/**
* Current tiling mode for the object.
*/
- unsigned int tiling_mode : 2;
- unsigned int tiling_changed : 1;
+ unsigned int tiling_mode:2;
+ unsigned int tiling_changed:1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -802,22 +802,22 @@ struct drm_i915_gem_object {
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
- unsigned int pin_count : 4;
+ unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/**
* Is the object at the current location in the gtt mappable and
* fenceable? Used to avoid costly recalculations.
*/
- unsigned int map_and_fenceable : 1;
+ unsigned int map_and_fenceable:1;
/**
* Whether the current gtt mapping needs to be mappable (and isn't just
* mappable by accident). Track pin and fault separate for a more
* accurate mappable working set.
*/
- unsigned int fault_mappable : 1;
- unsigned int pin_mappable : 1;
+ unsigned int fault_mappable:1;
+ unsigned int pin_mappable:1;
/*
* Is the GPU currently using a fence to access this buffer,
@@ -1056,7 +1056,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-void intel_enable_asle (struct drm_device *dev);
+void intel_enable_asle(struct drm_device *dev);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1146,7 +1146,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
+ uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1301,10 +1301,11 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void intel_detect_pch (struct drm_device *dev);
-extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
/* overlay */
#ifdef CONFIG_DEBUG_FS
@@ -1354,18 +1355,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
((reg) != FORCEWAKE))
#define __i915_read(x, y) \
-static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
- u##x val = 0; \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- gen6_gt_force_wake_get(dev_priv); \
- val = read##y(dev_priv->regs + reg); \
- gen6_gt_force_wake_put(dev_priv); \
- } else { \
- val = read##y(dev_priv->regs + reg); \
- } \
- trace_i915_reg_rw(false, reg, val, sizeof(val)); \
- return val; \
-}
+ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
__i915_read(8, b)
__i915_read(16, w)
@@ -1374,13 +1364,8 @@ __i915_read(64, q)
#undef __i915_read
#define __i915_write(x, y) \
-static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- trace_i915_reg_rw(true, reg, val, sizeof(val)); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- write##y(val, dev_priv->regs + reg); \
-}
+ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
+
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a546a71fb06..d18b07adcff 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -179,7 +179,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
- args->aper_available_size = args->aper_size -pinned;
+ args->aper_available_size = args->aper_size - pinned;
return 0;
}
@@ -195,6 +195,8 @@ i915_gem_create(struct drm_file *file,
u32 handle;
size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
/* Allocate the new object */
obj = i915_gem_alloc_object(dev, size);
@@ -800,11 +802,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if (IS_ERR(page))
return PTR_ERR(page);
- vaddr = kmap_atomic(page, KM_USER0);
+ vaddr = kmap_atomic(page);
ret = __copy_from_user_inatomic(vaddr + page_offset,
user_data,
page_length);
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
set_page_dirty(page);
mark_page_accessed(page);
@@ -1265,74 +1267,6 @@ out:
}
/**
- * i915_gem_create_mmap_offset - create a fake mmap offset for an object
- * @obj: obj in question
- *
- * GEM memory mapping works by handing back to userspace a fake mmap offset
- * it can use in a subsequent mmap(2) call. The DRM core code then looks
- * up the object based on the offset and sets up the various memory mapping
- * structures.
- *
- * This routine allocates and attaches a fake offset for @obj.
- */
-static int
-i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret = 0;
-
- /* Set the object up for mmap'ing */
- list = &obj->base.map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->base.size;
- map->handle = obj;
-
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->base.size / PAGE_SIZE,
- 0, 0);
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n",
- obj->base.name);
- ret = -ENOSPC;
- goto out_free_list;
- }
-
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->base.size / PAGE_SIZE,
- 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
-
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto out_free_mm;
- }
-
- return 0;
-
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
- list->map = NULL;
-
- return ret;
-}
-
-/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
*
@@ -1360,19 +1294,6 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
-static void
-i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list = &obj->base.map_list;
-
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
- drm_mm_put_block(list->file_offset_node);
- kfree(list->map);
- list->map = NULL;
-}
-
static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
@@ -1475,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
ret = -E2BIG;
- goto unlock;
+ goto out;
}
if (obj->madv != I915_MADV_WILLNEED) {
@@ -1485,7 +1406,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
}
if (!obj->base.map_list.map) {
- ret = i915_gem_create_mmap_offset(obj);
+ ret = drm_gem_create_mmap_offset(&obj->base);
if (ret)
goto out;
}
@@ -1557,7 +1478,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
obj->pages[i] = page;
}
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
return 0;
@@ -1579,7 +1500,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED)
@@ -1856,7 +1777,7 @@ void i915_gem_reset(struct drm_device *dev)
* lost bo to the inactive list.
*/
while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj= list_first_entry(&dev_priv->mm.flushing_list,
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
struct drm_i915_gem_object,
mm_list);
@@ -1922,7 +1843,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
- obj= list_first_entry(&ring->active_list,
+ obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
@@ -2272,14 +2193,8 @@ int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- bool lists_empty;
int ret, i;
- lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- if (lists_empty)
- return 0;
-
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
ret = i915_ring_idle(&dev_priv->ring[i]);
@@ -2882,7 +2797,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
fenceable =
obj->gtt_space->size == fence_size &&
- (obj->gtt_space->start & (fence_alignment -1)) == 0;
+ (obj->gtt_space->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -3598,7 +3513,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request)
- ret = i915_add_request(obj->ring, NULL,request);
+ ret = i915_add_request(obj->ring, NULL, request);
else
ret = -ENOMEM;
}
@@ -3623,7 +3538,7 @@ int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- return i915_gem_ring_throttle(dev, file_priv);
+ return i915_gem_ring_throttle(dev, file_priv);
}
int
@@ -3752,7 +3667,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
trace_i915_gem_object_destroy(obj);
if (obj->base.map_list.map)
- i915_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(&obj->base);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8da1899bd24..cc93cac242d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ i915_verify_lists(struct drm_device *dev)
break;
} else if (!obj->active ||
(obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
- list_empty(&obj->gpu_write_list)){
+ list_empty(&obj->gpu_write_list)) {
DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
obj,
obj->active,
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj->pages[page], KM_USER0);
+ backing_map = kmap_atomic(obj->pages[page]);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -181,13 +181,13 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
}
}
}
- kunmap_atomic(backing_map, KM_USER0);
+ kunmap_atomic(backing_map);
backing_map = NULL;
}
out:
if (backing_map != NULL)
- kunmap_atomic(backing_map, KM_USER0);
+ kunmap_atomic(backing_map);
iounmap(gtt_mapping);
/* give syslog time to catch up */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index da05a2692a7..ead5d00f91b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -122,7 +122,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (! obj->base.write_domain || obj->pin_count)
+ if (!obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj, &unwind_list))
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4934cf84c32..3693e83a97f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -784,7 +784,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
}
from->sync_seqno[idx] = seqno;
- return intel_ring_sync(to, from, seqno - 1);
+
+ return to->sync_to(to, from, seqno - 1);
}
static int
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7a709cd8d54..6042c5e6d27 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -49,6 +49,28 @@ static unsigned int cache_level_to_agp_type(struct drm_device *dev,
}
}
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+ bool ret = dev_priv->mm.interruptible;
+
+ if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
+ dev_priv->mm.interruptible = false;
+ if (i915_gpu_idle(dev_priv->dev)) {
+ DRM_ERROR("Couldn't idle GPU\n");
+ /* Wait a bit, in hopes it avoids the hang */
+ udelay(10);
+ }
+ }
+
+ return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+ if (unlikely(dev_priv->mm.gtt->do_idle_maps))
+ dev_priv->mm.interruptible = interruptible;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -117,6 +139,12 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible;
+
+ interruptible = do_idling(dev_priv);
+
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
@@ -124,4 +152,6 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
}
+
+ undo_idling(dev_priv, interruptible);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 99c4faa59d8..31d334d9d9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -92,7 +92,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_INFO(dev)->gen >= 6) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
@@ -440,14 +443,9 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
- return;
-
if (obj->bit_17 == NULL)
return;
@@ -464,14 +462,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
- return;
-
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9cbb0cd8f46..9ee2729fe5c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -383,6 +383,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->pm_iir;
dev_priv->pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
+ I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
if (!pm_iir)
@@ -420,7 +421,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
* an *extremely* unlikely race with gen6_rps_enable() that is prevented
* by holding struct_mutex for the duration of the write.
*/
- I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -536,8 +536,9 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}
@@ -649,8 +650,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}
@@ -711,7 +713,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
page_count = src->base.size / PAGE_SIZE;
- dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
@@ -1493,7 +1495,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1541,7 +1543,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -1777,6 +1779,26 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
+/*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
+
+static void ironlake_enable_pch_hotplug(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug;
+
+ hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+ hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+ hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+ hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+ I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1839,6 +1861,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
+ ironlake_enable_pch_hotplug(dev);
+
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT);
@@ -1896,6 +1920,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
+ ironlake_enable_pch_hotplug(dev);
+
return 0;
}
@@ -2020,6 +2046,10 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
static void i915_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
index 83b7b81bb2b..cc8f6d49cf2 100644
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -202,7 +202,7 @@ static int init_heap(struct mem_block **heap, int start, int size)
blocks->next = blocks->prev = *heap;
memset(*heap, 0, sizeof(**heap));
- (*heap)->file_priv = (struct drm_file *) - 1;
+ (*heap)->file_priv = (struct drm_file *) -1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
@@ -359,19 +359,19 @@ int i915_mem_init_heap(struct drm_device *dev, void *data,
return init_heap(heap, initheap->start, initheap->size);
}
-int i915_mem_destroy_heap( struct drm_device *dev, void *data,
- struct drm_file *file_priv )
+int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_mem_destroy_heap_t *destroyheap = data;
struct mem_block **heap;
- if ( !dev_priv ) {
- DRM_ERROR( "called with no initialization\n" );
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- heap = get_heap( dev_priv, destroyheap->region );
+ heap = get_heap(dev_priv, destroyheap->region);
if (!heap) {
DRM_ERROR("get_heap failed");
return -EFAULT;
@@ -382,6 +382,6 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
return -EFAULT;
}
- i915_mem_takedown( heap );
+ i915_mem_takedown(heap);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 542453f7498..5a09416e611 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -156,7 +156,7 @@
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
-#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
#define MI_OVERLAY_OFF (0x2<<21)
@@ -194,6 +194,13 @@
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_RV (2<<16)
+#define MI_SEMAPHORE_SYNC_RB (0<<16)
+#define MI_SEMAPHORE_SYNC_VR (0<<16)
+#define MI_SEMAPHORE_SYNC_VB (2<<16)
+#define MI_SEMAPHORE_SYNC_BR (2<<16)
+#define MI_SEMAPHORE_SYNC_BV (0<<16)
+#define MI_SEMAPHORE_SYNC_INVALID (1<<0)
/*
* 3D instructions used by the kernel
*/
@@ -235,16 +242,22 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
-#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
-#define PIPE_CONTROL_QW_WRITE (1<<14)
-#define PIPE_CONTROL_DEPTH_STALL (1<<13)
-#define PIPE_CONTROL_WC_FLUSH (1<<12)
-#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
-#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
-#define PIPE_CONTROL_ISP_DIS (1<<9)
-#define PIPE_CONTROL_NOTIFY (1<<8)
+#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
+#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
+#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
+#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
+#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
+#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
+#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
-#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
/*
@@ -296,6 +309,12 @@
#define RING_CTL(base) ((base)+0x3c)
#define RING_SYNC_0(base) ((base)+0x40)
#define RING_SYNC_1(base) ((base)+0x44)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -470,7 +489,7 @@
/* Enables non-sequential data reads through arbiter
*/
-#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
+#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
/* Disable FSB snooping of cacheable write cycles from binner/render
* command stream
@@ -626,7 +645,7 @@
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
-#define ILK_PABSTRETCH_DIS (1<<21)
+#define ILK_PABSTRETCH_DIS (1<<21)
/*
@@ -2358,7 +2377,7 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
@@ -2416,6 +2435,7 @@
#define WM0_PIPE_CURSOR_MASK (0x1f)
#define WM0_PIPEB_ILK 0x45104
+#define WM0_PIPEC_IVB 0x45200
#define WM1_LP_ILK 0x45108
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
@@ -2554,10 +2574,18 @@
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
+#define _CURBCNTR_IVB 0x71080
+#define _CURBBASE_IVB 0x71084
+#define _CURBPOS_IVB 0x71088
+
#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
+#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
+#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
+#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+
/* Display A control */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -2903,12 +2931,13 @@
#define SDEIER 0xc400c
/* digital port hotplug */
-#define PCH_PORT_HOTPLUG 0xc4030
+#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
+#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_NO_DETECT (0)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
@@ -2917,6 +2946,7 @@
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
+#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_NO_DETECT (0)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
@@ -2925,6 +2955,7 @@
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
+#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_NO_DETECT (0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
@@ -2945,15 +2976,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
+#define PCH_DPLL(pipe) ((pipe) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
-#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
+#define PCH_FP0(pipe) ((pipe) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pipe) ((pipe) == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -3167,6 +3198,7 @@
#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
/* both Tx and Rx */
+#define FDI_COMPOSITE_SYNC (1<<11)
#define FDI_LINK_TRAIN_AUTO (1<<10)
#define FDI_SCRAMBLING_ENABLE (0<<7)
#define FDI_SCRAMBLING_DISABLE (1<<7)
@@ -3308,15 +3340,35 @@
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
+#define PANEL_UNLOCK_MASK (0xffff << 16)
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define PCH_PP_ON_DELAYS 0xc7208
+#define PANEL_PORT_SELECT_MASK (3 << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_DPA (1 << 30)
#define EDP_PANEL (1 << 30)
+#define PANEL_PORT_SELECT_DPC (2 << 30)
+#define PANEL_PORT_SELECT_DPD (3 << 30)
+#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
#define PCH_PP_DIVISOR 0xc7210
+#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
@@ -3470,4 +3522,29 @@
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
+#define G4X_AUD_VID_DID 0x62020
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
+
+#define G4X_AUD_CNTL_ST 0x620B4
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
+#define G4X_HDMIW_HDMIEDID 0x6210C
+
+#define GEN5_HDMIW_HDMIEDID_A 0xE2050
+#define GEN5_AUD_CNTL_ST_A 0xE20B4
+#define GEN5_ELD_BUFFER_SIZE (0x1f << 10)
+#define GEN5_ELD_ADDRESS (0x1f << 5)
+#define GEN5_ELD_ACK (1 << 4)
+#define GEN5_AUD_CNTL_ST2 0xE20C0
+#define GEN5_ELD_VALIDB (1 << 0)
+#define GEN5_CP_READYB (1 << 1)
+
+#define GEN7_HDMIW_HDMIEDID_A 0xE5050
+#define GEN7_AUD_CNTRL_ST_A 0xE50B4
+#define GEN7_AUD_CNTRL_ST2 0xE50C0
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f10742359ec..f8f602d7665 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,7 +60,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
else
array = dev_priv->save_palette_b;
- for(i = 0; i < 256; i++)
+ for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
@@ -82,7 +82,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
else
array = dev_priv->save_palette_b;
- for(i = 0; i < 256; i++)
+ for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
@@ -887,10 +887,10 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
/* Cache mode state */
- I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+ I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
- I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index d623fefbfac..dac7bba4d9d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -385,29 +385,29 @@ TRACE_EVENT(i915_flip_complete,
);
TRACE_EVENT(i915_reg_rw,
- TP_PROTO(bool write, u32 reg, u64 val, int len),
-
- TP_ARGS(write, reg, val, len),
-
- TP_STRUCT__entry(
- __field(u64, val)
- __field(u32, reg)
- __field(u16, write)
- __field(u16, len)
- ),
-
- TP_fast_assign(
- __entry->val = (u64)val;
- __entry->reg = reg;
- __entry->write = write;
- __entry->len = len;
- ),
-
- TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
- __entry->write ? "write" : "read",
- __entry->reg, __entry->len,
- (u32)(__entry->val & 0xffffffff),
- (u32)(__entry->val >> 32))
+ TP_PROTO(bool write, u32 reg, u64 val, int len),
+
+ TP_ARGS(write, reg, val, len),
+
+ TP_STRUCT__entry(
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->val = (u64)val;
+ __entry->reg = reg;
+ __entry->write = write;
+ __entry->len = len;
+ ),
+
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
);
#endif /* _I915_TRACE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 2cb8e0b9f1e..cb912106d1a 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -64,7 +64,7 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
case ACPI_TYPE_BUFFER:
if (obj->buffer.length == 4) {
- result =(obj->buffer.pointer[0] |
+ result = (obj->buffer.pointer[0] |
(obj->buffer.pointer[1] << 8) |
(obj->buffer.pointer[2] << 16) |
(obj->buffer.pointer[3] << 24));
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 61abef8a811..63880e2e5cf 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1,5 +1,5 @@
/*
- * Copyright Â© 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -309,6 +309,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_use_ssc = general->enable_ssc;
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
+ dev_priv->display_clock_mode = general->display_clock_mode;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->int_tv_support,
+ dev_priv->int_crt_support,
+ dev_priv->lvds_use_ssc,
+ dev_priv->lvds_ssc_freq,
+ dev_priv->display_clock_mode);
}
}
@@ -381,7 +388,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
if (p_child->dvo_port != DEVICE_PORT_DVOB &&
p_child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
@@ -396,15 +403,13 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
- p_mapping->i2c_speed = p_child->i2c_speed;
p_mapping->initialized = 1;
- DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
p_mapping->slave_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
- p_mapping->i2c_pin,
- p_mapping->i2c_speed);
+ p_mapping->i2c_pin);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@@ -564,7 +569,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
count++;
}
if (!count) {
- DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
@@ -610,7 +615,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
- DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+ DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
/* eDP data */
dev_priv->edp.bpp = 18;
@@ -639,7 +644,7 @@ intel_parse_bios(struct drm_device *dev)
if (dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
if (memcmp(vbt->signature, "$VBT", 4) == 0) {
- DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
vbt->signature);
bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
} else
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5f8e4edcbbb..8af3735e27c 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright Â© 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -120,7 +120,9 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
- u8 rsvd8:3; /* finish byte */
+ u8 rsvd7:1;
+ u8 display_clock_mode:1;
+ u8 rsvd8:1; /* finish byte */
/* bits 3 */
u8 disable_smooth_vision:1;
@@ -133,7 +135,10 @@ struct bdb_general_features {
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
- u8 rsvd11:6; /* finish byte */
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
} __attribute__((packed));
/* pre-915 */
@@ -197,8 +202,7 @@ struct bdb_general_features {
struct child_device_config {
u16 handle;
u16 device_type;
- u8 i2c_speed;
- u8 rsvd[9];
+ u8 device_id[10]; /* ascii string */
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
@@ -240,7 +244,7 @@ struct bdb_general_definitions {
* And the device num is related with the size of general definition
* block. It is obtained by using the following formula:
* number = (block_size - sizeof(bdb_general_definitions))/
- * sizeof(child_device_config);
+ * sizeof(child_device_config);
*/
struct child_device_config devices[0];
} __attribute__((packed));
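[Editor's note: a minimal, self-contained sketch -- not part of the patch -- showing how the count formula in the comment above is typically applied when walking the VBT general definitions block. The struct names come from this header; block_size and the example_ helper name are assumptions for illustration.]

static inline int example_vbt_child_count(size_t block_size)
{
	/* devices[] is a flexible array filling the rest of the block */
	return (block_size - sizeof(struct bdb_general_definitions)) /
	       sizeof(struct child_device_config);
}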
@@ -446,11 +450,11 @@ struct bdb_driver_features {
#define EDP_VSWING_1_2V 3
struct edp_power_seq {
- u16 t3;
- u16 t7;
+ u16 t1_t3;
+ u16 t8;
u16 t9;
u16 t10;
- u16 t12;
+ u16 t11_t12;
} __attribute__ ((packed));
struct edp_link_params {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0979d887788..fee0ad02c6d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -69,7 +69,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
- switch(mode) {
+ switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
break;
@@ -152,17 +152,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
- if (intel_crtc->pipe == 0) {
- if (HAS_PCH_CPT(dev))
- adpa |= PORT_TRANS_A_SEL_CPT;
- else
- adpa |= ADPA_PIPE_A_SELECT;
- } else {
- if (HAS_PCH_CPT(dev))
- adpa |= PORT_TRANS_B_SEL_CPT;
- else
- adpa |= ADPA_PIPE_B_SELECT;
- }
+ /* For CPT allow 3 pipe config, for others just use A or B */
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+ else if (intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 04411ad2e77..981b1f1c04d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
+#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
@@ -42,39 +43,39 @@
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
typedef struct {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
} intel_clock_t;
typedef struct {
- int min, max;
+ int min, max;
} intel_range_t;
typedef struct {
- int dot_limit;
- int p2_slow, p2_fast;
+ int dot_limit;
+ int p2_slow, p2_fast;
} intel_p2_t;
#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
- intel_range_t dot, vco, n, m, m1, m2, p, p1;
- intel_p2_t p2;
- bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ intel_p2_t p2;
+ bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+ int, int, intel_clock_t *);
};
/* FDI */
@@ -105,56 +106,56 @@ intel_fdi_link_freq(struct drm_device *dev)
}
static const intel_limit_t intel_limits_i8xx_dvo = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
.p2 = { .dot_limit = 165000,
.p2_slow = 4, .p2_fast = 2 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i8xx_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 1, .max = 6 },
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
.p2 = { .dot_limit = 165000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i9xx_sdvo = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 10, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i9xx_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 7, .max = 98 },
- .p1 = { .min = 1, .max = 8 },
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 10, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
@@ -222,44 +223,44 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
};
static const intel_limit_t intel_limits_g4x_display_port = {
- .dot = { .min = 161670, .max = 227000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 2 },
- .m = { .min = 97, .max = 108 },
- .m1 = { .min = 0x10, .max = 0x12 },
- .m2 = { .min = 0x05, .max = 0x06 },
- .p = { .min = 10, .max = 20 },
- .p1 = { .min = 1, .max = 2},
- .p2 = { .dot_limit = 0,
+ .dot = { .min = 161670, .max = 227000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 97, .max = 108 },
+ .m1 = { .min = 0x10, .max = 0x12 },
+ .m2 = { .min = 0x05, .max = 0x06 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
- .find_pll = intel_find_pll_g4x_dp,
+ .find_pll = intel_find_pll_g4x_dp,
};
static const intel_limit_t intel_limits_pineview_sdvo = {
- .dot = { .min = 20000, .max = 400000},
- .vco = { .min = 1700000, .max = 3500000 },
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
/* Pineview only has one combined m divider, which we treat as m2. */
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_pineview_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1700000, .max = 3500000 },
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 7, .max = 112 },
- .p1 = { .min = 1, .max = 8 },
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_find_best_PLL,
@@ -321,7 +322,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 28, .max = 112 },
- .p1 = { .min = 2,.max = 8 },
+ .p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
@@ -335,24 +336,24 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 14, .max = 42 },
- .p1 = { .min = 2,.max = 6 },
+ .p1 = { .min = 2, .max = 6 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_display_port = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000},
- .n = { .min = 1, .max = 2 },
- .m = { .min = 81, .max = 90 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 10, .max = 20 },
- .p1 = { .min = 1, .max = 2},
- .p2 = { .dot_limit = 0,
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 81, .max = 90 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
- .find_pll = intel_find_pll_ironlake_dp,
+ .find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
@@ -404,7 +405,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
- } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -488,26 +489,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
const intel_clock_t *clock)
{
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid ("p1 out of range\n");
+ INTELPllInvalid("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid ("p out of range\n");
+ INTELPllInvalid("p out of range\n");
if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid ("m2 out of range\n");
+ INTELPllInvalid("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid ("m1 out of range\n");
+ INTELPllInvalid("m1 out of range\n");
if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
- INTELPllInvalid ("m1 <= m2\n");
+ INTELPllInvalid("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid ("m out of range\n");
+ INTELPllInvalid("m out of range\n");
if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid ("n out of range\n");
+ INTELPllInvalid("n out of range\n");
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid ("vco out of range\n");
+ INTELPllInvalid("vco out of range\n");
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
* connector, etc., rather than just a single range.
*/
if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid ("dot out of range\n");
+ INTELPllInvalid("dot out of range\n");
return true;
}
@@ -542,7 +543,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
clock.p2 = limit->p2.p2_fast;
}
- memset (best_clock, 0, sizeof (*best_clock));
+ memset(best_clock, 0, sizeof(*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
@@ -802,6 +803,19 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 pch_dpll;
+
+ pch_dpll = I915_READ(PCH_DPLL_SEL);
+
+ /* Make sure the selected PLL is enabled to the transcoder */
+ WARN(!((pch_dpll >> (4 * pipe)) & 8),
+ "transcoder %d PLL not enabled\n", pipe);
+
+ /* Convert the transcoder pipe number to a pll pipe number */
+ pipe = (pch_dpll >> (4 * pipe)) & 1;
+ }
+
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
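[Editor's note: the shifts in the hunk above decode PCH_DPLL_SEL, which packs one nibble per transcoder: bit 3 of the nibble is that transcoder's DPLL-enable flag and bit 0 selects DPLL A (0) or DPLL B (1). A minimal sketch of that decoding, assuming the kernel's u32/bool types and the TRANS{A,B,C}_DPLL* bit layout from i915_reg.h; the example_ helper names are hypothetical.]

#include <linux/types.h>

static inline bool example_transcoder_pll_enabled(u32 dpll_sel, int pipe)
{
	return (dpll_sel >> (4 * pipe)) & 8;	/* TRANSx_DPLL_ENABLE bit */
}

static inline int example_transcoder_pll(u32 dpll_sel, int pipe)
{
	return (dpll_sel >> (4 * pipe)) & 1;	/* 0 = DPLL A, 1 = DPLL B */
}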
@@ -1171,6 +1185,9 @@ static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
int reg;
u32 val;
+ if (pipe > 1)
+ return;
+
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1191,6 +1208,9 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
int reg;
u32 val;
+ if (pipe > 1)
+ return;
+
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1256,7 +1276,7 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
- DRM_ERROR("failed to disable transcoder\n");
+ DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
/**
@@ -2085,6 +2105,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
switch (plane) {
case 0:
case 1:
+ case 2:
break;
default:
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
@@ -2184,6 +2205,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
case 0:
case 1:
break;
+ case 2:
+ if (IS_IVYBRIDGE(dev))
+ break;
+ /* fall through otherwise */
default:
DRM_ERROR("no plane for crtc\n");
return -EINVAL;
@@ -2440,7 +2465,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
}
-static const int snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param[] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2496,7 +2521,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
cpt_phase_pointer_enable(dev, pipe);
- for (i = 0; i < 4; i++ ) {
+ for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2545,7 +2570,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- for (i = 0; i < 4; i++ ) {
+ for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2600,6 +2625,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
@@ -2607,6 +2633,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_RX_ENABLE);
POSTING_READ(reg);
@@ -2615,7 +2642,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
cpt_phase_pointer_enable(dev, pipe);
- for (i = 0; i < 4; i++ ) {
+ for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2657,7 +2684,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- for (i = 0; i < 4; i++ ) {
+ for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2866,7 +2893,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp;
+ u32 reg, temp, transc_sel;
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
@@ -2874,12 +2901,21 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_pch_pll(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
+ transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
+ TRANSC_DPLLB_SEL;
+
/* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
+ if (pipe == 0) {
+ temp &= ~(TRANSA_DPLLB_SEL);
temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
+ } else if (pipe == 1) {
+ temp &= ~(TRANSB_DPLLB_SEL);
temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ } else if (pipe == 2) {
+ temp &= ~(TRANSC_DPLLB_SEL);
+ temp |= (TRANSC_DPLL_ENABLE | transc_sel);
+ }
I915_WRITE(PCH_DPLL_SEL, temp);
}
@@ -2935,6 +2971,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_transcoder(dev_priv, pipe);
}
+void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+ u32 temp;
+
+ temp = I915_READ(dslreg);
+ udelay(500);
+ if (wait_for(I915_READ(dslreg) != temp, 5)) {
+ /* Without this, mode sets may fail silently on FDI */
+ I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
+ udelay(250);
+ I915_WRITE(tc2reg, 0);
+ if (wait_for(I915_READ(dslreg) != temp, 5))
+ DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
+ }
+}
+
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3045,13 +3099,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
break;
case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
break;
case 2:
- /* FIXME: manage transcoder PLLs? */
+ /* C shares PLL A or B */
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
break;
default:
@@ -3061,7 +3115,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
/* disable PCH DPLL */
- intel_disable_pch_pll(dev_priv, pipe);
+ if (!intel_crtc->no_pll)
+ intel_disable_pch_pll(dev_priv, pipe);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -3293,18 +3348,25 @@ static void ironlake_crtc_commit(struct drm_crtc *crtc)
ironlake_crtc_enable(crtc);
}
-void intel_encoder_prepare (struct drm_encoder *encoder)
+void intel_encoder_prepare(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
/* lvds has its own version of prepare see intel_lvds_prepare */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
-void intel_encoder_commit (struct drm_encoder *encoder)
+void intel_encoder_commit(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ struct drm_device *dev = encoder->dev;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -4478,6 +4540,20 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 2;
}
+ /* IVB has 3 pipes */
+ if (IS_IVYBRIDGE(dev) &&
+ g4x_compute_wm0(dev, 2,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEC_IVB,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << 2;
+ }
+
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
@@ -4584,7 +4660,9 @@ static void intel_update_watermarks(struct drm_device *dev)
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- return dev_priv->lvds_use_ssc && i915_panel_use_ssc
+ if (i915_panel_use_ssc >= 0)
+ return i915_panel_use_ssc != 0;
+ return dev_priv->lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@@ -5107,36 +5185,52 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
-static void ironlake_update_pch_refclk(struct drm_device *dev)
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_encoder *has_edp_encoder = NULL;
u32 temp;
bool has_lvds = false;
+ bool has_cpu_edp = false;
+ bool has_pch_edp = false;
+ bool has_panel = false;
+ bool has_ck505 = false;
+ bool can_ssc = false;
/* We need to take the global config into account */
- list_for_each_entry(crtc, &mode_config->crtc_list, head) {
- if (!crtc->enabled)
- continue;
-
- list_for_each_entry(encoder, &mode_config->encoder_list,
- base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- has_lvds = true;
- case INTEL_OUTPUT_EDP:
- has_edp_encoder = encoder;
- break;
- }
+ list_for_each_entry(encoder, &mode_config->encoder_list,
+ base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_panel = true;
+ has_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_panel = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ has_pch_edp = true;
+ else
+ has_cpu_edp = true;
+ break;
}
}
+ if (HAS_PCH_IBX(dev)) {
+ has_ck505 = dev_priv->display_clock_mode;
+ can_ssc = has_ck505;
+ } else {
+ has_ck505 = false;
+ can_ssc = true;
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
+ has_panel, has_lvds, has_pch_edp, has_cpu_edp,
+ has_ck505);
+
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
@@ -5145,43 +5239,102 @@ static void ironlake_update_pch_refclk(struct drm_device *dev)
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
- temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
+ if (has_ck505)
+ temp |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- if (has_edp_encoder) {
- if (intel_panel_use_ssc(dev_priv)) {
- temp |= DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
+ if (has_panel) {
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
+ /* SSC must be turned on before enabling the CPU output */
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on panel\n");
+ temp |= DREF_SSC1_ENABLE;
}
+
+ /* Get SSC going before enabling the outputs */
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Enable CPU source on CPU attached eDP */
- if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- if (intel_panel_use_ssc(dev_priv))
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on eDP\n");
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ }
else
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else {
- /* Enable SSC on PCH eDP if needed */
- if (intel_panel_use_ssc(dev_priv)) {
- DRM_ERROR("enabling SSC on PCH\n");
- temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
- }
- }
+ } else
+ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ } else {
+ DRM_DEBUG_KMS("Disabling SSC entirely\n");
+
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Turn off CPU output */
+ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ /* Turn off the SSC source */
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_DISABLE;
+
+ /* Turn off SSC1 */
+ temp &= ~DREF_SSC1_ENABLE;
+
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
}
+static int ironlake_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *edp_encoder = NULL;
+ int num_connectors = 0;
+ bool is_lvds = false;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ edp_encoder = encoder;
+ break;
+ }
+ num_connectors++;
+ }
+
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ dev_priv->lvds_ssc_freq);
+ return dev_priv->lvds_ssc_freq * 1000;
+ }
+
+ return 120000;
+}
+
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5241,16 +5394,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else {
- refclk = 96000;
- if (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base))
- refclk = 120000; /* 120Mhz refclk */
- }
+ refclk = ironlake_get_refclk(crtc);
/*
* Returns a set of divisors for the desired target clock with the given
@@ -5377,8 +5521,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
- ironlake_update_pch_refclk(dev);
-
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
@@ -5450,39 +5592,32 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
/* PCH eDP needs FDI, but CPU eDP does not */
- if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(PCH_FP0(pipe), fp);
- I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(PCH_DPLL(pipe));
- udelay(150);
- }
+ if (!intel_crtc->no_pll) {
+ if (!has_edp_encoder ||
+ intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ I915_WRITE(PCH_FP0(pipe), fp);
+ I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- /* enable transcoder DPLL */
- if (HAS_PCH_CPT(dev)) {
- temp = I915_READ(PCH_DPLL_SEL);
- switch (pipe) {
- case 0:
- temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
- break;
- case 1:
- temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
- break;
- case 2:
- /* FIXME: manage transcoder PLLs? */
- temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
- break;
- default:
- BUG();
+ POSTING_READ(PCH_DPLL(pipe));
+ udelay(150);
+ }
+ } else {
+ if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
+ fp == I915_READ(PCH_FP0(0))) {
+ intel_crtc->use_pll_a = true;
+ DRM_DEBUG_KMS("using pipe a dpll\n");
+ } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
+ fp == I915_READ(PCH_FP0(1))) {
+ intel_crtc->use_pll_a = false;
+ DRM_DEBUG_KMS("using pipe b dpll\n");
+ } else {
+ DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
+ return -EINVAL;
}
- I915_WRITE(PCH_DPLL_SEL, temp);
-
- POSTING_READ(PCH_DPLL_SEL);
- udelay(150);
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -5492,17 +5627,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- if (HAS_PCH_CPT(dev))
- temp |= PORT_TRANS_B_SEL_CPT;
- else
- temp |= LVDS_PIPEB_SELECT;
- } else {
- if (HAS_PCH_CPT(dev))
- temp &= ~PORT_TRANS_SEL_MASK;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
+ if (HAS_PCH_CPT(dev))
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ else if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+
/* set the corresponsding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -5552,8 +5683,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
- if (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (!intel_crtc->no_pll &&
+ (!has_edp_encoder ||
+ intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
I915_WRITE(PCH_DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
@@ -5569,18 +5701,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
intel_crtc->lowfreq_avail = false;
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(PCH_FP1(pipe), fp2);
- intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
- } else {
- I915_WRITE(PCH_FP1(pipe), fp);
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ if (!intel_crtc->no_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(PCH_FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ }
+ } else {
+ I915_WRITE(PCH_FP1(pipe), fp);
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ }
}
}
@@ -5677,6 +5811,131 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
+static void g4x_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t len;
+ uint32_t i;
+
+ i = I915_READ(G4X_AUD_VID_DID);
+
+ if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i &= ~(eldv | G4X_ELD_ADDR);
+ len = (i >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+
+ if (!eld[0])
+ return;
+
+ len = min_t(uint8_t, eld[2], len);
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+}
+
+static void ironlake_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int hdmiw_hdmiedid;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+
+ if (IS_IVYBRIDGE(connector->dev)) {
+ hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
+ aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
+ aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+ } else {
+ hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
+ aud_cntl_st = GEN5_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
+ }
+
+ i = to_intel_crtc(crtc)->pipe;
+ hdmiw_hdmiedid += i * 0x100;
+ aud_cntl_st += i * 0x100;
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+
+ i = I915_READ(aud_cntl_st);
+ i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
+ if (!i) {
+ DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
+ /* operate blindly on all ports */
+ eldv = GEN5_ELD_VALIDB;
+ eldv |= GEN5_ELD_VALIDB << 4;
+ eldv |= GEN5_ELD_VALIDB << 8;
+ } else {
+ DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
+ eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
+ }
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ }
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~GEN5_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
+void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ connector->encoder->base.id,
+ drm_get_encoder_name(connector->encoder));
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.write_eld)
+ dev_priv->display.write_eld(connector, crtc);
+}
+
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
@@ -5758,6 +6017,31 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
I915_WRITE(CURBASE(pipe), base);
}
+static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
+ if (base) {
+ cntl &= ~CURSOR_MODE;
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(CURCNTR_IVB(pipe), cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE_IVB(pipe), base);
+}
+
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
bool on)
@@ -5805,11 +6089,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base);
- else
- i9xx_update_cursor(crtc, base);
+ if (IS_IVYBRIDGE(dev)) {
+ I915_WRITE(CURPOS_IVB(pipe), pos);
+ ivb_update_cursor(crtc, base);
+ } else {
+ I915_WRITE(CURPOS(pipe), pos);
+ if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
+ }
if (visible)
intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
@@ -7071,6 +7360,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
+ if (pipe == 2 && IS_IVYBRIDGE(dev))
+ intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
@@ -7250,6 +7541,9 @@ static void intel_setup_outputs(struct drm_device *dev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7494,6 +7788,10 @@ void gen6_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
spin_lock_irq(&dev_priv->rps_lock);
dev_priv->pm_iir = 0;
@@ -8154,7 +8452,7 @@ static void intel_init_display(struct drm_device *dev)
}
/* Returns the core display clock speed */
- if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
+ if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
@@ -8193,6 +8491,7 @@ static void intel_init_display(struct drm_device *dev)
}
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
@@ -8203,6 +8502,7 @@ static void intel_init_display(struct drm_device *dev)
}
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
@@ -8214,7 +8514,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-
+ dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
@@ -8225,7 +8525,7 @@ static void intel_init_display(struct drm_device *dev)
DRM_INFO("failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3": "2",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
@@ -8234,6 +8534,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = pineview_update_wm;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
+ dev_priv->display.write_eld = g4x_write_eld;
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
@@ -8294,7 +8595,7 @@ static void intel_init_display(struct drm_device *dev)
* resume, or other times. This quirk makes sure that's the case for
* affected systems.
*/
-static void quirk_pipea_force (struct drm_device *dev)
+static void quirk_pipea_force(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8322,7 +8623,7 @@ struct intel_quirk intel_quirks[] = {
/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
/* HP Mini needs pipe A force quirk (LP: #322104) */
- { 0x27ae,0x103c, 0x361a, quirk_pipea_force },
+ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
/* Thinkpad R31 needs pipe A force quirk */
{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
@@ -8488,6 +8789,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
+ cancel_work_sync(&dev_priv->rps_work);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
@@ -8573,7 +8875,7 @@ struct intel_display_error_state {
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int i;
@@ -8589,7 +8891,7 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
error->plane[i].size = I915_READ(DSPSIZE(i));
- error->plane[i].pos= I915_READ(DSPPOS(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
error->plane[i].addr = I915_READ(DSPADDR(i));
if (INTEL_INFO(dev)->gen >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 44fef5e1c49..09b318b0227 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -36,7 +37,7 @@
#include "i915_drv.h"
#include "drm_dp_helper.h"
-
+#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -53,12 +54,21 @@ struct intel_dp {
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
- uint8_t dpcd[8];
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ unsigned long panel_off_jiffies;
};
/**
@@ -86,6 +96,17 @@ static bool is_pch_edp(struct intel_dp *intel_dp)
return intel_dp->is_pch_edp;
}
+/**
+ * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a CPU eDP port.
+ */
+static bool is_cpu_edp(struct intel_dp *intel_dp)
+{
+ return is_edp(intel_dp) && !is_pch_edp(intel_dp);
+}
+
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dp, base.base);
@@ -121,7 +142,7 @@ static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
-intel_edp_link_config (struct intel_encoder *intel_encoder,
+intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -175,9 +196,25 @@ intel_dp_link_clock(uint8_t link_bw)
return 162000;
}
-/* I think this is a fiction */
+/*
+ * The units on the numbers in the next two are... bizarre. Examples will
+ * make it clearer; this one parallels an example in the eDP spec.
+ *
+ * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
+ *
+ * 270000 * 1 * 8 / 10 == 216000
+ *
+ * The actual data capacity of that configuration is 2.16Gbit/s, so the
+ * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
+ * or equivalently, kilopixels per second - so for 1680x1050R it'd be
+ * 119000. At 18bpp that's 2142000 kilobits per second.
+ *
+ * Thus the strange-looking division by 10 in intel_dp_link_required, to
+ * get the result in decakilobits instead of kilobits.
+ */
+
static int
-intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -186,7 +223,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
if (intel_crtc)
bpp = intel_crtc->bpp;
- return (pixel_clock * bpp + 7) / 8;
+ return (pixel_clock * bpp + 9) / 10;
}
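
For reference, a minimal standalone C sketch of the arithmetic the new comment describes: both sides of the comparison end up in decakilobits per second (10 kbit/s units). The helper names below are local to the example, not part of the patch.

	#include <stdio.h>

	/* required bandwidth for a mode, in decakilobits/s (round up) */
	static int link_required(int pixel_clock_khz, int bpp)
	{
		return (pixel_clock_khz * bpp + 9) / 10;
	}

	/* usable capacity of a link, in decakilobits/s (8b/10b coding) */
	static int max_data_rate(int link_clock_khz, int lanes)
	{
		return link_clock_khz * lanes * 8 / 10;
	}

	int main(void)
	{
		/* 1 lane at 2.7 GHz: 270000 * 1 * 8 / 10 == 216000 (2.16 Gbit/s) */
		printf("capacity: %d\n", max_data_rate(270000, 1));
		/* 1680x1050R, 119 MHz pixel clock, 18 bpp -> 214200 */
		printf("required: %d\n", link_required(119000, 18));
		return 0;
	}
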
static int
@@ -200,24 +237,19 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
- if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
- if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
- /* only refuse the mode on non eDP since we have seen some weird eDP panels
- which are outside spec tolerances but somehow work by magic */
- if (!is_edp(intel_dp) &&
- (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes)))
+ if (intel_dp_link_required(intel_dp, mode->clock)
+ > intel_dp_max_data_rate(max_link_clock, max_lanes))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@@ -279,6 +311,38 @@ intel_hrawclk(struct drm_device *dev)
}
}
+static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
+}
+
+static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
+}
+
+static void
+intel_dp_check_edp(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!is_edp(intel_dp))
+ return;
+ if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+ WARN(1, "eDP powered off while attempting aux channel communication.\n");
+ DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
+ }
+}
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
@@ -295,6 +359,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t aux_clock_divider;
int try, precharge;
+ intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
@@ -302,7 +367,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
*/
- if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
@@ -337,7 +402,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
pack_aux(send + i, send_bytes - i));
-
+
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
@@ -354,7 +419,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
break;
udelay(100);
}
-
+
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
status |
@@ -390,7 +455,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
if (recv_bytes > recv_size)
recv_bytes = recv_size;
-
+
for (i = 0; i < recv_bytes; i += 4)
unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
@@ -408,6 +473,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
int msg_bytes;
uint8_t ack;
+ intel_dp_check_edp(intel_dp);
if (send_bytes > 16)
return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
@@ -450,6 +516,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint8_t ack;
int ret;
+ intel_dp_check_edp(intel_dp);
msg[0] = AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
@@ -493,6 +560,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
int reply_bytes;
int ret;
+ intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = AUX_I2C_READ << 4;
@@ -573,24 +641,32 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
{
+ int ret;
+
DRM_DEBUG_KMS("i2c_init %s\n", name);
intel_dp->algo.running = false;
intel_dp->algo.address = 0;
intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
- memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+ memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.class = I2C_CLASS_DDC;
- strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
- return i2c_dp_aux_add_bus(&intel_dp->adapter);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return ret;
}
static bool
@@ -598,29 +674,28 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+ intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
*/
- mode->clock = dev_priv->panel_fixed_mode->clock;
+ mode->clock = intel_dp->panel_fixed_mode->clock;
}
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -634,19 +709,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
- if (is_edp(intel_dp)) {
- /* okay we failed just pick the highest */
- intel_dp->lane_count = max_lane_count;
- intel_dp->link_bw = bws[max_clock];
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
- "count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
-
- return true;
- }
-
return false;
}
@@ -740,6 +802,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
+static void ironlake_edp_pll_on(struct drm_encoder *encoder);
+static void ironlake_edp_pll_off(struct drm_encoder *encoder);
+
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -749,6 +814,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ /* Turn on the eDP PLL if needed */
+ if (is_edp(intel_dp)) {
+ if (!is_pch_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+ else
+ ironlake_edp_pll_off(encoder);
+ }
+
intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= intel_dp->color_range;
@@ -757,7 +830,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
- if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
intel_dp->DP |= DP_LINK_TRAIN_OFF;
@@ -773,8 +846,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
- if (intel_dp->has_audio)
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_write_eld(encoder, adjusted_mode);
+ }
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
@@ -794,7 +871,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
intel_dp->DP |= DP_PIPEB_SELECT;
- if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
@@ -804,58 +881,150 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+ unsigned long off_time;
+ unsigned long delay;
+
+ DRM_DEBUG_KMS("Wait for panel power off time\n");
+
+ if (ironlake_edp_have_panel_power(intel_dp) ||
+ ironlake_edp_have_panel_vdd(intel_dp))
+ {
+ DRM_DEBUG_KMS("Panel still on, no delay needed\n");
+ return;
+ }
+
+ off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
+ if (time_after(jiffies, off_time)) {
+ DRM_DEBUG_KMS("Time already passed");
+ return;
+ }
+ delay = jiffies_to_msecs(off_time - jiffies);
+ if (delay > intel_dp->panel_power_down_delay)
+ delay = intel_dp->panel_power_down_delay;
+ DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
+ msleep(delay);
+}
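
The jiffies arithmetic above boils down to "sleep only for the part of the panel power-down delay that has not yet elapsed since the panel was switched off". A rough userspace analogue of that logic; now_ms(), sleep_ms() and the field names are assumptions of the sketch, not kernel interfaces.

	#include <stdint.h>
	#include <time.h>

	static uint64_t now_ms(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
	}

	static void sleep_ms(uint64_t ms)
	{
		struct timespec ts = { ms / 1000, (ms % 1000) * 1000000 };
		nanosleep(&ts, NULL);
	}

	struct panel {
		uint64_t off_timestamp_ms;    /* when power was last removed */
		uint64_t power_down_delay_ms; /* required panel-off time */
	};

	static void wait_panel_off(const struct panel *p)
	{
		uint64_t deadline = p->off_timestamp_ms + p->power_down_delay_ms;
		uint64_t now = now_ms();

		if (now >= deadline)
			return;                 /* delay already satisfied */
		sleep_ms(deadline - now);       /* wait out only the remainder */
	}

	int main(void)
	{
		struct panel p = { .off_timestamp_ms = now_ms(),
				   .power_down_delay_ms = 50 };
		wait_panel_off(&p);             /* sleeps roughly 50 ms */
		return 0;
	}
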
+
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- /*
- * If the panel wasn't on, make sure there's not a currently
- * active PP sequence before enabling AUX VDD.
- */
- if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
- msleep(dev_priv->panel_t3);
+ if (!is_edp(intel_dp))
+ return;
+ DRM_DEBUG_KMS("Turn eDP VDD on\n");
+
+ WARN(intel_dp->want_panel_vdd,
+ "eDP VDD already requested on\n");
+
+ intel_dp->want_panel_vdd = true;
+ if (ironlake_edp_have_panel_vdd(intel_dp)) {
+ DRM_DEBUG_KMS("eDP VDD already on\n");
+ return;
+ }
+ ironlake_wait_panel_off(intel_dp);
pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+ pp |= PANEL_UNLOCK_REGS;
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+
+ /*
+ * If the panel wasn't on, delay before accessing aux channel
+ */
+ if (!ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP was not running\n");
+ msleep(intel_dp->panel_power_up_delay);
+ }
}
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
+static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~EDP_FORCE_VDD;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+ pp |= PANEL_UNLOCK_REGS;
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+ intel_dp->panel_off_jiffies = jiffies;
+ }
+}
+
+static void ironlake_panel_vdd_work(struct work_struct *__work)
+{
+ struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
+ struct intel_dp, panel_vdd_work);
+ struct drm_device *dev = intel_dp->base.base.dev;
- /* Make sure sequencer is idle before allowing subsequent activity */
- msleep(dev_priv->panel_t12);
+ mutex_lock(&dev->struct_mutex);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
+ WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
+
+ intel_dp->want_panel_vdd = false;
+
+ if (sync) {
+ ironlake_panel_vdd_off_sync(intel_dp);
+ } else {
+ /*
+ * Queue the timer to fire a long
+ * time from now (relative to the power down delay)
+ * to keep the panel power up across a sequence of operations
+ */
+ schedule_delayed_work(&intel_dp->panel_vdd_work,
+ msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+ }
}
/* Returns true if the panel was already on when called */
-static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
+static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
- if (I915_READ(PCH_PP_STATUS) & PP_ON)
- return true;
+ if (!is_edp(intel_dp))
+ return;
+ if (ironlake_edp_have_panel_power(intel_dp))
+ return;
+ ironlake_wait_panel_off(intel_dp);
pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+ pp |= PANEL_UNLOCK_REGS;
+
+ if (IS_GEN5(dev)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
-
- pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+ pp |= POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -864,44 +1033,64 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
-
- return false;
+ if (IS_GEN5(dev)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
}
-static void ironlake_edp_panel_off (struct drm_device *dev)
+static void ironlake_edp_panel_off(struct drm_encoder *encoder)
{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+ if (!is_edp(intel_dp))
+ return;
pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+ pp |= PANEL_UNLOCK_REGS;
+
+ if (IS_GEN5(dev)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ intel_dp->panel_off_jiffies = jiffies;
- pp &= ~POWER_TARGET_ON;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ if (IS_GEN5(dev)) {
+ pp &= ~POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ pp &= ~POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ msleep(intel_dp->panel_power_cycle_delay);
- if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
- DRM_ERROR("panel off wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
+ DRM_ERROR("panel off wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
}
-static void ironlake_edp_backlight_on (struct drm_device *dev)
+static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ if (!is_edp(intel_dp))
+ return;
+
DRM_DEBUG_KMS("\n");
/*
* If we enable the backlight right away following a panel power
@@ -909,21 +1098,32 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
- msleep(300);
+ msleep(intel_dp->backlight_on_delay);
pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+ pp |= PANEL_UNLOCK_REGS;
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
}
-static void ironlake_edp_backlight_off (struct drm_device *dev)
+static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ if (!is_edp(intel_dp))
+ return;
+
DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+ pp |= PANEL_UNLOCK_REGS;
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ msleep(intel_dp->backlight_off_delay);
}
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
@@ -986,43 +1186,39 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
/* Wake up the sink first */
+ ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
- if (is_edp(intel_dp)) {
- ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_off(dev);
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
+ /* Make sure the panel is off before trying to
+ * change the mode
+ */
+ ironlake_edp_backlight_off(intel_dp);
intel_dp_link_down(intel_dp);
+ ironlake_edp_panel_off(encoder);
}
static void intel_dp_commit(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
- if (is_edp(intel_dp))
- ironlake_edp_panel_vdd_on(intel_dp);
-
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
-
- if (is_edp(intel_dp)) {
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp);
- }
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
-
- if (is_edp(intel_dp))
- ironlake_edp_backlight_on(dev);
+ ironlake_edp_backlight_on(intel_dp);
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
static void
@@ -1034,28 +1230,27 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
+ ironlake_edp_panel_vdd_on(intel_dp);
if (is_edp(intel_dp))
- ironlake_edp_backlight_off(dev);
+ ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
- if (is_edp(intel_dp))
- ironlake_edp_panel_off(dev);
+ ironlake_edp_panel_off(encoder);
if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
ironlake_edp_pll_off(encoder);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
} else {
- if (is_edp(intel_dp))
- ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
- if (is_edp(intel_dp)) {
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp);
- }
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
- }
- if (is_edp(intel_dp))
- ironlake_edp_backlight_on(dev);
+ ironlake_edp_backlight_on(intel_dp);
+ } else
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ ironlake_edp_backlight_on(intel_dp);
}
intel_dp->dpms_mode = mode;
}
@@ -1364,7 +1559,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1383,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1458,7 +1653,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1499,7 +1694,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1529,7 +1724,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1578,13 +1773,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
+ msleep(intel_dp->panel_power_down_delay);
}
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) &&
+ sizeof(intel_dp->dpcd)) &&
(intel_dp->dpcd[DP_DPCD_REV] != 0)) {
return true;
}
@@ -1592,6 +1788,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
}
+static bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read_retry(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector, 1);
+ if (!ret)
+ return false;
+
+ return true;
+}
+
+static void
+intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+ /* NAK by default */
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -1604,6 +1821,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ u8 sink_irq_vector;
+
if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1622,6 +1841,20 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
}
+ /* Try to read the source of the interrupt */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ /* Clear interrupt source */
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
+
+ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+ }
+
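
A rough standalone sketch of the acknowledge-by-write-back sequence added above, with the AUX channel stubbed out. The DPCD offset and bit positions used here are illustrative assumptions for the example, not values taken from the patch.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201   /* assumed DPCD offset */
	#define DP_AUTOMATED_TEST_REQUEST    (1 << 1)
	#define DP_CP_IRQ                    (1 << 2)

	static uint8_t fake_dpcd[0x300];

	static bool dpcd_read(uint16_t addr, uint8_t *val)
	{
		*val = fake_dpcd[addr];
		return true;
	}

	static void dpcd_write(uint16_t addr, uint8_t val)
	{
		fake_dpcd[addr] = val;
	}

	static void handle_sink_irq(void)
	{
		uint8_t vec;

		if (!dpcd_read(DP_DEVICE_SERVICE_IRQ_VECTOR, &vec) || !vec)
			return;

		/* writing the vector back acknowledges (clears) the interrupt */
		dpcd_write(DP_DEVICE_SERVICE_IRQ_VECTOR, vec);

		if (vec & DP_AUTOMATED_TEST_REQUEST)
			printf("automated test request\n");
		if (vec & DP_CP_IRQ)
			printf("content protection irq\n");
	}

	int main(void)
	{
		fake_dpcd[DP_DEVICE_SERVICE_IRQ_VECTOR] = DP_AUTOMATED_TEST_REQUEST;
		handle_sink_irq();
		return 0;
	}
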
if (!intel_channel_eq_ok(intel_dp)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_dp->base.base));
@@ -1683,6 +1916,31 @@ g4x_dp_detect(struct intel_dp *intel_dp)
return intel_dp_detect_dpcd(intel_dp);
}
+static struct edid *
+intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return edid;
+}
+
+static int
+intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = intel_ddc_get_modes(connector, adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return ret;
+}
+
+
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
@@ -1715,7 +1973,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (intel_dp->force_audio) {
intel_dp->has_audio = intel_dp->force_audio > 0;
} else {
- edid = drm_get_edid(connector, &intel_dp->adapter);
+ edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
connector->display_info.raw_edid = NULL;
@@ -1736,28 +1994,36 @@ static int intel_dp_get_modes(struct drm_connector *connector)
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
+ ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
if (ret) {
- if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
+ if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {
- if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- dev_priv->panel_fixed_mode =
+ if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
+ intel_dp->panel_fixed_mode =
drm_mode_duplicate(dev, newmode);
break;
}
}
}
-
return ret;
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
if (is_edp(intel_dp)) {
- if (dev_priv->panel_fixed_mode != NULL) {
+ /* initialize panel mode from VBT if available for eDP */
+ if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_dp->panel_fixed_mode) {
+ intel_dp->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ }
+ }
+ if (intel_dp->panel_fixed_mode) {
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+ mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -1772,7 +2038,7 @@ intel_dp_detect_audio(struct drm_connector *connector)
struct edid *edid;
bool has_audio = false;
- edid = drm_get_edid(connector, &intel_dp->adapter);
+ edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
@@ -1839,7 +2105,7 @@ done:
}
static void
-intel_dp_destroy (struct drm_connector *connector)
+intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1857,6 +2123,10 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
+ if (is_edp(intel_dp)) {
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ }
kfree(intel_dp);
}
@@ -1896,7 +2166,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
/* Return which DP Port should be selected for Transcoder DP control */
int
-intel_trans_dp_port_sel (struct drm_crtc *crtc)
+intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -1993,10 +2263,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (is_edp(intel_dp))
+ if (is_edp(intel_dp)) {
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
+ }
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -2032,25 +2305,60 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_dp, intel_connector, name);
-
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
bool ret;
- u32 pp_on, pp_div;
+ struct edp_power_seq cur, vbt;
+ u32 pp_on, pp_off, pp_div;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
- /* Get T3 & T12 values (note: VESA not bspec terminology) */
- dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
- dev_priv->panel_t3 /= 10; /* t3 in 100us units */
- dev_priv->panel_t12 = pp_div & 0xf;
- dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
+
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
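
The get_delay() macro merges the register and VBT values (apparently held in 100 µs units, given the divide-by-ten into milliseconds) and rounds up to whole milliseconds. A small standalone illustration of that conversion, with made-up numbers:

	#include <stdio.h>

	#define max(a, b) ((a) > (b) ? (a) : (b))

	/* hundreds of microseconds -> milliseconds, rounding up */
	static int get_delay(int cur, int vbt)
	{
		return (max(cur, vbt) + 9) / 10;
	}

	int main(void)
	{
		/* register says 2100 (210 ms), VBT says 2005 (200.5 ms) -> 210 ms */
		printf("power up delay: %d ms\n", get_delay(2100, 2005));
		return 0;
	}
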
+ intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
@@ -2065,18 +2373,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
}
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
intel_encoder->hot_plug = intel_dp_hot_plug;
if (is_edp(intel_dp)) {
- /* initialize panel mode from VBT if available for eDP */
- if (dev_priv->lfp_lvds_vbt_mode) {
- dev_priv->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- }
- }
dev_priv->int_edp_connector = connector;
intel_panel_setup_backlight(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fe1099d8817..bd9a604b73d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -34,7 +34,7 @@
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
- while (! (COND)) { \
+ while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
@@ -49,10 +49,10 @@
#define MSLEEP(x) do { \
if (in_dbg_master()) \
- mdelay(x); \
+ mdelay(x); \
else \
msleep(x); \
-} while(0)
+} while (0)
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
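
The _wait_for()/wait_for() helper above is a poll-until-true loop with a millisecond timeout. A rough userspace analogue; the helper names and the fixed 1 ms poll interval are assumptions of the sketch.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <time.h>

	static uint64_t now_ms(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
	}

	/* poll cond() until true or timeout_ms elapses; -ETIMEDOUT on failure */
	static int wait_for(bool (*cond)(void), unsigned int timeout_ms)
	{
		uint64_t deadline = now_ms() + timeout_ms;
		struct timespec interval = { 0, 1000000 };  /* 1 ms between polls */

		while (!cond()) {
			if (now_ms() > deadline)
				return -ETIMEDOUT;
			nanosleep(&interval, NULL);
		}
		return 0;
	}

	static bool always_true(void) { return true; }

	int main(void)
	{
		return wait_for(always_true, 50) ? 1 : 0;
	}
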
@@ -171,6 +171,9 @@ struct intel_crtc {
int16_t cursor_width, cursor_height;
bool cursor_visible;
unsigned int bpp;
+
+ bool no_pll; /* tertiary pipe for IVB */
+ bool use_pll_a;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -184,7 +187,7 @@ struct intel_crtc {
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
-#define DIP_TYPE_SPD 0x3
+#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
#define DIP_LEN_SPD 25
#define DIP_SPD_UNKNOWN 0
@@ -284,7 +287,7 @@ void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
/* intel_panel.c */
@@ -304,8 +307,8 @@ extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare (struct drm_encoder *encoder);
-extern void intel_encoder_commit (struct drm_encoder *encoder);
+extern void intel_encoder_prepare(struct drm_encoder *encoder);
+extern void intel_encoder_commit(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -377,4 +380,8 @@ extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 226ba830f38..d4f5a0b2120 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -69,8 +69,7 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
frame->checksum = 0;
frame->ecc = 0;
- /* Header isn't part of the checksum */
- for (i = 5; i < frame->len; i++)
+ for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
sum += data[i];
frame->checksum = 0x100 - sum;
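
The new loop folds the DIP header into the checksum, so that with the checksum byte zeroed every byte of the infoframe is summed and the checksum is chosen to make the total wrap to zero modulo 256. A standalone sketch of that rule; the buffer layout is illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t infoframe_checksum(const uint8_t *buf, size_t len)
	{
		uint8_t sum = 0;
		size_t i;

		for (i = 0; i < len; i++)
			sum += buf[i];
		return 0x100 - sum;            /* makes the full sum wrap to 0 */
	}

	int main(void)
	{
		/* 3-byte header (type, version, length) + 13-byte payload */
		uint8_t frame[3 + 13] = { 0x82, 0x02, 0x0d };
		uint8_t csum, check = 0;
		size_t i;

		/* the checksum slot stays zero while summing, then is filled in */
		csum = infoframe_checksum(frame, sizeof(frame));
		frame[3] = csum;

		for (i = 0; i < sizeof(frame); i++)
			check += frame[i];
		printf("checksum 0x%02x, total mod 256 = %u\n", csum, (unsigned)check);
		return 0;
	}
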
@@ -104,7 +103,7 @@ static u32 intel_infoframe_flags(struct dip_infoframe *frame)
flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
break;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
+ flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
break;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
@@ -165,9 +164,9 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
flags = intel_infoframe_index(frame);
- val &= ~VIDEO_DIP_SELECT_MASK;
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- I915_WRITE(reg, val | flags);
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
@@ -245,16 +244,17 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox |= HDMI_MODE_SELECT;
if (intel_hdmi->has_audio) {
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
sdvox |= SDVO_AUDIO_ENABLE;
sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+ intel_write_eld(encoder, adjusted_mode);
}
- if (intel_crtc->pipe == 1) {
- if (HAS_PCH_CPT(dev))
- sdvox |= PORT_TRANS_B_SEL_CPT;
- else
- sdvox |= SDVO_PIPE_B_SELECT;
- }
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+ else if (intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
@@ -486,6 +486,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
+ int i;
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
@@ -511,7 +512,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
@@ -538,10 +539,14 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->sdvox_reg = sdvox_reg;
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = i9xx_write_infoframe;
- else
+ I915_WRITE(VIDEO_DIP_CTL, 0);
+ } else {
intel_hdmi->write_infoframe = ironlake_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ }
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d98cee60b60..d30ccccb9d7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -28,6 +28,7 @@
*/
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
@@ -422,13 +423,7 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- /* speed:
- * 0x0 = 100 KHz
- * 0x1 = 50 KHz
- * 0x2 = 400 KHz
- * 0x3 = 1000 Khz
- */
- bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
}
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 31da77f5c05..42f165a520d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -888,9 +888,11 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
- intel_encoder->crtc_mask = (1 << 1);
- if (INTEL_INFO(dev)->gen >= 5)
- intel_encoder->crtc_mask |= (1 << 0);
+ if (HAS_PCH_SPLIT(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else
+ intel_encoder->crtc_mask = (1 << 1);
+
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 3b26a3ba02d..be2c6fe07d1 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/fb.h>
+#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drv.h"
@@ -74,6 +75,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b8e8158bb16..289140bc83c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -51,61 +51,61 @@
#define MBOX_ASLE (1<<2)
struct opregion_header {
- u8 signature[16];
- u32 size;
- u32 opregion_ver;
- u8 bios_ver[32];
- u8 vbios_ver[16];
- u8 driver_ver[16];
- u32 mboxes;
- u8 reserved[164];
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
} __attribute__((packed));
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
- u32 drdy; /* driver readiness */
- u32 csts; /* notification status */
- u32 cevt; /* current event */
- u8 rsvd1[20];
- u32 didl[8]; /* supported display devices ID list */
- u32 cpdl[8]; /* currently presented display list */
- u32 cadl[8]; /* currently active display list */
- u32 nadl[8]; /* next active devices list */
- u32 aslp; /* ASL sleep time-out */
- u32 tidx; /* toggle table index */
- u32 chpd; /* current hotplug enable indicator */
- u32 clid; /* current lid state*/
- u32 cdck; /* current docking state */
- u32 sxsw; /* Sx state resume */
- u32 evts; /* ASL supported events */
- u32 cnot; /* current OS notification */
- u32 nrdy; /* driver status */
- u8 rsvd2[60];
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
} __attribute__((packed));
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
- u32 scic; /* SWSCI command|status|data */
- u32 parm; /* command parameters */
- u32 dslp; /* driver sleep time-out */
- u8 rsvd[244];
+ u32 scic; /* SWSCI command|status|data */
+ u32 parm; /* command parameters */
+ u32 dslp; /* driver sleep time-out */
+ u8 rsvd[244];
} __attribute__((packed));
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
- u32 ardy; /* driver readiness */
- u32 aslc; /* ASLE interrupt command */
- u32 tche; /* technology enabled indicator */
- u32 alsi; /* current ALS illuminance reading */
- u32 bclp; /* backlight brightness to set */
- u32 pfit; /* panel fitting state */
- u32 cblv; /* current brightness level */
- u16 bclm[20]; /* backlight level duty cycle mapping table */
- u32 cpfm; /* current panel fitting mode */
- u32 epfm; /* enabled panel fitting modes */
- u8 plut[74]; /* panel LUT and identifier */
- u32 pfmb; /* PWM freq and min brightness */
- u8 rsvd[102];
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
} __attribute__((packed));
/* ASLE irq request bits */
@@ -361,7 +361,7 @@ static void intel_didl_outputs(struct drm_device *dev)
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
if (i >= 8) {
- dev_printk (KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, &dev->pdev->dev,
"More than 8 outputs detected\n");
return;
}
@@ -387,7 +387,7 @@ blind_set:
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
- dev_printk (KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, &dev->pdev->dev,
"More than 8 outputs detected\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d3603808682..cdf17d4cc1f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -117,57 +117,57 @@
/* memory bufferd overlay registers */
struct overlay_registers {
- u32 OBUF_0Y;
- u32 OBUF_1Y;
- u32 OBUF_0U;
- u32 OBUF_0V;
- u32 OBUF_1U;
- u32 OBUF_1V;
- u32 OSTRIDE;
- u32 YRGB_VPH;
- u32 UV_VPH;
- u32 HORZ_PH;
- u32 INIT_PHS;
- u32 DWINPOS;
- u32 DWINSZ;
- u32 SWIDTH;
- u32 SWIDTHSW;
- u32 SHEIGHT;
- u32 YRGBSCALE;
- u32 UVSCALE;
- u32 OCLRC0;
- u32 OCLRC1;
- u32 DCLRKV;
- u32 DCLRKM;
- u32 SCLRKVH;
- u32 SCLRKVL;
- u32 SCLRKEN;
- u32 OCONFIG;
- u32 OCMD;
- u32 RESERVED1; /* 0x6C */
- u32 OSTART_0Y;
- u32 OSTART_1Y;
- u32 OSTART_0U;
- u32 OSTART_0V;
- u32 OSTART_1U;
- u32 OSTART_1V;
- u32 OTILEOFF_0Y;
- u32 OTILEOFF_1Y;
- u32 OTILEOFF_0U;
- u32 OTILEOFF_0V;
- u32 OTILEOFF_1U;
- u32 OTILEOFF_1V;
- u32 FASTHSCALE; /* 0xA0 */
- u32 UVSCALEV; /* 0xA4 */
- u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
- u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
- u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
- u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
- u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
- u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
- u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
- u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
- u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
struct intel_overlay {
@@ -192,7 +192,7 @@ struct intel_overlay {
static struct overlay_registers *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
struct overlay_registers *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -264,7 +264,7 @@ i830_activate_pipe_a(struct drm_device *dev)
mode = drm_mode_duplicate(dev, &vesa_640x480);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- if(!drm_crtc_helper_set_mode(&crtc->base, mode,
+ if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
return 0;
@@ -332,7 +332,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -359,7 +359,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
}
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
- ADVANCE_LP_RING();
+ ADVANCE_LP_RING();
ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
@@ -583,7 +583,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
ret = ((offset + width + mask) >> shift) - (offset >> shift);
if (!IS_GEN2(dev))
ret <<= 1;
- ret -=1;
+ ret -= 1;
return ret << 2;
}
@@ -817,7 +817,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
+ regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -917,7 +917,7 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
* line with the intel documentation for the i965
*/
if (INTEL_INFO(dev)->gen >= 4) {
- /* on i965 use the PGM reg to read out the autoscaler values */
+ /* on i965 use the PGM reg to read out the autoscaler values */
ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
if (pfit_control & VERT_AUTO_SCALE)
@@ -1098,7 +1098,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
}
int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1301,10 +1301,10 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
}
int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_attrs *attrs = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct overlay_registers *regs;
int ret;
@@ -1393,7 +1393,7 @@ out_unlock:
void intel_setup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs;
@@ -1421,24 +1421,24 @@ void intel_setup_overlay(struct drm_device *dev)
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to attach phys overlay regs\n");
- goto out_free_bo;
- }
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
- DRM_ERROR("failed to pin overlay register bo\n");
- goto out_free_bo;
- }
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
overlay->flip_addr = reg_bo->gtt_offset;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
- DRM_ERROR("failed to move overlay register bo into the GTT\n");
- goto out_unpin_bo;
- }
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
}
/* init all values */
@@ -1525,7 +1525,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a9e0c7bcd31..499d4c0dbee 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -84,7 +84,7 @@ intel_pch_panel_fitting(struct drm_device *dev,
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / mode->vdisplay;
if (width & 1)
- width++;
+ width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
@@ -206,7 +206,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
if (IS_PINEVIEW(dev))
val >>= 1;
- if (is_backlight_combination_mode(dev)){
+ if (is_backlight_combination_mode(dev)) {
u8 lbpc;
val &= ~1;
@@ -226,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
I915_WRITE(BLC_PWM_CPU_CTL, val | level);
}
-void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
@@ -236,7 +236,7 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
- if (is_backlight_combination_mode(dev)){
+ if (is_backlight_combination_mode(dev)) {
u32 max = intel_panel_get_max_backlight(dev);
u8 lbpc;
@@ -254,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
-void intel_panel_disable_backlight(struct drm_device *dev)
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight_enabled) {
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
- dev_priv->backlight_enabled = false;
- }
+ dev_priv->backlight_level = level;
+ if (dev_priv->backlight_enabled)
+ intel_panel_actually_set_backlight(dev, level);
+}
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- intel_panel_set_backlight(dev, 0);
+ dev_priv->backlight_enabled = false;
+ intel_panel_actually_set_backlight(dev, 0);
}
void intel_panel_enable_backlight(struct drm_device *dev)
@@ -273,8 +278,8 @@ void intel_panel_enable_backlight(struct drm_device *dev)
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
- intel_panel_set_backlight(dev, dev_priv->backlight_level);
dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
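
The refactor above separates the remembered backlight level from the hardware write: the requested level is always stored, but only pushed to the register while the backlight is enabled, so enabling can restore the last requested value. A minimal standalone sketch of that bookkeeping, with hw_write() standing in for the register update.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int level;   /* last requested level */
	static bool enabled;

	static void hw_write(unsigned int val) { printf("hw <- %u\n", val); }

	static void set_backlight(unsigned int l)
	{
		level = l;
		if (enabled)
			hw_write(level);
	}

	static void disable_backlight(void)
	{
		enabled = false;
		hw_write(0);             /* level is remembered, hardware goes dark */
	}

	static void enable_backlight(unsigned int max)
	{
		if (level == 0)
			level = max;
		enabled = true;
		hw_write(level);
	}

	int main(void)
	{
		enable_backlight(255);   /* hw <- 255 */
		set_backlight(100);      /* hw <- 100 */
		disable_backlight();     /* hw <- 0, level stays 100 */
		set_backlight(180);      /* remembered only */
		enable_backlight(255);   /* hw <- 180 */
		return 0;
	}
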
static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c30626ea9f9..ca70e2f1044 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,16 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
@@ -123,6 +133,118 @@ render_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0); /* low dword */
+ intel_ring_emit(ring, 0); /* high dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ intel_emit_post_sync_nonzero_flush(ring);
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0); /* lower dword */
+ intel_ring_emit(ring, 0); /* upper dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
@@ -206,16 +328,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
return 0;
}
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
- struct drm_i915_gem_object *obj;
- volatile u32 *cpu_page;
- u32 gtt_offset;
-};
-
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
@@ -296,8 +408,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
GFX_MODE_ENABLE(GFX_REPLAY_MODE));
}
- if (INTEL_INFO(dev)->gen >= 6) {
- } else if (IS_GEN5(dev)) {
+ if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
if (ret)
return ret;
@@ -315,83 +426,131 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
}
static void
-update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+update_mboxes(struct intel_ring_buffer *ring,
+ u32 seqno,
+ u32 mmio_offset)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int id;
-
- /*
- * cs -> 1 = vcs, 0 = bcs
- * vcs -> 1 = bcs, 0 = cs,
- * bcs -> 1 = cs, 0 = vcs.
- */
- id = ring - dev_priv->ring;
- id += 2 - i;
- id %= 3;
-
- intel_ring_emit(ring,
- MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_REGISTER |
- MI_SEMAPHORE_UPDATE);
+ intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
intel_ring_emit(ring, seqno);
- intel_ring_emit(ring,
- RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+ intel_ring_emit(ring, mmio_offset);
}
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring - ring that is adding a request
+ * @seqno - return seqno stuck into the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
static int
gen6_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+ u32 *seqno)
{
- u32 seqno;
+ u32 mbox1_reg;
+ u32 mbox2_reg;
int ret;
ret = intel_ring_begin(ring, 10);
if (ret)
return ret;
- seqno = i915_gem_get_seqno(ring->dev);
- update_semaphore(ring, 0, seqno);
- update_semaphore(ring, 1, seqno);
+ mbox1_reg = ring->signal_mbox[0];
+ mbox2_reg = ring->signal_mbox[1];
+ *seqno = i915_gem_get_seqno(ring->dev);
+
+ update_mboxes(ring, *seqno, mbox1_reg);
+ update_mboxes(ring, *seqno, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, *seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
-int
-intel_ring_sync(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *to,
+/**
+ * intel_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter - ring that is waiting
+ * @signaller - ring which has, or will signal
+ * @seqno - seqno which the waiter will block on
+ */
+static int
+intel_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ int ring,
u32 seqno)
{
int ret;
+ u32 dw1 = MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
- intel_ring_emit(ring,
- MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_REGISTER |
- intel_ring_sync_index(ring, to) << 17 |
- MI_SEMAPHORE_COMPARE);
- intel_ring_emit(ring, seqno);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_advance(waiter);
return 0;
}
+/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
+int
+render_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
+ return intel_ring_sync(waiter,
+ signaller,
+ RCS,
+ seqno);
+}
+
+/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
+int
+gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
+ return intel_ring_sync(waiter,
+ signaller,
+ VCS,
+ seqno);
+}
+
+/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
+int
+gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
+ return intel_ring_sync(waiter,
+ signaller,
+ BCS,
+ seqno);
+}
+
+
+
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
- intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
- PIPE_CONTROL_DEPTH_STALL | 2); \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
intel_ring_emit(ring__, 0); \
intel_ring_emit(ring__, 0); \
@@ -419,8 +578,9 @@ pc_render_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
@@ -435,8 +595,9 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
@@ -1026,7 +1187,12 @@ static const struct intel_ring_buffer render_ring = {
.irq_get = render_ring_get_irq,
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
+ .cleanup = render_ring_cleanup,
+ .sync_to = render_ring_sync_to,
+ .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
+ MI_SEMAPHORE_SYNC_RV,
+ MI_SEMAPHORE_SYNC_RB},
+ .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};
/* ring buffer for bit-stream decoder */
@@ -1050,23 +1216,23 @@ static const struct intel_ring_buffer bsd_ring = {
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
- I915_WRITE(GEN6_BSD_RNCID, 0x0);
-
- if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
- 50))
- DRM_ERROR("timed out waiting for IDLE Indicator\n");
-
- I915_WRITE_TAIL(ring, value);
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+ I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+ I915_WRITE_TAIL(ring, value);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
@@ -1094,18 +1260,18 @@ static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
- int ret;
+ int ret;
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
- /* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
- return 0;
+ return 0;
}
static bool
@@ -1154,6 +1320,11 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
.irq_get = gen6_bsd_ring_get_irq,
.irq_put = gen6_bsd_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
+ .sync_to = gen6_bsd_ring_sync_to,
+ .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
+ MI_SEMAPHORE_SYNC_INVALID,
+ MI_SEMAPHORE_SYNC_VB},
+ .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};
/* Blitter support (SandyBridge+) */
@@ -1272,19 +1443,24 @@ static void blt_ring_cleanup(struct intel_ring_buffer *ring)
}
static const struct intel_ring_buffer gen6_blt_ring = {
- .name = "blt ring",
- .id = RING_BLT,
- .mmio_base = BLT_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = blt_ring_init,
- .write_tail = ring_write_tail,
- .flush = blt_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = blt_ring_get_irq,
- .irq_put = blt_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .cleanup = blt_ring_cleanup,
+ .name = "blt ring",
+ .id = RING_BLT,
+ .mmio_base = BLT_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = blt_ring_init,
+ .write_tail = ring_write_tail,
+ .flush = blt_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
+ .cleanup = blt_ring_cleanup,
+ .sync_to = gen6_blt_ring_sync_to,
+ .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
+ MI_SEMAPHORE_SYNC_BV,
+ MI_SEMAPHORE_SYNC_INVALID},
+ .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};
int intel_init_render_ring_buffer(struct drm_device *dev)
@@ -1295,6 +1471,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
*ring = render_ring;
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
+ ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_render_ring_get_irq;
ring->irq_put = gen6_render_ring_put_irq;
} else if (IS_GEN5(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 39ac2b634ae..68281c96c55 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,10 +2,10 @@
#define _INTEL_RINGBUFFER_H_
enum {
- RCS = 0x0,
- VCS,
- BCS,
- I915_NUM_RINGS,
+ RCS = 0x0,
+ VCS,
+ BCS,
+ I915_NUM_RINGS,
};
struct intel_hw_status_page {
@@ -75,7 +75,12 @@ struct intel_ring_buffer {
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
+ int (*sync_to)(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
+ u32 semaphore_register[3]; /*our mbox written by others */
+ u32 signal_mbox[2]; /* mboxes this ring signals to */
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@@ -180,9 +185,6 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
-int intel_ring_sync(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *to,
- u32 seqno);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6348c499616..3003fb25aef 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -43,7 +44,7 @@
#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
- SDVO_TV_MASK)
+ SDVO_TV_MASK)
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
@@ -288,117 +289,117 @@ static const struct _sdvo_cmd_name {
u8 cmd;
const char *name;
} sdvo_cmd_names[] = {
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
-
- /* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
-
- /* HDMI op code */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
@@ -1232,8 +1233,7 @@ static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
/* Is there more than one type of output? */
- int caps = intel_sdvo->caps.output_flags & 0xf;
- return caps & -caps;
+ return hweight16(intel_sdvo->caps.output_flags) > 1;
}
static struct edid *
@@ -1254,7 +1254,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
}
enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
enum drm_connector_status status;
@@ -1349,7 +1349,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (IS_TMDS(intel_sdvo_connector))
- ret = intel_sdvo_hdmi_sink_detect(connector);
+ ret = intel_sdvo_tmds_sink_detect(connector);
else {
struct edid *edid;
@@ -1896,7 +1896,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct intel_sdvo *sdvo, u32 reg)
{
struct sdvo_device_mapping *mapping;
- u8 pin, speed;
+ u8 pin;
if (IS_SDVOB(reg))
mapping = &dev_priv->sdvo_mappings[0];
@@ -1904,18 +1904,16 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
mapping = &dev_priv->sdvo_mappings[1];
pin = GMBUS_PORT_DPB;
- speed = GMBUS_RATE_1MHZ >> 8;
- if (mapping->initialized) {
+ if (mapping->initialized)
pin = mapping->i2c_pin;
- speed = mapping->i2c_speed;
- }
if (pin < GMBUS_NUM_PORTS) {
sdvo->i2c = &dev_priv->gmbus[pin].adapter;
- intel_gmbus_set_speed(sdvo->i2c, speed);
+ intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
- } else
+ } else {
sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+ }
}
static bool
@@ -2206,7 +2204,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
return true;
}
@@ -2275,7 +2273,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
data_value[0], data_value[1], response); \
} \
-} while(0)
+} while (0)
static bool
intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
@@ -2442,7 +2440,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
if (IS_TV(intel_sdvo_connector))
return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
- else if(IS_LVDS(intel_sdvo_connector))
+ else if (IS_LVDS(intel_sdvo_connector))
return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
else
return true;
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 4f4e23bc2d1..4aa6f343e49 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -46,63 +46,63 @@
#define SDVO_OUTPUT_LAST (14)
struct intel_sdvo_caps {
- u8 vendor_id;
- u8 device_id;
- u8 device_rev_id;
- u8 sdvo_version_major;
- u8 sdvo_version_minor;
- unsigned int sdvo_inputs_mask:2;
- unsigned int smooth_scaling:1;
- unsigned int sharp_scaling:1;
- unsigned int up_scaling:1;
- unsigned int down_scaling:1;
- unsigned int stall_support:1;
- unsigned int pad:1;
- u16 output_flags;
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
} __attribute__((packed));
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
- struct {
- u16 clock; /**< pixel clock, in 10kHz units */
- u8 h_active; /**< lower 8 bits (pixels) */
- u8 h_blank; /**< lower 8 bits (pixels) */
- u8 h_high; /**< upper 4 bits each h_active, h_blank */
- u8 v_active; /**< lower 8 bits (lines) */
- u8 v_blank; /**< lower 8 bits (lines) */
- u8 v_high; /**< upper 4 bits each v_active, v_blank */
- } part1;
-
- struct {
- u8 h_sync_off; /**< lower 8 bits, from hblank start */
- u8 h_sync_width; /**< lower 8 bits (pixels) */
- /** lower 4 bits each vsync offset, vsync width */
- u8 v_sync_off_width;
- /**
- * 2 high bits of hsync offset, 2 high bits of hsync width,
- * bits 4-5 of vsync offset, and 2 high bits of vsync width.
- */
- u8 sync_off_width_high;
- u8 dtd_flags;
- u8 sdvo_flags;
- /** bits 6-7 of vsync offset at bits 6-7 */
- u8 v_sync_off_high;
- u8 reserved;
- } part2;
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** bits 6-7 of vsync offset at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
} __attribute__((packed));
struct intel_sdvo_pixel_clock_range {
- u16 min; /**< pixel clock, in 10kHz units */
- u16 max; /**< pixel clock, in 10kHz units */
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
} __attribute__((packed));
struct intel_sdvo_preferred_input_timing_args {
- u16 clock;
- u16 width;
- u16 height;
- u8 interlace:1;
- u8 scaled:1;
- u8 pad:6;
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
} __attribute__((packed));
/* I2C registers for SDVO */
@@ -154,9 +154,9 @@ struct intel_sdvo_preferred_input_timing_args {
*/
#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
struct intel_sdvo_get_trained_inputs_response {
- unsigned int input0_trained:1;
- unsigned int input1_trained:1;
- unsigned int pad:6;
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
} __attribute__((packed));
/** Returns a struct intel_sdvo_output_flags of active outputs. */
@@ -177,7 +177,7 @@ struct intel_sdvo_get_trained_inputs_response {
*/
#define SDVO_CMD_GET_IN_OUT_MAP 0x06
struct intel_sdvo_in_out_map {
- u16 in0, in1;
+ u16 in0, in1;
};
/**
@@ -210,10 +210,10 @@ struct intel_sdvo_in_out_map {
#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
struct intel_sdvo_get_interrupt_event_source_response {
- u16 interrupt_status;
- unsigned int ambient_light_interrupt:1;
- unsigned int hdmi_audio_encrypt_change:1;
- unsigned int pad:6;
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
} __attribute__((packed));
/**
@@ -225,8 +225,8 @@ struct intel_sdvo_get_interrupt_event_source_response {
*/
#define SDVO_CMD_SET_TARGET_INPUT 0x10
struct intel_sdvo_set_target_input_args {
- unsigned int target_1:1;
- unsigned int pad:7;
+ unsigned int target_1:1;
+ unsigned int pad:7;
} __attribute__((packed));
/**
@@ -314,57 +314,57 @@ struct intel_sdvo_set_target_input_args {
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
/** 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
- unsigned int ntsc_m:1;
- unsigned int ntsc_j:1;
- unsigned int ntsc_443:1;
- unsigned int pal_b:1;
- unsigned int pal_d:1;
- unsigned int pal_g:1;
- unsigned int pal_h:1;
- unsigned int pal_i:1;
-
- unsigned int pal_m:1;
- unsigned int pal_n:1;
- unsigned int pal_nc:1;
- unsigned int pal_60:1;
- unsigned int secam_b:1;
- unsigned int secam_d:1;
- unsigned int secam_g:1;
- unsigned int secam_k:1;
-
- unsigned int secam_k1:1;
- unsigned int secam_l:1;
- unsigned int secam_60:1;
- unsigned int hdtv_std_smpte_240m_1080i_59:1;
- unsigned int hdtv_std_smpte_240m_1080i_60:1;
- unsigned int hdtv_std_smpte_260m_1080i_59:1;
- unsigned int hdtv_std_smpte_260m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080i_50:1;
-
- unsigned int hdtv_std_smpte_274m_1080i_59:1;
- unsigned int hdtv_std_smpte_274m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080p_23:1;
- unsigned int hdtv_std_smpte_274m_1080p_24:1;
- unsigned int hdtv_std_smpte_274m_1080p_25:1;
- unsigned int hdtv_std_smpte_274m_1080p_29:1;
- unsigned int hdtv_std_smpte_274m_1080p_30:1;
- unsigned int hdtv_std_smpte_274m_1080p_50:1;
-
- unsigned int hdtv_std_smpte_274m_1080p_59:1;
- unsigned int hdtv_std_smpte_274m_1080p_60:1;
- unsigned int hdtv_std_smpte_295m_1080i_50:1;
- unsigned int hdtv_std_smpte_295m_1080p_50:1;
- unsigned int hdtv_std_smpte_296m_720p_59:1;
- unsigned int hdtv_std_smpte_296m_720p_60:1;
- unsigned int hdtv_std_smpte_296m_720p_50:1;
- unsigned int hdtv_std_smpte_293m_480p_59:1;
-
- unsigned int hdtv_std_smpte_170m_480i_59:1;
- unsigned int hdtv_std_iturbt601_576i_50:1;
- unsigned int hdtv_std_iturbt601_576p_50:1;
- unsigned int hdtv_std_eia_7702a_480i_60:1;
- unsigned int hdtv_std_eia_7702a_480p_60:1;
- unsigned int pad:3;
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
} __attribute__((packed));
#define SDVO_CMD_GET_TV_FORMAT 0x28
@@ -374,53 +374,53 @@ struct intel_sdvo_tv_format {
/** Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
- unsigned int ntsc_m:1;
- unsigned int ntsc_j:1;
- unsigned int ntsc_443:1;
- unsigned int pal_b:1;
- unsigned int pal_d:1;
- unsigned int pal_g:1;
- unsigned int pal_h:1;
- unsigned int pal_i:1;
-
- unsigned int pal_m:1;
- unsigned int pal_n:1;
- unsigned int pal_nc:1;
- unsigned int pal_60:1;
- unsigned int secam_b:1;
- unsigned int secam_d:1;
- unsigned int secam_g:1;
- unsigned int secam_k:1;
-
- unsigned int secam_k1:1;
- unsigned int secam_l:1;
- unsigned int secam_60:1;
- unsigned int pad:5;
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
} __attribute__((packed));
struct intel_sdvo_sdtv_resolution_reply {
- unsigned int res_320x200:1;
- unsigned int res_320x240:1;
- unsigned int res_400x300:1;
- unsigned int res_640x350:1;
- unsigned int res_640x400:1;
- unsigned int res_640x480:1;
- unsigned int res_704x480:1;
- unsigned int res_704x576:1;
-
- unsigned int res_720x350:1;
- unsigned int res_720x400:1;
- unsigned int res_720x480:1;
- unsigned int res_720x540:1;
- unsigned int res_720x576:1;
- unsigned int res_768x576:1;
- unsigned int res_800x600:1;
- unsigned int res_832x624:1;
-
- unsigned int res_920x766:1;
- unsigned int res_1024x768:1;
- unsigned int res_1280x1024:1;
- unsigned int pad:5;
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
} __attribute__((packed));
/* Get supported resolution with square pixel aspect ratio that can be
@@ -428,90 +428,90 @@ struct intel_sdvo_sdtv_resolution_reply {
#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
struct intel_sdvo_hdtv_resolution_request {
- unsigned int hdtv_std_smpte_240m_1080i_59:1;
- unsigned int hdtv_std_smpte_240m_1080i_60:1;
- unsigned int hdtv_std_smpte_260m_1080i_59:1;
- unsigned int hdtv_std_smpte_260m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080i_50:1;
- unsigned int hdtv_std_smpte_274m_1080i_59:1;
- unsigned int hdtv_std_smpte_274m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080p_23:1;
-
- unsigned int hdtv_std_smpte_274m_1080p_24:1;
- unsigned int hdtv_std_smpte_274m_1080p_25:1;
- unsigned int hdtv_std_smpte_274m_1080p_29:1;
- unsigned int hdtv_std_smpte_274m_1080p_30:1;
- unsigned int hdtv_std_smpte_274m_1080p_50:1;
- unsigned int hdtv_std_smpte_274m_1080p_59:1;
- unsigned int hdtv_std_smpte_274m_1080p_60:1;
- unsigned int hdtv_std_smpte_295m_1080i_50:1;
-
- unsigned int hdtv_std_smpte_295m_1080p_50:1;
- unsigned int hdtv_std_smpte_296m_720p_59:1;
- unsigned int hdtv_std_smpte_296m_720p_60:1;
- unsigned int hdtv_std_smpte_296m_720p_50:1;
- unsigned int hdtv_std_smpte_293m_480p_59:1;
- unsigned int hdtv_std_smpte_170m_480i_59:1;
- unsigned int hdtv_std_iturbt601_576i_50:1;
- unsigned int hdtv_std_iturbt601_576p_50:1;
-
- unsigned int hdtv_std_eia_7702a_480i_60:1;
- unsigned int hdtv_std_eia_7702a_480p_60:1;
- unsigned int pad:6;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
} __attribute__((packed));
struct intel_sdvo_hdtv_resolution_reply {
- unsigned int res_640x480:1;
- unsigned int res_800x600:1;
- unsigned int res_1024x768:1;
- unsigned int res_1280x960:1;
- unsigned int res_1400x1050:1;
- unsigned int res_1600x1200:1;
- unsigned int res_1920x1440:1;
- unsigned int res_2048x1536:1;
-
- unsigned int res_2560x1920:1;
- unsigned int res_3200x2400:1;
- unsigned int res_3840x2880:1;
- unsigned int pad1:5;
-
- unsigned int res_848x480:1;
- unsigned int res_1064x600:1;
- unsigned int res_1280x720:1;
- unsigned int res_1360x768:1;
- unsigned int res_1704x960:1;
- unsigned int res_1864x1050:1;
- unsigned int res_1920x1080:1;
- unsigned int res_2128x1200:1;
-
- unsigned int res_2560x1400:1;
- unsigned int res_2728x1536:1;
- unsigned int res_3408x1920:1;
- unsigned int res_4264x2400:1;
- unsigned int res_5120x2880:1;
- unsigned int pad2:3;
-
- unsigned int res_768x480:1;
- unsigned int res_960x600:1;
- unsigned int res_1152x720:1;
- unsigned int res_1124x768:1;
- unsigned int res_1536x960:1;
- unsigned int res_1680x1050:1;
- unsigned int res_1728x1080:1;
- unsigned int res_1920x1200:1;
-
- unsigned int res_2304x1440:1;
- unsigned int res_2456x1536:1;
- unsigned int res_3072x1920:1;
- unsigned int res_3840x2400:1;
- unsigned int res_4608x2880:1;
- unsigned int pad3:3;
-
- unsigned int res_1280x1024:1;
- unsigned int pad4:7;
-
- unsigned int res_1280x768:1;
- unsigned int pad5:7;
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
} __attribute__((packed));
/* Get supported power state returns info for encoder and monitor, rely on
@@ -539,25 +539,25 @@ struct intel_sdvo_hdtv_resolution_reply {
* The high fields are bits 8:9 of the 10-bit values.
*/
struct sdvo_panel_power_sequencing {
- u8 t0;
- u8 t1;
- u8 t2;
- u8 t3;
- u8 t4;
-
- unsigned int t0_high:2;
- unsigned int t1_high:2;
- unsigned int t2_high:2;
- unsigned int t3_high:2;
-
- unsigned int t4_high:2;
- unsigned int pad:6;
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
} __attribute__((packed));
#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
struct sdvo_max_backlight_reply {
- u8 max_value;
- u8 default_value;
+ u8 max_value;
+ u8 default_value;
} __attribute__((packed));
#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
@@ -565,16 +565,16 @@ struct sdvo_max_backlight_reply {
#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
struct sdvo_get_ambient_light_reply {
- u16 trip_low;
- u16 trip_high;
- u16 value;
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
} __attribute__((packed));
#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
struct sdvo_set_ambient_light_reply {
- u16 trip_low;
- u16 trip_high;
- unsigned int enable:1;
- unsigned int pad:7;
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
} __attribute__((packed));
/* Set display power state */
@@ -586,23 +586,23 @@ struct sdvo_set_ambient_light_reply {
#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
struct intel_sdvo_enhancements_reply {
- unsigned int flicker_filter:1;
- unsigned int flicker_filter_adaptive:1;
- unsigned int flicker_filter_2d:1;
- unsigned int saturation:1;
- unsigned int hue:1;
- unsigned int brightness:1;
- unsigned int contrast:1;
- unsigned int overscan_h:1;
-
- unsigned int overscan_v:1;
- unsigned int hpos:1;
- unsigned int vpos:1;
- unsigned int sharpness:1;
- unsigned int dot_crawl:1;
- unsigned int dither:1;
- unsigned int tv_chroma_filter:1;
- unsigned int tv_luma_filter:1;
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
} __attribute__((packed));
/* Picture enhancement limits below are dependent on the current TV format,
@@ -623,8 +623,8 @@ struct intel_sdvo_enhancements_reply {
#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
struct intel_sdvo_enhancement_limits_reply {
- u16 max_value;
- u16 default_value;
+ u16 max_value;
+ u16 default_value;
} __attribute__((packed));
#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
@@ -665,8 +665,8 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
- u16 value;
-}__attribute__((packed));
+ u16 value;
+} __attribute__((packed));
#define SDVO_CMD_GET_DOT_CRAWL 0x70
#define SDVO_CMD_SET_DOT_CRAWL 0x71
@@ -717,7 +717,7 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
#define SDVO_NEED_TO_STALL (1 << 7)
-struct intel_sdvo_encode{
- u8 dvi_rev;
- u8 hdmi_rev;
+struct intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 210d570fd51..f3c6a9a8b08 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -194,10 +194,10 @@ static const u32 filter_table[] = {
*
* if (f >= 1) {
* exp = 0x7;
- * mant = 1 << 8;
+ * mant = 1 << 8;
* } else {
* for (exp = 0; exp < 3 && f < 0.5; exp++)
- * f *= 2.0;
+ * f *= 2.0;
* mant = (f * (1 << 9) + 0.5);
* if (mant >= (1 << 9))
* mant = (1 << 9) - 1;
@@ -430,7 +430,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -472,7 +472,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -515,7 +515,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -558,7 +558,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -602,14 +602,14 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 24, .vi_end_f2 = 25,
.nbr_end = 286,
.burst_ena = true,
- .hburst_start = 73, .hburst_len = 34,
+ .hburst_start = 73, .hburst_len = 34,
.vburst_start_f1 = 8, .vburst_end_f1 = 285,
.vburst_start_f2 = 8, .vburst_end_f2 = 286,
.vburst_start_f3 = 9, .vburst_end_f3 = 286,
@@ -646,7 +646,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 5, .vsync_start_f2 = 6,
.vsync_len = 5,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 15,
.vi_end_f1 = 24, .vi_end_f2 = 25,
@@ -675,7 +675,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "480p@59.94Hz",
- .clock = 107520,
+ .clock = 107520,
.refresh = 59940,
.oversample = TV_OVERSAMPLE_4X,
.component_only = 1,
@@ -683,7 +683,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 64, .hblank_end = 122,
.hblank_start = 842, .htotal = 857,
- .progressive = true,.trilevel_sync = false,
+ .progressive = true, .trilevel_sync = false,
.vsync_start_f1 = 12, .vsync_start_f2 = 12,
.vsync_len = 12,
@@ -699,7 +699,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "480p@60Hz",
- .clock = 107520,
+ .clock = 107520,
.refresh = 60000,
.oversample = TV_OVERSAMPLE_4X,
.component_only = 1,
@@ -707,7 +707,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 64, .hblank_end = 122,
.hblank_start = 842, .htotal = 856,
- .progressive = true,.trilevel_sync = false,
+ .progressive = true, .trilevel_sync = false,
.vsync_start_f1 = 12, .vsync_start_f2 = 12,
.vsync_len = 12,
@@ -723,7 +723,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "576p",
- .clock = 107520,
+ .clock = 107520,
.refresh = 50000,
.oversample = TV_OVERSAMPLE_4X,
.component_only = 1,
@@ -755,7 +755,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1649,
- .progressive = true, .trilevel_sync = true,
+ .progressive = true, .trilevel_sync = true,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -779,7 +779,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1651,
- .progressive = true, .trilevel_sync = true,
+ .progressive = true, .trilevel_sync = true,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -803,7 +803,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1979,
- .progressive = true, .trilevel_sync = true,
+ .progressive = true, .trilevel_sync = true,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -828,12 +828,12 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2639,
- .progressive = false, .trilevel_sync = true,
+ .progressive = false, .trilevel_sync = true,
.vsync_start_f1 = 4, .vsync_start_f2 = 5,
.vsync_len = 10,
- .veq_ena = true, .veq_start_f1 = 4,
+ .veq_ena = true, .veq_start_f1 = 4,
.veq_start_f2 = 4, .veq_len = 10,
@@ -854,12 +854,12 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2199,
- .progressive = false, .trilevel_sync = true,
+ .progressive = false, .trilevel_sync = true,
.vsync_start_f1 = 4, .vsync_start_f2 = 5,
.vsync_len = 10,
- .veq_ena = true, .veq_start_f1 = 4,
+ .veq_ena = true, .veq_start_f1 = 4,
.veq_start_f2 = 4, .veq_len = 10,
@@ -880,16 +880,16 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2201,
- .progressive = false, .trilevel_sync = true,
+ .progressive = false, .trilevel_sync = true,
.vsync_start_f1 = 4, .vsync_start_f2 = 5,
.vsync_len = 10,
.veq_ena = true, .veq_start_f1 = 4,
- .veq_start_f2 = 4, .veq_len = 10,
+ .veq_start_f2 = 4, .veq_len = 10,
- .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
.nbr_end = 539,
.burst_ena = false,
@@ -916,7 +916,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- switch(mode) {
+ switch (mode) {
case DRM_MODE_DPMS_ON:
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
break;
@@ -933,7 +933,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < sizeof(tv_modes) / sizeof (tv_modes[0]); i++) {
+ for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@@ -1128,7 +1128,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (color_conversion) {
I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
color_conversion->gy);
- I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) |
+ I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
color_conversion->ay);
I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
color_conversion->gu);
@@ -1232,7 +1232,7 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct intel_tv *intel_tv,
+intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
{
struct drm_encoder *encoder = &intel_tv->base.base;
@@ -1486,7 +1486,7 @@ intel_tv_get_modes(struct drm_connector *connector)
}
static void
-intel_tv_destroy (struct drm_connector *connector)
+intel_tv_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
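The loop fixed above in intel_tv_mode_lookup() counts elements with the open-coded sizeof(tv_modes) / sizeof(tv_modes[0]) expression; the kernel's ARRAY_SIZE() macro expresses the same computation. A minimal stand-alone sketch of the idiom, using placeholder mode data rather than the real i915 tables:

#include <stddef.h>
#include <stdio.h>

/* Same computation the lookup loop performs by hand. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct tv_mode_stub {
        const char *name;
        int clock;              /* placeholder values, not real mode data */
};

static const struct tv_mode_stub modes[] = {
        { "480p",  100000 },
        { "576p",  107520 },
        { "1080i", 140000 },
};

int main(void)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(modes); i++)
                printf("%s: %d\n", modes[i].name, modes[i].clock);
        return 0;
}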
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 42d31874edf..33daa29eea6 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -29,6 +29,8 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index f172bd5c257..722a91b69b0 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -30,6 +30,7 @@
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 0583677e458..35ef5b1e356 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -21,16 +21,17 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
nv84_crypt.o \
nva3_copy.o nvc0_copy.o \
- nv40_mpeg.o nv50_mpeg.o \
+ nv31_mpeg.o nv50_mpeg.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
- nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
- nv50_cursor.o nv50_display.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o \
+ nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+ nv50_cursor.o nv50_display.o \
+ nvd0_display.o \
nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
nv10_gpio.o nv50_gpio.o \
nv50_calc.o \
- nv04_pm.o nv50_pm.o nva3_pm.o \
+ nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
nv50_vram.o nvc0_vram.o \
nv50_vm.o nvc0_vm.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 00a55dfdba8..fa22b28e877 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -37,8 +37,10 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_reg.h"
+#include "nouveau_encoder.h"
-static int nv40_get_intensity(struct backlight_device *bd)
+static int
+nv40_get_intensity(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
@@ -47,7 +49,8 @@ static int nv40_get_intensity(struct backlight_device *bd)
return val;
}
-static int nv40_set_intensity(struct backlight_device *bd)
+static int
+nv40_set_intensity(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
int val = bd->props.brightness;
@@ -65,30 +68,8 @@ static const struct backlight_ops nv40_bl_ops = {
.update_status = nv40_set_intensity,
};
-static int nv50_get_intensity(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(bd);
-
- return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
-}
-
-static int nv50_set_intensity(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(bd);
- int val = bd->props.brightness;
-
- nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
- val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
- return 0;
-}
-
-static const struct backlight_ops nv50_bl_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = nv50_get_intensity,
- .update_status = nv50_set_intensity,
-};
-
-static int nouveau_nv40_backlight_init(struct drm_connector *connector)
+static int
+nv40_backlight_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -113,34 +94,129 @@ static int nouveau_nv40_backlight_init(struct drm_connector *connector)
return 0;
}
-static int nouveau_nv50_backlight_init(struct drm_connector *connector)
+static int
+nv50_get_intensity(struct backlight_device *bd)
+{
+ struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+ struct drm_device *dev = nv_encoder->base.base.dev;
+ int or = nv_encoder->or;
+ u32 div = 1025;
+ u32 val;
+
+ val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
+ val &= NV50_PDISP_SOR_PWM_CTL_VAL;
+ return ((val * 100) + (div / 2)) / div;
+}
+
+static int
+nv50_set_intensity(struct backlight_device *bd)
+{
+ struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+ struct drm_device *dev = nv_encoder->base.base.dev;
+ int or = nv_encoder->or;
+ u32 div = 1025;
+ u32 val = (bd->props.brightness * div) / 100;
+
+ nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or),
+ NV50_PDISP_SOR_PWM_CTL_NEW | val);
+ return 0;
+}
+
+static const struct backlight_ops nv50_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nv50_get_intensity,
+ .update_status = nv50_set_intensity,
+};
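The nv50 backlight ops introduced above translate between a 0-100 brightness scale and a raw PWM duty value using a fixed divider of 1025, rounding to nearest when reading back. A self-contained sketch of just that conversion; the divider comes from the patch, everything else is illustrative:

#include <stdio.h>

/* Fixed PWM divider used by the nv50 path above. */
#define NV50_PWM_DIV 1025u

/* Raw duty -> percent, rounded to nearest (mirrors nv50_get_intensity()). */
static unsigned int duty_to_percent(unsigned int duty)
{
        return ((duty * 100u) + (NV50_PWM_DIV / 2)) / NV50_PWM_DIV;
}

/* Percent -> raw duty (mirrors nv50_set_intensity()). */
static unsigned int percent_to_duty(unsigned int percent)
{
        return (percent * NV50_PWM_DIV) / 100u;
}

int main(void)
{
        unsigned int p;

        for (p = 0; p <= 100; p += 25)
                printf("%3u%% -> duty %4u -> %3u%%\n",
                       p, percent_to_duty(p),
                       duty_to_percent(percent_to_duty(p)));
        return 0;
}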
+
+static int
+nva3_get_intensity(struct backlight_device *bd)
+{
+ struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+ struct drm_device *dev = nv_encoder->base.base.dev;
+ int or = nv_encoder->or;
+ u32 div, val;
+
+ div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
+ val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
+ val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
+ if (div && div >= val)
+ return ((val * 100) + (div / 2)) / div;
+
+ return 100;
+}
+
+static int
+nva3_set_intensity(struct backlight_device *bd)
+{
+ struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+ struct drm_device *dev = nv_encoder->base.base.dev;
+ int or = nv_encoder->or;
+ u32 div, val;
+
+ div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
+ val = (bd->props.brightness * div) / 100;
+ if (div) {
+ nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val |
+ NV50_PDISP_SOR_PWM_CTL_NEW |
+ NVA3_PDISP_SOR_PWM_CTL_UNK);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct backlight_ops nva3_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nva3_get_intensity,
+ .update_status = nva3_set_intensity,
+};
+
+static int
+nv50_backlight_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_encoder *nv_encoder;
struct backlight_properties props;
struct backlight_device *bd;
+ const struct backlight_ops *ops;
+
+ nv_encoder = find_encoder(connector, OUTPUT_LVDS);
+ if (!nv_encoder) {
+ nv_encoder = find_encoder(connector, OUTPUT_DP);
+ if (!nv_encoder)
+ return -ENODEV;
+ }
- if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
+ if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
return 0;
+ if (dev_priv->chipset <= 0xa0 ||
+ dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
+ ops = &nv50_bl_ops;
+ else
+ ops = &nva3_bl_ops;
+
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
- props.max_brightness = 1025;
- bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
- &nv50_bl_ops, &props);
+ props.max_brightness = 100;
+ bd = backlight_device_register("nv_backlight", &connector->kdev,
+ nv_encoder, ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
dev_priv->backlight = bd;
- bd->props.brightness = nv50_get_intensity(bd);
+ bd->props.brightness = bd->ops->get_brightness(bd);
backlight_update_status(bd);
return 0;
}
-int nouveau_backlight_init(struct drm_connector *connector)
+int
+nouveau_backlight_init(struct drm_device *dev)
{
- struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
#ifdef CONFIG_ACPI
if (acpi_video_backlight_support()) {
@@ -150,21 +226,28 @@ int nouveau_backlight_init(struct drm_connector *connector)
}
#endif
- switch (dev_priv->card_type) {
- case NV_40:
- return nouveau_nv40_backlight_init(connector);
- case NV_50:
- return nouveau_nv50_backlight_init(connector);
- default:
- break;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+ connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ continue;
+
+ switch (dev_priv->card_type) {
+ case NV_40:
+ return nv40_backlight_init(connector);
+ case NV_50:
+ return nv50_backlight_init(connector);
+ default:
+ break;
+ }
}
+
return 0;
}
-void nouveau_backlight_exit(struct drm_connector *connector)
+void
+nouveau_backlight_exit(struct drm_device *dev)
{
- struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->backlight) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b311faba34f..5fc201b49d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -296,6 +296,11 @@ munge_reg(struct nvbios *bios, uint32_t reg)
if (dev_priv->card_type < NV_50)
return reg;
+ if (reg & 0x80000000) {
+ BUG_ON(bios->display.crtc < 0);
+ reg += bios->display.crtc * 0x800;
+ }
+
if (reg & 0x40000000) {
BUG_ON(!dcbent);
@@ -304,7 +309,7 @@ munge_reg(struct nvbios *bios, uint32_t reg)
reg += 0x00000080;
}
- reg &= ~0x60000000;
+ reg &= ~0xe0000000;
return reg;
}
@@ -635,10 +640,9 @@ static int
nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t reg0 = nv_rd32(dev, reg + 0);
- uint32_t reg1 = nv_rd32(dev, reg + 4);
struct nouveau_pll_vals pll;
struct pll_lims pll_limits;
+ u32 ctrl, mask, coef;
int ret;
ret = get_pll_limits(dev, reg, &pll_limits);
@@ -649,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
if (!clk)
return -ERANGE;
- reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
- reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
-
- if (dev_priv->vbios.execute) {
- still_alive();
- nv_wr32(dev, reg + 4, reg1);
- nv_wr32(dev, reg + 0, reg0);
+ coef = pll.N1 << 8 | pll.M1;
+ ctrl = pll.log2P << 16;
+ mask = 0x00070000;
+ if (reg == 0x004008) {
+ mask |= 0x01f80000;
+ ctrl |= (pll_limits.log2p_bias << 19);
+ ctrl |= (pll.log2P << 22);
}
+ if (!dev_priv->vbios.execute)
+ return 0;
+
+ nv_mask(dev, reg + 0, mask, ctrl);
+ nv_wr32(dev, reg + 4, coef);
return 0;
}
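The reworked nv50_pll_set() above packs N1/M1 into one coefficient word (N1 << 8 | M1) and applies log2P through a masked read-modify-write rather than rewriting the whole control register. A rough stand-alone sketch of that packing and masked update, using invented register contents and divider values; reg_mask() is only a pure-function stand-in for the masked write nv_mask() performs above:

#include <stdint.h>
#include <stdio.h>

/* Clear the bits in 'mask', then OR in 'val' (val must fit within mask). */
static uint32_t reg_mask(uint32_t cur, uint32_t mask, uint32_t val)
{
        return (cur & ~mask) | val;
}

int main(void)
{
        /* Hypothetical register contents and PLL parameters. */
        uint32_t ctrl_reg = 0x80011234;
        uint32_t log2p = 2, n1 = 0x55, m1 = 0x0e;

        uint32_t coef = (n1 << 8) | m1;         /* N1/M1 packed as above */
        uint32_t ctrl = log2p << 16;            /* log2P in bits 18:16 */
        uint32_t mask = 0x00070000;

        printf("ctrl: 0x%08x -> 0x%08x, coef 0x%08x\n",
               ctrl_reg, reg_mask(ctrl_reg, mask, ctrl), coef);
        return 0;
}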
@@ -1174,22 +1183,19 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
*/
- struct bit_displayport_encoder_table *dpe = NULL;
struct dcb_entry *dcb = bios->display.output;
struct drm_device *dev = bios->dev;
uint8_t cond = bios->data[offset + 1];
- int dummy;
+ uint8_t *table, *entry;
BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
if (!iexec->execute)
return 3;
- dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
- if (!dpe) {
- NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+ table = nouveau_dp_bios_data(dev, dcb, &entry);
+ if (!table)
return 3;
- }
switch (cond) {
case 0:
@@ -1203,7 +1209,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
break;
case 1:
case 2:
- if (!(dpe->unknown & cond))
+ if (!(entry[5] & cond))
iexec->execute = false;
break;
case 5:
@@ -3221,6 +3227,49 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 1;
}
+static void
+init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
+{
+ const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
+ u32 r, s, v;
+
+ /* Not a clue, needs de-magicing */
+ r = nv50_gpio_ctl[gpio->line >> 4];
+ s = (gpio->line & 0x0f);
+ v = bios_rd32(bios, r) & ~(0x00010001 << s);
+ switch ((gpio->entry & 0x06000000) >> 25) {
+ case 1:
+ v |= (0x00000001 << s);
+ break;
+ case 2:
+ v |= (0x00010000 << s);
+ break;
+ default:
+ break;
+ }
+
+ bios_wr32(bios, r, v);
+}
+
+static void
+init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
+{
+ u32 v, i;
+
+ v = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
+ v &= 0xffffff00;
+ v |= (gpio->entry & 0x00ff0000) >> 16;
+ bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
+
+ i = (gpio->entry & 0x1f000000) >> 24;
+ if (i) {
+ v = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
+ v &= 0xffffff00;
+ v |= gpio->line;
+ bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
+ }
+}
+
static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
@@ -3235,7 +3284,6 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
int i;
if (dev_priv->card_type < NV_50) {
@@ -3248,33 +3296,20 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
for (i = 0; i < bios->dcb.gpio.entries; i++) {
struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
- uint32_t r, s, v;
BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
offset, gpio->tag, gpio->state_default);
- if (bios->execute)
- pgpio->set(bios->dev, gpio->tag, gpio->state_default);
- /* The NVIDIA binary driver doesn't appear to actually do
- * any of this, my VBIOS does however.
- */
- /* Not a clue, needs de-magicing */
- r = nv50_gpio_ctl[gpio->line >> 4];
- s = (gpio->line & 0x0f);
- v = bios_rd32(bios, r) & ~(0x00010001 << s);
- switch ((gpio->entry & 0x06000000) >> 25) {
- case 1:
- v |= (0x00000001 << s);
- break;
- case 2:
- v |= (0x00010000 << s);
- break;
- default:
- break;
- }
- bios_wr32(bios, r, v);
+ if (!bios->execute)
+ continue;
+
+ pgpio->set(bios->dev, gpio->tag, gpio->state_default);
+ if (dev_priv->card_type < NV_D0)
+ init_gpio_unknv50(bios, gpio);
+ else
+ init_gpio_unknvd0(bios, gpio);
}
return 1;
@@ -3737,6 +3772,10 @@ parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
int count = 0, i, ret;
uint8_t id;
+ /* catch NULL script pointers */
+ if (offset == 0)
+ return 0;
+
/*
* Loop until INIT_DONE causes us to break out of the loop
* (or until offset > bios length just in case... )
@@ -4389,86 +4428,37 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
return 0;
}
-static uint8_t *
-bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
- uint16_t record, int record_len, int record_nr,
- bool match_link)
+/* BIT 'U'/'d' table encoder subtables have hashes matching them to
+ * a particular set of encoders.
+ *
+ * This function returns true if a particular DCB entry matches.
+ */
+bool
+bios_encoder_match(struct dcb_entry *dcb, u32 hash)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- uint32_t entry;
- uint16_t table;
- int i, v;
+ if ((hash & 0x000000f0) != (dcb->location << 4))
+ return false;
+ if ((hash & 0x0000000f) != dcb->type)
+ return false;
+ if (!(hash & (dcb->or << 16)))
+ return false;
- switch (dcbent->type) {
+ switch (dcb->type) {
case OUTPUT_TMDS:
case OUTPUT_LVDS:
case OUTPUT_DP:
- break;
- default:
- match_link = false;
- break;
- }
-
- for (i = 0; i < record_nr; i++, record += record_len) {
- table = ROM16(bios->data[record]);
- if (!table)
- continue;
- entry = ROM32(bios->data[table]);
-
- if (match_link) {
- v = (entry & 0x00c00000) >> 22;
- if (!(v & dcbent->sorconf.link))
- continue;
+ if (hash & 0x00c00000) {
+ if (!(hash & (dcb->sorconf.link << 22)))
+ return false;
}
-
- v = (entry & 0x000f0000) >> 16;
- if (!(v & dcbent->or))
- continue;
-
- v = (entry & 0x000000f0) >> 4;
- if (v != dcbent->location)
- continue;
-
- v = (entry & 0x0000000f);
- if (v != dcbent->type)
- continue;
-
- return &bios->data[table];
- }
-
- return NULL;
-}
-
-void *
-nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
- int *length)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- uint8_t *table;
-
- if (!bios->display.dp_table_ptr) {
- NV_ERROR(dev, "No pointer to DisplayPort table\n");
- return NULL;
- }
- table = &bios->data[bios->display.dp_table_ptr];
-
- if (table[0] != 0x20 && table[0] != 0x21) {
- NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
- table[0]);
- return NULL;
+ default:
+ return true;
}
-
- *length = table[4];
- return bios_output_config_match(dev, dcbent,
- bios->display.dp_table_ptr + table[1],
- table[2], table[3], table[0] >= 0x21);
}
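bios_encoder_match() above checks a 32-bit encoder hash field by field: bits 3:0 carry the output type, bits 7:4 the location, bits 19:16 an OR mask, and for TMDS/LVDS/DP outputs bits 23:22 an optional link mask. A small decode-side sketch under that layout; the sample hash value is made up:

#include <stdint.h>
#include <stdio.h>

/* Pull the individual match fields back out of an encoder hash,
 * following the bit layout used by bios_encoder_match() above. */
static void decode_encoder_hash(uint32_t hash)
{
        unsigned int type      = hash & 0x0000000f;
        unsigned int location  = (hash & 0x000000f0) >> 4;
        unsigned int or_mask   = (hash & 0x000f0000) >> 16;
        unsigned int link_mask = (hash & 0x00c00000) >> 22;

        printf("type=%u location=%u or_mask=0x%x link_mask=0x%x\n",
               type, location, or_mask, link_mask);
}

int main(void)
{
        decode_encoder_hash(0x00410106);        /* hypothetical hash */
        return 0;
}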
int
-nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
- uint32_t sub, int pxclk)
+nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
+ struct dcb_entry *dcbent, int crtc)
{
/*
* The display script table is located by the BIT 'U' table.
@@ -4498,7 +4488,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
uint16_t script;
- int i = 0;
+ int i;
if (!bios->display.script_table_ptr) {
NV_ERROR(dev, "No pointer to output script table\n");
@@ -4550,30 +4540,33 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
dcbent->type, dcbent->location, dcbent->or);
- otable = bios_output_config_match(dev, dcbent, table[1] +
- bios->display.script_table_ptr,
- table[2], table[3], table[0] >= 0x21);
+ for (i = 0; i < table[3]; i++) {
+ otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
+ break;
+ }
+
if (!otable) {
NV_DEBUG_KMS(dev, "failed to match any output table\n");
return 1;
}
- if (pxclk < -2 || pxclk > 0) {
+ if (pclk < -2 || pclk > 0) {
/* Try to find matching script table entry */
for (i = 0; i < otable[5]; i++) {
- if (ROM16(otable[table[4] + i*6]) == sub)
+ if (ROM16(otable[table[4] + i*6]) == type)
break;
}
if (i == otable[5]) {
NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
"using first\n",
- sub, dcbent->type, dcbent->or);
+ type, dcbent->type, dcbent->or);
i = 0;
}
}
- if (pxclk == 0) {
+ if (pclk == 0) {
script = ROM16(otable[6]);
if (!script) {
NV_DEBUG_KMS(dev, "output script 0 not found\n");
@@ -4581,9 +4574,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent);
+ nouveau_bios_run_init_table(dev, script, dcbent, crtc);
} else
- if (pxclk == -1) {
+ if (pclk == -1) {
script = ROM16(otable[8]);
if (!script) {
NV_DEBUG_KMS(dev, "output script 1 not found\n");
@@ -4591,9 +4584,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent);
+ nouveau_bios_run_init_table(dev, script, dcbent, crtc);
} else
- if (pxclk == -2) {
+ if (pclk == -2) {
if (table[4] >= 12)
script = ROM16(otable[10]);
else
@@ -4604,31 +4597,31 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent);
+ nouveau_bios_run_init_table(dev, script, dcbent, crtc);
} else
- if (pxclk > 0) {
+ if (pclk > 0) {
script = ROM16(otable[table[4] + i*6 + 2]);
if (script)
- script = clkcmptable(bios, script, pxclk);
+ script = clkcmptable(bios, script, pclk);
if (!script) {
NV_DEBUG_KMS(dev, "clock script 0 not found\n");
return 1;
}
NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent);
+ nouveau_bios_run_init_table(dev, script, dcbent, crtc);
} else
- if (pxclk < 0) {
+ if (pclk < 0) {
script = ROM16(otable[table[4] + i*6 + 4]);
if (script)
- script = clkcmptable(bios, script, -pxclk);
+ script = clkcmptable(bios, script, -pclk);
if (!script) {
NV_DEBUG_KMS(dev, "clock script 1 not found\n");
return 1;
}
NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent);
+ nouveau_bios_run_init_table(dev, script, dcbent, crtc);
}
return 0;
@@ -5478,14 +5471,6 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
}
-static int
-parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
- struct bit_entry *bitentry)
-{
- bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
- return 0;
-}
-
struct bit_table {
const char id;
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -5559,7 +5544,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
- parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
return 0;
}
@@ -5884,9 +5868,15 @@ parse_dcb_gpio_table(struct nvbios *bios)
}
e->line = (e->entry & 0x0000001f) >> 0;
- e->state_default = (e->entry & 0x01000000) >> 24;
- e->state[0] = (e->entry & 0x18000000) >> 27;
- e->state[1] = (e->entry & 0x60000000) >> 29;
+ if (gpio[0] == 0x40) {
+ e->state_default = (e->entry & 0x01000000) >> 24;
+ e->state[0] = (e->entry & 0x18000000) >> 27;
+ e->state[1] = (e->entry & 0x60000000) >> 29;
+ } else {
+ e->state_default = (e->entry & 0x00000080) >> 7;
+ e->state[0] = (entry[4] >> 4) & 3;
+ e->state[1] = (entry[4] >> 6) & 3;
+ }
}
}
@@ -6156,7 +6146,14 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
case OUTPUT_DP:
entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
- entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
+ switch ((conf & 0x00e00000) >> 21) {
+ case 0:
+ entry->dpconf.link_bw = 162000;
+ break;
+ default:
+ entry->dpconf.link_bw = 270000;
+ break;
+ }
switch ((conf & 0x0f000000) >> 24) {
case 0xf:
entry->dpconf.link_nr = 4;
@@ -6769,7 +6766,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
void
nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
- struct dcb_entry *dcbent)
+ struct dcb_entry *dcbent, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
@@ -6777,11 +6774,22 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
spin_lock_bh(&bios->lock);
bios->display.output = dcbent;
+ bios->display.crtc = crtc;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
spin_unlock_bh(&bios->lock);
}
+void
+nouveau_bios_init_exec(struct drm_device *dev, uint16_t table)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct init_exec iexec = { true, false };
+
+ parse_init_table(bios, table, &iexec);
+}
+
static bool NVInitVBIOS(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -6863,9 +6871,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
if (dev_priv->card_type >= NV_50) {
for (i = 0; i < bios->dcb.entries; i++) {
- nouveau_bios_run_display_table(dev,
- &bios->dcb.entry[i],
- 0, 0);
+ nouveau_bios_run_display_table(dev, 0, 0,
+ &bios->dcb.entry[i], -1);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 050c314119d..8adb69e4a6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -289,8 +289,8 @@ struct nvbios {
struct {
struct dcb_entry *output;
+ int crtc;
uint16_t script_table_ptr;
- uint16_t dp_table_ptr;
} display;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 890d50e4d68..7cc37e69086 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
if (dev_priv->card_type == NV_10 &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
- nvbo->bo.mem.num_pages < vram_pages / 2) {
+ nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
@@ -956,7 +956,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
}
- if (dev_priv->card_type == NV_C0)
+ if (dev_priv->card_type >= NV_C0)
page_shift = node->page_shift;
else
page_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index b0d753f45bb..bb6ec9ef867 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
INIT_LIST_HEAD(&chan->nvsw.flip);
INIT_LIST_HEAD(&chan->fence.pending);
+ spin_lock_init(&chan->fence.lock);
/* setup channel's memory and vm */
ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -411,13 +412,17 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
return ret;
init->channel = chan->id;
- if (chan->dma.ib_max)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
- NOUVEAU_GEM_DOMAIN_GART;
- else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ if (nouveau_vram_pushbuf == 0) {
+ if (chan->dma.ib_max)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+ } else {
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- else
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+ }
if (dev_priv->card_type < NV_C0) {
init->subchan[0].handle = NvM2MF;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 939d4df0777..cea6696b190 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -39,7 +39,7 @@
static void nouveau_connector_hotplug(void *, int);
-static struct nouveau_encoder *
+struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
@@ -116,10 +116,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
nouveau_connector_hotplug, connector);
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- nouveau_backlight_exit(connector);
-
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -712,12 +708,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case OUTPUT_DP:
- if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
- max_clock = nv_encoder->dp.link_nr * 270000;
- else
- max_clock = nv_encoder->dp.link_nr * 162000;
-
- clock = clock * nouveau_connector_bpp(connector) / 8;
+ max_clock = nv_encoder->dp.link_nr;
+ max_clock *= nv_encoder->dp.link_bw;
+ clock = clock * nouveau_connector_bpp(connector) / 10;
break;
default:
BUG_ON(1);
@@ -871,7 +864,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/* fall-through */
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
@@ -888,27 +880,20 @@ nouveau_connector_create(struct drm_device *dev, int index)
dev->mode_config.dithering_mode_property,
nv_connector->use_dithering ?
DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) {
- if (dev_priv->card_type >= NV_50)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- }
break;
}
- if (pgpio->irq_register) {
+ if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
nouveau_connector_hotplug, connector);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
drm_sysfs_connector_add(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- nouveau_backlight_init(connector);
-
dcb->drm = connector;
return dcb->drm;
@@ -925,22 +910,13 @@ nouveau_connector_hotplug(void *data, int plugged)
struct drm_connector *connector = data;
struct drm_device *dev = connector->dev;
- NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector));
-
- if (connector->encoder && connector->encoder->crtc &&
- connector->encoder->crtc->enabled) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
- struct drm_encoder_helper_funcs *helper =
- connector->encoder->helper_private;
+ NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
- if (nv_encoder->dcb->type == OUTPUT_DP) {
- if (plugged)
- helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
- else
- helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
- }
- }
+ if (plugged)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ else
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
drm_helper_hpd_irq_event(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index cb1ce2a0916..bf8e1289953 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,14 +82,13 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
}
int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_cursor_init(struct nouveau_crtc *);
-void nv50_cursor_fini(struct nouveau_crtc *);
int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
uint32_t buffer_handle, uint32_t width,
uint32_t height);
int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
int nv04_cursor_init(struct nouveau_crtc *);
+int nv50_cursor_init(struct nouveau_crtc *);
struct nouveau_connector *
nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index eb514ea2937..ddbabefb427 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -105,9 +105,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
if (dev_priv->chipset == 0x50)
nv_fb->r_format |= (tile_flags << 8);
- if (!tile_flags)
- nv_fb->r_pitch = 0x00100000 | fb->pitch;
- else {
+ if (!tile_flags) {
+ if (dev_priv->card_type < NV_D0)
+ nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ else
+ nv_fb->r_pitch = 0x01000000 | fb->pitch;
+ } else {
u32 mode = nvbo->tile_mode;
if (dev_priv->card_type >= NV_C0)
mode >>= 4;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7beb82a0315..de5efe71fef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -28,418 +28,619 @@
#include "nouveau_i2c.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+/******************************************************************************
+ * aux channel util functions
+ *****************************************************************************/
+#define AUX_DBG(fmt, args...) do { \
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) { \
+ NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args); \
+ } \
+} while (0)
+#define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct drm_device *dev, int ch)
+{
+ nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
+}
static int
-auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+auxch_init(struct drm_device *dev, int ch)
{
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_i2c_chan *auxch;
- int ret;
-
- auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return -ENODEV;
-
- ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
- if (ret)
- return ret;
+ const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("begin idle timeout 0x%08x", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("magic wait 0x%08x\n", ctrl);
+ auxch_fini(dev, ch);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
return 0;
}
static int
-auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size)
{
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_i2c_chan *auxch;
- int ret;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ret, i;
- auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return -ENODEV;
+ AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
- ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
- return ret;
-}
+ ret = auxch_init(dev, ch);
+ if (ret)
+ goto out;
-static int
-nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint32_t tmp;
- int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
-
- tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
- tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
- NV50_SOR_DP_CTRL_LANE_MASK);
- tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
- if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
- tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
- nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
-
- return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
-}
+ stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50));
+ if (!(stat & 0x10000000)) {
+ AUX_DBG("sink not detected\n");
+ ret = -ENXIO;
+ goto out;
+ }
-static int
-nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint32_t tmp;
- int reg = 0x614300 + (nv_encoder->or * 0x800);
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+ nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
+ }
+ }
- tmp = nv_rd32(dev, reg);
- tmp &= 0xfff3ffff;
- if (cmd == DP_LINK_BW_2_7)
- tmp |= 0x00040000;
- nv_wr32(dev, reg, tmp);
+ ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr);
+
+ /* retry transaction a number of times on failure... */
+ ret = -EREMOTEIO;
+ for (retries = 0; retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
+ nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
- return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
-}
+ /* read status, and check if transaction completed ok */
+ stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0);
+ if (!(stat & 0x000f0f00)) {
+ ret = 0;
+ break;
+ }
-static int
-nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint32_t tmp;
- uint8_t cmd;
- int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
- int ret;
+ AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+ }
- tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
- tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
- tmp |= (pattern << 24);
- nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i);
+ AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
- ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
- if (ret)
- return ret;
- cmd &= ~DP_TRAINING_PATTERN_MASK;
- cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
- return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+out:
+ auxch_fini(dev, ch);
+ return ret;
}
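auxch_tx() above follows a common pattern: kick the transaction, poll a busy bit under a bounded ~1ms timeout, then check a status register and retry the whole transaction up to 32 times with a short delay between attempts. A generic, self-contained sketch of that bounded poll-and-retry structure; the hardware accessors are stubbed out and only the timeouts mirror the code above:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the hardware accessors; a real driver would poke MMIO here. */
static bool hw_busy(void)      { return false; }
static bool hw_status_ok(void) { return true; }
static void hw_kick(void)      { }
static void delay_us(unsigned int us) { (void)us; }

static int do_transaction(void)
{
        unsigned int retries, timeout;

        for (retries = 0; retries < 32; retries++) {
                if (retries)
                        delay_us(400);  /* back off a little on retry */
                hw_kick();

                /* wait up to ~1ms for the request to complete */
                timeout = 1000;
                while (hw_busy()) {
                        delay_us(1);
                        if (!timeout--)
                                return -1;      /* hard timeout, give up */
                }

                if (hw_status_ok())
                        return 0;       /* transaction completed cleanly */
        }
        return -1;      /* exhausted retries */
}

int main(void)
{
        printf("transaction %s\n", do_transaction() == 0 ? "ok" : "failed");
        return 0;
}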
-static int
-nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
+static u32
+dp_link_bw_get(struct drm_device *dev, int or, int link)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table_entry *dpse;
- struct bit_displayport_encoder_table *dpe;
- int i, dpe_headerlen, max_vs = 0;
-
- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
- if (!dpe)
- return false;
- dpse = (void *)((char *)dpe + dpe_headerlen);
+ u32 ctrl = nv_rd32(dev, 0x614300 + (or * 0x800));
+ if (!(ctrl & 0x000c0000))
+ return 162000;
+ return 270000;
+}
- for (i = 0; i < dpe_headerlen; i++, dpse++) {
- if (dpse->vs_level > max_vs)
- max_vs = dpse->vs_level;
+static int
+dp_lane_count_get(struct drm_device *dev, int or, int link)
+{
+ u32 ctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ switch (ctrl & 0x000f0000) {
+ case 0x00010000: return 1;
+ case 0x00030000: return 2;
+ default:
+ return 4;
}
-
- return max_vs;
}
-static int
-nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
+void
+nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table_entry *dpse;
- struct bit_displayport_encoder_table *dpe;
- int i, dpe_headerlen, max_pre = 0;
+ const u32 symbol = 100000;
+ int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+ int TU, VTUi, VTUf, VTUa;
+ u64 link_data_rate, link_ratio, unk;
+ u32 best_diff = 64 * symbol;
+ u32 link_nr, link_bw, r;
+
+ /* calculate packed data rate for each lane */
+ link_nr = dp_lane_count_get(dev, or, link);
+ link_data_rate = (clk * bpp / 8) / link_nr;
+
+ /* calculate ratio of packed data rate to link symbol rate */
+ link_bw = dp_link_bw_get(dev, or, link);
+ link_ratio = link_data_rate * symbol;
+ r = do_div(link_ratio, link_bw);
+
+ for (TU = 64; TU >= 32; TU--) {
+ /* calculate average number of valid symbols in each TU */
+ u32 tu_valid = link_ratio * TU;
+ u32 calc, diff;
+
+ /* find a hw representation for the fraction.. */
+ VTUi = tu_valid / symbol;
+ calc = VTUi * symbol;
+ diff = tu_valid - calc;
+ if (diff) {
+ if (diff >= (symbol / 2)) {
+ VTUf = symbol / (symbol - diff);
+ if (symbol - (VTUf * diff))
+ VTUf++;
+
+ if (VTUf <= 15) {
+ VTUa = 1;
+ calc += symbol - (symbol / VTUf);
+ } else {
+ VTUa = 0;
+ VTUf = 1;
+ calc += symbol;
+ }
+ } else {
+ VTUa = 0;
+ VTUf = min((int)(symbol / diff), 15);
+ calc += symbol / VTUf;
+ }
- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
- if (!dpe)
- return false;
- dpse = (void *)((char *)dpe + dpe_headerlen);
+ diff = calc - tu_valid;
+ } else {
+ /* no remainder, but the hw doesn't like the fractional
+ * part to be zero. decrement the integer part and
+ * have the fraction add a whole symbol back
+ */
+ VTUa = 0;
+ VTUf = 1;
+ VTUi--;
+ }
- for (i = 0; i < dpe_headerlen; i++, dpse++) {
- if (dpse->vs_level != vs)
- continue;
+ if (diff < best_diff) {
+ best_diff = diff;
+ bestTU = TU;
+ bestVTUa = VTUa;
+ bestVTUf = VTUf;
+ bestVTUi = VTUi;
+ if (diff == 0)
+ break;
+ }
+ }
- if (dpse->pre_level > max_pre)
- max_pre = dpse->pre_level;
+ if (!bestTU) {
+ NV_ERROR(dev, "DP: unable to find suitable config\n");
+ return;
}
- return max_pre;
+ /* XXX close to vbios numbers, but not right */
+ unk = (symbol - link_ratio) * bestTU;
+ unk *= link_ratio;
+ r = do_div(unk, symbol);
+ r = do_div(unk, symbol);
+ unk += 6;
+
+ nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
+ nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
+ bestVTUf << 16 |
+ bestVTUi << 8 |
+ unk);
}
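nouveau_dp_tu_update() above first derives the per-lane packed data rate (clk * bpp / 8 divided by the lane count) and its ratio to the link symbol rate, then searches TU sizes 64..32 for the closest fixed-point representation. A sketch of just the first two steps, with hypothetical numbers: a 148500 kHz pixel clock at 24 bpp over 4 lanes of 270000 kHz is an assumption for illustration, not a value taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t symbol = 100000;         /* same scale factor as above */

        /* Hypothetical link configuration, for illustration only. */
        uint64_t clk = 148500;                  /* pixel clock, kHz */
        uint64_t bpp = 24;
        uint64_t link_nr = 4;
        uint64_t link_bw = 270000;              /* per-lane symbol rate, kHz */

        /* packed data rate each lane has to carry */
        uint64_t link_data_rate = (clk * bpp / 8) / link_nr;

        /* ratio of data rate to symbol rate, scaled by 'symbol' */
        uint64_t link_ratio = link_data_rate * symbol / link_bw;

        printf("per-lane data rate: %llu\n",
               (unsigned long long)link_data_rate);
        printf("scaled link ratio:  %llu / %llu\n",
               (unsigned long long)link_ratio, (unsigned long long)symbol);
        return 0;
}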
-static bool
-nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
+u8 *
+nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table *dpe;
- int ret, i, dpe_headerlen, vs = 0, pre = 0;
- uint8_t request[2];
-
- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
- if (!dpe)
- return false;
-
- ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
- if (ret)
- return false;
-
- NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
-
- /* Keep all lanes at the same level.. */
- for (i = 0; i < nv_encoder->dp.link_nr; i++) {
- int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
- int lane_vs = lane_req & 3;
- int lane_pre = (lane_req >> 2) & 3;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry d;
+ u8 *table;
+ int i;
+
+ if (bit_table(dev, 'd', &d)) {
+ NV_ERROR(dev, "BIT 'd' table not found\n");
+ return NULL;
+ }
- if (lane_vs > vs)
- vs = lane_vs;
- if (lane_pre > pre)
- pre = lane_pre;
+ if (d.version != 1) {
+ NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version);
+ return NULL;
}
- if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
- vs = nouveau_dp_max_voltage_swing(encoder);
- vs |= 4;
+ table = ROMPTR(bios, d.data[0]);
+ if (!table) {
+ NV_ERROR(dev, "displayport table pointer invalid\n");
+ return NULL;
}
- if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
- pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
- pre |= 4;
+ switch (table[0]) {
+ case 0x20:
+ case 0x21:
+ case 0x30:
+ break;
+ default:
+ NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]);
+ return NULL;
}
- /* Update the configuration for all lanes.. */
- for (i = 0; i < nv_encoder->dp.link_nr; i++)
- config[i] = (pre << 3) | vs;
+ for (i = 0; i < table[3]; i++) {
+ *entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
+ return table;
+ }
- return true;
+ NV_ERROR(dev, "displayport encoder table not found\n");
+ return NULL;
}
-static bool
-nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table_entry *dpse;
- struct bit_displayport_encoder_table *dpe;
- int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
- int dpe_headerlen, ret, i;
+/******************************************************************************
+ * link training
+ *****************************************************************************/
+struct dp_state {
+ struct dcb_entry *dcb;
+ u8 *table;
+ u8 *entry;
+ int auxch;
+ int crtc;
+ int or;
+ int link;
+ u8 *dpcd;
+ int link_nr;
+ u32 link_bw;
+ u8 stat[6];
+ u8 conf[4];
+};
- NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
- config[0], config[1], config[2], config[3]);
+static void
+dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int or = dp->or, link = dp->link;
+ u8 *entry, sink[2];
+ u32 dp_ctrl;
+ u16 script;
+
+ NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+
+ /* set selected link rate on source */
+ switch (dp->link_bw) {
+ case 270000:
+ nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00040000);
+ sink[0] = DP_LINK_BW_2_7;
+ break;
+ default:
+ nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00000000);
+ sink[0] = DP_LINK_BW_1_62;
+ break;
+ }
- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
- if (!dpe)
- return false;
- dpse = (void *)((char *)dpe + dpe_headerlen);
+ /* offset +0x0a of each dp encoder table entry is a pointer to another
+ * table, that has (among other things) pointers to more scripts that
+ * need to be executed, this time depending on link speed.
+ */
+ entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
+ if (entry) {
+ if (dp->table[0] < 0x30) {
+ while (dp->link_bw < (ROM16(entry[0]) * 10))
+ entry += 4;
+ script = ROM16(entry[2]);
+ } else {
+ while (dp->link_bw < (entry[0] * 27000))
+ entry += 3;
+ script = ROM16(entry[1]);
+ }
- for (i = 0; i < dpe->record_nr; i++, dpse++) {
- if (dpse->vs_level == (config[0] & 3) &&
- dpse->pre_level == ((config[0] >> 3) & 3))
- break;
+ nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
}
- BUG_ON(i == dpe->record_nr);
-
- for (i = 0; i < nv_encoder->dp.link_nr; i++) {
- const int shift[4] = { 16, 8, 0, 24 };
- uint32_t mask = 0xff << shift[i];
- uint32_t reg0, reg1, reg2;
-
- reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
- reg0 |= (dpse->reg0 << shift[i]);
- reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
- reg1 |= (dpse->reg1 << shift[i]);
- reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
- reg2 |= (dpse->reg2 << 8);
- nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
- nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
- nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
+
+ /* configure lane count on the source */
+ dp_ctrl = ((1 << dp->link_nr) - 1) << 16;
+ sink[1] = dp->link_nr;
+ if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) {
+ dp_ctrl |= 0x00004000;
+ sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
}
- ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
- if (ret)
- return false;
+ nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x001f4000, dp_ctrl);
- return true;
+ /* inform the sink of the new configuration */
+ auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2);
}
-bool
-nouveau_dp_link_train(struct drm_encoder *encoder)
+static void
+dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 tp)
{
- struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct bit_displayport_encoder_table *dpe;
- int dpe_headerlen;
- uint8_t config[4], status[3];
- bool cr_done, cr_max_vs, eq_done, hpd_state;
- int ret = 0, i, tries, voltage;
+ u8 sink_tp;
- NV_DEBUG_KMS(dev, "link training!!\n");
+ NV_DEBUG_KMS(dev, "training pattern %d\n", tp);
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!nv_connector)
- return false;
+ nv_mask(dev, NV50_SOR_DP_CTRL(dp->or, dp->link), 0x0f000000, tp << 24);
- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
- if (!dpe) {
- NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
- return false;
- }
+ auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
+ sink_tp &= ~DP_TRAINING_PATTERN_MASK;
+ sink_tp |= tp;
+ auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
+}
- /* disable hotplug detect, this flips around on some panels during
- * link training.
- */
- hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+static const u8 nv50_lane_map[] = { 16, 8, 0, 24 };
+static const u8 nvaf_lane_map[] = { 24, 16, 8, 0 };
+
+static int
+dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mask = 0, drv = 0, pre = 0, unk = 0;
+ const u8 *shifts;
+ int link = dp->link;
+ int or = dp->or;
+ int i;
+
+ if (dev_priv->chipset != 0xaf)
+ shifts = nv50_lane_map;
+ else
+ shifts = nvaf_lane_map;
+
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 *conf = dp->entry + dp->table[4];
+ u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+ u8 lpre = (lane & 0x0c) >> 2;
+ u8 lvsw = (lane & 0x03) >> 0;
+
+ mask |= 0xff << shifts[i];
+ unk |= 1 << (shifts[i] >> 3);
+
+ dp->conf[i] = (lpre << 3) | lvsw;
+ if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
+ dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
+ if (lpre == DP_TRAIN_PRE_EMPHASIS_9_5)
+ dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]);
+
+ if (dp->table[0] < 0x30) {
+ u8 *last = conf + (dp->entry[4] * dp->table[5]);
+ while (lvsw != conf[0] || lpre != conf[1]) {
+ conf += dp->table[5];
+ if (conf >= last)
+ return -EINVAL;
+ }
+
+ conf += 2;
+ } else {
+ /* no lookup table anymore, set entries for each
+ * combination of voltage swing and pre-emphasis
+ * level allowed by the DP spec.
+ */
+ switch (lvsw) {
+ case 0: lpre += 0; break;
+ case 1: lpre += 4; break;
+ case 2: lpre += 7; break;
+ case 3: lpre += 9; break;
+ }
+
+ conf = conf + (lpre * dp->table[5]);
+ conf++;
+ }
- if (dpe->script0) {
- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
- nv_encoder->dcb);
+ drv |= conf[0] << shifts[i];
+ pre |= conf[1] << shifts[i];
+ unk = (unk & ~0x0000ff00) | (conf[2] << 8);
}
-train:
- cr_done = eq_done = false;
+ nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, drv);
+ nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, pre);
+ nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff0f, unk);
- /* set link configuration */
- NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
- nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
+ return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4);
+}
- ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
- if (ret)
- return false;
+static int
+dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
+{
+ int ret;
- config[0] = nv_encoder->dp.link_nr;
- if (nv_encoder->dp.dpcd_version >= 0x11 &&
- nv_encoder->dp.enhanced_frame)
- config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ udelay(delay);
- ret = nouveau_dp_lane_count_set(encoder, config[0]);
+ ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6);
if (ret)
- return false;
+ return ret;
- /* clock recovery */
- NV_DEBUG_KMS(dev, "\tbegin cr\n");
- ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
- if (ret)
- goto stop;
+ NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n",
+ dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3],
+ dp->stat[4], dp->stat[5]);
+ return 0;
+}
- tries = 0;
- voltage = -1;
- memset(config, 0x00, sizeof(config));
- for (;;) {
- if (!nouveau_dp_link_train_commit(encoder, config))
- break;
+static int
+dp_link_train_cr(struct drm_device *dev, struct dp_state *dp)
+{
+ bool cr_done = false, abort = false;
+ int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ int tries = 0, i;
- udelay(100);
+ dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1);
- ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
- if (ret)
+ do {
+ if (dp_link_train_commit(dev, dp) ||
+ dp_link_train_update(dev, dp, 100))
break;
- NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
- status[0], status[1]);
cr_done = true;
- cr_max_vs = false;
- for (i = 0; i < nv_encoder->dp.link_nr; i++) {
- int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
-
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
if (!(lane & DP_LANE_CR_DONE)) {
cr_done = false;
- if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED)
- cr_max_vs = true;
+ if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED)
+ abort = true;
break;
}
}
- if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
- voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
+ voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
tries = 0;
}
+ } while (!cr_done && !abort && ++tries < 5);
- if (cr_done || cr_max_vs || (++tries == 5))
- break;
-
- if (!nouveau_dp_link_train_adjust(encoder, config))
- break;
- }
-
- if (!cr_done)
- goto stop;
+ return cr_done ? 0 : -1;
+}
- /* channel equalisation */
- NV_DEBUG_KMS(dev, "\tbegin eq\n");
- ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
- if (ret)
- goto stop;
+static int
+dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
+{
+ bool eq_done, cr_done = true;
+ int tries = 0, i;
- for (tries = 0; tries <= 5; tries++) {
- udelay(400);
+ dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2);
- ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
- if (ret)
+ do {
+ if (dp_link_train_update(dev, dp, 400))
break;
- NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
- status[0], status[1]);
- eq_done = true;
- if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
- eq_done = false;
-
- for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
- int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
-
- if (!(lane & DP_LANE_CR_DONE)) {
+ eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE);
+ for (i = 0; i < dp->link_nr && eq_done; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DP_LANE_CR_DONE))
cr_done = false;
- break;
- }
-
if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
- !(lane & DP_LANE_SYMBOL_LOCKED)) {
+ !(lane & DP_LANE_SYMBOL_LOCKED))
eq_done = false;
- break;
- }
}
- if (eq_done || !cr_done)
+ if (dp_link_train_commit(dev, dp))
break;
+ } while (!eq_done && cr_done && ++tries <= 5);
- if (!nouveau_dp_link_train_adjust(encoder, config) ||
- !nouveau_dp_link_train_commit(encoder, config))
- break;
- }
+ return eq_done ? 0 : -1;
+}
-stop:
- /* end link training */
- ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
- if (ret)
+bool
+nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector =
+ nouveau_encoder_connector_get(nv_encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_i2c_chan *auxch;
+ const u32 bw_list[] = { 270000, 162000, 0 };
+ const u32 *link_bw = bw_list;
+ struct dp_state dp;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
return false;
- /* retry at a lower setting, if possible */
- if (!ret && !(eq_done && cr_done)) {
- NV_DEBUG_KMS(dev, "\twe failed\n");
- if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
- NV_DEBUG_KMS(dev, "retry link training at low rate\n");
- nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
- goto train;
- }
+ dp.table = nouveau_dp_bios_data(dev, nv_encoder->dcb, &dp.entry);
+ if (!dp.table)
+ return -EINVAL;
+
+ dp.dcb = nv_encoder->dcb;
+ dp.crtc = nv_crtc->index;
+ dp.auxch = auxch->rd;
+ dp.or = nv_encoder->or;
+ dp.link = !(nv_encoder->dcb->sorconf.link & 1);
+ dp.dpcd = nv_encoder->dp.dpcd;
+
+ /* some sinks toggle hotplug in response to some of the actions
+ * we take during link training (DP_SET_POWER is one), we need
+ * to ignore them for the moment to avoid races.
+ */
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+
+ /* enable down-spreading, if possible */
+ if (dp.table[1] >= 16) {
+ u16 script = ROM16(dp.entry[14]);
+ if (nv_encoder->dp.dpcd[3] & 1)
+ script = ROM16(dp.entry[12]);
+
+ nouveau_bios_run_init_table(dev, script, dp.dcb, dp.crtc);
}
- if (dpe->script1) {
- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
- nv_encoder->dcb);
+ /* execute pre-train script from vbios */
+ nouveau_bios_run_init_table(dev, ROM16(dp.entry[6]), dp.dcb, dp.crtc);
+
+ /* start off at highest link rate supported by encoder and display */
+ while (*link_bw > nv_encoder->dp.link_bw)
+ link_bw++;
+
+ while (link_bw[0]) {
+ /* find minimum required lane count at this link rate */
+ dp.link_nr = nv_encoder->dp.link_nr;
+ while ((dp.link_nr >> 1) * link_bw[0] > datarate)
+ dp.link_nr >>= 1;
+
+ /* drop link rate to minimum with this lane count */
+ while ((link_bw[1] * dp.link_nr) > datarate)
+ link_bw++;
+ dp.link_bw = link_bw[0];
+
+ /* program selected link configuration */
+ dp_set_link_config(dev, &dp);
+
+ /* attempt to train the link at this configuration */
+ memset(dp.stat, 0x00, sizeof(dp.stat));
+ if (!dp_link_train_cr(dev, &dp) &&
+ !dp_link_train_eq(dev, &dp))
+ break;
+
+ /* retry at lower rate */
+ link_bw++;
}
- /* re-enable hotplug detect */
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
+ /* finish link training */
+ dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
- return eq_done;
+ /* execute post-train script from vbios */
+ nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
+
+ /* re-enable hotplug detect */
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+ return true;
}
bool
@@ -447,31 +648,34 @@ nouveau_dp_detect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- uint8_t dpcd[4];
+ struct nouveau_i2c_chan *auxch;
+ u8 *dpcd = nv_encoder->dp.dpcd;
int ret;
- ret = auxch_rd(encoder, 0x0000, dpcd, 4);
- if (ret)
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
return false;
- NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
- "display: link_bw %d, link_nr %d version 0x%02x\n",
- nv_encoder->dcb->dpconf.link_bw,
- nv_encoder->dcb->dpconf.link_nr,
- dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
+ ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
+ if (ret)
+ return false;
- nv_encoder->dp.dpcd_version = dpcd[0];
+ nv_encoder->dp.link_bw = 27000 * dpcd[1];
+ nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
- nv_encoder->dp.link_bw = dpcd[1];
- if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
- !nv_encoder->dcb->dpconf.link_bw)
- nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+ NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n",
+ nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
+ NV_DEBUG_KMS(dev, "encoder: %dx%d\n",
+ nv_encoder->dcb->dpconf.link_nr,
+ nv_encoder->dcb->dpconf.link_bw);
- nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
- if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
+ if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
+ if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
+ nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
- nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP);
+ NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
+ nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
return true;
}
@@ -480,105 +684,13 @@ int
nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr)
{
- struct drm_device *dev = auxch->dev;
- uint32_t tmp, ctrl, stat = 0, data32[4] = {};
- int ret = 0, i, index = auxch->rd;
-
- NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
-
- tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
- nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
- tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
- if (!(tmp & 0x01000000)) {
- NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
- ret = -EIO;
- goto out;
- }
-
- for (i = 0; i < 3; i++) {
- tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
- if (tmp & NV50_AUXCH_STAT_STATE_READY)
- break;
- udelay(100);
- }
-
- if (i == 3) {
- ret = -EBUSY;
- goto out;
- }
-
- if (!(cmd & 1)) {
- memcpy(data32, data, data_nr);
- for (i = 0; i < 4; i++) {
- NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
- nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
- }
- }
-
- nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
- ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
- ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
- ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
- ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
-
- for (i = 0; i < 16; i++) {
- nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
- nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
- nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
- if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
- 0x00010000, 0x00000000)) {
- NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
- nv_rd32(dev, NV50_AUXCH_CTRL(index)));
- ret = -EBUSY;
- goto out;
- }
-
- udelay(400);
-
- stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
- if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
- NV50_AUXCH_STAT_REPLY_AUX_DEFER)
- break;
- }
-
- if (i == 16) {
- NV_ERROR(dev, "auxch DEFER too many times, bailing\n");
- ret = -EREMOTEIO;
- goto out;
- }
-
- if (cmd & 1) {
- if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
- ret = -EREMOTEIO;
- goto out;
- }
-
- for (i = 0; i < 4; i++) {
- data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
- NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
- }
- memcpy(data, data32, data_nr);
- }
-
-out:
- tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
- nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
- tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
- if (tmp & 0x01000000) {
- NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
- ret = -EIO;
- }
-
- udelay(400);
-
- return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
+ return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
}
static int
nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
- struct drm_device *dev = auxch->dev;
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
@@ -602,19 +714,6 @@ nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (ret < 0)
return ret;
- switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
- case NV50_AUXCH_STAT_REPLY_I2C_ACK:
- break;
- case NV50_AUXCH_STAT_REPLY_I2C_NACK:
- return -EREMOTEIO;
- case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
- udelay(100);
- continue;
- default:
- NV_ERROR(dev, "bad auxch reply: 0x%08x\n", ret);
- return -EREMOTEIO;
- }
-
ptr += cnt;
remaining -= cnt;
}
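For reference, a minimal standalone sketch of the link-rate/lane-count selection performed by the new nouveau_dp_link_train() above: start at the highest rate both the encoder and the sink support, halve the lane count while the payload still fits, then drop to the lowest rate that still carries the requested data rate. Function and variable names are illustrative, not part of the patch; units follow the patch's convention (27000 * DPCD link-rate code), and max_bw is assumed to be at least 162000 as any valid DPCD guarantees.

#include <stdio.h>

/* bandwidth codes as used by the patch: 270000 = 2.7Gbps, 162000 = 1.62Gbps */
static const unsigned int bw_list[] = { 270000, 162000, 0 };

static void pick_link_config(unsigned int max_lanes, unsigned int max_bw,
                             unsigned int datarate,
                             unsigned int *out_lanes, unsigned int *out_bw)
{
    const unsigned int *bw = bw_list;
    unsigned int lanes = max_lanes;

    /* start off at the highest link rate supported by encoder and display */
    while (*bw > max_bw)
        bw++;

    /* find the minimum required lane count at this link rate */
    while ((lanes >> 1) * bw[0] > datarate)
        lanes >>= 1;

    /* drop the link rate to the minimum that still fits this lane count */
    while (bw[1] * lanes > datarate)
        bw++;

    *out_lanes = lanes;
    *out_bw = bw[0];
}

int main(void)
{
    unsigned int lanes, bw;

    pick_link_config(4, 270000, 400000, &lanes, &bw);
    printf("%u lanes @ %u\n", lanes, bw);   /* prints "2 lanes @ 270000" */
    return 0;
}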
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index b30ddd8d2e2..9f7bb129526 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -23,6 +23,7 @@
*/
#include <linux/console.h>
+#include <linux/module.h>
#include "drmP.h"
#include "drm.h"
@@ -41,7 +42,7 @@ int nouveau_agpmode = -1;
module_param_named(agpmode, nouveau_agpmode, int, 0400);
MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
-static int nouveau_modeset = -1; /* kms */
+int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
MODULE_PARM_DESC(vbios, "Override default VBIOS location");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d7d51deb34b..29837da1098 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -414,12 +414,13 @@ struct nouveau_gpio_engine {
};
struct nouveau_pm_voltage_level {
- u8 voltage;
- u8 vid;
+ u32 voltage; /* microvolts */
+ u8 vid;
};
struct nouveau_pm_voltage {
bool supported;
+ u8 version;
u8 vid_mask;
struct nouveau_pm_voltage_level *level;
@@ -428,17 +429,48 @@ struct nouveau_pm_voltage {
struct nouveau_pm_memtiming {
int id;
- u32 reg_100220;
- u32 reg_100224;
- u32 reg_100228;
- u32 reg_10022c;
- u32 reg_100230;
- u32 reg_100234;
- u32 reg_100238;
- u32 reg_10023c;
- u32 reg_100240;
+ u32 reg_0; /* 0x10f290 on Fermi, 0x100220 for older */
+ u32 reg_1;
+ u32 reg_2;
+ u32 reg_3;
+ u32 reg_4;
+ u32 reg_5;
+ u32 reg_6;
+ u32 reg_7;
+ u32 reg_8;
+ /* To be written to 0x1002c0 */
+ u8 CL;
+ u8 WR;
};
+struct nouveau_pm_tbl_header {
+ u8 version;
+ u8 header_len;
+ u8 entry_cnt;
+ u8 entry_len;
+};
+
+struct nouveau_pm_tbl_entry {
+ u8 tWR;
+ u8 tUNK_1;
+ u8 tCL;
+ u8 tRP; /* Byte 3 */
+ u8 empty_4;
+ u8 tRAS; /* Byte 5 */
+ u8 empty_6;
+ u8 tRFC; /* Byte 7 */
+ u8 empty_8;
+ u8 tRC; /* Byte 9 */
+ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+ u8 empty_15, empty_16, empty_17;
+ u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+};
+
+/* nouveau_mem.c */
+void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+ struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+ struct nouveau_pm_memtiming *timing);
+
#define NOUVEAU_PM_MAX_LEVEL 8
struct nouveau_pm_level {
struct device_attribute dev_attr;
@@ -448,11 +480,19 @@ struct nouveau_pm_level {
u32 core;
u32 memory;
u32 shader;
- u32 unk05;
- u32 unk0a;
-
- u8 voltage;
- u8 fanspeed;
+ u32 rop;
+ u32 copy;
+ u32 daemon;
+ u32 vdec;
+ u32 unk05; /* nv50:nva3, roughly.. */
+ u32 unka0; /* nva3:nvc0 */
+ u32 hub01; /* nvc0- */
+ u32 hub06; /* nvc0- */
+ u32 hub07; /* nvc0- */
+
+ u32 volt_min; /* microvolts */
+ u32 volt_max;
+ u8 fanspeed;
u16 memscript;
struct nouveau_pm_memtiming *timing;
@@ -496,6 +536,11 @@ struct nouveau_pm_engine {
void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
u32 id, int khz);
void (*clock_set)(struct drm_device *, void *);
+
+ int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
+ void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
+ void (*clocks_set)(struct drm_device *, void *);
+
int (*voltage_get)(struct drm_device *);
int (*voltage_set)(struct drm_device *, int voltage);
int (*fanspeed_get)(struct drm_device *);
@@ -504,7 +549,7 @@ struct nouveau_pm_engine {
};
struct nouveau_vram_engine {
- struct nouveau_mm *mm;
+ struct nouveau_mm mm;
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *dev);
@@ -623,6 +668,7 @@ enum nouveau_card_type {
NV_40 = 0x40,
NV_50 = 0x50,
NV_C0 = 0xc0,
+ NV_D0 = 0xd0
};
struct drm_nouveau_private {
@@ -633,8 +679,8 @@ struct drm_nouveau_private {
enum nouveau_card_type card_type;
/* exact chipset, derived from NV_PMC_BOOT_0 */
int chipset;
- int stepping;
int flags;
+ u32 crystal;
void __iomem *mmio;
@@ -721,7 +767,6 @@ struct drm_nouveau_private {
uint64_t vram_size;
uint64_t vram_sys_base;
- uint64_t fb_phys;
uint64_t fb_available_size;
uint64_t fb_mappable_pages;
uint64_t fb_aper_free;
@@ -784,6 +829,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
}
/* nouveau_drv.c */
+extern int nouveau_modeset;
extern int nouveau_agpmode;
extern int nouveau_duallink;
extern int nouveau_uscript_lvds;
@@ -824,6 +870,8 @@ extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_cb(struct drm_device *, u64 timeout,
+ bool (*cond)(void *), void *);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
@@ -1006,15 +1054,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
/* nouveau_backlight.c */
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
-extern int nouveau_backlight_init(struct drm_connector *);
-extern void nouveau_backlight_exit(struct drm_connector *);
+extern int nouveau_backlight_init(struct drm_device *);
+extern void nouveau_backlight_exit(struct drm_device *);
#else
-static inline int nouveau_backlight_init(struct drm_connector *dev)
+static inline int nouveau_backlight_init(struct drm_device *dev)
{
return 0;
}
-static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
+static inline void nouveau_backlight_exit(struct drm_device *dev) { }
#endif
/* nouveau_bios.c */
@@ -1022,7 +1070,8 @@ extern int nouveau_bios_init(struct drm_device *);
extern void nouveau_bios_takedown(struct drm_device *dev);
extern int nouveau_run_vbios_init(struct drm_device *);
extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
- struct dcb_entry *);
+ struct dcb_entry *, int crtc);
+extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
enum dcb_gpio_tag);
extern struct dcb_connector_table_entry *
@@ -1030,11 +1079,8 @@ nouveau_bios_connector_entry(struct drm_device *, int index);
extern u32 get_pll_register(struct drm_device *, enum pll_types);
extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
struct pll_lims *);
-extern int nouveau_bios_run_display_table(struct drm_device *,
- struct dcb_entry *,
- uint32_t script, int pxclk);
-extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
- int *length);
+extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
+ struct dcb_entry *, int crtc);
extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -1043,6 +1089,7 @@ extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
int head, int pxclk);
extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
enum LVDS_script, int pxclk);
+bool bios_encoder_match(struct dcb_entry *, u32 hash);
/* nouveau_ttm.c */
int nouveau_ttm_global_init(struct drm_nouveau_private *);
@@ -1053,7 +1100,9 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr);
bool nouveau_dp_detect(struct drm_encoder *);
-bool nouveau_dp_link_train(struct drm_encoder *);
+bool nouveau_dp_link_train(struct drm_encoder *, u32 datarate);
+void nouveau_dp_tu_update(struct drm_device *, int, int, u32, u32);
+u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
/* nv04_fb.c */
extern int nv04_fb_init(struct drm_device *);
@@ -1179,8 +1228,8 @@ extern int nva3_copy_create(struct drm_device *dev);
/* nvc0_copy.c */
extern int nvc0_copy_create(struct drm_device *dev, int engine);
-/* nv40_mpeg.c */
-extern int nv40_mpeg_create(struct drm_device *dev);
+/* nv31_mpeg.c */
+extern int nv31_mpeg_create(struct drm_device *dev);
/* nv50_mpeg.c */
extern int nv50_mpeg_create(struct drm_device *dev);
@@ -1265,6 +1314,11 @@ extern int nv04_display_create(struct drm_device *);
extern int nv04_display_init(struct drm_device *);
extern void nv04_display_destroy(struct drm_device *);
+/* nvd0_display.c */
+extern int nvd0_display_create(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_destroy(struct drm_device *);
+
/* nv04_crtc.c */
extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1374,6 +1428,8 @@ int nv50_gpio_init(struct drm_device *dev);
void nv50_gpio_fini(struct drm_device *dev);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
void (*)(void *, int), void *);
void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
@@ -1448,6 +1504,8 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
#define nv_wait_ne(dev, reg, mask, val) \
nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_cb(dev, func, data) \
+ nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
/* PRAMIN access */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1514,6 +1572,7 @@ enum {
NOUVEAU_REG_DEBUG_RMVIO = 0x80,
NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
NOUVEAU_REG_DEBUG_EVO = 0x200,
+ NOUVEAU_REG_DEBUG_AUXCH = 0x400
};
#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index ae69b61d93d..e5d6e3faff3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -49,17 +49,17 @@ struct nouveau_encoder {
union {
struct {
- int mc_unknown;
- uint32_t unk0;
- uint32_t unk1;
- int dpcd_version;
+ u8 dpcd[8];
int link_nr;
int link_bw;
- bool enhanced_frame;
+ u32 datarate;
} dp;
};
};
+struct nouveau_encoder *
+find_encoder(struct drm_connector *connector, int type);
+
static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
{
struct drm_encoder_slave *slave = to_encoder_slave(enc);
@@ -83,21 +83,4 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
-struct bit_displayport_encoder_table {
- uint32_t match;
- uint8_t record_nr;
- uint8_t unknown;
- uint16_t script0;
- uint16_t script1;
- uint16_t unknown_table;
-} __attribute__ ((packed));
-
-struct bit_displayport_encoder_table_entry {
- uint8_t vs_level;
- uint8_t pre_level;
- uint8_t reg0;
- uint8_t reg1;
- uint8_t reg2;
-} __attribute__ ((packed));
-
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 14a8627efe4..3a4cc32b9e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fbdev *nfbdev;
+ int preferred_bpp;
int ret;
nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
@@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
- drm_fb_helper_initial_config(&nfbdev->helper, 32);
+
+ if (dev_priv->vram_size <= 32 * 1024 * 1024)
+ preferred_bpp = 8;
+ else if (dev_priv->vram_size <= 64 * 1024 * 1024)
+ preferred_bpp = 16;
+ else
+ preferred_bpp = 32;
+
+ drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c919cfc8f2f..2f6daae68b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -519,7 +519,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
mem->start << PAGE_SHIFT,
mem->size, NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VRAM, &obj);
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
return ret;
}
- INIT_LIST_HEAD(&chan->fence.pending);
- spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index cb389d01432..d39b2202b19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -22,6 +22,8 @@
* Authors: Ben Skeggs
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_i2c.h"
@@ -107,6 +109,13 @@ nv4e_i2c_getsda(void *data)
return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
}
+static const uint32_t nv50_i2c_port[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
+#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
+
static int
nv50_i2c_getscl(void *data)
{
@@ -130,28 +139,32 @@ static void
nv50_i2c_setscl(void *data, int state)
{
struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+ nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
}
static void
nv50_i2c_setsda(void *data, int state)
{
struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- nv_wr32(dev, i2c->wr,
- (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
+ nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
i2c->data = state;
}
-static const uint32_t nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
+static int
+nvd0_i2c_getscl(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+}
+
+static int
+nvd0_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+}
int
nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
@@ -163,7 +176,8 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
if (entry->chan)
return -EEXIST;
- if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
+ if (dev_priv->card_type >= NV_50 &&
+ dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
return -EINVAL;
}
@@ -192,10 +206,17 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
case 5:
i2c->bit.setsda = nv50_i2c_setsda;
i2c->bit.setscl = nv50_i2c_setscl;
- i2c->bit.getsda = nv50_i2c_getsda;
- i2c->bit.getscl = nv50_i2c_getscl;
- i2c->rd = nv50_i2c_port[entry->read];
- i2c->wr = i2c->rd;
+ if (dev_priv->card_type < NV_D0) {
+ i2c->bit.getsda = nv50_i2c_getsda;
+ i2c->bit.getscl = nv50_i2c_getscl;
+ i2c->rd = nv50_i2c_port[entry->read];
+ i2c->wr = i2c->rd;
+ } else {
+ i2c->bit.getsda = nvd0_i2c_getsda;
+ i2c->bit.getscl = nvd0_i2c_getscl;
+ i2c->rd = 0x00d014 + (entry->read * 0x20);
+ i2c->wr = i2c->rd;
+ }
break;
case 6:
i2c->rd = entry->read;
@@ -267,7 +288,10 @@ nouveau_i2c_find(struct drm_device *dev, int index)
val = 0xe001;
}
- nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
+ /* nfi, but neither auxch nor i2c works if it's 1 */
+ nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
+ /* nfi, but this switches between auxch and normal i2c */
+ nv_mask(dev, reg + 0x00, 0x0000f003, val);
}
if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
@@ -309,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
- for (i = 0; info[i].addr; i++) {
+ for (i = 0; i2c && info[i].addr; i++) {
if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
(!match || match(i2c, &info[i]))) {
NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index f9ae2fc3d6f..36bec480770 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -408,8 +408,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (ret)
return ret;
- dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
-
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
return ret;
@@ -504,35 +502,146 @@ nouveau_mem_gart_init(struct drm_device *dev)
return 0;
}
+/* XXX: For now a dummy. More samples required, possibly even a card.
+ * Called from nouveau_perf.c */
+void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+ struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+ struct nouveau_pm_memtiming *timing) {
+
+ NV_DEBUG(dev, "Timing entry format unknown, please contact nouveau developers\n");
+}
+
+void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+ struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+ struct nouveau_pm_memtiming *timing) {
+
+ timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
+
+ /* XXX: I don't trust the -1's and +1's... they must come
+ * from somewhere! */
+ timing->reg_1 = (e->tWR + 2 + magic_number) << 24 |
+ 1 << 16 |
+ (e->tUNK_1 + 2 + magic_number) << 8 |
+ (e->tCL + 2 - magic_number);
+ timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
+ timing->reg_2 |= 0x20200000;
+
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id,
+ timing->reg_0, timing->reg_1, timing->reg_2);
+}
+
+void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr,
+ struct nouveau_pm_tbl_entry *e, uint8_t magic_number,struct nouveau_pm_memtiming *timing) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ uint8_t unk18 = 1,
+ unk19 = 1,
+ unk20 = 0,
+ unk21 = 0;
+
+ switch (min(hdr->entry_len, (u8) 22)) {
+ case 22:
+ unk21 = e->tUNK_21;
+ case 21:
+ unk20 = e->tUNK_20;
+ case 20:
+ unk19 = e->tUNK_19;
+ case 19:
+ unk18 = e->tUNK_18;
+ break;
+ }
+
+ timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
+
+ /* XXX: I don't trust the -1's and +1's... they must come
+ * from somewhere! */
+ timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 |
+ max(unk18, (u8) 1) << 16 |
+ (e->tUNK_1 + unk19 + 1 + magic_number) << 8;
+ if (dev_priv->chipset == 0xa8) {
+ timing->reg_1 |= (e->tCL - 1);
+ } else {
+ timing->reg_1 |= (e->tCL + 2 - magic_number);
+ }
+ timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
+
+ timing->reg_5 = (e->tRAS << 24 | e->tRC);
+ timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16;
+
+ if (P->version == 1) {
+ timing->reg_2 |= magic_number << 24;
+ timing->reg_3 = (0x14 + e->tCL) << 24 |
+ 0x16 << 16 |
+ (e->tCL - 1) << 8 |
+ (e->tCL - 1);
+ timing->reg_4 = (nv_rd32(dev, 0x10022c) & 0xffff0000) | e->tUNK_13 << 8 | e->tUNK_13;
+ timing->reg_5 |= (e->tCL + 2) << 8;
+ timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16;
+ } else {
+ timing->reg_2 |= (unk19 - 1) << 24;
+ /* XXX: reg_10022c for recentish cards pretty much unknown */
+ timing->reg_3 = e->tCL - 1;
+ timing->reg_4 = (unk20 << 24 | unk21 << 16 |
+ e->tUNK_13 << 8 | e->tUNK_13);
+ /* XXX: +6? */
+ timing->reg_5 |= (unk19 + 6) << 8;
+
+ /* XXX: reg_10023c currently unknown
+ * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+ timing->reg_7 = 0x202;
+ }
+
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id,
+ timing->reg_0, timing->reg_1,
+ timing->reg_2, timing->reg_3);
+ NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
+ timing->reg_4, timing->reg_5,
+ timing->reg_6, timing->reg_7);
+ NV_DEBUG(dev, " 240: %08x\n", timing->reg_8);
+}
+
+void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+ struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) {
+ timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP);
+ timing->reg_1 = (nv_rd32(dev, 0x10f294) & 0xff000000) | (e->tUNK_11 & 0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f);
+ timing->reg_2 = (nv_rd32(dev, 0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8;
+ timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13;
+ timing->reg_4 = (nv_rd32(dev, 0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15;
+ NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id,
+ timing->reg_0, timing->reg_1,
+ timing->reg_2, timing->reg_3);
+ NV_DEBUG(dev, " 2a0: %08x %08x %08x %08x\n",
+ timing->reg_4, timing->reg_5,
+ timing->reg_6, timing->reg_7);
+}
+
+/**
+ * Processes the Memory Timing BIOS table and stores the generated
+ * register values.
+ * @pre init scripts have been run and the memtiming registers are initialized
+ */
void
nouveau_mem_timing_init(struct drm_device *dev)
{
- /* cards < NVC0 only */
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
struct nvbios *bios = &dev_priv->vbios;
struct bit_entry P;
- u8 tUNK_0, tUNK_1, tUNK_2;
- u8 tRP; /* Byte 3 */
- u8 tRAS; /* Byte 5 */
- u8 tRFC; /* Byte 7 */
- u8 tRC; /* Byte 9 */
- u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
- u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
- u8 magic_number = 0; /* Yeah... sorry*/
- u8 *mem = NULL, *entry;
- int i, recordlen, entries;
+ struct nouveau_pm_tbl_header *hdr = NULL;
+ uint8_t magic_number;
+ u8 *entry;
+ int i;
if (bios->type == NVBIOS_BIT) {
if (bit_table(dev, 'P', &P))
return;
if (P.version == 1)
- mem = ROMPTR(bios, P.data[4]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
else
if (P.version == 2)
- mem = ROMPTR(bios, P.data[8]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
else {
NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
}
@@ -541,150 +650,56 @@ nouveau_mem_timing_init(struct drm_device *dev)
return;
}
- if (!mem) {
+ if (!hdr) {
NV_DEBUG(dev, "memory timing table pointer invalid\n");
return;
}
- if (mem[0] != 0x10) {
- NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+ if (hdr->version != 0x10) {
+ NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version);
return;
}
/* validate record length */
- entries = mem[2];
- recordlen = mem[3];
- if (recordlen < 15) {
- NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+ if (hdr->entry_len < 15) {
+ NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len);
return;
}
/* parse vbios entries into common format */
memtimings->timing =
- kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+ kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL);
if (!memtimings->timing)
return;
/* Get "some number" from the timing reg for NV_40 and NV_50
- * Used in calculations later */
- if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) {
+ * Used in calculations later... source unknown */
+ magic_number = 0;
+ if (P.version == 1) {
magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
}
- entry = mem + mem[1];
- for (i = 0; i < entries; i++, entry += recordlen) {
+ entry = (u8 *)hdr + hdr->header_len;
+ for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
if (entry[0] == 0)
continue;
- tUNK_18 = 1;
- tUNK_19 = 1;
- tUNK_20 = 0;
- tUNK_21 = 0;
- switch (min(recordlen, 22)) {
- case 22:
- tUNK_21 = entry[21];
- case 21:
- tUNK_20 = entry[20];
- case 20:
- tUNK_19 = entry[19];
- case 19:
- tUNK_18 = entry[18];
- default:
- tUNK_0 = entry[0];
- tUNK_1 = entry[1];
- tUNK_2 = entry[2];
- tRP = entry[3];
- tRAS = entry[5];
- tRFC = entry[7];
- tRC = entry[9];
- tUNK_10 = entry[10];
- tUNK_11 = entry[11];
- tUNK_12 = entry[12];
- tUNK_13 = entry[13];
- tUNK_14 = entry[14];
- break;
- }
-
- timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
-
- /* XXX: I don't trust the -1's and +1's... they must come
- * from somewhere! */
- timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
- max(tUNK_18, (u8) 1) << 16 |
- (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
- if (dev_priv->chipset == 0xa8) {
- timing->reg_100224 |= (tUNK_2 - 1);
- } else {
- timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
- }
-
- timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
- if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa)
- timing->reg_100228 |= (tUNK_19 - 1) << 24;
- else
- timing->reg_100228 |= magic_number << 24;
-
- if (dev_priv->card_type == NV_40) {
- /* NV40: don't know what the rest of the regs are..
- * And don't need to know either */
- timing->reg_100228 |= 0x20200000;
- } else if (dev_priv->card_type >= NV_50) {
- if (dev_priv->chipset < 0x98 ||
- (dev_priv->chipset == 0x98 &&
- dev_priv->stepping <= 0xa1)) {
- timing->reg_10022c = (0x14 + tUNK_2) << 24 |
- 0x16 << 16 |
- (tUNK_2 - 1) << 8 |
- (tUNK_2 - 1);
- } else {
- /* XXX: reg_10022c for recentish cards */
- timing->reg_10022c = tUNK_2 - 1;
- }
-
- timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
- tUNK_13 << 8 | tUNK_13);
-
- timing->reg_100234 = (tRAS << 24 | tRC);
- timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
-
- if (dev_priv->chipset < 0x98 ||
- (dev_priv->chipset == 0x98 &&
- dev_priv->stepping <= 0xa1)) {
- timing->reg_100234 |= (tUNK_2 + 2) << 8;
- } else {
- /* XXX: +6? */
- timing->reg_100234 |= (tUNK_19 + 6) << 8;
- }
-
- /* XXX; reg_100238
- * reg_100238: 0x00?????? */
- timing->reg_10023c = 0x202;
- if (dev_priv->chipset < 0x98 ||
- (dev_priv->chipset == 0x98 &&
- dev_priv->stepping <= 0xa1)) {
- timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
- } else {
- /* XXX: reg_10023c
- * currently unknown
- * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
- }
-
- /* XXX: reg_100240? */
- }
timing->id = i;
-
- NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
- timing->reg_100220, timing->reg_100224,
- timing->reg_100228, timing->reg_10022c);
- NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
- timing->reg_100230, timing->reg_100234,
- timing->reg_100238, timing->reg_10023c);
- NV_DEBUG(dev, " 240: %08x\n", timing->reg_100240);
+ timing->WR = entry[0];
+ timing->CL = entry[2];
+
+ if (dev_priv->card_type <= NV_40) {
+ nv40_mem_timing_entry(dev, hdr, (struct nouveau_pm_tbl_entry *)entry, magic_number, &pm->memtimings.timing[i]);
+ } else if (dev_priv->card_type == NV_50) {
+ nv50_mem_timing_entry(dev, &P, hdr, (struct nouveau_pm_tbl_entry *)entry, magic_number, &pm->memtimings.timing[i]);
+ } else if (dev_priv->card_type == NV_C0) {
+ nvc0_mem_timing_entry(dev, hdr, (struct nouveau_pm_tbl_entry *)entry, &pm->memtimings.timing[i]);
+ }
}
- memtimings->nr_timing = entries;
- memtimings->supported = (dev_priv->chipset <= 0x98);
+ memtimings->nr_timing = hdr->entry_cnt;
+ memtimings->supported = P.version == 1;
}
void
@@ -693,7 +708,10 @@ nouveau_mem_timing_fini(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
- kfree(mem->timing);
+ if (mem->timing) {
+ kfree(mem->timing);
+ mem->timing = NULL;
+ }
}
static int
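As a reading aid, a small standalone sketch of how the BIT 'P' memory-timing table is walked with the nouveau_pm_tbl_header layout introduced above (version, header_len, entry_cnt, entry_len), mirroring the sanity checks in nouveau_mem_timing_init(). The struct and callback names here are illustrative only, not part of the patch.

#include <stdint.h>
#include <stddef.h>

struct pm_tbl_header {            /* mirrors nouveau_pm_tbl_header */
    uint8_t version;
    uint8_t header_len;
    uint8_t entry_cnt;
    uint8_t entry_len;
};

typedef void (*timing_cb)(int idx, const uint8_t *entry, size_t len);

static void walk_timing_table(const uint8_t *tbl, timing_cb cb)
{
    const struct pm_tbl_header *hdr = (const struct pm_tbl_header *)tbl;
    const uint8_t *entry = tbl + hdr->header_len;
    int i;

    if (hdr->version != 0x10 || hdr->entry_len < 15)
        return;                   /* same sanity checks as the driver */

    for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
        if (entry[0] == 0)        /* empty slot, skipped by the driver too */
            continue;
        cb(i, entry, hdr->entry_len);
    }
}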
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 1640dec3b82..b29ffb3d140 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -27,7 +27,7 @@
#include "nouveau_mm.h"
static inline void
-region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
{
list_del(&a->nl_entry);
list_del(&a->fl_entry);
@@ -35,7 +35,7 @@ region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
}
static struct nouveau_mm_node *
-region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
{
struct nouveau_mm_node *b;
@@ -57,33 +57,33 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
return b;
}
-#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
void
-nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
{
struct nouveau_mm_node *prev = node(this, prev);
struct nouveau_mm_node *next = node(this, next);
- list_add(&this->fl_entry, &rmm->free);
+ list_add(&this->fl_entry, &mm->free);
this->type = 0;
if (prev && prev->type == 0) {
prev->length += this->length;
- region_put(rmm, this);
+ region_put(mm, this);
this = prev;
}
if (next && next->type == 0) {
next->offset = this->offset;
next->length += this->length;
- region_put(rmm, this);
+ region_put(mm, this);
}
}
int
-nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
u32 align, struct nouveau_mm_node **pnode)
{
struct nouveau_mm_node *prev, *this, *next;
@@ -92,17 +92,17 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
u32 splitoff;
u32 s, e;
- list_for_each_entry(this, &rmm->free, fl_entry) {
+ list_for_each_entry(this, &mm->free, fl_entry) {
e = this->offset + this->length;
s = this->offset;
prev = node(this, prev);
if (prev && prev->type != type)
- s = roundup(s, rmm->block_size);
+ s = roundup(s, mm->block_size);
next = node(this, next);
if (next && next->type != type)
- e = rounddown(e, rmm->block_size);
+ e = rounddown(e, mm->block_size);
s = (s + align_mask) & ~align_mask;
e &= ~align_mask;
@@ -110,10 +110,10 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
continue;
splitoff = s - this->offset;
- if (splitoff && !region_split(rmm, this, splitoff))
+ if (splitoff && !region_split(mm, this, splitoff))
return -ENOMEM;
- this = region_split(rmm, this, min(size, e - s));
+ this = region_split(mm, this, min(size, e - s));
if (!this)
return -ENOMEM;
@@ -127,52 +127,49 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
}
int
-nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
{
- struct nouveau_mm *rmm;
- struct nouveau_mm_node *heap;
+ struct nouveau_mm_node *node;
+
+ if (block) {
+ mutex_init(&mm->mutex);
+ INIT_LIST_HEAD(&mm->nodes);
+ INIT_LIST_HEAD(&mm->free);
+ mm->block_size = block;
+ mm->heap_nodes = 0;
+ }
- heap = kzalloc(sizeof(*heap), GFP_KERNEL);
- if (!heap)
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
return -ENOMEM;
- heap->offset = roundup(offset, block);
- heap->length = rounddown(offset + length, block) - heap->offset;
+ node->offset = roundup(offset, mm->block_size);
+ node->length = rounddown(offset + length, mm->block_size) - node->offset;
- rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
- if (!rmm) {
- kfree(heap);
- return -ENOMEM;
- }
- rmm->block_size = block;
- mutex_init(&rmm->mutex);
- INIT_LIST_HEAD(&rmm->nodes);
- INIT_LIST_HEAD(&rmm->free);
- list_add(&heap->nl_entry, &rmm->nodes);
- list_add(&heap->fl_entry, &rmm->free);
-
- *prmm = rmm;
+ list_add_tail(&node->nl_entry, &mm->nodes);
+ list_add_tail(&node->fl_entry, &mm->free);
+ mm->heap_nodes++;
return 0;
}
int
-nouveau_mm_fini(struct nouveau_mm **prmm)
+nouveau_mm_fini(struct nouveau_mm *mm)
{
- struct nouveau_mm *rmm = *prmm;
struct nouveau_mm_node *node, *heap =
- list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
-
- if (!list_is_singular(&rmm->nodes)) {
- printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
- list_for_each_entry(node, &rmm->nodes, nl_entry) {
- printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
- node->type, node->offset, node->length);
+ list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
+ int nodes = 0;
+
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (nodes++ == mm->heap_nodes) {
+ printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
+ node->type, node->offset, node->length);
+ }
+ WARN_ON(1);
+ return -EBUSY;
}
- WARN_ON(1);
- return -EBUSY;
}
kfree(heap);
- kfree(rmm);
- *prmm = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index b9c016d2155..57a600c35c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -42,10 +42,11 @@ struct nouveau_mm {
struct mutex mutex;
u32 block_size;
+ int heap_nodes;
};
-int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
-int nouveau_mm_fini(struct nouveau_mm **);
+int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm *);
int nouveau_mm_pre(struct nouveau_mm *);
int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
u32 align, struct nouveau_mm_node **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 159b7c437d3..02222c540ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -693,6 +693,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *pgd = NULL;
struct nouveau_vm_pgd *vpgd;
@@ -722,6 +723,9 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
/* map display semaphore buffers into channel's vm */
+ if (dev_priv->card_type >= NV_D0)
+ return 0;
+
for (i = 0; i < 2; i++) {
struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
@@ -746,7 +750,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
- if (dev_priv->card_type == NV_C0)
+ if (dev_priv->card_type >= NV_C0)
return nvc0_gpuobj_channel_init(chan, vm);
/* Allocate a chunk of memory for per-channel object storage */
@@ -793,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
return ret;
/* dma objects for display sync channel semaphore blocks */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct nouveau_gpuobj *sem = NULL;
struct nv50_display_crtc *dispc =
&nv50_display(dev)->crtc[i];
@@ -875,18 +879,18 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (dev_priv->card_type >= NV_50) {
+ if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
struct nv50_display *disp = nv50_display(dev);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct nv50_display_crtc *dispc = &disp->crtc[i];
nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
}
-
- nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_pd);
}
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+
if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index ef9dec0e6f8..33d03fbf00d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -127,13 +127,57 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
entry += ramcfg * recordlen;
if (entry[1] >= pm->memtimings.nr_timing) {
- NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
+ if (entry[1] != 0xff)
+ NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
return NULL;
}
return &pm->memtimings.timing[entry[1]];
}
+static void
+nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
+ struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ u8 *vmap;
+ int id;
+
+ id = perflvl->volt_min;
+ perflvl->volt_min = 0;
+
+ /* boards using voltage table version <0x40 store the voltage
+ * level directly in the perflvl entry as a multiple of 10mV
+ */
+ if (dev_priv->engine.pm.voltage.version < 0x40) {
+ perflvl->volt_min = id * 10000;
+ perflvl->volt_max = perflvl->volt_min;
+ return;
+ }
+
+ /* on newer ones, the perflvl stores an index into yet another
+ * vbios table containing a min/max voltage value for the perflvl
+ */
+ if (P->version != 2 || P->length < 34) {
+ NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n",
+ P->version, P->length);
+ return;
+ }
+
+ vmap = ROMPTR(bios, P->data[32]);
+ if (!vmap) {
+ NV_DEBUG(dev, "volt map table pointer invalid\n");
+ return;
+ }
+
+ if (id < vmap[3]) {
+ vmap += vmap[1] + (vmap[2] * id);
+ perflvl->volt_min = ROM32(vmap[0]);
+ perflvl->volt_max = ROM32(vmap[4]);
+ }
+}
+
void
nouveau_perf_init(struct drm_device *dev)
{
@@ -141,6 +185,8 @@ nouveau_perf_init(struct drm_device *dev)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nvbios *bios = &dev_priv->vbios;
struct bit_entry P;
+ struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+ struct nouveau_pm_tbl_header mt_hdr;
u8 version, headerlen, recordlen, entries;
u8 *perf, *entry;
int vid, i;
@@ -188,6 +234,22 @@ nouveau_perf_init(struct drm_device *dev)
}
entry = perf + headerlen;
+
+ /* For version 0x15, initialize memtiming table */
+ if (version == 0x15) {
+ memtimings->timing =
+ kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+ if (!memtimings->timing) {
+ NV_WARN(dev, "Could not allocate memtiming table\n");
+ return;
+ }
+
+ mt_hdr.entry_cnt = entries;
+ mt_hdr.entry_len = 14;
+ mt_hdr.version = version;
+ mt_hdr.header_len = 4;
+ }
+
for (i = 0; i < entries; i++) {
struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
@@ -203,7 +265,8 @@ nouveau_perf_init(struct drm_device *dev)
case 0x13:
case 0x15:
perflvl->fanspeed = entry[55];
- perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
+ if (recordlen > 56)
+ perflvl->volt_min = entry[56];
perflvl->core = ROM32(entry[1]) * 10;
perflvl->memory = ROM32(entry[5]) * 20;
break;
@@ -211,9 +274,10 @@ nouveau_perf_init(struct drm_device *dev)
case 0x23:
case 0x24:
perflvl->fanspeed = entry[4];
- perflvl->voltage = entry[5];
- perflvl->core = ROM16(entry[6]) * 1000;
-
+ perflvl->volt_min = entry[5];
+ perflvl->shader = ROM16(entry[6]) * 1000;
+ perflvl->core = perflvl->shader;
+ perflvl->core += (signed char)entry[8] * 1000;
if (dev_priv->chipset == 0x49 ||
dev_priv->chipset == 0x4b)
perflvl->memory = ROM16(entry[11]) * 1000;
@@ -223,7 +287,7 @@ nouveau_perf_init(struct drm_device *dev)
break;
case 0x25:
perflvl->fanspeed = entry[4];
- perflvl->voltage = entry[5];
+ perflvl->volt_min = entry[5];
perflvl->core = ROM16(entry[6]) * 1000;
perflvl->shader = ROM16(entry[10]) * 1000;
perflvl->memory = ROM16(entry[12]) * 1000;
@@ -232,7 +296,7 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->memscript = ROM16(entry[2]);
case 0x35:
perflvl->fanspeed = entry[6];
- perflvl->voltage = entry[7];
+ perflvl->volt_min = entry[7];
perflvl->core = ROM16(entry[8]) * 1000;
perflvl->shader = ROM16(entry[10]) * 1000;
perflvl->memory = ROM16(entry[12]) * 1000;
@@ -240,30 +304,34 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->unk05 = ROM16(entry[16]) * 1000;
break;
case 0x40:
-#define subent(n) entry[perf[2] + ((n) * perf[3])]
+#define subent(n) ((ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000)
perflvl->fanspeed = 0; /*XXX*/
- perflvl->voltage = entry[2];
+ perflvl->volt_min = entry[2];
if (dev_priv->card_type == NV_50) {
- perflvl->core = ROM16(subent(0)) & 0xfff;
- perflvl->shader = ROM16(subent(1)) & 0xfff;
- perflvl->memory = ROM16(subent(2)) & 0xfff;
+ perflvl->core = subent(0);
+ perflvl->shader = subent(1);
+ perflvl->memory = subent(2);
+ perflvl->vdec = subent(3);
+ perflvl->unka0 = subent(4);
} else {
- perflvl->shader = ROM16(subent(3)) & 0xfff;
+ perflvl->hub06 = subent(0);
+ perflvl->hub01 = subent(1);
+ perflvl->copy = subent(2);
+ perflvl->shader = subent(3);
+ perflvl->rop = subent(4);
+ perflvl->memory = subent(5);
+ perflvl->vdec = subent(6);
+ perflvl->daemon = subent(10);
+ perflvl->hub07 = subent(11);
perflvl->core = perflvl->shader / 2;
- perflvl->unk0a = ROM16(subent(4)) & 0xfff;
- perflvl->memory = ROM16(subent(5)) & 0xfff;
}
-
- perflvl->core *= 1000;
- perflvl->shader *= 1000;
- perflvl->memory *= 1000;
- perflvl->unk0a *= 1000;
break;
}
/* make sure vid is valid */
- if (pm->voltage.supported && perflvl->voltage) {
- vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
+ nouveau_perf_voltage(dev, &P, perflvl);
+ if (pm->voltage.supported && perflvl->volt_min) {
+ vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
if (vid < 0) {
NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
entry += recordlen;
@@ -272,7 +340,11 @@ nouveau_perf_init(struct drm_device *dev)
}
/* get the corresponding memory timings */
- if (version > 0x15) {
+ if (version == 0x15) {
+ memtimings->timing[i].id = i;
+ nv30_mem_timing_entry(dev, &mt_hdr, (struct nouveau_pm_tbl_entry *)&entry[41], 0, &memtimings->timing[i]);
+ perflvl->timing = &memtimings->timing[i];
+ } else if (version > 0x15) {
/* last 3 args are for < 0x40, ignored for >= 0x40 */
perflvl->timing =
nouveau_perf_timing(dev, &P,
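For clarity, a standalone sketch of the voltage-map lookup that nouveau_perf_voltage() adds above for voltage-table versions >= 0x40: byte 1 of the map is the header length, byte 2 the record length, byte 3 the entry count, and each record carries a min/max voltage in microvolts as two little-endian 32-bit values. The layout follows the patch; the helper names are hypothetical.

#include <stdint.h>
#include <stdbool.h>

static uint32_t rom32(const uint8_t *p)        /* little-endian read, like ROM32() */
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static bool volt_map_lookup(const uint8_t *vmap, uint8_t id,
                            uint32_t *volt_min, uint32_t *volt_max)
{
    if (id >= vmap[3])                         /* vmap[3]: number of entries */
        return false;

    vmap += vmap[1] + vmap[2] * id;            /* skip header, index the record */
    *volt_min = rom32(&vmap[0]);               /* microvolts */
    *volt_max = rom32(&vmap[4]);
    return true;
}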
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index da8d994d5e8..a539fd25792 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -64,18 +64,26 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
if (perflvl == pm->cur)
return 0;
- if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
- ret = pm->voltage_set(dev, perflvl->voltage);
+ if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
+ ret = pm->voltage_set(dev, perflvl->volt_min);
if (ret) {
NV_ERROR(dev, "voltage_set %d failed: %d\n",
- perflvl->voltage, ret);
+ perflvl->volt_min, ret);
}
}
- nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
- nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
- nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
- nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+ if (pm->clocks_pre) {
+ void *state = pm->clocks_pre(dev, perflvl);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+ pm->clocks_set(dev, state);
+ } else
+ if (pm->clock_set) {
+ nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+ nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+ nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+ nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+ }
pm->cur = perflvl;
return 0;
@@ -92,9 +100,6 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
if (nouveau_perflvl_wr != 7777)
return -EPERM;
- if (!pm->clock_set)
- return -EINVAL;
-
if (!strncmp(profile, "boot", 4))
perflvl = &pm->boot;
else {
@@ -123,31 +128,37 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
int ret;
- if (!pm->clock_get)
- return -EINVAL;
-
memset(perflvl, 0, sizeof(*perflvl));
- ret = pm->clock_get(dev, PLL_CORE);
- if (ret > 0)
- perflvl->core = ret;
+ if (pm->clocks_get) {
+ ret = pm->clocks_get(dev, perflvl);
+ if (ret)
+ return ret;
+ } else
+ if (pm->clock_get) {
+ ret = pm->clock_get(dev, PLL_CORE);
+ if (ret > 0)
+ perflvl->core = ret;
- ret = pm->clock_get(dev, PLL_MEMORY);
- if (ret > 0)
- perflvl->memory = ret;
+ ret = pm->clock_get(dev, PLL_MEMORY);
+ if (ret > 0)
+ perflvl->memory = ret;
- ret = pm->clock_get(dev, PLL_SHADER);
- if (ret > 0)
- perflvl->shader = ret;
+ ret = pm->clock_get(dev, PLL_SHADER);
+ if (ret > 0)
+ perflvl->shader = ret;
- ret = pm->clock_get(dev, PLL_UNK05);
- if (ret > 0)
- perflvl->unk05 = ret;
+ ret = pm->clock_get(dev, PLL_UNK05);
+ if (ret > 0)
+ perflvl->unk05 = ret;
+ }
if (pm->voltage.supported && pm->voltage_get) {
ret = pm->voltage_get(dev);
- if (ret > 0)
- perflvl->voltage = ret;
+ if (ret > 0) {
+ perflvl->volt_min = ret;
+ perflvl->volt_max = ret;
+ }
}
return 0;
@@ -156,7 +167,7 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
static void
nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
{
- char c[16], s[16], v[16], f[16], t[16];
+ char c[16], s[16], v[32], f[16], t[16], m[16];
c[0] = '\0';
if (perflvl->core)
@@ -166,9 +177,19 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
if (perflvl->shader)
snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
+ m[0] = '\0';
+ if (perflvl->memory)
+ snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
+
v[0] = '\0';
- if (perflvl->voltage)
- snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
+ if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
+ snprintf(v, sizeof(v), " voltage %dmV-%dmV",
+ perflvl->volt_min / 1000, perflvl->volt_max / 1000);
+ } else
+ if (perflvl->volt_min) {
+ snprintf(v, sizeof(v), " voltage %dmV",
+ perflvl->volt_min / 1000);
+ }
f[0] = '\0';
if (perflvl->fanspeed)
@@ -178,8 +199,7 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
if (perflvl->timing)
snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
- snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000,
- c, s, v, f, t);
+ snprintf(ptr, len, "%s%s%s%s%s%s\n", c, s, m, t, v, f);
}
static ssize_t
@@ -190,7 +210,7 @@ nouveau_pm_get_perflvl_info(struct device *d,
char *ptr = buf;
int len = PAGE_SIZE;
- snprintf(ptr, len, "%d: ", perflvl->id);
+ snprintf(ptr, len, "%d:", perflvl->id);
ptr += strlen(buf);
len -= strlen(buf);
@@ -211,9 +231,9 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
if (!pm->cur)
snprintf(ptr, len, "setting: boot\n");
else if (pm->cur == &pm->boot)
- snprintf(ptr, len, "setting: boot\nc: ");
+ snprintf(ptr, len, "setting: boot\nc:");
else
- snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
+ snprintf(ptr, len, "setting: static %d\nc:", pm->cur->id);
ptr += strlen(buf);
len -= strlen(buf);
@@ -292,7 +312,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
}
}
-#ifdef CONFIG_HWMON
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
static ssize_t
nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
{
@@ -409,7 +429,7 @@ static const struct attribute_group hwmon_attrgroup = {
static int
nouveau_hwmon_init(struct drm_device *dev)
{
-#ifdef CONFIG_HWMON
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct device *hwmon_dev;
@@ -442,7 +462,7 @@ nouveau_hwmon_init(struct drm_device *dev)
static void
nouveau_hwmon_fini(struct drm_device *dev)
{
-#ifdef CONFIG_HWMON
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
@@ -488,7 +508,7 @@ nouveau_pm_init(struct drm_device *dev)
NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
for (i = 0; i < pm->nr_perflvl; i++) {
nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
- NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
+ NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
}
/* determine current ("boot") performance level */
@@ -498,7 +518,7 @@ nouveau_pm_init(struct drm_device *dev)
pm->cur = &pm->boot;
nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
- NV_INFO(dev, "c: %s", info);
+ NV_INFO(dev, "c:%s", info);
}
/* switch performance levels now if requested */
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 4a9838ddace..8ac02cdd03a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -52,6 +52,11 @@ void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
u32 id, int khz);
void nv04_pm_clock_set(struct drm_device *, void *);
+/* nv40_pm.c */
+int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+void nv40_pm_clocks_set(struct drm_device *, void *);
+
/* nv50_pm.c */
int nv50_pm_clock_get(struct drm_device *, u32 id);
void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
@@ -59,10 +64,12 @@ void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
void nv50_pm_clock_set(struct drm_device *, void *);
/* nva3_pm.c */
-int nva3_pm_clock_get(struct drm_device *, u32 id);
-void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
-void nva3_pm_clock_set(struct drm_device *, void *);
+int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+void nva3_pm_clocks_set(struct drm_device *, void *);
+
+/* nvc0_pm.c */
+int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
/* nouveau_temp.c */
void nouveau_temp_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index f18cdfc3400..43a96b99e18 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,9 +826,12 @@
#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
-#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
-#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
-#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
+#define NV50_PDISP_SOR_PWM_DIV(i) (0x0061c080 + (i) * 0x800)
+#define NV50_PDISP_SOR_PWM_CTL(i) (0x0061c084 + (i) * 0x800)
+#define NV50_PDISP_SOR_PWM_CTL_NEW 0x80000000
+#define NVA3_PDISP_SOR_PWM_CTL_UNK 0x40000000
+#define NV50_PDISP_SOR_PWM_CTL_VAL 0x000007ff
+#define NVA3_PDISP_SOR_PWM_CTL_VAL 0x00ffffff
#define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_CTRL_ENABLED 0x00000001
#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
@@ -843,7 +846,7 @@
#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
#define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
-#define NV50_SOR_DP_UNK128(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_SCFG(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 2706cb3d871..b75258a9fe4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,8 +12,8 @@ struct nouveau_sgdma_be {
struct drm_device *dev;
dma_addr_t *pages;
- bool *ttm_alloced;
unsigned nr_pages;
+ bool unmap_pages;
u64 offset;
bool bound;
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
+ int i;
NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
- if (nvbe->pages)
- return -EINVAL;
-
- nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
- if (!nvbe->pages)
- return -ENOMEM;
+ nvbe->pages = dma_addrs;
+ nvbe->nr_pages = num_pages;
+ nvbe->unmap_pages = true;
- nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
- if (!nvbe->ttm_alloced) {
- kfree(nvbe->pages);
- nvbe->pages = NULL;
- return -ENOMEM;
+ /* this code path isn't called and is incorrect anyways */
+ if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
+ nvbe->unmap_pages = false;
+ return 0;
}
- nvbe->nr_pages = 0;
- while (num_pages--) {
- /* this code path isn't called and is incorrect anyways */
- if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
- nvbe->pages[nvbe->nr_pages] =
- dma_addrs[nvbe->nr_pages];
- nvbe->ttm_alloced[nvbe->nr_pages] = true;
- } else {
- nvbe->pages[nvbe->nr_pages] =
- pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- nvbe->pages[nvbe->nr_pages])) {
- be->func->clear(be);
- return -EFAULT;
- }
- nvbe->ttm_alloced[nvbe->nr_pages] = false;
+ for (i = 0; i < num_pages; i++) {
+ nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
+ nvbe->nr_pages = --i;
+ be->func->clear(be);
+ return -EFAULT;
}
-
- nvbe->nr_pages++;
}
return 0;
@@ -72,25 +57,16 @@ static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev;
-
- if (nvbe && nvbe->pages) {
- dev = nvbe->dev;
- NV_DEBUG(dev, "\n");
+ struct drm_device *dev = nvbe->dev;
- if (nvbe->bound)
- be->func->unbind(be);
+ if (nvbe->bound)
+ be->func->unbind(be);
+ if (nvbe->unmap_pages) {
while (nvbe->nr_pages--) {
- if (!nvbe->ttm_alloced[nvbe->nr_pages])
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
- kfree(nvbe->pages);
- kfree(nvbe->ttm_alloced);
- nvbe->pages = NULL;
- nvbe->ttm_alloced = NULL;
- nvbe->nr_pages = 0;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 10656e430b4..d8831ab42bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -286,9 +286,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->pm.clocks_get = nv40_pm_clocks_get;
+ engine->pm.clocks_pre = nv40_pm_clocks_pre;
+ engine->pm.clocks_set = nv40_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
@@ -299,7 +299,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
case 0x90:
- case 0xA0:
+ case 0xa0:
engine->instmem.init = nv50_instmem_init;
engine->instmem.takedown = nv50_instmem_takedown;
engine->instmem.suspend = nv50_instmem_suspend;
@@ -359,9 +359,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_set = nv50_pm_clock_set;
break;
default:
- engine->pm.clock_get = nva3_pm_clock_get;
- engine->pm.clock_pre = nva3_pm_clock_pre;
- engine->pm.clock_set = nva3_pm_clock_set;
+ engine->pm.clocks_get = nva3_pm_clocks_get;
+ engine->pm.clocks_pre = nva3_pm_clocks_pre;
+ engine->pm.clocks_set = nva3_pm_clocks_set;
break;
}
engine->pm.voltage_get = nouveau_voltage_gpio_get;
@@ -376,7 +376,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nv50_vram_flags_valid;
break;
- case 0xC0:
+ case 0xc0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
@@ -422,12 +422,73 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
engine->pm.temp_get = nv84_temp_get;
+ engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ break;
+ case 0xd0:
+ engine->instmem.init = nvc0_instmem_init;
+ engine->instmem.takedown = nvc0_instmem_takedown;
+ engine->instmem.suspend = nvc0_instmem_suspend;
+ engine->instmem.resume = nvc0_instmem_resume;
+ engine->instmem.get = nv50_instmem_get;
+ engine->instmem.put = nv50_instmem_put;
+ engine->instmem.map = nv50_instmem_map;
+ engine->instmem.unmap = nv50_instmem_unmap;
+ engine->instmem.flush = nv84_instmem_flush;
+ engine->mc.init = nv50_mc_init;
+ engine->mc.takedown = nv50_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nvc0_fb_init;
+ engine->fb.takedown = nvc0_fb_takedown;
+ engine->fifo.channels = 128;
+ engine->fifo.init = nvc0_fifo_init;
+ engine->fifo.takedown = nvc0_fifo_takedown;
+ engine->fifo.disable = nvc0_fifo_disable;
+ engine->fifo.enable = nvc0_fifo_enable;
+ engine->fifo.reassign = nvc0_fifo_reassign;
+ engine->fifo.channel_id = nvc0_fifo_channel_id;
+ engine->fifo.create_context = nvc0_fifo_create_context;
+ engine->fifo.destroy_context = nvc0_fifo_destroy_context;
+ engine->fifo.load_context = nvc0_fifo_load_context;
+ engine->fifo.unload_context = nvc0_fifo_unload_context;
+ engine->display.early_init = nouveau_stub_init;
+ engine->display.late_takedown = nouveau_stub_takedown;
+ engine->display.create = nvd0_display_create;
+ engine->display.init = nvd0_display_init;
+ engine->display.destroy = nvd0_display_destroy;
+ engine->gpio.init = nv50_gpio_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nvd0_gpio_get;
+ engine->gpio.set = nvd0_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ engine->vram.init = nvc0_vram_init;
+ engine->vram.takedown = nv50_vram_fini;
+ engine->vram.get = nvc0_vram_new;
+ engine->vram.put = nv50_vram_del;
+ engine->vram.flags_valid = nvc0_vram_flags_valid;
+ engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
return 1;
}
+ /* headless mode */
+ if (nouveau_modeset == 2) {
+ engine->display.early_init = nouveau_stub_init;
+ engine->display.late_takedown = nouveau_stub_takedown;
+ engine->display.create = nouveau_stub_init;
+ engine->display.init = nouveau_stub_init;
+ engine->display.destroy = nouveau_stub_takedown;
+ }
+
return 0;
}
@@ -449,21 +510,6 @@ nouveau_vga_set_decode(void *priv, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-static int
-nouveau_card_init_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
- NvDmaFB, NvDmaTT);
- if (ret)
- return ret;
-
- mutex_unlock(&dev_priv->channel->mutex);
- return 0;
-}
-
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
@@ -533,6 +579,14 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_display_early;
+ /* workaround an odd issue on nvc1 by disabling the device's
+ * nosnoop capability. hopefully won't cause issues until a
+ * better fix is found - assuming there is one...
+ */
+ if (dev_priv->chipset == 0xc1) {
+ nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
+ }
+
nouveau_pm_init(dev);
ret = engine->vram.init(dev);
@@ -630,8 +684,11 @@ nouveau_card_init(struct drm_device *dev)
break;
}
- if (dev_priv->card_type == NV_40)
- nv40_mpeg_create(dev);
+ if (dev_priv->card_type == NV_40 ||
+ dev_priv->chipset == 0x31 ||
+ dev_priv->chipset == 0x34 ||
+ dev_priv->chipset == 0x36)
+ nv31_mpeg_create(dev);
else
if (dev_priv->card_type == NV_50 &&
(dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
@@ -651,41 +708,69 @@ nouveau_card_init(struct drm_device *dev)
goto out_engine;
}
- ret = engine->display.create(dev);
+ ret = nouveau_irq_init(dev);
if (ret)
goto out_fifo;
- ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
- if (ret)
- goto out_vblank;
+ /* initialise general modesetting */
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dithering_property(dev);
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ if (dev_priv->card_type < NV_10) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
- ret = nouveau_irq_init(dev);
+ ret = engine->display.create(dev);
if (ret)
- goto out_vblank;
+ goto out_irq;
- /* what about PVIDEO/PCRTC/PRAMDAC etc? */
+ nouveau_backlight_init(dev);
if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_fence_init(dev);
if (ret)
- goto out_irq;
+ goto out_disp;
- ret = nouveau_card_init_channel(dev);
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
+ NvDmaFB, NvDmaTT);
if (ret)
goto out_fence;
+
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ goto out_chan;
+
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
}
- nouveau_fbcon_init(dev);
- drm_kms_helper_poll_init(dev);
return 0;
+out_chan:
+ nouveau_channel_put_unlocked(&dev_priv->channel);
out_fence:
nouveau_fence_fini(dev);
+out_disp:
+ nouveau_backlight_exit(dev);
+ engine->display.destroy(dev);
out_irq:
nouveau_irq_fini(dev);
-out_vblank:
- drm_vblank_cleanup(dev);
- engine->display.destroy(dev);
out_fifo:
if (!dev_priv->noaccel)
engine->fifo.takedown(dev);
@@ -732,15 +817,20 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct nouveau_engine *engine = &dev_priv->engine;
int e;
- drm_kms_helper_poll_fini(dev);
- nouveau_fbcon_fini(dev);
+ if (dev->mode_config.num_crtc) {
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
+ drm_vblank_cleanup(dev);
+ }
if (dev_priv->channel) {
nouveau_channel_put_unlocked(&dev_priv->channel);
nouveau_fence_fini(dev);
}
+ nouveau_backlight_exit(dev);
engine->display.destroy(dev);
+ drm_mode_config_cleanup(dev);
if (!dev_priv->noaccel) {
engine->fifo.takedown(dev);
@@ -774,7 +864,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
engine->vram.takedown(dev);
nouveau_irq_fini(dev);
- drm_vblank_cleanup(dev);
nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
@@ -907,7 +996,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
- uint32_t reg0;
+ uint32_t reg0, strap;
resource_size_t mmio_start_offs;
int ret;
@@ -951,13 +1040,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
/* Time to determine the card architecture */
reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
- dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */
/* We're dealing with >=NV10 */
if ((reg0 & 0x0f000000) > 0) {
/* Bit 27-20 contain the architecture in hex */
dev_priv->chipset = (reg0 & 0xff00000) >> 20;
- dev_priv->stepping = (reg0 & 0xff);
/* NV04 or NV05 */
} else if ((reg0 & 0xff00fff0) == 0x20004000) {
if (reg0 & 0x00f00000)
@@ -987,6 +1074,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
case 0xc0:
dev_priv->card_type = NV_C0;
break;
+ case 0xd0:
+ dev_priv->card_type = NV_D0;
+ break;
default:
NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
ret = -EINVAL;
@@ -996,6 +1086,23 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
+ /* determine frequency of timing crystal */
+ strap = nv_rd32(dev, 0x101000);
+ if ( dev_priv->chipset < 0x17 ||
+ (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25))
+ strap &= 0x00000040;
+ else
+ strap &= 0x00400040;
+
+ switch (strap) {
+ case 0x00000000: dev_priv->crystal = 13500; break;
+ case 0x00000040: dev_priv->crystal = 14318; break;
+ case 0x00400000: dev_priv->crystal = 27000; break;
+ case 0x00400040: dev_priv->crystal = 25000; break;
+ }
+
+ NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
+
/* Determine whether we'll attempt acceleration or not, some
* cards are disabled by default here due to them being known
* non-functional, or never been tested due to lack of hw.
@@ -1003,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->noaccel = !!nouveau_noaccel;
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
- case 0xc1: /* known broken */
- case 0xc8: /* never tested */
+#if 0
+ case 0xXX: /* known broken */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
break;
+#endif
default:
dev_priv->noaccel = false;
break;
@@ -1030,7 +1138,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
ioremap(pci_resource_start(dev->pdev, ramin_bar),
dev_priv->ramin_size);
if (!dev_priv->ramin) {
- NV_ERROR(dev, "Failed to PRAMIN BAR");
+ NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
ret = -ENOMEM;
goto err_mmio;
}
@@ -1130,7 +1238,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = 1;
+ getparam->value = dev_priv->card_type < NV_D0;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
@@ -1198,6 +1306,23 @@ nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
return false;
}
+/* Wait until cond(data) == true, up until timeout has hit */
+bool
+nouveau_wait_cb(struct drm_device *dev, u64 timeout,
+ bool (*cond)(void *), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ u64 start = ptimer->read(dev);
+
+ do {
+ if (cond(data) == true)
+ return true;
+ } while (ptimer->read(dev) - start < timeout);
+
+ return false;
+}
+
/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 081ca7b03e8..5a46446dd5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -22,6 +22,8 @@
* Authors: Martin Peres
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 244fd38fdb8..ef0832b29ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -172,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
vm->map_pgt(vpgd->obj, pde, vpgt->obj);
}
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&vm->mm->mutex);
+ mutex_lock(&vm->mm.mutex);
}
}
@@ -191,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
pgt_size *= 8;
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &pgt);
- mutex_lock(&vm->mm->mutex);
+ mutex_lock(&vm->mm.mutex);
if (unlikely(ret))
return ret;
/* someone beat us to filling the PDE while we didn't have the lock */
if (unlikely(vpgt->refcount[big]++)) {
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&vm->mm->mutex);
+ mutex_lock(&vm->mm.mutex);
return 0;
}
@@ -223,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
u32 fpde, lpde, pde;
int ret;
- mutex_lock(&vm->mm->mutex);
- ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+ mutex_lock(&vm->mm.mutex);
+ ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
if (unlikely(ret != 0)) {
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
return ret;
}
@@ -245,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
if (ret) {
if (pde != fpde)
nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
- nouveau_mm_put(vm->mm, vma->node);
- mutex_unlock(&vm->mm->mutex);
+ nouveau_mm_put(&vm->mm, vma->node);
+ mutex_unlock(&vm->mm.mutex);
vma->node = NULL;
return ret;
}
}
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
vma->vm = vm;
vma->offset = (u64)vma->node->offset << 12;
@@ -270,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma)
fpde = (vma->node->offset >> vm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
- mutex_lock(&vm->mm->mutex);
+ mutex_lock(&vm->mm.mutex);
nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
- nouveau_mm_put(vm->mm, vma->node);
+ nouveau_mm_put(&vm->mm, vma->node);
vma->node = NULL;
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
}
int
@@ -306,7 +306,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
block = length;
} else
- if (dev_priv->card_type == NV_C0) {
+ if (dev_priv->card_type >= NV_C0) {
vm->map_pgt = nvc0_vm_map_pgt;
vm->map = nvc0_vm_map;
vm->map_sg = nvc0_vm_map_sg;
@@ -360,11 +360,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
nouveau_gpuobj_ref(pgd, &vpgd->obj);
- mutex_lock(&vm->mm->mutex);
+ mutex_lock(&vm->mm.mutex);
for (i = vm->fpde; i <= vm->lpde; i++)
vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
list_add(&vpgd->head, &vm->pgd_list);
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
return 0;
}
@@ -377,7 +377,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
if (!mpgd)
return;
- mutex_lock(&vm->mm->mutex);
+ mutex_lock(&vm->mm.mutex);
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
if (vpgd->obj == mpgd) {
pgd = vpgd->obj;
@@ -386,7 +386,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
break;
}
}
- mutex_unlock(&vm->mm->mutex);
+ mutex_unlock(&vm->mm.mutex);
nouveau_gpuobj_ref(NULL, &pgd);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 579ca8cc223..6ce995f7797 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -51,7 +51,7 @@ struct nouveau_vma {
struct nouveau_vm {
struct drm_device *dev;
- struct nouveau_mm *mm;
+ struct nouveau_mm mm;
int refcount;
struct list_head pgd_list;
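
The nouveau_vm.c/nouveau_vm.h hunks above convert struct nouveau_vm's allocator from a pointer (struct nouveau_mm *mm) to an embedded member, which is why every lock becomes vm->mm.mutex and every call now passes &vm->mm. A minimal user-space sketch of that pattern, with pthread standing in for the kernel mutex and mm_get() as a stand-in allocator; none of these names are the driver's:

#include <pthread.h>
#include <stdio.h>

struct mm {
	pthread_mutex_t mutex;
	int next;
};

struct vm {
	struct mm mm;	/* embedded, no longer a pointer */
};

static int mm_get(struct mm *mm)	/* callees still take a pointer */
{
	return mm->next++;
}

int main(void)
{
	struct vm vm;
	int node;

	vm.mm.next = 0;
	pthread_mutex_init(&vm.mm.mutex, NULL);

	pthread_mutex_lock(&vm.mm.mutex);	/* was vm.mm->mutex */
	node = mm_get(&vm.mm);			/* was mm_get(vm.mm, ...) */
	pthread_mutex_unlock(&vm.mm.mutex);

	printf("got node %d\n", node);
	pthread_mutex_destroy(&vm.mm.mutex);
	return 0;
}
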
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 75e872741d9..86d03e15735 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -27,7 +27,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a };
+static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
int
@@ -170,6 +170,13 @@ nouveau_volt_init(struct drm_device *dev)
*/
vidshift = 2;
break;
+ case 0x40:
+ headerlen = volt[1];
+ recordlen = volt[2];
+ entries = volt[3]; /* not a clue what the entries are for.. */
+ vidmask = volt[11]; /* guess.. */
+ vidshift = 0;
+ break;
default:
NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
return;
@@ -197,16 +204,37 @@ nouveau_volt_init(struct drm_device *dev)
}
/* parse vbios entries into common format */
- voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
- if (!voltage->level)
- return;
+ voltage->version = volt[0];
+ if (voltage->version < 0x40) {
+ voltage->nr_level = entries;
+ voltage->level =
+ kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
+ if (!voltage->level)
+ return;
- entry = volt + headerlen;
- for (i = 0; i < entries; i++, entry += recordlen) {
- voltage->level[i].voltage = entry[0];
- voltage->level[i].vid = entry[1] >> vidshift;
+ entry = volt + headerlen;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ voltage->level[i].voltage = entry[0] * 10000;
+ voltage->level[i].vid = entry[1] >> vidshift;
+ }
+ } else {
+ u32 volt_uv = ROM32(volt[4]);
+ s16 step_uv = ROM16(volt[8]);
+ u8 vid;
+
+ voltage->nr_level = voltage->vid_mask + 1;
+ voltage->level = kcalloc(voltage->nr_level,
+ sizeof(*voltage->level), GFP_KERNEL);
+ if (!voltage->level)
+ return;
+
+ for (vid = 0; vid <= voltage->vid_mask; vid++) {
+ voltage->level[vid].voltage = volt_uv;
+ voltage->level[vid].vid = vid;
+ volt_uv += step_uv;
+ }
}
- voltage->nr_level = entries;
+
voltage->supported = true;
}
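
For the new version-0x40 tables, the branch added above no longer copies records out of the VBIOS; it synthesises one level per possible VID from a base voltage in microvolts and a signed per-VID step. A minimal sketch of that expansion; expand_v40() and struct volt_level are illustrative names, and the sample values in main() are made up rather than taken from any VBIOS:

#include <stdio.h>
#include <stdint.h>

struct volt_level {
	uint32_t voltage;	/* microvolts */
	uint8_t vid;
};

/* one entry per possible VID, base voltage plus a signed step per step */
static void expand_v40(uint32_t base_uv, int16_t step_uv, uint8_t vid_mask,
		       struct volt_level *level)
{
	uint32_t volt_uv = base_uv;

	for (unsigned vid = 0; vid <= vid_mask; vid++) {
		level[vid].voltage = volt_uv;
		level[vid].vid = vid;
		volt_uv += step_uv;
	}
}

int main(void)
{
	struct volt_level level[8];

	expand_v40(900000, 12500, 7, level);	/* example base/step/mask */
	for (int i = 0; i < 8; i++)
		printf("vid %d -> %u uV\n", level[i].vid, level[i].voltage);
	return 0;
}
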
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 1715e1464b7..6bd8518d7b2 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -126,27 +126,6 @@ nv04_display_create(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 1);
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dithering_property(dev);
-
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
-
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- switch (dev_priv->card_type) {
- case NV_04:
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
- break;
- default:
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
- break;
- }
-
- dev->mode_config.fb_base = dev_priv->fb_phys;
-
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
@@ -235,8 +214,6 @@ nv04_display_destroy(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->restore(crtc);
- drm_mode_config_cleanup(dev);
-
nouveau_hw_save_vga_fonts(dev, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index eb1c70dd82e..9ae92a87b8c 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -68,6 +68,7 @@ void
nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
struct nv04_pm_state *state = pre_state;
u32 reg = state->pll.reg;
@@ -85,6 +86,9 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
nv_mask(dev, 0x1002c0, 0, 1 << 8);
}
+ if (reg == NV_PRAMDAC_NVPLL_COEFF)
+ ptimer->init(dev);
+
kfree(state);
}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
index 1d09ddd5739..263301b809d 100644
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -6,43 +6,75 @@
int
nv04_timer_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 m, n, d;
+
nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
- /* Just use the pre-existing values when possible for now; these regs
- * are not written in nv (driver writer missed a /4 on the address), and
- * writing 8 and 3 to the correct regs breaks the timings on the LVDS
- * hardware sequencing microcode.
- * A correct solution (involving calculations with the GPU PLL) can
- * be done when kernel modesetting lands
- */
- if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
- !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
- nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
- nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
+ /* aim for 31.25MHz, which gives us nanosecond timestamps */
+ d = 1000000 / 32;
+
+ /* determine base clock for timer source */
+ if (dev_priv->chipset < 0x40) {
+ n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
+ } else
+ if (dev_priv->chipset == 0x40) {
+ /*XXX: figure this out */
+ n = 0;
+ } else {
+ n = dev_priv->crystal;
+ m = 1;
+ while (n < (d * 2)) {
+ n += (n / m);
+ m++;
+ }
+
+ nv_wr32(dev, 0x009220, m - 1);
+ }
+
+ if (!n) {
+ NV_WARN(dev, "PTIMER: unknown input clock freq\n");
+ if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
+ !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
+ nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
+ nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
+ }
+ return 0;
+ }
+
+ /* reduce ratio to acceptable values */
+ while (((n % 5) == 0) && ((d % 5) == 0)) {
+ n /= 5;
+ d /= 5;
}
+ while (((n % 2) == 0) && ((d % 2) == 0)) {
+ n /= 2;
+ d /= 2;
+ }
+
+ while (n > 0xffff || d > 0xffff) {
+ n >>= 1;
+ d >>= 1;
+ }
+
+ nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
+ nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
return 0;
}
-uint64_t
+u64
nv04_timer_read(struct drm_device *dev)
{
- uint32_t low;
- /* From kmmio dumps on nv28 this looks like how the blob does this.
- * It reads the high dword twice, before and after.
- * The only explanation seems to be that the 64-bit timer counter
- * advances between high and low dword reads and may corrupt the
- * result. Not confirmed.
- */
- uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
- uint32_t high1;
+ u32 hi, lo;
+
do {
- high1 = high2;
- low = nv_rd32(dev, NV04_PTIMER_TIME_0);
- high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
- } while (high1 != high2);
- return (((uint64_t)high2) << 32) | (uint64_t)low;
+ hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
+ } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
+
+ return ((u64)hi << 32 | lo);
}
void
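
The rewritten nv04_timer_init() above turns the input clock (in kHz) and the 31.25 MHz target into a numerator/denominator pair by stripping common factors of 5 and 2, then shifting both down until they fit the 16-bit hardware fields. A standalone sketch of just that reduction step; reduce_ratio() is an illustrative name and the 27000 kHz input in main() is only an example:

#include <stdio.h>
#include <stdint.h>

/* reduce n/d as nv04_timer_init() does before programming PTIMER */
static void reduce_ratio(uint32_t *n, uint32_t *d)
{
	while (((*n % 5) == 0) && ((*d % 5) == 0)) {
		*n /= 5;
		*d /= 5;
	}
	while (((*n % 2) == 0) && ((*d % 2) == 0)) {
		*n /= 2;
		*d /= 2;
	}
	while (*n > 0xffff || *d > 0xffff) {	/* fit the 16-bit registers */
		*n >>= 1;
		*d >>= 1;
	}
}

int main(void)
{
	/* example: 27000 kHz input clock against the 31250 kHz target */
	uint32_t n = 27000, d = 1000000 / 32;

	reduce_ratio(&n, &d);
	printf("NUMERATOR=%u DENOMINATOR=%u\n", n, d);
	return 0;
}
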
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
index ad03a0e1fc7..6f06a0713f0 100644
--- a/drivers/gpu/drm/nouveau/nv40_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c
@@ -26,10 +26,32 @@
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
-struct nv40_mpeg_engine {
+struct nv31_mpeg_engine {
struct nouveau_exec_engine base;
+ atomic_t refcount;
};
+
+static int
+nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
+
+ if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
+ return -EBUSY;
+
+ chan->engctx[engine] = (void *)0xdeadcafe;
+ return 0;
+}
+
+static void
+nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
+ atomic_dec(&pmpeg->refcount);
+ chan->engctx[engine] = NULL;
+}
+
static int
nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
@@ -81,7 +103,7 @@ nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
}
static int
-nv40_mpeg_object_new(struct nouveau_channel *chan, int engine,
+nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct drm_device *dev = chan->dev;
@@ -103,10 +125,10 @@ nv40_mpeg_object_new(struct nouveau_channel *chan, int engine,
}
static int
-nv40_mpeg_init(struct drm_device *dev, int engine)
+nv31_mpeg_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
+ struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
int i;
/* VPE init */
@@ -121,7 +143,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
/* PMPEG init */
nv_wr32(dev, 0x00b32c, 0x00000000);
nv_wr32(dev, 0x00b314, 0x00000100);
- nv_wr32(dev, 0x00b220, 0x00000044);
+ nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
nv_wr32(dev, 0x00b300, 0x02001ec1);
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
@@ -137,7 +159,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
}
static int
-nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
+nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
/*XXX: context save? */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
@@ -146,7 +168,7 @@ nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
}
static int
-nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
u32 inst = data << 4;
@@ -184,13 +206,17 @@ nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
}
static int
-nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst)
+nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ctx;
unsigned long flags;
int i;
+ /* hardcode drm channel id on nv3x, so swmthd lookup works */
+ if (dev_priv->card_type < NV_40)
+ return 0;
+
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
if (!dev_priv->channels.ptr[i])
@@ -205,7 +231,7 @@ nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst)
}
static void
-nv40_vpe_set_tile_region(struct drm_device *dev, int i)
+nv31_vpe_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
@@ -216,10 +242,10 @@ nv40_vpe_set_tile_region(struct drm_device *dev, int i)
}
static void
-nv40_mpeg_isr(struct drm_device *dev)
+nv31_mpeg_isr(struct drm_device *dev)
{
u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
- u32 chid = nv40_mpeg_isr_chid(dev, inst);
+ u32 chid = nv31_mpeg_isr_chid(dev, inst);
u32 stat = nv_rd32(dev, 0x00b100);
u32 type = nv_rd32(dev, 0x00b230);
u32 mthd = nv_rd32(dev, 0x00b234);
@@ -249,10 +275,10 @@ nv40_mpeg_isr(struct drm_device *dev)
}
static void
-nv40_vpe_isr(struct drm_device *dev)
+nv31_vpe_isr(struct drm_device *dev)
{
if (nv_rd32(dev, 0x00b100))
- nv40_mpeg_isr(dev);
+ nv31_mpeg_isr(dev);
if (nv_rd32(dev, 0x00b800)) {
u32 stat = nv_rd32(dev, 0x00b800);
@@ -262,9 +288,9 @@ nv40_vpe_isr(struct drm_device *dev)
}
static void
-nv40_mpeg_destroy(struct drm_device *dev, int engine)
+nv31_mpeg_destroy(struct drm_device *dev, int engine)
{
- struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
+ struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
nouveau_irq_unregister(dev, 0);
@@ -273,34 +299,41 @@ nv40_mpeg_destroy(struct drm_device *dev, int engine)
}
int
-nv40_mpeg_create(struct drm_device *dev)
+nv31_mpeg_create(struct drm_device *dev)
{
- struct nv40_mpeg_engine *pmpeg;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv31_mpeg_engine *pmpeg;
pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
if (!pmpeg)
return -ENOMEM;
-
- pmpeg->base.destroy = nv40_mpeg_destroy;
- pmpeg->base.init = nv40_mpeg_init;
- pmpeg->base.fini = nv40_mpeg_fini;
- pmpeg->base.context_new = nv40_mpeg_context_new;
- pmpeg->base.context_del = nv40_mpeg_context_del;
- pmpeg->base.object_new = nv40_mpeg_object_new;
+ atomic_set(&pmpeg->refcount, 0);
+
+ pmpeg->base.destroy = nv31_mpeg_destroy;
+ pmpeg->base.init = nv31_mpeg_init;
+ pmpeg->base.fini = nv31_mpeg_fini;
+ if (dev_priv->card_type < NV_40) {
+ pmpeg->base.context_new = nv31_mpeg_context_new;
+ pmpeg->base.context_del = nv31_mpeg_context_del;
+ } else {
+ pmpeg->base.context_new = nv40_mpeg_context_new;
+ pmpeg->base.context_del = nv40_mpeg_context_del;
+ }
+ pmpeg->base.object_new = nv31_mpeg_object_new;
/* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
* all VPE engines, for this driver's purposes the PMPEG engine
* will be treated as the "master" and handle the global VPE
* bits too
*/
- pmpeg->base.set_tile_region = nv40_vpe_set_tile_region;
- nouveau_irq_register(dev, 0, nv40_vpe_isr);
+ pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
+ nouveau_irq_register(dev, 0, nv31_vpe_isr);
NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
NVOBJ_CLASS(dev, 0x3174, MPEG);
- NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma);
+ NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
+ NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
+ NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
#if 0
NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
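
nv31 PMPEG has no real per-channel context, so the new nv31_mpeg_context_new() above admits a single user by bumping an atomic refcount only if it is still zero and returning -EBUSY otherwise. A user-space sketch of the same gate using C11 atomics in place of atomic_add_unless()/atomic_dec(); context_new() and context_del() are illustrative names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 0;

static int context_new(void)
{
	int expected = 0;

	/* take the single slot only if nobody holds it (0 -> 1) */
	if (!atomic_compare_exchange_strong(&refcount, &expected, 1))
		return -1;	/* -EBUSY in the driver */
	return 0;
}

static void context_del(void)
{
	atomic_fetch_sub(&refcount, 1);
}

int main(void)
{
	printf("first:  %d\n", context_new());	/* 0: got the engine */
	printf("second: %d\n", context_new());	/* -1: already in use */
	context_del();
	printf("third:  %d\n", context_new());	/* 0: free again */
	return 0;
}
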
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
new file mode 100644
index 00000000000..e676b0d5347
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+#include "nouveau_hw.h"
+
+#define min2(a,b) ((a) < (b) ? (a) : (b))
+
+static u32
+read_pll_1(struct drm_device *dev, u32 reg)
+{
+ u32 ctrl = nv_rd32(dev, reg + 0x00);
+ int P = (ctrl & 0x00070000) >> 16;
+ int N = (ctrl & 0x0000ff00) >> 8;
+ int M = (ctrl & 0x000000ff) >> 0;
+ u32 ref = 27000, clk = 0;
+
+ if (ctrl & 0x80000000)
+ clk = ref * N / M;
+
+ return clk >> P;
+}
+
+static u32
+read_pll_2(struct drm_device *dev, u32 reg)
+{
+ u32 ctrl = nv_rd32(dev, reg + 0x00);
+ u32 coef = nv_rd32(dev, reg + 0x04);
+ int N2 = (coef & 0xff000000) >> 24;
+ int M2 = (coef & 0x00ff0000) >> 16;
+ int N1 = (coef & 0x0000ff00) >> 8;
+ int M1 = (coef & 0x000000ff) >> 0;
+ int P = (ctrl & 0x00070000) >> 16;
+ u32 ref = 27000, clk = 0;
+
+ if ((ctrl & 0x80000000) && M1) {
+ clk = ref * N1 / M1;
+ if ((ctrl & 0x40000100) == 0x40000000) {
+ if (M2)
+ clk = clk * N2 / M2;
+ else
+ clk = 0;
+ }
+ }
+
+ return clk >> P;
+}
+
+static u32
+read_clk(struct drm_device *dev, u32 src)
+{
+ switch (src) {
+ case 3:
+ return read_pll_2(dev, 0x004000);
+ case 2:
+ return read_pll_1(dev, 0x004008);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int
+nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ u32 ctrl = nv_rd32(dev, 0x00c040);
+
+ perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
+ perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
+ perflvl->memory = read_pll_2(dev, 0x4020);
+ return 0;
+}
+
+struct nv40_pm_state {
+ u32 ctrl;
+ u32 npll_ctrl;
+ u32 npll_coef;
+ u32 spll;
+ u32 mpll_ctrl;
+ u32 mpll_coef;
+};
+
+static int
+nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+ u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
+{
+ struct nouveau_pll_vals coef;
+ int ret;
+
+ ret = get_pll_limits(dev, reg, pll);
+ if (ret)
+ return ret;
+
+ if (clk < pll->vco1.maxfreq)
+ pll->vco2.maxfreq = 0;
+
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+ if (ret == 0)
+ return -ERANGE;
+
+ *N1 = coef.N1;
+ *M1 = coef.M1;
+ if (N2 && M2) {
+ if (pll->vco2.maxfreq) {
+ *N2 = coef.N2;
+ *M2 = coef.M2;
+ } else {
+ *N2 = 1;
+ *M2 = 1;
+ }
+ }
+ *log2P = coef.log2P;
+ return 0;
+}
+
+void *
+nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct nv40_pm_state *info;
+ struct pll_lims pll;
+ int N1, N2, M1, M2, log2P;
+ int ret;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ /* core/geometric clock */
+ ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
+ &N1, &M1, &N2, &M2, &log2P);
+ if (ret < 0)
+ goto out;
+
+ if (N2 == M2) {
+ info->npll_ctrl = 0x80000100 | (log2P << 16);
+ info->npll_coef = (N1 << 8) | M1;
+ } else {
+ info->npll_ctrl = 0xc0000000 | (log2P << 16);
+ info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+ }
+
+ /* use the second PLL for shader/rop clock, if it differs from core */
+ if (perflvl->shader && perflvl->shader != perflvl->core) {
+ ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
+ &N1, &M1, NULL, NULL, &log2P);
+ if (ret < 0)
+ goto out;
+
+ info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+ info->ctrl = 0x00000223;
+ } else {
+ info->spll = 0x00000000;
+ info->ctrl = 0x00000333;
+ }
+
+ /* memory clock */
+ if (!perflvl->memory) {
+ info->mpll_ctrl = 0x00000000;
+ goto out;
+ }
+
+ ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
+ &N1, &M1, &N2, &M2, &log2P);
+ if (ret < 0)
+ goto out;
+
+ info->mpll_ctrl = 0x80000000 | (log2P << 16);
+ info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20;
+ if (N2 == M2) {
+ info->mpll_ctrl |= 0x00000100;
+ info->mpll_coef = (N1 << 8) | M1;
+ } else {
+ info->mpll_ctrl |= 0x40000000;
+ info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+ }
+
+out:
+ if (ret < 0) {
+ kfree(info);
+ info = ERR_PTR(ret);
+ }
+ return info;
+}
+
+static bool
+nv40_pm_gr_idle(void *data)
+{
+ struct drm_device *dev = data;
+
+ if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 !=
+ (nv_rd32(dev, 0x400760) & 0x0000000f))
+ return false;
+
+ if (nv_rd32(dev, 0x400700))
+ return false;
+
+ return true;
+}
+
+void
+nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv40_pm_state *info = pre_state;
+ unsigned long flags;
+ struct bit_entry M;
+ u32 crtc_mask = 0;
+ u8 sr1[2];
+ int i;
+
+ /* determine which CRTCs are active, fetch VGA_SR1 for each */
+ for (i = 0; i < 2; i++) {
+ u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000));
+ u32 cnt = 0;
+ do {
+ if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) {
+ nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
+ sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000));
+ if (!(sr1[i] & 0x20))
+ crtc_mask |= (1 << i);
+ break;
+ }
+ udelay(1);
+ } while (cnt++ < 32);
+ }
+
+ /* halt and idle engines */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
+ goto resume;
+ nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
+ goto resume;
+ nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
+ nv04_fifo_cache_pull(dev, false);
+
+ if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
+ goto resume;
+
+ /* set engine clocks */
+ nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
+ nv_wr32(dev, 0x004004, info->npll_coef);
+ nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl);
+ nv_mask(dev, 0x004008, 0xc007ffff, info->spll);
+ mdelay(5);
+ nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
+
+ if (!info->mpll_ctrl)
+ goto resume;
+
+ /* wait for vblank start on active crtcs, disable memory access */
+ for (i = 0; i < 2; i++) {
+ if (!(crtc_mask & (1 << i)))
+ continue;
+ nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
+ nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+ nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
+ nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+ }
+
+ /* prepare ram for reclocking */
+ nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */
+ nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
+ nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
+ nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+ nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+ /* change the PLL of each memory partition */
+ nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ case 0x41:
+ case 0x42:
+ case 0x47:
+ nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl);
+ nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl);
+ nv_wr32(dev, 0x004048, info->mpll_coef);
+ nv_wr32(dev, 0x004030, info->mpll_coef);
+ case 0x43:
+ case 0x49:
+ case 0x4b:
+ nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl);
+ nv_wr32(dev, 0x00403c, info->mpll_coef);
+ default:
+ nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl);
+ nv_wr32(dev, 0x004024, info->mpll_coef);
+ break;
+ }
+ udelay(100);
+ nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000);
+
+ /* re-enable normal operation of memory controller */
+ nv_wr32(dev, 0x1002dc, 0x00000000);
+ nv_mask(dev, 0x100210, 0x80000000, 0x80000000);
+ udelay(100);
+
+ /* execute memory reset script from vbios */
+ if (!bit_table(dev, 'M', &M))
+ nouveau_bios_init_exec(dev, ROM16(M.data[0]));
+
+ /* make sure we're in vblank (hopefully the same one as before), and
+ * then re-enable crtc memory access
+ */
+ for (i = 0; i < 2; i++) {
+ if (!(crtc_mask & (1 << i)))
+ continue;
+ nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+ nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
+ nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]);
+ }
+
+ /* resume engines */
+resume:
+ nv_wr32(dev, 0x003250, 0x00000001);
+ nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x003200, 0x00000001);
+ nv_wr32(dev, 0x002500, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ kfree(info);
+}
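
read_pll_1()/read_pll_2() in the new nv40_pm.c above recover a clock by multiplying the 27 MHz reference by N/M (twice, when the second PLL stage is in use) and shifting down by the post-divider. A minimal sketch of that arithmetic with the register decoding left out; pll_khz() is an illustrative name and the coefficients in main() are example values only:

#include <stdio.h>
#include <stdint.h>

/* reference * N1/M1, optionally * N2/M2, then divided by 2^P; all in kHz */
static uint32_t pll_khz(uint32_t ref, int n1, int m1, int n2, int m2, int p)
{
	uint32_t clk;

	if (!m1)
		return 0;
	clk = ref * n1 / m1;
	if (m2)
		clk = clk * n2 / m2;	/* second stage, when enabled */
	return clk >> p;
}

int main(void)
{
	/* e.g. 27 MHz reference, N1/M1 = 100/3, no second stage, P = 1 */
	printf("%u kHz\n", pll_khz(27000, 100, 3, 0, 0, 1));
	return 0;
}
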
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 5d989073ba6..882080e0b4f 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -329,8 +329,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(&nv_crtc->base);
- nv50_cursor_fini(nv_crtc);
-
nouveau_bo_unmap(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 9752c35bb84..adfc9b607a5 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -137,21 +137,3 @@ nv50_cursor_init(struct nouveau_crtc *nv_crtc)
nv_crtc->cursor.show = nv50_cursor_show;
return 0;
}
-
-void
-nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- int idx = nv_crtc->index;
-
- NV_DEBUG_KMS(dev, "\n");
-
- nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
- if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
- }
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index db1a5f4b711..d23ca00e7d6 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -247,6 +247,16 @@ static int nv50_display_disable(struct drm_device *dev)
}
}
+ for (i = 0; i < 2; i++) {
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+ NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+ NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+ }
+ }
+
nv50_evo_fini(dev);
for (i = 0; i < 3; i++) {
@@ -286,23 +296,6 @@ int nv50_display_create(struct drm_device *dev)
return -ENOMEM;
dev_priv->engine.display.priv = priv;
- /* init basic kernel modesetting */
- drm_mode_config_init(dev);
-
- /* Initialise some optional connector properties. */
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dithering_property(dev);
-
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
-
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
-
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
-
- dev->mode_config.fb_base = dev_priv->fb_phys;
-
/* Create CRTC objects */
for (i = 0; i < 2; i++)
nv50_crtc_create(dev, i);
@@ -364,8 +357,6 @@ nv50_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
- drm_mode_config_cleanup(dev);
-
nv50_display_disable(dev);
nouveau_irq_unregister(dev, 26);
kfree(disp);
@@ -698,7 +689,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
if (dcb->type == type && (dcb->or & (1 << or))) {
- nouveau_bios_run_display_table(dev, dcb, 0, -1);
+ nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
disp->irq.dcb = dcb;
goto ack;
}
@@ -711,37 +702,6 @@ ack:
}
static void
-nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
-{
- int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
- struct drm_encoder *encoder;
- uint32_t tmp, unk0 = 0, unk1 = 0;
-
- if (dcb->type != OUTPUT_DP)
- return;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb == dcb) {
- unk0 = nv_encoder->dp.unk0;
- unk1 = nv_encoder->dp.unk1;
- break;
- }
- }
-
- if (unk0 || unk1) {
- tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
- tmp &= 0xfffffe03;
- nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
-
- tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
- tmp &= 0xfef080c0;
- nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
- }
-}
-
-static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -753,7 +713,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
if (dcb) {
- nouveau_bios_run_display_table(dev, dcb, 0, -2);
+ nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
disp->irq.dcb = NULL;
}
@@ -837,9 +797,15 @@ nv50_display_unk20_handler(struct drm_device *dev)
}
script = nv50_display_script_select(dev, dcb, mc, pclk);
- nouveau_bios_run_display_table(dev, dcb, script, pclk);
+ nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
- nv50_display_unk20_dp_hack(dev, dcb);
+ if (type == OUTPUT_DP) {
+ int link = !(dcb->dpconf.sor.link & 1);
+ if ((mc & 0x000f0000) == 0x00020000)
+ nouveau_dp_tu_update(dev, or, link, pclk, 18);
+ else
+ nouveau_dp_tu_update(dev, or, link, pclk, 24);
+ }
if (dcb->type != OUTPUT_ANALOG) {
tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
@@ -904,7 +870,7 @@ nv50_display_unk40_handler(struct drm_device *dev)
if (!dcb)
goto ack;
- nouveau_bios_run_display_table(dev, dcb, script, -pclk);
+ nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
nv50_display_unk40_dp_set_tmds(dev, dcb);
ack:
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index d4f4206dad7..793a5ccca12 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -98,6 +98,37 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
}
int
+nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+ struct dcb_gpio_entry *gpio;
+ u32 v;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
+
+ v = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
+ v &= 0x00004000;
+ return (!!v == (gpio->state[1] & 1));
+}
+
+int
+nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+{
+ struct dcb_gpio_entry *gpio;
+ u32 v;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
+
+ v = gpio->state[state] ^ 2;
+
+ nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
+ return 0;
+}
+
+int
nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
void (*handler)(void *, int), void *data)
{
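
The nvd0_gpio_get()/nvd0_gpio_set() additions above treat each GPIO line as a 32-bit register at 0x00d610 + line * 4, reading the input level from bit 14 and driving the output through bits 13:12. A rough user-space sketch of that bit handling; the register array, gpio_get() and gpio_set() stand in for the MMIO accessors and DCB lookup, and the field layout is only what the hunk implies:

#include <stdio.h>
#include <stdint.h>

static uint32_t gpio_reg[32];	/* fake per-line register file for the sketch */

/* input level is bit 14; compare against the line's "on" polarity */
static int gpio_get(int line, uint8_t on_state)
{
	uint32_t v = gpio_reg[line] & 0x00004000;

	return (!!v == (on_state & 1));
}

/* drive value (state ^ 2) goes into bits 13:12, other bits preserved */
static void gpio_set(int line, uint8_t state_val)
{
	uint32_t v = (uint32_t)(state_val ^ 2) << 12;

	gpio_reg[line] = (gpio_reg[line] & ~0x00003000u) | (v & 0x00003000u);
}

int main(void)
{
	gpio_set(3, 1);			/* drive line 3 to its "on" value */
	gpio_reg[3] |= 0x00004000;	/* pretend the pin reads back high */
	printf("line 3 active: %d\n", gpio_get(3, 1));
	return 0;
}
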
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index d43c46caa76..ac601f7c4e1 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -120,70 +120,62 @@ nv50_graph_unload_context(struct drm_device *dev)
return 0;
}
-static void
-nv50_graph_init_reset(struct drm_device *dev)
-{
- uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
-}
-
-static void
-nv50_graph_init_intr(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
-}
-
-static void
-nv50_graph_init_regs__nv(struct drm_device *dev)
+static int
+nv50_graph_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t units = nv_rd32(dev, 0x1540);
+ struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
+ u32 units = nv_rd32(dev, 0x001540);
int i;
NV_DEBUG(dev, "\n");
+ /* master reset */
+ nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
+ nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
+
+ /* reset/enable traps and interrupts */
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400c04, 0xc0000000);
nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
-
for (i = 0; i < 16; i++) {
- if (units & 1 << i) {
- if (dev_priv->chipset < 0xa0) {
- nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
- } else {
- nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
- }
+ if (!(units & (1 << i)))
+ continue;
+
+ if (dev_priv->chipset < 0xa0) {
+ nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
+ } else {
+ nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
}
}
nv_wr32(dev, 0x400108, 0xffffffff);
-
- nv_wr32(dev, 0x400824, 0x00004000);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
nv_wr32(dev, 0x400500, 0x00010001);
-}
-
-static void
-nv50_graph_init_zcull(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- NV_DEBUG(dev, "\n");
+ /* upload context program, initialise ctxctl defaults */
+ nv_wr32(dev, 0x400324, 0x00000000);
+ for (i = 0; i < pgraph->ctxprog_size; i++)
+ nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
+ nv_wr32(dev, 0x400824, 0x00000000);
+ nv_wr32(dev, 0x400828, 0x00000000);
+ nv_wr32(dev, 0x40082c, 0x00000000);
+ nv_wr32(dev, 0x400830, 0x00000000);
+ nv_wr32(dev, 0x400724, 0x00000000);
+ nv_wr32(dev, 0x40032c, 0x00000000);
+ nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
+
+ /* some unknown zcull magic */
switch (dev_priv->chipset & 0xf0) {
case 0x50:
case 0x80:
@@ -212,43 +204,7 @@ nv50_graph_init_zcull(struct drm_device *dev)
nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
}
-}
-
-static int
-nv50_graph_init_ctxctl(struct drm_device *dev)
-{
- struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
- int i;
-
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < pgraph->ctxprog_size; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
-
- nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
- nv_wr32(dev, 0x400320, 4);
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
- return 0;
-}
-
-static int
-nv50_graph_init(struct drm_device *dev, int engine)
-{
- int ret;
-
- NV_DEBUG(dev, "\n");
-
- nv50_graph_init_reset(dev);
- nv50_graph_init_regs__nv(dev);
- nv50_graph_init_zcull(dev);
-
- ret = nv50_graph_init_ctxctl(dev);
- if (ret)
- return ret;
- nv50_graph_init_intr(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index de9abff12b9..4b46d696856 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -40,6 +40,12 @@
#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
#define CP_FLAG_UNK0B_CLEAR 0
#define CP_FLAG_UNK0B_SET 1
+#define CP_FLAG_XFER_SWITCH ((0 * 32) + 0xe)
+#define CP_FLAG_XFER_SWITCH_DISABLE 0
+#define CP_FLAG_XFER_SWITCH_ENABLE 1
+#define CP_FLAG_STATE ((0 * 32) + 0x1c)
+#define CP_FLAG_STATE_STOPPED 0
+#define CP_FLAG_STATE_RUNNING 1
#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
#define CP_FLAG_UNK1D_CLEAR 0
#define CP_FLAG_UNK1D_SET 1
@@ -194,6 +200,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
"the devs.\n");
return -ENOSYS;
}
+
+ cp_set (ctx, STATE, RUNNING);
+ cp_set (ctx, XFER_SWITCH, ENABLE);
/* decide whether we're loading/unloading the context */
cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
@@ -260,6 +269,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
cp_name(ctx, cp_exit);
cp_set (ctx, USER_SAVE, NOT_PENDING);
cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_set (ctx, XFER_SWITCH, DISABLE);
+ cp_set (ctx, STATE, STOPPED);
cp_out (ctx, CP_END);
ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
@@ -590,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x1c, 0x00880000);
break;
case 0x86:
- gr_def(ctx, offset + 0x1c, 0x008c0000);
+ gr_def(ctx, offset + 0x1c, 0x018c0000);
break;
case 0x92:
case 0x96:
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 8a2810011bd..3d5a86b9828 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -115,15 +115,15 @@ nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
BIT_M.version == 1 && BIT_M.length >= 0x0b) {
script = ROM16(BIT_M.data[0x05]);
if (script)
- nouveau_bios_run_init_table(dev, script, NULL);
+ nouveau_bios_run_init_table(dev, script, NULL, -1);
script = ROM16(BIT_M.data[0x07]);
if (script)
- nouveau_bios_run_init_table(dev, script, NULL);
+ nouveau_bios_run_init_table(dev, script, NULL, -1);
script = ROM16(BIT_M.data[0x09]);
if (script)
- nouveau_bios_run_init_table(dev, script, NULL);
+ nouveau_bios_run_init_table(dev, script, NULL, -1);
- nouveau_bios_run_init_table(dev, perflvl->memscript, NULL);
+ nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
}
if (state->type == PLL_MEMORY) {
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ffe8b483b7b..2633aa8554e 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -124,7 +124,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
if (mode == DRM_MODE_DPMS_ON) {
u8 status = DP_SET_POWER_D0;
nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
- nouveau_dp_link_train(encoder);
+ nouveau_dp_link_train(encoder, nv_encoder->dp.datarate);
} else {
u8 status = DP_SET_POWER_D3;
nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
@@ -187,14 +187,13 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
uint32_t mode_ctl = 0;
int ret;
NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
nv_encoder->or, nv_encoder->dcb->type, crtc->index);
- nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
switch (nv_encoder->dcb->type) {
case OUTPUT_TMDS:
if (nv_encoder->dcb->sorconf.link & 1) {
@@ -206,7 +205,15 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode_ctl = 0x0200;
break;
case OUTPUT_DP:
- mode_ctl |= (nv_encoder->dp.mc_unknown << 16);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->base.display_info.bpc == 6) {
+ nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
+ mode_ctl |= 0x00020000;
+ } else {
+ nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
+ mode_ctl |= 0x00050000;
+ }
+
if (nv_encoder->dcb->sorconf.link & 1)
mode_ctl |= 0x00000800;
else
@@ -227,6 +234,8 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while connecting SOR\n");
@@ -313,31 +322,6 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
- if (nv_encoder->dcb->type == OUTPUT_DP) {
- int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
- uint32_t tmp;
-
- tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
- if (!tmp)
- tmp = nv_rd32(dev, 0x610798 + (or * 8));
-
- switch ((tmp & 0x00000f00) >> 8) {
- case 8:
- case 9:
- nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
- tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
- nv_encoder->dp.unk0 = tmp & 0x000001fc;
- tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
- nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
- break;
- default:
- break;
- }
-
- if (!nv_encoder->dp.mc_unknown)
- nv_encoder->dp.mc_unknown = 5;
- }
-
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
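
For context on the datarate value the hunk above now passes to nouveau_dp_link_train(): the mode clock is in kHz and 18/24 are presumably 3 x bpc bits per pixel, so clock * bpp / 8 gives the payload rate the link has to carry. The standalone sketch below walks through that arithmetic; it is illustrative only, not driver code.

/* Illustrative sketch of the bpc-dependent DP datarate math added above.
 * Assumes the mode clock is in kHz and pixels carry 3 colour components. */
#include <stdio.h>

static unsigned int dp_datarate(unsigned int clock_khz, unsigned int bpc)
{
	unsigned int bpp = 3 * bpc;          /* 18 for 6 bpc, 24 for 8 bpc */
	return clock_khz * bpp / 8;          /* payload rate, kB/s */
}

int main(void)
{
	/* e.g. a 148500 kHz (1920x1080@60) pixel clock */
	printf("6 bpc: %u\n", dp_datarate(148500, 6));   /* 334125 */
	printf("8 bpc: %u\n", dp_datarate(148500, 8));   /* 445500 */
	return 0;
}
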
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index af32daecd1e..2e45e57fd86 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,7 +51,7 @@ void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+ struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
struct nouveau_mm_node *this;
struct nouveau_mem *mem;
@@ -82,7 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+ struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int comp = (memtype & 0x300) >> 8;
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev)
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = ((r4 & 0x01000000) ? 8 : 4);
+ banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
rowsize = parts * banks * (1 << colbits) * 8;
predicted = rowsize << rowbitsa;
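
The one-line change above widens the bank-count decode from a single bit to the two-bit field at bits 25:24 of r4, so the result can be 4, 8, 16 or 32 banks rather than only 4 or 8. A standalone sketch of the new decode (illustrative only, not driver code):

/* Illustrative only: the widened bank-count decode from the hunk above. */
#include <stdio.h>
#include <stdint.h>

static unsigned int decode_banks(uint32_t r4)
{
	return 1u << (((r4 & 0x03000000) >> 24) + 2);
}

int main(void)
{
	uint32_t field;
	for (field = 0; field < 4; field++)          /* 4, 8, 16, 32 banks */
		printf("field %u -> %u banks\n",
		       (unsigned)field, decode_banks(field << 24));
	return 0;
}
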
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index e4b2b9e934b..618c144b7a3 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -27,178 +27,316 @@
#include "nouveau_bios.h"
#include "nouveau_pm.h"
-/* This is actually a lot more complex than it appears here, but hopefully
- * this should be able to deal with what the VBIOS leaves for us..
- *
- * If not, well, I'll jump off that bridge when I come to it.
- */
+static u32 read_clk(struct drm_device *, int, bool);
+static u32 read_pll(struct drm_device *, int, u32);
-struct nva3_pm_state {
- enum pll_types type;
- u32 src0;
- u32 src1;
- u32 ctrl;
- u32 coef;
- u32 old_pnm;
- u32 new_pnm;
- u32 new_div;
-};
+static u32
+read_vco(struct drm_device *dev, int clk)
+{
+ u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
+ if ((sctl & 0x00000030) != 0x00000030)
+ return read_pll(dev, 0x41, 0x00e820);
+ return read_pll(dev, 0x42, 0x00e8a0);
+}
-static int
-nva3_pm_pll_offset(u32 id)
+static u32
+read_clk(struct drm_device *dev, int clk, bool ignore_en)
{
- static const u32 pll_map[] = {
- 0x00, PLL_CORE,
- 0x01, PLL_SHADER,
- 0x02, PLL_MEMORY,
- 0x00, 0x00
- };
- const u32 *map = pll_map;
-
- while (map[1]) {
- if (id == map[1])
- return map[0];
- map += 2;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 sctl, sdiv, sclk;
+
+ /* refclk for the 0xe8xx plls is a fixed frequency */
+ if (clk >= 0x40) {
+ if (dev_priv->chipset == 0xaf) {
+ /* no joke.. seriously.. sigh.. */
+ return nv_rd32(dev, 0x00471c) * 1000;
+ }
+
+ return dev_priv->crystal;
}
- return -ENOENT;
+ sctl = nv_rd32(dev, 0x4120 + (clk * 4));
+ if (!ignore_en && !(sctl & 0x00000100))
+ return 0;
+
+ switch (sctl & 0x00003000) {
+ case 0x00000000:
+ return dev_priv->crystal;
+ case 0x00002000:
+ if (sctl & 0x00000040)
+ return 108000;
+ return 100000;
+ case 0x00003000:
+ sclk = read_vco(dev, clk);
+ sdiv = ((sctl & 0x003f0000) >> 16) + 2;
+ return (sclk * 2) / sdiv;
+ default:
+ return 0;
+ }
}
-int
-nva3_pm_clock_get(struct drm_device *dev, u32 id)
+static u32
+read_pll(struct drm_device *dev, int clk, u32 pll)
+{
+ u32 ctrl = nv_rd32(dev, pll + 0);
+ u32 sclk = 0, P = 1, N = 1, M = 1;
+
+ if (!(ctrl & 0x00000008)) {
+ if (ctrl & 0x00000001) {
+ u32 coef = nv_rd32(dev, pll + 4);
+ M = (coef & 0x000000ff) >> 0;
+ N = (coef & 0x0000ff00) >> 8;
+ P = (coef & 0x003f0000) >> 16;
+
+ /* no post-divider on these.. */
+ if ((pll & 0x00ff00) == 0x00e800)
+ P = 1;
+
+ sclk = read_clk(dev, 0x00 + clk, false);
+ }
+ } else {
+ sclk = read_clk(dev, 0x10 + clk, false);
+ }
+
+ return sclk * N / (M * P);
+}
+
+struct creg {
+ u32 clk;
+ u32 pll;
+};
+
+static int
+calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
{
- u32 src0, src1, ctrl, coef;
- struct pll_lims pll;
- int ret, off;
- int P, N, M;
+ struct pll_lims limits;
+ u32 oclk, sclk, sdiv;
+ int P, N, M, diff;
+ int ret;
+
+ reg->pll = 0;
+ reg->clk = 0;
+ if (!khz) {
+ NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk);
+ return 0;
+ }
- ret = get_pll_limits(dev, id, &pll);
+ switch (khz) {
+ case 27000:
+ reg->clk = 0x00000100;
+ return khz;
+ case 100000:
+ reg->clk = 0x00002100;
+ return khz;
+ case 108000:
+ reg->clk = 0x00002140;
+ return khz;
+ default:
+ sclk = read_vco(dev, clk);
+ sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
+ /* if the clock has a PLL attached, and we can get a within
+ * [-2, 3) MHz of a divider, we'll disable the PLL and use
+ * the divider instead.
+ *
+ * divider can go as low as 2, limited here because NVIDIA
+ * and the VBIOS on my NVA8 seem to prefer using the PLL
+ * for 810MHz - is there a good reason?
+ */
+ if (sdiv > 4) {
+ oclk = (sclk * 2) / sdiv;
+ diff = khz - oclk;
+ if (!pll || (diff >= -2000 && diff < 3000)) {
+ reg->clk = (((sdiv - 2) << 16) | 0x00003100);
+ return oclk;
+ }
+ }
+
+ if (!pll) {
+ NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk);
+ return -ERANGE;
+ }
+
+ break;
+ }
+
+ ret = get_pll_limits(dev, pll, &limits);
if (ret)
return ret;
- off = nva3_pm_pll_offset(id);
- if (off < 0)
- return off;
+ limits.refclk = read_clk(dev, clk - 0x10, true);
+ if (!limits.refclk)
+ return -EINVAL;
+
+ ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
+ if (ret >= 0) {
+ reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
+ reg->pll = (P << 16) | (N << 8) | M;
+ }
+ return ret;
+}
+
+static void
+prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
+{
+ const u32 src0 = 0x004120 + (clk * 4);
+ const u32 src1 = 0x004160 + (clk * 4);
+ const u32 ctrl = pll + 0;
+ const u32 coef = pll + 4;
+ u32 cntl;
+
+ if (!reg->clk && !reg->pll) {
+ NV_DEBUG(dev, "no clock for %02x\n", clk);
+ return;
+ }
- src0 = nv_rd32(dev, 0x4120 + (off * 4));
- src1 = nv_rd32(dev, 0x4160 + (off * 4));
- ctrl = nv_rd32(dev, pll.reg + 0);
- coef = nv_rd32(dev, pll.reg + 4);
- NV_DEBUG(dev, "PLL %02x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- id, src0, src1, ctrl, coef);
+ cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
+ if (reg->pll) {
+ nv_mask(dev, src0, 0x00000101, 0x00000101);
+ nv_wr32(dev, coef, reg->pll);
+ nv_wr32(dev, ctrl, cntl | 0x00000015);
+ nv_mask(dev, src1, 0x00000100, 0x00000000);
+ nv_mask(dev, src1, 0x00000001, 0x00000000);
+ } else {
+ nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
+ nv_wr32(dev, ctrl, cntl | 0x0000001d);
+ nv_mask(dev, ctrl, 0x00000001, 0x00000000);
+ nv_mask(dev, src0, 0x00000100, 0x00000000);
+ nv_mask(dev, src0, 0x00000001, 0x00000000);
+ }
+}
- if (ctrl & 0x00000008) {
- u32 div = ((src1 & 0x003c0000) >> 18) + 1;
- return (pll.refclk * 2) / div;
+static void
+prog_clk(struct drm_device *dev, int clk, struct creg *reg)
+{
+ if (!reg->clk) {
+ NV_DEBUG(dev, "no clock for %02x\n", clk);
+ return;
}
- P = (coef & 0x003f0000) >> 16;
- N = (coef & 0x0000ff00) >> 8;
- M = (coef & 0x000000ff);
- return pll.refclk * N / M / P;
+ nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
+}
+
+int
+nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ perflvl->core = read_pll(dev, 0x00, 0x4200);
+ perflvl->shader = read_pll(dev, 0x01, 0x4220);
+ perflvl->memory = read_pll(dev, 0x02, 0x4000);
+ perflvl->unka0 = read_clk(dev, 0x20, false);
+ perflvl->vdec = read_clk(dev, 0x21, false);
+ perflvl->daemon = read_clk(dev, 0x25, false);
+ perflvl->copy = perflvl->core;
+ return 0;
}
+struct nva3_pm_state {
+ struct creg nclk;
+ struct creg sclk;
+ struct creg mclk;
+ struct creg vdec;
+ struct creg unka0;
+};
+
void *
-nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u32 id, int khz)
+nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct nva3_pm_state *pll;
- struct pll_lims limits;
- int N, M, P, diff;
- int ret, off;
+ struct nva3_pm_state *info;
+ int ret;
- ret = get_pll_limits(dev, id, &limits);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
if (ret < 0)
- return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ goto out;
- off = nva3_pm_pll_offset(id);
- if (id < 0)
- return ERR_PTR(-EINVAL);
+ ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
+ if (ret < 0)
+ goto out;
+ ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
+ if (ret < 0)
+ goto out;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll)
- return ERR_PTR(-ENOMEM);
- pll->type = id;
- pll->src0 = 0x004120 + (off * 4);
- pll->src1 = 0x004160 + (off * 4);
- pll->ctrl = limits.reg + 0;
- pll->coef = limits.reg + 4;
-
- /* If target clock is within [-2, 3) MHz of a divisor, we'll
- * use that instead of calculating MNP values
- */
- pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16);
- if (pll->new_div) {
- diff = khz - ((limits.refclk * 2) / pll->new_div);
- if (diff < -2000 || diff >= 3000)
- pll->new_div = 0;
- }
+ ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
+ if (ret < 0)
+ goto out;
- if (!pll->new_div) {
- ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
- if (ret < 0)
- return ERR_PTR(ret);
+ ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
+ if (ret < 0)
+ goto out;
- pll->new_pnm = (P << 16) | (N << 8) | M;
- pll->new_div = 2 - 1;
- } else {
- pll->new_pnm = 0;
- pll->new_div--;
+out:
+ if (ret < 0) {
+ kfree(info);
+ info = ERR_PTR(ret);
}
+ return info;
+}
+
+static bool
+nva3_pm_grcp_idle(void *data)
+{
+ struct drm_device *dev = data;
- if ((nv_rd32(dev, pll->src1) & 0x00000101) != 0x00000101)
- pll->old_pnm = nv_rd32(dev, pll->coef);
- return pll;
+ if (!(nv_rd32(dev, 0x400304) & 0x00000001))
+ return true;
+ if (nv_rd32(dev, 0x400308) == 0x0050001c)
+ return true;
+ return false;
}
void
-nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
+nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
- struct nva3_pm_state *pll = pre_state;
- u32 ctrl = 0;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nva3_pm_state *info = pre_state;
+ unsigned long flags;
- /* For the memory clock, NVIDIA will build a "script" describing
- * the reclocking process and ask PDAEMON to execute it.
- */
- if (pll->type == PLL_MEMORY) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
- nv_wr32(dev, 0x004018, 0x00001000);
- ctrl = 0x18000100;
+ /* prevent any new grctx switches from starting */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x400324, 0x00000000);
+ nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
+ /* wait for any pending grctx switches to complete */
+ if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
+ NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
+ goto cleanup;
}
-
- if (pll->old_pnm || !pll->new_pnm) {
- nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 |
- (pll->new_div << 18));
- nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
- nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
+ /* freeze PFIFO */
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
+ if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
+ NV_ERROR(dev, "pm: fifo didn't go idle\n");
+ goto cleanup;
}
- if (pll->new_pnm) {
- nv_mask(dev, pll->src0, 0x00000101, 0x00000101);
- nv_wr32(dev, pll->coef, pll->new_pnm);
- nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
- nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000);
- nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010);
- nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl);
- nv_mask(dev, pll->src1, 0x00000100, 0x00000000);
- nv_mask(dev, pll->src1, 0x00000001, 0x00000000);
- if (pll->type == PLL_MEMORY)
- nv_wr32(dev, 0x4018, 0x10005000);
- } else {
- nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
- nv_mask(dev, pll->src0, 0x00000100, 0x00000000);
- nv_mask(dev, pll->src0, 0x00000001, 0x00000000);
- if (pll->type == PLL_MEMORY)
- nv_wr32(dev, 0x4018, 0x1000d000);
- }
+ prog_pll(dev, 0x00, 0x004200, &info->nclk);
+ prog_pll(dev, 0x01, 0x004220, &info->sclk);
+ prog_clk(dev, 0x20, &info->unka0);
+ prog_clk(dev, 0x21, &info->vdec);
- if (pll->type == PLL_MEMORY) {
+ if (info->mclk.clk || info->mclk.pll) {
+ nv_wr32(dev, 0x100210, 0);
+ nv_wr32(dev, 0x1002dc, 1);
+ nv_wr32(dev, 0x004018, 0x00001000);
+ prog_pll(dev, 0x02, 0x004000, &info->mclk);
+ if (nv_rd32(dev, 0x4000) & 0x00000008)
+ nv_wr32(dev, 0x004018, 0x1000d000);
+ else
+ nv_wr32(dev, 0x004018, 0x10005000);
nv_wr32(dev, 0x1002dc, 0);
nv_wr32(dev, 0x100210, 0x80000000);
}
- kfree(pll);
+cleanup:
+ /* unfreeze PFIFO */
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+ /* restore ctxprog to normal */
+ nv_wr32(dev, 0x400324, 0x00000000);
+ nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
+ /* unblock it if necessary */
+ if (nv_rd32(dev, 0x400308) == 0x0050001c)
+ nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ kfree(info);
}
-
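
Two pieces of arithmetic in the rewritten nva3 code are worth spelling out. read_pll() reports refclk * N / (M * P), with P forced to 1 for the 0xe8xx PLLs, and calc_clk() first tries a plain divider: sdiv = min(2 * vco / (khz - 2999), 65), accepting 2 * vco / sdiv whenever it lands within [-2, 3) MHz of the target (or unconditionally when no PLL is attached), exactly as the in-code comment describes. The standalone sketch below walks through the divider-versus-PLL decision with a hypothetical VCO; it is illustrative only, not driver code.

/* Illustrative sketch of the divider-vs-PLL choice made by calc_clk() above.
 * 'vco' stands in for the value read_vco() would return; clocks are in kHz. */
#include <stdio.h>

/* returns the divider-produced clock in kHz, or 0 if the PLL must be used */
static unsigned int try_divider(unsigned int vco, unsigned int khz)
{
	unsigned int sdiv = (vco * 2) / (khz - 2999);
	int diff;

	if (sdiv > 65)
		sdiv = 65;
	if (sdiv <= 4)
		return 0;                        /* divider out of range */

	diff = (int)khz - (int)((vco * 2) / sdiv);
	if (diff >= -2000 && diff < 3000)
		return (vco * 2) / sdiv;         /* close enough, skip the PLL */
	return 0;
}

int main(void)
{
	unsigned int vco = 2025000;              /* hypothetical 2.025 GHz VCO */
	printf("405000 kHz -> %u\n", try_divider(vco, 405000));  /* 405000 */
	printf("700000 kHz -> %u\n", try_divider(vco, 700000));  /* 0: use PLL */
	return 0;
}
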
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 08e6b118f02..5bf55038fd9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -32,6 +32,30 @@ struct nvc0_fb_priv {
dma_addr_t r100c10;
};
+static inline void
+nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
+{
+ u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
+ u32 stat = nv_rd32(dev, subp_base + 0x020);
+
+ if (stat) {
+ NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
+ nv_wr32(dev, subp_base + 0x020, stat);
+ }
+}
+
+static void
+nvc0_mfb_isr(struct drm_device *dev)
+{
+ u32 units = nv_rd32(dev, 0x00017c);
+ while (units) {
+ u32 subp, unit = ffs(units) - 1;
+ for (subp = 0; subp < 2; subp++)
+ nvc0_mfb_subp_isr(dev, unit, subp);
+ units &= ~(1 << unit);
+ }
+}
+
static void
nvc0_fb_destroy(struct drm_device *dev)
{
@@ -39,6 +63,8 @@ nvc0_fb_destroy(struct drm_device *dev)
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nvc0_fb_priv *priv = pfb->priv;
+ nouveau_irq_unregister(dev, 25);
+
if (priv->r100c10_page) {
pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -74,6 +100,7 @@ nvc0_fb_create(struct drm_device *dev)
return -EFAULT;
}
+ nouveau_irq_register(dev, 25, nvc0_mfb_isr);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 6f9f341c3e8..dcbe0d5d024 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -322,7 +322,7 @@ nvc0_fifo_init(struct drm_device *dev)
}
/* PSUBFIFO[n] */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < priv->spoon_nr; i++) {
nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5b2f6f42046..a74e501afd2 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -23,6 +23,7 @@
*/
#include <linux/firmware.h>
+#include <linux/module.h>
#include "drmP.h"
@@ -156,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i = 0, gpc, tp, ret;
- u32 magic;
ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
&grch->unk408004);
@@ -206,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
nv_wo32(grch->mmio, i++ * 4, 0x80000018);
- magic = 0x02180000;
- nv_wo32(grch->mmio, i++ * 4, 0x00405830);
- nv_wo32(grch->mmio, i++ * 4, magic);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
- u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
- nv_wo32(grch->mmio, i++ * 4, reg);
- nv_wo32(grch->mmio, i++ * 4, magic);
+ if (dev_priv->chipset != 0xc1) {
+ u32 magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ u32 reg = TP_UNIT(gpc, tp, 0x520);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ magic += 0x0324;
+ }
+ }
+ } else {
+ u32 magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
+ nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
+ nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ u32 reg = TP_UNIT(gpc, tp, 0x520);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
+ magic += 0x0324;
+ }
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ u32 reg = TP_UNIT(gpc, tp, 0x544);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ magic += 0x0324;
+ }
}
}
@@ -390,7 +414,7 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
}
nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
- nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
+ nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
}
static void
@@ -700,22 +724,6 @@ nvc0_graph_isr(struct drm_device *dev)
nv_wr32(dev, 0x400500, 0x00010001);
}
-static void
-nvc0_runk140_isr(struct drm_device *dev)
-{
- u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
-
- while (units) {
- u32 unit = ffs(units) - 1;
- u32 reg = 0x140000 + unit * 0x2000;
- u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
- u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
-
- NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
- units &= ~(1 << unit);
- }
-}
-
static int
nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
struct nvc0_graph_fuc *fuc)
@@ -764,7 +772,6 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
}
nouveau_irq_unregister(dev, 12);
- nouveau_irq_unregister(dev, 25);
nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -803,7 +810,6 @@ nvc0_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
nouveau_irq_register(dev, 12, nvc0_graph_isr);
- nouveau_irq_register(dev, 25, nvc0_runk140_isr);
if (nouveau_ctxfw) {
NV_INFO(dev, "PGRAPH: using external firmware\n");
@@ -864,6 +870,9 @@ nvc0_graph_create(struct drm_device *dev)
case 0xce: /* 4/4/0/0, 4 */
priv->magic_not_rop_nr = 0x03;
break;
+ case 0xcf: /* 4/0/0/0, 3 */
+ priv->magic_not_rop_nr = 0x03;
+ break;
}
if (!priv->magic_not_rop_nr) {
@@ -889,20 +898,3 @@ error:
nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
return ret;
}
-
-MODULE_FIRMWARE("nouveau/nvc0_fuc409c");
-MODULE_FIRMWARE("nouveau/nvc0_fuc409d");
-MODULE_FIRMWARE("nouveau/nvc0_fuc41ac");
-MODULE_FIRMWARE("nouveau/nvc0_fuc41ad");
-MODULE_FIRMWARE("nouveau/nvc3_fuc409c");
-MODULE_FIRMWARE("nouveau/nvc3_fuc409d");
-MODULE_FIRMWARE("nouveau/nvc3_fuc41ac");
-MODULE_FIRMWARE("nouveau/nvc3_fuc41ad");
-MODULE_FIRMWARE("nouveau/nvc4_fuc409c");
-MODULE_FIRMWARE("nouveau/nvc4_fuc409d");
-MODULE_FIRMWARE("nouveau/nvc4_fuc41ac");
-MODULE_FIRMWARE("nouveau/nvc4_fuc41ad");
-MODULE_FIRMWARE("nouveau/fuc409c");
-MODULE_FIRMWARE("nouveau/fuc409d");
-MODULE_FIRMWARE("nouveau/fuc41ac");
-MODULE_FIRMWARE("nouveau/fuc41ad");
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 55689e99728..636fe9812f7 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -82,6 +82,7 @@ nvc0_graph_class(struct drm_device *dev)
case 0xc3:
case 0xc4:
case 0xce: /* guess, mmio trace shows only 0x9097 state */
+ case 0xcf: /* guess, mmio trace shows only 0x9097 state */
return 0x9097;
case 0xc1:
return 0x9197;
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 31018eaf527..96b0b93d94c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1678,7 +1678,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000);
- nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048
+ if (chipset == 0xce || chipset == 0xcf)
+ nv_wr32(dev, 0x419cb0, 0x00020048);
+ else
+ nv_wr32(dev, 0x419cb0, 0x00060048);
nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183);
nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
@@ -1783,11 +1786,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x40587c, 0x00000000);
if (1) {
- const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0,
- 16, 0, 0, 0, 0, 0, 8, 0 };
- u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
- u8 tpnr[GPC_MAX];
- u8 data[TP_MAX];
+ u8 tpnr[GPC_MAX], data[TP_MAX];
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
memset(data, 0x1f, sizeof(data));
@@ -1801,7 +1800,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
data[tp] = gpc;
}
- for (i = 0; i < max / 4; i++)
+ for (i = 0; i < 4; i++)
nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
}
@@ -1813,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
/* calculate first set of magics */
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ gpc = -1;
for (tp = 0; tp < priv->tp_total; tp++) {
do {
gpc = (gpc + 1) % priv->gpc_nr;
@@ -1862,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
if (1) {
u32 tp_mask = 0, tp_set = 0;
- u8 tpnr[GPC_MAX];
+ u8 tpnr[GPC_MAX], a, b;
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
for (gpc = 0; gpc < priv->gpc_nr; gpc++)
tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
- gpc = -1;
- for (i = 0, gpc = -1; i < 32; i++) {
- int ltp = i * (priv->tp_total - 1) / 32;
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tp_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tp = priv->tp_nr[gpc] - tpnr[gpc]--;
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpnr[gpc]);
- tp = priv->tp_nr[gpc] - tpnr[gpc]--;
-
- tp_set |= 1 << ((gpc * 8) + tp);
+ tp_set |= 1 << ((gpc * 8) + tp);
+ }
- do {
- nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
- tp_set ^= tp_mask;
- nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
- tp_set ^= tp_mask;
- } while (ltp == (++i * (priv->tp_total - 1) / 32));
- i--;
+ nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
index 0ec2add72a7..06f5e26d1e0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -77,6 +77,11 @@ chipsets:
.b16 nvc0_gpc_mmio_tail
.b16 nvc0_tpc_mmio_head
.b16 nvc3_tpc_mmio_tail
+.b8 0xcf 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvcf_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
@@ -134,8 +139,9 @@ mmctx_data(0x000750, 2)
nvc0_tpc_mmio_tail:
mmctx_data(0x000758, 1)
mmctx_data(0x0002c4, 1)
-mmctx_data(0x0004bc, 1)
mmctx_data(0x0006e0, 1)
+nvcf_tpc_mmio_tail:
+mmctx_data(0x0004bc, 1)
nvc3_tpc_mmio_tail:
mmctx_data(0x000544, 1)
nvc1_tpc_mmio_tail:
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
index 1896c898f5b..6f820324480 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -25,23 +25,26 @@ uint32_t nvc0_grgpc_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x011000b0,
- 0x01640114,
+ 0x011c00bc,
+ 0x01700120,
0x000000c1,
- 0x011400b0,
- 0x01780114,
+ 0x012000bc,
+ 0x01840120,
0x000000c3,
- 0x011000b0,
- 0x01740114,
+ 0x011c00bc,
+ 0x01800120,
0x000000c4,
- 0x011000b0,
- 0x01740114,
+ 0x011c00bc,
+ 0x01800120,
0x000000c8,
- 0x011000b0,
- 0x01640114,
+ 0x011c00bc,
+ 0x01700120,
0x000000ce,
- 0x011000b0,
- 0x01740114,
+ 0x011c00bc,
+ 0x01800120,
+ 0x000000cf,
+ 0x011c00bc,
+ 0x017c0120,
0x00000000,
0x00000380,
0x14000400,
@@ -90,8 +93,8 @@ uint32_t nvc0_grgpc_data[] = {
0x04000750,
0x00000758,
0x000002c4,
- 0x000004bc,
0x000006e0,
+ 0x000004bc,
0x00000544,
};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
index a1a599124cf..e4f8c7e89dd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -56,6 +56,9 @@ chipsets:
.b8 0xce 0 0 0
.b16 nvc0_hub_mmio_head
.b16 nvc0_hub_mmio_tail
+.b8 0xcf 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
.b8 0 0 0 0
nvc0_hub_mmio_head:
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
index b3b541b6d04..241d3263f1e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -23,17 +23,19 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x012c0090,
+ 0x01340098,
0x000000c1,
- 0x01300090,
+ 0x01380098,
0x000000c3,
- 0x012c0090,
+ 0x01340098,
0x000000c4,
- 0x012c0090,
+ 0x01340098,
0x000000c8,
- 0x012c0090,
+ 0x01340098,
0x000000ce,
- 0x012c0090,
+ 0x01340098,
+ 0x000000cf,
+ 0x01340098,
0x00000000,
0x0417e91c,
0x04400204,
@@ -190,8 +192,6 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nvc0_grhub_code[] = {
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
new file mode 100644
index 00000000000..929aded35cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+static u32 read_div(struct drm_device *, int, u32, u32);
+static u32 read_pll(struct drm_device *, u32);
+
+static u32
+read_vco(struct drm_device *dev, u32 dsrc)
+{
+ u32 ssrc = nv_rd32(dev, dsrc);
+ if (!(ssrc & 0x00000100))
+ return read_pll(dev, 0x00e800);
+ return read_pll(dev, 0x00e820);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 pll)
+{
+ u32 ctrl = nv_rd32(dev, pll + 0);
+ u32 coef = nv_rd32(dev, pll + 4);
+ u32 P = (coef & 0x003f0000) >> 16;
+ u32 N = (coef & 0x0000ff00) >> 8;
+ u32 M = (coef & 0x000000ff) >> 0;
+ u32 sclk, doff;
+
+ if (!(ctrl & 0x00000001))
+ return 0;
+
+ switch (pll & 0xfff000) {
+ case 0x00e000:
+ sclk = 27000;
+ P = 1;
+ break;
+ case 0x137000:
+ doff = (pll - 0x137000) / 0x20;
+ sclk = read_div(dev, doff, 0x137120, 0x137140);
+ break;
+ case 0x132000:
+ switch (pll) {
+ case 0x132000:
+ sclk = read_pll(dev, 0x132020);
+ break;
+ case 0x132020:
+ sclk = read_div(dev, 0, 0x137320, 0x137330);
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return sclk * N / M / P;
+}
+
+static u32
+read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
+{
+ u32 ssrc = nv_rd32(dev, dsrc + (doff * 4));
+ u32 sctl = nv_rd32(dev, dctl + (doff * 4));
+
+ switch (ssrc & 0x00000003) {
+ case 0:
+ if ((ssrc & 0x00030000) != 0x00030000)
+ return 27000;
+ return 108000;
+ case 2:
+ return 100000;
+ case 3:
+ if (sctl & 0x80000000) {
+ u32 sclk = read_vco(dev, dsrc + (doff * 4));
+ u32 sdiv = (sctl & 0x0000003f) + 2;
+ return (sclk * 2) / sdiv;
+ }
+
+ return read_vco(dev, dsrc + (doff * 4));
+ default:
+ return 0;
+ }
+}
+
+static u32
+read_mem(struct drm_device *dev)
+{
+ u32 ssel = nv_rd32(dev, 0x1373f0);
+ if (ssel & 0x00000001)
+ return read_div(dev, 0, 0x137300, 0x137310);
+ return read_pll(dev, 0x132000);
+}
+
+static u32
+read_clk(struct drm_device *dev, int clk)
+{
+ u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4));
+ u32 ssel = nv_rd32(dev, 0x137100);
+ u32 sclk, sdiv;
+
+ if (ssel & (1 << clk)) {
+ if (clk < 7)
+ sclk = read_pll(dev, 0x137000 + (clk * 0x20));
+ else
+ sclk = read_pll(dev, 0x1370e0);
+ sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+ } else {
+ sclk = read_div(dev, clk, 0x137160, 0x1371d0);
+ sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+ }
+
+ if (sctl & 0x80000000)
+ return (sclk * 2) / sdiv;
+ return sclk;
+}
+
+int
+nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ perflvl->shader = read_clk(dev, 0x00);
+ perflvl->core = perflvl->shader / 2;
+ perflvl->memory = read_mem(dev);
+ perflvl->rop = read_clk(dev, 0x01);
+ perflvl->hub07 = read_clk(dev, 0x02);
+ perflvl->hub06 = read_clk(dev, 0x07);
+ perflvl->hub01 = read_clk(dev, 0x08);
+ perflvl->copy = read_clk(dev, 0x09);
+ perflvl->daemon = read_clk(dev, 0x0c);
+ perflvl->vdec = read_clk(dev, 0x0e);
+ return 0;
+}
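
One recurring pattern in the readback code added above: a 6-bit divider field d means sdiv = d + 2, and the resulting clock is (src * 2) / sdiv, i.e. division in half steps from /1.0 upwards. A standalone illustration follows (not driver code; 'src' stands in for whatever read_pll()/read_vco() returned).

/* Illustrative only: the (src * 2) / (field + 2) divider pattern used by
 * read_div() and read_clk() above.  'src' is a stand-in source clock in kHz. */
#include <stdio.h>
#include <stdint.h>

static uint32_t apply_div(uint32_t src_khz, uint32_t field)
{
	uint32_t sdiv = (field & 0x3f) + 2;      /* 6-bit field -> divider 2..65 */
	return (src_khz * 2) / sdiv;             /* effective divide by sdiv / 2 */
}

int main(void)
{
	uint32_t src = 1620000;                  /* hypothetical 1.62 GHz source */
	printf("field 0 -> %u kHz\n", (unsigned)apply_div(src, 0));  /* 1620000 */
	printf("field 1 -> %u kHz\n", (unsigned)apply_div(src, 1));  /* 1080000 */
	printf("field 2 -> %u kHz\n", (unsigned)apply_div(src, 2));  /*  810000 */
	return 0;
}
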
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index e45a24d84e9..ce984d573a5 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
- 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
@@ -61,7 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+ struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int ret;
@@ -106,12 +106,54 @@ nvc0_vram_init(struct drm_device *dev)
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 length;
+ u32 parts = nv_rd32(dev, 0x121c74);
+ u32 bsize = nv_rd32(dev, 0x10f20c);
+ u32 offset, length;
+ bool uniform = true;
+ int ret, part;
- dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
- dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+ NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
+ NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
- length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+ /* read amount of vram attached to each memory controller */
+ part = 0;
+ while (parts) {
+ u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
+ if (psize == 0)
+ continue;
+ parts--;
- return nouveau_mm_init(&vram->mm, rsvd_head, length, 1);
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
+ }
+
+ NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
+ dev_priv->vram_size += (u64)psize << 20;
+ }
+
+ /* if all controllers have the same amount attached, there's no holes */
+ if (uniform) {
+ offset = rsvd_head;
+ length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+ return nouveau_mm_init(&vram->mm, offset, length, 1);
+ }
+
+ /* otherwise, address lowest common amount from 0GiB */
+ ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
+ if (ret)
+ return ret;
+
+ /* and the rest starting from (8GiB + common_size) */
+ offset = (0x0200000000ULL >> 12) + (bsize << 8);
+ length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
+
+ ret = nouveau_mm_init(&vram->mm, offset, length, 0);
+ if (ret) {
+ nouveau_mm_fini(&vram->mm);
+ return ret;
+ }
+
+ return 0;
}
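
To put numbers to the non-uniform case above: the in-diff comments describe it as addressing the lowest common amount from 0 GiB and the rest from 8 GiB plus that common size, with per-partition sizes read in MiB and converted to 4 KiB pages via << 8. The standalone sketch below works through that arithmetic for a hypothetical three-partition board; it is illustrative only, and the region naming is mine rather than nouveau's.

/* Illustrative only: the uniform/non-uniform VRAM split described by the
 * comments in nvc0_vram_init() above.  Sizes in MiB, offsets/lengths in
 * 4 KiB pages (MiB << 8).  The partition sizes are hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t psize_mib[] = { 1024, 1024, 768 };  /* one partition is smaller */
	uint32_t parts = 3;
	uint32_t rsvd_head = (256 * 1024) >> 12;     /* vga memory */
	uint32_t rsvd_tail = (1024 * 1024) >> 12;    /* vbios etc */
	uint64_t total_mib = 0;
	uint32_t i, bsize = psize_mib[0];

	for (i = 0; i < parts; i++) {
		total_mib += psize_mib[i];
		if (psize_mib[i] < bsize)
			bsize = psize_mib[i];
	}

	/* lowest common amount, addressed from just above the reserved head */
	printf("common region: offset %u, %u pages\n",
	       (unsigned)rsvd_head, (unsigned)((bsize << 8) * parts));
	/* the rest, addressed from 8 GiB plus the common size */
	printf("upper region:  offset %llu, %llu pages\n",
	       (unsigned long long)((0x0200000000ULL >> 12) + (bsize << 8)),
	       (unsigned long long)((total_mib << 8) - (bsize << 8) - rsvd_tail));
	return 0;
}
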
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
new file mode 100644
index 00000000000..23d63b4b3d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -0,0 +1,1473 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/dma-mapping.h>
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
+#include "nouveau_fb.h"
+#include "nv50_display.h"
+
+struct nvd0_display {
+ struct nouveau_gpuobj *mem;
+ struct {
+ dma_addr_t handle;
+ u32 *ptr;
+ } evo[1];
+
+ struct tasklet_struct tasklet;
+ u32 modeset;
+};
+
+static struct nvd0_display *
+nvd0_display(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->engine.display.priv;
+}
+
+static inline int
+evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
+{
+ int ret = 0;
+ nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x610704 + (id * 0x10), data);
+ nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
+ if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
+ ret = -EBUSY;
+ nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
+ return ret;
+}
+
+static u32 *
+evo_wait(struct drm_device *dev, int id, int nr)
+{
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;
+
+ if (put + nr >= (PAGE_SIZE / 4)) {
+ disp->evo[id].ptr[put] = 0x20000000;
+
+ nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
+ if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
+ NV_ERROR(dev, "evo %d dma stalled\n", id);
+ return NULL;
+ }
+
+ put = 0;
+ }
+
+ return disp->evo[id].ptr + put;
+}
+
+static void
+evo_kick(u32 *push, struct drm_device *dev, int id)
+{
+ struct nvd0_display *disp = nvd0_display(dev);
+ nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
+}
+
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d) *((p)++) = (d)
+
+static struct drm_crtc *
+nvd0_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ u32 *push, mode;
+
+ mode = 0x00000000;
+ if (on) {
+ /* 0x11: 6bpc dynamic 2x2
+ * 0x13: 8bpc dynamic 2x2
+ * 0x19: 6bpc static 2x2
+ * 0x1b: 8bpc static 2x2
+ * 0x21: 6bpc temporal
+ * 0x23: 8bpc temporal
+ */
+ mode = 0x00000011;
+ }
+
+ push = evo_wait(dev, 0, 4);
+ if (push) {
+ evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, mode);
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, dev, 0);
+ }
+
+ return 0;
+}
+
+static int
+nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
+{
+ struct drm_display_mode *mode = &nv_crtc->base.mode;
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct nouveau_connector *nv_connector;
+ u32 *push, outX, outY;
+
+ outX = mode->hdisplay;
+ outY = mode->vdisplay;
+
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (nv_connector && nv_connector->native_mode) {
+ struct drm_display_mode *native = nv_connector->native_mode;
+ u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
+ u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
+
+ switch (type) {
+ case DRM_MODE_SCALE_ASPECT:
+ if (xratio > yratio) {
+ outX = (mode->hdisplay * yratio) >> 19;
+ outY = (mode->vdisplay * yratio) >> 19;
+ } else {
+ outX = (mode->hdisplay * xratio) >> 19;
+ outY = (mode->vdisplay * xratio) >> 19;
+ }
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ outX = native->hdisplay;
+ outY = native->vdisplay;
+ break;
+ default:
+ break;
+ }
+ }
+
+ push = evo_wait(dev, 0, 16);
+ if (push) {
+ evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, (outY << 16) | outX);
+ evo_data(push, (outY << 16) | outX);
+ evo_data(push, (outY << 16) | outX);
+ evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, dev, 0);
+ }
+
+ return 0;
+}
+
+static int
+nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+ int x, int y, bool update)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+ u32 *push;
+
+ push = evo_wait(fb->dev, 0, 16);
+ if (push) {
+ evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_data(push, nvfb->r_dma);
+ evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (y << 16) | x);
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, fb->dev, 0);
+ }
+
+ nv_crtc->fb.tile_flags = nvfb->r_dma;
+ return 0;
+}
+
+static void
+nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ u32 *push = evo_wait(dev, 0, 16);
+ if (push) {
+ if (show) {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_kick(push, dev, 0);
+ }
+}
+
+static void
+nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nvd0_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 *push;
+
+ push = evo_wait(crtc->dev, 0, 2);
+ if (push) {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x03000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, crtc->dev, 0);
+ }
+
+ nvd0_crtc_cursor_show(nv_crtc, false, false);
+}
+
+static void
+nvd0_crtc_commit(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 *push;
+
+ push = evo_wait(crtc->dev, 0, 32);
+ if (push) {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, 0x83000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0xffffff00);
+ evo_kick(push, crtc->dev, 0);
+ }
+
+ nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
+}
+
+static bool
+nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ nvfb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(nvfb->nvbo);
+ }
+
+ return 0;
+}
+
+static int
+nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_connector *nv_connector;
+ u32 htotal = mode->htotal;
+ u32 vtotal = mode->vtotal;
+ u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
+ u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
+ u32 hfrntp = mode->hsync_start - mode->hdisplay;
+ u32 vfrntp = mode->vsync_start - mode->vdisplay;
+ u32 hbackp = mode->htotal - mode->hsync_end;
+ u32 vbackp = mode->vtotal - mode->vsync_end;
+ u32 hss2be = hsyncw + hbackp;
+ u32 vss2be = vsyncw + vbackp;
+ u32 hss2de = htotal - hfrntp;
+ u32 vss2de = vtotal - vfrntp;
+ u32 syncs, *push;
+ int ret;
+
+ syncs = 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ ret = nvd0_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
+ push = evo_wait(crtc->dev, 0, 64);
+ if (push) {
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vtotal << 16) | htotal);
+ evo_data(push, (vsyncw << 16) | hsyncw);
+ evo_data(push, (vss2be << 16) | hss2be);
+ evo_data(push, (vss2de << 16) | hss2de);
+ evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000); /* ??? */
+ evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, mode->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, mode->clock * 1000);
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, syncs);
+ evo_kick(push, crtc->dev, 0);
+ }
+
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
+ nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
+ nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+ return 0;
+}
+
+static int
+nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ if (!crtc->fb) {
+ NV_DEBUG_KMS(crtc->dev, "No FB bound\n");
+ return 0;
+ }
+
+ ret = nvd0_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
+ nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ return 0;
+}
+
+static int
+nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
+ return 0;
+}
+
+static void
+nvd0_crtc_lut_load(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
+ writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
+ writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
+ }
+}
+
+static int
+nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ bool visible = (handle != 0);
+ int i, ret = 0;
+
+ if (visible) {
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (unlikely(!gem))
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret == 0) {
+ for (i = 0; i < 64 * 64; i++) {
+ u32 v = nouveau_bo_rd32(nvbo, i);
+ nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+ }
+ nouveau_bo_unmap(nvbo);
+ }
+
+ drm_gem_object_unreference_unlocked(gem);
+ }
+
+ if (visible != nv_crtc->cursor.visible) {
+ nvd0_crtc_cursor_show(nv_crtc, visible, true);
+ nv_crtc->cursor.visible = visible;
+ }
+
+ return ret;
+}
+
+static int
+nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ const u32 data = (y << 16) | x;
+
+ nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
+ nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+ return 0;
+}
+
+static void
+nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 end = max(start + size, (u32)256);
+ u32 i;
+
+ for (i = start; i < end; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
+ }
+
+ nvd0_crtc_lut_load(crtc);
+}
+
+static void
+nvd0_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_unmap(nv_crtc->lut.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
+ .dpms = nvd0_crtc_dpms,
+ .prepare = nvd0_crtc_prepare,
+ .commit = nvd0_crtc_commit,
+ .mode_fixup = nvd0_crtc_mode_fixup,
+ .mode_set = nvd0_crtc_mode_set,
+ .mode_set_base = nvd0_crtc_mode_set_base,
+ .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
+ .load_lut = nvd0_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nvd0_crtc_func = {
+ .cursor_set = nvd0_crtc_cursor_set,
+ .cursor_move = nvd0_crtc_cursor_move,
+ .gamma_set = nvd0_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nvd0_crtc_destroy,
+};
+
+static void
+nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nvd0_crtc_create(struct drm_device *dev, int index)
+{
+ struct nouveau_crtc *nv_crtc;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+ if (!nv_crtc)
+ return -ENOMEM;
+
+ nv_crtc->index = index;
+ nv_crtc->set_dither = nvd0_crtc_set_dither;
+ nv_crtc->set_scale = nvd0_crtc_set_scale;
+ nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
+ nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = i << 8;
+ nv_crtc->lut.g[i] = i << 8;
+ nv_crtc->lut.b[i] = i << 8;
+ }
+
+ crtc = &nv_crtc->base;
+ drm_crtc_init(dev, crtc, &nvd0_crtc_func);
+ drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ }
+
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &nv_crtc->lut.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->lut.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ }
+
+ if (ret)
+ goto out;
+
+ nvd0_crtc_lut_load(crtc);
+
+out:
+ if (ret)
+ nvd0_crtc_destroy(crtc);
+ return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ int or = nv_encoder->or;
+ u32 dpms_ctrl;
+
+ dpms_ctrl = 0x80000000;
+ if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000001;
+ if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000004;
+
+ nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
+ nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
+ nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
+}
+
+static bool
+nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ return true;
+}
+
+static void
+nvd0_dac_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nvd0_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ u32 *push;
+
+ nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(encoder->dev, 0, 4);
+ if (push) {
+ evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
+ evo_data(push, 1 << nv_crtc->index);
+ evo_data(push, 0x00ff);
+ evo_kick(push, encoder->dev, 0);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nvd0_dac_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nvd0_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(dev, 0, 4);
+ if (push) {
+ evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, dev, 0);
+ }
+
+ nv_encoder->crtc = NULL;
+ }
+}
+
+static enum drm_connector_status
+nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ enum drm_connector_status status = connector_status_disconnected;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ int or = nv_encoder->or;
+ u32 load;
+
+ nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000);
+ udelay(9500);
+ nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000);
+
+ load = nv_rd32(dev, 0x61a00c + (or * 0x800));
+ if ((load & 0x38000000) == 0x38000000)
+ status = connector_status_connected;
+
+ nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000);
+ return status;
+}
+
+static void
+nvd0_dac_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
+ .dpms = nvd0_dac_dpms,
+ .mode_fixup = nvd0_dac_mode_fixup,
+ .prepare = nvd0_dac_prepare,
+ .commit = nvd0_dac_commit,
+ .mode_set = nvd0_dac_mode_set,
+ .disable = nvd0_dac_disconnect,
+ .get_crtc = nvd0_display_crtc_get,
+ .detect = nvd0_dac_detect
+};
+
+static const struct drm_encoder_funcs nvd0_dac_func = {
+ .destroy = nvd0_dac_destroy,
+};
+
+static int
+nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_encoder *partner;
+ int or = nv_encoder->or;
+ u32 dpms_ctrl;
+
+ nv_encoder->last_dpms = mode;
+
+ list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+ if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+ continue;
+
+ if (nv_partner != nv_encoder &&
+ nv_partner->dcb->or == nv_encoder->or) {
+ if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+ return;
+ break;
+ }
+ }
+
+ dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
+ dpms_ctrl |= 0x80000000;
+
+ nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
+ nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
+ nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
+ nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
+}
+
+static bool
+nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ return true;
+}
+
+static void
+nvd0_sor_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nvd0_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nvbios *bios = &dev_priv->vbios;
+ u32 mode_ctrl = (1 << nv_crtc->index);
+ u32 *push, or_config;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_encoder->dcb->type) {
+ case OUTPUT_TMDS:
+ if (nv_encoder->dcb->sorconf.link & 1) {
+ if (mode->clock < 165000)
+ mode_ctrl |= 0x00000100;
+ else
+ mode_ctrl |= 0x00000500;
+ } else {
+ mode_ctrl |= 0x00000200;
+ }
+
+ or_config = (mode_ctrl & 0x00000f00) >> 8;
+ if (mode->clock >= 165000)
+ or_config |= 0x0100;
+ break;
+ case OUTPUT_LVDS:
+ or_config = (mode_ctrl & 0x00000f00) >> 8;
+ if (bios->fp_no_ddc) {
+ if (bios->fp.dual_link)
+ or_config |= 0x0100;
+ if (bios->fp.if_is_24bit)
+ or_config |= 0x0200;
+ } else {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (((u8 *)nv_connector->edid)[121] == 2)
+ or_config |= 0x0100;
+ } else
+ if (mode->clock >= bios->fp.duallink_transition_clk) {
+ or_config |= 0x0100;
+ }
+
+ if (or_config & 0x0100) {
+ if (bios->fp.strapless_is_24bit & 2)
+ or_config |= 0x0200;
+ } else {
+ if (bios->fp.strapless_is_24bit & 1)
+ or_config |= 0x0200;
+ }
+
+ if (nv_connector->base.display_info.bpc == 8)
+ or_config |= 0x0200;
+
+ }
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(encoder->dev, 0, 4);
+ if (push) {
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
+ evo_data(push, mode_ctrl);
+ evo_data(push, or_config);
+ evo_kick(push, encoder->dev, 0);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nvd0_sor_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nvd0_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(dev, 0, 4);
+ if (push) {
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, dev, 0);
+ }
+
+ nv_encoder->crtc = NULL;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ }
+}
+
+static void
+nvd0_sor_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
+ .dpms = nvd0_sor_dpms,
+ .mode_fixup = nvd0_sor_mode_fixup,
+ .prepare = nvd0_sor_prepare,
+ .commit = nvd0_sor_commit,
+ .mode_set = nvd0_sor_mode_set,
+ .disable = nvd0_sor_disconnect,
+ .get_crtc = nvd0_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nvd0_sor_func = {
+ .destroy = nvd0_sor_destroy,
+};
+
+static int
+nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * IRQ
+ *****************************************************************************/
+static struct dcb_entry *
+lookup_dcb(struct drm_device *dev, int id, u32 mc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int type, or, i;
+
+ if (id < 4) {
+ type = OUTPUT_ANALOG;
+ or = id;
+ } else {
+ switch (mc & 0x00000f00) {
+ case 0x00000000: type = OUTPUT_LVDS; break;
+ case 0x00000100: type = OUTPUT_TMDS; break;
+ case 0x00000200: type = OUTPUT_TMDS; break;
+ case 0x00000500: type = OUTPUT_TMDS; break;
+ default:
+ NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
+ return NULL;
+ }
+
+ or = id - 4;
+ }
+
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
+ if (dcb->type == type && (dcb->or & (1 << or)))
+ return dcb;
+ }
+
+ NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
+ return NULL;
+}
+
+static void
+nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
+{
+ struct dcb_entry *dcb;
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
+ if (!(mcc & (1 << crtc)))
+ continue;
+
+ dcb = lookup_dcb(dev, i, mcc);
+ if (!dcb)
+ continue;
+
+ nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
+ }
+
+ nv_wr32(dev, 0x6101d4, 0x00000000);
+ nv_wr32(dev, 0x6109d4, 0x00000000);
+ nv_wr32(dev, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
+{
+ struct dcb_entry *dcb;
+ u32 or, tmp, pclk;
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
+ if (!(mcc & (1 << crtc)))
+ continue;
+
+ dcb = lookup_dcb(dev, i, mcc);
+ if (!dcb)
+ continue;
+
+ nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
+ }
+
+ pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
+ if (mask & 0x00010000) {
+ nv50_crtc_set_clock(dev, crtc, pclk);
+ }
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
+ u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
+ if (!(mcp & (1 << crtc)))
+ continue;
+
+ dcb = lookup_dcb(dev, i, mcp);
+ if (!dcb)
+ continue;
+ or = ffs(dcb->or) - 1;
+
+ nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
+
+ nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000);
+ switch (dcb->type) {
+ case OUTPUT_ANALOG:
+ nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000);
+ break;
+ case OUTPUT_TMDS:
+ case OUTPUT_LVDS:
+ if (cfg & 0x00000100)
+ tmp = 0x00000101;
+ else
+ tmp = 0x00000000;
+
+ nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ nv_wr32(dev, 0x6101d4, 0x00000000);
+ nv_wr32(dev, 0x6109d4, 0x00000000);
+ nv_wr32(dev, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
+{
+ struct dcb_entry *dcb;
+ int pclk, i;
+
+ pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
+ u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
+ if (!(mcp & (1 << crtc)))
+ continue;
+
+ dcb = lookup_dcb(dev, i, mcp);
+ if (!dcb)
+ continue;
+
+ nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
+ }
+
+ nv_wr32(dev, 0x6101d4, 0x00000000);
+ nv_wr32(dev, 0x6109d4, 0x00000000);
+ nv_wr32(dev, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_bh(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 mask, crtc;
+ int i;
+
+ if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
+ NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset);
+ NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n",
+ nv_rd32(dev, 0x6101d0),
+ nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4));
+ for (i = 0; i < 8; i++) {
+ NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n",
+ i < 4 ? "DAC" : "SOR", i,
+ nv_rd32(dev, 0x640180 + (i * 0x20)),
+ nv_rd32(dev, 0x660180 + (i * 0x20)));
+ }
+ }
+
+ mask = nv_rd32(dev, 0x6101d4);
+ crtc = 0;
+ if (!mask) {
+ mask = nv_rd32(dev, 0x6109d4);
+ crtc = 1;
+ }
+
+ if (disp->modeset & 0x00000001)
+ nvd0_display_unk1_handler(dev, crtc, mask);
+ if (disp->modeset & 0x00000002)
+ nvd0_display_unk2_handler(dev, crtc, mask);
+ if (disp->modeset & 0x00000004)
+ nvd0_display_unk4_handler(dev, crtc, mask);
+}
+
+static void
+nvd0_display_intr(struct drm_device *dev)
+{
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 intr = nv_rd32(dev, 0x610088);
+
+ if (intr & 0x00000002) {
+ u32 stat = nv_rd32(dev, 0x61009c);
+ int chid = ffs(stat) - 1;
+ if (chid >= 0) {
+ u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
+ u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
+ u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));
+
+ NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
+ "0x%08x 0x%08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+ nv_wr32(dev, 0x61009c, (1 << chid));
+ nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
+ }
+
+ intr &= ~0x00000002;
+ }
+
+ if (intr & 0x00100000) {
+ u32 stat = nv_rd32(dev, 0x6100ac);
+
+ if (stat & 0x00000007) {
+ disp->modeset = stat;
+ tasklet_schedule(&disp->tasklet);
+
+ nv_wr32(dev, 0x6100ac, (stat & 0x00000007));
+ stat &= ~0x00000007;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat);
+ nv_wr32(dev, 0x6100ac, stat);
+ }
+
+ intr &= ~0x00100000;
+ }
+
+ if (intr & 0x01000000) {
+ u32 stat = nv_rd32(dev, 0x6100bc);
+ nv_wr32(dev, 0x6100bc, stat);
+ intr &= ~0x01000000;
+ }
+
+ if (intr & 0x02000000) {
+ u32 stat = nv_rd32(dev, 0x6108bc);
+ nv_wr32(dev, 0x6108bc, stat);
+ intr &= ~0x02000000;
+ }
+
+ if (intr)
+ NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+static void
+nvd0_display_fini(struct drm_device *dev)
+{
+ int i;
+
+ /* fini cursors */
+ for (i = 14; i >= 13; i--) {
+ if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
+ continue;
+
+ nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
+ nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
+ nv_mask(dev, 0x610090, 1 << i, 0x00000000);
+ nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
+ }
+
+ /* fini master */
+ if (nv_rd32(dev, 0x610490) & 0x00000010) {
+ nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
+ nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
+ nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
+ nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
+ }
+}
+
+int
+nvd0_display_init(struct drm_device *dev)
+{
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 *push;
+ int i;
+
+ if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
+ nv_wr32(dev, 0x6100ac, 0x00000100);
+ nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
+ NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
+ nv_rd32(dev, 0x6194e8));
+ return -EBUSY;
+ }
+ }
+
+ /* It is not clear exactly what these registers are; all that is known is
+ * that SOR_MODE_CTRL won't work at all unless the SOR part below is done.
+ */
+ for (i = 0; i < 3; i++) {
+ u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800));
+ nv_wr32(dev, 0x6101c0 + (i * 0x800), dac);
+ }
+
+ for (i = 0; i < 4; i++) {
+ u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800));
+ nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
+ }
+
+ for (i = 0; i < 2; i++) {
+ u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
+ u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
+ u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
+ nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0);
+ nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1);
+ nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2);
+ }
+
+ /* point at our hash table / objects, enable interrupts */
+ nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);
+ nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
+
+ /* init master */
+ nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
+ nv_wr32(dev, 0x610498, 0x00010000);
+ nv_wr32(dev, 0x61049c, 0x00000001);
+ nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x640000, 0x00000000);
+ nv_wr32(dev, 0x610490, 0x01000013);
+ if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "PDISP: master 0x%08x\n",
+ nv_rd32(dev, 0x610490));
+ return -EBUSY;
+ }
+ nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
+ nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* init cursors */
+ for (i = 13; i <= 14; i++) {
+ nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
+ if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
+ NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
+ nv_rd32(dev, 0x610490 + (i * 0x10)));
+ return -EBUSY;
+ }
+
+ nv_mask(dev, 0x610090, 1 << i, 1 << i);
+ nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
+ }
+
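/* Aside (illustrative reading, not taken from the patch itself): the evo_*
 * calls below follow a push-buffer pattern -- evo_wait() appears to reserve
 * space in the EVO channel's push buffer, evo_mthd()/evo_data() append a
 * method header and its payload, and evo_kick() submits the queued words to
 * the display engine. This is inferred from how the helpers are used here.
 */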
+ push = evo_wait(dev, 0, 32);
+ if (!push)
+ return -EBUSY;
+ evo_mthd(push, 0x0088, 1);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000);
+ evo_mthd(push, 0x008c, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, dev, 0);
+
+ return 0;
+}
+
+void
+nvd0_display_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvd0_display *disp = nvd0_display(dev);
+ struct pci_dev *pdev = dev->pdev;
+
+ nvd0_display_fini(dev);
+
+ pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
+ nouveau_gpuobj_ref(NULL, &disp->mem);
+ nouveau_irq_unregister(dev, 26);
+
+ dev_priv->engine.display.priv = NULL;
+ kfree(disp);
+}
+
+int
+nvd0_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ struct drm_connector *connector, *tmp;
+ struct pci_dev *pdev = dev->pdev;
+ struct nvd0_display *disp;
+ struct dcb_entry *dcbe;
+ int ret, i;
+
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
+ dev_priv->engine.display.priv = disp;
+
+ /* create crtc objects to represent the hw heads */
+ for (i = 0; i < 2; i++) {
+ ret = nvd0_crtc_create(dev, i);
+ if (ret)
+ goto out;
+ }
+
+ /* create encoder/connector objects based on VBIOS DCB table */
+ for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+ connector = nouveau_connector_create(dev, dcbe->connector);
+ if (IS_ERR(connector))
+ continue;
+
+ if (dcbe->location != DCB_LOC_ON_CHIP) {
+ NV_WARN(dev, "skipping off-chip encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
+ }
+
+ switch (dcbe->type) {
+ case OUTPUT_TMDS:
+ case OUTPUT_LVDS:
+ nvd0_sor_create(connector, dcbe);
+ break;
+ case OUTPUT_ANALOG:
+ nvd0_dac_create(connector, dcbe);
+ break;
+ default:
+ NV_WARN(dev, "skipping unsupported encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
+ }
+ }
+
+ /* cull any connectors we created that don't have an encoder */
+ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+ if (connector->encoder_ids[0])
+ continue;
+
+ NV_WARN(dev, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
+
+ /* setup interrupt handling */
+ tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
+ nouveau_irq_register(dev, 26, nvd0_display_intr);
+
+ /* hash table and dma objects for the memory areas we care about */
+ ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
+ NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
+ if (ret)
+ goto out;
+
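/* Aside (inferred from the values written below, not from documentation):
 * the nv_wo32() writes build four DMA objects inside disp->mem at offsets
 * 0x1000/0x1020/0x1040/0x1060 -- a class/flags word, base >> 8, limit >> 8,
 * then padding -- plus a hash table at offset 0 that pairs each handle
 * (NvEvoSync, NvEvoVRAM, ...) with (object_offset << 9) | 1.
 */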
+ nv_wo32(disp->mem, 0x1000, 0x00000049);
+ nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
+ nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
+ nv_wo32(disp->mem, 0x100c, 0x00000000);
+ nv_wo32(disp->mem, 0x1010, 0x00000000);
+ nv_wo32(disp->mem, 0x1014, 0x00000000);
+ nv_wo32(disp->mem, 0x0000, NvEvoSync);
+ nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
+
+ nv_wo32(disp->mem, 0x1020, 0x00000049);
+ nv_wo32(disp->mem, 0x1024, 0x00000000);
+ nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, 0x102c, 0x00000000);
+ nv_wo32(disp->mem, 0x1030, 0x00000000);
+ nv_wo32(disp->mem, 0x1034, 0x00000000);
+ nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
+ nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
+
+ nv_wo32(disp->mem, 0x1040, 0x00000009);
+ nv_wo32(disp->mem, 0x1044, 0x00000000);
+ nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, 0x104c, 0x00000000);
+ nv_wo32(disp->mem, 0x1050, 0x00000000);
+ nv_wo32(disp->mem, 0x1054, 0x00000000);
+ nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
+ nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
+
+ nv_wo32(disp->mem, 0x1060, 0x0fe00009);
+ nv_wo32(disp->mem, 0x1064, 0x00000000);
+ nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, 0x106c, 0x00000000);
+ nv_wo32(disp->mem, 0x1070, 0x00000000);
+ nv_wo32(disp->mem, 0x1074, 0x00000000);
+ nv_wo32(disp->mem, 0x0018, NvEvoFB32);
+ nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
+
+ pinstmem->flush(dev);
+
+ /* push buffers for evo channels */
+ disp->evo[0].ptr =
+ pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
+ if (!disp->evo[0].ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = nvd0_display_init(dev);
+ if (ret)
+ goto out;
+
+out:
+ if (ret)
+ nvd0_display_destroy(dev);
+ return ret;
+}
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 570e190710b..bcac90b543a 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -32,6 +32,7 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index b9e8efd2b75..4c8796ba6dd 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -29,6 +29,8 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9f363e0c4b6..cf8b4bc3e73 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,7 +70,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a515b2a09d8..87631fede1f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -558,7 +558,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
- radeon_encoder_is_dp_bridge(encoder)) {
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
@@ -638,44 +638,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (ss_enabled && ss->percentage)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
- if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
- radeon_encoder_is_dp_bridge(encoder)) {
+ if (ENCODER_MODE_IS_DP(encoder_mode)) {
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
+ /* deep color support */
+ args.v3.sInput.usPixelClock =
+ cpu_to_le16((mode->clock * bpc / 8) / 10);
+ if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- /* 16200 or 27000 */
- args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
- } else {
- if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
- /* deep color support */
- args.v3.sInput.usPixelClock =
- cpu_to_le16((mode->clock * bpc / 8) / 10);
- }
- if (dig->coherent_mode)
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_COHERENT_MODE;
- if (mode->clock > 165000)
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_DUAL_LINK;
- }
- } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_COHERENT_MODE;
- /* 16200 or 27000 */
- args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
- } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
- if (mode->clock > 165000)
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_DUAL_LINK;
- }
+ DISPPLL_CONFIG_DUAL_LINK;
}
- if (radeon_encoder_is_dp_bridge(encoder)) {
- struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
- struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
- args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
- } else
+ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+ ENCODER_OBJECT_ID_NONE)
+ args.v3.sInput.ucExtTransmitterID =
+ radeon_encoder_get_dp_bridge_encoder_id(encoder);
+ else
args.v3.sInput.ucExtTransmitterID = 0;
atom_execute_table(rdev->mode_info.atom_context,
@@ -945,6 +930,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
bpc = connector->display_info.bpc;
switch (encoder_mode) {
+ case ATOM_ENCODER_MODE_DP_MST:
case ATOM_ENCODER_MODE_DP:
/* DP/eDP */
dp_clock = dig_connector->dp_clock / 10;
@@ -1450,7 +1436,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
* PPLL/DCPLL programming and only program the DP DTO for the
* crtc virtual pixel clock.
*/
- if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
}
@@ -1536,12 +1522,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
-
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 79e8ebc0530..6fb335a4fdd 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
}
- DRM_ERROR("aux i2c too many retries, giving up\n");
+ DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
}
@@ -482,7 +482,8 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int lane_num, max_pix_clock;
- if (radeon_connector_encoder_is_dp_bridge(connector))
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_NUTMEG)
return 270000;
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
@@ -553,17 +554,32 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
if (!ASIC_IS_DCE4(rdev))
return;
- if (radeon_connector_encoder_is_dp_bridge(connector))
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_NUTMEG)
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+ else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_TRAVIS)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ }
atombios_dig_encoder_setup(encoder,
ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
panel_mode);
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+ (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+ radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+ }
}
void radeon_dp_set_link_config(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
new file mode 100644
index 00000000000..39c04c1b847
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -0,0 +1,2369 @@
+/*
+ * Copyright 2007-11 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+extern int atom_debug;
+
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ struct drm_display_mode *mode);
+
+
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_encoder->devices & radeon_connector->devices)
+ return connector;
+ }
+ return NULL;
+}
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* set the active encoder to connector routing */
+ radeon_encoder_set_active_device(encoder);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ /* hw bug */
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+ /* get the native mode for TV */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ if (tv_dac) {
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M)
+ radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+ else
+ radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+ }
+ }
+
+ if (ASIC_IS_DCE3(rdev) &&
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ radeon_dp_set_link_config(connector, mode);
+ }
+
+ return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+ break;
+ }
+
+ args.ucAction = action;
+
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_PS2;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_CV;
+ else {
+ switch (dac_info->tv_std) {
+ case TV_STD_PAL:
+ case TV_STD_PAL_M:
+ case TV_STD_SCART_PAL:
+ case TV_STD_SECAM:
+ case TV_STD_PAL_CN:
+ args.ucDacStandard = ATOM_DAC1_PAL;
+ break;
+ case TV_STD_NTSC:
+ case TV_STD_NTSC_J:
+ case TV_STD_PAL_60:
+ default:
+ args.ucDacStandard = ATOM_DAC1_NTSC;
+ break;
+ }
+ }
+ args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ TV_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+ args.sTVEncoder.ucAction = action;
+
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+ else {
+ switch (dac_info->tv_std) {
+ case TV_STD_NTSC:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ case TV_STD_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+ break;
+ case TV_STD_PAL_M:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+ break;
+ case TV_STD_PAL_60:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+ break;
+ case TV_STD_NTSC_J:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+ break;
+ case TV_STD_SCART_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+ break;
+ case TV_STD_SECAM:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+ break;
+ case TV_STD_PAL_CN:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+ break;
+ default:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ }
+ }
+
+ args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dvo_encoder_control {
+ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
+
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ union dvo_encoder_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+ uint8_t frev, crev;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ /* R4xx, R5xx */
+ args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ break;
+ case 2:
+ /* RS600/690/740 */
+ args.dvo.sDVOEncoder.ucAction = action;
+ args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ /* DFP1, CRT1, TV1 depending on the type of port */
+ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+ break;
+ case 3:
+ /* R6xx */
+ args.dvo_v3.ucAction = action;
+ args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.dvo_v3.ucDVOConfig = 0; /* XXX */
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union lvds_encoder_control {
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ union lvds_encoder_control args;
+ int index = 0;
+ int hdmi_detected = 0;
+ uint8_t frev, crev;
+
+ if (!dig)
+ return;
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ hdmi_detected = 1;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+ break;
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ case 2:
+ switch (crev) {
+ case 1:
+ args.v1.ucMisc = 0;
+ args.v1.ucAction = action;
+ if (hdmi_detected)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ } else {
+ if (dig->linkb)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ /*if (pScrn->rgbBits == 8) */
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ }
+ break;
+ case 2:
+ case 3:
+ args.v2.ucMisc = 0;
+ args.v2.ucAction = action;
+ if (crev == 3) {
+ if (dig->coherent_mode)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+ }
+ if (hdmi_detected)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v2.ucTruncate = 0;
+ args.v2.ucSpatial = 0;
+ args.v2.ucTemporal = 0;
+ args.v2.ucFRC = 0;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
+ args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+ }
+ if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
+ args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+ if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+ }
+ } else {
+ if (dig->linkb)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ /* dp bridges are always DP */
+ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
+ return ATOM_ENCODER_MODE_DP;
+
+ /* DVO is always DVO */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+ return ATOM_ENCODER_MODE_DVO;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ /* if we don't have an active device yet, just use one of
+ * the connectors tied to the encoder.
+ */
+ if (!connector)
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+ if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else if (radeon_connector->use_digital)
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_CRT;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ default:
+ if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ return ATOM_ENCODER_MODE_DP;
+ else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ return ATOM_ENCODER_MODE_DP;
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_VGA:
+ return ATOM_ENCODER_MODE_CRT;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ /* fix me */
+ return ATOM_ENCODER_MODE_TV;
+ /*return ATOM_ENCODER_MODE_CV;*/
+ break;
+ }
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link B -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
+ */
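/* A minimal sketch of the hardcoded DCE 4.0/5.0 DIG-to-PHY mapping described
 * above, for illustration only: the helper name and the zero-based dig index
 * (DIG1 == 0) are assumptions and are not part of the radeon code.
 */
static inline void dce4_dig_to_uniphy(int dig, int *uniphy, bool *link_b)
{
	*uniphy = dig / 2;		/* DIG1/2 -> UNIPHY0, DIG3/4 -> UNIPHY1, DIG5/6 -> UNIPHY2 */
	*link_b = (dig & 1) != 0;	/* odd entries (DIG2/4/6) drive link B, even ones link A (or A+B) */
}
/* e.g. dig->dig_encoder == 3 (DIG4) would give UNIPHY1, link B on DCE 4.0 */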
+
+union dig_encoder_control {
+ DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ union dig_encoder_control args;
+ int index = 0;
+ uint8_t frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int hpd_id = RADEON_HPD_NONE;
+ int bpc = 8;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ hpd_id = radeon_connector->hpd.hpd;
+ bpc = connector->display_info.bpc;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
+ return;
+
+ memset(&args, 0, sizeof(args));
+
+ if (ASIC_IS_DCE4(rdev))
+ index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+ else {
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ args.v1.ucAction = action;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+ args.v3.ucPanelMode = panel_mode;
+ else
+ args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ args.v1.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucLaneNum = 8;
+ else
+ args.v1.ucLaneNum = 4;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+ break;
+ }
+ if (dig->linkb)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ break;
+ case 2:
+ case 3:
+ args.v3.ucAction = action;
+ args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+ args.v3.ucPanelMode = panel_mode;
+ else
+ args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ args.v3.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ switch (bpc) {
+ case 0:
+ args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
+ break;
+ case 6:
+ args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+ break;
+ case 8:
+ default:
+ args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case 10:
+ args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case 12:
+ args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case 16:
+ args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ }
+ break;
+ case 4:
+ args.v4.ucAction = action;
+ args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+ args.v4.ucPanelMode = panel_mode;
+ else
+ args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ args.v4.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v4.ucLaneNum = 8;
+ else
+ args.v4.ucLaneNum = 4;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) {
+ if (dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+ }
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
+ switch (bpc) {
+ case 0:
+ args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
+ break;
+ case 6:
+ args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+ break;
+ case 8:
+ default:
+ args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case 10:
+ args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case 12:
+ args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case 16:
+ args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ }
+ if (hpd_id == RADEON_HPD_NONE)
+ args.v4.ucHPD_ID = 0;
+ else
+ args.v4.ucHPD_ID = hpd_id + 1;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dig_transmitter_control {
+ DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ union dig_transmitter_control args;
+ int index = 0;
+ uint8_t frev, crev;
+ bool is_dp = false;
+ int pll_id = 0;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ int igp_lane_info = 0;
+ int dig_encoder = dig->dig_encoder;
+
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ /* just needed to avoid bailing in the encoder check. the encoder
+ * isn't used for init
+ */
+ dig_encoder = 0;
+ } else
+ connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ igp_lane_info = dig_connector->igp_lane_info;
+ }
+
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ pll_id = radeon_crtc->pll_id;
+ }
+
+ /* no dig encoder assigned */
+ if (dig_encoder == -1)
+ return;
+
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
+ is_dp = true;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+ break;
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ args.v1.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v1.asMode.ucLaneSel = lane_num;
+ args.v1.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v1.usPixelClock =
+ cpu_to_le16(dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+ if (dig_encoder)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
+ if ((rdev->flags & RADEON_IS_IGP) &&
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+ if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+ if (igp_lane_info & 0x1)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else if (igp_lane_info & 0x2)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+ else if (igp_lane_info & 0x4)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+ else if (igp_lane_info & 0x8)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+ } else {
+ if (igp_lane_info & 0x3)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+ else if (igp_lane_info & 0xc)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+ }
+ }
+
+ if (dig->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+ if (is_dp)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+ }
+ break;
+ case 2:
+ args.v2.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v2.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v2.asMode.ucLaneSel = lane_num;
+ args.v2.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v2.usPixelClock =
+ cpu_to_le16(dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ args.v2.acConfig.ucEncoderSel = dig_encoder;
+ if (dig->linkb)
+ args.v2.acConfig.ucLinkSel = 1;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v2.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v2.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v2.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp) {
+ args.v2.acConfig.fCoherentMode = 1;
+ args.v2.acConfig.fDPConnector = 1;
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v2.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.acConfig.fDualLinkConnector = 1;
+ }
+ break;
+ case 3:
+ args.v3.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v3.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v3.asMode.ucLaneSel = lane_num;
+ args.v3.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v3.usPixelClock =
+ cpu_to_le16(dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ if (is_dp)
+ args.v3.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (dig->linkb)
+ args.v3.acConfig.ucLinkSel = 1;
+ if (dig_encoder & 1)
+ args.v3.acConfig.ucEncoderSel = 1;
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ /* On DCE4, if there is an external clock, it generates the DP ref clock */
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v3.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v3.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v3.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v3.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v3.acConfig.fDualLinkConnector = 1;
+ }
+ break;
+ case 4:
+ args.v4.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v4.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v4.asMode.ucLaneSel = lane_num;
+ args.v4.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v4.usPixelClock =
+ cpu_to_le16(dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ if (is_dp)
+ args.v4.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v4.ucLaneNum = 8;
+ else
+ args.v4.ucLaneNum = 4;
+
+ if (dig->linkb)
+ args.v4.acConfig.ucLinkSel = 1;
+ if (dig_encoder & 1)
+ args.v4.acConfig.ucEncoderSel = 1;
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ /* On DCE5 DCPLL usually generates the DP ref clock */
+ if (is_dp) {
+ if (rdev->clock.dp_extclk)
+ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
+ else
+ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+ } else
+ args.v4.acConfig.ucRefClkSource = pll_id;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v4.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v4.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v4.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v4.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v4.acConfig.fDualLinkConnector = 1;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+bool
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ union dig_transmitter_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ uint8_t frev, crev;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ goto done;
+
+ if (!ASIC_IS_DCE4(rdev))
+ goto done;
+
+ if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+ (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+ goto done;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ goto done;
+
+ memset(&args, 0, sizeof(args));
+
+ args.v1.ucAction = action;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* wait for the panel to power up */
+ if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+ int i;
+
+ for (i = 0; i < 300; i++) {
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ return true;
+ mdelay(1);
+ }
+ return false;
+ }
+done:
+ return true;
+}
+
+union external_encoder_control {
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+ union external_encoder_control args;
+ struct drm_connector *connector;
+ int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+ u8 frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ int bpc = 8;
+
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ else
+ connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ bpc = connector->display_info.bpc;
+ }
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ /* no params on frev 1 */
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.sDigEncoder.ucAction = action;
+ args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
+ if (dp_clock == 270000)
+ args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.sDigEncoder.ucLaneNum = 8;
+ else
+ args.v1.sDigEncoder.ucLaneNum = 4;
+ break;
+ case 3:
+ args.v3.sExtEncoder.ucAction = action;
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+ else
+ args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
+ if (dp_clock == 270000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+ args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.sExtEncoder.ucLaneNum = 8;
+ else
+ args.v3.sExtEncoder.ucLaneNum = 4;
+ switch (ext_enum) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+ break;
+ }
+ switch (bpc) {
+ case 0:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
+ break;
+ case 6:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+ break;
+ case 8:
+ default:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case 10:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case 12:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case 16:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ ENABLE_YUV_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+ uint32_t temp, reg;
+
+ memset(&args, 0, sizeof(args));
+
+ if (rdev->family >= CHIP_R600)
+ reg = R600_BIOS_3_SCRATCH;
+ else
+ reg = RADEON_BIOS_3_SCRATCH;
+
+ /* XXX: fix up scratch reg handling */
+ temp = RREG32(reg);
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+ (radeon_crtc->crtc_id << 18)));
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+ else
+ WREG32(reg, 0);
+
+ if (enable)
+ args.ucEnable = ATOM_ENABLE;
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ WREG32(reg, temp);
+}
+
+static void
+radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+ break;
+ default:
+ return;
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ args.ucAction = ATOM_ENABLE;
+ /* workaround for DVOOutputControl on some RS690 systems */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+ u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg);
+ } else
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ args.ucAction = ATOM_DISABLE;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLOFF;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ }
+}
+
+static void
+radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = NULL;
+ struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+
+ if (connector) {
+ radeon_connector = to_radeon_connector(connector);
+ radeon_dig_connector = radeon_connector->con_priv;
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ radeon_dig_connector->edp_on = true;
+ }
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ radeon_dp_link_train(encoder, connector);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ radeon_dig_connector->edp_on = false;
+ }
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ break;
+ }
+}
+
+static void
+radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ default:
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+ break;
+ }
+}
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+ radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+ radeon_encoder->active_device);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ radeon_atom_encoder_dpms_avivo(encoder, mode);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ radeon_atom_encoder_dpms_dig(encoder, mode);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ if (ASIC_IS_DCE5(rdev)) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else if (ASIC_IS_DCE3(rdev))
+ radeon_atom_encoder_dpms_dig(encoder, mode);
+ else
+ radeon_atom_encoder_dpms_avivo(encoder, mode);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (ASIC_IS_DCE5(rdev)) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else
+ radeon_atom_encoder_dpms_avivo(encoder, mode);
+ break;
+ default:
+ return;
+ }
+
+ if (ext_encoder)
+ radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
+
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+union crtc_source_param {
+ SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ union crtc_source_param args;
+ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ default:
+ if (ASIC_IS_AVIVO(rdev))
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ else {
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ } else {
+ args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+ }
+ }
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+ args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ }
+ break;
+ case 2:
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+ else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ } else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ dig = radeon_encoder->enc_priv;
+ switch (dig->dig_encoder) {
+ case 0:
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ }
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* update scratch regs with new routing */
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+ /* Funky macbooks */
+ if ((dev->pdev->device == 0x71C5) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x0080)) {
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+ WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+ }
+ }
+
+ /* the scaler setup clears this on some chips, so reprogram it here */
+ if (ASIC_IS_AVIVO(rdev) &&
+ (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ } else {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ else
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ }
+ }
+}
+
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *test_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ uint32_t dig_enc_in_use = 0;
+
+ /* DCE4/5 */
+ if (ASIC_IS_DCE4(rdev)) {
+ dig = radeon_encoder->enc_priv;
+ if (ASIC_IS_DCE41(rdev)) {
+ /* ontario follows DCE4 */
+ if (rdev->family == CHIP_PALM) {
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ } else
+ /* llano follows DCE3.2 */
+ return radeon_crtc->crtc_id;
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
+ }
+ }
+
+ /* on DCE32 any encoder can drive any block, so just use the crtc id */
+ if (ASIC_IS_DCE32(rdev)) {
+ return radeon_crtc->crtc_id;
+ }
+
+ /* on DCE3 - LVTMA can only be driven by DIGB */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_test_encoder;
+
+ if (encoder == test_encoder)
+ continue;
+
+ if (!radeon_encoder_is_digital(test_encoder))
+ continue;
+
+ radeon_test_encoder = to_radeon_encoder(test_encoder);
+ dig = radeon_test_encoder->enc_priv;
+
+ if (dig->dig_encoder >= 0)
+ dig_enc_in_use |= (1 << dig->dig_encoder);
+ }
+
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+ if (dig_enc_in_use & 0x2)
+ DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+ return 1;
+ }
+ if (!(dig_enc_in_use & 1))
+ return 0;
+ return 1;
+}
+
+/* This only needs to be called once at startup */
+void
+radeon_atom_encoder_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (ext_encoder && ASIC_IS_DCE41(rdev))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ }
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+ atombios_yuv_setup(encoder, true);
+ else
+ atombios_yuv_setup(encoder, false);
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+
+ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ else
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ }
+
+ if (ext_encoder) {
+ if (ASIC_IS_DCE41(rdev))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ }
+
+ atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ r600_hdmi_enable(encoder);
+ r600_hdmi_setmode(encoder, adjusted_mode);
+ }
+}
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+ ATOM_DEVICE_CV_SUPPORT |
+ ATOM_DEVICE_CRT_SUPPORT)) {
+ DAC_LOAD_DETECTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+ uint8_t frev, crev;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return false;
+
+ args.sDacload.ucMisc = 0;
+
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+ args.sDacload.ucDacType = ATOM_DAC_A;
+ else
+ args.sDacload.ucDacType = ATOM_DAC_B;
+
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+ else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+ else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ return true;
+ } else
+ return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ uint32_t bios_0_scratch;
+
+ if (!atombios_dac_load_detect(encoder, connector)) {
+ DRM_DEBUG_KMS("detect returned false\n");
+ return connector_status_unknown;
+ }
+
+ if (rdev->family >= CHIP_R600)
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+ else
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+ u32 bios_0_scratch;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return connector_status_unknown;
+
+ if (!ext_encoder)
+ return connector_status_unknown;
+
+ if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+ return connector_status_unknown;
+
+ /* load detect on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ if (ext_encoder)
+ /* ddc_setup on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if ((radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+ ENCODER_OBJECT_ID_NONE)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig)
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ }
+
+ radeon_atom_output_lock(encoder, true);
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ /* select the clock/data port if it uses a router */
+ if (radeon_connector->router.cd_valid)
+ radeon_router_select_cd_port(radeon_connector);
+
+ /* turn eDP panel on for mode set */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ atombios_set_encoder_crtc_source(encoder);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ radeon_atom_output_lock(encoder, false);
+}
+
+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
+
+ /* check for pre-DCE3 cards with shared encoders;
+ * can't really use the links individually, so don't disable
+ * the encoder if it's in use by another connector
+ */
+ if (!ASIC_IS_DCE3(rdev)) {
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+ drm_helper_encoder_in_use(other_encoder))
+ goto disable_done;
+ }
+ }
+
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev))
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
+disable_done:
+ if (radeon_encoder_is_digital(encoder)) {
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ r600_hdmi_disable(encoder);
+ dig = radeon_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
+ radeon_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+ .dpms = radeon_atom_ext_dpms,
+ .mode_fixup = radeon_atom_ext_mode_fixup,
+ .prepare = radeon_atom_ext_prepare,
+ .mode_set = radeon_atom_ext_mode_set,
+ .commit = radeon_atom_ext_commit,
+ .disable = radeon_atom_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ .disable = radeon_atom_encoder_disable,
+ .detect = radeon_atom_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ .detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+struct radeon_encoder_atom_dac *
+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
+
+ if (!dac)
+ return NULL;
+
+ dac->tv_std = radeon_atombios_get_tv_info(rdev);
+ return dac;
+}
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+ int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+ if (!dig)
+ return NULL;
+
+ /* coherent mode by default */
+ dig->coherent_mode = true;
+ dig->dig_encoder = -1;
+
+ if (encoder_enum == 2)
+ dig->linkb = true;
+ else
+ dig->linkb = false;
+
+ return dig;
+}
+
+void
+radeon_add_atom_encoder(struct drm_device *dev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->encoder_enum == encoder_enum) {
+ radeon_encoder->devices |= supported_device;
+ return;
+ }
+
+ }
+
+ /* add a new one */
+ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return;
+
+ encoder = &radeon_encoder->base;
+ switch (rdev->num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
+
+ radeon_encoder->enc_priv = NULL;
+
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ radeon_encoder->devices = supported_device;
+ radeon_encoder->rmx_type = RMX_OFF;
+ radeon_encoder->underscan_type = UNDERSCAN_OFF;
+ radeon_encoder->is_ext_encoder = false;
+ radeon_encoder->caps = caps;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ }
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ }
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ /* these are handled by the primary encoders */
+ radeon_encoder->is_ext_encoder = true;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ else
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c4ffa14fb2f..1d603a3335d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,7 +39,7 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
-static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
@@ -157,6 +157,57 @@ int sumo_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
+void sumo_pm_init_profile(struct radeon_device *rdev)
+{
+ int idx;
+
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+
+ /* low,mid sh/mh */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+
+ /* high sh/mh */
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
+ rdev->pm.power_state[idx].num_clock_modes - 1;
+
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
+ rdev->pm.power_state[idx].num_clock_modes - 1;
+}
+
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -353,6 +404,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
default:
break;
}
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
if (rdev->irq.installed)
evergreen_irq_set(rdev);
@@ -893,7 +945,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -935,6 +987,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
evergreen_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -942,7 +997,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int r;
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
@@ -962,14 +1016,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- if (rdev->gart.table.vram.robj) {
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
- }
- }
+ radeon_gart_table_vram_unpin(rdev);
}
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
@@ -1223,7 +1270,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
if (rdev->flags & RADEON_IS_IGP) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
@@ -2586,7 +2633,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void evergreen_irq_ack(struct radeon_device *rdev)
+static void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
@@ -2697,7 +2744,7 @@ void evergreen_irq_suspend(struct radeon_device *rdev)
r600_rlc_stop(rdev);
}
-static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
@@ -3003,8 +3050,7 @@ static int evergreen_startup(struct radeon_device *rdev)
int r;
/* enable pcie gen2 link */
- if (!ASIC_IS_DCE5(rdev))
- evergreen_pcie_gen2_enable(rdev);
+ evergreen_pcie_gen2_enable(rdev);
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
@@ -3029,6 +3075,10 @@ static int evergreen_startup(struct radeon_device *rdev)
}
}
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
@@ -3041,7 +3091,7 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
- evergreen_blit_fini(rdev);
+ r600_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -3107,45 +3157,14 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- int r;
-
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
+ r600_blit_suspend(rdev);
- /* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (likely(r == 0)) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
-
- return 0;
-}
-
-int evergreen_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence *fence)
-{
- int r;
-
- mutex_lock(&rdev->r600_blit.mutex);
- rdev->r600_blit.vb_ib = NULL;
- r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
- if (r) {
- if (rdev->r600_blit.vb_ib)
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
- mutex_unlock(&rdev->r600_blit.mutex);
- return r;
- }
- evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
- evergreen_blit_done_copy(rdev, fence);
- mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
@@ -3257,13 +3276,14 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- evergreen_blit_fini(rdev);
+ r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
@@ -3273,7 +3293,7 @@ void evergreen_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
-static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, speed_cntl;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2eb251858e7..914e5af8416 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -56,7 +56,9 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
+ cb_color_info = CB_FORMAT(format) |
+ CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+ CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
@@ -67,7 +69,7 @@ set_render_target(struct radeon_device *rdev, int format,
radeon_ring_write(rdev, slice);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, cb_color_info);
- radeon_ring_write(rdev, (1 << 4));
+ radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
@@ -92,6 +94,15 @@ cp_set_surface_sync(struct radeon_device *rdev,
else
cp_coher_size = ((size + 255) >> 8);
+ if (rdev->family >= CHIP_CAYMAN) {
+ /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
+ * to the RB directly. For IBs, the CP programs this as part of the
+ * surface_sync packet.
+ */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+ }
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(rdev, sync_type);
radeon_ring_write(rdev, cp_coher_size);
@@ -133,12 +144,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
/* high addr, stride */
- sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+ sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+ SQ_VTXC_STRIDE(16);
#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= (2 << 30);
+ sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
/* xyzw swizzles */
- sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
+ sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
+ SQ_VTCX_SEL_Y(SQ_SEL_Y) |
+ SQ_VTCX_SEL_Z(SQ_SEL_Z) |
+ SQ_VTCX_SEL_W(SQ_SEL_W);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(rdev, 0x580);
@@ -149,7 +164,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+ radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
@@ -168,7 +183,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
- u64 gpu_addr)
+ u64 gpu_addr, u32 size)
{
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -176,14 +191,22 @@ set_tex_resource(struct radeon_device *rdev,
if (h < 1)
h = 1;
- sq_tex_resource_word0 = (1 << 0); /* 2D */
+ sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
((w - 1) << 18));
- sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
+ sq_tex_resource_word1 = ((h - 1) << 0) |
+ TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
/* xyzw swizzles */
- sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
+ sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
+ TEX_DST_SEL_Y(SQ_SEL_Y) |
+ TEX_DST_SEL_Z(SQ_SEL_Z) |
+ TEX_DST_SEL_W(SQ_SEL_W);
+
+ sq_tex_resource_word7 = format |
+ S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
- sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, size, gpu_addr);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(rdev, 0);
@@ -584,31 +607,6 @@ set_default_state(struct radeon_device *rdev)
}
-static inline uint32_t i2f(uint32_t input)
-{
- u32 result, i, exponent, fraction;
-
- if ((input & 0x3fff) == 0)
- result = 0; /* 0 is a special case */
- else {
- exponent = 140; /* exponent biased by 127; */
- fraction = (input & 0x3fff) << 10; /* cheat and only
- handle numbers below 2^^15 */
- for (i = 0; i < 14; i++) {
- if (fraction & 0x800000)
- break;
- else {
- fraction = fraction << 1; /* keep
- shifting left until top bit = 1 */
- exponent = exponent - 1;
- }
- }
- result = exponent << 23 | (fraction & 0x7fffff); /* mask
- off top bit; assumed 1 */
- }
- return result;
-}
-
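The helper removed here packed a small unsigned integer into its IEEE-754 single-precision bit pattern for the blit vertex buffer; the identical copy in r600_blit_kms.c is kept (and de-inlined) later in this patch, so the evergreen path now relies on that shared copy. For the range the helper handles (0..0x3fff) the hand-rolled loop produces the same bits as a plain float cast; a user-space equivalence sketch, for illustration only:

#include <stdint.h>
#include <string.h>

/* Reference conversion: the bit pattern of (float)input for inputs below
 * 2^14, which is what the removed i2f() computed by hand. */
static uint32_t i2f_ref(uint32_t input)
{
	float f = (float)(input & 0x3fff);
	uint32_t bits;

	memcpy(&bits, &f, sizeof(bits));	/* type-pun without aliasing UB */
	return bits;
}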
int evergreen_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
@@ -617,6 +615,26 @@ int evergreen_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ rdev->r600_blit.primitives.set_render_target = set_render_target;
+ rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+ rdev->r600_blit.primitives.set_shaders = set_shaders;
+ rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+ rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+ rdev->r600_blit.primitives.set_scissors = set_scissors;
+ rdev->r600_blit.primitives.draw_auto = draw_auto;
+ rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+ rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
+ rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
+ rdev->r600_blit.ring_size_common += 5; /* done copy */
+ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+ rdev->r600_blit.ring_size_per_loop = 74;
+ if (rdev->family >= CHIP_CAYMAN)
+ rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
+
+ rdev->r600_blit.max_dim = 16384;
+
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
@@ -712,277 +730,3 @@ done:
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
-
-void evergreen_blit_fini(struct radeon_device *rdev)
-{
- int r;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- if (rdev->r600_blit.shader_obj == NULL)
- return;
- /* If we can't reserve the bo, unref should be enough to destroy
- * it when it becomes idle.
- */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- radeon_bo_unref(&rdev->r600_blit.shader_obj);
-}
-
-static int evergreen_vb_ib_get(struct radeon_device *rdev)
-{
- int r;
- r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
- if (r) {
- DRM_ERROR("failed to get IB for vertex buffer\n");
- return r;
- }
-
- rdev->r600_blit.vb_total = 64*1024;
- rdev->r600_blit.vb_used = 0;
- return 0;
-}
-
-static void evergreen_vb_ib_put(struct radeon_device *rdev)
-{
- radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
-}
-
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
-{
- int r;
- int ring_size, line_size;
- int max_size;
- /* loops of emits + fence emit possible */
- int dwords_per_loop = 74, num_loops;
-
- r = evergreen_vb_ib_get(rdev);
- if (r)
- return r;
-
- /* 8 bpp vs 32 bpp for xfer unit */
- if (size_bytes & 3)
- line_size = 8192;
- else
- line_size = 8192 * 4;
-
- max_size = 8192 * line_size;
-
- /* major loops cover the max size transfer */
- num_loops = ((size_bytes + max_size) / max_size);
- /* minor loops cover the extra non aligned bits */
- num_loops += ((size_bytes % line_size) ? 1 : 0);
- /* calculate number of loops correctly */
- ring_size = num_loops * dwords_per_loop;
- /* set default + shaders */
- ring_size += 55; /* shaders + def state */
- ring_size += 10; /* fence emit for VB IB */
- ring_size += 5; /* done copy */
- ring_size += 10; /* fence emit for done copy */
- r = radeon_ring_lock(rdev, ring_size);
- if (r)
- return r;
-
- set_default_state(rdev); /* 36 */
- set_shaders(rdev); /* 16 */
- return 0;
-}
-
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
-{
- int r;
-
- if (rdev->r600_blit.vb_ib)
- evergreen_vb_ib_put(rdev);
-
- if (fence)
- r = radeon_fence_emit(rdev, fence);
-
- radeon_ring_unlock_commit(rdev);
-}
-
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes)
-{
- int max_bytes;
- u64 vb_gpu_addr;
- u32 *vb;
-
- DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
- size_bytes, rdev->r600_blit.vb_used);
- vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
- if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
- max_bytes = 8192;
-
- while (size_bytes) {
- int cur_size = size_bytes;
- int src_x = src_gpu_addr & 255;
- int dst_x = dst_gpu_addr & 255;
- int h = 1;
- src_gpu_addr = src_gpu_addr & ~255ULL;
- dst_gpu_addr = dst_gpu_addr & ~255ULL;
-
- if (!src_x && !dst_x) {
- h = (cur_size / max_bytes);
- if (h > 8192)
- h = 8192;
- if (h == 0)
- h = 1;
- else
- cur_size = max_bytes;
- } else {
- if (cur_size > max_bytes)
- cur_size = max_bytes;
- if (cur_size > (max_bytes - dst_x))
- cur_size = (max_bytes - dst_x);
- if (cur_size > (max_bytes - src_x))
- cur_size = (max_bytes - src_x);
- }
-
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
-
- vb[0] = i2f(dst_x);
- vb[1] = 0;
- vb[2] = i2f(src_x);
- vb[3] = 0;
-
- vb[4] = i2f(dst_x);
- vb[5] = i2f(h);
- vb[6] = i2f(src_x);
- vb[7] = i2f(h);
-
- vb[8] = i2f(dst_x + cur_size);
- vb[9] = i2f(h);
- vb[10] = i2f(src_x + cur_size);
- vb[11] = i2f(h);
-
- /* src 10 */
- set_tex_resource(rdev, FMT_8,
- src_x + cur_size, h, src_x + cur_size,
- src_gpu_addr);
-
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
-
-
- /* dst 17 */
- set_render_target(rdev, COLOR_8,
- dst_x + cur_size, h,
- dst_gpu_addr);
-
- /* scissors 12 */
- set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
-
- /* 15 */
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
- set_vtx_resource(rdev, vb_gpu_addr);
-
- /* draw 10 */
- draw_auto(rdev);
-
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- cur_size * h, dst_gpu_addr);
-
- vb += 12;
- rdev->r600_blit.vb_used += 12 * 4;
-
- src_gpu_addr += cur_size * h;
- dst_gpu_addr += cur_size * h;
- size_bytes -= cur_size * h;
- }
- } else {
- max_bytes = 8192 * 4;
-
- while (size_bytes) {
- int cur_size = size_bytes;
- int src_x = (src_gpu_addr & 255);
- int dst_x = (dst_gpu_addr & 255);
- int h = 1;
- src_gpu_addr = src_gpu_addr & ~255ULL;
- dst_gpu_addr = dst_gpu_addr & ~255ULL;
-
- if (!src_x && !dst_x) {
- h = (cur_size / max_bytes);
- if (h > 8192)
- h = 8192;
- if (h == 0)
- h = 1;
- else
- cur_size = max_bytes;
- } else {
- if (cur_size > max_bytes)
- cur_size = max_bytes;
- if (cur_size > (max_bytes - dst_x))
- cur_size = (max_bytes - dst_x);
- if (cur_size > (max_bytes - src_x))
- cur_size = (max_bytes - src_x);
- }
-
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
-
- vb[0] = i2f(dst_x / 4);
- vb[1] = 0;
- vb[2] = i2f(src_x / 4);
- vb[3] = 0;
-
- vb[4] = i2f(dst_x / 4);
- vb[5] = i2f(h);
- vb[6] = i2f(src_x / 4);
- vb[7] = i2f(h);
-
- vb[8] = i2f((dst_x + cur_size) / 4);
- vb[9] = i2f(h);
- vb[10] = i2f((src_x + cur_size) / 4);
- vb[11] = i2f(h);
-
- /* src 10 */
- set_tex_resource(rdev, FMT_8_8_8_8,
- (src_x + cur_size) / 4,
- h, (src_x + cur_size) / 4,
- src_gpu_addr);
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
-
- /* dst 17 */
- set_render_target(rdev, COLOR_8_8_8_8,
- (dst_x + cur_size) / 4, h,
- dst_gpu_addr);
-
- /* scissors 12 */
- set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
-
- /* Vertex buffer setup 15 */
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
- set_vtx_resource(rdev, vb_gpu_addr);
-
- /* draw 10 */
- draw_auto(rdev);
-
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- cur_size * h, dst_gpu_addr);
-
- /* 74 ring dwords per loop */
- vb += 12;
- rdev->r600_blit.vb_used += 12 * 4;
-
- src_gpu_addr += cur_size * h;
- dst_gpu_addr += cur_size * h;
- size_bytes -= cur_size * h;
- }
- }
-}
-
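With the copy loop deleted from this file, everything ASIC-specific is reached through the rdev->r600_blit.primitives callbacks assigned in evergreen_blit_init() above, and the loop itself lives once in r600_blit_kms.c (further down in this diff). The sketch below shows the shape of that callback set as implied by the assignments and the signatures visible in this patch; the struct name is hypothetical and the real definition sits in radeon.h, which is not part of this diff:

struct blit_primitives_sketch {
	void (*set_render_target)(struct radeon_device *rdev, int format,
				  int w, int h, u64 gpu_addr);
	void (*cp_set_surface_sync)(struct radeon_device *rdev,
				    u32 sync_type, u32 size, u64 mc_addr);
	void (*set_shaders)(struct radeon_device *rdev);
	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
	void (*set_tex_resource)(struct radeon_device *rdev,
				 int format, int w, int h, int pitch,
				 u64 gpu_addr, u32 size);
	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
			     int x2, int y2);
	void (*draw_auto)(struct radeon_device *rdev);
	void (*set_default_state)(struct radeon_device *rdev);
};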
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index a134790903d..7fdfa8ea757 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -122,12 +122,6 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
track->db_s_write_bo = NULL;
}
-static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
-{
- /* XXX fill in */
- return 0;
-}
-
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
struct evergreen_cs_track *track = p->track;
@@ -236,28 +230,6 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
}
/**
- * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
- * @parser: parser structure holding parsing context.
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
-{
- struct radeon_cs_packet p3reloc;
- int r;
-
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return 0;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return 0;
- }
- return 1;
-}
-
-/**
* evergreen_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
@@ -414,7 +386,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
* if the register is safe. If the register is not flagged as safe, this function
* will test it against a list of registers needing special handling.
*/
-static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
@@ -990,7 +962,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
* This function will check that the resource has valid fields and that
* the texture and mipmap bo objects are big enough to cover this resource.
*/
-static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+static int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
struct radeon_bo *texture,
struct radeon_bo *mipmap)
{
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 7363d9dec90..b937c49054d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -941,11 +941,15 @@
#define CB_COLOR0_SLICE 0x28c68
#define CB_COLOR0_VIEW 0x28c6c
#define CB_COLOR0_INFO 0x28c70
+# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
# define ARRAY_LINEAR_GENERAL 0
# define ARRAY_LINEAR_ALIGNED 1
# define ARRAY_1D_TILED_THIN1 2
# define ARRAY_2D_TILED_THIN1 4
+# define CB_SOURCE_FORMAT(x) ((x) << 24)
+# define CB_SF_EXPORT_FULL 0
+# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
@@ -1107,15 +1111,53 @@
#define CB_COLOR7_CLEAR_WORD3 0x28e3c
#define SQ_TEX_RESOURCE_WORD0_0 0x30000
+# define TEX_DIM(x) ((x) << 0)
+# define SQ_TEX_DIM_1D 0
+# define SQ_TEX_DIM_2D 1
+# define SQ_TEX_DIM_3D 2
+# define SQ_TEX_DIM_CUBEMAP 3
+# define SQ_TEX_DIM_1D_ARRAY 4
+# define SQ_TEX_DIM_2D_ARRAY 5
+# define SQ_TEX_DIM_2D_MSAA 6
+# define SQ_TEX_DIM_2D_ARRAY_MSAA 7
#define SQ_TEX_RESOURCE_WORD1_0 0x30004
# define TEX_ARRAY_MODE(x) ((x) << 28)
#define SQ_TEX_RESOURCE_WORD2_0 0x30008
#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
#define SQ_TEX_RESOURCE_WORD4_0 0x30010
+# define TEX_DST_SEL_X(x) ((x) << 16)
+# define TEX_DST_SEL_Y(x) ((x) << 19)
+# define TEX_DST_SEL_Z(x) ((x) << 22)
+# define TEX_DST_SEL_W(x) ((x) << 25)
+# define SQ_SEL_X 0
+# define SQ_SEL_Y 1
+# define SQ_SEL_Z 2
+# define SQ_SEL_W 3
+# define SQ_SEL_0 4
+# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+#define SQ_VTX_CONSTANT_WORD0_0 0x30000
+#define SQ_VTX_CONSTANT_WORD1_0 0x30004
+#define SQ_VTX_CONSTANT_WORD2_0 0x30008
+# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
+# define SQ_VTXC_STRIDE(x) ((x) << 8)
+# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
+# define SQ_ENDIAN_NONE 0
+# define SQ_ENDIAN_8IN16 1
+# define SQ_ENDIAN_8IN32 2
+#define SQ_VTX_CONSTANT_WORD3_0 0x3000C
+# define SQ_VTCX_SEL_X(x) ((x) << 3)
+# define SQ_VTCX_SEL_Y(x) ((x) << 6)
+# define SQ_VTCX_SEL_Z(x) ((x) << 9)
+# define SQ_VTCX_SEL_W(x) ((x) << 12)
+#define SQ_VTX_CONSTANT_WORD4_0 0x30010
+#define SQ_VTX_CONSTANT_WORD5_0 0x30014
+#define SQ_VTX_CONSTANT_WORD6_0 0x30018
+#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
+
/* cayman 3D regs */
#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
#define CAYMAN_DB_EQAA 0x28804
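The new field helpers make the open-coded shifts in the blit code self-describing without changing any bit. As an example, the texture swizzle word rewritten earlier in this patch encodes identically both ways; the names are exactly the definitions above:

/* Old form in set_tex_resource():
 *   sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
 * New form, same bit pattern: */
u32 sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |	/* 0 << 16 */
			    TEX_DST_SEL_Y(SQ_SEL_Y) |	/* 1 << 19 */
			    TEX_DST_SEL_Z(SQ_SEL_Z) |	/* 2 << 22 */
			    TEX_DST_SEL_W(SQ_SEL_W);	/* 3 << 25 */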
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8c79ca97753..0e579985746 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -40,6 +41,7 @@ extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -260,8 +262,11 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
/* wait for training to complete */
- while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
- udelay(10);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
+ break;
+ udelay(1);
+ }
if (running)
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
@@ -931,7 +936,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -967,14 +972,15 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
cayman_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
- int r;
-
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
@@ -990,14 +996,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
- if (rdev->gart.table.vram.robj) {
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
- }
- }
+ radeon_gart_table_vram_unpin(rdev);
}
void cayman_pcie_gart_fini(struct radeon_device *rdev)
@@ -1341,6 +1340,9 @@ static int cayman_startup(struct radeon_device *rdev)
{
int r;
+ /* enable pcie gen2 link */
+ evergreen_pcie_gen2_enable(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
@@ -1354,6 +1356,10 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
@@ -1362,7 +1368,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
- evergreen_blit_fini(rdev);
+ r600_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -1423,21 +1429,13 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
- int r;
-
/* FIXME: we should wait for ring to be empty */
cayman_cp_enable(rdev, false);
rdev->cp.ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
-
- /* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (likely(r == 0)) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
+ r600_blit_suspend(rdev);
return 0;
}
@@ -1550,13 +1548,14 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
- evergreen_blit_fini(rdev);
+ r600_blit_fini(rdev);
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 7fcdbbbf297..ad158ea4990 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -41,6 +41,7 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"
@@ -68,6 +69,108 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx,
+ unsigned reg)
+{
+ int r;
+ u32 tile_flags = 0;
+ u32 tmp;
+ struct radeon_cs_reloc *reloc;
+ u32 value;
+
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ value = radeon_get_ib_value(p, idx);
+ tmp = value & 0x003fffff;
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_DST_TILE_MACRO;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ if (reg == RADEON_SRC_PITCH_OFFSET) {
+ DRM_ERROR("Cannot src blit from microtiled surface\n");
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ tile_flags |= RADEON_DST_TILE_MICRO;
+ }
+
+ tmp |= tile_flags;
+ p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ return 0;
+}
+
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int idx)
+{
+ unsigned c, i;
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ int r = 0;
+ volatile uint32_t *ib;
+ u32 idx_value;
+
+ ib = p->ib->ptr;
+ track = (struct r100_cs_track *)p->track;
+ c = radeon_get_ib_value(p, idx++) & 0x1F;
+ if (c > 16) {
+ DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ track->num_arrays = c;
+ for (i = 0; i < (c - 1); i+=2, idx+=3) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize &= 0x7F;
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 1].robj = reloc->robj;
+ track->arrays[i + 1].esize = idx_value >> 24;
+ track->arrays[i + 1].esize &= 0x7F;
+ }
+ if (c & 1) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].esize &= 0x7F;
+ }
+ return r;
+}
+
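r100_reloc_pitch_offset() and r100_packet3_load_vbpntr() are moved here unchanged from r100_track.h (the header-side removal appears later in this diff), turning two large static inlines into ordinary functions. For readers of the second parser, the 3D_LOAD_VBPNTR payload it walks has the layout below, reconstructed from the code itself rather than from hardware documentation:

/*
 * 3D_LOAD_VBPNTR payload as handled above (reconstruction, not authoritative):
 *   dword 0            : vertex array count in bits 4:0 (at most 16)
 *   per pair of arrays : one dword packing esize[i] (bits 14:8) and
 *                        esize[i+1] (bits 30:24), followed by two offset
 *                        dwords, each of which needs a relocation
 *   odd trailing array : one esize dword (bits 14:8) plus one relocated offset
 */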
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -434,6 +537,7 @@ void r100_hpd_init(struct radeon_device *rdev)
default:
break;
}
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
if (rdev->irq.installed)
r100_irq_set(rdev);
@@ -474,7 +578,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.ram.ptr) {
+ if (rdev->gart.ptr) {
WARN(1, "R100 PCI GART already initialized\n");
return 0;
}
@@ -513,6 +617,9 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
WREG32(RADEON_AIC_CNTL, tmp);
r100_pci_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -530,10 +637,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
+ u32 *gtt = rdev->gart.ptr;
+
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
- rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
+ gtt[i] = cpu_to_le32(lower_32_bits(addr));
return 0;
}
@@ -588,7 +697,7 @@ void r100_irq_disable(struct radeon_device *rdev)
WREG32(R_000044_GEN_INT_STATUS, tmp);
}
-static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
+static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
uint32_t irq_mask = RADEON_SW_INT_TEST |
@@ -3147,7 +3256,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
-static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
DRM_ERROR("use_pitch %d\n", t->use_pitch);
@@ -3965,3 +4074,43 @@ int r100_init(struct radeon_device *rdev)
}
return 0;
}
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ if (reg < rdev->rmmio_size)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ if (reg < rdev->rmmio_size)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+ if (reg < rdev->rio_mem_size)
+ return ioread32(rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ return ioread32(rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
+
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ if (reg < rdev->rio_mem_size)
+ iowrite32(v, rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
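The register accessors added above keep the fast path (a direct readl()/writel() for offsets inside the mapped MMIO window) and fall back to the RADEON_MM_INDEX/RADEON_MM_DATA indirection for anything beyond rdev->rmmio_size; their previous home is not shown in this section. A usage sketch, reusing a register name that already appears in this file:

/* Reading an interrupt status register through the out-of-line helper; the
 * indirect MM_INDEX/MM_DATA path is taken automatically for large offsets. */
u32 irqs = r100_mm_rreg(rdev, RADEON_GEN_INT_STATUS);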
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 686f9dc5d4b..6a603b378ad 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -92,106 +92,10 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg);
-
-
-static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx,
- unsigned reg)
-{
- int r;
- u32 tile_flags = 0;
- u32 tmp;
- struct radeon_cs_reloc *reloc;
- u32 value;
-
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- value = radeon_get_ib_value(p, idx);
- tmp = value & 0x003fffff;
- tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_DST_TILE_MACRO;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- tile_flags |= RADEON_DST_TILE_MICRO;
- }
-
- tmp |= tile_flags;
- p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
- return 0;
-}
-
-static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- int idx)
-{
- unsigned c, i;
- struct radeon_cs_reloc *reloc;
- struct r100_cs_track *track;
- int r = 0;
- volatile uint32_t *ib;
- u32 idx_value;
-
- ib = p->ib->ptr;
- track = (struct r100_cs_track *)p->track;
- c = radeon_get_ib_value(p, idx++) & 0x1F;
- if (c > 16) {
- DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- track->num_arrays = c;
- for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
-
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 1].robj = reloc->robj;
- track->arrays[i + 1].esize = idx_value >> 24;
- track->arrays[i + 1].esize &= 0x7F;
- }
- if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].esize &= 0x7F;
- }
- return r;
-}
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx,
+ unsigned reg);
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int idx);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 55a7f190027..400b26df652 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ void __iomem *ptr = rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
@@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.vram.robj) {
+ if (rdev->gart.robj) {
WARN(1, "RV370 PCIE GART already initialized\n");
return 0;
}
@@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
uint32_t tmp;
int r;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -144,8 +144,9 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
rv370_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
- (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -153,7 +154,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int r;
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
@@ -162,14 +162,7 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
- if (rdev->gart.table.vram.robj) {
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
- }
- }
+ radeon_gart_table_vram_unpin(rdev);
}
void rv370_pcie_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index c5c2742e414..1fe98b421c9 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -791,7 +791,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
/**
* Emit the sequence to pacify R300.
*/
-static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
+static void r300_pacify(drm_radeon_private_t *dev_priv)
{
uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 720dd99163f..9cdda0b3b08 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -29,6 +29,7 @@
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
@@ -287,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
pcie_lanes);
}
-static int r600_pm_get_type_index(struct radeon_device *rdev,
- enum radeon_pm_state_type ps_type,
- int instance)
-{
- int i;
- int found_instance = -1;
-
- for (i = 0; i < rdev->pm.num_power_states; i++) {
- if (rdev->pm.power_state[i].type == ps_type) {
- found_instance++;
- if (found_instance == instance)
- return i;
- }
- }
- /* return default if no match */
- return rdev->pm.default_power_state_index;
-}
-
void rs780_pm_init_profile(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states == 2) {
@@ -420,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
void r600_pm_init_profile(struct radeon_device *rdev)
{
+ int idx;
+
if (rdev->family == CHIP_R600) {
/* XXX */
/* default */
@@ -501,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
/* low sh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
- } else {
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
- }
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
- } else {
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
- }
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
/* high sh */
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
/* low mh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
- } else {
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
- }
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
- } else {
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
- }
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
/* high mh */
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}
@@ -762,13 +709,14 @@ void r600_hpd_init(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
- if (ASIC_IS_DCE3(rdev)) {
- u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
- if (ASIC_IS_DCE32(rdev))
- tmp |= DC_HPDx_EN;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (ASIC_IS_DCE3(rdev)) {
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+ if (ASIC_IS_DCE32(rdev))
+ tmp |= DC_HPDx_EN;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@@ -798,10 +746,7 @@ void r600_hpd_init(struct radeon_device *rdev)
default:
break;
}
- }
- } else {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ } else {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
@@ -819,6 +764,7 @@ void r600_hpd_init(struct radeon_device *rdev)
break;
}
}
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
if (rdev->irq.installed)
r600_irq_set(rdev);
@@ -896,7 +842,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
/* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) {
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ void __iomem *ptr = (void *)rdev->gart.ptr;
u32 tmp;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
@@ -931,7 +877,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.vram.robj) {
+ if (rdev->gart.robj) {
WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
@@ -948,7 +894,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r, i;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -993,6 +939,9 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
r600_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -1000,7 +949,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i, r;
+ int i;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -1027,14 +976,7 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
- if (rdev->gart.table.vram.robj) {
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
- }
- }
+ radeon_gart_table_vram_unpin(rdev);
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
@@ -1134,7 +1076,7 @@ static void r600_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
@@ -1273,6 +1215,53 @@ int r600_mc_init(struct radeon_device *rdev)
return 0;
}
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vram_scratch.robj);
+ if (r) {
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->vram_scratch.robj,
+ RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->vram_scratch.robj,
+ (void **)&rdev->vram_scratch.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+ return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ return;
+ }
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->vram_scratch.robj);
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ }
+ radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
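The scratch object created here gives the memory controller a real, pinned VRAM page to use as its default aperture target; the MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR write further down in this patch points at it, presumably so that stray VM accesses land on harmless memory rather than on address zero. The startup ordering this implies, sketched from the r600_startup hunk below (cayman_startup is analogous, with evergreen_mc_program):

/* allocate + pin the scratch page before the MC aperture is programmed */
r = r600_vram_scratch_init(rdev);
if (r)
	return r;
r600_mc_program(rdev);	/* now safe to write vram_scratch.gpu_addr >> 12
			 * into MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR */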
/* We don't check that the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
@@ -2328,6 +2317,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
if (rdev->wb.use_event) {
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+ /* flush read cache over gart */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(rdev, 0xFFFFFFFF);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
@@ -2336,6 +2333,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, 0);
} else {
+ /* flush read cache over gart */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(rdev, 0xFFFFFFFF);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 10); /* poll interval */
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
@@ -2362,19 +2367,33 @@ int r600_copy_blit(struct radeon_device *rdev,
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
r600_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
+void r600_blit_suspend(struct radeon_device *rdev)
+{
+ int r;
+
+ /* unpin shaders bo */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ }
+}
+
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2403,6 +2422,10 @@ int r600_startup(struct radeon_device *rdev)
}
}
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
@@ -2494,8 +2517,6 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
- int r;
-
r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
@@ -2503,14 +2524,8 @@ int r600_suspend(struct radeon_device *rdev)
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
- /* unpin shaders bo */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- }
+ r600_blit_suspend(rdev);
+
return 0;
}
@@ -2631,6 +2646,7 @@ void r600_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
@@ -3137,7 +3153,7 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void r600_irq_ack(struct radeon_device *rdev)
+static void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
@@ -3238,7 +3254,7 @@ void r600_irq_disable(struct radeon_device *rdev)
r600_disable_interrupt_state(rdev);
}
-static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
+static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 7f1043448d2..3c031a48205 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -41,7 +41,7 @@
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
-static inline void
+static void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
u32 cb_color_info;
@@ -99,7 +99,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
ADVANCE_RING();
}
-static inline void
+static void
cp_set_surface_sync(drm_radeon_private_t *dev_priv,
u32 sync_type, u32 size, u64 mc_addr)
{
@@ -121,7 +121,7 @@ cp_set_surface_sync(drm_radeon_private_t *dev_priv,
ADVANCE_RING();
}
-static inline void
+static void
set_shaders(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -184,7 +184,7 @@ set_shaders(struct drm_device *dev)
R600_SH_ACTION_ENA, 512, gpu_addr);
}
-static inline void
+static void
set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
{
uint32_t sq_vtx_constant_word2;
@@ -220,7 +220,7 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
R600_VC_ACTION_ENA, 48, gpu_addr);
}
-static inline void
+static void
set_tex_resource(drm_radeon_private_t *dev_priv,
int format, int w, int h, int pitch, u64 gpu_addr)
{
@@ -258,7 +258,7 @@ set_tex_resource(drm_radeon_private_t *dev_priv,
}
-static inline void
+static void
set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
{
RING_LOCALS;
@@ -282,7 +282,7 @@ set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
ADVANCE_RING();
}
-static inline void
+static void
draw_auto(drm_radeon_private_t *dev_priv)
{
RING_LOCALS;
@@ -311,7 +311,7 @@ draw_auto(drm_radeon_private_t *dev_priv)
COMMIT_RING();
}
-static inline void
+static void
set_default_state(drm_radeon_private_t *dev_priv)
{
int i;
@@ -489,7 +489,7 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}
-static inline uint32_t i2f(uint32_t input)
+static uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
@@ -515,7 +515,7 @@ static inline uint32_t i2f(uint32_t input)
}
-static inline int r600_nomm_get_vb(struct drm_device *dev)
+static int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
dev_priv->blit_vb = radeon_freelist_get(dev);
@@ -526,7 +526,7 @@ static inline int r600_nomm_get_vb(struct drm_device *dev)
return 0;
}
-static inline void r600_nomm_put_vb(struct drm_device *dev)
+static void r600_nomm_put_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -534,7 +534,7 @@ static inline void r600_nomm_put_vb(struct drm_device *dev)
radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
}
-static inline void *r600_nomm_get_vb_ptr(struct drm_device *dev)
+static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
return (((char *)dev->agp_buffer_map->handle +
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9aa74c3f8cb..e09d2818f94 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -42,6 +42,9 @@
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
+#define RECT_UNIT_H 32
+#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
+
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
@@ -54,7 +57,9 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
+ cb_color_info = CB_FORMAT(format) |
+ CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+ CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
@@ -164,9 +169,10 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
u32 sq_vtx_constant_word2;
- sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+ sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+ SQ_VTXC_STRIDE(16);
#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= (2 << 30);
+ sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
@@ -195,25 +201,29 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
- u64 gpu_addr)
+ u64 gpu_addr, u32 size)
{
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
if (h < 1)
h = 1;
- sq_tex_resource_word0 = (1 << 0) | (1 << 3);
- sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
- ((w - 1) << 19));
+ sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
+ S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
+ S_038000_TEX_WIDTH(w - 1);
+
+ sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
+ sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
- sq_tex_resource_word1 = (format << 26);
- sq_tex_resource_word1 |= ((h - 1) << 0);
+ sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
+ S_038010_DST_SEL_X(SQ_SEL_X) |
+ S_038010_DST_SEL_Y(SQ_SEL_Y) |
+ S_038010_DST_SEL_Z(SQ_SEL_Z) |
+ S_038010_DST_SEL_W(SQ_SEL_W);
- sq_tex_resource_word4 = ((1 << 14) |
- (0 << 16) |
- (1 << 19) |
- (2 << 22) |
- (3 << 25));
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, size, gpu_addr);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(rdev, 0);
@@ -450,7 +460,7 @@ set_default_state(struct radeon_device *rdev)
radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}
-static inline uint32_t i2f(uint32_t input)
+static uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
@@ -483,6 +493,27 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ rdev->r600_blit.primitives.set_render_target = set_render_target;
+ rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+ rdev->r600_blit.primitives.set_shaders = set_shaders;
+ rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+ rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+ rdev->r600_blit.primitives.set_scissors = set_scissors;
+ rdev->r600_blit.primitives.draw_auto = draw_auto;
+ rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+ rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
+ rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
+ rdev->r600_blit.ring_size_common += 5; /* done copy */
+ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+ rdev->r600_blit.ring_size_per_loop = 76;
+ /* set_render_target emits 2 extra dwords on rv6xx */
+ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
+ rdev->r600_blit.ring_size_per_loop += 2;
+
+ rdev->r600_blit.max_dim = 8192;
+
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
@@ -600,47 +631,80 @@ static void r600_vb_ib_put(struct radeon_device *rdev)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
+ int *width, int *height, int max_dim)
+{
+ unsigned max_pages;
+ unsigned pages = num_gpu_pages;
+ int w, h;
+
+ if (num_gpu_pages == 0) {
+ /* not supposed to be called with no pages, but just in case */
+ h = 0;
+ w = 0;
+ pages = 0;
+ WARN_ON(1);
+ } else {
+ int rect_order = 2;
+ h = RECT_UNIT_H;
+ while (num_gpu_pages / rect_order) {
+ h *= 2;
+ rect_order *= 4;
+ if (h >= max_dim) {
+ h = max_dim;
+ break;
+ }
+ }
+ max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
+ if (pages > max_pages)
+ pages = max_pages;
+ w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
+ w = (w / RECT_UNIT_W) * RECT_UNIT_W;
+ pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
+ BUG_ON(pages == 0);
+ }
+
+
+ DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
+
+ /* return width and height only if the caller wants it */
+ if (height)
+ *height = h;
+ if (width)
+ *width = w;
+
+ return pages;
+}
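One RECT_UNIT (RECT_UNIT_W x RECT_UNIT_H pixels at 4 bytes per pixel) is exactly one GPU page, so the function above always covers a whole number of pages: it doubles the height until the request fits, caps it at max_dim, then rounds the width down to a unit multiple. A self-contained user-space trace of the same arithmetic, assuming RADEON_GPU_PAGE_SIZE is 4096 (the value the RECT_UNIT_W definition above divides by):

#include <stdio.h>

#define GPU_PAGE_SIZE	4096
#define UNIT_H		32
#define UNIT_W		(GPU_PAGE_SIZE / 4 / UNIT_H)	/* 32 */

int main(void)
{
	unsigned pages = 100, max_dim = 8192, max_pages;
	int h = UNIT_H, w, rect_order = 2;

	while (pages / rect_order) {	/* grow h until it covers the request */
		h *= 2;
		rect_order *= 4;
		if (h >= max_dim) {
			h = max_dim;
			break;
		}
	}
	max_pages = (max_dim * h) / (UNIT_W * UNIT_H);
	if (pages > max_pages)
		pages = max_pages;
	w = (pages * UNIT_W * UNIT_H) / h;	/* 400 */
	w = (w / UNIT_W) * UNIT_W;		/* rounds down to 384 */
	pages = (w * h) / (UNIT_W * UNIT_H);	/* 96 pages this loop, 4 left */
	printf("w=%d h=%d pages=%u\n", w, h, pages);	/* w=384 h=256 pages=96 */
	return 0;
}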
+
+
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
{
int r;
- int ring_size, line_size;
- int max_size;
- /* loops of emits 64 + fence emit possible */
- int dwords_per_loop = 76, num_loops;
+ int ring_size;
+ int num_loops = 0;
+ int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
r = r600_vb_ib_get(rdev);
if (r)
return r;
- /* set_render_target emits 2 extra dwords on rv6xx */
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
- dwords_per_loop += 2;
-
- /* 8 bpp vs 32 bpp for xfer unit */
- if (size_bytes & 3)
- line_size = 8192;
- else
- line_size = 8192*4;
-
- max_size = 8192 * line_size;
+ /* num loops */
+ while (num_gpu_pages) {
+ num_gpu_pages -=
+ r600_blit_create_rect(num_gpu_pages, NULL, NULL,
+ rdev->r600_blit.max_dim);
+ num_loops++;
+ }
- /* major loops cover the max size transfer */
- num_loops = ((size_bytes + max_size) / max_size);
- /* minor loops cover the extra non aligned bits */
- num_loops += ((size_bytes % line_size) ? 1 : 0);
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
- /* set default + shaders */
- ring_size += 40; /* shaders + def state */
- ring_size += 10; /* fence emit for VB IB */
- ring_size += 5; /* done copy */
- ring_size += 10; /* fence emit for done copy */
+ ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
- set_default_state(rdev); /* 14 */
- set_shaders(rdev); /* 26 */
+ rdev->r600_blit.primitives.set_default_state(rdev);
+ rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
@@ -659,182 +723,61 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes)
+ unsigned num_gpu_pages)
{
- int max_bytes;
u64 vb_gpu_addr;
u32 *vb;
- DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
- size_bytes, rdev->r600_blit.vb_used);
+ DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
+ src_gpu_addr, dst_gpu_addr,
+ num_gpu_pages, rdev->r600_blit.vb_used);
vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
- if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
- max_bytes = 8192;
-
- while (size_bytes) {
- int cur_size = size_bytes;
- int src_x = src_gpu_addr & 255;
- int dst_x = dst_gpu_addr & 255;
- int h = 1;
- src_gpu_addr = src_gpu_addr & ~255ULL;
- dst_gpu_addr = dst_gpu_addr & ~255ULL;
-
- if (!src_x && !dst_x) {
- h = (cur_size / max_bytes);
- if (h > 8192)
- h = 8192;
- if (h == 0)
- h = 1;
- else
- cur_size = max_bytes;
- } else {
- if (cur_size > max_bytes)
- cur_size = max_bytes;
- if (cur_size > (max_bytes - dst_x))
- cur_size = (max_bytes - dst_x);
- if (cur_size > (max_bytes - src_x))
- cur_size = (max_bytes - src_x);
- }
-
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
-
- vb[0] = i2f(dst_x);
- vb[1] = 0;
- vb[2] = i2f(src_x);
- vb[3] = 0;
-
- vb[4] = i2f(dst_x);
- vb[5] = i2f(h);
- vb[6] = i2f(src_x);
- vb[7] = i2f(h);
-
- vb[8] = i2f(dst_x + cur_size);
- vb[9] = i2f(h);
- vb[10] = i2f(src_x + cur_size);
- vb[11] = i2f(h);
-
- /* src 9 */
- set_tex_resource(rdev, FMT_8,
- src_x + cur_size, h, src_x + cur_size,
- src_gpu_addr);
-
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
- /* dst 23 */
- set_render_target(rdev, COLOR_8,
- dst_x + cur_size, h,
- dst_gpu_addr);
+ while (num_gpu_pages) {
+ int w, h;
+ unsigned size_in_bytes;
+ unsigned pages_per_loop =
+ r600_blit_create_rect(num_gpu_pages, &w, &h,
+ rdev->r600_blit.max_dim);
- /* scissors 12 */
- set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
+ size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
+ DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
- /* 14 */
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
- set_vtx_resource(rdev, vb_gpu_addr);
-
- /* draw 10 */
- draw_auto(rdev);
-
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- cur_size * h, dst_gpu_addr);
-
- vb += 12;
- rdev->r600_blit.vb_used += 12 * 4;
-
- src_gpu_addr += cur_size * h;
- dst_gpu_addr += cur_size * h;
- size_bytes -= cur_size * h;
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
}
- } else {
- max_bytes = 8192 * 4;
-
- while (size_bytes) {
- int cur_size = size_bytes;
- int src_x = (src_gpu_addr & 255);
- int dst_x = (dst_gpu_addr & 255);
- int h = 1;
- src_gpu_addr = src_gpu_addr & ~255ULL;
- dst_gpu_addr = dst_gpu_addr & ~255ULL;
-
- if (!src_x && !dst_x) {
- h = (cur_size / max_bytes);
- if (h > 8192)
- h = 8192;
- if (h == 0)
- h = 1;
- else
- cur_size = max_bytes;
- } else {
- if (cur_size > max_bytes)
- cur_size = max_bytes;
- if (cur_size > (max_bytes - dst_x))
- cur_size = (max_bytes - dst_x);
- if (cur_size > (max_bytes - src_x))
- cur_size = (max_bytes - src_x);
- }
-
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
- vb[0] = i2f(dst_x / 4);
- vb[1] = 0;
- vb[2] = i2f(src_x / 4);
- vb[3] = 0;
-
- vb[4] = i2f(dst_x / 4);
- vb[5] = i2f(h);
- vb[6] = i2f(src_x / 4);
- vb[7] = i2f(h);
-
- vb[8] = i2f((dst_x + cur_size) / 4);
- vb[9] = i2f(h);
- vb[10] = i2f((src_x + cur_size) / 4);
- vb[11] = i2f(h);
-
- /* src 9 */
- set_tex_resource(rdev, FMT_8_8_8_8,
- (src_x + cur_size) / 4,
- h, (src_x + cur_size) / 4,
- src_gpu_addr);
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
-
- /* dst 23 */
- set_render_target(rdev, COLOR_8_8_8_8,
- (dst_x + cur_size) / 4, h,
- dst_gpu_addr);
-
- /* scissors 12 */
- set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
-
- /* Vertex buffer setup 14 */
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
- set_vtx_resource(rdev, vb_gpu_addr);
-
- /* draw 10 */
- draw_auto(rdev);
-
- /* 5 */
- cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- cur_size * h, dst_gpu_addr);
-
- /* 78 ring dwords per loop */
- vb += 12;
- rdev->r600_blit.vb_used += 12 * 4;
-
- src_gpu_addr += cur_size * h;
- dst_gpu_addr += cur_size * h;
- size_bytes -= cur_size * h;
- }
+ vb[0] = 0;
+ vb[1] = 0;
+ vb[2] = 0;
+ vb[3] = 0;
+
+ vb[4] = 0;
+ vb[5] = i2f(h);
+ vb[6] = 0;
+ vb[7] = i2f(h);
+
+ vb[8] = i2f(w);
+ vb[9] = i2f(h);
+ vb[10] = i2f(w);
+ vb[11] = i2f(h);
+
+ rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
+ w, h, w, src_gpu_addr, size_in_bytes);
+ rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
+ w, h, dst_gpu_addr);
+ rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
+ rdev->r600_blit.primitives.draw_auto(rdev);
+ rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ size_in_bytes, dst_gpu_addr);
+
+ vb += 12;
+ rdev->r600_blit.vb_used += 4*12;
+ src_gpu_addr += size_in_bytes;
+ dst_gpu_addr += size_in_bytes;
+ num_gpu_pages -= pages_per_loop;
}
}
-
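
Editorial note on the loop above, not part of the diff: the 12 dwords written to vb each iteration describe three vertices of four floats, a destination position followed by the matching source texture coordinate, for the corners (0,0), (0,h) and (w,h) of the blit rectangle; i2f() appears to pack each integer as its IEEE-754 float bit pattern for the hardware. The struct and function names below are invented for illustration only.

/* Descriptive, standalone sketch of the per-loop vertex layout. */
#include <stdio.h>

struct blit_vertex {
	float dst_x, dst_y;	/* position in the render target */
	float src_x, src_y;	/* matching source texture coordinate */
};

static void fill_blit_rect(struct blit_vertex v[3], float w, float h)
{
	v[0] = (struct blit_vertex){ 0.0f, 0.0f, 0.0f, 0.0f };
	v[1] = (struct blit_vertex){ 0.0f, h,    0.0f, h    };
	v[2] = (struct blit_vertex){ w,    h,    w,    h    };
}

int main(void)
{
	struct blit_vertex v[3];
	int i;

	fill_blit_rect(v, 1984.0f, 512.0f);	/* example rectangle */
	for (i = 0; i < 3; i++)
		printf("v%d: dst=(%g,%g) src=(%g,%g)\n",
		       i, v[i].dst_x, v[i].dst_y, v[i].src_x, v[i].src_y);
	return 0;
}
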
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 45fd592f960..c9db4931913 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -26,6 +26,8 @@
* Alex Deucher <alexander.deucher@amd.com>
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cf83aa05a68..0a2e023c155 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -162,7 +162,7 @@ static const struct gpu_formats color_formats_table[] = {
[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};
-static inline bool fmt_is_valid_color(u32 format)
+static bool fmt_is_valid_color(u32 format)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;
@@ -173,7 +173,7 @@ static inline bool fmt_is_valid_color(u32 format)
return false;
}
-static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
+static bool fmt_is_valid_texture(u32 format, enum radeon_family family)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;
@@ -187,7 +187,7 @@ static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
return false;
}
-static inline int fmt_get_blocksize(u32 format)
+static int fmt_get_blocksize(u32 format)
{
if (format >= ARRAY_SIZE(color_formats_table))
return 0;
@@ -195,7 +195,7 @@ static inline int fmt_get_blocksize(u32 format)
return color_formats_table[format].blocksize;
}
-static inline int fmt_get_nblocksx(u32 format, u32 w)
+static int fmt_get_nblocksx(u32 format, u32 w)
{
unsigned bw;
@@ -209,7 +209,7 @@ static inline int fmt_get_nblocksx(u32 format, u32 w)
return (w + bw - 1) / bw;
}
-static inline int fmt_get_nblocksy(u32 format, u32 h)
+static int fmt_get_nblocksy(u32 format, u32 h)
{
unsigned bh;
@@ -223,25 +223,6 @@ static inline int fmt_get_nblocksy(u32 format, u32 h)
return (h + bh - 1) / bh;
}
-static inline int r600_bpe_from_format(u32 *bpe, u32 format)
-{
- unsigned res;
-
- if (format >= ARRAY_SIZE(color_formats_table))
- goto fail;
-
- res = color_formats_table[format].blocksize;
- if (res == 0)
- goto fail;
-
- *bpe = res;
- return 0;
-
-fail:
- *bpe = 16;
- return -EINVAL;
-}
-
struct array_mode_checker {
int array_mode;
u32 group_size;
@@ -252,7 +233,7 @@ struct array_mode_checker {
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
-static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
+static int r600_get_array_mode_alignment(struct array_mode_checker *values,
u32 *pitch_align,
u32 *height_align,
u32 *depth_align,
@@ -331,7 +312,7 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->db_depth_control = 0xFFFFFFFF;
}
-static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
u32 slice_tile_max, size, tmp;
@@ -737,7 +718,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
* Check next packet is relocation packet3, do bo validation and compute
* GPU offset using the provided start.
**/
-static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
struct radeon_cs_packet p3reloc;
int r;
@@ -911,7 +892,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
* if register is safe. If register is not flagged as safe this function
* will test it against a list of registers needing special handling.
*/
-static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
@@ -1215,7 +1196,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return 0;
}
-static inline unsigned mip_minify(unsigned size, unsigned level)
+static unsigned mip_minify(unsigned size, unsigned level)
{
unsigned val;
@@ -1285,7 +1266,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
* This function will check that the resource has valid field and that
* the texture and mipmap bo object are big enough to cover this resource.
*/
-static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
struct radeon_bo *texture,
struct radeon_bo *mipmap,
u64 base_offset,
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 0245ae6c204..bfe1b5d92af 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -79,6 +79,11 @@
#define CB_COLOR0_SIZE 0x28060
#define CB_COLOR0_VIEW 0x28080
#define CB_COLOR0_INFO 0x280a0
+# define CB_FORMAT(x) ((x) << 2)
+# define CB_ARRAY_MODE(x) ((x) << 8)
+# define CB_SOURCE_FORMAT(x) ((x) << 27)
+# define CB_SF_EXPORT_FULL 0
+# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_TILE 0x280c0
#define CB_COLOR0_FRAG 0x280e0
#define CB_COLOR0_MASK 0x28100
@@ -417,6 +422,17 @@
#define SQ_PGM_START_VS 0x28858
#define SQ_PGM_RESOURCES_VS 0x28868
#define SQ_PGM_CF_OFFSET_VS 0x288d0
+
+#define SQ_VTX_CONSTANT_WORD0_0 0x30000
+#define SQ_VTX_CONSTANT_WORD1_0 0x30004
+#define SQ_VTX_CONSTANT_WORD2_0 0x30008
+# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
+# define SQ_VTXC_STRIDE(x) ((x) << 8)
+# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
+# define SQ_ENDIAN_NONE 0
+# define SQ_ENDIAN_8IN16 1
+# define SQ_ENDIAN_8IN32 2
+#define SQ_VTX_CONSTANT_WORD3_0 0x3000c
#define SQ_VTX_CONSTANT_WORD6_0 0x38018
#define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30)
#define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3)
@@ -1352,6 +1368,12 @@
#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
#define C_038010_DST_SEL_W 0xF1FFFFFF
+# define SQ_SEL_X 0
+# define SQ_SEL_Y 1
+# define SQ_SEL_Z 2
+# define SQ_SEL_W 3
+# define SQ_SEL_0 4
+# define SQ_SEL_1 5
#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
#define C_038010_BASE_LEVEL 0x0FFFFFFF
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c1e056b35b2..fc5a1d642cb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -93,6 +93,7 @@ extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
+extern int radeon_msi;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -102,7 +103,7 @@ extern int radeon_pcie_gen2;
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
-#define RADEON_DEBUGFS_MAX_NUM_FILES 32
+#define RADEON_DEBUGFS_MAX_COMPONENTS 32
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
@@ -306,30 +307,17 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
*/
struct radeon_mc;
-struct radeon_gart_table_ram {
- volatile uint32_t *ptr;
-};
-
-struct radeon_gart_table_vram {
- struct radeon_bo *robj;
- volatile uint32_t *ptr;
-};
-
-union radeon_gart_table {
- struct radeon_gart_table_ram ram;
- struct radeon_gart_table_vram vram;
-};
-
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
struct radeon_gart {
dma_addr_t table_addr;
+ struct radeon_bo *robj;
+ void *ptr;
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
- union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
bool *ttm_alloced;
@@ -340,6 +328,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
@@ -347,6 +337,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr);
+void radeon_gart_restore(struct radeon_device *rdev);
/*
@@ -437,25 +428,26 @@ union radeon_irq_stat_regs {
struct evergreen_irq_stat_regs evergreen;
};
+#define RADEON_MAX_HPD_PINS 6
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_HDMI_BLOCKS 2
+
struct radeon_irq {
bool installed;
bool sw_int;
- /* FIXME: use a define max crtc rather than hardcode it */
- bool crtc_vblank_int[6];
- bool pflip[6];
+ bool crtc_vblank_int[RADEON_MAX_CRTCS];
+ bool pflip[RADEON_MAX_CRTCS];
wait_queue_head_t vblank_queue;
- /* FIXME: use defines for max hpd/dacs */
- bool hpd[6];
+ bool hpd[RADEON_MAX_HPD_PINS];
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
- /* FIXME: use defines for max HDMI blocks */
- bool hdmi[2];
+ bool hdmi[RADEON_MAX_HDMI_BLOCKS];
spinlock_t sw_lock;
int sw_refcount;
union radeon_irq_stat_regs stat_regs;
- spinlock_t pflip_lock[6];
- int pflip_refcount[6];
+ spinlock_t pflip_lock[RADEON_MAX_CRTCS];
+ int pflip_refcount[RADEON_MAX_CRTCS];
};
int radeon_irq_kms_init(struct radeon_device *rdev);
@@ -523,9 +515,30 @@ struct r600_ih {
bool enabled;
};
+struct r600_blit_cp_primitives {
+ void (*set_render_target)(struct radeon_device *rdev, int format,
+ int w, int h, u64 gpu_addr);
+ void (*cp_set_surface_sync)(struct radeon_device *rdev,
+ u32 sync_type, u32 size,
+ u64 mc_addr);
+ void (*set_shaders)(struct radeon_device *rdev);
+ void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
+ void (*set_tex_resource)(struct radeon_device *rdev,
+ int format, int w, int h, int pitch,
+ u64 gpu_addr, u32 size);
+ void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
+ int x2, int y2);
+ void (*draw_auto)(struct radeon_device *rdev);
+ void (*set_default_state)(struct radeon_device *rdev);
+};
+
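
Editorial aside: the table above is what lets the shared r600_kms_blit_copy() path stay chip-agnostic, with each ASIC family filling in its own emit helpers once at blit init time. A minimal standalone sketch of that pattern follows; every name in it is invented, not a driver symbol.

/* Standalone illustration of the function-pointer table pattern. */
#include <stdio.h>

struct blit_primitives {
	void (*set_scissors)(int x1, int y1, int x2, int y2);
	void (*draw_auto)(void);
};

static void chip_a_set_scissors(int x1, int y1, int x2, int y2)
{
	printf("chip A scissors %d,%d-%d,%d\n", x1, y1, x2, y2);
}

static void chip_a_draw_auto(void)
{
	printf("chip A draw\n");
}

/* chip-agnostic sequence, loosely mirroring r600_kms_blit_copy() */
static void blit_copy(const struct blit_primitives *p, int w, int h)
{
	p->set_scissors(0, 0, w, h);
	p->draw_auto();
}

int main(void)
{
	struct blit_primitives prims = {
		.set_scissors = chip_a_set_scissors,
		.draw_auto = chip_a_draw_auto,
	};
	blit_copy(&prims, 64, 16);
	return 0;
}
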
struct r600_blit {
struct mutex mutex;
struct radeon_bo *shader_obj;
+ struct r600_blit_cp_primitives primitives;
+ int max_dim;
+ int ring_size_common;
+ int ring_size_per_loop;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
@@ -534,6 +547,8 @@ struct r600_blit {
struct radeon_ib *vb_ib;
};
+void r600_blit_suspend(struct radeon_device *rdev);
+
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -601,32 +616,7 @@ struct radeon_cs_parser {
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-
-
-static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
-
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
-}
+extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
struct radeon_cs_packet {
unsigned idx;
@@ -794,8 +784,7 @@ struct radeon_pm_clock_info {
struct radeon_power_state {
enum radeon_pm_state_type type;
- /* XXX: use a define for num clock modes */
- struct radeon_pm_clock_info clock_info[8];
+ struct radeon_pm_clock_info *clock_info;
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
@@ -865,11 +854,14 @@ struct radeon_pm {
struct device *int_hwmon_dev;
};
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance);
/*
* Benchmarking
*/
-void radeon_benchmark(struct radeon_device *rdev);
+void radeon_benchmark(struct radeon_device *rdev, int test_number);
/*
@@ -1145,12 +1137,55 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-/* VRAM scratch page for HDP bug */
-struct r700_vram_scratch {
+/* VRAM scratch page for HDP bug, default vram page */
+struct r600_vram_scratch {
struct radeon_bo *robj;
volatile uint32_t *ptr;
+ u64 gpu_addr;
};
+
+/*
+ * Mutex which allows recursive locking from the same process.
+ */
+struct radeon_mutex {
+ struct mutex mutex;
+ struct task_struct *owner;
+ int level;
+};
+
+static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+{
+ mutex_init(&mutex->mutex);
+ mutex->owner = NULL;
+ mutex->level = 0;
+}
+
+static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+{
+ if (mutex_trylock(&mutex->mutex)) {
+ /* The mutex was unlocked before, so it's ours now */
+ mutex->owner = current;
+ } else if (mutex->owner != current) {
+ /* Another process locked the mutex, take it */
+ mutex_lock(&mutex->mutex);
+ mutex->owner = current;
+ }
+ /* Otherwise the mutex was already locked by this process */
+
+ mutex->level++;
+}
+
+static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+{
+ if (--mutex->level > 0)
+ return;
+
+ mutex->owner = NULL;
+ mutex_unlock(&mutex->mutex);
+}
+
+
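
A usage sketch of the helpers above (editorial, not from the patch): repeated locking from the owning process only bumps the level, so a path that already holds cs_mutex can take it again without deadlocking, for instance a GPU reset kicked off while the CS ioctl holds the lock, given that radeon_gpu_reset() further down also takes cs_mutex. Surrounding driver context and error handling are omitted.

/* Sketch only; assumes a struct radeon_device with the cs_mutex member
 * converted to struct radeon_mutex as in this patch. */
static void example_nested_use(struct radeon_device *rdev)
{
	radeon_mutex_lock(&rdev->cs_mutex);	/* level 1, owner = current   */
	radeon_mutex_lock(&rdev->cs_mutex);	/* same owner: level 2, no block */
	radeon_mutex_unlock(&rdev->cs_mutex);	/* back to level 1, still held */
	radeon_mutex_unlock(&rdev->cs_mutex);	/* level 0: actually unlocked  */
}
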
/*
* Core structure, functions and helpers.
*/
@@ -1206,7 +1241,7 @@ struct radeon_device {
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
- struct mutex cs_mutex;
+ struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool gpu_lockup;
@@ -1220,7 +1255,7 @@ struct radeon_device {
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
struct r600_blit r600_blit;
- struct r700_vram_scratch vram_scratch;
+ struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct work_struct hotplug_work;
@@ -1252,45 +1287,10 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- if (reg < rdev->rmmio_size)
- return readl((rdev->rmmio) + reg);
- else {
- writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
- return readl((rdev->rmmio) + RADEON_MM_DATA);
- }
-}
-
-static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- if (reg < rdev->rmmio_size)
- writel(v, (rdev->rmmio) + reg);
- else {
- writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, (rdev->rmmio) + RADEON_MM_DATA);
- }
-}
-
-static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
-{
- if (reg < rdev->rio_mem_size)
- return ioread32(rdev->rio_mem + reg);
- else {
- iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
- return ioread32(rdev->rio_mem + RADEON_MM_DATA);
- }
-}
-
-static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- if (reg < rdev->rio_mem_size)
- iowrite32(v, rdev->rio_mem + reg);
- else {
- iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
- iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
- }
-}
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
/*
* Cast helper
@@ -1413,19 +1413,19 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
+
+#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
-#if DRM_DEBUG_CODE
- if (rdev->cp.count_dw <= 0) {
- DRM_ERROR("radeon: writting more dword to ring than expected !\n");
- }
-#endif
rdev->cp.ring[rdev->cp.wptr++] = v;
rdev->cp.wptr &= rdev->cp.ptr_mask;
rdev->cp.count_dw--;
rdev->cp.ring_free_dw--;
}
-
+#else
+/* With debugging this is just too big to inline */
+void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+#endif
/*
* ASICs macro.
@@ -1479,8 +1479,6 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
-extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
-extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1504,6 +1502,12 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
/*
+ * R600 vram scratch functions
+ */
+int r600_vram_scratch_init(struct radeon_device *rdev);
+void r600_vram_scratch_fini(struct radeon_device *rdev);
+
+/*
* r600 functions used by radeon_encoder.c
*/
extern void r600_hdmi_enable(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index df8218bb83a..a2e1eae114e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -765,9 +765,9 @@ static struct radeon_asic evergreen_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
- .copy_blit = &evergreen_copy_blit,
+ .copy_blit = &r600_copy_blit,
.copy_dma = NULL,
- .copy = &evergreen_copy_blit,
+ .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
@@ -812,9 +812,9 @@ static struct radeon_asic sumo_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
- .copy_blit = &evergreen_copy_blit,
+ .copy_blit = &r600_copy_blit,
.copy_dma = NULL,
- .copy = &evergreen_copy_blit,
+ .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
.pm_misc = &evergreen_pm_misc,
.pm_prepare = &evergreen_pm_prepare,
.pm_finish = &evergreen_pm_finish,
- .pm_init_profile = &rs780_pm_init_profile,
+ .pm_init_profile = &sumo_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
@@ -859,9 +859,9 @@ static struct radeon_asic btc_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
- .copy_blit = &evergreen_copy_blit,
+ .copy_blit = &r600_copy_blit,
.copy_dma = NULL,
- .copy = &evergreen_copy_blit,
+ .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
- .copy_blit = &evergreen_copy_blit,
+ .copy_blit = &r600_copy_blit,
.copy_dma = NULL,
- .copy = &evergreen_copy_blit,
+ .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3dedaa07aac..59914842a72 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -364,11 +364,11 @@ void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
+ unsigned num_gpu_pages);
/*
* rv770,rv730,rv710,rv740
@@ -401,9 +401,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int evergreen_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -416,18 +413,12 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
-void evergreen_blit_fini(struct radeon_device *rdev);
-/* evergreen blit */
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
/*
* cayman
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bf2b61584cd..d2d179267af 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,7 +62,7 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
-static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
@@ -228,7 +228,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
-static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
@@ -1999,6 +1999,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
case 1:
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[state_index].clock_info)
+ return state_index;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
@@ -2035,6 +2039,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 2:
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[state_index].clock_info)
+ return state_index;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
@@ -2072,6 +2080,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 3:
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[state_index].clock_info)
+ return state_index;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
@@ -2257,7 +2269,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- if (ASIC_IS_DCE5(rdev)) {
+ if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2377,17 +2389,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
- for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
- clock_info = (union pplib_clock_info *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
- valid = radeon_atombios_parse_pplib_clock_info(rdev,
- state_index, mode_index,
- clock_info);
- if (valid)
- mode_index++;
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+ ((power_info->pplib.ucStateEntrySize - 1) ?
+ (power_info->pplib.ucStateEntrySize - 1) : 1),
+ GFP_KERNEL);
+ if (!rdev->pm.power_state[i].clock_info)
+ return state_index;
+ if (power_info->pplib.ucStateEntrySize - 1) {
+ for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+ clock_info = (union pplib_clock_info *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+ (power_state->v1.ucClockStateIndices[j] *
+ power_info->pplib.ucClockInfoSize));
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ } else {
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ rdev->clock.default_sclk;
+ mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
@@ -2456,18 +2482,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index];
- valid = radeon_atombios_parse_pplib_clock_info(rdev,
- state_index, mode_index,
- clock_info);
- if (valid)
- mode_index++;
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+ (power_state->v2.ucNumDPMLevels ?
+ power_state->v2.ucNumDPMLevels : 1),
+ GFP_KERNEL);
+ if (!rdev->pm.power_state[i].clock_info)
+ return state_index;
+ if (power_state->v2.ucNumDPMLevels) {
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+ /* XXX this might be an inagua bug... */
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ } else {
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ rdev->clock.default_sclk;
+ mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
@@ -2524,19 +2564,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
} else {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
- /* add the default mode */
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- rdev->pm.power_state[state_index].pcie_lanes = 16;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].flags = 0;
- state_index++;
+ rdev->pm.power_state[0].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (rdev->pm.power_state[0].clock_info) {
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
+ state_index++;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10191d9372d..17e1a9b2d8f 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -26,21 +26,81 @@
#include "radeon_reg.h"
#include "radeon.h"
-void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
- unsigned sdomain, unsigned ddomain)
+#define RADEON_BENCHMARK_COPY_BLIT 1
+#define RADEON_BENCHMARK_COPY_DMA 0
+
+#define RADEON_BENCHMARK_ITERATIONS 1024
+#define RADEON_BENCHMARK_COMMON_MODES_N 17
+
+static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
+ uint64_t saddr, uint64_t daddr,
+ int flag, int n)
+{
+ unsigned long start_jiffies;
+ unsigned long end_jiffies;
+ struct radeon_fence *fence = NULL;
+ int i, r;
+
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r)
+ return r;
+
+ switch (flag) {
+ case RADEON_BENCHMARK_COPY_DMA:
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ fence);
+ break;
+ case RADEON_BENCHMARK_COPY_BLIT:
+ r = radeon_copy_blit(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ fence);
+ break;
+ default:
+ DRM_ERROR("Unknown copy method\n");
+ r = -EINVAL;
+ }
+ if (r)
+ goto exit_do_move;
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ goto exit_do_move;
+ radeon_fence_unref(&fence);
+ }
+ end_jiffies = jiffies;
+ r = jiffies_to_msecs(end_jiffies - start_jiffies);
+
+exit_do_move:
+ if (fence)
+ radeon_fence_unref(&fence);
+ return r;
+}
+
+
+static void radeon_benchmark_log_results(int n, unsigned size,
+ unsigned int time,
+ unsigned sdomain, unsigned ddomain,
+ char *kind)
+{
+ unsigned int throughput = (n * (size >> 10)) / time;
+ DRM_INFO("radeon: %s %u bo moves of %u kB from"
+ " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
+ kind, n, size >> 10, sdomain, ddomain, time,
+ throughput * 8, throughput);
+}
+
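
A quick worked example of the throughput figure logged above (editorial, with made-up numbers): size >> 10 converts bytes to KiB, so dividing by the elapsed milliseconds yields KiB/ms, which the message reports as MB/s and, multiplied by eight, as Mb/s.

/* Standalone arithmetic check; the numbers are arbitrary. */
#include <stdio.h>

int main(void)
{
	unsigned n = 1024;		/* number of BO moves          */
	unsigned size = 1024 * 1024;	/* bytes per move              */
	unsigned time_ms = 2000;	/* elapsed time in milliseconds */

	/* mirrors radeon_benchmark_log_results(): (n * KiB) / ms */
	unsigned throughput = (n * (size >> 10)) / time_ms;

	printf("%u MB/s (%u Mb/s)\n", throughput, throughput * 8);
	/* prints: 524 MB/s (4192 Mb/s) */
	return 0;
}
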
+static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+ unsigned sdomain, unsigned ddomain)
{
struct radeon_bo *dobj = NULL;
struct radeon_bo *sobj = NULL;
- struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
- unsigned long start_jiffies;
- unsigned long end_jiffies;
- unsigned long time;
- unsigned i, n, size;
- int r;
+ int r, n;
+ int time;
- size = bsize;
- n = 1024;
+ n = RADEON_BENCHMARK_ITERATIONS;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
@@ -67,65 +127,26 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
}
/* r100 doesn't have dma engine so skip the test */
- if (rdev->asic->copy_dma) {
-
- start_jiffies = jiffies;
- for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
- if (r) {
- goto out_cleanup;
- }
-
- r = radeon_copy_dma(rdev, saddr, daddr,
- size / RADEON_GPU_PAGE_SIZE, fence);
-
- if (r) {
- goto out_cleanup;
- }
- r = radeon_fence_wait(fence, false);
- if (r) {
- goto out_cleanup;
- }
- radeon_fence_unref(&fence);
- }
- end_jiffies = jiffies;
- time = end_jiffies - start_jiffies;
- time = jiffies_to_msecs(time);
- if (time > 0) {
- i = ((n * size) >> 10) / time;
- printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
- " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
- n, size >> 10,
- sdomain, ddomain, time,
- i, i * 1000, (i * 1000) / 1024);
- }
- }
-
- start_jiffies = jiffies;
- for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
- if (r) {
- goto out_cleanup;
- }
- r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
- if (r) {
- goto out_cleanup;
- }
- r = radeon_fence_wait(fence, false);
- if (r) {
+ /* also, VRAM-to-VRAM test doesn't make much sense for DMA */
+ /* skip it as well if domains are the same */
+ if ((rdev->asic->copy_dma) && (sdomain != ddomain)) {
+ time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+ RADEON_BENCHMARK_COPY_DMA, n);
+ if (time < 0)
goto out_cleanup;
- }
- radeon_fence_unref(&fence);
- }
- end_jiffies = jiffies;
- time = end_jiffies - start_jiffies;
- time = jiffies_to_msecs(time);
- if (time > 0) {
- i = ((n * size) >> 10) / time;
- printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
- " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
- sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
+ if (time > 0)
+ radeon_benchmark_log_results(n, size, time,
+ sdomain, ddomain, "dma");
}
+
+ time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+ RADEON_BENCHMARK_COPY_BLIT, n);
+ if (time < 0)
+ goto out_cleanup;
+ if (time > 0)
+ radeon_benchmark_log_results(n, size, time,
+ sdomain, ddomain, "blit");
+
out_cleanup:
if (sobj) {
r = radeon_bo_reserve(sobj, false);
@@ -143,18 +164,92 @@ out_cleanup:
}
radeon_bo_unref(&dobj);
}
- if (fence) {
- radeon_fence_unref(&fence);
- }
+
if (r) {
- printk(KERN_WARNING "Error while benchmarking BO move.\n");
+ DRM_ERROR("Error while benchmarking BO move.\n");
}
}
-void radeon_benchmark(struct radeon_device *rdev)
+void radeon_benchmark(struct radeon_device *rdev, int test_number)
{
- radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_DOMAIN_VRAM);
- radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
- RADEON_GEM_DOMAIN_GTT);
+ int i;
+ int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
+ 640 * 480 * 4,
+ 720 * 480 * 4,
+ 800 * 600 * 4,
+ 848 * 480 * 4,
+ 1024 * 768 * 4,
+ 1152 * 768 * 4,
+ 1280 * 720 * 4,
+ 1280 * 800 * 4,
+ 1280 * 854 * 4,
+ 1280 * 960 * 4,
+ 1280 * 1024 * 4,
+ 1440 * 900 * 4,
+ 1400 * 1050 * 4,
+ 1680 * 1050 * 4,
+ 1600 * 1200 * 4,
+ 1920 * 1080 * 4,
+ 1920 * 1200 * 4
+ };
+
+ switch (test_number) {
+ case 1:
+ /* simple test, VRAM to GTT and GTT to VRAM */
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
+ break;
+ case 2:
+ /* simple test, VRAM to VRAM */
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 3:
+ /* GTT to VRAM, buffer size sweep, powers of 2 */
+ for (i = 1; i <= 65536; i <<= 1)
+ radeon_benchmark_move(rdev, i*1024,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 4:
+ /* VRAM to GTT, buffer size sweep, powers of 2 */
+ for (i = 1; i <= 65536; i <<= 1)
+ radeon_benchmark_move(rdev, i*1024,
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
+ break;
+ case 5:
+ /* VRAM to VRAM, buffer size sweep, powers of 2 */
+ for (i = 1; i <= 65536; i <<= 1)
+ radeon_benchmark_move(rdev, i*1024,
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 6:
+ /* GTT to VRAM, buffer size sweep, common modes */
+ for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ radeon_benchmark_move(rdev, common_modes[i],
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 7:
+ /* VRAM to GTT, buffer size sweep, common modes */
+ for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ radeon_benchmark_move(rdev, common_modes[i],
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
+ break;
+ case 8:
+ /* VRAM to VRAM, buffer size sweep, common modes */
+ for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ radeon_benchmark_move(rdev, common_modes[i],
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+
+ default:
+ DRM_ERROR("Unknown benchmark\n");
+ }
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 63675241c7f..81fc100be7e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -620,8 +620,8 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_data_mask = 0x80;
} else {
/* default masks for ddc pads */
- i2c.mask_clk_mask = RADEON_GPIO_EN_1;
- i2c.mask_data_mask = RADEON_GPIO_EN_0;
+ i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
+ i2c.mask_data_mask = RADEON_GPIO_MASK_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
i2c.a_data_mask = RADEON_GPIO_A_0;
i2c.en_clk_mask = RADEON_GPIO_EN_1;
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
/* allocate 2 power states */
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
- if (!rdev->pm.power_state) {
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.num_power_states = 0;
-
- rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
- rdev->pm.current_clock_mode_index = 0;
- return;
- }
+ if (rdev->pm.power_state) {
+ /* allocate 1 clock mode per state */
+ rdev->pm.power_state[0].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ rdev->pm.power_state[1].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[0].clock_info ||
+ !rdev->pm.power_state[1].clock_info)
+ goto pm_failed;
+ } else
+ goto pm_failed;
/* check for a thermal chip */
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
@@ -2735,6 +2738,14 @@ default_mode:
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
+ return;
+
+pm_failed:
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.num_power_states = 0;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 449c3d8c683..e7cb3ab0924 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,8 +44,6 @@ extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
-bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
-
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -432,55 +430,6 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
return 0;
}
-/*
- * Some integrated ATI Radeon chipset implementations (e. g.
- * Asus M2A-VM HDMI) may indicate the availability of a DDC,
- * even when there's no monitor connected. For these connectors
- * following DDC probe extension will be applied: check also for the
- * availability of EDID with at least a correct EDID header. Only then,
- * DDC is assumed to be available. This prevents drm_get_edid() and
- * drm_edid_block_valid() from periodically dumping data and kernel
- * errors into the logs and onto the terminal.
- */
-static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
- uint32_t supported_device,
- int connector_type)
-{
- /* Asus M2A-VM HDMI board sends data to i2c bus even,
- * if HDMI add-on card is not plugged in or HDMI is disabled in
- * BIOS. Valid DDC can only be assumed, if also a valid EDID header
- * can be retrieved via i2c bus during DDC probe */
- if ((dev->pdev->device == 0x791e) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x826d)) {
- if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
- (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
- return true;
- }
- /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
- * for a DVI connector that is not implemented */
- if ((dev->pdev->device == 0x796e) &&
- (dev->pdev->subsystem_vendor == 0x1019) &&
- (dev->pdev->subsystem_device == 0x2615)) {
- if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
- (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
- return true;
- }
- /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
- * (RS690M) sends data to i2c bus for a HDMI connector that
- * is not implemented */
- if ((dev->pdev->device == 0x791f) &&
- (dev->pdev->subsystem_vendor == 0x1179) &&
- (dev->pdev->subsystem_device == 0xff68)) {
- if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
- (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
- return true;
- }
-
- /* Default: no EDID header probe required for DDC probing */
- return false;
-}
-
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -721,9 +670,9 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector,
- radeon_connector->requires_extended_probe);
+ dret = radeon_ddc_probe(radeon_connector);
if (dret) {
+ radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
@@ -750,12 +699,21 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
} else {
/* if we aren't forcing don't do destructive polling */
- if (!force)
- return connector->status;
+ if (!force) {
+ /* only return the previous status if we last
+ * detected a monitor via load.
+ */
+ if (radeon_connector->detected_by_load)
+ return connector->status;
+ else
+ return ret;
+ }
if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
+ if (ret != connector_status_disconnected)
+ radeon_connector->detected_by_load = true;
}
}
@@ -894,9 +852,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
bool dret = false;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector,
- radeon_connector->requires_extended_probe);
+ dret = radeon_ddc_probe(radeon_connector);
if (dret) {
+ radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
@@ -959,8 +917,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
goto out;
+ /* DVI-D and HDMI-A are digital only */
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
+ goto out;
+
+ /* if we aren't forcing don't do destructive polling */
if (!force) {
- ret = connector->status;
+ /* only return the previous status if we last
+ * detected a monitor via load.
+ */
+ if (radeon_connector->detected_by_load)
+ ret = connector->status;
goto out;
}
@@ -985,6 +953,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected) {
radeon_connector->use_digital = false;
}
+ if (ret != connector_status_disconnected)
+ radeon_connector->detected_by_load = true;
}
break;
}
@@ -1181,7 +1151,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
}
} else {
/* need to setup ddc on the bridge */
- if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
@@ -1191,13 +1162,12 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
return ret;
}
-bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
+u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
- bool found = false;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
@@ -1213,14 +1183,13 @@ bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- found = true;
- break;
+ return radeon_encoder->encoder_id;
default:
break;
}
}
- return found;
+ return ENCODER_OBJECT_ID_NONE;
}
bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
@@ -1297,7 +1266,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
- } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) {
/* DP bridges are always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
/* get the DPCD from the bridge */
@@ -1306,8 +1276,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (encoder) {
/* setup ddc on the bridge */
radeon_atom_ext_encoder_setup_ddc(encoder);
- if (radeon_ddc_probe(radeon_connector,
- radeon_connector->requires_extended_probe)) /* try DDC */
+ if (radeon_ddc_probe(radeon_connector)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1325,8 +1294,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector,
- radeon_connector->requires_extended_probe))
+ if (radeon_ddc_probe(radeon_connector))
ret = connector_status_connected;
}
}
@@ -1471,9 +1439,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
- radeon_connector->requires_extended_probe =
- radeon_connector_needs_extended_probe(rdev, supported_device,
- connector_type);
+
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1820,9 +1786,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
- radeon_connector->requires_extended_probe =
- radeon_connector_needs_extended_probe(rdev, supported_device,
- connector_type);
+
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 045ec59478f..72ae8266b8e 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -29,6 +29,8 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75a..ccaa243c144 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_chunk *ib_chunk;
int r;
- mutex_lock(&rdev->cs_mutex);
+ radeon_mutex_lock(&rdev->cs_mutex);
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_ib_get(rdev, &parser.ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
/* Copy the packet into the IB, the parser will read from the
@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r || parser.parser_error) {
DRM_ERROR("Invalid command stream !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_finish_pages(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_ERROR("Failed to schedule IB !\n");
}
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b51e15725c6..c4d00a17141 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- mutex_init(&rdev->cs_mutex);
+ radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -750,14 +750,15 @@ int radeon_device_init(struct radeon_device *rdev,
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
- * IGP - can handle 40-bits (in theory)
+ * IGP - can handle 40-bits
* AGP - generally dma32 is safest
- * PCI - only dma32
+ * PCI - dma32 for legacy pci gart, 40 bits on newer asics
*/
rdev->need_dma32 = false;
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
- if (rdev->flags & RADEON_IS_PCI)
+ if ((rdev->flags & RADEON_IS_PCI) &&
+ (rdev->family < CHIP_RS400))
rdev->need_dma32 = true;
dma_bits = rdev->need_dma32 ? 32 : 40;
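
The hunk above only changes how need_dma32 is derived (PCI parts from RS400 onward are now allowed the 40-bit mask); dma_bits is then applied to the device as before. As a rough, assumed sketch of that application -- the exact call site and fallback handling in radeon_device.c may differ:

	/* Assumed application of dma_bits; pci_set_dma_mask() and DMA_BIT_MASK()
	 * are standard kernel APIs, the surrounding error handling is illustrative. */
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to a 32-bit mask if the wider one is refused */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
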
@@ -817,7 +818,7 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_test_moves(rdev);
}
if (radeon_benchmarking) {
- radeon_benchmark(rdev);
+ radeon_benchmark(rdev, radeon_benchmarking);
}
return 0;
}
@@ -954,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int r;
int resched;
+ /* Prevent CS ioctl from interfering */
+ radeon_mutex_lock(&rdev->cs_mutex);
+
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -966,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
- return 0;
}
- /* bad news, how to tell it to userspace ? */
- dev_info(rdev->dev, "GPU reset failed\n");
+
+ radeon_mutex_unlock(&rdev->cs_mutex);
+
+ if (r) {
+ /* bad news, how to tell it to userspace ? */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ }
+
return r;
}
@@ -981,7 +990,7 @@ struct radeon_debugfs {
struct drm_info_list *files;
unsigned num_files;
};
-static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
+static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
static unsigned _radeon_debugfs_count = 0;
int radeon_debugfs_add_files(struct radeon_device *rdev,
@@ -996,14 +1005,17 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
return 0;
}
}
- if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
- DRM_ERROR("Reached maximum number of debugfs files.\n");
- DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
+
+ i = _radeon_debugfs_count + 1;
+ if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
+ DRM_ERROR("Reached maximum number of debugfs components.\n");
+ DRM_ERROR("Report so we increase "
+ "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
_radeon_debugfs[_radeon_debugfs_count].files = files;
_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
- _radeon_debugfs_count++;
+ _radeon_debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
rdev->ddev->control->debugfs_root,
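
With this change radeon_debugfs_add_files() counts registered components (one drm_info_list array per call) against RADEON_DEBUGFS_MAX_COMPONENTS rather than counting individual files. A hypothetical caller, with made-up names, showing how a component would be registered under the new accounting:

	/* Hypothetical example -- names are invented; assumes "radeon.h" and
	 * <linux/seq_file.h> for struct radeon_device and seq_printf(). */
	static int radeon_debugfs_foo_info(struct seq_file *m, void *data)
	{
		seq_printf(m, "example debugfs component entry\n");
		return 0;
	}

	static struct drm_info_list radeon_foo_debugfs_list[] = {
		{ "radeon_foo_info", radeon_debugfs_foo_info, 0, NULL },
	};

	int radeon_foo_debugfs_init(struct radeon_device *rdev)
	{
		/* One call registers the whole array and consumes a single
		 * RADEON_DEBUGFS_MAX_COMPONENTS slot, however many files it holds. */
		return radeon_debugfs_add_files(rdev, radeon_foo_debugfs_list,
						ARRAY_SIZE(radeon_foo_debugfs_list));
	}
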
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6adb3e58aff..a22d6e6a49a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -33,8 +33,6 @@
#include "drm_crtc_helper.h"
#include "drm_edid.h"
-static int radeon_ddc_dump(struct drm_connector *connector);
-
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -669,7 +667,6 @@ static void radeon_print_display_setup(struct drm_device *dev)
static bool radeon_setup_enc_conn(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *drm_connector;
bool ret = false;
if (rdev->bios) {
@@ -689,8 +686,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
if (ret) {
radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
- list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
- radeon_ddc_dump(drm_connector);
}
return ret;
@@ -708,7 +703,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
- radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
+ (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+ ENCODER_OBJECT_ID_NONE)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
@@ -743,34 +739,6 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
return 0;
}
-static int radeon_ddc_dump(struct drm_connector *connector)
-{
- struct edid *edid;
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret = 0;
-
- /* on hw with routers, select right port */
- if (radeon_connector->router.ddc_valid)
- radeon_router_select_ddc_port(radeon_connector);
-
- if (!radeon_connector->ddc_bus)
- return -1;
- edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- /* Log EDID retrieval status here. In particular with regard to
- * connectors with requires_extended_probe flag set, that will prevent
- * function radeon_dvi_detect() to fetch EDID on this connector,
- * as long as there is no valid EDID header found */
- if (edid) {
- DRM_INFO("Radeon display connector %s: Found valid EDID",
- drm_get_connector_name(connector));
- kfree(edid);
- } else {
- DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
- drm_get_connector_name(connector));
- }
- return ret;
-}
-
/* avivo */
static void avivo_get_fb_div(struct radeon_pll *pll,
u32 target_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e71d2ed7fa1..a0b35e90948 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,6 +36,7 @@
#include "drm_pciids.h"
#include <linux/console.h>
+#include <linux/module.h>
/*
@@ -118,6 +119,7 @@ int radeon_audio = 0;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
+int radeon_msi = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -164,6 +166,9 @@ module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
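
The new radeon_msi parameter added above is a tri-state read once at load time (radeon.msi=1 on the kernel command line, or modprobe radeon msi=1). A sketch of how the value could be consulted when deciding whether to enable MSI -- the real decision logic, including any per-chip quirks, lives in the IRQ setup code and is assumed here rather than quoted:

	extern int radeon_msi;	/* module parameter declared in radeon_drv.c */

	/* Hypothetical helper; the auto heuristic below is an assumption. */
	static bool radeon_msi_wanted(struct radeon_device *rdev)
	{
		if (radeon_msi == 1)		/* user forced MSI on */
			return true;
		if (radeon_msi == 0)		/* user forced MSI off */
			return false;
		/* -1 = auto: e.g. avoid MSI on AGP parts */
		return !(rdev->flags & RADEON_IS_AGP);
	}
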
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index eb3f6dc6df8..06e413e6a92 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,12 +29,6 @@
#include "radeon.h"
#include "atom.h"
-extern int atom_debug;
-
-/* evil but including atombios.h is much worse */
-bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
- struct drm_display_mode *mode);
-
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -156,27 +150,6 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
return ret;
}
-static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- return true;
- default:
- return false;
- }
-}
-
void
radeon_link_encoder_connector(struct drm_device *dev)
{
@@ -229,23 +202,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-static struct drm_connector *
-radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->devices & radeon_connector->devices)
- return connector;
- }
- return NULL;
-}
-
-struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
+struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -266,9 +223,9 @@ struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder
return NULL;
}
-bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
+u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
{
- struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
+ struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
if (other_encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
@@ -332,2105 +289,3 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
}
-static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- /* hw bug */
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
- && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
- adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
-
- /* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
- radeon_panel_mode_fixup(encoder, adjusted_mode);
-
- /* get the native mode for TV */
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
- struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
- if (tv_dac) {
- if (tv_dac->tv_std == TV_STD_NTSC ||
- tv_dac->tv_std == TV_STD_NTSC_J ||
- tv_dac->tv_std == TV_STD_PAL_M)
- radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
- else
- radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
- }
- }
-
- if (ASIC_IS_DCE3(rdev) &&
- ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- radeon_encoder_is_dp_bridge(encoder))) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- radeon_dp_set_link_config(connector, mode);
- }
-
- return true;
-}
-
-static void
-atombios_dac_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DAC_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0;
- struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
-
- memset(&args, 0, sizeof(args));
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
- break;
- }
-
- args.ucAction = action;
-
- if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
- args.ucDacStandard = ATOM_DAC1_PS2;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.ucDacStandard = ATOM_DAC1_CV;
- else {
- switch (dac_info->tv_std) {
- case TV_STD_PAL:
- case TV_STD_PAL_M:
- case TV_STD_SCART_PAL:
- case TV_STD_SECAM:
- case TV_STD_PAL_CN:
- args.ucDacStandard = ATOM_DAC1_PAL;
- break;
- case TV_STD_NTSC:
- case TV_STD_NTSC_J:
- case TV_STD_PAL_60:
- default:
- args.ucDacStandard = ATOM_DAC1_NTSC;
- break;
- }
- }
- args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-static void
-atombios_tv_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- TV_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0;
- struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
-
- memset(&args, 0, sizeof(args));
-
- index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
-
- args.sTVEncoder.ucAction = action;
-
- if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
- else {
- switch (dac_info->tv_std) {
- case TV_STD_NTSC:
- args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
- break;
- case TV_STD_PAL:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
- break;
- case TV_STD_PAL_M:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
- break;
- case TV_STD_PAL_60:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
- break;
- case TV_STD_NTSC_J:
- args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
- break;
- case TV_STD_SCART_PAL:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
- break;
- case TV_STD_SECAM:
- args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
- break;
- case TV_STD_PAL_CN:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
- break;
- default:
- args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
- break;
- }
- }
-
- args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-union dvo_encoder_control {
- ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
- DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
- DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
-};
-
-void
-atombios_dvo_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- union dvo_encoder_control args;
- int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
-
- memset(&args, 0, sizeof(args));
-
- if (ASIC_IS_DCE3(rdev)) {
- /* DCE3+ */
- args.dvo_v3.ucAction = action;
- args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.dvo_v3.ucDVOConfig = 0; /* XXX */
- } else if (ASIC_IS_DCE2(rdev)) {
- /* DCE2 (pre-DCE3 R6xx, RS600/690/740 */
- args.dvo.sDVOEncoder.ucAction = action;
- args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- /* DFP1, CRT1, TV1 depending on the type of port */
- args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
-
- if (radeon_encoder->pixel_clock > 165000)
- args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
- } else {
- /* R4xx, R5xx */
- args.ext_tmds.sXTmdsEncoder.ucEnable = action;
-
- if (radeon_encoder->pixel_clock > 165000)
- args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-
- /*if (pScrn->rgbBits == 8)*/
- args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-union lvds_encoder_control {
- LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
- LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
-};
-
-void
-atombios_digital_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- union lvds_encoder_control args;
- int index = 0;
- int hdmi_detected = 0;
- uint8_t frev, crev;
-
- if (!dig)
- return;
-
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- hdmi_detected = 1;
-
- memset(&args, 0, sizeof(args));
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
- break;
- }
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- case 2:
- switch (crev) {
- case 1:
- args.v1.ucMisc = 0;
- args.v1.ucAction = action;
- if (hdmi_detected)
- args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
- args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
- args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
- args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
- } else {
- if (dig->linkb)
- args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- /*if (pScrn->rgbBits == 8) */
- args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
- }
- break;
- case 2:
- case 3:
- args.v2.ucMisc = 0;
- args.v2.ucAction = action;
- if (crev == 3) {
- if (dig->coherent_mode)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
- }
- if (hdmi_detected)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
- args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.v2.ucTruncate = 0;
- args.v2.ucSpatial = 0;
- args.v2.ucTemporal = 0;
- args.v2.ucFRC = 0;
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
- args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
- if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
- args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
- }
- if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
- args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
- if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
- args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
- if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
- args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
- }
- } else {
- if (dig->linkb)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
- if (radeon_encoder->pixel_clock > 165000)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-int
-atombios_get_encoder_mode(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- /* dp bridges are always DP */
- if (radeon_encoder_is_dp_bridge(encoder))
- return ATOM_ENCODER_MODE_DP;
-
- /* DVO is always DVO */
- if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
- return ATOM_ENCODER_MODE_DVO;
-
- connector = radeon_get_connector_for_encoder(encoder);
- /* if we don't have an active device yet, just use one of
- * the connectors tied to the encoder.
- */
- if (!connector)
- connector = radeon_get_connector_for_encoder_init(encoder);
- radeon_connector = to_radeon_connector(connector);
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else if (radeon_connector->use_digital)
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_CRT;
- break;
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_HDMIA:
- default:
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
- return ATOM_ENCODER_MODE_DVI;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- return ATOM_ENCODER_MODE_LVDS;
- break;
- case DRM_MODE_CONNECTOR_DisplayPort:
- dig_connector = radeon_connector->con_priv;
- if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
- return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
- return ATOM_ENCODER_MODE_DVI;
- break;
- case DRM_MODE_CONNECTOR_eDP:
- return ATOM_ENCODER_MODE_DP;
- case DRM_MODE_CONNECTOR_DVIA:
- case DRM_MODE_CONNECTOR_VGA:
- return ATOM_ENCODER_MODE_CRT;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_9PinDIN:
- /* fix me */
- return ATOM_ENCODER_MODE_TV;
- /*return ATOM_ENCODER_MODE_CV;*/
- break;
- }
-}
-
-/*
- * DIG Encoder/Transmitter Setup
- *
- * DCE 3.0/3.1
- * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
- * Supports up to 3 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1 can drive UNIPHY link A or link B
- * DIG2 can drive UNIPHY link B or LVTMA
- *
- * DCE 3.2
- * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
- * Supports up to 5 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1/2 can drive UNIPHY0/1/2 link A or link B
- *
- * DCE 4.0/5.0
- * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
- * Supports up to 6 digital outputs
- * - 6 DIG encoder blocks.
- * - DIG to PHY mapping is hardcoded
- * DIG1 drives UNIPHY0 link A, A+B
- * DIG2 drives UNIPHY0 link B
- * DIG3 drives UNIPHY1 link A, A+B
- * DIG4 drives UNIPHY1 link B
- * DIG5 drives UNIPHY2 link A, A+B
- * DIG6 drives UNIPHY2 link B
- *
- * DCE 4.1
- * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
- * Supports up to 6 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1/2 can drive UNIPHY0/1/2 link A or link B
- *
- * Routing
- * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
- * Examples:
- * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
- * crtc1 -> dig1 -> UNIPHY0 link B -> DP
- * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
- * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
- */
-
-union dig_encoder_control {
- DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
- DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
- DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
- DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
-};
-
-void
-atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- union dig_encoder_control args;
- int index = 0;
- uint8_t frev, crev;
- int dp_clock = 0;
- int dp_lane_count = 0;
- int hpd_id = RADEON_HPD_NONE;
- int bpc = 8;
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- hpd_id = radeon_connector->hpd.hpd;
- bpc = connector->display_info.bpc;
- }
-
- /* no dig encoder assigned */
- if (dig->dig_encoder == -1)
- return;
-
- memset(&args, 0, sizeof(args));
-
- if (ASIC_IS_DCE4(rdev))
- index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
- else {
- if (dig->dig_encoder)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- }
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- args.v1.ucAction = action;
- args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
- args.v3.ucPanelMode = panel_mode;
- else
- args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
- (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
- args.v1.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucLaneNum = 8;
- else
- args.v1.ucLaneNum = 4;
-
- if (ASIC_IS_DCE5(rdev)) {
- if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
- (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
- if (dp_clock == 270000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
- else if (dp_clock == 540000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
- }
- args.v4.acConfig.ucDigSel = dig->dig_encoder;
- switch (bpc) {
- case 0:
- args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
- if (hpd_id == RADEON_HPD_NONE)
- args.v4.ucHPD_ID = 0;
- else
- args.v4.ucHPD_ID = hpd_id + 1;
- } else if (ASIC_IS_DCE4(rdev)) {
- if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
- args.v3.acConfig.ucDigSel = dig->dig_encoder;
- switch (bpc) {
- case 0:
- args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
- } else {
- if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
- break;
- }
- if (dig->linkb)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-union dig_transmitter_control {
- DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
- DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
- DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
- DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
-};
-
-void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- union dig_transmitter_control args;
- int index = 0;
- uint8_t frev, crev;
- bool is_dp = false;
- int pll_id = 0;
- int dp_clock = 0;
- int dp_lane_count = 0;
- int connector_object_id = 0;
- int igp_lane_info = 0;
- int dig_encoder = dig->dig_encoder;
-
- if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- connector = radeon_get_connector_for_encoder_init(encoder);
- /* just needed to avoid bailing in the encoder check. the encoder
- * isn't used for init
- */
- dig_encoder = 0;
- } else
- connector = radeon_get_connector_for_encoder(encoder);
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- connector_object_id =
- (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- igp_lane_info = dig_connector->igp_lane_info;
- }
-
- /* no dig encoder assigned */
- if (dig_encoder == -1)
- return;
-
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
- is_dp = true;
-
- memset(&args, 0, sizeof(args));
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
- break;
- }
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- args.v1.ucAction = action;
- if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = cpu_to_le16(connector_object_id);
- } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
- args.v1.asMode.ucLaneSel = lane_num;
- args.v1.asMode.ucLaneSet = lane_set;
- } else {
- if (is_dp)
- args.v1.usPixelClock =
- cpu_to_le16(dp_clock / 10);
- else if (radeon_encoder->pixel_clock > 165000)
- args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
- else
- args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- }
- if (ASIC_IS_DCE4(rdev)) {
- if (is_dp)
- args.v3.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
- args.v3.ucLaneNum = 8;
- else
- args.v3.ucLaneNum = 4;
-
- if (dig->linkb)
- args.v3.acConfig.ucLinkSel = 1;
- if (dig_encoder & 1)
- args.v3.acConfig.ucEncoderSel = 1;
-
- /* Select the PLL for the PHY
- * DP PHY should be clocked from external src if there is
- * one.
- */
- if (encoder->crtc) {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- pll_id = radeon_crtc->pll_id;
- }
-
- if (ASIC_IS_DCE5(rdev)) {
- /* On DCE5 DCPLL usually generates the DP ref clock */
- if (is_dp) {
- if (rdev->clock.dp_extclk)
- args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
- else
- args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
- } else
- args.v4.acConfig.ucRefClkSource = pll_id;
- } else {
- /* On DCE4, if there is an external clock, it generates the DP ref clock */
- if (is_dp && rdev->clock.dp_extclk)
- args.v3.acConfig.ucRefClkSource = 2; /* external src */
- else
- args.v3.acConfig.ucRefClkSource = pll_id;
- }
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v3.acConfig.ucTransmitterSel = 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.v3.acConfig.ucTransmitterSel = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.v3.acConfig.ucTransmitterSel = 2;
- break;
- }
-
- if (is_dp)
- args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
- else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
- if (dig->coherent_mode)
- args.v3.acConfig.fCoherentMode = 1;
- if (radeon_encoder->pixel_clock > 165000)
- args.v3.acConfig.fDualLinkConnector = 1;
- }
- } else if (ASIC_IS_DCE32(rdev)) {
- args.v2.acConfig.ucEncoderSel = dig_encoder;
- if (dig->linkb)
- args.v2.acConfig.ucLinkSel = 1;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v2.acConfig.ucTransmitterSel = 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.v2.acConfig.ucTransmitterSel = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.v2.acConfig.ucTransmitterSel = 2;
- break;
- }
-
- if (is_dp) {
- args.v2.acConfig.fCoherentMode = 1;
- args.v2.acConfig.fDPConnector = 1;
- } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
- if (dig->coherent_mode)
- args.v2.acConfig.fCoherentMode = 1;
- if (radeon_encoder->pixel_clock > 165000)
- args.v2.acConfig.fDualLinkConnector = 1;
- }
- } else {
- args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
-
- if (dig_encoder)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
-
- if ((rdev->flags & RADEON_IS_IGP) &&
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
- if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
- if (igp_lane_info & 0x1)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (igp_lane_info & 0x2)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (igp_lane_info & 0x4)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (igp_lane_info & 0x8)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
- } else {
- if (igp_lane_info & 0x3)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (igp_lane_info & 0xc)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
- }
- }
-
- if (dig->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
-
- if (is_dp)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
- else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
- if (dig->coherent_mode)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
- }
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-bool
-atombios_set_edp_panel_power(struct drm_connector *connector, int action)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- union dig_transmitter_control args;
- int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- uint8_t frev, crev;
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- goto done;
-
- if (!ASIC_IS_DCE4(rdev))
- goto done;
-
- if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
- (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
- goto done;
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- goto done;
-
- memset(&args, 0, sizeof(args));
-
- args.v1.ucAction = action;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- /* wait for the panel to power up */
- if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
- int i;
-
- for (i = 0; i < 300; i++) {
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- return true;
- mdelay(1);
- }
- return false;
- }
-done:
- return true;
-}
-
-union external_encoder_control {
- EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
- EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
-};
-
-static void
-atombios_external_encoder_setup(struct drm_encoder *encoder,
- struct drm_encoder *ext_encoder,
- int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
- union external_encoder_control args;
- struct drm_connector *connector;
- int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
- u8 frev, crev;
- int dp_clock = 0;
- int dp_lane_count = 0;
- int connector_object_id = 0;
- u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- int bpc = 8;
-
- if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
- connector = radeon_get_connector_for_encoder_init(encoder);
- else
- connector = radeon_get_connector_for_encoder(encoder);
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- connector_object_id =
- (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- bpc = connector->display_info.bpc;
- }
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- /* no params on frev 1 */
- break;
- case 2:
- switch (crev) {
- case 1:
- case 2:
- args.v1.sDigEncoder.ucAction = action;
- args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dp_clock == 270000)
- args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
- args.v1.sDigEncoder.ucLaneNum = 8;
- else
- args.v1.sDigEncoder.ucLaneNum = 4;
- break;
- case 3:
- args.v3.sExtEncoder.ucAction = action;
- if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
- args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
- else
- args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dp_clock == 270000)
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
- else if (dp_clock == 540000)
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
- args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
- args.v3.sExtEncoder.ucLaneNum = 8;
- else
- args.v3.sExtEncoder.ucLaneNum = 4;
- switch (ext_enum) {
- case GRAPH_OBJECT_ENUM_ID1:
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
- break;
- case GRAPH_OBJECT_ENUM_ID2:
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
- break;
- case GRAPH_OBJECT_ENUM_ID3:
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
- break;
- }
- switch (bpc) {
- case 0:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- return;
- }
- break;
- default:
- DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- return;
- }
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-static void
-atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- ENABLE_YUV_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
- uint32_t temp, reg;
-
- memset(&args, 0, sizeof(args));
-
- if (rdev->family >= CHIP_R600)
- reg = R600_BIOS_3_SCRATCH;
- else
- reg = RADEON_BIOS_3_SCRATCH;
-
- /* XXX: fix up scratch reg handling */
- temp = RREG32(reg);
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- WREG32(reg, (ATOM_S3_TV1_ACTIVE |
- (radeon_crtc->crtc_id << 18)));
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
- else
- WREG32(reg, 0);
-
- if (enable)
- args.ucEnable = ATOM_ENABLE;
- args.ucCRTC = radeon_crtc->crtc_id;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- WREG32(reg, temp);
-}
-
-static void
-radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
- DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
- int index = 0;
- bool is_dig = false;
- bool is_dce5_dac = false;
- bool is_dce5_dvo = false;
-
- memset(&args, 0, sizeof(args));
-
- DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
- radeon_encoder->encoder_id, mode, radeon_encoder->devices,
- radeon_encoder->active_device);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- is_dig = true;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- if (ASIC_IS_DCE5(rdev))
- is_dce5_dvo = true;
- else if (ASIC_IS_DCE3(rdev))
- is_dig = true;
- else
- index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (ASIC_IS_DCE5(rdev))
- is_dce5_dac = true;
- else {
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
- }
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
- break;
- }
-
- if (is_dig) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- else
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
- if (connector &&
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *radeon_dig_connector =
- radeon_connector->con_priv;
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- radeon_dig_connector->edp_on = true;
- }
- if (ASIC_IS_DCE4(rdev))
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
- radeon_dp_link_train(encoder, connector);
- if (ASIC_IS_DCE4(rdev))
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
- }
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
- if (ASIC_IS_DCE4(rdev))
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
- if (connector &&
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *radeon_dig_connector =
- radeon_connector->con_priv;
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_OFF);
- radeon_dig_connector->edp_on = false;
- }
- }
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
- break;
- }
- } else if (is_dce5_dac) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- atombios_dac_setup(encoder, ATOM_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- atombios_dac_setup(encoder, ATOM_DISABLE);
- break;
- }
- } else if (is_dce5_dvo) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- atombios_dvo_setup(encoder, ATOM_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- atombios_dvo_setup(encoder, ATOM_DISABLE);
- break;
- }
- } else {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- args.ucAction = ATOM_ENABLE;
- /* workaround for DVOOutputControl on some RS690 systems */
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
- u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
- WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- WREG32(RADEON_BIOS_3_SCRATCH, reg);
- } else
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- }
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- args.ucAction = ATOM_DISABLE;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLOFF;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- }
- break;
- }
- }
-
- if (ext_encoder) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- default:
- if (ASIC_IS_DCE41(rdev)) {
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
- } else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev)) {
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
- } else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
- break;
- }
- }
-
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
-
-}
-
-union crtc_source_param {
- SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
- SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
-};
-
-static void
-atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- union crtc_source_param args;
- int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
- uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- switch (crev) {
- case 1:
- default:
- if (ASIC_IS_AVIVO(rdev))
- args.v1.ucCRTC = radeon_crtc->crtc_id;
- else {
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
- args.v1.ucCRTC = radeon_crtc->crtc_id;
- } else {
- args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
- }
- }
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
- args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
- break;
- }
- break;
- case 2:
- args.v2.ucCRTC = radeon_crtc->crtc_id;
- if (radeon_encoder_is_dp_bridge(encoder)) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
- args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
- else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
- args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
- else
- args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- } else
- args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- dig = radeon_encoder->enc_priv;
- switch (dig->dig_encoder) {
- case 0:
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- break;
- case 1:
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
- case 2:
- args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
- break;
- case 3:
- args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
- break;
- case 4:
- args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
- break;
- case 5:
- args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
- break;
- }
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
- break;
- }
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- return;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- /* update scratch regs with new routing */
- radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
-}
-
-static void
-atombios_apply_encoder_quirks(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-
- /* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
- if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
- uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
-
- lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
- lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
-
- WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
- }
- }
-
- /* set scaler clears this on some chips */
- if (ASIC_IS_AVIVO(rdev) &&
- (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
- if (ASIC_IS_DCE4(rdev)) {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
- EVERGREEN_INTERLEAVE_EN);
- else
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- } else {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
- AVIVO_D1MODE_INTERLEAVE_EN);
- else
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- }
- }
-}
-
-static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *test_encoder;
- struct radeon_encoder_atom_dig *dig;
- uint32_t dig_enc_in_use = 0;
-
- /* DCE4/5 */
- if (ASIC_IS_DCE4(rdev)) {
- dig = radeon_encoder->enc_priv;
- if (ASIC_IS_DCE41(rdev)) {
- /* ontario follows DCE4 */
- if (rdev->family == CHIP_PALM) {
- if (dig->linkb)
- return 1;
- else
- return 0;
- } else
- /* llano follows DCE3.2 */
- return radeon_crtc->crtc_id;
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- break;
- }
- }
- }
-
- /* on DCE32 and encoder can driver any block so just crtc id */
- if (ASIC_IS_DCE32(rdev)) {
- return radeon_crtc->crtc_id;
- }
-
- /* on DCE3 - LVTMA can only be driven by DIGB */
- list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_test_encoder;
-
- if (encoder == test_encoder)
- continue;
-
- if (!radeon_encoder_is_digital(test_encoder))
- continue;
-
- radeon_test_encoder = to_radeon_encoder(test_encoder);
- dig = radeon_test_encoder->enc_priv;
-
- if (dig->dig_encoder >= 0)
- dig_enc_in_use |= (1 << dig->dig_encoder);
- }
-
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
- if (dig_enc_in_use & 0x2)
- DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
- return 1;
- }
- if (!(dig_enc_in_use & 1))
- return 0;
- return 1;
-}
-
-/* This only needs to be called once at startup */
-void
-radeon_atom_encoder_init(struct radeon_device *rdev)
-{
- struct drm_device *dev = rdev->ddev;
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
- break;
- default:
- break;
- }
-
- if (ext_encoder && ASIC_IS_DCE41(rdev))
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
- }
-}
-
-static void
-radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-
- radeon_encoder->pixel_clock = adjusted_mode->clock;
-
- if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
- if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
- atombios_yuv_setup(encoder, true);
- else
- atombios_yuv_setup(encoder, false);
- }
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE4(rdev)) {
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- /* setup and enable the encoder */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-
- /* enable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- } else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-
- /* setup and enable the encoder and transmitter */
- atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- }
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_dvo_setup(encoder, ATOM_ENABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- atombios_dac_setup(encoder, ATOM_ENABLE);
- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_ENABLE);
- else
- atombios_tv_setup(encoder, ATOM_DISABLE);
- }
- break;
- }
-
- if (ext_encoder) {
- if (ASIC_IS_DCE41(rdev))
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
- else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
- }
-
- atombios_apply_encoder_quirks(encoder, adjusted_mode);
-
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- r600_hdmi_enable(encoder);
- r600_hdmi_setmode(encoder, adjusted_mode);
- }
-}
-
-static bool
-atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
- ATOM_DEVICE_CV_SUPPORT |
- ATOM_DEVICE_CRT_SUPPORT)) {
- DAC_LOAD_DETECTION_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
- uint8_t frev, crev;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return false;
-
- args.sDacload.ucMisc = 0;
-
- if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
- args.sDacload.ucDacType = ATOM_DAC_A;
- else
- args.sDacload.ucDacType = ATOM_DAC_B;
-
- if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
- else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
- else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
- if (crev >= 3)
- args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
- } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
- if (crev >= 3)
- args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- return true;
- } else
- return false;
-}
-
-static enum drm_connector_status
-radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- uint32_t bios_0_scratch;
-
- if (!atombios_dac_load_detect(encoder, connector)) {
- DRM_DEBUG_KMS("detect returned false \n");
- return connector_status_unknown;
- }
-
- if (rdev->family >= CHIP_R600)
- bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
- else
- bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
-
- DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
- if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT1_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT2_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
- return connector_status_connected; /* CTV */
- else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
- return connector_status_connected; /* STV */
- }
- return connector_status_disconnected;
-}
-
-static enum drm_connector_status
-radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
- u32 bios_0_scratch;
-
- if (!ASIC_IS_DCE4(rdev))
- return connector_status_unknown;
-
- if (!ext_encoder)
- return connector_status_unknown;
-
- if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
- return connector_status_unknown;
-
- /* load detect on the dp bridge */
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
-
- bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
-
- DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
- if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT1_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT2_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
- return connector_status_connected; /* CTV */
- else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
- return connector_status_connected; /* STV */
- }
- return connector_status_disconnected;
-}
-
-void
-radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
-{
- struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-
- if (ext_encoder)
- /* ddc_setup on the dp bridge */
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
-
-}
-
-static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
- if ((radeon_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- radeon_encoder_is_dp_bridge(encoder)) {
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
- }
-
- radeon_atom_output_lock(encoder, true);
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (radeon_connector->router.cd_valid)
- radeon_router_select_cd_port(radeon_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- atombios_set_encoder_crtc_source(encoder);
-}
-
-static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
-{
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- radeon_atom_output_lock(encoder, false);
-}
-
-static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig;
-
- /* check for pre-DCE3 cards with shared encoders;
- * can't really use the links individually, so don't disable
- * the encoder if it's in use by another connector
- */
- if (!ASIC_IS_DCE3(rdev)) {
- struct drm_encoder *other_encoder;
- struct radeon_encoder *other_radeon_encoder;
-
- list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
- other_radeon_encoder = to_radeon_encoder(other_encoder);
- if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
- drm_helper_encoder_in_use(other_encoder))
- goto disable_done;
- }
- }
-
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE4(rdev))
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
- }
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_dvo_setup(encoder, ATOM_DISABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- atombios_dac_setup(encoder, ATOM_DISABLE);
- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_DISABLE);
- break;
- }
-
-disable_done:
- if (radeon_encoder_is_digital(encoder)) {
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- r600_hdmi_disable(encoder);
- dig = radeon_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- radeon_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void radeon_atom_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-radeon_atom_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void radeon_atom_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
- .dpms = radeon_atom_ext_dpms,
- .mode_fixup = radeon_atom_ext_mode_fixup,
- .prepare = radeon_atom_ext_prepare,
- .mode_set = radeon_atom_ext_mode_set,
- .commit = radeon_atom_ext_commit,
- .disable = radeon_atom_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
- .dpms = radeon_atom_encoder_dpms,
- .mode_fixup = radeon_atom_mode_fixup,
- .prepare = radeon_atom_encoder_prepare,
- .mode_set = radeon_atom_encoder_mode_set,
- .commit = radeon_atom_encoder_commit,
- .disable = radeon_atom_encoder_disable,
- .detect = radeon_atom_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
- .dpms = radeon_atom_encoder_dpms,
- .mode_fixup = radeon_atom_mode_fixup,
- .prepare = radeon_atom_encoder_prepare,
- .mode_set = radeon_atom_encoder_mode_set,
- .commit = radeon_atom_encoder_commit,
- .detect = radeon_atom_dac_detect,
-};
-
-void radeon_enc_destroy(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- kfree(radeon_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(radeon_encoder);
-}
-
-static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
- .destroy = radeon_enc_destroy,
-};
-
-struct radeon_encoder_atom_dac *
-radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
-{
- struct drm_device *dev = radeon_encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
-
- if (!dac)
- return NULL;
-
- dac->tv_std = radeon_atombios_get_tv_info(rdev);
- return dac;
-}
-
-struct radeon_encoder_atom_dig *
-radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
-{
- int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
-
- if (!dig)
- return NULL;
-
- /* coherent mode by default */
- dig->coherent_mode = true;
- dig->dig_encoder = -1;
-
- if (encoder_enum == 2)
- dig->linkb = true;
- else
- dig->linkb = false;
-
- return dig;
-}
-
-void
-radeon_add_atom_encoder(struct drm_device *dev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- struct radeon_encoder *radeon_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_enum == encoder_enum) {
- radeon_encoder->devices |= supported_device;
- return;
- }
-
- }
-
- /* add a new one */
- radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
- if (!radeon_encoder)
- return;
-
- encoder = &radeon_encoder->base;
- switch (rdev->num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- radeon_encoder->enc_priv = NULL;
-
- radeon_encoder->encoder_enum = encoder_enum;
- radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- radeon_encoder->devices = supported_device;
- radeon_encoder->rmx_type = RMX_OFF;
- radeon_encoder->underscan_type = UNDERSCAN_OFF;
- radeon_encoder->is_ext_encoder = false;
- radeon_encoder->caps = caps;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- radeon_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
- radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
- } else {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
- radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
- }
- drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
- radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
- drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
- radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
- drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- radeon_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
- radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
- } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
- radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
- } else {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
- radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
- }
- drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- radeon_encoder->is_ext_encoder = true;
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
- else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
- else
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
- break;
- }
-}
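
For reference, a minimal standalone sketch of the DIG-encoder selection that the removed radeon_atom_pick_dig_encoder() performed for the DCE4/DCE5 UNIPHY cases above: each UNIPHY block exposes two links, and link B takes the odd-numbered DIG. The enum and helper names below are illustrative only, not part of the driver.

#include <stdio.h>

enum uniphy_block { UNIPHY = 0, UNIPHY1 = 1, UNIPHY2 = 2 };

/* DCE4/5: UNIPHY link A -> DIG0, link B -> DIG1,
 * UNIPHY1 -> DIG2/DIG3, UNIPHY2 -> DIG4/DIG5 */
static int pick_dig_encoder(enum uniphy_block block, int linkb)
{
	return (int)block * 2 + (linkb ? 1 : 0);
}

int main(void)
{
	printf("UNIPHY1 link B -> DIG%d\n", pick_dig_encoder(UNIPHY1, 1));
	printf("UNIPHY2 link A -> DIG%d\n", pick_dig_encoder(UNIPHY2, 0));
	return 0;
}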
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3475a09f946..76ec0e9ed8a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -263,7 +263,7 @@ retry:
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
- WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+ printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
/* FIXME: what should we do ? marking everyone
* as signaled for now
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a533f52fd16..ba7ab79e12c 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
- rdev->gart.table.ram.ptr = ptr;
- memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
+ rdev->gart.ptr = ptr;
+ memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
return 0;
}
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
- if (rdev->gart.table.ram.ptr == NULL) {
+ if (rdev->gart.ptr == NULL) {
return;
}
#ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
+ set_memory_wb((unsigned long)rdev->gart.ptr,
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
pci_free_consistent(rdev->pdev, rdev->gart.table_size,
- (void *)rdev->gart.table.ram.ptr,
+ (void *)rdev->gart.ptr,
rdev->gart.table_addr);
- rdev->gart.table.ram.ptr = NULL;
+ rdev->gart.ptr = NULL;
rdev->gart.table_addr = 0;
}
@@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->gart.table.vram.robj);
+ &rdev->gart.robj);
if (r) {
return r;
}
@@ -93,38 +93,46 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
uint64_t gpu_addr;
int r;
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ r = radeon_bo_reserve(rdev->gart.robj, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rdev->gart.table.vram.robj,
+ r = radeon_bo_pin(rdev->gart.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.robj);
return r;
}
- r = radeon_bo_kmap(rdev->gart.table.vram.robj,
- (void **)&rdev->gart.table.vram.ptr);
+ r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
if (r)
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.robj);
+ radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
return r;
}
-void radeon_gart_table_vram_free(struct radeon_device *rdev)
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
return;
}
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ r = radeon_bo_reserve(rdev->gart.robj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ radeon_bo_kunmap(rdev->gart.robj);
+ radeon_bo_unpin(rdev->gart.robj);
+ radeon_bo_unreserve(rdev->gart.robj);
+ rdev->gart.ptr = NULL;
}
- radeon_bo_unref(&rdev->gart.table.vram.robj);
+}
+
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+ if (rdev->gart.robj == NULL) {
+ return;
+ }
+ radeon_gart_table_vram_unpin(rdev);
+ radeon_bo_unref(&rdev->gart.robj);
}
@@ -142,7 +150,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
u64 page_base;
if (!rdev->gart.ready) {
- WARN(1, "trying to unbind memory to unitialized GART !\n");
+ WARN(1, "trying to unbind memory from uninitialized GART !\n");
return;
}
t = offset / RADEON_GPU_PAGE_SIZE;
@@ -151,12 +159,14 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
if (rdev->gart.pages[p]) {
if (!rdev->gart.ttm_alloced[p])
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_base);
+ }
page_base += RADEON_GPU_PAGE_SIZE;
}
}
@@ -174,7 +184,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
- WARN(1, "trying to bind memory to unitialized GART !\n");
+ WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
@@ -199,10 +209,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
}
}
rdev->gart.pages[p] = pagelist[i];
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
- page_base += RADEON_GPU_PAGE_SIZE;
+ if (rdev->gart.ptr) {
+ page_base = rdev->gart.pages_addr[p];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
}
}
mb();
@@ -215,6 +227,9 @@ void radeon_gart_restore(struct radeon_device *rdev)
int i, j, t;
u64 page_base;
+ if (!rdev->gart.ptr) {
+ return;
+ }
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
page_base = rdev->gart.pages_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
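
As an aside, the offset arithmetic shared by the radeon_gart_bind()/unbind()/restore() hunks above splits each CPU page into PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GART entries, guarded by the new rdev->gart.ptr checks. A standalone sketch follows; the plain array stands in for rdev->gart.ptr, and the GPU page size is deliberately made smaller than in the driver so the split is visible.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_SIM		4096u
#define GPU_PAGE_SIZE_SIM	1024u	/* 4096 in the driver; smaller here for illustration */
#define GART_ENTRIES		64u

static uint64_t gart_table[GART_ENTRIES];	/* stands in for rdev->gart.ptr */

static void gart_bind_one(unsigned offset, uint64_t dma_addr)
{
	unsigned t = offset / GPU_PAGE_SIZE_SIM;
	uint64_t page_base = dma_addr;

	for (unsigned j = 0; j < PAGE_SIZE_SIM / GPU_PAGE_SIZE_SIM; j++, t++) {
		gart_table[t] = page_base;	/* radeon_gart_set_page() in the driver */
		page_base += GPU_PAGE_SIZE_SIM;
	}
}

int main(void)
{
	gart_bind_one(1 * PAGE_SIZE_SIM, 0x100000);	/* CPU page 1 -> GART entries 4..7 */
	for (unsigned i = 4; i < 8; i++)
		printf("entry %u -> 0x%llx\n", i, (unsigned long long)gart_table[i]);
	return 0;
}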
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6c111c1fa3f..7bb1b079f48 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -23,6 +23,8 @@
* Authors: Dave Airlie
* Alex Deucher
*/
+#include <linux/export.h>
+
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
@@ -32,7 +34,7 @@
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
{
u8 out = 0x0;
u8 buf[8];
@@ -47,15 +49,11 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e
{
.addr = 0x50,
.flags = I2C_M_RD,
- .len = 1,
+ .len = 8,
.buf = buf,
}
};
- /* Read 8 bytes from i2c for extended probe of EDID header */
- if (requires_extended_probe)
- msgs[1].len = 8;
-
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -64,25 +62,24 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
- if (requires_extended_probe) {
- /* Probe also for valid EDID header
- * EDID header starts with:
- * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
- * Only the first 6 bytes must be valid as
- * drm_edid_block_valid() can fix the last 2 bytes */
- if (drm_edid_header_is_valid(buf) < 6) {
- /* Couldn't find an accessible EDID on this
- * connector */
- return false;
- }
+ /* Probe also for valid EDID header
+ * EDID header starts with:
+ * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ * Only the first 6 bytes must be valid as
+ * drm_edid_block_valid() can fix the last 2 bytes */
+ if (drm_edid_header_is_valid(buf) < 6) {
+ /* Couldn't find an accessible EDID on this
+ * connector */
+ return false;
}
return true;
}
/* bit banging i2c */
-static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
+static int pre_xfer(struct i2c_adapter *i2c_adap)
{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -137,19 +134,30 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
WREG32(rec->en_data_reg, temp);
/* mask the gpio pins for software use */
- temp = RREG32(rec->mask_clk_reg);
- if (lock_state)
- temp |= rec->mask_clk_mask;
- else
- temp &= ~rec->mask_clk_mask;
+ temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
+ temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
- if (lock_state)
- temp |= rec->mask_data_mask;
- else
- temp &= ~rec->mask_data_mask;
+
+ return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t temp;
+
+ /* unmask the gpio pins for software use */
+ temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, temp);
+ temp = RREG32(rec->mask_clk_reg);
+
+ temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
}
@@ -209,22 +217,6 @@ static void set_data(void *i2c_priv, int data)
WREG32(rec->en_data_reg, val);
}
-static int pre_xfer(struct i2c_adapter *i2c_adap)
-{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
-
- radeon_i2c_do_lock(i2c, 1);
-
- return 0;
-}
-
-static void post_xfer(struct i2c_adapter *i2c_adap)
-{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
-
- radeon_i2c_do_lock(i2c, 0);
-}
-
/* hw i2c */
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
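
A standalone sketch of the EDID-header scoring now done unconditionally in the radeon_ddc_probe() hunk above: the 8 probed bytes are compared against the fixed EDID signature and at least 6 matches are required, since drm_edid_block_valid() can repair the last two bytes. The helper name here is made up; the driver calls drm_edid_header_is_valid().

#include <stdio.h>

static const unsigned char edid_header[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

static int edid_header_score(const unsigned char *buf)
{
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (buf[i] == edid_header[i])
			score++;
	return score;
}

int main(void)
{
	unsigned char good[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char bad[8]  = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };

	printf("good: %s\n", edid_header_score(good) >= 6 ? "accept" : "reject");
	printf("bad:  %s\n", edid_header_score(bad)  >= 6 ? "accept" : "reject");
	return 0;
}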
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 465746bd51b..00da38424df 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -129,7 +129,7 @@ void radeon_disable_vblank(struct drm_device *dev, int crtc)
}
}
-static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
+static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
{
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
u32 irq_mask = RADEON_SW_INT_TEST;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9ec830c77af..8f86aeb2669 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -67,10 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
- for (i = 0; i < rdev->num_crtc; i++)
- rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
+ for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+ rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
}
radeon_irq_set(rdev);
@@ -99,15 +99,55 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
- for (i = 0; i < rdev->num_crtc; i++)
- rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
+ for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+ rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
}
radeon_irq_set(rdev);
}
+static bool radeon_msi_ok(struct radeon_device *rdev)
+{
+ /* RV370/RV380 was first asic with MSI support */
+ if (rdev->family < CHIP_RV380)
+ return false;
+
+ /* MSIs don't work on AGP */
+ if (rdev->flags & RADEON_IS_AGP)
+ return false;
+
+ /* force MSI on */
+ if (radeon_msi == 1)
+ return true;
+ else if (radeon_msi == 0)
+ return false;
+
+ /* Quirks */
+ /* HP RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x103c) &&
+ (rdev->pdev->subsystem_device == 0x30c2))
+ return true;
+
+ /* Dell RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x01fd))
+ return true;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* APUs work fine with MSIs */
+ if (rdev->family >= CHIP_PALM)
+ return true;
+ /* lots of IGPs have problems with MSIs */
+ return false;
+ }
+
+ return true;
+}
+
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int i;
@@ -124,12 +164,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
}
/* enable msi */
rdev->msi_enabled = 0;
- /* MSIs don't seem to work reliably on all IGP
- * chips. Disable MSI on them for now.
- */
- if ((rdev->family >= CHIP_RV380) &&
- ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
- (!(rdev->flags & RADEON_IS_AGP))) {
+
+ if (radeon_msi_ok(rdev)) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
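
The radeon_msi_ok() helper added above is essentially a quirk table plus family-based defaults. Below is a minimal standalone sketch of the subsystem-ID matching part; the two table entries mirror the HP and Dell RS690 quirks in the hunk, and everything else is simplified.

#include <stdbool.h>
#include <stdio.h>

struct msi_quirk {
	unsigned short device, sub_vendor, sub_device;
};

static const struct msi_quirk force_msi[] = {
	{ 0x791f, 0x103c, 0x30c2 },	/* HP RS690 */
	{ 0x791f, 0x1028, 0x01fd },	/* Dell RS690 */
};

static bool quirk_forces_msi(unsigned short dev, unsigned short sv,
			     unsigned short sd)
{
	for (unsigned int i = 0; i < sizeof(force_msi) / sizeof(force_msi[0]); i++)
		if (force_msi[i].device == dev &&
		    force_msi[i].sub_vendor == sv &&
		    force_msi[i].sub_device == sd)
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", quirk_forces_msi(0x791f, 0x103c, 0x30c2));	/* 1 */
	printf("%d\n", quirk_forces_msi(0x791f, 0x1002, 0x0000));	/* 0 */
	return 0;
}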
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 41a5d48e657..daadf211104 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
-
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index c7b6cb428d0..b37ec0f1413 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -864,7 +864,7 @@ void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
*v_sync_strt_wid = tmp;
}
-static inline int get_post_div(int value)
+static int get_post_div(int value)
{
int post_div;
switch (value) {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 68820f5f630..2c2e75ef8a3 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -438,15 +438,13 @@ struct radeon_connector {
struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
- /* for some Radeon chip families we apply an additional EDID header
- check as part of the DDC probe */
- bool requires_extended_probe;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
void *con_priv;
bool dac_load_detect;
+ bool detected_by_load; /* if the connection status was determined by load */
uint16_t connector_object_id;
struct radeon_hpd hpd;
struct radeon_router router;
@@ -458,6 +456,8 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
+ ((em) == ATOM_ENCODER_MODE_DP_MST))
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -467,8 +467,8 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
-extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
-extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
+extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
@@ -488,7 +488,7 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
-extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
@@ -518,8 +518,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
- bool requires_extended_probe);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 976c3b1b1b6..1c851521f45 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -515,3 +515,44 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
}
return 0;
}
+
+int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0))
+ return r;
+ spin_lock(&bo->tbo.bdev->fence_lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
+ ttm_bo_unreserve(&bo->tbo);
+ return r;
+}
+
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index ede6c13628f..b07f0f9b862 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -52,28 +52,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
return 0;
}
-/**
- * radeon_bo_reserve - reserve bo
- * @bo: bo structure
- * @no_wait: don't sleep while trying to reserve (return -EBUSY)
- *
- * Returns:
- * -EBUSY: buffer is busy and @no_wait is true
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- */
-static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
- return r;
- }
- return 0;
-}
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait);
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
@@ -118,23 +97,8 @@ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
return bo->tbo.addr_space_offset;
}
-static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
- bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
- if (unlikely(r != 0))
- return r;
- spin_lock(&bo->tbo.bdev->fence_lock);
- if (mem_type)
- *mem_type = bo->tbo.mem.mem_type;
- if (bo->tbo.sync_obj)
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- spin_unlock(&bo->tbo.bdev->fence_lock);
- ttm_bo_unreserve(&bo->tbo);
- return r;
-}
+extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+ bool no_wait);
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fabe89fa6a..78a665bd951 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
#define ACPI_AC_CLASS "ac_adapter"
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
+
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
unsigned long val,
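
The new radeon_pm_get_type_index() above is a plain "Nth matching entry, else default" scan over the power-state table. A standalone sketch with hypothetical power-state types:

#include <stdio.h>

enum ps_type { PS_BATTERY, PS_BALANCED, PS_PERFORMANCE };

static int get_type_index(const enum ps_type *states, int n,
			  enum ps_type wanted, int instance, int def_idx)
{
	int i, found = -1;

	for (i = 0; i < n; i++) {
		if (states[i] == wanted) {
			found++;
			if (found == instance)
				return i;
		}
	}
	return def_idx;	/* no match: fall back to the default state */
}

int main(void)
{
	enum ps_type states[] = { PS_BATTERY, PS_PERFORMANCE,
				  PS_BALANCED, PS_PERFORMANCE };

	/* second PS_PERFORMANCE entry lives at index 3 */
	printf("%d\n", get_type_index(states, 4, PS_PERFORMANCE, 1, 0));
	return 0;
}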
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 08c0233db1b..49d58202202 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -35,6 +35,44 @@
int radeon_debugfs_ib_init(struct radeon_device *rdev);
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
+ }
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
+}
+
+void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (rdev->cp.count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
+#endif
+ rdev->cp.ring[rdev->cp.wptr++] = v;
+ rdev->cp.wptr &= rdev->cp.ptr_mask;
+ rdev->cp.count_dw--;
+ rdev->cp.ring_free_dw--;
+}
+
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
struct radeon_ib *ib, *n;
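
radeon_get_ib_value() above consults a two-slot page cache (kpage_idx[0]/[1]) before asking radeon_cs_update_pages() to map another chunk page. A standalone sketch of that lookup; the load_page() backing store and the replacement policy are made up for illustration.

#include <stdio.h>

#define PAGE_WORDS 1024	/* one 4 KiB page of 32-bit words */

struct ib_cache {
	int kpage_idx[2];		/* which page each slot holds, -1 = empty */
	unsigned int kpage[2][PAGE_WORDS];
	int next_victim;		/* trivial replacement policy */
};

/* pretend backing store: each word encodes its page and offset */
static void load_page(unsigned int *dst, int pg_idx)
{
	for (int i = 0; i < PAGE_WORDS; i++)
		dst[i] = (unsigned int)pg_idx * PAGE_WORDS + i;
}

static unsigned int get_ib_value(struct ib_cache *c, int idx)
{
	int pg_idx = idx / PAGE_WORDS;
	int pg_off = idx % PAGE_WORDS;
	int slot;

	if (c->kpage_idx[0] == pg_idx)
		return c->kpage[0][pg_off];
	if (c->kpage_idx[1] == pg_idx)
		return c->kpage[1][pg_off];

	/* miss: refill one slot, as radeon_cs_update_pages() would */
	slot = c->next_victim;
	c->next_victim ^= 1;
	load_page(c->kpage[slot], pg_idx);
	c->kpage_idx[slot] = pg_idx;
	return c->kpage[slot][pg_off];
}

int main(void)
{
	struct ib_cache c = { .kpage_idx = { -1, -1 } };

	printf("%u\n", get_ib_value(&c, 5));	/* page 0, offset 5 */
	printf("%u\n", get_ib_value(&c, 2048));	/* page 2, offset 0 */
	return 0;
}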
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 92e7ea73b7c..e8422ae7fe7 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -272,12 +272,12 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
return 0;
}
-static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
- dev_priv,
- struct drm_file *file_priv,
- drm_radeon_kcmd_buffer_t *
- cmdbuf,
- unsigned int *cmdsz)
+static int radeon_check_and_fixup_packet3(drm_radeon_private_t *
+ dev_priv,
+ struct drm_file *file_priv,
+ drm_radeon_kcmd_buffer_t *
+ cmdbuf,
+ unsigned int *cmdsz)
{
u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
u32 offset, narrays;
@@ -446,8 +446,8 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
* CP hardware state programming functions
*/
-static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
- struct drm_clip_rect * box)
+static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
+ struct drm_clip_rect * box)
{
RING_LOCALS;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index aa6a66eeb4e..06b90c87f8f 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -77,7 +77,7 @@ int rs400_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.ram.ptr) {
+ if (rdev->gart.ptr) {
WARN(1, "RS400 GART already initialized\n");
return 0;
}
@@ -182,6 +182,9 @@ int rs400_gart_enable(struct radeon_device *rdev)
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
rs400_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -209,6 +212,7 @@ void rs400_gart_fini(struct radeon_device *rdev)
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
+ u32 *gtt = rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
@@ -218,7 +222,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
- rdev->gart.table.ram.ptr[i] = entry;
+ gtt[i] = entry;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4b5d0e6974a..481b99e89f6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -287,6 +287,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
default:
break;
}
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
if (rdev->irq.installed)
rs600_irq_set(rdev);
@@ -413,7 +414,7 @@ int rs600_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.vram.robj) {
+ if (rdev->gart.robj) {
WARN(1, "RS600 GART already initialized\n");
return 0;
}
@@ -431,7 +432,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r, i;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -484,6 +485,9 @@ static int rs600_gart_enable(struct radeon_device *rdev)
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
rs600_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -491,20 +495,12 @@ static int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int r;
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
- if (rdev->gart.table.vram.robj) {
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
- if (r == 0) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
- }
- }
+ radeon_gart_table_vram_unpin(rdev);
}
void rs600_gart_fini(struct radeon_device *rdev)
@@ -522,7 +518,7 @@ void rs600_gart_fini(struct radeon_device *rdev)
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ void __iomem *ptr = (void *)rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b13c2eedc32..a983f410ab8 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -124,7 +124,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r, i;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -161,6 +161,9 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
r600_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -168,7 +171,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i, r;
+ int i;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -188,14 +191,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- if (rdev->gart.table.vram.robj) {
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
- }
- }
+ radeon_gart_table_vram_unpin(rdev);
}
void rv770_pcie_gart_fini(struct radeon_device *rdev)
@@ -279,7 +275,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
@@ -956,54 +952,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
-static int rv770_vram_scratch_init(struct radeon_device *rdev)
-{
- int r;
- u64 gpu_addr;
-
- if (rdev->vram_scratch.robj == NULL) {
- r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
- PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
- if (r) {
- return r;
- }
- }
-
- r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->vram_scratch.robj,
- RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->vram_scratch.robj);
- return r;
- }
- r = radeon_bo_kmap(rdev->vram_scratch.robj,
- (void **)&rdev->vram_scratch.ptr);
- if (r)
- radeon_bo_unpin(rdev->vram_scratch.robj);
- radeon_bo_unreserve(rdev->vram_scratch.robj);
-
- return r;
-}
-
-static void rv770_vram_scratch_fini(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->vram_scratch.robj == NULL) {
- return;
- }
- r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->vram_scratch.robj);
- radeon_bo_unpin(rdev->vram_scratch.robj);
- radeon_bo_unreserve(rdev->vram_scratch.robj);
- }
- radeon_bo_unref(&rdev->vram_scratch.robj);
-}
-
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
@@ -1103,6 +1051,10 @@ static int rv770_startup(struct radeon_device *rdev)
}
}
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
@@ -1111,9 +1063,7 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
}
- r = rv770_vram_scratch_init(rdev);
- if (r)
- return r;
+
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
@@ -1184,8 +1134,6 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
- int r;
-
r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
@@ -1193,14 +1141,8 @@ int rv770_suspend(struct radeon_device *rdev)
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
- /* unpin shaders bo */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (likely(r == 0)) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- }
+ r600_blit_suspend(rdev);
+
return 0;
}
@@ -1321,7 +1263,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
- rv770_vram_scratch_fini(rdev);
+ r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 6464490b240..5468d1cd329 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -23,6 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 46d5be6e97e..a9c5716bea4 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -25,6 +25,8 @@
*
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 8bf98810a8d..cda29911e33 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -30,6 +30,8 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "tdfx_drv.h"
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ef06194c5aa..617b64678fc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1293,6 +1293,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
return ret;
}
+EXPORT_SYMBOL(ttm_bo_create);
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
unsigned mem_type, bool allow_errors)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 58c271ebc0f..f9cc548d6d9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -35,6 +35,7 @@
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 920a55214bc..a83e86d3956 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -22,6 +22,8 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/module.h>
+
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 30ad13344f7..794ff67c570 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -7,7 +7,8 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
help
- KMS enabled DRM driver for SVGA2 virtual hardware.
-
- If unsure say n. The compiled module will be
- called vmwgfx.ko
+ Choose this option if you would like to run 3D acceleration
+ in a VMware virtual machine.
+ This is a KMS enabled DRM driver for the VMware SVGA2
+ virtual hardware.
+ The compiled module will be called "vmwgfx.ko".
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c9281a1b1d3..586869c8c11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
+ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
+ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index 77cb4533100..d0e085ee824 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -57,7 +57,8 @@ typedef enum {
SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
- SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
+ SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
} SVGA3dHardwareVersion;
/*
@@ -67,7 +68,8 @@ typedef enum {
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_NUM_CLIPPLANES 6
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
-
+#define SVGA3D_MAX_CONTEXT_IDS 256
+#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
/*
* Surface formats.
@@ -79,76 +81,91 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
*/
typedef enum SVGA3dSurfaceFormat {
- SVGA3D_FORMAT_INVALID = 0,
+ SVGA3D_FORMAT_INVALID = 0,
- SVGA3D_X8R8G8B8 = 1,
- SVGA3D_A8R8G8B8 = 2,
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_A8R8G8B8 = 2,
- SVGA3D_R5G6B5 = 3,
- SVGA3D_X1R5G5B5 = 4,
- SVGA3D_A1R5G5B5 = 5,
- SVGA3D_A4R4G4B4 = 6,
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
- SVGA3D_Z_D32 = 7,
- SVGA3D_Z_D16 = 8,
- SVGA3D_Z_D24S8 = 9,
- SVGA3D_Z_D15S1 = 10,
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
- SVGA3D_LUMINANCE8 = 11,
- SVGA3D_LUMINANCE4_ALPHA4 = 12,
- SVGA3D_LUMINANCE16 = 13,
- SVGA3D_LUMINANCE8_ALPHA8 = 14,
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
- SVGA3D_DXT1 = 15,
- SVGA3D_DXT2 = 16,
- SVGA3D_DXT3 = 17,
- SVGA3D_DXT4 = 18,
- SVGA3D_DXT5 = 19,
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
- SVGA3D_BUMPU8V8 = 20,
- SVGA3D_BUMPL6V5U5 = 21,
- SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
- SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
- SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
- SVGA3D_A2R10G10B10 = 26,
+ SVGA3D_A2R10G10B10 = 26,
/* signed formats */
- SVGA3D_V8U8 = 27,
- SVGA3D_Q8W8V8U8 = 28,
- SVGA3D_CxV8U8 = 29,
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
/* mixed formats */
- SVGA3D_X8L8V8U8 = 30,
- SVGA3D_A2W10V10U10 = 31,
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
- SVGA3D_ALPHA8 = 32,
+ SVGA3D_ALPHA8 = 32,
/* Single- and dual-component floating point formats */
- SVGA3D_R_S10E5 = 33,
- SVGA3D_R_S23E8 = 34,
- SVGA3D_RG_S10E5 = 35,
- SVGA3D_RG_S23E8 = 36,
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
/*
* Any surface can be used as a buffer object, but SVGA3D_BUFFER is
* the most efficient format to use when creating new surfaces
* expressly for index or vertex data.
*/
- SVGA3D_BUFFER = 37,
- SVGA3D_Z_D24X8 = 38,
+ SVGA3D_BUFFER = 37,
+
+ SVGA3D_Z_D24X8 = 38,
- SVGA3D_V16U16 = 39,
+ SVGA3D_V16U16 = 39,
- SVGA3D_G16R16 = 40,
- SVGA3D_A16B16G16R16 = 41,
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
/* Packed Video formats */
- SVGA3D_UYVY = 42,
- SVGA3D_YUY2 = 43,
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
+
+ /* Planar video formats */
+ SVGA3D_NV12 = 44,
+
+ /* Video format with alpha */
+ SVGA3D_AYUV = 45,
+
+ SVGA3D_BC4_UNORM = 108,
+ SVGA3D_BC5_UNORM = 111,
+
+ /* Advanced D3D9 depth formats. */
+ SVGA3D_Z_DF16 = 118,
+ SVGA3D_Z_DF24 = 119,
+ SVGA3D_Z_D24S8_INT = 120,
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
@@ -414,10 +431,20 @@ typedef enum {
SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
+ SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
+ SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
+ SVGA3D_RS_LINEWIDTH = 99, /* float */
SVGA3D_RS_MAX
} SVGA3dRenderStateName;
typedef enum {
+ SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
+ SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
+ SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
+ SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
@@ -728,10 +755,10 @@ typedef enum {
SVGA3D_TEX_FILTER_NEAREST = 1,
SVGA3D_TEX_FILTER_LINEAR = 2,
SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
- SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
- SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
- SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
- SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
SVGA3D_TEX_FILTER_MAX
} SVGA3dTextureFilter;
@@ -799,19 +826,19 @@ typedef enum {
typedef enum {
SVGA3D_DECLUSAGE_POSITION = 0,
- SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
- SVGA3D_DECLUSAGE_BLENDINDICES, // 2
- SVGA3D_DECLUSAGE_NORMAL, // 3
- SVGA3D_DECLUSAGE_PSIZE, // 4
- SVGA3D_DECLUSAGE_TEXCOORD, // 5
- SVGA3D_DECLUSAGE_TANGENT, // 6
- SVGA3D_DECLUSAGE_BINORMAL, // 7
- SVGA3D_DECLUSAGE_TESSFACTOR, // 8
- SVGA3D_DECLUSAGE_POSITIONT, // 9
- SVGA3D_DECLUSAGE_COLOR, // 10
- SVGA3D_DECLUSAGE_FOG, // 11
- SVGA3D_DECLUSAGE_DEPTH, // 12
- SVGA3D_DECLUSAGE_SAMPLE, // 13
+ SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
+ SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
+ SVGA3D_DECLUSAGE_NORMAL, /* 3 */
+ SVGA3D_DECLUSAGE_PSIZE, /* 4 */
+ SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
+ SVGA3D_DECLUSAGE_TANGENT, /* 6 */
+ SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
+ SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
+ SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
+ SVGA3D_DECLUSAGE_COLOR, /* 10 */
+ SVGA3D_DECLUSAGE_FOG, /* 11 */
+ SVGA3D_DECLUSAGE_DEPTH, /* 12 */
+ SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
SVGA3D_DECLUSAGE_MAX
} SVGA3dDeclUsage;
@@ -819,10 +846,10 @@ typedef enum {
SVGA3D_DECLMETHOD_DEFAULT = 0,
SVGA3D_DECLMETHOD_PARTIALU,
SVGA3D_DECLMETHOD_PARTIALV,
- SVGA3D_DECLMETHOD_CROSSUV, // Normal
+ SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
SVGA3D_DECLMETHOD_UV,
- SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
- SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
+ SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
} SVGA3dDeclMethod;
typedef enum {
@@ -930,7 +957,6 @@ typedef enum {
} SVGA3dCubeFace;
typedef enum {
- SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
SVGA3D_SHADERTYPE_MAX
@@ -968,12 +994,18 @@ typedef enum {
} SVGA3dTransferType;
/*
- * The maximum number vertex arrays we're guaranteed to support in
+ * The maximum number of vertex arrays we're guaranteed to support in
* SVGA_3D_CMD_DRAWPRIMITIVES.
*/
#define SVGA3D_MAX_VERTEX_ARRAYS 32
/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+/*
* Identifiers for commands in the command FIFO.
*
* IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
@@ -990,7 +1022,7 @@ typedef enum {
#define SVGA_3D_CMD_LEGACY_BASE 1000
#define SVGA_3D_CMD_BASE 1040
-#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
+#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */
#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
@@ -1008,7 +1040,7 @@ typedef enum {
#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
+#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */
#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
@@ -1018,9 +1050,13 @@ typedef enum {
#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
+#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */
#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
+#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
+#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
+#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
#define SVGA_3D_CMD_FUTURE_MAX 2000
@@ -1031,9 +1067,9 @@ typedef enum {
typedef struct {
union {
struct {
- uint16 function; // SVGA3dFogFunction
- uint8 type; // SVGA3dFogType
- uint8 base; // SVGA3dFogBase
+ uint16 function; /* SVGA3dFogFunction */
+ uint8 type; /* SVGA3dFogType */
+ uint8 base; /* SVGA3dFogBase */
};
uint32 uintValue;
};
@@ -1109,6 +1145,8 @@ typedef enum {
SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
+ SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
} SVGA3dSurfaceFlags;
typedef
@@ -1121,6 +1159,12 @@ struct {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+ * structures must have the same value of numMipLevels field.
+ * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+ * numMipLevels set to 0.
+ */
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
@@ -1135,6 +1179,31 @@ struct {
typedef
struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+ * structures must have the same value of numMipLevels field.
+ * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+ * numMipLevels set to 0.
+ */
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
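A note on the block-compressed storage rule above (a 3x3x1 DXT1 level stored as 4x4x1): it amounts to rounding each dimension up to the format's block size. A trivial sketch, not part of this header; the helper name is illustrative.

static inline uint32 example_round_to_block(uint32 pixels, uint32 block)
{
	/* e.g. example_round_to_block(3, 4) == 4 for a DXT1 mip level */
	return ((pixels + block - 1) / block) * block;
}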
+typedef
+struct {
uint32 sid;
} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
@@ -1474,10 +1543,12 @@ struct {
* SVGA3dCmdDrawPrimitives structure. In order,
* they are:
*
- * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
- * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+ * SVGA3D_MAX_VERTEX_ARRAYS;
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+ * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
* 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
- * the frequency divisor for this the corresponding vertex decl)
+ * the frequency divisor for the corresponding vertex decl).
*/
} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
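To make the layout above concrete, the variable-length body of a DRAWPRIMITIVES command can be sized from the declared counts. This is a sketch only, not part of this patch; the helper name and the hasDivisors flag are illustrative, while the SVGA3d* types are the ones defined in this header.

/*
 * Illustration only: size of the variable-length data that follows
 * SVGA3dCmdDrawPrimitives, per the layout documented above.
 */
static inline size_t
svga3d_example_draw_body_size(uint32 numVertexDecls, uint32 numRanges,
			      bool hasDivisors)
{
	size_t size = sizeof(SVGA3dCmdDrawPrimitives);

	size += numVertexDecls * sizeof(SVGA3dVertexDecl);
	size += numRanges * sizeof(SVGA3dPrimitiveRange);
	if (hasDivisors)
		size += numVertexDecls * sizeof(SVGA3dVertexDivisor);

	return size;
}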
@@ -1671,6 +1742,12 @@ struct {
/* Clipping: zero or more SVGASignedRects follow */
} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
+typedef
+struct {
+ uint32 sid;
+ SVGA3dTextureFilter filter;
+} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
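A minimal sketch of queueing the new GENERATE_MIPMAPS command, assuming the vmw_fifo_reserve()/vmw_fifo_commit() helpers used elsewhere in the driver; the wrapper struct and function name here are illustrative only, not part of the patch.

struct example_mipmap_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdGenerateMipmaps body;
};

static int example_generate_mipmaps(struct vmw_private *dev_priv,
				    uint32 sid, SVGA3dTextureFilter filter)
{
	struct example_mipmap_cmd *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	/* 3D commands carry a header with the command id and body size. */
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_GENERATE_MIPMAPS);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = sid;
	cmd->body.filter = filter;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}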
/*
* Capability query index.
@@ -1774,6 +1851,32 @@ typedef enum {
SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+ SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
+ SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
+ SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
+ SVGA3D_DEVCAP_SUPERSAMPLE = 73,
+ SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
+ SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
+ SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
+
+ /*
+ * This is the maximum number of SVGA context IDs that the guest
+ * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+ */
+ SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
+
+ /*
+ * This is the maximum number of SVGA surface IDs that the guest
+ * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+ */
+ SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
+
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
+
+ SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
+ SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
/*
* Don't add new caps into the previous section; the values in this
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
index 7b85e9b8c85..8e8d9682e01 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -75,7 +75,7 @@
*/
#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
-#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 /* Deprecated */
typedef
struct {
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
index f753d73c14b..f38416fcb04 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -38,9 +38,9 @@
* Video formats we support
*/
-#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
-#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
-#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
typedef enum {
SVGA_OVERLAY_FORMAT_INVALID = 0,
@@ -68,7 +68,7 @@ struct SVGAEscapeVideoSetRegs {
uint32 streamId;
} header;
- // May include zero or more items.
+ /* May include zero or more items. */
struct {
uint32 registerId;
uint32 value;
@@ -134,12 +134,12 @@ struct {
*/
static inline bool
-VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
- uint32 *width, // IN / OUT
- uint32 *height, // IN / OUT
- uint32 *size, // OUT
- uint32 *pitches, // OUT (optional)
- uint32 *offsets) // OUT (optional)
+VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
+ uint32 *width, /* IN / OUT */
+ uint32 *height, /* IN / OUT */
+ uint32 *size, /* OUT */
+ uint32 *pitches, /* OUT (optional) */
+ uint32 *offsets) /* OUT (optional) */
{
int tmp;
@@ -198,4 +198,4 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
return true;
}
-#endif // _SVGA_OVERLAY_H_
+#endif /* _SVGA_OVERLAY_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 1b96c2ec07d..01f63cb4967 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -39,6 +39,15 @@
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
/*
+ * SVGA_REG_ENABLE bit definitions.
+ */
+#define SVGA_REG_ENABLE_DISABLE 0
+#define SVGA_REG_ENABLE_ENABLE 1
+#define SVGA_REG_ENABLE_HIDE 2
+#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
+ SVGA_REG_ENABLE_HIDE)
+
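The new HIDE bit is read-modify-written on SVGA_REG_ENABLE by the vmwgfx_drv.c changes later in this patch; condensed here into one illustrative helper. The function name is hypothetical, while vmw_read()/vmw_write() and hw_mutex are the existing driver accessors.

static void example_svga_set_hidden(struct vmw_private *dev_priv, bool hide)
{
	uint32_t reg;

	mutex_lock(&dev_priv->hw_mutex);
	reg = vmw_read(dev_priv, SVGA_REG_ENABLE);
	if (hide)
		reg |= SVGA_REG_ENABLE_HIDE;
	else
		reg &= ~SVGA_REG_ENABLE_HIDE;
	vmw_write(dev_priv, SVGA_REG_ENABLE, reg);
	mutex_unlock(&dev_priv->hw_mutex);
}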
+/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
@@ -158,7 +167,9 @@ enum {
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
- SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
+ SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
+ SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
+ SVGA_REG_TOP = 48, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -265,7 +276,7 @@ enum {
* possible.
*/
#define SVGA_GMR_NULL ((uint32) -1)
-#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
+#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
typedef
struct SVGAGuestMemDescriptor {
@@ -306,13 +317,35 @@ struct SVGAGMRImageFormat {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; // Must be zero
+ uint32 reserved : 16; /* Must be zero */
};
uint32 value;
};
} SVGAGMRImageFormat;
+typedef
+struct SVGAGuestImage {
+ SVGAGuestPtr ptr;
+
+ /*
+ * A note on interpretation of pitch: This value of pitch is the
+ * number of bytes between vertically adjacent image
+ * blocks. Normally this is the number of bytes between the first
+ * pixel of two adjacent scanlines. With compressed textures,
+ * however, this may represent the number of bytes between
+ * compression blocks rather than between rows of pixels.
+ *
+ * XXX: Compressed textures currently must be tightly packed in guest memory.
+ *
+ * If the image is 1-dimensional, pitch is ignored.
+ *
+ * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+ * assuming each row of blocks is tightly packed.
+ */
+ uint32 pitch;
+} SVGAGuestImage;
+
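For linear (non-compressed) formats the pitch rule above reduces to "tightly packed rows when pitch is zero". A sketch only; the helper and its bytes-per-pixel parameter are illustrative, not part of the header.

/*
 * Illustration only: a zero pitch means rows are tightly packed, so for
 * a linear format the effective pitch is width * bytes-per-pixel.
 * Compressed formats count bytes between block rows instead.
 */
static inline uint32 example_guest_image_pitch(const SVGAGuestImage *img,
					       uint32 width,
					       uint32 bytes_per_pixel)
{
	return (img->pitch != 0) ? img->pitch : width * bytes_per_pixel;
}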
/*
* SVGAColorBGRX --
*
@@ -328,7 +361,7 @@ struct SVGAColorBGRX {
uint32 b : 8;
uint32 g : 8;
uint32 r : 8;
- uint32 x : 8; // Unused
+ uint32 x : 8; /* Unused */
};
uint32 value;
@@ -370,23 +403,34 @@ struct SVGASignedPoint {
* Note the holes in the bitfield. Missing bits have been deprecated,
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
+ *
+ * SVGA_CAP_GMR2 --
+ * Provides asynchronous commands to define and remap guest memory
+ * regions. Adds device registers SVGA_REG_GMRS_MAX_PAGES and
+ * SVGA_REG_MEMORY_SIZE.
+ *
+ * SVGA_CAP_SCREEN_OBJECT_2 --
+ * Allow screen object support, and require backing stores from the
+ * guest for each screen object.
*/
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
-#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
-#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
-#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
+#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
+#define SVGA_CAP_GMR2 0x00400000
+#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
/*
@@ -431,7 +475,7 @@ enum {
SVGA_FIFO_CAPABILITIES = 4,
SVGA_FIFO_FLAGS,
- // Valid with SVGA_FIFO_CAP_FENCE:
+ /* Valid with SVGA_FIFO_CAP_FENCE: */
SVGA_FIFO_FENCE,
/*
@@ -444,33 +488,47 @@ enum {
* extended FIFO.
*/
- // Valid if exists (i.e. if extended FIFO enabled):
+ /* Valid if exists (i.e. if extended FIFO enabled): */
SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
- // Valid with SVGA_FIFO_CAP_PITCHLOCK:
+ /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
SVGA_FIFO_PITCHLOCK,
- // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
+ /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
- // Valid with SVGA_FIFO_CAP_RESERVE:
+ /* Valid with SVGA_FIFO_CAP_RESERVE: */
SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
/*
- * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
+ * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
*
* By default this is SVGA_ID_INVALID, to indicate that the cursor
* coordinates are specified relative to the virtual root. If this
* is set to a specific screen ID, cursor position is reinterpreted
- * as a signed offset relative to that screen's origin. This is the
- * only way to place the cursor on a non-rooted screen.
+ * as a signed offset relative to that screen's origin.
*/
SVGA_FIFO_CURSOR_SCREEN_ID,
/*
+ * Valid with SVGA_FIFO_CAP_DEAD
+ *
+ * An arbitrary value written by the host, drivers should not use it.
+ */
+ SVGA_FIFO_DEAD,
+
+ /*
+ * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+ *
+ * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+ * on platforms that can enforce graphics resource limits.
+ */
+ SVGA_FIFO_3D_HWVERSION_REVISED,
+
+ /*
* XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
* registers, but this must be done carefully and with judicious use of
* capability bits, since comparisons based on SVGA_FIFO_MIN aren't
@@ -508,7 +566,7 @@ enum {
* sets SVGA_FIFO_MIN high enough to leave room for them.
*/
- // Valid if register exists:
+ /* Valid if register exists: */
SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
@@ -709,6 +767,37 @@ enum {
*
* - When a screen is resized, either using Screen Object commands or
* legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ * Provides new commands to define and remap guest memory regions (GMR).
+ *
+ * New 2D commands:
+ * DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ * Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ * This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ * that enforce graphics resource limits. This allows the platform
+ * to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ * drivers that do not limit their resources.
+ *
+ * Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ * are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ * Modifies the DEFINE_SCREEN command to include a guest provided
+ * backing store in GMR memory and the bytesPerLine for the backing
+ * store. This capability requires the use of a backing store when
+ * creating screen objects. However, if SVGA_FIFO_CAP_SCREEN_OBJECT
+ * is present then backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ * Drivers should not use this cap bit. This cap bit cannot be
+ * reused since some hosts already expose it.
*/
#define SVGA_FIFO_CAP_NONE 0
@@ -720,6 +809,10 @@ enum {
#define SVGA_FIFO_CAP_ESCAPE (1<<5)
#define SVGA_FIFO_CAP_RESERVE (1<<6)
#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
+#define SVGA_FIFO_CAP_GMR2 (1<<8)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1<<9)
+#define SVGA_FIFO_CAP_DEAD (1<<10)
/*
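A short sketch of testing the new capability bits from the mapped FIFO, in the style of vmwgfx_fifo.c; the function name is illustrative and it assumes the driver's FIFO mapping (dev_priv->mmio_virt).

static inline bool example_have_screen_object_2(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	/* Only meaningful when the extended FIFO is enabled. */
	return (caps & SVGA_FIFO_CAP_SCREEN_OBJECT_2) != 0;
}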
@@ -730,7 +823,7 @@ enum {
#define SVGA_FIFO_FLAG_NONE 0
#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
-#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
+#define SVGA_FIFO_FLAG_RESERVED (1<<31) /* Internal use only */
/*
* FIFO reservation sentinel value
@@ -763,22 +856,22 @@ enum {
SVGA_VIDEO_DATA_OFFSET,
SVGA_VIDEO_FORMAT,
SVGA_VIDEO_COLORKEY,
- SVGA_VIDEO_SIZE, // Deprecated
+ SVGA_VIDEO_SIZE, /* Deprecated */
SVGA_VIDEO_WIDTH,
SVGA_VIDEO_HEIGHT,
SVGA_VIDEO_SRC_X,
SVGA_VIDEO_SRC_Y,
SVGA_VIDEO_SRC_WIDTH,
SVGA_VIDEO_SRC_HEIGHT,
- SVGA_VIDEO_DST_X, // Signed int32
- SVGA_VIDEO_DST_Y, // Signed int32
+ SVGA_VIDEO_DST_X, /* Signed int32 */
+ SVGA_VIDEO_DST_Y, /* Signed int32 */
SVGA_VIDEO_DST_WIDTH,
SVGA_VIDEO_DST_HEIGHT,
SVGA_VIDEO_PITCH_1,
SVGA_VIDEO_PITCH_2,
SVGA_VIDEO_PITCH_3,
- SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
- SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
+ SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+ SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
SVGA_VIDEO_NUM_REGS
};
@@ -829,15 +922,51 @@ typedef struct SVGAOverlayUnit {
* compatibility. New flags can be added, and the struct may grow,
* but existing fields must retain their meaning.
*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
+ * an SVGAGuestPtr that is used to back the screen contents. This
+ * memory must come from the GFB. The guest is not allowed to
+ * access the memory and doing so will have undefined results. The
+ * backing store is required to be page aligned and the size is
+ * padded to the next page boundary. The number of pages is:
+ * (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ * The pitch in the backingStore is required to be at least large
+ * enough to hold a 32bpp scanline. It is recommended that the
+ * driver pad bytesPerLine for a potential performance win.
+ *
+ * The cloneCount field is treated as a hint from the guest that
+ * the user wants this display to be cloned, cloneCount times. A
+ * value of zero means no cloning should happen.
+ */
+
+#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
+ * deactivated the base layer is defined to lose all contents and
+ * become black. When a screen is deactivated the backing store is
+ * optional. When set, backingPtr and bytesPerLine will be ignored.
*/
+#define SVGA_SCREEN_DEACTIVATE (1 << 3)
-#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
-#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set
+ * the screen contents will be output as all black to the user,
+ * though the base layer contents are preserved. The screen base layer
+ * can still be read and written to as normal, though no visible
+ * effect will be seen by the user. When the flag is changed the
+ * screen will be blanked or redrawn to the current contents as needed
+ * without any extra commands from the driver. This flag only has an
+ * effect when the screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
typedef
struct SVGAScreenObject {
- uint32 structSize; // sizeof(SVGAScreenObject)
+ uint32 structSize; /* sizeof(SVGAScreenObject) */
uint32 id;
uint32 flags;
struct {
@@ -847,7 +976,14 @@ struct SVGAScreenObject {
struct {
int32 x;
int32 y;
- } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
+ } root;
+
+ /*
+ * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+ * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+ */
+ SVGAGuestImage backingStore;
+ uint32 cloneCount;
} SVGAScreenObject;
@@ -885,6 +1021,8 @@ typedef enum {
SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
SVGA_CMD_ANNOTATION_FILL = 39,
SVGA_CMD_ANNOTATION_COPY = 40,
+ SVGA_CMD_DEFINE_GMR2 = 41,
+ SVGA_CMD_REMAP_GMR2 = 42,
SVGA_CMD_MAX
} SVGAFifoCmdId;
@@ -920,7 +1058,7 @@ typedef enum {
*/
typedef
-struct {
+struct SVGAFifoCmdUpdate {
uint32 x;
uint32 y;
uint32 width;
@@ -939,7 +1077,7 @@ struct {
*/
typedef
-struct {
+struct SVGAFifoCmdRectCopy {
uint32 srcX;
uint32 srcY;
uint32 destX;
@@ -963,14 +1101,14 @@ struct {
*/
typedef
-struct {
- uint32 id; // Reserved, must be zero.
+struct SVGAFifoCmdDefineCursor {
+ uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
- uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
- uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ uint32 andMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
+ uint32 xorMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
/*
* Followed by scanline data for AND mask, then XOR mask.
* Each scanline is padded to a 32-bit boundary.
@@ -992,8 +1130,8 @@ struct {
*/
typedef
-struct {
- uint32 id; // Reserved, must be zero.
+struct SVGAFifoCmdDefineAlphaCursor {
+ uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
@@ -1015,7 +1153,7 @@ struct {
*/
typedef
-struct {
+struct SVGAFifoCmdUpdateVerbose {
uint32 x;
uint32 y;
uint32 width;
@@ -1040,13 +1178,13 @@ struct {
#define SVGA_ROP_COPY 0x03
typedef
-struct {
- uint32 color; // In the same format as the GFB
+struct SVGAFifoCmdFrontRopFill {
+ uint32 color; /* In the same format as the GFB */
uint32 x;
uint32 y;
uint32 width;
uint32 height;
- uint32 rop; // Must be SVGA_ROP_COPY
+ uint32 rop; /* Must be SVGA_ROP_COPY */
} SVGAFifoCmdFrontRopFill;
@@ -1083,7 +1221,7 @@ struct {
*/
typedef
-struct {
+struct SVGAFifoCmdEscape {
uint32 nsid;
uint32 size;
/* followed by 'size' bytes of data */
@@ -1113,12 +1251,12 @@ struct {
* registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
struct {
- SVGAScreenObject screen; // Variable-length according to version
+ SVGAScreenObject screen; /* Variable-length according to version */
} SVGAFifoCmdDefineScreen;
@@ -1129,7 +1267,7 @@ struct {
* re-use.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1182,7 +1320,7 @@ struct {
* GMRFB.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1219,7 +1357,7 @@ struct {
* SVGA_CMD_ANNOTATION_* commands for details.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1267,7 +1405,7 @@ struct {
* the time any subsequent FENCE commands are reached.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1302,7 +1440,7 @@ struct {
* user's display is being remoted over a network connection.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1334,7 +1472,7 @@ struct {
* undefined.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1343,4 +1481,72 @@ struct {
uint32 srcScreenId;
} SVGAFifoCmdAnnotationCopy;
+
+/*
+ * SVGA_CMD_DEFINE_GMR2 --
+ *
+ * Define guest memory region v2. See the description of GMRs above.
+ *
+ * Availability:
+ * SVGA_CAP_GMR2
+ */
+
+typedef
+struct {
+ uint32 gmrId;
+ uint32 numPages;
+} SVGAFifoCmdDefineGMR2;
+
+
+/*
+ * SVGA_CMD_REMAP_GMR2 --
+ *
+ * Remap guest memory region v2. See the description of GMRs above.
+ *
+ * This command allows the guest to modify a portion of an existing GMR by
+ * invalidating it or reassigning it to different guest physical pages.
+ * The pages are identified by physical page number (PPN). The pages
+ * are assumed to be pinned and valid for DMA operations.
+ *
+ * Description of command flags:
+ *
+ * SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
+ * The PPN list must not overlap with the remap region (this can be
+ * handled trivially by referencing a separate GMR). If the flag is
+ * disabled, the PPN list is appended to the SVGARemapGMR command.
+ *
+ * SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
+ * it is in PPN32 format.
+ *
+ * SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
+ * A single PPN can be used to invalidate a portion of a GMR or
+ * map it to a single guest scratch page.
+ *
+ * Availability:
+ * SVGA_CAP_GMR2
+ */
+
+typedef enum {
+ SVGA_REMAP_GMR2_PPN32 = 0,
+ SVGA_REMAP_GMR2_VIA_GMR = (1 << 0),
+ SVGA_REMAP_GMR2_PPN64 = (1 << 1),
+ SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2),
+} SVGARemapGMR2Flags;
+
+typedef
+struct {
+ uint32 gmrId;
+ SVGARemapGMR2Flags flags;
+ uint32 offsetPages; /* offset in pages to begin remap */
+ uint32 numPages; /* number of pages to remap */
+ /*
+ * Followed by additional data depending on SVGARemapGMR2Flags.
+ *
+ * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
+ * Otherwise an array of page descriptors in PPN32 or PPN64 format
+ * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
+ * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
+ */
+} SVGAFifoCmdRemapGMR2;
+
#endif
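To make the DEFINE_GMR2/REMAP_GMR2 flow described above concrete, here is a minimal sketch that declares a GMR and maps num_pages guest pages with an appended PPN32 list. It assumes the driver's vmw_fifo_reserve()/vmw_fifo_commit() helpers; the function name and the flat ppns[] array are illustrative (a real driver would build the list from its pinned pages).

static int example_gmr2_define_and_map(struct vmw_private *dev_priv,
				       uint32 gmr_id, const uint32 *ppns,
				       uint32 num_pages)
{
	SVGAFifoCmdDefineGMR2 *define;
	SVGAFifoCmdRemapGMR2 *remap;
	uint32 *cmd;
	uint32 define_size = sizeof(uint32) + sizeof(*define);
	uint32 remap_size = sizeof(uint32) + sizeof(*remap) +
			    num_pages * sizeof(uint32);

	cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	/* SVGA_CMD_DEFINE_GMR2: declare the region and its page count. */
	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	define = (SVGAFifoCmdDefineGMR2 *) cmd;
	define->gmrId = gmr_id;
	define->numPages = num_pages;
	cmd += sizeof(*define) / sizeof(uint32);

	/* SVGA_CMD_REMAP_GMR2: map all pages; PPN32 list appended inline. */
	*cmd++ = SVGA_CMD_REMAP_GMR2;
	remap = (SVGAFifoCmdRemapGMR2 *) cmd;
	remap->gmrId = gmr_id;
	remap->flags = SVGA_REMAP_GMR2_PPN32;
	remap->offsetPages = 0;
	remap->numPages = num_pages;
	memcpy(remap + 1, ppns, num_pages * sizeof(uint32));

	vmw_fifo_commit(dev_priv, define_size + remap_size);
	return 0;
}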
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 87e43e0733b..5a72ed90823 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -42,6 +42,10 @@ static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
+static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -56,6 +60,11 @@ static uint32_t vram_gmr_placement_flags[] = {
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
+static uint32_t gmr_vram_placement_flags[] = {
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
struct ttm_placement vmw_vram_gmr_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -65,6 +74,20 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
+static uint32_t vram_gmr_ne_placement_flags[] = {
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+struct ttm_placement vmw_vram_gmr_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 2,
+ .placement = vram_gmr_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &gmr_ne_placement_flags
+};
+
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -92,6 +115,30 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
+static uint32_t evictable_placement_flags[] = {
+ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_evictable_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 3,
+ .placement = evictable_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_srf_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .num_busy_placement = 2,
+ .placement = &gmr_placement_flags,
+ .busy_placement = gmr_vram_placement_flags
+};
+
struct vmw_ttm_backend {
struct ttm_backend backend;
struct page **pages;
@@ -274,39 +321,39 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
static void *vmw_sync_obj_ref(void *sync_obj)
{
- return sync_obj;
+
+ return (void *)
+ vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}
static void vmw_sync_obj_unref(void **sync_obj)
{
- *sync_obj = NULL;
+ vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}
static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
- struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-
- mutex_lock(&dev_priv->hw_mutex);
- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
+ vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
return 0;
}
static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
- struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
- uint32_t sequence = (unsigned long) sync_obj;
+ unsigned long flags = (unsigned long) sync_arg;
+ return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
+ (uint32_t) flags);
- return vmw_fence_signaled(dev_priv, sequence);
}
static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
- struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
- uint32_t sequence = (unsigned long) sync_obj;
+ unsigned long flags = (unsigned long) sync_arg;
- return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+ return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
+ (uint32_t) flags,
+ lazy, interruptible,
+ VMW_FENCE_WAIT_TIMEOUT);
}
struct ttm_bo_driver vmw_bo_driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
new file mode 100644
index 00000000000..3fa884db08a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -0,0 +1,322 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_placement.h"
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+
+/**
+ * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @placement: The struct ttm_placement to validate the buffer against.
+ * @interruptible: Use interruptible wait.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
+{
+ struct vmw_master *vmaster = dev_priv->active_master;
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ret = ttm_write_lock(&vmaster->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_write_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @pin: Pin buffer if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct vmw_master *vmaster = dev_priv->active_master;
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement *placement;
+ int ret;
+
+ ret = ttm_write_lock(&vmaster->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (pin)
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ /**
+ * Put BO in VRAM if there is space, otherwise as a GMR.
+ * If there is no space in VRAM and GMR ids are all used up,
+ * start evicting GMRs to make room. If the DMA buffer can't be
+ * used as a GMR, this will return -ENOMEM.
+ */
+
+ if (pin)
+ placement = &vmw_vram_gmr_ne_placement;
+ else
+ placement = &vmw_vram_gmr_placement;
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ if (likely(ret == 0) || ret == -ERESTARTSYS)
+ goto err_unreserve;
+
+
+ /**
+ * If that failed, try VRAM again, this time evicting
+ * previous contents.
+ */
+
+ if (pin)
+ placement = &vmw_vram_ne_placement;
+ else
+ placement = &vmw_vram_placement;
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+
+err_unreserve:
+ ttm_bo_unreserve(bo);
+err:
+ ttm_write_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_dmabuf_to_vram - Move a buffer to vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct ttm_placement *placement;
+
+ if (pin)
+ placement = &vmw_vram_ne_placement;
+ else
+ placement = &vmw_vram_placement;
+
+ return vmw_dmabuf_to_placement(dev_priv, buf,
+ placement,
+ interruptible);
+}
+
+/**
+ * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct vmw_master *vmaster = dev_priv->active_master;
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement placement;
+ int ret = 0;
+
+ if (pin)
+ placement = vmw_vram_ne_placement;
+ else
+ placement = vmw_vram_placement;
+ placement.lpfn = bo->num_pages;
+
+ ret = ttm_write_lock(&vmaster->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (pin)
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ /* Is this buffer already in vram but not at the start of it? */
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0)
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+ false, false);
+
+ ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+
+ /* For some reason we didn't end up at the start of vram */
+ WARN_ON(ret == 0 && bo->offset != 0);
+
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&vmaster->lock);
+
+ return ret;
+}
+
+
+/**
+ * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to unpin.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
+{
+ /*
+ * We could in theory early out if the buffer is
+ * unpinned but we need to lock and reserve the buffer
+ * anyway, so we don't gain much by that.
+ */
+ return vmw_dmabuf_to_placement(dev_priv, buf,
+ &vmw_evictable_placement,
+ interruptible);
+}
+
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+ SVGAGuestPtr *ptr)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+ ptr->offset = bo->offset;
+ } else {
+ ptr->gmrId = bo->mem.start;
+ ptr->offset = 0;
+ }
+}
+
+
+/**
+ * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ *
+ * @bo: The buffer object. Must be reserved, and present either in VRAM
+ * or GMR memory.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+{
+ uint32_t pl_flags;
+ struct ttm_placement placement;
+ uint32_t old_mem_type = bo->mem.mem_type;
+ int ret;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(old_mem_type != TTM_PL_VRAM &&
+ old_mem_type != VMW_PL_FLAG_GMR);
+
+ pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+ if (pin)
+ pl_flags |= TTM_PL_FLAG_NO_EVICT;
+
+ memset(&placement, 0, sizeof(placement));
+ placement.num_placement = 1;
+ placement.placement = &pl_flags;
+
+ ret = ttm_bo_validate(bo, &placement, false, true, true);
+
+ BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 96949b93d92..dff8fc76715 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -24,6 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#include <linux/module.h>
#include "drmP.h"
#include "vmwgfx_drv.h"
@@ -82,17 +83,31 @@
#define DRM_IOCTL_VMW_EXECBUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
struct drm_vmw_execbuf_arg)
-#define DRM_IOCTL_VMW_FIFO_DEBUG \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
- struct drm_vmw_fifo_debug_arg)
+#define DRM_IOCTL_VMW_GET_3D_CAP \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
+ struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_FENCE_SIGNALED \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
+ struct drm_vmw_fence_signaled_arg)
+#define DRM_IOCTL_VMW_FENCE_UNREF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
+ struct drm_vmw_fence_arg)
+#define DRM_IOCTL_VMW_FENCE_EVENT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
+ struct drm_vmw_fence_event_arg)
+#define DRM_IOCTL_VMW_PRESENT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
+ struct drm_vmw_present_arg)
+#define DRM_IOCTL_VMW_PRESENT_READBACK \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
+ struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
-
/**
* The core DRM version of this macro doesn't account for
* DRM_COMMAND_BASE.
@@ -135,12 +150,28 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
- DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
+ vmw_fence_obj_signaled_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_FENCE_EVENT,
+ vmw_fence_event_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
- DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
+
+ /* these allow direct access to the framebuffers; mark as master only */
+ VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
+ DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
+ vmw_present_readback_ioctl,
+ DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
+ vmw_kms_update_layout_ioctl,
+ DRM_MASTER | DRM_UNLOCKED),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,8 +220,78 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" GMR.\n");
if (capabilities & SVGA_CAP_TRACES)
DRM_INFO(" Traces.\n");
+ if (capabilities & SVGA_CAP_GMR2)
+ DRM_INFO(" GMR2.\n");
+ if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
+ DRM_INFO(" Screen Object 2.\n");
+}
+
+
+/**
+ * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
+ * the start of a buffer object.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function will idle the buffer using an uninterruptible wait, then
+ * map the first page and initialize a pending occlusion query result structure.
+ * Finally it will unmap the buffer.
+ *
+ * TODO: Since we're only mapping a single page, we should optimize the map
+ * to use kmap_atomic / iomap_atomic.
+ */
+static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+{
+ struct ttm_bo_kmap_obj map;
+ volatile SVGA3dQueryResult *result;
+ bool dummy;
+ int ret;
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+
+ ttm_bo_reserve(bo, false, false, false, 0);
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ if (unlikely(ret != 0))
+ (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
+ 10*HZ);
+
+ ret = ttm_bo_kmap(bo, 0, 1, &map);
+ if (likely(ret == 0)) {
+ result = ttm_kmap_obj_virtual(&map, &dummy);
+ result->totalSize = sizeof(*result);
+ result->state = SVGA3D_QUERYSTATE_PENDING;
+ result->result32 = 0xff;
+ ttm_bo_kunmap(&map);
+ } else
+ DRM_ERROR("Dummy query buffer map failed.\n");
+ ttm_bo_unreserve(bo);
+}
+
+
+/**
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
+ *
+ * @dev_priv: A device private structure.
+ *
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * No interruptible waits are done within this function.
+ *
+ * Returns an error if bo creation fails.
+ */
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+{
+ return ttm_bo_create(&dev_priv->bdev,
+ PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_vram_sys_placement,
+ 0, 0, false, NULL,
+ &dev_priv->dummy_query_bo);
}
+
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -200,16 +301,42 @@ static int vmw_request_device(struct vmw_private *dev_priv)
DRM_ERROR("Unable to initialize FIFO.\n");
return ret;
}
+ vmw_fence_fifo_up(dev_priv->fman);
+ ret = vmw_dummy_query_bo_create(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_query_bo;
+ vmw_dummy_query_bo_prepare(dev_priv);
return 0;
+
+out_no_query_bo:
+ vmw_fence_fifo_down(dev_priv->fman);
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ return ret;
}
static void vmw_release_device(struct vmw_private *dev_priv)
{
+ /*
+ * Previous destructions should've released
+ * the pinned bo.
+ */
+
+ BUG_ON(dev_priv->pinned_bo != NULL);
+
+ ttm_bo_unref(&dev_priv->dummy_query_bo);
+ vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
-int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+/**
+ * Increase the 3d resource refcount.
+ * If the count was previously zero, initialize the fifo, switching to svga
+ * mode. Note that the master holds a ref as well, and may request an
+ * explicit switch to svga mode if fb is not running, using @unhide_svga.
+ */
+int vmw_3d_resource_inc(struct vmw_private *dev_priv,
+ bool unhide_svga)
{
int ret = 0;
@@ -218,19 +345,42 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv)
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
+ } else if (unhide_svga) {
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ vmw_read(dev_priv, SVGA_REG_ENABLE) &
+ ~SVGA_REG_ENABLE_HIDE);
+ mutex_unlock(&dev_priv->hw_mutex);
}
+
mutex_unlock(&dev_priv->release_mutex);
return ret;
}
-
-void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+/**
+ * Decrease the 3d resource refcount.
+ * If the count reaches zero, disable the fifo, switching to vga mode.
+ * Note that the master holds a refcount as well, and may request an
+ * explicit switch to vga mode when it releases its refcount to account
+ * for the situation of an X server vt switch to VGA with 3d resources
+ * active.
+ */
+void vmw_3d_resource_dec(struct vmw_private *dev_priv,
+ bool hide_svga)
{
int32_t n3d;
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
+ else if (hide_svga) {
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ vmw_read(dev_priv, SVGA_REG_ENABLE) |
+ SVGA_REG_ENABLE_HIDE);
+ mutex_unlock(&dev_priv->hw_mutex);
+ }
+
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -252,7 +402,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
- dev_priv->last_read_sequence = (uint32_t) -100;
+ dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
@@ -263,8 +413,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
- atomic_set(&dev_priv->fence_queue_waiters, 0);
+ dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
+ INIT_LIST_HEAD(&dev_priv->surface_lru);
+ dev_priv->used_memory_size = 0;
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -285,6 +437,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+ dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+ dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+ dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
if (dev_priv->capabilities & SVGA_CAP_GMR) {
dev_priv->max_gmr_descriptors =
vmw_read(dev_priv,
@@ -292,11 +448,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
}
-
- dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
- dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
- dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
- dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+ dev_priv->max_gmr_pages =
+ vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
+ dev_priv->memory_size =
+ vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+ dev_priv->memory_size -= dev_priv->vram_size;
+ } else {
+ /*
+ * An arbitrary limit of 512MiB on surface
+ * memory. But all HWV8 hardware supports GMR2.
+ */
+ dev_priv->memory_size = 512*1024*1024;
+ }
mutex_unlock(&dev_priv->hw_mutex);
@@ -308,6 +472,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("Max GMR descriptors is %u\n",
(unsigned)dev_priv->max_gmr_descriptors);
}
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+ DRM_INFO("Max number of GMR pages is %u\n",
+ (unsigned)dev_priv->max_gmr_pages);
+ DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
+ (unsigned)dev_priv->memory_size / 1024);
+ }
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -394,22 +564,34 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_device;
}
}
+
+ dev_priv->fman = vmw_fence_manager_init(dev_priv);
+ if (unlikely(dev_priv->fman == NULL))
+ goto out_no_fman;
+
+ /* Need to start the fifo to check if we can do screen objects */
+ ret = vmw_3d_resource_inc(dev_priv, true);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
+ vmw_kms_save_vga(dev_priv);
+
+ /* Start kms and overlay systems, needs fifo. */
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
+
+ /* 3D Depends on Screen Objects being used. */
+ DRM_INFO("Detected %sdevice 3D availability.\n",
+ vmw_fifo_have_3d(dev_priv) ?
+ "" : "no ");
+
+ /* We might be done with the fifo now */
if (dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv);
- if (unlikely(ret != 0))
- goto out_no_fifo;
- vmw_kms_save_vga(dev_priv);
vmw_fb_init(dev_priv);
- DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
- "Detected device 3D availability.\n" :
- "Detected no device 3D availability.\n");
} else {
- DRM_INFO("Delayed 3D detection since we're not "
- "running the device in SVGA mode yet.\n");
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
@@ -426,15 +608,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
out_no_irq:
- if (dev_priv->enable_fb) {
+ if (dev_priv->enable_fb)
vmw_fb_close(dev_priv);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
- }
-out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
+ /* We still have a 3D resource reference held */
+ if (dev_priv->enable_fb) {
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, false);
+ }
+out_no_fifo:
+ vmw_fence_manager_takedown(dev_priv->fman);
+out_no_fman:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -467,15 +653,18 @@ static int vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
+ if (dev_priv->ctx.cmd_bounce)
+ vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, false);
}
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
+ vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -646,7 +835,7 @@ static int vmw_master_set(struct drm_device *dev,
int ret = 0;
if (!dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv);
+ ret = vmw_3d_resource_inc(dev_priv, true);
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
@@ -688,7 +877,7 @@ out_no_active_lock:
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
}
return ret;
}
@@ -709,7 +898,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_kms_idle_workqueues(vmaster);
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -726,7 +915,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -761,6 +950,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
ttm_bo_swapout_all(&dev_priv->bdev);
break;
@@ -835,7 +1025,7 @@ static int vmw_pm_prepare(struct device *kdev)
*/
dev_priv->suspended = true;
if (dev_priv->enable_fb)
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
if (dev_priv->num_3d_resources != 0) {
@@ -843,7 +1033,7 @@ static int vmw_pm_prepare(struct device *kdev)
"while 3D resources are active.\n");
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv);
+ vmw_3d_resource_inc(dev_priv, true);
dev_priv->suspended = false;
return -EBUSY;
}
@@ -862,7 +1052,7 @@ static void vmw_pm_complete(struct device *kdev)
* start fifo.
*/
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv);
+ vmw_3d_resource_inc(dev_priv, false);
dev_priv->suspended = false;
}
@@ -886,6 +1076,8 @@ static struct drm_driver driver = {
.irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
.reclaim_buffers_locked = NULL,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
@@ -902,7 +1094,8 @@ static struct drm_driver driver = {
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
.mmap = vmw_mmap,
- .poll = drm_poll,
+ .poll = vmw_fops_poll,
+ .read = vmw_fops_read,
.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
.compat_ioctl = drm_compat_ioctl,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 10fc01f69c4..8cca91a93bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -38,20 +38,27 @@
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
+#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20100927"
-#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_DATE "20111025"
+#define VMWGFX_DRIVER_MAJOR 2
+#define VMWGFX_DRIVER_MINOR 3
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+#define VMW_RES_FENCE ttm_driver_type3
+
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
@@ -72,9 +79,11 @@ struct vmw_resource {
int id;
enum ttm_object_type res_type;
bool avail;
+ void (*remove_from_lists) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
void (*res_free) (struct vmw_resource *res);
-
+ struct list_head validate_head;
+ struct list_head query_head; /* Protected by the cmdbuf mutex */
/* TODO is a generic snooper needed? */
#if 0
void (*snoop)(struct vmw_resource *res,
@@ -90,8 +99,12 @@ struct vmw_cursor_snooper {
uint32_t *image;
};
+struct vmw_framebuffer;
+struct vmw_surface_offset;
+
struct vmw_surface {
struct vmw_resource res;
+ struct list_head lru_head; /* Protected by the resource lock */
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
@@ -102,9 +115,12 @@ struct vmw_surface {
/* TODO so far just a extra pointer */
struct vmw_cursor_snooper snooper;
+ struct ttm_buffer_object *backup;
+ struct vmw_surface_offset *offsets;
+ uint32_t backup_size;
};
-struct vmw_fence_queue {
+struct vmw_marker_queue {
struct list_head head;
struct timespec lag;
struct timespec lag_time;
@@ -115,16 +131,12 @@ struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
__le32 *static_buffer;
- __le32 *last_buffer;
- uint32_t last_data_size;
- uint32_t last_buffer_size;
- bool last_buffer_add;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- struct vmw_fence_queue fence_queue;
+ struct vmw_marker_queue marker_queue;
};
struct vmw_relocation {
@@ -136,6 +148,8 @@ struct vmw_sw_context{
struct ida bo_list;
uint32_t last_cid;
bool cid_valid;
+ bool kernel; /**< is the call made from the kernel */
+ struct vmw_resource *cur_ctx;
uint32_t last_sid;
uint32_t sid_translation;
bool sid_valid;
@@ -143,8 +157,16 @@ struct vmw_sw_context{
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
- struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
+ struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
+ uint32_t *cmd_bounce;
+ uint32_t cmd_bounce_size;
+ struct list_head resource_list;
+ uint32_t fence_flags;
+ struct list_head query_list;
+ struct ttm_buffer_object *cur_query_bo;
+ uint32_t cur_query_cid;
+ bool query_cid_valid;
};
struct vmw_legacy_display;
@@ -185,6 +207,8 @@ struct vmw_private {
uint32_t capabilities;
uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
+ uint32_t max_gmr_pages;
+ uint32_t memory_size;
bool has_gmr;
struct mutex hw_mutex;
@@ -195,12 +219,7 @@ struct vmw_private {
struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
- uint32_t vga_depth;
uint32_t vga_bpp;
- uint32_t vga_pseudo;
- uint32_t vga_red_mask;
- uint32_t vga_green_mask;
- uint32_t vga_blue_mask;
uint32_t vga_bpl;
uint32_t vga_pitchlock;
@@ -212,6 +231,7 @@ struct vmw_private {
void *fb_info;
struct vmw_legacy_display *ldu_priv;
+ struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
/*
@@ -240,13 +260,16 @@ struct vmw_private {
* Fencing and IRQs.
*/
- atomic_t fence_seq;
+ atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- atomic_t fence_queue_waiters;
+ int fence_queue_waiters; /* Protected by hw_mutex */
+ int goal_queue_waiters; /* Protected by hw_mutex */
atomic_t fifo_queue_waiters;
- uint32_t last_read_sequence;
+ uint32_t last_read_seqno;
spinlock_t irq_lock;
+ struct vmw_fence_manager *fman;
+ uint32_t irq_mask;
/*
* Device state
@@ -285,6 +308,26 @@ struct vmw_private {
struct mutex release_mutex;
uint32_t num_3d_resources;
+
+ /*
+ * Query processing. These members
+ * are protected by the cmdbuf mutex.
+ */
+
+ struct ttm_buffer_object *dummy_query_bo;
+ struct ttm_buffer_object *pinned_bo;
+ uint32_t query_cid;
+ bool dummy_query_bo_pinned;
+
+ /*
+ * Surface swapping. The "surface_lru" list is protected by the
+ * resource lock in order to be able to destroy a surface and take
+ * it off the lru atomically. "used_memory_size" is currently
+ * protected by the cmdbuf mutex for simplicity.
+ */
+
+ struct list_head surface_lru;
+ uint32_t used_memory_size;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,8 +362,8 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
-int vmw_3d_resource_inc(struct vmw_private *dev_priv);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
/**
* GMR utilities - vmwgfx_gmr.c
@@ -345,7 +388,8 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
- int id);
+ int id,
+ struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
@@ -363,6 +407,8 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
extern int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
@@ -378,10 +424,6 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo);
-extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -390,7 +432,30 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
+extern void vmw_resource_unreserve(struct list_head *list);
+/**
+ * DMA buffer helper routines - vmwgfx_dmabuf.c
+ */
+extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ struct ttm_placement *placement,
+ bool interruptible);
+extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible);
+extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ bool pin, bool interruptible);
+extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ bool interruptible);
+extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
+ SVGAGuestPtr *ptr);
+extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -398,8 +463,16 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_present_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern unsigned int vmw_fops_poll(struct file *filp,
+ struct poll_table_struct *wait);
+extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
/**
* Fifo utilities - vmwgfx_fifo.c
@@ -412,11 +485,12 @@ extern void vmw_fifo_release(struct vmw_private *dev_priv,
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
- uint32_t *sequence);
+ uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
+extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -434,7 +508,10 @@ extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
+extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_evictable_placement;
+extern struct ttm_placement vmw_srf_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -444,45 +521,70 @@ extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_execbuf_process(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ void __user *user_commands,
+ void *kernel_commands,
+ uint32_t command_size,
+ uint64_t throttle_us,
+ struct drm_vmw_fence_rep __user
+ *user_fence_rep);
+
+extern void
+vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ bool only_on_cid_match, uint32_t cid);
+
+extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle);
+extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user
+ *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle);
/**
* IRQs and wating - vmwgfx_irq.c
*/
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
-extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
- uint32_t sequence, bool interruptible,
- unsigned long timeout);
+extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
+ uint32_t seqno, bool interruptible,
+ unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
-extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
- uint32_t sequence);
-extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
+ uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
- uint32_t sequence,
+ uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_sequence(struct vmw_private *dev_priv,
+extern void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state);
-
+extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
/**
- * Rudimentary fence objects currently used only for throttling -
- * vmwgfx_fence.c
+ * Rudimentary fence-like objects currently used only for throttling -
+ * vmwgfx_marker.c
*/
-extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
-extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
-extern int vmw_fence_push(struct vmw_fence_queue *queue,
- uint32_t sequence);
-extern int vmw_fence_pull(struct vmw_fence_queue *queue,
- uint32_t signaled_sequence);
+extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
+extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
+extern int vmw_marker_push(struct vmw_marker_queue *queue,
+ uint32_t seqno);
+extern int vmw_marker_pull(struct vmw_marker_queue *queue,
+ uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_fence_queue *queue, uint32_t us);
+ struct vmw_marker_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -508,16 +610,31 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
-void vmw_kms_write_svga(struct vmw_private *vmw_priv,
- unsigned width, unsigned height, unsigned pitch,
- unsigned bbp, unsigned depth);
-int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
+int vmw_enable_vblank(struct drm_device *dev, int crtc);
+void vmw_disable_vblank(struct drm_device *dev, int crtc);
+int vmw_kms_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct vmw_surface *surface,
+ uint32_t sid, int32_t destX, int32_t destY,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/**
* Overlay control - vmwgfx_overlay.c
@@ -576,4 +693,8 @@ static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer
return NULL;
}
+static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
+{
+ return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+}
#endif
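
Editor's note (not part of the patch): the header above changes vmw_3d_resource_inc()/vmw_3d_resource_dec() to take a flag controlling whether the SVGA device is unhidden/hidden along with the reference count. A minimal sketch of the intended pairing, mirroring the reworked driver-load path earlier in this diff; the helper name is hypothetical:

/* Hypothetical helper: probe 3D capability by briefly holding a
 * 3D resource reference, then drop it again if fbdev is disabled.
 */
static int example_probe_3d(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_3d_resource_inc(dev_priv, true);	/* start fifo, unhide SVGA */
	if (unlikely(ret != 0))
		return ret;

	DRM_INFO("Detected %sdevice 3D availability.\n",
		 vmw_fifo_have_3d(dev_priv) ? "" : "no ");

	if (!dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);	/* stop fifo, hide SVGA */

	return 0;
}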
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 41b95ed6dbc..40932fbdac0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -44,10 +44,71 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
+static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
+ struct vmw_resource **p_res)
+{
+ struct vmw_resource *res = *p_res;
+
+ if (list_empty(&res->validate_head)) {
+ list_add_tail(&res->validate_head, &sw_context->resource_list);
+ *p_res = NULL;
+ } else
+ vmw_resource_unreference(p_res);
+}
+
+/**
+ * vmw_bo_to_validate_list - add a bo to a validate list
+ *
+ * @sw_context: The software context used for this command submission batch.
+ * @bo: The buffer object to add.
+ * @fence_flags: Fence flags to be or'ed with any other fence flags for
+ * this buffer on this submission batch.
+ * @p_val_node: If non-NULL, will be updated with the validate node number
+ * on return.
+ *
+ * Returns -EINVAL if the limit of number of buffer objects per command
+ * submission is reached.
+ */
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+ struct ttm_buffer_object *bo,
+ uint32_t fence_flags,
+ uint32_t *p_val_node)
+{
+ uint32_t val_node;
+ struct ttm_validate_buffer *val_buf;
+
+ val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+
+ if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+ DRM_ERROR("Max number of DMA buffers per submission"
+ " exceeded.\n");
+ return -EINVAL;
+ }
+
+ val_buf = &sw_context->val_bufs[val_node];
+ if (unlikely(val_node == sw_context->cur_val_buf)) {
+ val_buf->new_sync_obj_arg = NULL;
+ val_buf->bo = ttm_bo_reference(bo);
+ list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ ++sw_context->cur_val_buf;
+ }
+
+ val_buf->new_sync_obj_arg = (void *)
+ ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
+ sw_context->fence_flags |= fence_flags;
+
+ if (p_val_node)
+ *p_val_node = val_node;
+
+ return 0;
+}
+
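Editor's note (not part of the patch): a hedged usage sketch of the helper above. Callers pass per-buffer fence flags, and the returned validate-node index can be stored for later relocation patching, as vmw_translate_guest_ptr() does further down. The function name is hypothetical:

/* Hypothetical caller: register a guest-backed buffer once per batch. */
static int example_add_bo(struct vmw_sw_context *sw_context,
			  struct ttm_buffer_object *bo,
			  uint32_t *index_out)
{
	int ret;

	ret = vmw_bo_to_validate_list(sw_context, bo,
				      DRM_VMW_FENCE_FLAG_EXEC, index_out);
	if (unlikely(ret != 0))	/* VMWGFX_MAX_VALIDATIONS exceeded */
		return ret;

	return 0;
}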
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
+ struct vmw_resource *ctx;
+
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
__le32 cid;
@@ -58,7 +119,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
return 0;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+ ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
+ &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use context %u\n",
(unsigned) cmd->cid);
@@ -67,6 +129,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
sw_context->last_cid = cmd->cid;
sw_context->cid_valid = true;
+ sw_context->cur_ctx = ctx;
+ vmw_resource_to_validate_list(sw_context, &ctx);
return 0;
}
@@ -75,29 +139,45 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t *sid)
{
+ struct vmw_surface *srf;
+ int ret;
+ struct vmw_resource *res;
+
if (*sid == SVGA3D_INVALID_ID)
return 0;
- if (unlikely((!sw_context->sid_valid ||
- *sid != sw_context->last_sid))) {
- int real_id;
- int ret = vmw_surface_check(dev_priv, sw_context->tfile,
- *sid, &real_id);
+ if (likely((sw_context->sid_valid &&
+ *sid == sw_context->last_sid))) {
+ *sid = sw_context->sid_translation;
+ return 0;
+ }
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
- }
+ ret = vmw_user_surface_lookup_handle(dev_priv,
+ sw_context->tfile,
+ *sid, &srf);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use surface 0x%08x "
+ "address 0x%08lx\n",
+ (unsigned int) *sid,
+ (unsigned long) sid);
+ return ret;
+ }
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- *sid = real_id;
- sw_context->sid_translation = real_id;
- } else
- *sid = sw_context->sid_translation;
+ ret = vmw_surface_validate(dev_priv, srf);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Could not validate surface.\n");
+ vmw_surface_unreference(&srf);
+ return ret;
+ }
+
+ sw_context->last_sid = *sid;
+ sw_context->sid_valid = true;
+ sw_context->sid_translation = srf->res.id;
+ *sid = sw_context->sid_translation;
+
+ res = &srf->res;
+ vmw_resource_to_validate_list(sw_context, &res);
return 0;
}
@@ -166,6 +246,12 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
+
+ if (unlikely(!sw_context->kernel)) {
+ DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+ return -EPERM;
+ }
+
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}
@@ -178,10 +264,179 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
SVGA3dCmdPresent body;
} *cmd;
+
cmd = container_of(header, struct vmw_sid_cmd, header);
+
+ if (unlikely(!sw_context->kernel)) {
+ DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+ return -EPERM;
+ }
+
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
+/**
+ * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context for the next query.
+ * @new_query_bo: The new buffer holding query results.
+ * @sw_context: The software context used for this command submission.
+ *
+ * This function checks whether @new_query_bo is suitable for holding
+ * query results, and if another buffer currently is pinned for query
+ * results. If so, the function prepares the state of @sw_context for
+ * switching pinned buffers after successful submission of the current
+ * command batch. It also checks whether we're using a new query context.
+ * In that case, it makes sure we emit a query barrier for the old
+ * context before the current query buffer is fenced.
+ */
+static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
+ uint32_t cid,
+ struct ttm_buffer_object *new_query_bo,
+ struct vmw_sw_context *sw_context)
+{
+ int ret;
+ bool add_cid = false;
+ uint32_t cid_to_add;
+
+ if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
+
+ if (unlikely(new_query_bo->num_pages > 4)) {
+ DRM_ERROR("Query buffer too large.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(sw_context->cur_query_bo != NULL)) {
+ BUG_ON(!sw_context->query_cid_valid);
+ add_cid = true;
+ cid_to_add = sw_context->cur_query_cid;
+ ret = vmw_bo_to_validate_list(sw_context,
+ sw_context->cur_query_bo,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ sw_context->cur_query_bo = new_query_bo;
+
+ ret = vmw_bo_to_validate_list(sw_context,
+ dev_priv->dummy_query_bo,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ }
+
+ if (unlikely(cid != sw_context->cur_query_cid &&
+ sw_context->query_cid_valid)) {
+ add_cid = true;
+ cid_to_add = sw_context->cur_query_cid;
+ }
+
+ sw_context->cur_query_cid = cid;
+ sw_context->query_cid_valid = true;
+
+ if (add_cid) {
+ struct vmw_resource *ctx = sw_context->cur_ctx;
+
+ if (list_empty(&ctx->query_head))
+ list_add_tail(&ctx->query_head,
+ &sw_context->query_list);
+ ret = vmw_bo_to_validate_list(sw_context,
+ dev_priv->dummy_query_bo,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+
+/**
+ * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
+ *
+ * @dev_priv: The device private structure.
+ * @sw_context: The software context used for this command submission batch.
+ *
+ * This function will check if we're switching query buffers, and will then,
+ * if no other query waits are issued in this command submission batch,
+ * issue a dummy occlusion query wait used as a query barrier. When the fence
+ * object following that query wait has signaled, we are sure that all
+ * preceding queries have finished, and the old query buffer can be unpinned.
+ * However, since both the new query buffer and the old one are fenced with
+ * that fence, we can do an asynchronous unpin now, and be sure that the
+ * old query buffer won't be moved until the fence has signaled.
+ *
+ * As mentioned above, both the new and the old query buffers need to be fenced
+ * using a sequence emitted *after* calling this function.
+ */
+static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context)
+{
+
+ struct vmw_resource *ctx, *next_ctx;
+ int ret;
+
+ /*
+ * The validate list should still hold references to all
+ * contexts here.
+ */
+
+ list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
+ query_head) {
+ list_del_init(&ctx->query_head);
+
+ BUG_ON(list_empty(&ctx->validate_head));
+
+ ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+
+ if (unlikely(ret != 0))
+ DRM_ERROR("Out of fifo space for dummy query.\n");
+ }
+
+ if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
+ if (dev_priv->pinned_bo) {
+ vmw_bo_pin(dev_priv->pinned_bo, false);
+ ttm_bo_unref(&dev_priv->pinned_bo);
+ }
+
+ vmw_bo_pin(sw_context->cur_query_bo, true);
+
+ /*
+ * We also pin the dummy_query_bo buffer so that we
+ * don't need to validate it when emitting
+ * dummy queries in context destroy paths.
+ */
+
+ vmw_bo_pin(dev_priv->dummy_query_bo, true);
+ dev_priv->dummy_query_bo_pinned = true;
+
+ dev_priv->query_cid = sw_context->cur_query_cid;
+ dev_priv->pinned_bo =
+ ttm_bo_reference(sw_context->cur_query_bo);
+ }
+}
+
+/**
+ * vmw_query_switch_backoff - clear query barrier list
+ * @sw_context: The sw context used for this submission batch.
+ *
+ * This function is used as part of an error path, where a previously
+ * set up list of query barriers needs to be cleared.
+ *
+ */
+static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
+{
+ struct list_head *list, *next;
+
+ list_for_each_safe(list, next, &sw_context->query_list) {
+ list_del_init(list);
+ }
+}
+
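Editor's note (not part of the patch): a hedged sketch of how the three query-switch helpers above are meant to be ordered around a submission, as done by vmw_execbuf_process() later in this file; error handling is condensed and the function name is hypothetical:

/* Hypothetical outline: prepare during parsing, commit after the
 * commands hit the fifo, back off if anything fails in between.
 */
static void example_submit(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   void *cmds, uint32_t size)
{
	void *fifo_cmd;

	/* Parsing end-query commands calls vmw_query_bo_switch_prepare(). */
	if (vmw_cmd_check_all(dev_priv, sw_context, cmds, size) != 0)
		goto backoff;

	fifo_cmd = vmw_fifo_reserve(dev_priv, size);
	if (fifo_cmd == NULL)
		goto backoff;
	memcpy(fifo_cmd, cmds, size);
	vmw_fifo_commit(dev_priv, size);

	/* Pin the new query bo and emit dummy-query barriers for any
	 * stale contexts; the fence emitted afterwards covers both the
	 * old and the new query buffer. */
	vmw_query_bo_switch_commit(dev_priv, sw_context);
	return;

backoff:
	vmw_query_switch_backoff(sw_context);
}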
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -191,8 +446,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
- uint32_t cur_validate_node;
- struct ttm_validate_buffer *val_buf;
int ret;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
@@ -212,22 +465,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
- if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- ret = -EINVAL;
+ ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
+ &reloc->index);
+ if (unlikely(ret != 0))
goto out_no_reloc;
- }
- reloc->index = cur_validate_node;
- if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
- val_buf = &sw_context->val_bufs[cur_validate_node];
- val_buf->bo = ttm_bo_reference(bo);
- val_buf->new_sync_obj_arg = (void *) dev_priv;
- list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
- }
*vmw_bo_p = vmw_bo;
return 0;
@@ -259,8 +501,11 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
+ ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
+ &vmw_bo->base, sw_context);
+
vmw_dmabuf_unreference(&vmw_bo);
- return 0;
+ return ret;
}
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
@@ -273,6 +518,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -286,10 +532,19 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
+
+ /*
+ * This wait will act as a barrier for previous waits for this
+ * context.
+ */
+
+ ctx = sw_context->cur_ctx;
+ if (!list_empty(&ctx->query_head))
+ list_del_init(&ctx->query_head);
+
return 0;
}
-
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -302,6 +557,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
+ struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -318,18 +574,28 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
goto out_no_reloc;
}
- /**
+ ret = vmw_surface_validate(dev_priv, srf);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Could not validate surface.\n");
+ goto out_no_validate;
+ }
+
+ /*
* Patch command stream with device SID.
*/
-
cmd->dma.host.sid = srf->res.id;
vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
- /**
- * FIXME: May deadlock here when called from the
- * command parsing code.
- */
- vmw_surface_unreference(&srf);
+ vmw_dmabuf_unreference(&vmw_bo);
+
+ res = &srf->res;
+ vmw_resource_to_validate_list(sw_context, &res);
+
+ return 0;
+
+out_no_validate:
+ vmw_surface_unreference(&srf);
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -419,6 +685,71 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
return 0;
}
+static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ int ret;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd = buf;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->body.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+
+ return ret;
+}
+
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t *size)
+{
+ uint32_t size_remaining = *size;
+ uint32_t cmd_id;
+
+ cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ switch (cmd_id) {
+ case SVGA_CMD_UPDATE:
+ *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+ break;
+ case SVGA_CMD_DEFINE_GMRFB:
+ *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+ break;
+ case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+ *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+ *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ default:
+ DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+ return -EINVAL;
+ }
+
+ if (*size > size_remaining) {
+ DRM_ERROR("Invalid SVGA command (size mismatch):"
+ " %u.\n", cmd_id);
+ return -EINVAL;
+ }
+
+ if (unlikely(!sw_context->kernel)) {
+ DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+ return -EPERM;
+ }
+
+ if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+ return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+ return 0;
+}
typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
@@ -471,11 +802,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
- cmd_id = ((uint32_t *)buf)[0];
- if (cmd_id == SVGA_CMD_UPDATE) {
- *size = 5 << 2;
- return 0;
- }
+ cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ /* Handle any non-3D commands */
+ if (unlikely(cmd_id < SVGA_CMD_MAX))
+ return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
+
cmd_id = le32_to_cpu(header->id);
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
@@ -500,7 +831,8 @@ out_err:
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- void *buf, uint32_t size)
+ void *buf,
+ uint32_t size)
{
int32_t cur_size = size;
int ret;
@@ -550,7 +882,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct ttm_validate_buffer *entry, *next;
+ struct vmw_resource *res, *res_next;
+ /*
+ * Drop references to DMA buffers held during command submission.
+ */
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
head) {
list_del(&entry->head);
@@ -559,6 +895,16 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
+
+ /*
+ * Drop references to resources held during command submission.
+ */
+ vmw_resource_unreserve(&sw_context->resource_list);
+ list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
+ validate_head) {
+ list_del_init(&res->validate_head);
+ vmw_resource_unreference(&res);
+ }
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -566,6 +912,16 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
int ret;
+
+ /*
+ * Don't validate pinned buffers.
+ */
+
+ if (bo == dev_priv->pinned_bo ||
+ (bo == dev_priv->dummy_query_bo &&
+ dev_priv->dummy_query_bo_pinned))
+ return 0;
+
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
@@ -602,57 +958,208 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
return 0;
}
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
+ uint32_t size)
+{
+ if (likely(sw_context->cmd_bounce_size >= size))
+ return 0;
+
+ if (sw_context->cmd_bounce_size == 0)
+ sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
+
+ while (sw_context->cmd_bounce_size < size) {
+ sw_context->cmd_bounce_size =
+ PAGE_ALIGN(sw_context->cmd_bounce_size +
+ (sw_context->cmd_bounce_size >> 1));
+ }
+
+ if (sw_context->cmd_bounce != NULL)
+ vfree(sw_context->cmd_bounce);
+
+ sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
+
+ if (sw_context->cmd_bounce == NULL) {
+ DRM_ERROR("Failed to allocate command bounce buffer.\n");
+ sw_context->cmd_bounce_size = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
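Editor's note (not part of the patch): a hedged illustration of the growth rule above, assuming PAGE_SIZE is 4096. A 100 KiB command stream grows the bounce buffer 32 KiB -> 48 KiB -> 72 KiB -> 108 KiB, i.e. roughly 1.5x per step, page aligned, so slightly larger subsequent submissions do not reallocate every time. The same rule as a standalone function (name hypothetical):

static uint32_t example_bounce_size(uint32_t cur_size, uint32_t needed)
{
	if (cur_size >= needed)
		return cur_size;
	if (cur_size == 0)
		cur_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;	/* 32 KiB */
	while (cur_size < needed)
		cur_size = PAGE_ALIGN(cur_size + (cur_size >> 1));
	return cur_size;
}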
+/**
+ * vmw_execbuf_fence_commands - create and submit a command stream fence
+ *
+ * Creates a fence object and submits a command stream marker.
+ * If this fails for some reason, we sync the fifo and return NULL.
+ * It is then safe to fence buffers with a NULL pointer.
+ *
+ * If @p_handle is not NULL, @file_priv must also not be NULL: a
+ * user-space handle is then created. Otherwise no handle is created.
+ */
+
+int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
- struct drm_vmw_fence_rep fence_rep;
- struct drm_vmw_fence_rep __user *user_fence_rep;
- int ret;
- void *user_cmd;
- void *cmd;
uint32_t sequence;
- struct vmw_sw_context *sw_context = &dev_priv->ctx;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+ bool synced = false;
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
+ /* p_handle implies file_priv. */
+ BUG_ON(p_handle != NULL && file_priv == NULL);
- ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ ret = vmw_fifo_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_cmd_mutex;
+ DRM_ERROR("Fence submission error. Syncing.\n");
+ synced = true;
}
- cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
- ret = -ENOMEM;
- goto out_unlock;
+ if (p_handle != NULL)
+ ret = vmw_user_fence_create(file_priv, dev_priv->fman,
+ sequence,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ p_fence, p_handle);
+ else
+ ret = vmw_fence_create(dev_priv->fman, sequence,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ p_fence);
+
+ if (unlikely(ret != 0 && !synced)) {
+ (void) vmw_fallback_wait(dev_priv, false, false,
+ sequence, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ *p_fence = NULL;
}
- user_cmd = (void __user *)(unsigned long)arg->commands;
- ret = copy_from_user(cmd, user_cmd, arg->command_size);
+ return 0;
+}
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- DRM_ERROR("Failed copying commands.\n");
- goto out_commit;
+/**
+ * vmw_execbuf_copy_fence_user - copy fence object information to
+ * user-space.
+ *
+ * @dev_priv: Pointer to a vmw_private struct.
+ * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
+ * @ret: Return value from fence object creation.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
+ * which the information should be copied.
+ * @fence: Pointer to the fence object.
+ * @fence_handle: User-space fence handle.
+ *
+ * This function copies fence information to user-space. If copying fails,
+ * the user-space struct drm_vmw_fence_rep::error member is hopefully
+ * left untouched, and if it's preloaded with an -EFAULT by user-space,
+ * the error will hopefully be detected.
+ * Also if copying fails, user-space will be unable to signal the fence
+ * object so we wait for it immediately, and then unreference the
+ * user-space reference.
+ */
+void
+vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle)
+{
+ struct drm_vmw_fence_rep fence_rep;
+
+ if (user_fence_rep == NULL)
+ return;
+
+ memset(&fence_rep, 0, sizeof(fence_rep));
+
+ fence_rep.error = ret;
+ if (ret == 0) {
+ BUG_ON(fence == NULL);
+
+ fence_rep.handle = fence_handle;
+ fence_rep.seqno = fence->seqno;
+ vmw_update_seqno(dev_priv, &dev_priv->fifo);
+ fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
+ /*
+ * copy_to_user errors will be detected by user space not
+ * seeing fence_rep::error filled in. Typically
+ * user-space would have pre-set that member to -EFAULT.
+ */
+ ret = copy_to_user(user_fence_rep, &fence_rep,
+ sizeof(fence_rep));
+
+ /*
+ * User-space lost the fence object. We need to sync
+ * and unreference the handle.
+ */
+ if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+ ttm_ref_object_base_unref(vmw_fp->tfile,
+ fence_handle, TTM_REF_USAGE);
+ DRM_ERROR("Fence copy error. Syncing.\n");
+ (void) vmw_fence_obj_wait(fence, fence->signal_mask,
+ false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ }
+}
+
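Editor's note (not part of the patch): a hedged user-space sketch of the convention described above, in which the caller preloads fence_rep.error with -EFAULT so a lost copy_to_user() is detectable after the ioctl returns. Field names follow the execbuf argument used by vmw_execbuf_ioctl() below; the exact libdrm invocation and the function name are assumptions. Assumes <string.h>, <xf86drm.h> and the vmwgfx_drm.h uapi header.

/* User-space side, sketch only; fd is an open vmwgfx drm fd. */
static int example_submit(int fd, void *cmd_buffer, uint32_t cmd_size)
{
	struct drm_vmw_execbuf_arg arg;
	struct drm_vmw_fence_rep fence_rep;
	int ret;

	memset(&arg, 0, sizeof(arg));
	memset(&fence_rep, 0, sizeof(fence_rep));
	fence_rep.error = -EFAULT;	/* detect a lost copy_to_user() */

	arg.commands = (unsigned long) cmd_buffer;
	arg.command_size = cmd_size;
	arg.version = DRM_VMW_EXECBUF_VERSION;
	arg.fence_rep = (unsigned long) &fence_rep;

	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	if (ret == 0 && fence_rep.error != 0)
		ret = fence_rep.error;	/* fence info never arrived */

	return ret;
}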
+int vmw_execbuf_process(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ void __user *user_commands,
+ void *kernel_commands,
+ uint32_t command_size,
+ uint64_t throttle_us,
+ struct drm_vmw_fence_rep __user *user_fence_rep)
+{
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+ struct vmw_fence_obj *fence;
+ uint32_t handle;
+ void *cmd;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+
+ if (kernel_commands == NULL) {
+ sw_context->kernel = false;
+
+ ret = vmw_resize_cmd_bounce(sw_context, command_size);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+
+ ret = copy_from_user(sw_context->cmd_bounce,
+ user_commands, command_size);
+
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed copying commands.\n");
+ goto out_unlock;
+ }
+ kernel_commands = sw_context->cmd_bounce;
+ } else
+ sw_context->kernel = true;
+
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->cid_valid = false;
sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
+ sw_context->fence_flags = 0;
+ INIT_LIST_HEAD(&sw_context->query_list);
+ INIT_LIST_HEAD(&sw_context->resource_list);
+ sw_context->cur_query_bo = dev_priv->pinned_bo;
+ sw_context->cur_query_cid = dev_priv->query_cid;
+ sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
INIT_LIST_HEAD(&sw_context->validate_nodes);
- ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+ ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
+ command_size);
if (unlikely(ret != 0))
goto out_err;
+
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
@@ -663,57 +1170,206 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
vmw_apply_relocations(sw_context);
- if (arg->throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
- arg->throttle_us);
+ if (throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+ throttle_us);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_throttle;
}
- vmw_fifo_commit(dev_priv, arg->command_size);
-
- ret = vmw_fifo_send_fence(dev_priv, &sequence);
+ cmd = vmw_fifo_reserve(dev_priv, command_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ ret = -ENOMEM;
+ goto out_throttle;
+ }
- ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
- (void *)(unsigned long) sequence);
- vmw_clear_validations(sw_context);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
+ memcpy(cmd, kernel_commands, command_size);
+ vmw_fifo_commit(dev_priv, command_size);
+ vmw_query_bo_switch_commit(dev_priv, sw_context);
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+ &fence,
+ (user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
- * vmw_fifo_send_fence will sync.
+ * vmw_fifo_send_fence will sync. The error will be propagated to
+ * user-space in @fence_rep
*/
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- fence_rep.error = ret;
- fence_rep.fence_seq = (uint64_t) sequence;
- fence_rep.pad64 = 0;
-
- user_fence_rep = (struct drm_vmw_fence_rep __user *)
- (unsigned long)arg->fence_rep;
+ ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+ (void *) fence);
- /*
- * copy_to_user errors will be detected by user space not
- * seeing fence_rep::error filled in.
- */
+ vmw_clear_validations(sw_context);
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+ user_fence_rep, fence, handle);
- ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
- vmw_kms_cursor_post_execbuf(dev_priv);
- ttm_read_unlock(&vmaster->lock);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
return 0;
+
out_err:
vmw_free_relocations(sw_context);
+out_throttle:
+ vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
vmw_clear_validations(sw_context);
-out_commit:
- vmw_fifo_commit(dev_priv, 0);
out_unlock:
mutex_unlock(&dev_priv->cmdbuf_mutex);
-out_no_cmd_mutex:
+ return ret;
+}
+
+/**
+ * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function is called to idle the fifo and unpin the query buffer
+ * if the normal way to do this hits an error, which should typically be
+ * extremely rare.
+ */
+static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
+{
+ DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+
+ (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
+ vmw_bo_pin(dev_priv->pinned_bo, false);
+ vmw_bo_pin(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+}
+
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ * @only_on_cid_match: Only flush and unpin if the current active query cid
+ * matches @cid.
+ * @cid: Optional context id to match.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ bool only_on_cid_match, uint32_t cid)
+{
+ int ret = 0;
+ struct list_head validate_list;
+ struct ttm_validate_buffer pinned_val, query_val;
+ struct vmw_fence_obj *fence;
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ if (dev_priv->pinned_bo == NULL)
+ goto out_unlock;
+
+ if (only_on_cid_match && cid != dev_priv->query_cid)
+ goto out_unlock;
+
+ INIT_LIST_HEAD(&validate_list);
+
+ pinned_val.new_sync_obj_arg = (void *)(unsigned long)
+ DRM_VMW_FENCE_FLAG_EXEC;
+ pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+ list_add_tail(&pinned_val.head, &validate_list);
+
+ query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
+ query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+ list_add_tail(&query_val.head, &validate_list);
+
+ do {
+ ret = ttm_eu_reserve_buffers(&validate_list);
+ } while (ret == -ERESTARTSYS);
+
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_reserve;
+ }
+
+ ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_emit;
+ }
+
+ vmw_bo_pin(dev_priv->pinned_bo, false);
+ vmw_bo_pin(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+
+ ttm_bo_unref(&query_val.bo);
+ ttm_bo_unref(&pinned_val.bo);
+ ttm_bo_unref(&dev_priv->pinned_bo);
+
+out_unlock:
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ return;
+
+out_no_emit:
+ ttm_eu_backoff_reservation(&validate_list);
+out_no_reserve:
+ ttm_bo_unref(&query_val.bo);
+ ttm_bo_unref(&pinned_val.bo);
+ ttm_bo_unref(&dev_priv->pinned_bo);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+
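Editor's note (not part of the patch): a hedged sketch of the query-barrier use case described in the comment above, where a context-destroy path flushes and unpins only if the pinned query buffer was last used by the dying context. The wrapper name is hypothetical:

/* Hypothetical caller in a hardware-context destroy path. */
static void example_context_destroy_barrier(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/* Only flush/unpin if the pinned query bo belongs to @cid. */
	vmw_execbuf_release_pinned_bo(dev_priv, true, cid);
}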
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ /*
+ * This will allow us to extend the ioctl argument while
+ * maintaining backwards compatibility:
+ * We take different code paths depending on the value of
+ * arg->version.
+ */
+
+ if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+ DRM_ERROR("Incorrect execbuf version.\n");
+ DRM_ERROR("You're running outdated experimental "
+ "vmwgfx user-space drivers.");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv,
+ (void __user *)(unsigned long)arg->commands,
+ NULL, arg->command_size, arg->throttle_us,
+ (void __user *)(unsigned long)arg->fence_rep);
+
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ vmw_kms_cursor_post_execbuf(dev_priv);
+
+out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index bfab60c938a..34e51a1695b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -26,6 +26,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include "drmP.h"
#include "vmwgfx_drv.h"
@@ -158,10 +160,14 @@ static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
+ int ret;
+
+ ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+ if (ret)
+ return ret;
- vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
- info->fix.line_length,
- par->bpp, par->depth);
if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
@@ -405,14 +411,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
struct fb_info *info;
unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
- unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
+ unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
/* XXX These shouldn't be hardcoded. */
initial_width = 800;
initial_height = 600;
- fb_bbp = 32;
+ fb_bpp = 32;
fb_depth = 24;
/* XXX As shouldn't these be as well. */
@@ -422,7 +428,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
initial_width = min(fb_width, initial_width);
initial_height = min(fb_height, initial_height);
- fb_pitch = fb_width * fb_bbp / 8;
+ fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -437,7 +443,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par = info->par;
par->vmw_priv = vmw_priv;
par->depth = fb_depth;
- par->bpp = fb_bbp;
+ par->bpp = fb_bpp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
@@ -588,58 +594,6 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
return 0;
}
-int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *vmw_bo)
-{
- struct ttm_buffer_object *bo = &vmw_bo->base;
- int ret = 0;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
- ttm_bo_unreserve(bo);
-
- return ret;
-}
-
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *vmw_bo)
-{
- struct ttm_buffer_object *bo = &vmw_bo->base;
- struct ttm_placement ne_placement = vmw_vram_ne_placement;
- int ret = 0;
-
- ne_placement.lpfn = bo->num_pages;
-
- /* interuptable? */
- ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (unlikely(ret != 0))
- goto err_unlock;
-
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
- bo->mem.start > 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
- false, false);
-
- ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
-
- /* Could probably bug on */
- WARN_ON(bo->offset != 0);
-
- ttm_bo_unreserve(bo);
-err_unlock:
- ttm_write_unlock(&vmw_priv->active_master->lock);
-
- return ret;
-}
-
int vmw_fb_off(struct vmw_private *vmw_priv)
{
struct fb_info *info;
@@ -661,7 +615,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);
- vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
+ vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
return 0;
}
@@ -687,7 +641,7 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
/* Make sure that all overlays are stoped when we take over */
vmw_overlay_stop_all(vmw_priv);
- ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
+ ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
if (unlikely(ret != 0)) {
DRM_ERROR("could not move buffer to start of VRAM\n");
goto err_no_buffer;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 61eacc1b5ca..15fb26088d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,149 +25,1100 @@
*
**************************************************************************/
-
+#include "drmP.h"
#include "vmwgfx_drv.h"
-struct vmw_fence {
- struct list_head head;
- uint32_t sequence;
- struct timespec submitted;
+#define VMW_FENCE_WRAP (1 << 31)
+
+struct vmw_fence_manager {
+ int num_fence_objects;
+ struct vmw_private *dev_priv;
+ spinlock_t lock;
+ struct list_head fence_list;
+ struct work_struct work;
+ u32 user_fence_size;
+ u32 fence_size;
+ u32 event_fence_action_size;
+ bool fifo_down;
+ struct list_head cleanup_list;
+ uint32_t pending_actions[VMW_ACTION_MAX];
+ struct mutex goal_irq_mutex;
+ bool goal_irq_on; /* Protected by @goal_irq_mutex */
+ bool seqno_valid; /* Protected by @lock, and may not be set to true
+ without the @goal_irq_mutex held. */
};
-void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+struct vmw_user_fence {
+ struct ttm_base_object base;
+ struct vmw_fence_obj fence;
+};
+
+/**
+ * struct vmw_event_fence_action - fence action that delivers a drm event.
+ *
+ * @e: A struct drm_pending_event that controls the event delivery.
+ * @action: A struct vmw_fence_action to hook up to a fence.
+ * @fence: A referenced pointer to the fence to keep it alive while @action
+ * hangs on it.
+ * @dev: Pointer to a struct drm_device so we can access the event stuff.
+ * @kref: Both @e and @action have destructors, so we need to refcount.
+ * @size: Size accounted for this object.
+ * @tv_sec: If non-null, the variable pointed to will be assigned
+ * the current time tv_sec val when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
+ * be assigned the current time tv_usec val when the fence signals.
+ */
+struct vmw_event_fence_action {
+ struct drm_pending_event e;
+ struct vmw_fence_action action;
+ struct vmw_fence_obj *fence;
+ struct drm_device *dev;
+ struct kref kref;
+ uint32_t size;
+ uint32_t *tv_sec;
+ uint32_t *tv_usec;
+};
+
+/**
+ * Note on fencing subsystem usage of irqs:
+ * Typically the vmw_fences_update function is called
+ *
+ * a) When a new fence seqno has been submitted by the fifo code.
+ * b) On-demand when we have waiters. Sleeping waiters will switch on the
+ * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
+ * irq is received. When the last fence waiter is gone, that IRQ is masked
+ * away.
+ *
+ * In situations where there are no waiters and we don't submit any new fences,
+ * fence objects may not be signaled. This is perfectly OK, since there are
+ * no consumers of the signaled data, but that is NOT ok when there are fence
+ * actions attached to a fence. The fencing subsystem then makes use of the
+ * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
+ * which has an action attached, and each time vmw_fences_update is called,
+ * the subsystem makes sure the fence goal seqno is updated.
+ *
+ * The fence goal seqno irq is on as long as there are unsignaled fence
+ * objects with actions attached to them.
+ */
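+
+/*
+ * Illustrative sketch of the waiter pattern described above (the real
+ * implementation is vmw_fence_obj_wait() below); names are taken from
+ * this file:
+ *
+ * vmw_seqno_waiter_add(dev_priv);
+ * (void) wait_event_timeout(fence->queue,
+ * vmw_fence_obj_signaled(fence, flags), timeout);
+ * vmw_seqno_waiter_remove(dev_priv);
+ */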
+
+static void vmw_fence_obj_destroy_locked(struct kref *kref)
+{
+ struct vmw_fence_obj *fence =
+ container_of(kref, struct vmw_fence_obj, kref);
+
+ struct vmw_fence_manager *fman = fence->fman;
+ unsigned int num_fences;
+
+ list_del_init(&fence->head);
+ num_fences = --fman->num_fence_objects;
+ spin_unlock_irq(&fman->lock);
+ if (fence->destroy)
+ fence->destroy(fence);
+ else
+ kfree(fence);
+
+ spin_lock_irq(&fman->lock);
+}
+
+
+/**
+ * Execute signal actions on fences recently signaled.
+ * This is done from a workqueue so we don't have to execute
+ * signal actions from atomic context.
+ */
+
+static void vmw_fence_work_func(struct work_struct *work)
{
- INIT_LIST_HEAD(&queue->head);
- queue->lag = ns_to_timespec(0);
- getrawmonotonic(&queue->lag_time);
- spin_lock_init(&queue->lock);
+ struct vmw_fence_manager *fman =
+ container_of(work, struct vmw_fence_manager, work);
+ struct list_head list;
+ struct vmw_fence_action *action, *next_action;
+ bool seqno_valid;
+
+ do {
+ INIT_LIST_HEAD(&list);
+ mutex_lock(&fman->goal_irq_mutex);
+
+ spin_lock_irq(&fman->lock);
+ list_splice_init(&fman->cleanup_list, &list);
+ seqno_valid = fman->seqno_valid;
+ spin_unlock_irq(&fman->lock);
+
+ if (!seqno_valid && fman->goal_irq_on) {
+ fman->goal_irq_on = false;
+ vmw_goal_waiter_remove(fman->dev_priv);
+ }
+ mutex_unlock(&fman->goal_irq_mutex);
+
+ if (list_empty(&list))
+ return;
+
+ /*
+ * At this point, only we should be able to manipulate the
+ * list heads of the actions we have on the private list,
+ * hence fman::lock need not be held.
+ */
+
+ list_for_each_entry_safe(action, next_action, &list, head) {
+ list_del_init(&action->head);
+ if (action->cleanup)
+ action->cleanup(action);
+ }
+ } while (1);
}
-void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
- struct vmw_fence *fence, *next;
+ struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
- spin_lock(&queue->lock);
- list_for_each_entry_safe(fence, next, &queue->head, head) {
- kfree(fence);
- }
- spin_unlock(&queue->lock);
+ if (unlikely(fman == NULL))
+ return NULL;
+
+ fman->dev_priv = dev_priv;
+ spin_lock_init(&fman->lock);
+ INIT_LIST_HEAD(&fman->fence_list);
+ INIT_LIST_HEAD(&fman->cleanup_list);
+ INIT_WORK(&fman->work, &vmw_fence_work_func);
+ fman->fifo_down = true;
+ fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+ fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+ fman->event_fence_action_size =
+ ttm_round_pot(sizeof(struct vmw_event_fence_action));
+ mutex_init(&fman->goal_irq_mutex);
+
+ return fman;
}
-int vmw_fence_push(struct vmw_fence_queue *queue,
- uint32_t sequence)
+void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
- struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ unsigned long irq_flags;
+ bool lists_empty;
- if (unlikely(!fence))
- return -ENOMEM;
+ (void) cancel_work_sync(&fman->work);
- fence->sequence = sequence;
- getrawmonotonic(&fence->submitted);
- spin_lock(&queue->lock);
- list_add_tail(&fence->head, &queue->head);
- spin_unlock(&queue->lock);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ lists_empty = list_empty(&fman->fence_list) &&
+ list_empty(&fman->cleanup_list);
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
- return 0;
+ BUG_ON(!lists_empty);
+ kfree(fman);
}
-int vmw_fence_pull(struct vmw_fence_queue *queue,
- uint32_t signaled_sequence)
+static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
+ struct vmw_fence_obj *fence,
+ u32 seqno,
+ uint32_t mask,
+ void (*destroy) (struct vmw_fence_obj *fence))
{
- struct vmw_fence *fence, *next;
- struct timespec now;
- bool updated = false;
+ unsigned long irq_flags;
+ unsigned int num_fences;
+ int ret = 0;
- spin_lock(&queue->lock);
- getrawmonotonic(&now);
+ fence->seqno = seqno;
+ INIT_LIST_HEAD(&fence->seq_passed_actions);
+ fence->fman = fman;
+ fence->signaled = 0;
+ fence->signal_mask = mask;
+ kref_init(&fence->kref);
+ fence->destroy = destroy;
+ init_waitqueue_head(&fence->queue);
- if (list_empty(&queue->head)) {
- queue->lag = ns_to_timespec(0);
- queue->lag_time = now;
- updated = true;
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ if (unlikely(fman->fifo_down)) {
+ ret = -EBUSY;
goto out_unlock;
}
+ list_add_tail(&fence->head, &fman->fence_list);
+ num_fences = ++fman->num_fence_objects;
- list_for_each_entry_safe(fence, next, &queue->head, head) {
- if (signaled_sequence - fence->sequence > (1 << 30))
- continue;
+out_unlock:
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+ return ret;
- queue->lag = timespec_sub(now, fence->submitted);
- queue->lag_time = now;
- updated = true;
- list_del(&fence->head);
- kfree(fence);
+}
+
+struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
+{
+ if (unlikely(fence == NULL))
+ return NULL;
+
+ kref_get(&fence->kref);
+ return fence;
+}
+
+/**
+ * vmw_fence_obj_unreference
+ *
+ * Note that this function must not be called with irqs disabled, since
+ * it may re-enable them in the destroy function.
+ *
+ */
+void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
+{
+ struct vmw_fence_obj *fence = *fence_p;
+ struct vmw_fence_manager *fman;
+
+ if (unlikely(fence == NULL))
+ return;
+
+ fman = fence->fman;
+ *fence_p = NULL;
+ spin_lock_irq(&fman->lock);
+ BUG_ON(atomic_read(&fence->kref.refcount) == 0);
+ kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+ spin_unlock_irq(&fman->lock);
+}
+
+void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+ struct list_head *list)
+{
+ struct vmw_fence_action *action, *next_action;
+
+ list_for_each_entry_safe(action, next_action, list, head) {
+ list_del_init(&action->head);
+ fman->pending_actions[action->type]--;
+ if (action->seq_passed != NULL)
+ action->seq_passed(action);
+
+ /*
+ * Add the cleanup action to the cleanup list so that
+ * it will be performed by a worker task.
+ */
+
+ list_add_tail(&action->head, &fman->cleanup_list);
}
+}
+
+/**
+ * vmw_fence_goal_new_locked - Figure out a new device fence goal
+ * seqno if needed.
+ *
+ * @fman: Pointer to a fence manager.
+ * @passed_seqno: The seqno the device currently signals as passed.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when we have a new passed_seqno, and
+ * we might need to update the fence goal. It checks to see whether
+ * the current fence goal has already passed, and, in that case,
+ * scans through all unsignaled fences to get the next fence object with an
+ * action attached, and sets the seqno of that fence as a new fence goal.
+ *
+ * Returns true if the device goal seqno was updated, false otherwise.
+ */
+static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ u32 passed_seqno)
+{
+ u32 goal_seqno;
+ __le32 __iomem *fifo_mem;
+ struct vmw_fence_obj *fence;
+
+ if (likely(!fman->seqno_valid))
+ return false;
+
+ fifo_mem = fman->dev_priv->mmio_virt;
+ goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
+ return false;
+
+ fman->seqno_valid = false;
+ list_for_each_entry(fence, &fman->fence_list, head) {
+ if (!list_empty(&fence->seq_passed_actions)) {
+ fman->seqno_valid = true;
+ iowrite32(fence->seqno,
+ fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ break;
+ }
+ }
+
+ return true;
+}
-out_unlock:
- spin_unlock(&queue->lock);
- return (updated) ? 0 : -EBUSY;
+/**
+ * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
+ * needed.
+ *
+ * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
+ * considered as a device fence goal.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when an action has been attached to a fence to
+ * check whether the seqno of that fence should be used for a fence
+ * goal interrupt. This is typically needed if the current fence goal is
+ * invalid, or has a higher seqno than that of the current fence object.
+ *
+ * Returns true if the device goal seqno was updated, false otherwise.
+ */
+static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
+{
+ u32 goal_seqno;
+ __le32 __iomem *fifo_mem;
+
+ if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
+ return false;
+
+ fifo_mem = fence->fman->dev_priv->mmio_virt;
+ goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ if (likely(fence->fman->seqno_valid &&
+ goal_seqno - fence->seqno < VMW_FENCE_WRAP))
+ return false;
+
+ iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ fence->fman->seqno_valid = true;
+
+ return true;
}
-static struct timespec vmw_timespec_add(struct timespec t1,
- struct timespec t2)
+void vmw_fences_update(struct vmw_fence_manager *fman)
{
- t1.tv_sec += t2.tv_sec;
- t1.tv_nsec += t2.tv_nsec;
- if (t1.tv_nsec >= 1000000000L) {
- t1.tv_sec += 1;
- t1.tv_nsec -= 1000000000L;
+ unsigned long flags;
+ struct vmw_fence_obj *fence, *next_fence;
+ struct list_head action_list;
+ bool needs_rerun;
+ uint32_t seqno, new_seqno;
+ __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+
+ seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+rerun:
+ spin_lock_irqsave(&fman->lock, flags);
+ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+ if (seqno - fence->seqno < VMW_FENCE_WRAP) {
+ list_del_init(&fence->head);
+ fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+ INIT_LIST_HEAD(&action_list);
+ list_splice_init(&fence->seq_passed_actions,
+ &action_list);
+ vmw_fences_perform_actions(fman, &action_list);
+ wake_up_all(&fence->queue);
+ } else
+ break;
}
- return t1;
+ needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
+
+ if (!list_empty(&fman->cleanup_list))
+ (void) schedule_work(&fman->work);
+ spin_unlock_irqrestore(&fman->lock, flags);
+
+ /*
+ * Rerun if the fence goal seqno was updated, and the
+ * hardware might have raced with that update, so that
+ * we missed a fence_goal irq.
+ */
+
+ if (unlikely(needs_rerun)) {
+ new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ if (new_seqno != seqno) {
+ seqno = new_seqno;
+ goto rerun;
+ }
+ }
+}
+
+bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+ uint32_t flags)
+{
+ struct vmw_fence_manager *fman = fence->fman;
+ unsigned long irq_flags;
+ uint32_t signaled;
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ signaled = fence->signaled;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+ flags &= fence->signal_mask;
+ if ((signaled & flags) == flags)
+ return 1;
+
+ if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
+ vmw_fences_update(fman);
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ signaled = fence->signaled;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+ return ((signaled & flags) == flags);
}
-static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
+ uint32_t flags, bool lazy,
+ bool interruptible, unsigned long timeout)
{
- struct timespec now;
+ struct vmw_private *dev_priv = fence->fman->dev_priv;
+ long ret;
+
+ if (likely(vmw_fence_obj_signaled(fence, flags)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ vmw_seqno_waiter_add(dev_priv);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (fence->queue,
+ vmw_fence_obj_signaled(fence, flags),
+ timeout);
+ else
+ ret = wait_event_timeout
+ (fence->queue,
+ vmw_fence_obj_signaled(fence, flags),
+ timeout);
+
+ vmw_seqno_waiter_remove(dev_priv);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ return ret;
+}
+
+void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
+{
+ struct vmw_private *dev_priv = fence->fman->dev_priv;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+}
+
+static void vmw_fence_destroy(struct vmw_fence_obj *fence)
+{
+ struct vmw_fence_manager *fman = fence->fman;
+
+ kfree(fence);
+ /*
+ * Free kernel space accounting.
+ */
+ ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+ fman->fence_size);
+}
+
+int vmw_fence_create(struct vmw_fence_manager *fman,
+ uint32_t seqno,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence)
+{
+ struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+ struct vmw_fence_obj *fence;
+ int ret;
+
+ ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (unlikely(fence == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_object;
+ }
+
+ ret = vmw_fence_obj_init(fman, fence, seqno, mask,
+ vmw_fence_destroy);
+ if (unlikely(ret != 0))
+ goto out_err_init;
+
+ *p_fence = fence;
+ return 0;
+
+out_err_init:
+ kfree(fence);
+out_no_object:
+ ttm_mem_global_free(mem_glob, fman->fence_size);
+ return ret;
+}
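+
+/*
+ * Minimal usage sketch (illustrative only, error handling trimmed):
+ * create a kernel fence for a submitted seqno, wait for it to pass and
+ * drop the reference.
+ *
+ * struct vmw_fence_obj *fence;
+ *
+ * ret = vmw_fence_create(fman, seqno, DRM_VMW_FENCE_FLAG_EXEC, &fence);
+ * if (ret == 0) {
+ * ret = vmw_fence_obj_wait(fence, DRM_VMW_FENCE_FLAG_EXEC,
+ * false, true, VMW_FENCE_WAIT_TIMEOUT);
+ * vmw_fence_obj_unreference(&fence);
+ * }
+ */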
+
+
+static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
+{
+ struct vmw_user_fence *ufence =
+ container_of(fence, struct vmw_user_fence, fence);
+ struct vmw_fence_manager *fman = fence->fman;
+
+ kfree(ufence);
+ /*
+ * Free kernel space accounting.
+ */
+ ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+ fman->user_fence_size);
+}
+
+static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_fence *ufence =
+ container_of(base, struct vmw_user_fence, base);
+ struct vmw_fence_obj *fence = &ufence->fence;
+
+ *p_base = NULL;
+ vmw_fence_obj_unreference(&fence);
+}
+
+int vmw_user_fence_create(struct drm_file *file_priv,
+ struct vmw_fence_manager *fman,
+ uint32_t seqno,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_user_fence *ufence;
+ struct vmw_fence_obj *tmp;
+ struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+ int ret;
+
+ /*
+ * Kernel memory space accounting, since this object may
+ * be created by a user-space request.
+ */
+
+ ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
+ if (unlikely(ufence == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_object;
+ }
+
+ ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
+ mask, vmw_user_fence_destroy);
+ if (unlikely(ret != 0)) {
+ kfree(ufence);
+ goto out_no_object;
+ }
+
+ /*
+ * The base object holds a reference which is freed in
+ * vmw_user_fence_base_release.
+ */
+ tmp = vmw_fence_obj_reference(&ufence->fence);
+ ret = ttm_base_object_init(tfile, &ufence->base, false,
+ VMW_RES_FENCE,
+ &vmw_user_fence_base_release, NULL);
- spin_lock(&queue->lock);
- getrawmonotonic(&now);
- queue->lag = vmw_timespec_add(queue->lag,
- timespec_sub(now, queue->lag_time));
- queue->lag_time = now;
- spin_unlock(&queue->lock);
- return queue->lag;
+
+ if (unlikely(ret != 0)) {
+ /*
+ * Free the base object's reference
+ */
+ vmw_fence_obj_unreference(&tmp);
+ goto out_err;
+ }
+
+ *p_fence = &ufence->fence;
+ *p_handle = ufence->base.hash.key;
+
+ return 0;
+out_err:
+ tmp = &ufence->fence;
+ vmw_fence_obj_unreference(&tmp);
+out_no_object:
+ ttm_mem_global_free(mem_glob, fman->user_fence_size);
+ return ret;
}
-static bool vmw_lag_lt(struct vmw_fence_queue *queue,
- uint32_t us)
+/**
+ * vmw_fence_fifo_down - signal all unsignaled fence objects.
+ */
+
+void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
+{
+ unsigned long irq_flags;
+ struct list_head action_list;
+ int ret;
+
+ /*
+ * The list may be altered while we traverse it, so always
+ * restart when we've released the fman->lock.
+ */
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ fman->fifo_down = true;
+ while (!list_empty(&fman->fence_list)) {
+ struct vmw_fence_obj *fence =
+ list_entry(fman->fence_list.prev, struct vmw_fence_obj,
+ head);
+ kref_get(&fence->kref);
+ spin_unlock_irq(&fman->lock);
+
+ ret = vmw_fence_obj_wait(fence, fence->signal_mask,
+ false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+
+ if (unlikely(ret != 0)) {
+ list_del_init(&fence->head);
+ fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+ INIT_LIST_HEAD(&action_list);
+ list_splice_init(&fence->seq_passed_actions,
+ &action_list);
+ vmw_fences_perform_actions(fman, &action_list);
+ wake_up_all(&fence->queue);
+ }
+
+ spin_lock_irq(&fman->lock);
+
+ BUG_ON(!list_empty(&fence->head));
+ kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+ }
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
+void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
- struct timespec lag, cond;
+ unsigned long irq_flags;
- cond = ns_to_timespec((s64) us * 1000);
- lag = vmw_fifo_lag(queue);
- return (timespec_compare(&lag, &cond) < 1);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ fman->fifo_down = false;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
}
-int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_fence_queue *queue, uint32_t us)
+
+int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct vmw_fence *fence;
- uint32_t sequence;
+ struct drm_vmw_fence_wait_arg *arg =
+ (struct drm_vmw_fence_wait_arg *)data;
+ unsigned long timeout;
+ struct ttm_base_object *base;
+ struct vmw_fence_obj *fence;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
+ uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
+
+ /*
+ * Plain 64-bit division is not available on 32-bit systems, so
+ * approximate the division by 1000000 with shifts.
+ */
+
+ wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
+ (wait_timeout >> 26);
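+ /*
+ * 2^-20 + 2^-24 - 2^-26 is roughly 9.98e-7, so this stays within
+ * about 0.2% of a true division by 1000000, which is more than
+ * accurate enough for a wait timeout.
+ */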
+
+ if (!arg->cookie_valid) {
+ arg->cookie_valid = 1;
+ arg->kernel_cookie = jiffies + wait_timeout;
+ }
+
+ base = ttm_base_object_lookup(tfile, arg->handle);
+ if (unlikely(base == NULL)) {
+ printk(KERN_ERR "Wait invalid fence object handle "
+ "0x%08lx.\n",
+ (unsigned long)arg->handle);
+ return -EINVAL;
+ }
+
+ fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+
+ timeout = jiffies;
+ if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
+ ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
+ 0 : -EBUSY);
+ goto out;
+ }
+
+ timeout = (unsigned long)arg->kernel_cookie - timeout;
+
+ ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
+
+out:
+ ttm_base_object_unref(&base);
+
+ /*
+ * Optionally unref the fence object.
+ */
+
+ if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
+ return ttm_ref_object_base_unref(tfile, arg->handle,
+ TTM_REF_USAGE);
+ return ret;
+}
+
+int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_signaled_arg *arg =
+ (struct drm_vmw_fence_signaled_arg *) data;
+ struct ttm_base_object *base;
+ struct vmw_fence_obj *fence;
+ struct vmw_fence_manager *fman;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ base = ttm_base_object_lookup(tfile, arg->handle);
+ if (unlikely(base == NULL)) {
+ printk(KERN_ERR "Fence signaled invalid fence object handle "
+ "0x%08lx.\n",
+ (unsigned long)arg->handle);
+ return -EINVAL;
+ }
+
+ fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+ fman = fence->fman;
+
+ arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
+ spin_lock_irq(&fman->lock);
+
+ arg->signaled_flags = fence->signaled;
+ arg->passed_seqno = dev_priv->last_read_seqno;
+ spin_unlock_irq(&fman->lock);
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+}
+
+
+int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_arg *arg =
+ (struct drm_vmw_fence_arg *) data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+/**
+ * vmw_event_fence_action_destroy
+ *
+ * @kref: The struct kref embedded in a struct vmw_event_fence_action.
+ *
+ * The vmw_event_fence_action destructor that may be called either after
+ * the fence action cleanup, or when the event is delivered.
+ * It frees both the vmw_event_fence_action struct and the actual
+ * event structure copied to user-space.
+ */
+static void vmw_event_fence_action_destroy(struct kref *kref)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(kref, struct vmw_event_fence_action, kref);
+ struct ttm_mem_global *mem_glob =
+ vmw_mem_glob(vmw_priv(eaction->dev));
+ uint32_t size = eaction->size;
+
+ kfree(eaction->e.event);
+ kfree(eaction);
+ ttm_mem_global_free(mem_glob, size);
+}
+
+
+/**
+ * vmw_event_fence_action_delivered
+ *
+ * @e: The struct drm_pending_event embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * The struct drm_pending_event destructor that is called by drm
+ * once the event is delivered. Since we don't know whether this function
+ * will be called before or after the fence action destructor, we
+ * free a refcount and destroy if it becomes zero.
+ */
+static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(e, struct vmw_event_fence_action, e);
+
+ kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_event_fence_action_seq_passed
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is called when the seqno of the fence where @action is
+ * attached has passed. It queues the event on the submitter's event list.
+ * This function is always called from atomic context, and may be called
+ * from irq context. It ups a refcount reflecting that we now have two
+ * destructors.
+ */
+static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(action, struct vmw_event_fence_action, action);
+ struct drm_device *dev = eaction->dev;
+ struct drm_file *file_priv = eaction->e.file_priv;
+ unsigned long irq_flags;
+
+ kref_get(&eaction->kref);
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+ if (likely(eaction->tv_sec != NULL)) {
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ *eaction->tv_sec = tv.tv_sec;
+ *eaction->tv_usec = tv.tv_usec;
+ }
+
+ list_add_tail(&eaction->e.link, &file_priv->event_list);
+ wake_up_all(&file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+}
+
+/**
+ * vmw_event_fence_action_cleanup
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is the struct vmw_fence_action destructor. It's typically
+ * called from a workqueue.
+ */
+static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(action, struct vmw_event_fence_action, action);
+
+ vmw_fence_obj_unreference(&eaction->fence);
+ kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_fence_obj_add_action - Add an action to a fence object.
+ *
+ * @fence - The fence object.
+ * @action - The action to add.
+ *
+ * Note that the action callbacks may be executed before this function
+ * returns.
+ */
+void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+ struct vmw_fence_action *action)
+{
+ struct vmw_fence_manager *fman = fence->fman;
+ unsigned long irq_flags;
+ bool run_update = false;
+
+ mutex_lock(&fman->goal_irq_mutex);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+
+ fman->pending_actions[action->type]++;
+ if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
+ struct list_head action_list;
- while (!vmw_lag_lt(queue, us)) {
- spin_lock(&queue->lock);
- if (list_empty(&queue->head))
- sequence = atomic_read(&dev_priv->fence_seq);
- else {
- fence = list_first_entry(&queue->head,
- struct vmw_fence, head);
- sequence = fence->sequence;
+ INIT_LIST_HEAD(&action_list);
+ list_add_tail(&action->head, &action_list);
+ vmw_fences_perform_actions(fman, &action_list);
+ } else {
+ list_add_tail(&action->head, &fence->seq_passed_actions);
+
+ /*
+ * This function may set fman::seqno_valid, so it must
+ * be run with the goal_irq_mutex held.
+ */
+ run_update = vmw_fence_goal_check_locked(fence);
+ }
+
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+ if (run_update) {
+ if (!fman->goal_irq_on) {
+ fman->goal_irq_on = true;
+ vmw_goal_waiter_add(fman->dev_priv);
}
- spin_unlock(&queue->lock);
+ vmw_fences_update(fman);
+ }
+ mutex_unlock(&fman->goal_irq_mutex);
- ret = vmw_wait_fence(dev_priv, false, sequence, true,
- 3*HZ);
+}
- if (unlikely(ret != 0))
- return ret;
+/**
+ * vmw_event_fence_action_create - Post an event for sending when a fence
+ * object seqno has passed.
+ *
+ * @file_priv: The file connection on which the event should be posted.
+ * @fence: The fence object on which to post the event.
+ * @event: Event to be posted. This event should have been allocated
+ * using k[mz]alloc, and should have been completely initialized.
+ * @interruptible: Interruptible waits if possible.
+ *
+ * As a side effect, the object pointed to by @event may have been
+ * freed when this function returns. If this function returns with
+ * an error code, the caller needs to free that object.
+ */
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+ struct vmw_fence_obj *fence,
+ struct drm_event *event,
+ uint32_t *tv_sec,
+ uint32_t *tv_usec,
+ bool interruptible)
+{
+ struct vmw_event_fence_action *eaction;
+ struct ttm_mem_global *mem_glob =
+ vmw_mem_glob(fence->fman->dev_priv);
+ struct vmw_fence_manager *fman = fence->fman;
+ uint32_t size = fman->event_fence_action_size +
+ ttm_round_pot(event->length);
+ int ret;
+
+ /*
+ * Account for internal structure size as well as the
+ * event size itself.
+ */
+
+ ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
- (void) vmw_fence_pull(queue, sequence);
+ eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
+ if (unlikely(eaction == NULL)) {
+ ttm_mem_global_free(mem_glob, size);
+ return -ENOMEM;
}
+
+ eaction->e.event = event;
+ eaction->e.file_priv = file_priv;
+ eaction->e.destroy = vmw_event_fence_action_delivered;
+
+ eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
+ eaction->action.cleanup = vmw_event_fence_action_cleanup;
+ eaction->action.type = VMW_ACTION_EVENT;
+
+ eaction->fence = vmw_fence_obj_reference(fence);
+ eaction->dev = fman->dev_priv->dev;
+ eaction->size = size;
+ eaction->tv_sec = tv_sec;
+ eaction->tv_usec = tv_usec;
+
+ kref_init(&eaction->kref);
+ vmw_fence_obj_add_action(fence, &eaction->action);
+
return 0;
}
+int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_fence_event_arg *arg =
+ (struct drm_vmw_fence_event_arg *) data;
+ struct vmw_fence_obj *fence = NULL;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct drm_vmw_fence_rep __user *user_fence_rep =
+ (struct drm_vmw_fence_rep __user *)(unsigned long)
+ arg->fence_rep;
+ uint32_t handle;
+ unsigned long irq_flags;
+ struct drm_vmw_event_fence *event;
+ int ret;
+
+ /*
+ * Look up an existing fence object,
+ * and if user-space wants a new reference,
+ * add one.
+ */
+ if (arg->handle) {
+ struct ttm_base_object *base =
+ ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Fence event invalid fence object handle "
+ "0x%08lx.\n",
+ (unsigned long)arg->handle);
+ return -EINVAL;
+ }
+ fence = &(container_of(base, struct vmw_user_fence,
+ base)->fence);
+ (void) vmw_fence_obj_reference(fence);
+
+ if (user_fence_rep != NULL) {
+ bool existed;
+
+ ret = ttm_ref_object_add(vmw_fp->tfile, base,
+ TTM_REF_USAGE, &existed);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to reference a fence "
+ "object.\n");
+ goto out_no_ref_obj;
+ }
+ handle = base->hash.key;
+ }
+ ttm_base_object_unref(&base);
+ }
+
+ /*
+ * Create a new fence object.
+ */
+ if (!fence) {
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+ &fence,
+ (user_fence_rep) ?
+ &handle : NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Fence event failed to create fence.\n");
+ return ret;
+ }
+ }
+
+ BUG_ON(fence == NULL);
+
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+ ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
+ if (likely(ret == 0))
+ file_priv->event_space -= sizeof(*event);
+
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate event space for this file.\n");
+ goto out_no_event_space;
+ }
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (unlikely(event == NULL)) {
+ DRM_ERROR("Failed to allocate an event.\n");
+ goto out_no_event;
+ }
+
+ event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+ event->base.length = sizeof(*event);
+ event->user_data = arg->user_data;
+
+ if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ &event->base,
+ &event->tv_sec,
+ &event->tv_usec,
+ true);
+ else
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ &event->base,
+ NULL,
+ NULL,
+ true);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to attach event to fence.\n");
+ goto out_no_attach;
+ }
+
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+ handle);
+ vmw_fence_obj_unreference(&fence);
+ return 0;
+out_no_attach:
+ kfree(event);
+out_no_event:
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+ file_priv->event_space += sizeof(*event);
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_event_space:
+ if (user_fence_rep != NULL)
+ ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ handle, TTM_REF_USAGE);
+out_no_ref_obj:
+ vmw_fence_obj_unreference(&fence);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
new file mode 100644
index 00000000000..0854a2096b5
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -0,0 +1,113 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_FENCE_H_
+#define _VMWGFX_FENCE_H_
+
+#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
+
+struct vmw_private;
+
+struct vmw_fence_manager;
+
+/**
+ * enum vmw_action_type - Types of actions that can be attached to a
+ * fence object.
+ */
+enum vmw_action_type {
+ VMW_ACTION_EVENT = 0,
+ VMW_ACTION_MAX
+};
+
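+/**
+ * struct vmw_fence_action - Action to be performed once a fence seqno
+ * has passed.
+ *
+ * @head: List head used by the fence object the action is attached to.
+ * @type: Action type, used to account for pending actions per type.
+ * @seq_passed: Called when the fence seqno has passed. May be called
+ * from atomic (including irq) context.
+ * @cleanup: Optional destructor, called later from a worker thread.
+ */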
+struct vmw_fence_action {
+ struct list_head head;
+ enum vmw_action_type type;
+ void (*seq_passed) (struct vmw_fence_action *action);
+ void (*cleanup) (struct vmw_fence_action *action);
+};
+
+struct vmw_fence_obj {
+ struct kref kref;
+ u32 seqno;
+
+ struct vmw_fence_manager *fman;
+ struct list_head head;
+ uint32_t signaled;
+ uint32_t signal_mask;
+ struct list_head seq_passed_actions;
+ void (*destroy)(struct vmw_fence_obj *fence);
+ wait_queue_head_t queue;
+};
+
+extern struct vmw_fence_manager *
+vmw_fence_manager_init(struct vmw_private *dev_priv);
+
+extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
+
+extern struct vmw_fence_obj *
+vmw_fence_obj_reference(struct vmw_fence_obj *fence);
+
+extern void vmw_fences_update(struct vmw_fence_manager *fman);
+
+extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+ uint32_t flags);
+
+extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
+ bool lazy,
+ bool interruptible, unsigned long timeout);
+
+extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
+
+extern int vmw_fence_create(struct vmw_fence_manager *fman,
+ uint32_t seqno,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence);
+
+extern int vmw_user_fence_create(struct drm_file *file_priv,
+ struct vmw_fence_manager *fman,
+ uint32_t sequence,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle);
+
+extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
+
+extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif /* _VMWGFX_FENCE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 635c0ffee7f..03bbc2a6f9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -45,7 +45,11 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (hwversion == 0)
return false;
- if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+ if (hwversion < SVGA3D_HWVERSION_WS8_B1)
+ return false;
+
+ /* Non-Screen Object path does not support surfaces */
+ if (!dev_priv->sou_priv)
return false;
return true;
@@ -72,22 +76,12 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
uint32_t max;
uint32_t min;
uint32_t dummy;
- int ret;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
return -ENOMEM;
- fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
- fifo->last_data_size = 0;
- fifo->last_buffer_add = false;
- fifo->last_buffer = vmalloc(fifo->last_buffer_size);
- if (unlikely(fifo->last_buffer == NULL)) {
- ret = -ENOMEM;
- goto out_err;
- }
-
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
@@ -137,14 +131,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) min,
(unsigned int) fifo->capabilities);
- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
- iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
- vmw_fence_queue_init(&fifo->fence_queue);
+ atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+ vmw_marker_queue_init(&fifo->marker_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
-out_err:
- vfree(fifo->static_buffer);
- fifo->static_buffer = NULL;
- return ret;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
@@ -170,7 +160,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
- dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
@@ -180,12 +170,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->traces_state);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_fence_queue_takedown(&fifo->fence_queue);
-
- if (likely(fifo->last_buffer != NULL)) {
- vfree(fifo->last_buffer);
- fifo->last_buffer = NULL;
- }
+ vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
@@ -262,9 +247,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) |
- SVGA_IRQFLAG_FIFO_PROGRESS);
+ dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
@@ -286,9 +270,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) &
- ~SVGA_IRQFLAG_FIFO_PROGRESS);
+ dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
@@ -296,6 +279,16 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return ret;
}
+/**
+ * Reserve @bytes number of bytes in the fifo.
+ *
+ * This function will return NULL (error) on two conditions:
+ * If it times out waiting for fifo space, or if @bytes is larger than the
+ * available fifo space.
+ *
+ * Returns:
+ * Pointer to the fifo, or null on error (possible hardware hang).
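+ *
+ * A typical caller (sketch only; see vmw_fifo_emit_dummy_query() below
+ * for a real example) looks like:
+ *
+ * cmd = vmw_fifo_reserve(dev_priv, bytes);
+ * if (unlikely(cmd == NULL))
+ * return -ENOMEM;
+ * ... write exactly @bytes of command data through cmd ...
+ * vmw_fifo_commit(dev_priv, bytes);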
+ */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
@@ -466,7 +459,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
@@ -476,16 +469,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
- *sequence = atomic_read(&dev_priv->fence_seq);
+ *seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
- (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+ (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
goto out_err;
}
do {
- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
- } while (*sequence == 0);
+ *seqno = atomic_add_return(1, &dev_priv->marker_seq);
+ } while (*seqno == 0);
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
@@ -502,61 +495,68 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));
- iowrite32(*sequence, &cmd_fence->fence);
- fifo_state->last_buffer_add = true;
+ iowrite32(*seqno, &cmd_fence->fence);
vmw_fifo_commit(dev_priv, bytes);
- fifo_state->last_buffer_add = false;
- (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
- vmw_update_sequence(dev_priv, fifo_state);
+ (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+ vmw_update_seqno(dev_priv, fifo_state);
out_err:
return ret;
}
/**
- * Map the first page of the FIFO read-only to user-space.
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A query result structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object, and that buffer object
+ * must be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
*/
-
-static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
{
- int ret;
- unsigned long address = (unsigned long)vmf->virtual_address;
+ /*
+ * A query wait without a preceding query end will
+ * actually finish all queries for this cid
+ * without writing to the query result structure.
+ */
- if (address != vma->vm_start)
- return VM_FAULT_SIGBUS;
+ struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForQuery body;
+ } *cmd;
- ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
- if (likely(ret == -EBUSY || ret == 0))
- return VM_FAULT_NOPAGE;
- else if (ret == -ENOMEM)
- return VM_FAULT_OOM;
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- return VM_FAULT_SIGBUS;
-}
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of fifo space for dummy query.\n");
+ return -ENOMEM;
+ }
-static struct vm_operations_struct vmw_fifo_vm_ops = {
- .fault = vmw_fifo_vm_fault,
- .open = NULL,
- .close = NULL
-};
+ cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = cid;
+ cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
+ cmd->body.guestResult.offset = bo->offset;
+ } else {
+ cmd->body.guestResult.gmrId = bo->mem.start;
+ cmd->body.guestResult.offset = 0;
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
-int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv;
- struct vmw_private *dev_priv;
-
- file_priv = filp->private_data;
- dev_priv = vmw_priv(file_priv->minor->dev);
-
- if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
- (vma->vm_end - vma->vm_start) != PAGE_SIZE)
- return -EINVAL;
-
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
- vma->vm_page_prot);
- vma->vm_ops = &vmw_fifo_vm_ops;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index de0c5948521..f4e7763a769 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,77 @@
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"
+#define VMW_PPN_SIZE sizeof(unsigned long)
+
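+/*
+ * Each SVGA FIFO command emitted below consists of a 32-bit command id
+ * followed by its argument structure, which is why the size
+ * computations add 4 bytes per command.
+ */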
+static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+ struct page *pages[],
+ unsigned long num_pages,
+ int gmr_id)
+{
+ SVGAFifoCmdDefineGMR2 define_cmd;
+ SVGAFifoCmdRemapGMR2 remap_cmd;
+ uint32_t define_size = sizeof(define_cmd) + 4;
+ uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
+ uint32_t *cmd;
+ uint32_t *cmd_orig;
+ uint32_t i;
+
+ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+ if (unlikely(cmd == NULL))
+ return -ENOMEM;
+
+ define_cmd.gmrId = gmr_id;
+ define_cmd.numPages = num_pages;
+
+ remap_cmd.gmrId = gmr_id;
+ remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
+ SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
+ remap_cmd.offsetPages = 0;
+ remap_cmd.numPages = num_pages;
+
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+ cmd += sizeof(define_cmd) / sizeof(uint32);
+
+ *cmd++ = SVGA_CMD_REMAP_GMR2;
+ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+ cmd += sizeof(remap_cmd) / sizeof(uint32);
+
+ for (i = 0; i < num_pages; ++i) {
+ if (VMW_PPN_SIZE > 4)
+ *((uint64_t *)cmd) = page_to_pfn(*pages++);
+ else
+ *cmd = page_to_pfn(*pages++);
+
+ cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ }
+
+ vmw_fifo_commit(dev_priv, define_size + remap_size);
+
+ return 0;
+}
+
+static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
+ int gmr_id)
+{
+ SVGAFifoCmdDefineGMR2 define_cmd;
+ uint32_t define_size = sizeof(define_cmd) + 4;
+ uint32_t *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, define_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("GMR2 unbind failed.\n");
+ return;
+ }
+ define_cmd.gmrId = gmr_id;
+ define_cmd.numPages = 0;
+
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+
+ vmw_fifo_commit(dev_priv, define_size);
+}
+
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
@@ -170,6 +241,9 @@ int vmw_gmr_bind(struct vmw_private *dev_priv,
struct list_head desc_pages;
int ret;
+ if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
+ return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
return -EINVAL;
@@ -192,6 +266,11 @@ int vmw_gmr_bind(struct vmw_private *dev_priv,
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
+ if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+ vmw_gmr2_unbind(dev_priv, gmr_id);
+ return;
+ }
+
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
wmb();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ac6e0d1bd62..5f717152cff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -40,6 +40,8 @@ struct vmwgfx_gmrid_man {
spinlock_t lock;
struct ida gmr_ida;
uint32_t max_gmr_ids;
+ uint32_t max_gmr_pages;
+ uint32_t used_gmr_pages;
};
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
@@ -49,33 +51,50 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
{
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
- int ret;
+ int ret = 0;
int id;
mem->mm_node = NULL;
- do {
- if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
- return -ENOMEM;
+ spin_lock(&gman->lock);
+
+ if (gman->max_gmr_pages > 0) {
+ gman->used_gmr_pages += bo->num_pages;
+ if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
+ goto out_err_locked;
+ }
+ do {
+ spin_unlock(&gman->lock);
+ if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
spin_lock(&gman->lock);
- ret = ida_get_new(&gman->gmr_ida, &id);
+ ret = ida_get_new(&gman->gmr_ida, &id);
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
ida_remove(&gman->gmr_ida, id);
- spin_unlock(&gman->lock);
- return 0;
+ ret = 0;
+ goto out_err_locked;
}
-
- spin_unlock(&gman->lock);
-
} while (ret == -EAGAIN);
if (likely(ret == 0)) {
mem->mm_node = gman;
mem->start = id;
- }
+ mem->num_pages = bo->num_pages;
+ } else
+ goto out_err_locked;
+
+ spin_unlock(&gman->lock);
+ return 0;
+out_err:
+ spin_lock(&gman->lock);
+out_err_locked:
+ gman->used_gmr_pages -= bo->num_pages;
+ spin_unlock(&gman->lock);
return ret;
}
@@ -88,6 +107,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
if (mem->mm_node) {
spin_lock(&gman->lock);
ida_remove(&gman->gmr_ida, mem->start);
+ gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
mem->mm_node = NULL;
}
@@ -96,6 +116,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
+ struct vmw_private *dev_priv =
+ container_of(man->bdev, struct vmw_private, bdev);
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
@@ -103,6 +125,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
return -ENOMEM;
spin_lock_init(&gman->lock);
+ gman->max_gmr_pages = dev_priv->max_gmr_pages;
+ gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
gman->max_gmr_ids = p_size;
man->priv = (void *) gman;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 570d57775a5..3f6343502d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -27,6 +27,7 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
+#include "vmwgfx_kms.h"
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -45,9 +46,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_3D:
param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
break;
- case DRM_VMW_PARAM_FIFO_OFFSET:
- param->value = dev_priv->mmio_start;
- break;
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
break;
@@ -57,6 +55,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_MAX_FB_SIZE:
param->value = dev_priv->vram_size;
break;
+ case DRM_VMW_PARAM_FIFO_HW_VERSION:
+ {
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ break;
+ }
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -66,25 +71,259 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+
+int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_vmw_get_3d_cap_arg *arg =
+ (struct drm_vmw_get_3d_cap_arg *) data;
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- struct drm_vmw_fifo_debug_arg *arg =
- (struct drm_vmw_fifo_debug_arg *)data;
- __le32 __user *buffer = (__le32 __user *)
- (unsigned long)arg->debug_buffer;
+ uint32_t size;
+ __le32 __iomem *fifo_mem;
+ void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
+ void *bounce;
+ int ret;
- if (unlikely(fifo_state->last_buffer == NULL))
+ if (unlikely(arg->pad64 != 0)) {
+ DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
+ }
+
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+
+ if (arg->max_size < size)
+ size = arg->max_size;
+
+ bounce = vmalloc(size);
+ if (unlikely(bounce == NULL)) {
+ DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
+ return -ENOMEM;
+ }
+
+ fifo_mem = dev_priv->mmio_virt;
+ memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+
+ ret = copy_to_user(buffer, bounce, size);
+ vfree(bounce);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to report 3D caps info.\n");
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+int vmw_present_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_present_arg *arg =
+ (struct drm_vmw_present_arg *)data;
+ struct vmw_surface *surface;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct drm_vmw_rect __user *clips_ptr;
+ struct drm_vmw_rect *clips = NULL;
+ struct drm_mode_object *obj;
+ struct vmw_framebuffer *vfb;
+ uint32_t num_clips;
+ int ret;
+
+ num_clips = arg->num_clips;
+ clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+
+ if (unlikely(num_clips == 0))
+ return 0;
+
+ if (clips_ptr == NULL) {
+ DRM_ERROR("Variable clips_ptr must be specified.\n");
+ ret = -EINVAL;
+ goto out_clips;
+ }
+
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (clips == NULL) {
+ DRM_ERROR("Failed to allocate clip rect list.\n");
+ ret = -ENOMEM;
+ goto out_clips;
+ }
+
+ ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+ if (ret) {
+ DRM_ERROR("Failed to copy clip rects from userspace.\n");
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_no_mode_mutex;
+ }
+
+ obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("Invalid framebuffer id.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
+ }
+
+ vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ if (!vfb->dmabuf) {
+ DRM_ERROR("Framebuffer not dmabuf backed.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_no_ttm_lock;
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
+ &surface);
+ if (ret)
+ goto out_no_surface;
+
+ ret = vmw_kms_present(dev_priv, file_priv,
+ vfb, surface, arg->sid,
+ arg->dest_x, arg->dest_y,
+ clips, num_clips);
- if (arg->debug_buffer_size < fifo_state->last_data_size) {
- arg->used_size = arg->debug_buffer_size;
- arg->did_not_fit = 1;
- } else {
- arg->used_size = fifo_state->last_data_size;
- arg->did_not_fit = 0;
+ /* vmw_user_surface_lookup takes one ref so does new_fb */
+ vmw_surface_unreference(&surface);
+
+out_no_surface:
+ ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+out_no_fb:
+ mutex_unlock(&dev->mode_config.mutex);
+out_no_mode_mutex:
+out_no_copy:
+ kfree(clips);
+out_clips:
+ return ret;
+}
+
+int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_present_readback_arg *arg =
+ (struct drm_vmw_present_readback_arg *)data;
+ struct drm_vmw_fence_rep __user *user_fence_rep =
+ (struct drm_vmw_fence_rep __user *)
+ (unsigned long)arg->fence_rep;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct drm_vmw_rect __user *clips_ptr;
+ struct drm_vmw_rect *clips = NULL;
+ struct drm_mode_object *obj;
+ struct vmw_framebuffer *vfb;
+ uint32_t num_clips;
+ int ret;
+
+ num_clips = arg->num_clips;
+ clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+
+ if (unlikely(num_clips == 0))
+ return 0;
+
+ if (clips_ptr == NULL) {
+ DRM_ERROR("Argument clips_ptr must be specified.\n");
+ ret = -EINVAL;
+ goto out_clips;
+ }
+
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (clips == NULL) {
+ DRM_ERROR("Failed to allocate clip rect list.\n");
+ ret = -ENOMEM;
+ goto out_clips;
+ }
+
+ ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+ if (ret) {
+ DRM_ERROR("Failed to copy clip rects from userspace.\n");
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_no_mode_mutex;
+ }
+
+ obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("Invalid framebuffer id.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
+ }
+
+ vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ if (!vfb->dmabuf) {
+ DRM_ERROR("Framebuffer not dmabuf backed.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
}
- return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size);
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_no_ttm_lock;
+
+ ret = vmw_kms_readback(dev_priv, file_priv,
+ vfb, user_fence_rep,
+ clips, num_clips);
+
+ ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+out_no_fb:
+ mutex_unlock(&dev->mode_config.mutex);
+out_no_mode_mutex:
+out_no_copy:
+ kfree(clips);
+out_clips:
+ return ret;
+}
+
+
+/**
+ * vmw_fops_poll - wrapper around the drm_poll function
+ *
+ * @filp: See the linux fops poll documentation.
+ * @wait: See the linux fops poll documentation.
+ *
+ * Wrapper around the drm_poll function that makes sure the device is
+ * processing the fifo if drm_poll decides to wait.
+ */
+unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct vmw_private *dev_priv =
+ vmw_priv(file_priv->minor->dev);
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ return drm_poll(filp, wait);
+}
+
+
+/**
+ * vmw_fops_read - wrapper around the drm_read function
+ *
+ * @filp: See the linux fops read documentation.
+ * @buffer: See the linux fops read documentation.
+ * @count: See the linux fops read documentation.
+ * @offset: See the linux fops read documentation.
+ *
+ * Wrapper around the drm_read function that makes sure the device is
+ * processing the fifo if drm_read decides to wait.
+ */
+ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct vmw_private *dev_priv =
+ vmw_priv(file_priv->minor->dev);
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ return drm_read(filp, buffer, count, offset);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index e92298a6a38..cabc95f7517 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -34,26 +34,33 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
- uint32_t status;
+ uint32_t status, masked_status;
spin_lock(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ masked_status = status & dev_priv->irq_mask;
spin_unlock(&dev_priv->irq_lock);
- if (status & SVGA_IRQFLAG_ANY_FENCE)
+ if (likely(status))
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+ if (!masked_status)
+ return IRQ_NONE;
+
+ if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+ SVGA_IRQFLAG_FENCE_GOAL)) {
+ vmw_fences_update(dev_priv->fman);
wake_up_all(&dev_priv->fence_queue);
- if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
+ }
+
+ if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
- if (likely(status)) {
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ return IRQ_HANDLED;
}
-static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
uint32_t busy;
@@ -64,43 +71,43 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
return (busy == 0);
}
-void vmw_update_sequence(struct vmw_private *dev_priv,
+void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
- uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
-
- if (dev_priv->last_read_sequence != sequence) {
- dev_priv->last_read_sequence = sequence;
- vmw_fence_pull(&fifo_state->fence_queue, sequence);
+ if (dev_priv->last_read_seqno != seqno) {
+ dev_priv->last_read_seqno = seqno;
+ vmw_marker_pull(&fifo_state->marker_queue, seqno);
+ vmw_fences_update(dev_priv->fman);
}
}
-bool vmw_fence_signaled(struct vmw_private *dev_priv,
- uint32_t sequence)
+bool vmw_seqno_passed(struct vmw_private *dev_priv,
+ uint32_t seqno)
{
struct vmw_fifo_state *fifo_state;
bool ret;
- if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
fifo_state = &dev_priv->fifo;
- vmw_update_sequence(dev_priv, fifo_state);
- if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ vmw_update_seqno(dev_priv, fifo_state);
+ if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
- vmw_fifo_idle(dev_priv, sequence))
+ vmw_fifo_idle(dev_priv, seqno))
return true;
/**
- * Then check if the sequence is higher than what we've actually
+ * Then check if the seqno is higher than what we've actually
* emitted. Then the fence is stale and signaled.
*/
- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+ ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
> VMW_FENCE_WRAP);
return ret;
@@ -109,7 +116,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
- uint32_t sequence,
+ uint32_t seqno,
bool interruptible,
unsigned long timeout)
{
@@ -123,7 +130,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
DEFINE_WAIT(__wait);
wait_condition = (fifo_idle) ? &vmw_fifo_idle :
- &vmw_fence_signaled;
+ &vmw_seqno_passed;
/**
* Block command submission while waiting for idle.
@@ -131,14 +138,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
if (fifo_idle)
down_read(&fifo_state->rwsem);
- signal_seq = atomic_read(&dev_priv->fence_seq);
+ signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;
for (;;) {
prepare_to_wait(&dev_priv->fence_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
- if (wait_condition(dev_priv, sequence))
+ if (wait_condition(dev_priv, seqno))
break;
if (time_after_eq(jiffies, end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
@@ -175,68 +182,110 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
return ret;
}
-int vmw_wait_fence(struct vmw_private *dev_priv,
- bool lazy, uint32_t sequence,
- bool interruptible, unsigned long timeout)
+void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (dev_priv->fence_queue_waiters++ == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_ANY_FENCE,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (--dev_priv->fence_queue_waiters == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+
+void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (dev_priv->goal_queue_waiters++ == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_FENCE_GOAL,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (--dev_priv->goal_queue_waiters == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+int vmw_wait_seqno(struct vmw_private *dev_priv,
+ bool lazy, uint32_t seqno,
+ bool interruptible, unsigned long timeout)
{
long ret;
- unsigned long irq_flags;
struct vmw_fifo_state *fifo = &dev_priv->fifo;
- if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return 0;
- if (likely(vmw_fence_signaled(dev_priv, sequence)))
+ if (likely(vmw_seqno_passed(dev_priv, seqno)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
- return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+ return vmw_fallback_wait(dev_priv, lazy, true, seqno,
interruptible, timeout);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+ return vmw_fallback_wait(dev_priv, lazy, false, seqno,
interruptible, timeout);
- mutex_lock(&dev_priv->hw_mutex);
- if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- outl(SVGA_IRQFLAG_ANY_FENCE,
- dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) |
- SVGA_IRQFLAG_ANY_FENCE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
+ vmw_seqno_waiter_add(dev_priv);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fence_queue,
- vmw_fence_signaled(dev_priv, sequence),
+ vmw_seqno_passed(dev_priv, seqno),
timeout);
else
ret = wait_event_timeout
(dev_priv->fence_queue,
- vmw_fence_signaled(dev_priv, sequence),
+ vmw_seqno_passed(dev_priv, seqno),
timeout);
+ vmw_seqno_waiter_remove(dev_priv);
+
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
- mutex_lock(&dev_priv->hw_mutex);
- if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) &
- ~SVGA_IRQFLAG_ANY_FENCE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
-
return ret;
}
@@ -273,25 +322,3 @@ void vmw_irq_uninstall(struct drm_device *dev)
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
-
-#define VMW_FENCE_WAIT_TIMEOUT 3*HZ;
-
-int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_fence_wait_arg *arg =
- (struct drm_vmw_fence_wait_arg *)data;
- unsigned long timeout;
-
- if (!arg->cookie_valid) {
- arg->cookie_valid = 1;
- arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
- }
-
- timeout = jiffies;
- if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
- return -EBUSY;
-
- timeout = (unsigned long)arg->kernel_cookie - timeout;
- return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index dfe32e62bd9..880e285d757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,12 +27,10 @@
#include "vmwgfx_kms.h"
+
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
-static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
-
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
@@ -107,12 +105,17 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
struct vmw_dma_buffer *dmabuf = NULL;
int ret;
+ /* A lot of the code assumes this */
+ if (handle && (width != 64 || height != 64))
+ return -EINVAL;
+
if (handle) {
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
handle, &surface);
if (!ret) {
if (!surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
+ vmw_surface_unreference(&surface);
return -EINVAL;
}
} else {
@@ -178,7 +181,9 @@ err_unreserve:
return 0;
}
- vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
+ vmw_cursor_update_position(dev_priv, true,
+ du->cursor_x + du->hotspot_x,
+ du->cursor_y + du->hotspot_y);
return 0;
}
@@ -193,7 +198,8 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_y = y + crtc->y;
vmw_cursor_update_position(dev_priv, shown,
- du->cursor_x, du->cursor_y);
+ du->cursor_x + du->hotspot_x,
+ du->cursor_y + du->hotspot_y);
return 0;
}
@@ -214,7 +220,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
- int ret;
+ int i, ret;
cmd = container_of(header, struct vmw_dma_cmd, header);
@@ -236,16 +242,19 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
sizeof(SVGA3dCopyBox);
- if (cmd->dma.guest.pitch != (64 * 4) ||
- cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->w != 64 || box->h != 64 || box->d != 1 ||
- box_count != 1) {
+ box->d != 1 || box_count != 1) {
/* TODO handle none page aligned offsets */
- /* TODO handle partial uploads and pitch != 256 */
- /* TODO handle more then one copy (size != 64) */
- DRM_ERROR("lazy programmer, can't handle weird stuff\n");
+ /* TODO handle more dst & src != 0 */
+ /* TODO handle more than one copy */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
return;
}
@@ -264,7 +273,16 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
virtual = ttm_kmap_obj_virtual(&map, &dummy);
- memcpy(srf->snooper.image, virtual, 64*64*4);
+ if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
+ memcpy(srf->snooper.image, virtual, 64*64*4);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * 64,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * 4);
+ }
+
srf->snooper.age++;
/* we can't call this function from this function since execbuf has
@@ -329,41 +347,10 @@ struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
- struct delayed_work d_work;
- struct mutex work_lock;
- bool present_fs;
struct list_head head;
struct drm_master *master;
};
-/**
- * vmw_kms_idle_workqueues - Flush workqueues on this master
- *
- * @vmaster - Pointer identifying the master, for the surfaces of which
- * we idle the dirty work queues.
- *
- * This function should be called with the ttm lock held in exclusive mode
- * to idle all dirty work queues before the fifo is taken down.
- *
- * The work task may actually requeue itself, but after the flush returns we're
- * sure that there's nothing to present, since the ttm lock is held in
- * exclusive mode, so the fifo will never get used.
- */
-
-void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
-{
- struct vmw_framebuffer_surface *entry;
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_for_each_entry(entry, &vmaster->fb_surf, head) {
- if (cancel_delayed_work_sync(&entry->d_work))
- (void) entry->d_work.work.func(&entry->d_work.work);
-
- (void) cancel_delayed_work_sync(&entry->d_work);
- }
- mutex_unlock(&vmaster->fb_surf_mutex);
-}
-
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
@@ -375,65 +362,129 @@ void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
list_del(&vfbs->head);
mutex_unlock(&vmaster->fb_surf_mutex);
- cancel_delayed_work_sync(&vfbs->d_work);
drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
+ ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
-static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
+static int do_surface_dirty_sou(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int inc)
{
- struct delayed_work *d_work =
- container_of(work, struct delayed_work, work);
- struct vmw_framebuffer_surface *vfbs =
- container_of(d_work, struct vmw_framebuffer_surface, d_work);
- struct vmw_surface *surf = vfbs->surface;
- struct drm_framebuffer *framebuffer = &vfbs->base.base;
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct drm_clip_rect *clips_ptr;
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+ int i, num_units;
+ int ret = 0; /* silence warning */
+ int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- SVGA3dCopyRect cr;
+ SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
+ SVGASignedRect *blits;
- /**
- * Strictly we should take the ttm_lock in read mode before accessing
- * the fifo, to make sure the fifo is present and up. However,
- * instead we flush all workqueues under the ttm lock in exclusive mode
- * before taking down the fifo.
- */
- mutex_lock(&vfbs->work_lock);
- if (!vfbs->present_fs)
- goto out_unlock;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL))
- goto out_resched;
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
- cmd->body.sid = cpu_to_le32(surf->res.id);
- cmd->cr.x = cpu_to_le32(0);
- cmd->cr.y = cpu_to_le32(0);
- cmd->cr.srcx = cmd->cr.x;
- cmd->cr.srcy = cmd->cr.y;
- cmd->cr.w = cpu_to_le32(framebuffer->width);
- cmd->cr.h = cpu_to_le32(framebuffer->height);
- vfbs->present_fs = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-out_resched:
- /**
- * Will not re-add if already pending.
- */
- schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
-out_unlock:
- mutex_unlock(&vfbs->work_lock);
-}
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+ head) {
+ if (crtc->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ BUG_ON(!clips || !num_clips);
+
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
+ cmd = kzalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Temporary fifo memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ left = clips->x1;
+ right = clips->x2;
+ top = clips->y1;
+ bottom = clips->y2;
+
+ /* skip the first clip rect */
+ for (i = 1, clips_ptr = clips + inc;
+ i < num_clips; i++, clips_ptr += inc) {
+ left = min_t(int, left, (int)clips_ptr->x1);
+ right = max_t(int, right, (int)clips_ptr->x2);
+ top = min_t(int, top, (int)clips_ptr->y1);
+ bottom = max_t(int, bottom, (int)clips_ptr->y2);
+ }
+
+ /* only need to do this once */
+ memset(cmd, 0, fifo_size);
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+
+ cmd->body.srcRect.left = left;
+ cmd->body.srcRect.right = right;
+ cmd->body.srcRect.top = top;
+ cmd->body.srcRect.bottom = bottom;
+
+ clips_ptr = clips;
+ blits = (SVGASignedRect *)&cmd[1];
+ for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+ blits[i].left = clips_ptr->x1 - left;
+ blits[i].right = clips_ptr->x2 - left;
+ blits[i].top = clips_ptr->y1 - top;
+ blits[i].bottom = clips_ptr->y2 - top;
+ }
+
+ /* do per unit writing, reuse fifo for each */
+ for (i = 0; i < num_units; i++) {
+ struct vmw_display_unit *unit = units[i];
+ int clip_x1 = left - unit->crtc.x;
+ int clip_y1 = top - unit->crtc.y;
+ int clip_x2 = right - unit->crtc.x;
+ int clip_y2 = bottom - unit->crtc.y;
+
+ /* skip any crtcs that miss the clip region */
+ if (clip_x1 >= unit->crtc.mode.hdisplay ||
+ clip_y1 >= unit->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ /* need to reset sid as it is changed by execbuf */
+ cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
+
+ cmd->body.destScreenId = unit->unit;
+
+ /*
+ * The blit command is a lot more resilient than the
+ * readback command when it comes to clip rects. So it's
+ * okay to go out of bounds.
+ */
+
+ cmd->body.destRect.left = clip_x1;
+ cmd->body.destRect.right = clip_x2;
+ cmd->body.destRect.top = clip_y1;
+ cmd->body.destRect.bottom = clip_y2;
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+ fifo_size, 0, NULL);
+
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -444,44 +495,20 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
- struct vmw_surface *surf = vfbs->surface;
struct drm_clip_rect norect;
- SVGA3dCopyRect *cr;
- int i, inc = 1;
- int ret;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- SVGA3dCopyRect cr;
- } *cmd;
+ int ret, inc = 1;
if (unlikely(vfbs->master != file_priv->master))
return -EINVAL;
+ /* Require ScreenObject support for 3D */
+ if (!dev_priv->sou_priv)
+ return -EINVAL;
+
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
- if (!num_clips ||
- !(dev_priv->fifo.capabilities &
- SVGA_FIFO_CAP_SCREEN_OBJECT)) {
- int ret;
-
- mutex_lock(&vfbs->work_lock);
- vfbs->present_fs = true;
- ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
- mutex_unlock(&vfbs->work_lock);
- if (ret) {
- /**
- * No work pending, Force immediate present.
- */
- vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
- }
- ttm_read_unlock(&vmaster->lock);
- return 0;
- }
-
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -493,29 +520,10 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
inc = 2; /* skip source rects */
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- ttm_read_unlock(&vmaster->lock);
- return -ENOMEM;
- }
+ ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
+ flags, color,
+ clips, num_clips, inc);
- memset(cmd, 0, sizeof(*cmd));
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
- cmd->body.sid = cpu_to_le32(surf->res.id);
-
- for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
- cr->x = cpu_to_le16(clips->x1);
- cr->y = cpu_to_le16(clips->y1);
- cr->srcx = cr->x;
- cr->srcy = cr->y;
- cr->w = cpu_to_le16(clips->x2 - clips->x1);
- cr->h = cpu_to_le16(clips->y2 - clips->y1);
- }
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -540,6 +548,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
+ /* 3D is only supported on HWv8 hosts which supports screen objects */
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
/*
* Sanity checks.
*/
@@ -567,6 +579,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
case 15:
format = SVGA3D_A1R5G5B5;
break;
+ case 8:
+ format = SVGA3D_LUMINANCE8;
+ break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
@@ -599,14 +614,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
- vfbs->base.pin = &vmw_surface_dmabuf_pin;
- vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
+ vfbs->base.user_handle = mode_cmd->handle;
vfbs->master = drm_master_get(file_priv->master);
- mutex_init(&vfbs->work_lock);
mutex_lock(&vmaster->fb_surf_mutex);
- INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
list_add_tail(&vfbs->head, &vmaster->fb_surf);
mutex_unlock(&vmaster->fb_surf_mutex);
@@ -641,48 +653,33 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
+ ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
+static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment)
{
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct drm_clip_rect norect;
- int ret;
+ size_t fifo_size;
+ int i;
+
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
- int i, increment = 1;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = framebuffer->width;
- norect.y2 = framebuffer->height;
- } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
- num_clips /= 2;
- increment = 2;
- }
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ fifo_size = sizeof(*cmd) * num_clips;
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
- ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
+ memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd[i].body.x = cpu_to_le32(clips->x1);
@@ -691,57 +688,186 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
- ttm_read_unlock(&vmaster->lock);
-
+ vmw_fifo_commit(dev_priv, fifo_size);
return 0;
}
-static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
- .destroy = vmw_framebuffer_dmabuf_destroy,
- .dirty = vmw_framebuffer_dmabuf_dirty,
- .create_handle = vmw_framebuffer_create_handle,
-};
-
-static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer)
{
- struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(&vfb->base);
- unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+ int depth = framebuffer->base.depth;
+ size_t fifo_size;
int ret;
- vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
- if (unlikely(vfbs->buffer == NULL))
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd;
+
+ /* Emulate RGBA support: contrary to svga_reg.h, this is not
+ * supported by hosts. This is only a problem if we are reading
+ * this value later and expecting what we uploaded back.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ fifo_size = sizeof(*cmd);
+ cmd = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
return -ENOMEM;
+ }
- vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
- &vmw_vram_ne_placement,
- false, &vmw_dmabuf_bo_free);
- vmw_overlay_resume_all(dev_priv);
+ memset(cmd, 0, fifo_size);
+ cmd->header = SVGA_CMD_DEFINE_GMRFB;
+ cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+ cmd->body.format.colorDepth = depth;
+ cmd->body.format.reserved = 0;
+ cmd->body.bytesPerLine = framebuffer->base.pitch;
+ cmd->body.ptr.gmrId = framebuffer->user_handle;
+ cmd->body.ptr.offset = 0;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+ fifo_size, 0, NULL);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment)
+{
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *clips_ptr;
+ int i, k, num_units, ret;
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdBlitGMRFBToScreen body;
+ } *blits;
+
+ ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
if (unlikely(ret != 0))
- vfbs->buffer = NULL;
+ return ret; /* define_gmrfb prints warnings */
+
+ fifo_size = sizeof(*blits) * num_clips;
+ blits = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(blits == NULL)) {
+ DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
+ return -ENOMEM;
+ }
+
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ for (k = 0; k < num_units; k++) {
+ struct vmw_display_unit *unit = units[k];
+ int hit_num = 0;
+
+ clips_ptr = clips;
+ for (i = 0; i < num_clips; i++, clips_ptr += increment) {
+ int clip_x1 = clips_ptr->x1 - unit->crtc.x;
+ int clip_y1 = clips_ptr->y1 - unit->crtc.y;
+ int clip_x2 = clips_ptr->x2 - unit->crtc.x;
+ int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+
+ /* skip any crtcs that miss the clip region */
+ if (clip_x1 >= unit->crtc.mode.hdisplay ||
+ clip_y1 >= unit->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blits[hit_num].body.destScreenId = unit->unit;
+ blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
+ blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
+ blits[hit_num].body.destRect.left = clip_x1;
+ blits[hit_num].body.destRect.top = clip_y1;
+ blits[hit_num].body.destRect.right = clip_x2;
+ blits[hit_num].body.destRect.bottom = clip_y2;
+ hit_num++;
+ }
+
+ /* no clips hit the crtc */
+ if (hit_num == 0)
+ continue;
+
+ fifo_size = sizeof(*blits) * hit_num;
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
+ fifo_size, 0, NULL);
+
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ kfree(blits);
return ret;
}
-static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
{
- struct ttm_buffer_object *bo;
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(&vfb->base);
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(framebuffer);
+ struct drm_clip_rect norect;
+ int ret, increment = 1;
- if (unlikely(vfbs->buffer == NULL))
- return 0;
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
- bo = &vfbs->buffer->base;
- ttm_bo_unref(&bo);
- vfbs->buffer = NULL;
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ increment = 2;
+ }
- return 0;
+ if (dev_priv->ldu_priv) {
+ ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
+ flags, color,
+ clips, num_clips, increment);
+ } else {
+ ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
+ flags, color,
+ clips, num_clips, increment);
+ }
+
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
}
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+ .destroy = vmw_framebuffer_dmabuf_destroy,
+ .dirty = vmw_framebuffer_dmabuf_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+/**
+ * Pin the dmabuffer to the start of vram.
+ */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -749,10 +875,12 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
+ /* This code should not be used with screen objects */
+ BUG_ON(dev_priv->sou_priv);
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
+ ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
vmw_overlay_resume_all(dev_priv);
@@ -772,7 +900,7 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
return 0;
}
- return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
+ return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
@@ -794,6 +922,33 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
return -EINVAL;
}
+ /* Limited framebuffer color depth support for screen objects */
+ if (dev_priv->sou_priv) {
+ switch (mode_cmd->depth) {
+ case 32:
+ case 24:
+ /* Only support 32 bpp for 32 and 24 depth fbs */
+ if (mode_cmd->bpp == 32)
+ break;
+
+ DRM_ERROR("Invalid color depth/bpp: %d %d\n",
+ mode_cmd->depth, mode_cmd->bpp);
+ return -EINVAL;
+ case 16:
+ case 15:
+ /* Only support 16 bpp for 16 and 15 depth fbs */
+ if (mode_cmd->bpp == 16)
+ break;
+
+ DRM_ERROR("Invalid color depth/bpp: %d %d\n",
+ mode_cmd->depth, mode_cmd->bpp);
+ return -EINVAL;
+ default:
+ DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ return -EINVAL;
+ }
+ }
+
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
@@ -815,9 +970,13 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
- vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
- vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+ if (!dev_priv->sou_priv) {
+ vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+ vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+ }
+ vfbd->base.dmabuf = true;
vfbd->buffer = dmabuf;
+ vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
return 0;
@@ -843,6 +1002,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
+ struct ttm_base_object *user_obj;
u64 required_size;
int ret;
@@ -855,7 +1015,22 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
required_size = mode_cmd->pitch * mode_cmd->height;
if (unlikely(required_size > (u64) dev_priv->vram_size)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Take a reference on the user object of the resource
+ * backing the kms fb. This ensures that user-space handle
+ * lookups on that resource will always work as long as
+ * it's registered with a kms framebuffer. This is important,
+ * since vmw_execbuf_process identifies resources in the
+ * command stream using user-space handles.
+ */
+
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+ if (unlikely(user_obj == NULL)) {
+ DRM_ERROR("Could not locate requested kms frame buffer.\n");
+ return ERR_PTR(-ENOENT);
}
/**
@@ -878,8 +1053,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
- }
+ } else
+ vfb->user_obj = user_obj;
return &vfb->base;
try_dmabuf:
@@ -899,8 +1076,10 @@ try_dmabuf:
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
- }
+ } else
+ vfb->user_obj = user_obj;
return &vfb->base;
@@ -908,6 +1087,7 @@ err_not_scanout:
DRM_ERROR("surface not marked as scanout\n");
/* vmw_user_surface_lookup takes one ref */
vmw_surface_unreference(&surface);
+ ttm_base_object_unref(&user_obj);
return ERR_PTR(-EINVAL);
}
@@ -916,6 +1096,210 @@ static struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
+int vmw_kms_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct vmw_surface *surface,
+ uint32_t sid,
+ int32_t destX, int32_t destY,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips)
+{
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+ int i, k, num_units;
+ int ret = 0; /* silence warning */
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBlitSurfaceToScreen body;
+ } *cmd;
+ SVGASignedRect *blits;
+
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->fb != &vfb->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ BUG_ON(surface == NULL);
+ BUG_ON(!clips || !num_clips);
+
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
+ cmd = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+ return -ENOMEM;
+ }
+
+ /* only need to do this once */
+ memset(cmd, 0, fifo_size);
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+
+ cmd->body.srcRect.left = 0;
+ cmd->body.srcRect.right = surface->sizes[0].width;
+ cmd->body.srcRect.top = 0;
+ cmd->body.srcRect.bottom = surface->sizes[0].height;
+
+ blits = (SVGASignedRect *)&cmd[1];
+ for (i = 0; i < num_clips; i++) {
+ blits[i].left = clips[i].x;
+ blits[i].right = clips[i].x + clips[i].w;
+ blits[i].top = clips[i].y;
+ blits[i].bottom = clips[i].y + clips[i].h;
+ }
+
+ for (k = 0; k < num_units; k++) {
+ struct vmw_display_unit *unit = units[k];
+ int clip_x1 = destX - unit->crtc.x;
+ int clip_y1 = destY - unit->crtc.y;
+ int clip_x2 = clip_x1 + surface->sizes[0].width;
+ int clip_y2 = clip_y1 + surface->sizes[0].height;
+
+ /* skip any crtcs that miss the clip region */
+ if (clip_x1 >= unit->crtc.mode.hdisplay ||
+ clip_y1 >= unit->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ /* need to reset sid as it is changed by execbuf */
+ cmd->body.srcImage.sid = sid;
+
+ cmd->body.destScreenId = unit->unit;
+
+ /*
+ * The blit command is a lot more resilient than the
+ * readback command when it comes to clip rects. So it's
+ * okay to go out of bounds.
+ */
+
+ cmd->body.destRect.left = clip_x1;
+ cmd->body.destRect.right = clip_x2;
+ cmd->body.destRect.top = clip_y1;
+ cmd->body.destRect.bottom = clip_y2;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+ fifo_size, 0, NULL);
+
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips)
+{
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *dmabuf = vfbd->buffer;
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+ int i, k, ret, num_units, blits_pos;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdBlitScreenToGMRFB body;
+ } *blits;
+
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->fb != &vfb->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ BUG_ON(dmabuf == NULL);
+ BUG_ON(!clips || !num_clips);
+
+ /* take a safe guess at fifo size */
+ fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
+ cmd = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ cmd->header = SVGA_CMD_DEFINE_GMRFB;
+ cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
+ cmd->body.format.colorDepth = vfb->base.depth;
+ cmd->body.format.reserved = 0;
+ cmd->body.bytesPerLine = vfb->base.pitch;
+ cmd->body.ptr.gmrId = vfb->user_handle;
+ cmd->body.ptr.offset = 0;
+
+ blits = (void *)&cmd[1];
+ blits_pos = 0;
+ for (i = 0; i < num_units; i++) {
+ struct drm_vmw_rect *c = clips;
+ for (k = 0; k < num_clips; k++, c++) {
+ /* transform clip coords to crtc origin based coords */
+ int clip_x1 = c->x - units[i]->crtc.x;
+ int clip_x2 = c->x - units[i]->crtc.x + c->w;
+ int clip_y1 = c->y - units[i]->crtc.y;
+ int clip_y2 = c->y - units[i]->crtc.y + c->h;
+ int dest_x = c->x;
+ int dest_y = c->y;
+
+ /* compensate for clipping, we negate
+ * a negative number and add that.
+ */
+ if (clip_x1 < 0)
+ dest_x += -clip_x1;
+ if (clip_y1 < 0)
+ dest_y += -clip_y1;
+
+ /* clip */
+ clip_x1 = max(clip_x1, 0);
+ clip_y1 = max(clip_y1, 0);
+ clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
+ clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
+
+ /* and cull any rects that miss the crtc */
+ if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
+ clip_y1 >= units[i]->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+ blits[blits_pos].body.srcScreenId = units[i]->unit;
+ blits[blits_pos].body.destOrigin.x = dest_x;
+ blits[blits_pos].body.destOrigin.y = dest_y;
+
+ blits[blits_pos].body.srcRect.left = clip_x1;
+ blits[blits_pos].body.srcRect.top = clip_y1;
+ blits[blits_pos].body.srcRect.right = clip_x2;
+ blits[blits_pos].body.srcRect.bottom = clip_y2;
+ blits_pos++;
+ }
+ }
+ /* reset size here and use the exact size calculated in the loops above */
+ fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
+ 0, user_fence_rep);
+
+ kfree(cmd);
+
+ return ret;
+}
+
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -929,7 +1313,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
- ret = vmw_kms_init_legacy_display_system(dev_priv);
+ ret = vmw_kms_init_screen_object_display(dev_priv);
+ if (ret) /* Fallback */
+ (void)vmw_kms_init_legacy_display_system(dev_priv);
return 0;
}
@@ -942,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
* drm_encoder_cleanup which takes the lock we deadlock.
*/
drm_mode_config_cleanup(dev_priv->dev);
- vmw_kms_close_legacy_display_system(dev_priv);
+ if (dev_priv->sou_priv)
+ vmw_kms_close_screen_object_display(dev_priv);
+ else
+ vmw_kms_close_legacy_display_system(dev_priv);
return 0;
}
@@ -987,9 +1376,9 @@ out:
return ret;
}
-void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
- unsigned bbp, unsigned depth)
+ unsigned bpp, unsigned depth)
{
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
@@ -997,11 +1386,15 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+
+ if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
+ DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
+ depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
+ return -EINVAL;
+ }
+
+ return 0;
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
@@ -1011,12 +1404,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
- vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
- vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
- vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
- vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_priv->vga_pitchlock =
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
@@ -1065,12 +1453,7 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
- vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
vmw_priv->vga_pitchlock);
@@ -1095,6 +1478,312 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
return 0;
}
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ uint32_t pitch,
+ uint32_t height)
+{
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
+
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ return 0;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+int vmw_enable_vblank(struct drm_device *dev, int crtc)
+{
+ return -ENOSYS;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+void vmw_disable_vblank(struct drm_device *dev, int crtc)
+{
+}
+
+
+/*
+ * Small shared kms functions.
+ */
+
+int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_display_unit *du;
+ struct drm_connector *con;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+ {
+ unsigned int i;
+
+ DRM_INFO("%s: new layout ", __func__);
+ for (i = 0; i < num; i++)
+ DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+ rects[i].w, rects[i].h);
+ DRM_INFO("\n");
+ }
+#endif
+
+ list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+ du = vmw_connector_to_du(con);
+ if (num > du->unit) {
+ du->pref_width = rects[du->unit].w;
+ du->pref_height = rects[du->unit].h;
+ du->pref_active = true;
+ du->gui_x = rects[du->unit].x;
+ du->gui_y = rects[du->unit].y;
+ } else {
+ du->pref_width = 800;
+ du->pref_height = 600;
+ du->pref_active = false;
+ }
+ con->status = vmw_du_connector_detect(con, true);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
+void vmw_du_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
+ r[i], g[i], b[i]);
+ vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
+ vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
+ vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
+ }
+}
+
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+void vmw_du_connector_save(struct drm_connector *connector)
+{
+}
+
+void vmw_du_connector_restore(struct drm_connector *connector)
+{
+}
+
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force)
+{
+ uint32_t num_displays;
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+
+ mutex_lock(&dev_priv->hw_mutex);
+ num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ((vmw_connector_to_du(connector)->unit < num_displays &&
+ du->pref_active) ?
+ connector_status_connected : connector_status_disconnected);
+}
+
+static struct drm_display_mode vmw_kms_connector_builtin[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* Terminate */
+ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
+/**
+ * vmw_guess_mode_timing - Provide fake timings for a
+ * 60Hz vrefresh mode.
+ *
+ * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
+ * members filled in.
+ */
+static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+{
+ mode->hsync_start = mode->hdisplay + 50;
+ mode->hsync_end = mode->hsync_start + 50;
+ mode->htotal = mode->hsync_end + 50;
+
+ mode->vsync_start = mode->vdisplay + 50;
+ mode->vsync_end = mode->vsync_start + 50;
+ mode->vtotal = mode->vsync_end + 50;
+
+ mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+}
+
+
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height)
+{
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *bmode;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ int i;
+
+ /* Add preferred mode */
+ {
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = du->pref_width;
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
+
+ if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+ mode = NULL;
+ }
+
+ if (du->pref_mode) {
+ list_del_init(&du->pref_mode->head);
+ drm_mode_destroy(dev, du->pref_mode);
+ }
+
+ /* mode might be null here, this is intended */
+ du->pref_mode = mode;
+ }
+
+ for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
+ bmode = &vmw_kms_connector_builtin[i];
+ if (bmode->hdisplay > max_width ||
+ bmode->vdisplay > max_height)
+ continue;
+
+ if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+ bmode->vdisplay))
+ continue;
+
+ mode = drm_mode_duplicate(dev, bmode);
+ if (!mode)
+ return 0;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ /* Move the preferred mode first to help apps pick the right mode. */
+ if (du->pref_mode)
+ list_move(&du->pref_mode->head, &connector->probed_modes);
+
+ drm_mode_connector_list_update(connector);
+
+ return 1;
+}
+
+int vmw_du_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1106,6 +1795,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_rect *rects;
unsigned rects_size;
int ret;
+ int i;
+ struct drm_mode_config *mode_config = &dev->mode_config;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -1113,7 +1804,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
- vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
+ vmw_du_update_layout(dev_priv, 1, &def_rect);
goto out_unlock;
}
@@ -1132,7 +1823,18 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
goto out_free;
}
- vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
+ for (i = 0; i < arg->num_outputs; ++i) {
+ if (rects->x < 0 ||
+ rects->y < 0 ||
+ rects->x + rects->w > mode_config->max_width ||
+ rects->y + rects->h > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
out_free:
kfree(rects);
@@ -1140,15 +1842,3 @@ out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
-
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
-{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
-}
-
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8a398a0339b..af8e6e5bd96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -31,6 +31,8 @@
#include "drmP.h"
#include "vmwgfx_drv.h"
+#define VMWGFX_NUM_DISPLAY_UNITS 8
+
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
@@ -45,6 +47,9 @@ struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
+ bool dmabuf;
+ struct ttm_base_object *user_obj;
+ uint32_t user_handle;
};
@@ -83,22 +88,65 @@ struct vmw_display_unit {
int hotspot_y;
unsigned unit;
+
+ /*
+ * Preferred mode tracking.
+ */
+ unsigned pref_width;
+ unsigned pref_height;
+ bool pref_active;
+ struct drm_display_mode *pref_mode;
+
+ /*
+ * GUI positioning
+ */
+ int gui_x;
+ int gui_y;
+ bool is_implicit;
};
+#define vmw_crtc_to_du(x) \
+ container_of(x, struct vmw_display_unit, crtc)
+#define vmw_connector_to_du(x) \
+ container_of(x, struct vmw_display_unit, connector)
+
+
/*
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+void vmw_du_crtc_save(struct drm_crtc *crtc);
+void vmw_du_crtc_restore(struct drm_crtc *crtc);
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+void vmw_du_connector_save(struct drm_connector *connector);
+void vmw_du_connector_restore(struct drm_connector *connector);
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force);
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height);
+int vmw_du_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+
+/*
+ * Screen Objects display functions - vmwgfx_scrn.c
+ */
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b3a2cd5118d..90c5e392849 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,7 +27,6 @@
#include "vmwgfx_kms.h"
-#define VMWGFX_LDU_NUM_DU 8
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
@@ -51,11 +50,6 @@ struct vmw_legacy_display {
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
- unsigned pref_width;
- unsigned pref_height;
- bool pref_active;
- struct drm_display_mode *pref_mode;
-
struct list_head active;
};
@@ -71,20 +65,6 @@ static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
* Legacy Display Unit CRTC functions
*/
-static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
-}
-
static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
@@ -114,10 +94,8 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
fb = entry->base.crtc.fb;
- vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
- fb->bits_per_pixel, fb->depth);
-
- return 0;
+ return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
}
if (!list_empty(&lds->active)) {
@@ -265,9 +243,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
vmw_ldu_del_active(dev_priv, ldu);
- vmw_ldu_commit_list(dev_priv);
-
- return 0;
+ return vmw_ldu_commit_list(dev_priv);
}
@@ -292,21 +268,20 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
vmw_ldu_add_active(dev_priv, ldu, vfb);
- vmw_ldu_commit_list(dev_priv);
-
- return 0;
+ return vmw_ldu_commit_list(dev_priv);
}
static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
- .save = vmw_ldu_crtc_save,
- .restore = vmw_ldu_crtc_restore,
+ .save = vmw_du_crtc_save,
+ .restore = vmw_du_crtc_restore,
.cursor_set = vmw_du_crtc_cursor_set,
.cursor_move = vmw_du_crtc_cursor_move,
- .gamma_set = vmw_ldu_crtc_gamma_set,
+ .gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
.set_config = vmw_ldu_crtc_set_config,
};
+
/*
* Legacy Display Unit encoder functions
*/
@@ -324,183 +299,18 @@ static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
* Legacy Display Unit connector functions
*/
-static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
-static void vmw_ldu_connector_save(struct drm_connector *connector)
-{
-}
-
-static void vmw_ldu_connector_restore(struct drm_connector *connector)
-{
-}
-
-static enum drm_connector_status
- vmw_ldu_connector_detect(struct drm_connector *connector,
- bool force)
-{
- if (vmw_connector_to_ldu(connector)->pref_active)
- return connector_status_connected;
- return connector_status_disconnected;
-}
-
-static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* Terminate */
- { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
-};
-
-static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height)
-{
- struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
- struct drm_device *dev = connector->dev;
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_display_mode *mode = NULL;
- struct drm_display_mode prefmode = { DRM_MODE("preferred",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
- };
- int i;
-
- /* Add preferred mode */
- {
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = ldu->pref_width;
- mode->vdisplay = ldu->pref_height;
- mode->vrefresh = drm_mode_vrefresh(mode);
- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
-
- if (ldu->pref_mode) {
- list_del_init(&ldu->pref_mode->head);
- drm_mode_destroy(dev, ldu->pref_mode);
- }
-
- ldu->pref_mode = mode;
- }
- }
-
- for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
- const struct drm_display_mode *bmode;
-
- bmode = &vmw_ldu_connector_builtin[i];
- if (bmode->hdisplay > max_width ||
- bmode->vdisplay > max_height)
- continue;
-
- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
- bmode->vdisplay))
- continue;
-
- mode = drm_mode_duplicate(dev, bmode);
- if (!mode)
- return 0;
- mode->vrefresh = drm_mode_vrefresh(mode);
-
- drm_mode_probed_add(connector, mode);
- }
-
- drm_mode_connector_list_update(connector);
-
- return 1;
-}
-
-static int vmw_ldu_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
- .dpms = vmw_ldu_connector_dpms,
- .save = vmw_ldu_connector_save,
- .restore = vmw_ldu_connector_restore,
- .detect = vmw_ldu_connector_detect,
- .fill_modes = vmw_ldu_connector_fill_modes,
- .set_property = vmw_ldu_connector_set_property,
+ .dpms = vmw_du_connector_dpms,
+ .save = vmw_du_connector_save,
+ .restore = vmw_du_connector_restore,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
+ .set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
};
@@ -523,23 +333,26 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
INIT_LIST_HEAD(&ldu->active);
- ldu->pref_active = (unit == 0);
- ldu->pref_width = 800;
- ldu->pref_height = 600;
- ldu->pref_mode = NULL;
+ ldu->base.pref_active = (unit == 0);
+ ldu->base.pref_width = 800;
+ ldu->base.pref_height = 600;
+ ldu->base.pref_mode = NULL;
+ ldu->base.is_implicit = true;
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- connector->status = vmw_ldu_connector_detect(connector, true);
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ connector->status = vmw_du_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_VIRTUAL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
drm_connector_attach_property(connector,
dev->mode_config.dirty_info_property,
1);
@@ -550,8 +363,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- int i;
- int ret;
+ int i, ret;
if (dev_priv->ldu_priv) {
DRM_INFO("ldu system already on\n");
@@ -559,7 +371,6 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
}
dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
-
if (!dev_priv->ldu_priv)
return -ENOMEM;
@@ -568,18 +379,31 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
- drm_mode_create_dirty_info_property(dev_priv->dev);
+ /* for old hardware without multimon, only enable one display */
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+ ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+ else
+ ret = drm_vblank_init(dev, 1);
+ if (ret != 0)
+ goto err_free;
+
+ ret = drm_mode_create_dirty_info_property(dev);
+ if (ret != 0)
+ goto err_vblank_cleanup;
- if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
- for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+ for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_ldu_init(dev_priv, i);
- ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
- } else {
- /* for old hardware without multimon only enable one display */
+ else
vmw_ldu_init(dev_priv, 0);
- ret = drm_vblank_init(dev, 1);
- }
+ return 0;
+
+err_vblank_cleanup:
+ drm_vblank_cleanup(dev);
+err_free:
+ kfree(dev_priv->ldu_priv);
+ dev_priv->ldu_priv = NULL;
return ret;
}
@@ -587,52 +411,14 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- drm_vblank_cleanup(dev);
if (!dev_priv->ldu_priv)
return -ENOSYS;
+ drm_vblank_cleanup(dev);
+
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
kfree(dev_priv->ldu_priv);
return 0;
}
-
-int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects)
-{
- struct drm_device *dev = dev_priv->dev;
- struct vmw_legacy_display_unit *ldu;
- struct drm_connector *con;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
-
-#if 0
- DRM_INFO("%s: new layout ", __func__);
- for (i = 0; i < (int)num; i++)
- DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
- rects[i].w, rects[i].h);
- DRM_INFO("\n");
-#else
- (void)i;
-#endif
-
- list_for_each_entry(con, &dev->mode_config.connector_list, head) {
- ldu = vmw_connector_to_ldu(con);
- if (num > ldu->base.unit) {
- ldu->pref_width = rects[ldu->base.unit].w;
- ldu->pref_height = rects[ldu->base.unit].h;
- ldu->pref_active = true;
- } else {
- ldu->pref_width = 800;
- ldu->pref_height = 600;
- ldu->pref_active = false;
- }
- con->status = vmw_ldu_connector_detect(con, true);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
new file mode 100644
index 00000000000..8a8725c2716
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -0,0 +1,171 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_marker {
+ struct list_head head;
+ uint32_t seqno;
+ struct timespec submitted;
+};
+
+void vmw_marker_queue_init(struct vmw_marker_queue *queue)
+{
+ INIT_LIST_HEAD(&queue->head);
+ queue->lag = ns_to_timespec(0);
+ getrawmonotonic(&queue->lag_time);
+ spin_lock_init(&queue->lock);
+}
+
+void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
+{
+ struct vmw_marker *marker, *next;
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(marker, next, &queue->head, head) {
+ kfree(marker);
+ }
+ spin_unlock(&queue->lock);
+}
+
+int vmw_marker_push(struct vmw_marker_queue *queue,
+ uint32_t seqno)
+{
+ struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
+
+ if (unlikely(!marker))
+ return -ENOMEM;
+
+ marker->seqno = seqno;
+ getrawmonotonic(&marker->submitted);
+ spin_lock(&queue->lock);
+ list_add_tail(&marker->head, &queue->head);
+ spin_unlock(&queue->lock);
+
+ return 0;
+}
+
+int vmw_marker_pull(struct vmw_marker_queue *queue,
+ uint32_t signaled_seqno)
+{
+ struct vmw_marker *marker, *next;
+ struct timespec now;
+ bool updated = false;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+
+ if (list_empty(&queue->head)) {
+ queue->lag = ns_to_timespec(0);
+ queue->lag_time = now;
+ updated = true;
+ goto out_unlock;
+ }
+
+ list_for_each_entry_safe(marker, next, &queue->head, head) {
+ if (signaled_seqno - marker->seqno > (1 << 30))
+ continue;
+
+ queue->lag = timespec_sub(now, marker->submitted);
+ queue->lag_time = now;
+ updated = true;
+ list_del(&marker->head);
+ kfree(marker);
+ }
+
+out_unlock:
+ spin_unlock(&queue->lock);
+
+ return (updated) ? 0 : -EBUSY;
+}
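A small userspace sketch (not part of the patch) of the wrap-safe sequence-number test used in vmw_marker_pull() above: the unsigned difference between the signaled seqno and the marker's seqno is compared against 2^30, so markers remain correctly classified even across a 32-bit wrap. The helper name is invented.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool marker_still_pending(uint32_t signaled, uint32_t marker_seqno)
{
	/* same test as in vmw_marker_pull(): unsigned difference vs 2^30 */
	return signaled - marker_seqno > (1u << 30);
}

int main(void)
{
	/* ordinary case: marker 10 is covered once seqno 12 has signaled */
	assert(!marker_still_pending(12, 10));
	/* wrap case: seqno 2 signaled after marker 0xfffffffe still covers it */
	assert(!marker_still_pending(0x00000002u, 0xfffffffeu));
	/* a marker ahead of the signaled seqno is skipped (still pending) */
	assert(marker_still_pending(10, 12));
	return 0;
}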
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+ struct timespec t2)
+{
+ t1.tv_sec += t2.tv_sec;
+ t1.tv_nsec += t2.tv_nsec;
+ if (t1.tv_nsec >= 1000000000L) {
+ t1.tv_sec += 1;
+ t1.tv_nsec -= 1000000000L;
+ }
+
+ return t1;
+}
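A quick userspace check (not part of the patch) of the carry handling above, assuming both operands are already normalized (tv_nsec below one second) so at most one carry is needed: 0.7 s + 0.6 s should come out as 1.3 s.

#include <assert.h>
#include <time.h>

static struct timespec ts_add(struct timespec t1, struct timespec t2)
{
	/* same normalization as vmw_timespec_add() above */
	t1.tv_sec += t2.tv_sec;
	t1.tv_nsec += t2.tv_nsec;
	if (t1.tv_nsec >= 1000000000L) {
		t1.tv_sec += 1;
		t1.tv_nsec -= 1000000000L;
	}
	return t1;
}

int main(void)
{
	struct timespec a = { .tv_sec = 0, .tv_nsec = 700000000L };	/* 0.7 s */
	struct timespec b = { .tv_sec = 0, .tv_nsec = 600000000L };	/* 0.6 s */
	struct timespec r = ts_add(a, b);

	assert(r.tv_sec == 1 && r.tv_nsec == 300000000L);	/* 1.3 s */
	return 0;
}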
+
+static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
+{
+ struct timespec now;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+ queue->lag = vmw_timespec_add(queue->lag,
+ timespec_sub(now, queue->lag_time));
+ queue->lag_time = now;
+ spin_unlock(&queue->lock);
+ return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_marker_queue *queue,
+ uint32_t us)
+{
+ struct timespec lag, cond;
+
+ cond = ns_to_timespec((s64) us * 1000);
+ lag = vmw_fifo_lag(queue);
+ return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_marker_queue *queue, uint32_t us)
+{
+ struct vmw_marker *marker;
+ uint32_t seqno;
+ int ret;
+
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+ seqno = atomic_read(&dev_priv->marker_seq);
+ else {
+ marker = list_first_entry(&queue->head,
+ struct vmw_marker, head);
+ seqno = marker->seqno;
+ }
+ spin_unlock(&queue->lock);
+
+ ret = vmw_wait_seqno(dev_priv, false, seqno, true,
+ 3*HZ);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ (void) vmw_marker_pull(queue, seqno);
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 07ce02da78a..14399eec9c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -87,48 +87,6 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
}
/**
- * Pin or unpin a buffer in vram.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to pin or unpin.
- * @pin: Pin buffer in vram if true.
- * @interruptible: Use interruptible wait.
- *
- * Takes the current masters ttm lock in read.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
-{
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement *overlay_placement = &vmw_vram_placement;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
- if (unlikely(ret != 0))
- goto err;
-
- if (pin)
- overlay_placement = &vmw_vram_ne_placement;
-
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_read_unlock(&dev_priv->active_master->lock);
-
- return ret;
-}
-
-/**
* Send put command to hw.
*
* Returns
@@ -139,68 +97,80 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
+ struct vmw_escape_video_flush *flush;
+ size_t fifo_size;
+ bool have_so = dev_priv->sou_priv ? true : false;
+ int i, num_items;
+ SVGAGuestPtr ptr;
+
struct {
struct vmw_escape_header escape;
struct {
- struct {
- uint32_t cmdType;
- uint32_t streamId;
- } header;
- struct {
- uint32_t registerId;
- uint32_t value;
- } items[SVGA_VIDEO_PITCH_3 + 1];
- } body;
- struct vmw_escape_video_flush flush;
+ uint32_t cmdType;
+ uint32_t streamId;
+ } header;
} *cmds;
- uint32_t offset;
- int i, ret;
+ struct {
+ uint32_t registerId;
+ uint32_t value;
+ } *items;
- for (;;) {
- cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
- if (cmds)
- break;
+ /* the register defines are indices; the item count is the highest index + 1 */
+ if (have_so)
+ num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
+ else
+ num_items = SVGA_VIDEO_PITCH_3 + 1;
- ret = vmw_fallback_wait(dev_priv, false, true, 0,
- interruptible, 3*HZ);
- if (interruptible && ret == -ERESTARTSYS)
- return ret;
- else
- BUG_ON(ret != 0);
+ fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
+
+ cmds = vmw_fifo_reserve(dev_priv, fifo_size);
+ /* hardware has hung, can't do anything here */
+ if (!cmds)
+ return -ENOMEM;
+
+ items = (typeof(items))&cmds[1];
+ flush = (struct vmw_escape_video_flush *)&items[num_items];
+
+ /* the size is header + number of items */
+ fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
+
+ cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->header.streamId = arg->stream_id;
+
+ /* the IDs are neatly numbered */
+ for (i = 0; i < num_items; i++)
+ items[i].registerId = i;
+
+ vmw_bo_get_guest_ptr(&buf->base, &ptr);
+ ptr.offset += arg->offset;
+
+ items[SVGA_VIDEO_ENABLED].value = true;
+ items[SVGA_VIDEO_FLAGS].value = arg->flags;
+ items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
+ items[SVGA_VIDEO_FORMAT].value = arg->format;
+ items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
+ items[SVGA_VIDEO_SIZE].value = arg->size;
+ items[SVGA_VIDEO_WIDTH].value = arg->width;
+ items[SVGA_VIDEO_HEIGHT].value = arg->height;
+ items[SVGA_VIDEO_SRC_X].value = arg->src.x;
+ items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
+ items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
+ items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
+ items[SVGA_VIDEO_DST_X].value = arg->dst.x;
+ items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
+ items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
+ items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
+ items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
+ items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
+ items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
+ if (have_so) {
+ items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
+ items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
}
- fill_escape(&cmds->escape, sizeof(cmds->body));
- cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
- cmds->body.header.streamId = arg->stream_id;
-
- for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
- cmds->body.items[i].registerId = i;
-
- offset = buf->base.offset + arg->offset;
-
- cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
- cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
- cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
- cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
- cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
- cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
- cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
- cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
- cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
- cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
- cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
- cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
- cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
- cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
- cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
- cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
- cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
- cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
- cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
-
- fill_flush(&cmds->flush, arg->stream_id);
+ fill_flush(flush, arg->stream_id);
- vmw_fifo_commit(dev_priv, sizeof(*cmds));
+ vmw_fifo_commit(dev_priv, fifo_size);
return 0;
}
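The rewritten vmw_overlay_send_put() above reserves one FIFO block and carves it into a fixed header, a variable number of register items, and a trailing flush using plain pointer arithmetic. A small userspace sketch of that layout (not part of the patch; the struct names, the register count of 19, and malloc() standing in for vmw_fifo_reserve() are all invented):

#include <stdint.h>
#include <stdlib.h>

struct hdr   { uint32_t cmdType, streamId; };
struct item  { uint32_t registerId, value; };
struct flush { uint32_t stub[4]; };	/* stand-in for the real flush command */

int main(void)
{
	unsigned num_items = 19;	/* hypothetical register count */
	size_t fifo_size = sizeof(struct hdr) +
			   sizeof(struct item) * num_items +
			   sizeof(struct flush);
	struct hdr *cmds = malloc(fifo_size);	/* stands in for vmw_fifo_reserve() */
	struct item *items;
	struct flush *fl;
	unsigned i;

	if (!cmds)
		return 1;

	items = (struct item *)&cmds[1];	/* items start right after the header */
	fl = (struct flush *)&items[num_items];	/* flush follows the last item */

	for (i = 0; i < num_items; i++)
		items[i].registerId = i;	/* register IDs are simply 0..n-1 */

	(void)fl;
	free(cmds);
	return 0;
}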
@@ -248,6 +218,25 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
}
/**
+ * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
+ *
+ * With the introduction of screen objects, buffers can now be
+ * used with GMRs instead of being locked to vram.
+ */
+static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool inter)
+{
+ if (!pin)
+ return vmw_dmabuf_unpin(dev_priv, buf, inter);
+
+ if (!dev_priv->sou_priv)
+ return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+
+ return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+}
+
+/**
* Stop or pause a stream.
*
* If the stream is paused the no evict flag is removed from the buffer
@@ -279,8 +268,8 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
return ret;
/* We just remove the NO_EVICT flag so no -ENOMEM */
- ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
- interruptible);
+ ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
+ interruptible);
if (interruptible && ret == -ERESTARTSYS)
return ret;
else
@@ -342,7 +331,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
/* We don't start the old stream if we are interrupted.
* Might return -ENOMEM if it can't fit the buffer in vram.
*/
- ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
+ ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
if (ret)
return ret;
@@ -351,7 +340,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
/* This one needs to happen no matter what. We only remove
* the NO_EVICT flag so this is safe from -ENOMEM.
*/
- BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
+ BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
+ != 0);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index bfe1bcce7f8..86c5e4cceb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -31,10 +31,6 @@
#include "ttm/ttm_placement.h"
#include "drmP.h"
-#define VMW_RES_CONTEXT ttm_driver_type0
-#define VMW_RES_SURFACE ttm_driver_type1
-#define VMW_RES_STREAM ttm_driver_type2
-
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
@@ -43,6 +39,7 @@ struct vmw_user_context {
struct vmw_user_surface {
struct ttm_base_object base;
struct vmw_surface srf;
+ uint32_t size;
};
struct vmw_user_dma_buffer {
@@ -65,6 +62,17 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
+struct vmw_surface_offset {
+ uint32_t face;
+ uint32_t mip;
+ uint32_t bo_offset;
+};
+
+
+static uint64_t vmw_user_context_size;
+static uint64_t vmw_user_surface_size;
+static uint64_t vmw_user_stream_size;
+
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -84,13 +92,36 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+
+/**
+ * vmw_resource_release_id - release a resource id to the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Release the resource id to the resource id manager and set @res->id to -1.
+ */
+static void vmw_resource_release_id(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ write_lock(&dev_priv->resource_lock);
+ if (res->id != -1)
+ idr_remove(res->idr, res->id);
+ res->id = -1;
+ write_unlock(&dev_priv->resource_lock);
+}
+
static void vmw_resource_release(struct kref *kref)
{
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
+ int id = res->id;
+ struct idr *idr = res->idr;
- idr_remove(res->idr, res->id);
+ res->avail = false;
+ if (res->remove_from_lists != NULL)
+ res->remove_from_lists(res);
write_unlock(&dev_priv->resource_lock);
if (likely(res->hw_destroy != NULL))
@@ -102,6 +133,9 @@ static void vmw_resource_release(struct kref *kref)
kfree(res);
write_lock(&dev_priv->resource_lock);
+
+ if (id != -1)
+ idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -115,28 +149,29 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
write_unlock(&dev_priv->resource_lock);
}
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- void (*res_free) (struct vmw_resource *res))
+
+/**
+ * vmw_resource_alloc_id - allocate a resource id from the id manager.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @res: Pointer to the resource.
+ *
+ * Allocate the lowest free resource id from the id manager, and set
+ * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
+ */
+static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
+ struct vmw_resource *res)
{
int ret;
- kref_init(&res->kref);
- res->hw_destroy = NULL;
- res->res_free = res_free;
- res->res_type = obj_type;
- res->idr = idr;
- res->avail = false;
- res->dev_priv = dev_priv;
+ BUG_ON(res->id != -1);
do {
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
return -ENOMEM;
write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(idr, res, 1, &res->id);
+ ret = idr_get_new_above(res->idr, res, 1, &res->id);
write_unlock(&dev_priv->resource_lock);
} while (ret == -EAGAIN);
@@ -144,6 +179,33 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
return ret;
}
+
+static int vmw_resource_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ struct idr *idr,
+ enum ttm_object_type obj_type,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ void (*remove_from_lists)
+ (struct vmw_resource *res))
+{
+ kref_init(&res->kref);
+ res->hw_destroy = NULL;
+ res->res_free = res_free;
+ res->remove_from_lists = remove_from_lists;
+ res->res_type = obj_type;
+ res->idr = idr;
+ res->avail = false;
+ res->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&res->query_head);
+ INIT_LIST_HEAD(&res->validate_head);
+ res->id = -1;
+ if (delay_id)
+ return 0;
+ else
+ return vmw_resource_alloc_id(dev_priv, res);
+}
+
/**
* vmw_resource_activate
*
@@ -198,8 +260,12 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyContext body;
- } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ } *cmd;
+
+ vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
@@ -211,7 +277,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, false);
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -226,14 +292,17 @@ static int vmw_context_init(struct vmw_private *dev_priv,
} *cmd;
ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, res_free);
+ VMW_RES_CONTEXT, false, res_free, NULL);
if (unlikely(ret != 0)) {
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
+ DRM_ERROR("Failed to allocate a resource id.\n");
+ goto out_early;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+ DRM_ERROR("Out of hw context ids.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
}
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
@@ -248,9 +317,16 @@ static int vmw_context_init(struct vmw_private *dev_priv,
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv);
+ (void) vmw_3d_resource_inc(dev_priv, false);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+
+out_early:
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
@@ -273,8 +349,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
+ struct vmw_private *dev_priv = res->dev_priv;
kfree(ctx);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
}
/**
@@ -328,23 +407,56 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ struct vmw_user_context *ctx;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- if (unlikely(ctx == NULL))
- return -ENOMEM;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of contexts anyway.
+ */
+
+ if (unlikely(vmw_user_context_size == 0))
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_context_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for context"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(ctx == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
res = &ctx->res;
ctx->base.shareable = false;
ctx->base.tfile = NULL;
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
if (unlikely(ret != 0))
- return ret;
+ goto out_unlock;
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
@@ -358,13 +470,16 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
arg->cid = res->id;
out_err:
vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
return ret;
}
int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
- int id)
+ int id,
+ struct vmw_resource **p_res)
{
struct vmw_resource *res;
int ret = 0;
@@ -376,6 +491,8 @@ int vmw_context_check(struct vmw_private *dev_priv,
container_of(res, struct vmw_user_context, res);
if (ctx->base.tfile != tfile && !ctx->base.shareable)
ret = -EPERM;
+ if (p_res)
+ *p_res = vmw_resource_reference(res);
} else
ret = -EINVAL;
read_unlock(&dev_priv->resource_lock);
@@ -383,102 +500,638 @@ int vmw_context_check(struct vmw_private *dev_priv,
return ret;
}
+struct vmw_bpp {
+ uint8_t bpp;
+ uint8_t s_bpp;
+};
+
+/*
+ * Size table for the supported SVGA3D surface formats. Each entry
+ * consists of two values: the bpp value, and the s_bpp value, which is
+ * short for "stride bits per pixel". The values are given so that the
+ * minimum stride for the image is calculated using
+ *
+ * min_stride = w*s_bpp
+ *
+ * and the total memory requirement for the image is
+ *
+ * h*min_stride*bpp/s_bpp
+ *
+ */
+static const struct vmw_bpp vmw_sf_bpp[] = {
+ [SVGA3D_FORMAT_INVALID] = {0, 0},
+ [SVGA3D_X8R8G8B8] = {32, 32},
+ [SVGA3D_A8R8G8B8] = {32, 32},
+ [SVGA3D_R5G6B5] = {16, 16},
+ [SVGA3D_X1R5G5B5] = {16, 16},
+ [SVGA3D_A1R5G5B5] = {16, 16},
+ [SVGA3D_A4R4G4B4] = {16, 16},
+ [SVGA3D_Z_D32] = {32, 32},
+ [SVGA3D_Z_D16] = {16, 16},
+ [SVGA3D_Z_D24S8] = {32, 32},
+ [SVGA3D_Z_D15S1] = {16, 16},
+ [SVGA3D_LUMINANCE8] = {8, 8},
+ [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
+ [SVGA3D_LUMINANCE16] = {16, 16},
+ [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
+ [SVGA3D_DXT1] = {4, 16},
+ [SVGA3D_DXT2] = {8, 32},
+ [SVGA3D_DXT3] = {8, 32},
+ [SVGA3D_DXT4] = {8, 32},
+ [SVGA3D_DXT5] = {8, 32},
+ [SVGA3D_BUMPU8V8] = {16, 16},
+ [SVGA3D_BUMPL6V5U5] = {16, 16},
+ [SVGA3D_BUMPX8L8V8U8] = {32, 32},
+ [SVGA3D_ARGB_S10E5] = {16, 16},
+ [SVGA3D_ARGB_S23E8] = {32, 32},
+ [SVGA3D_A2R10G10B10] = {32, 32},
+ [SVGA3D_V8U8] = {16, 16},
+ [SVGA3D_Q8W8V8U8] = {32, 32},
+ [SVGA3D_CxV8U8] = {16, 16},
+ [SVGA3D_X8L8V8U8] = {32, 32},
+ [SVGA3D_A2W10V10U10] = {32, 32},
+ [SVGA3D_ALPHA8] = {8, 8},
+ [SVGA3D_R_S10E5] = {16, 16},
+ [SVGA3D_R_S23E8] = {32, 32},
+ [SVGA3D_RG_S10E5] = {16, 16},
+ [SVGA3D_RG_S23E8] = {32, 32},
+ [SVGA3D_BUFFER] = {8, 8},
+ [SVGA3D_Z_D24X8] = {32, 32},
+ [SVGA3D_V16U16] = {32, 32},
+ [SVGA3D_G16R16] = {32, 32},
+ [SVGA3D_A16B16G16R16] = {64, 64},
+ [SVGA3D_UYVY] = {12, 12},
+ [SVGA3D_YUY2] = {12, 12},
+ [SVGA3D_NV12] = {12, 8},
+ [SVGA3D_AYUV] = {32, 32},
+ [SVGA3D_BC4_UNORM] = {4, 16},
+ [SVGA3D_BC5_UNORM] = {8, 32},
+ [SVGA3D_Z_DF16] = {16, 16},
+ [SVGA3D_Z_DF24] = {24, 24},
+ [SVGA3D_Z_D24S8_INT] = {32, 32}
+};
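To make the bpp/s_bpp convention concrete, here is a small userspace sketch (not part of the patch) that applies the two formulas above to SVGA3D_DXT1 with a made-up three-level mip chain; the same arithmetic appears in the guest.pitch computation in vmw_surface_dma_encode() and in the backup_size accumulation in vmw_surface_define_ioctl() further down.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* SVGA3D_DXT1 from the table above: bpp = 4, s_bpp = 16 */
	const uint32_t bpp = 4, s_bpp = 16;
	/* a made-up three-level mip chain */
	const struct { uint32_t w, h, d; } mips[] = {
		{ 256, 256, 1 }, { 128, 128, 1 }, { 64, 64, 1 },
	};
	uint32_t offset = 0;
	unsigned i;

	for (i = 0; i < sizeof(mips) / sizeof(mips[0]); i++) {
		/* min_stride = w * s_bpp bits, rounded up to whole bytes */
		uint32_t stride = (mips[i].w * s_bpp + 7) >> 3;
		/* total memory = h * min_stride * bpp / s_bpp */
		uint32_t size = stride * mips[i].h * mips[i].d * bpp / s_bpp;

		printf("mip %u: stride %u, size %u, offset %u\n",
		       i, stride, size, offset);
		offset += size;
	}
	printf("total backup size: %u bytes\n", offset);	/* 43008 */
	return 0;
}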
+
/**
* Surface management.
*/
+struct vmw_surface_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+ SVGA3dCopyBox cb;
+ SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+struct vmw_surface_define {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+};
+
+struct vmw_surface_destroy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+ return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+ return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+ return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+ void *cmd_space)
+{
+ struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+ cmd_space;
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+ void *cmd_space)
+{
+ struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+ cmd_space;
+ struct drm_vmw_size *src_size;
+ SVGA3dSize *cmd_size;
+ uint32_t cmd_len;
+ int i;
+
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = src_size->width;
+ cmd_size->height = src_size->height;
+ cmd_size->depth = src_size->depth;
+ }
+}
+
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+ void *cmd_space,
+ const SVGAGuestPtr *ptr,
+ bool to_surface)
+{
+ uint32_t i;
+ uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
+ uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
+ struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+
+ for (i = 0; i < srf->num_sizes; ++i) {
+ SVGA3dCmdHeader *header = &cmd->header;
+ SVGA3dCmdSurfaceDMA *body = &cmd->body;
+ SVGA3dCopyBox *cb = &cmd->cb;
+ SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+ const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+ const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+ header->id = SVGA_3D_CMD_SURFACE_DMA;
+ header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+ body->guest.ptr = *ptr;
+ body->guest.ptr.offset += cur_offset->bo_offset;
+ body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
+ body->host.sid = srf->res.id;
+ body->host.face = cur_offset->face;
+ body->host.mipmap = cur_offset->mip;
+ body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM);
+ cb->x = 0;
+ cb->y = 0;
+ cb->z = 0;
+ cb->srcx = 0;
+ cb->srcy = 0;
+ cb->srcz = 0;
+ cb->w = cur_size->width;
+ cb->h = cur_size->height;
+ cb->d = cur_size->depth;
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = body->guest.pitch*cur_size->height*
+ cur_size->depth*bpp / stride_bpp;
+ suffix->flags.discard = 0;
+ suffix->flags.unsynchronized = 0;
+ suffix->flags.reserved = 0;
+ ++cmd;
+ }
+};
+
+
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
- } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ struct vmw_surface *srf;
+ void *cmd;
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
+ if (res->id != -1) {
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.sid = cpu_to_le32(res->id);
+ cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv);
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+ /*
+ * TODO: Consider making used_memory_size atomic, or using a
+ * separate lock, to avoid taking dev_priv::cmdbuf_mutex in
+ * the destroy path.
+ */
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ srf = container_of(res, struct vmw_surface, res);
+ dev_priv->used_memory_size -= srf->backup_size;
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ }
+ vmw_3d_resource_dec(dev_priv, false);
}
void vmw_surface_res_free(struct vmw_resource *res)
{
struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+ if (srf->backup)
+ ttm_bo_unref(&srf->backup);
+ kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
kfree(srf);
}
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
+
+/**
+ * vmw_surface_do_validate - make a surface available to the device.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id, allocate one, and optionally
+ * DMA the backed up surface contents to the device.
+ *
+ * Returns -EBUSY if there weren't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+int vmw_surface_do_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf)
{
- int ret;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
- } *cmd;
- SVGA3dSize *cmd_size;
struct vmw_resource *res = &srf->res;
- struct drm_vmw_size *src_size;
- size_t submit_size;
- uint32_t cmd_len;
- int i;
+ struct list_head val_list;
+ struct ttm_validate_buffer val_buf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
- BUG_ON(res_free == NULL);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, res_free);
+ if (likely(res->id != -1))
+ return 0;
+
+ if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
+ dev_priv->memory_size))
+ return -EBUSY;
+
+ /*
+ * Reserve- and validate the backup DMA bo.
+ */
+
+ if (srf->backup) {
+ INIT_LIST_HEAD(&val_list);
+ val_buf.bo = ttm_bo_reference(srf->backup);
+ val_buf.new_sync_obj_arg = (void *)((unsigned long)
+ DRM_VMW_FENCE_FLAG_EXEC);
+ list_add_tail(&val_buf.head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
+ true, false, false);
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+ }
+
+ /*
+ * Alloc id for the resource.
+ */
+ ret = vmw_resource_alloc_id(dev_priv, res);
if (unlikely(ret != 0)) {
- res_free(res);
- return ret;
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+ if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
}
- submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ /*
+ * Encode surface define- and dma commands.
+ */
+
+ submit_size = vmw_surface_define_size(srf);
+ if (srf->backup)
+ submit_size += vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed for create surface.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "validation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
- cmd->header.size = cpu_to_le32(cmd_len);
- cmd->body.sid = cpu_to_le32(res->id);
- cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- cmd->body.face[i].numMipLevels =
- cpu_to_le32(srf->mip_levels[i]);
+ vmw_surface_define_encode(srf, cmd);
+ if (srf->backup) {
+ SVGAGuestPtr ptr;
+
+ cmd += vmw_surface_define_size(srf);
+ vmw_bo_get_guest_ptr(srf->backup, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, true);
}
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
+ vmw_fifo_commit(dev_priv, submit_size);
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = cpu_to_le32(src_size->width);
- cmd_size->height = cpu_to_le32(src_size->height);
- cmd_size->depth = cpu_to_le32(src_size->depth);
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ if (srf->backup) {
+ struct vmw_fence_obj *fence;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+ ttm_eu_fence_buffer_objects(&val_list, fence);
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+ ttm_bo_unref(&val_buf.bo);
+ ttm_bo_unref(&srf->backup);
+ }
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size += srf->backup_size;
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+out_no_validate:
+ if (srf->backup)
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ if (srf->backup)
+ ttm_bo_unref(&val_buf.bo);
+ return ret;
+}
+
+/**
+ * vmw_surface_evict - Evict a hw surface.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * DMA the contents of a hw surface to a backup guest buffer object,
+ * and destroy the hw surface, releasing its id.
+ */
+int vmw_surface_evict(struct vmw_private *dev_priv,
+ struct vmw_surface *srf)
+{
+ struct vmw_resource *res = &srf->res;
+ struct list_head val_list;
+ struct ttm_validate_buffer val_buf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
+ struct vmw_fence_obj *fence;
+ SVGAGuestPtr ptr;
+
+ BUG_ON(res->id == -1);
+
+ /*
+ * Create a surface backup buffer object.
+ */
+
+ if (!srf->backup) {
+ ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
+ ttm_bo_type_device,
+ &vmw_srf_placement, 0, 0, true,
+ NULL, &srf->backup);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ /*
+ * Reserve- and validate the backup DMA bo.
+ */
+
+ INIT_LIST_HEAD(&val_list);
+ val_buf.bo = ttm_bo_reference(srf->backup);
+ val_buf.new_sync_obj_arg = (void *)(unsigned long)
+ DRM_VMW_FENCE_FLAG_EXEC;
+ list_add_tail(&val_buf.head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
+ true, false, false);
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+
+
+ /*
+ * Encode the dma- and surface destroy commands.
+ */
+
+ submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "eviction.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
}
+ vmw_bo_get_guest_ptr(srf->backup, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, false);
+ cmd += vmw_surface_dma_size(srf);
+ vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, submit_size);
- (void) vmw_3d_resource_inc(dev_priv);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size -= srf->backup_size;
+
+ /*
+ * Create a fence object and fence the DMA buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+ ttm_eu_fence_buffer_objects(&val_list, fence);
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+ ttm_bo_unref(&val_buf.bo);
+
+ /*
+ * Release the surface ID.
+ */
+
+ vmw_resource_release_id(res);
+
return 0;
+
+out_no_fifo:
+out_no_validate:
+ if (srf->backup)
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ ttm_bo_unref(&val_buf.bo);
+ ttm_bo_unref(&srf->backup);
+ return ret;
+}
+
+
+/**
+ * vmw_surface_validate - make a surface available to the device, evicting
+ * other surfaces if needed.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface.
+ *
+ * Try to validate a surface and if it fails due to limited device resources,
+ * repeatedly try to evict other surfaces until the request can be
+ * accommodated.
+ *
+ * May return errors if out of resources.
+ */
+int vmw_surface_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf)
+{
+ int ret;
+ struct vmw_surface *evict_srf;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+ list_del_init(&srf->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ ret = vmw_surface_do_validate(dev_priv, srf);
+ if (likely(ret != -EBUSY))
+ break;
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(&dev_priv->surface_lru)) {
+ DRM_ERROR("Out of device memory for surfaces.\n");
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+ break;
+ }
+
+ evict_srf = vmw_surface_reference
+ (list_first_entry(&dev_priv->surface_lru,
+ struct vmw_surface,
+ lru_head));
+ list_del_init(&evict_srf->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+ (void) vmw_surface_evict(dev_priv, evict_srf);
+
+ vmw_surface_unreference(&evict_srf);
+
+ } while (1);
+
+ if (unlikely(ret != 0 && srf->res.id != -1)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
+ write_unlock(&dev_priv->resource_lock);
+ }
+
+ return ret;
+}
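The evict-and-retry policy documented above can be pictured with a toy userspace model (not part of the patch; the budget, the surface sizes, and the helper names are invented): validation reports busy while the request does not fit, and the caller evicts the least recently used surface and retries.

#include <stdint.h>
#include <stdio.h>

#define DEVICE_BUDGET 100u	/* made-up device memory budget */

static uint32_t used;

static int try_validate(uint32_t need)
{
	if (used + need >= DEVICE_BUDGET)
		return -1;	/* stands in for -EBUSY */
	used += need;
	return 0;
}

int main(void)
{
	uint32_t lru_sizes[] = { 40, 30 };	/* two already-resident surfaces */
	unsigned victims = 0;

	used = 70;
	/* evict the least recently used surface until the new one fits */
	while (try_validate(50) != 0 && victims < 2)
		used -= lru_sizes[victims++];

	printf("evicted %u surface(s), used now %u of %u\n",
	       victims, used, DEVICE_BUDGET);	/* evicted 1, used 80 */
	return 0;
}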
+
+
+/**
+ * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
+ *
+ * As part of the resource destruction, remove the surface from any
+ * lookup lists.
+ */
+static void vmw_surface_remove_from_lists(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+
+ list_del_init(&srf->lru_head);
+}
+
+int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_resource *res = &srf->res;
+
+ BUG_ON(res_free == NULL);
+ INIT_LIST_HEAD(&srf->lru_head);
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
+ VMW_RES_SURFACE, true, res_free,
+ vmw_surface_remove_from_lists);
+
+ if (unlikely(ret != 0))
+ res_free(res);
+
+ /*
+ * The surface won't be visible to hardware until a
+ * surface validate.
+ */
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return ret;
}
static void vmw_user_surface_free(struct vmw_resource *res)
@@ -486,12 +1139,58 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
+ struct vmw_private *dev_priv = srf->res.dev_priv;
+ uint32_t size = user_srf->size;
+ if (srf->backup)
+ ttm_bo_unref(&srf->backup);
+ kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
kfree(user_srf);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_resource_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: list of resources to unreserve.
+ *
+ * Currently only surfaces are considered: the function traverses the
+ * resource list and puts each resource that is a surface back on the
+ * device's surface LRU list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct list_head *list)
+{
+ struct vmw_resource *res;
+ struct vmw_surface *srf;
+ rwlock_t *lock = NULL;
+
+ list_for_each_entry(res, list, validate_head) {
+
+ if (res->res_free != &vmw_surface_res_free &&
+ res->res_free != &vmw_user_surface_free)
+ continue;
+
+ if (unlikely(lock == NULL)) {
+ lock = &res->dev_priv->resource_lock;
+ write_lock(lock);
+ }
+
+ srf = container_of(res, struct vmw_surface, res);
+ list_del_init(&srf->lru_head);
+ list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
+ }
+
+ if (lock != NULL)
+ write_unlock(lock);
}
+
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, struct vmw_surface **out)
@@ -556,8 +1255,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf =
- kmalloc(sizeof(*user_srf), GFP_KERNEL);
+ struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
@@ -568,10 +1266,51 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct drm_vmw_size __user *user_sizes;
int ret;
- int i;
+ int i, j;
+ uint32_t cur_bo_offset;
+ struct drm_vmw_size *cur_size;
+ struct vmw_surface_offset *cur_offset;
+ uint32_t stride_bpp;
+ uint32_t bpp;
+ uint32_t num_sizes;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
- if (unlikely(user_srf == NULL))
- return -ENOMEM;
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ num_sizes += req->mip_levels[i];
+
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ size = vmw_user_surface_size + 128 +
+ ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+ ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
srf = &user_srf->srf;
res = &srf->res;
@@ -579,21 +1318,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->flags = req->flags;
srf->format = req->format;
srf->scanout = req->scanout;
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- srf->num_sizes += srf->mip_levels[i];
+ srf->backup = NULL;
- if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS) {
- ret = -EINVAL;
- goto out_err0;
- }
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = num_sizes;
+ user_srf->size = size;
srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
if (unlikely(srf->sizes == NULL)) {
ret = -ENOMEM;
- goto out_err0;
+ goto out_no_sizes;
+ }
+ srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(srf->offsets == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_offsets;
}
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
@@ -603,9 +1343,32 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->num_sizes * sizeof(*srf->sizes));
if (unlikely(ret != 0)) {
ret = -EFAULT;
- goto out_err1;
+ goto out_no_copy;
}
+ cur_bo_offset = 0;
+ cur_offset = srf->offsets;
+ cur_size = srf->sizes;
+
+ bpp = vmw_sf_bpp[srf->format].bpp;
+ stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
+
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ for (j = 0; j < srf->mip_levels[i]; ++j) {
+ uint32_t stride =
+ (cur_size->width * stride_bpp + 7) >> 3;
+
+ cur_offset->face = i;
+ cur_offset->mip = j;
+ cur_offset->bo_offset = cur_bo_offset;
+ cur_bo_offset += stride * cur_size->height *
+ cur_size->depth * bpp / stride_bpp;
+ ++cur_offset;
+ ++cur_size;
+ }
+ }
+ srf->backup_size = cur_bo_offset;
+
if (srf->scanout &&
srf->num_sizes == 1 &&
srf->sizes[0].width == 64 &&
@@ -617,7 +1380,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (!srf->snooper.image) {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
- goto out_err1;
+ goto out_no_copy;
}
} else {
srf->snooper.image = NULL;
@@ -634,7 +1397,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
if (unlikely(ret != 0))
- return ret;
+ goto out_unlock;
tmp = vmw_resource_reference(&srf->res);
ret = ttm_base_object_init(tfile, &user_srf->base,
@@ -644,7 +1407,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
- return ret;
+ goto out_unlock;
}
rep->sid = user_srf->base.hash.key;
@@ -652,11 +1415,19 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Created bad Surface ID.\n");
vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
return 0;
-out_err1:
+out_no_copy:
+ kfree(srf->offsets);
+out_no_offsets:
kfree(srf->sizes);
-out_err0:
+out_no_sizes:
kfree(user_srf);
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
return ret;
}
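
The nested loop above derives each mip level's starting offset in the backing buffer from a per-format bit depth: bytes per row are rounded up to whole bytes, then scaled by height, depth and the pixel/stride bit-depth ratio. Below is a minimal, self-contained sketch of that arithmetic; the struct and function names and the 8 bits-per-pixel figures are illustrative assumptions, not the driver's vmw_sf_bpp table.

#include <stdint.h>
#include <stdio.h>

struct size3d { uint32_t width, height, depth; };

static uint32_t mip_chain_size(const struct size3d *sizes, int n,
                               uint32_t bpp, uint32_t stride_bpp)
{
        uint32_t offset = 0;
        int i;

        for (i = 0; i < n; ++i) {
                /* Bytes per row, rounded up to whole bytes. */
                uint32_t stride = (sizes[i].width * stride_bpp + 7) >> 3;

                printf("mip %d starts at byte offset %u\n", i, (unsigned)offset);
                offset += stride * sizes[i].height * sizes[i].depth *
                          bpp / stride_bpp;
        }
        return offset;  /* becomes the backup buffer size */
}

int main(void)
{
        struct size3d mips[] = { {64, 64, 1}, {32, 32, 1}, {16, 16, 1} };

        /* For an assumed 8 bpp format: 4096 + 1024 + 256 = 5376 bytes. */
        printf("total: %u bytes\n", (unsigned)mip_chain_size(mips, 3, 8, 8));
        return 0;
}
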
@@ -970,7 +1741,7 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
int ret;
ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, res_free);
+ VMW_RES_STREAM, false, res_free, NULL);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -1000,8 +1771,11 @@ static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
+ struct vmw_private *dev_priv = res->dev_priv;
kfree(stream);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size);
}
/**
@@ -1055,23 +1829,56 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ struct vmw_user_stream *stream;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- if (unlikely(stream == NULL))
- return -ENOMEM;
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of streams anyway.
+ */
+
+ if (unlikely(vmw_user_stream_size == 0))
+ vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for stream"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (unlikely(stream == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
res = &stream->stream.res;
stream->base.shareable = false;
stream->base.tfile = NULL;
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
- return ret;
+ goto out_unlock;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -1085,6 +1892,8 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
return ret;
}
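
Both vmw_surface_define_ioctl() and vmw_stream_claim_ioctl() above follow the same accounting discipline: charge the global TTM memory accounting before the kmalloc(), undo the charge if allocation fails, and let the resource destructor return the charge once initialization has taken ownership. A minimal userspace sketch of that hand-off follows; acct_charge() and acct_uncharge() are hypothetical stand-ins for ttm_mem_global_alloc() and ttm_mem_global_free().

#include <stdlib.h>

struct obj {
        size_t acct_size;
};

/* Hypothetical stand-ins for ttm_mem_global_alloc()/ttm_mem_global_free(). */
static int acct_charge(size_t size) { (void)size; return 0; }
static void acct_uncharge(size_t size) { (void)size; }

/* The destructor owns the accounting once creation has succeeded. */
static void obj_destroy(struct obj *o)
{
        size_t size = o->acct_size;

        free(o);
        acct_uncharge(size);
}

static int obj_create(size_t size, struct obj **out)
{
        struct obj *o;
        int ret;

        ret = acct_charge(size);        /* charge before allocating */
        if (ret)
                return ret;

        o = malloc(sizeof(*o));
        if (!o) {
                acct_uncharge(size);    /* creation failed: undo the charge */
                return -1;
        }
        o->acct_size = size;
        *out = o;
        return 0;
}

int main(void)
{
        struct obj *o;

        if (obj_create(128, &o) == 0)
                obj_destroy(o);
        return 0;
}
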
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
new file mode 100644
index 00000000000..4defdcf1c72
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -0,0 +1,537 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+
+#define vmw_crtc_to_sou(x) \
+ container_of(x, struct vmw_screen_object_unit, base.crtc)
+#define vmw_encoder_to_sou(x) \
+ container_of(x, struct vmw_screen_object_unit, base.encoder)
+#define vmw_connector_to_sou(x) \
+ container_of(x, struct vmw_screen_object_unit, base.connector)
+
+struct vmw_screen_object_display {
+ unsigned num_implicit;
+
+ struct vmw_framebuffer *implicit_fb;
+};
+
+/**
+ * Display unit using screen objects.
+ */
+struct vmw_screen_object_unit {
+ struct vmw_display_unit base;
+
+ unsigned long buffer_size; /**< Size of allocated buffer */
+ struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+
+ bool defined;
+ bool active_implicit;
+};
+
+static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
+{
+ vmw_display_unit_cleanup(&sou->base);
+ kfree(sou);
+}
+
+
+/*
+ * Screen Object Display Unit CRTC functions
+ */
+
+static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_sou_destroy(vmw_crtc_to_sou(crtc));
+}
+
+static void vmw_sou_del_active(struct vmw_private *vmw_priv,
+ struct vmw_screen_object_unit *sou)
+{
+ struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+
+ if (sou->active_implicit) {
+ if (--(ld->num_implicit) == 0)
+ ld->implicit_fb = NULL;
+ sou->active_implicit = false;
+ }
+}
+
+static void vmw_sou_add_active(struct vmw_private *vmw_priv,
+ struct vmw_screen_object_unit *sou,
+ struct vmw_framebuffer *vfb)
+{
+ struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+
+ BUG_ON(!ld->num_implicit && ld->implicit_fb);
+
+ if (!sou->active_implicit && sou->base.is_implicit) {
+ ld->implicit_fb = vfb;
+ sou->active_implicit = true;
+ ld->num_implicit++;
+ }
+}
+
+/**
+ * Send the fifo command to create a screen.
+ */
+static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou,
+ uint32_t x, uint32_t y,
+ struct drm_display_mode *mode)
+{
+ size_t fifo_size;
+
+ struct {
+ struct {
+ uint32_t cmdType;
+ } header;
+ SVGAScreenObject obj;
+ } *cmd;
+
+ BUG_ON(!sou->buffer);
+
+ fifo_size = sizeof(*cmd);
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+ /* The hardware has hung, nothing we can do about it here. */
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
+ cmd->obj.structSize = sizeof(SVGAScreenObject);
+ cmd->obj.id = sou->base.unit;
+ cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
+ (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
+ cmd->obj.size.width = mode->hdisplay;
+ cmd->obj.size.height = mode->vdisplay;
+ if (sou->base.is_implicit) {
+ cmd->obj.root.x = x;
+ cmd->obj.root.y = y;
+ } else {
+ cmd->obj.root.x = sou->base.gui_x;
+ cmd->obj.root.y = sou->base.gui_y;
+ }
+
+ /* Ok to assume that buffer is pinned in vram */
+ vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
+ cmd->obj.backingStore.pitch = mode->hdisplay * 4;
+
+ vmw_fifo_commit(dev_priv, fifo_size);
+
+ sou->defined = true;
+
+ return 0;
+}
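
vmw_sou_fifo_create() above (and vmw_sou_fifo_destroy() below) follow the driver's reserve/fill/commit FIFO sequence: reserve command space, bail out if the device has wedged, fill in the command, then commit it. A stripped-down sketch of that shape follows; fifo_reserve() and fifo_commit() are hypothetical stand-ins for vmw_fifo_reserve() and vmw_fifo_commit(), and the command layout is illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_cmd {
        uint32_t cmd_type;
        uint32_t screen_id;
};

static struct demo_cmd fifo_space;

/* Hypothetical stand-in for vmw_fifo_reserve(): hands out command space,
 * or NULL when the device has wedged. */
static void *fifo_reserve(size_t bytes)
{
        return bytes <= sizeof(fifo_space) ? &fifo_space : NULL;
}

/* Hypothetical stand-in for vmw_fifo_commit(): publishes the command. */
static void fifo_commit(size_t bytes)
{
        printf("committed %zu bytes to the FIFO\n", bytes);
}

static int send_destroy_screen(uint32_t screen_id)
{
        struct demo_cmd *cmd = fifo_reserve(sizeof(*cmd));

        if (cmd == NULL)
                return -1;      /* nothing more we can do */

        memset(cmd, 0, sizeof(*cmd));
        cmd->cmd_type = 0x22;   /* placeholder, not a real SVGA command id */
        cmd->screen_id = screen_id;

        fifo_commit(sizeof(*cmd));
        return 0;
}

int main(void)
{
        return send_destroy_screen(0) ? 1 : 0;
}
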
+
+/**
+ * Send the fifo command to destroy a screen.
+ */
+static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou)
+{
+ size_t fifo_size;
+ int ret;
+
+ struct {
+ struct {
+ uint32_t cmdType;
+ } header;
+ SVGAFifoCmdDestroyScreen body;
+ } *cmd;
+
+ /* no need to do anything */
+ if (unlikely(!sou->defined))
+ return 0;
+
+ fifo_size = sizeof(*cmd);
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+ /* the hardware has hung, nothing we can do about it here */
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
+ cmd->body.screenId = sou->base.unit;
+
+ vmw_fifo_commit(dev_priv, fifo_size);
+
+ /* Force sync */
+ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+ if (unlikely(ret != 0))
+ DRM_ERROR("Failed to sync with HW");
+ else
+ sou->defined = false;
+
+ return ret;
+}
+
+/**
+ * Free the backing store.
+ */
+static void vmw_sou_backing_free(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou)
+{
+ struct ttm_buffer_object *bo;
+
+ if (unlikely(sou->buffer == NULL))
+ return;
+
+ bo = &sou->buffer->base;
+ ttm_bo_unref(&bo);
+ sou->buffer = NULL;
+ sou->buffer_size = 0;
+}
+
+/**
+ * Allocate the backing store for the buffer.
+ */
+static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou,
+ unsigned long size)
+{
+ int ret;
+
+ if (sou->buffer_size == size)
+ return 0;
+
+ if (sou->buffer)
+ vmw_sou_backing_free(dev_priv, sou);
+
+ sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
+ if (unlikely(sou->buffer == NULL))
+ return -ENOMEM;
+
+ /* After we have allocated the backing store we might not be able to
+ * resume the overlays; this is preferred to failing the allocation.
+ */
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
+ &vmw_vram_ne_placement,
+ false, &vmw_dmabuf_bo_free);
+ vmw_overlay_resume_all(dev_priv);
+
+ if (unlikely(ret != 0))
+ sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
+ else
+ sou->buffer_size = size;
+
+ return ret;
+}
+
+static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_screen_object_unit *sou;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct vmw_framebuffer *vfb;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ if (!set)
+ return -EINVAL;
+
+ if (!set->crtc)
+ return -EINVAL;
+
+ /* get the sou */
+ crtc = set->crtc;
+ sou = vmw_crtc_to_sou(crtc);
+ vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+ dev_priv = vmw_priv(crtc->dev);
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("to many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &sou->base.connector) {
+ DRM_ERROR("connector doesn't match %p %p\n",
+ set->connectors[0], &sou->base.connector);
+ return -EINVAL;
+ }
+
+ /* sou only supports one fb active at a time */
+ if (sou->base.is_implicit &&
+ dev_priv->sou_priv->implicit_fb && vfb &&
+ !(dev_priv->sou_priv->num_implicit == 1 &&
+ sou->active_implicit) &&
+ dev_priv->sou_priv->implicit_fb != vfb) {
+ DRM_ERROR("Multiple framebuffers not supported\n");
+ return -EINVAL;
+ }
+
+ /* since they always map one to one these are safe */
+ connector = &sou->base.connector;
+ encoder = &sou->base.encoder;
+
+ /* should we turn the crtc off */
+ if (set->num_connectors == 0 || !set->mode || !set->fb) {
+ ret = vmw_sou_fifo_destroy(dev_priv, sou);
+ /* the hardware has hung, don't do anything more */
+ if (unlikely(ret != 0))
+ return ret;
+
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ crtc->fb = NULL;
+ crtc->x = 0;
+ crtc->y = 0;
+
+ vmw_sou_del_active(dev_priv, sou);
+
+ vmw_sou_backing_free(dev_priv, sou);
+
+ return 0;
+ }
+
+
+ /* we now know we want to set a mode */
+ mode = set->mode;
+ fb = set->fb;
+
+ if (set->x + mode->hdisplay > fb->width ||
+ set->y + mode->vdisplay > fb->height) {
+ DRM_ERROR("set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ vmw_fb_off(dev_priv);
+
+ if (mode->hdisplay != crtc->mode.hdisplay ||
+ mode->vdisplay != crtc->mode.vdisplay) {
+ /* no need to check if depth is different, because backing
+ * store depth is forced to 4 by the device.
+ */
+
+ ret = vmw_sou_fifo_destroy(dev_priv, sou);
+ /* the hardware has hung, don't do anything more */
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_sou_backing_free(dev_priv, sou);
+ }
+
+ if (!sou->buffer) {
+ /* forced to depth 4 by the device */
+ size_t size = mode->hdisplay * mode->vdisplay * 4;
+ ret = vmw_sou_backing_alloc(dev_priv, sou, size);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
+ if (unlikely(ret != 0)) {
+ /*
+ * We are in a bit of a situation here: the hardware has
+ * hung and we may or may not have a buffer hanging off
+ * the screen object. The best thing to do is nothing
+ * if we were already defined; if not, just turn the crtc off.
+ * Not what userspace wants, but it will have to cope.
+ */
+ if (sou->defined)
+ return ret;
+
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ crtc->fb = NULL;
+ crtc->x = 0;
+ crtc->y = 0;
+
+ return ret;
+ }
+
+ vmw_sou_add_active(dev_priv, sou, vfb);
+
+ connector->encoder = encoder;
+ encoder->crtc = crtc;
+ crtc->mode = *mode;
+ crtc->fb = fb;
+ crtc->x = set->x;
+ crtc->y = set->y;
+
+ return 0;
+}
+
+static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+ .save = vmw_du_crtc_save,
+ .restore = vmw_du_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_du_crtc_gamma_set,
+ .destroy = vmw_sou_crtc_destroy,
+ .set_config = vmw_sou_crtc_set_config,
+};
+
+/*
+ * Screen Object Display Unit encoder functions
+ */
+
+static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_sou_destroy(vmw_encoder_to_sou(encoder));
+}
+
+static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
+ .destroy = vmw_sou_encoder_destroy,
+};
+
+/*
+ * Screen Object Display Unit connector functions
+ */
+
+static void vmw_sou_connector_destroy(struct drm_connector *connector)
+{
+ vmw_sou_destroy(vmw_connector_to_sou(connector));
+}
+
+static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .save = vmw_du_connector_save,
+ .restore = vmw_du_connector_restore,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
+ .set_property = vmw_du_connector_set_property,
+ .destroy = vmw_sou_connector_destroy,
+};
+
+static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_screen_object_unit *sou;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ sou = kzalloc(sizeof(*sou), GFP_KERNEL);
+ if (!sou)
+ return -ENOMEM;
+
+ sou->base.unit = unit;
+ crtc = &sou->base.crtc;
+ encoder = &sou->base.encoder;
+ connector = &sou->base.connector;
+
+ sou->active_implicit = false;
+
+ sou->base.pref_active = (unit == 0);
+ sou->base.pref_width = 800;
+ sou->base.pref_height = 600;
+ sou->base.pref_mode = NULL;
+ sou->base.is_implicit = true;
+
+ drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ connector->status = vmw_du_connector_detect(connector, true);
+
+ drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
+
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int i, ret;
+
+ if (dev_priv->sou_priv) {
+ DRM_INFO("sou system already on\n");
+ return -EINVAL;
+ }
+
+ if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+ DRM_INFO("Not using screen objects,"
+ " missing cap SCREEN_OBJECT_2\n");
+ return -ENOSYS;
+ }
+
+ ret = -ENOMEM;
+ dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
+ if (unlikely(!dev_priv->sou_priv))
+ goto err_no_mem;
+
+ dev_priv->sou_priv->num_implicit = 0;
+ dev_priv->sou_priv->implicit_fb = NULL;
+
+ ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+ if (unlikely(ret != 0))
+ goto err_free;
+
+ ret = drm_mode_create_dirty_info_property(dev);
+ if (unlikely(ret != 0))
+ goto err_vblank_cleanup;
+
+ for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+ vmw_sou_init(dev_priv, i);
+
+ DRM_INFO("Screen objects system initialized\n");
+
+ return 0;
+
+err_vblank_cleanup:
+ drm_vblank_cleanup(dev);
+err_free:
+ kfree(dev_priv->sou_priv);
+ dev_priv->sou_priv = NULL;
+err_no_mem:
+ return ret;
+}
+
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
+ drm_vblank_cleanup(dev);
+
+ kfree(dev_priv->sou_priv);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 1e8eedd901e..d3c11f5184f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,9 +34,8 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
struct vmw_private *dev_priv;
if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
- if (vmw_fifo_mmap(filp, vma) == 0)
- return 0;
- return drm_mmap(filp, vma);
+ DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
+ return -EINVAL;
}
file_priv = filp->private_data;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 9bc7b03269d..299d2387112 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -183,9 +183,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
table = macbookair_fn_keys;
- else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI &&
- hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING6_JIS)
- table = macbookair_fn_keys;
else if (hid->product < 0x21d || hid->product >= 0x300)
table = powerbook_fn_keys;
else
@@ -458,6 +455,9 @@ static const struct hid_device_id apple_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
APPLE_ISO_KEYBOARD },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ APPLE_ISO_KEYBOARD },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
@@ -508,6 +508,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 3bdb4500f95..5be858dd9a1 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/hid.h>
+#include <linux/module.h>
#include "hid-ids.h"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 91adcc5bad2..848a56c0279 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1362,6 +1362,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
@@ -1374,6 +1377,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
@@ -1942,6 +1946,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 9a243ca96e6..ee80d733801 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 61eece47204..e832f44ae38 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/hid.h>
+#include <linux/module.h>
#include "hid-ids.h"
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index a5dc13fe367..9bdde867a02 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -24,6 +24,7 @@
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/usb.h>
+#include <linux/module.h>
#include "hid-ids.h"
#include "usbhid/usbhid.h"
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index 279ba530003..f1e1bcf6742 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/hid.h>
+#include <linux/module.h>
#include "hid-ids.h"
#ifdef CONFIG_GREENASIA_FF
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 91e3a032112..4e7542151e2 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -25,6 +25,7 @@
#include <linux/hid.h>
#include <linux/input.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1680e99b481..06ce996b8b6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -112,6 +112,9 @@
#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
@@ -121,6 +124,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fa5d7a1ffa9..f1c909f1b23 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -291,7 +291,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_slot_field = usage->hid;
td->last_field_index = field->index;
td->last_mt_collection = usage->collection_index;
- hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
@@ -530,44 +529,12 @@ static void mt_set_input_mode(struct hid_device *hdev)
}
}
-/* a list of devices for which there is a specialized multitouch driver */
-static const struct hid_device_id mt_have_special_driver[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
- { }
-};
-
-static bool mt_match_one_id(struct hid_device *hdev,
- const struct hid_device_id *id)
-{
- return id->bus == hdev->bus &&
- (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
- (id->product == HID_ANY_ID || id->product == hdev->product);
-}
-
-static const struct hid_device_id *mt_match_id(struct hid_device *hdev,
- const struct hid_device_id *id)
-{
- for (; id->bus; id++)
- if (mt_match_one_id(hdev, id))
- return id;
-
- return NULL;
-}
-
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret, i;
struct mt_device *td;
struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
- if (mt_match_id(hdev, mt_have_special_driver))
- return -ENODEV;
-
for (i = 0; mt_classes[i].name ; i++) {
if (id->driver_data == mt_classes[i].name) {
mtclass = &(mt_classes[i]);
@@ -575,6 +542,10 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
@@ -590,16 +561,10 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret != 0)
goto fail;
- hdev->quirks |= HID_QUIRK_MULTITOUCH;
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
goto fail;
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
GFP_KERNEL);
if (!td->slots) {
@@ -793,10 +758,6 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
- /* Rest of the world */
- { .driver_data = MT_CLS_DEFAULT,
- HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
-
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 1782693819f..01e7d2cd7c2 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -36,6 +36,7 @@
#include <linux/completion.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#define PICOLCD_NAME "PicoLCD (graphic)"
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 06e5300d43d..070f93a5c11 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -40,6 +40,7 @@
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/usb.h>
#include <linux/hid.h>
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index edf898dee28..b07e7f96a35 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -13,6 +13,7 @@
#include <linux/hid.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "hid-roccat-common.h"
static inline uint16_t roccat_common_feature_report(uint8_t report_id)
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 5666e7587b1..b685b04dbf9 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -27,6 +27,7 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/hid-roccat.h>
+#include <linux/module.h>
#define ROCCAT_FIRST_MINOR 0
#define ROCCAT_MAX_DEVICES 8
@@ -162,27 +163,27 @@ static int roccat_open(struct inode *inode, struct file *file)
device = devices[minor];
- mutex_lock(&device->readers_lock);
-
if (!device) {
pr_emerg("roccat device with minor %d doesn't exist\n", minor);
error = -ENODEV;
- goto exit_err;
+ goto exit_err_devices;
}
+ mutex_lock(&device->readers_lock);
+
if (!device->open++) {
/* power on device on adding first reader */
error = hid_hw_power(device->hid, PM_HINT_FULLON);
if (error < 0) {
--device->open;
- goto exit_err;
+ goto exit_err_readers;
}
error = hid_hw_open(device->hid);
if (error < 0) {
hid_hw_power(device->hid, PM_HINT_NORMAL);
--device->open;
- goto exit_err;
+ goto exit_err_readers;
}
}
@@ -193,13 +194,13 @@ static int roccat_open(struct inode *inode, struct file *file)
list_add_tail(&reader->node, &device->readers);
file->private_data = reader;
-exit_unlock:
+exit_err_readers:
mutex_unlock(&device->readers_lock);
+exit_err_devices:
mutex_unlock(&devices_lock);
+ if (error)
+ kfree(reader);
return error;
-exit_err:
- kfree(reader);
- goto exit_unlock;
}
static int roccat_release(struct inode *inode, struct file *file)
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 670da9109f8..4b1448613ea 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/hid.h>
+#include <linux/module.h>
#include "hid-ids.h"
#ifdef CONFIG_SMARTJOYPLUS_FF
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 575862b0688..83a933b9c2e 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -31,6 +31,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/module.h>
#include "hid-ids.h"
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index f31fab012f2..f6ba81df71b 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -25,6 +25,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/module.h>
#include "hid-ids.h"
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 4ea464151c3..5028d60a22a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -16,6 +16,7 @@
*/
#include <linux/hid.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include "../hid-ids.h"
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9b347acf155..9ec854ae118 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -335,6 +335,7 @@ config SENSORS_I5K_AMB
config SENSORS_F71805F
tristate "Fintek F71805F/FG, F71806F/FG and F71872F/FG"
+ depends on !PPC
help
If you say yes here you get support for hardware monitoring
features of the Fintek F71805F/FG, F71806F/FG and F71872F/FG
@@ -345,6 +346,7 @@ config SENSORS_F71805F
config SENSORS_F71882FG
tristate "Fintek F71882FG and compatibles"
+ depends on !PPC
help
If you say yes here you get support for hardware monitoring
features of many Fintek Super-I/O (LPC) chips. The currently
@@ -468,6 +470,7 @@ config SENSORS_IBMPEX
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
+ depends on !PPC
select HWMON_VID
help
If you say yes here you get support for ITE IT8705F, IT8712F,
@@ -824,6 +827,7 @@ config SENSORS_NTC_THERMISTOR
config SENSORS_PC87360
tristate "National Semiconductor PC87360 family"
+ depends on !PPC
select HWMON_VID
help
If you say yes here you get access to the hardware monitoring
@@ -837,6 +841,7 @@ config SENSORS_PC87360
config SENSORS_PC87427
tristate "National Semiconductor PC87427"
+ depends on !PPC
help
If you say yes here you get access to the hardware monitoring
functions of the National Semiconductor PC87427 Super-I/O chip.
@@ -928,7 +933,7 @@ config SENSORS_SMM665
config SENSORS_DME1737
tristate "SMSC DME1737, SCH311x and compatibles"
- depends on I2C && EXPERIMENTAL
+ depends on I2C && EXPERIMENTAL && !PPC
select HWMON_VID
help
If you say yes here you get support for the hardware monitoring
@@ -970,6 +975,7 @@ config SENSORS_EMC6W201
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
+ depends on !PPC
help
If you say yes here you get support for the integrated fan
monitoring and control capabilities of the SMSC LPC47B27x,
@@ -1003,7 +1009,7 @@ config SENSORS_SMSC47M192
config SENSORS_SMSC47B397
tristate "SMSC LPC47B397-NC"
- depends on EXPERIMENTAL
+ depends on EXPERIMENTAL && !PPC
help
If you say yes here you get support for the SMSC LPC47B397-NC
sensor chip.
@@ -1017,6 +1023,7 @@ config SENSORS_SCH56XX_COMMON
config SENSORS_SCH5627
tristate "SMSC SCH5627"
+ depends on !PPC
select SENSORS_SCH56XX_COMMON
help
If you say yes here you get support for the hardware monitoring
@@ -1027,6 +1034,7 @@ config SENSORS_SCH5627
config SENSORS_SCH5636
tristate "SMSC SCH5636"
+ depends on !PPC
select SENSORS_SCH56XX_COMMON
help
SMSC SCH5636 Super I/O chips include an embedded microcontroller for
@@ -1150,6 +1158,7 @@ config SENSORS_VIA686A
config SENSORS_VT1211
tristate "VIA VT1211"
+ depends on !PPC
select HWMON_VID
help
If you say yes here then you get support for hardware monitoring
@@ -1262,6 +1271,7 @@ config SENSORS_W83L786NG
config SENSORS_W83627HF
tristate "Winbond W83627HF, W83627THF, W83637HF, W83687THF, W83697HF"
+ depends on !PPC
select HWMON_VID
help
If you say yes here you get support for the Winbond W836X7 series
@@ -1272,7 +1282,8 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG, W83667HG, NCT6775F, NCT6776F"
+ tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F"
+ depends on !PPC
select HWMON_VID
help
If you say yes here you get support for the hardware
@@ -1281,7 +1292,8 @@ config SENSORS_W83627EHF
This driver also supports the W83627EHG, which is the lead-free
version of the W83627EHF, and the W83627DHG, which is a similar
chip suited for specific Intel processors that use PECI such as
- the Core 2 Duo.
+ the Core 2 Duo, as well as the W83627UHG, which is a stripped-down
+ version of the W83627DHG (as far as hardware monitoring goes).
This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
(also known as W83667HG-I), and NCT6776F.
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index d46c0c758dd..df29a7fff9e 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -58,10 +58,9 @@ static inline int ad7414_temp_from_reg(s16 reg)
static inline int ad7414_read(struct i2c_client *client, u8 reg)
{
- if (reg == AD7414_REG_TEMP) {
- int value = i2c_smbus_read_word_data(client, reg);
- return (value < 0) ? value : swab16(value);
- } else
+ if (reg == AD7414_REG_TEMP)
+ return i2c_smbus_read_word_swapped(client, reg);
+ else
return i2c_smbus_read_byte_data(client, reg);
}
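
The i2c_smbus_read_word_swapped() and i2c_smbus_write_word_swapped() helpers that these hwmon patches switch to behave like the open-coded swab16() pattern they replace: the 16-bit register value is byte-swapped around the standard SMBus word transfer, which suits devices that send the high byte first. A small userspace illustration of the swap itself (the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* 16-bit byte swap, the operation behind the *_word_swapped() helpers. */
static uint16_t swap16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        /* A register holding 0x1A80 is sent high byte first (0x1A, 0x80);
         * the standard SMBus word read assembles it low byte first as
         * 0x801A, and the swap restores 0x1A80. */
        uint16_t raw = 0x801A;

        printf("raw 0x%04X -> swapped 0x%04X\n",
               (unsigned)raw, (unsigned)swap16(raw));
        return 0;
}
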
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index ffc781fec18..8cb718ce823 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -76,20 +76,6 @@ static struct i2c_driver ad7418_driver = {
.id_table = ad7418_id,
};
-/* All registers are word-sized, except for the configuration registers.
- * AD7418 uses a high-byte first convention. Do NOT use those functions to
- * access the configuration registers CONF and CONF2, as they are byte-sized.
- */
-static inline int ad7418_read(struct i2c_client *client, u8 reg)
-{
- return swab16(i2c_smbus_read_word_data(client, reg));
-}
-
-static inline int ad7418_write(struct i2c_client *client, u8 reg, u16 value)
-{
- return i2c_smbus_write_word_data(client, reg, swab16(value));
-}
-
static void ad7418_init_client(struct i2c_client *client)
{
struct ad7418_data *data = i2c_get_clientdata(client);
@@ -128,7 +114,9 @@ static struct ad7418_data *ad7418_update_device(struct device *dev)
udelay(30);
for (i = 0; i < 3; i++) {
- data->temp[i] = ad7418_read(client, AD7418_REG_TEMP[i]);
+ data->temp[i] =
+ i2c_smbus_read_word_swapped(client,
+ AD7418_REG_TEMP[i]);
}
for (i = 0, ch = 4; i < data->adc_max; i++, ch--) {
@@ -138,11 +126,12 @@ static struct ad7418_data *ad7418_update_device(struct device *dev)
udelay(15);
data->in[data->adc_max - 1 - i] =
- ad7418_read(client, AD7418_REG_ADC);
+ i2c_smbus_read_word_swapped(client,
+ AD7418_REG_ADC);
}
/* restore old configuration value */
- ad7418_write(client, AD7418_REG_CONF, cfg);
+ i2c_smbus_write_word_swapped(client, AD7418_REG_CONF, cfg);
data->last_updated = jiffies;
data->valid = 1;
@@ -182,7 +171,9 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->lock);
data->temp[attr->index] = LM75_TEMP_TO_REG(temp);
- ad7418_write(client, AD7418_REG_TEMP[attr->index], data->temp[attr->index]);
+ i2c_smbus_write_word_swapped(client,
+ AD7418_REG_TEMP[attr->index],
+ data->temp[attr->index]);
mutex_unlock(&data->lock);
return count;
}
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index e9beeda4cbe..eedca3cf996 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -59,19 +59,6 @@ struct ads1015_data {
struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
};
-static s32 ads1015_read_reg(struct i2c_client *client, unsigned int reg)
-{
- s32 data = i2c_smbus_read_word_data(client, reg);
-
- return (data < 0) ? data : swab16(data);
-}
-
-static s32 ads1015_write_reg(struct i2c_client *client, unsigned int reg,
- u16 val)
-{
- return i2c_smbus_write_word_data(client, reg, swab16(val));
-}
-
static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
int *value)
{
@@ -87,7 +74,7 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
mutex_lock(&data->update_lock);
/* get channel parameters */
- res = ads1015_read_reg(client, ADS1015_CONFIG);
+ res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
if (res < 0)
goto err_unlock;
config = res;
@@ -101,13 +88,13 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
config |= (pga & 0x0007) << 9;
config |= (data_rate & 0x0007) << 5;
- res = ads1015_write_reg(client, ADS1015_CONFIG, config);
+ res = i2c_smbus_write_word_swapped(client, ADS1015_CONFIG, config);
if (res < 0)
goto err_unlock;
/* wait until conversion finished */
msleep(conversion_time_ms);
- res = ads1015_read_reg(client, ADS1015_CONFIG);
+ res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
if (res < 0)
goto err_unlock;
config = res;
@@ -117,7 +104,7 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
goto err_unlock;
}
- res = ads1015_read_reg(client, ADS1015_CONVERSION);
+ res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION);
if (res < 0)
goto err_unlock;
conversion = res;
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index c42c5a69a66..cfcc3b6fb6b 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -74,13 +74,6 @@ static int ads7828_detect(struct i2c_client *client,
static int ads7828_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-/* The ADS7828 returns the 12-bit sample in two bytes,
- these are read as a word then byte-swapped */
-static u16 ads7828_read_value(struct i2c_client *client, u8 reg)
-{
- return swab16(i2c_smbus_read_word_data(client, reg));
-}
-
static inline u8 channel_cmd_byte(int ch)
{
/* cmd byte C2,C1,C0 - see datasheet */
@@ -104,7 +97,8 @@ static struct ads7828_data *ads7828_update_device(struct device *dev)
for (ch = 0; ch < ADS7828_NCH; ch++) {
u8 cmd = channel_cmd_byte(ch);
- data->adc_input[ch] = ads7828_read_value(client, cmd);
+ data->adc_input[ch] =
+ i2c_smbus_read_word_swapped(client, cmd);
}
data->last_updated = jiffies;
data->valid = 1;
@@ -203,7 +197,7 @@ static int ads7828_detect(struct i2c_client *client,
for (ch = 0; ch < ADS7828_NCH; ch++) {
u16 in_data;
u8 cmd = channel_cmd_byte(ch);
- in_data = ads7828_read_value(client, cmd);
+ in_data = i2c_smbus_read_word_swapped(client, cmd);
if (in_data & 0xF000) {
pr_debug("%s : Doesn't look like an ads7828 device\n",
__func__);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index c02a052d308..d7bd1f3f2a3 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -829,17 +829,17 @@ static int asb100_read_value(struct i2c_client *client, u16 reg)
/* convert from ISA to LM75 I2C addresses */
switch (reg & 0xff) {
case 0x50: /* TEMP */
- res = swab16(i2c_smbus_read_word_data(cl, 0));
+ res = i2c_smbus_read_word_swapped(cl, 0);
break;
case 0x52: /* CONFIG */
res = i2c_smbus_read_byte_data(cl, 1);
break;
case 0x53: /* HYST */
- res = swab16(i2c_smbus_read_word_data(cl, 2));
+ res = i2c_smbus_read_word_swapped(cl, 2);
break;
case 0x55: /* MAX */
default:
- res = swab16(i2c_smbus_read_word_data(cl, 3));
+ res = i2c_smbus_read_word_swapped(cl, 3);
break;
}
}
@@ -877,10 +877,10 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 value)
i2c_smbus_write_byte_data(cl, 1, value & 0xff);
break;
case 0x53: /* HYST */
- i2c_smbus_write_word_data(cl, 2, swab16(value));
+ i2c_smbus_write_word_swapped(cl, 2, value);
break;
case 0x55: /* MAX */
- i2c_smbus_write_word_data(cl, 3, swab16(value));
+ i2c_smbus_write_word_swapped(cl, 3, value);
break;
}
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 93238378664..104b3767516 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -60,14 +60,13 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
#else
#define TO_PHYS_ID(cpu) (cpu)
#define TO_CORE_ID(cpu) (cpu)
-#define TO_ATTR_NO(cpu) (cpu)
#define for_each_sibling(i, cpu) for (i = 0; false; )
#endif
+#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
/*
* Per-Core Temperature Data
@@ -325,15 +324,6 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return adjust_tjmax(c, id, dev);
}
-static void __devinit get_ucode_rev_on_cpu(void *edx)
-{
- u32 eax;
-
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- sync_core();
- rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
-}
-
static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
sysfs_attr_init(&pdata->name_attr.attr);
@@ -380,27 +370,16 @@ exit_free:
static int __cpuinit chk_ucode_version(unsigned int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
- int err;
- u32 edx;
/*
* Check if we have problem with errata AE18 of Core processors:
* Readings might stop update when processor visited too deep sleep,
* fixed for stepping D0 (6EC).
*/
- if (c->x86_model == 0xe && c->x86_mask < 0xc) {
- /* check for microcode update */
- err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
- &edx, 1);
- if (err) {
- pr_err("Cannot determine microcode revision of "
- "CPU#%u (%d)!\n", cpu, err);
- return -ENODEV;
- } else if (edx < 0x39) {
- pr_err("Errata AE18 not fixed, update BIOS or "
- "microcode of the CPU!\n");
- return -ENODEV;
- }
+ if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+ pr_err("Errata AE18 not fixed, update BIOS or "
+ "microcode of the CPU!\n");
+ return -ENODEV;
}
return 0;
}
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index e11363467a8..ef1ac996752 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -80,24 +80,6 @@ struct ds1621_data {
u8 conf; /* Register encoding, combined */
};
-/* Temperature registers are word-sized.
- DS1621 uses a high-byte first convention, which is exactly opposite to
- the SMBus standard. */
-static int ds1621_read_temp(struct i2c_client *client, u8 reg)
-{
- int ret;
-
- ret = i2c_smbus_read_word_data(client, reg);
- if (ret < 0)
- return ret;
- return swab16(ret);
-}
-
-static int ds1621_write_temp(struct i2c_client *client, u8 reg, u16 value)
-{
- return i2c_smbus_write_word_data(client, reg, swab16(value));
-}
-
static void ds1621_init_client(struct i2c_client *client)
{
u8 conf, new_conf;
@@ -136,7 +118,7 @@ static struct ds1621_data *ds1621_update_client(struct device *dev)
data->conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF);
for (i = 0; i < ARRAY_SIZE(data->temp); i++)
- data->temp[i] = ds1621_read_temp(client,
+ data->temp[i] = i2c_smbus_read_word_swapped(client,
DS1621_REG_TEMP[i]);
/* reset alarms if necessary */
@@ -177,8 +159,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
mutex_lock(&data->update_lock);
data->temp[attr->index] = val;
- ds1621_write_temp(client, DS1621_REG_TEMP[attr->index],
- data->temp[attr->index]);
+ i2c_smbus_write_word_swapped(client, DS1621_REG_TEMP[attr->index],
+ data->temp[attr->index]);
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 4f7c3fc40a8..225ae4f3658 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -75,33 +75,13 @@ struct ds620_data {
s16 temp[3]; /* Register values, word */
};
-/*
- * Temperature registers are word-sized.
- * DS620 uses a high-byte first convention, which is exactly opposite to
- * the SMBus standard.
- */
-static int ds620_read_temp(struct i2c_client *client, u8 reg)
-{
- int ret;
-
- ret = i2c_smbus_read_word_data(client, reg);
- if (ret < 0)
- return ret;
- return swab16(ret);
-}
-
-static int ds620_write_temp(struct i2c_client *client, u8 reg, u16 value)
-{
- return i2c_smbus_write_word_data(client, reg, swab16(value));
-}
-
static void ds620_init_client(struct i2c_client *client)
{
struct ds620_platform_data *ds620_info = client->dev.platform_data;
u16 conf, new_conf;
new_conf = conf =
- swab16(i2c_smbus_read_word_data(client, DS620_REG_CONF));
+ i2c_smbus_read_word_swapped(client, DS620_REG_CONF);
/* switch to continuous conversion mode */
new_conf &= ~DS620_REG_CONFIG_1SHOT;
@@ -118,8 +98,7 @@ static void ds620_init_client(struct i2c_client *client)
new_conf |= DS620_REG_CONFIG_R1 | DS620_REG_CONFIG_R0;
if (conf != new_conf)
- i2c_smbus_write_word_data(client, DS620_REG_CONF,
- swab16(new_conf));
+ i2c_smbus_write_word_swapped(client, DS620_REG_CONF, new_conf);
/* start conversion */
i2c_smbus_write_byte(client, DS620_COM_START);
@@ -141,8 +120,8 @@ static struct ds620_data *ds620_update_client(struct device *dev)
dev_dbg(&client->dev, "Starting ds620 update\n");
for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
- res = ds620_read_temp(client,
- DS620_REG_TEMP[i]);
+ res = i2c_smbus_read_word_swapped(client,
+ DS620_REG_TEMP[i]);
if (res < 0) {
ret = ERR_PTR(res);
goto abort;
@@ -191,8 +170,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
mutex_lock(&data->update_lock);
data->temp[attr->index] = val;
- ds620_write_temp(client, DS620_REG_TEMP[attr->index],
- data->temp[attr->index]);
+ i2c_smbus_write_word_swapped(client, DS620_REG_TEMP[attr->index],
+ data->temp[attr->index]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -210,16 +189,15 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
return PTR_ERR(data);
/* reset alarms if necessary */
- res = i2c_smbus_read_word_data(client, DS620_REG_CONF);
+ res = i2c_smbus_read_word_swapped(client, DS620_REG_CONF);
if (res < 0)
return res;
- conf = swab16(res);
- new_conf = conf;
+ new_conf = conf = res;
new_conf &= ~attr->index;
if (conf != new_conf) {
- res = i2c_smbus_write_word_data(client, DS620_REG_CONF,
- swab16(new_conf));
+ res = i2c_smbus_write_word_swapped(client, DS620_REG_CONF,
+ new_conf);
if (res < 0)
return res;
}
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index e7ae5743e18..a13e2da97e3 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -591,7 +591,7 @@ static int gl518_remove(struct i2c_client *client)
static int gl518_read_value(struct i2c_client *client, u8 reg)
{
if ((reg >= 0x07) && (reg <= 0x0c))
- return swab16(i2c_smbus_read_word_data(client, reg));
+ return i2c_smbus_read_word_swapped(client, reg);
else
return i2c_smbus_read_byte_data(client, reg);
}
@@ -599,7 +599,7 @@ static int gl518_read_value(struct i2c_client *client, u8 reg)
static int gl518_write_value(struct i2c_client *client, u8 reg, u16 value)
{
if ((reg >= 0x07) && (reg <= 0x0c))
- return i2c_smbus_write_word_data(client, reg, swab16(value));
+ return i2c_smbus_write_word_swapped(client, reg, value);
else
return i2c_smbus_write_byte_data(client, reg, value);
}
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 131ea8625f0..cd6085bbfba 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -821,7 +821,7 @@ static int gl520_remove(struct i2c_client *client)
static int gl520_read_value(struct i2c_client *client, u8 reg)
{
if ((reg >= 0x07) && (reg <= 0x0c))
- return swab16(i2c_smbus_read_word_data(client, reg));
+ return i2c_smbus_read_word_swapped(client, reg);
else
return i2c_smbus_read_byte_data(client, reg);
}
@@ -829,7 +829,7 @@ static int gl520_read_value(struct i2c_client *client, u8 reg)
static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value)
{
if ((reg >= 0x07) && (reg <= 0x0c))
- return i2c_smbus_write_word_data(client, reg, swab16(value));
+ return i2c_smbus_write_word_swapped(client, reg, value);
else
return i2c_smbus_write_byte_data(client, reg, value);
}
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index a61e7815a2a..6460487e41b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -27,8 +27,7 @@
static struct class *hwmon_class;
-static DEFINE_IDR(hwmon_idr);
-static DEFINE_SPINLOCK(idr_lock);
+static DEFINE_IDA(hwmon_ida);
/**
* hwmon_device_register - register w/ hwmon
@@ -42,30 +41,17 @@ static DEFINE_SPINLOCK(idr_lock);
struct device *hwmon_device_register(struct device *dev)
{
struct device *hwdev;
- int id, err;
-
-again:
- if (unlikely(idr_pre_get(&hwmon_idr, GFP_KERNEL) == 0))
- return ERR_PTR(-ENOMEM);
-
- spin_lock(&idr_lock);
- err = idr_get_new(&hwmon_idr, NULL, &id);
- spin_unlock(&idr_lock);
+ int id;
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return ERR_PTR(err);
+ id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
+ if (id < 0)
+ return ERR_PTR(id);
- id = id & MAX_ID_MASK;
hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
HWMON_ID_FORMAT, id);
- if (IS_ERR(hwdev)) {
- spin_lock(&idr_lock);
- idr_remove(&hwmon_idr, id);
- spin_unlock(&idr_lock);
- }
+ if (IS_ERR(hwdev))
+ ida_simple_remove(&hwmon_ida, id);
return hwdev;
}
@@ -81,9 +67,7 @@ void hwmon_device_unregister(struct device *dev)
if (likely(sscanf(dev_name(dev), HWMON_ID_FORMAT, &id) == 1)) {
device_unregister(dev);
- spin_lock(&idr_lock);
- idr_remove(&hwmon_idr, id);
- spin_unlock(&idr_lock);
+ ida_simple_remove(&hwmon_ida, id);
} else
dev_dbg(dev->parent,
"hwmon_device_unregister() failed: bad class ID!\n");
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index c316294c48b..6a967d7dbde 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -88,8 +88,7 @@
#define AEM_MIN_POWER_INTERVAL 200
#define UJ_PER_MJ 1000L
-static DEFINE_IDR(aem_idr);
-static DEFINE_SPINLOCK(aem_idr_lock);
+static DEFINE_IDA(aem_ida);
static struct platform_driver aem_driver = {
.driver = {
@@ -148,8 +147,9 @@ struct aem_data {
int id;
struct aem_ipmi_data ipmi;
- /* Function to update sensors */
+ /* Function and buffer to update sensors */
void (*update)(struct aem_data *data);
+ struct aem_read_sensor_resp *rs_resp;
/*
* AEM 1.x sensors:
@@ -246,8 +246,6 @@ static void aem_bmc_gone(int iface);
static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
static void aem_remove_sensors(struct aem_data *data);
-static int aem_init_aem1(struct aem_ipmi_data *probe);
-static int aem_init_aem2(struct aem_ipmi_data *probe);
static int aem1_find_sensors(struct aem_data *data);
static int aem2_find_sensors(struct aem_data *data);
static void update_aem1_sensors(struct aem_data *data);
@@ -356,47 +354,16 @@ static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
complete(&data->read_complete);
}
-/* ID functions */
-
-/* Obtain an id */
-static int aem_idr_get(int *id)
-{
- int i, err;
-
-again:
- if (unlikely(!idr_pre_get(&aem_idr, GFP_KERNEL)))
- return -ENOMEM;
-
- spin_lock(&aem_idr_lock);
- err = idr_get_new(&aem_idr, NULL, &i);
- spin_unlock(&aem_idr_lock);
-
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return err;
-
- *id = i & MAX_ID_MASK;
- return 0;
-}
-
-/* Release an object ID */
-static void aem_idr_put(int id)
-{
- spin_lock(&aem_idr_lock);
- idr_remove(&aem_idr, id);
- spin_unlock(&aem_idr_lock);
-}
-
/* Sensor support functions */
-/* Read a sensor value */
+/* Read a sensor value; must be called with data->lock held */
static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
void *buf, size_t size)
{
int rs_size, res;
struct aem_read_sensor_req rs_req;
- struct aem_read_sensor_resp *rs_resp;
+ /* Use preallocated rx buffer */
+ struct aem_read_sensor_resp *rs_resp = data->rs_resp;
struct aem_ipmi_data *ipmi = &data->ipmi;
/* AEM registers are 1, 2, 4 or 8 bytes */
@@ -422,10 +389,6 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
ipmi->tx_message.data_len = sizeof(rs_req);
rs_size = sizeof(*rs_resp) + size;
- rs_resp = kzalloc(rs_size, GFP_KERNEL);
- if (!rs_resp)
- return -ENOMEM;
-
ipmi->rx_msg_data = rs_resp;
ipmi->rx_msg_len = rs_size;
@@ -468,7 +431,6 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
res = 0;
out:
- kfree(rs_resp);
return res;
}
@@ -526,11 +488,12 @@ static void aem_delete(struct aem_data *data)
{
list_del(&data->list);
aem_remove_sensors(data);
+ kfree(data->rs_resp);
hwmon_device_unregister(data->hwmon_dev);
ipmi_destroy_user(data->ipmi.user);
platform_set_drvdata(data->pdev, NULL);
platform_device_unregister(data->pdev);
- aem_idr_put(data->id);
+ ida_simple_remove(&aem_ida, data->id);
kfree(data);
}
@@ -587,7 +550,8 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL;
/* Create sub-device for this fw instance */
- if (aem_idr_get(&data->id))
+ data->id = ida_simple_get(&aem_ida, 0, 0, GFP_KERNEL);
+ if (data->id < 0)
goto id_err;
data->pdev = platform_device_alloc(DRVNAME, data->id);
@@ -602,24 +566,31 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
platform_set_drvdata(data->pdev, data);
/* Set up IPMI interface */
- if (aem_init_ipmi_data(&data->ipmi, probe->interface,
- probe->bmc_device))
+ res = aem_init_ipmi_data(&data->ipmi, probe->interface,
+ probe->bmc_device);
+ if (res)
goto ipmi_err;
/* Register with hwmon */
data->hwmon_dev = hwmon_device_register(&data->pdev->dev);
-
if (IS_ERR(data->hwmon_dev)) {
dev_err(&data->pdev->dev, "Unable to register hwmon "
"device for IPMI interface %d\n",
probe->interface);
+ res = PTR_ERR(data->hwmon_dev);
goto hwmon_reg_err;
}
data->update = update_aem1_sensors;
+ data->rs_resp = kzalloc(sizeof(*(data->rs_resp)) + 8, GFP_KERNEL);
+ if (!data->rs_resp) {
+ res = -ENOMEM;
+ goto alloc_resp_err;
+ }
/* Find sensors */
- if (aem1_find_sensors(data))
+ res = aem1_find_sensors(data);
+ if (res)
goto sensor_err;
/* Add to our list of AEM devices */
@@ -631,6 +602,8 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
return 0;
sensor_err:
+ kfree(data->rs_resp);
+alloc_resp_err:
hwmon_device_unregister(data->hwmon_dev);
hwmon_reg_err:
ipmi_destroy_user(data->ipmi.user);
@@ -638,7 +611,7 @@ ipmi_err:
platform_set_drvdata(data->pdev, NULL);
platform_device_unregister(data->pdev);
dev_err:
- aem_idr_put(data->id);
+ ida_simple_remove(&aem_ida, data->id);
id_err:
kfree(data);
@@ -646,7 +619,7 @@ id_err:
}
/* Find and initialize all AEM1 instances */
-static int aem_init_aem1(struct aem_ipmi_data *probe)
+static void aem_init_aem1(struct aem_ipmi_data *probe)
{
int num, i, err;
@@ -657,11 +630,8 @@ static int aem_init_aem1(struct aem_ipmi_data *probe)
dev_err(probe->bmc_device,
"Error %d initializing AEM1 0x%X\n",
err, i);
- return err;
}
}
-
- return 0;
}
/* Probe functions for AEM2 devices */
@@ -720,7 +690,8 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL;
/* Create sub-device for this fw instance */
- if (aem_idr_get(&data->id))
+ data->id = ida_simple_get(&aem_ida, 0, 0, GFP_KERNEL);
+ if (data->id < 0)
goto id_err;
data->pdev = platform_device_alloc(DRVNAME, data->id);
@@ -735,24 +706,31 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
platform_set_drvdata(data->pdev, data);
/* Set up IPMI interface */
- if (aem_init_ipmi_data(&data->ipmi, probe->interface,
- probe->bmc_device))
+ res = aem_init_ipmi_data(&data->ipmi, probe->interface,
+ probe->bmc_device);
+ if (res)
goto ipmi_err;
/* Register with hwmon */
data->hwmon_dev = hwmon_device_register(&data->pdev->dev);
-
if (IS_ERR(data->hwmon_dev)) {
dev_err(&data->pdev->dev, "Unable to register hwmon "
"device for IPMI interface %d\n",
probe->interface);
+ res = PTR_ERR(data->hwmon_dev);
goto hwmon_reg_err;
}
data->update = update_aem2_sensors;
+ data->rs_resp = kzalloc(sizeof(*(data->rs_resp)) + 8, GFP_KERNEL);
+ if (!data->rs_resp) {
+ res = -ENOMEM;
+ goto alloc_resp_err;
+ }
/* Find sensors */
- if (aem2_find_sensors(data))
+ res = aem2_find_sensors(data);
+ if (res)
goto sensor_err;
/* Add to our list of AEM devices */
@@ -764,6 +742,8 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
return 0;
sensor_err:
+ kfree(data->rs_resp);
+alloc_resp_err:
hwmon_device_unregister(data->hwmon_dev);
hwmon_reg_err:
ipmi_destroy_user(data->ipmi.user);
@@ -771,7 +751,7 @@ ipmi_err:
platform_set_drvdata(data->pdev, NULL);
platform_device_unregister(data->pdev);
dev_err:
- aem_idr_put(data->id);
+ ida_simple_remove(&aem_ida, data->id);
id_err:
kfree(data);
@@ -779,7 +759,7 @@ id_err:
}
/* Find and initialize all AEM2 instances */
-static int aem_init_aem2(struct aem_ipmi_data *probe)
+static void aem_init_aem2(struct aem_ipmi_data *probe)
{
struct aem_find_instance_resp fi_resp;
int err;
@@ -798,12 +778,9 @@ static int aem_init_aem2(struct aem_ipmi_data *probe)
dev_err(probe->bmc_device,
"Error %d initializing AEM2 0x%X\n",
err, fi_resp.module_handle);
- return err;
}
i++;
}
-
- return 0;
}
/* Probe a BMC for AEM firmware instances */
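Besides the same ida conversion, the ibmaem diff makes two behavioural changes: the IPMI response buffer is now allocated once per instance (data->rs_resp, sized for the largest 8-byte register) and reused by aem_read_sensor(), which therefore documents that data->lock must be held; and errors from aem_init_ipmi_data(), hwmon_device_register() and the find-sensors helpers are propagated through res, while aem_init_aem1()/aem_init_aem2() keep scanning further firmware instances rather than aborting on the first failure, hence the switch to a void return. A rough sketch of the preallocate-and-reuse shape (hypothetical my_* names; the real buffer type is struct aem_read_sensor_resp):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_dev {
	struct mutex lock;
	void *rx_buf;			/* sized once for the largest response */
};

static int my_dev_init(struct my_dev *d, size_t max_resp)
{
	d->rx_buf = kzalloc(max_resp, GFP_KERNEL);
	return d->rx_buf ? 0 : -ENOMEM;
}

/* Must be called with d->lock held: the lock is what makes reuse safe. */
static int my_read(struct my_dev *d, size_t len)
{
	memset(d->rx_buf, 0, len);
	/* ... build the request and receive into d->rx_buf ... */
	return 0;
}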
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 02cebb74e20..2d3d72805ff 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -154,8 +154,6 @@ static int jc42_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info);
static int jc42_remove(struct i2c_client *client);
-static int jc42_read_value(struct i2c_client *client, u8 reg);
-static int jc42_write_value(struct i2c_client *client, u8 reg, u16 value);
static struct jc42_data *jc42_update_device(struct device *dev);
@@ -187,7 +185,7 @@ static int jc42_suspend(struct device *dev)
struct jc42_data *data = i2c_get_clientdata(client);
data->config |= JC42_CFG_SHUTDOWN;
- jc42_write_value(client, JC42_REG_CONFIG, data->config);
+ i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
return 0;
}
@@ -197,7 +195,7 @@ static int jc42_resume(struct device *dev)
struct jc42_data *data = i2c_get_clientdata(client);
data->config &= ~JC42_CFG_SHUTDOWN;
- jc42_write_value(client, JC42_REG_CONFIG, data->config);
+ i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
return 0;
}
@@ -315,7 +313,7 @@ static ssize_t set_##value(struct device *dev, \
return -EINVAL; \
mutex_lock(&data->update_lock); \
data->value = jc42_temp_to_reg(val, data->extended); \
- err = jc42_write_value(client, reg, data->value); \
+ err = i2c_smbus_write_word_swapped(client, reg, data->value); \
if (err < 0) \
ret = err; \
mutex_unlock(&data->update_lock); \
@@ -357,7 +355,8 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
data->config = (data->config
& ~(JC42_CFG_HYST_MASK << JC42_CFG_HYST_SHIFT))
| (hyst << JC42_CFG_HYST_SHIFT);
- err = jc42_write_value(client, JC42_REG_CONFIG, data->config);
+ err = i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
+ data->config);
if (err < 0)
ret = err;
mutex_unlock(&data->update_lock);
@@ -452,10 +451,10 @@ static int jc42_detect(struct i2c_client *new_client,
I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- cap = jc42_read_value(new_client, JC42_REG_CAP);
- config = jc42_read_value(new_client, JC42_REG_CONFIG);
- manid = jc42_read_value(new_client, JC42_REG_MANID);
- devid = jc42_read_value(new_client, JC42_REG_DEVICEID);
+ cap = i2c_smbus_read_word_swapped(new_client, JC42_REG_CAP);
+ config = i2c_smbus_read_word_swapped(new_client, JC42_REG_CONFIG);
+ manid = i2c_smbus_read_word_swapped(new_client, JC42_REG_MANID);
+ devid = i2c_smbus_read_word_swapped(new_client, JC42_REG_DEVICEID);
if (cap < 0 || config < 0 || manid < 0 || devid < 0)
return -ENODEV;
@@ -489,14 +488,14 @@ static int jc42_probe(struct i2c_client *new_client,
i2c_set_clientdata(new_client, data);
mutex_init(&data->update_lock);
- cap = jc42_read_value(new_client, JC42_REG_CAP);
+ cap = i2c_smbus_read_word_swapped(new_client, JC42_REG_CAP);
if (cap < 0) {
err = -EINVAL;
goto exit_free;
}
data->extended = !!(cap & JC42_CAP_RANGE);
- config = jc42_read_value(new_client, JC42_REG_CONFIG);
+ config = i2c_smbus_read_word_swapped(new_client, JC42_REG_CONFIG);
if (config < 0) {
err = -EINVAL;
goto exit_free;
@@ -504,7 +503,8 @@ static int jc42_probe(struct i2c_client *new_client,
data->orig_config = config;
if (config & JC42_CFG_SHUTDOWN) {
config &= ~JC42_CFG_SHUTDOWN;
- jc42_write_value(new_client, JC42_REG_CONFIG, config);
+ i2c_smbus_write_word_swapped(new_client, JC42_REG_CONFIG,
+ config);
}
data->config = config;
@@ -535,25 +535,12 @@ static int jc42_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &jc42_group);
if (data->config != data->orig_config)
- jc42_write_value(client, JC42_REG_CONFIG, data->orig_config);
+ i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
+ data->orig_config);
kfree(data);
return 0;
}
-/* All registers are word-sized. */
-static int jc42_read_value(struct i2c_client *client, u8 reg)
-{
- int ret = i2c_smbus_read_word_data(client, reg);
- if (ret < 0)
- return ret;
- return swab16(ret);
-}
-
-static int jc42_write_value(struct i2c_client *client, u8 reg, u16 value)
-{
- return i2c_smbus_write_word_data(client, reg, swab16(value));
-}
-
static struct jc42_data *jc42_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -564,28 +551,29 @@ static struct jc42_data *jc42_update_device(struct device *dev)
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- val = jc42_read_value(client, JC42_REG_TEMP);
+ val = i2c_smbus_read_word_swapped(client, JC42_REG_TEMP);
if (val < 0) {
ret = ERR_PTR(val);
goto abort;
}
data->temp_input = val;
- val = jc42_read_value(client, JC42_REG_TEMP_CRITICAL);
+ val = i2c_smbus_read_word_swapped(client,
+ JC42_REG_TEMP_CRITICAL);
if (val < 0) {
ret = ERR_PTR(val);
goto abort;
}
data->temp_crit = val;
- val = jc42_read_value(client, JC42_REG_TEMP_LOWER);
+ val = i2c_smbus_read_word_swapped(client, JC42_REG_TEMP_LOWER);
if (val < 0) {
ret = ERR_PTR(val);
goto abort;
}
data->temp_min = val;
- val = jc42_read_value(client, JC42_REG_TEMP_UPPER);
+ val = i2c_smbus_read_word_swapped(client, JC42_REG_TEMP_UPPER);
if (val < 0) {
ret = ERR_PTR(val);
goto abort;
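The jc42 conversion above is the template for most of the remaining hwmon patches in this section (lm75, lm77, lm92, sht21, tmp102, w83781d, ...): the per-driver helpers that wrapped i2c_smbus_{read,write}_word_data() plus swab16() are dropped in favour of the i2c core's i2c_smbus_{read,write}_word_swapped() helpers, which exist precisely for chips that transmit the high byte first. The two forms are equivalent, including on errors, since negative return codes pass through unswapped; a sketch with hypothetical read_reg_old/read_reg_new wrappers:

#include <linux/i2c.h>
#include <linux/swab.h>

static int read_reg_old(struct i2c_client *client, u8 reg)
{
	int ret = i2c_smbus_read_word_data(client, reg);	/* LSB-first view */

	return ret < 0 ? ret : swab16(ret);
}

static int read_reg_new(struct i2c_client *client, u8 reg)
{
	return i2c_smbus_read_word_swapped(client, reg);	/* same result */
}

/*
 * Writes follow the same pattern:
 *   i2c_smbus_write_word_swapped(client, reg, val)
 * replaces
 *   i2c_smbus_write_word_data(client, reg, swab16(val));
 */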
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 29b9030d42c..9e64d96620d 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -34,7 +34,7 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
#define LM73_REG_CTRL 0x04
#define LM73_REG_ID 0x07
-#define LM73_ID 0x9001 /* or 0x190 after a swab16() */
+#define LM73_ID 0x9001 /* 0x0190, byte-swapped */
#define DRVNAME "lm73"
#define LM73_TEMP_MIN (-40)
#define LM73_TEMP_MAX 150
@@ -57,7 +57,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
/* Write value */
value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
(LM73_TEMP_MAX*4)) << 5;
- i2c_smbus_write_word_data(client, attr->index, swab16(value));
+ i2c_smbus_write_word_swapped(client, attr->index, value);
return count;
}
@@ -68,8 +68,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
/* use integer division instead of equivalent right shift to
guarantee arithmetic shift and preserve the sign */
- int temp = ((s16) (swab16(i2c_smbus_read_word_data(client,
- attr->index)))*250) / 32;
+ int temp = ((s16) (i2c_smbus_read_word_swapped(client,
+ attr->index))*250) / 32;
return sprintf(buf, "%d\n", temp);
}
@@ -150,17 +150,31 @@ static int lm73_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
- u16 id;
- u8 ctrl;
+ int id, ctrl, conf;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
+ /*
+ * Do as much detection as possible with byte reads first, as word
+ * reads can confuse other devices.
+ */
+ ctrl = i2c_smbus_read_byte_data(new_client, LM73_REG_CTRL);
+ if (ctrl < 0 || (ctrl & 0x10))
+ return -ENODEV;
+
+ conf = i2c_smbus_read_byte_data(new_client, LM73_REG_CONF);
+ if (conf < 0 || (conf & 0x0c))
+ return -ENODEV;
+
+ id = i2c_smbus_read_byte_data(new_client, LM73_REG_ID);
+ if (id < 0 || id != (LM73_ID & 0xff))
+ return -ENODEV;
+
/* Check device ID */
id = i2c_smbus_read_word_data(new_client, LM73_REG_ID);
- ctrl = i2c_smbus_read_byte_data(new_client, LM73_REG_CTRL);
- if ((id != LM73_ID) || (ctrl & 0x10))
+ if (id < 0 || id != LM73_ID)
return -ENODEV;
strlcpy(info->type, "lm73", I2C_NAME_SIZE);
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 90126a2a1e4..1888dd0fc05 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -384,13 +384,10 @@ static struct i2c_driver lm75_driver = {
*/
static int lm75_read_value(struct i2c_client *client, u8 reg)
{
- int value;
-
if (reg == LM75_REG_CONF)
return i2c_smbus_read_byte_data(client, reg);
-
- value = i2c_smbus_read_word_data(client, reg);
- return (value < 0) ? value : swab16(value);
+ else
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
@@ -398,7 +395,7 @@ static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
if (reg == LM75_REG_CONF)
return i2c_smbus_write_byte_data(client, reg, value);
else
- return i2c_smbus_write_word_data(client, reg, swab16(value));
+ return i2c_smbus_write_word_swapped(client, reg, value);
}
static struct lm75_data *lm75_update_device(struct device *dev)
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index b28a297be50..8dfc6782d59 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -365,7 +365,7 @@ static u16 lm77_read_value(struct i2c_client *client, u8 reg)
if (reg == LM77_REG_CONF)
return i2c_smbus_read_byte_data(client, reg);
else
- return swab16(i2c_smbus_read_word_data(client, reg));
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int lm77_write_value(struct i2c_client *client, u8 reg, u16 value)
@@ -373,7 +373,7 @@ static int lm77_write_value(struct i2c_client *client, u8 reg, u16 value)
if (reg == LM77_REG_CONF)
return i2c_smbus_write_byte_data(client, reg, value);
else
- return i2c_smbus_write_word_data(client, reg, swab16(value));
+ return i2c_smbus_write_word_swapped(client, reg, value);
}
static void lm77_init_client(struct i2c_client *client)
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 90ddb877421..615bc4f4e53 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1105,41 +1105,37 @@ static DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec);
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm90_detect(struct i2c_client *new_client,
+static int lm90_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
- struct i2c_adapter *adapter = new_client->adapter;
- int address = new_client->addr;
+ struct i2c_adapter *adapter = client->adapter;
+ int address = client->addr;
const char *name = NULL;
- int man_id, chip_id, reg_config1, reg_config2, reg_convrate;
+ int man_id, chip_id, config1, config2, convrate;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* detection and identification */
- if ((man_id = i2c_smbus_read_byte_data(new_client,
- LM90_REG_R_MAN_ID)) < 0
- || (chip_id = i2c_smbus_read_byte_data(new_client,
- LM90_REG_R_CHIP_ID)) < 0
- || (reg_config1 = i2c_smbus_read_byte_data(new_client,
- LM90_REG_R_CONFIG1)) < 0
- || (reg_convrate = i2c_smbus_read_byte_data(new_client,
- LM90_REG_R_CONVRATE)) < 0)
+ man_id = i2c_smbus_read_byte_data(client, LM90_REG_R_MAN_ID);
+ chip_id = i2c_smbus_read_byte_data(client, LM90_REG_R_CHIP_ID);
+ config1 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG1);
+ convrate = i2c_smbus_read_byte_data(client, LM90_REG_R_CONVRATE);
+ if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
return -ENODEV;
if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
- reg_config2 = i2c_smbus_read_byte_data(new_client,
- LM90_REG_R_CONFIG2);
- if (reg_config2 < 0)
+ config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
+ if (config2 < 0)
return -ENODEV;
} else
- reg_config2 = 0; /* Make compiler happy */
+ config2 = 0; /* Make compiler happy */
if ((address == 0x4C || address == 0x4D)
&& man_id == 0x01) { /* National Semiconductor */
- if ((reg_config1 & 0x2A) == 0x00
- && (reg_config2 & 0xF8) == 0x00
- && reg_convrate <= 0x09) {
+ if ((config1 & 0x2A) == 0x00
+ && (config2 & 0xF8) == 0x00
+ && convrate <= 0x09) {
if (address == 0x4C
&& (chip_id & 0xF0) == 0x20) { /* LM90 */
name = "lm90";
@@ -1163,8 +1159,8 @@ static int lm90_detect(struct i2c_client *new_client,
if ((address == 0x4C || address == 0x4D)
&& man_id == 0x41) { /* Analog Devices */
if ((chip_id & 0xF0) == 0x40 /* ADM1032 */
- && (reg_config1 & 0x3F) == 0x00
- && reg_convrate <= 0x0A) {
+ && (config1 & 0x3F) == 0x00
+ && convrate <= 0x0A) {
name = "adm1032";
/* The ADM1032 supports PEC, but only if combined
transactions are not used. */
@@ -1173,18 +1169,18 @@ static int lm90_detect(struct i2c_client *new_client,
info->flags |= I2C_CLIENT_PEC;
} else
if (chip_id == 0x51 /* ADT7461 */
- && (reg_config1 & 0x1B) == 0x00
- && reg_convrate <= 0x0A) {
+ && (config1 & 0x1B) == 0x00
+ && convrate <= 0x0A) {
name = "adt7461";
} else
if (chip_id == 0x57 /* ADT7461A, NCT1008 */
- && (reg_config1 & 0x1B) == 0x00
- && reg_convrate <= 0x0A) {
+ && (config1 & 0x1B) == 0x00
+ && convrate <= 0x0A) {
name = "adt7461a";
}
} else
if (man_id == 0x4D) { /* Maxim */
- int reg_emerg, reg_emerg2, reg_status2;
+ int emerg, emerg2, status2;
/*
* We read MAX6659_REG_R_REMOTE_EMERG twice, and re-read
@@ -1192,13 +1188,15 @@ static int lm90_detect(struct i2c_client *new_client,
* exists, both readings will reflect the same value. Otherwise,
* the readings will be different.
*/
- if ((reg_emerg = i2c_smbus_read_byte_data(new_client,
- MAX6659_REG_R_REMOTE_EMERG)) < 0
- || i2c_smbus_read_byte_data(new_client, LM90_REG_R_MAN_ID) < 0
- || (reg_emerg2 = i2c_smbus_read_byte_data(new_client,
- MAX6659_REG_R_REMOTE_EMERG)) < 0
- || (reg_status2 = i2c_smbus_read_byte_data(new_client,
- MAX6696_REG_R_STATUS2)) < 0)
+ emerg = i2c_smbus_read_byte_data(client,
+ MAX6659_REG_R_REMOTE_EMERG);
+ man_id = i2c_smbus_read_byte_data(client,
+ LM90_REG_R_MAN_ID);
+ emerg2 = i2c_smbus_read_byte_data(client,
+ MAX6659_REG_R_REMOTE_EMERG);
+ status2 = i2c_smbus_read_byte_data(client,
+ MAX6696_REG_R_STATUS2);
+ if (emerg < 0 || man_id < 0 || emerg2 < 0 || status2 < 0)
return -ENODEV;
/*
@@ -1216,8 +1214,8 @@ static int lm90_detect(struct i2c_client *new_client,
*/
if (chip_id == man_id
&& (address == 0x4C || address == 0x4D || address == 0x4E)
- && (reg_config1 & 0x1F) == (man_id & 0x0F)
- && reg_convrate <= 0x09) {
+ && (config1 & 0x1F) == (man_id & 0x0F)
+ && convrate <= 0x09) {
if (address == 0x4C)
name = "max6657";
else
@@ -1235,10 +1233,10 @@ static int lm90_detect(struct i2c_client *new_client,
* one of those registers exists.
*/
if (chip_id == 0x01
- && (reg_config1 & 0x10) == 0x00
- && (reg_status2 & 0x01) == 0x00
- && reg_emerg == reg_emerg2
- && reg_convrate <= 0x07) {
+ && (config1 & 0x10) == 0x00
+ && (status2 & 0x01) == 0x00
+ && emerg == emerg2
+ && convrate <= 0x07) {
name = "max6696";
} else
/*
@@ -1248,8 +1246,8 @@ static int lm90_detect(struct i2c_client *new_client,
* second to last bit of config1 (software reset).
*/
if (chip_id == 0x01
- && (reg_config1 & 0x03) == 0x00
- && reg_convrate <= 0x07) {
+ && (config1 & 0x03) == 0x00
+ && convrate <= 0x07) {
name = "max6680";
} else
/*
@@ -1258,21 +1256,21 @@ static int lm90_detect(struct i2c_client *new_client,
* register are unused and should return zero when read.
*/
if (chip_id == 0x59
- && (reg_config1 & 0x3f) == 0x00
- && reg_convrate <= 0x07) {
+ && (config1 & 0x3f) == 0x00
+ && convrate <= 0x07) {
name = "max6646";
}
} else
if (address == 0x4C
&& man_id == 0x5C) { /* Winbond/Nuvoton */
- if ((reg_config1 & 0x2A) == 0x00
- && (reg_config2 & 0xF8) == 0x00) {
+ if ((config1 & 0x2A) == 0x00
+ && (config2 & 0xF8) == 0x00) {
if (chip_id == 0x01 /* W83L771W/G */
- && reg_convrate <= 0x09) {
+ && convrate <= 0x09) {
name = "w83l771";
} else
if ((chip_id & 0xFE) == 0x10 /* W83L771AWG/ASG */
- && reg_convrate <= 0x08) {
+ && convrate <= 0x08) {
name = "w83l771";
}
}
@@ -1280,9 +1278,9 @@ static int lm90_detect(struct i2c_client *new_client,
if (address >= 0x48 && address <= 0x4F
&& man_id == 0xA1) { /* NXP Semiconductor/Philips */
if (chip_id == 0x00
- && (reg_config1 & 0x2A) == 0x00
- && (reg_config2 & 0xFE) == 0x00
- && reg_convrate <= 0x09) {
+ && (config1 & 0x2A) == 0x00
+ && (config2 & 0xFE) == 0x00
+ && convrate <= 0x09) {
name = "sa56004";
}
}
@@ -1301,19 +1299,18 @@ static int lm90_detect(struct i2c_client *new_client,
static void lm90_remove_files(struct i2c_client *client, struct lm90_data *data)
{
+ struct device *dev = &client->dev;
+
if (data->flags & LM90_HAVE_TEMP3)
- sysfs_remove_group(&client->dev.kobj, &lm90_temp3_group);
+ sysfs_remove_group(&dev->kobj, &lm90_temp3_group);
if (data->flags & LM90_HAVE_EMERGENCY_ALARM)
- sysfs_remove_group(&client->dev.kobj,
- &lm90_emergency_alarm_group);
+ sysfs_remove_group(&dev->kobj, &lm90_emergency_alarm_group);
if (data->flags & LM90_HAVE_EMERGENCY)
- sysfs_remove_group(&client->dev.kobj,
- &lm90_emergency_group);
+ sysfs_remove_group(&dev->kobj, &lm90_emergency_group);
if (data->flags & LM90_HAVE_OFFSET)
- device_remove_file(&client->dev,
- &sensor_dev_attr_temp2_offset.dev_attr);
- device_remove_file(&client->dev, &dev_attr_pec);
- sysfs_remove_group(&client->dev.kobj, &lm90_group);
+ device_remove_file(dev, &sensor_dev_attr_temp2_offset.dev_attr);
+ device_remove_file(dev, &dev_attr_pec);
+ sysfs_remove_group(&dev->kobj, &lm90_group);
}
static void lm90_init_client(struct i2c_client *client)
@@ -1362,10 +1359,11 @@ static void lm90_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
}
-static int lm90_probe(struct i2c_client *new_client,
+static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct i2c_adapter *adapter = to_i2c_adapter(new_client->dev.parent);
+ struct device *dev = &client->dev;
+ struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
struct lm90_data *data;
int err;
@@ -1374,14 +1372,14 @@ static int lm90_probe(struct i2c_client *new_client,
err = -ENOMEM;
goto exit;
}
- i2c_set_clientdata(new_client, data);
+ i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Set the device type */
data->kind = id->driver_data;
if (data->kind == adm1032) {
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
- new_client->flags &= ~I2C_CLIENT_PEC;
+ client->flags &= ~I2C_CLIENT_PEC;
}
/* Different devices have different alarm bits triggering the
@@ -1396,43 +1394,41 @@ static int lm90_probe(struct i2c_client *new_client,
data->max_convrate = lm90_params[data->kind].max_convrate;
/* Initialize the LM90 chip */
- lm90_init_client(new_client);
+ lm90_init_client(client);
/* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &lm90_group);
+ err = sysfs_create_group(&dev->kobj, &lm90_group);
if (err)
goto exit_free;
- if (new_client->flags & I2C_CLIENT_PEC) {
- err = device_create_file(&new_client->dev, &dev_attr_pec);
+ if (client->flags & I2C_CLIENT_PEC) {
+ err = device_create_file(dev, &dev_attr_pec);
if (err)
goto exit_remove_files;
}
if (data->flags & LM90_HAVE_OFFSET) {
- err = device_create_file(&new_client->dev,
+ err = device_create_file(dev,
&sensor_dev_attr_temp2_offset.dev_attr);
if (err)
goto exit_remove_files;
}
if (data->flags & LM90_HAVE_EMERGENCY) {
- err = sysfs_create_group(&new_client->dev.kobj,
- &lm90_emergency_group);
+ err = sysfs_create_group(&dev->kobj, &lm90_emergency_group);
if (err)
goto exit_remove_files;
}
if (data->flags & LM90_HAVE_EMERGENCY_ALARM) {
- err = sysfs_create_group(&new_client->dev.kobj,
+ err = sysfs_create_group(&dev->kobj,
&lm90_emergency_alarm_group);
if (err)
goto exit_remove_files;
}
if (data->flags & LM90_HAVE_TEMP3) {
- err = sysfs_create_group(&new_client->dev.kobj,
- &lm90_temp3_group);
+ err = sysfs_create_group(&dev->kobj, &lm90_temp3_group);
if (err)
goto exit_remove_files;
}
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
@@ -1441,7 +1437,7 @@ static int lm90_probe(struct i2c_client *new_client,
return 0;
exit_remove_files:
- lm90_remove_files(new_client, data);
+ lm90_remove_files(client, data);
exit_free:
kfree(data);
exit:
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 7c31e6205f8..8fcbd4d422c 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -117,16 +117,16 @@ static struct lm92_data *lm92_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ)
|| !data->valid) {
dev_dbg(&client->dev, "Updating lm92 data\n");
- data->temp1_input = swab16(i2c_smbus_read_word_data(client,
- LM92_REG_TEMP));
- data->temp1_hyst = swab16(i2c_smbus_read_word_data(client,
- LM92_REG_TEMP_HYST));
- data->temp1_crit = swab16(i2c_smbus_read_word_data(client,
- LM92_REG_TEMP_CRIT));
- data->temp1_min = swab16(i2c_smbus_read_word_data(client,
- LM92_REG_TEMP_LOW));
- data->temp1_max = swab16(i2c_smbus_read_word_data(client,
- LM92_REG_TEMP_HIGH));
+ data->temp1_input = i2c_smbus_read_word_swapped(client,
+ LM92_REG_TEMP);
+ data->temp1_hyst = i2c_smbus_read_word_swapped(client,
+ LM92_REG_TEMP_HYST);
+ data->temp1_crit = i2c_smbus_read_word_swapped(client,
+ LM92_REG_TEMP_CRIT);
+ data->temp1_min = i2c_smbus_read_word_swapped(client,
+ LM92_REG_TEMP_LOW);
+ data->temp1_max = i2c_smbus_read_word_swapped(client,
+ LM92_REG_TEMP_HIGH);
data->last_updated = jiffies;
data->valid = 1;
@@ -158,7 +158,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co
\
mutex_lock(&data->update_lock); \
data->value = TEMP_TO_REG(val); \
- i2c_smbus_write_word_data(client, reg, swab16(data->value)); \
+ i2c_smbus_write_word_swapped(client, reg, data->value); \
mutex_unlock(&data->update_lock); \
return count; \
}
@@ -194,8 +194,8 @@ static ssize_t set_temp1_crit_hyst(struct device *dev, struct device_attribute *
mutex_lock(&data->update_lock);
data->temp1_hyst = TEMP_FROM_REG(data->temp1_crit) - val;
- i2c_smbus_write_word_data(client, LM92_REG_TEMP_HYST,
- swab16(TEMP_TO_REG(data->temp1_hyst)));
+ i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST,
+ TEMP_TO_REG(data->temp1_hyst));
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index dd2d7b9620c..385886a4f22 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -137,10 +137,10 @@ static int max16065_read_adc(struct i2c_client *client, int reg)
{
int rv;
- rv = i2c_smbus_read_word_data(client, reg);
+ rv = i2c_smbus_read_word_swapped(client, reg);
if (unlikely(rv < 0))
return rv;
- return ((rv & 0xff) << 2) | ((rv >> 14) & 0x03);
+ return rv >> 6;
}
static struct max16065_data *max16065_update_device(struct device *dev)
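The max16065 hunk is the same byte-order cleanup in a slightly different guise: the 10-bit ADC result lives in the top ten bits of a big-endian register pair, so after a swapped word read a single right shift extracts it. With msb and lsb standing for the two bytes as the chip transmits them, the old and new expressions work out identically:

/*
 * ADC value = (msb << 2) | (lsb >> 6)
 *
 * old: rv = i2c_smbus_read_word_data()    == (lsb << 8) | msb
 *      ((rv & 0xff) << 2) | ((rv >> 14) & 0x03)
 *                                         == (msb << 2) | (lsb >> 6)
 *
 * new: rv = i2c_smbus_read_word_swapped() == (msb << 8) | lsb
 *      rv >> 6                            == (msb << 2) | (lsb >> 6)
 */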
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index d5226c9e120..ef65ab56b09 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -31,7 +31,7 @@
#define MC13783_ADC_NAME "mc13783-adc"
struct mc13783_adc_priv {
- struct mc13783 *mc13783;
+ struct mc13xxx *mc13xxx;
struct device *hwmon_dev;
};
@@ -51,8 +51,8 @@ static int mc13783_adc_read(struct device *dev,
unsigned int sample[4];
int ret;
- ret = mc13783_adc_do_conversion(priv->mc13783,
- MC13783_ADC_MODE_MULT_CHAN,
+ ret = mc13xxx_adc_do_conversion(priv->mc13xxx,
+ MC13XXX_ADC_MODE_MULT_CHAN,
channel, sample);
if (ret)
return ret;
@@ -147,9 +147,9 @@ static const struct attribute_group mc13783_group_ts = {
static int mc13783_adc_use_touchscreen(struct platform_device *pdev)
{
struct mc13783_adc_priv *priv = platform_get_drvdata(pdev);
- unsigned flags = mc13783_get_flags(priv->mc13783);
+ unsigned flags = mc13xxx_get_flags(priv->mc13xxx);
- return flags & MC13783_USE_TOUCHSCREEN;
+ return flags & MC13XXX_USE_TOUCHSCREEN;
}
static int __init mc13783_adc_probe(struct platform_device *pdev)
@@ -161,7 +161,7 @@ static int __init mc13783_adc_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
+ priv->mc13xxx = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 1c8c9812f24..15398780cc0 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -83,25 +83,6 @@ static inline int sht21_rh_ticks_to_per_cent_mille(int ticks)
}
/**
- * sht21_read_word_data() - read word from register
- * @client: I2C client device
- * @reg: I2C command byte
- *
- * Returns value, negative errno on error.
- */
-static inline int sht21_read_word_data(struct i2c_client *client, u8 reg)
-{
- int ret = i2c_smbus_read_word_data(client, reg);
- if (ret < 0)
- return ret;
- /*
- * SMBus specifies low byte first, but the SHT21 returns MSB
- * first, so we have to swab16 the values
- */
- return swab16(ret);
-}
-
-/**
* sht21_update_measurements() - get updated measurements from device
* @client: I2C client device
*
@@ -119,12 +100,13 @@ static int sht21_update_measurements(struct i2c_client *client)
* maximum two measurements per second at 12bit accuracy shall be made.
*/
if (time_after(jiffies, sht21->last_update + HZ / 2) || !sht21->valid) {
- ret = sht21_read_word_data(client, SHT21_TRIG_T_MEASUREMENT_HM);
+ ret = i2c_smbus_read_word_swapped(client,
+ SHT21_TRIG_T_MEASUREMENT_HM);
if (ret < 0)
goto out;
sht21->temperature = sht21_temp_ticks_to_millicelsius(ret);
- ret = sht21_read_word_data(client,
- SHT21_TRIG_RH_MEASUREMENT_HM);
+ ret = i2c_smbus_read_word_swapped(client,
+ SHT21_TRIG_RH_MEASUREMENT_HM);
if (ret < 0)
goto out;
sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret);
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index 425df5bccd4..411638181fd 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -214,33 +214,26 @@ static int smm665_read_adc(struct smm665_data *data, int adc)
*
* Neither i2c_smbus_read_byte() nor
* i2c_smbus_read_block_data() worked here,
- * so use i2c_smbus_read_word_data() instead.
+ * so use i2c_smbus_read_word_swapped() instead.
* We could also try to use i2c_master_recv(),
* but that is not always supported.
*/
- rv = i2c_smbus_read_word_data(client, 0);
+ rv = i2c_smbus_read_word_swapped(client, 0);
if (rv < 0) {
dev_dbg(&client->dev, "Failed to read ADC value: error %d", rv);
return -1;
}
/*
* Validate/verify readback adc channel (in bit 11..14).
- * High byte is in lower 8 bit of rv, so only shift by 3.
*/
- radc = (rv >> 3) & 0x0f;
+ radc = (rv >> 11) & 0x0f;
if (radc != adc) {
dev_dbg(&client->dev, "Unexpected RADC: Expected %d got %d",
adc, radc);
return -EIO;
}
- /*
- * Chip replies with H/L, while SMBus expects L/H.
- * Thus, byte order is reversed, and we have to swap
- * the result.
- */
- rv = swab16(rv) & SMM665_ADC_MASK;
- return rv;
+ return rv & SMM665_ADC_MASK;
}
static struct smm665_data *smm665_update_device(struct device *dev)
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 9fb7516e6f4..65c88ff5645 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -113,7 +113,7 @@ struct smsc47b397_data {
u8 temp[4];
};
-static int smsc47b397_read_value(struct smsc47b397_data* data, u8 reg)
+static int smsc47b397_read_value(struct smsc47b397_data *data, u8 reg)
{
int res;
@@ -265,7 +265,8 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
return -EBUSY;
}
- if (!(data = kzalloc(sizeof(struct smsc47b397_data), GFP_KERNEL))) {
+ data = kzalloc(sizeof(struct smsc47b397_data), GFP_KERNEL);
+ if (!data) {
err = -ENOMEM;
goto error_release;
}
@@ -276,7 +277,8 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
mutex_init(&data->update_lock);
platform_set_drvdata(pdev, data);
- if ((err = sysfs_create_group(&dev->kobj, &smsc47b397_group)))
+ err = sysfs_create_group(&dev->kobj, &smsc47b397_group);
+ if (err)
goto error_free;
data->hwmon_dev = hwmon_device_register(dev);
@@ -345,7 +347,7 @@ static int __init smsc47b397_find(unsigned short *addr)
superio_enter();
id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
- switch(id) {
+ switch (id) {
case 0x81:
name = "SCH5307-NS";
break;
@@ -379,7 +381,8 @@ static int __init smsc47b397_init(void)
unsigned short address;
int ret;
- if ((ret = smsc47b397_find(&address)))
+ ret = smsc47b397_find(&address);
+ if (ret)
return ret;
ret = platform_driver_register(&smsc47b397_driver);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5bd19496880..643aa8c9453 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -55,19 +55,6 @@ struct tmp102 {
int temp[3];
};
-/* SMBus specifies low byte first, but the TMP102 returns high byte first,
- * so we have to swab16 the values */
-static inline int tmp102_read_reg(struct i2c_client *client, u8 reg)
-{
- int result = i2c_smbus_read_word_data(client, reg);
- return result < 0 ? result : swab16(result);
-}
-
-static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val)
-{
- return i2c_smbus_write_word_data(client, reg, swab16(val));
-}
-
/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
static inline int tmp102_reg_to_mC(s16 val)
{
@@ -94,7 +81,8 @@ static struct tmp102 *tmp102_update_device(struct i2c_client *client)
if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
int i;
for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
- int status = tmp102_read_reg(client, tmp102_reg[i]);
+ int status = i2c_smbus_read_word_swapped(client,
+ tmp102_reg[i]);
if (status > -1)
tmp102->temp[i] = tmp102_reg_to_mC(status);
}
@@ -130,8 +118,8 @@ static ssize_t tmp102_set_temp(struct device *dev,
mutex_lock(&tmp102->lock);
tmp102->temp[sda->index] = val;
- status = tmp102_write_reg(client, tmp102_reg[sda->index],
- tmp102_mC_to_reg(val));
+ status = i2c_smbus_write_word_swapped(client, tmp102_reg[sda->index],
+ tmp102_mC_to_reg(val));
mutex_unlock(&tmp102->lock);
return status ? : count;
}
@@ -178,18 +166,19 @@ static int __devinit tmp102_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, tmp102);
- status = tmp102_read_reg(client, TMP102_CONF_REG);
+ status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
if (status < 0) {
dev_err(&client->dev, "error reading config register\n");
goto fail_free;
}
tmp102->config_orig = status;
- status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG);
+ status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
+ TMP102_CONFIG);
if (status < 0) {
dev_err(&client->dev, "error writing config register\n");
goto fail_restore_config;
}
- status = tmp102_read_reg(client, TMP102_CONF_REG);
+ status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
if (status < 0) {
dev_err(&client->dev, "error reading config register\n");
goto fail_restore_config;
@@ -222,7 +211,8 @@ static int __devinit tmp102_probe(struct i2c_client *client,
fail_remove_sysfs:
sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
fail_restore_config:
- tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig);
+ i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
+ tmp102->config_orig);
fail_free:
kfree(tmp102);
@@ -240,10 +230,10 @@ static int __devexit tmp102_remove(struct i2c_client *client)
if (tmp102->config_orig & TMP102_CONF_SD) {
int config;
- config = tmp102_read_reg(client, TMP102_CONF_REG);
+ config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
if (config >= 0)
- tmp102_write_reg(client, TMP102_CONF_REG,
- config | TMP102_CONF_SD);
+ i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
+ config | TMP102_CONF_SD);
}
kfree(tmp102);
@@ -257,12 +247,12 @@ static int tmp102_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
int config;
- config = tmp102_read_reg(client, TMP102_CONF_REG);
+ config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
if (config < 0)
return config;
config |= TMP102_CONF_SD;
- return tmp102_write_reg(client, TMP102_CONF_REG, config);
+ return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
}
static int tmp102_resume(struct device *dev)
@@ -270,12 +260,12 @@ static int tmp102_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
int config;
- config = tmp102_read_reg(client, TMP102_CONF_REG);
+ config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
if (config < 0)
return config;
config &= ~TMP102_CONF_SD;
- return tmp102_write_reg(client, TMP102_CONF_REG, config);
+ return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
}
static const struct dev_pm_ops tmp102_dev_pm_ops = {
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 27a62711e0a..3cd07bf42dc 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/hwmon.h>
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 98aab4bea34..93f5fc7d605 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,7 +1,7 @@
/*
w83627ehf - Driver for the hardware monitoring functionality of
the Winbond W83627EHF Super-I/O chip
- Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2005-2011 Jean Delvare <khali@linux-fr.org>
Copyright (C) 2006 Yuan Mu (Winbond),
Rudolf Marek <r.marek@assembler.cz>
David Hubbard <david.c.hubbard@gmail.com>
@@ -39,6 +39,7 @@
0x8860 0xa1
w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3
w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3
+ w83627uhg 8 2 2 2 0xa230 0xc1 0x5ca3
w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
@@ -61,14 +62,17 @@
#include <linux/io.h>
#include "lm75.h"
-enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b, nct6775,
- nct6776 };
+enum kinds {
+ w83627ehf, w83627dhg, w83627dhg_p, w83627uhg,
+ w83667hg, w83667hg_b, nct6775, nct6776,
+};
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
static const char * const w83627ehf_device_names[] = {
"w83627ehf",
"w83627dhg",
"w83627dhg",
+ "w83627uhg",
"w83667hg",
"w83667hg",
"nct6775",
@@ -104,6 +108,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_W83627EHG_ID 0x8860
#define SIO_W83627DHG_ID 0xa020
#define SIO_W83627DHG_P_ID 0xb070
+#define SIO_W83627UHG_ID 0xa230
#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
#define SIO_NCT6775_ID 0xb470
@@ -388,18 +393,23 @@ div_from_reg(u8 reg)
return 1 << reg;
}
-/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
-
-static u8 scale_in[10] = { 8, 8, 16, 16, 8, 8, 8, 16, 16, 8 };
+/* Some of the voltage inputs have internal scaling, the tables below
+ * contain 8 (the ADC LSB in mV) * scaling factor * 100 */
+static const u16 scale_in_common[10] = {
+ 800, 800, 1600, 1600, 800, 800, 800, 1600, 1600, 800
+};
+static const u16 scale_in_w83627uhg[9] = {
+ 800, 800, 3328, 3424, 800, 800, 0, 3328, 3400
+};
-static inline long in_from_reg(u8 reg, u8 nr)
+static inline long in_from_reg(u8 reg, u8 nr, const u16 *scale_in)
{
- return reg * scale_in[nr];
+ return DIV_ROUND_CLOSEST(reg * scale_in[nr], 100);
}
-static inline u8 in_to_reg(u32 val, u8 nr)
+static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scale_in)
{
- return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0,
+ return SENSORS_LIMIT(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0,
255);
}
@@ -430,6 +440,7 @@ struct w83627ehf_data {
const u16 *REG_FAN_STOP_TIME;
const u16 *REG_FAN_MAX_OUTPUT;
const u16 *REG_FAN_STEP_OUTPUT;
+ const u16 *scale_in;
unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
@@ -481,7 +492,8 @@ struct w83627ehf_data {
u8 vrm;
u16 have_temp;
- u8 in6_skip;
+ u8 in6_skip:1;
+ u8 temp3_val_only:1;
};
struct w83627ehf_sio_data {
@@ -907,7 +919,8 @@ show_##reg(struct device *dev, struct device_attribute *attr, \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr)); \
+ return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \
+ data->scale_in)); \
}
show_in_reg(in)
show_in_reg(in_min)
@@ -928,7 +941,7 @@ store_in_##reg(struct device *dev, struct device_attribute *attr, \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
- data->in_##reg[nr] = in_to_reg(val, nr); \
+ data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \
w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
data->in_##reg[nr]); \
mutex_unlock(&data->update_lock); \
@@ -1617,25 +1630,28 @@ static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
store_fan_step_output, 3),
};
+static struct sensor_device_attribute sda_sf3_arrays_fan3[] = {
+ SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
+ store_fan_stop_time, 2),
+ SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 2),
+ SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 2),
+};
+
static struct sensor_device_attribute sda_sf3_arrays[] = {
SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
store_fan_stop_time, 0),
SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
store_fan_stop_time, 1),
- SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 2),
SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
store_fan_start_output, 0),
SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
store_fan_start_output, 1),
- SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 2),
SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
store_fan_stop_output, 0),
SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
store_fan_stop_output, 1),
- SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 2),
};
@@ -1728,6 +1744,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
device_remove_file(dev, &attr->dev_attr);
}
+ for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++)
+ device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
for (i = 0; i < data->in_num; i++) {
@@ -1756,6 +1774,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
continue;
device_remove_file(dev, &sda_temp_input[i].dev_attr);
device_remove_file(dev, &sda_temp_label[i].dev_attr);
+ if (i == 2 && data->temp3_val_only)
+ continue;
device_remove_file(dev, &sda_temp_max[i].dev_attr);
device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
if (i > 2)
@@ -1808,11 +1828,24 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
case w83627ehf:
diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
break;
+ case w83627uhg:
+ diode = 0x00;
+ break;
default:
diode = 0x70;
}
for (i = 0; i < 3; i++) {
- if ((tmp & (0x02 << i)))
+ const char *label = NULL;
+
+ if (data->temp_label)
+ label = data->temp_label[data->temp_src[i]];
+
+ /* Digital source overrides analog type */
+ if (label && strncmp(label, "PECI", 4) == 0)
+ data->temp_type[i] = 6;
+ else if (label && strncmp(label, "AMD", 3) == 0)
+ data->temp_type[i] = 5;
+ else if ((tmp & (0x02 << i)))
data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
else
data->temp_type[i] = 4; /* thermistor */
@@ -1846,11 +1879,31 @@ static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
}
static void __devinit
+w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
+{
+ int i;
+
+ for (i = 0; i < n_temp; i++) {
+ data->reg_temp[i] = W83627EHF_REG_TEMP[i];
+ data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+ }
+}
+
+static void __devinit
w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
struct w83627ehf_data *data)
{
int fan3pin, fan4pin, fan4min, fan5pin, regval;
+ /* The W83627UHG is simple, only two fan inputs, no config */
+ if (sio_data->kind == w83627uhg) {
+ data->has_fan = 0x03; /* fan1 and fan2 */
+ data->has_fan_min = 0x03;
+ return;
+ }
+
superio_enter(sio_data->sioreg);
/* fan4 and fan5 share some pins with the GPIO and serial flash */
@@ -1942,23 +1995,24 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG, NCT6775F, and NCT6776F have 3 pwms */
- data->pwm_num = (sio_data->kind == w83667hg
- || sio_data->kind == w83667hg_b
- || sio_data->kind == nct6775
- || sio_data->kind == nct6776) ? 3 : 4;
+ /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */
+ switch (sio_data->kind) {
+ default:
+ data->pwm_num = 4;
+ break;
+ case w83667hg:
+ case w83667hg_b:
+ case nct6775:
+ case nct6776:
+ data->pwm_num = 3;
+ break;
+ case w83627uhg:
+ data->pwm_num = 2;
+ break;
+ }
+ /* Default to 3 temperature inputs, code below will adjust as needed */
data->have_temp = 0x07;
- /* Check temp3 configuration bit for 667HG */
- if (sio_data->kind == w83667hg) {
- u8 reg;
-
- reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]);
- if (reg & 0x01)
- data->have_temp &= ~(1 << 2);
- else
- data->in6_skip = 1; /* either temp3 or in6 */
- }
/* Deal with temperature register setup first. */
if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
@@ -2035,16 +2089,12 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
} else if (sio_data->kind == w83667hg_b) {
u8 reg;
+ w83627ehf_set_temp_reg_ehf(data, 4);
+
/*
* Temperature sources are selected with bank 0, registers 0x49
* and 0x4a.
*/
- for (i = 0; i < ARRAY_SIZE(W83627EHF_REG_TEMP); i++) {
- data->reg_temp[i] = W83627EHF_REG_TEMP[i];
- data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
- data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
- data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
- }
reg = w83627ehf_read_value(data, 0x4a);
data->temp_src[0] = reg >> 5;
reg = w83627ehf_read_value(data, 0x49);
@@ -2078,13 +2128,60 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
data->in6_skip = 1;
data->temp_label = w83667hg_b_temp_label;
+ } else if (sio_data->kind == w83627uhg) {
+ u8 reg;
+
+ w83627ehf_set_temp_reg_ehf(data, 3);
+
+ /*
+ * Temperature sources for temp1 and temp2 are selected with
+ * bank 0, registers 0x49 and 0x4a.
+ */
+ data->temp_src[0] = 0; /* SYSTIN */
+ reg = w83627ehf_read_value(data, 0x49) & 0x07;
+ /* Adjust to have the same mapping as other source registers */
+ if (reg == 0)
+ data->temp_src[1]++;
+ else if (reg >= 2 && reg <= 5)
+ data->temp_src[1] += 2;
+ else /* should never happen */
+ data->have_temp &= ~(1 << 1);
+ reg = w83627ehf_read_value(data, 0x4a);
+ data->temp_src[2] = reg >> 5;
+
+ /*
+ * Skip temp3 if source is invalid or the same as temp1
+ * or temp2.
+ */
+ if (data->temp_src[2] == 2 || data->temp_src[2] == 3 ||
+ data->temp_src[2] == data->temp_src[0] ||
+ ((data->have_temp & (1 << 1)) &&
+ data->temp_src[2] == data->temp_src[1]))
+ data->have_temp &= ~(1 << 2);
+ else
+ data->temp3_val_only = 1; /* No limit regs */
+
+ data->in6_skip = 1; /* No VIN3 */
+
+ data->temp_label = w83667hg_b_temp_label;
} else {
+ w83627ehf_set_temp_reg_ehf(data, 3);
+
/* Temperature sources are fixed */
- for (i = 0; i < 3; i++) {
- data->reg_temp[i] = W83627EHF_REG_TEMP[i];
- data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
- data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
- data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+
+ if (sio_data->kind == w83667hg) {
+ u8 reg;
+
+ /*
+ * Chip supports either AUXTIN or VIN3. Try to find
+ * out which one.
+ */
+ reg = w83627ehf_read_value(data,
+ W83627EHF_REG_TEMP_CONFIG[2]);
+ if (reg & 0x01)
+ data->have_temp &= ~(1 << 2);
+ else
+ data->in6_skip = 1;
}
}
@@ -2144,6 +2241,12 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
W83627EHF_REG_FAN_STEP_OUTPUT_COMMON;
}
+ /* Setup input voltage scaling factors */
+ if (sio_data->kind == w83627uhg)
+ data->scale_in = scale_in_w83627uhg;
+ else
+ data->scale_in = scale_in_common;
+
/* Initialize the chip */
w83627ehf_init_device(data, sio_data->kind);
@@ -2160,7 +2263,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
err = device_create_file(dev, &dev_attr_cpu0_vid);
if (err)
goto exit_release;
- } else {
+ } else if (sio_data->kind != w83627uhg) {
superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) {
/* Set VID input sensibility if needed. In theory the
@@ -2250,7 +2353,14 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
goto exit_remove;
}
}
- /* if fan4 is enabled create the sf3 files for it */
+ /* if fan3 and fan4 are enabled create the sf3 files for them */
+ if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3)
+ for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) {
+ err = device_create_file(dev,
+ &sda_sf3_arrays_fan3[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
err = device_create_file(dev,
@@ -2318,6 +2428,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
if (err)
goto exit_remove;
}
+ if (i == 2 && data->temp3_val_only)
+ continue;
if (data->reg_temp_over[i]) {
err = device_create_file(dev,
&sda_temp_max[i].dev_attr);
@@ -2401,6 +2513,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char __initdata sio_name_W83627EHG[] = "W83627EHG";
static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
+ static const char __initdata sio_name_W83627UHG[] = "W83627UHG";
static const char __initdata sio_name_W83667HG[] = "W83667HG";
static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B";
static const char __initdata sio_name_NCT6775[] = "NCT6775F";
@@ -2433,6 +2546,10 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83627dhg_p;
sio_name = sio_name_W83627DHG_P;
break;
+ case SIO_W83627UHG_ID:
+ sio_data->kind = w83627uhg;
+ sio_name = sio_name_W83627UHG;
+ break;
case SIO_W83667HG_ID:
sio_data->kind = w83667hg;
sio_name = sio_name_W83667HG;
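The w83627ehf voltage-scaling rework deserves a closer look: instead of a single 8/16 mV-per-LSB table, each chip now gets a table holding (ADC LSB in mV) * scaling factor * 100, which lets the W83627UHG's unevenly scaled inputs reuse the common in_from_reg()/in_to_reg() code. A quick check of the arithmetic, using values from the tables added above:

/*
 * common table, in0:  scale_in[0] = 800   (8 mV/LSB, no divider)
 *   raw 0xC8 (200) -> DIV_ROUND_CLOSEST(200 * 800, 100)  = 1600 mV
 *
 * W83627UHG, in2:     scale_in[2] = 3328  (8 mV/LSB behind a 4.16x divider)
 *   raw 0x64 (100) -> DIV_ROUND_CLOSEST(100 * 3328, 100) = 3328 mV
 *
 * in_to_reg() inverts this with DIV_ROUND_CLOSEST(mV * 100, scale_in[nr]),
 * clamped to 0..255 by SENSORS_LIMIT().
 */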
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index eed43a008be..65b685e2c7b 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1245,17 +1245,17 @@ w83781d_read_value_i2c(struct w83781d_data *data, u16 reg)
/* convert from ISA to LM75 I2C addresses */
switch (reg & 0xff) {
case 0x50: /* TEMP */
- res = swab16(i2c_smbus_read_word_data(cl, 0));
+ res = i2c_smbus_read_word_swapped(cl, 0);
break;
case 0x52: /* CONFIG */
res = i2c_smbus_read_byte_data(cl, 1);
break;
case 0x53: /* HYST */
- res = swab16(i2c_smbus_read_word_data(cl, 2));
+ res = i2c_smbus_read_word_swapped(cl, 2);
break;
case 0x55: /* OVER */
default:
- res = swab16(i2c_smbus_read_word_data(cl, 3));
+ res = i2c_smbus_read_word_swapped(cl, 3);
break;
}
}
@@ -1289,10 +1289,10 @@ w83781d_write_value_i2c(struct w83781d_data *data, u16 reg, u16 value)
i2c_smbus_write_byte_data(cl, 1, value & 0xff);
break;
case 0x53: /* HYST */
- i2c_smbus_write_word_data(cl, 2, swab16(value));
+ i2c_smbus_write_word_swapped(cl, 2, value);
break;
case 0x55: /* OVER */
- i2c_smbus_write_word_data(cl, 3, swab16(value));
+ i2c_smbus_write_word_swapped(cl, 3, value);
break;
}
}
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 1f29bab6b3e..c7c3128393d 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -2,22 +2,31 @@
# Generic HWSPINLOCK framework
#
+# HWSPINLOCK always gets selected by whoever wants it.
config HWSPINLOCK
- tristate "Generic Hardware Spinlock framework"
- depends on ARCH_OMAP4
- help
- Say y here to support the generic hardware spinlock framework.
- You only need to enable this if you have hardware spinlock module
- on your system (usually only relevant if your system has remote slave
- coprocessors).
+ tristate
- If unsure, say N.
+menu "Hardware Spinlock drivers"
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
- depends on HWSPINLOCK && ARCH_OMAP4
+ depends on ARCH_OMAP4
+ select HWSPINLOCK
help
Say y here to support the OMAP Hardware Spinlock device (firstly
introduced in OMAP4).
If unsure, say N.
+
+config HSEM_U8500
+ tristate "STE Hardware Semaphore functionality"
+ depends on ARCH_U8500
+ select HWSPINLOCK
+ help
+ Say y here to support the STE Hardware Semaphore functionality, which
+ provides a synchronisation mechanism for the various processors on the
+ SoC.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 5729a3f7ed3..93eb64b6648 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
+obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
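The hwspinlock_core.c diff that follows reshapes the framework API: instead of registering locks one at a time, a platform driver now registers a whole bank with hwspin_lock_register(bank, dev, ops, base_id, num_locks), and the radix-tree lock becomes a mutex because the non-atomic insertions may sleep. A rough sketch of a platform driver under the new interface, assuming struct hwspinlock_device ends in an array of struct hwspinlock as the core code below indexes it (the my_hsem_* names and the lock count are illustrative, not taken from any real driver):

#include <linux/hwspinlock.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "hwspinlock_internal.h"	/* struct hwspinlock_device, hwspinlock_ops */

static int my_hsem_trylock(struct hwspinlock *lock)
{
	/* poke the hardware semaphore; return 1 if we got it, 0 if busy */
	return 1;
}

static void my_hsem_unlock(struct hwspinlock *lock)
{
	/* release the hardware semaphore */
}

static const struct hwspinlock_ops my_hsem_ops = {
	.trylock	= my_hsem_trylock,
	.unlock		= my_hsem_unlock,
	/* .relax is optional, used only while spinning in the timeout path */
};

static int my_hsem_probe(struct platform_device *pdev)
{
	struct hwspinlock_device *bank;
	int num_locks = 32;		/* illustrative */
	int ret;

	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(struct hwspinlock),
		       GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	platform_set_drvdata(pdev, bank);

	/* base_id 0: this bank owns global lock ids 0 .. num_locks - 1 */
	ret = hwspin_lock_register(bank, &pdev->dev, &my_hsem_ops,
				   0, num_locks);
	if (ret)
		kfree(bank);
	return ret;
}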
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 43a62714b4f..61c9cf15fa5 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
#include "hwspinlock_internal.h"
@@ -52,10 +53,12 @@
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
/*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
* as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
*/
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
+
/**
* __hwspin_trylock() - attempt to lock a specific hwspinlock
@@ -114,7 +117,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
return -EBUSY;
/* try to take the hwspinlock device */
- ret = hwlock->ops->trylock(hwlock);
+ ret = hwlock->bank->ops->trylock(hwlock);
/* if hwlock is already taken, undo spin_trylock_* and exit */
if (!ret) {
@@ -196,8 +199,8 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
* Allow platform-specific relax handlers to prevent
* hogging the interconnect (no sleeping, though)
*/
- if (hwlock->ops->relax)
- hwlock->ops->relax(hwlock);
+ if (hwlock->bank->ops->relax)
+ hwlock->bank->ops->relax(hwlock);
}
return ret;
@@ -242,7 +245,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
*/
mb();
- hwlock->ops->unlock(hwlock);
+ hwlock->bank->ops->unlock(hwlock);
/* Undo the spin_trylock{_irq, _irqsave} called while locking */
if (mode == HWLOCK_IRQSTATE)
@@ -254,68 +257,37 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
-/**
- * hwspin_lock_register() - register a new hw spinlock
- * @hwlock: hwspinlock to register.
- *
- * This function should be called from the underlying platform-specific
- * implementation, to register a new hwspinlock instance.
- *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
- *
- * Returns 0 on success, or an appropriate error code on failure
- */
-int hwspin_lock_register(struct hwspinlock *hwlock)
+static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
struct hwspinlock *tmp;
int ret;
- if (!hwlock || !hwlock->ops ||
- !hwlock->ops->trylock || !hwlock->ops->unlock) {
- pr_err("invalid parameters\n");
- return -EINVAL;
- }
-
- spin_lock_init(&hwlock->lock);
-
- spin_lock(&hwspinlock_tree_lock);
+ mutex_lock(&hwspinlock_tree_lock);
- ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
- if (ret)
+ ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
+ if (ret) {
+ if (ret == -EEXIST)
+ pr_err("hwspinlock id %d already exists!\n", id);
goto out;
+ }
/* mark this hwspinlock as available */
- tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
- HWSPINLOCK_UNUSED);
+ tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
/* self-sanity check which should never fail */
WARN_ON(tmp != hwlock);
out:
- spin_unlock(&hwspinlock_tree_lock);
- return ret;
+ mutex_unlock(&hwspinlock_tree_lock);
+ return ret;
}
-EXPORT_SYMBOL_GPL(hwspin_lock_register);
-/**
- * hwspin_lock_unregister() - unregister an hw spinlock
- * @id: index of the specific hwspinlock to unregister
- *
- * This function should be called from the underlying platform-specific
- * implementation, to unregister an existing (and unused) hwspinlock.
- *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
- *
- * Returns the address of hwspinlock @id on success, or NULL on failure
- */
-struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
struct hwspinlock *hwlock = NULL;
int ret;
- spin_lock(&hwspinlock_tree_lock);
+ mutex_lock(&hwspinlock_tree_lock);
/* make sure the hwspinlock is not in use (tag is set) */
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -331,9 +303,91 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
}
out:
- spin_unlock(&hwspinlock_tree_lock);
+ mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ * @dev: the backing device
+ * @ops: hwspinlock handlers for this device
+ * @base_id: id of the first hardware spinlock in this bank
+ * @num_locks: number of hwspinlocks provided by this device
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock device instance.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+ const struct hwspinlock_ops *ops, int base_id, int num_locks)
+{
+ struct hwspinlock *hwlock;
+ int ret = 0, i;
+
+ if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
+ !ops->unlock) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ bank->dev = dev;
+ bank->ops = ops;
+ bank->base_id = base_id;
+ bank->num_locks = num_locks;
+
+ for (i = 0; i < num_locks; i++) {
+ hwlock = &bank->lock[i];
+
+ spin_lock_init(&hwlock->lock);
+ hwlock->bank = bank;
+
+ ret = hwspin_lock_register_single(hwlock, i);
+ if (ret)
+ goto reg_failed;
+ }
+
+ return 0;
+
+reg_failed:
+ while (--i >= 0)
+ hwspin_lock_unregister_single(i);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock device.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_unregister(struct hwspinlock_device *bank)
+{
+ struct hwspinlock *hwlock, *tmp;
+ int i;
+
+ for (i = 0; i < bank->num_locks; i++) {
+ hwlock = &bank->lock[i];
+
+ tmp = hwspin_lock_unregister_single(bank->base_id + i);
+ if (!tmp)
+ return -EBUSY;
+
+ /* self-sanity check that should never fail */
+ WARN_ON(tmp != hwlock);
+ }
+
+ return 0;
+}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
/**
@@ -348,24 +402,25 @@ EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
*/
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
+ struct device *dev = hwlock->bank->dev;
struct hwspinlock *tmp;
int ret;
/* prevent underlying implementation from being removed */
- if (!try_module_get(hwlock->owner)) {
- dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+ if (!try_module_get(dev->driver->owner)) {
+ dev_err(dev, "%s: can't get owner\n", __func__);
return -EINVAL;
}
/* notify PM core that power is now needed */
- ret = pm_runtime_get_sync(hwlock->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+ dev_err(dev, "%s: can't power on device\n", __func__);
return ret;
}
/* mark hwspinlock as used, should not fail */
- tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+ tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
/* self-sanity check that should never fail */
@@ -387,7 +442,7 @@ int hwspin_lock_get_id(struct hwspinlock *hwlock)
return -EINVAL;
}
- return hwlock->id;
+ return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
@@ -400,9 +455,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
* to the remote core before it can be used for synchronization (to get the
* id of a given hwlock, use hwspin_lock_get_id()).
*
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
@@ -411,7 +464,7 @@ struct hwspinlock *hwspin_lock_request(void)
struct hwspinlock *hwlock;
int ret;
- spin_lock(&hwspinlock_tree_lock);
+ mutex_lock(&hwspinlock_tree_lock);
/* look for an unused lock */
ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -431,7 +484,7 @@ struct hwspinlock *hwspin_lock_request(void)
hwlock = NULL;
out:
- spin_unlock(&hwspinlock_tree_lock);
+ mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
@@ -445,9 +498,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
* Usually early board code will be calling this function in order to
* reserve specific hwspinlock ids for predefined purposes.
*
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
@@ -456,7 +507,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
struct hwspinlock *hwlock;
int ret;
- spin_lock(&hwspinlock_tree_lock);
+ mutex_lock(&hwspinlock_tree_lock);
/* make sure this hwspinlock exists */
hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -466,7 +517,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
}
/* sanity check (this shouldn't happen) */
- WARN_ON(hwlock->id != id);
+ WARN_ON(hwlock_to_id(hwlock) != id);
/* make sure this hwspinlock is unused */
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -482,7 +533,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
hwlock = NULL;
out:
- spin_unlock(&hwspinlock_tree_lock);
+ mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
@@ -495,14 +546,13 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
* Should only be called with an @hwlock that was retrieved from
* an earlier call to omap_hwspin_lock_request{_specific}.
*
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_free(struct hwspinlock *hwlock)
{
+ struct device *dev = hwlock->bank->dev;
struct hwspinlock *tmp;
int ret;
@@ -511,34 +561,34 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
return -EINVAL;
}
- spin_lock(&hwspinlock_tree_lock);
+ mutex_lock(&hwspinlock_tree_lock);
/* make sure the hwspinlock is used */
- ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+ ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
if (ret == 1) {
- dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+ dev_err(dev, "%s: hwlock is already free\n", __func__);
dump_stack();
ret = -EINVAL;
goto out;
}
/* notify the underlying device that power is not needed */
- ret = pm_runtime_put(hwlock->dev);
+ ret = pm_runtime_put(dev);
if (ret < 0)
goto out;
/* mark this hwspinlock as available */
- tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+ tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
/* sanity check (this shouldn't happen) */
WARN_ON(tmp != hwlock);
- module_put(hwlock->owner);
+ module_put(dev->driver->owner);
out:
- spin_unlock(&hwspinlock_tree_lock);
+ mutex_unlock(&hwspinlock_tree_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
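The registration interface above now takes a whole bank in one call, so a platform-specific driver only fills in the per-lock priv pointers and hands the array over. A minimal sketch of such a driver follows; the "foo" controller, its one-32-bit-register-per-lock layout and FOO_NUM_LOCKS are hypothetical, while struct hwspinlock_device, struct hwspinlock_ops and hwspin_lock_register() are the interfaces shown above.

/*
 * Hypothetical "foo" controller: reading 0 from a lock register acquires
 * the lock, writing 0 releases it.  Illustrative sketch only.
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hwspinlock.h>
#include <linux/platform_device.h>

#include "hwspinlock_internal.h"

#define FOO_NUM_LOCKS	8

static int foo_hwspinlock_trylock(struct hwspinlock *lock)
{
	void __iomem *addr = lock->priv;

	return readl(addr) == 0;
}

static void foo_hwspinlock_unlock(struct hwspinlock *lock)
{
	void __iomem *addr = lock->priv;

	writel(0, addr);
}

static const struct hwspinlock_ops foo_hwspinlock_ops = {
	.trylock	= foo_hwspinlock_trylock,
	.unlock		= foo_hwspinlock_unlock,
};

static int __devinit foo_hwspinlock_probe(struct platform_device *pdev)
{
	struct hwspinlock_device *bank;
	struct resource *res;
	void __iomem *io_base;
	int i, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	io_base = ioremap(res->start, resource_size(res));
	if (!io_base)
		return -ENOMEM;

	/* the locks are tail-allocated together with their bank */
	bank = kzalloc(sizeof(*bank) + FOO_NUM_LOCKS * sizeof(struct hwspinlock),
		       GFP_KERNEL);
	if (!bank) {
		iounmap(io_base);
		return -ENOMEM;
	}

	for (i = 0; i < FOO_NUM_LOCKS; i++)
		bank->lock[i].priv = io_base + i * sizeof(u32);

	platform_set_drvdata(pdev, bank);

	ret = hwspin_lock_register(bank, &pdev->dev, &foo_hwspinlock_ops,
				   0 /* base_id */, FOO_NUM_LOCKS);
	if (ret) {
		kfree(bank);
		iounmap(io_base);
	}

	return ret;
}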
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
index 69935e6b93e..d26f78b8f21 100644
--- a/drivers/hwspinlock/hwspinlock_internal.h
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -21,6 +21,8 @@
#include <linux/spinlock.h>
#include <linux/device.h>
+struct hwspinlock_device;
+
/**
* struct hwspinlock_ops - platform-specific hwspinlock handlers
*
@@ -39,23 +41,37 @@ struct hwspinlock_ops {
/**
* struct hwspinlock - this struct represents a single hwspinlock instance
- *
- * @dev: underlying device, will be used to invoke runtime PM api
- * @ops: platform-specific hwspinlock handlers
- * @id: a global, unique, system-wide, index of the lock.
+ * @bank: the hwspinlock_device structure which owns this lock
* @lock: initialized and used by hwspinlock core
- * @owner: underlying implementation module, used to maintain module ref count
- *
- * Note: currently simplicity was opted for, but later we can squeeze some
- * memory bytes by grouping the dev, ops and owner members in a single
- * per-platform struct, and have all hwspinlocks point at it.
+ * @priv: private data, owned by the underlying platform-specific hwspinlock drv
*/
struct hwspinlock {
+ struct hwspinlock_device *bank;
+ spinlock_t lock;
+ void *priv;
+};
+
+/**
+ * struct hwspinlock_device - a device which usually spans numerous hwspinlocks
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @base_id: id index of the first lock in this device
+ * @num_locks: number of locks in this device
+ * @lock: dynamically allocated array of 'struct hwspinlock'
+ */
+struct hwspinlock_device {
struct device *dev;
const struct hwspinlock_ops *ops;
- int id;
- spinlock_t lock;
- struct module *owner;
+ int base_id;
+ int num_locks;
+ struct hwspinlock lock[0];
};
+static inline int hwlock_to_id(struct hwspinlock *hwlock)
+{
+ int local_id = hwlock - &hwlock->bank->lock[0];
+
+ return hwlock->bank->base_id + local_id;
+}
+
#endif /* __HWSPINLOCK_HWSPINLOCK_H */
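With the per-lock id field gone, a lock's global id is now pure pointer arithmetic: its index inside the bank's tail-allocated lock[] array plus the bank's base_id. A short worked example with illustrative values:

/* A bank registered with base_id = 32 owns bank->lock[0..num_locks-1];
 * its sixth element therefore carries the global id 32 + 5 = 37.
 */
struct hwspinlock *hwlock = &bank->lock[5];
int id = hwlock_to_id(hwlock);	/* hwlock - &bank->lock[0] == 5, so id == 37 */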
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index a8f02734c02..887d34effb3 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -41,33 +41,20 @@
#define SPINLOCK_NOTTAKEN (0) /* free */
#define SPINLOCK_TAKEN (1) /* locked */
-#define to_omap_hwspinlock(lock) \
- container_of(lock, struct omap_hwspinlock, lock)
-
-struct omap_hwspinlock {
- struct hwspinlock lock;
- void __iomem *addr;
-};
-
-struct omap_hwspinlock_state {
- int num_locks; /* Total number of locks in system */
- void __iomem *io_base; /* Mapped base address */
-};
-
static int omap_hwspinlock_trylock(struct hwspinlock *lock)
{
- struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+ void __iomem *lock_addr = lock->priv;
/* attempt to acquire the lock by reading its value */
- return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
+ return (SPINLOCK_NOTTAKEN == readl(lock_addr));
}
static void omap_hwspinlock_unlock(struct hwspinlock *lock)
{
- struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+ void __iomem *lock_addr = lock->priv;
/* release the lock by writing 0 to it */
- writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
+ writel(SPINLOCK_NOTTAKEN, lock_addr);
}
/*
@@ -93,26 +80,23 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
{
- struct omap_hwspinlock *omap_lock;
- struct omap_hwspinlock_state *state;
- struct hwspinlock *lock;
+ struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
+ struct hwspinlock_device *bank;
+ struct hwspinlock *hwlock;
struct resource *res;
void __iomem *io_base;
- int i, ret;
+ int num_locks, i, ret;
+
+ if (!pdata)
+ return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
io_base = ioremap(res->start, resource_size(res));
- if (!io_base) {
- ret = -ENOMEM;
- goto free_state;
- }
+ if (!io_base)
+ return -ENOMEM;
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
@@ -124,10 +108,18 @@ static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
goto iounmap_base;
}
- state->num_locks = i * 32;
- state->io_base = io_base;
+ num_locks = i * 32; /* actual number of locks in this device */
+
+ bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+ if (!bank) {
+ ret = -ENOMEM;
+ goto iounmap_base;
+ }
+
+ platform_set_drvdata(pdev, bank);
- platform_set_drvdata(pdev, state);
+ for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
+ hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
/*
* runtime PM will make sure the clock of this module is
@@ -135,79 +127,46 @@ static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
*/
pm_runtime_enable(&pdev->dev);
- for (i = 0; i < state->num_locks; i++) {
- omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
- if (!omap_lock) {
- ret = -ENOMEM;
- goto free_locks;
- }
-
- omap_lock->lock.dev = &pdev->dev;
- omap_lock->lock.owner = THIS_MODULE;
- omap_lock->lock.id = i;
- omap_lock->lock.ops = &omap_hwspinlock_ops;
- omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
-
- ret = hwspin_lock_register(&omap_lock->lock);
- if (ret) {
- kfree(omap_lock);
- goto free_locks;
- }
- }
+ ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
+ pdata->base_id, num_locks);
+ if (ret)
+ goto reg_fail;
return 0;
-free_locks:
- while (--i >= 0) {
- lock = hwspin_lock_unregister(i);
- /* this should't happen, but let's give our best effort */
- if (!lock) {
- dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
- continue;
- }
- omap_lock = to_omap_hwspinlock(lock);
- kfree(omap_lock);
- }
+reg_fail:
pm_runtime_disable(&pdev->dev);
+ kfree(bank);
iounmap_base:
iounmap(io_base);
-free_state:
- kfree(state);
return ret;
}
-static int omap_hwspinlock_remove(struct platform_device *pdev)
+static int __devexit omap_hwspinlock_remove(struct platform_device *pdev)
{
- struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
- struct hwspinlock *lock;
- struct omap_hwspinlock *omap_lock;
- int i;
-
- for (i = 0; i < state->num_locks; i++) {
- lock = hwspin_lock_unregister(i);
- /* this shouldn't happen at this point. if it does, at least
- * don't continue with the remove */
- if (!lock) {
- dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
- return -EBUSY;
- }
-
- omap_lock = to_omap_hwspinlock(lock);
- kfree(omap_lock);
+ struct hwspinlock_device *bank = platform_get_drvdata(pdev);
+ void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
+ int ret;
+
+ ret = hwspin_lock_unregister(bank);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+ return ret;
}
pm_runtime_disable(&pdev->dev);
- iounmap(state->io_base);
- kfree(state);
+ iounmap(io_base);
+ kfree(bank);
return 0;
}
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
- .remove = omap_hwspinlock_remove,
+ .remove = __devexit_p(omap_hwspinlock_remove),
.driver = {
.name = "omap_hwspinlock",
+ .owner = THIS_MODULE,
},
};
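The OMAP probe now bails out without platform data, since that is where the bank's base_id comes from. A hedged sketch of the board-side counterpart is below; struct hwspinlock_pdata is the type dereferenced in the probe above, and the device id and base_id values are illustrative only.

#include <linux/hwspinlock.h>
#include <linux/platform_device.h>

static struct hwspinlock_pdata omap_hwspinlock_pdata = {
	.base_id	= 0,	/* global id of the first lock in this bank */
};

static struct platform_device omap_hwspinlock_dev = {
	.name	= "omap_hwspinlock",
	.id	= -1,
	.dev	= {
		.platform_data = &omap_hwspinlock_pdata,
	},
};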
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
new file mode 100644
index 00000000000..86980fe0411
--- /dev/null
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -0,0 +1,197 @@
+/*
+ * u8500 HWSEM driver
+ *
+ * Copyright (C) 2010-2011 ST-Ericsson
+ *
+ * Implements u8500 semaphore handling for protocol 1, no interrupts.
+ *
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ * Heavily borrowed from the work of:
+ * Simon Que <sque@ti.com>
+ * Hari Kanigeri <h-kanigeri2@ti.com>
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock_internal.h"
+
+/*
+ * Implementation of STE's HSem protocol 1 without interrupts.
+ * The only masterID we allow is '0x01' to force people to use
+ * HSems for synchronisation between processors rather than processes
+ * on the ARM core.
+ */
+
+#define U8500_MAX_SEMAPHORE 32 /* a total of 32 semaphores */
+#define RESET_SEMAPHORE (0) /* free */
+
+/*
+ * CPU ID for master running u8500 kernel.
+ * Hwspinlocks should only be used to synchronise operations
+ * between the Cortex A9 core and the other CPUs. Hence
+ * forcing the masterID to a preset value.
+ */
+#define HSEM_MASTER_ID 0x01
+
+#define HSEM_REGISTER_OFFSET 0x08
+
+#define HSEM_CTRL_REG 0x00
+#define HSEM_ICRALL 0x90
+#define HSEM_PROTOCOL_1 0x01
+
+static int u8500_hsem_trylock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ writel(HSEM_MASTER_ID, lock_addr);
+
+ /* get only the first 4 bits and compare to the masterID.
+ * if equal, we have the semaphore, otherwise
+ * someone else has it.
+ */
+ return (HSEM_MASTER_ID == (0x0F & readl(lock_addr)));
+}
+
+static void u8500_hsem_unlock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ /* release the lock by writing 0 to it */
+ writel(RESET_SEMAPHORE, lock_addr);
+}
+
+/*
+ * u8500: what value is recommended here?
+ */
+static void u8500_hsem_relax(struct hwspinlock *lock)
+{
+ ndelay(50);
+}
+
+static const struct hwspinlock_ops u8500_hwspinlock_ops = {
+ .trylock = u8500_hsem_trylock,
+ .unlock = u8500_hsem_unlock,
+ .relax = u8500_hsem_relax,
+};
+
+static int __devinit u8500_hsem_probe(struct platform_device *pdev)
+{
+ struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
+ struct hwspinlock_device *bank;
+ struct hwspinlock *hwlock;
+ struct resource *res;
+ void __iomem *io_base;
+ int i, ret, num_locks = U8500_MAX_SEMAPHORE;
+ ulong val;
+
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (!io_base)
+ return -ENOMEM;
+
+ /* make sure protocol 1 is selected */
+ val = readl(io_base + HSEM_CTRL_REG);
+ writel((val & ~HSEM_PROTOCOL_1), io_base + HSEM_CTRL_REG);
+
+ /* clear all interrupts */
+ writel(0xFFFF, io_base + HSEM_ICRALL);
+
+ bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+ if (!bank) {
+ ret = -ENOMEM;
+ goto iounmap_base;
+ }
+
+ platform_set_drvdata(pdev, bank);
+
+ for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
+ hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i;
+
+ /* no pm needed for HSem but required to comply with hwspinlock core */
+ pm_runtime_enable(&pdev->dev);
+
+ ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops,
+ pdata->base_id, num_locks);
+ if (ret)
+ goto reg_fail;
+
+ return 0;
+
+reg_fail:
+ pm_runtime_disable(&pdev->dev);
+ kfree(bank);
+iounmap_base:
+ iounmap(io_base);
+ return ret;
+}
+
+static int __devexit u8500_hsem_remove(struct platform_device *pdev)
+{
+ struct hwspinlock_device *bank = platform_get_drvdata(pdev);
+ void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
+ int ret;
+
+ /* clear all interrupts */
+ writel(0xFFFF, io_base + HSEM_ICRALL);
+
+ ret = hwspin_lock_unregister(bank);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+ iounmap(io_base);
+ kfree(bank);
+
+ return 0;
+}
+
+static struct platform_driver u8500_hsem_driver = {
+ .probe = u8500_hsem_probe,
+ .remove = __devexit_p(u8500_hsem_remove),
+ .driver = {
+ .name = "u8500_hsem",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init u8500_hsem_init(void)
+{
+ return platform_driver_register(&u8500_hsem_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(u8500_hsem_init);
+
+static void __exit u8500_hsem_exit(void)
+{
+ platform_driver_unregister(&u8500_hsem_driver);
+}
+module_exit(u8500_hsem_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware Spinlock driver for u8500");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
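For completeness, this is roughly how a client on the Cortex-A9 side would take and release one of these semaphores through the generic hwspinlock API; the request/timeout/unlock helpers come from include/linux/hwspinlock.h, while the lock id and the 100 ms timeout are illustrative.

#include <linux/hwspinlock.h>

static int u8500_hsem_example(void)
{
	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	/* id 8 is arbitrary; board code usually reserves such ids upfront */
	hwlock = hwspin_lock_request_specific(8);
	if (!hwlock)
		return -EBUSY;

	/* busy-wait (using the relax() hook above) for up to 100 ms */
	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
	if (!ret) {
		/* critical section shared with the other processor */
		hwspin_unlock_irqrestore(hwlock, &flags);
	}

	hwspin_lock_free(hwlock);
	return ret;
}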
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index d6d58684712..85584a547c2 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -47,8 +47,8 @@
/* ----- global variables --------------------------------------------- */
static int bit_test; /* see if the line-setting functions work */
-module_param(bit_test, bool, 0);
-MODULE_PARM_DESC(bit_test, "Test the lines of the bus to see if it is stuck");
+module_param(bit_test, int, S_IRUGO);
+MODULE_PARM_DESC(bit_test, "lines testing - 0 off; 1 report; 2 fail if stuck");
#ifdef DEBUG
static int i2c_debug = 1;
@@ -250,7 +250,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 1 : getscl(adap);
if (!scl || !sda) {
- printk(KERN_WARNING "%s: bus seems to be busy\n", name);
+ printk(KERN_WARNING
+ "%s: bus seems to be busy (scl=%d, sda=%d)\n",
+ name, scl, sda);
goto bailout;
}
@@ -441,7 +443,7 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
acknak(i2c_adap, 0);
dev_err(&i2c_adap->dev, "readbytes: invalid "
"block length (%d)\n", inval);
- return -EREMOTEIO;
+ return -EPROTO;
}
/* The original count value accounts for the extra
bytes, that is, either 1 for a regular transaction,
@@ -470,7 +472,7 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
* reads, writes as well as 10bit-addresses.
* returns:
* 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
- * -x an error occurred (like: -EREMOTEIO if the device did not answer, or
+ * -x an error occurred (like: -ENXIO if the device did not answer, or
* -ETIMEDOUT, for example if the lines are stuck...)
*/
static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
@@ -493,14 +495,14 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
if ((ret != 1) && !nak_ok) {
dev_err(&i2c_adap->dev,
"died at extended address code\n");
- return -EREMOTEIO;
+ return -ENXIO;
}
/* the remaining 8 bit address */
ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
if ((ret != 1) && !nak_ok) {
/* the chip did not ack / xmission error occurred */
dev_err(&i2c_adap->dev, "died at 2nd address code\n");
- return -EREMOTEIO;
+ return -ENXIO;
}
if (flags & I2C_M_RD) {
bit_dbg(3, &i2c_adap->dev, "emitting repeated "
@@ -512,7 +514,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
if ((ret != 1) && !nak_ok) {
dev_err(&i2c_adap->dev,
"died at repeated address code\n");
- return -EREMOTEIO;
+ return -EIO;
}
}
} else { /* normal 7bit address */
@@ -570,7 +572,7 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
- ret = -EREMOTEIO;
+ ret = -EIO;
goto bailout;
}
} else {
@@ -581,7 +583,7 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
- ret = -EREMOTEIO;
+ ret = -EIO;
goto bailout;
}
}
@@ -624,7 +626,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
if (bit_test) {
ret = test_bus(adap);
- if (ret < 0)
+ if (bit_test >= 2 && ret < 0)
return -ENODEV;
}
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 4ca9cf9cde7..beb9ffe2564 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -196,7 +196,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
} else {
dev_dbg(&i2c_adap->dev, "bus is not idle. status is "
"%#04x\n", state);
- return -EAGAIN;
+ return -EBUSY;
}
}
@@ -224,7 +224,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
}
curmsg = 0;
- ret = -EREMOTEIO;
+ ret = -EIO;
while (curmsg < num) {
state = pca_status(adap);
@@ -259,6 +259,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
case 0x20: /* SLA+W has been transmitted; NOT ACK has been received */
DEB2("NOT ACK received after SLA+W\n");
pca_stop(adap);
+ ret = -ENXIO;
goto out;
case 0x40: /* SLA+R has been transmitted; ACK has been received */
@@ -283,6 +284,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
case 0x48: /* SLA+R has been transmitted; NOT ACK has been received */
DEB2("NOT ACK received after SLA+R\n");
pca_stop(adap);
+ ret = -ENXIO;
goto out;
case 0x30: /* Data byte in I2CDAT has been transmitted; NOT ACK has been received */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 646068e5100..a3afac4be73 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -110,7 +110,6 @@ config I2C_I801
config I2C_ISCH
tristate "Intel SCH SMBus 1.0"
depends on PCI
- select MFD_CORE
select LPC_SCH
help
Say Y here if you want to use SMBus controller on the Intel SCH
@@ -301,7 +300,7 @@ config I2C_AT91
config I2C_AU1550
tristate "Au1550/Au1200 SMBus interface"
- depends on SOC_AU1550 || SOC_AU1200
+ depends on MIPS_ALCHEMY
help
If you say yes to this option, support will be included for the
Au1550 and Au1200 SMBus interface.
@@ -350,15 +349,25 @@ config I2C_DAVINCI
devices such as DaVinci NIC.
For details please see http://www.ti.com/davinci
-config I2C_DESIGNWARE
- tristate "Synopsys DesignWare"
+config I2C_DESIGNWARE_PLATFORM
+ tristate "Synopsys DesignWare Platfrom"
depends on HAVE_CLK
help
If you say yes to this option, support will be included for the
Synopsys DesignWare I2C adapter. Only master mode is supported.
This driver can also be built as a module. If so, the module
- will be called i2c-designware.
+ will be called i2c-designware-platform.
+
+config I2C_DESIGNWARE_PCI
+ tristate "Synopsys DesignWare PCI"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-designware-pci.
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
@@ -789,7 +798,7 @@ config I2C_ACORN
config I2C_ELEKTOR
tristate "Elektor ISA card"
- depends on ISA && BROKEN_ON_SMP
+ depends on ISA && HAS_IOPORT && BROKEN_ON_SMP
select I2C_ALGOPCF
help
This supports the PCF8584 ISA bus I2C adapter. Say Y if you own
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e6cf294d372..fba6da60aa0 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -33,7 +33,10 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
-obj-$(CONFIG_I2C_DESIGNWARE) += i2c-designware.o
+obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
+i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o
+obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
+i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 532828bc50e..f314d7f433d 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -36,32 +36,44 @@
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <asm/mach-au1x00/au1xxx.h>
+#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
+#define PSC_SEL 0x00
+#define PSC_CTRL 0x04
+#define PSC_SMBCFG 0x08
+#define PSC_SMBMSK 0x0C
+#define PSC_SMBPCR 0x10
+#define PSC_SMBSTAT 0x14
+#define PSC_SMBEVNT 0x18
+#define PSC_SMBTXRX 0x1C
+#define PSC_SMBTMR 0x20
+
struct i2c_au1550_data {
- u32 psc_base;
+ void __iomem *psc_base;
int xfer_timeout;
- int ack_timeout;
struct i2c_adapter adap;
struct resource *ioarea;
};
-static int
-wait_xfer_done(struct i2c_au1550_data *adap)
+static inline void WR(struct i2c_au1550_data *a, int r, unsigned long v)
{
- u32 stat;
- int i;
- volatile psc_smb_t *sp;
+ __raw_writel(v, a->psc_base + r);
+ wmb();
+}
- sp = (volatile psc_smb_t *)(adap->psc_base);
+static inline unsigned long RD(struct i2c_au1550_data *a, int r)
+{
+ return __raw_readl(a->psc_base + r);
+}
- /* Wait for Tx Buffer Empty
- */
+static int wait_xfer_done(struct i2c_au1550_data *adap)
+{
+ int i;
+
+ /* Wait for Tx Buffer Empty */
for (i = 0; i < adap->xfer_timeout; i++) {
- stat = sp->psc_smbstat;
- au_sync();
- if ((stat & PSC_SMBSTAT_TE) != 0)
+ if (RD(adap, PSC_SMBSTAT) & PSC_SMBSTAT_TE)
return 0;
udelay(1);
@@ -70,41 +82,27 @@ wait_xfer_done(struct i2c_au1550_data *adap)
return -ETIMEDOUT;
}
-static int
-wait_ack(struct i2c_au1550_data *adap)
+static int wait_ack(struct i2c_au1550_data *adap)
{
- u32 stat;
- volatile psc_smb_t *sp;
+ unsigned long stat;
if (wait_xfer_done(adap))
return -ETIMEDOUT;
- sp = (volatile psc_smb_t *)(adap->psc_base);
-
- stat = sp->psc_smbevnt;
- au_sync();
-
+ stat = RD(adap, PSC_SMBEVNT);
if ((stat & (PSC_SMBEVNT_DN | PSC_SMBEVNT_AN | PSC_SMBEVNT_AL)) != 0)
return -ETIMEDOUT;
return 0;
}
-static int
-wait_master_done(struct i2c_au1550_data *adap)
+static int wait_master_done(struct i2c_au1550_data *adap)
{
- u32 stat;
- int i;
- volatile psc_smb_t *sp;
+ int i;
- sp = (volatile psc_smb_t *)(adap->psc_base);
-
- /* Wait for Master Done.
- */
- for (i = 0; i < adap->xfer_timeout; i++) {
- stat = sp->psc_smbevnt;
- au_sync();
- if ((stat & PSC_SMBEVNT_MD) != 0)
+ /* Wait for Master Done. */
+ for (i = 0; i < 2 * adap->xfer_timeout; i++) {
+ if ((RD(adap, PSC_SMBEVNT) & PSC_SMBEVNT_MD) != 0)
return 0;
udelay(1);
}
@@ -115,29 +113,20 @@ wait_master_done(struct i2c_au1550_data *adap)
static int
do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q)
{
- volatile psc_smb_t *sp;
- u32 stat;
+ unsigned long stat;
- sp = (volatile psc_smb_t *)(adap->psc_base);
-
- /* Reset the FIFOs, clear events.
- */
- stat = sp->psc_smbstat;
- sp->psc_smbevnt = PSC_SMBEVNT_ALLCLR;
- au_sync();
+ /* Reset the FIFOs, clear events. */
+ stat = RD(adap, PSC_SMBSTAT);
+ WR(adap, PSC_SMBEVNT, PSC_SMBEVNT_ALLCLR);
if (!(stat & PSC_SMBSTAT_TE) || !(stat & PSC_SMBSTAT_RE)) {
- sp->psc_smbpcr = PSC_SMBPCR_DC;
- au_sync();
- do {
- stat = sp->psc_smbpcr;
- au_sync();
- } while ((stat & PSC_SMBPCR_DC) != 0);
+ WR(adap, PSC_SMBPCR, PSC_SMBPCR_DC);
+ while ((RD(adap, PSC_SMBPCR) & PSC_SMBPCR_DC) != 0)
+ cpu_relax();
udelay(50);
}
- /* Write out the i2c chip address and specify operation
- */
+ /* Write out the i2c chip address and specify operation */
addr <<= 1;
if (rd)
addr |= 1;
@@ -146,56 +135,42 @@ do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q)
if (q)
addr |= PSC_SMBTXRX_STP;
- /* Put byte into fifo, start up master.
- */
- sp->psc_smbtxrx = addr;
- au_sync();
- sp->psc_smbpcr = PSC_SMBPCR_MS;
- au_sync();
+ /* Put byte into fifo, start up master. */
+ WR(adap, PSC_SMBTXRX, addr);
+ WR(adap, PSC_SMBPCR, PSC_SMBPCR_MS);
if (wait_ack(adap))
return -EIO;
return (q) ? wait_master_done(adap) : 0;
}
-static u32
-wait_for_rx_byte(struct i2c_au1550_data *adap, u32 *ret_data)
+static int wait_for_rx_byte(struct i2c_au1550_data *adap, unsigned char *out)
{
- int j;
- u32 data, stat;
- volatile psc_smb_t *sp;
+ int j;
if (wait_xfer_done(adap))
return -EIO;
- sp = (volatile psc_smb_t *)(adap->psc_base);
-
j = adap->xfer_timeout * 100;
do {
j--;
if (j <= 0)
return -EIO;
- stat = sp->psc_smbstat;
- au_sync();
- if ((stat & PSC_SMBSTAT_RE) == 0)
+ if ((RD(adap, PSC_SMBSTAT) & PSC_SMBSTAT_RE) == 0)
j = 0;
else
udelay(1);
} while (j > 0);
- data = sp->psc_smbtxrx;
- au_sync();
- *ret_data = data;
+
+ *out = RD(adap, PSC_SMBTXRX);
return 0;
}
-static int
-i2c_read(struct i2c_au1550_data *adap, unsigned char *buf,
+static int i2c_read(struct i2c_au1550_data *adap, unsigned char *buf,
unsigned int len)
{
- int i;
- u32 data;
- volatile psc_smb_t *sp;
+ int i;
if (len == 0)
return 0;
@@ -204,62 +179,46 @@ i2c_read(struct i2c_au1550_data *adap, unsigned char *buf,
* zero bytes for timing, waiting for bytes to appear in the
* receive fifo, then reading the bytes.
*/
-
- sp = (volatile psc_smb_t *)(adap->psc_base);
-
i = 0;
- while (i < (len-1)) {
- sp->psc_smbtxrx = 0;
- au_sync();
- if (wait_for_rx_byte(adap, &data))
+ while (i < (len - 1)) {
+ WR(adap, PSC_SMBTXRX, 0);
+ if (wait_for_rx_byte(adap, &buf[i]))
return -EIO;
- buf[i] = data;
i++;
}
- /* The last byte has to indicate transfer done.
- */
- sp->psc_smbtxrx = PSC_SMBTXRX_STP;
- au_sync();
+ /* The last byte has to indicate transfer done. */
+ WR(adap, PSC_SMBTXRX, PSC_SMBTXRX_STP);
if (wait_master_done(adap))
return -EIO;
- data = sp->psc_smbtxrx;
- au_sync();
- buf[i] = data;
+ buf[i] = (unsigned char)(RD(adap, PSC_SMBTXRX) & 0xff);
return 0;
}
-static int
-i2c_write(struct i2c_au1550_data *adap, unsigned char *buf,
+static int i2c_write(struct i2c_au1550_data *adap, unsigned char *buf,
unsigned int len)
{
- int i;
- u32 data;
- volatile psc_smb_t *sp;
+ int i;
+ unsigned long data;
if (len == 0)
return 0;
- sp = (volatile psc_smb_t *)(adap->psc_base);
-
i = 0;
while (i < (len-1)) {
data = buf[i];
- sp->psc_smbtxrx = data;
- au_sync();
+ WR(adap, PSC_SMBTXRX, data);
if (wait_ack(adap))
return -EIO;
i++;
}
- /* The last byte has to indicate transfer done.
- */
+ /* The last byte has to indicate transfer done. */
data = buf[i];
data |= PSC_SMBTXRX_STP;
- sp->psc_smbtxrx = data;
- au_sync();
+ WR(adap, PSC_SMBTXRX, data);
if (wait_master_done(adap))
return -EIO;
return 0;
@@ -269,12 +228,10 @@ static int
au1550_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
{
struct i2c_au1550_data *adap = i2c_adap->algo_data;
- volatile psc_smb_t *sp = (volatile psc_smb_t *)adap->psc_base;
struct i2c_msg *p;
int i, err = 0;
- sp->psc_ctrl = PSC_CTRL_ENABLE;
- au_sync();
+ WR(adap, PSC_CTRL, PSC_CTRL_ENABLE);
for (i = 0; !err && i < num; i++) {
p = &msgs[i];
@@ -293,14 +250,12 @@ au1550_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
if (err == 0)
err = num;
- sp->psc_ctrl = PSC_CTRL_SUSPEND;
- au_sync();
+ WR(adap, PSC_CTRL, PSC_CTRL_SUSPEND);
return err;
}
-static u32
-au1550_func(struct i2c_adapter *adap)
+static u32 au1550_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
@@ -312,57 +267,45 @@ static const struct i2c_algorithm au1550_algo = {
static void i2c_au1550_setup(struct i2c_au1550_data *priv)
{
- volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
- u32 stat;
-
- sp->psc_ctrl = PSC_CTRL_DISABLE;
- au_sync();
- sp->psc_sel = PSC_SEL_PS_SMBUSMODE;
- sp->psc_smbcfg = 0;
- au_sync();
- sp->psc_ctrl = PSC_CTRL_ENABLE;
- au_sync();
- do {
- stat = sp->psc_smbstat;
- au_sync();
- } while ((stat & PSC_SMBSTAT_SR) == 0);
+ unsigned long cfg;
- sp->psc_smbcfg = (PSC_SMBCFG_RT_FIFO8 | PSC_SMBCFG_TT_FIFO8 |
- PSC_SMBCFG_DD_DISABLE);
+ WR(priv, PSC_CTRL, PSC_CTRL_DISABLE);
+ WR(priv, PSC_SEL, PSC_SEL_PS_SMBUSMODE);
+ WR(priv, PSC_SMBCFG, 0);
+ WR(priv, PSC_CTRL, PSC_CTRL_ENABLE);
+ while ((RD(priv, PSC_SMBSTAT) & PSC_SMBSTAT_SR) == 0)
+ cpu_relax();
+
+ cfg = PSC_SMBCFG_RT_FIFO8 | PSC_SMBCFG_TT_FIFO8 | PSC_SMBCFG_DD_DISABLE;
+ WR(priv, PSC_SMBCFG, cfg);
/* Divide by 8 to get a 6.25 MHz clock. The later protocol
* timings are based on this clock.
*/
- sp->psc_smbcfg |= PSC_SMBCFG_SET_DIV(PSC_SMBCFG_DIV8);
- sp->psc_smbmsk = PSC_SMBMSK_ALLMASK;
- au_sync();
+ cfg |= PSC_SMBCFG_SET_DIV(PSC_SMBCFG_DIV8);
+ WR(priv, PSC_SMBCFG, cfg);
+ WR(priv, PSC_SMBMSK, PSC_SMBMSK_ALLMASK);
/* Set the protocol timer values. See Table 71 in the
* Au1550 Data Book for standard timing values.
*/
- sp->psc_smbtmr = PSC_SMBTMR_SET_TH(0) | PSC_SMBTMR_SET_PS(15) | \
+ WR(priv, PSC_SMBTMR, PSC_SMBTMR_SET_TH(0) | PSC_SMBTMR_SET_PS(15) | \
PSC_SMBTMR_SET_PU(15) | PSC_SMBTMR_SET_SH(15) | \
PSC_SMBTMR_SET_SU(15) | PSC_SMBTMR_SET_CL(15) | \
- PSC_SMBTMR_SET_CH(15);
- au_sync();
+ PSC_SMBTMR_SET_CH(15));
- sp->psc_smbcfg |= PSC_SMBCFG_DE_ENABLE;
- do {
- stat = sp->psc_smbstat;
- au_sync();
- } while ((stat & PSC_SMBSTAT_SR) == 0);
+ cfg |= PSC_SMBCFG_DE_ENABLE;
+ WR(priv, PSC_SMBCFG, cfg);
+ while ((RD(priv, PSC_SMBSTAT) & PSC_SMBSTAT_SR) == 0)
+ cpu_relax();
- sp->psc_ctrl = PSC_CTRL_SUSPEND;
- au_sync();
+ WR(priv, PSC_CTRL, PSC_CTRL_SUSPEND);
}
static void i2c_au1550_disable(struct i2c_au1550_data *priv)
{
- volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
-
- sp->psc_smbcfg = 0;
- sp->psc_ctrl = PSC_CTRL_DISABLE;
- au_sync();
+ WR(priv, PSC_SMBCFG, 0);
+ WR(priv, PSC_CTRL, PSC_CTRL_DISABLE);
}
/*
@@ -396,9 +339,12 @@ i2c_au1550_probe(struct platform_device *pdev)
goto out_mem;
}
- priv->psc_base = CKSEG1ADDR(r->start);
+ priv->psc_base = ioremap(r->start, resource_size(r));
+ if (!priv->psc_base) {
+ ret = -EIO;
+ goto out_map;
+ }
priv->xfer_timeout = 200;
- priv->ack_timeout = 200;
priv->adap.nr = pdev->id;
priv->adap.algo = &au1550_algo;
@@ -406,8 +352,7 @@ i2c_au1550_probe(struct platform_device *pdev)
priv->adap.dev.parent = &pdev->dev;
strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
- /* Now, set up the PSC for SMBus PIO mode.
- */
+ /* Now, set up the PSC for SMBus PIO mode. */
i2c_au1550_setup(priv);
ret = i2c_add_numbered_adapter(&priv->adap);
@@ -417,7 +362,8 @@ i2c_au1550_probe(struct platform_device *pdev)
}
i2c_au1550_disable(priv);
-
+ iounmap(priv->psc_base);
+out_map:
release_resource(priv->ioarea);
kfree(priv->ioarea);
out_mem:
@@ -426,14 +372,14 @@ out:
return ret;
}
-static int __devexit
-i2c_au1550_remove(struct platform_device *pdev)
+static int __devexit i2c_au1550_remove(struct platform_device *pdev)
{
struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&priv->adap);
i2c_au1550_disable(priv);
+ iounmap(priv->psc_base);
release_resource(priv->ioarea);
kfree(priv->ioarea);
kfree(priv);
@@ -441,49 +387,51 @@ i2c_au1550_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int
-i2c_au1550_suspend(struct platform_device *pdev, pm_message_t state)
+static int i2c_au1550_suspend(struct device *dev)
{
- struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
+ struct i2c_au1550_data *priv = dev_get_drvdata(dev);
i2c_au1550_disable(priv);
return 0;
}
-static int
-i2c_au1550_resume(struct platform_device *pdev)
+static int i2c_au1550_resume(struct device *dev)
{
- struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
+ struct i2c_au1550_data *priv = dev_get_drvdata(dev);
i2c_au1550_setup(priv);
return 0;
}
+
+static const struct dev_pm_ops i2c_au1550_pmops = {
+ .suspend = i2c_au1550_suspend,
+ .resume = i2c_au1550_resume,
+};
+
+#define AU1XPSC_SMBUS_PMOPS (&i2c_au1550_pmops)
+
#else
-#define i2c_au1550_suspend NULL
-#define i2c_au1550_resume NULL
+#define AU1XPSC_SMBUS_PMOPS NULL
#endif
static struct platform_driver au1xpsc_smbus_driver = {
.driver = {
.name = "au1xpsc_smbus",
.owner = THIS_MODULE,
+ .pm = AU1XPSC_SMBUS_PMOPS,
},
.probe = i2c_au1550_probe,
.remove = __devexit_p(i2c_au1550_remove),
- .suspend = i2c_au1550_suspend,
- .resume = i2c_au1550_resume,
};
-static int __init
-i2c_au1550_init(void)
+static int __init i2c_au1550_init(void)
{
return platform_driver_register(&au1xpsc_smbus_driver);
}
-static void __exit
-i2c_au1550_exit(void)
+static void __exit i2c_au1550_exit(void)
{
platform_driver_unregister(&au1xpsc_smbus_driver);
}
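The Au1550 conversion replaces volatile psc_smb_t structure pokes with the small RD()/WR() accessors over an ioremap'ed base. Only to illustrate the bounded-poll pattern the driver now uses, here is a hypothetical helper in the same style (the name and parameters are not part of the patch):

/* Hypothetical: poll a PSC register until a bit is set, bounded in microseconds */
static int au1550_wait_bit(struct i2c_au1550_data *a, int reg,
			   unsigned long bit, int timeout_us)
{
	while (timeout_us--) {
		if (RD(a, reg) & bit)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}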
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index cbc98aea5b0..cdb59e5b23f 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -631,7 +631,7 @@ static int i2c_bfin_twi_resume(struct platform_device *pdev)
struct bfin_twi_iface *iface = platform_get_drvdata(pdev);
int rc = request_irq(iface->irq, bfin_twi_interrupt_entry,
- IRQF_DISABLED, pdev->name, iface);
+ 0, pdev->name, iface);
if (rc) {
dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq);
return -ENODEV;
@@ -702,7 +702,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
}
rc = request_irq(iface->irq, bfin_twi_interrupt_entry,
- IRQF_DISABLED, pdev->name, iface);
+ 0, pdev->name, iface);
if (rc) {
dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq);
rc = -ENODEV;
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware-core.c
index 1b42b50b599..df879924100 100644
--- a/drivers/i2c/busses/i2c-designware.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -25,18 +25,15 @@
* ----------------------------------------------------------------------------
*
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
#include <linux/clk.h>
#include <linux/errno.h>
-#include <linux/sched.h>
#include <linux/err.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include "i2c-designware-core.h"
/*
* Registers offset
@@ -68,15 +65,10 @@
#define DW_IC_STATUS 0x70
#define DW_IC_TXFLR 0x74
#define DW_IC_RXFLR 0x78
-#define DW_IC_COMP_PARAM_1 0xf4
#define DW_IC_TX_ABRT_SOURCE 0x80
-
-#define DW_IC_CON_MASTER 0x1
-#define DW_IC_CON_SPEED_STD 0x2
-#define DW_IC_CON_SPEED_FAST 0x4
-#define DW_IC_CON_10BITADDR_MASTER 0x10
-#define DW_IC_CON_RESTART_EN 0x20
-#define DW_IC_CON_SLAVE_DISABLE 0x40
+#define DW_IC_COMP_PARAM_1 0xf4
+#define DW_IC_COMP_TYPE 0xfc
+#define DW_IC_COMP_TYPE_VALUE 0x44570140
#define DW_IC_INTR_RX_UNDER 0x001
#define DW_IC_INTR_RX_OVER 0x002
@@ -170,55 +162,23 @@ static char *abort_sources[] = {
"lost arbitration",
};
-/**
- * struct dw_i2c_dev - private i2c-designware data
- * @dev: driver model device node
- * @base: IO registers pointer
- * @cmd_complete: tx completion indicator
- * @lock: protect this struct and IO registers
- * @clk: input reference clock
- * @cmd_err: run time hadware error code
- * @msgs: points to an array of messages currently being transferred
- * @msgs_num: the number of elements in msgs
- * @msg_write_idx: the element index of the current tx message in the msgs
- * array
- * @tx_buf_len: the length of the current tx buffer
- * @tx_buf: the current tx buffer
- * @msg_read_idx: the element index of the current rx message in the msgs
- * array
- * @rx_buf_len: the length of the current rx buffer
- * @rx_buf: the current rx buffer
- * @msg_err: error status of the current transfer
- * @status: i2c master status, one of STATUS_*
- * @abort_source: copy of the TX_ABRT_SOURCE register
- * @irq: interrupt number for the i2c master
- * @adapter: i2c subsystem adapter node
- * @tx_fifo_depth: depth of the hardware tx fifo
- * @rx_fifo_depth: depth of the hardware rx fifo
- */
-struct dw_i2c_dev {
- struct device *dev;
- void __iomem *base;
- struct completion cmd_complete;
- struct mutex lock;
- struct clk *clk;
- int cmd_err;
- struct i2c_msg *msgs;
- int msgs_num;
- int msg_write_idx;
- u32 tx_buf_len;
- u8 *tx_buf;
- int msg_read_idx;
- u32 rx_buf_len;
- u8 *rx_buf;
- int msg_err;
- unsigned int status;
- u32 abort_source;
- int irq;
- struct i2c_adapter adapter;
- unsigned int tx_fifo_depth;
- unsigned int rx_fifo_depth;
-};
+u32 dw_readl(struct dw_i2c_dev *dev, int offset)
+{
+ u32 value = readl(dev->base + offset);
+
+ if (dev->swab)
+ return swab32(value);
+ else
+ return value;
+}
+
+void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
+{
+ if (dev->swab)
+ b = swab32(b);
+
+ writel(b, dev->base + offset);
+}
static u32
i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
@@ -283,13 +243,29 @@ static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
* This function is called during I2C init function, and in case of timeout at
* run time.
*/
-static void i2c_dw_init(struct dw_i2c_dev *dev)
+int i2c_dw_init(struct dw_i2c_dev *dev)
{
- u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
- u32 ic_con, hcnt, lcnt;
+ u32 input_clock_khz;
+ u32 hcnt, lcnt;
+ u32 reg;
+
+ input_clock_khz = dev->get_clk_rate_khz(dev);
+
+ /* Configure register endianess access */
+ reg = dw_readl(dev, DW_IC_COMP_TYPE);
+ if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
+ dev->swab = 1;
+ reg = DW_IC_COMP_TYPE_VALUE;
+ }
+
+ if (reg != DW_IC_COMP_TYPE_VALUE) {
+ dev_err(dev->dev, "Unknown Synopsys component type: "
+ "0x%08x\n", reg);
+ return -ENODEV;
+ }
/* Disable the adapter */
- writel(0, dev->base + DW_IC_ENABLE);
+ dw_writel(dev, 0, DW_IC_ENABLE);
/* set standard and fast speed deviders for high/low periods */
@@ -303,8 +279,8 @@ static void i2c_dw_init(struct dw_i2c_dev *dev)
47, /* tLOW = 4.7 us */
3, /* tf = 0.3 us */
0); /* No offset */
- writel(hcnt, dev->base + DW_IC_SS_SCL_HCNT);
- writel(lcnt, dev->base + DW_IC_SS_SCL_LCNT);
+ dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
/* Fast-mode */
@@ -317,18 +293,17 @@ static void i2c_dw_init(struct dw_i2c_dev *dev)
13, /* tLOW = 1.3 us */
3, /* tf = 0.3 us */
0); /* No offset */
- writel(hcnt, dev->base + DW_IC_FS_SCL_HCNT);
- writel(lcnt, dev->base + DW_IC_FS_SCL_LCNT);
+ dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
/* Configure Tx/Rx FIFO threshold levels */
- writel(dev->tx_fifo_depth - 1, dev->base + DW_IC_TX_TL);
- writel(0, dev->base + DW_IC_RX_TL);
+ dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+ dw_writel(dev, 0, DW_IC_RX_TL);
/* configure the i2c master */
- ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
- writel(ic_con, dev->base + DW_IC_CON);
+ dw_writel(dev, dev->master_cfg , DW_IC_CON);
+ return 0;
}
/*
@@ -338,7 +313,7 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
{
int timeout = TIMEOUT;
- while (readl(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
+ while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
if (timeout <= 0) {
dev_warn(dev->dev, "timeout waiting for bus ready\n");
return -ETIMEDOUT;
@@ -356,24 +331,24 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
u32 ic_con;
/* Disable the adapter */
- writel(0, dev->base + DW_IC_ENABLE);
+ dw_writel(dev, 0, DW_IC_ENABLE);
/* set the slave (target) address */
- writel(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR);
+ dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
/* if the slave address is ten bit address, enable 10BITADDR */
- ic_con = readl(dev->base + DW_IC_CON);
+ ic_con = dw_readl(dev, DW_IC_CON);
if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
ic_con |= DW_IC_CON_10BITADDR_MASTER;
else
ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
- writel(ic_con, dev->base + DW_IC_CON);
+ dw_writel(dev, ic_con, DW_IC_CON);
/* Enable the adapter */
- writel(1, dev->base + DW_IC_ENABLE);
+ dw_writel(dev, 1, DW_IC_ENABLE);
/* Enable interrupts */
- writel(DW_IC_INTR_DEFAULT_MASK, dev->base + DW_IC_INTR_MASK);
+ dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
}
/*
@@ -382,7 +357,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
* messages into the tx buffer. Even if the size of i2c_msg data is
* longer than the size of the tx buffer, it handles everything.
*/
-static void
+void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
@@ -420,15 +395,15 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
buf_len = msgs[dev->msg_write_idx].len;
}
- tx_limit = dev->tx_fifo_depth - readl(dev->base + DW_IC_TXFLR);
- rx_limit = dev->rx_fifo_depth - readl(dev->base + DW_IC_RXFLR);
+ tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR);
+ rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
- writel(0x100, dev->base + DW_IC_DATA_CMD);
+ dw_writel(dev, 0x100, DW_IC_DATA_CMD);
rx_limit--;
} else
- writel(*buf++, dev->base + DW_IC_DATA_CMD);
+ dw_writel(dev, *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
}
@@ -453,7 +428,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
if (dev->msg_err)
intr_mask = 0;
- writel(intr_mask, dev->base + DW_IC_INTR_MASK);
+ dw_writel(dev, intr_mask, DW_IC_INTR_MASK);
}
static void
@@ -477,10 +452,10 @@ i2c_dw_read(struct dw_i2c_dev *dev)
buf = dev->rx_buf;
}
- rx_valid = readl(dev->base + DW_IC_RXFLR);
+ rx_valid = dw_readl(dev, DW_IC_RXFLR);
for (; len > 0 && rx_valid > 0; len--, rx_valid--)
- *buf++ = readl(dev->base + DW_IC_DATA_CMD);
+ *buf++ = dw_readl(dev, DW_IC_DATA_CMD);
if (len > 0) {
dev->status |= STATUS_READ_IN_PROGRESS;
@@ -518,7 +493,7 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
/*
* Prepare controller for a transaction and call i2c_dw_xfer_msg
*/
-static int
+int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
@@ -527,6 +502,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
mutex_lock(&dev->lock);
+ pm_runtime_get_sync(dev->dev);
INIT_COMPLETION(dev->cmd_complete);
dev->msgs = msgs;
@@ -563,7 +539,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
/* no error */
if (likely(!dev->cmd_err)) {
/* Disable the adapter */
- writel(0, dev->base + DW_IC_ENABLE);
+ dw_writel(dev, 0, DW_IC_ENABLE);
ret = num;
goto done;
}
@@ -576,19 +552,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = -EIO;
done:
+ pm_runtime_put(dev->dev);
mutex_unlock(&dev->lock);
return ret;
}
-static u32 i2c_dw_func(struct i2c_adapter *adap)
+u32 i2c_dw_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C |
- I2C_FUNC_10BIT_ADDR |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ return dev->functionality;
}
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
@@ -601,47 +574,47 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
* in the IC_RAW_INTR_STAT register.
*
* That is,
- * stat = readl(IC_INTR_STAT);
+ * stat = dw_readl(IC_INTR_STAT);
* equals to,
- * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
+ * stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK);
*
* The raw version might be useful for debugging purposes.
*/
- stat = readl(dev->base + DW_IC_INTR_STAT);
+ stat = dw_readl(dev, DW_IC_INTR_STAT);
/*
* Do not use the IC_CLR_INTR register to clear interrupts, or
* you'll miss some interrupts, triggered during the period from
- * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
+ * dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR).
*
* Instead, use the separately-prepared IC_CLR_* registers.
*/
if (stat & DW_IC_INTR_RX_UNDER)
- readl(dev->base + DW_IC_CLR_RX_UNDER);
+ dw_readl(dev, DW_IC_CLR_RX_UNDER);
if (stat & DW_IC_INTR_RX_OVER)
- readl(dev->base + DW_IC_CLR_RX_OVER);
+ dw_readl(dev, DW_IC_CLR_RX_OVER);
if (stat & DW_IC_INTR_TX_OVER)
- readl(dev->base + DW_IC_CLR_TX_OVER);
+ dw_readl(dev, DW_IC_CLR_TX_OVER);
if (stat & DW_IC_INTR_RD_REQ)
- readl(dev->base + DW_IC_CLR_RD_REQ);
+ dw_readl(dev, DW_IC_CLR_RD_REQ);
if (stat & DW_IC_INTR_TX_ABRT) {
/*
* The IC_TX_ABRT_SOURCE register is cleared whenever
* the IC_CLR_TX_ABRT is read. Preserve it beforehand.
*/
- dev->abort_source = readl(dev->base + DW_IC_TX_ABRT_SOURCE);
- readl(dev->base + DW_IC_CLR_TX_ABRT);
+ dev->abort_source = dw_readl(dev, DW_IC_TX_ABRT_SOURCE);
+ dw_readl(dev, DW_IC_CLR_TX_ABRT);
}
if (stat & DW_IC_INTR_RX_DONE)
- readl(dev->base + DW_IC_CLR_RX_DONE);
+ dw_readl(dev, DW_IC_CLR_RX_DONE);
if (stat & DW_IC_INTR_ACTIVITY)
- readl(dev->base + DW_IC_CLR_ACTIVITY);
+ dw_readl(dev, DW_IC_CLR_ACTIVITY);
if (stat & DW_IC_INTR_STOP_DET)
- readl(dev->base + DW_IC_CLR_STOP_DET);
+ dw_readl(dev, DW_IC_CLR_STOP_DET);
if (stat & DW_IC_INTR_START_DET)
- readl(dev->base + DW_IC_CLR_START_DET);
+ dw_readl(dev, DW_IC_CLR_START_DET);
if (stat & DW_IC_INTR_GEN_CALL)
- readl(dev->base + DW_IC_CLR_GEN_CALL);
+ dw_readl(dev, DW_IC_CLR_GEN_CALL);
return stat;
}
@@ -650,13 +623,19 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
* Interrupt service routine. This gets called whenever an I2C interrupt
* occurs.
*/
-static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
+irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
struct dw_i2c_dev *dev = dev_id;
- u32 stat;
+ u32 stat, enabled;
+
+ enabled = dw_readl(dev, DW_IC_ENABLE);
+ stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
+ dev_dbg(dev->dev, "%s: %s enabled= 0x%x stat=0x%x\n", __func__,
+ dev->adapter.name, enabled, stat);
+ if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
+ return IRQ_NONE;
stat = i2c_dw_read_clear_intrbits(dev);
- dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
if (stat & DW_IC_INTR_TX_ABRT) {
dev->cmd_err |= DW_IC_ERR_TX_ABRT;
@@ -666,7 +645,7 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
* Anytime TX_ABRT is set, the contents of the tx/rx
* buffers are flushed. Make sure to skip them.
*/
- writel(0, dev->base + DW_IC_INTR_MASK);
+ dw_writel(dev, 0, DW_IC_INTR_MASK);
goto tx_aborted;
}
@@ -689,159 +668,38 @@ tx_aborted:
return IRQ_HANDLED;
}
-static struct i2c_algorithm i2c_dw_algo = {
- .master_xfer = i2c_dw_xfer,
- .functionality = i2c_dw_func,
-};
-
-static int __devinit dw_i2c_probe(struct platform_device *pdev)
+void i2c_dw_enable(struct dw_i2c_dev *dev)
{
- struct dw_i2c_dev *dev;
- struct i2c_adapter *adap;
- struct resource *mem, *ioarea;
- int irq, r;
-
- /* NOTE: driver uses the static register mapping */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "no mem resource?\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
- return irq; /* -ENXIO */
- }
-
- ioarea = request_mem_region(mem->start, resource_size(mem),
- pdev->name);
- if (!ioarea) {
- dev_err(&pdev->dev, "I2C region already claimed\n");
- return -EBUSY;
- }
-
- dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
- if (!dev) {
- r = -ENOMEM;
- goto err_release_region;
- }
-
- init_completion(&dev->cmd_complete);
- mutex_init(&dev->lock);
- dev->dev = get_device(&pdev->dev);
- dev->irq = irq;
- platform_set_drvdata(pdev, dev);
-
- dev->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- r = -ENODEV;
- goto err_free_mem;
- }
- clk_enable(dev->clk);
-
- dev->base = ioremap(mem->start, resource_size(mem));
- if (dev->base == NULL) {
- dev_err(&pdev->dev, "failure mapping io resources\n");
- r = -EBUSY;
- goto err_unuse_clocks;
- }
- {
- u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1);
-
- dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
- dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
- }
- i2c_dw_init(dev);
-
- writel(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */
- r = request_irq(dev->irq, i2c_dw_isr, IRQF_DISABLED, pdev->name, dev);
- if (r) {
- dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
- goto err_iounmap;
- }
-
- adap = &dev->adapter;
- i2c_set_adapdata(adap, dev);
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON;
- strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
- sizeof(adap->name));
- adap->algo = &i2c_dw_algo;
- adap->dev.parent = &pdev->dev;
-
- adap->nr = pdev->id;
- r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(&pdev->dev, "failure adding adapter\n");
- goto err_free_irq;
- }
-
- return 0;
-
-err_free_irq:
- free_irq(dev->irq, dev);
-err_iounmap:
- iounmap(dev->base);
-err_unuse_clocks:
- clk_disable(dev->clk);
- clk_put(dev->clk);
- dev->clk = NULL;
-err_free_mem:
- platform_set_drvdata(pdev, NULL);
- put_device(&pdev->dev);
- kfree(dev);
-err_release_region:
- release_mem_region(mem->start, resource_size(mem));
-
- return r;
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
}
-static int __devexit dw_i2c_remove(struct platform_device *pdev)
+u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
{
- struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
- struct resource *mem;
-
- platform_set_drvdata(pdev, NULL);
- i2c_del_adapter(&dev->adapter);
- put_device(&pdev->dev);
-
- clk_disable(dev->clk);
- clk_put(dev->clk);
- dev->clk = NULL;
-
- writel(0, dev->base + DW_IC_ENABLE);
- free_irq(dev->irq, dev);
- kfree(dev);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
- return 0;
+ return dw_readl(dev, DW_IC_ENABLE);
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:i2c_designware");
+void i2c_dw_disable(struct dw_i2c_dev *dev)
+{
+ /* Disable controller */
+ dw_writel(dev, 0, DW_IC_ENABLE);
-static struct platform_driver dw_i2c_driver = {
- .remove = __devexit_p(dw_i2c_remove),
- .driver = {
- .name = "i2c_designware",
- .owner = THIS_MODULE,
- },
-};
+ /* Disable all interrupts */
+ dw_writel(dev, 0, DW_IC_INTR_MASK);
+ dw_readl(dev, DW_IC_CLR_INTR);
+}
-static int __init dw_i2c_init_driver(void)
+void i2c_dw_clear_int(struct dw_i2c_dev *dev)
{
- return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
+ dw_readl(dev, DW_IC_CLR_INTR);
}
-module_init(dw_i2c_init_driver);
-static void __exit dw_i2c_exit_driver(void)
+void i2c_dw_disable_int(struct dw_i2c_dev *dev)
{
- platform_driver_unregister(&dw_i2c_driver);
+ dw_writel(dev, 0, DW_IC_INTR_MASK);
}
-module_exit(dw_i2c_exit_driver);
-MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
-MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
-MODULE_LICENSE("GPL");
+u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
+{
+ return dw_readl(dev, DW_IC_COMP_PARAM_1);
+}
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
new file mode 100644
index 00000000000..02d1a2ddd85
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -0,0 +1,105 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+
+#define DW_IC_CON_MASTER 0x1
+#define DW_IC_CON_SPEED_STD 0x2
+#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_10BITADDR_MASTER 0x10
+#define DW_IC_CON_RESTART_EN 0x20
+#define DW_IC_CON_SLAVE_DISABLE 0x40
+
+
+/**
+ * struct dw_i2c_dev - private i2c-designware data
+ * @dev: driver model device node
+ * @base: IO registers pointer
+ * @cmd_complete: tx completion indicator
+ * @lock: protect this struct and IO registers
+ * @clk: input reference clock
+ * @cmd_err: run time hardware error code
+ * @msgs: points to an array of messages currently being transferred
+ * @msgs_num: the number of elements in msgs
+ * @msg_write_idx: the element index of the current tx message in the msgs
+ * array
+ * @tx_buf_len: the length of the current tx buffer
+ * @tx_buf: the current tx buffer
+ * @msg_read_idx: the element index of the current rx message in the msgs
+ * array
+ * @rx_buf_len: the length of the current rx buffer
+ * @rx_buf: the current rx buffer
+ * @msg_err: error status of the current transfer
+ * @status: i2c master status, one of STATUS_*
+ * @abort_source: copy of the TX_ABRT_SOURCE register
+ * @irq: interrupt number for the i2c master
+ * @adapter: i2c subsystem adapter node
+ * @tx_fifo_depth: depth of the hardware tx fifo
+ * @rx_fifo_depth: depth of the hardware rx fifo
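+ * @get_clk_rate_khz: callback returning the input reference clock rate in kHz
+ * @controller: Intel PCI controller descriptor (used by the PCI glue only)
+ * @swab: true if the hardware registers are byte swapped
+ * @functionality: I2C_FUNC_* functionality flags reported to the I2C core
+ * @master_cfg: DW_IC_CON_* configuration used when the adapter is initialized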
+ */
+struct dw_i2c_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct mutex lock;
+ struct clk *clk;
+ u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
+ struct dw_pci_controller *controller;
+ int cmd_err;
+ struct i2c_msg *msgs;
+ int msgs_num;
+ int msg_write_idx;
+ u32 tx_buf_len;
+ u8 *tx_buf;
+ int msg_read_idx;
+ u32 rx_buf_len;
+ u8 *rx_buf;
+ int msg_err;
+ unsigned int status;
+ u32 abort_source;
+ int irq;
+ int swab;
+ struct i2c_adapter adapter;
+ u32 functionality;
+ u32 master_cfg;
+ unsigned int tx_fifo_depth;
+ unsigned int rx_fifo_depth;
+};
+
+extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
+extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
+extern int i2c_dw_init(struct dw_i2c_dev *dev);
+extern int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num);
+extern u32 i2c_dw_func(struct i2c_adapter *adap);
+extern irqreturn_t i2c_dw_isr(int this_irq, void *dev_id);
+extern void i2c_dw_enable(struct dw_i2c_dev *dev);
+extern u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev);
+extern void i2c_dw_disable(struct dw_i2c_dev *dev);
+extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
+extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
+extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
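The header only declares the register accessors shared by the platform and PCI glue; their definitions live in i2c-designware-core.c, which is not shown in this diff. A minimal sketch of what they are expected to look like, assuming the core file already includes <linux/io.h> and <linux/swab.h> and that the swab flag selects byte-swapped register access:

u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
	u32 value = readl(dev->base + offset);

	if (dev->swab)
		return swab32(value);
	return value;
}

void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
	if (dev->swab)
		b = swab32(b);
	writel(b, dev->base + offset);
}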
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
new file mode 100644
index 00000000000..9e89e7313d6
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -0,0 +1,392 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ * Copyright (C) 2011 Intel corporation.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include "i2c-designware-core.h"
+
+#define DRIVER_NAME "i2c-designware-pci"
+
+enum dw_pci_ctl_id_t {
+ moorestown_0,
+ moorestown_1,
+ moorestown_2,
+
+ medfield_0,
+ medfield_1,
+ medfield_2,
+ medfield_3,
+ medfield_4,
+ medfield_5,
+};
+
+struct dw_pci_controller {
+ u32 bus_num;
+ u32 bus_cfg;
+ u32 tx_fifo_depth;
+ u32 rx_fifo_depth;
+ u32 clk_khz;
+};
+
+#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \
+ DW_IC_CON_SLAVE_DISABLE | \
+ DW_IC_CON_RESTART_EN)
+
+static struct dw_pci_controller dw_pci_controllers[] = {
+ [moorestown_0] = {
+ .bus_num = 0,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [moorestown_1] = {
+ .bus_num = 1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [moorestown_2] = {
+ .bus_num = 2,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [medfield_0] = {
+ .bus_num = 0,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [medfield_1] = {
+ .bus_num = 1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [medfield_2] = {
+ .bus_num = 2,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [medfield_3] = {
+ .bus_num = 3,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [medfield_4] = {
+ .bus_num = 4,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [medfield_5] = {
+ .bus_num = 5,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+};
+
+static struct i2c_algorithm i2c_dw_algo = {
+ .master_xfer = i2c_dw_xfer,
+ .functionality = i2c_dw_func,
+};
+
+static int i2c_dw_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+ int err;
+
+ i2c_dw_disable(i2c);
+
+ err = pci_save_state(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_save_state failed\n");
+ return err;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D3hot);
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_power_state failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int i2c_dw_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+ int err;
+ u32 enabled;
+
+ enabled = i2c_dw_is_enabled(i2c);
+ if (enabled)
+ return 0;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_power_state() failed\n");
+ return err;
+ }
+
+ pci_restore_state(pdev);
+
+ i2c_dw_init(i2c);
+ i2c_dw_enable(i2c);
+ return 0;
+}
+
+static int i2c_dw_pci_runtime_idle(struct device *dev)
+{
+ int err = pm_schedule_suspend(dev, 500);
+ dev_dbg(dev, "runtime_idle called\n");
+
+ if (err != 0)
+ return 0;
+ return -EBUSY;
+}
+
+static const struct dev_pm_ops i2c_dw_pm_ops = {
+ .resume = i2c_dw_pci_resume,
+ .suspend = i2c_dw_pci_suspend,
+ SET_RUNTIME_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume,
+ i2c_dw_pci_runtime_idle)
+};
+
+static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+ return dev->controller->clk_khz;
+}
+
+static int __devinit i2c_dw_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct dw_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ unsigned long start, len;
+ void __iomem *base;
+ int r;
+ struct dw_pci_controller *controller;
+
+ if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
+ printk(KERN_ERR "dw_i2c_pci_probe: invalid driver data %ld\n",
+ id->driver_data);
+ return -EINVAL;
+ }
+
+ controller = &dw_pci_controllers[id->driver_data];
+
+ r = pci_enable_device(pdev);
+ if (r) {
+ dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n",
+ r);
+ goto exit;
+ }
+
+ /* Determine the address of the I2C area */
+ start = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!start || len == 0) {
+ dev_err(&pdev->dev, "base address not set\n");
+ r = -ENODEV;
+ goto exit;
+ }
+
+ r = pci_request_region(pdev, 0, DRIVER_NAME);
+ if (r) {
+ dev_err(&pdev->dev, "failed to request I2C region "
+ "0x%lx-0x%lx\n", start,
+ (unsigned long)pci_resource_end(pdev, 0));
+ goto exit;
+ }
+
+ base = ioremap_nocache(start, len);
+ if (!base) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ r = -ENOMEM;
+ goto err_release_region;
+ }
+
+ dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev) {
+ r = -ENOMEM;
+ goto err_release_region;
+ }
+
+ init_completion(&dev->cmd_complete);
+ mutex_init(&dev->lock);
+ dev->clk = NULL;
+ dev->controller = controller;
+ dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+ dev->base = base;
+ dev->dev = get_device(&pdev->dev);
+ dev->functionality =
+ I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
+ dev->master_cfg = controller->bus_cfg;
+
+ dev->irq = pdev->irq;
+ pci_set_drvdata(pdev, dev);
+
+ dev->tx_fifo_depth = controller->tx_fifo_depth;
+ dev->rx_fifo_depth = controller->rx_fifo_depth;
+ r = i2c_dw_init(dev);
+ if (r)
+ goto err_iounmap;
+
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ adap->class = 0;
+ adap->algo = &i2c_dw_algo;
+ adap->dev.parent = &pdev->dev;
+ adap->nr = controller->bus_num;
+ snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci-%d",
+ adap->nr);
+
+ r = request_irq(pdev->irq, i2c_dw_isr, IRQF_SHARED, adap->name, dev);
+ if (r) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
+ goto err_iounmap;
+ }
+
+ i2c_dw_disable_int(dev);
+ i2c_dw_clear_int(dev);
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(&pdev->dev, "failure adding adapter\n");
+ goto err_free_irq;
+ }
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(pdev->irq, dev);
+err_iounmap:
+ iounmap(dev->base);
+ pci_set_drvdata(pdev, NULL);
+ put_device(&pdev->dev);
+ kfree(dev);
+err_release_region:
+ pci_release_region(pdev, 0);
+exit:
+ return r;
+}
+
+static void __devexit i2c_dw_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
+
+ i2c_dw_disable(dev);
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pci_set_drvdata(pdev, NULL);
+ i2c_del_adapter(&dev->adapter);
+ put_device(&pdev->dev);
+
+ free_irq(dev->irq, dev);
+ kfree(dev);
+ pci_release_region(pdev, 0);
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("i2c_designware-pci");
+
+DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
+ /* Moorestown */
+ { PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
+ { PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
+ { PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
+ /* Medfield */
+ { PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
+ { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
+ { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
+ { PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
+ { PCI_VDEVICE(INTEL, 0x082D), medfield_1 },
+ { PCI_VDEVICE(INTEL, 0x082E), medfield_2 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
+
+static struct pci_driver dw_i2c_driver = {
+ .name = DRIVER_NAME,
+ .id_table = i2_designware_pci_ids,
+ .probe = i2c_dw_pci_probe,
+ .remove = __devexit_p(i2c_dw_pci_remove),
+ .driver = {
+ .pm = &i2c_dw_pm_ops,
+ },
+};
+
+static int __init dw_i2c_init_driver(void)
+{
+ return pci_register_driver(&dw_i2c_driver);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+ pci_unregister_driver(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
new file mode 100644
index 00000000000..2d3657ab125
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -0,0 +1,215 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "i2c-designware-core.h"
+
+static struct i2c_algorithm i2c_dw_algo = {
+ .master_xfer = i2c_dw_xfer,
+ .functionality = i2c_dw_func,
+};
+
+static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+ return clk_get_rate(dev->clk)/1000;
+}
+
+static int __devinit dw_i2c_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ struct resource *mem, *ioarea;
+ int irq, r;
+
+ /* NOTE: driver uses the static register mapping */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return irq; /* -ENXIO */
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev) {
+ r = -ENOMEM;
+ goto err_release_region;
+ }
+
+ init_completion(&dev->cmd_complete);
+ mutex_init(&dev->lock);
+ dev->dev = get_device(&pdev->dev);
+ dev->irq = irq;
+ platform_set_drvdata(pdev, dev);
+
+ dev->clk = clk_get(&pdev->dev, NULL);
+ dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+
+ if (IS_ERR(dev->clk)) {
+ r = -ENODEV;
+ goto err_free_mem;
+ }
+ clk_enable(dev->clk);
+
+ dev->functionality =
+ I2C_FUNC_I2C |
+ I2C_FUNC_10BIT_ADDR |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
+ dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
+
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (dev->base == NULL) {
+ dev_err(&pdev->dev, "failure mapping io resources\n");
+ r = -EBUSY;
+ goto err_unuse_clocks;
+ }
+ {
+ u32 param1 = i2c_dw_read_comp_param(dev);
+
+ dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
+ dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
+ }
+ r = i2c_dw_init(dev);
+ if (r)
+ goto err_iounmap;
+
+ i2c_dw_disable_int(dev);
+ r = request_irq(dev->irq, i2c_dw_isr, IRQF_DISABLED, pdev->name, dev);
+ if (r) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
+ goto err_iounmap;
+ }
+
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
+ sizeof(adap->name));
+ adap->algo = &i2c_dw_algo;
+ adap->dev.parent = &pdev->dev;
+
+ adap->nr = pdev->id;
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(&pdev->dev, "failure adding adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_iounmap:
+ iounmap(dev->base);
+err_unuse_clocks:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+err_free_mem:
+ platform_set_drvdata(pdev, NULL);
+ put_device(&pdev->dev);
+ kfree(dev);
+err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return r;
+}
+
+static int __devexit dw_i2c_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ platform_set_drvdata(pdev, NULL);
+ i2c_del_adapter(&dev->adapter);
+ put_device(&pdev->dev);
+
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+
+ i2c_dw_disable(dev);
+ free_irq(dev->irq, dev);
+ kfree(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:i2c_designware");
+
+static struct platform_driver dw_i2c_driver = {
+ .remove = __devexit_p(dw_i2c_remove),
+ .driver = {
+ .name = "i2c_designware",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init dw_i2c_init_driver(void)
+{
+ return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
+MODULE_LICENSE("GPL");
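For reference, this platform glue expects board or SoC setup code to register an "i2c_designware" platform device with one memory resource and one interrupt. A hypothetical registration, with a made-up register base and IRQ number purely for illustration:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource dw_i2c0_resources[] = {
	{
		.start	= 0xffee0000,			/* example register base */
		.end	= 0xffee0000 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 32,				/* example interrupt line */
		.end	= 32,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dw_i2c0_device = {
	.name		= "i2c_designware",
	.id		= 0,				/* becomes the adapter number */
	.num_resources	= ARRAY_SIZE(dw_i2c0_resources),
	.resource	= dw_i2c0_resources,
};

/* typically called from the board's machine init code */
static void __init board_add_dw_i2c(void)
{
	platform_device_register(&dw_i2c0_device);
}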
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index ce1a32b71e4..8cebef49aea 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -64,6 +64,7 @@
#define TEN_BIT_ADDR_DEFAULT 0xF000
#define TEN_BIT_ADDR_MASK 0xF0
#define PCH_START 0x0020
+#define PCH_RESTART 0x0004
#define PCH_ESR_START 0x0001
#define PCH_BUFF_START 0x1
#define PCH_REPSTART 0x0004
@@ -273,23 +274,24 @@ static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
s32 timeout)
{
void __iomem *p = adap->pch_base_address;
+ ktime_t ns_val;
+
+ if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
+ return 0;
/* MAX timeout value is timeout*1000*1000nsec */
- ktime_t ns_val = ktime_add_ns(ktime_get(), timeout*1000*1000);
+ ns_val = ktime_add_ns(ktime_get(), timeout*1000*1000);
do {
- if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
- break;
msleep(20);
+ if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
+ return 0;
} while (ktime_lt(ktime_get(), ns_val));
pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR));
+ pch_err(adap, "%s: Timeout Error.return%d\n", __func__, -ETIME);
+ pch_i2c_init(adap);
- if (timeout == 0) {
- pch_err(adap, "%s: Timeout Error.return%d\n", __func__, -ETIME);
- return -ETIME;
- }
-
- return 0;
+ return -ETIME;
}
/**
@@ -311,21 +313,19 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap)
*/
static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
{
- s32 ret;
+ long ret;
ret = wait_event_timeout(pch_event,
(adap->pch_event_flag != 0), msecs_to_jiffies(50));
- if (ret < 0) {
- pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
- return ret;
- }
if (ret == 0) {
pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+ adap->pch_event_flag = 0;
return -ETIMEDOUT;
}
if (adap->pch_event_flag & I2C_ERROR_MASK) {
pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
+ adap->pch_event_flag = 0;
return -EIO;
}
@@ -394,6 +394,7 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
u32 addr_2_msb;
u32 addr_8_lsb;
s32 wrcount;
+ s32 rtn;
void __iomem *p = adap->pch_base_address;
length = msgs->len;
@@ -412,15 +413,29 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
}
if (msgs->flags & I2C_M_TEN) {
- addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7);
+ addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7) & 0x06;
iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
if (first)
pch_i2c_start(adap);
- if (pch_i2c_wait_for_xfer_complete(adap) == 0 &&
- pch_i2c_getack(adap) == 0) {
+
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
+ return -EIO;
+ }
addr_8_lsb = (addr & I2C_ADDR_MSK);
iowrite32(addr_8_lsb, p + PCH_I2CDR);
- } else {
+ } else if (rtn == -EIO) { /* Arbitration Lost */
+ pch_err(adap, "Lost Arbitration\n");
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ } else { /* wait-event timeout */
pch_i2c_stop(adap);
return -ETIME;
}
@@ -431,30 +446,51 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
pch_i2c_start(adap);
}
- if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
- (pch_i2c_getack(adap) == 0)) {
- for (wrcount = 0; wrcount < length; ++wrcount) {
- /* write buffer value to I2C data register */
- iowrite32(buf[wrcount], p + PCH_I2CDR);
- pch_dbg(adap, "writing %x to Data register\n",
- buf[wrcount]);
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
+ return -EIO;
+ }
+ } else if (rtn == -EIO) { /* Arbitration Lost */
+ pch_err(adap, "Lost Arbitration\n");
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ } else { /* wait-event timeout */
+ pch_i2c_stop(adap);
+ return -ETIME;
+ }
- if (pch_i2c_wait_for_xfer_complete(adap) != 0)
- return -ETIME;
+ for (wrcount = 0; wrcount < length; ++wrcount) {
+ /* write buffer value to I2C data register */
+ iowrite32(buf[wrcount], p + PCH_I2CDR);
+ pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]);
- if (pch_i2c_getack(adap))
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
return -EIO;
+ }
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMCF_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMIF_BIT);
+ } else { /* wait-event timeout */
+ pch_i2c_stop(adap);
+ return -ETIME;
}
+ }
- /* check if this is the last message */
- if (last)
- pch_i2c_stop(adap);
- else
- pch_i2c_repstart(adap);
- } else {
+ /* check if this is the last message */
+ if (last)
pch_i2c_stop(adap);
- return -EIO;
- }
+ else
+ pch_i2c_repstart(adap);
pch_dbg(adap, "return=%d\n", wrcount);
@@ -484,6 +520,19 @@ static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
}
/**
+ * pch_i2c_restart() - Generate I2C restart condition in normal mode.
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ *
+ * Generate I2C restart condition in normal mode by setting I2CCTL.I2CRSTA.
+ */
+static void pch_i2c_restart(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+ pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_RESTART);
+}
+
+/**
* pch_i2c_readbytes() - read data from I2C bus in normal mode.
* @i2c_adap: Pointer to the struct i2c_adapter.
* @msgs: Pointer to i2c_msg structure.
@@ -500,7 +549,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
u32 length;
u32 addr;
u32 addr_2_msb;
+ u32 addr_8_lsb;
void __iomem *p = adap->pch_base_address;
+ s32 rtn;
length = msgs->len;
buf = msgs->buf;
@@ -515,9 +566,55 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
}
if (msgs->flags & I2C_M_TEN) {
- addr_2_msb = (((addr & I2C_MSB_2B_MSK) >> 7) | (I2C_RD));
+ addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7);
iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+ if (first)
+ pch_i2c_start(adap);
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
+ return -EIO;
+ }
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
+ } else if (rtn == -EIO) { /* Arbitration Lost */
+ pch_err(adap, "Lost Arbitration\n");
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ } else { /* wait-event timeout */
+ pch_i2c_stop(adap);
+ return -ETIME;
+ }
+ pch_i2c_restart(adap);
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
+ return -EIO;
+ }
+ addr_2_msb |= I2C_RD;
+ iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK,
+ p + PCH_I2CDR);
+ } else if (rtn == -EIO) { /* Arbitration Lost */
+ pch_err(adap, "Lost Arbitration\n");
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR,
+ I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ } else { /* wait-event timeout */
+ pch_i2c_stop(adap);
+ return -ETIME;
+ }
} else {
/* 7 address bits + R/W bit */
addr = (((addr) << 1) | (I2C_RD));
@@ -528,56 +625,81 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (first)
pch_i2c_start(adap);
- if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
- (pch_i2c_getack(adap) == 0)) {
- pch_dbg(adap, "return %d\n", 0);
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
+ return -EIO;
+ }
+ } else if (rtn == -EIO) { /* Arbitration Lost */
+ pch_err(adap, "Lost Arbitration\n");
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ } else { /* wait-event timeout */
+ pch_i2c_stop(adap);
+ return -ETIME;
+ }
- if (length == 0) {
- pch_i2c_stop(adap);
- ioread32(p + PCH_I2CDR); /* Dummy read needs */
+ if (length == 0) {
+ pch_i2c_stop(adap);
+ ioread32(p + PCH_I2CDR); /* Dummy read needs */
- count = length;
- } else {
- int read_index;
- int loop;
- pch_i2c_sendack(adap);
+ count = length;
+ } else {
+ int read_index;
+ int loop;
+ pch_i2c_sendack(adap);
- /* Dummy read */
- for (loop = 1, read_index = 0; loop < length; loop++) {
- buf[read_index] = ioread32(p + PCH_I2CDR);
+ /* Dummy read */
+ for (loop = 1, read_index = 0; loop < length; loop++) {
+ buf[read_index] = ioread32(p + PCH_I2CDR);
- if (loop != 1)
- read_index++;
+ if (loop != 1)
+ read_index++;
- if (pch_i2c_wait_for_xfer_complete(adap) != 0) {
- pch_i2c_stop(adap);
- return -ETIME;
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave"
+ "address setting\n");
+ return -EIO;
}
- } /* end for */
+ } else { /* wait-event timeout */
+ pch_i2c_stop(adap);
+ return -ETIME;
+ }
- pch_i2c_sendnack(adap);
+ } /* end for */
- buf[read_index] = ioread32(p + PCH_I2CDR);
+ pch_i2c_sendnack(adap);
- if (length != 1)
- read_index++;
+ buf[read_index] = ioread32(p + PCH_I2CDR); /* Read final - 1 */
- if (pch_i2c_wait_for_xfer_complete(adap) == 0) {
- if (last)
- pch_i2c_stop(adap);
- else
- pch_i2c_repstart(adap);
+ if (length != 1)
+ read_index++;
- buf[read_index++] = ioread32(p + PCH_I2CDR);
- count = read_index;
- } else {
- count = -ETIME;
+ rtn = pch_i2c_wait_for_xfer_complete(adap);
+ if (rtn == 0) {
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave"
+ "address setting\n");
+ return -EIO;
}
-
+ } else { /* wait-event timeout */
+ pch_i2c_stop(adap);
+ return -ETIME;
}
- } else {
- count = -ETIME;
- pch_i2c_stop(adap);
+
+ if (last)
+ pch_i2c_stop(adap);
+ else
+ pch_i2c_repstart(adap);
+
+ buf[read_index++] = ioread32(p + PCH_I2CDR); /* Read Final */
+ count = read_index;
}
return count;
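The i2c-eg20t changes above repeat one three-way check after each address-phase step: 0 from pch_i2c_wait_for_xfer_complete() means the transfer-complete event arrived (the ACK bit is then inspected), -EIO means arbitration was lost, and anything else is a wait-event timeout. Purely as a reading aid, and not part of the patch, that address-phase pattern boils down to a helper like this (the data-phase loops differ slightly in which status bits they clear):

static s32 pch_i2c_check_addr_phase(struct i2c_algo_pch_data *adap)
{
	s32 rtn = pch_i2c_wait_for_xfer_complete(adap);

	if (rtn == 0) {
		if (pch_i2c_getack(adap))
			return -EIO;	/* slave NACKed the address */
		return 0;
	}
	if (rtn == -EIO) {		/* arbitration lost */
		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
		pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
		pch_i2c_init(adap);
		return -EAGAIN;
	}
	pch_i2c_stop(adap);		/* wait-event timeout */
	return -ETIME;
}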
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 3876a2478bd..63bb1cc2a04 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -387,7 +387,7 @@ static int __devinit highlander_i2c_probe(struct platform_device *pdev)
dev->irq = 0;
if (dev->irq) {
- ret = request_irq(dev->irq, highlander_i2c_irq, IRQF_DISABLED,
+ ret = request_irq(dev->irq, highlander_i2c_irq, 0,
pdev->name, dev);
if (unlikely(ret))
goto err_unmap;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 4c2a62b75b5..58832e578ff 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -48,6 +48,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_i2c.h>
#include <mach/irqs.h>
#include <mach/hardware.h>
@@ -125,6 +128,11 @@ struct imx_i2c_struct {
unsigned int ifdr; /* IMX_I2C_IFDR */
};
+static const struct of_device_id i2c_imx_dt_ids[] = {
+ { .compatible = "fsl,imx1-i2c", },
+ { /* sentinel */ }
+};
+
/** Functions for IMX I2C adapter driver ***************************************
*******************************************************************************/
@@ -466,10 +474,10 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
{
struct imx_i2c_struct *i2c_imx;
struct resource *res;
- struct imxi2c_platform_data *pdata;
+ struct imxi2c_platform_data *pdata = pdev->dev.platform_data;
void __iomem *base;
resource_size_t res_size;
- int irq;
+ int irq, bitrate;
int ret;
dev_dbg(&pdev->dev, "<%s>\n", __func__);
@@ -485,19 +493,11 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
return -ENOENT;
}
- pdata = pdev->dev.platform_data;
-
- if (pdata && pdata->init) {
- ret = pdata->init(&pdev->dev);
- if (ret)
- return ret;
- }
-
res_size = resource_size(res);
if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
- ret = -EBUSY;
- goto fail0;
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ return -EBUSY;
}
base = ioremap(res->start, res_size);
@@ -520,6 +520,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
i2c_imx->adapter.algo = &i2c_imx_algo;
i2c_imx->adapter.dev.parent = &pdev->dev;
i2c_imx->adapter.nr = pdev->id;
+ i2c_imx->adapter.dev.of_node = pdev->dev.of_node;
i2c_imx->irq = irq;
i2c_imx->base = base;
i2c_imx->res = res;
@@ -546,10 +547,12 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c_imx->adapter, i2c_imx);
/* Set up clock divider */
- if (pdata && pdata->bitrate)
- i2c_imx_set_clk(i2c_imx, pdata->bitrate);
- else
- i2c_imx_set_clk(i2c_imx, IMX_I2C_BIT_RATE);
+ bitrate = IMX_I2C_BIT_RATE;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &bitrate);
+ if (ret < 0 && pdata && pdata->bitrate)
+ bitrate = pdata->bitrate;
+ i2c_imx_set_clk(i2c_imx, bitrate);
/* Set up chip registers to defaults */
writeb(0, i2c_imx->base + IMX_I2C_I2CR);
@@ -562,6 +565,8 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
goto fail5;
}
+ of_i2c_register_devices(&i2c_imx->adapter);
+
/* Set up platform driver data */
platform_set_drvdata(pdev, i2c_imx);
@@ -586,16 +591,12 @@ fail2:
iounmap(base);
fail1:
release_mem_region(res->start, resource_size(res));
-fail0:
- if (pdata && pdata->exit)
- pdata->exit(&pdev->dev);
return ret; /* Return error number */
}
static int __exit i2c_imx_remove(struct platform_device *pdev)
{
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
- struct imxi2c_platform_data *pdata = pdev->dev.platform_data;
/* remove adapter */
dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
@@ -611,10 +612,6 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
writeb(0, i2c_imx->base + IMX_I2C_I2CR);
writeb(0, i2c_imx->base + IMX_I2C_I2SR);
- /* Shut down hardware */
- if (pdata && pdata->exit)
- pdata->exit(&pdev->dev);
-
clk_put(i2c_imx->clk);
iounmap(i2c_imx->base);
@@ -628,6 +625,7 @@ static struct platform_driver i2c_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = i2c_imx_dt_ids,
}
};
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 5d8aed5ec21..c01e9519f6c 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -35,7 +35,7 @@
#include <linux/slab.h>
#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
-#include <mach/gpio.h>
+#include <mach/gpio-ixp2000.h>
static inline int ixp2000_scl_pin(void *data)
{
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index b228e09c5d0..5267ab93d55 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -63,11 +63,11 @@
/* Master controller (MCR) register */
#define I2C_MCR_OP (0x1 << 0) /* Operation */
#define I2C_MCR_A7 (0x7f << 1) /* 7-bit address */
-#define I2C_MCR_EA10 (0x7 << 8) /* 10-bit Extended address */
+#define I2C_MCR_EA10 (0x7 << 8) /* 10-bit Extended address */
#define I2C_MCR_SB (0x1 << 11) /* Extended address */
#define I2C_MCR_AM (0x3 << 12) /* Address type */
-#define I2C_MCR_STOP (0x1 << 14) /* Stop condition */
-#define I2C_MCR_LENGTH (0x7ff << 15) /* Transaction length */
+#define I2C_MCR_STOP (0x1 << 14) /* Stop condition */
+#define I2C_MCR_LENGTH (0x7ff << 15) /* Transaction length */
/* Status register (SR) */
#define I2C_SR_OP (0x3 << 0) /* Operation */
@@ -77,7 +77,7 @@
#define I2C_SR_LENGTH (0x7ff << 9) /* Transfer length */
/* Interrupt mask set/clear (IMSCR) bits */
-#define I2C_IT_TXFE (0x1 << 0)
+#define I2C_IT_TXFE (0x1 << 0)
#define I2C_IT_TXFNE (0x1 << 1)
#define I2C_IT_TXFF (0x1 << 2)
#define I2C_IT_TXFOVR (0x1 << 3)
@@ -135,31 +135,31 @@ struct i2c_nmk_client {
};
/**
- * struct nmk_i2c_dev - private data structure of the controller
- * @pdev: parent platform device
- * @adap: corresponding I2C adapter
- * @irq: interrupt line for the controller
- * @virtbase: virtual io memory area
- * @clk: hardware i2c block clock
- * @cfg: machine provided controller configuration
- * @cli: holder of client specific data
- * @stop: stop condition
- * @xfer_complete: acknowledge completion for a I2C message
- * @result: controller propogated result
- * @regulator: pointer to i2c regulator
- * @busy: Busy doing transfer
+ * struct nmk_i2c_dev - private data structure of the controller.
+ * @pdev: parent platform device.
+ * @adap: corresponding I2C adapter.
+ * @irq: interrupt line for the controller.
+ * @virtbase: virtual io memory area.
+ * @clk: hardware i2c block clock.
+ * @cfg: machine provided controller configuration.
+ * @cli: holder of client specific data.
+ * @stop: stop condition.
+ * @xfer_complete: acknowledge completion for an I2C message.
+ * @result: controller propagated result.
+ * @regulator: pointer to i2c regulator.
+ * @busy: Busy doing transfer.
*/
struct nmk_i2c_dev {
struct platform_device *pdev;
- struct i2c_adapter adap;
- int irq;
+ struct i2c_adapter adap;
+ int irq;
void __iomem *virtbase;
struct clk *clk;
struct nmk_i2c_controller cfg;
struct i2c_nmk_client cli;
- int stop;
+ int stop;
struct completion xfer_complete;
- int result;
+ int result;
struct regulator *regulator;
bool busy;
};
@@ -217,8 +217,9 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
}
}
- dev_err(&dev->pdev->dev, "flushing operation timed out "
- "giving up after %d attempts", LOOP_ATTEMPTS);
+ dev_err(&dev->pdev->dev,
+ "flushing operation timed out giving up after %d attempts",
+ LOOP_ATTEMPTS);
return -ETIMEDOUT;
}
@@ -270,7 +271,7 @@ exit:
}
/* enable peripheral, master mode operation */
-#define DEFAULT_I2C_REG_CR ((1 << 1) | I2C_CR_PE)
+#define DEFAULT_I2C_REG_CR ((1 << 1) | I2C_CR_PE)
/**
* load_i2c_mcr_reg() - load the MCR register
@@ -363,8 +364,8 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* and high speed (up to 3.4 Mb/s)
*/
if (dev->cfg.sm > I2C_FREQ_MODE_FAST) {
- dev_err(&dev->pdev->dev, "do not support this mode "
- "defaulting to std. mode\n");
+ dev_err(&dev->pdev->dev,
+ "do not support this mode defaulting to std. mode\n");
brcr2 = i2c_clk/(100000 * 2) & 0xffff;
writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
writel(I2C_FREQ_MODE_STANDARD << 4,
@@ -423,7 +424,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
if (timeout < 0) {
dev_err(&dev->pdev->dev,
- "wait_for_completion_timeout"
+ "wait_for_completion_timeout "
"returned %d waiting for event\n", timeout);
status = timeout;
}
@@ -556,8 +557,8 @@ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
if (((i2c_sr >> 2) & 0x3) == 0x3) {
/* get the abort cause */
cause = (i2c_sr >> 4) & 0x7;
- dev_err(&dev->pdev->dev, "%s\n", cause
- >= ARRAY_SIZE(abort_causes) ?
+ dev_err(&dev->pdev->dev, "%s\n",
+ cause >= ARRAY_SIZE(abort_causes) ?
"unknown reason" :
abort_causes[cause]);
}
@@ -582,13 +583,13 @@ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
*
* NOTE:
* READ TRANSFER : We impose a restriction of the first message to be the
- * index message for any read transaction.
- * - a no index is coded as '0',
- * - 2byte big endian index is coded as '3'
- * !!! msg[0].buf holds the actual index.
- * This is compatible with generic messages of smbus emulator
- * that send a one byte index.
- * eg. a I2C transation to read 2 bytes from index 0
+ * index message for any read transaction.
+ * - a no index is coded as '0',
+ * - 2byte big endian index is coded as '3'
+ * !!! msg[0].buf holds the actual index.
+ * This is compatible with generic messages of smbus emulator
+ * that send a one byte index.
+ * eg. an I2C transaction to read 2 bytes from index 0
* idx = 0;
* msg[0].addr = client->addr;
* msg[0].flags = 0x0;
@@ -644,8 +645,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
for (i = 0; i < num_msgs; i++) {
if (unlikely(msgs[i].flags & I2C_M_TEN)) {
- dev_err(&dev->pdev->dev, "10 bit addressing"
- "not supported\n");
+ dev_err(&dev->pdev->dev,
+ "10 bit addressing not supported\n");
status = -EINVAL;
goto out;
@@ -789,8 +790,9 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
if (dev->cli.count) {
dev->result = -EIO;
- dev_err(&dev->pdev->dev, "%lu bytes still remain to be"
- "xfered\n", dev->cli.count);
+ dev_err(&dev->pdev->dev,
+ "%lu bytes still remain to be xfered\n",
+ dev->cli.count);
(void) init_hw(dev);
}
complete(&dev->xfer_complete);
@@ -923,7 +925,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
}
if (request_mem_region(res->start, resource_size(res),
- DRIVER_NAME "I/O region") == NULL) {
+ DRIVER_NAME " I/O region") == NULL) {
ret = -EBUSY;
goto err_no_region;
}
@@ -935,7 +937,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- ret = request_irq(dev->irq, i2c_irq_handler, IRQF_DISABLED,
+ ret = request_irq(dev->irq, i2c_irq_handler, 0,
DRIVER_NAME, dev);
if (ret) {
dev_err(&pdev->dev, "cannot claim the irq %d\n", dev->irq);
@@ -980,8 +982,9 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
- dev_info(&pdev->dev, "initialize %s on virtual "
- "base %p\n", adap->name, dev->virtbase);
+ dev_info(&pdev->dev,
+ "initialize %s on virtual base %p\n",
+ adap->name, dev->virtbase);
ret = i2c_add_numbered_adapter(adap);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 72434263787..835e47b39bc 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -610,7 +610,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
goto err_iomap;
}
- ret = request_irq(i2c->irq, nuc900_i2c_irq, IRQF_DISABLED | IRQF_SHARED,
+ ret = request_irq(i2c->irq, nuc900_i2c_irq, IRQF_SHARED,
dev_name(&pdev->dev), i2c);
if (ret != 0) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 2dfb6317685..a43d0023446 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -42,12 +42,12 @@
#include <linux/pm_runtime.h>
/* I2C controller revisions */
-#define OMAP_I2C_REV_2 0x20
+#define OMAP_I2C_OMAP1_REV_2 0x20
/* I2C controller revisions present on specific hardware */
#define OMAP_I2C_REV_ON_2430 0x36
#define OMAP_I2C_REV_ON_3430 0x3C
-#define OMAP_I2C_REV_ON_4430 0x40
+#define OMAP_I2C_REV_ON_3530_4430 0x40
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
@@ -72,11 +72,12 @@ enum {
OMAP_I2C_SCLH_REG,
OMAP_I2C_SYSTEST_REG,
OMAP_I2C_BUFSTAT_REG,
- OMAP_I2C_REVNB_LO,
- OMAP_I2C_REVNB_HI,
- OMAP_I2C_IRQSTATUS_RAW,
- OMAP_I2C_IRQENABLE_SET,
- OMAP_I2C_IRQENABLE_CLR,
+ /* only on OMAP4430 */
+ OMAP_I2C_IP_V2_REVNB_LO,
+ OMAP_I2C_IP_V2_REVNB_HI,
+ OMAP_I2C_IP_V2_IRQSTATUS_RAW,
+ OMAP_I2C_IP_V2_IRQENABLE_SET,
+ OMAP_I2C_IP_V2_IRQENABLE_CLR,
};
/* I2C Interrupt Enable Register (OMAP_I2C_IE): */
@@ -193,7 +194,6 @@ struct omap_i2c_dev {
*/
u8 rev;
unsigned b_hw:1; /* bad h/w fixes */
- unsigned idle:1;
u16 iestate; /* Saved interrupt register */
u16 pscstate;
u16 scllstate;
@@ -204,7 +204,7 @@ struct omap_i2c_dev {
u16 errata;
};
-static const u8 reg_map[] = {
+static const u8 reg_map_ip_v1[] = {
[OMAP_I2C_REV_REG] = 0x00,
[OMAP_I2C_IE_REG] = 0x01,
[OMAP_I2C_STAT_REG] = 0x02,
@@ -225,7 +225,7 @@ static const u8 reg_map[] = {
[OMAP_I2C_BUFSTAT_REG] = 0x10,
};
-static const u8 omap4_reg_map[] = {
+static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_REV_REG] = 0x04,
[OMAP_I2C_IE_REG] = 0x2c,
[OMAP_I2C_STAT_REG] = 0x28,
@@ -244,11 +244,11 @@ static const u8 omap4_reg_map[] = {
[OMAP_I2C_SCLH_REG] = 0xb8,
[OMAP_I2C_SYSTEST_REG] = 0xbC,
[OMAP_I2C_BUFSTAT_REG] = 0xc0,
- [OMAP_I2C_REVNB_LO] = 0x00,
- [OMAP_I2C_REVNB_HI] = 0x04,
- [OMAP_I2C_IRQSTATUS_RAW] = 0x24,
- [OMAP_I2C_IRQENABLE_SET] = 0x2c,
- [OMAP_I2C_IRQENABLE_CLR] = 0x30,
+ [OMAP_I2C_IP_V2_REVNB_LO] = 0x00,
+ [OMAP_I2C_IP_V2_REVNB_HI] = 0x04,
+ [OMAP_I2C_IP_V2_IRQSTATUS_RAW] = 0x24,
+ [OMAP_I2C_IP_V2_IRQENABLE_SET] = 0x2c,
+ [OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30,
};
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
@@ -266,17 +266,11 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
static void omap_i2c_unidle(struct omap_i2c_dev *dev)
{
- struct platform_device *pdev;
struct omap_i2c_bus_platform_data *pdata;
- WARN_ON(!dev->idle);
+ pdata = dev->dev->platform_data;
- pdev = to_platform_device(dev->dev);
- pdata = pdev->dev.platform_data;
-
- pm_runtime_get_sync(&pdev->dev);
-
- if (cpu_is_omap34xx()) {
+ if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -286,7 +280,6 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
}
- dev->idle = 0;
/*
* Don't write to this register if the IE state is 0 as it can
@@ -298,32 +291,25 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
static void omap_i2c_idle(struct omap_i2c_dev *dev)
{
- struct platform_device *pdev;
struct omap_i2c_bus_platform_data *pdata;
u16 iv;
- WARN_ON(dev->idle);
-
- pdev = to_platform_device(dev->dev);
- pdata = pdev->dev.platform_data;
+ pdata = dev->dev->platform_data;
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (dev->rev >= OMAP_I2C_REV_ON_4430)
- omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 1);
+ if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
else
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
- if (dev->rev < OMAP_I2C_REV_2) {
+ if (dev->rev < OMAP_I2C_OMAP1_REV_2) {
iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, dev->iestate);
- /* Flush posted write before the dev->idle store occurs */
+ /* Flush posted write */
omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
}
- dev->idle = 1;
-
- pm_runtime_put_sync(&pdev->dev);
}
static int omap_i2c_init(struct omap_i2c_dev *dev)
@@ -334,8 +320,11 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long timeout;
unsigned long internal_clk = 0;
struct clk *fclk;
+ struct omap_i2c_bus_platform_data *pdata;
+
+ pdata = dev->dev->platform_data;
- if (dev->rev >= OMAP_I2C_REV_2) {
+ if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
/* Disable I2C controller before soft reset */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
@@ -378,12 +367,13 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* REVISIT: Some wkup sources might not be needed.
*/
dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
+ dev->westate);
}
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- if (cpu_class_is_omap1()) {
+ if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
* The I2C functional clock is the armxor_ck, so there's
* no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -407,7 +397,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
+ if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -415,7 +405,8 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* to get longer filter period for better noise suppression.
* The filter is iclk (fclk for HS) period.
*/
- if (dev->speed > 400 || cpu_is_omap2430())
+ if (dev->speed > 400 ||
+ pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
@@ -484,7 +475,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
dev->errata = 0;
- if (cpu_is_omap2430() || cpu_is_omap34xx())
+ if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
dev->errata |= I2C_OMAP_ERRATA_I207;
/* Enable interrupts */
@@ -493,7 +484,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (cpu_is_omap34xx()) {
+ if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
dev->pscstate = psc;
dev->scllstate = scll;
dev->sclhstate = sclh;
@@ -642,7 +633,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
int i;
int r;
- omap_i2c_unidle(dev);
+ pm_runtime_get_sync(dev->dev);
r = omap_i2c_wait_for_bb(dev);
if (r < 0)
@@ -665,7 +656,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
omap_i2c_wait_for_bb(dev);
out:
- omap_i2c_idle(dev);
+ pm_runtime_put(dev->dev);
return r;
}
@@ -720,12 +711,12 @@ static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
#ifdef CONFIG_ARCH_OMAP15XX
static irqreturn_t
-omap_i2c_rev1_isr(int this_irq, void *dev_id)
+omap_i2c_omap1_isr(int this_irq, void *dev_id)
{
struct omap_i2c_dev *dev = dev_id;
u16 iv, w;
- if (dev->idle)
+ if (pm_runtime_suspended(dev->dev))
return IRQ_NONE;
iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG);
@@ -774,7 +765,7 @@ omap_i2c_rev1_isr(int this_irq, void *dev_id)
return IRQ_HANDLED;
}
#else
-#define omap_i2c_rev1_isr NULL
+#define omap_i2c_omap1_isr NULL
#endif
/*
@@ -813,8 +804,11 @@ omap_i2c_isr(int this_irq, void *dev_id)
u16 bits;
u16 stat, w;
int err, count = 0;
+ struct omap_i2c_bus_platform_data *pdata;
- if (dev->idle)
+ pdata = dev->dev->platform_data;
+
+ if (pm_runtime_suspended(dev->dev))
return IRQ_NONE;
bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
@@ -881,8 +875,8 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (cpu_class_is_omap1() ||
- cpu_is_omap2420()) {
+ if (pdata->flags &
+ OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
dev->buf_len--;
@@ -924,8 +918,8 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (cpu_class_is_omap1() ||
- cpu_is_omap2420()) {
+ if (pdata->flags &
+ OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
dev->buf_len--;
@@ -1016,7 +1010,6 @@ omap_i2c_probe(struct platform_device *pdev)
}
dev->speed = speed;
- dev->idle = 1;
dev->dev = &pdev->dev;
dev->irq = irq->start;
dev->base = ioremap(mem->start, resource_size(mem));
@@ -1027,27 +1020,22 @@ omap_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- if (cpu_is_omap7xx())
- dev->reg_shift = 1;
- else if (cpu_is_omap44xx())
- dev->reg_shift = 0;
- else
- dev->reg_shift = 2;
+ dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (cpu_is_omap44xx())
- dev->regs = (u8 *) omap4_reg_map;
+ if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ dev->regs = (u8 *)reg_map_ip_v2;
else
- dev->regs = (u8 *) reg_map;
+ dev->regs = (u8 *)reg_map_ip_v1;
- pm_runtime_enable(&pdev->dev);
- omap_i2c_unidle(dev);
+ pm_runtime_enable(dev->dev);
+ pm_runtime_get_sync(dev->dev);
dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
if (dev->rev <= OMAP_I2C_REV_ON_3430)
dev->errata |= I2C_OMAP3_1P153;
- if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
+ if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -1059,7 +1047,7 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_4430) {
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
dev->fifo_size = 0;
dev->b_hw = 0; /* Disable hardware fixes */
} else {
@@ -1075,7 +1063,8 @@ omap_i2c_probe(struct platform_device *pdev)
/* reset ASAP, clearing any IRQs */
omap_i2c_init(dev);
- isr = (dev->rev < OMAP_I2C_REV_2) ? omap_i2c_rev1_isr : omap_i2c_isr;
+ isr = (dev->rev < OMAP_I2C_OMAP1_REV_2) ? omap_i2c_omap1_isr :
+ omap_i2c_isr;
r = request_irq(dev->irq, isr, 0, pdev->name, dev);
if (r) {
@@ -1083,10 +1072,10 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_unuse_clocks;
}
- dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n",
- pdev->id, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
+ pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
- omap_i2c_idle(dev);
+ pm_runtime_put(dev->dev);
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
@@ -1110,7 +1099,7 @@ err_free_irq:
free_irq(dev->irq, dev);
err_unuse_clocks:
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- omap_i2c_idle(dev);
+ pm_runtime_put(dev->dev);
iounmap(dev->base);
err_free_mem:
platform_set_drvdata(pdev, NULL);
@@ -1139,12 +1128,43 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
+static int omap_i2c_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
+
+ omap_i2c_idle(_dev);
+
+ return 0;
+}
+
+static int omap_i2c_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
+
+ omap_i2c_unidle(_dev);
+
+ return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+ .runtime_suspend = omap_i2c_runtime_suspend,
+ .runtime_resume = omap_i2c_runtime_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
+ .pm = OMAP_I2C_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index dfa7ae9c1b8..127051b0692 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -306,7 +306,7 @@ static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
pmcmsptwi_data.irq = platform_get_irq(pldev, 0);
if (pmcmsptwi_data.irq) {
rc = request_irq(pmcmsptwi_data.irq, &pmcmsptwi_interrupt,
- IRQF_SHARED | IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM,
pldev->name, &pmcmsptwi_data);
if (rc == 0) {
/*
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index b73da6cd6f9..632e088760a 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -3,6 +3,7 @@
* It does not support slave mode, the register slightly moved. This PCI
* device provides three bars, every contains a single I2C controller.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/i2c/pxa-i2c.h>
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index f84a63c6dd9..2754cef86a0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,6 +35,8 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of_i2c.h>
+#include <linux/of_gpio.h>
#include <asm/irq.h>
@@ -78,6 +80,8 @@ struct s3c24xx_i2c {
struct resource *ioarea;
struct i2c_adapter adap;
+ struct s3c2410_platform_i2c *pdata;
+ int gpios[2];
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -95,6 +99,12 @@ static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
struct platform_device *pdev = to_platform_device(i2c->dev);
enum s3c24xx_i2c_type type;
+#ifdef CONFIG_OF
+ if (i2c->dev->of_node)
+ return of_device_is_compatible(i2c->dev->of_node,
+ "samsung,s3c2440-i2c");
+#endif
+
type = platform_get_device_id(pdev)->driver_data;
return type == TYPE_S3C2440;
}
@@ -625,7 +635,7 @@ static int s3c24xx_i2c_calcdivisor(unsigned long clkin, unsigned int wanted,
static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
{
- struct s3c2410_platform_i2c *pdata = i2c->dev->platform_data;
+ struct s3c2410_platform_i2c *pdata = i2c->pdata;
unsigned long clkin = clk_get_rate(i2c->clk);
unsigned int divs, div1;
unsigned long target_frequency;
@@ -741,6 +751,49 @@ static inline void s3c24xx_i2c_deregister_cpufreq(struct s3c24xx_i2c *i2c)
}
#endif
+#ifdef CONFIG_OF
+static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
+{
+ int idx, gpio, ret;
+
+ for (idx = 0; idx < 2; idx++) {
+ gpio = of_get_gpio(i2c->dev->of_node, idx);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio);
+ goto free_gpio;
+ }
+
+ ret = gpio_request(gpio, "i2c-bus");
+ if (ret) {
+ dev_err(i2c->dev, "gpio [%d] request failed\n", gpio);
+ goto free_gpio;
+ }
+ }
+ return 0;
+
+free_gpio:
+ while (--idx >= 0)
+ gpio_free(i2c->gpios[idx]);
+ return -EINVAL;
+}
+
+static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
+{
+ unsigned int idx;
+ for (idx = 0; idx < 2; idx++)
+ gpio_free(i2c->gpios[idx]);
+}
+#else
+static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
+{
+ return -EINVAL;
+}
+
+static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
+{
+}
+#endif
+
/* s3c24xx_i2c_init
*
* initialise the controller, set the IO lines and frequency
@@ -754,12 +807,15 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
/* get the platform data */
- pdata = i2c->dev->platform_data;
+ pdata = i2c->pdata;
/* initialise the gpio */
if (pdata->cfg_gpio)
pdata->cfg_gpio(to_platform_device(i2c->dev));
+ else
+ if (s3c24xx_i2c_parse_dt_gpio(i2c))
+ return -EINVAL;
/* write slave address */
@@ -785,6 +841,34 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
return 0;
}
+#ifdef CONFIG_OF
+/* s3c24xx_i2c_parse_dt
+ *
+ * Parse the device tree node and retrieve the platform data.
+*/
+
+static void
+s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
+{
+ struct s3c2410_platform_i2c *pdata = i2c->pdata;
+
+ if (!np)
+ return;
+
+ pdata->bus_num = -1; /* i2c bus number is dynamically assigned */
+ of_property_read_u32(np, "samsung,i2c-sda-delay", &pdata->sda_delay);
+ of_property_read_u32(np, "samsung,i2c-slave-addr", &pdata->slave_addr);
+ of_property_read_u32(np, "samsung,i2c-max-bus-freq",
+ (u32 *)&pdata->frequency);
+}
+#else
+static void
+s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
+{
+ return;
+}
+#endif
+
/* s3c24xx_i2c_probe
*
* called by the bus driver when a suitable device is found
@@ -793,14 +877,16 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_probe(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c;
- struct s3c2410_platform_i2c *pdata;
+ struct s3c2410_platform_i2c *pdata = NULL;
struct resource *res;
int ret;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data\n");
- return -EINVAL;
+ if (!pdev->dev.of_node) {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
}
i2c = kzalloc(sizeof(struct s3c24xx_i2c), GFP_KERNEL);
@@ -809,6 +895,17 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ i2c->pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!i2c->pdata) {
+ ret = -ENOMEM;
+ goto err_noclk;
+ }
+
+ if (pdata)
+ memcpy(i2c->pdata, pdata, sizeof(*pdata));
+ else
+ s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c);
+
strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &s3c24xx_i2c_algorithm;
@@ -883,7 +980,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_iomap;
}
- ret = request_irq(i2c->irq, s3c24xx_i2c_irq, IRQF_DISABLED,
+ ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
if (ret != 0) {
@@ -903,7 +1000,8 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
* being bus 0.
*/
- i2c->adap.nr = pdata->bus_num;
+ i2c->adap.nr = i2c->pdata->bus_num;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
@@ -911,6 +1009,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_cpufreq;
}
+ of_i2c_register_devices(&i2c->adap);
platform_set_drvdata(pdev, i2c);
dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
@@ -959,6 +1058,7 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
iounmap(i2c->regs);
release_resource(i2c->ioarea);
+ s3c24xx_i2c_dt_gpio_free(i2c);
kfree(i2c->ioarea);
kfree(i2c);
@@ -1012,6 +1112,17 @@ static struct platform_device_id s3c24xx_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_i2c_match[] = {
+ { .compatible = "samsung,s3c2410-i2c" },
+ { .compatible = "samsung,s3c2440-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
+#else
+#define s3c24xx_i2c_match NULL
+#endif
+
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
@@ -1020,6 +1131,7 @@ static struct platform_driver s3c24xx_i2c_driver = {
.owner = THIS_MODULE,
.name = "s3c-i2c",
.pm = S3C24XX_DEV_PM_OPS,
+ .of_match_table = s3c24xx_i2c_match,
},
};
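For orientation, the s3c2410 changes above add device-tree probing next to the existing platform-data path: an of_device_id match table, of_property_read_u32() for the binding properties, and of_i2c_register_devices() for the child nodes. A minimal sketch of that shape under an assumed, made-up "vendor,bar-ctrl" binding; only the OF helpers themselves are real kernel interfaces, while all bar_* names and the property string are illustrative.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id bar_of_match[] = {
	{ .compatible = "vendor,bar-ctrl" },	/* hypothetical binding */
	{ },
};
MODULE_DEVICE_TABLE(of, bar_of_match);

static int bar_probe(struct platform_device *pdev)
{
	u32 delay = 0;

	/* optional property: keep the default when it is absent */
	of_property_read_u32(pdev->dev.of_node, "vendor,sda-delay", &delay);
	dev_info(&pdev->dev, "sda delay %u\n", delay);
	return 0;
}

static struct platform_driver bar_driver = {
	.probe = bar_probe,
	.driver = {
		.name = "bar-ctrl",
		.owner = THIS_MODULE,
		.of_match_table = bar_of_match,
	},
};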
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 3cad8fecc3d..a67132b2e09 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <asm/clock.h>
#include <asm/i2c-sh7760.h>
@@ -502,7 +503,7 @@ static int __devinit sh7760_i2c_probe(struct platform_device *pdev)
}
OUT32(id, I2CCCR, ret);
- if (request_irq(id->irq, sh7760_i2c_irq, IRQF_DISABLED,
+ if (request_irq(id->irq, sh7760_i2c_irq, 0,
SH7760_I2C_DEVNAME, id)) {
dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
ret = -EBUSY;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index f633a53b6db..675c9692d14 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -543,7 +543,7 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
for (n = res->start; hook && n <= res->end; n++) {
- if (request_irq(n, sh_mobile_i2c_isr, IRQF_DISABLED,
+ if (request_irq(n, sh_mobile_i2c_isr, 0,
dev_name(&dev->dev), dev)) {
for (n--; n >= res->start; n--)
free_irq(n, dev);
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 99879617e68..4d44af181f3 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -916,7 +916,7 @@ stu300_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- if (request_irq(dev->irq, stu300_irh, IRQF_DISABLED,
+ if (request_irq(dev->irq, stu300_irh, 0,
NAME, dev)) {
ret = -EIO;
goto err_no_irq;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 3c94c4a81a5..46b6500c547 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/i2c-tegra.h>
#include <linux/of_i2c.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
@@ -566,7 +567,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
struct clk *clk;
struct clk *i2c_clk;
const unsigned int *prop;
- void *base;
+ void __iomem *base;
int irq;
int ret = 0;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 986e5f62deb..91e349c884c 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -550,7 +550,7 @@ static int __devexit scx200_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver scx200_pci_drv = {
+static struct platform_driver scx200_pci_driver = {
.driver = {
.name = "cs5535-smb",
.owner = THIS_MODULE,
@@ -593,14 +593,14 @@ static int __init scx200_acb_init(void)
return 0;
/* No ISA devices; register the platform driver for PCI-based devices */
- return platform_driver_register(&scx200_pci_drv);
+ return platform_driver_register(&scx200_pci_driver);
}
static void __exit scx200_acb_cleanup(void)
{
struct scx200_acb_iface *iface;
- platform_driver_unregister(&scx200_pci_drv);
+ platform_driver_unregister(&scx200_pci_driver);
mutex_lock(&scx200_acb_list_mutex);
while ((iface = scx200_acb_list) != NULL) {
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 3ca2e012e78..10274ffb66d 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/rwsem.h>
#include "i2c-core.h"
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 76b6d98bd29..5a26584934c 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -677,19 +677,19 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
config BLK_DEV_IDE_AU1XXX
bool "IDE for AMD Alchemy Au1200"
- depends on SOC_AU1200
+ depends on MIPS_ALCHEMY
select IDE_XFER_MODE
choice
prompt "IDE Mode for AMD Alchemy Au1200"
default BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX
+ depends on BLK_DEV_IDE_AU1XXX
config BLK_DEV_IDE_AU1XXX_PIO_DBDMA
bool "PIO+DbDMA IDE for AMD Alchemy Au1200"
config BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
bool "MDMA2+DbDMA IDE for AMD Alchemy Au1200"
- depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX
+ depends on BLK_DEV_IDE_AU1XXX
endchoice
config BLK_DEV_IDE_TX4938
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 000a78e5246..6dede8f366c 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <mach/board.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <mach/at91sam9_smc.h>
#define DRV_NAME "at91_ide"
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index b26c23416fa..259786ca8b7 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -36,13 +36,17 @@
#include <linux/ide.h>
#include <linux/scatterlist.h>
-#include <asm/mach-au1x00/au1xxx.h>
+#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>
#define DRV_NAME "au1200-ide"
#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
+#ifndef IDE_REG_SHIFT
+#define IDE_REG_SHIFT 5
+#endif
+
/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE 1
@@ -317,10 +321,11 @@ static void auide_ddma_rx_callback(int irq, void *param)
}
#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
-static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
+static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
+ u32 devwidth, u32 flags, u32 regbase)
{
dev->dev_id = dev_id;
- dev->dev_physaddr = (u32)IDE_PHYS_ADDR;
+ dev->dev_physaddr = CPHYSADDR(regbase);
dev->dev_intlevel = 0;
dev->dev_intpolarity = 0;
dev->dev_tsize = tsize;
@@ -344,7 +349,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
dbdev_tab_t source_dev_tab, target_dev_tab;
u32 dev_id, tsize, devwidth, flags;
- dev_id = IDE_DDMA_REQ;
+ dev_id = hwif->ddma_id;
tsize = 8; /* 1 */
devwidth = 32; /* 16 */
@@ -356,20 +361,17 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
#endif
/* setup dev_tab for tx channel */
- auide_init_dbdma_dev( &source_dev_tab,
- dev_id,
- tsize, devwidth, DEV_FLAGS_OUT | flags);
+ auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
+ DEV_FLAGS_OUT | flags, auide->regbase);
auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
- auide_init_dbdma_dev( &source_dev_tab,
- dev_id,
- tsize, devwidth, DEV_FLAGS_IN | flags);
+ auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
+ DEV_FLAGS_IN | flags, auide->regbase);
auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
/* We also need to add a target device for the DMA */
- auide_init_dbdma_dev( &target_dev_tab,
- (u32)DSCR_CMD0_ALWAYS,
- tsize, devwidth, DEV_FLAGS_ANYUSE);
+ auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS, tsize,
+ devwidth, DEV_FLAGS_ANYUSE, auide->regbase);
auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
/* Get a channel for TX */
@@ -411,14 +413,12 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
#endif
/* setup dev_tab for tx channel */
- auide_init_dbdma_dev( &source_dev_tab,
- (u32)DSCR_CMD0_ALWAYS,
- 8, 32, DEV_FLAGS_OUT | flags);
+ auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
+ DEV_FLAGS_OUT | flags, auide->regbase);
auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
- auide_init_dbdma_dev( &source_dev_tab,
- (u32)DSCR_CMD0_ALWAYS,
- 8, 32, DEV_FLAGS_IN | flags);
+ auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
+ DEV_FLAGS_IN | flags, auide->regbase);
auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
/* Get a channel for TX */
@@ -540,6 +540,14 @@ static int au_ide_probe(struct platform_device *dev)
goto out;
}
+ res = platform_get_resource(dev, IORESOURCE_DMA, 0);
+ if (!res) {
+ pr_debug("%s: no DDMA ID resource\n", DRV_NAME);
+ ret = -ENODEV;
+ goto out;
+ }
+ ahwif->ddma_id = res->start;
+
memset(&hw, 0, sizeof(hw));
auide_setup_ports(&hw, ahwif);
hw.irq = ahwif->irq;
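For orientation, au_ide_probe() above now takes the DBDMA request id from an IORESOURCE_DMA entry of the platform device instead of the hard-coded IDE_DDMA_REQ. A minimal sketch of that lookup, not from the patch itself; platform_get_resource() is the real helper, my_probe is a hypothetical name.

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res)
		return -ENODEV;	/* board code did not describe the DMA id */

	/* res->start carries the DBDMA device id for this controller */
	return 0;
}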
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
index ab4f169d083..b1d38590ac0 100644
--- a/drivers/ide/buddha.c
+++ b/drivers/ide/buddha.c
@@ -23,6 +23,7 @@
#include <linux/zorro.h>
#include <linux/ide.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index cb10201a15e..a81bd757579 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -105,6 +105,7 @@
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <asm/io.h>
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 2af8cb460a3..f22edc66b03 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -17,6 +17,7 @@
#include <linux/ide.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/module.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 6f218e014e9..fac3d9da2e0 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/cdrom.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 04b09564bfa..8126824dacc 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -43,7 +43,6 @@
/* For SCSI -> ATAPI command conversion */
#include <scsi/scsi.h>
-#include <linux/irq.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
index f9bbd904eae..8b570a17bcd 100644
--- a/drivers/ide/ide-disk_proc.c
+++ b/drivers/ide/ide-disk_proc.c
@@ -1,6 +1,7 @@
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/seq_file.h>
#include "ide-disk.h"
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c
index e4cdf78cc3e..289d16c87b8 100644
--- a/drivers/ide/ide-dma-sff.c
+++ b/drivers/ide/ide-dma-sff.c
@@ -1,5 +1,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index d4136908f91..17a65ac5649 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index c0aa93fb7a6..32970664c27 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -1,5 +1,6 @@
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/ide.h>
#include <linux/delay.h>
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 61fdf544fbd..3d42043fec5 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -35,7 +35,6 @@
#include <scsi/scsi_ioctl.h>
#include <asm/byteorder.h>
-#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/unaligned.h>
diff --git a/drivers/ide/ide-floppy_proc.c b/drivers/ide/ide-floppy_proc.c
index d711d9b883d..1600720f3e8 100644
--- a/drivers/ide/ide-floppy_proc.c
+++ b/drivers/ide/ide-floppy_proc.c
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/ide.h>
#include <linux/seq_file.h>
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
index 46721c45451..19763977568 100644
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -1,5 +1,6 @@
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/ide.h>
#if defined(CONFIG_ARM) || defined(CONFIG_M68K) || defined(CONFIG_MIPS) || \
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 9965ecd5078..4d19eb9772a 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -2,6 +2,7 @@
* IDE ioctls handling.
*/
+#include <linux/export.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/slab.h>
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
index b9654a7bb7b..30fe3630734 100644
--- a/drivers/ide/ide-legacy.c
+++ b/drivers/ide/ide-legacy.c
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/ide.h>
static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index e386a32dc9b..d9c9829c8b2 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -1,6 +1,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/ide.h>
#include <linux/bitops.h>
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 017b1df3b80..e5f3db83137 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/pnp.h>
#include <linux/ide.h>
+#include <linux/module.h>
#define DRV_NAME "ide-pnp"
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 7ecb1ade887..ce8237d3615 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -41,7 +41,6 @@
#include <scsi/scsi.h>
#include <asm/byteorder.h>
-#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/unaligned.h>
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 600c89a3d13..5bc2839ebcf 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c
index 5fc8d5c17de..eb421883c16 100644
--- a/drivers/ide/ide-xfer-mode.c
+++ b/drivers/ide/ide-xfer-mode.c
@@ -1,6 +1,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/ide.h>
#include <linux/bitops.h>
diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c
index 505ec43e560..adc5fe9daaf 100644
--- a/drivers/ide/macide.c
+++ b/drivers/ide/macide.c
@@ -17,6 +17,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/ide.h>
+#include <linux/module.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 1db7c4368db..e944c7f705f 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/notifier.h>
+#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/adb.h>
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c
index 90786083b43..ecd0a69245f 100644
--- a/drivers/ide/q40ide.c
+++ b/drivers/ide/q40ide.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
+#include <linux/module.h>
#include <asm/ide.h>
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index ab3db61d2ba..34a5e5223d5 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index e444d24934b..4799d5c384e 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ide.h>
+#include <linux/module.h>
#define DRV_NAME "tc86c001"
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index a46dddf6107..5d2f8e13cf0 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -61,6 +61,7 @@
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
+#include <linux/module.h>
#include <asm/mwait.h>
#include <asm/msr.h>
@@ -81,7 +82,8 @@ static unsigned int mwait_substates;
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+static int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
static struct cpuidle_state *cpuidle_state_table;
@@ -109,7 +111,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C1 */
.name = "C1-NHM",
.desc = "MWAIT 0x00",
- .driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 3,
.target_residency = 6,
@@ -117,7 +118,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C2 */
.name = "C3-NHM",
.desc = "MWAIT 0x10",
- .driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
@@ -125,7 +125,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C3 */
.name = "C6-NHM",
.desc = "MWAIT 0x20",
- .driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
@@ -137,7 +136,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C1 */
.name = "C1-SNB",
.desc = "MWAIT 0x00",
- .driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 1,
@@ -145,7 +143,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C2 */
.name = "C3-SNB",
.desc = "MWAIT 0x10",
- .driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
@@ -153,7 +150,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C3 */
.name = "C6-SNB",
.desc = "MWAIT 0x20",
- .driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
@@ -161,7 +157,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C4 */
.name = "C7-SNB",
.desc = "MWAIT 0x30",
- .driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
@@ -173,7 +168,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C1 */
.name = "C1-ATM",
.desc = "MWAIT 0x00",
- .driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 4,
@@ -181,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C2 */
.name = "C2-ATM",
.desc = "MWAIT 0x10",
- .driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 20,
.target_residency = 80,
@@ -190,7 +183,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C4 */
.name = "C4-ATM",
.desc = "MWAIT 0x30",
- .driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
@@ -199,23 +191,55 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C6 */
.name = "C6-ATM",
.desc = "MWAIT 0x52",
- .driver_data = (void *) 0x52,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle },
};
+static int get_driver_data(int cstate)
+{
+ int driver_data;
+ switch (cstate) {
+
+ case 1: /* MWAIT C1 */
+ driver_data = 0x00;
+ break;
+ case 2: /* MWAIT C2 */
+ driver_data = 0x10;
+ break;
+ case 3: /* MWAIT C3 */
+ driver_data = 0x20;
+ break;
+ case 4: /* MWAIT C4 */
+ driver_data = 0x30;
+ break;
+ case 5: /* MWAIT C5 */
+ driver_data = 0x40;
+ break;
+ case 6: /* MWAIT C6 */
+ driver_data = 0x52;
+ break;
+ default:
+ driver_data = 0x00;
+ }
+ return driver_data;
+}
+
/**
* intel_idle
* @dev: cpuidle_device
- * @state: cpuidle state
+ * @drv: cpuidle driver
+ * @index: index of cpuidle state
*
*/
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+static int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
- unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
+ struct cpuidle_state *state = &drv->states[index];
+ struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+ unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
unsigned int cstate;
ktime_t kt_before, kt_after;
s64 usec_delta;
@@ -256,7 +280,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
- return usec_delta;
+ /* Update cpuidle counters */
+ dev->last_residency = (int)usec_delta;
+
+ return index;
}
static void __setup_broadcast_timer(void *arg)
@@ -397,6 +424,60 @@ static void intel_idle_cpuidle_devices_uninit(void)
return;
}
/*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static int intel_idle_cpuidle_driver_init(void)
+{
+ int cstate;
+ struct cpuidle_driver *drv = &intel_idle_driver;
+
+ drv->state_count = 1;
+
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
+
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
+
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (mwait_substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL) {
+ /* does the driver not know about the state? */
+ if (*cpuidle_state_table[cstate].name == '\0')
+ pr_debug(PREFIX "unaware of model 0x%x"
+ " MWAIT %d please"
+ " contact lenb@kernel.org",
+ boot_cpu_data.x86_model, cstate);
+ continue;
+ }
+
+ if ((cstate > 2) &&
+ !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle"
+ " states deeper than C2");
+
+ drv->states[drv->state_count] = /* structure copy */
+ cpuidle_state_table[cstate];
+
+ drv->state_count += 1;
+ }
+
+ if (auto_demotion_disable_flags)
+ smp_call_function(auto_demotion_disable, NULL, 1);
+
+ return 0;
+}
+
+
+/*
* intel_idle_cpuidle_devices_init()
* allocate, initialize, register cpuidle_devices
*/
@@ -430,22 +511,11 @@ static int intel_idle_cpuidle_devices_init(void)
continue;
/* is the state not enabled? */
if (cpuidle_state_table[cstate].enter == NULL) {
- /* does the driver not know about the state? */
- if (*cpuidle_state_table[cstate].name == '\0')
- pr_debug(PREFIX "unaware of model 0x%x"
- " MWAIT %d please"
- " contact lenb@kernel.org",
- boot_cpu_data.x86_model, cstate);
continue;
}
- if ((cstate > 2) &&
- !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- mark_tsc_unstable("TSC halts in idle"
- " states deeper than C2");
-
- dev->states[dev->state_count] = /* structure copy */
- cpuidle_state_table[cstate];
+ dev->states_usage[dev->state_count].driver_data =
+ (void *)get_driver_data(cstate);
dev->state_count += 1;
}
@@ -458,8 +528,6 @@ static int intel_idle_cpuidle_devices_init(void)
return -EIO;
}
}
- if (auto_demotion_disable_flags)
- smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
@@ -477,6 +545,7 @@ static int __init intel_idle_init(void)
if (retval)
return retval;
+ intel_idle_cpuidle_driver_init();
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
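For orientation, the intel_idle rework above follows the cpuidle API change in which ->enter() receives the driver and a state index, records the measured residency in dev->last_residency itself, and returns the index it actually entered, while the per-state driver_data moves into dev->states_usage[]. A hedged sketch of that callback contract; my_idle_enter and the timing details are illustrative, only the signature and the last_residency bookkeeping mirror the patch.

#include <linux/cpuidle.h>
#include <linux/ktime.h>

static int my_idle_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	ktime_t t0 = ktime_get();

	/* platform-specific entry into drv->states[index] goes here */

	dev->last_residency = (int)ktime_to_us(ktime_sub(ktime_get(), t0));
	return index;	/* the state that was actually entered */
}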
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 236ad9a89c0..691276bafd7 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -37,6 +37,7 @@
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/module.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index fc0f2bd9ca8..8b72f39202f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -36,6 +36,7 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
@@ -889,6 +890,8 @@ retest:
break;
case IB_CM_ESTABLISHED:
spin_unlock_irq(&cm_id_priv->lock);
+ if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+ break;
ib_send_cm_dreq(cm_id, NULL, 0);
goto retest;
case IB_CM_DREQ_SENT:
@@ -1008,7 +1011,6 @@ static void cm_format_req(struct cm_req_msg *req_msg,
req_msg->service_id = param->service_id;
req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
- cm_req_set_resp_res(req_msg, param->responder_resources);
cm_req_set_init_depth(req_msg, param->initiator_depth);
cm_req_set_remote_resp_timeout(req_msg,
param->remote_cm_response_timeout);
@@ -1017,12 +1019,16 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
cm_req_set_local_resp_timeout(req_msg,
param->local_cm_response_timeout);
- cm_req_set_retry_count(req_msg, param->retry_count);
req_msg->pkey = param->primary_path->pkey;
cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
- cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
- cm_req_set_srq(req_msg, param->srq);
+
+ if (param->qp_type != IB_QPT_XRC_INI) {
+ cm_req_set_resp_res(req_msg, param->responder_resources);
+ cm_req_set_retry_count(req_msg, param->retry_count);
+ cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
+ cm_req_set_srq(req_msg, param->srq);
+ }
if (pri_path->hop_limit <= 1) {
req_msg->primary_local_lid = pri_path->slid;
@@ -1080,7 +1086,8 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
if (!param->primary_path)
return -EINVAL;
- if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
+ if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
+ param->qp_type != IB_QPT_XRC_INI)
return -EINVAL;
if (param->private_data &&
@@ -1601,18 +1608,24 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
rep_msg->local_comm_id = cm_id_priv->id.local_id;
rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
rep_msg->resp_resources = param->responder_resources;
- rep_msg->initiator_depth = param->initiator_depth;
cm_rep_set_target_ack_delay(rep_msg,
cm_id_priv->av.port->cm_dev->ack_delay);
cm_rep_set_failover(rep_msg, param->failover_accepted);
- cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
- cm_rep_set_srq(rep_msg, param->srq);
rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
+ if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
+ rep_msg->initiator_depth = param->initiator_depth;
+ cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
+ cm_rep_set_srq(rep_msg, param->srq);
+ cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
+ } else {
+ cm_rep_set_srq(rep_msg, 1);
+ cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
+ }
+
if (param->private_data && param->private_data_len)
memcpy(rep_msg->private_data, param->private_data,
param->private_data_len);
@@ -1660,7 +1673,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
- cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
+ cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
@@ -1731,7 +1744,7 @@ error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_rtu);
-static void cm_format_rep_event(struct cm_work *work)
+static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
struct cm_rep_msg *rep_msg;
struct ib_cm_rep_event_param *param;
@@ -1740,7 +1753,7 @@ static void cm_format_rep_event(struct cm_work *work)
param = &work->cm_event.param.rep_rcvd;
param->remote_ca_guid = rep_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
- param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
+ param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
param->responder_resources = rep_msg->initiator_depth;
param->initiator_depth = rep_msg->resp_resources;
@@ -1808,7 +1821,7 @@ static int cm_rep_handler(struct cm_work *work)
return -EINVAL;
}
- cm_format_rep_event(work);
+ cm_format_rep_event(work, cm_id_priv->qp_type);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
@@ -1823,7 +1836,7 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
- cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+ cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
spin_lock(&cm.lock);
/* Check for duplicate REP. */
@@ -1850,7 +1863,7 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv->id.state = IB_CM_REP_RCVD;
cm_id_priv->id.remote_id = rep_msg->local_comm_id;
- cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+ cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
cm_id_priv->initiator_depth = rep_msg->resp_resources;
cm_id_priv->responder_resources = rep_msg->initiator_depth;
cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
@@ -3492,7 +3505,8 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
qp_attr->path_mtu = cm_id_priv->path_mtu;
qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
- if (cm_id_priv->qp_type == IB_QPT_RC) {
+ if (cm_id_priv->qp_type == IB_QPT_RC ||
+ cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
IB_QP_MIN_RNR_TIMER;
qp_attr->max_dest_rd_atomic =
@@ -3537,15 +3551,21 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
- if (cm_id_priv->qp_type == IB_QPT_RC) {
- *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
- IB_QP_RNR_RETRY |
+ switch (cm_id_priv->qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_XRC_INI:
+ *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
IB_QP_MAX_QP_RD_ATOMIC;
- qp_attr->timeout = cm_id_priv->av.timeout;
qp_attr->retry_cnt = cm_id_priv->retry_count;
qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
- qp_attr->max_rd_atomic =
- cm_id_priv->initiator_depth;
+ qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
+ /* fall through */
+ case IB_QPT_XRC_TGT:
+ *qp_attr_mask |= IB_QP_TIMEOUT;
+ qp_attr->timeout = cm_id_priv->av.timeout;
+ break;
+ default:
+ break;
}
if (cm_id_priv->alt_av.ah_attr.dlid) {
*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 7e63c08f697..505db2a59e7 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
*
@@ -86,7 +86,7 @@ struct cm_req_msg {
__be16 pkey;
/* path MTU:4, RDC exists:1, RNR retry count:3. */
u8 offset50;
- /* max CM Retries:4, SRQ:1, rsvd:3 */
+ /* max CM Retries:4, SRQ:1, extended transport type:3 */
u8 offset51;
__be16 primary_local_lid;
@@ -175,6 +175,11 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
switch(transport_type) {
case 0: return IB_QPT_RC;
case 1: return IB_QPT_UC;
+ case 3:
+ switch (req_msg->offset51 & 0x7) {
+ case 1: return IB_QPT_XRC_TGT;
+ default: return 0;
+ }
default: return 0;
}
}
@@ -188,6 +193,12 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
req_msg->offset40) &
0xFFFFFFF9) | 0x2);
break;
+ case IB_QPT_XRC_INI:
+ req_msg->offset40 = cpu_to_be32((be32_to_cpu(
+ req_msg->offset40) &
+ 0xFFFFFFF9) | 0x6);
+ req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
+ break;
default:
req_msg->offset40 = cpu_to_be32(be32_to_cpu(
req_msg->offset40) &
@@ -527,6 +538,23 @@ static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
(be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}
+static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
+{
+ return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
+}
+
+static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
+{
+ rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
+ (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
+}
+
+static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
+{
+ return (qp_type == IB_QPT_XRC_INI) ?
+ cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
+}
+
static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
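For orientation, the cm_msgs.h accessors above pack a 24-bit QPN or EECN into the top three bytes of a big-endian word, keeping the low byte for the neighbouring protocol bits. A generic sketch of that shift-and-mask scheme; pack24/unpack24 are illustrative names, the arithmetic is the same as in cm_rep_set_local_eecn()/cm_rep_get_local_eecn().

#include <linux/types.h>
#include <asm/byteorder.h>

/* store a 24-bit value in bits 31..8, preserving bits 7..0 of *field */
static inline void pack24(__be32 *field, __be32 val)
{
	*field = cpu_to_be32((be32_to_cpu(val) << 8) |
			     (be32_to_cpu(*field) & 0x000000FF));
}

/* recover the 24-bit value from bits 31..8 */
static inline __be32 unpack24(__be32 field)
{
	return cpu_to_be32(be32_to_cpu(field) >> 8);
}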
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ca4c5dcd713..75ff821c0af 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/tcp.h>
#include <net/ipv6.h>
@@ -81,6 +82,7 @@ static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
+static DEFINE_IDR(ib_ps);
struct cma_device {
struct list_head list;
@@ -1179,6 +1181,15 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.qp_num = req_data->remote_qpn;
}
+static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+{
+ return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+ (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
+ ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
+ (id->qp_type == IB_QPT_UD)) ||
+ (!id->qp_type));
+}
+
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
@@ -1186,13 +1197,16 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
int offset, ret;
listen_id = cm_id->context;
+ if (!cma_check_req_qp_type(&listen_id->id, ib_event))
+ return -EINVAL;
+
if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
return -ECONNABORTED;
memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id->id.ps);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
- if (listen_id->id.qp_type == IB_QPT_UD) {
+ if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
conn_id = cma_new_udp_id(&listen_id->id, ib_event);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
@@ -1328,6 +1342,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
switch (iw_event->status) {
case 0:
event.event = RDMA_CM_EVENT_ESTABLISHED;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
break;
case -ECONNRESET:
case -ECONNREFUSED:
@@ -1343,6 +1359,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
break;
case IW_CM_EVENT_ESTABLISHED:
event.event = RDMA_CM_EVENT_ESTABLISHED;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
break;
default:
BUG_ON(1);
@@ -1433,8 +1451,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
event.param.conn.private_data = iw_event->private_data;
event.param.conn.private_data_len = iw_event->private_data_len;
- event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
- event.param.conn.responder_resources = attr.max_qp_rd_atom;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
/*
* Protect against the user destroying conn_id from another thread
@@ -2234,6 +2252,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
case RDMA_PS_IPOIB:
ps = &ipoib_ps;
break;
+ case RDMA_PS_IB:
+ ps = &ib_ps;
+ break;
default:
return -EPROTONOSUPPORT;
}
@@ -2569,7 +2590,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.service_id = cma_get_service_id(id_priv->id.ps,
(struct sockaddr *) &route->addr.dst_addr);
req.qp_num = id_priv->qp_num;
- req.qp_type = IB_QPT_RC;
+ req.qp_type = id_priv->id.qp_type;
req.starting_psn = id_priv->seq_num;
req.responder_resources = conn_param->responder_resources;
req.initiator_depth = conn_param->initiator_depth;
@@ -2616,14 +2637,16 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
if (ret)
goto out;
- iw_param.ord = conn_param->initiator_depth;
- iw_param.ird = conn_param->responder_resources;
- iw_param.private_data = conn_param->private_data;
- iw_param.private_data_len = conn_param->private_data_len;
- if (id_priv->id.qp)
+ if (conn_param) {
+ iw_param.ord = conn_param->initiator_depth;
+ iw_param.ird = conn_param->responder_resources;
+ iw_param.private_data = conn_param->private_data;
+ iw_param.private_data_len = conn_param->private_data_len;
+ iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
+ } else {
+ memset(&iw_param, 0, sizeof iw_param);
iw_param.qpn = id_priv->qp_num;
- else
- iw_param.qpn = conn_param->qp_num;
+ }
ret = iw_cm_connect(cm_id, &iw_param);
out:
if (ret) {
@@ -2765,14 +2788,20 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (id->qp_type == IB_QPT_UD)
- ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
- conn_param->private_data,
- conn_param->private_data_len);
- else if (conn_param)
- ret = cma_accept_ib(id_priv, conn_param);
- else
- ret = cma_rep_recv(id_priv);
+ if (id->qp_type == IB_QPT_UD) {
+ if (conn_param)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+ conn_param->private_data,
+ conn_param->private_data_len);
+ else
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+ NULL, 0);
+ } else {
+ if (conn_param)
+ ret = cma_accept_ib(id_priv, conn_param);
+ else
+ ret = cma_rep_recv(id_priv);
+ }
break;
case RDMA_TRANSPORT_IWARP:
ret = cma_accept_iw(id_priv, conn_param);
@@ -3460,6 +3489,7 @@ static void __exit cma_cleanup(void)
idr_destroy(&tcp_ps);
idr_destroy(&udp_ps);
idr_destroy(&ipoib_ps);
+ idr_destroy(&ib_ps);
}
module_init(cma_init);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 4507043d24c..176c8f90f2b 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -33,6 +33,7 @@
#include <linux/errno.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index a9c042345c6..1a696f76b61 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -45,6 +45,7 @@
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b4d8672a3e4..2fe428bba54 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -35,6 +35,7 @@
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <rdma/ib_cache.h>
#include "mad_priv.h"
@@ -1596,6 +1597,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
mad->mad_hdr.class_version].class;
if (!class)
goto out;
+ if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
+ IB_MGMT_MAX_METHODS)
+ goto out;
method = class->method_table[convert_mgmt_class(
mad->mad_hdr.mgmt_class)];
if (method)
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 68b4162fd9d..d2360a8ef0b 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -34,6 +34,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 9227f4acd79..d1c8196d15d 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -32,6 +32,7 @@
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+#include <linux/export.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index 019bd4b0863..1b65986c0be 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/string.h>
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 9ab5df72df7..c61bca30fd2 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -35,6 +35,7 @@
#include "core_priv.h"
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/string.h>
#include <rdma/ib_mad.h>
@@ -185,17 +186,35 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
+ rate = (25 * attr.active_speed) / 10;
+
switch (attr.active_speed) {
- case 2: speed = " DDR"; break;
- case 4: speed = " QDR"; break;
+ case 2:
+ speed = " DDR";
+ break;
+ case 4:
+ speed = " QDR";
+ break;
+ case 8:
+ speed = " FDR10";
+ rate = 10;
+ break;
+ case 16:
+ speed = " FDR";
+ rate = 14;
+ break;
+ case 32:
+ speed = " EDR";
+ rate = 25;
+ break;
}
- rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+ rate *= ib_width_enum_to_int(attr.active_width);
if (rate < 0)
return -EINVAL;
return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
- rate / 10, rate % 10 ? ".5" : "",
+ rate, (attr.active_speed == 1) ? ".5" : "",
ib_width_enum_to_int(attr.active_width), speed);
}
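For orientation, with the rate_show() change above the per-lane rate comes from active_speed (2.5 Gb/s units for SDR/DDR/QDR, with FDR10/FDR/EDR overridden to 10/14/25) and is then multiplied by the lane count. Worked example for a hypothetical 4X FDR port: active_speed = 16 selects the " FDR" label and rate = 14; ib_width_enum_to_int(IB_WIDTH_4X) is 4, so the file prints "56 Gb/sec (4X FDR)".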
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 08f948df8fa..b8a0b4a7811 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1122,7 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
if (copy_from_user(&hdr, buf, sizeof(hdr)))
return -EFAULT;
- if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
+ if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
return -EINVAL;
if (hdr.in + sizeof(hdr) > len)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 71be5eebd68..b37b0c02a7b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -41,6 +41,7 @@
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
+#include <linux/module.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -276,7 +277,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
ucma_set_event_context(ctx, event, uevent);
uevent->resp.event = event->event;
uevent->resp.status = event->status;
- if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
+ if (cm_id->qp_type == IB_QPT_UD)
ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
else
ucma_copy_conn_event(&uevent->resp.param.conn,
@@ -377,6 +378,9 @@ static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_
case RDMA_PS_IPOIB:
*qp_type = IB_QPT_UD;
return 0;
+ case RDMA_PS_IB:
+ *qp_type = cmd->qp_type;
+ return 0;
default:
return -EINVAL;
}
@@ -1270,7 +1274,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
if (copy_from_user(&hdr, buf, sizeof(hdr)))
return -EFAULT;
- if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
+ if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
return -EINVAL;
if (hdr.in + sizeof(hdr) > len)
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 9b737ff133e..72feee620eb 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -33,6 +33,7 @@
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/export.h>
#include <linux/if_ether.h>
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index b645e558876..71f0c0f7df9 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -35,6 +35,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
+#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>
@@ -136,7 +137,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
down_write(&current->mm->mmap_sem);
- locked = npages + current->mm->locked_vm;
+ locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -206,7 +207,7 @@ out:
__ib_umem_release(context->device, umem, 0);
kfree(umem);
} else
- current->mm->locked_vm = locked;
+ current->mm->pinned_vm = locked;
up_write(&current->mm->mmap_sem);
if (vma_list)
@@ -222,7 +223,7 @@ static void ib_umem_account(struct work_struct *work)
struct ib_umem *umem = container_of(work, struct ib_umem, work);
down_write(&umem->mm->mmap_sem);
- umem->mm->locked_vm -= umem->diff;
+ umem->mm->pinned_vm -= umem->diff;
up_write(&umem->mm->mmap_sem);
mmput(umem->mm);
kfree(umem);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8d261b6ea5f..07db22997e9 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -458,8 +458,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
goto err;
}
- if (packet->mad.hdr.id < 0 ||
- packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
+ if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
ret = -EINVAL;
goto err;
}
@@ -703,7 +702,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
- if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index a078e5624d2..5bcb2afd3dc 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -76,6 +76,8 @@ struct ib_uverbs_device {
struct ib_device *ib_dev;
int devnum;
struct cdev cdev;
+ struct rb_root xrcd_tree;
+ struct mutex xrcd_tree_mutex;
};
struct ib_uverbs_event_file {
@@ -120,6 +122,16 @@ struct ib_uevent_object {
u32 events_reported;
};
+struct ib_uxrcd_object {
+ struct ib_uobject uobject;
+ atomic_t refcnt;
+};
+
+struct ib_usrq_object {
+ struct ib_uevent_object uevent;
+ struct ib_uxrcd_object *uxrcd;
+};
+
struct ib_uqp_object {
struct ib_uevent_object uevent;
struct list_head mcast_list;
@@ -142,6 +154,7 @@ extern struct idr ib_uverbs_ah_idr;
extern struct idr ib_uverbs_cq_idr;
extern struct idr ib_uverbs_qp_idr;
extern struct idr ib_uverbs_srq_idr;
+extern struct idr ib_uverbs_xrcd_idr;
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
@@ -161,6 +174,7 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
+void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
@@ -181,6 +195,7 @@ IB_UVERBS_DECLARE_CMD(poll_cq);
IB_UVERBS_DECLARE_CMD(req_notify_cq);
IB_UVERBS_DECLARE_CMD(destroy_cq);
IB_UVERBS_DECLARE_CMD(create_qp);
+IB_UVERBS_DECLARE_CMD(open_qp);
IB_UVERBS_DECLARE_CMD(query_qp);
IB_UVERBS_DECLARE_CMD(modify_qp);
IB_UVERBS_DECLARE_CMD(destroy_qp);
@@ -195,5 +210,8 @@ IB_UVERBS_DECLARE_CMD(create_srq);
IB_UVERBS_DECLARE_CMD(modify_srq);
IB_UVERBS_DECLARE_CMD(query_srq);
IB_UVERBS_DECLARE_CMD(destroy_srq);
+IB_UVERBS_DECLARE_CMD(create_xsrq);
+IB_UVERBS_DECLARE_CMD(open_xrcd);
+IB_UVERBS_DECLARE_CMD(close_xrcd);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index c42699285f8..254f1649c73 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -47,6 +47,7 @@ static struct lock_class_key cq_lock_key;
static struct lock_class_key qp_lock_key;
static struct lock_class_key ah_lock_key;
static struct lock_class_key srq_lock_key;
+static struct lock_class_key xrcd_lock_key;
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
@@ -255,6 +256,18 @@ static void put_srq_read(struct ib_srq *srq)
put_uobj_read(srq->uobject);
}
+static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
+ struct ib_uobject **uobj)
+{
+ *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
+ return *uobj ? (*uobj)->object : NULL;
+}
+
+static void put_xrcd_read(struct ib_uobject *uobj)
+{
+ put_uobj_read(uobj);
+}
+
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
const char __user *buf,
int in_len, int out_len)
@@ -298,6 +311,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->qp_list);
INIT_LIST_HEAD(&ucontext->srq_list);
INIT_LIST_HEAD(&ucontext->ah_list);
+ INIT_LIST_HEAD(&ucontext->xrcd_list);
ucontext->closing = 0;
resp.num_comp_vectors = file->device->num_comp_vectors;
@@ -579,6 +593,310 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
return in_len;
}
+struct xrcd_table_entry {
+ struct rb_node node;
+ struct ib_xrcd *xrcd;
+ struct inode *inode;
+};
+
+static int xrcd_table_insert(struct ib_uverbs_device *dev,
+ struct inode *inode,
+ struct ib_xrcd *xrcd)
+{
+ struct xrcd_table_entry *entry, *scan;
+ struct rb_node **p = &dev->xrcd_tree.rb_node;
+ struct rb_node *parent = NULL;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->xrcd = xrcd;
+ entry->inode = inode;
+
+ while (*p) {
+ parent = *p;
+ scan = rb_entry(parent, struct xrcd_table_entry, node);
+
+ if (inode < scan->inode) {
+ p = &(*p)->rb_left;
+ } else if (inode > scan->inode) {
+ p = &(*p)->rb_right;
+ } else {
+ kfree(entry);
+ return -EEXIST;
+ }
+ }
+
+ rb_link_node(&entry->node, parent, p);
+ rb_insert_color(&entry->node, &dev->xrcd_tree);
+ igrab(inode);
+ return 0;
+}
+
+static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
+ struct inode *inode)
+{
+ struct xrcd_table_entry *entry;
+ struct rb_node *p = dev->xrcd_tree.rb_node;
+
+ while (p) {
+ entry = rb_entry(p, struct xrcd_table_entry, node);
+
+ if (inode < entry->inode)
+ p = p->rb_left;
+ else if (inode > entry->inode)
+ p = p->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
+{
+ struct xrcd_table_entry *entry;
+
+ entry = xrcd_table_search(dev, inode);
+ if (!entry)
+ return NULL;
+
+ return entry->xrcd;
+}
+
+static void xrcd_table_delete(struct ib_uverbs_device *dev,
+ struct inode *inode)
+{
+ struct xrcd_table_entry *entry;
+
+ entry = xrcd_table_search(dev, inode);
+ if (entry) {
+ iput(inode);
+ rb_erase(&entry->node, &dev->xrcd_tree);
+ kfree(entry);
+ }
+}
+
+ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_open_xrcd cmd;
+ struct ib_uverbs_open_xrcd_resp resp;
+ struct ib_udata udata;
+ struct ib_uxrcd_object *obj;
+ struct ib_xrcd *xrcd = NULL;
+ struct file *f = NULL;
+ struct inode *inode = NULL;
+ int ret = 0;
+ int new_xrcd = 0;
+
+ if (out_len < sizeof resp)
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ INIT_UDATA(&udata, buf + sizeof cmd,
+ (unsigned long) cmd.response + sizeof resp,
+ in_len - sizeof cmd, out_len - sizeof resp);
+
+ mutex_lock(&file->device->xrcd_tree_mutex);
+
+ if (cmd.fd != -1) {
+ /* search for file descriptor */
+ f = fget(cmd.fd);
+ if (!f) {
+ ret = -EBADF;
+ goto err_tree_mutex_unlock;
+ }
+
+ inode = f->f_dentry->d_inode;
+ if (!inode) {
+ ret = -EBADF;
+ goto err_tree_mutex_unlock;
+ }
+
+ xrcd = find_xrcd(file->device, inode);
+ if (!xrcd && !(cmd.oflags & O_CREAT)) {
+ /* no file descriptor. Need CREATE flag */
+ ret = -EAGAIN;
+ goto err_tree_mutex_unlock;
+ }
+
+ if (xrcd && cmd.oflags & O_EXCL) {
+ ret = -EINVAL;
+ goto err_tree_mutex_unlock;
+ }
+ }
+
+ obj = kmalloc(sizeof *obj, GFP_KERNEL);
+ if (!obj) {
+ ret = -ENOMEM;
+ goto err_tree_mutex_unlock;
+ }
+
+ init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
+
+ down_write(&obj->uobject.mutex);
+
+ if (!xrcd) {
+ xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
+ file->ucontext, &udata);
+ if (IS_ERR(xrcd)) {
+ ret = PTR_ERR(xrcd);
+ goto err;
+ }
+
+ xrcd->inode = inode;
+ xrcd->device = file->device->ib_dev;
+ atomic_set(&xrcd->usecnt, 0);
+ mutex_init(&xrcd->tgt_qp_mutex);
+ INIT_LIST_HEAD(&xrcd->tgt_qp_list);
+ new_xrcd = 1;
+ }
+
+ atomic_set(&obj->refcnt, 0);
+ obj->uobject.object = xrcd;
+ ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
+ if (ret)
+ goto err_idr;
+
+ memset(&resp, 0, sizeof resp);
+ resp.xrcd_handle = obj->uobject.id;
+
+ if (inode) {
+ if (new_xrcd) {
+ /* create new inode/xrcd table entry */
+ ret = xrcd_table_insert(file->device, inode, xrcd);
+ if (ret)
+ goto err_insert_xrcd;
+ }
+ atomic_inc(&xrcd->usecnt);
+ }
+
+ if (copy_to_user((void __user *) (unsigned long) cmd.response,
+ &resp, sizeof resp)) {
+ ret = -EFAULT;
+ goto err_copy;
+ }
+
+ if (f)
+ fput(f);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
+ mutex_unlock(&file->mutex);
+
+ obj->uobject.live = 1;
+ up_write(&obj->uobject.mutex);
+
+ mutex_unlock(&file->device->xrcd_tree_mutex);
+ return in_len;
+
+err_copy:
+ if (inode) {
+ if (new_xrcd)
+ xrcd_table_delete(file->device, inode);
+ atomic_dec(&xrcd->usecnt);
+ }
+
+err_insert_xrcd:
+ idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
+
+err_idr:
+ ib_dealloc_xrcd(xrcd);
+
+err:
+ put_uobj_write(&obj->uobject);
+
+err_tree_mutex_unlock:
+ if (f)
+ fput(f);
+
+ mutex_unlock(&file->device->xrcd_tree_mutex);
+
+ return ret;
+}
+
+ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_close_xrcd cmd;
+ struct ib_uobject *uobj;
+ struct ib_xrcd *xrcd = NULL;
+ struct inode *inode = NULL;
+ struct ib_uxrcd_object *obj;
+ int live;
+ int ret = 0;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ mutex_lock(&file->device->xrcd_tree_mutex);
+ uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
+ if (!uobj) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ xrcd = uobj->object;
+ inode = xrcd->inode;
+ obj = container_of(uobj, struct ib_uxrcd_object, uobject);
+ if (atomic_read(&obj->refcnt)) {
+ put_uobj_write(uobj);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
+ ret = ib_dealloc_xrcd(uobj->object);
+ if (!ret)
+ uobj->live = 0;
+ }
+
+ live = uobj->live;
+ if (inode && ret)
+ atomic_inc(&xrcd->usecnt);
+
+ put_uobj_write(uobj);
+
+ if (ret)
+ goto out;
+
+ if (inode && !live)
+ xrcd_table_delete(file->device, inode);
+
+ idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ put_uobj(uobj);
+ ret = in_len;
+
+out:
+ mutex_unlock(&file->device->xrcd_tree_mutex);
+ return ret;
+}
+
+void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
+ struct ib_xrcd *xrcd)
+{
+ struct inode *inode;
+
+ inode = xrcd->inode;
+ if (inode && !atomic_dec_and_test(&xrcd->usecnt))
+ return;
+
+ ib_dealloc_xrcd(xrcd);
+
+ if (inode)
+ xrcd_table_delete(dev, inode);
+}
+
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
@@ -1052,9 +1370,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
struct ib_uverbs_create_qp_resp resp;
struct ib_udata udata;
struct ib_uqp_object *obj;
- struct ib_pd *pd;
- struct ib_cq *scq, *rcq;
- struct ib_srq *srq;
+ struct ib_device *device;
+ struct ib_pd *pd = NULL;
+ struct ib_xrcd *xrcd = NULL;
+ struct ib_uobject *uninitialized_var(xrcd_uobj);
+ struct ib_cq *scq = NULL, *rcq = NULL;
+ struct ib_srq *srq = NULL;
struct ib_qp *qp;
struct ib_qp_init_attr attr;
int ret;
@@ -1076,15 +1397,39 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
down_write(&obj->uevent.uobject.mutex);
- srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
- scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
- rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
- scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
+ if (cmd.qp_type == IB_QPT_XRC_TGT) {
+ xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
+ if (!xrcd) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+ device = xrcd->device;
+ } else {
+ pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+ scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
+ if (!pd || !scq) {
+ ret = -EINVAL;
+ goto err_put;
+ }
- if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
- ret = -EINVAL;
- goto err_put;
+ if (cmd.qp_type == IB_QPT_XRC_INI) {
+ cmd.max_recv_wr = cmd.max_recv_sge = 0;
+ } else {
+ if (cmd.is_srq) {
+ srq = idr_read_srq(cmd.srq_handle, file->ucontext);
+ if (!srq || srq->srq_type != IB_SRQT_BASIC) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+ }
+ rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
+ scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
+ if (!rcq) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+ }
+ device = pd->device;
}
attr.event_handler = ib_uverbs_qp_event_handler;
@@ -1092,6 +1437,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
attr.send_cq = scq;
attr.recv_cq = rcq;
attr.srq = srq;
+ attr.xrcd = xrcd;
attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
attr.qp_type = cmd.qp_type;
attr.create_flags = 0;
@@ -1106,26 +1452,34 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&obj->uevent.event_list);
INIT_LIST_HEAD(&obj->mcast_list);
- qp = pd->device->create_qp(pd, &attr, &udata);
+ if (cmd.qp_type == IB_QPT_XRC_TGT)
+ qp = ib_create_qp(pd, &attr);
+ else
+ qp = device->create_qp(pd, &attr, &udata);
+
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto err_put;
}
- qp->device = pd->device;
- qp->pd = pd;
- qp->send_cq = attr.send_cq;
- qp->recv_cq = attr.recv_cq;
- qp->srq = attr.srq;
- qp->uobject = &obj->uevent.uobject;
- qp->event_handler = attr.event_handler;
- qp->qp_context = attr.qp_context;
- qp->qp_type = attr.qp_type;
- atomic_inc(&pd->usecnt);
- atomic_inc(&attr.send_cq->usecnt);
- atomic_inc(&attr.recv_cq->usecnt);
- if (attr.srq)
- atomic_inc(&attr.srq->usecnt);
+ if (cmd.qp_type != IB_QPT_XRC_TGT) {
+ qp->real_qp = qp;
+ qp->device = device;
+ qp->pd = pd;
+ qp->send_cq = attr.send_cq;
+ qp->recv_cq = attr.recv_cq;
+ qp->srq = attr.srq;
+ qp->event_handler = attr.event_handler;
+ qp->qp_context = attr.qp_context;
+ qp->qp_type = attr.qp_type;
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&attr.send_cq->usecnt);
+ if (attr.recv_cq)
+ atomic_inc(&attr.recv_cq->usecnt);
+ if (attr.srq)
+ atomic_inc(&attr.srq->usecnt);
+ }
+ qp->uobject = &obj->uevent.uobject;
obj->uevent.uobject.object = qp;
ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
@@ -1147,9 +1501,13 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
goto err_copy;
}
- put_pd_read(pd);
- put_cq_read(scq);
- if (rcq != scq)
+ if (xrcd)
+ put_xrcd_read(xrcd_uobj);
+ if (pd)
+ put_pd_read(pd);
+ if (scq)
+ put_cq_read(scq);
+ if (rcq && rcq != scq)
put_cq_read(rcq);
if (srq)
put_srq_read(srq);
@@ -1171,6 +1529,8 @@ err_destroy:
ib_destroy_qp(qp);
err_put:
+ if (xrcd)
+ put_xrcd_read(xrcd_uobj);
if (pd)
put_pd_read(pd);
if (scq)
@@ -1184,6 +1544,98 @@ err_put:
return ret;
}
+ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len, int out_len)
+{
+ struct ib_uverbs_open_qp cmd;
+ struct ib_uverbs_create_qp_resp resp;
+ struct ib_udata udata;
+ struct ib_uqp_object *obj;
+ struct ib_xrcd *xrcd;
+ struct ib_uobject *uninitialized_var(xrcd_uobj);
+ struct ib_qp *qp;
+ struct ib_qp_open_attr attr;
+ int ret;
+
+ if (out_len < sizeof resp)
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ INIT_UDATA(&udata, buf + sizeof cmd,
+ (unsigned long) cmd.response + sizeof resp,
+ in_len - sizeof cmd, out_len - sizeof resp);
+
+ obj = kmalloc(sizeof *obj, GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
+ down_write(&obj->uevent.uobject.mutex);
+
+ xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
+ if (!xrcd) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ attr.event_handler = ib_uverbs_qp_event_handler;
+ attr.qp_context = file;
+ attr.qp_num = cmd.qpn;
+ attr.qp_type = cmd.qp_type;
+
+ obj->uevent.events_reported = 0;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ INIT_LIST_HEAD(&obj->mcast_list);
+
+ qp = ib_open_qp(xrcd, &attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto err_put;
+ }
+
+ qp->uobject = &obj->uevent.uobject;
+
+ obj->uevent.uobject.object = qp;
+ ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
+ if (ret)
+ goto err_destroy;
+
+ memset(&resp, 0, sizeof resp);
+ resp.qpn = qp->qp_num;
+ resp.qp_handle = obj->uevent.uobject.id;
+
+ if (copy_to_user((void __user *) (unsigned long) cmd.response,
+ &resp, sizeof resp)) {
+ ret = -EFAULT;
+ goto err_remove;
+ }
+
+ put_xrcd_read(xrcd_uobj);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
+ mutex_unlock(&file->mutex);
+
+ obj->uevent.uobject.live = 1;
+
+ up_write(&obj->uevent.uobject.mutex);
+
+ return in_len;
+
+err_remove:
+ idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
+
+err_destroy:
+ ib_destroy_qp(qp);
+
+err_put:
+ put_xrcd_read(xrcd_uobj);
+ put_uobj_write(&obj->uevent.uobject);
+ return ret;
+}
+
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
@@ -1284,6 +1736,20 @@ out:
return ret ? ret : in_len;
}
+/* Remove ignored fields set in the attribute mask */
+static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
+{
+ switch (qp_type) {
+ case IB_QPT_XRC_INI:
+ return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
+ case IB_QPT_XRC_TGT:
+ return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
+ IB_QP_RNR_RETRY);
+ default:
+ return mask;
+ }
+}
+
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
@@ -1356,7 +1822,12 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
- ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);
+ if (qp->real_qp == qp) {
+ ret = qp->device->modify_qp(qp, attr,
+ modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+ } else {
+ ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+ }
put_qp_read(qp);
@@ -1553,7 +2024,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
}
resp.bad_wr = 0;
- ret = qp->device->post_send(qp, wr, &bad_wr);
+ ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
if (ret)
for (next = wr; next; next = next->next) {
++resp.bad_wr;
@@ -1691,7 +2162,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
goto out;
resp.bad_wr = 0;
- ret = qp->device->post_recv(qp, wr, &bad_wr);
+ ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
put_qp_read(qp);
@@ -1975,107 +2446,199 @@ out_put:
return ret ? ret : in_len;
}
-ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
- const char __user *buf, int in_len,
- int out_len)
+int __uverbs_create_xsrq(struct ib_uverbs_file *file,
+ struct ib_uverbs_create_xsrq *cmd,
+ struct ib_udata *udata)
{
- struct ib_uverbs_create_srq cmd;
struct ib_uverbs_create_srq_resp resp;
- struct ib_udata udata;
- struct ib_uevent_object *obj;
+ struct ib_usrq_object *obj;
struct ib_pd *pd;
struct ib_srq *srq;
+ struct ib_uobject *uninitialized_var(xrcd_uobj);
struct ib_srq_init_attr attr;
int ret;
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-
obj = kmalloc(sizeof *obj, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
- down_write(&obj->uobject.mutex);
+ init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key);
+ down_write(&obj->uevent.uobject.mutex);
- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+ pd = idr_read_pd(cmd->pd_handle, file->ucontext);
if (!pd) {
ret = -EINVAL;
goto err;
}
+ if (cmd->srq_type == IB_SRQT_XRC) {
+ attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
+ if (!attr.ext.xrc.cq) {
+ ret = -EINVAL;
+ goto err_put_pd;
+ }
+
+ attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
+ if (!attr.ext.xrc.xrcd) {
+ ret = -EINVAL;
+ goto err_put_cq;
+ }
+
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ }
+
attr.event_handler = ib_uverbs_srq_event_handler;
attr.srq_context = file;
- attr.attr.max_wr = cmd.max_wr;
- attr.attr.max_sge = cmd.max_sge;
- attr.attr.srq_limit = cmd.srq_limit;
+ attr.srq_type = cmd->srq_type;
+ attr.attr.max_wr = cmd->max_wr;
+ attr.attr.max_sge = cmd->max_sge;
+ attr.attr.srq_limit = cmd->srq_limit;
- obj->events_reported = 0;
- INIT_LIST_HEAD(&obj->event_list);
+ obj->uevent.events_reported = 0;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
- srq = pd->device->create_srq(pd, &attr, &udata);
+ srq = pd->device->create_srq(pd, &attr, udata);
if (IS_ERR(srq)) {
ret = PTR_ERR(srq);
goto err_put;
}
- srq->device = pd->device;
- srq->pd = pd;
- srq->uobject = &obj->uobject;
+ srq->device = pd->device;
+ srq->pd = pd;
+ srq->srq_type = cmd->srq_type;
+ srq->uobject = &obj->uevent.uobject;
srq->event_handler = attr.event_handler;
srq->srq_context = attr.srq_context;
+
+ if (cmd->srq_type == IB_SRQT_XRC) {
+ srq->ext.xrc.cq = attr.ext.xrc.cq;
+ srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
+ atomic_inc(&attr.ext.xrc.cq->usecnt);
+ atomic_inc(&attr.ext.xrc.xrcd->usecnt);
+ }
+
atomic_inc(&pd->usecnt);
atomic_set(&srq->usecnt, 0);
- obj->uobject.object = srq;
- ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
+ obj->uevent.uobject.object = srq;
+ ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
if (ret)
goto err_destroy;
memset(&resp, 0, sizeof resp);
- resp.srq_handle = obj->uobject.id;
+ resp.srq_handle = obj->uevent.uobject.id;
resp.max_wr = attr.attr.max_wr;
resp.max_sge = attr.attr.max_sge;
+ if (cmd->srq_type == IB_SRQT_XRC)
+ resp.srqn = srq->ext.xrc.srq_num;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
+ if (copy_to_user((void __user *) (unsigned long) cmd->response,
&resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
+ if (cmd->srq_type == IB_SRQT_XRC) {
+ put_uobj_read(xrcd_uobj);
+ put_cq_read(attr.ext.xrc.cq);
+ }
put_pd_read(pd);
mutex_lock(&file->mutex);
- list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
+ list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
mutex_unlock(&file->mutex);
- obj->uobject.live = 1;
+ obj->uevent.uobject.live = 1;
- up_write(&obj->uobject.mutex);
+ up_write(&obj->uevent.uobject.mutex);
- return in_len;
+ return 0;
err_copy:
- idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);
+ idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
err_destroy:
ib_destroy_srq(srq);
err_put:
+ if (cmd->srq_type == IB_SRQT_XRC) {
+ atomic_dec(&obj->uxrcd->refcnt);
+ put_uobj_read(xrcd_uobj);
+ }
+
+err_put_cq:
+ if (cmd->srq_type == IB_SRQT_XRC)
+ put_cq_read(attr.ext.xrc.cq);
+
+err_put_pd:
put_pd_read(pd);
err:
- put_uobj_write(&obj->uobject);
+ put_uobj_write(&obj->uevent.uobject);
return ret;
}
+ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_create_srq cmd;
+ struct ib_uverbs_create_xsrq xcmd;
+ struct ib_uverbs_create_srq_resp resp;
+ struct ib_udata udata;
+ int ret;
+
+ if (out_len < sizeof resp)
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ xcmd.response = cmd.response;
+ xcmd.user_handle = cmd.user_handle;
+ xcmd.srq_type = IB_SRQT_BASIC;
+ xcmd.pd_handle = cmd.pd_handle;
+ xcmd.max_wr = cmd.max_wr;
+ xcmd.max_sge = cmd.max_sge;
+ xcmd.srq_limit = cmd.srq_limit;
+
+ INIT_UDATA(&udata, buf + sizeof cmd,
+ (unsigned long) cmd.response + sizeof resp,
+ in_len - sizeof cmd, out_len - sizeof resp);
+
+ ret = __uverbs_create_xsrq(file, &xcmd, &udata);
+ if (ret)
+ return ret;
+
+ return in_len;
+}
+
+ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len, int out_len)
+{
+ struct ib_uverbs_create_xsrq cmd;
+ struct ib_uverbs_create_srq_resp resp;
+ struct ib_udata udata;
+ int ret;
+
+ if (out_len < sizeof resp)
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ INIT_UDATA(&udata, buf + sizeof cmd,
+ (unsigned long) cmd.response + sizeof resp,
+ in_len - sizeof cmd, out_len - sizeof resp);
+
+ ret = __uverbs_create_xsrq(file, &cmd, &udata);
+ if (ret)
+ return ret;
+
+ return in_len;
+}
+
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
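
ib_uverbs_open_xrcd above keys the domain to the inode behind the caller's file descriptor and interprets cmd.oflags the way open(2) does: without O_CREAT an existing domain is required, and O_EXCL makes an already-existing domain an error. A short userspace sketch of those flag semantics on a regular file, offered only as an analogy; the path is made up:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Create only if it does not exist yet; fail with EEXIST otherwise. */
	int fd = open("/tmp/xrcd-demo", O_RDWR | O_CREAT | O_EXCL, 0600);

	if (fd < 0) {
		perror("exclusive create");
		/* Without O_CREAT the open instead requires the file
		 * (here standing in for the XRC domain) to exist already. */
		fd = open("/tmp/xrcd-demo", O_RDWR);
		if (fd < 0) {
			perror("plain open");
			return 1;
		}
	}
	close(fd);
	return 0;
}
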
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 56898b6578a..87963674637 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -72,6 +72,7 @@ DEFINE_IDR(ib_uverbs_ah_idr);
DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
+DEFINE_IDR(ib_uverbs_xrcd_idr);
static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -107,6 +108,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
[IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
[IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
+ [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd,
+ [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
+ [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
+ [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -202,8 +207,12 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
+ if (qp != qp->real_qp) {
+ ib_close_qp(qp);
+ } else {
+ ib_uverbs_detach_umcast(qp, uqp);
+ ib_destroy_qp(qp);
+ }
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
@@ -241,6 +250,18 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
+ mutex_lock(&file->device->xrcd_tree_mutex);
+ list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
+ struct ib_xrcd *xrcd = uobj->object;
+ struct ib_uxrcd_object *uxrcd =
+ container_of(uobj, struct ib_uxrcd_object, uobject);
+
+ idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
+ ib_uverbs_dealloc_xrcd(file->device, xrcd);
+ kfree(uxrcd);
+ }
+ mutex_unlock(&file->device->xrcd_tree_mutex);
+
list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
struct ib_pd *pd = uobj->object;
@@ -557,8 +578,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (hdr.in_words * 4 != count)
return -EINVAL;
- if (hdr.command < 0 ||
- hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
+ if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
!uverbs_cmd_table[hdr.command])
return -EINVAL;
@@ -741,6 +761,8 @@ static void ib_uverbs_add_one(struct ib_device *device)
kref_init(&uverbs_dev->ref);
init_completion(&uverbs_dev->comp);
+ uverbs_dev->xrcd_tree = RB_ROOT;
+ mutex_init(&uverbs_dev->xrcd_tree_mutex);
spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
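
The uverbs_main.c hunk above drops the `hdr.command < 0` test: the command field is unsigned, so that comparison is always false and the upper-bound check plus the NULL-slot check are all that is needed. A standalone illustration of the same dispatch-table validation; the table contents are placeholders:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void cmd_a(void) { puts("cmd 0"); }
static void cmd_b(void) { puts("cmd 2"); }

static void (*cmd_table[])(void) = {
	[0] = cmd_a,
	[2] = cmd_b,          /* index 1 is intentionally left NULL */
};

static int dispatch(uint32_t command)
{
	/* command is unsigned: "command < 0" would always be false,
	 * so only the upper bound and the NULL slot are checked. */
	if (command >= ARRAY_SIZE(cmd_table) || !cmd_table[command])
		return -1;
	cmd_table[command]();
	return 0;
}

int main(void)
{
	dispatch(0);
	dispatch(1);          /* rejected: NULL slot */
	dispatch(100);        /* rejected: out of range */
	return 0;
}
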
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 1b1146f8712..e7bee46868d 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <rdma/ib_marshall.h>
void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index af7a8b08b2e..602b1bd723a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -38,7 +38,9 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/string.h>
+#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
@@ -77,6 +79,31 @@ enum ib_rate mult_to_ib_rate(int mult)
}
EXPORT_SYMBOL(mult_to_ib_rate);
+int ib_rate_to_mbps(enum ib_rate rate)
+{
+ switch (rate) {
+ case IB_RATE_2_5_GBPS: return 2500;
+ case IB_RATE_5_GBPS: return 5000;
+ case IB_RATE_10_GBPS: return 10000;
+ case IB_RATE_20_GBPS: return 20000;
+ case IB_RATE_30_GBPS: return 30000;
+ case IB_RATE_40_GBPS: return 40000;
+ case IB_RATE_60_GBPS: return 60000;
+ case IB_RATE_80_GBPS: return 80000;
+ case IB_RATE_120_GBPS: return 120000;
+ case IB_RATE_14_GBPS: return 14062;
+ case IB_RATE_56_GBPS: return 56250;
+ case IB_RATE_112_GBPS: return 112500;
+ case IB_RATE_168_GBPS: return 168750;
+ case IB_RATE_25_GBPS: return 25781;
+ case IB_RATE_100_GBPS: return 103125;
+ case IB_RATE_200_GBPS: return 206250;
+ case IB_RATE_300_GBPS: return 309375;
+ default: return -1;
+ }
+}
+EXPORT_SYMBOL(ib_rate_to_mbps);
+
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
@@ -250,6 +277,13 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
srq->uobject = NULL;
srq->event_handler = srq_init_attr->event_handler;
srq->srq_context = srq_init_attr->srq_context;
+ srq->srq_type = srq_init_attr->srq_type;
+ if (srq->srq_type == IB_SRQT_XRC) {
+ srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
+ srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
+ atomic_inc(&srq->ext.xrc.xrcd->usecnt);
+ atomic_inc(&srq->ext.xrc.cq->usecnt);
+ }
atomic_inc(&pd->usecnt);
atomic_set(&srq->usecnt, 0);
}
@@ -279,16 +313,29 @@ EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
struct ib_pd *pd;
+ enum ib_srq_type srq_type;
+ struct ib_xrcd *uninitialized_var(xrcd);
+ struct ib_cq *uninitialized_var(cq);
int ret;
if (atomic_read(&srq->usecnt))
return -EBUSY;
pd = srq->pd;
+ srq_type = srq->srq_type;
+ if (srq_type == IB_SRQT_XRC) {
+ xrcd = srq->ext.xrc.xrcd;
+ cq = srq->ext.xrc.cq;
+ }
ret = srq->device->destroy_srq(srq);
- if (!ret)
+ if (!ret) {
atomic_dec(&pd->usecnt);
+ if (srq_type == IB_SRQT_XRC) {
+ atomic_dec(&xrcd->usecnt);
+ atomic_dec(&cq->usecnt);
+ }
+ }
return ret;
}
@@ -296,28 +343,123 @@ EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */
+static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
+{
+ struct ib_qp *qp = context;
+
+ list_for_each_entry(event->element.qp, &qp->open_list, open_list)
+ event->element.qp->event_handler(event, event->element.qp->qp_context);
+}
+
+static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+{
+ mutex_lock(&xrcd->tgt_qp_mutex);
+ list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
+ mutex_unlock(&xrcd->tgt_qp_mutex);
+}
+
+static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
+ void (*event_handler)(struct ib_event *, void *),
+ void *qp_context)
+{
+ struct ib_qp *qp;
+ unsigned long flags;
+
+ qp = kzalloc(sizeof *qp, GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->real_qp = real_qp;
+ atomic_inc(&real_qp->usecnt);
+ qp->device = real_qp->device;
+ qp->event_handler = event_handler;
+ qp->qp_context = qp_context;
+ qp->qp_num = real_qp->qp_num;
+ qp->qp_type = real_qp->qp_type;
+
+ spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+ list_add(&qp->open_list, &real_qp->open_list);
+ spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+
+ return qp;
+}
+
+struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
+ struct ib_qp_open_attr *qp_open_attr)
+{
+ struct ib_qp *qp, *real_qp;
+
+ if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
+ return ERR_PTR(-EINVAL);
+
+ qp = ERR_PTR(-EINVAL);
+ mutex_lock(&xrcd->tgt_qp_mutex);
+ list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
+ if (real_qp->qp_num == qp_open_attr->qp_num) {
+ qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
+ qp_open_attr->qp_context);
+ break;
+ }
+ }
+ mutex_unlock(&xrcd->tgt_qp_mutex);
+ return qp;
+}
+EXPORT_SYMBOL(ib_open_qp);
+
struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
- struct ib_qp *qp;
+ struct ib_qp *qp, *real_qp;
+ struct ib_device *device;
- qp = pd->device->create_qp(pd, qp_init_attr, NULL);
+ device = pd ? pd->device : qp_init_attr->xrcd->device;
+ qp = device->create_qp(pd, qp_init_attr, NULL);
if (!IS_ERR(qp)) {
- qp->device = pd->device;
- qp->pd = pd;
- qp->send_cq = qp_init_attr->send_cq;
- qp->recv_cq = qp_init_attr->recv_cq;
- qp->srq = qp_init_attr->srq;
- qp->uobject = NULL;
- qp->event_handler = qp_init_attr->event_handler;
- qp->qp_context = qp_init_attr->qp_context;
- qp->qp_type = qp_init_attr->qp_type;
- atomic_inc(&pd->usecnt);
- atomic_inc(&qp_init_attr->send_cq->usecnt);
- atomic_inc(&qp_init_attr->recv_cq->usecnt);
- if (qp_init_attr->srq)
- atomic_inc(&qp_init_attr->srq->usecnt);
+ qp->device = device;
+ qp->real_qp = qp;
+ qp->uobject = NULL;
+ qp->qp_type = qp_init_attr->qp_type;
+
+ if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
+ qp->event_handler = __ib_shared_qp_event_handler;
+ qp->qp_context = qp;
+ qp->pd = NULL;
+ qp->send_cq = qp->recv_cq = NULL;
+ qp->srq = NULL;
+ qp->xrcd = qp_init_attr->xrcd;
+ atomic_inc(&qp_init_attr->xrcd->usecnt);
+ INIT_LIST_HEAD(&qp->open_list);
+ atomic_set(&qp->usecnt, 0);
+
+ real_qp = qp;
+ qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
+ qp_init_attr->qp_context);
+ if (!IS_ERR(qp))
+ __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
+ else
+ real_qp->device->destroy_qp(real_qp);
+ } else {
+ qp->event_handler = qp_init_attr->event_handler;
+ qp->qp_context = qp_init_attr->qp_context;
+ if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
+ qp->recv_cq = NULL;
+ qp->srq = NULL;
+ } else {
+ qp->recv_cq = qp_init_attr->recv_cq;
+ atomic_inc(&qp_init_attr->recv_cq->usecnt);
+ qp->srq = qp_init_attr->srq;
+ if (qp->srq)
+ atomic_inc(&qp_init_attr->srq->usecnt);
+ }
+
+ qp->pd = pd;
+ qp->send_cq = qp_init_attr->send_cq;
+ qp->xrcd = NULL;
+
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&qp_init_attr->send_cq->usecnt);
+ }
}
return qp;
@@ -326,8 +468,8 @@ EXPORT_SYMBOL(ib_create_qp);
static const struct {
int valid;
- enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1];
- enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1];
+ enum ib_qp_attr_mask req_param[IB_QPT_MAX];
+ enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
@@ -343,6 +485,12 @@ static const struct {
[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
+ [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_ACCESS_FLAGS),
+ [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_ACCESS_FLAGS),
[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
@@ -365,6 +513,12 @@ static const struct {
[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
+ [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_ACCESS_FLAGS),
+ [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_ACCESS_FLAGS),
[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
@@ -384,6 +538,16 @@ static const struct {
IB_QP_RQ_PSN |
IB_QP_MAX_DEST_RD_ATOMIC |
IB_QP_MIN_RNR_TIMER),
+ [IB_QPT_XRC_INI] = (IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_DEST_QPN |
+ IB_QP_RQ_PSN),
+ [IB_QPT_XRC_TGT] = (IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_DEST_QPN |
+ IB_QP_RQ_PSN |
+ IB_QP_MAX_DEST_RD_ATOMIC |
+ IB_QP_MIN_RNR_TIMER),
},
.opt_param = {
[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
@@ -394,6 +558,12 @@ static const struct {
[IB_QPT_RC] = (IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX),
+ [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PKEY_INDEX),
+ [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PKEY_INDEX),
[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
@@ -414,6 +584,13 @@ static const struct {
IB_QP_RNR_RETRY |
IB_QP_SQ_PSN |
IB_QP_MAX_QP_RD_ATOMIC),
+ [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
+ IB_QP_RETRY_CNT |
+ IB_QP_RNR_RETRY |
+ IB_QP_SQ_PSN |
+ IB_QP_MAX_QP_RD_ATOMIC),
+ [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
+ IB_QP_SQ_PSN),
[IB_QPT_SMI] = IB_QP_SQ_PSN,
[IB_QPT_GSI] = IB_QP_SQ_PSN,
},
@@ -429,6 +606,15 @@ static const struct {
IB_QP_ACCESS_FLAGS |
IB_QP_MIN_RNR_TIMER |
IB_QP_PATH_MIG_STATE),
+ [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PATH_MIG_STATE),
+ [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_PATH_MIG_STATE),
[IB_QPT_SMI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
@@ -453,6 +639,15 @@ static const struct {
IB_QP_ALT_PATH |
IB_QP_PATH_MIG_STATE |
IB_QP_MIN_RNR_TIMER),
+ [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_ALT_PATH |
+ IB_QP_PATH_MIG_STATE),
+ [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_ALT_PATH |
+ IB_QP_PATH_MIG_STATE |
+ IB_QP_MIN_RNR_TIMER),
[IB_QPT_SMI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
@@ -465,6 +660,8 @@ static const struct {
[IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
[IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
[IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
+ [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
+ [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
}
@@ -487,6 +684,15 @@ static const struct {
IB_QP_ACCESS_FLAGS |
IB_QP_MIN_RNR_TIMER |
IB_QP_PATH_MIG_STATE),
+ [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PATH_MIG_STATE),
+ [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_PATH_MIG_STATE),
[IB_QPT_SMI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
@@ -515,6 +721,25 @@ static const struct {
IB_QP_PKEY_INDEX |
IB_QP_MIN_RNR_TIMER |
IB_QP_PATH_MIG_STATE),
+ [IB_QPT_XRC_INI] = (IB_QP_PORT |
+ IB_QP_AV |
+ IB_QP_TIMEOUT |
+ IB_QP_RETRY_CNT |
+ IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PKEY_INDEX |
+ IB_QP_PATH_MIG_STATE),
+ [IB_QPT_XRC_TGT] = (IB_QP_PORT |
+ IB_QP_AV |
+ IB_QP_TIMEOUT |
+ IB_QP_MAX_DEST_RD_ATOMIC |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PKEY_INDEX |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_PATH_MIG_STATE),
[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
@@ -579,7 +804,7 @@ int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
+ return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
@@ -589,11 +814,59 @@ int ib_query_qp(struct ib_qp *qp,
struct ib_qp_init_attr *qp_init_attr)
{
return qp->device->query_qp ?
- qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
+ qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
+int ib_close_qp(struct ib_qp *qp)
+{
+ struct ib_qp *real_qp;
+ unsigned long flags;
+
+ real_qp = qp->real_qp;
+ if (real_qp == qp)
+ return -EINVAL;
+
+ spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+ list_del(&qp->open_list);
+ spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+
+ atomic_dec(&real_qp->usecnt);
+ kfree(qp);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_close_qp);
+
+static int __ib_destroy_shared_qp(struct ib_qp *qp)
+{
+ struct ib_xrcd *xrcd;
+ struct ib_qp *real_qp;
+ int ret;
+
+ real_qp = qp->real_qp;
+ xrcd = real_qp->xrcd;
+
+ mutex_lock(&xrcd->tgt_qp_mutex);
+ ib_close_qp(qp);
+ if (atomic_read(&real_qp->usecnt) == 0)
+ list_del(&real_qp->xrcd_list);
+ else
+ real_qp = NULL;
+ mutex_unlock(&xrcd->tgt_qp_mutex);
+
+ if (real_qp) {
+ ret = ib_destroy_qp(real_qp);
+ if (!ret)
+ atomic_dec(&xrcd->usecnt);
+ else
+ __ib_insert_xrcd_qp(xrcd, real_qp);
+ }
+
+ return 0;
+}
+
int ib_destroy_qp(struct ib_qp *qp)
{
struct ib_pd *pd;
@@ -601,16 +874,25 @@ int ib_destroy_qp(struct ib_qp *qp)
struct ib_srq *srq;
int ret;
- pd = qp->pd;
- scq = qp->send_cq;
- rcq = qp->recv_cq;
- srq = qp->srq;
+ if (atomic_read(&qp->usecnt))
+ return -EBUSY;
+
+ if (qp->real_qp != qp)
+ return __ib_destroy_shared_qp(qp);
+
+ pd = qp->pd;
+ scq = qp->send_cq;
+ rcq = qp->recv_cq;
+ srq = qp->srq;
ret = qp->device->destroy_qp(qp);
if (!ret) {
- atomic_dec(&pd->usecnt);
- atomic_dec(&scq->usecnt);
- atomic_dec(&rcq->usecnt);
+ if (pd)
+ atomic_dec(&pd->usecnt);
+ if (scq)
+ atomic_dec(&scq->usecnt);
+ if (rcq)
+ atomic_dec(&rcq->usecnt);
if (srq)
atomic_dec(&srq->usecnt);
}
@@ -920,3 +1202,42 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
+
+struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+{
+ struct ib_xrcd *xrcd;
+
+ if (!device->alloc_xrcd)
+ return ERR_PTR(-ENOSYS);
+
+ xrcd = device->alloc_xrcd(device, NULL, NULL);
+ if (!IS_ERR(xrcd)) {
+ xrcd->device = device;
+ xrcd->inode = NULL;
+ atomic_set(&xrcd->usecnt, 0);
+ mutex_init(&xrcd->tgt_qp_mutex);
+ INIT_LIST_HEAD(&xrcd->tgt_qp_list);
+ }
+
+ return xrcd;
+}
+EXPORT_SYMBOL(ib_alloc_xrcd);
+
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+{
+ struct ib_qp *qp;
+ int ret;
+
+ if (atomic_read(&xrcd->usecnt))
+ return -EBUSY;
+
+ while (!list_empty(&xrcd->tgt_qp_list)) {
+ qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
+ ret = ib_destroy_qp(qp);
+ if (ret)
+ return ret;
+ }
+
+ return xrcd->device->dealloc_xrcd(xrcd);
+}
+EXPORT_SYMBOL(ib_dealloc_xrcd);
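
ib_create_qp and ib_open_qp above split an XRC target QP into one "real" QP and any number of lightweight open handles: each handle pins the real QP through usecnt, ib_close_qp drops a handle, and ib_destroy_qp refuses to tear the real QP down while handles remain. A standalone sketch of that ownership model using C11 atomics; the struct and function names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct real_qp {
	int qp_num;
	atomic_int usecnt;       /* number of open handles */
};

struct qp_handle {
	struct real_qp *real;
};

static struct qp_handle *open_qp(struct real_qp *real)
{
	struct qp_handle *h = malloc(sizeof(*h));

	if (!h)
		return NULL;
	h->real = real;
	atomic_fetch_add(&real->usecnt, 1);
	return h;
}

static void close_qp(struct qp_handle *h)
{
	atomic_fetch_sub(&h->real->usecnt, 1);
	free(h);
}

static int destroy_qp(struct real_qp *real)
{
	if (atomic_load(&real->usecnt))
		return -1;       /* like -EBUSY: handles still open */
	free(real);
	return 0;
}

int main(void)
{
	struct real_qp *qp = malloc(sizeof(*qp));
	struct qp_handle *h;

	if (!qp)
		return 1;
	qp->qp_num = 7;
	atomic_init(&qp->usecnt, 0);

	h = open_qp(qp);
	printf("destroy while open: %d\n", destroy_qp(qp));   /* fails */
	close_qp(h);
	printf("destroy after close: %d\n", destroy_qp(qp));  /* succeeds */
	return 0;
}
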
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index 24f9e3a90e8..32d34e88d5c 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -288,6 +288,11 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
cm_event.private_data_len =
be32_to_cpu(req->private_data_length);
cm_event.private_data = req->private_data;
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send
+ * max supported values
+ */
+ cm_event.ird = cm_event.ord = 128;
if (cm_id->event_handler)
cm_id->event_handler(cm_id, &cm_event);
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 0ebe4e806b8..8951db4ae29 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -183,6 +183,11 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
case IW_CM_EVENT_ESTABLISHED:
c2_set_qp_state(req->qp,
C2_QP_STATE_RTS);
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send
+ * max supported values
+ */
+ cm_event.ird = cm_event.ord = 128;
case IW_CM_EVENT_CLOSE:
/*
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index f101bb73be6..12f923d64e4 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -753,10 +753,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
/* Print out the MAC address */
- pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n",
- netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+ pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);
#if 0
/* Disable network packets */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 6cd642aaa4d..de6d0774e60 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -753,6 +753,11 @@ static void connect_request_upcall(struct iwch_ep *ep)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
event.provider_data = ep;
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send max
+ * supported values
+ */
+ event.ird = event.ord = 8;
if (state_read(&ep->parent_ep->com) != DEAD) {
get_ep(&ep->com);
ep->parent_ep->com.cm_id->event_handler(
@@ -770,6 +775,11 @@ static void established_upcall(struct iwch_ep *ep)
PDBG("%s ep %p\n", __func__, ep);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send max
+ * supported values
+ */
+ event.ird = event.ord = 8;
if (ep->com.cm_id) {
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 71e0d845da3..abcc9e76962 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
struct ib_event event;
struct iwch_qp_attributes attrs;
struct iwch_qp *qhp;
+ unsigned long flag;
spin_lock(&rnicp->lock);
qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
@@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
if (atomic_dec_and_test(&qhp->refcnt))
wake_up(&qhp->wait);
@@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
struct iwch_cq *chp;
struct iwch_qp *qhp;
u32 cqid = RSPQ_CQID(rsp_msg);
+ unsigned long flag;
rnicp = (struct iwch_dev *) rdev_p->ulp;
spin_lock(&rnicp->lock);
@@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
*/
if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
dst_confirm(qhp->ep->dst);
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
break;
case TPT_ERR_STAG:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index c7d9411f295..37c224fc3ad 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -190,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
chp->rhp = rhp;
chp->ibcq.cqe = 1 << chp->cq.size_log2;
spin_lock_init(&chp->lock);
+ spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 9a342c9b220..87c14b0c5ac 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -103,6 +103,7 @@ struct iwch_cq {
struct iwch_dev *rhp;
struct t3_cq cq;
spinlock_t lock;
+ spinlock_t comp_handler_lock;
atomic_t refcnt;
wait_queue_head_t wait;
u32 __user *user_rptr_addr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ecd313f359a..bea5839d89e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -822,8 +822,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, *flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+ }
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, *flag);
@@ -833,8 +836,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, *flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&schp->comp_handler_lock, *flag);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+ }
/* deref */
if (atomic_dec_and_test(&qhp->refcnt))
@@ -853,11 +859,15 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
if (qhp->ibqp.uobject) {
cxio_set_wq_in_error(&qhp->wq);
cxio_set_cq_in_error(&rchp->cq);
+ spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
if (schp != rchp) {
cxio_set_cq_in_error(&schp->cq);
+ spin_lock_irqsave(&schp->comp_handler_lock, *flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
}
return;
}
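
The cxgb3 changes above wrap every (*ibcq.comp_handler)() upcall in the new comp_handler_lock, so the flush paths and event dispatch never invoke the handler concurrently for the same CQ. A userspace analogue of that serialization, using a pthread mutex in place of the kernel spinlock/irqsave pair; the callback and its argument are invented for the sketch:

#include <pthread.h>
#include <stdio.h>

struct demo_cq {
	pthread_mutex_t comp_handler_lock;
	void (*comp_handler)(struct demo_cq *cq);
};

static void my_handler(struct demo_cq *cq)
{
	(void)cq;
	puts("completion handler ran");
}

/* Every call site funnels the upcall through one lock, so the handler
 * never races with itself for the same CQ. */
static void notify_completion(struct demo_cq *cq)
{
	pthread_mutex_lock(&cq->comp_handler_lock);
	if (cq->comp_handler)
		cq->comp_handler(cq);
	pthread_mutex_unlock(&cq->comp_handler_lock);
}

int main(void)
{
	struct demo_cq cq = {
		.comp_handler_lock = PTHREAD_MUTEX_INITIALIZER,
		.comp_handler = my_handler,
	};

	notify_completion(&cq);
	return 0;
}
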
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 77f769d9227..b36cdac9c55 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -103,7 +103,8 @@ MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is spec compliant. (default=1)");
+ "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
+ " compliant (default=1)");
static int markers_enabled;
module_param(markers_enabled, int, 0644);
@@ -497,17 +498,21 @@ static int send_connect(struct c4iw_ep *ep)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
+static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+ u8 mpa_rev_to_use)
{
int mpalen, wrlen;
struct fw_ofld_tx_data_wr *req;
struct mpa_message *mpa;
+ struct mpa_v2_conn_params mpa_v2_params;
PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
BUG_ON(skb_cloned(skb));
mpalen = sizeof(*mpa) + ep->plen;
+ if (mpa_rev_to_use == 2)
+ mpalen += sizeof(struct mpa_v2_conn_params);
wrlen = roundup(mpalen + sizeof *req, 16);
skb = get_skb(skb, wrlen, GFP_KERNEL);
if (!skb) {
@@ -533,12 +538,39 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
mpa = (struct mpa_message *)(req + 1);
memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
+ (markers_enabled ? MPA_MARKERS : 0) |
+ (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
mpa->private_data_size = htons(ep->plen);
- mpa->revision = mpa_rev;
+ mpa->revision = mpa_rev_to_use;
+ if (mpa_rev_to_use == 1)
+ ep->tried_with_mpa_v1 = 1;
+
+ if (mpa_rev_to_use == 2) {
+ mpa->private_data_size +=
+ htons(sizeof(struct mpa_v2_conn_params));
+ mpa_v2_params.ird = htons((u16)ep->ird);
+ mpa_v2_params.ord = htons((u16)ep->ord);
+
+ if (peer2peer) {
+ mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
+ if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
+ mpa_v2_params.ord |=
+ htons(MPA_V2_RDMA_WRITE_RTR);
+ else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
+ mpa_v2_params.ord |=
+ htons(MPA_V2_RDMA_READ_RTR);
+ }
+ memcpy(mpa->private_data, &mpa_v2_params,
+ sizeof(struct mpa_v2_conn_params));
- if (ep->plen)
- memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
+ if (ep->plen)
+ memcpy(mpa->private_data +
+ sizeof(struct mpa_v2_conn_params),
+ ep->mpa_pkt + sizeof(*mpa), ep->plen);
+ } else
+ if (ep->plen)
+ memcpy(mpa->private_data,
+ ep->mpa_pkt + sizeof(*mpa), ep->plen);
/*
* Reference the mpa skb. This ensures the data area
@@ -562,10 +594,13 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct fw_ofld_tx_data_wr *req;
struct mpa_message *mpa;
struct sk_buff *skb;
+ struct mpa_v2_conn_params mpa_v2_params;
PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
+ if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
+ mpalen += sizeof(struct mpa_v2_conn_params);
wrlen = roundup(mpalen + sizeof *req, 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
@@ -595,8 +630,29 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
mpa->flags = MPA_REJECT;
mpa->revision = mpa_rev;
mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
+
+ if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+ mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+ mpa->private_data_size +=
+ htons(sizeof(struct mpa_v2_conn_params));
+ mpa_v2_params.ird = htons(((u16)ep->ird) |
+ (peer2peer ? MPA_V2_PEER2PEER_MODEL :
+ 0));
+ mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
+ (p2p_type ==
+ FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
+ MPA_V2_RDMA_WRITE_RTR : p2p_type ==
+ FW_RI_INIT_P2PTYPE_READ_REQ ?
+ MPA_V2_RDMA_READ_RTR : 0) : 0));
+ memcpy(mpa->private_data, &mpa_v2_params,
+ sizeof(struct mpa_v2_conn_params));
+
+ if (ep->plen)
+ memcpy(mpa->private_data +
+ sizeof(struct mpa_v2_conn_params), pdata, plen);
+ } else
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
/*
* Reference the mpa skb again. This ensures the data area
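
The MPA v2 hunks above encode ird and ord as 16-bit big-endian fields and OR control bits (peer-to-peer model, RTR type) into the upper bits of those same fields before copying them into the private data. A standalone round-trip of that packing scheme; the constants below are illustrative stand-ins, not the driver's MPA_V2_* definitions:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout: the low 14 bits carry the value and the top
 * bits carry flags (the real MPA_V2_* masks live in the driver headers). */
#define DEMO_IRD_ORD_MASK   0x3fff
#define DEMO_PEER2PEER_FLAG 0x8000
#define DEMO_READ_RTR_FLAG  0x4000

int main(void)
{
	uint16_t ird = 16, ord = 8;

	/* Pack: value plus flags, converted to network byte order. */
	uint16_t wire_ird = htons(ird | DEMO_PEER2PEER_FLAG);
	uint16_t wire_ord = htons(ord | DEMO_READ_RTR_FLAG);

	/* Unpack on the receive side: convert back and mask off the flags. */
	unsigned rx_ird = ntohs(wire_ird) & DEMO_IRD_ORD_MASK;
	unsigned rx_ord = ntohs(wire_ord) & DEMO_IRD_ORD_MASK;
	int p2p = !!(ntohs(wire_ird) & DEMO_PEER2PEER_FLAG);
	int read_rtr = !!(ntohs(wire_ord) & DEMO_READ_RTR_FLAG);

	printf("ird=%u ord=%u p2p=%d read_rtr=%d\n", rx_ird, rx_ord, p2p, read_rtr);
	return 0;
}
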
@@ -617,10 +673,13 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct fw_ofld_tx_data_wr *req;
struct mpa_message *mpa;
struct sk_buff *skb;
+ struct mpa_v2_conn_params mpa_v2_params;
PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
+ if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
+ mpalen += sizeof(struct mpa_v2_conn_params);
wrlen = roundup(mpalen + sizeof *req, 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
@@ -649,10 +708,36 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
(markers_enabled ? MPA_MARKERS : 0);
- mpa->revision = mpa_rev;
+ mpa->revision = ep->mpa_attr.version;
mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
+
+ if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+ mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+ mpa->private_data_size +=
+ htons(sizeof(struct mpa_v2_conn_params));
+ mpa_v2_params.ird = htons((u16)ep->ird);
+ mpa_v2_params.ord = htons((u16)ep->ord);
+ if (peer2peer && (ep->mpa_attr.p2p_type !=
+ FW_RI_INIT_P2PTYPE_DISABLED)) {
+ mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
+
+ if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
+ mpa_v2_params.ord |=
+ htons(MPA_V2_RDMA_WRITE_RTR);
+ else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
+ mpa_v2_params.ord |=
+ htons(MPA_V2_RDMA_READ_RTR);
+ }
+
+ memcpy(mpa->private_data, &mpa_v2_params,
+ sizeof(struct mpa_v2_conn_params));
+
+ if (ep->plen)
+ memcpy(mpa->private_data +
+ sizeof(struct mpa_v2_conn_params), pdata, plen);
+ } else
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
/*
* Reference the mpa skb. This ensures the data area
@@ -695,7 +780,10 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
/* start MPA negotiation */
send_flowc(ep, NULL);
- send_mpa_req(ep, skb);
+ if (ep->retry_with_mpa_v1)
+ send_mpa_req(ep, skb, 1);
+ else
+ send_mpa_req(ep, skb, mpa_rev);
return 0;
}
@@ -769,8 +857,19 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
event.remote_addr = ep->com.remote_addr;
if ((status == 0) || (status == -ECONNREFUSED)) {
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ if (!ep->tried_with_mpa_v1) {
+ /* this means MPA_v2 is used */
+ event.private_data_len = ep->plen -
+ sizeof(struct mpa_v2_conn_params);
+ event.private_data = ep->mpa_pkt +
+ sizeof(struct mpa_message) +
+ sizeof(struct mpa_v2_conn_params);
+ } else {
+ /* this means MPA_v1 is used */
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt +
+ sizeof(struct mpa_message);
+ }
}
PDBG("%s ep %p tid %u status %d\n", __func__, ep,
@@ -793,9 +892,22 @@ static void connect_request_upcall(struct c4iw_ep *ep)
event.event = IW_CM_EVENT_CONNECT_REQUEST;
event.local_addr = ep->com.local_addr;
event.remote_addr = ep->com.remote_addr;
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
event.provider_data = ep;
+ if (!ep->tried_with_mpa_v1) {
+ /* this means MPA_v2 is used */
+ event.ord = ep->ord;
+ event.ird = ep->ird;
+ event.private_data_len = ep->plen -
+ sizeof(struct mpa_v2_conn_params);
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
+ sizeof(struct mpa_v2_conn_params);
+ } else {
+ /* this means MPA_v1 is used. Send max supported */
+ event.ord = c4iw_max_read_depth;
+ event.ird = c4iw_max_read_depth;
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ }
if (state_read(&ep->parent_ep->com) != DEAD) {
c4iw_get_ep(&ep->com);
ep->parent_ep->com.cm_id->event_handler(
@@ -813,6 +925,8 @@ static void established_upcall(struct c4iw_ep *ep)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
+ event.ird = ep->ird;
+ event.ord = ep->ord;
if (ep->com.cm_id) {
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -848,7 +962,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
+ struct mpa_v2_conn_params *mpa_v2_params;
u16 plen;
+ u16 resp_ird, resp_ord;
+ u8 rtr_mismatch = 0, insuff_ird = 0;
struct c4iw_qp_attributes attrs;
enum c4iw_qp_attr_mask mask;
int err;
@@ -888,7 +1005,9 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
mpa = (struct mpa_message *) ep->mpa_pkt;
/* Validate MPA header. */
- if (mpa->revision != mpa_rev) {
+ if (mpa->revision > mpa_rev) {
+ printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
+ " Received = %d\n", __func__, mpa_rev, mpa->revision);
err = -EPROTO;
goto err;
}
@@ -938,13 +1057,66 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
ep->mpa_attr.recv_marker_enabled = markers_enabled;
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
- FW_RI_INIT_P2PTYPE_DISABLED;
+ ep->mpa_attr.version = mpa->revision;
+ ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
+
+ if (mpa->revision == 2) {
+ ep->mpa_attr.enhanced_rdma_conn =
+ mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
+ if (ep->mpa_attr.enhanced_rdma_conn) {
+ mpa_v2_params = (struct mpa_v2_conn_params *)
+ (ep->mpa_pkt + sizeof(*mpa));
+ resp_ird = ntohs(mpa_v2_params->ird) &
+ MPA_V2_IRD_ORD_MASK;
+ resp_ord = ntohs(mpa_v2_params->ord) &
+ MPA_V2_IRD_ORD_MASK;
+
+ /*
+ * This is a double check. Ideally, the checks below are
+ * not required, since ird/ord negotiation has already been
+ * handled in c4iw_accept_cr
+ */
+ if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+ err = -ENOMEM;
+ ep->ird = resp_ord;
+ ep->ord = resp_ird;
+ insuff_ird = 1;
+ }
+
+ if (ntohs(mpa_v2_params->ird) &
+ MPA_V2_PEER2PEER_MODEL) {
+ if (ntohs(mpa_v2_params->ord) &
+ MPA_V2_RDMA_WRITE_RTR)
+ ep->mpa_attr.p2p_type =
+ FW_RI_INIT_P2PTYPE_RDMA_WRITE;
+ else if (ntohs(mpa_v2_params->ord) &
+ MPA_V2_RDMA_READ_RTR)
+ ep->mpa_attr.p2p_type =
+ FW_RI_INIT_P2PTYPE_READ_REQ;
+ }
+ }
+ } else if (mpa->revision == 1)
+ if (peer2peer)
+ ep->mpa_attr.p2p_type = p2p_type;
+
PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
- "xmit_marker_enabled=%d, version=%d\n", __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
+ "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
+ "%d\n", __func__, ep->mpa_attr.crc_enabled,
+ ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type, p2p_type);
+
+ /*
+ * If responder's RTR does not match with that of initiator, assign
+ * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
+ * generated when moving QP to RTS state.
+ * A TERM message will be sent after QP has moved to RTS state
+ */
+ if ((ep->mpa_attr.version == 2) &&
+ (ep->mpa_attr.p2p_type != p2p_type)) {
+ ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
+ rtr_mismatch = 1;
+ }
attrs.mpa_attr = ep->mpa_attr;
attrs.max_ird = ep->ird;
@@ -961,6 +1133,39 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
ep->com.qp, mask, &attrs, 1);
if (err)
goto err;
+
+ /*
+ * If responder's RTR requirement did not match with what initiator
+ * supports, generate TERM message
+ */
+ if (rtr_mismatch) {
+ printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
+ attrs.layer_etype = LAYER_MPA | DDP_LLP;
+ attrs.ecode = MPA_NOMATCH_RTR;
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Generate TERM if initiator IRD is not sufficient for responder
+ * provided ORD. Currently, we behave the same way even when the
+ * responder-provided IRD is insufficient with respect to the
+ * initiator's ORD.
+ */
+ if (insuff_ird) {
+ printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
+ __func__);
+ attrs.layer_etype = LAYER_MPA | DDP_LLP;
+ attrs.ecode = MPA_INSUFF_IRD;
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ err = -ENOMEM;
+ goto out;
+ }
goto out;
err:
state_set(&ep->com, ABORTING);
@@ -973,6 +1178,7 @@ out:
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
+ struct mpa_v2_conn_params *mpa_v2_params;
u16 plen;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1013,7 +1219,9 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
/*
* Validate MPA Header.
*/
- if (mpa->revision != mpa_rev) {
+ if (mpa->revision > mpa_rev) {
+ printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
+ " Received = %d\n", __func__, mpa_rev, mpa->revision);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
@@ -1056,9 +1264,37 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
ep->mpa_attr.recv_marker_enabled = markers_enabled;
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
- FW_RI_INIT_P2PTYPE_DISABLED;
+ ep->mpa_attr.version = mpa->revision;
+ if (mpa->revision == 1)
+ ep->tried_with_mpa_v1 = 1;
+ ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
+
+ if (mpa->revision == 2) {
+ ep->mpa_attr.enhanced_rdma_conn =
+ mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
+ if (ep->mpa_attr.enhanced_rdma_conn) {
+ mpa_v2_params = (struct mpa_v2_conn_params *)
+ (ep->mpa_pkt + sizeof(*mpa));
+ ep->ird = ntohs(mpa_v2_params->ird) &
+ MPA_V2_IRD_ORD_MASK;
+ ep->ord = ntohs(mpa_v2_params->ord) &
+ MPA_V2_IRD_ORD_MASK;
+ if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
+ if (peer2peer) {
+ if (ntohs(mpa_v2_params->ord) &
+ MPA_V2_RDMA_WRITE_RTR)
+ ep->mpa_attr.p2p_type =
+ FW_RI_INIT_P2PTYPE_RDMA_WRITE;
+ else if (ntohs(mpa_v2_params->ord) &
+ MPA_V2_RDMA_READ_RTR)
+ ep->mpa_attr.p2p_type =
+ FW_RI_INIT_P2PTYPE_READ_REQ;
+ }
+ }
+ } else if (mpa->revision == 1)
+ if (peer2peer)
+ ep->mpa_attr.p2p_type = p2p_type;
+
PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
"xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
@@ -1550,6 +1786,112 @@ static int is_neg_adv_abort(unsigned int status)
status == CPL_ERR_PERSIST_NEG_ADVICE;
}
+static int c4iw_reconnect(struct c4iw_ep *ep)
+{
+ int err = 0;
+ struct rtable *rt;
+ struct net_device *pdev;
+ struct neighbour *neigh;
+ int step;
+
+ PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+ init_timer(&ep->timer);
+
+ /*
+ * Allocate an active TID to initiate a TCP connection.
+ */
+ ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
+ if (ep->atid == -1) {
+ printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ /* find a route */
+ rt = find_route(ep->com.dev,
+ ep->com.cm_id->local_addr.sin_addr.s_addr,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->com.cm_id->local_addr.sin_port,
+ ep->com.cm_id->remote_addr.sin_port, 0);
+ if (!rt) {
+ printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+ err = -EHOSTUNREACH;
+ goto fail3;
+ }
+ ep->dst = &rt->dst;
+
+ neigh = dst_get_neighbour(ep->dst);
+
+ /* get a l2t entry */
+ if (neigh->dev->flags & IFF_LOOPBACK) {
+ PDBG("%s LOOPBACK\n", __func__);
+ pdev = ip_dev_find(&init_net,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr);
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, pdev, 0);
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, neigh->dev, 0);
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(neigh->dev);
+ ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(neigh->dev) * step];
+ }
+ if (!ep->l2t) {
+ printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
+ err = -ENOMEM;
+ goto fail4;
+ }
+
+ PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
+
+ state_set(&ep->com, CONNECTING);
+ ep->tos = 0;
+
+ /* send connect request to rnic */
+ err = send_connect(ep);
+ if (!err)
+ goto out;
+
+ cxgb4_l2t_release(ep->l2t);
+fail4:
+ dst_release(ep->dst);
+fail3:
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ /*
+ * Remember to send a notification to the upper layer.
+ * The upper layer is not aware that this is a re-connect
+ * attempt, so it is still waiting for the response to the
+ * first connect request.
+ */
+ connect_reply_upcall(ep, -ECONNRESET);
+ c4iw_put_ep(&ep->com);
+out:
+ return err;
+}
+
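
c4iw_reconnect() above acquires its resources in order (active TID, route, L2T entry) and unwinds them in reverse through the fail4/fail3/fail2 labels, ending with the upcall the transparent retry still owes the upper layer. A generic user-space sketch of that goto-unwind idiom (the three resources are placeholders, not the driver's objects):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	void *tid, *route, *l2t;

	tid = malloc(16);		/* stands in for the active TID */
	if (!tid)
		goto fail1;
	route = malloc(16);		/* stands in for the route/dst */
	if (!route)
		goto fail2;
	l2t = malloc(16);		/* stands in for the L2T entry */
	if (!l2t)
		goto fail3;

	printf("connect request can be sent\n");
	/* the driver keeps these for the connection's lifetime;
	 * freed here only so the sketch does not leak */
	free(l2t);
	free(route);
	free(tid);
	return 0;

fail3:
	free(route);
fail2:
	free(tid);
fail1:
	fprintf(stderr, "reconnect failed, notifying the upper layer\n");
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}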
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -1573,8 +1915,11 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Wake up any threads in rdma_init() or rdma_fini().
+ * However, this is not needed if the com state is still
+ * MPA_REQ_SENT.
*/
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ if (ep->com.state != MPA_REQ_SENT)
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
@@ -1585,7 +1930,21 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case MPA_REQ_SENT:
stop_ep_timer(ep);
- connect_reply_upcall(ep, -ECONNRESET);
+ if (mpa_rev == 2 && ep->tried_with_mpa_v1)
+ connect_reply_upcall(ep, -ECONNRESET);
+ else {
+ /*
+ * Don't send a notification upwards because we want to
+ * retry with MPA v1 without the upper layers even knowing
+ * about it.
+ *
+ * Do some housekeeping so the connection can be
+ * re-initiated.
+ */
+ PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
+ mpa_rev);
+ ep->retry_with_mpa_v1 = 1;
+ }
break;
case MPA_REP_SENT:
break;
@@ -1621,7 +1980,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
dst_confirm(ep->dst);
if (ep->com.state != ABORTING) {
__state_set(&ep->com, DEAD);
- release = 1;
+ /* we don't release if we want to retry with mpa_v1 */
+ if (!ep->retry_with_mpa_v1)
+ release = 1;
}
mutex_unlock(&ep->com.mutex);
@@ -1641,6 +2002,15 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
out:
if (release)
release_ep_resources(ep);
+
+ /* retry with mpa-v1 */
+ if (ep && ep->retry_with_mpa_v1) {
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_reconnect(ep);
+ }
+
return 0;
}
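
peer_abort() above implements the transparent downgrade: when an MPA v2 attempt is aborted, no upcall is made, the endpoint is not released, its TID, route and L2T entry are dropped, and c4iw_reconnect() re-issues the request so the upper layer only ever sees the outcome of the final attempt. A user-space sketch of that retry shape (try_connect() is a made-up stand-in, not a driver function):

#include <stdio.h>

static int try_connect(int version)
{
	/* pretend the peer only speaks MPA version 1 */
	return version == 1 ? 0 : -1;
}

int main(void)
{
	int version = 2;
	int err = try_connect(version);

	if (err && version == 2) {
		/* analogue of retry_with_mpa_v1: re-issue the request
		 * without telling the caller about the failed attempt */
		fprintf(stderr, "v2 attempt aborted, retrying with v1\n");
		err = try_connect(1);
	}
	printf("connect %s\n", err ? "failed" : "established");
	return err ? 1 : 0;
}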
@@ -1792,18 +2162,40 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto err;
}
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = qp;
+ if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+ if (conn_param->ord > ep->ird) {
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+ send_mpa_reject(ep, conn_param->private_data,
+ conn_param->private_data_len);
+ abort_connection(ep, NULL, GFP_KERNEL);
+ err = -ENOMEM;
+ goto err;
+ }
+ if (conn_param->ird > ep->ord) {
+ if (!ep->ord)
+ conn_param->ird = 1;
+ else {
+ abort_connection(ep, NULL, GFP_KERNEL);
+ err = -ENOMEM;
+ goto err;
+ }
+ }
+ }
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
- if (peer2peer && ep->ird == 0)
- ep->ird = 1;
+ if (ep->mpa_attr.version != 2)
+ if (peer2peer && ep->ird == 0)
+ ep->ird = 1;
PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+ cm_id->add_ref(cm_id);
+ ep->com.cm_id = cm_id;
+ ep->com.qp = qp;
+
/* bind QP to EP and move to RTS */
attrs.mpa_attr = ep->mpa_attr;
attrs.max_ird = ep->ird;
@@ -1944,6 +2336,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.dev->rdev.lldi.nchan;
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(neigh->dev) * step];
+ ep->retry_with_mpa_v1 = 0;
+ ep->tried_with_mpa_v1 = 0;
}
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
@@ -2323,8 +2717,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Wake up any threads in rdma_init() or rdma_fini().
+ * However, this is not needed if the com state is still
+ * MPA_REQ_SENT.
*/
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ if (ep->com.state != MPA_REQ_SENT)
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
sched(dev, skb);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 1720dc790d1..f35a935267e 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -185,7 +185,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
V_CQE_OPCODE(FW_RI_SEND) |
V_CQE_TYPE(0) |
V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->rq.qid));
+ V_CQE_QPID(wq->sq.qid));
cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
@@ -818,6 +818,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
chp->cq.size--; /* status page */
chp->ibcq.cqe = entries - 2;
spin_lock_init(&chp->lock);
+ spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 40a13cc633a..6d0df6ec161 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -376,10 +376,8 @@ struct uld_ctx {
struct c4iw_dev *dev;
};
-static void c4iw_remove(struct uld_ctx *ctx)
+static void c4iw_dealloc(struct uld_ctx *ctx)
{
- PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
- c4iw_unregister_device(ctx->dev);
c4iw_rdev_close(&ctx->dev->rdev);
idr_destroy(&ctx->dev->cqidr);
idr_destroy(&ctx->dev->qpidr);
@@ -389,11 +387,30 @@ static void c4iw_remove(struct uld_ctx *ctx)
ctx->dev = NULL;
}
+static void c4iw_remove(struct uld_ctx *ctx)
+{
+ PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+ c4iw_unregister_device(ctx->dev);
+ c4iw_dealloc(ctx);
+}
+
+static int rdma_supported(const struct cxgb4_lld_info *infop)
+{
+ return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
+ infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
+ infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+}
+
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
struct c4iw_dev *devp;
int ret;
+ if (!rdma_supported(infop)) {
+ printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
+ pci_name(infop->pdev));
+ return ERR_PTR(-ENOSYS);
+ }
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -414,7 +431,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
ret = c4iw_rdev_open(&devp->rdev);
if (ret) {
- mutex_unlock(&dev_mutex);
printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(ret);
@@ -519,15 +535,24 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
case CXGB4_STATE_UP:
printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
if (!ctx->dev) {
- int ret = 0;
+ int ret;
ctx->dev = c4iw_alloc(&ctx->lldi);
- if (!IS_ERR(ctx->dev))
- ret = c4iw_register_device(ctx->dev);
- if (IS_ERR(ctx->dev) || ret)
+ if (IS_ERR(ctx->dev)) {
+ printk(KERN_ERR MOD
+ "%s: initialization failed: %ld\n",
+ pci_name(ctx->lldi.pdev),
+ PTR_ERR(ctx->dev));
+ ctx->dev = NULL;
+ break;
+ }
+ ret = c4iw_register_device(ctx->dev);
+ if (ret) {
printk(KERN_ERR MOD
"%s: RDMA registration failed: %d\n",
pci_name(ctx->lldi.pdev), ret);
+ c4iw_dealloc(ctx);
+ }
}
break;
case CXGB4_STATE_DOWN:
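
The device.c changes above add two guards: rdma_supported() refuses to bring up iWARP when any of the firmware STAG/PBL/RQ/QP/CQ/OCQ resource ranges is empty, and the CXGB4_STATE_UP handler now distinguishes an allocation failure (nothing to undo) from a registration failure (undone via c4iw_dealloc()). A user-space sketch of the resource-range guard, with a simplified stand-in for cxgb4_lld_info:

#include <stdbool.h>
#include <stdio.h>

struct vr_range { unsigned int start, size; };

struct resource_ranges {
	struct vr_range stag, pbl, rq, qp, cq, ocq;
};

static bool rdma_supported(const struct resource_ranges *vr)
{
	return vr->stag.size && vr->pbl.size && vr->rq.size &&
	       vr->qp.size && vr->cq.size && vr->ocq.size;
}

int main(void)
{
	/* the on-chip queue (ocq) range is empty, so RDMA is refused */
	struct resource_ranges vr = {
		{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }
	};

	printf("RDMA %ssupported\n", rdma_supported(&vr) ? "" : "not ");
	return 0;
}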
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c13041a0aeb..397cb36cf10 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -42,6 +42,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
{
struct ib_event event;
struct c4iw_qp_attributes attrs;
+ unsigned long flag;
if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
(qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
@@ -72,7 +73,9 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -183,11 +186,14 @@ out:
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
struct c4iw_cq *chp;
+ unsigned long flag;
chp = get_chp(dev, qid);
- if (chp)
+ if (chp) {
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- else
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ } else
PDBG("%s unknown cqid 0x%x\n", __func__, qid);
return 0;
}
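
The ev.c hunks take the new comp_handler_lock around every invocation of the CQ completion handler, so the callback can no longer run concurrently from the event path and the QP flush paths in qp.c. A user-space analogue of that guarded-callback pattern, with a pthread mutex standing in for spin_lock_irqsave() (names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct cq_ctx {
	pthread_mutex_t comp_handler_lock;
	void (*comp_handler)(void *cq_context);
	void *cq_context;
};

static void notify(struct cq_ctx *cq)
{
	pthread_mutex_lock(&cq->comp_handler_lock);
	cq->comp_handler(cq->cq_context);	/* callers are serialized here */
	pthread_mutex_unlock(&cq->comp_handler_lock);
}

static void handler(void *ctx)
{
	printf("completion on %s\n", (char *)ctx);
}

int main(void)
{
	struct cq_ctx cq = { PTHREAD_MUTEX_INITIALIZER, handler, "cq0" };

	notify(&cq);
	return 0;
}

The point is only that the handler body is serialized by a lock that lives next to the callback pointer, which is exactly what the new spinlock in struct c4iw_cq provides.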
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4f045375c8e..1357c5bf209 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -309,6 +309,7 @@ struct c4iw_cq {
struct c4iw_dev *rhp;
struct t4_cq cq;
spinlock_t lock;
+ spinlock_t comp_handler_lock;
atomic_t refcnt;
wait_queue_head_t wait;
};
@@ -323,6 +324,7 @@ struct c4iw_mpa_attributes {
u8 recv_marker_enabled;
u8 xmit_marker_enabled;
u8 crc_enabled;
+ u8 enhanced_rdma_conn;
u8 version;
u8 p2p_type;
};
@@ -349,6 +351,8 @@ struct c4iw_qp_attributes {
u8 is_terminate_local;
struct c4iw_mpa_attributes mpa_attr;
struct c4iw_ep *llp_stream_handle;
+ u8 layer_etype;
+ u8 ecode;
};
struct c4iw_qp {
@@ -501,11 +505,18 @@ enum c4iw_mmid_state {
#define MPA_KEY_REP "MPA ID Rep Frame"
#define MPA_MAX_PRIVATE_DATA 256
+#define MPA_ENHANCED_RDMA_CONN 0x10
#define MPA_REJECT 0x20
#define MPA_CRC 0x40
#define MPA_MARKERS 0x80
#define MPA_FLAGS_MASK 0xE0
+#define MPA_V2_PEER2PEER_MODEL 0x8000
+#define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000
+#define MPA_V2_RDMA_WRITE_RTR 0x8000
+#define MPA_V2_RDMA_READ_RTR 0x4000
+#define MPA_V2_IRD_ORD_MASK 0x3FFF
+
#define c4iw_put_ep(ep) { \
PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
ep, atomic_read(&((ep)->kref.refcount))); \
@@ -528,6 +539,11 @@ struct mpa_message {
u8 private_data[0];
};
+struct mpa_v2_conn_params {
+ __be16 ird;
+ __be16 ord;
+};
+
struct terminate_message {
u8 layer_etype;
u8 ecode;
@@ -580,7 +596,10 @@ enum c4iw_ddp_ecodes {
enum c4iw_mpa_ecodes {
MPA_CRC_ERR = 0x02,
- MPA_MARKER_ERR = 0x03
+ MPA_MARKER_ERR = 0x03,
+ MPA_LOCAL_CATA = 0x05,
+ MPA_INSUFF_IRD = 0x06,
+ MPA_NOMATCH_RTR = 0x07,
};
enum c4iw_ep_state {
@@ -651,6 +670,8 @@ struct c4iw_ep {
u16 txq_idx;
u16 ctrlq_idx;
u8 tos;
+ u8 retry_with_mpa_v1;
+ u8 tried_with_mpa_v1;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index a41578e48c7..5f940aeaab1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -29,6 +29,9 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#include <linux/module.h>
+
#include "iw_cxgb4.h"
static int ocqp_support = 1;
@@ -917,7 +920,11 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
term = (struct terminate_message *)wqe->u.terminate.termmsg;
- build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
+ if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
+ term->layer_etype = qhp->attr.layer_etype;
+ term->ecode = qhp->attr.ecode;
+ } else
+ build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
c4iw_ofld_send(&qhp->rhp->rdev, skb);
}
@@ -941,8 +948,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, flag);
@@ -952,13 +962,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
}
static void flush_qp(struct c4iw_qp *qhp)
{
struct c4iw_cq *rchp, *schp;
+ unsigned long flag;
rchp = get_chp(qhp->rhp, qhp->attr.rcq);
schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -966,8 +980,16 @@ static void flush_qp(struct c4iw_qp *qhp)
if (qhp->ibqp.uobject) {
t4_set_wq_in_error(&qhp->wq);
t4_set_cq_in_error(&rchp->cq);
- if (schp != rchp)
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ if (schp != rchp) {
t4_set_cq_in_error(&schp->cq);
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
return;
}
__flush_qp(qhp, rchp, schp);
@@ -1012,6 +1034,7 @@ out:
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
+ PDBG("%s p2p_type = %d\n", __func__, p2p_type);
memset(&init->u, 0, sizeof init->u);
switch (p2p_type) {
case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1206,12 +1229,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
disconnect = 1;
c4iw_get_ep(&qhp->ep->com);
}
+ if (qhp->ibqp.uobject)
+ t4_set_wq_in_error(&qhp->wq);
ret = rdma_fini(rhp, qhp, ep);
if (ret)
goto err;
break;
case C4IW_QP_STATE_TERMINATE:
set_state(qhp, C4IW_QP_STATE_TERMINATE);
+ qhp->attr.layer_etype = attrs->layer_etype;
+ qhp->attr.ecode = attrs->ecode;
if (qhp->ibqp.uobject)
t4_set_wq_in_error(&qhp->wq);
ep = qhp->ep;
@@ -1222,6 +1249,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
break;
case C4IW_QP_STATE_ERROR:
set_state(qhp, C4IW_QP_STATE_ERROR);
+ if (qhp->ibqp.uobject)
+ t4_set_wq_in_error(&qhp->wq);
if (!internal) {
abort = 1;
disconnect = 1;
@@ -1334,7 +1363,10 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
rhp = qhp->rhp;
attrs.next_state = C4IW_QP_STATE_ERROR;
- c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
+ c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ else
+ c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index d9b1bb40f48..818d721fc44 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -125,7 +125,7 @@ int ehca_create_eq(struct ehca_shca *shca,
tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
- IRQF_DISABLED, "ehca_eq",
+ 0, "ehca_eq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
@@ -133,7 +133,7 @@ int ehca_create_eq(struct ehca_shca *shca,
tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
- IRQF_DISABLED, "ehca_neq",
+ 0, "ehca_neq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 32fb34201ab..964f8552079 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -977,6 +977,9 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
struct hcp_modify_qp_control_block *mqpcb;
u64 hret, update_mask;
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC)
+ return ERR_PTR(-ENOSYS);
+
/* For common attributes, internal_create_qp() takes its info
* out of qp_init_attr, so copy all common attrs there.
*/
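
ehca_create_srq() above, like the ipath and mthca create_srq hunks further down, now rejects anything but IB_SRQT_BASIC with ERR_PTR(-ENOSYS), since these drivers do not implement XRC SRQs. A user-space sketch of the ERR_PTR/IS_ERR encoding that return style relies on (simplified versions of the linux/err.h helpers; IB_SRQT_BASIC is assumed to be 0 here):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors are encoded as the top MAX_ERRNO pointer values */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *create_srq(int srq_type)
{
	if (srq_type != 0 /* assumed IB_SRQT_BASIC */)
		return ERR_PTR(-ENOSYS);
	return "srq object";
}

int main(void)
{
	void *srq = create_srq(1 /* e.g. an XRC SRQ type */);

	if (IS_ERR(srq))
		printf("create_srq failed: %ld\n", PTR_ERR(srq));
	return 0;
}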
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index daef61d5e5b..714293b7851 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -45,6 +45,7 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <linux/export.h>
#include <asm/uaccess.h>
#include "ipath_kernel.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index be24ac72611..bfca37b2432 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 8697eca1435..736d9edbdbe 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -35,6 +35,7 @@
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/highmem.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 7c1eebe8c7c..49b09c697c7 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -33,7 +33,9 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/vmalloc.h>
#include "ipath_kernel.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 386e2c717c5..26271984b71 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -107,6 +107,11 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
u32 sz;
struct ib_srq *ret;
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ ret = ERR_PTR(-ENOSYS);
+ goto done;
+ }
+
if (srq_init_attr->attr.max_wr == 0) {
ret = ERR_PTR(-EINVAL);
goto done;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 8991677e9a0..75558f33f1c 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -32,6 +32,7 @@
*/
#include <linux/ctype.h>
+#include <linux/stat.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index cfed5399f07..dc66c450691 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -79,7 +79,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
goto bail_release;
}
- current->mm->locked_vm += num_pages;
+ current->mm->pinned_vm += num_pages;
ret = 0;
goto bail;
@@ -178,7 +178,7 @@ void ipath_release_user_pages(struct page **p, size_t num_pages)
__ipath_release_user_pages(p, num_pages, 1);
- current->mm->locked_vm -= num_pages;
+ current->mm->pinned_vm -= num_pages;
up_write(&current->mm->mmap_sem);
}
@@ -195,7 +195,7 @@ static void user_pages_account(struct work_struct *_work)
container_of(_work, struct ipath_user_pages_work, work);
down_write(&work->mm->mmap_sem);
- work->mm->locked_vm -= work->num_pages;
+ work->mm->pinned_vm -= work->num_pages;
up_write(&work->mm->mmap_sem);
mmput(work->mm);
kfree(work);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index dd7f26d04d4..439c35d4a66 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -35,6 +35,7 @@
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fa643f4f4e2..77f3dbc0aaa 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -128,6 +128,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
(dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
(dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
+ props->device_cap_flags |= IB_DEVICE_XRC;
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
@@ -181,8 +183,12 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props,
+ struct ib_smp *in_mad,
struct ib_smp *out_mad)
{
+ int ext_active_speed;
+ int err;
+
props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
@@ -203,6 +209,39 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = out_mad->data[41] >> 4;
+ /* Check if extended speeds (EDR/FDR/...) are supported */
+ if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
+ ext_active_speed = out_mad->data[62] >> 4;
+
+ switch (ext_active_speed) {
+ case 1:
+ props->active_speed = 16; /* FDR */
+ break;
+ case 2:
+ props->active_speed = 32; /* EDR */
+ break;
+ }
+ }
+
+ /* If reported active speed is QDR, check if is FDR-10 */
+ if (props->active_speed == 4) {
+ if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] &
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
+ init_query_mad(in_mad);
+ in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
+ NULL, NULL, in_mad, out_mad);
+ if (err)
+ return err;
+
+ /* Checking LinkSpeedActive for FDR-10 */
+ if (out_mad->data[15] & 0x1)
+ props->active_speed = 8;
+ }
+ }
+
return 0;
}
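
The ib_link_query_port() hunk above fills props->active_speed from the extended-speed field (16 for FDR, 32 for EDR) and, when a QDR value of 4 is reported, queries the ExtendedPortInfo MAD to detect FDR-10 and rewrites the speed to 8. A small sketch of the resulting encoding; the codes 4/8/16/32 appear in the hunk itself, while 1 (SDR) and 2 (DDR) are the standard values added here for completeness:

#include <stdio.h>

static const char *speed_name(int active_speed)
{
	switch (active_speed) {
	case 1:  return "SDR";
	case 2:  return "DDR";
	case 4:  return "QDR";
	case 8:  return "FDR-10";
	case 16: return "FDR";
	case 32: return "EDR";
	default: return "unknown";
	}
}

int main(void)
{
	int codes[] = { 4, 8, 16, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("active_speed=%d -> %s\n", codes[i], speed_name(codes[i]));
	return 0;
}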
@@ -227,7 +266,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
- props->max_mtu = IB_MTU_2048;
+ props->max_mtu = IB_MTU_4096;
props->subnet_timeout = 0;
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = 0;
@@ -274,7 +313,7 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
goto out;
err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
- ib_link_query_port(ibdev, port, props, out_mad) :
+ ib_link_query_port(ibdev, port, props, in_mad, out_mad) :
eth_link_query_port(ibdev, port, props, out_mad);
out:
@@ -566,6 +605,57 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
return 0;
}
+static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_xrcd *xrcd;
+ int err;
+
+ if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+ return ERR_PTR(-ENOSYS);
+
+ xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
+ if (!xrcd)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
+ if (err)
+ goto err1;
+
+ xrcd->pd = ib_alloc_pd(ibdev);
+ if (IS_ERR(xrcd->pd)) {
+ err = PTR_ERR(xrcd->pd);
+ goto err2;
+ }
+
+ xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
+ if (IS_ERR(xrcd->cq)) {
+ err = PTR_ERR(xrcd->cq);
+ goto err3;
+ }
+
+ return &xrcd->ibxrcd;
+
+err3:
+ ib_dealloc_pd(xrcd->pd);
+err2:
+ mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
+err1:
+ kfree(xrcd);
+ return ERR_PTR(err);
+}
+
+static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+{
+ ib_destroy_cq(to_mxrcd(xrcd)->cq);
+ ib_dealloc_pd(to_mxrcd(xrcd)->pd);
+ mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
+ kfree(xrcd);
+
+ return 0;
+}
+
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
@@ -1044,7 +1134,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
+ (1ull << IB_USER_VERBS_CMD_OPEN_QP);
ibdev->ib_dev.query_device = mlx4_ib_query_device;
ibdev->ib_dev.query_port = mlx4_ib_query_port;
@@ -1093,6 +1185,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
+ ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
+ ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
+ ibdev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
+ (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
+ }
+
spin_lock_init(&iboe->lock);
if (init_node_data(ibdev))
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e4bf2cff866..ed80345c99a 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -56,6 +56,13 @@ struct mlx4_ib_pd {
u32 pdn;
};
+struct mlx4_ib_xrcd {
+ struct ib_xrcd ibxrcd;
+ u32 xrcdn;
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+};
+
struct mlx4_ib_cq_buf {
struct mlx4_buf buf;
struct mlx4_mtt mtt;
@@ -138,6 +145,7 @@ struct mlx4_ib_qp {
struct mlx4_mtt mtt;
int buf_size;
struct mutex mutex;
+ u16 xrcdn;
u32 flags;
u8 port;
u8 alt_port;
@@ -211,6 +219,11 @@ static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}
+static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
+}
+
static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct mlx4_ib_cq, ibcq);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 3a91d9d8dc5..a16f0c8e6f3 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -302,15 +302,14 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
}
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
- int is_user, int has_srq, struct mlx4_ib_qp *qp)
+ int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
/* Sanity check RQ size before proceeding */
if (cap->max_recv_wr > dev->dev->caps.max_wqes ||
cap->max_recv_sge > dev->dev->caps.max_rq_sg)
return -EINVAL;
- if (has_srq) {
- /* QPs attached to an SRQ should have no RQ */
+ if (!has_rq) {
if (cap->max_recv_wr)
return -EINVAL;
@@ -463,6 +462,14 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
return 0;
}
+static int qp_has_rq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
+ return 0;
+
+ return !attr->srq;
+}
+
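
qp_has_rq() above replaces the old has_srq test: a QP owns no receive queue when it is an XRC initiator or target QP, or when it is attached to an SRQ, and the later hunks use it to decide whether a doorbell record is needed at all. A user-space sketch of the same predicate together with the sanity check set_rq_size() applies (the enum values are stand-ins, not the real ib_verbs.h ones):

#include <stdbool.h>
#include <stdio.h>

/* stand-in QP types; the kernel uses enum ib_qp_type from ib_verbs.h */
enum qp_type { QPT_RC, QPT_UD, QPT_XRC_INI, QPT_XRC_TGT };

struct qp_init_attr {
	enum qp_type qp_type;
	void *srq;		/* non-NULL when attached to an SRQ */
	int max_recv_wr;
};

static bool qp_has_rq(const struct qp_init_attr *attr)
{
	if (attr->qp_type == QPT_XRC_INI || attr->qp_type == QPT_XRC_TGT)
		return false;
	return attr->srq == NULL;
}

int main(void)
{
	struct qp_init_attr xrc_tgt = { QPT_XRC_TGT, NULL, 16 };

	/* analogue of the set_rq_size() sanity check in the hunk above */
	if (!qp_has_rq(&xrc_tgt) && xrc_tgt.max_recv_wr)
		printf("invalid: an RQ-less QP must not request recv WRs\n");
	return 0;
}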
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
@@ -479,7 +486,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
- err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
+ err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
if (err)
goto err;
@@ -513,7 +520,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_mtt;
- if (!init_attr->srq) {
+ if (qp_has_rq(init_attr)) {
err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
ucmd.db_addr, &qp->db);
if (err)
@@ -532,7 +539,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err;
- if (!init_attr->srq) {
+ if (qp_has_rq(init_attr)) {
err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
@@ -575,6 +582,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_qpn;
+ if (init_attr->qp_type == IB_QPT_XRC_TGT)
+ qp->mqp.qpn |= (1 << 23);
+
/*
* Hardware wants QPN written in big-endian order (after
* shifting) for send doorbell. Precompute this value to save
@@ -592,9 +602,8 @@ err_qpn:
err_wrid:
if (pd->uobject) {
- if (!init_attr->srq)
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
- &qp->db);
+ if (qp_has_rq(init_attr))
+ mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
} else {
kfree(qp->sq.wrid);
kfree(qp->rq.wrid);
@@ -610,7 +619,7 @@ err_buf:
mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
err_db:
- if (!pd->uobject && !init_attr->srq)
+ if (!pd->uobject && qp_has_rq(init_attr))
mlx4_db_free(dev->dev, &qp->db);
err:
@@ -671,6 +680,33 @@ static void del_gid_entries(struct mlx4_ib_qp *qp)
}
}
+static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
+{
+ if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
+ return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
+ else
+ return to_mpd(qp->ibqp.pd);
+}
+
+static void get_cqs(struct mlx4_ib_qp *qp,
+ struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
+{
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_XRC_TGT:
+ *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
+ *recv_cq = *send_cq;
+ break;
+ case IB_QPT_XRC_INI:
+ *send_cq = to_mcq(qp->ibqp.send_cq);
+ *recv_cq = *send_cq;
+ break;
+ default:
+ *send_cq = to_mcq(qp->ibqp.send_cq);
+ *recv_cq = to_mcq(qp->ibqp.recv_cq);
+ break;
+ }
+}
+
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
int is_user)
{
@@ -682,8 +718,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
qp->mqp.qpn);
- send_cq = to_mcq(qp->ibqp.send_cq);
- recv_cq = to_mcq(qp->ibqp.recv_cq);
+ get_cqs(qp, &send_cq, &recv_cq);
mlx4_ib_lock_cqs(send_cq, recv_cq);
@@ -706,7 +741,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
if (is_user) {
- if (!qp->ibqp.srq)
+ if (qp->rq.wqe_cnt)
mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
&qp->db);
ib_umem_release(qp->umem);
@@ -714,7 +749,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
kfree(qp->sq.wrid);
kfree(qp->rq.wrid);
mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
- if (!qp->ibqp.srq)
+ if (qp->rq.wqe_cnt)
mlx4_db_free(dev->dev, &qp->db);
}
@@ -725,10 +760,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
struct mlx4_ib_sqp *sqp;
struct mlx4_ib_qp *qp;
int err;
+ u16 xrcdn = 0;
/*
* We only support LSO and multicast loopback blocking, and
@@ -739,10 +774,20 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
if (init_attr->create_flags &&
- (pd->uobject || init_attr->qp_type != IB_QPT_UD))
+ (udata || init_attr->qp_type != IB_QPT_UD))
return ERR_PTR(-EINVAL);
switch (init_attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ pd = to_mxrcd(init_attr->xrcd)->pd;
+ xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
+ init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
+ /* fall through */
+ case IB_QPT_XRC_INI:
+ if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+ return ERR_PTR(-ENOSYS);
+ init_attr->recv_cq = init_attr->send_cq;
+ /* fall through */
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
@@ -751,13 +796,14 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
if (!qp)
return ERR_PTR(-ENOMEM);
- err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
+ err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
}
qp->ibqp.qp_num = qp->mqp.qpn;
+ qp->xrcdn = xrcdn;
break;
}
@@ -765,7 +811,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_GSI:
{
/* Userspace is not allowed to create special QPs: */
- if (pd->uobject)
+ if (udata)
return ERR_PTR(-EINVAL);
sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
@@ -774,8 +820,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
qp = &sqp->qp;
- err = create_qp_common(dev, pd, init_attr, udata,
- dev->dev->caps.sqp_start +
+ err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
+ to_mdev(pd->device)->dev->caps.sqp_start +
(init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
init_attr->port_num - 1,
qp);
@@ -801,11 +847,13 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
struct mlx4_ib_dev *dev = to_mdev(qp->device);
struct mlx4_ib_qp *mqp = to_mqp(qp);
+ struct mlx4_ib_pd *pd;
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
- destroy_qp_common(dev, mqp, !!qp->pd->uobject);
+ pd = get_pd(mqp);
+ destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
if (is_sqp(dev, mqp))
kfree(to_msqp(mqp));
@@ -821,6 +869,8 @@ static int to_mlx4_st(enum ib_qp_type type)
case IB_QPT_RC: return MLX4_QP_ST_RC;
case IB_QPT_UC: return MLX4_QP_ST_UC;
case IB_QPT_UD: return MLX4_QP_ST_UD;
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC;
case IB_QPT_SMI:
case IB_QPT_GSI: return MLX4_QP_ST_MLX;
default: return -1;
@@ -959,6 +1009,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
{
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct mlx4_ib_pd *pd;
+ struct mlx4_ib_cq *send_cq, *recv_cq;
struct mlx4_qp_context *context;
enum mlx4_qp_optpar optpar = 0;
int sqd_event;
@@ -1014,8 +1066,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
context->sq_size_stride |= qp->sq.wqe_shift - 4;
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
+ context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+ }
if (qp->ibqp.uobject)
context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
@@ -1079,8 +1133,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
}
- context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
- context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
+ pd = get_pd(qp);
+ get_cqs(qp, &send_cq, &recv_cq);
+ context->pd = cpu_to_be32(pd->pdn);
+ context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
+ context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
+ context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
/* Set "fast registration enabled" for all kernel QPs */
if (!qp->ibqp.uobject)
@@ -1106,8 +1164,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_SQ_PSN)
context->next_send_psn = cpu_to_be32(attr->sq_psn);
- context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);
-
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
if (attr->max_dest_rd_atomic)
context->params2 |=
@@ -1130,8 +1186,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_RQ_PSN)
context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
- context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);
-
if (attr_mask & IB_QP_QKEY) {
context->qkey = cpu_to_be32(attr->qkey);
optpar |= MLX4_QP_OPTPAR_Q_KEY;
@@ -1140,7 +1194,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (ibqp->srq)
context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
- if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->db_rec_addr = cpu_to_be64(qp->db.dma);
if (cur_state == IB_QPS_INIT &&
@@ -1225,17 +1279,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
+ mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
ibqp->srq ? to_msrq(ibqp->srq): NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);
+ if (send_cq != recv_cq)
+ mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
qp->rq.head = 0;
qp->rq.tail = 0;
qp->sq.head = 0;
qp->sq.tail = 0;
qp->sq_next_wqe = 0;
- if (!ibqp->srq)
+ if (qp->rq.wqe_cnt)
*qp->db.db = 0;
}
@@ -1547,14 +1601,13 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr, __be16 *vlan)
+ struct ib_send_wr *wr)
{
memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
- *vlan = dseg->vlan;
}
static void set_mlx_icrc_seg(void *dseg)
@@ -1657,7 +1710,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 uninitialized_var(lso_hdr_sz);
__be32 blh;
int i;
- __be16 vlan = cpu_to_be16(0xffff);
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1761,7 +1813,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_QPT_UD:
- set_datagram_seg(wqe, wr, &vlan);
+ set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
@@ -1824,11 +1876,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
MLX4_WQE_CTRL_FENCE : 0) | size;
- if (be16_to_cpu(vlan) < 0x1000) {
- ctrl->ins_vlan = 1 << 6;
- ctrl->vlan_tag = vlan;
- }
-
/*
* Make sure descriptor is fully written before
* setting ownership bit (because HW can start
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 818b7ecace5..39542f3703b 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -76,6 +76,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
struct mlx4_ib_srq *srq;
struct mlx4_wqe_srq_next_seg *next;
struct mlx4_wqe_data_seg *scatter;
+ u32 cqn;
+ u16 xrcdn;
int desc_size;
int buf_size;
int err;
@@ -174,12 +176,18 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
}
}
- err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
+ cqn = (init_attr->srq_type == IB_SRQT_XRC) ?
+ to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;
+ xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
+ to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
+ (u16) dev->dev->caps.reserved_xrcds;
+ err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
srq->db.dma, &srq->msrq);
if (err)
goto err_wrid;
srq->msrq.event = mlx4_ib_srq_event;
+ srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
if (pd->uobject)
if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index e4a08c2819e..712d2a30fbe 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -31,6 +31,7 @@
*/
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 3082b3b3d62..9d3e5c1ac60 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index ab876f928a1..ed9a989e501 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -146,7 +146,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
GFP_KERNEL);
- buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
+ buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
if (!buddy->bits || !buddy->num_free)
goto err_out;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 365fe0e1419..5b71d43bd89 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -40,7 +40,9 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/mm.h>
+#include <linux/export.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
@@ -438,6 +440,9 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
struct mthca_srq *srq;
int err;
+ if (init_attr->srq_type != IB_SRQT_BASIC)
+ return ERR_PTR(-ENOSYS);
+
srq = kmalloc(sizeof *srq, GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/nes/Makefile b/drivers/infiniband/hw/nes/Makefile
index 35148513c47..97820c23ece 100644
--- a/drivers/infiniband/hw/nes/Makefile
+++ b/drivers/infiniband/hw/nes/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o
-iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o
+iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o nes_mgt.o
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 2d668c69f6d..5965b3df8f2 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -84,7 +84,7 @@ module_param(send_first, int, 0644);
MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
-unsigned int nes_drv_opt = 0;
+unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU;
module_param(nes_drv_opt, int, 0644);
MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
@@ -130,9 +130,6 @@ static struct notifier_block nes_net_notifier = {
.notifier_call = nes_net_event
};
-
-
-
/**
* nes_inetaddr_event
*/
@@ -321,6 +318,9 @@ void nes_rem_ref(struct ib_qp *ibqp)
}
if (atomic_dec_and_test(&nesqp->refcount)) {
+ if (nesqp->pau_mode)
+ nes_destroy_pau_qp(nesdev, nesqp);
+
/* Destroy the QP */
cqp_request = nes_get_cqp_request(nesdev);
if (cqp_request == NULL) {
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 6fe79876009..568b4f11380 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -102,6 +102,7 @@
#define NES_DRV_OPT_NO_INLINE_DATA 0x00000080
#define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100
#define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+#define NES_DRV_OPT_ENABLE_PAU 0x00000400
#define NES_AEQ_EVENT_TIMEOUT 2500
#define NES_DISCONNECT_EVENT_TIMEOUT 2000
@@ -128,6 +129,7 @@
#define NES_DBG_IW_RX 0x00020000
#define NES_DBG_IW_TX 0x00040000
#define NES_DBG_SHUTDOWN 0x00080000
+#define NES_DBG_PAU 0x00100000
#define NES_DBG_RSVD1 0x10000000
#define NES_DBG_RSVD2 0x20000000
#define NES_DBG_RSVD3 0x40000000
@@ -162,6 +164,7 @@ do { \
#include "nes_context.h"
#include "nes_user.h"
#include "nes_cm.h"
+#include "nes_mgt.h"
extern int max_mtu;
#define max_frame_len (max_mtu+ETH_HLEN)
@@ -202,6 +205,8 @@ extern atomic_t cm_nodes_created;
extern atomic_t cm_nodes_destroyed;
extern atomic_t cm_accel_dropped_pkts;
extern atomic_t cm_resets_recvd;
+extern atomic_t pau_qps_created;
+extern atomic_t pau_qps_destroyed;
extern u32 int_mod_timer_init;
extern u32 int_mod_cq_depth_256;
@@ -273,6 +278,14 @@ struct nes_device {
u8 link_recheck;
};
+/* Receive skb private area - must fit in skb->cb area */
+struct nes_rskb_cb {
+ u64 busaddr;
+ u32 maplen;
+ u32 seqnum;
+ u8 *data_start;
+ struct nes_qp *nesqp;
+};
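
The comment above says the private area must fit in skb->cb; that control buffer is a 48-byte scratch area in struct sk_buff, so a compile-time size check is the usual way to keep such a struct honest. A user-space sketch with _Static_assert (the check is illustrative, not part of the patch, and the pointer members are simplified):

#include <stdint.h>
#include <stdio.h>

#define SKB_CB_SIZE 48	/* sizeof(((struct sk_buff *)0)->cb) in the kernel */

struct nes_rskb_cb {
	uint64_t busaddr;
	uint32_t maplen;
	uint32_t seqnum;
	uint8_t *data_start;
	void *nesqp;		/* struct nes_qp * in the driver */
};

/* fails to compile if the private area ever outgrows skb->cb */
_Static_assert(sizeof(struct nes_rskb_cb) <= SKB_CB_SIZE,
	       "nes_rskb_cb must fit in skb->cb");

int main(void)
{
	printf("nes_rskb_cb uses %zu of %d bytes\n",
	       sizeof(struct nes_rskb_cb), SKB_CB_SIZE);
	return 0;
}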
static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad)
{
@@ -305,8 +318,8 @@ set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value)
static inline void
nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev)
{
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_CTX_LOW_IDX,
- (u64)((unsigned long) &nesdev->cqp));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c118663e443..dfce9ea98a3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -77,26 +77,19 @@ atomic_t cm_nodes_destroyed;
atomic_t cm_accel_dropped_pkts;
atomic_t cm_resets_recvd;
-static inline int mini_cm_accelerated(struct nes_cm_core *,
- struct nes_cm_node *);
-static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
- struct nes_vnic *, struct nes_cm_info *);
+static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
-static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
- struct nes_vnic *, u16, void *, struct nes_cm_info *);
+static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *, struct nes_vnic *, u16, void *, struct nes_cm_info *);
static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
-static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *,
- struct nes_cm_node *);
-static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *,
- struct nes_cm_node *);
-static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
- struct sk_buff *);
+static int mini_cm_accept(struct nes_cm_core *, struct nes_cm_node *);
+static int mini_cm_reject(struct nes_cm_core *, struct nes_cm_node *);
+static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
static int mini_cm_dealloc_core(struct nes_cm_core *);
static int mini_cm_get(struct nes_cm_core *);
static int mini_cm_set(struct nes_cm_core *, u32, u32);
-static void form_cm_frame(struct sk_buff *, struct nes_cm_node *,
- void *, u32, void *, u32, u8);
+static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, void *, u32, void *, u32, u8);
static int add_ref_cm_node(struct nes_cm_node *);
static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
@@ -111,16 +104,14 @@ static int send_syn(struct nes_cm_node *, u32, struct sk_buff *);
static int send_reset(struct nes_cm_node *, struct sk_buff *);
static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb);
static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);
-static void process_packet(struct nes_cm_node *, struct sk_buff *,
- struct nes_cm_core *);
+static void process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
static void active_open_err(struct nes_cm_node *, struct sk_buff *, int);
static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int);
static void cleanup_retrans_entry(struct nes_cm_node *);
static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *);
static void free_retrans_entry(struct nes_cm_node *cm_node);
-static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
- struct sk_buff *skb, int optionsize, int passive);
+static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, struct sk_buff *skb, int optionsize, int passive);
/* CM event handler functions */
static void cm_event_connected(struct nes_cm_event *);
@@ -130,6 +121,12 @@ static void cm_event_mpa_req(struct nes_cm_event *);
static void cm_event_mpa_reject(struct nes_cm_event *);
static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node);
+/* MPA build functions */
+static int cm_build_mpa_frame(struct nes_cm_node *, u8 **, u16 *, u8 *, u8);
+static void build_mpa_v2(struct nes_cm_node *, void *, u8);
+static void build_mpa_v1(struct nes_cm_node *, void *, u8);
+static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **);
+
static void print_core(struct nes_cm_core *core);
/* External CM API Interface */
@@ -159,12 +156,21 @@ atomic_t cm_connecteds;
atomic_t cm_connect_reqs;
atomic_t cm_rejects;
+int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
+{
+ return add_ref_cm_node(cm_node);
+}
+
+int nes_rem_ref_cm_node(struct nes_cm_node *cm_node)
+{
+ return rem_ref_cm_node(cm_node->cm_core, cm_node);
+}
/**
* create_event
*/
-static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
- enum nes_cm_event_type type)
+static struct nes_cm_event *create_event(struct nes_cm_node * cm_node,
+ enum nes_cm_event_type type)
{
struct nes_cm_event *event;
@@ -186,10 +192,10 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
event->cm_info.cm_id = cm_node->cm_id;
nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, "
- "dst_addr=%08x[%x], src_addr=%08x[%x]\n",
- cm_node, event, type, event->cm_info.loc_addr,
- event->cm_info.loc_port, event->cm_info.rem_addr,
- event->cm_info.rem_port);
+ "dst_addr=%08x[%x], src_addr=%08x[%x]\n",
+ cm_node, event, type, event->cm_info.loc_addr,
+ event->cm_info.loc_port, event->cm_info.rem_addr,
+ event->cm_info.rem_port);
nes_cm_post_event(event);
return event;
@@ -201,14 +207,19 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
*/
static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
+ u8 start_addr = 0;
+ u8 *start_ptr = &start_addr;
+ u8 **start_buff = &start_ptr;
+ u16 buff_len = 0;
+
if (!skb) {
nes_debug(NES_DBG_CM, "skb set to NULL\n");
return -1;
}
/* send an MPA Request frame */
- form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
- cm_node->mpa_frame_size, SET_ACK);
+ cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REQUEST);
+ form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK);
return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
}
@@ -217,7 +228,11 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
static int send_mpa_reject(struct nes_cm_node *cm_node)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb = NULL;
+ u8 start_addr = 0;
+ u8 *start_ptr = &start_addr;
+ u8 **start_buff = &start_ptr;
+ u16 buff_len = 0;
skb = dev_alloc_skb(MAX_CM_BUFFER);
if (!skb) {
@@ -226,8 +241,8 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
}
/* send an MPA reject frame */
- form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
- cm_node->mpa_frame_size, SET_ACK | SET_FIN);
+ cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY);
+ form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN);
cm_node->state = NES_CM_STATE_FIN_WAIT1;
return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
@@ -239,24 +254,31 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
* IETF MPA frame
*/
static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
- u32 len)
+ u32 len)
{
- struct ietf_mpa_frame *mpa_frame;
+ struct ietf_mpa_v1 *mpa_frame;
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ struct ietf_rtr_msg *rtr_msg;
+ int mpa_hdr_len;
+ int priv_data_len;
*type = NES_MPA_REQUEST_ACCEPT;
/* assume req frame is in tcp data payload */
- if (len < sizeof(struct ietf_mpa_frame)) {
+ if (len < sizeof(struct ietf_mpa_v1)) {
nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
return -EINVAL;
}
- mpa_frame = (struct ietf_mpa_frame *)buffer;
- cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
+ /* points to the beginning of the frame, which could be MPA V1 or V2 */
+ mpa_frame = (struct ietf_mpa_v1 *)buffer;
+ mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ priv_data_len = ntohs(mpa_frame->priv_data_len);
+
/* make sure mpa private data len is less than 512 bytes */
- if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) {
+ if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
nes_debug(NES_DBG_CM, "The received Length of Private"
- " Data field exceeds 512 octets\n");
+ " Data field exceeds 512 octets\n");
return -EINVAL;
}
/*
@@ -264,11 +286,22 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
* received MPA version and MPA key information
*
*/
- if (mpa_frame->rev != mpa_version) {
+ if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+ nes_debug(NES_DBG_CM, "The received mpa version"
+ " is not supported\n");
+ return -EINVAL;
+ }
+ /*
+ * backwards compatibility only
+ */
+ if (mpa_frame->rev > cm_node->mpa_frame_rev) {
nes_debug(NES_DBG_CM, "The received mpa version"
- " can not be interoperated\n");
+ " can not be interoperated\n");
return -EINVAL;
+ } else {
+ cm_node->mpa_frame_rev = mpa_frame->rev;
}
+
if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
@@ -281,25 +314,75 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
}
}
- if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
+
+ if (priv_data_len + mpa_hdr_len != len) {
nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
- " complete (%x + %x != %x)\n",
- cm_node->mpa_frame_size,
- (u32)sizeof(struct ietf_mpa_frame), len);
+ " complete (%x + %x != %x)\n",
+ priv_data_len, mpa_hdr_len, len);
return -EINVAL;
}
/* make sure it does not exceed the max size */
if (len > MAX_CM_BUFFER) {
nes_debug(NES_DBG_CM, "The received ietf buffer was too large"
- " (%x + %x != %x)\n",
- cm_node->mpa_frame_size,
- (u32)sizeof(struct ietf_mpa_frame), len);
+ " (%x + %x != %x)\n",
+ priv_data_len, mpa_hdr_len, len);
return -EINVAL;
}
+ cm_node->mpa_frame_size = priv_data_len;
+
+ switch (mpa_frame->rev) {
+ case IETF_MPA_V2: {
+ u16 ird_size;
+ u16 ord_size;
+ mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
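+ /* in v2 the RTR message is counted in priv_data_len, so grow the header and shrink the private data accordingly */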
+ mpa_hdr_len += IETF_RTR_MSG_SIZE;
+ cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
+ rtr_msg = &mpa_v2_frame->rtr_msg;
+
+ /* parse rtr message */
+ rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+ rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
+
+ if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) {
+ /* send reset */
+ return -EINVAL;
+ }
+
+ if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
+ /* responder */
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+ } else {
+ /* initiator */
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+
+ if (cm_node->ird_size < ord_size) {
+ /* no resources available */
+ /* send terminate message */
+ return -EINVAL;
+ }
+ }
+
+ if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) {
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ } else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) {
+ cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+ } else { /* unsupported RDMA0 operation */
+ return -EINVAL;
+ }
+ break;
+ }
+ case IETF_MPA_V1:
+ default:
+ break;
+ }
+
/* copy entire MPA frame to our cm_node's frame */
- memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
- cm_node->mpa_frame_size);
+ memcpy(cm_node->mpa_frame_buf, buffer + mpa_hdr_len, cm_node->mpa_frame_size);
if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
*type = NES_MPA_REQUEST_REJECT;
@@ -312,8 +395,8 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
* node info to build.
*/
static void form_cm_frame(struct sk_buff *skb,
- struct nes_cm_node *cm_node, void *options, u32 optionsize,
- void *data, u32 datasize, u8 flags)
+ struct nes_cm_node *cm_node, void *options, u32 optionsize,
+ void *data, u32 datasize, u8 flags)
{
struct tcphdr *tcph;
struct iphdr *iph;
@@ -322,14 +405,14 @@ static void form_cm_frame(struct sk_buff *skb,
u16 packetsize = sizeof(*iph);
packetsize += sizeof(*tcph);
- packetsize += optionsize + datasize;
+ packetsize += optionsize + datasize;
+ skb_trim(skb, 0);
memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph));
- skb->len = 0;
buf = skb_put(skb, packetsize + ETH_HLEN);
- ethh = (struct ethhdr *) buf;
+ ethh = (struct ethhdr *)buf;
buf += ETH_HLEN;
iph = (struct iphdr *)buf;
@@ -337,7 +420,7 @@ static void form_cm_frame(struct sk_buff *skb,
tcph = (struct tcphdr *)buf;
skb_reset_mac_header(skb);
skb_set_network_header(skb, ETH_HLEN);
- skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph));
+ skb_set_transport_header(skb, ETH_HLEN + sizeof(*iph));
buf += sizeof(*tcph);
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -350,14 +433,14 @@ static void form_cm_frame(struct sk_buff *skb,
ethh->h_proto = htons(0x0800);
iph->version = IPVERSION;
- iph->ihl = 5; /* 5 * 4Byte words, IP headr len */
+ iph->ihl = 5; /* 5 * 4-byte words, IP header len */
iph->tos = 0;
iph->tot_len = htons(packetsize);
iph->id = htons(++cm_node->tcp_cntxt.loc_id);
iph->frag_off = htons(0x4000);
iph->ttl = 0x40;
- iph->protocol = 0x06; /* IPPROTO_TCP */
+ iph->protocol = 0x06; /* IPPROTO_TCP */
iph->saddr = htonl(cm_node->loc_addr);
iph->daddr = htonl(cm_node->rem_addr);
@@ -370,14 +453,16 @@ static void form_cm_frame(struct sk_buff *skb,
cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
tcph->ack = 1;
- } else
+ } else {
tcph->ack_seq = 0;
+ }
if (flags & SET_SYN) {
cm_node->tcp_cntxt.loc_seq_num++;
tcph->syn = 1;
- } else
+ } else {
cm_node->tcp_cntxt.loc_seq_num += datasize;
+ }
if (flags & SET_FIN) {
cm_node->tcp_cntxt.loc_seq_num++;
@@ -398,10 +483,8 @@ static void form_cm_frame(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags = 0;
cm_packets_created++;
-
}
-
/**
* print_core - dump a cm core
*/
@@ -413,7 +496,7 @@ static void print_core(struct nes_cm_core *core)
return;
nes_debug(NES_DBG_CM, "---------------------------------------------\n");
- nes_debug(NES_DBG_CM, "State : %u \n", core->state);
+ nes_debug(NES_DBG_CM, "State : %u \n", core->state);
nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt));
nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt));
@@ -423,6 +506,147 @@ static void print_core(struct nes_cm_core *core)
nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
}
+/**
+ * cm_build_mpa_frame - build an MPA V1 frame or MPA V2 frame
+ */
+static int cm_build_mpa_frame(struct nes_cm_node *cm_node, u8 **start_buff,
+ u16 *buff_len, u8 *pci_mem, u8 mpa_key)
+{
+ int ret = 0;
+
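+ /* build into the caller-supplied DMA buffer when one is given (accept path), else into the node's own frame buffer */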
+ *start_buff = (pci_mem) ? pci_mem : &cm_node->mpa_frame_buf[0];
+
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V1:
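+ /* v1: build past the space reserved for the RTR message so the header still ends where the private data was placed */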
+ *start_buff = (u8 *)*start_buff + sizeof(struct ietf_rtr_msg);
+ *buff_len = sizeof(struct ietf_mpa_v1) + cm_node->mpa_frame_size;
+ build_mpa_v1(cm_node, *start_buff, mpa_key);
+ break;
+ case IETF_MPA_V2:
+ *buff_len = sizeof(struct ietf_mpa_v2) + cm_node->mpa_frame_size;
+ build_mpa_v2(cm_node, *start_buff, mpa_key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * build_mpa_v2 - build an MPA V2 frame
+ */
+static void build_mpa_v2(struct nes_cm_node *cm_node,
+ void *start_addr, u8 mpa_key)
+{
+ struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
+ struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+
+ /* initialize the upper 5 bytes of the frame */
+ build_mpa_v1(cm_node, start_addr, mpa_key);
+ mpa_frame->flags |= IETF_MPA_V2_FLAG; /* set a bit to indicate MPA V2 */
+ mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
+
+ /* initialize RTR msg */
+ rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD : cm_node->ird_size;
+ rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD : cm_node->ord_size;
+
+ rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
+ rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ case MPA_KEY_REPLY:
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ break;
+ case SEND_RDMA_READ_ZERO:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ }
+ }
+ rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
+ rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+}
+
+/**
+ * build_mpa_v1 - build an MPA V1 frame
+ */
+static void build_mpa_v1(struct nes_cm_node *cm_node, void *start_addr, u8 mpa_key)
+{
+ struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ break;
+ case MPA_KEY_REPLY:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ break;
+ }
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = cm_node->mpa_frame_rev;
+ mpa_frame->priv_data_len = htons(cm_node->mpa_frame_size);
+}
+
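+/**
+ * build_rdma0_msg - build the negotiated zero-length RDMA read or write as the first WQE
+ */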
+static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_addr)
+{
+ u64 u64temp;
+ struct nes_qp *nesqp = *nesqp_addr;
+ struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0];
+
+ u64temp = (unsigned long)nesqp;
+ u64temp |= NES_SW_CONTEXT_ALIGN >> 1;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);
+
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
+
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ nes_debug(NES_DBG_CM, "Sending first write.\n");
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+ break;
+
+ case SEND_RDMA_READ_ZERO:
+ default:
+ if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) {
+ printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n",
+ __func__, __LINE__, cm_node->send_rdma0_op);
+ WARN_ON(1);
+ }
+ nes_debug(NES_DBG_CM, "Sending first rdma operation.\n");
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_IWARP_SQ_OP_RDMAR);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = 1;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_STAG_IDX] = 1;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 1;
+ break;
+ }
+
+ if (nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
+ kunmap(nesqp->page);
+ }
+
+ /* use the reserved spot on the WQ for the extra first WQE */
+ nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU |
+ NES_QPCONTEXT_ORDIRD_ALSMM));
+ nesqp->skip_lsmm = 1;
+ nesqp->hwqp.sq_tail = 0;
+}
/**
* schedule_nes_timer
@@ -430,10 +654,10 @@ static void print_core(struct nes_cm_core *core)
 * rem_ref_cm_node(cm_core, cm_node); add_ref_cm_node(cm_node);
*/
int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
- enum nes_timer_type type, int send_retrans,
- int close_when_complete)
+ enum nes_timer_type type, int send_retrans,
+ int close_when_complete)
{
- unsigned long flags;
+ unsigned long flags;
struct nes_cm_core *cm_core = cm_node->cm_core;
struct nes_timer_entry *new_send;
int ret = 0;
@@ -454,7 +678,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
new_send->close_when_complete = close_when_complete;
if (type == NES_TIMER_TYPE_CLOSE) {
- new_send->timetosend += (HZ/10);
+ new_send->timetosend += (HZ / 10);
if (cm_node->recv_entry) {
kfree(new_send);
WARN_ON(1);
@@ -475,7 +699,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
if (ret != NETDEV_TX_OK) {
nes_debug(NES_DBG_CM, "Error sending packet %p "
- "(jiffies = %lu)\n", new_send, jiffies);
+ "(jiffies = %lu)\n", new_send, jiffies);
new_send->timetosend = jiffies;
ret = NETDEV_TX_OK;
} else {
@@ -504,6 +728,7 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
struct iw_cm_id *cm_id = cm_node->cm_id;
enum nes_cm_node_state state = cm_node->state;
cm_node->state = NES_CM_STATE_CLOSED;
+
switch (state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_CLOSING:
@@ -536,10 +761,10 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
spin_lock_irqsave(&nesqp->lock, qplockflags);
if (nesqp->cm_id) {
nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
- "refcount = %d: HIT A "
- "NES_TIMER_TYPE_CLOSE with something "
- "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
- atomic_read(&nesqp->refcount));
+ "refcount = %d: HIT A "
+ "NES_TIMER_TYPE_CLOSE with something "
+ "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
nesqp->ibqp_state = IB_QPS_ERR;
@@ -548,10 +773,10 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
} else {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
- "refcount = %d: HIT A "
- "NES_TIMER_TYPE_CLOSE with nothing "
- "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
- atomic_read(&nesqp->refcount));
+ "refcount = %d: HIT A "
+ "NES_TIMER_TYPE_CLOSE with nothing "
+ "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
}
} else if (rem_node) {
/* TIME_WAIT state */
@@ -580,11 +805,12 @@ static void nes_cm_timer_tick(unsigned long pass)
int ret = NETDEV_TX_OK;
struct list_head timer_list;
+
INIT_LIST_HEAD(&timer_list);
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each_safe(list_node, list_core_temp,
- &cm_core->connected_nodes) {
+ &cm_core->connected_nodes) {
cm_node = container_of(list_node, struct nes_cm_node, list);
if ((cm_node->recv_entry) || (cm_node->send_entry)) {
add_ref_cm_node(cm_node);
@@ -595,18 +821,19 @@ static void nes_cm_timer_tick(unsigned long pass)
list_for_each_safe(list_node, list_core_temp, &timer_list) {
cm_node = container_of(list_node, struct nes_cm_node,
- timer_entry);
+ timer_entry);
recv_entry = cm_node->recv_entry;
if (recv_entry) {
if (time_after(recv_entry->timetosend, jiffies)) {
if (nexttimeout > recv_entry->timetosend ||
- !settimer) {
+ !settimer) {
nexttimeout = recv_entry->timetosend;
settimer = 1;
}
- } else
+ } else {
handle_recv_entry(cm_node, 1);
+ }
}
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
@@ -617,8 +844,8 @@ static void nes_cm_timer_tick(unsigned long pass)
if (time_after(send_entry->timetosend, jiffies)) {
if (cm_node->state != NES_CM_STATE_TSA) {
if ((nexttimeout >
- send_entry->timetosend) ||
- !settimer) {
+ send_entry->timetosend) ||
+ !settimer) {
nexttimeout =
send_entry->timetosend;
settimer = 1;
@@ -630,13 +857,13 @@ static void nes_cm_timer_tick(unsigned long pass)
}
if ((cm_node->state == NES_CM_STATE_TSA) ||
- (cm_node->state == NES_CM_STATE_CLOSED)) {
+ (cm_node->state == NES_CM_STATE_CLOSED)) {
free_retrans_entry(cm_node);
break;
}
if (!send_entry->retranscount ||
- !send_entry->retrycount) {
+ !send_entry->retrycount) {
cm_packets_dropped++;
free_retrans_entry(cm_node);
@@ -645,28 +872,28 @@ static void nes_cm_timer_tick(unsigned long pass)
nes_retrans_expired(cm_node);
cm_node->state = NES_CM_STATE_CLOSED;
spin_lock_irqsave(&cm_node->retrans_list_lock,
- flags);
+ flags);
break;
}
atomic_inc(&send_entry->skb->users);
cm_packets_retrans++;
nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
- "for node %p, jiffies = %lu, time to send = "
- "%lu, retranscount = %u, send_entry->seq_num = "
- "0x%08X, cm_node->tcp_cntxt.rem_ack_num = "
- "0x%08X\n", send_entry, cm_node, jiffies,
- send_entry->timetosend,
- send_entry->retranscount,
- send_entry->seq_num,
- cm_node->tcp_cntxt.rem_ack_num);
+ "for node %p, jiffies = %lu, time to send = "
+ "%lu, retranscount = %u, send_entry->seq_num = "
+ "0x%08X, cm_node->tcp_cntxt.rem_ack_num = "
+ "0x%08X\n", send_entry, cm_node, jiffies,
+ send_entry->timetosend,
+ send_entry->retranscount,
+ send_entry->seq_num,
+ cm_node->tcp_cntxt.rem_ack_num);
spin_unlock_irqrestore(&cm_node->retrans_list_lock,
- flags);
+ flags);
ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (ret != NETDEV_TX_OK) {
nes_debug(NES_DBG_CM, "rexmit failed for "
- "node=%p\n", cm_node);
+ "node=%p\n", cm_node);
cm_packets_bounced++;
send_entry->retrycount--;
nexttimeout = jiffies + NES_SHORT_TIME;
@@ -676,18 +903,18 @@ static void nes_cm_timer_tick(unsigned long pass)
cm_packets_sent++;
}
nes_debug(NES_DBG_CM, "Packet Sent: retrans count = "
- "%u, retry count = %u.\n",
- send_entry->retranscount,
- send_entry->retrycount);
+ "%u, retry count = %u.\n",
+ send_entry->retranscount,
+ send_entry->retrycount);
if (send_entry->send_retrans) {
send_entry->retranscount--;
timetosend = (NES_RETRY_TIMEOUT <<
- (NES_DEFAULT_RETRANS - send_entry->retranscount));
+ (NES_DEFAULT_RETRANS - send_entry->retranscount));
send_entry->timetosend = jiffies +
- min(timetosend, NES_MAX_TIMEOUT);
+ min(timetosend, NES_MAX_TIMEOUT);
if (nexttimeout > send_entry->timetosend ||
- !settimer) {
+ !settimer) {
nexttimeout = send_entry->timetosend;
settimer = 1;
}
@@ -696,11 +923,11 @@ static void nes_cm_timer_tick(unsigned long pass)
close_when_complete =
send_entry->close_when_complete;
nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n",
- cm_node, cm_node->state);
+ cm_node, cm_node->state);
free_retrans_entry(cm_node);
if (close_when_complete)
rem_ref_cm_node(cm_node->cm_core,
- cm_node);
+ cm_node);
}
} while (0);
@@ -710,7 +937,7 @@ static void nes_cm_timer_tick(unsigned long pass)
if (settimer) {
if (!timer_pending(&cm_core->tcp_timer)) {
- cm_core->tcp_timer.expires = nexttimeout;
+ cm_core->tcp_timer.expires = nexttimeout;
add_timer(&cm_core->tcp_timer);
}
}
@@ -721,13 +948,13 @@ static void nes_cm_timer_tick(unsigned long pass)
* send_syn
*/
static int send_syn(struct nes_cm_node *cm_node, u32 sendack,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
int ret;
int flags = SET_SYN;
char optionsbuffer[sizeof(struct option_mss) +
- sizeof(struct option_windowscale) + sizeof(struct option_base) +
- TCP_OPTIONS_PADDING];
+ sizeof(struct option_windowscale) + sizeof(struct option_base) +
+ TCP_OPTIONS_PADDING];
int optionssize = 0;
/* Sending MSS option */
@@ -854,7 +1081,7 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
* find_node - find a cm node that matches the reference cm node
*/
static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
- u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr)
+ u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr)
{
unsigned long flags;
struct list_head *hte;
@@ -868,12 +1095,12 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
list_for_each_entry(cm_node, hte, list) {
/* compare quad, return node handle if a match */
nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
- cm_node->loc_addr, cm_node->loc_port,
- loc_addr, loc_port,
- cm_node->rem_addr, cm_node->rem_port,
- rem_addr, rem_port);
+ cm_node->loc_addr, cm_node->loc_port,
+ loc_addr, loc_port,
+ cm_node->rem_addr, cm_node->rem_port,
+ rem_addr, rem_port);
if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) &&
- (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {
+ (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
return cm_node;
@@ -890,7 +1117,7 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
* find_listener - find a cm node listening on this addr-port pair
*/
static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
- nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
+ nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
{
unsigned long flags;
struct nes_cm_listener *listen_node;
@@ -900,9 +1127,9 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
/* compare node pair, return node handle if a match */
if (((listen_node->loc_addr == dst_addr) ||
- listen_node->loc_addr == 0x00000000) &&
- (listen_node->loc_port == dst_port) &&
- (listener_state & listen_node->listener_state)) {
+ listen_node->loc_addr == 0x00000000) &&
+ (listen_node->loc_port == dst_port) &&
+ (listener_state & listen_node->listener_state)) {
atomic_inc(&listen_node->ref_count);
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
return listen_node;
@@ -927,7 +1154,7 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
return -EINVAL;
nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n",
- cm_node);
+ cm_node);
spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -946,7 +1173,7 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
* mini_cm_dec_refcnt_listen
*/
static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
- struct nes_cm_listener *listener, int free_hanging_nodes)
+ struct nes_cm_listener *listener, int free_hanging_nodes)
{
int ret = -EINVAL;
int err = 0;
@@ -957,8 +1184,8 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
struct list_head reset_list;
nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, "
- "refcnt=%d\n", listener, free_hanging_nodes,
- atomic_read(&listener->ref_count));
+ "refcnt=%d\n", listener, free_hanging_nodes,
+ atomic_read(&listener->ref_count));
/* free non-accelerated child nodes for this listener */
INIT_LIST_HEAD(&reset_list);
if (free_hanging_nodes) {
@@ -966,7 +1193,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
list_for_each_safe(list_pos, list_temp,
&g_cm_core->connected_nodes) {
cm_node = container_of(list_pos, struct nes_cm_node,
- list);
+ list);
if ((cm_node->listener == listener) &&
(!cm_node->accelerated)) {
add_ref_cm_node(cm_node);
@@ -978,7 +1205,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
list_for_each_safe(list_pos, list_temp, &reset_list) {
cm_node = container_of(list_pos, struct nes_cm_node,
- reset_entry);
+ reset_entry);
{
struct nes_cm_node *loopback = cm_node->loopbackpartner;
enum nes_cm_node_state old_state;
@@ -990,7 +1217,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
err = send_reset(cm_node, NULL);
if (err) {
cm_node->state =
- NES_CM_STATE_CLOSED;
+ NES_CM_STATE_CLOSED;
WARN_ON(1);
} else {
old_state = cm_node->state;
@@ -1035,10 +1262,9 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- if (listener->nesvnic) {
+ if (listener->nesvnic)
nes_manage_apbvt(listener->nesvnic, listener->loc_port,
- PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
- }
+ PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
@@ -1052,8 +1278,8 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
if (listener) {
if (atomic_read(&listener->pend_accepts_cnt) > 0)
nes_debug(NES_DBG_CM, "destroying listener (%p)"
- " with non-zero pending accepts=%u\n",
- listener, atomic_read(&listener->pend_accepts_cnt));
+ " with non-zero pending accepts=%u\n",
+ listener, atomic_read(&listener->pend_accepts_cnt));
}
return ret;
@@ -1064,7 +1290,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
* mini_cm_del_listen
*/
static int mini_cm_del_listen(struct nes_cm_core *cm_core,
- struct nes_cm_listener *listener)
+ struct nes_cm_listener *listener)
{
listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE;
listener->cm_id = NULL; /* going to be destroyed pretty soon */
@@ -1076,9 +1302,10 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
* mini_cm_accelerated
*/
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
- struct nes_cm_node *cm_node)
+ struct nes_cm_node *cm_node)
{
u32 was_timer_set;
+
cm_node->accelerated = 1;
if (cm_node->accept_pend) {
@@ -1112,7 +1339,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
if (IS_ERR(rt)) {
printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
- __func__, dst_ip);
+ __func__, dst_ip);
return rc;
}
@@ -1130,7 +1357,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (arpindex >= 0) {
if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
- neigh->ha, ETH_ALEN)){
+ neigh->ha, ETH_ALEN)) {
/* Mac address same as in nes_arp_table */
neigh_release(neigh);
ip_rt_put(rt);
@@ -1138,8 +1365,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
}
nes_manage_arp_cache(nesvnic->netdev,
- nesadapter->arp_table[arpindex].mac_addr,
- dst_ip, NES_ARP_DELETE);
+ nesadapter->arp_table[arpindex].mac_addr,
+ dst_ip, NES_ARP_DELETE);
}
nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
@@ -1161,8 +1388,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
* make_cm_node - create a new instance of a cm node
*/
static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct nes_cm_info *cm_info,
- struct nes_cm_listener *listener)
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info,
+ struct nes_cm_listener *listener)
{
struct nes_cm_node *cm_node;
struct timespec ts;
@@ -1181,7 +1408,12 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->rem_addr = cm_info->rem_addr;
cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
- cm_node->send_write0 = send_first;
+
+ cm_node->mpa_frame_rev = mpa_version;
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ cm_node->ird_size = IETF_NO_IRD_ORD;
+ cm_node->ord_size = IETF_NO_IRD_ORD;
+
nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n",
&cm_node->loc_addr, cm_node->loc_port,
&cm_node->rem_addr, cm_node->rem_port);
@@ -1191,7 +1423,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);
nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener,
- cm_node->cm_id);
+ cm_node->cm_id);
spin_lock_init(&cm_node->retrans_list_lock);
@@ -1202,11 +1434,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID;
cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
- NES_CM_DEFAULT_RCV_WND_SCALE;
+ NES_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
- sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN;
+ sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN;
cm_node->tcp_cntxt.rcv_nxt = 0;
/* get a unique session ID, add thread_id to an upcounter to handle race */
atomic_inc(&cm_core->node_cnt);
@@ -1222,12 +1454,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loopbackpartner = NULL;
/* get the mac addr for the remote node */
- if (ipv4_is_loopback(htonl(cm_node->rem_addr)))
+ if (ipv4_is_loopback(htonl(cm_node->rem_addr))) {
arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
- else {
+ } else {
oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
-
}
if (arpindex < 0) {
kfree(cm_node);
@@ -1260,7 +1491,7 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node)
* rem_ref_cm_node - destroy an instance of a cm node
*/
static int rem_ref_cm_node(struct nes_cm_core *cm_core,
- struct nes_cm_node *cm_node)
+ struct nes_cm_node *cm_node)
{
unsigned long flags;
struct nes_qp *nesqp;
@@ -1291,9 +1522,9 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
} else {
if (cm_node->apbvt_set && cm_node->nesvnic) {
nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
- PCI_FUNC(
- cm_node->nesvnic->nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_DEL);
+ PCI_FUNC(
+ cm_node->nesvnic->nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_DEL);
}
}
@@ -1314,7 +1545,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
* process_options
*/
static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
- u32 optionsize, u32 syn_packet)
+ u32 optionsize, u32 syn_packet)
{
u32 tmp;
u32 offset = 0;
@@ -1332,15 +1563,15 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
continue;
case OPTION_NUMBER_MSS:
nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d "
- "Size: %d\n", __func__,
- all_options->as_mss.length, offset, optionsize);
+ "Size: %d\n", __func__,
+ all_options->as_mss.length, offset, optionsize);
got_mss_option = 1;
if (all_options->as_mss.length != 4) {
return 1;
} else {
tmp = ntohs(all_options->as_mss.mss);
if (tmp > 0 && tmp <
- cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss)
cm_node->tcp_cntxt.mss = tmp;
}
break;
@@ -1348,12 +1579,9 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
cm_node->tcp_cntxt.snd_wscale =
all_options->as_windowscale.shiftcount;
break;
- case OPTION_NUMBER_WRITE0:
- cm_node->send_write0 = 1;
- break;
default:
nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
- all_options->as_base.optionnum);
+ all_options->as_base.optionnum);
break;
}
offset += all_options->as_base.length;
@@ -1372,8 +1600,8 @@ static void drop_packet(struct sk_buff *skb)
static void handle_fin_pkt(struct nes_cm_node *cm_node)
{
nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
- "refcnt=%d\n", cm_node, cm_node->state,
- atomic_read(&cm_node->ref_count));
+ "refcnt=%d\n", cm_node, cm_node->state,
+ atomic_read(&cm_node->ref_count));
switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
@@ -1439,7 +1667,20 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
"listener=%p state=%d\n", __func__, __LINE__, cm_node,
cm_node->listener, cm_node->state);
- active_open_err(cm_node, skb, reset);
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V2:
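+ /* the peer reset the attempt while we were set up for MPA v2; drop back to v1 and retry the connect */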
+ cm_node->mpa_frame_rev = IETF_MPA_V1;
+ /* send a syn and goto syn sent state */
+ cm_node->state = NES_CM_STATE_SYN_SENT;
+ if (send_syn(cm_node, 0, NULL)) {
+ active_open_err(cm_node, skb, reset);
+ }
+ break;
+ case IETF_MPA_V1:
+ default:
+ active_open_err(cm_node, skb, reset);
+ break;
+ }
break;
case NES_CM_STATE_MPAREQ_RCVD:
atomic_inc(&cm_node->passive_state);
@@ -1475,21 +1716,21 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
-
- int ret = 0;
+ int ret = 0;
int datasize = skb->len;
u8 *dataloc = skb->data;
enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN;
- u32 res_type;
+ u32 res_type;
+
ret = parse_mpa(cm_node, dataloc, &res_type, datasize);
if (ret) {
nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) {
nes_debug(NES_DBG_CM, "%s[%u] create abort for "
- "cm_node=%p listener=%p state=%d\n", __func__,
- __LINE__, cm_node, cm_node->listener,
- cm_node->state);
+ "cm_node=%p listener=%p state=%d\n", __func__,
+ __LINE__, cm_node, cm_node->listener,
+ cm_node->state);
active_open_err(cm_node, skb, 1);
} else {
passive_open_err(cm_node, skb, 1);
@@ -1499,16 +1740,15 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
switch (cm_node->state) {
case NES_CM_STATE_ESTABLISHED:
- if (res_type == NES_MPA_REQUEST_REJECT) {
+ if (res_type == NES_MPA_REQUEST_REJECT)
/*BIG problem as we are receiving the MPA.. So should
- * not be REJECT.. This is Passive Open.. We can
- * only receive it Reject for Active Open...*/
+ * not be REJECT.. This is Passive Open.. We can
+ * only receive it Reject for Active Open...*/
WARN_ON(1);
- }
cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
type = NES_CM_EVENT_MPA_REQ;
atomic_set(&cm_node->passive_state,
- NES_PASSIVE_STATE_INDICATED);
+ NES_PASSIVE_STATE_INDICATED);
break;
case NES_CM_STATE_MPAREQ_SENT:
cleanup_retrans_entry(cm_node);
@@ -1535,8 +1775,8 @@ static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
case NES_CM_STATE_SYN_SENT:
case NES_CM_STATE_MPAREQ_SENT:
nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
- "listener=%p state=%d\n", __func__, __LINE__, cm_node,
- cm_node->listener, cm_node->state);
+ "listener=%p state=%d\n", __func__, __LINE__, cm_node,
+ cm_node->listener, cm_node->state);
active_open_err(cm_node, skb, 1);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1550,11 +1790,11 @@ static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
}
static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
int err;
- err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num))? 0 : 1;
+ err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num)) ? 0 : 1;
if (err)
active_open_err(cm_node, skb, 1);
@@ -1562,7 +1802,7 @@ static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph,
}
static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
int err = 0;
u32 seq;
@@ -1570,21 +1810,22 @@ static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
u32 rcv_wnd;
+
seq = ntohl(tcph->seq);
ack_seq = ntohl(tcph->ack_seq);
rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
if (ack_seq != loc_seq_num)
err = 1;
- else if (!between(seq, rcv_nxt, (rcv_nxt+rcv_wnd)))
+ else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
err = 1;
if (err) {
nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
- "listener=%p state=%d\n", __func__, __LINE__, cm_node,
- cm_node->listener, cm_node->state);
+ "listener=%p state=%d\n", __func__, __LINE__, cm_node,
+ cm_node->listener, cm_node->state);
indicate_pkt_err(cm_node, skb);
nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X "
- "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt,
- rcv_wnd);
+ "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt,
+ rcv_wnd);
}
return err;
}
@@ -1594,9 +1835,8 @@ static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
 * is created with a listener, or it may come in as a retransmitted packet, in
 * which case it will just be dropped.
*/
-
static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct tcphdr *tcph)
+ struct tcphdr *tcph)
{
int ret;
u32 inc_sequence;
@@ -1615,15 +1855,15 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
case NES_CM_STATE_LISTENING:
/* Passive OPEN */
if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
- cm_node->listener->backlog) {
+ cm_node->listener->backlog) {
nes_debug(NES_DBG_CM, "drop syn due to backlog "
- "pressure \n");
+ "pressure \n");
cm_backlog_drops++;
passive_open_err(cm_node, skb, 0);
break;
}
ret = handle_tcp_options(cm_node, tcph, skb, optionsize,
- 1);
+ 1);
if (ret) {
passive_open_err(cm_node, skb, 0);
/* drop pkt */
@@ -1657,9 +1897,8 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
}
static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct tcphdr *tcph)
+ struct tcphdr *tcph)
{
-
int ret;
u32 inc_sequence;
int optionsize;
@@ -1678,7 +1917,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0);
if (ret) {
nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n",
- cm_node);
+ cm_node);
break;
}
cleanup_retrans_entry(cm_node);
@@ -1717,12 +1956,13 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
}
static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct tcphdr *tcph)
+ struct tcphdr *tcph)
{
int datasize = 0;
u32 inc_sequence;
int ret = 0;
int optionsize;
+
optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
if (check_seq(cm_node, tcph, skb))
@@ -1743,8 +1983,9 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
handle_rcv_mpa(cm_node, skb);
- } else /* rcvd ACK only */
+ } else { /* rcvd ACK only */
dev_kfree_skb_any(skb);
+ }
break;
case NES_CM_STATE_ESTABLISHED:
/* Passive OPEN */
@@ -1752,16 +1993,18 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
handle_rcv_mpa(cm_node, skb);
- } else
+ } else {
drop_packet(skb);
+ }
break;
case NES_CM_STATE_MPAREQ_SENT:
cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
handle_rcv_mpa(cm_node, skb);
- } else /* Could be just an ack pkt.. */
+ } else { /* Could be just an ack pkt.. */
dev_kfree_skb_any(skb);
+ }
break;
case NES_CM_STATE_LISTENING:
cleanup_retrans_entry(cm_node);
@@ -1802,14 +2045,15 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
- struct sk_buff *skb, int optionsize, int passive)
+ struct sk_buff *skb, int optionsize, int passive)
{
u8 *optionsloc = (u8 *)&tcph[1];
+
if (optionsize) {
if (process_options(cm_node, optionsloc, optionsize,
- (u32)tcph->syn)) {
+ (u32)tcph->syn)) {
nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n",
- __func__, cm_node);
+ __func__, cm_node);
if (passive)
passive_open_err(cm_node, skb, 1);
else
@@ -1819,7 +2063,7 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
}
cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
- cm_node->tcp_cntxt.snd_wscale;
+ cm_node->tcp_cntxt.snd_wscale;
if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
@@ -1830,18 +2074,18 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
* active_open_err() will send reset() if flag set..
* It will also send ABORT event.
*/
-
static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
- int reset)
+ int reset)
{
cleanup_retrans_entry(cm_node);
if (reset) {
nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, "
- "state=%d\n", cm_node, cm_node->state);
+ "state=%d\n", cm_node, cm_node->state);
add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
- } else
+ } else {
dev_kfree_skb_any(skb);
+ }
cm_node->state = NES_CM_STATE_CLOSED;
create_event(cm_node, NES_CM_EVENT_ABORTED);
@@ -1851,15 +2095,14 @@ static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
* passive_open_err() will either do a reset() or will free up the skb and
* remove the cm_node.
*/
-
static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
- int reset)
+ int reset)
{
cleanup_retrans_entry(cm_node);
cm_node->state = NES_CM_STATE_CLOSED;
if (reset) {
nes_debug(NES_DBG_CM, "passive_open_err sending RST for "
- "cm_node=%p state =%d\n", cm_node, cm_node->state);
+ "cm_node=%p state =%d\n", cm_node, cm_node->state);
send_reset(cm_node, skb);
} else {
dev_kfree_skb_any(skb);
@@ -1874,6 +2117,7 @@ static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
static void free_retrans_entry(struct nes_cm_node *cm_node)
{
struct nes_timer_entry *send_entry;
+
send_entry = cm_node->send_entry;
if (send_entry) {
cm_node->send_entry = NULL;
@@ -1897,26 +2141,28 @@ static void cleanup_retrans_entry(struct nes_cm_node *cm_node)
 * Returns the skb if it is to be freed, else returns NULL if it was already used.
*/
static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct nes_cm_core *cm_core)
+ struct nes_cm_core *cm_core)
{
- enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
+ enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
struct tcphdr *tcph = tcp_hdr(skb);
- u32 fin_set = 0;
+ u32 fin_set = 0;
int ret = 0;
+
skb_pull(skb, ip_hdr(skb)->ihl << 2);
nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
- "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn,
- tcph->ack, tcph->rst, tcph->fin);
+ "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn,
+ tcph->ack, tcph->rst, tcph->fin);
- if (tcph->rst)
+ if (tcph->rst) {
pkt_type = NES_PKT_TYPE_RST;
- else if (tcph->syn) {
+ } else if (tcph->syn) {
pkt_type = NES_PKT_TYPE_SYN;
if (tcph->ack)
pkt_type = NES_PKT_TYPE_SYNACK;
- } else if (tcph->ack)
+ } else if (tcph->ack) {
pkt_type = NES_PKT_TYPE_ACK;
+ }
if (tcph->fin)
fin_set = 1;
@@ -1947,17 +2193,17 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
* mini_cm_listen - create a listen node with params
*/
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
{
struct nes_cm_listener *listener;
unsigned long flags;
nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
- cm_info->loc_addr, cm_info->loc_port);
+ cm_info->loc_addr, cm_info->loc_port);
/* cannot have multiple matching listeners */
listener = find_listener(cm_core, htonl(cm_info->loc_addr),
- htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);
+ htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);
if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
/* find automatically incs ref count ??? */
atomic_dec(&listener->ref_count);
@@ -2003,9 +2249,9 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
}
nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x,"
- " listener = %p, backlog = %d, cm_id = %p.\n",
- cm_info->loc_addr, cm_info->loc_port,
- listener, listener->backlog, listener->cm_id);
+ " listener = %p, backlog = %d, cm_id = %p.\n",
+ cm_info->loc_addr, cm_info->loc_port,
+ listener, listener->backlog, listener->cm_id);
return listener;
}
@@ -2015,26 +2261,20 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
* mini_cm_connect - make a connection node with params
*/
static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, u16 private_data_len,
- void *private_data, struct nes_cm_info *cm_info)
+ struct nes_vnic *nesvnic, u16 private_data_len,
+ void *private_data, struct nes_cm_info *cm_info)
{
int ret = 0;
struct nes_cm_node *cm_node;
struct nes_cm_listener *loopbackremotelistener;
struct nes_cm_node *loopbackremotenode;
struct nes_cm_info loopback_cm_info;
- u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + private_data_len;
- struct ietf_mpa_frame *mpa_frame = NULL;
+ u8 *start_buff;
/* create a CM connection node */
cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
if (!cm_node)
return NULL;
- mpa_frame = &cm_node->mpa_frame;
- memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
- mpa_frame->flags = IETF_MPA_FLAGS_CRC;
- mpa_frame->rev = IETF_MPA_VERSION;
- mpa_frame->priv_data_len = htons(private_data_len);
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
@@ -2042,8 +2282,8 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (cm_info->loc_addr == cm_info->rem_addr) {
loopbackremotelistener = find_listener(cm_core,
- ntohl(nesvnic->local_ipaddr), cm_node->rem_port,
- NES_CM_LISTENER_ACTIVE_STATE);
+ ntohl(nesvnic->local_ipaddr), cm_node->rem_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (loopbackremotelistener == NULL) {
create_event(cm_node, NES_CM_EVENT_ABORTED);
} else {
@@ -2052,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
loopback_cm_info.rem_port = cm_info->loc_port;
loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
loopbackremotenode = make_cm_node(cm_core, nesvnic,
- &loopback_cm_info, loopbackremotelistener);
+ &loopback_cm_info, loopbackremotelistener);
if (!loopbackremotenode) {
rem_ref_cm_node(cm_node->cm_core, cm_node);
return NULL;
@@ -2063,7 +2303,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
NES_CM_DEFAULT_RCV_WND_SCALE;
cm_node->loopbackpartner = loopbackremotenode;
memcpy(loopbackremotenode->mpa_frame_buf, private_data,
- private_data_len);
+ private_data_len);
loopbackremotenode->mpa_frame_size = private_data_len;
/* we are done handling this state. */
@@ -2091,12 +2331,10 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
return cm_node;
}
- /* set our node side to client (active) side */
- cm_node->tcp_cntxt.client = 1;
- /* init our MPA frame ptr */
- memcpy(mpa_frame->priv_data, private_data, private_data_len);
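+ /* stash the private data past the largest (v2) header so cm_build_mpa_frame() can later build the header in front of it */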
+ start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2);
+ cm_node->mpa_frame_size = private_data_len;
- cm_node->mpa_frame_size = mpa_frame_size;
+ memcpy(start_buff, private_data, private_data_len);
/* send a syn and goto syn sent state */
cm_node->state = NES_CM_STATE_SYN_SENT;
@@ -2105,18 +2343,19 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (ret) {
/* error in sending the syn free up the cm_node struct */
nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest "
- "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n",
- cm_node->rem_addr, cm_node->rem_port, cm_node,
- cm_node->cm_id);
+ "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_addr, cm_node->rem_port, cm_node,
+ cm_node->cm_id);
rem_ref_cm_node(cm_node->cm_core, cm_node);
cm_node = NULL;
}
- if (cm_node)
+ if (cm_node) {
nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X,"
- "port=0x%04x, cm_node=%p, cm_id = %p.\n",
- cm_node->rem_addr, cm_node->rem_port, cm_node,
- cm_node->cm_id);
+ "port=0x%04x, cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_addr, cm_node->rem_port, cm_node,
+ cm_node->cm_id);
+ }
return cm_node;
}
@@ -2126,8 +2365,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
* mini_cm_accept - accept a connection
* This function is never called
*/
-static int mini_cm_accept(struct nes_cm_core *cm_core,
- struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
+static int mini_cm_accept(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
{
return 0;
}
@@ -2136,8 +2374,7 @@ static int mini_cm_accept(struct nes_cm_core *cm_core,
/**
* mini_cm_reject - reject and teardown a connection
*/
-static int mini_cm_reject(struct nes_cm_core *cm_core,
- struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
+static int mini_cm_reject(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
{
int ret = 0;
int err = 0;
@@ -2147,7 +2384,7 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
struct nes_cm_node *loopback = cm_node->loopbackpartner;
nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
- __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
+ __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
if (cm_node->tcp_cntxt.client)
return ret;
@@ -2168,8 +2405,9 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
err = send_reset(cm_node, NULL);
if (err)
WARN_ON(1);
- } else
+ } else {
cm_id->add_ref(cm_id);
+ }
}
}
} else {
@@ -2244,7 +2482,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
case NES_CM_STATE_TSA:
if (cm_node->send_entry)
printk(KERN_ERR "ERROR Close got called from STATE_TSA "
- "send_entry=%p\n", cm_node->send_entry);
+ "send_entry=%p\n", cm_node->send_entry);
ret = rem_ref_cm_node(cm_core, cm_node);
break;
}
@@ -2257,7 +2495,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
* node state machine
*/
static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct sk_buff *skb)
+ struct nes_vnic *nesvnic, struct sk_buff *skb)
{
struct nes_cm_node *cm_node = NULL;
struct nes_cm_listener *listener = NULL;
@@ -2269,9 +2507,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
if (!skb)
return 0;
- if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
+ if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr))
return 0;
- }
iph = (struct iphdr *)skb->data;
tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
@@ -2289,8 +2526,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
do {
cm_node = find_node(cm_core,
- nfo.rem_port, nfo.rem_addr,
- nfo.loc_port, nfo.loc_addr);
+ nfo.rem_port, nfo.rem_addr,
+ nfo.loc_port, nfo.loc_addr);
if (!cm_node) {
/* Only type of packet accepted are for */
@@ -2300,8 +2537,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
break;
}
listener = find_listener(cm_core, nfo.loc_addr,
- nfo.loc_port,
- NES_CM_LISTENER_ACTIVE_STATE);
+ nfo.loc_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (!listener) {
nfo.cm_id = NULL;
nfo.conn_type = 0;
@@ -2312,10 +2549,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
nfo.cm_id = listener->cm_id;
nfo.conn_type = listener->conn_type;
cm_node = make_cm_node(cm_core, nesvnic, &nfo,
- listener);
+ listener);
if (!cm_node) {
nes_debug(NES_DBG_CM, "Unable to allocate "
- "node\n");
+ "node\n");
cm_packets_dropped++;
atomic_dec(&listener->ref_count);
dev_kfree_skb_any(skb);
@@ -2331,9 +2568,13 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
}
add_ref_cm_node(cm_node);
} else if (cm_node->state == NES_CM_STATE_TSA) {
- rem_ref_cm_node(cm_core, cm_node);
- atomic_inc(&cm_accel_dropped_pkts);
- dev_kfree_skb_any(skb);
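+ /* accelerated (TSA) connection: in pau mode hand the packet to the management QP handler instead of dropping it */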
+ if (cm_node->nesqp->pau_mode)
+ nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
+ else {
+ rem_ref_cm_node(cm_core, cm_node);
+ atomic_inc(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ }
break;
}
skb_reset_network_header(skb);
@@ -2363,7 +2604,7 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
init_timer(&cm_core->tcp_timer);
cm_core->tcp_timer.function = nes_cm_timer_tick;
- cm_core->mtu = NES_CM_DEFAULT_MTU;
+ cm_core->mtu = NES_CM_DEFAULT_MTU;
cm_core->state = NES_CM_STATE_INITED;
cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
@@ -2401,9 +2642,8 @@ static int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
barrier();
- if (timer_pending(&cm_core->tcp_timer)) {
+ if (timer_pending(&cm_core->tcp_timer))
del_timer(&cm_core->tcp_timer);
- }
destroy_workqueue(cm_core->event_wq);
destroy_workqueue(cm_core->disconn_wq);
@@ -2458,8 +2698,8 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
return -EINVAL;
nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 |
- NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG |
- NES_QPCONTEXT_MISC_DROS);
+ NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG |
+ NES_QPCONTEXT_MISC_DROS);
if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale)
nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE);
@@ -2469,15 +2709,15 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16);
nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32(
- (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);
+ (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);
nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
- (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
- NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);
+ (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
+ NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);
nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
- (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
- NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);
+ (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
+ NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);
nesqp->nesqp_context->keepalive = cpu_to_le32(0x80);
nesqp->nesqp_context->ts_recent = 0;
@@ -2486,24 +2726,24 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
- cm_node->tcp_cntxt.rcv_wscale);
+ cm_node->tcp_cntxt.rcv_wscale);
nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
nesqp->nesqp_context->srtt = 0;
nesqp->nesqp_context->rttvar = cpu_to_le32(0x6);
nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000);
- nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss);
+ nesqp->nesqp_context->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X,"
- " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
- nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
- le32_to_cpu(nesqp->nesqp_context->snd_nxt),
- cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
- le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
- le32_to_cpu(nesqp->nesqp_context->misc));
+ " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
+ nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
+ le32_to_cpu(nesqp->nesqp_context->snd_nxt),
+ cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
+ le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
+ le32_to_cpu(nesqp->nesqp_context->misc));
nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd));
nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd));
nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd));
@@ -2524,7 +2764,7 @@ int nes_cm_disconn(struct nes_qp *nesqp)
work = kzalloc(sizeof *work, GFP_ATOMIC);
if (!work)
- return -ENOMEM; /* Timer will clean up */
+ return -ENOMEM; /* Timer will clean up */
nes_add_ref(&nesqp->ibqp);
work->nesqp = nesqp;
@@ -2544,7 +2784,7 @@ static void nes_disconnect_worker(struct work_struct *work)
kfree(dwork);
nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
- nesqp->last_aeq, nesqp->hwqp.qp_id);
+ nesqp->last_aeq, nesqp->hwqp.qp_id);
nes_cm_disconn_true(nesqp);
nes_rem_ref(&nesqp->ibqp);
}
@@ -2580,7 +2820,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
/* make sure we haven't already closed this connection */
if (!cm_id) {
nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
- nesqp->hwqp.qp_id);
+ nesqp->hwqp.qp_id);
spin_unlock_irqrestore(&nesqp->lock, flags);
return -1;
}
@@ -2589,7 +2829,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id);
original_hw_tcp_state = nesqp->hw_tcp_state;
- original_ibqp_state = nesqp->ibqp_state;
+ original_ibqp_state = nesqp->ibqp_state;
last_ae = nesqp->last_aeq;
if (nesqp->term_flags) {
@@ -2647,16 +2887,16 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
cm_event.private_data_len = 0;
nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event"
- " for QP%u, SQ Head = %u, SQ Tail = %u. "
- "cm_id = %p, refcount = %u.\n",
- nesqp->hwqp.qp_id, nesqp->hwqp.sq_head,
- nesqp->hwqp.sq_tail, cm_id,
- atomic_read(&nesqp->refcount));
+ " for QP%u, SQ Head = %u, SQ Tail = %u. "
+ "cm_id = %p, refcount = %u.\n",
+ nesqp->hwqp.qp_id, nesqp->hwqp.sq_head,
+ nesqp->hwqp.sq_tail, cm_id,
+ atomic_read(&nesqp->refcount));
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
nes_debug(NES_DBG_CM, "OFA CM event_handler "
- "returned, ret=%d\n", ret);
+ "returned, ret=%d\n", ret);
}
if (issue_close) {
@@ -2674,9 +2914,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
cm_event.private_data_len = 0;
ret = cm_id->event_handler(cm_id, &cm_event);
- if (ret) {
+ if (ret)
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
- }
cm_id->rem_ref(cm_id);
}
@@ -2716,8 +2955,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
if (nesqp->lsmm_mr)
nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
pci_free_consistent(nesdev->pcidev,
- nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
- nesqp->ietf_frame, nesqp->ietf_frame_pbase);
+ nesqp->private_data_len + nesqp->ietf_frame_size,
+ nesqp->ietf_frame, nesqp->ietf_frame_pbase);
}
}
@@ -2756,6 +2995,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct ib_phys_buf ibphysbuf;
struct nes_pd *nespd;
u64 tagged_offset;
+ u8 mpa_frame_offset = 0;
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ u8 start_addr = 0;
+ u8 *start_ptr = &start_addr;
+ u8 **start_buff = &start_ptr;
+ u16 buff_len = 0;
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -2796,53 +3041,49 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
netdev_refcnt_read(nesvnic->netdev));
+ nesqp->ietf_frame_size = sizeof(struct ietf_mpa_v2);
/* allocate the ietf frame and space for private data */
nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
- sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
- &nesqp->ietf_frame_pbase);
+ nesqp->ietf_frame_size + conn_param->private_data_len,
+ &nesqp->ietf_frame_pbase);
if (!nesqp->ietf_frame) {
- nes_debug(NES_DBG_CM, "Unable to allocate memory for private "
- "data\n");
+ nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
return -ENOMEM;
}
+ mpa_v2_frame = (struct ietf_mpa_v2 *)nesqp->ietf_frame;
+ if (cm_node->mpa_frame_rev == IETF_MPA_V1)
+ mpa_frame_offset = 4;
- /* setup the MPA frame */
- nesqp->private_data_len = conn_param->private_data_len;
- memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
-
- memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
- conn_param->private_data_len);
+ memcpy(mpa_v2_frame->priv_data, conn_param->private_data,
+ conn_param->private_data_len);
- nesqp->ietf_frame->priv_data_len =
- cpu_to_be16(conn_param->private_data_len);
- nesqp->ietf_frame->rev = mpa_version;
- nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
+ cm_build_mpa_frame(cm_node, start_buff, &buff_len, nesqp->ietf_frame, MPA_KEY_REPLY);
+ nesqp->private_data_len = conn_param->private_data_len;
/* setup our first outgoing iWarp send WQE (the IETF frame response) */
wqe = &nesqp->hwqp.sq_vbase[0];
if (cm_id->remote_addr.sin_addr.s_addr !=
- cm_id->local_addr.sin_addr.s_addr) {
+ cm_id->local_addr.sin_addr.s_addr) {
u64temp = (unsigned long)nesqp;
nesibdev = nesvnic->nesibdev;
nespd = nesqp->nespd;
- ibphysbuf.addr = nesqp->ietf_frame_pbase;
- ibphysbuf.size = conn_param->private_data_len +
- sizeof(struct ietf_mpa_frame);
- tagged_offset = (u64)(unsigned long)nesqp->ietf_frame;
+ ibphysbuf.addr = nesqp->ietf_frame_pbase + mpa_frame_offset;
+ ibphysbuf.size = buff_len;
+ tagged_offset = (u64)(unsigned long)*start_buff;
ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
- &ibphysbuf, 1,
- IB_ACCESS_LOCAL_WRITE,
- &tagged_offset);
+ &ibphysbuf, 1,
+ IB_ACCESS_LOCAL_WRITE,
+ &tagged_offset);
if (!ibmr) {
nes_debug(NES_DBG_CM, "Unable to register memory region"
- "for lSMM for cm_node = %p \n",
- cm_node);
+ "for lSMM for cm_node = %p \n",
+ cm_node);
pci_free_consistent(nesdev->pcidev,
- nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
- nesqp->ietf_frame, nesqp->ietf_frame_pbase);
+ nesqp->private_data_len + nesqp->ietf_frame_size,
+ nesqp->ietf_frame, nesqp->ietf_frame_pbase);
return -ENOMEM;
}
@@ -2850,22 +3091,20 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ibmr->device = nespd->ibpd.device;
nesqp->lsmm_mr = ibmr;
- u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ u64temp |= NES_SW_CONTEXT_ALIGN >> 1;
set_wqe_64bit_value(wqe->wqe_words,
- NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
- u64temp);
+ NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+ u64temp);
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING |
- NES_IWARP_SQ_WQE_WRPDU);
+ NES_IWARP_SQ_WQE_WRPDU);
wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
- cpu_to_le32(conn_param->private_data_len +
- sizeof(struct ietf_mpa_frame));
+ cpu_to_le32(buff_len);
set_wqe_64bit_value(wqe->wqe_words,
- NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
- (u64)(unsigned long)nesqp->ietf_frame);
+ NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+ (u64)(unsigned long)(*start_buff));
wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
- cpu_to_le32(conn_param->private_data_len +
- sizeof(struct ietf_mpa_frame));
+ cpu_to_le32(buff_len);
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
if (nesqp->sq_kmapped) {
nesqp->sq_kmapped = 0;
@@ -2874,7 +3113,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
- NES_QPCONTEXT_ORDIRD_WRPDU);
+ NES_QPCONTEXT_ORDIRD_WRPDU);
} else {
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU);
@@ -2888,11 +3127,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
/* nesqp->cm_node = (void *)cm_id->provider_data; */
cm_id->provider_data = nesqp;
- nesqp->active_conn = 0;
+ nesqp->active_conn = 0;
if (cm_node->state == NES_CM_STATE_TSA)
nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n",
- cm_node);
+ cm_node);
nes_cm_init_tsa_conn(nesqp, cm_node);
@@ -2909,13 +3148,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
- (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
- NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
+ NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
nesqp->nesqp_context->arp_index_vlan |=
cpu_to_le32(nes_arp_table(nesdev,
- le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
- NES_ARP_RESOLVE) << 16);
+ le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
+ NES_ARP_RESOLVE) << 16);
nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
@@ -2941,7 +3180,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
crc_value = get_crc_value(&nes_quad);
nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
- nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
+ nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
nesqp->hte_index &= adapter->hte_index_mask;
nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
@@ -2949,17 +3188,15 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = "
- "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
- "private data length=%zu.\n", nesqp->hwqp.qp_id,
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port),
- ntohl(cm_id->local_addr.sin_addr.s_addr),
- ntohs(cm_id->local_addr.sin_port),
- le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
- le32_to_cpu(nesqp->nesqp_context->snd_nxt),
- conn_param->private_data_len +
- sizeof(struct ietf_mpa_frame));
-
+ "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
+ "private data length=%u.\n", nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port),
+ le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
+ le32_to_cpu(nesqp->nesqp_context->snd_nxt),
+ buff_len);
/* notify OF layer that accept event was successful */
cm_id->add_ref(cm_id);
@@ -2980,12 +3217,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nesqp->private_data_len;
/* copy entire MPA frame to our cm_node's frame */
memcpy(cm_node->loopbackpartner->mpa_frame_buf,
- nesqp->ietf_frame->priv_data, nesqp->private_data_len);
+ conn_param->private_data, conn_param->private_data_len);
create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
}
if (ret)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
- "ret=%d\n", __func__, __LINE__, ret);
+ "ret=%d\n", __func__, __LINE__, ret);
return 0;
}
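
A note on the accept-path layout above: the ietf frame buffer is always allocated and filled as an MPA v2 frame, and only the DMA start is shifted when the peer negotiated MPA v1. A minimal sketch of the arithmetic this implies, assuming the nes_cm.h definitions added further down in this diff (the helper itself is illustrative, not part of the patch):

/* Illustrative only: private data is always copied to offset
 * sizeof(struct ietf_mpa_v2).  An MPA v1 header is IETF_RTR_MSG_SIZE (4)
 * bytes shorter, so the v1 frame is laid down 4 bytes into the buffer and
 * still ends flush against the private data; hence mpa_frame_offset = 4. */
static inline u32 lsmm_frame_offset(enum mpa_frame_version rev)
{
        return (rev == IETF_MPA_V1) ? IETF_RTR_MSG_SIZE : 0;
}

/* so: ibphysbuf.addr = nesqp->ietf_frame_pbase + lsmm_frame_offset(rev); */
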
@@ -2998,34 +3235,28 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
struct nes_cm_node *cm_node;
struct nes_cm_node *loopback;
-
struct nes_cm_core *cm_core;
+ u8 *start_buff;
atomic_inc(&cm_rejects);
- cm_node = (struct nes_cm_node *) cm_id->provider_data;
+ cm_node = (struct nes_cm_node *)cm_id->provider_data;
loopback = cm_node->loopbackpartner;
cm_core = cm_node->cm_core;
cm_node->cm_id = cm_id;
- cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
- if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
+ if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
return -EINVAL;
- memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
if (loopback) {
memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
loopback->mpa_frame.priv_data_len = pdata_len;
- loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) +
- pdata_len;
+ loopback->mpa_frame_size = pdata_len;
} else {
- memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
- cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
+ start_buff = &cm_node->mpa_frame_buf[0] + sizeof(struct ietf_mpa_v2);
+ cm_node->mpa_frame_size = pdata_len;
+ memcpy(start_buff, pdata, pdata_len);
}
-
- cm_node->mpa_frame.rev = mpa_version;
- cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
-
- return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
+ return cm_core->api->reject(cm_core, cm_node);
}
@@ -3052,7 +3283,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nesvnic = to_nesvnic(nesqp->ibqp.device);
if (!nesvnic)
return -EINVAL;
- nesdev = nesvnic->nesdev;
+ nesdev = nesvnic->nesdev;
if (!nesdev)
return -EINVAL;
@@ -3060,12 +3291,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -EINVAL;
nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
- "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
- ntohl(nesvnic->local_ipaddr),
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port),
- ntohl(cm_id->local_addr.sin_addr.s_addr),
- ntohs(cm_id->local_addr.sin_port));
+ "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
+ ntohl(nesvnic->local_ipaddr),
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port));
atomic_inc(&cm_connects);
nesqp->active_conn = 1;
@@ -3079,12 +3310,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
- conn_param->private_data_len);
+ conn_param->private_data_len);
if (cm_id->local_addr.sin_addr.s_addr !=
- cm_id->remote_addr.sin_addr.s_addr) {
+ cm_id->remote_addr.sin_addr.s_addr) {
nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
- PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
apbvt_set = 1;
}
@@ -3100,13 +3331,13 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
/* create a connect CM node connection */
cm_node = g_cm_core->api->connect(g_cm_core, nesvnic,
- conn_param->private_data_len, (void *)conn_param->private_data,
- &cm_info);
+ conn_param->private_data_len, (void *)conn_param->private_data,
+ &cm_info);
if (!cm_node) {
if (apbvt_set)
nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
- PCI_FUNC(nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_DEL);
+ PCI_FUNC(nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_DEL);
cm_id->rem_ref(cm_id);
return -ENOMEM;
@@ -3156,7 +3387,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
if (!cm_node) {
printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
- __func__, __LINE__);
+ __func__, __LINE__);
return -ENOMEM;
}
@@ -3164,12 +3395,12 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
if (!cm_node->reused_node) {
err = nes_manage_apbvt(nesvnic,
- ntohs(cm_id->local_addr.sin_port),
- PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_ADD);
+ ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_ADD);
if (err) {
printk(KERN_ERR "nes_manage_apbvt call returned %d.\n",
- err);
+ err);
g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
return err;
}
@@ -3206,13 +3437,13 @@ int nes_destroy_listen(struct iw_cm_id *cm_id)
int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice)
{
int rc = 0;
+
cm_packets_received++;
- if ((g_cm_core) && (g_cm_core->api)) {
+ if ((g_cm_core) && (g_cm_core->api))
rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb);
- } else {
+ else
nes_debug(NES_DBG_CM, "Unable to process packet for CM,"
- " cm is not setup properly.\n");
- }
+ " cm is not setup properly.\n");
return rc;
}
@@ -3227,11 +3458,10 @@ int nes_cm_start(void)
nes_debug(NES_DBG_CM, "\n");
/* create the primary CM core, pass this handle to subsequent core inits */
g_cm_core = nes_cm_alloc_core();
- if (g_cm_core) {
+ if (g_cm_core)
return 0;
- } else {
+ else
return -ENOMEM;
- }
}
@@ -3252,7 +3482,6 @@ int nes_cm_stop(void)
*/
static void cm_event_connected(struct nes_cm_event *event)
{
- u64 u64temp;
struct nes_qp *nesqp;
struct nes_vnic *nesvnic;
struct nes_device *nesdev;
@@ -3261,7 +3490,6 @@ static void cm_event_connected(struct nes_cm_event *event)
struct ib_qp_attr attr;
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
- struct nes_hw_qp_wqe *wqe;
struct nes_v4_quad nes_quad;
u32 crc_value;
int ret;
@@ -3275,17 +3503,16 @@ static void cm_event_connected(struct nes_cm_event *event)
nesdev = nesvnic->nesdev;
nesadapter = nesdev->nesadapter;
- if (nesqp->destroyed) {
+ if (nesqp->destroyed)
return;
- }
atomic_inc(&cm_connecteds);
nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
- " local port 0x%04X. jiffies = %lu.\n",
- nesqp->hwqp.qp_id,
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port),
- ntohs(cm_id->local_addr.sin_port),
- jiffies);
+ " local port 0x%04X. jiffies = %lu.\n",
+ nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohs(cm_id->local_addr.sin_port),
+ jiffies);
nes_cm_init_tsa_conn(nesqp, cm_node);
@@ -3316,40 +3543,12 @@ static void cm_event_connected(struct nes_cm_event *event)
NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
/* Adjust tail for not having a LSMM */
- nesqp->hwqp.sq_tail = 1;
+ /*nesqp->hwqp.sq_tail = 1;*/
-#if defined(NES_SEND_FIRST_WRITE)
- if (cm_node->send_write0) {
- nes_debug(NES_DBG_CM, "Sending first write.\n");
- wqe = &nesqp->hwqp.sq_vbase[0];
- u64temp = (unsigned long)nesqp;
- u64temp |= NES_SW_CONTEXT_ALIGN>>1;
- set_wqe_64bit_value(wqe->wqe_words,
- NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);
- wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
- cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+ build_rdma0_msg(cm_node, &nesqp);
- if (nesqp->sq_kmapped) {
- nesqp->sq_kmapped = 0;
- kunmap(nesqp->page);
- }
-
- /* use the reserved spot on the WQ for the extra first WQE */
- nesqp->nesqp_context->ird_ord_sizes &=
- cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
- NES_QPCONTEXT_ORDIRD_WRPDU |
- NES_QPCONTEXT_ORDIRD_ALSMM));
- nesqp->skip_lsmm = 1;
- nesqp->hwqp.sq_tail = 0;
- nes_write32(nesdev->regs + NES_WQE_ALLOC,
- (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
- }
-#endif
+ nes_write32(nesdev->regs + NES_WQE_ALLOC,
+ (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
memset(&nes_quad, 0, sizeof(nes_quad));
@@ -3366,13 +3565,13 @@ static void cm_event_connected(struct nes_cm_event *event)
crc_value = get_crc_value(&nes_quad);
nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
- nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
+ nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
nesqp->hte_index &= nesadapter->hte_index_mask;
nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
nesqp->ietf_frame = &cm_node->mpa_frame;
- nesqp->private_data_len = (u8) cm_node->mpa_frame_size;
+ nesqp->private_data_len = (u8)cm_node->mpa_frame_size;
cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
/* notify OF layer we successfully created the requested connection */
@@ -3384,7 +3583,9 @@ static void cm_event_connected(struct nes_cm_event *event)
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
- cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
+ cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size;
+ cm_event.ird = cm_node->ird_size;
+ cm_event.ord = cm_node->ord_size;
cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
ret = cm_id->event_handler(cm_id, &cm_event);
@@ -3392,12 +3593,12 @@ static void cm_event_connected(struct nes_cm_event *event)
if (ret)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
- "ret=%d\n", __func__, __LINE__, ret);
+ "ret=%d\n", __func__, __LINE__, ret);
attr.qp_state = IB_QPS_RTS;
nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = "
- "%lu\n", nesqp->hwqp.qp_id, jiffies);
+ "%lu\n", nesqp->hwqp.qp_id, jiffies);
return;
}
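
The removed NES_SEND_FIRST_WRITE block is replaced by build_rdma0_msg(), whose body is added in an earlier part of this patch and is not visible in this excerpt. A hedged sketch of what the new send_rdma0 selector implies, reusing only names that appear in the removed code (sketch_rdma0 is a hypothetical stand-in, not the driver's function):

static void sketch_rdma0(struct nes_cm_node *cm_node, struct nes_qp *nesqp)
{
        struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0];
        u64 ctx = (unsigned long)nesqp | (NES_SW_CONTEXT_ALIGN >> 1);

        set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, ctx);
        wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
        wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
        if (cm_node->send_rdma0_op == SEND_RDMA_WRITE_ZERO)
                wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
                        cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
        /* SEND_RDMA_READ_ZERO would select the RDMA-read opcode instead
         * (that constant is not visible in this excerpt). */
}
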
@@ -3418,16 +3619,14 @@ static void cm_event_connect_error(struct nes_cm_event *event)
return;
cm_id = event->cm_node->cm_id;
- if (!cm_id) {
+ if (!cm_id)
return;
- }
nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id);
nesqp = cm_id->provider_data;
- if (!nesqp) {
+ if (!nesqp)
return;
- }
/* notify OF layer about this connection error event */
/* cm_id->rem_ref(cm_id); */
@@ -3442,14 +3641,14 @@ static void cm_event_connect_error(struct nes_cm_event *event)
cm_event.private_data_len = 0;
nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, "
- "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr,
- cm_event.remote_addr.sin_addr.s_addr);
+ "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr,
+ cm_event.remote_addr.sin_addr.s_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
if (ret)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
- "ret=%d\n", __func__, __LINE__, ret);
+ "ret=%d\n", __func__, __LINE__, ret);
cm_id->rem_ref(cm_id);
rem_ref_cm_node(event->cm_node->cm_core, event->cm_node);
@@ -3519,7 +3718,7 @@ static void cm_event_reset(struct nes_cm_event *event)
*/
static void cm_event_mpa_req(struct nes_cm_event *event)
{
- struct iw_cm_id *cm_id;
+ struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
int ret;
struct nes_cm_node *cm_node;
@@ -3531,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
atomic_inc(&cm_connect_reqs);
nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
- cm_node, cm_id, jiffies);
+ cm_node, cm_id, jiffies);
cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
cm_event.status = 0;
@@ -3545,19 +3744,21 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
cm_event.private_data = cm_node->mpa_frame_buf;
- cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
+ cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
+ cm_event.ird = cm_node->ird_size;
+ cm_event.ord = cm_node->ord_size;
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
- __func__, __LINE__, ret);
+ __func__, __LINE__, ret);
return;
}
static void cm_event_mpa_reject(struct nes_cm_event *event)
{
- struct iw_cm_id *cm_id;
+ struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
struct nes_cm_node *cm_node;
int ret;
@@ -3569,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
atomic_inc(&cm_connect_reqs);
nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
- cm_node, cm_id, jiffies);
+ cm_node, cm_id, jiffies);
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.status = -ECONNREFUSED;
@@ -3584,17 +3785,17 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
cm_event.private_data = cm_node->mpa_frame_buf;
- cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
+ cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, "
- "remove_addr=%08x\n",
- cm_event.local_addr.sin_addr.s_addr,
- cm_event.remote_addr.sin_addr.s_addr);
+ "remove_addr=%08x\n",
+ cm_event.local_addr.sin_addr.s_addr,
+ cm_event.remote_addr.sin_addr.s_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
- __func__, __LINE__, ret);
+ __func__, __LINE__, ret);
return;
}
@@ -3613,7 +3814,7 @@ static int nes_cm_post_event(struct nes_cm_event *event)
event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
INIT_WORK(&event->event_work, nes_cm_event_handler);
nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n",
- event->cm_node, event);
+ event->cm_node, event);
queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
@@ -3630,7 +3831,7 @@ static int nes_cm_post_event(struct nes_cm_event *event)
static void nes_cm_event_handler(struct work_struct *work)
{
struct nes_cm_event *event = container_of(work, struct nes_cm_event,
- event_work);
+ event_work);
struct nes_cm_core *cm_core;
if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core))
@@ -3638,29 +3839,29 @@ static void nes_cm_event_handler(struct work_struct *work)
cm_core = event->cm_node->cm_core;
nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
- event, event->type, atomic_read(&cm_core->events_posted));
+ event, event->type, atomic_read(&cm_core->events_posted));
switch (event->type) {
case NES_CM_EVENT_MPA_REQ:
cm_event_mpa_req(event);
nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n",
- event->cm_node);
+ event->cm_node);
break;
case NES_CM_EVENT_RESET:
nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n",
- event->cm_node);
+ event->cm_node);
cm_event_reset(event);
break;
case NES_CM_EVENT_CONNECTED:
if ((!event->cm_node->cm_id) ||
- (event->cm_node->state != NES_CM_STATE_TSA))
+ (event->cm_node->state != NES_CM_STATE_TSA))
break;
cm_event_connected(event);
nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
break;
case NES_CM_EVENT_MPA_REJECT:
if ((!event->cm_node->cm_id) ||
- (event->cm_node->state == NES_CM_STATE_TSA))
+ (event->cm_node->state == NES_CM_STATE_TSA))
break;
cm_event_mpa_reject(event);
nes_debug(NES_DBG_CM, "CM Event: REJECT\n");
@@ -3668,7 +3869,7 @@ static void nes_cm_event_handler(struct work_struct *work)
case NES_CM_EVENT_ABORTED:
if ((!event->cm_node->cm_id) ||
- (event->cm_node->state == NES_CM_STATE_TSA))
+ (event->cm_node->state == NES_CM_STATE_TSA))
break;
cm_event_connect_error(event);
nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index d9825fda70a..bdfa1fbb35f 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -48,7 +48,16 @@
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VERSION 1
#define IETF_MAX_PRIV_DATA_LEN 512
-#define IETF_MPA_FRAME_SIZE 20
+#define IETF_MPA_FRAME_SIZE 20
+#define IETF_RTR_MSG_SIZE 4
+#define IETF_MPA_V2_FLAG 0x10
+
+/* IETF RTR MSG Fields */
+#define IETF_PEER_TO_PEER 0x8000
+#define IETF_FLPDU_ZERO_LEN 0x4000
+#define IETF_RDMA0_WRITE 0x8000
+#define IETF_RDMA0_READ 0x4000
+#define IETF_NO_IRD_ORD 0x3FFF
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
@@ -56,7 +65,7 @@ enum ietf_mpa_flags {
IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
};
-struct ietf_mpa_frame {
+struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
@@ -66,6 +75,20 @@ struct ietf_mpa_frame {
#define ietf_mpa_req_resp_frame ietf_mpa_frame
+struct ietf_rtr_msg {
+ __be16 ctrl_ird;
+ __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ struct ietf_rtr_msg rtr_msg;
+ u8 priv_data[0];
+};
+
struct nes_v4_quad {
u32 rsvd0;
__le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */
@@ -171,8 +194,7 @@ struct nes_timer_entry {
#define NES_CM_DEF_SEQ2 0x18ed5740
#define NES_CM_DEF_LOCAL_ID2 0xb807
-#define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN)
-
+#define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_RTR_MSG_SIZE + IETF_MAX_PRIV_DATA_LEN)
typedef u32 nes_addr_t;
@@ -204,6 +226,21 @@ enum nes_cm_node_state {
NES_CM_STATE_CLOSED
};
+enum mpa_frame_version {
+ IETF_MPA_V1 = 1,
+ IETF_MPA_V2 = 2
+};
+
+enum mpa_frame_key {
+ MPA_KEY_REQUEST,
+ MPA_KEY_REPLY
+};
+
+enum send_rdma0 {
+ SEND_RDMA_READ_ZERO = 1,
+ SEND_RDMA_WRITE_ZERO = 2
+};
+
enum nes_tcpip_pkt_type {
NES_PKT_TYPE_UNKNOWN,
NES_PKT_TYPE_SYN,
@@ -245,9 +282,9 @@ struct nes_cm_tcp_context {
enum nes_cm_listener_state {
- NES_CM_LISTENER_PASSIVE_STATE=1,
- NES_CM_LISTENER_ACTIVE_STATE=2,
- NES_CM_LISTENER_EITHER_STATE=3
+ NES_CM_LISTENER_PASSIVE_STATE = 1,
+ NES_CM_LISTENER_ACTIVE_STATE = 2,
+ NES_CM_LISTENER_EITHER_STATE = 3
};
struct nes_cm_listener {
@@ -283,16 +320,20 @@ struct nes_cm_node {
struct nes_cm_node *loopbackpartner;
- struct nes_timer_entry *send_entry;
-
+ struct nes_timer_entry *send_entry;
+ struct nes_timer_entry *recv_entry;
spinlock_t retrans_list_lock;
- struct nes_timer_entry *recv_entry;
+ enum send_rdma0 send_rdma0_op;
- int send_write0;
union {
- struct ietf_mpa_frame mpa_frame;
- u8 mpa_frame_buf[MAX_CM_BUFFER];
+ struct ietf_mpa_v1 mpa_frame;
+ struct ietf_mpa_v2 mpa_v2_frame;
+ u8 mpa_frame_buf[MAX_CM_BUFFER];
};
+ enum mpa_frame_version mpa_frame_rev;
+ u16 ird_size;
+ u16 ord_size;
+
u16 mpa_frame_size;
struct iw_cm_id *cm_id;
struct list_head list;
@@ -399,10 +440,8 @@ struct nes_cm_ops {
struct nes_vnic *, u16, void *,
struct nes_cm_info *);
int (*close)(struct nes_cm_core *, struct nes_cm_node *);
- int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *,
- struct nes_cm_node *);
- int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *,
- struct nes_cm_node *);
+ int (*accept)(struct nes_cm_core *, struct nes_cm_node *);
+ int (*reject)(struct nes_cm_core *, struct nes_cm_node *);
int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
struct sk_buff *);
int (*destroy_cm_core)(struct nes_cm_core *);
@@ -422,5 +461,7 @@ int nes_destroy_listen(struct iw_cm_id *);
int nes_cm_recv(struct sk_buff *, struct net_device *);
int nes_cm_start(void);
int nes_cm_stop(void);
+int nes_add_ref_cm_node(struct nes_cm_node *cm_node);
+int nes_rem_ref_cm_node(struct nes_cm_node *cm_node);
#endif /* NES_CM_H */
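
For reference, the sizes implied by the definitions above: an MPA v1 header is 20 bytes (IETF_MPA_FRAME_SIZE), the v2 header adds the 4-byte RTR message for 24 bytes, and MAX_CM_BUFFER therefore works out to 20 + 4 + 512 = 536 bytes. A standalone compile-time check (illustrative, not part of the patch; BUILD_BUG_ON comes from linux/kernel.h on kernels of this vintage):

static inline void ietf_mpa_layout_checks(void)
{
        BUILD_BUG_ON(sizeof(struct ietf_mpa_v1) != IETF_MPA_FRAME_SIZE);
        BUILD_BUG_ON(sizeof(struct ietf_mpa_v2) !=
                     IETF_MPA_FRAME_SIZE + IETF_RTR_MSG_SIZE);
        BUILD_BUG_ON(MAX_CM_BUFFER != IETF_MPA_FRAME_SIZE +
                     IETF_RTR_MSG_SIZE + IETF_MAX_PRIV_DATA_LEN);
}
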
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index be36cbeae63..7c0ff19ce38 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -110,6 +110,14 @@ static unsigned char *nes_tcp_state_str[] = {
};
#endif
+static inline void print_ip(struct nes_cm_node *cm_node)
+{
+ unsigned char *rem_addr;
+ if (cm_node) {
+ rem_addr = (unsigned char *)&cm_node->rem_addr;
+ printk(KERN_ERR PFX "Remote IP addr: %pI4\n", rem_addr);
+ }
+}
/**
* nes_nic_init_timer_defaults
@@ -1555,6 +1563,7 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
struct nes_hw_nic_rq_wqe *nic_rqe;
struct nes_hw_nic *nesnic;
struct nes_device *nesdev;
+ struct nes_rskb_cb *cb;
u32 rx_wqes_posted = 0;
nesnic = &nesvnic->nic;
@@ -1580,6 +1589,9 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
bus_address = pci_map_single(nesdev->pcidev,
skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ cb->busaddr = bus_address;
+ cb->maplen = nesvnic->max_frame_size;
nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head];
nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
@@ -1669,6 +1681,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
u32 cqp_head;
u32 counter;
u32 wqe_count;
+ struct nes_rskb_cb *cb;
u8 jumbomode=0;
/* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */
@@ -1845,6 +1858,9 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
pmem = pci_map_single(nesdev->pcidev, skb->data,
nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ cb->busaddr = pmem;
+ cb->maplen = nesvnic->max_frame_size;
nic_rqe = &nesvnic->nic.rq_vbase[counter];
nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size);
@@ -1873,6 +1889,13 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
jumbomode = 1;
nes_nic_init_timer_defaults(nesdev, jumbomode);
}
+ if ((nesdev->nesadapter->allow_unaligned_fpdus) &&
+ (nes_init_mgt_qp(nesdev, netdev, nesvnic))) {
+ nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n", netdev->name);
+ nes_destroy_nic_qp(nesvnic);
+ return -ENOMEM;
+ }
+
nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr;
nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS;
nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc;
@@ -1895,28 +1918,29 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_hw_nic_sq_wqe *nic_sqe;
- struct nes_hw_nic_rq_wqe *nic_rqe;
__le16 *wqe_fragment_length;
u16 wqe_fragment_index;
- u64 wqe_frag;
u32 cqp_head;
u32 wqm_cfg0;
unsigned long flags;
+ struct sk_buff *rx_skb;
+ struct nes_rskb_cb *cb;
int ret;
+ if (nesdev->nesadapter->allow_unaligned_fpdus)
+ nes_destroy_mgt(nesvnic);
+
/* clear wqe stall before destroying NIC QP */
wqm_cfg0 = nes_read_indexed(nesdev, NES_IDX_WQM_CONFIG0);
nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG0, wqm_cfg0 & 0xFFFF7FFF);
/* Free remaining NIC receive buffers */
while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
- nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
- wqe_frag = (u64)le32_to_cpu(
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
- wqe_frag |= ((u64)le32_to_cpu(
- nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX]))<<32;
- pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
- nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ rx_skb = nesvnic->nic.rx_skb[nesvnic->nic.rq_tail];
+ cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
+ pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen,
+ PCI_DMA_FROMDEVICE);
+
dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
}
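
The teardown above no longer reads the DMA address back out of the RQ WQE; it relies on a small control block the patch stores in skb->cb[]. That structure is declared in nes_mgt.h, presumably added elsewhere in this patch and not shown in this excerpt; the reconstruction below is assembled from the fields used in these hunks, and the exact types and ordering are an assumption.

/* Assumed shape only -- see nes_mgt.h for the authoritative definition. */
struct nes_rskb_cb {
        u64             busaddr;        /* pci_map_single() address for data_start */
        u32             maplen;         /* length passed to pci_map_single() */
        u32             seqnum;         /* TCP sequence number of skb->data */
        u8              *data_start;    /* frame start (Ethernet header) */
        struct nes_qp   *nesqp;         /* QP a queued mgt skb belongs to */
};
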
@@ -2775,6 +2799,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
struct nes_hw_nic_sq_wqe *nic_sqe;
struct sk_buff *skb;
struct sk_buff *rx_skb;
+ struct nes_rskb_cb *cb;
__le16 *wqe_fragment_length;
u32 head;
u32 cq_size;
@@ -2859,6 +2884,8 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
pci_unmap_single(nesdev->pcidev, bus_address,
nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
+ cb->busaddr = 0;
/* rx_skb->tail = rx_skb->data + rx_pkt_size; */
/* rx_skb->len = rx_pkt_size; */
rx_skb->len = 0; /* TODO: see if this is necessary */
@@ -2983,6 +3010,7 @@ skip_rx_indicate0:
}
+
/**
* nes_cqp_ce_handler
*/
@@ -2997,6 +3025,8 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
u32 cq_size;
u32 cqe_count=0;
u32 error_code;
+ u32 opcode;
+ u32 ctx_index;
/* u32 counter; */
head = cq->cq_head;
@@ -3007,12 +3037,9 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
/* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head,
le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */
- if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
- u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
- cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) |
- ((u64)(le32_to_cpu(cq->cq_vbase[head].
- cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
- cqp = *((struct nes_hw_cqp **)&u64temp);
+ opcode = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]);
+ if (opcode & NES_CQE_VALID) {
+ cqp = &nesdev->cqp;
error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]);
if (error_code) {
@@ -3021,15 +3048,14 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f,
(u16)(error_code >> 16),
(u16)error_code);
- nes_debug(NES_DBG_CQP, "cqp: qp_id=%u, sq_head=%u, sq_tail=%u\n",
- cqp->qp_id, cqp->sq_head, cqp->sq_tail);
}
- u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
- wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
- ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
- wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX])));
- cqp_request = *((struct nes_cqp_request **)&u64temp);
+ u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
+ cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) |
+ ((u64)(le32_to_cpu(cq->cq_vbase[head].
+ cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
+
+ cqp_request = (struct nes_cqp_request *)(unsigned long)u64temp;
if (cqp_request) {
if (cqp_request->waiting) {
/* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */
@@ -3075,9 +3101,15 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
cqp_wqe = &nesdev->cqp.sq_vbase[head];
memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
barrier();
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] =
+
+ opcode = cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX];
+ if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT)
+ ctx_index = NES_CQP_WQE_DL_COMP_CTX_LOW_IDX;
+ else
+ ctx_index = NES_CQP_WQE_COMP_CTX_LOW_IDX;
+ cqp_wqe->wqe_words[ctx_index] =
cpu_to_le32((u32)((unsigned long)cqp_request));
- cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] =
+ cqp_wqe->wqe_words[ctx_index + 1] =
cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request)));
nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n",
cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head);
@@ -3093,7 +3125,6 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
nes_read32(nesdev->regs+NES_CQE_ALLOC);
}
-
static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
{
if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
@@ -3553,9 +3584,9 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
if (aeq_info & NES_AEQE_QP) {
- if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps,
- aeqe_cq_id)) ||
- (atomic_read(&nesqp->close_timer_started)))
+ if (!nes_is_resource_allocated(nesadapter,
+ nesadapter->allocated_qps,
+ aeqe_cq_id))
return;
}
@@ -3566,8 +3597,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
- (nesqp->ibqp_state == IB_QPS_RTS) &&
- ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+ (nesqp->ibqp_state == IB_QPS_RTS)) {
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
@@ -3594,9 +3624,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
return;
}
spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0);
nes_cm_disconn(nesqp);
break;
@@ -3694,7 +3725,9 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n",
nesqp->hwqp.qp_id, async_event_id);
- nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+ print_ip(nesqp->cm_node);
+ if (!atomic_read(&nesqp->close_timer_started))
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
break;
case NES_AEQE_AEID_CQ_OPERATION_ERROR:
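
nes_cqp_ce_handler() above now recovers the request pointer straight from the completion entry rather than going back through the SQ WQE scratch area. A minimal sketch of that reassembly (the helper name is illustrative, not part of the patch):

static struct nes_cqp_request *cqe_to_cqp_request(struct nes_hw_cq *cq, u32 head)
{
        u64 ctx;

        ctx  = ((u64)le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])) << 32;
        ctx |= (u64)le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
        return (struct nes_cqp_request *)(unsigned long)ctx;
}
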
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c3241479ec0..0b590e152c6 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -47,6 +47,11 @@
#define NES_MULTICAST_PF_MAX 8
#define NES_A0 3
+#define NES_ENABLE_PAU 0x07000001
+#define NES_DISABLE_PAU 0x07000000
+#define NES_PAU_COUNTER 10
+#define NES_CQP_OPCODE_MASK 0x3f
+
enum pci_regs {
NES_INT_STAT = 0x0000,
NES_INT_MASK = 0x0004,
@@ -73,8 +78,10 @@ enum indexed_regs {
NES_IDX_QP_CONTROL = 0x0040,
NES_IDX_FLM_CONTROL = 0x0080,
NES_IDX_INT_CPU_STATUS = 0x00a0,
+ NES_IDX_GPR_TRIGGER = 0x00bc,
NES_IDX_GPIO_CONTROL = 0x00f0,
NES_IDX_GPIO_DATA = 0x00f4,
+ NES_IDX_GPR2 = 0x010c,
NES_IDX_TCP_CONFIG0 = 0x01e4,
NES_IDX_TCP_TIMER_CONFIG = 0x01ec,
NES_IDX_TCP_NOW = 0x01f0,
@@ -202,6 +209,7 @@ enum nes_cqp_opcodes {
NES_CQP_REGISTER_SHARED_STAG = 0x0c,
NES_CQP_DEALLOCATE_STAG = 0x0d,
NES_CQP_MANAGE_ARP_CACHE = 0x0f,
+ NES_CQP_DOWNLOAD_SEGMENT = 0x10,
NES_CQP_SUSPEND_QPS = 0x11,
NES_CQP_UPLOAD_CONTEXT = 0x13,
NES_CQP_CREATE_CEQ = 0x16,
@@ -210,7 +218,8 @@ enum nes_cqp_opcodes {
NES_CQP_DESTROY_AEQ = 0x1b,
NES_CQP_LMI_ACCESS = 0x20,
NES_CQP_FLUSH_WQES = 0x22,
- NES_CQP_MANAGE_APBVT = 0x23
+ NES_CQP_MANAGE_APBVT = 0x23,
+ NES_CQP_MANAGE_QUAD_HASH = 0x25
};
enum nes_cqp_wqe_word_idx {
@@ -222,6 +231,14 @@ enum nes_cqp_wqe_word_idx {
NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5,
};
+enum nes_cqp_wqe_word_download_idx { /* format differs from other cqp ops */
+ NES_CQP_WQE_DL_OPCODE_IDX = 0,
+ NES_CQP_WQE_DL_COMP_CTX_LOW_IDX = 1,
+ NES_CQP_WQE_DL_COMP_CTX_HIGH_IDX = 2,
+ NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX = 3
+ /* For index values 4-15 use NES_NIC_SQ_WQE_ values */
+};
+
enum nes_cqp_cq_wqeword_idx {
NES_CQP_CQ_WQE_PBL_LOW_IDX = 6,
NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7,
@@ -242,6 +259,7 @@ enum nes_cqp_stag_wqeword_idx {
NES_CQP_STAG_WQE_PBL_LEN_IDX = 14
};
+#define NES_CQP_OP_LOGICAL_PORT_SHIFT 26
#define NES_CQP_OP_IWARP_STATE_SHIFT 28
#define NES_CQP_OP_TERMLEN_SHIFT 28
@@ -599,6 +617,7 @@ enum nes_nic_sq_wqe_bits {
enum nes_nic_cqe_word_idx {
NES_NIC_CQE_ACCQP_ID_IDX = 0,
+ NES_NIC_CQE_HASH_RCVNXT = 1,
NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2,
NES_NIC_CQE_MISC_IDX = 3,
};
@@ -1005,6 +1024,11 @@ struct nes_arp_entry {
#define NES_NIC_CQ_DOWNWARD_TREND 16
#define NES_PFT_SIZE 48
+#define NES_MGT_WQ_COUNT 32
+#define NES_MGT_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_32) | (NES_NIC_CTX_SQ_SIZE_32))
+#define NES_MGT_QP_OFFSET 36
+#define NES_MGT_QP_COUNT 4
+
struct nes_hw_tune_timer {
/* u16 cq_count; */
u16 threshold_low;
@@ -1118,6 +1142,7 @@ struct nes_adapter {
u32 et_rate_sample_interval;
u32 timer_int_limit;
u32 wqm_quanta;
+ u8 allow_unaligned_fpdus;
/* Adapter base MAC address */
u32 mac_addr_low;
@@ -1251,6 +1276,14 @@ struct nes_vnic {
enum ib_event_type delayed_event;
enum ib_event_type last_dispatched_event;
spinlock_t port_ibevent_lock;
+ u32 mgt_mem_size;
+ void *mgt_vbase;
+ dma_addr_t mgt_pbase;
+ struct nes_vnic_mgt *mgtvnic[NES_MGT_QP_COUNT];
+ struct task_struct *mgt_thread;
+ wait_queue_head_t mgt_wait_queue;
+ struct sk_buff_head mgt_skb_list;
+
};
struct nes_ib_device {
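
Since a download-segment WQE keeps its completion context in the DL word slots above rather than in the standard ones, code that stashes a cqp_request pointer into a deferred WQE has to pick the index by opcode, as nes_cqp_ce_handler() does earlier in this diff. An equivalent helper, written out only for illustration:

static inline u32 cqp_comp_ctx_low_idx(u32 wqe_opcode_word)
{
        if ((wqe_opcode_word & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT)
                return NES_CQP_WQE_DL_COMP_CTX_LOW_IDX;  /* words 1 and 2 */
        return NES_CQP_WQE_COMP_CTX_LOW_IDX;             /* standard layout */
}
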
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
new file mode 100644
index 00000000000..b3b2a240c6e
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -0,0 +1,1162 @@
+/*
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#include "nes.h"
+#include "nes_mgt.h"
+
+atomic_t pau_qps_created;
+atomic_t pau_qps_destroyed;
+
+static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
+{
+ unsigned long flags;
+ dma_addr_t bus_address;
+ struct sk_buff *skb;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ struct nes_hw_mgt *nesmgt;
+ struct nes_device *nesdev;
+ struct nes_rskb_cb *cb;
+ u32 rx_wqes_posted = 0;
+
+ nesmgt = &mgtvnic->mgt;
+ nesdev = mgtvnic->nesvnic->nesdev;
+ spin_lock_irqsave(&nesmgt->rq_lock, flags);
+ if (nesmgt->replenishing_rq != 0) {
+ if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
+ (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
+ atomic_set(&mgtvnic->rx_skb_timer_running, 1);
+ spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
+ mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
+ add_timer(&mgtvnic->rq_wqes_timer);
+ } else {
+ spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
+ }
+ return;
+ }
+ nesmgt->replenishing_rq = 1;
+ spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
+ do {
+ skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size);
+ if (skb) {
+ skb->dev = mgtvnic->nesvnic->netdev;
+
+ bus_address = pci_map_single(nesdev->pcidev,
+ skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ cb->busaddr = bus_address;
+ cb->maplen = mgtvnic->nesvnic->max_frame_size;
+
+ nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head];
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
+ cpu_to_le32(mgtvnic->nesvnic->max_frame_size);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)bus_address);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)bus_address >> 32));
+ nesmgt->rx_skb[nesmgt->rq_head] = skb;
+ nesmgt->rq_head++;
+ nesmgt->rq_head &= nesmgt->rq_size - 1;
+ atomic_dec(&mgtvnic->rx_skbs_needed);
+ barrier();
+ if (++rx_wqes_posted == 255) {
+ nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
+ rx_wqes_posted = 0;
+ }
+ } else {
+ spin_lock_irqsave(&nesmgt->rq_lock, flags);
+ if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
+ (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
+ atomic_set(&mgtvnic->rx_skb_timer_running, 1);
+ spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
+ mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
+ add_timer(&mgtvnic->rq_wqes_timer);
+ } else {
+ spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
+ }
+ break;
+ }
+ } while (atomic_read(&mgtvnic->rx_skbs_needed));
+ barrier();
+ if (rx_wqes_posted)
+ nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
+ nesmgt->replenishing_rq = 0;
+}
+
+/**
+ * nes_mgt_rq_wqes_timeout
+ */
+static void nes_mgt_rq_wqes_timeout(unsigned long parm)
+{
+ struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;
+
+ atomic_set(&mgtvnic->rx_skb_timer_running, 0);
+ if (atomic_read(&mgtvnic->rx_skbs_needed))
+ nes_replenish_mgt_rq(mgtvnic);
+}
+
+/**
+ * nes_mgt_free_skb - unmap and free skb
+ */
+static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir)
+{
+ struct nes_rskb_cb *cb;
+
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir);
+ cb->busaddr = 0;
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * nes_download_callback - handle download completions
+ */
+static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
+{
+ struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer;
+ struct nes_qp *nesqp = fpdu_info->nesqp;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < fpdu_info->frag_cnt; i++) {
+ skb = fpdu_info->frags[i].skb;
+ if (fpdu_info->frags[i].cmplt) {
+ nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
+ nes_rem_ref_cm_node(nesqp->cm_node);
+ }
+ }
+
+ if (fpdu_info->hdr_vbase)
+ pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len,
+ fpdu_info->hdr_vbase, fpdu_info->hdr_pbase);
+ kfree(fpdu_info);
+}
+
+/**
+ * nes_get_seq - Get the seq, ack_seq and window from the packet
+ */
+static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
+{
+ struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0];
+ struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
+ struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
+
+ *ack = be32_to_cpu(tcph->ack_seq);
+ *wnd = be16_to_cpu(tcph->window);
+ *fin_rcvd = tcph->fin;
+ *rst_rcvd = tcph->rst;
+ return be32_to_cpu(tcph->seq);
+}
+
+/**
+ * nes_get_next_skb - Get the next skb based on where the current skb is in the queue
+ */
+static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp,
+ struct sk_buff *skb, u32 nextseq, u32 *ack,
+ u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
+{
+ u32 seq;
+ bool processacks;
+ struct sk_buff *old_skb;
+
+ if (skb) {
+ /* Continue processing fpdu */
+ if (skb->next == (struct sk_buff *)&nesqp->pau_list)
+ goto out;
+ skb = skb->next;
+ processacks = false;
+ } else {
+ /* Starting a new one */
+ if (skb_queue_empty(&nesqp->pau_list))
+ goto out;
+ skb = skb_peek(&nesqp->pau_list);
+ processacks = true;
+ }
+
+ while (1) {
+ seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
+ if (seq == nextseq) {
+ if (skb->len || processacks)
+ break;
+ } else if (after(seq, nextseq)) {
+ goto out;
+ }
+
+ if (skb->next == (struct sk_buff *)&nesqp->pau_list)
+ goto out;
+
+ old_skb = skb;
+ skb = skb->next;
+ skb_unlink(old_skb, &nesqp->pau_list);
+ nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
+ nes_rem_ref_cm_node(nesqp->cm_node);
+ }
+ return skb;
+
+out:
+ return NULL;
+}
+
+/**
+ * get_fpdu_info - Find the next complete fpdu and return its fragments.
+ */
+static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
+ struct pau_fpdu_info **pau_fpdu_info)
+{
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ struct nes_rskb_cb *cb;
+ struct pau_fpdu_info *fpdu_info = NULL;
+ struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
+ unsigned long flags;
+ u32 fpdu_len = 0;
+ u32 tmp_len;
+ int frag_cnt = 0;
+ u32 tot_len;
+ u32 frag_tot;
+ u32 ack;
+ u32 fin_rcvd;
+ u32 rst_rcvd;
+ u16 wnd;
+ int i;
+ int rc = 0;
+
+ *pau_fpdu_info = NULL;
+
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
+ skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
+ if (!skb) {
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ goto out;
+ }
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ if (skb->len) {
+ fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
+ fpdu_len = (fpdu_len + 3) & 0xfffffffc;
+ tmp_len = fpdu_len;
+
+ /* See if we have all of the fpdu */
+ frag_tot = 0;
+ memset(&frags, 0, sizeof frags);
+ for (i = 0; i < MAX_FPDU_FRAGS; i++) {
+ frags[i].physaddr = cb->busaddr;
+ frags[i].physaddr += skb->data - cb->data_start;
+ frags[i].frag_len = min(tmp_len, skb->len);
+ frags[i].skb = skb;
+ frags[i].cmplt = (skb->len == frags[i].frag_len);
+ frag_tot += frags[i].frag_len;
+ frag_cnt++;
+
+ tmp_len -= frags[i].frag_len;
+ if (tmp_len == 0)
+ break;
+
+ skb = nes_get_next_skb(nesdev, nesqp, skb,
+ nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
+ if (!skb) {
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ goto out;
+ } else if (rst_rcvd) {
+ /* rst received in the middle of fpdu */
+ for (; i >= 0; i--) {
+ skb_unlink(frags[i].skb, &nesqp->pau_list);
+ nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE);
+ }
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ frags[0].physaddr = cb->busaddr;
+ frags[0].physaddr += skb->data - cb->data_start;
+ frags[0].frag_len = skb->len;
+ frags[0].skb = skb;
+ frags[0].cmplt = true;
+ frag_cnt = 1;
+ break;
+ }
+
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ }
+ } else {
+ /* no data */
+ frags[0].physaddr = cb->busaddr;
+ frags[0].frag_len = 0;
+ frags[0].skb = skb;
+ frags[0].cmplt = true;
+ frag_cnt = 1;
+ }
+
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+
+ /* Found one */
+ fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
+ if (fpdu_info == NULL) {
+ nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ fpdu_info->cqp_request = nes_get_cqp_request(nesdev);
+ if (fpdu_info->cqp_request == NULL) {
+ nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0];
+ iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
+ tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
+ fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start;
+ fpdu_info->data_len = fpdu_len;
+ tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN;
+
+ if (frags[0].cmplt) {
+ fpdu_info->hdr_pbase = cb->busaddr;
+ fpdu_info->hdr_vbase = NULL;
+ } else {
+ fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev,
+ fpdu_info->hdr_len, &fpdu_info->hdr_pbase);
+ if (!fpdu_info->hdr_vbase) {
+ nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Copy hdrs, adjusting len and seqnum */
+ memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len);
+ iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN);
+ tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
+ }
+
+ iph->tot_len = cpu_to_be16(tot_len);
+ iph->saddr = cpu_to_be32(0x7f000001);
+
+ tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
+ tcph->ack_seq = cpu_to_be32(ack);
+ tcph->window = cpu_to_be16(wnd);
+
+ nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd;
+
+ memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags));
+ fpdu_info->frag_cnt = frag_cnt;
+ fpdu_info->nesqp = nesqp;
+ *pau_fpdu_info = fpdu_info;
+
+ /* Update skb's for next pass */
+ for (i = 0; i < frag_cnt; i++) {
+ cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0];
+ skb_pull(frags[i].skb, frags[i].frag_len);
+
+ if (frags[i].skb->len == 0) {
+ /* Pull skb off the list - it will be freed in the callback */
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
+ skb_unlink(frags[i].skb, &nesqp->pau_list);
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ } else {
+ /* Last skb still has data so update the seq */
+ iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
+ tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
+ tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
+ }
+ }
+
+out:
+ if (rc) {
+ if (fpdu_info) {
+ if (fpdu_info->cqp_request)
+ nes_put_cqp_request(nesdev, fpdu_info->cqp_request);
+ kfree(fpdu_info);
+ }
+ }
+ return rc;
+}
+
+/**
+ * forward_fpdus - send complete fpdus, one at a time
+ */
+static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
+{
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct pau_fpdu_info *fpdu_info;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ u64 u64tmp;
+ u32 u32tmp;
+ int rc;
+
+ while (1) {
+ rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
+ if (fpdu_info == NULL)
+ return rc;
+
+ cqp_request = fpdu_info->cqp_request;
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX,
+ NES_CQP_DOWNLOAD_SEGMENT |
+ (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT));
+
+ u32tmp = fpdu_info->hdr_len << 16;
+ u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len;
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX,
+ u32tmp);
+
+ u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len;
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX,
+ u32tmp);
+
+ u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len;
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX,
+ u32tmp);
+
+ u64tmp = (u64)fpdu_info->hdr_pbase;
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
+ lower_32_bits(u64tmp));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
+ upper_32_bits(u64tmp >> 32));
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
+ lower_32_bits(fpdu_info->frags[0].physaddr));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX,
+ upper_32_bits(fpdu_info->frags[0].physaddr));
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_LOW_IDX,
+ lower_32_bits(fpdu_info->frags[1].physaddr));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX,
+ upper_32_bits(fpdu_info->frags[1].physaddr));
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX,
+ lower_32_bits(fpdu_info->frags[2].physaddr));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX,
+ upper_32_bits(fpdu_info->frags[2].physaddr));
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX,
+ lower_32_bits(fpdu_info->frags[3].physaddr));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX,
+ upper_32_bits(fpdu_info->frags[3].physaddr));
+
+ cqp_request->cqp_callback_pointer = fpdu_info;
+ cqp_request->callback = 1;
+ cqp_request->cqp_callback = nes_download_callback;
+
+ atomic_set(&cqp_request->refcount, 1);
+ nes_post_cqp_request(nesdev, cqp_request);
+ }
+
+ return 0;
+}
+
+static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
+{
+ int again = 1;
+ unsigned long flags;
+
+ do {
+ /* Ignore rc - if it failed, tcp retries will cause it to try again */
+ forward_fpdus(nesvnic, nesqp);
+
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
+ if (nesqp->pau_pending) {
+ nesqp->pau_pending = 0;
+ } else {
+ nesqp->pau_busy = 0;
+ again = 0;
+ }
+
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ } while (again);
+}
+
+/**
+ * queue_fpdus - Handle fpdus that hw passed up to sw
+ */
+static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
+{
+ struct sk_buff *tmpskb;
+ struct nes_rskb_cb *cb;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ unsigned char *tcph_end;
+ u32 rcv_nxt;
+ u32 rcv_wnd;
+ u32 seqnum;
+ u32 len;
+ bool process_it = false;
+ unsigned long flags;
+
+ /* Move data ptr to after tcp header */
+ iph = (struct iphdr *)skb->data;
+ tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
+ seqnum = be32_to_cpu(tcph->seq);
+ tcph_end = (((char *)tcph) + (4 * tcph->doff));
+
+ len = be16_to_cpu(iph->tot_len);
+ if (skb->len > len)
+ skb_trim(skb, len);
+ skb_pull(skb, tcph_end - skb->data);
+
+ /* Initialize tracking values */
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ cb->seqnum = seqnum;
+
+ /* Make sure data is in the receive window */
+ rcv_nxt = nesqp->pau_rcv_nxt;
+ rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd);
+ if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) {
+ nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE);
+ nes_rem_ref_cm_node(nesqp->cm_node);
+ return;
+ }
+
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
+
+ if (nesqp->pau_busy)
+ nesqp->pau_pending = 1;
+ else
+ nesqp->pau_busy = 1;
+
+ /* Queue skb by sequence number */
+ if (skb_queue_len(&nesqp->pau_list) == 0) {
+ skb_queue_head(&nesqp->pau_list, skb);
+ } else {
+ tmpskb = nesqp->pau_list.next;
+ while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
+ cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
+ if (before(seqnum, cb->seqnum))
+ break;
+ tmpskb = tmpskb->next;
+ }
+ skb_insert(tmpskb, skb, &nesqp->pau_list);
+ }
+ if (nesqp->pau_state == PAU_READY)
+ process_it = true;
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+
+ if (process_it)
+ process_fpdus(nesvnic, nesqp);
+
+ return;
+}
+
+/**
+ * mgt_thread - Handle mgt skbs in a safe context
+ */
+static int mgt_thread(void *context)
+{
+ struct nes_vnic *nesvnic = context;
+ struct sk_buff *skb;
+ struct nes_rskb_cb *cb;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(nesvnic->mgt_wait_queue,
+ skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop());
+ while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) {
+ skb = skb_dequeue(&nesvnic->mgt_skb_list);
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ cb->data_start = skb->data - ETH_HLEN;
+ cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start,
+ nesvnic->max_frame_size, PCI_DMA_TODEVICE);
+ queue_fpdus(skb, nesvnic, cb->nesqp);
+ }
+ }
+
+ /* Closing down so delete any entries on the queue */
+ while (skb_queue_len(&nesvnic->mgt_skb_list)) {
+ skb = skb_dequeue(&nesvnic->mgt_skb_list);
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ nes_rem_ref_cm_node(cb->nesqp->cm_node);
+ dev_kfree_skb_any(skb);
+ }
+ return 0;
+}
+
+/**
+ * nes_queue_mgt_skbs - Queue skb so it can be handled in a thread context
+ */
+void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
+{
+ struct nes_rskb_cb *cb;
+
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ cb->nesqp = nesqp;
+ skb_queue_tail(&nesvnic->mgt_skb_list, skb);
+ wake_up_interruptible(&nesvnic->mgt_wait_queue);
+}
+
+void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ atomic_inc(&pau_qps_destroyed);
+
+ /* Free packets that have not yet been forwarded */
+ /* Lock is acquired by skb_dequeue when removing the skb */
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
+ while (skb_queue_len(&nesqp->pau_list)) {
+ skb = skb_dequeue(&nesqp->pau_list);
+ nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
+ nes_rem_ref_cm_node(nesqp->cm_node);
+ }
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+}
+
+static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
+{
+ struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer;
+ struct nes_cqp_request *new_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_adapter *nesadapter;
+ struct nes_qp *nesqp;
+ struct nes_v4_quad nes_quad;
+ u32 crc_value;
+ u64 u64temp;
+
+ nesadapter = nesdev->nesadapter;
+ nesqp = qh_chg->nesqp;
+
+ /* Should we handle the bad completion */
+ if (cqp_request->major_code) {
+ printk(KERN_ERR PFX "Invalid cqp_request major_code=0x%x\n",
+ cqp_request->major_code);
+ WARN_ON(1);
+ }
+
+ switch (nesqp->pau_state) {
+ case PAU_DEL_QH:
+ /* Old hash code deleted, now set the new one */
+ nesqp->pau_state = PAU_ADD_LB_QH;
+ new_request = nes_get_cqp_request(nesdev);
+ if (new_request == NULL) {
+ nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n");
+ WARN_ON(1);
+ return;
+ }
+
+ memset(&nes_quad, 0, sizeof(nes_quad));
+ nes_quad.DstIpAdrIndex =
+ cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+ nes_quad.SrcIpadr = cpu_to_be32(0x7f000001);
+ nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]);
+ nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]);
+
+ /* Produce hash key */
+ crc_value = get_crc_value(&nes_quad);
+ nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
+ nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n",
+ nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
+
+ nesqp->hte_index &= nesadapter->hte_index_mask;
+ nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
+ nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001);
+ nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt);
+
+ cqp_wqe = &new_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words,
+ NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH |
+ NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+ u64temp = (u64)nesqp->nesqp_context_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+
+ nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n");
+
+ new_request->cqp_callback_pointer = qh_chg;
+ new_request->callback = 1;
+ new_request->cqp_callback = nes_chg_qh_handler;
+ atomic_set(&new_request->refcount, 1);
+ nes_post_cqp_request(nesdev, new_request);
+ break;
+
+ case PAU_ADD_LB_QH:
+ /* Start processing the queued fpdu's */
+ nesqp->pau_state = PAU_READY;
+ process_fpdus(qh_chg->nesvnic, qh_chg->nesqp);
+ kfree(qh_chg);
+ break;
+ }
+}
+
+/**
+ * nes_change_quad_hash
+ */
+static int nes_change_quad_hash(struct nes_device *nesdev,
+ struct nes_vnic *nesvnic, struct nes_qp *nesqp)
+{
+ struct nes_cqp_request *cqp_request = NULL;
+ struct pau_qh_chg *qh_chg = NULL;
+ u64 u64temp;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret = 0;
+
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
+ ret = -ENOMEM;
+ goto chg_qh_err;
+ }
+
+ qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
+ if (qh_chg == NULL) {
+ nes_debug(NES_DBG_PAU, "Failed to allocate a pau_qh_chg.\n");
+ ret = -ENOMEM;
+ goto chg_qh_err;
+ }
+ qh_chg->nesdev = nesdev;
+ qh_chg->nesvnic = nesvnic;
+ qh_chg->nesqp = nesqp;
+ nesqp->pau_state = PAU_DEL_QH;
+
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words,
+ NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE |
+ NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+ u64temp = (u64)nesqp->nesqp_context_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+
+ nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n");
+
+ cqp_request->cqp_callback_pointer = qh_chg;
+ cqp_request->callback = 1;
+ cqp_request->cqp_callback = nes_chg_qh_handler;
+ atomic_set(&cqp_request->refcount, 1);
+ nes_post_cqp_request(nesdev, cqp_request);
+
+ return ret;
+
+chg_qh_err:
+ kfree(qh_chg);
+ if (cqp_request)
+ nes_put_cqp_request(nesdev, cqp_request);
+ return ret;
+}
+
+/**
+ * nes_mgt_ce_handler
+ * This management code deals with any packed and unaligned (pau) fpdus
+ * that the hardware cannot handle.
+ */
+static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+{
+ struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq);
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 head;
+ u32 cq_size;
+ u32 cqe_count = 0;
+ u32 cqe_misc;
+ u32 qp_id = 0;
+ u32 skbs_needed;
+ unsigned long context;
+ struct nes_qp *nesqp;
+ struct sk_buff *rx_skb;
+ struct nes_rskb_cb *cb;
+
+ head = cq->cq_head;
+ cq_size = cq->cq_size;
+
+ while (1) {
+ cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
+ if (!(cqe_misc & NES_NIC_CQE_VALID))
+ break;
+
+ nesqp = NULL;
+ if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) {
+ qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]);
+ qp_id &= 0x001fffff;
+ if (qp_id < nesadapter->max_qp) {
+ context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];
+ nesqp = (struct nes_qp *)context;
+ }
+ }
+
+ if (nesqp) {
+ if (nesqp->pau_mode == false) {
+ nesqp->pau_mode = true; /* First time for this qp */
+ nesqp->pau_rcv_nxt = le32_to_cpu(
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
+ skb_queue_head_init(&nesqp->pau_list);
+ spin_lock_init(&nesqp->pau_lock);
+ atomic_inc(&pau_qps_created);
+ nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
+ }
+
+ rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
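+ /* Size the skb to the frame length carried in the low 16 bits of cqe_misc */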
+ rx_skb->len = 0;
+ skb_put(rx_skb, cqe_misc & 0x0000ffff);
+ rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev);
+ cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
+ pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE);
+ cb->busaddr = 0;
+ mgtvnic->mgt.rq_tail++;
+ mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1;
+
+ nes_add_ref_cm_node(nesqp->cm_node);
+ nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp);
+ } else {
+ printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id);
+ }
+
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
+ cqe_count++;
+ if (++head >= cq_size)
+ head = 0;
+
+ if (cqe_count == 255) {
+ /* Replenish mgt CQ */
+ nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16));
+ nesdev->currcq_count += cqe_count;
+ cqe_count = 0;
+ }
+
+ skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed);
+ if (skbs_needed > (mgtvnic->mgt.rq_size >> 1))
+ nes_replenish_mgt_rq(mgtvnic);
+ }
+
+ cq->cq_head = head;
+ nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ cq->cq_number | (cqe_count << 16));
+ nes_read32(nesdev->regs + NES_CQE_ALLOC);
+ nesdev->currcq_count += cqe_count;
+}
+
+/**
+ * nes_init_mgt_qp
+ */
+int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic)
+{
+ struct nes_vnic_mgt *mgtvnic;
+ u32 counter;
+ void *vmem;
+ dma_addr_t pmem;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 cqp_head;
+ unsigned long flags;
+ struct nes_hw_nic_qp_context *mgt_context;
+ u64 u64temp;
+ struct nes_hw_nic_rq_wqe *mgt_rqe;
+ struct sk_buff *skb;
+ u32 wqe_count;
+ struct nes_rskb_cb *cb;
+ u32 mgt_mem_size;
+ void *mgt_vbase;
+ dma_addr_t mgt_pbase;
+ int i;
+ int ret;
+
+ /* Allocate space for all mgt QPs once */
+ mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
+ if (mgtvnic == NULL) {
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
+ /* We are not sending from this NIC so sq is not allocated */
+ mgt_mem_size = 256 +
+ (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) +
+ (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) +
+ sizeof(struct nes_hw_nic_qp_context);
+ mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+ mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase);
+ if (!mgt_vbase) {
+ kfree(mgtvnic);
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n");
+ return -ENOMEM;
+ }
+
+ nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size;
+ nesvnic->mgt_vbase = mgt_vbase;
+ nesvnic->mgt_pbase = mgt_pbase;
+
+ skb_queue_head_init(&nesvnic->mgt_skb_list);
+ init_waitqueue_head(&nesvnic->mgt_wait_queue);
+ nesvnic->mgt_thread = kthread_run(mgt_thread, nesvnic, "nes_mgt_thread");
+
+ for (i = 0; i < NES_MGT_QP_COUNT; i++) {
+ mgtvnic->nesvnic = nesvnic;
+ mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i;
+ memset(mgt_vbase, 0, mgt_mem_size);
+ nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n",
+ mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size);
+
+ vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) &
+ ~(unsigned long)(256 - 1));
+ pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) &
+ ~(unsigned long long)(256 - 1));
+
+ spin_lock_init(&mgtvnic->mgt.rq_lock);
+
+ /* setup the RQ */
+ mgtvnic->mgt.rq_vbase = vmem;
+ mgtvnic->mgt.rq_pbase = pmem;
+ mgtvnic->mgt.rq_head = 0;
+ mgtvnic->mgt.rq_tail = 0;
+ mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT;
+
+ /* setup the CQ */
+ vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
+ pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
+
+ mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id;
+ mgtvnic->mgt_cq.cq_vbase = vmem;
+ mgtvnic->mgt_cq.cq_pbase = pmem;
+ mgtvnic->mgt_cq.cq_head = 0;
+ mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT;
+
+ mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler;
+
+ /* Send CreateCQ request to CQP */
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ cqp_head = nesdev->cqp.sq_head;
+
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
+ NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
+ ((u32)mgtvnic->mgt_cq.cq_size << 16));
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
+ mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16));
+ u64temp = (u64)mgtvnic->mgt_cq.cq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
+ u64temp = (unsigned long)&mgtvnic->mgt_cq;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ /* Send CreateQP request to CQP */
+ mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]);
+ mgt_context->context_words[NES_NIC_CTX_MISC_IDX] =
+ cpu_to_le32((u32)NES_MGT_CTX_SIZE |
+ ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
+ nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
+ nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
+ nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
+ if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0)
+ mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
+
+ u64temp = (u64)mgtvnic->mgt.rq_pbase;
+ mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+ mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
+ u64temp = (u64)mgtvnic->mgt.rq_pbase;
+ mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+ mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
+ NES_CQP_QP_TYPE_NIC);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id);
+ u64temp = (u64)mgtvnic->mgt_cq.cq_pbase +
+ (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+ nesdev->cqp.sq_head = cqp_head;
+
+ barrier();
+
+ /* Ring doorbell (2 WQEs) */
+ nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n",
+ mgtvnic->mgt.qp_id);
+
+ ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n",
+ mgtvnic->mgt.qp_id, ret);
+ if (!ret) {
+ nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id);
+ if (i == 0) {
+ pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
+ nesvnic->mgt_pbase);
+ kfree(mgtvnic);
+ } else {
+ nes_destroy_mgt(nesvnic);
+ }
+ return -EIO;
+ }
+
+ /* Populate the RQ */
+ for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) {
+ skb = dev_alloc_skb(nesvnic->max_frame_size);
+ if (!skb) {
+ nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
+ return -ENOMEM;
+ }
+
+ skb->dev = netdev;
+
+ pmem = pci_map_single(nesdev->pcidev, skb->data,
+ nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ cb = (struct nes_rskb_cb *)&skb->cb[0];
+ cb->busaddr = pmem;
+ cb->maplen = nesvnic->max_frame_size;
+
+ mgt_rqe = &mgtvnic->mgt.rq_vbase[counter];
+ mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size);
+ mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
+ mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
+ mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
+ mgtvnic->mgt.rx_skb[counter] = skb;
+ }
+
+ init_timer(&mgtvnic->rq_wqes_timer);
+ mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout;
+ mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic;
+
+ wqe_count = NES_MGT_WQ_COUNT - 1;
+ mgtvnic->mgt.rq_head = wqe_count;
+ barrier();
+ do {
+ counter = min(wqe_count, ((u32)255));
+ wqe_count -= counter;
+ nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id);
+ } while (wqe_count);
+
+ nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ mgtvnic->mgt_cq.cq_number);
+ nes_read32(nesdev->regs + NES_CQE_ALLOC);
+
+ mgt_vbase += mgt_mem_size;
+ mgt_pbase += mgt_mem_size;
+ nesvnic->mgtvnic[i] = mgtvnic++;
+ }
+ return 0;
+}
+
+
+void nes_destroy_mgt(struct nes_vnic *nesvnic)
+{
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_vnic_mgt *mgtvnic;
+ struct nes_vnic_mgt *first_mgtvnic;
+ unsigned long flags;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 cqp_head;
+ struct sk_buff *rx_skb;
+ int i;
+ int ret;
+
+ kthread_stop(nesvnic->mgt_thread);
+
+ /* Free remaining NIC receive buffers */
+ first_mgtvnic = nesvnic->mgtvnic[0];
+ for (i = 0; i < NES_MGT_QP_COUNT; i++) {
+ mgtvnic = nesvnic->mgtvnic[i];
+ if (mgtvnic == NULL)
+ continue;
+
+ while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) {
+ rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
+ nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE);
+ mgtvnic->mgt.rq_tail++;
+ mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1);
+ }
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
+ /* Destroy NIC QP */
+ cqp_head = nesdev->cqp.sq_head;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ mgtvnic->mgt.qp_id);
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+
+ /* Destroy NIC CQ */
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+
+ nesdev->cqp.sq_head = cqp_head;
+ barrier();
+
+ /* Ring doorbell (2 WQEs) */
+ nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
+ " cqp.sq_tail=%u, cqp.sq_size=%u\n",
+ cqp_head, nesdev->cqp.sq_head,
+ nesdev->cqp.sq_tail, nesdev->cqp.sq_size);
+
+ ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
+ NES_EVENT_TIMEOUT);
+
+ nes_debug(NES_DBG_SHUTDOWN, "Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
+ " cqp.sq_head=%u, cqp.sq_tail=%u\n",
+ ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
+ if (!ret)
+ nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n",
+ mgtvnic->mgt.qp_id);
+
+ nesvnic->mgtvnic[i] = NULL;
+ }
+
+ if (nesvnic->mgt_vbase) {
+ pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
+ nesvnic->mgt_pbase);
+ nesvnic->mgt_vbase = NULL;
+ nesvnic->mgt_pbase = 0;
+ }
+
+ kfree(first_mgtvnic);
+}
diff --git a/drivers/infiniband/hw/nes/nes_mgt.h b/drivers/infiniband/hw/nes/nes_mgt.h
new file mode 100644
index 00000000000..8c8af254555
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_mgt.h
@@ -0,0 +1,97 @@
+/*
+* Copyright (c) 2010 Intel-NE, Inc. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenIB.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*/
+
+#ifndef __NES_MGT_H
+#define __NES_MGT_H
+
+#define MPA_FRAMING 6 /* length is 2 bytes, crc is 4 bytes */
+
+int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic);
+void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp);
+void nes_destroy_mgt(struct nes_vnic *nesvnic);
+void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp);
+
+struct nes_hw_mgt {
+ struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */
+ dma_addr_t rq_pbase; /* PCI memory for host rings */
+ struct sk_buff *rx_skb[NES_NIC_WQ_SIZE];
+ u16 qp_id;
+ u16 sq_head;
+ u16 rq_head;
+ u16 rq_tail;
+ u16 rq_size;
+ u8 replenishing_rq;
+ u8 reserved;
+ spinlock_t rq_lock;
+};
+
+struct nes_vnic_mgt {
+ struct nes_vnic *nesvnic;
+ struct nes_hw_mgt mgt;
+ struct nes_hw_nic_cq mgt_cq;
+ atomic_t rx_skbs_needed;
+ struct timer_list rq_wqes_timer;
+ atomic_t rx_skb_timer_running;
+};
+
+#define MAX_FPDU_FRAGS 4
+struct pau_fpdu_frag {
+ struct sk_buff *skb;
+ u64 physaddr;
+ u32 frag_len;
+ bool cmplt;
+};
+
+struct pau_fpdu_info {
+ struct nes_qp *nesqp;
+ struct nes_cqp_request *cqp_request;
+ void *hdr_vbase;
+ dma_addr_t hdr_pbase;
+ int hdr_len;
+ u16 data_len;
+ u16 frag_cnt;
+ struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
+};
+
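+/*
+ * Quad hash update sequence once a QP enters PAU mode:
+ * PAU_DEL_QH -> PAU_ADD_LB_QH -> PAU_READY, after which the queued
+ * fpdus are processed (see nes_chg_qh_handler).
+ */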
+enum pau_qh_state {
+ PAU_DEL_QH,
+ PAU_ADD_LB_QH,
+ PAU_READY
+};
+
+struct pau_qh_chg {
+ struct nes_device *nesdev;
+ struct nes_vnic *nesvnic;
+ struct nes_qp *nesqp;
+};
+
+#endif /* __NES_MGT_H */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 47b2ee4c01e..c00d2f3f896 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1091,6 +1091,8 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"LRO aggregated",
"LRO flushed",
"LRO no_desc",
+ "PAU CreateQPs",
+ "PAU DestroyQPs",
};
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
@@ -1306,6 +1308,8 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
+ target_stat_values[++index] = atomic_read(&pau_qps_created);
+ target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
}
/**
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index f9c417c6b3b..cd10968bfa2 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -51,13 +51,34 @@
#include "nes.h"
-
-
static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
u32 mh_detected;
u32 mh_pauses_sent;
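+
+/*
+ * nes_set_pau - Ask the adapter firmware (via GPR2/GPR_TRIGGER) to enable
+ * packed/unaligned fpdu handling. Returns 0 if PAU is supported.
+ */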
+u32 nes_set_pau(struct nes_device *nesdev)
+{
+ u32 ret = 0;
+ u32 counter;
+
+ nes_write_indexed(nesdev, NES_IDX_GPR2, NES_ENABLE_PAU);
+ nes_write_indexed(nesdev, NES_IDX_GPR_TRIGGER, 1);
+
+ for (counter = 0; counter < NES_PAU_COUNTER; counter++) {
+ udelay(30);
+ if (!nes_read_indexed(nesdev, NES_IDX_GPR2)) {
+ printk(KERN_INFO PFX "PAU is supported.\n");
+ break;
+ }
+ nes_write_indexed(nesdev, NES_IDX_GPR_TRIGGER, 1);
+ }
+ if (counter == NES_PAU_COUNTER) {
+ printk(KERN_INFO PFX "PAU is not supported.\n");
+ return -EPERM;
+ }
+ return ret;
+}
+
/**
* nes_read_eeprom_values -
*/
@@ -187,6 +208,11 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
nesadapter->send_term_ok = 1;
+ if (nes_drv_opt & NES_DRV_OPT_ENABLE_PAU) {
+ if (!nes_set_pau(nesdev))
+ nesadapter->allow_unaligned_fpdus = 1;
+ }
+
nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
(u32)((u8)eeprom_data);
@@ -594,6 +620,7 @@ void nes_put_cqp_request(struct nes_device *nesdev,
nes_free_cqp_request(nesdev, cqp_request);
}
+
/**
* nes_post_cqp_request
*/
@@ -604,6 +631,8 @@ void nes_post_cqp_request(struct nes_device *nesdev,
unsigned long flags;
u32 cqp_head;
u64 u64temp;
+ u32 opcode;
+ int ctx_index = NES_CQP_WQE_COMP_CTX_LOW_IDX;
spin_lock_irqsave(&nesdev->cqp.lock, flags);
@@ -614,17 +643,20 @@ void nes_post_cqp_request(struct nes_device *nesdev,
nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
+ opcode = le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]);
+ if ((opcode & NES_CQP_OPCODE_MASK) == NES_CQP_DOWNLOAD_SEGMENT)
+ ctx_index = NES_CQP_WQE_DL_COMP_CTX_LOW_IDX;
barrier();
u64temp = (unsigned long)cqp_request;
- set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX,
- u64temp);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, ctx_index, u64temp);
nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ,"
- " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u,"
- " waiting = %d, refcount = %d.\n",
- le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
- le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request,
- nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
- cqp_request->waiting, atomic_read(&cqp_request->refcount));
+ " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u,"
+ " waiting = %d, refcount = %d.\n",
+ opcode & NES_CQP_OPCODE_MASK,
+ le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request,
+ nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
+ cqp_request->waiting, atomic_read(&cqp_request->refcount));
+
barrier();
/* Ring doorbell (1 WQEs) */
@@ -645,7 +677,6 @@ void nes_post_cqp_request(struct nes_device *nesdev,
return;
}
-
/**
* nes_arp_table
*/
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 9f2f7d4b119..5095bc41c6c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1458,7 +1458,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
struct ib_qp_attr attr;
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
- int ret;
+ int ret = 0;
atomic_inc(&sw_qps_destroyed);
nesqp->destroyed = 1;
@@ -1511,7 +1511,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
nes_clean_cq(nesqp, nesqp->nesrcq);
}
-
nes_rem_ref(&nesqp->ibqp);
return 0;
}
@@ -2338,8 +2337,10 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
skip_pages = ((u32)region->offset) >> 12;
- if (ib_copy_from_udata(&req, udata, sizeof(req)))
+ if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+ ib_umem_release(region);
return ERR_PTR(-EFAULT);
+ }
nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type);
switch (req.reg_type) {
@@ -2631,6 +2632,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &nesmr->ibmr;
}
+ ib_umem_release(region);
return ERR_PTR(-ENOSYS);
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 2df9993e0ca..fe6b6e92fa9 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -139,7 +139,8 @@ struct nes_qp {
struct nes_cq *nesrcq;
struct nes_pd *nespd;
void *cm_node; /* handle of the node this QP is associated with */
- struct ietf_mpa_frame *ietf_frame;
+ void *ietf_frame;
+ u8 ietf_frame_size;
dma_addr_t ietf_frame_pbase;
struct ib_mr *lsmm_mr;
struct nes_hw_qp hwqp;
@@ -154,6 +155,7 @@ struct nes_qp {
u32 mmap_sq_db_index;
u32 mmap_rq_db_index;
spinlock_t lock;
+ spinlock_t pau_lock;
struct nes_qp_context *nesqp_context;
dma_addr_t nesqp_context_pbase;
void *pbl_vbase;
@@ -161,6 +163,8 @@ struct nes_qp {
struct page *page;
struct timer_list terminate_timer;
enum ib_event_type terminate_eventtype;
+ struct sk_buff_head pau_list;
+ u32 pau_rcv_nxt;
u16 active_conn:1;
u16 skip_lsmm:1;
u16 user_mode:1;
@@ -168,7 +172,8 @@ struct nes_qp {
u16 flush_issued:1;
u16 destroyed:1;
u16 sig_all:1;
- u16 rsvd:9;
+ u16 pau_mode:1;
+ u16 rsvd:8;
u16 private_data_len;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
@@ -176,5 +181,8 @@ struct nes_qp {
u8 hw_tcp_state;
u8 term_flags;
u8 sq_kmapped;
+ u8 pau_busy;
+ u8 pau_pending;
+ u8 pau_state;
};
#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index c9624ea8720..b881bdc401f 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -171,7 +171,9 @@ struct qib_ctxtdata {
/* how many alloc_pages() chunks in rcvegrbuf_pages */
u32 rcvegrbuf_chunks;
/* how many egrbufs per chunk */
- u32 rcvegrbufs_perchunk;
+ u16 rcvegrbufs_perchunk;
+ /* ilog2 of above */
+ u16 rcvegrbufs_perchunk_shift;
/* order for rcvegrbuf_pages */
size_t rcvegrbuf_size;
/* rcvhdrq size (for freeing) */
@@ -221,6 +223,9 @@ struct qib_ctxtdata {
/* ctxt rcvhdrq head offset */
u32 head;
u32 pkt_count;
+ /* lookaside fields */
+ struct qib_qp *lookaside_qp;
+ u32 lookaside_qpn;
/* QPs waiting for context processing */
struct list_head qp_wait_list;
};
@@ -807,6 +812,10 @@ struct qib_devdata {
* supports, less gives more pio bufs/ctxt, etc.
*/
u32 cfgctxts;
+ /*
+ * number of ctxts available for PSM open
+ */
+ u32 freectxts;
/*
* hint that we should update pioavailshadow before
@@ -936,7 +945,9 @@ struct qib_devdata {
/* chip address space used by 4k pio buffers */
u32 align4k;
/* size of each rcvegrbuffer */
- u32 rcvegrbufsize;
+ u16 rcvegrbufsize;
+ /* log2 of above */
+ u16 rcvegrbufsize_shift;
/* localbus width (1, 2,4,8,16,32) from config space */
u32 lbus_width;
/* localbus speed in MHz */
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 204c4dd9dce..9892456a434 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -46,6 +46,7 @@
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 23e584f4c36..c90a55f4120 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "qib.h"
@@ -279,10 +280,10 @@ bail:
*/
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
- const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
- const u32 idx = etail % rcd->rcvegrbufs_perchunk;
+ const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
+ const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
- return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
+ return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}
/*
@@ -310,7 +311,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
u32 opcode;
u32 psn;
int diff;
- unsigned long flags;
/* Sanity check packet */
if (tlen < 24)
@@ -365,7 +365,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
- spin_lock_irqsave(&qp->s_lock, flags);
ruc_res =
qib_ruc_check_hdr(
ibp, hdr,
@@ -373,11 +372,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
qp,
be32_to_cpu(ohdr->bth[0]));
if (ruc_res) {
- spin_unlock_irqrestore(&qp->s_lock,
- flags);
goto unlock;
}
- spin_unlock_irqrestore(&qp->s_lock, flags);
/* Only deal with RDMA Writes for now */
if (opcode <
@@ -547,6 +543,15 @@ move_along:
updegr = 0;
}
}
+ /*
+ * Notify qib_destroy_qp() if it is waiting
+ * for lookaside_qp to finish.
+ */
+ if (rcd->lookaside_qp) {
+ if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
+ wake_up(&rcd->lookaside_qp->wait);
+ rcd->lookaside_qp = NULL;
+ }
rcd->head = l;
rcd->pkt_count += i;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 26253039d2c..574600ef5b4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -43,6 +43,7 @@
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include "qib.h"
#include "qib_common.h"
@@ -1284,6 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
+ dd->freectxts++;
ret = 0;
goto bail;
@@ -1792,6 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
+ dd->freectxts--;
}
mutex_unlock(&qib_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index d8ca0a0b970..781a802a321 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3273,6 +3273,8 @@ static int init_6120_variables(struct qib_devdata *dd)
/* we always allocate at least 2048 bytes for eager buffers */
ret = ib_mtu_enum_to_int(qib_ibmtu);
dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+ BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
+ dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
qib_6120_tidtemplate(dd);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index e1f947446c2..439d3c503cd 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -39,6 +39,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <rdma/ib_verbs.h>
@@ -4085,6 +4086,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
/* we always allocate at least 2048 bytes for eager buffers */
ret = ib_mtu_enum_to_int(qib_ibmtu);
dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+ BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
+ dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
qib_7220_tidtemplate(dd);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5ea9ece23b3..5bd2162b95d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -40,6 +40,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
@@ -2310,12 +2311,15 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ ppd->cpspec->ibcctrl_a = val;
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2387,11 +2391,6 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
- /* Hold the link state machine for mezz boards */
- if (IS_QMH(dd) || IS_QME(dd))
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
/* Also enable IBSTATUSCHG interrupt. */
val = qib_read_kreg_port(ppd, krp_errmask);
qib_write_kreg_port(ppd, krp_errmask,
@@ -2853,9 +2852,8 @@ static irqreturn_t qib_7322intr(int irq, void *data)
for (i = 0; i < dd->first_user_ctxt; i++) {
if (ctxtrbits & rmask) {
ctxtrbits &= ~rmask;
- if (dd->rcd[i]) {
+ if (dd->rcd[i])
qib_kreceive(dd->rcd[i], NULL, &npkts);
- }
}
rmask <<= 1;
}
@@ -5230,6 +5228,8 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
QIBL_IB_AUTONEG_INPROG)))
set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ struct qib_qsfp_data *qd =
+ &ppd->cpspec->qsfp_data;
/* unlock the Tx settings, speed may change */
qib_write_kreg_port(ppd, krp_tx_deemph_override,
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
@@ -5237,6 +5237,12 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
qib_cancel_sends(ppd);
/* on link down, ensure sane pcs state */
qib_7322_mini_pcs_reset(ppd);
+ /* schedule the qsfp refresh which should turn the link
+ off */
+ if (ppd->dd->flags & QIB_HAS_QSFP) {
+ qd->t_insert = get_jiffies_64();
+ schedule_work(&qd->work);
+ }
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
__qib_sdma_process_event(ppd,
@@ -5587,43 +5593,79 @@ static void qsfp_7322_event(struct work_struct *work)
struct qib_qsfp_data *qd;
struct qib_pportdata *ppd;
u64 pwrup;
+ unsigned long flags;
int ret;
u32 le2;
qd = container_of(work, struct qib_qsfp_data, work);
ppd = qd->ppd;
- pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
+ pwrup = qd->t_insert +
+ msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
- /*
- * Some QSFP's not only do not respond until the full power-up
- * time, but may behave badly if we try. So hold off responding
- * to insertion.
- */
- while (1) {
- u64 now = get_jiffies_64();
- if (time_after64(now, pwrup))
- break;
- msleep(20);
- }
- ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
- /*
- * Need to change LE2 back to defaults if we couldn't
- * read the cable type (to handle cable swaps), so do this
- * even on failure to read cable information. We don't
- * get here for QME, so IS_QME check not needed here.
- */
- if (!ret && !ppd->dd->cspec->r1) {
- if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
- le2 = LE2_QME;
- else if (qd->cache.atten[1] >= qib_long_atten &&
- QSFP_IS_CU(qd->cache.tech))
- le2 = LE2_5m;
- else
+ /* Delay for 20 msecs to allow ModPrs resistor to setup */
+ mdelay(QSFP_MODPRS_LAG_MSEC);
+
+ if (!qib_qsfp_mod_present(ppd)) {
+ ppd->cpspec->qsfp_data.modpresent = 0;
+ /* Set the physical link to disabled */
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else {
+ /*
+ * Some QSFP's not only do not respond until the full power-up
+ * time, but may behave badly if we try. So hold off responding
+ * to insertion.
+ */
+ while (1) {
+ u64 now = get_jiffies_64();
+ if (time_after64(now, pwrup))
+ break;
+ msleep(20);
+ }
+
+ ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+
+ /*
+ * Need to change LE2 back to defaults if we couldn't
+ * read the cable type (to handle cable swaps), so do this
+ * even on failure to read cable information. We don't
+ * get here for QME, so IS_QME check not needed here.
+ */
+ if (!ret && !ppd->dd->cspec->r1) {
+ if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
+ le2 = LE2_QME;
+ else if (qd->cache.atten[1] >= qib_long_atten &&
+ QSFP_IS_CU(qd->cache.tech))
+ le2 = LE2_5m;
+ else
+ le2 = LE2_DEFAULT;
+ } else
le2 = LE2_DEFAULT;
- } else
- le2 = LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
- init_txdds_table(ppd, 0);
+ ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
+ /*
+ * We always change parameters, since we can choose
+ * values for cables without eeproms, and the cable may have
+ * changed from a cable with full or partial eeprom content
+ * to one with partial or no content.
+ */
+ init_txdds_table(ppd, 0);
+ /* The physical link is being re-enabled only when the
+ * previous state was DISABLED and the VALID bit is not
+ * set. This should only happen when the cable has been
+ * physically pulled. */
+ if (!ppd->cpspec->qsfp_data.modpresent &&
+ (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
+ ppd->cpspec->qsfp_data.modpresent = 1;
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+ }
}
/*
@@ -5727,7 +5769,8 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
/* now change the IBC and serdes, overriding generic */
init_txdds_table(ppd, 1);
/* Re-enable the physical state machine on mezz boards
- * now that the correct settings have been set. */
+ * now that the correct settings have been set.
+ * QSFP boards are handled by the QSFP event handler */
if (IS_QMH(dd) || IS_QME(dd))
qib_set_ib_7322_lstate(ppd, 0,
QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
@@ -6205,6 +6248,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
/* we always allocate at least 2048 bytes for eager buffers */
dd->rcvegrbufsize = max(mtu, 2048);
+ BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
+ dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
qib_7322_tidtemplate(dd);
@@ -7147,7 +7192,8 @@ static void find_best_ent(struct qib_pportdata *ppd,
}
}
- /* Lookup serdes setting by cable type and attenuation */
+ /* Active cables don't have attenuation so we only set SERDES
+ * settings to account for the attenuation of the board traces. */
if (!override && QSFP_IS_ACTIVE(qd->tech)) {
*sdr_dds = txdds_sdr + ppd->dd->board_atten;
*ddr_dds = txdds_ddr + ppd->dd->board_atten;
@@ -7464,12 +7510,6 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
u32 le_val, rxcaldone;
int chan, chan_done = (1 << SERDES_CHANS) - 1;
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
/* Clear cmode-override, may be set from older driver */
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
@@ -7655,6 +7695,12 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
/* VGA output common mode */
ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+ /*
+ * Initialize the Tx DDS tables. Also done every QSFP event,
+ * for adapters with QSFP
+ */
+ init_txdds_table(ppd, 0);
+
return 0;
}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index a01f3fce8eb..58b0f8ad4a2 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
+#include <linux/module.h>
#include "qib.h"
#include "qib_common.h"
@@ -183,6 +184,9 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
rcd->rcvegrbufs_perchunk - 1) /
rcd->rcvegrbufs_perchunk;
+ BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
+ rcd->rcvegrbufs_perchunk_shift =
+ ilog2(rcd->rcvegrbufs_perchunk);
}
return rcd;
}
@@ -398,6 +402,7 @@ static void enable_chip(struct qib_devdata *dd)
if (rcd)
dd->f_rcvctrl(rcd->ppd, rcvmask, i);
}
+ dd->freectxts = dd->cfgctxts - dd->first_user_ctxt;
}
static void verify_interrupt(unsigned long opaque)
@@ -581,10 +586,6 @@ int qib_init(struct qib_devdata *dd, int reinit)
continue;
}
- /* let link come up, and enable IBC */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
portok++;
}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 4426782ad28..97a8bdf68e6 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
+#include <linux/module.h>
#include "qib.h"
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index e16751f8639..7e7e16fbee9 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -34,6 +34,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
+#include <linux/jhash.h>
#include "qib.h"
@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
+static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
+{
+ return jhash_1word(qpn, dev->qp_rnd) &
+ (dev->qp_table_size - 1);
+}
+
+
/*
* Put the QP into the hash table.
* The hash table holds a reference to the QP.
@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
unsigned long flags;
+ unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
spin_lock_irqsave(&dev->qpt_lock, flags);
+ atomic_inc(&qp->refcount);
if (qp->ibqp.qp_num == 0)
- ibp->qp0 = qp;
+ rcu_assign_pointer(ibp->qp0, qp);
else if (qp->ibqp.qp_num == 1)
- ibp->qp1 = qp;
+ rcu_assign_pointer(ibp->qp1, qp);
else {
qp->next = dev->qp_table[n];
- dev->qp_table[n] = qp;
+ rcu_assign_pointer(dev->qp_table[n], qp);
}
- atomic_inc(&qp->refcount);
spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ synchronize_rcu();
}
/*
@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_qp *q, **qpp;
+ unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
unsigned long flags;
- qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
-
spin_lock_irqsave(&dev->qpt_lock, flags);
if (ibp->qp0 == qp) {
- ibp->qp0 = NULL;
atomic_dec(&qp->refcount);
+ rcu_assign_pointer(ibp->qp0, NULL);
} else if (ibp->qp1 == qp) {
- ibp->qp1 = NULL;
atomic_dec(&qp->refcount);
- } else
+ rcu_assign_pointer(ibp->qp1, NULL);
+ } else {
+ struct qib_qp *q, **qpp;
+
+ qpp = &dev->qp_table[n];
for (; (q = *qpp) != NULL; qpp = &q->next)
if (q == qp) {
- *qpp = qp->next;
- qp->next = NULL;
atomic_dec(&qp->refcount);
+ rcu_assign_pointer(*qpp, qp->next);
+ qp->next = NULL;
break;
}
+ }
spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ synchronize_rcu();
}
/**
@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
if (!qib_mcast_tree_empty(ibp))
qp_inuse++;
- if (ibp->qp0)
+ rcu_read_lock();
+ if (rcu_dereference(ibp->qp0))
qp_inuse++;
- if (ibp->qp1)
+ if (rcu_dereference(ibp->qp1))
qp_inuse++;
+ rcu_read_unlock();
}
spin_lock_irqsave(&dev->qpt_lock, flags);
for (n = 0; n < dev->qp_table_size; n++) {
qp = dev->qp_table[n];
- dev->qp_table[n] = NULL;
+ rcu_assign_pointer(dev->qp_table[n], NULL);
for (; qp; qp = qp->next)
qp_inuse++;
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ synchronize_rcu();
return qp_inuse;
}
@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
*/
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
- struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
- unsigned long flags;
- struct qib_qp *qp;
+ struct qib_qp *qp = NULL;
- spin_lock_irqsave(&dev->qpt_lock, flags);
+ if (unlikely(qpn <= 1)) {
+ rcu_read_lock();
+ if (qpn == 0)
+ qp = rcu_dereference(ibp->qp0);
+ else
+ qp = rcu_dereference(ibp->qp1);
+ } else {
+ struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+ unsigned n = qpn_hash(dev, qpn);
- if (qpn == 0)
- qp = ibp->qp0;
- else if (qpn == 1)
- qp = ibp->qp1;
- else
- for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
- qp = qp->next)
+ rcu_read_lock();
+ for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
if (qp->ibqp.qp_num == qpn)
break;
+ }
if (qp)
- atomic_inc(&qp->refcount);
+ if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
+ qp = NULL;
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ rcu_read_unlock();
return qp;
}
@@ -765,8 +783,10 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
- if (attr_mask & IB_QP_PATH_MTU)
+ if (attr_mask & IB_QP_PATH_MTU) {
qp->path_mtu = pmtu;
+ qp->pmtu = ib_mtu_enum_to_int(pmtu);
+ }
if (attr_mask & IB_QP_RETRY_CNT) {
qp->s_retry_cnt = attr->retry_cnt;
@@ -781,8 +801,12 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MIN_RNR_TIMER)
qp->r_min_rnr_timer = attr->min_rnr_timer;
- if (attr_mask & IB_QP_TIMEOUT)
+ if (attr_mask & IB_QP_TIMEOUT) {
qp->timeout = attr->timeout;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ }
if (attr_mask & IB_QP_QKEY)
qp->qkey = attr->qkey;
@@ -1013,6 +1037,10 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
ret = ERR_PTR(-ENOMEM);
goto bail_swq;
}
+ RCU_INIT_POINTER(qp->next, NULL);
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
if (init_attr->srq)
sz = 0;
else {
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 3374a52232c..e06c4ed383f 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -273,18 +273,12 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
int ret;
int idx;
u16 cks;
- u32 mask;
u8 peek[4];
/* ensure sane contents on invalid reads, for cable swaps */
memset(cp, 0, sizeof(*cp));
- mask = QSFP_GPIO_MOD_PRS_N;
- if (ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
- if (ret & mask) {
+ if (!qib_qsfp_mod_present(ppd)) {
ret = -ENODEV;
goto bail;
}
@@ -444,6 +438,19 @@ const char * const qib_qsfp_devtech[16] = {
static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
+int qib_qsfp_mod_present(struct qib_pportdata *ppd)
+{
+ u32 mask;
+ int ret;
+
+ mask = QSFP_GPIO_MOD_PRS_N <<
+ (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
+ ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
+
+ return !((ret & mask) >>
+ ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
+}
+
/*
* Initialize structures that control access to QSFP. Called once per port
* on cards that support QSFP.
@@ -452,7 +459,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
void (*fevent)(struct work_struct *))
{
u32 mask, highs;
- int pins;
struct qib_devdata *dd = qd->ppd->dd;
@@ -480,8 +486,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
mask <<= QSFP_GPIO_PORT2_SHIFT;
/* Do not try to wait here. Better to let event handle it */
- pins = dd->f_gpio_mod(dd, 0, 0, 0);
- if (pins & mask)
+ if (!qib_qsfp_mod_present(qd->ppd))
goto bail;
/* We see a module, but it may be unwise to look yet. Just schedule */
qd->t_insert = get_jiffies_64();
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index c109bbdc90a..46002a9417c 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -34,6 +34,7 @@
#define QSFP_DEV 0xA0
#define QSFP_PWR_LAG_MSEC 2000
+#define QSFP_MODPRS_LAG_MSEC 20
/*
* Below are masks for various QSFP signals, for Port 1.
@@ -177,10 +178,12 @@ struct qib_qsfp_data {
struct work_struct work;
struct qib_qsfp_cache cache;
u64 t_insert;
+ u8 modpresent;
};
extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
struct qib_qsfp_cache *cp);
+extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
extern void qib_qsfp_init(struct qib_qsfp_data *qd,
void (*fevent)(struct work_struct *));
extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index eca0c41f122..894afac26f3 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -59,8 +59,7 @@ static void start_timer(struct qib_qp *qp)
qp->s_flags |= QIB_S_TIMER;
qp->s_timer.function = rc_timeout;
/* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies +
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
+ qp->s_timer.expires = jiffies + qp->timeout_jiffies;
add_timer(&qp->s_timer);
}
@@ -239,7 +238,7 @@ int qib_make_rc_req(struct qib_qp *qp)
u32 len;
u32 bth0;
u32 bth2;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
char newreq;
unsigned long flags;
int ret = 0;
@@ -272,13 +271,9 @@ int qib_make_rc_req(struct qib_qp *qp)
goto bail;
}
wqe = get_swqe_ptr(qp, qp->s_last);
- while (qp->s_last != qp->s_acked) {
- qib_send_complete(qp, wqe, IB_WC_SUCCESS);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- wqe = get_swqe_ptr(qp, qp->s_last);
- }
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
+ /* will get called again */
goto done;
}
@@ -1519,9 +1514,7 @@ read_middle:
* 4.096 usec. * (1 << qp->timeout)
*/
qp->s_flags |= QIB_S_TIMER;
- mod_timer(&qp->s_timer, jiffies +
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL));
+ mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
if (qp->s_flags & QIB_S_WAIT_ACK) {
qp->s_flags &= ~QIB_S_WAIT_ACK;
qib_schedule_send(qp);
@@ -1732,7 +1725,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
* same request.
*/
offset = ((psn - e->psn) & QIB_PSN_MASK) *
- ib_mtu_enum_to_int(qp->path_mtu);
+ qp->pmtu;
len = be32_to_cpu(reth->length);
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
@@ -1876,7 +1869,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
u32 psn;
u32 pad;
struct ib_wc wc;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
int diff;
struct ib_reth *reth;
unsigned long flags;
@@ -1892,10 +1885,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
}
opcode = be32_to_cpu(ohdr->bth[0]);
- spin_lock_irqsave(&qp->s_lock, flags);
if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- goto sunlock;
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
@@ -1955,8 +1946,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
break;
}
- memset(&wc, 0, sizeof wc);
-
if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
qp->r_flags |= QIB_R_COMM_EST;
if (qp->ibqp.event_handler) {
@@ -2009,16 +1998,19 @@ send_middle:
goto rnr_nak;
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
- goto send_last;
- /* FALLTHROUGH */
+ goto no_immediate_data;
+ /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
- /* FALLTHROUGH */
+ goto send_last;
case OP(SEND_LAST):
case OP(RDMA_WRITE_LAST):
+no_immediate_data:
+ wc.wc_flags = 0;
+ wc.ex.imm_data = 0;
send_last:
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
@@ -2051,6 +2043,12 @@ send_last:
wc.src_qp = qp->remote_qpn;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
+ /* zero fields that are N/A */
+ wc.vendor_err = 0;
+ wc.pkey_index = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ wc.csum_ok = 0;
/* Signal completion event if the solicited bit is set. */
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
@@ -2089,7 +2087,7 @@ send_last:
if (opcode == OP(RDMA_WRITE_FIRST))
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
- goto send_last;
+ goto no_immediate_data;
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index eb78d9367f0..b4b37e47321 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -260,12 +260,15 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
/*
*
- * This should be called with the QP s_lock held.
+ * This should be called with the QP r_lock held.
+ *
+ * The s_lock will be acquired around the qib_migrate_qp() call.
*/
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, struct qib_qp *qp, u32 bth0)
{
__be64 guid;
+ unsigned long flags;
if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
if (!has_grh) {
@@ -295,7 +298,9 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
goto err;
+ spin_lock_irqsave(&qp->s_lock, flags);
qib_migrate_qp(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
} else {
if (!has_grh) {
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index e9f9f8bc320..de1a4b2f33c 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -38,6 +38,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/firmware.h>
#include "qib.h"
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index cad44491320..12a9604310d 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
#include "qib.h"
#include "qib_common.h"
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
index c3ec8efc2ed..d6235931a1b 100644
--- a/drivers/infiniband/hw/qib/qib_srq.c
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -107,6 +107,11 @@ struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
u32 sz;
struct ib_srq *ret;
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ ret = ERR_PTR(-ENOSYS);
+ goto done;
+ }
+
if (srq_init_attr->attr.max_sge == 0 ||
srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
srq_init_attr->attr.max_wr == 0 ||
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 14d129de432..78fbd56879d 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -515,8 +515,7 @@ static ssize_t show_nfreectxts(struct device *device,
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
- dd->first_user_ctxt - (u32)qib_stats.sps_ctxts);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
static ssize_t show_serial(struct device *device,
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 7f36454c225..1bf626c4017 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#include <linux/moduleparam.h>
#include "qib.h"
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 32ccf3c824c..847e7afdfd9 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -51,7 +51,7 @@ int qib_make_uc_req(struct qib_qp *qp)
u32 hwords;
u32 bth0;
u32 len;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
int ret = 0;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -243,13 +243,12 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
struct qib_other_headers *ohdr;
- unsigned long flags;
u32 opcode;
u32 hdrsize;
u32 psn;
u32 pad;
struct ib_wc wc;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
struct ib_reth *reth;
int ret;
@@ -263,14 +262,11 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
}
opcode = be32_to_cpu(ohdr->bth[0]);
- spin_lock_irqsave(&qp->s_lock, flags);
if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- goto sunlock;
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
- memset(&wc, 0, sizeof wc);
/* Compare the PSN verses the expected PSN. */
if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
@@ -370,7 +366,7 @@ send_first:
}
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
- goto send_last;
+ goto no_immediate_data;
else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
goto send_last_imm;
/* FALLTHROUGH */
@@ -389,8 +385,11 @@ send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
- /* FALLTHROUGH */
+ goto send_last;
case OP(SEND_LAST):
+no_immediate_data:
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
send_last:
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
@@ -418,6 +417,12 @@ last_imm:
wc.src_qp = qp->remote_qpn;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
+ /* zero fields that are N/A */
+ wc.vendor_err = 0;
+ wc.pkey_index = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ wc.csum_ok = 0;
/* Signal completion event if the solicited bit is set. */
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
@@ -546,6 +551,4 @@ op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
-sunlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
}
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 7689e49c13c..2bc1d2b9629 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -74,7 +74,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
goto bail_release;
}
- current->mm->locked_vm += num_pages;
+ current->mm->pinned_vm += num_pages;
ret = 0;
goto bail;
@@ -151,7 +151,7 @@ void qib_release_user_pages(struct page **p, size_t num_pages)
__qib_release_user_pages(p, num_pages, 1);
if (current->mm) {
- current->mm->locked_vm -= num_pages;
+ current->mm->pinned_vm -= num_pages;
up_write(&current->mm->mmap_sem);
}
}
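The qib_user_pages.c change switches the accounting of pages pinned for DMA from mm->locked_vm to mm->pinned_vm, keeping long-term pins separate from mlock()ed memory. The property the code preserves is symmetry: every increment when pages are pinned is matched by a decrement on release, under the same lock. A toy sketch of that pattern, with all names hypothetical:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-process counter, analogous to mm->pinned_vm. */
struct mm_counters {
	pthread_mutex_t lock;
	size_t pinned_pages;
};

static void account_pin(struct mm_counters *mm, size_t npages)
{
	pthread_mutex_lock(&mm->lock);
	mm->pinned_pages += npages;
	pthread_mutex_unlock(&mm->lock);
}

static void account_unpin(struct mm_counters *mm, size_t npages)
{
	pthread_mutex_lock(&mm->lock);
	mm->pinned_pages -= npages;   /* must mirror every account_pin() */
	pthread_mutex_unlock(&mm->lock);
}

int main(void)
{
	struct mm_counters mm = { PTHREAD_MUTEX_INITIALIZER, 0 };

	account_pin(&mm, 16);
	account_unpin(&mm, 16);
	printf("pinned pages now: %zu\n", mm.pinned_pages);
	return 0;
}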
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9fab4048885..a894762da46 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -35,14 +35,16 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
+#include <linux/random.h>
#include "qib.h"
#include "qib_common.h"
-static unsigned int ib_qib_qp_table_size = 251;
+static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -659,17 +661,25 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait);
} else {
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
- goto drop;
+ if (rcd->lookaside_qp) {
+ if (rcd->lookaside_qpn != qp_num) {
+ if (atomic_dec_and_test(
+ &rcd->lookaside_qp->refcount))
+ wake_up(
+ &rcd->lookaside_qp->wait);
+ rcd->lookaside_qp = NULL;
+ }
+ }
+ if (!rcd->lookaside_qp) {
+ qp = qib_lookup_qpn(ibp, qp_num);
+ if (!qp)
+ goto drop;
+ rcd->lookaside_qp = qp;
+ rcd->lookaside_qpn = qp_num;
+ } else
+ qp = rcd->lookaside_qp;
ibp->n_unicast_rcv++;
qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for us to finish.
- */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
}
return;
@@ -1974,6 +1984,8 @@ static void init_ibport(struct qib_pportdata *ppd)
ibp->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ RCU_INIT_POINTER(ibp->qp0, NULL);
+ RCU_INIT_POINTER(ibp->qp1, NULL);
}
/**
@@ -1990,12 +2002,15 @@ int qib_register_ib_device(struct qib_devdata *dd)
int ret;
dev->qp_table_size = ib_qib_qp_table_size;
- dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
+ get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
+ dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
GFP_KERNEL);
if (!dev->qp_table) {
ret = -ENOMEM;
goto err_qpt;
}
+ for (i = 0; i < dev->qp_table_size; i++)
+ RCU_INIT_POINTER(dev->qp_table[i], NULL);
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
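The qib_ib_rcv() hunk above introduces a one-entry lookaside cache per receive context: the last QP returned by qib_lookup_qpn() is kept, reference held, in rcd->lookaside_qp, so back-to-back packets for the same QP skip both the hash lookup and the per-packet reference get/put. A compact userspace sketch of that pattern follows; lookup_qpn, qp_get/qp_put and rcv_find_qp are invented names, not the driver's API.

#include <stdio.h>

struct qp {
	unsigned int qpn;
	int refcount;
};

/* One-entry cache, analogous to rcd->lookaside_qp / lookaside_qpn. */
struct rcv_ctxt {
	struct qp *lookaside_qp;
	unsigned int lookaside_qpn;
};

static void qp_get(struct qp *qp) { qp->refcount++; }
static void qp_put(struct qp *qp) { qp->refcount--; }

/* Stand-in for the hash-table lookup; returns a referenced QP or NULL. */
static struct qp *lookup_qpn(unsigned int qpn)
{
	static struct qp table[4] = { {0, 0}, {1, 0}, {2, 0}, {3, 0} };

	if (qpn >= 4)
		return NULL;
	qp_get(&table[qpn]);
	return &table[qpn];
}

static struct qp *rcv_find_qp(struct rcv_ctxt *rcd, unsigned int qpn)
{
	/* Drop a cached QP that no longer matches the incoming packet. */
	if (rcd->lookaside_qp && rcd->lookaside_qpn != qpn) {
		qp_put(rcd->lookaside_qp);
		rcd->lookaside_qp = NULL;
	}
	/* Fall back to the full lookup and cache the result. */
	if (!rcd->lookaside_qp) {
		rcd->lookaside_qp = lookup_qpn(qpn);
		if (rcd->lookaside_qp)
			rcd->lookaside_qpn = qpn;
	}
	return rcd->lookaside_qp;
}

int main(void)
{
	struct rcv_ctxt rcd = { NULL, 0 };

	for (int i = 0; i < 3; i++)    /* same QP: only one real lookup */
		(void)rcv_find_qp(&rcd, 2);
	(void)rcv_find_qp(&rcd, 3);    /* different QP: cache entry swaps */
	if (rcd.lookaside_qp)
		printf("cached qpn=%u refcount=%d\n",
		       rcd.lookaside_qp->qpn, rcd.lookaside_qp->refcount);
	return 0;
}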
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 95e5b47223b..0c19ef0c412 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -485,6 +485,7 @@ struct qib_qp {
u8 alt_timeout; /* Alternate path timeout for this QP */
u8 port_num;
enum ib_mtu path_mtu;
+ u32 pmtu; /* decoded from path_mtu */
u32 remote_qpn;
u32 qkey; /* QKEY for this QP (for UD or RD) */
u32 s_size; /* send work queue size */
@@ -495,6 +496,7 @@ struct qib_qp {
u32 s_last; /* last completed entry */
u32 s_ssn; /* SSN of tail entry */
u32 s_lsn; /* limit sequence number (credit) */
+ unsigned long timeout_jiffies; /* computed from timeout */
struct qib_swqe *s_wq; /* send work queue */
struct qib_swqe *s_wqe;
struct qib_rq r_rq; /* receive work queue */
@@ -723,7 +725,8 @@ struct qib_ibdev {
dma_addr_t pio_hdrs_phys;
/* list of QPs waiting for RNR timer */
spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
- unsigned qp_table_size; /* size of the hash table */
+ u32 qp_table_size; /* size of the hash table */
+ u32 qp_rnd; /* random bytes for hash */
spinlock_t qpt_lock;
u32 n_piowait;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c74548a586e..014504d8e43 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/moduleparam.h>
#include "ipoib.h"
@@ -84,7 +85,7 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < frags; ++i)
- ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
@@ -183,7 +184,7 @@ partial_error:
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
for (; i > 0; --i)
- ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
return NULL;
@@ -1497,6 +1498,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_srq_init_attr srq_init_attr = {
+ .srq_type = IB_SRQT_BASIC,
.attr = {
.max_wr = ipoib_recvq_size,
.max_sge = max_sge
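The ipoib_cm.c hunks pair each unmap call with the way the buffer was mapped: fragments mapped with ib_dma_map_page() are now released with ib_dma_unmap_page() instead of ib_dma_unmap_single(), and the SRQ is explicitly created as IB_SRQT_BASIC now that other SRQ types exist. A schematic sketch of the "remember how it was mapped, tear down with the matching primitive" rule; the dma_*_stub wrappers and struct mapping are illustrative only, not the ib_dma API.

#include <stdio.h>

/* Hypothetical mapping record: remember *how* a buffer was mapped so the
 * teardown path always calls the matching unmap primitive. */
enum map_kind { MAP_SINGLE, MAP_PAGE };

struct mapping {
	enum map_kind kind;
	unsigned long dma_addr;
	size_t len;
};

static void dma_unmap_single_stub(unsigned long addr, size_t len)
{
	printf("unmap_single(0x%lx, %zu)\n", addr, len);
}

static void dma_unmap_page_stub(unsigned long addr, size_t len)
{
	printf("unmap_page(0x%lx, %zu)\n", addr, len);
}

static void mapping_release(const struct mapping *m)
{
	if (m->kind == MAP_PAGE)
		dma_unmap_page_stub(m->dma_addr, m->len);
	else
		dma_unmap_single_stub(m->dma_addr, m->len);
}

int main(void)
{
	/* Head buffer mapped as "single", fragments mapped as pages. */
	struct mapping maps[] = {
		{ MAP_SINGLE, 0x1000, 128 },
		{ MAP_PAGE,   0x2000, 4096 },
		{ MAP_PAGE,   0x3000, 4096 },
	};

	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		mapping_release(&maps[i]);
	return 0;
}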
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 86eae229dc4..50061854616 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -37,6 +37,7 @@
struct file_operations;
#include <linux/debugfs.h>
+#include <linux/export.h>
#include "ipoib.h"
@@ -212,16 +213,15 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
gid_buf, path.pathrec.dlid ? "yes" : "no");
if (path.pathrec.dlid) {
- rate = ib_rate_to_mult(path.pathrec.rate) * 25;
+ rate = ib_rate_to_mbps(path.pathrec.rate);
seq_printf(file,
" DLID: 0x%04x\n"
" SL: %12d\n"
- " rate: %*d%s Gb/sec\n",
+ " rate: %8d.%d Gb/sec\n",
be16_to_cpu(path.pathrec.dlid),
path.pathrec.sl,
- 10 - ((rate % 10) ? 2 : 0),
- rate / 10, rate % 10 ? ".5" : "");
+ rate / 1000, rate % 1000);
}
seq_putc(file, '\n');
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2b060f45bec..0ef9af94997 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -34,6 +34,7 @@
*/
#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ecea4fe1ed0..1b7a9768635 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -34,6 +34,7 @@
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
+#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9c61b9c2c59..7e7373a700e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -57,6 +57,7 @@
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/sock.h>
@@ -151,7 +152,6 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->mr->lkey;
- iser_task->headers_initialized = 1;
iser_task->iser_conn = iser_conn;
return 0;
}
@@ -166,8 +166,7 @@ iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- if (!iser_task->headers_initialized)
- if (iser_initialize_task_headers(task, &iser_task->desc))
+ if (iser_initialize_task_headers(task, &iser_task->desc))
return -ENOMEM;
/* mgmt task */
@@ -278,6 +277,13 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+
+ struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_device *device = iser_conn->ib_conn->device;
+
+ ib_dma_unmap_single(device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
/* mgmt tasks do not need special cleanup */
if (!task->sc)
@@ -632,6 +638,59 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
iser_conn_terminate(ib_conn);
}
+static mode_t iser_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
static struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over iSER, v." DRV_VER,
@@ -653,32 +712,6 @@ static struct iscsi_transport iscsi_iser_transport = {
.owner = THIS_MODULE,
.name = "iser",
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T,
- .param_mask = ISCSI_MAX_RECV_DLENGTH |
- ISCSI_MAX_XMIT_DLENGTH |
- ISCSI_HDRDGST_EN |
- ISCSI_DATADGST_EN |
- ISCSI_INITIAL_R2T_EN |
- ISCSI_MAX_R2T |
- ISCSI_IMM_DATA_EN |
- ISCSI_FIRST_BURST |
- ISCSI_MAX_BURST |
- ISCSI_PDU_INORDER_EN |
- ISCSI_DATASEQ_INORDER_EN |
- ISCSI_CONN_PORT |
- ISCSI_CONN_ADDRESS |
- ISCSI_EXP_STATSN |
- ISCSI_PERSISTENT_PORT |
- ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO |
- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
- .host_param_mask = ISCSI_HOST_HWADDRESS |
- ISCSI_HOST_NETDEV_NAME |
- ISCSI_HOST_INITIATOR_NAME,
/* session management */
.create_session = iscsi_iser_session_create,
.destroy_session = iscsi_iser_session_destroy,
@@ -686,6 +719,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
.destroy_conn = iscsi_iser_conn_destroy,
+ .attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
.get_ep_param = iscsi_iser_get_ep_param,
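The iscsi_iser.c change above replaces the static param_mask/host_param_mask bitmasks with an .attr_is_visible callback, so the transport answers per attribute type and per attribute whether sysfs exposes it and with what permission. A small standalone sketch of the callback pattern; the attribute enums and PERM_READ constant are made up for illustration.

#include <stdio.h>

/* Hypothetical attribute namespaces and IDs, standing in for
 * ISCSI_HOST_PARAM / ISCSI_PARAM and their members. */
enum attr_type { HOST_ATTR, CONN_ATTR };
enum host_attr { HOST_NETDEV, HOST_HWADDR, HOST_DEBUG_KNOB };
enum conn_attr { CONN_ADDRESS, CONN_PORT, CONN_INTERNAL_STATE };

#define PERM_READ 0444   /* stand-in for S_IRUGO */

/* The transport decides visibility per query instead of publishing one
 * global capability bitmask up front. */
static int attr_is_visible(enum attr_type type, int attr)
{
	switch (type) {
	case HOST_ATTR:
		switch (attr) {
		case HOST_NETDEV:
		case HOST_HWADDR:
			return PERM_READ;
		default:
			return 0;          /* hidden */
		}
	case CONN_ATTR:
		switch (attr) {
		case CONN_ADDRESS:
		case CONN_PORT:
			return PERM_READ;
		default:
			return 0;
		}
	}
	return 0;
}

int main(void)
{
	printf("netdev:   %o\n", attr_is_visible(HOST_ATTR, HOST_NETDEV));
	printf("internal: %o\n", attr_is_visible(CONN_ATTR, CONN_INTERNAL_STATE));
	return 0;
}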
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index db6f3ce9f3b..db7ea3704da 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -257,7 +257,8 @@ struct iser_conn {
struct list_head conn_list; /* entry in ig conn list */
char *login_buf;
- u64 login_dma;
+ char *login_req_buf, *login_resp_buf;
+ u64 login_req_dma, login_resp_dma;
unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
@@ -277,7 +278,6 @@ struct iscsi_iser_task {
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
- int headers_initialized;
};
struct iser_page_vec {
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index f299de6b419..a607542fc79 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -221,8 +221,14 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
struct iser_device *device = ib_conn->device;
if (ib_conn->login_buf) {
- ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
- ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+ if (ib_conn->login_req_dma)
+ ib_dma_unmap_single(device->ib_device,
+ ib_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+ if (ib_conn->login_resp_dma)
+ ib_dma_unmap_single(device->ib_device,
+ ib_conn->login_resp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->login_buf);
}
@@ -394,6 +400,7 @@ int iser_send_control(struct iscsi_conn *conn,
unsigned long data_seg_len;
int err = 0;
struct iser_device *device;
+ struct iser_conn *ib_conn = iser_conn->ib_conn;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
@@ -409,9 +416,19 @@ int iser_send_control(struct iscsi_conn *conn,
iser_err("data present on non login task!!!\n");
goto send_control_error;
}
- memcpy(iser_conn->ib_conn->login_buf, task->data,
+
+ ib_dma_sync_single_for_cpu(device->ib_device,
+ ib_conn->login_req_dma, task->data_count,
+ DMA_TO_DEVICE);
+
+ memcpy(iser_conn->ib_conn->login_req_buf, task->data,
task->data_count);
- tx_dsg->addr = iser_conn->ib_conn->login_dma;
+
+ ib_dma_sync_single_for_device(device->ib_device,
+ ib_conn->login_req_dma, task->data_count,
+ DMA_TO_DEVICE);
+
+ tx_dsg->addr = iser_conn->ib_conn->login_req_dma;
tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2;
@@ -445,8 +462,8 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
int rx_buflen, outstanding, count, err;
/* differentiate between login to all other PDUs */
- if ((char *)rx_desc == ib_conn->login_buf) {
- rx_dma = ib_conn->login_dma;
+ if ((char *)rx_desc == ib_conn->login_resp_buf) {
+ rx_dma = ib_conn->login_resp_dma;
rx_buflen = ISER_RX_LOGIN_SIZE;
} else {
rx_dma = rx_desc->dma_addr;
@@ -473,7 +490,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
* for the posted rx bufs refcount to become zero handles everything */
conn->ib_conn->post_recv_buf_count--;
- if (rx_dma == ib_conn->login_dma)
+ if (rx_dma == ib_conn->login_resp_dma)
return;
outstanding = ib_conn->post_recv_buf_count;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ede1475bee0..e28877c4ce1 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -155,20 +155,39 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
struct iser_device *device;
struct ib_qp_init_attr init_attr;
- int ret = -ENOMEM;
+ int req_err, resp_err, ret = -ENOMEM;
struct ib_fmr_pool_param params;
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
- ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+ ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!ib_conn->login_buf)
goto out_err;
- ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
- (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
- DMA_FROM_DEVICE);
+ ib_conn->login_req_buf = ib_conn->login_buf;
+ ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+ ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_req_buf,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+ ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_resp_buf,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+ req_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma);
+ resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma);
+
+ if (req_err || resp_err) {
+ if (req_err)
+ ib_conn->login_req_dma = 0;
+ if (resp_err)
+ ib_conn->login_resp_dma = 0;
+ goto out_err;
+ }
ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
@@ -658,11 +677,11 @@ int iser_post_recvl(struct iser_conn *ib_conn)
struct ib_sge sge;
int ib_ret;
- sge.addr = ib_conn->login_dma;
+ sge.addr = ib_conn->login_resp_dma;
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = ib_conn->device->mr->lkey;
- rx_wr.wr_id = (unsigned long)ib_conn->login_buf;
+ rx_wr.wr_id = (unsigned long)ib_conn->login_resp_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
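The iser login-path changes in iser_initiator.c and iser_verbs.c split the single login_buf into a request region (mapped towards the device) and a response region (mapped from the device), check each mapping with ib_dma_mapping_error(), and bracket the CPU memcpy into the request region with sync-for-CPU / sync-for-device calls. The sketch below shows only the layout step of carving one allocation into the two regions; the sizes and names are illustrative, not the driver's constants.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LOGIN_REQ_SIZE  8192   /* illustrative, not ISCSI_DEF_MAX_RECV_SEG_LEN */
#define LOGIN_RESP_SIZE 8192   /* illustrative, not ISER_RX_LOGIN_SIZE */

struct login_bufs {
	char *buf;        /* one allocation ...                             */
	char *req;        /* ... request region: CPU writes, device reads   */
	char *resp;       /* ... response region: device writes, CPU reads  */
};

static int login_bufs_alloc(struct login_bufs *lb)
{
	lb->buf = malloc(LOGIN_REQ_SIZE + LOGIN_RESP_SIZE);
	if (!lb->buf)
		return -1;
	lb->req = lb->buf;
	lb->resp = lb->buf + LOGIN_REQ_SIZE;
	return 0;
}

int main(void)
{
	struct login_bufs lb;

	if (login_bufs_alloc(&lb))
		return 1;
	/* In the driver, req and resp each get their own DMA mapping with
	 * opposite directions, and CPU writes into req are wrapped in
	 * sync-for-cpu / sync-for-device calls. */
	memcpy(lb.req, "login pdu", 9);
	printf("req at +0, resp at +%ld\n", (long)(lb.resp - lb.buf));
	free(lb.buf);
	return 0;
}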
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 23e82e46656..001b147c7f9 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -3,7 +3,7 @@
#
menu "Input device support"
- depends on !S390
+ depends on !S390 && !UML
config INPUT
tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 3367f760d75..480eb9d9876 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -309,9 +309,10 @@ EXPORT_SYMBOL_GPL(input_ff_event);
* Once ff device is created you need to setup its upload, erase,
* playback and other handlers before registering input device
*/
-int input_ff_create(struct input_dev *dev, int max_effects)
+int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
struct ff_device *ff;
+ size_t ff_dev_size;
int i;
if (!max_effects) {
@@ -319,8 +320,12 @@ int input_ff_create(struct input_dev *dev, int max_effects)
return -EINVAL;
}
- ff = kzalloc(sizeof(struct ff_device) +
- max_effects * sizeof(struct file *), GFP_KERNEL);
+ ff_dev_size = sizeof(struct ff_device) +
+ max_effects * sizeof(struct file *);
+ if (ff_dev_size < max_effects) /* overflow */
+ return -EINVAL;
+
+ ff = kzalloc(ff_dev_size, GFP_KERNEL);
if (!ff)
return -ENOMEM;
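The ff-core.c change guards the allocation size against integer overflow: max_effects becomes unsigned and the computed sizeof(struct ff_device) + max_effects * sizeof(struct file *) is rejected if it wrapped around. The patch uses a post-hoc comparison of the result; the sketch below uses the equivalent pre-multiplication form of the check, with placeholder struct names.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder header + trailing pointer array, standing in for
 * struct ff_device and its array of struct file pointers. */
struct header {
	unsigned int max_effects;
};

/* Allocate header + n trailing pointers, refusing sizes that would
 * overflow size_t (the standard a + b*c overflow check). */
static void *alloc_with_slots(size_t n)
{
	size_t elem = sizeof(void *);

	if (n > (SIZE_MAX - sizeof(struct header)) / elem)
		return NULL;                       /* would overflow size_t */
	return calloc(1, sizeof(struct header) + n * elem);
}

int main(void)
{
	void *ok  = alloc_with_slots(16);
	void *bad = alloc_with_slots(SIZE_MAX / 2); /* overflow: rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	free(bad);
	return 0;
}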
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index 1accb89ae66..e46a86776a6 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -8,6 +8,7 @@
* the Free Software Foundation.
*/
+#include <linux/export.h>
#include <asm/uaccess.h>
#include "input-compat.h"
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index c48c81f0308..f658086fbbe 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -9,6 +9,7 @@
*/
#include <linux/input/mt.h>
+#include <linux/export.h>
#include <linux/slab.h>
#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
@@ -117,6 +118,7 @@ void input_mt_report_finger_count(struct input_dev *dev, int count)
input_event(dev, EV_KEY, BTN_TOOL_DOUBLETAP, count == 2);
input_event(dev, EV_KEY, BTN_TOOL_TRIPLETAP, count == 3);
input_event(dev, EV_KEY, BTN_TOOL_QUADTAP, count == 4);
+ input_event(dev, EV_KEY, BTN_TOOL_QUINTTAP, count == 5);
}
EXPORT_SYMBOL(input_mt_report_finger_count);
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index b1aabde8752..7dfe1009fae 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/module.h>
#include <linux/input-polldev.h>
MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
@@ -49,8 +50,10 @@ static int input_open_polled_device(struct input_dev *input)
dev->open(dev);
/* Only start polling if polling is enabled */
- if (dev->poll_interval > 0)
- queue_delayed_work(system_freezable_wq, &dev->work, 0);
+ if (dev->poll_interval > 0) {
+ dev->poll(dev);
+ input_polldev_queue_work(dev);
+ }
return 0;
}
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index f6732b57ca0..6d6e7418dc2 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/input/as5011.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick"
#define MODULE_DEVICE_ALIAS "as5011"
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index b4dee9d5a05..615c21f2a55 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -33,10 +33,10 @@ config KEYBOARD_ADP5588
module will be called adp5588-keys.
config KEYBOARD_ADP5589
- tristate "ADP5589 I2C QWERTY Keypad and IO Expander"
+ tristate "ADP5585/ADP5589 I2C QWERTY Keypad and IO Expander"
depends on I2C
help
- Say Y here if you want to use a ADP5589 attached to your
+ Say Y here if you want to use a ADP5585/ADP5589 attached to your
system I2C bus.
To compile this driver as a module, choose M here: the
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index e34eeb8ae37..4a7f534cf64 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -550,7 +550,7 @@ static int __devinit adp5588_probe(struct i2c_client *client,
}
error = request_irq(client->irq, adp5588_irq,
- IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ IRQF_TRIGGER_FALLING,
client->dev.driver->name, kpad);
if (error) {
dev_err(&client->dev, "irq %d busy?\n", client->irq);
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index c7708263051..02b5d53031b 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -1,5 +1,5 @@
/*
- * Description: keypad driver for ADP5589
+ * Description: keypad driver for ADP5589, ADP5585
* I2C QWERTY Keypad and IO Expander
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
@@ -22,35 +22,165 @@
#include <linux/input/adp5589.h>
+/* ADP5589/ADP5585 Common Registers */
+#define ADP5589_5_ID 0x00
+#define ADP5589_5_INT_STATUS 0x01
+#define ADP5589_5_STATUS 0x02
+#define ADP5589_5_FIFO_1 0x03
+#define ADP5589_5_FIFO_2 0x04
+#define ADP5589_5_FIFO_3 0x05
+#define ADP5589_5_FIFO_4 0x06
+#define ADP5589_5_FIFO_5 0x07
+#define ADP5589_5_FIFO_6 0x08
+#define ADP5589_5_FIFO_7 0x09
+#define ADP5589_5_FIFO_8 0x0A
+#define ADP5589_5_FIFO_9 0x0B
+#define ADP5589_5_FIFO_10 0x0C
+#define ADP5589_5_FIFO_11 0x0D
+#define ADP5589_5_FIFO_12 0x0E
+#define ADP5589_5_FIFO_13 0x0F
+#define ADP5589_5_FIFO_14 0x10
+#define ADP5589_5_FIFO_15 0x11
+#define ADP5589_5_FIFO_16 0x12
+#define ADP5589_5_GPI_INT_STAT_A 0x13
+#define ADP5589_5_GPI_INT_STAT_B 0x14
+
+/* ADP5589 Registers */
+#define ADP5589_GPI_INT_STAT_C 0x15
+#define ADP5589_GPI_STATUS_A 0x16
+#define ADP5589_GPI_STATUS_B 0x17
+#define ADP5589_GPI_STATUS_C 0x18
+#define ADP5589_RPULL_CONFIG_A 0x19
+#define ADP5589_RPULL_CONFIG_B 0x1A
+#define ADP5589_RPULL_CONFIG_C 0x1B
+#define ADP5589_RPULL_CONFIG_D 0x1C
+#define ADP5589_RPULL_CONFIG_E 0x1D
+#define ADP5589_GPI_INT_LEVEL_A 0x1E
+#define ADP5589_GPI_INT_LEVEL_B 0x1F
+#define ADP5589_GPI_INT_LEVEL_C 0x20
+#define ADP5589_GPI_EVENT_EN_A 0x21
+#define ADP5589_GPI_EVENT_EN_B 0x22
+#define ADP5589_GPI_EVENT_EN_C 0x23
+#define ADP5589_GPI_INTERRUPT_EN_A 0x24
+#define ADP5589_GPI_INTERRUPT_EN_B 0x25
+#define ADP5589_GPI_INTERRUPT_EN_C 0x26
+#define ADP5589_DEBOUNCE_DIS_A 0x27
+#define ADP5589_DEBOUNCE_DIS_B 0x28
+#define ADP5589_DEBOUNCE_DIS_C 0x29
+#define ADP5589_GPO_DATA_OUT_A 0x2A
+#define ADP5589_GPO_DATA_OUT_B 0x2B
+#define ADP5589_GPO_DATA_OUT_C 0x2C
+#define ADP5589_GPO_OUT_MODE_A 0x2D
+#define ADP5589_GPO_OUT_MODE_B 0x2E
+#define ADP5589_GPO_OUT_MODE_C 0x2F
+#define ADP5589_GPIO_DIRECTION_A 0x30
+#define ADP5589_GPIO_DIRECTION_B 0x31
+#define ADP5589_GPIO_DIRECTION_C 0x32
+#define ADP5589_UNLOCK1 0x33
+#define ADP5589_UNLOCK2 0x34
+#define ADP5589_EXT_LOCK_EVENT 0x35
+#define ADP5589_UNLOCK_TIMERS 0x36
+#define ADP5589_LOCK_CFG 0x37
+#define ADP5589_RESET1_EVENT_A 0x38
+#define ADP5589_RESET1_EVENT_B 0x39
+#define ADP5589_RESET1_EVENT_C 0x3A
+#define ADP5589_RESET2_EVENT_A 0x3B
+#define ADP5589_RESET2_EVENT_B 0x3C
+#define ADP5589_RESET_CFG 0x3D
+#define ADP5589_PWM_OFFT_LOW 0x3E
+#define ADP5589_PWM_OFFT_HIGH 0x3F
+#define ADP5589_PWM_ONT_LOW 0x40
+#define ADP5589_PWM_ONT_HIGH 0x41
+#define ADP5589_PWM_CFG 0x42
+#define ADP5589_CLOCK_DIV_CFG 0x43
+#define ADP5589_LOGIC_1_CFG 0x44
+#define ADP5589_LOGIC_2_CFG 0x45
+#define ADP5589_LOGIC_FF_CFG 0x46
+#define ADP5589_LOGIC_INT_EVENT_EN 0x47
+#define ADP5589_POLL_PTIME_CFG 0x48
+#define ADP5589_PIN_CONFIG_A 0x49
+#define ADP5589_PIN_CONFIG_B 0x4A
+#define ADP5589_PIN_CONFIG_C 0x4B
+#define ADP5589_PIN_CONFIG_D 0x4C
+#define ADP5589_GENERAL_CFG 0x4D
+#define ADP5589_INT_EN 0x4E
+
+/* ADP5585 Registers */
+#define ADP5585_GPI_STATUS_A 0x15
+#define ADP5585_GPI_STATUS_B 0x16
+#define ADP5585_RPULL_CONFIG_A 0x17
+#define ADP5585_RPULL_CONFIG_B 0x18
+#define ADP5585_RPULL_CONFIG_C 0x19
+#define ADP5585_RPULL_CONFIG_D 0x1A
+#define ADP5585_GPI_INT_LEVEL_A 0x1B
+#define ADP5585_GPI_INT_LEVEL_B 0x1C
+#define ADP5585_GPI_EVENT_EN_A 0x1D
+#define ADP5585_GPI_EVENT_EN_B 0x1E
+#define ADP5585_GPI_INTERRUPT_EN_A 0x1F
+#define ADP5585_GPI_INTERRUPT_EN_B 0x20
+#define ADP5585_DEBOUNCE_DIS_A 0x21
+#define ADP5585_DEBOUNCE_DIS_B 0x22
+#define ADP5585_GPO_DATA_OUT_A 0x23
+#define ADP5585_GPO_DATA_OUT_B 0x24
+#define ADP5585_GPO_OUT_MODE_A 0x25
+#define ADP5585_GPO_OUT_MODE_B 0x26
+#define ADP5585_GPIO_DIRECTION_A 0x27
+#define ADP5585_GPIO_DIRECTION_B 0x28
+#define ADP5585_RESET1_EVENT_A 0x29
+#define ADP5585_RESET1_EVENT_B 0x2A
+#define ADP5585_RESET1_EVENT_C 0x2B
+#define ADP5585_RESET2_EVENT_A 0x2C
+#define ADP5585_RESET2_EVENT_B 0x2D
+#define ADP5585_RESET_CFG 0x2E
+#define ADP5585_PWM_OFFT_LOW 0x2F
+#define ADP5585_PWM_OFFT_HIGH 0x30
+#define ADP5585_PWM_ONT_LOW 0x31
+#define ADP5585_PWM_ONT_HIGH 0x32
+#define ADP5585_PWM_CFG 0x33
+#define ADP5585_LOGIC_CFG 0x34
+#define ADP5585_LOGIC_FF_CFG 0x35
+#define ADP5585_LOGIC_INT_EVENT_EN 0x36
+#define ADP5585_POLL_PTIME_CFG 0x37
+#define ADP5585_PIN_CONFIG_A 0x38
+#define ADP5585_PIN_CONFIG_B 0x39
+#define ADP5585_PIN_CONFIG_D 0x3A
+#define ADP5585_GENERAL_CFG 0x3B
+#define ADP5585_INT_EN 0x3C
+
+/* ID Register */
+#define ADP5589_5_DEVICE_ID_MASK 0xF
+#define ADP5589_5_MAN_ID_MASK 0xF
+#define ADP5589_5_MAN_ID_SHIFT 4
+#define ADP5589_5_MAN_ID 0x02
+
/* GENERAL_CFG Register */
#define OSC_EN (1 << 7)
#define CORE_CLK(x) (((x) & 0x3) << 5)
-#define LCK_TRK_LOGIC (1 << 4)
-#define LCK_TRK_GPI (1 << 3)
+#define LCK_TRK_LOGIC (1 << 4) /* ADP5589 only */
+#define LCK_TRK_GPI (1 << 3) /* ADP5589 only */
#define INT_CFG (1 << 1)
#define RST_CFG (1 << 0)
/* INT_EN Register */
-#define LOGIC2_IEN (1 << 5)
+#define LOGIC2_IEN (1 << 5) /* ADP5589 only */
#define LOGIC1_IEN (1 << 4)
-#define LOCK_IEN (1 << 3)
+#define LOCK_IEN (1 << 3) /* ADP5589 only */
#define OVRFLOW_IEN (1 << 2)
#define GPI_IEN (1 << 1)
#define EVENT_IEN (1 << 0)
/* Interrupt Status Register */
-#define LOGIC2_INT (1 << 5)
+#define LOGIC2_INT (1 << 5) /* ADP5589 only */
#define LOGIC1_INT (1 << 4)
-#define LOCK_INT (1 << 3)
+#define LOCK_INT (1 << 3) /* ADP5589 only */
#define OVRFLOW_INT (1 << 2)
#define GPI_INT (1 << 1)
#define EVENT_INT (1 << 0)
/* STATUS Register */
-
-#define LOGIC2_STAT (1 << 7)
+#define LOGIC2_STAT (1 << 7) /* ADP5589 only */
#define LOGIC1_STAT (1 << 6)
-#define LOCK_STAT (1 << 5)
+#define LOCK_STAT (1 << 5) /* ADP5589 only */
#define KEC 0xF
/* PIN_CONFIG_D Register */
@@ -61,27 +191,54 @@
#define LOCK_EN (1 << 0)
#define PTIME_MASK 0x3
-#define LTIME_MASK 0x3
+#define LTIME_MASK 0x3 /* ADP5589 only */
/* Key Event Register xy */
#define KEY_EV_PRESSED (1 << 7)
#define KEY_EV_MASK (0x7F)
#define KEYP_MAX_EVENT 16
+#define ADP5589_MAXGPIO 19
+#define ADP5585_MAXGPIO 11 /* 10 on the ADP5585-01, 11 on ADP5585-02 */
-#define MAXGPIO 19
-#define ADP_BANK(offs) ((offs) >> 3)
-#define ADP_BIT(offs) (1u << ((offs) & 0x7))
+enum {
+ ADP5589,
+ ADP5585_01,
+ ADP5585_02
+};
+
+struct adp_constants {
+ u8 maxgpio;
+ u8 keymapsize;
+ u8 gpi_pin_row_base;
+ u8 gpi_pin_row_end;
+ u8 gpi_pin_col_base;
+ u8 gpi_pin_base;
+ u8 gpi_pin_end;
+ u8 gpimapsize_max;
+ u8 max_row_num;
+ u8 max_col_num;
+ u8 row_mask;
+ u8 col_mask;
+ u8 col_shift;
+ u8 c4_extend_cfg;
+ u8 (*bank) (u8 offset);
+ u8 (*bit) (u8 offset);
+ u8 (*reg) (u8 reg);
+};
struct adp5589_kpad {
struct i2c_client *client;
struct input_dev *input;
+ const struct adp_constants *var;
unsigned short keycode[ADP5589_KEYMAPSIZE];
const struct adp5589_gpi_map *gpimap;
unsigned short gpimapsize;
unsigned extend_cfg;
+ bool is_adp5585;
+ bool adp5585_support_row5;
#ifdef CONFIG_GPIOLIB
- unsigned char gpiomap[MAXGPIO];
+ unsigned char gpiomap[ADP5589_MAXGPIO];
bool export_gpio;
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
@@ -90,6 +247,129 @@ struct adp5589_kpad {
#endif
};
+/*
+ * ADP5589 / ADP5585 derivative / variant handling
+ */
+
+
+/* ADP5589 */
+
+static unsigned char adp5589_bank(unsigned char offset)
+{
+ return offset >> 3;
+}
+
+static unsigned char adp5589_bit(unsigned char offset)
+{
+ return 1u << (offset & 0x7);
+}
+
+static unsigned char adp5589_reg(unsigned char reg)
+{
+ return reg;
+}
+
+static const struct adp_constants const_adp5589 = {
+ .maxgpio = ADP5589_MAXGPIO,
+ .keymapsize = ADP5589_KEYMAPSIZE,
+ .gpi_pin_row_base = ADP5589_GPI_PIN_ROW_BASE,
+ .gpi_pin_row_end = ADP5589_GPI_PIN_ROW_END,
+ .gpi_pin_col_base = ADP5589_GPI_PIN_COL_BASE,
+ .gpi_pin_base = ADP5589_GPI_PIN_BASE,
+ .gpi_pin_end = ADP5589_GPI_PIN_END,
+ .gpimapsize_max = ADP5589_GPIMAPSIZE_MAX,
+ .c4_extend_cfg = 12,
+ .max_row_num = ADP5589_MAX_ROW_NUM,
+ .max_col_num = ADP5589_MAX_COL_NUM,
+ .row_mask = ADP5589_ROW_MASK,
+ .col_mask = ADP5589_COL_MASK,
+ .col_shift = ADP5589_COL_SHIFT,
+ .bank = adp5589_bank,
+ .bit = adp5589_bit,
+ .reg = adp5589_reg,
+};
+
+/* ADP5585 */
+
+static unsigned char adp5585_bank(unsigned char offset)
+{
+ return offset > ADP5585_MAX_ROW_NUM;
+}
+
+static unsigned char adp5585_bit(unsigned char offset)
+{
+ return (offset > ADP5585_MAX_ROW_NUM) ?
+ 1u << (offset - ADP5585_COL_SHIFT) : 1u << offset;
+}
+
+static const unsigned char adp5585_reg_lut[] = {
+ [ADP5589_GPI_STATUS_A] = ADP5585_GPI_STATUS_A,
+ [ADP5589_GPI_STATUS_B] = ADP5585_GPI_STATUS_B,
+ [ADP5589_RPULL_CONFIG_A] = ADP5585_RPULL_CONFIG_A,
+ [ADP5589_RPULL_CONFIG_B] = ADP5585_RPULL_CONFIG_B,
+ [ADP5589_RPULL_CONFIG_C] = ADP5585_RPULL_CONFIG_C,
+ [ADP5589_RPULL_CONFIG_D] = ADP5585_RPULL_CONFIG_D,
+ [ADP5589_GPI_INT_LEVEL_A] = ADP5585_GPI_INT_LEVEL_A,
+ [ADP5589_GPI_INT_LEVEL_B] = ADP5585_GPI_INT_LEVEL_B,
+ [ADP5589_GPI_EVENT_EN_A] = ADP5585_GPI_EVENT_EN_A,
+ [ADP5589_GPI_EVENT_EN_B] = ADP5585_GPI_EVENT_EN_B,
+ [ADP5589_GPI_INTERRUPT_EN_A] = ADP5585_GPI_INTERRUPT_EN_A,
+ [ADP5589_GPI_INTERRUPT_EN_B] = ADP5585_GPI_INTERRUPT_EN_B,
+ [ADP5589_DEBOUNCE_DIS_A] = ADP5585_DEBOUNCE_DIS_A,
+ [ADP5589_DEBOUNCE_DIS_B] = ADP5585_DEBOUNCE_DIS_B,
+ [ADP5589_GPO_DATA_OUT_A] = ADP5585_GPO_DATA_OUT_A,
+ [ADP5589_GPO_DATA_OUT_B] = ADP5585_GPO_DATA_OUT_B,
+ [ADP5589_GPO_OUT_MODE_A] = ADP5585_GPO_OUT_MODE_A,
+ [ADP5589_GPO_OUT_MODE_B] = ADP5585_GPO_OUT_MODE_B,
+ [ADP5589_GPIO_DIRECTION_A] = ADP5585_GPIO_DIRECTION_A,
+ [ADP5589_GPIO_DIRECTION_B] = ADP5585_GPIO_DIRECTION_B,
+ [ADP5589_RESET1_EVENT_A] = ADP5585_RESET1_EVENT_A,
+ [ADP5589_RESET1_EVENT_B] = ADP5585_RESET1_EVENT_B,
+ [ADP5589_RESET1_EVENT_C] = ADP5585_RESET1_EVENT_C,
+ [ADP5589_RESET2_EVENT_A] = ADP5585_RESET2_EVENT_A,
+ [ADP5589_RESET2_EVENT_B] = ADP5585_RESET2_EVENT_B,
+ [ADP5589_RESET_CFG] = ADP5585_RESET_CFG,
+ [ADP5589_PWM_OFFT_LOW] = ADP5585_PWM_OFFT_LOW,
+ [ADP5589_PWM_OFFT_HIGH] = ADP5585_PWM_OFFT_HIGH,
+ [ADP5589_PWM_ONT_LOW] = ADP5585_PWM_ONT_LOW,
+ [ADP5589_PWM_ONT_HIGH] = ADP5585_PWM_ONT_HIGH,
+ [ADP5589_PWM_CFG] = ADP5585_PWM_CFG,
+ [ADP5589_LOGIC_1_CFG] = ADP5585_LOGIC_CFG,
+ [ADP5589_LOGIC_FF_CFG] = ADP5585_LOGIC_FF_CFG,
+ [ADP5589_LOGIC_INT_EVENT_EN] = ADP5585_LOGIC_INT_EVENT_EN,
+ [ADP5589_POLL_PTIME_CFG] = ADP5585_POLL_PTIME_CFG,
+ [ADP5589_PIN_CONFIG_A] = ADP5585_PIN_CONFIG_A,
+ [ADP5589_PIN_CONFIG_B] = ADP5585_PIN_CONFIG_B,
+ [ADP5589_PIN_CONFIG_D] = ADP5585_PIN_CONFIG_D,
+ [ADP5589_GENERAL_CFG] = ADP5585_GENERAL_CFG,
+ [ADP5589_INT_EN] = ADP5585_INT_EN,
+};
+
+static unsigned char adp5585_reg(unsigned char reg)
+{
+ return adp5585_reg_lut[reg];
+}
+
+static const struct adp_constants const_adp5585 = {
+ .maxgpio = ADP5585_MAXGPIO,
+ .keymapsize = ADP5585_KEYMAPSIZE,
+ .gpi_pin_row_base = ADP5585_GPI_PIN_ROW_BASE,
+ .gpi_pin_row_end = ADP5585_GPI_PIN_ROW_END,
+ .gpi_pin_col_base = ADP5585_GPI_PIN_COL_BASE,
+ .gpi_pin_base = ADP5585_GPI_PIN_BASE,
+ .gpi_pin_end = ADP5585_GPI_PIN_END,
+ .gpimapsize_max = ADP5585_GPIMAPSIZE_MAX,
+ .c4_extend_cfg = 10,
+ .max_row_num = ADP5585_MAX_ROW_NUM,
+ .max_col_num = ADP5585_MAX_COL_NUM,
+ .row_mask = ADP5585_ROW_MASK,
+ .col_mask = ADP5585_COL_MASK,
+ .col_shift = ADP5585_COL_SHIFT,
+ .bank = adp5585_bank,
+ .bit = adp5585_bit,
+ .reg = adp5585_reg,
+};
+
static int adp5589_read(struct i2c_client *client, u8 reg)
{
int ret = i2c_smbus_read_byte_data(client, reg);
@@ -109,19 +389,20 @@ static int adp5589_write(struct i2c_client *client, u8 reg, u8 val)
static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
{
struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
+ unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
- return !!(adp5589_read(kpad->client, ADP5589_GPI_STATUS_A + bank) &
- bit);
+ return !!(adp5589_read(kpad->client,
+ kpad->var->reg(ADP5589_GPI_STATUS_A) + bank) &
+ bit);
}
static void adp5589_gpio_set_value(struct gpio_chip *chip,
unsigned off, int val)
{
struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
+ unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
mutex_lock(&kpad->gpio_lock);
@@ -130,8 +411,8 @@ static void adp5589_gpio_set_value(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- adp5589_write(kpad->client, ADP5589_GPO_DATA_OUT_A + bank,
- kpad->dat_out[bank]);
+ adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A) +
+ bank, kpad->dat_out[bank]);
mutex_unlock(&kpad->gpio_lock);
}
@@ -139,14 +420,15 @@ static void adp5589_gpio_set_value(struct gpio_chip *chip,
static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off)
{
struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
+ unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
int ret;
mutex_lock(&kpad->gpio_lock);
kpad->dir[bank] &= ~bit;
- ret = adp5589_write(kpad->client, ADP5589_GPIO_DIRECTION_A + bank,
+ ret = adp5589_write(kpad->client,
+ kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank,
kpad->dir[bank]);
mutex_unlock(&kpad->gpio_lock);
@@ -158,8 +440,8 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip,
unsigned off, int val)
{
struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
- unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
- unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+ unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
+ unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
int ret;
mutex_lock(&kpad->gpio_lock);
@@ -171,9 +453,10 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- ret = adp5589_write(kpad->client, ADP5589_GPO_DATA_OUT_A + bank,
- kpad->dat_out[bank]);
- ret |= adp5589_write(kpad->client, ADP5589_GPIO_DIRECTION_A + bank,
+ ret = adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A)
+ + bank, kpad->dat_out[bank]);
+ ret |= adp5589_write(kpad->client,
+ kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank,
kpad->dir[bank]);
mutex_unlock(&kpad->gpio_lock);
@@ -184,26 +467,29 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip,
static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
const struct adp5589_kpad_platform_data *pdata)
{
- bool pin_used[MAXGPIO];
+ bool pin_used[ADP5589_MAXGPIO];
int n_unused = 0;
int i;
memset(pin_used, false, sizeof(pin_used));
- for (i = 0; i < MAXGPIO; i++)
+ for (i = 0; i < kpad->var->maxgpio; i++)
if (pdata->keypad_en_mask & (1 << i))
pin_used[i] = true;
for (i = 0; i < kpad->gpimapsize; i++)
- pin_used[kpad->gpimap[i].pin - ADP5589_GPI_PIN_BASE] = true;
+ pin_used[kpad->gpimap[i].pin - kpad->var->gpi_pin_base] = true;
if (kpad->extend_cfg & R4_EXTEND_CFG)
pin_used[4] = true;
if (kpad->extend_cfg & C4_EXTEND_CFG)
- pin_used[12] = true;
+ pin_used[kpad->var->c4_extend_cfg] = true;
+
+ if (!kpad->adp5585_support_row5)
+ pin_used[5] = true;
- for (i = 0; i < MAXGPIO; i++)
+ for (i = 0; i < kpad->var->maxgpio; i++)
if (!pin_used[i])
kpad->gpiomap[n_unused++] = i;
@@ -246,11 +532,11 @@ static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
return error;
}
- for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
- kpad->dat_out[i] = adp5589_read(kpad->client,
- ADP5589_GPO_DATA_OUT_A + i);
- kpad->dir[i] = adp5589_read(kpad->client,
- ADP5589_GPIO_DIRECTION_A + i);
+ for (i = 0; i <= kpad->var->bank(kpad->var->maxgpio); i++) {
+ kpad->dat_out[i] = adp5589_read(kpad->client, kpad->var->reg(
+ ADP5589_GPO_DATA_OUT_A) + i);
+ kpad->dir[i] = adp5589_read(kpad->client, kpad->var->reg(
+ ADP5589_GPIO_DIRECTION_A) + i);
}
if (gpio_data->setup) {
@@ -317,11 +603,11 @@ static void adp5589_report_events(struct adp5589_kpad *kpad, int ev_cnt)
int i;
for (i = 0; i < ev_cnt; i++) {
- int key = adp5589_read(kpad->client, ADP5589_FIFO_1 + i);
+ int key = adp5589_read(kpad->client, ADP5589_5_FIFO_1 + i);
int key_val = key & KEY_EV_MASK;
- if (key_val >= ADP5589_GPI_PIN_BASE &&
- key_val <= ADP5589_GPI_PIN_END) {
+ if (key_val >= kpad->var->gpi_pin_base &&
+ key_val <= kpad->var->gpi_pin_end) {
adp5589_report_switches(kpad, key, key_val);
} else {
input_report_key(kpad->input,
@@ -337,29 +623,30 @@ static irqreturn_t adp5589_irq(int irq, void *handle)
struct i2c_client *client = kpad->client;
int status, ev_cnt;
- status = adp5589_read(client, ADP5589_INT_STATUS);
+ status = adp5589_read(client, ADP5589_5_INT_STATUS);
if (status & OVRFLOW_INT) /* Unlikely and should never happen */
dev_err(&client->dev, "Event Overflow Error\n");
if (status & EVENT_INT) {
- ev_cnt = adp5589_read(client, ADP5589_STATUS) & KEC;
+ ev_cnt = adp5589_read(client, ADP5589_5_STATUS) & KEC;
if (ev_cnt) {
adp5589_report_events(kpad, ev_cnt);
input_sync(kpad->input);
}
}
- adp5589_write(client, ADP5589_INT_STATUS, status); /* Status is W1C */
+ adp5589_write(client, ADP5589_5_INT_STATUS, status); /* Status is W1C */
return IRQ_HANDLED;
}
-static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key)
+static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad,
+ unsigned short key)
{
int i;
- for (i = 0; i < ADP5589_KEYMAPSIZE; i++)
+ for (i = 0; i < kpad->var->keymapsize; i++)
if (key == kpad->keycode[i])
return (i + 1) | KEY_EV_PRESSED;
@@ -372,19 +659,23 @@ static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
{
struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
- client->dev.platform_data;
- int i, ret;
+ client->dev.platform_data;
+ u8 (*reg) (u8) = kpad->var->reg;
unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
unsigned char pull_mask = 0;
+ int i, ret;
+
+ ret = adp5589_write(client, reg(ADP5589_PIN_CONFIG_A),
+ pdata->keypad_en_mask & kpad->var->row_mask);
+ ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_B),
+ (pdata->keypad_en_mask >> kpad->var->col_shift) &
+ kpad->var->col_mask);
- ret = adp5589_write(client, ADP5589_PIN_CONFIG_A,
- pdata->keypad_en_mask & 0xFF);
- ret |= adp5589_write(client, ADP5589_PIN_CONFIG_B,
- (pdata->keypad_en_mask >> 8) & 0xFF);
- ret |= adp5589_write(client, ADP5589_PIN_CONFIG_C,
- (pdata->keypad_en_mask >> 16) & 0xFF);
+ if (!kpad->is_adp5585)
+ ret |= adp5589_write(client, ADP5589_PIN_CONFIG_C,
+ (pdata->keypad_en_mask >> 16) & 0xFF);
- if (pdata->en_keylock) {
+ if (!kpad->is_adp5585 && pdata->en_keylock) {
ret |= adp5589_write(client, ADP5589_UNLOCK1,
pdata->unlock_key1);
ret |= adp5589_write(client, ADP5589_UNLOCK2,
@@ -395,96 +686,130 @@ static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
}
for (i = 0; i < KEYP_MAX_EVENT; i++)
- ret |= adp5589_read(client, ADP5589_FIFO_1 + i);
+ ret |= adp5589_read(client, ADP5589_5_FIFO_1 + i);
for (i = 0; i < pdata->gpimapsize; i++) {
unsigned short pin = pdata->gpimap[i].pin;
- if (pin <= ADP5589_GPI_PIN_ROW_END) {
- evt_mode1 |= (1 << (pin - ADP5589_GPI_PIN_ROW_BASE));
+ if (pin <= kpad->var->gpi_pin_row_end) {
+ evt_mode1 |= (1 << (pin - kpad->var->gpi_pin_row_base));
} else {
evt_mode2 |=
- ((1 << (pin - ADP5589_GPI_PIN_COL_BASE)) & 0xFF);
- evt_mode3 |=
- ((1 << (pin - ADP5589_GPI_PIN_COL_BASE)) >> 8);
+ ((1 << (pin - kpad->var->gpi_pin_col_base)) & 0xFF);
+ if (!kpad->is_adp5585)
+ evt_mode3 |= ((1 << (pin -
+ kpad->var->gpi_pin_col_base)) >> 8);
}
}
if (pdata->gpimapsize) {
- ret |= adp5589_write(client, ADP5589_GPI_EVENT_EN_A, evt_mode1);
- ret |= adp5589_write(client, ADP5589_GPI_EVENT_EN_B, evt_mode2);
- ret |= adp5589_write(client, ADP5589_GPI_EVENT_EN_C, evt_mode3);
+ ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_A),
+ evt_mode1);
+ ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_B),
+ evt_mode2);
+ if (!kpad->is_adp5585)
+ ret |= adp5589_write(client,
+ reg(ADP5589_GPI_EVENT_EN_C),
+ evt_mode3);
}
if (pdata->pull_dis_mask & pdata->pullup_en_100k &
- pdata->pullup_en_300k & pdata->pulldown_en_300k)
+ pdata->pullup_en_300k & pdata->pulldown_en_300k)
dev_warn(&client->dev, "Conflicting pull resistor config\n");
- for (i = 0; i < MAXGPIO; i++) {
- unsigned val = 0;
+ for (i = 0; i <= kpad->var->max_row_num; i++) {
+ unsigned val = 0, bit = (1 << i);
+ if (pdata->pullup_en_300k & bit)
+ val = 0;
+ else if (pdata->pulldown_en_300k & bit)
+ val = 1;
+ else if (pdata->pullup_en_100k & bit)
+ val = 2;
+ else if (pdata->pull_dis_mask & bit)
+ val = 3;
+
+ pull_mask |= val << (2 * (i & 0x3));
+
+ if (i == 3 || i == kpad->var->max_row_num) {
+ ret |= adp5589_write(client, reg(ADP5585_RPULL_CONFIG_A)
+ + (i >> 2), pull_mask);
+ pull_mask = 0;
+ }
+ }
- if (pdata->pullup_en_300k & (1 << i))
+ for (i = 0; i <= kpad->var->max_col_num; i++) {
+ unsigned val = 0, bit = 1 << (i + kpad->var->col_shift);
+ if (pdata->pullup_en_300k & bit)
val = 0;
- else if (pdata->pulldown_en_300k & (1 << i))
+ else if (pdata->pulldown_en_300k & bit)
val = 1;
- else if (pdata->pullup_en_100k & (1 << i))
+ else if (pdata->pullup_en_100k & bit)
val = 2;
- else if (pdata->pull_dis_mask & (1 << i))
+ else if (pdata->pull_dis_mask & bit)
val = 3;
pull_mask |= val << (2 * (i & 0x3));
- if ((i & 0x3) == 0x3 || i == MAXGPIO - 1) {
+ if (i == 3 || i == kpad->var->max_col_num) {
ret |= adp5589_write(client,
- ADP5589_RPULL_CONFIG_A + (i >> 2),
- pull_mask);
+ reg(ADP5585_RPULL_CONFIG_C) +
+ (i >> 2), pull_mask);
pull_mask = 0;
}
}
if (pdata->reset1_key_1 && pdata->reset1_key_2 && pdata->reset1_key_3) {
- ret |= adp5589_write(client, ADP5589_RESET1_EVENT_A,
+ ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_A),
adp5589_get_evcode(kpad,
pdata->reset1_key_1));
- ret |= adp5589_write(client, ADP5589_RESET1_EVENT_B,
+ ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_B),
adp5589_get_evcode(kpad,
pdata->reset1_key_2));
- ret |= adp5589_write(client, ADP5589_RESET1_EVENT_C,
+ ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_C),
adp5589_get_evcode(kpad,
pdata->reset1_key_3));
kpad->extend_cfg |= R4_EXTEND_CFG;
}
if (pdata->reset2_key_1 && pdata->reset2_key_2) {
- ret |= adp5589_write(client, ADP5589_RESET2_EVENT_A,
+ ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_A),
adp5589_get_evcode(kpad,
pdata->reset2_key_1));
- ret |= adp5589_write(client, ADP5589_RESET2_EVENT_B,
+ ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_B),
adp5589_get_evcode(kpad,
pdata->reset2_key_2));
kpad->extend_cfg |= C4_EXTEND_CFG;
}
if (kpad->extend_cfg) {
- ret |= adp5589_write(client, ADP5589_RESET_CFG,
+ ret |= adp5589_write(client, reg(ADP5589_RESET_CFG),
pdata->reset_cfg);
- ret |= adp5589_write(client, ADP5589_PIN_CONFIG_D,
+ ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_D),
kpad->extend_cfg);
}
- for (i = 0; i <= ADP_BANK(MAXGPIO); i++)
- ret |= adp5589_write(client, ADP5589_DEBOUNCE_DIS_A + i,
- pdata->debounce_dis_mask >> (i * 8));
+ ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_A),
+ pdata->debounce_dis_mask & kpad->var->row_mask);
- ret |= adp5589_write(client, ADP5589_POLL_PTIME_CFG,
+ ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_B),
+ (pdata->debounce_dis_mask >> kpad->var->col_shift)
+ & kpad->var->col_mask);
+
+ if (!kpad->is_adp5585)
+ ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_C),
+ (pdata->debounce_dis_mask >> 16) & 0xFF);
+
+ ret |= adp5589_write(client, reg(ADP5589_POLL_PTIME_CFG),
pdata->scan_cycle_time & PTIME_MASK);
- ret |= adp5589_write(client, ADP5589_INT_STATUS, LOGIC2_INT |
- LOGIC1_INT | OVRFLOW_INT | LOCK_INT |
+ ret |= adp5589_write(client, ADP5589_5_INT_STATUS,
+ (kpad->is_adp5585 ? 0 : LOGIC2_INT) |
+ LOGIC1_INT | OVRFLOW_INT |
+ (kpad->is_adp5585 ? 0 : LOCK_INT) |
GPI_INT | EVENT_INT); /* Status is W1C */
- ret |= adp5589_write(client, ADP5589_GENERAL_CFG,
+ ret |= adp5589_write(client, reg(ADP5589_GENERAL_CFG),
INT_CFG | OSC_EN | CORE_CLK(3));
- ret |= adp5589_write(client, ADP5589_INT_EN,
+ ret |= adp5589_write(client, reg(ADP5589_INT_EN),
OVRFLOW_IEN | GPI_IEN | EVENT_IEN);
if (ret < 0) {
@@ -497,30 +822,33 @@ static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
{
- int gpi_stat1 = adp5589_read(kpad->client, ADP5589_GPI_STATUS_A);
- int gpi_stat2 = adp5589_read(kpad->client, ADP5589_GPI_STATUS_B);
- int gpi_stat3 = adp5589_read(kpad->client, ADP5589_GPI_STATUS_C);
int gpi_stat_tmp, pin_loc;
int i;
+ int gpi_stat1 = adp5589_read(kpad->client,
+ kpad->var->reg(ADP5589_GPI_STATUS_A));
+ int gpi_stat2 = adp5589_read(kpad->client,
+ kpad->var->reg(ADP5589_GPI_STATUS_B));
+ int gpi_stat3 = !kpad->is_adp5585 ?
+ adp5589_read(kpad->client, ADP5589_GPI_STATUS_C) : 0;
for (i = 0; i < kpad->gpimapsize; i++) {
unsigned short pin = kpad->gpimap[i].pin;
- if (pin <= ADP5589_GPI_PIN_ROW_END) {
+ if (pin <= kpad->var->gpi_pin_row_end) {
gpi_stat_tmp = gpi_stat1;
- pin_loc = pin - ADP5589_GPI_PIN_ROW_BASE;
- } else if ((pin - ADP5589_GPI_PIN_COL_BASE) < 8) {
+ pin_loc = pin - kpad->var->gpi_pin_row_base;
+ } else if ((pin - kpad->var->gpi_pin_col_base) < 8) {
gpi_stat_tmp = gpi_stat2;
- pin_loc = pin - ADP5589_GPI_PIN_COL_BASE;
+ pin_loc = pin - kpad->var->gpi_pin_col_base;
} else {
gpi_stat_tmp = gpi_stat3;
- pin_loc = pin - ADP5589_GPI_PIN_COL_BASE - 8;
+ pin_loc = pin - kpad->var->gpi_pin_col_base - 8;
}
if (gpi_stat_tmp < 0) {
dev_err(&kpad->client->dev,
- "Can't read GPIO_DAT_STAT switch"
- " %d default to OFF\n", pin);
+ "Can't read GPIO_DAT_STAT switch %d, default to OFF\n",
+ pin);
gpi_stat_tmp = 0;
}
@@ -536,7 +864,8 @@ static int __devinit adp5589_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adp5589_kpad *kpad;
- const struct adp5589_kpad_platform_data *pdata;
+ const struct adp5589_kpad_platform_data *pdata =
+ client->dev.platform_data;
struct input_dev *input;
unsigned int revid;
int ret, i;
@@ -548,56 +877,79 @@ static int __devinit adp5589_probe(struct i2c_client *client,
return -EIO;
}
- pdata = client->dev.platform_data;
if (!pdata) {
dev_err(&client->dev, "no platform data?\n");
return -EINVAL;
}
- if (!((pdata->keypad_en_mask & 0xFF) &&
- (pdata->keypad_en_mask >> 8)) || !pdata->keymap) {
+ kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ switch (id->driver_data) {
+ case ADP5585_02:
+ kpad->adp5585_support_row5 = true;
+ case ADP5585_01:
+ kpad->is_adp5585 = true;
+ kpad->var = &const_adp5585;
+ break;
+ case ADP5589:
+ kpad->var = &const_adp5589;
+ break;
+ }
+
+ if (!((pdata->keypad_en_mask & kpad->var->row_mask) &&
+ (pdata->keypad_en_mask >> kpad->var->col_shift)) ||
+ !pdata->keymap) {
dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_free_mem;
}
- if (pdata->keymapsize != ADP5589_KEYMAPSIZE) {
+ if (pdata->keymapsize != kpad->var->keymapsize) {
dev_err(&client->dev, "invalid keymapsize\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_free_mem;
}
if (!pdata->gpimap && pdata->gpimapsize) {
dev_err(&client->dev, "invalid gpimap from pdata\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_free_mem;
}
- if (pdata->gpimapsize > ADP5589_GPIMAPSIZE_MAX) {
+ if (pdata->gpimapsize > kpad->var->gpimapsize_max) {
dev_err(&client->dev, "invalid gpimapsize\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_free_mem;
}
for (i = 0; i < pdata->gpimapsize; i++) {
unsigned short pin = pdata->gpimap[i].pin;
- if (pin < ADP5589_GPI_PIN_BASE || pin > ADP5589_GPI_PIN_END) {
+ if (pin < kpad->var->gpi_pin_base ||
+ pin > kpad->var->gpi_pin_end) {
dev_err(&client->dev, "invalid gpi pin data\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_free_mem;
}
- if ((1 << (pin - ADP5589_GPI_PIN_ROW_BASE)) &
+ if ((1 << (pin - kpad->var->gpi_pin_row_base)) &
pdata->keypad_en_mask) {
dev_err(&client->dev, "invalid gpi row/col data\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_free_mem;
}
}
if (!client->irq) {
dev_err(&client->dev, "no IRQ?\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_free_mem;
}
- kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
input = input_allocate_device();
- if (!kpad || !input) {
+ if (!input) {
error = -ENOMEM;
goto err_free_mem;
}
@@ -605,13 +957,13 @@ static int __devinit adp5589_probe(struct i2c_client *client,
kpad->client = client;
kpad->input = input;
- ret = adp5589_read(client, ADP5589_ID);
+ ret = adp5589_read(client, ADP5589_5_ID);
if (ret < 0) {
error = ret;
- goto err_free_mem;
+ goto err_free_input;
}
- revid = (u8) ret & ADP5589_DEVICE_ID_MASK;
+ revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK;
input->name = client->name;
input->phys = "adp5589-keys/input0";
@@ -652,7 +1004,7 @@ static int __devinit adp5589_probe(struct i2c_client *client,
error = input_register_device(input);
if (error) {
dev_err(&client->dev, "unable to register input device\n");
- goto err_free_mem;
+ goto err_free_input;
}
error = request_threaded_irq(client->irq, NULL, adp5589_irq,
@@ -685,8 +1037,9 @@ err_free_irq:
err_unreg_dev:
input_unregister_device(input);
input = NULL;
-err_free_mem:
+err_free_input:
input_free_device(input);
+err_free_mem:
kfree(kpad);
return error;
@@ -696,7 +1049,7 @@ static int __devexit adp5589_remove(struct i2c_client *client)
{
struct adp5589_kpad *kpad = i2c_get_clientdata(client);
- adp5589_write(client, ADP5589_GENERAL_CFG, 0);
+ adp5589_write(client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
free_irq(client->irq, kpad);
input_unregister_device(kpad->input);
adp5589_gpio_remove(kpad);
@@ -736,7 +1089,9 @@ static int adp5589_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(adp5589_dev_pm_ops, adp5589_suspend, adp5589_resume);
static const struct i2c_device_id adp5589_id[] = {
- {"adp5589-keys", 0},
+ {"adp5589-keys", ADP5589},
+ {"adp5585-keys", ADP5585_01},
+ {"adp5585-02-keys", ADP5585_02}, /* Adds ROW5 to ADP5585 */
{}
};
@@ -767,4 +1122,4 @@ module_exit(adp5589_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("ADP5589 Keypad driver");
+MODULE_DESCRIPTION("ADP5589/ADP5585 Keypad driver");
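The probe rework above selects per-variant constants from id->driver_data, deliberately falling through from ADP5585_02 into ADP5585_01 so the ROW5-capable part inherits the plain ADP5585 settings. A minimal sketch of that dispatch pattern, with illustrative names and made-up constant values rather than the driver's real tables:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative per-variant constants; values are made up for the example. */
struct chip_const {
	unsigned int row_mask;
	unsigned int col_shift;
	unsigned int keymapsize;
};

enum chip_id { CHIP_5589, CHIP_5585_01, CHIP_5585_02 };

static const struct chip_const const_5589 = { 0xff, 8, 96 };
static const struct chip_const const_5585 = { 0x3f, 6, 32 };

struct kpad {
	bool support_row5;
	bool is_5585;
	const struct chip_const *var;
};

static void pick_variant(struct kpad *kpad, enum chip_id id)
{
	switch (id) {
	case CHIP_5585_02:
		kpad->support_row5 = true;
		/* fall through: the -02 part is a 5585 with ROW5 added */
	case CHIP_5585_01:
		kpad->is_5585 = true;
		kpad->var = &const_5585;
		break;
	case CHIP_5589:
		kpad->var = &const_5589;
		break;
	}
}

int main(void)
{
	struct kpad k = { 0 };

	pick_variant(&k, CHIP_5585_02);
	printf("row5=%d is_5585=%d row_mask=0x%x\n",
	       k.support_row5, k.is_5585, k.var->row_mask);
	return 0;
}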
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index cd89d17162a..9d82b3aeff5 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -271,7 +271,7 @@ static int __init davinci_ks_probe(struct platform_device *pdev)
}
error = request_irq(davinci_ks->irq, davinci_ks_interrupt,
- IRQF_DISABLED, pdev->name, davinci_ks);
+ 0, pdev->name, davinci_ks);
if (error < 0) {
dev_err(dev, "unable to register davinci key scan interrupt\n");
goto fail5;
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index aa17e024d80..4662c5da801 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -323,7 +323,7 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keypad);
err = request_irq(keypad->irq, ep93xx_keypad_irq_handler,
- IRQF_DISABLED, pdev->name, keypad);
+ 0, pdev->name, keypad);
if (err)
goto failed_free_dev;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 67df91af842..ed1ed469d08 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -461,8 +461,7 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
struct device_node *node, *pp;
int i;
struct gpio_keys_button *buttons;
- const u32 *reg;
- int len;
+ u32 reg;
node = dev->of_node;
if (node == NULL)
@@ -470,7 +469,7 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
memset(pdata, 0, sizeof *pdata);
- pdata->rep = !!of_get_property(node, "autorepeat", &len);
+ pdata->rep = !!of_get_property(node, "autorepeat", NULL);
/* First count the subnodes */
pdata->nbuttons = 0;
@@ -498,22 +497,25 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
buttons[i].gpio = of_get_gpio_flags(pp, 0, &flags);
buttons[i].active_low = flags & OF_GPIO_ACTIVE_LOW;
- reg = of_get_property(pp, "linux,code", &len);
- if (!reg) {
+ if (of_property_read_u32(pp, "linux,code", &reg)) {
dev_err(dev, "Button without keycode: 0x%x\n", buttons[i].gpio);
goto out_fail;
}
- buttons[i].code = be32_to_cpup(reg);
+ buttons[i].code = reg;
- buttons[i].desc = of_get_property(pp, "label", &len);
+ buttons[i].desc = of_get_property(pp, "label", NULL);
- reg = of_get_property(pp, "linux,input-type", &len);
- buttons[i].type = reg ? be32_to_cpup(reg) : EV_KEY;
+ if (of_property_read_u32(pp, "linux,input-type", &reg) == 0)
+ buttons[i].type = reg;
+ else
+ buttons[i].type = EV_KEY;
buttons[i].wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
- reg = of_get_property(pp, "debounce-interval", &len);
- buttons[i].debounce_interval = reg ? be32_to_cpup(reg) : 5;
+ if (of_property_read_u32(pp, "debounce-interval", &reg) == 0)
+ buttons[i].debounce_interval = reg;
+ else
+ buttons[i].debounce_interval = 5;
i++;
}
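The gpio_keys conversion above replaces raw of_get_property()/be32_to_cpup() pairs with of_property_read_u32(), which handles the endianness conversion and lets the caller fall back to a default when the property is absent. A hedged sketch of that read-with-default idiom; the helper name is illustrative, only of_property_read_u32() itself comes from the kernel API:

#include <linux/of.h>

/* Illustrative helper: read an optional u32 DT property, falling back to a default. */
static u32 of_read_u32_default(struct device_node *np,
			       const char *propname, u32 def)
{
	u32 val;

	if (of_property_read_u32(np, propname, &val))
		return def;	/* property missing or malformed */

	return val;
}

/*
 * Usage, mirroring the bindings handled above:
 *	button->debounce_interval =
 *		of_read_u32_default(pp, "debounce-interval", 5);
 */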
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index d92c15c39e6..ccebd2d0915 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -510,7 +510,7 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
/* Ensure that the keypad will stay dormant until opened */
imx_keypad_inhibit(keypad);
- error = request_irq(irq, imx_keypad_irq_handler, IRQF_DISABLED,
+ error = request_irq(irq, imx_keypad_irq_handler, 0,
pdev->name, keypad);
if (error) {
dev_err(&pdev->dev, "failed to request IRQ\n");
@@ -567,10 +567,54 @@ static int __devexit imx_keypad_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int imx_kbd_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_keypad *kbd = platform_get_drvdata(pdev);
+ struct input_dev *input_dev = kbd->input_dev;
+
+ /* The i.MX keypad can wake the system even when its clock is disabled */
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ clk_disable(kbd->clk);
+
+ mutex_unlock(&input_dev->mutex);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(kbd->irq);
+
+ return 0;
+}
+
+static int imx_kbd_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_keypad *kbd = platform_get_drvdata(pdev);
+ struct input_dev *input_dev = kbd->input_dev;
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(kbd->irq);
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ clk_enable(kbd->clk);
+
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume);
+
static struct platform_driver imx_keypad_driver = {
.driver = {
.name = "imx-keypad",
.owner = THIS_MODULE,
+ .pm = &imx_kbd_pm_ops,
},
.probe = imx_keypad_probe,
.remove = __devexit_p(imx_keypad_remove),
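The suspend/resume handlers added above follow the usual pattern of guarding the callbacks with CONFIG_PM_SLEEP and wiring them up through SIMPLE_DEV_PM_OPS, which only fills in the sleep callbacks when sleep support is built in. A minimal skeleton of the same structure for a hypothetical platform driver; all names here are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the device; compiled only when sleep support is enabled */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* undo whatever foo_suspend() did */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
	/* .probe/.remove omitted in this sketch */
};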
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 2cd3e1d56ea..0aa6740e60d 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -129,7 +129,7 @@ static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
err = request_irq(IRQ_GPIO0,
jornada720_kbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING,
"jornadakbd", pdev);
if (err) {
printk(KERN_INFO "jornadakbd720_kbd: Unable to grab IRQ\n");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 756348a7f93..82d1dc8badd 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -788,7 +788,7 @@ static int __devexit lm8323_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* We don't need to explicitly suspend the chip, as it already switches off
* when there's no activity.
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index b02e4268e18..e2ae657717e 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -343,7 +343,6 @@ static int __devinit init_matrix_gpio(struct platform_device *pdev,
for (i = 0; i < pdata->num_row_gpios; i++) {
err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
matrix_keypad_interrupt,
- IRQF_DISABLED |
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
"matrix-keypad", keypad);
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 6e0f2309136..fcdec5e2b29 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -18,6 +18,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <plat/ske.h>
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 33d0bdc837c..323bcdfff24 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -35,7 +35,7 @@
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <plat/keypad.h>
#include <plat/menelaus.h>
#include <asm/irq.h>
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 4b0ec35259a..eca6ae63de1 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -535,7 +535,7 @@ static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] |= BIT_MASK(EV_REL);
}
- error = request_irq(irq, pxa27x_keypad_irq_handler, IRQF_DISABLED,
+ error = request_irq(irq, pxa27x_keypad_irq_handler, 0,
pdev->name, keypad);
if (error) {
dev_err(&pdev->dev, "failed to request IRQ\n");
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index b7123a44b6e..35451bf780c 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -148,7 +148,7 @@ static int __devinit pxa930_rotary_probe(struct platform_device *pdev)
r->input_dev = input_dev;
input_set_drvdata(input_dev, r);
- err = request_irq(irq, rotary_irq, IRQF_DISABLED,
+ err = request_irq(irq, rotary_irq, 0,
"enhanced rotary", r);
if (err) {
dev_err(&pdev->dev, "failed to request IRQ\n");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 99122f59e98..f60c9e82f20 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -90,7 +90,7 @@ struct tc_keypad {
bool keypad_stopped;
};
-static int __devinit tc3589x_keypad_init_key_hardware(struct tc_keypad *keypad)
+static int tc3589x_keypad_init_key_hardware(struct tc_keypad *keypad)
{
int ret;
struct tc3589x *tc3589x = keypad->tc3589x;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index a5a77915c65..cf3228b0ab9 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -55,6 +55,7 @@
#define KBC_ROW_CFG0_0 0x8
#define KBC_COL_CFG0_0 0x18
+#define KBC_TO_CNT_0 0x24
#define KBC_INIT_DLY_0 0x28
#define KBC_RPT_DLY_0 0x2c
#define KBC_KP_ENT0_0 0x30
@@ -70,6 +71,7 @@ struct tegra_kbc {
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
+ unsigned int cp_to_wkup_dly;
bool use_fn_map;
bool use_ghost_filter;
const struct tegra_kbc_platform_data *pdata;
@@ -258,12 +260,10 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
u32 val = 0;
unsigned int i;
unsigned int num_down = 0;
- unsigned long flags;
bool fn_keypress = false;
bool key_in_same_row = false;
bool key_in_same_col = false;
- spin_lock_irqsave(&kbc->lock, flags);
for (i = 0; i < KBC_MAX_KPENT; i++) {
if ((i % 4) == 0)
val = readl(kbc->mmio + KBC_KP_ENT0_0 + i);
@@ -292,7 +292,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
* any 2 of the 3 keys share a row, and any 2 of them share a column.
* If so ignore the key presses for this iteration.
*/
- if ((kbc->use_ghost_filter) && (num_down >= 3)) {
+ if (kbc->use_ghost_filter && num_down >= 3) {
for (i = 0; i < num_down; i++) {
unsigned int j;
u8 curr_col = scancodes[i] & 0x07;
@@ -325,8 +325,6 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
}
}
- spin_unlock_irqrestore(&kbc->lock, flags);
-
/* Ignore the key presses for this iteration? */
if (key_in_same_col && key_in_same_row)
return;
@@ -341,6 +339,18 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
kbc->num_pressed_keys = num_down;
}
+static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
+{
+ u32 val;
+
+ val = readl(kbc->mmio + KBC_CONTROL_0);
+ if (enable)
+ val |= KBC_CONTROL_FIFO_CNT_INT_EN;
+ else
+ val &= ~KBC_CONTROL_FIFO_CNT_INT_EN;
+ writel(val, kbc->mmio + KBC_CONTROL_0);
+}
+
static void tegra_kbc_keypress_timer(unsigned long data)
{
struct tegra_kbc *kbc = (struct tegra_kbc *)data;
@@ -348,6 +358,8 @@ static void tegra_kbc_keypress_timer(unsigned long data)
u32 val;
unsigned int i;
+ spin_lock_irqsave(&kbc->lock, flags);
+
val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
if (val) {
unsigned long dly;
@@ -369,26 +381,19 @@ static void tegra_kbc_keypress_timer(unsigned long data)
kbc->num_pressed_keys = 0;
/* All keys are released so enable the keypress interrupt */
- spin_lock_irqsave(&kbc->lock, flags);
- val = readl(kbc->mmio + KBC_CONTROL_0);
- val |= KBC_CONTROL_FIFO_CNT_INT_EN;
- writel(val, kbc->mmio + KBC_CONTROL_0);
- spin_unlock_irqrestore(&kbc->lock, flags);
+ tegra_kbc_set_fifo_interrupt(kbc, true);
}
+
+ spin_unlock_irqrestore(&kbc->lock, flags);
}
static irqreturn_t tegra_kbc_isr(int irq, void *args)
{
struct tegra_kbc *kbc = args;
- u32 val, ctl;
+ unsigned long flags;
+ u32 val;
- /*
- * Until all keys are released, defer further processing to
- * the polling loop in tegra_kbc_keypress_timer
- */
- ctl = readl(kbc->mmio + KBC_CONTROL_0);
- ctl &= ~KBC_CONTROL_FIFO_CNT_INT_EN;
- writel(ctl, kbc->mmio + KBC_CONTROL_0);
+ spin_lock_irqsave(&kbc->lock, flags);
/*
* Quickly bail out & reenable interrupts if the fifo threshold
@@ -399,15 +404,15 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
if (val & KBC_INT_FIFO_CNT_INT_STATUS) {
/*
- * Schedule timer to run when hardware is in continuous
- * polling mode.
+ * Until all keys are released, defer further processing to
+ * the polling loop in tegra_kbc_keypress_timer.
*/
+ tegra_kbc_set_fifo_interrupt(kbc, false);
mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
- } else {
- ctl |= KBC_CONTROL_FIFO_CNT_INT_EN;
- writel(ctl, kbc->mmio + KBC_CONTROL_0);
}
+ spin_unlock_irqrestore(&kbc->lock, flags);
+
return IRQ_HANDLED;
}
@@ -455,7 +460,6 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
static int tegra_kbc_start(struct tegra_kbc *kbc)
{
const struct tegra_kbc_platform_data *pdata = kbc->pdata;
- unsigned long flags;
unsigned int debounce_cnt;
u32 val = 0;
@@ -493,7 +497,6 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
* Atomically clear out any remaining entries in the key FIFO
* and enable keyboard interrupts.
*/
- spin_lock_irqsave(&kbc->lock, flags);
while (1) {
val = readl(kbc->mmio + KBC_INT_0);
val >>= 4;
@@ -504,7 +507,6 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
val = readl(kbc->mmio + KBC_KP_ENT1_0);
}
writel(0x7, kbc->mmio + KBC_INT_0);
- spin_unlock_irqrestore(&kbc->lock, flags);
enable_irq(kbc->irq);
@@ -734,18 +736,30 @@ static int tegra_kbc_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+ mutex_lock(&kbc->idev->mutex);
if (device_may_wakeup(&pdev->dev)) {
- tegra_kbc_setup_wakekeys(kbc, true);
- enable_irq_wake(kbc->irq);
+ disable_irq(kbc->irq);
+ del_timer_sync(&kbc->timer);
+ tegra_kbc_set_fifo_interrupt(kbc, false);
+
/* Forcefully clear the interrupt status */
writel(0x7, kbc->mmio + KBC_INT_0);
+ /*
+ * Store the previous resident time of continuous polling mode.
+ * Force the keyboard into interrupt mode.
+ */
+ kbc->cp_to_wkup_dly = readl(kbc->mmio + KBC_TO_CNT_0);
+ writel(0, kbc->mmio + KBC_TO_CNT_0);
+
+ tegra_kbc_setup_wakekeys(kbc, true);
msleep(30);
+
+ enable_irq_wake(kbc->irq);
} else {
- mutex_lock(&kbc->idev->mutex);
if (kbc->idev->users)
tegra_kbc_stop(kbc);
- mutex_unlock(&kbc->idev->mutex);
}
+ mutex_unlock(&kbc->idev->mutex);
return 0;
}
@@ -756,15 +770,22 @@ static int tegra_kbc_resume(struct device *dev)
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
int err = 0;
+ mutex_lock(&kbc->idev->mutex);
if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(kbc->irq);
tegra_kbc_setup_wakekeys(kbc, false);
+
+ /* Restore the resident time of continuous polling mode. */
+ writel(kbc->cp_to_wkup_dly, kbc->mmio + KBC_TO_CNT_0);
+
+ tegra_kbc_set_fifo_interrupt(kbc, true);
+
+ enable_irq(kbc->irq);
} else {
- mutex_lock(&kbc->idev->mutex);
if (kbc->idev->users)
err = tegra_kbc_start(kbc);
- mutex_unlock(&kbc->idev->mutex);
}
+ mutex_unlock(&kbc->idev->mutex);
return err;
}
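The tegra-kbc rework above pulls the spinlock out of tegra_kbc_report_keys() and into the ISR and timer, and funnels every FIFO-interrupt enable/disable through tegra_kbc_set_fifo_interrupt(), so the register read-modify-write always runs with kbc->lock held by the caller. A hedged sketch of that "helper assumes caller holds the lock" convention; the register layout and names below are illustrative, not Tegra's:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define CTRL_REG		0x00
#define CTRL_FIFO_INT_EN	(1 << 3)

struct kbd {
	void __iomem *mmio;
	spinlock_t lock;
};

/* Caller must hold kbd->lock: the read-modify-write is not atomic by itself. */
static void kbd_set_fifo_interrupt(struct kbd *kbd, bool enable)
{
	u32 val = readl(kbd->mmio + CTRL_REG);

	if (enable)
		val |= CTRL_FIFO_INT_EN;
	else
		val &= ~CTRL_FIFO_INT_EN;
	writel(val, kbd->mmio + CTRL_REG);
}

static irqreturn_t kbd_isr(int irq, void *data)
{
	struct kbd *kbd = data;
	unsigned long flags;

	spin_lock_irqsave(&kbd->lock, flags);
	kbd_set_fifo_interrupt(kbd, false);	/* defer to the polling timer */
	spin_unlock_irqrestore(&kbd->lock, flags);

	return IRQ_HANDLED;
}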
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 1c58681de81..66e55e5cfdd 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/input/matrix_keypad.h>
+#include <linux/module.h>
#define BITS(x) (BIT(x) - 1)
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index ee2bf6bcf29..318586dadac 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -203,7 +203,7 @@ static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
input_dev->keycode, input_dev->keybit);
error = request_irq(keypad->irq, w90p910_keypad_irq_handler,
- IRQF_DISABLED, pdev->name, keypad);
+ 0, pdev->name, keypad);
if (error) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto failed_put_clk;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c9104bb4db0..22d875fde53 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -62,6 +62,17 @@ config INPUT_AD714X_SPI
To compile this driver as a module, choose M here: the
module will be called ad714x-spi.
+config INPUT_BMA150
+ tristate "BMA150/SMB380 acceleration sensor support"
+ depends on I2C
+ select INPUT_POLLDEV
+ help
+ Say Y here if you have Bosch Sensortec's BMA150 or SMB380
+ acceleration sensor hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bma150.
+
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
@@ -74,6 +85,29 @@ config INPUT_PCSPKR
To compile this driver as a module, choose M here: the
module will be called pcspkr.
+config INPUT_PM8XXX_VIBRATOR
+ tristate "Qualcomm PM8XXX vibrator support"
+ depends on MFD_PM8XXX
+ select INPUT_FF_MEMLESS
+ help
+ This option enables device driver support for the vibrator
+ on Qualcomm PM8xxx chips. The driver exposes it through the
+ ff-memless interface of the input framework.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pm8xxx-vibrator.
+
+config INPUT_PMIC8XXX_PWRKEY
+ tristate "PMIC8XXX power key support"
+ depends on MFD_PM8XXX
+ help
+ Say Y here if you want support for the PMIC8XXX power key.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pmic8xxx-pwrkey.
+
config INPUT_SPARCSPKR
tristate "SPARC Speaker support"
depends on PCI && SPARC64
@@ -100,6 +134,16 @@ config INPUT_MAX8925_ONKEY
To compile this driver as a module, choose M here: the module
will be called max8925_onkey.
+config INPUT_MC13783_PWRBUTTON
+ tristate "MC13783 ON buttons"
+ depends on MFD_MC13783
+ help
+ Support the ON buttons of the MC13783 PMIC as an input device
+ reporting the power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mc13783-pwrbutton.
+
config INPUT_MMA8450
tristate "MMA8450 - Freescale's 3-Axis, 8/12-bit Digital Accelerometer"
depends on I2C
@@ -183,22 +227,6 @@ config INPUT_ATLAS_BTNS
To compile this driver as a module, choose M here: the module will
be called atlas_btns.
-config INPUT_ATI_REMOTE
- tristate "ATI / X10 USB RF remote control"
- depends on USB_ARCH_HAS_HCD
- select USB
- help
- Say Y here if you want to use an ATI or X10 "Lola" USB remote control.
- These are RF remotes with USB receivers.
- The ATI remote comes with many of ATI's All-In-Wonder video cards.
- The X10 "Lola" remote is available at:
- <http://www.x10.com/products/lola_sg1.htm>
- This driver provides mouse pointer, left and right mouse buttons,
- and maps all the other remote buttons to keypress events.
-
- To compile this driver as a module, choose M here: the module will be
- called ati_remote.
-
config INPUT_ATI_REMOTE2
tristate "ATI / Philips USB RF remote control"
depends on USB_ARCH_HAS_HCD
@@ -379,17 +407,6 @@ config INPUT_PWM_BEEPER
To compile this driver as a module, choose M here: the module will be
called pwm-beeper.
-config INPUT_PMIC8XXX_PWRKEY
- tristate "PMIC8XXX power key support"
- depends on MFD_PM8XXX
- help
- Say Y here if you want support for the PMIC8XXX power key.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called pmic8xxx-pwrkey.
-
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
depends on GPIOLIB && GENERIC_GPIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 299ad5edba8..a244fc6a781 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -13,10 +13,10 @@ obj-$(CONFIG_INPUT_ADXL34X) += adxl34x.o
obj-$(CONFIG_INPUT_ADXL34X_I2C) += adxl34x-i2c.o
obj-$(CONFIG_INPUT_ADXL34X_SPI) += adxl34x-spi.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
-obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
obj-$(CONFIG_INPUT_BFIN_ROTARY) += bfin_rotary.o
+obj-$(CONFIG_INPUT_BMA150) += bma150.o
obj-$(CONFIG_INPUT_CM109) += cm109.o
obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
@@ -28,15 +28,17 @@ obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
+obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
obj-$(CONFIG_INPUT_MPU3050) += mpu3050.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
+obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o
+obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
-obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index 025417d74ca..56810fb4ead 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -116,13 +116,13 @@ static struct i2c_driver ad714x_i2c_driver = {
.id_table = ad714x_id,
};
-static __init int ad714x_i2c_init(void)
+static int __init ad714x_i2c_init(void)
{
return i2c_add_driver(&ad714x_i2c_driver);
}
module_init(ad714x_i2c_init);
-static __exit void ad714x_i2c_exit(void)
+static void __exit ad714x_i2c_exit(void)
{
i2c_del_driver(&ad714x_i2c_driver);
}
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index ca42c7d2a3c..0ac75bbad4d 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/input/ad714x.h>
+#include <linux/module.h>
#include "ad714x.h"
#define AD714X_PWR_CTRL 0x0
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 144ddbdeb9b..09244804fb9 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/input/adxl34x.h>
+#include <linux/module.h>
#include "adxl34x.h"
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 1de58e8a1b7..8d345e87075 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -11,6 +11,7 @@
#include <linux/usb/input.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRIVER_DESC "ATI/Philips USB RF remote driver"
#define DRIVER_VERSION "0.3"
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
new file mode 100644
index 00000000000..8f55b54352b
--- /dev/null
+++ b/drivers/input/misc/bma150.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2011 Bosch Sensortec GmbH
+ * Copyright (c) 2011 Unixphere
+ *
+ * This driver adds support for Bosch Sensortec's digital acceleration
+ * sensors BMA150 and SMB380.
+ * The SMB380 is fully compatible with BMA150 and only differs in packaging.
+ *
+ * The datasheet for the BMA150 chip can be found here:
+ * http://www.bosch-sensortec.com/content/language1/downloads/BST-BMA150-DS000-07.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/bma150.h>
+
+#define ABSMAX_ACC_VAL 0x01FF
+#define ABSMIN_ACC_VAL -(ABSMAX_ACC_VAL)
+
+/* Each axis is represented by a 2-byte data word */
+#define BMA150_XYZ_DATA_SIZE 6
+
+/* Input poll interval in milliseconds */
+#define BMA150_POLL_INTERVAL 10
+#define BMA150_POLL_MAX 200
+#define BMA150_POLL_MIN 0
+
+#define BMA150_BW_25HZ 0
+#define BMA150_BW_50HZ 1
+#define BMA150_BW_100HZ 2
+#define BMA150_BW_190HZ 3
+#define BMA150_BW_375HZ 4
+#define BMA150_BW_750HZ 5
+#define BMA150_BW_1500HZ 6
+
+#define BMA150_RANGE_2G 0
+#define BMA150_RANGE_4G 1
+#define BMA150_RANGE_8G 2
+
+#define BMA150_MODE_NORMAL 0
+#define BMA150_MODE_SLEEP 2
+#define BMA150_MODE_WAKE_UP 3
+
+/* Data register addresses */
+#define BMA150_DATA_0_REG 0x00
+#define BMA150_DATA_1_REG 0x01
+#define BMA150_DATA_2_REG 0x02
+
+/* Control register addresses */
+#define BMA150_CTRL_0_REG 0x0A
+#define BMA150_CTRL_1_REG 0x0B
+#define BMA150_CTRL_2_REG 0x14
+#define BMA150_CTRL_3_REG 0x15
+
+/* Configuration/Setting register addresses */
+#define BMA150_CFG_0_REG 0x0C
+#define BMA150_CFG_1_REG 0x0D
+#define BMA150_CFG_2_REG 0x0E
+#define BMA150_CFG_3_REG 0x0F
+#define BMA150_CFG_4_REG 0x10
+#define BMA150_CFG_5_REG 0x11
+
+#define BMA150_CHIP_ID 2
+#define BMA150_CHIP_ID_REG BMA150_DATA_0_REG
+
+#define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG
+
+#define BMA150_SLEEP_POS 0
+#define BMA150_SLEEP_MSK 0x01
+#define BMA150_SLEEP_REG BMA150_CTRL_0_REG
+
+#define BMA150_BANDWIDTH_POS 0
+#define BMA150_BANDWIDTH_MSK 0x07
+#define BMA150_BANDWIDTH_REG BMA150_CTRL_2_REG
+
+#define BMA150_RANGE_POS 3
+#define BMA150_RANGE_MSK 0x18
+#define BMA150_RANGE_REG BMA150_CTRL_2_REG
+
+#define BMA150_WAKE_UP_POS 0
+#define BMA150_WAKE_UP_MSK 0x01
+#define BMA150_WAKE_UP_REG BMA150_CTRL_3_REG
+
+#define BMA150_SW_RES_POS 1
+#define BMA150_SW_RES_MSK 0x02
+#define BMA150_SW_RES_REG BMA150_CTRL_0_REG
+
+/* Any-motion interrupt register fields */
+#define BMA150_ANY_MOTION_EN_POS 6
+#define BMA150_ANY_MOTION_EN_MSK 0x40
+#define BMA150_ANY_MOTION_EN_REG BMA150_CTRL_1_REG
+
+#define BMA150_ANY_MOTION_DUR_POS 6
+#define BMA150_ANY_MOTION_DUR_MSK 0xC0
+#define BMA150_ANY_MOTION_DUR_REG BMA150_CFG_5_REG
+
+#define BMA150_ANY_MOTION_THRES_REG BMA150_CFG_4_REG
+
+/* Advanced interrupt register fields */
+#define BMA150_ADV_INT_EN_POS 6
+#define BMA150_ADV_INT_EN_MSK 0x40
+#define BMA150_ADV_INT_EN_REG BMA150_CTRL_3_REG
+
+/* High-G interrupt register fields */
+#define BMA150_HIGH_G_EN_POS 1
+#define BMA150_HIGH_G_EN_MSK 0x02
+#define BMA150_HIGH_G_EN_REG BMA150_CTRL_1_REG
+
+#define BMA150_HIGH_G_HYST_POS 3
+#define BMA150_HIGH_G_HYST_MSK 0x38
+#define BMA150_HIGH_G_HYST_REG BMA150_CFG_5_REG
+
+#define BMA150_HIGH_G_DUR_REG BMA150_CFG_3_REG
+#define BMA150_HIGH_G_THRES_REG BMA150_CFG_2_REG
+
+/* Low-G interrupt register fields */
+#define BMA150_LOW_G_EN_POS 0
+#define BMA150_LOW_G_EN_MSK 0x01
+#define BMA150_LOW_G_EN_REG BMA150_CTRL_1_REG
+
+#define BMA150_LOW_G_HYST_POS 0
+#define BMA150_LOW_G_HYST_MSK 0x07
+#define BMA150_LOW_G_HYST_REG BMA150_CFG_5_REG
+
+#define BMA150_LOW_G_DUR_REG BMA150_CFG_1_REG
+#define BMA150_LOW_G_THRES_REG BMA150_CFG_0_REG
+
+struct bma150_data {
+ struct i2c_client *client;
+ struct input_polled_dev *input_polled;
+ struct input_dev *input;
+ u8 mode;
+};
+
+/*
+ * The default settings below for range, bandwidth and the interrupt
+ * features were provided and verified by Bosch Sensortec; they are
+ * configured to give a generic, general-purpose sensitivity.
+ */
+static struct bma150_cfg default_cfg __devinitdata = {
+ .any_motion_int = 1,
+ .hg_int = 1,
+ .lg_int = 1,
+ .any_motion_dur = 0,
+ .any_motion_thres = 0,
+ .hg_hyst = 0,
+ .hg_dur = 150,
+ .hg_thres = 160,
+ .lg_hyst = 0,
+ .lg_dur = 150,
+ .lg_thres = 20,
+ .range = BMA150_RANGE_2G,
+ .bandwidth = BMA150_BW_50HZ
+};
+
+static int bma150_write_byte(struct i2c_client *client, u8 reg, u8 val)
+{
+ s32 ret;
+
+ /* As per specification, disable irq in between register writes */
+ if (client->irq)
+ disable_irq_nosync(client->irq);
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return ret;
+}
+
+static int bma150_set_reg_bits(struct i2c_client *client,
+ int val, int shift, u8 mask, u8 reg)
+{
+ int data;
+
+ data = i2c_smbus_read_byte_data(client, reg);
+ if (data < 0)
+ return data;
+
+ data = (data & ~mask) | ((val << shift) & mask);
+ return bma150_write_byte(client, reg, data);
+}
+
+static int bma150_set_mode(struct bma150_data *bma150, u8 mode)
+{
+ int error;
+
+ error = bma150_set_reg_bits(bma150->client, mode, BMA150_WAKE_UP_POS,
+ BMA150_WAKE_UP_MSK, BMA150_WAKE_UP_REG);
+ if (error)
+ return error;
+
+ error = bma150_set_reg_bits(bma150->client, mode, BMA150_SLEEP_POS,
+ BMA150_SLEEP_MSK, BMA150_SLEEP_REG);
+ if (error)
+ return error;
+
+ if (mode == BMA150_MODE_NORMAL)
+ msleep(2);
+
+ bma150->mode = mode;
+ return 0;
+}
+
+static int __devinit bma150_soft_reset(struct bma150_data *bma150)
+{
+ int error;
+
+ error = bma150_set_reg_bits(bma150->client, 1, BMA150_SW_RES_POS,
+ BMA150_SW_RES_MSK, BMA150_SW_RES_REG);
+ if (error)
+ return error;
+
+ msleep(2);
+ return 0;
+}
+
+static int __devinit bma150_set_range(struct bma150_data *bma150, u8 range)
+{
+ return bma150_set_reg_bits(bma150->client, range, BMA150_RANGE_POS,
+ BMA150_RANGE_MSK, BMA150_RANGE_REG);
+}
+
+static int __devinit bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
+{
+ return bma150_set_reg_bits(bma150->client, bw, BMA150_BANDWIDTH_POS,
+ BMA150_BANDWIDTH_MSK, BMA150_BANDWIDTH_REG);
+}
+
+static int __devinit bma150_set_low_g_interrupt(struct bma150_data *bma150,
+ u8 enable, u8 hyst, u8 dur, u8 thres)
+{
+ int error;
+
+ error = bma150_set_reg_bits(bma150->client, hyst,
+ BMA150_LOW_G_HYST_POS, BMA150_LOW_G_HYST_MSK,
+ BMA150_LOW_G_HYST_REG);
+ if (error)
+ return error;
+
+ error = bma150_write_byte(bma150->client, BMA150_LOW_G_DUR_REG, dur);
+ if (error)
+ return error;
+
+ error = bma150_write_byte(bma150->client, BMA150_LOW_G_THRES_REG, thres);
+ if (error)
+ return error;
+
+ return bma150_set_reg_bits(bma150->client, !!enable,
+ BMA150_LOW_G_EN_POS, BMA150_LOW_G_EN_MSK,
+ BMA150_LOW_G_EN_REG);
+}
+
+static int __devinit bma150_set_high_g_interrupt(struct bma150_data *bma150,
+ u8 enable, u8 hyst, u8 dur, u8 thres)
+{
+ int error;
+
+ error = bma150_set_reg_bits(bma150->client, hyst,
+ BMA150_HIGH_G_HYST_POS, BMA150_HIGH_G_HYST_MSK,
+ BMA150_HIGH_G_HYST_REG);
+ if (error)
+ return error;
+
+ error = bma150_write_byte(bma150->client,
+ BMA150_HIGH_G_DUR_REG, dur);
+ if (error)
+ return error;
+
+ error = bma150_write_byte(bma150->client,
+ BMA150_HIGH_G_THRES_REG, thres);
+ if (error)
+ return error;
+
+ return bma150_set_reg_bits(bma150->client, !!enable,
+ BMA150_HIGH_G_EN_POS, BMA150_HIGH_G_EN_MSK,
+ BMA150_HIGH_G_EN_REG);
+}
+
+
+static int __devinit bma150_set_any_motion_interrupt(struct bma150_data *bma150,
+ u8 enable, u8 dur, u8 thres)
+{
+ int error;
+
+ error = bma150_set_reg_bits(bma150->client, dur,
+ BMA150_ANY_MOTION_DUR_POS,
+ BMA150_ANY_MOTION_DUR_MSK,
+ BMA150_ANY_MOTION_DUR_REG);
+ if (error)
+ return error;
+
+ error = bma150_write_byte(bma150->client,
+ BMA150_ANY_MOTION_THRES_REG, thres);
+ if (error)
+ return error;
+
+ error = bma150_set_reg_bits(bma150->client, !!enable,
+ BMA150_ADV_INT_EN_POS, BMA150_ADV_INT_EN_MSK,
+ BMA150_ADV_INT_EN_REG);
+ if (error)
+ return error;
+
+ return bma150_set_reg_bits(bma150->client, !!enable,
+ BMA150_ANY_MOTION_EN_POS,
+ BMA150_ANY_MOTION_EN_MSK,
+ BMA150_ANY_MOTION_EN_REG);
+}
+
+static void bma150_report_xyz(struct bma150_data *bma150)
+{
+ u8 data[BMA150_XYZ_DATA_SIZE];
+ s16 x, y, z;
+ s32 ret;
+
+ ret = i2c_smbus_read_i2c_block_data(bma150->client,
+ BMA150_ACC_X_LSB_REG, BMA150_XYZ_DATA_SIZE, data);
+ if (ret != BMA150_XYZ_DATA_SIZE)
+ return;
+
+ x = ((0xc0 & data[0]) >> 6) | (data[1] << 2);
+ y = ((0xc0 & data[2]) >> 6) | (data[3] << 2);
+ z = ((0xc0 & data[4]) >> 6) | (data[5] << 2);
+
+ /* sign extension */
+ x = (s16) (x << 6) >> 6;
+ y = (s16) (y << 6) >> 6;
+ z = (s16) (z << 6) >> 6;
+
+ input_report_abs(bma150->input, ABS_X, x);
+ input_report_abs(bma150->input, ABS_Y, y);
+ input_report_abs(bma150->input, ABS_Z, z);
+ input_sync(bma150->input);
+}
+
+static irqreturn_t bma150_irq_thread(int irq, void *dev)
+{
+ bma150_report_xyz(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void bma150_poll(struct input_polled_dev *dev)
+{
+ bma150_report_xyz(dev->private);
+}
+
+static int bma150_open(struct bma150_data *bma150)
+{
+ int error;
+
+ error = pm_runtime_get_sync(&bma150->client->dev);
+ if (error && error != -ENOSYS)
+ return error;
+
+ /*
+ * See if runtime PM woke up the device. If runtime PM
+ * is disabled we need to do it ourselves.
+ */
+ if (bma150->mode != BMA150_MODE_NORMAL) {
+ error = bma150_set_mode(bma150, BMA150_MODE_NORMAL);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static void bma150_close(struct bma150_data *bma150)
+{
+ pm_runtime_put_sync(&bma150->client->dev);
+
+ if (bma150->mode != BMA150_MODE_SLEEP)
+ bma150_set_mode(bma150, BMA150_MODE_SLEEP);
+}
+
+static int bma150_irq_open(struct input_dev *input)
+{
+ struct bma150_data *bma150 = input_get_drvdata(input);
+
+ return bma150_open(bma150);
+}
+
+static void bma150_irq_close(struct input_dev *input)
+{
+ struct bma150_data *bma150 = input_get_drvdata(input);
+
+ bma150_close(bma150);
+}
+
+static void bma150_poll_open(struct input_polled_dev *ipoll_dev)
+{
+ struct bma150_data *bma150 = ipoll_dev->private;
+
+ bma150_open(bma150);
+}
+
+static void bma150_poll_close(struct input_polled_dev *ipoll_dev)
+{
+ struct bma150_data *bma150 = ipoll_dev->private;
+
+ bma150_close(bma150);
+}
+
+static int __devinit bma150_initialize(struct bma150_data *bma150,
+ const struct bma150_cfg *cfg)
+{
+ int error;
+
+ error = bma150_soft_reset(bma150);
+ if (error)
+ return error;
+
+ error = bma150_set_bandwidth(bma150, cfg->bandwidth);
+ if (error)
+ return error;
+
+ error = bma150_set_range(bma150, cfg->range);
+ if (error)
+ return error;
+
+ if (bma150->client->irq) {
+ error = bma150_set_any_motion_interrupt(bma150,
+ cfg->any_motion_int,
+ cfg->any_motion_dur,
+ cfg->any_motion_thres);
+ if (error)
+ return error;
+
+ error = bma150_set_high_g_interrupt(bma150,
+ cfg->hg_int, cfg->hg_hyst,
+ cfg->hg_dur, cfg->hg_thres);
+ if (error)
+ return error;
+
+ error = bma150_set_low_g_interrupt(bma150,
+ cfg->lg_int, cfg->lg_hyst,
+ cfg->lg_dur, cfg->lg_thres);
+ if (error)
+ return error;
+ }
+
+ return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
+}
+
+static void __devinit bma150_init_input_device(struct bma150_data *bma150,
+ struct input_dev *idev)
+{
+ idev->name = BMA150_DRIVER;
+ idev->phys = BMA150_DRIVER "/input0";
+ idev->id.bustype = BUS_I2C;
+ idev->dev.parent = &bma150->client->dev;
+
+ idev->evbit[0] = BIT_MASK(EV_ABS);
+ input_set_abs_params(idev, ABS_X, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+ input_set_abs_params(idev, ABS_Y, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+ input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+}
+
+static int __devinit bma150_register_input_device(struct bma150_data *bma150)
+{
+ struct input_dev *idev;
+ int error;
+
+ idev = input_allocate_device();
+ if (!idev)
+ return -ENOMEM;
+
+ bma150_init_input_device(bma150, idev);
+
+ idev->open = bma150_irq_open;
+ idev->close = bma150_irq_close;
+ input_set_drvdata(idev, bma150);
+
+ error = input_register_device(idev);
+ if (error) {
+ input_free_device(idev);
+ return error;
+ }
+
+ bma150->input = idev;
+ return 0;
+}
+
+static int __devinit bma150_register_polled_device(struct bma150_data *bma150)
+{
+ struct input_polled_dev *ipoll_dev;
+ int error;
+
+ ipoll_dev = input_allocate_polled_device();
+ if (!ipoll_dev)
+ return -ENOMEM;
+
+ ipoll_dev->private = bma150;
+ ipoll_dev->open = bma150_poll_open;
+ ipoll_dev->close = bma150_poll_close;
+ ipoll_dev->poll = bma150_poll;
+ ipoll_dev->poll_interval = BMA150_POLL_INTERVAL;
+ ipoll_dev->poll_interval_min = BMA150_POLL_MIN;
+ ipoll_dev->poll_interval_max = BMA150_POLL_MAX;
+
+ bma150_init_input_device(bma150, ipoll_dev->input);
+
+ error = input_register_polled_device(ipoll_dev);
+ if (error) {
+ input_free_polled_device(ipoll_dev);
+ return error;
+ }
+
+ bma150->input_polled = ipoll_dev;
+ bma150->input = ipoll_dev->input;
+
+ return 0;
+}
+
+static int __devinit bma150_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct bma150_platform_data *pdata = client->dev.platform_data;
+ const struct bma150_cfg *cfg;
+ struct bma150_data *bma150;
+ int chip_id;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c_check_functionality error\n");
+ return -EIO;
+ }
+
+ chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
+ if (chip_id != BMA150_CHIP_ID) {
+ dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
+ return -EINVAL;
+ }
+
+ bma150 = kzalloc(sizeof(struct bma150_data), GFP_KERNEL);
+ if (!bma150)
+ return -ENOMEM;
+
+ bma150->client = client;
+
+ if (pdata) {
+ if (pdata->irq_gpio_cfg) {
+ error = pdata->irq_gpio_cfg();
+ if (error) {
+ dev_err(&client->dev,
+ "IRQ GPIO conf. error %d, error %d\n",
+ client->irq, error);
+ goto err_free_mem;
+ }
+ }
+ cfg = &pdata->cfg;
+ } else {
+ cfg = &default_cfg;
+ }
+
+ error = bma150_initialize(bma150, cfg);
+ if (error)
+ goto err_free_mem;
+
+ if (client->irq > 0) {
+ error = bma150_register_input_device(bma150);
+ if (error)
+ goto err_free_mem;
+
+ error = request_threaded_irq(client->irq,
+ NULL, bma150_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ BMA150_DRIVER, bma150);
+ if (error) {
+ dev_err(&client->dev,
+ "irq request failed %d, error %d\n",
+ client->irq, error);
+ input_unregister_device(bma150->input);
+ goto err_free_mem;
+ }
+ } else {
+ error = bma150_register_polled_device(bma150);
+ if (error)
+ goto err_free_mem;
+ }
+
+ i2c_set_clientdata(client, bma150);
+
+ pm_runtime_enable(&client->dev);
+
+ return 0;
+
+err_free_mem:
+ kfree(bma150);
+ return error;
+}
+
+static int __devexit bma150_remove(struct i2c_client *client)
+{
+ struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+ pm_runtime_disable(&client->dev);
+
+ if (client->irq > 0) {
+ free_irq(client->irq, bma150);
+ input_unregister_device(bma150->input);
+ } else {
+ input_unregister_polled_device(bma150->input_polled);
+ input_free_polled_device(bma150->input_polled);
+ }
+
+ kfree(bma150);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bma150_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+ return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
+}
+
+static int bma150_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+ return bma150_set_mode(bma150, BMA150_MODE_NORMAL);
+}
+#endif
+
+static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
+
+static const struct i2c_device_id bma150_id[] = {
+ { "bma150", 0 },
+ { "smb380", 0 },
+ { "bma023", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, bma150_id);
+
+static struct i2c_driver bma150_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = BMA150_DRIVER,
+ .pm = &bma150_pm,
+ },
+ .class = I2C_CLASS_HWMON,
+ .id_table = bma150_id,
+ .probe = bma150_probe,
+ .remove = __devexit_p(bma150_remove),
+};
+
+static int __init BMA150_init(void)
+{
+ return i2c_add_driver(&bma150_driver);
+}
+
+static void __exit BMA150_exit(void)
+{
+ i2c_del_driver(&bma150_driver);
+}
+
+MODULE_AUTHOR("Albert Zhang <xu.zhang@bosch-sensortec.com>");
+MODULE_DESCRIPTION("BMA150 driver");
+MODULE_LICENSE("GPL");
+
+module_init(BMA150_init);
+module_exit(BMA150_exit);
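bma150_report_xyz() above assembles each 10-bit sample from two bytes (the two LSBs sit in bits 7:6 of the even byte, the eight MSBs in the odd byte) and then sign-extends it by shifting the value up to bit 15 and arithmetically shifting it back. A small worked example of that reconstruction, assuming the same 10-bit layout and the usual two's-complement shift behaviour the driver also relies on:

#include <stdint.h>
#include <stdio.h>

/* Reassemble and sign-extend one 10-bit BMA150-style sample from two raw bytes. */
static int16_t bma150_sample(uint8_t lsb, uint8_t msb)
{
	int16_t v = ((lsb & 0xc0) >> 6) | (msb << 2);	/* 10 significant bits */

	/* Move the sign bit (bit 9) to bit 15, then arithmetic-shift back down. */
	return (int16_t)(v << 6) >> 6;
}

int main(void)
{
	/* lsb=0x80, msb=0xff encodes 0x3fe, i.e. -2 once sign-extended */
	printf("%d\n", bma150_sample(0x80, 0xff));
	return 0;
}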
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 1633b634226..80793f1608e 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/cma3000.h>
+#include <linux/module.h>
#include "cma3000_d0x.h"
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 19af682c24f..7283dd2a1ad 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/i2c/dm355evm_msp.h>
+#include <linux/module.h>
/*
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 1f38302a595..302ab46ce75 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -111,7 +111,7 @@ static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
input_dev->event = ixp4xx_spkr_event;
err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt,
- IRQF_DISABLED | IRQF_NO_SUSPEND, "ixp4xx-beeper",
+ IRQF_NO_SUSPEND, "ixp4xx-beeper",
(void *) dev->id);
if (err)
goto err_free_device;
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
new file mode 100644
index 00000000000..09b05228865
--- /dev/null
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -0,0 +1,282 @@
+/**
+ * Copyright (C) 2011 Philippe Rétornaz
+ *
+ * Based on twl4030-pwrbutton driver by:
+ * Peter De Schrijver <peter.de-schrijver@nokia.com>
+ * Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/mc13783.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+struct mc13783_pwrb {
+ struct input_dev *pwr;
+ struct mc13xxx *mc13783;
+#define MC13783_PWRB_B1_POL_INVERT (1 << 0)
+#define MC13783_PWRB_B2_POL_INVERT (1 << 1)
+#define MC13783_PWRB_B3_POL_INVERT (1 << 2)
+ int flags;
+ unsigned short keymap[3];
+};
+
+#define MC13783_REG_INTERRUPT_SENSE_1 5
+#define MC13783_IRQSENSE1_ONOFD1S (1 << 3)
+#define MC13783_IRQSENSE1_ONOFD2S (1 << 4)
+#define MC13783_IRQSENSE1_ONOFD3S (1 << 5)
+
+#define MC13783_REG_POWER_CONTROL_2 15
+#define MC13783_POWER_CONTROL_2_ON1BDBNC 4
+#define MC13783_POWER_CONTROL_2_ON2BDBNC 6
+#define MC13783_POWER_CONTROL_2_ON3BDBNC 8
+#define MC13783_POWER_CONTROL_2_ON1BRSTEN (1 << 1)
+#define MC13783_POWER_CONTROL_2_ON2BRSTEN (1 << 2)
+#define MC13783_POWER_CONTROL_2_ON3BRSTEN (1 << 3)
+
+static irqreturn_t button_irq(int irq, void *_priv)
+{
+ struct mc13783_pwrb *priv = _priv;
+ int val;
+
+ mc13xxx_irq_ack(priv->mc13783, irq);
+ mc13xxx_reg_read(priv->mc13783, MC13783_REG_INTERRUPT_SENSE_1, &val);
+
+ switch (irq) {
+ case MC13783_IRQ_ONOFD1:
+ val = val & MC13783_IRQSENSE1_ONOFD1S ? 1 : 0;
+ if (priv->flags & MC13783_PWRB_B1_POL_INVERT)
+ val ^= 1;
+ input_report_key(priv->pwr, priv->keymap[0], val);
+ break;
+
+ case MC13783_IRQ_ONOFD2:
+ val = val & MC13783_IRQSENSE1_ONOFD2S ? 1 : 0;
+ if (priv->flags & MC13783_PWRB_B2_POL_INVERT)
+ val ^= 1;
+ input_report_key(priv->pwr, priv->keymap[1], val);
+ break;
+
+ case MC13783_IRQ_ONOFD3:
+ val = val & MC13783_IRQSENSE1_ONOFD3S ? 1 : 0;
+ if (priv->flags & MC13783_PWRB_B3_POL_INVERT)
+ val ^= 1;
+ input_report_key(priv->pwr, priv->keymap[2], val);
+ break;
+ }
+
+ input_sync(priv->pwr);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mc13783_pwrbutton_probe(struct platform_device *pdev)
+{
+ const struct mc13xxx_buttons_platform_data *pdata;
+ struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *pwr;
+ struct mc13783_pwrb *priv;
+ int err = 0;
+ int reg = 0;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ pwr = input_allocate_device();
+ if (!pwr) {
+ dev_dbg(&pdev->dev, "Can't allocate power button\n");
+ return -ENOMEM;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ dev_dbg(&pdev->dev, "Can't allocate power button\n");
+ goto free_input_dev;
+ }
+
+ reg |= (pdata->b1on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON1BDBNC;
+ reg |= (pdata->b2on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON2BDBNC;
+ reg |= (pdata->b3on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON3BDBNC;
+
+ priv->pwr = pwr;
+ priv->mc13783 = mc13783;
+
+ mc13xxx_lock(mc13783);
+
+ if (pdata->b1on_flags & MC13783_BUTTON_ENABLE) {
+ priv->keymap[0] = pdata->b1on_key;
+ if (pdata->b1on_key != KEY_RESERVED)
+ __set_bit(pdata->b1on_key, pwr->keybit);
+
+ if (pdata->b1on_flags & MC13783_BUTTON_POL_INVERT)
+ priv->flags |= MC13783_PWRB_B1_POL_INVERT;
+
+ if (pdata->b1on_flags & MC13783_BUTTON_RESET_EN)
+ reg |= MC13783_POWER_CONTROL_2_ON1BRSTEN;
+
+ err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD1,
+ button_irq, "b1on", priv);
+ if (err) {
+ dev_dbg(&pdev->dev, "Can't request irq\n");
+ goto free_priv;
+ }
+ }
+
+ if (pdata->b2on_flags & MC13783_BUTTON_ENABLE) {
+ priv->keymap[1] = pdata->b2on_key;
+ if (pdata->b2on_key != KEY_RESERVED)
+ __set_bit(pdata->b2on_key, pwr->keybit);
+
+ if (pdata->b2on_flags & MC13783_BUTTON_POL_INVERT)
+ priv->flags |= MC13783_PWRB_B2_POL_INVERT;
+
+ if (pdata->b2on_flags & MC13783_BUTTON_RESET_EN)
+ reg |= MC13783_POWER_CONTROL_2_ON2BRSTEN;
+
+ err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD2,
+ button_irq, "b2on", priv);
+ if (err) {
+ dev_dbg(&pdev->dev, "Can't request irq\n");
+ goto free_irq_b1;
+ }
+ }
+
+ if (pdata->b3on_flags & MC13783_BUTTON_ENABLE) {
+ priv->keymap[2] = pdata->b3on_key;
+ if (pdata->b3on_key != KEY_RESERVED)
+ __set_bit(pdata->b3on_key, pwr->keybit);
+
+ if (pdata->b3on_flags & MC13783_BUTTON_POL_INVERT)
+ priv->flags |= MC13783_PWRB_B3_POL_INVERT;
+
+ if (pdata->b3on_flags & MC13783_BUTTON_RESET_EN)
+ reg |= MC13783_POWER_CONTROL_2_ON3BRSTEN;
+
+ err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD3,
+ button_irq, "b3on", priv);
+ if (err) {
+ dev_dbg(&pdev->dev, "Can't request irq: %d\n", err);
+ goto free_irq_b2;
+ }
+ }
+
+ mc13xxx_reg_rmw(mc13783, MC13783_REG_POWER_CONTROL_2, 0x3FE, reg);
+
+ mc13xxx_unlock(mc13783);
+
+ pwr->name = "mc13783_pwrbutton";
+ pwr->phys = "mc13783_pwrbutton/input0";
+ pwr->dev.parent = &pdev->dev;
+
+ pwr->keycode = priv->keymap;
+ pwr->keycodemax = ARRAY_SIZE(priv->keymap);
+ pwr->keycodesize = sizeof(priv->keymap[0]);
+ __set_bit(EV_KEY, pwr->evbit);
+
+ err = input_register_device(pwr);
+ if (err) {
+ dev_dbg(&pdev->dev, "Can't register power button: %d\n", err);
+ goto free_irq;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+free_irq:
+ mc13xxx_lock(mc13783);
+
+ if (pdata->b3on_flags & MC13783_BUTTON_ENABLE)
+ mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD3, priv);
+
+free_irq_b2:
+ if (pdata->b2on_flags & MC13783_BUTTON_ENABLE)
+ mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD2, priv);
+
+free_irq_b1:
+ if (pdata->b1on_flags & MC13783_BUTTON_ENABLE)
+ mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD1, priv);
+
+free_priv:
+ mc13xxx_unlock(mc13783);
+ kfree(priv);
+
+free_input_dev:
+ input_free_device(pwr);
+
+ return err;
+}
+
+static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
+{
+ struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
+ const struct mc13xxx_buttons_platform_data *pdata;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ mc13xxx_lock(priv->mc13783);
+
+ if (pdata->b3on_flags & MC13783_BUTTON_ENABLE)
+ mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD3, priv);
+ if (pdata->b2on_flags & MC13783_BUTTON_ENABLE)
+ mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD2, priv);
+ if (pdata->b1on_flags & MC13783_BUTTON_ENABLE)
+ mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD1, priv);
+
+ mc13xxx_unlock(priv->mc13783);
+
+ input_unregister_device(priv->pwr);
+ kfree(priv);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+struct platform_driver mc13783_pwrbutton_driver = {
+ .probe = mc13783_pwrbutton_probe,
+ .remove = __devexit_p(mc13783_pwrbutton_remove),
+ .driver = {
+ .name = "mc13783-pwrbutton",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mc13783_pwrbutton_init(void)
+{
+ return platform_driver_register(&mc13783_pwrbutton_driver);
+}
+module_init(mc13783_pwrbutton_init);
+
+static void __exit mc13783_pwrbutton_exit(void)
+{
+ platform_driver_unregister(&mc13783_pwrbutton_driver);
+}
+module_exit(mc13783_pwrbutton_exit);
+
+MODULE_ALIAS("platform:mc13783-pwrbutton");
+MODULE_DESCRIPTION("MC13783 Power Button");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Philippe Retornaz");
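mc13783_pwrbutton_probe() above unwinds partial setup with a chain of goto labels, so a failure while requesting the second or third button IRQ releases exactly the IRQs and allocations acquired before it. A stripped-down sketch of that staged-unwind idiom, using plain malloc()/free() in place of the driver's IRQs and devices:

#include <stdlib.h>

struct trio { void *a, *b, *c; };

/* Each label releases exactly what was acquired before the failure point. */
static int trio_init(struct trio *t)
{
	t->a = malloc(16);
	if (!t->a)
		return -1;

	t->b = malloc(16);
	if (!t->b)
		goto free_a;

	t->c = malloc(16);
	if (!t->c)
		goto free_b;

	return 0;		/* success: caller now owns a, b and c */

free_b:
	free(t->b);
free_a:
	free(t->a);
	return -1;
}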
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 0794778295f..4d60080bb5d 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -88,13 +88,13 @@ static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
return 0;
}
-static int mma8450_read_xyz(struct mma8450 *m, int *x, int *y, int *z)
+static int mma8450_read_block(struct mma8450 *m, unsigned off,
+ u8 *buf, size_t size)
{
struct i2c_client *c = m->client;
- u8 buff[6];
int err;
- err = i2c_smbus_read_i2c_block_data(c, MMA8450_OUT_X_LSB, 6, buff);
+ err = i2c_smbus_read_i2c_block_data(c, off, size, buf);
if (err < 0) {
dev_err(&c->dev,
"failed to read block data at 0x%02x, error %d\n",
@@ -102,10 +102,6 @@ static int mma8450_read_xyz(struct mma8450 *m, int *x, int *y, int *z)
return err;
}
- *x = ((buff[1] << 4) & 0xff0) | (buff[0] & 0xf);
- *y = ((buff[3] << 4) & 0xff0) | (buff[2] & 0xf);
- *z = ((buff[5] << 4) & 0xff0) | (buff[4] & 0xf);
-
return 0;
}
@@ -114,7 +110,7 @@ static void mma8450_poll(struct input_polled_dev *dev)
struct mma8450 *m = dev->private;
int x, y, z;
int ret;
- int err;
+ u8 buf[6];
ret = mma8450_read(m, MMA8450_STATUS);
if (ret < 0)
@@ -123,10 +119,14 @@ static void mma8450_poll(struct input_polled_dev *dev)
if (!(ret & MMA8450_STATUS_ZXYDR))
return;
- err = mma8450_read_xyz(m, &x, &y, &z);
- if (err)
+ ret = mma8450_read_block(m, MMA8450_OUT_X_LSB, buf, sizeof(buf));
+ if (ret < 0)
return;
+ x = ((buf[1] << 4) & 0xff0) | (buf[0] & 0xf);
+ y = ((buf[3] << 4) & 0xff0) | (buf[2] & 0xf);
+ z = ((buf[5] << 4) & 0xff0) | (buf[4] & 0xf);
+
input_report_abs(dev->input, ABS_X, x);
input_report_abs(dev->input, ABS_Y, y);
input_report_abs(dev->input, ABS_Z, z);
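The mma8450 refactor above moves the block read into a helper but keeps the 12-bit unpacking at the call site: each axis is built from the full MSB byte shifted into bits 11:4 plus the low nibble of the LSB byte. A worked sketch of that packing, using the same masks as the driver:

#include <stdint.h>
#include <stdio.h>

/* Rebuild one 12-bit MMA8450-style sample from its LSB/MSB byte pair. */
static int unpack12(uint8_t lsb, uint8_t msb)
{
	return ((msb << 4) & 0xff0) | (lsb & 0x0f);
}

int main(void)
{
	/* lsb=0x0c, msb=0xab -> 0xabc */
	printf("0x%03x\n", unpack12(0x0c, 0xab));
	return 0;
}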
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
new file mode 100644
index 00000000000..43192930824
--- /dev/null
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -0,0 +1,296 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/mfd/pm8xxx/core.h>
+
+#define VIB_DRV 0x4A
+
+#define VIB_DRV_SEL_MASK 0xf8
+#define VIB_DRV_SEL_SHIFT 0x03
+#define VIB_DRV_EN_MANUAL_MASK 0xfc
+
+#define VIB_MAX_LEVEL_mV (3100)
+#define VIB_MIN_LEVEL_mV (1200)
+#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
+
+#define MAX_FF_SPEED 0xff
+
+/**
+ * struct pm8xxx_vib - structure to hold vibrator data
+ * @vib_input_dev: input device supporting force feedback
+ * @work: work structure to set the vibration parameters
+ * @dev: device supporting force feedback
+ * @speed: speed of vibration set from userland
+ * @active: state of vibrator
+ * @level: level of vibration to set in the chip
+ * @reg_vib_drv: VIB_DRV register value
+ */
+struct pm8xxx_vib {
+ struct input_dev *vib_input_dev;
+ struct work_struct work;
+ struct device *dev;
+ int speed;
+ int level;
+ bool active;
+ u8 reg_vib_drv;
+};
+
+/**
+ * pm8xxx_vib_read_u8 - helper to read a byte from pmic chip
+ * @vib: pointer to vibrator structure
+ * @data: placeholder for data to be read
+ * @reg: register address
+ */
+static int pm8xxx_vib_read_u8(struct pm8xxx_vib *vib,
+ u8 *data, u16 reg)
+{
+ int rc;
+
+ rc = pm8xxx_readb(vib->dev->parent, reg, data);
+ if (rc < 0)
+ dev_warn(vib->dev, "Error reading pm8xxx reg 0x%x(0x%x)\n",
+ reg, rc);
+ return rc;
+}
+
+/**
+ * pm8xxx_vib_write_u8 - helper to write a byte to pmic chip
+ * @vib: pointer to vibrator structure
+ * @data: data to write
+ * @reg: register address
+ */
+static int pm8xxx_vib_write_u8(struct pm8xxx_vib *vib,
+ u8 data, u16 reg)
+{
+ int rc;
+
+ rc = pm8xxx_writeb(vib->dev->parent, reg, data);
+ if (rc < 0)
+ dev_warn(vib->dev, "Error writing pm8xxx reg 0x%x(0x%x)\n",
+ reg, rc);
+ return rc;
+}
+
+/**
+ * pm8xxx_vib_set - handler to start/stop vibration
+ * @vib: pointer to vibrator structure
+ * @on: state to set
+ */
+static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
+{
+ int rc;
+ u8 val = vib->reg_vib_drv;
+
+ if (on)
+ val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
+ else
+ val &= ~VIB_DRV_SEL_MASK;
+
+ rc = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+ if (rc < 0)
+ return rc;
+
+ vib->reg_vib_drv = val;
+ return 0;
+}
+
+/**
+ * pm8xxx_work_handler - worker to set vibration level
+ * @work: pointer to work_struct
+ */
+static void pm8xxx_work_handler(struct work_struct *work)
+{
+ struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib, work);
+ int rc;
+ u8 val;
+
+ rc = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
+ if (rc < 0)
+ return;
+
+ /*
+ * The PMIC vibrator supports drive voltages from 1.2 V to 3.1 V,
+ * so scale the requested level to fit into that range.
+ */
+ if (vib->speed) {
+ vib->active = true;
+ vib->level = ((VIB_MAX_LEVELS * vib->speed) / MAX_FF_SPEED) +
+ VIB_MIN_LEVEL_mV;
+ vib->level /= 100;
+ } else {
+ vib->active = false;
+ vib->level = VIB_MIN_LEVEL_mV / 100;
+ }
+
+ pm8xxx_vib_set(vib, vib->active);
+}
+
+/**
+ * pm8xxx_vib_close - input device close callback
+ * @dev: input device pointer
+ *
+ * Turns off the vibrator.
+ */
+static void pm8xxx_vib_close(struct input_dev *dev)
+{
+ struct pm8xxx_vib *vib = input_get_drvdata(dev);
+
+ cancel_work_sync(&vib->work);
+ if (vib->active)
+ pm8xxx_vib_set(vib, false);
+}
+
+/**
+ * pm8xxx_vib_play_effect - function to handle vib effects.
+ * @dev: input device pointer
+ * @data: data of effect
+ * @effect: effect to play
+ *
+ * Currently this driver supports only rumble effects.
+ */
+static int pm8xxx_vib_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct pm8xxx_vib *vib = input_get_drvdata(dev);
+
+ vib->speed = effect->u.rumble.strong_magnitude >> 8;
+ if (!vib->speed)
+ vib->speed = effect->u.rumble.weak_magnitude >> 9;
+
+ schedule_work(&vib->work);
+
+ return 0;
+}
+
+static int __devinit pm8xxx_vib_probe(struct platform_device *pdev)
+
+{
+ struct pm8xxx_vib *vib;
+ struct input_dev *input_dev;
+ int error;
+ u8 val;
+
+ vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!vib || !input_dev) {
+ dev_err(&pdev->dev, "couldn't allocate memory\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ INIT_WORK(&vib->work, pm8xxx_work_handler);
+ vib->dev = &pdev->dev;
+ vib->vib_input_dev = input_dev;
+
+ /* operate in manual mode */
+ error = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
+ if (error < 0)
+ goto err_free_mem;
+ val &= ~VIB_DRV_EN_MANUAL_MASK;
+ error = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+ if (error < 0)
+ goto err_free_mem;
+
+ vib->reg_vib_drv = val;
+
+ input_dev->name = "pm8xxx_vib_ffmemless";
+ input_dev->id.version = 1;
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->close = pm8xxx_vib_close;
+ input_set_drvdata(input_dev, vib);
+ input_set_capability(vib->vib_input_dev, EV_FF, FF_RUMBLE);
+
+ error = input_ff_create_memless(input_dev, NULL,
+ pm8xxx_vib_play_effect);
+ if (error) {
+ dev_err(&pdev->dev,
+ "couldn't register vibrator as FF device\n");
+ goto err_free_mem;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "couldn't register input device\n");
+ goto err_destroy_memless;
+ }
+
+ platform_set_drvdata(pdev, vib);
+ return 0;
+
+err_destroy_memless:
+ input_ff_destroy(input_dev);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(vib);
+
+ return error;
+}
+
+static int __devexit pm8xxx_vib_remove(struct platform_device *pdev)
+{
+ struct pm8xxx_vib *vib = platform_get_drvdata(pdev);
+
+ input_unregister_device(vib->vib_input_dev);
+ kfree(vib);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pm8xxx_vib_suspend(struct device *dev)
+{
+ struct pm8xxx_vib *vib = dev_get_drvdata(dev);
+
+ /* Turn off the vibrator */
+ pm8xxx_vib_set(vib, false);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
+
+static struct platform_driver pm8xxx_vib_driver = {
+ .probe = pm8xxx_vib_probe,
+ .remove = __devexit_p(pm8xxx_vib_remove),
+ .driver = {
+ .name = "pm8xxx-vib",
+ .owner = THIS_MODULE,
+ .pm = &pm8xxx_vib_pm_ops,
+ },
+};
+
+static int __init pm8xxx_vib_init(void)
+{
+ return platform_driver_register(&pm8xxx_vib_driver);
+}
+module_init(pm8xxx_vib_init);
+
+static void __exit pm8xxx_vib_exit(void)
+{
+ platform_driver_unregister(&pm8xxx_vib_driver);
+}
+module_exit(pm8xxx_vib_exit);
+
+MODULE_ALIAS("platform:pm8xxx_vib");
+MODULE_DESCRIPTION("PMIC8xxx vibrator driver based on ff-memless framework");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Amy Maloche <amaloche@codeaurora.org>");
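
A minimal userspace sketch of the level arithmetic used by pm8xxx_work_handler()
above, for reference. VIB_MIN_LEVEL_mV, VIB_MAX_LEVELS and MAX_FF_SPEED are
defined near the top of pm8xxx-vibrator.c and are not visible in this hunk; the
values below (1200 mV, 1900 and 0xff, matching the 1.2 V to 3.1 V range noted in
the comment) are assumptions used only for illustration.

#include <stdio.h>

/* Assumed values; the real constants live earlier in pm8xxx-vibrator.c. */
#define VIB_MIN_LEVEL_mV 1200		/* 1.2 V */
#define VIB_MAX_LEVELS   1900		/* 3.1 V - 1.2 V, in mV */
#define MAX_FF_SPEED     0xff

/* Mirrors the level computation in pm8xxx_work_handler(). */
static int pm8xxx_speed_to_level(int speed)
{
	int level;

	if (!speed)
		return VIB_MIN_LEVEL_mV / 100;

	level = ((VIB_MAX_LEVELS * speed) / MAX_FF_SPEED) + VIB_MIN_LEVEL_mV;
	return level / 100;	/* the driver stores the level in 100 mV units */
}

int main(void)
{
	int speed;

	/* e.g. strong_magnitude 0x8000 -> speed 0x80 -> level 21 (2.1 V) */
	for (speed = 0; speed <= MAX_FF_SPEED; speed += 0x40)
		printf("speed %3d -> level %2d (%d mV)\n", speed,
		       pm8xxx_speed_to_level(speed),
		       pm8xxx_speed_to_level(speed) * 100);
	return 0;
}
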
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index c43002e7ec7..ad153a417ee 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -74,12 +74,12 @@ static irqreturn_t twl6040_vib_irq_handler(int irq, void *data)
if (status & TWL6040_VIBLOCDET) {
dev_warn(info->dev, "Left Vibrator overcurrent detected\n");
twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLL,
- TWL6040_VIBENAL);
+ TWL6040_VIBENA);
}
if (status & TWL6040_VIBROCDET) {
dev_warn(info->dev, "Right Vibrator overcurrent detected\n");
twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLR,
- TWL6040_VIBENAR);
+ TWL6040_VIBENA);
}
return IRQ_HANDLED;
@@ -97,23 +97,23 @@ static void twl6040_vibra_enable(struct vibra_info *info)
}
twl6040_power(info->twl6040, 1);
- if (twl6040->rev <= TWL6040_REV_ES1_1) {
+ if (twl6040_get_revid(twl6040) <= TWL6040_REV_ES1_1) {
/*
* ERRATA: Disable overcurrent protection for at least
* 3ms when enabling vibrator drivers to avoid false
* overcurrent detection
*/
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
- TWL6040_VIBENAL | TWL6040_VIBCTRLL);
+ TWL6040_VIBENA | TWL6040_VIBCTRL);
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
- TWL6040_VIBENAR | TWL6040_VIBCTRLR);
+ TWL6040_VIBENA | TWL6040_VIBCTRL);
usleep_range(3000, 3500);
}
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
- TWL6040_VIBENAL);
+ TWL6040_VIBENA);
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
- TWL6040_VIBENAR);
+ TWL6040_VIBENA);
info->enabled = true;
}
@@ -201,6 +201,13 @@ static int vibra_play(struct input_dev *input, void *data,
struct vibra_info *info = input_get_drvdata(input);
int ret;
+ /* Do not allow the effect to play while the routing is set to audio */
+ ret = twl6040_get_vibralr_status(info->twl6040);
+ if (ret & TWL6040_VIBSEL) {
+ dev_info(&input->dev, "Vibra is configured for audio\n");
+ return -EBUSY;
+ }
+
info->weak_speed = effect->u.rumble.weak_magnitude;
info->strong_speed = effect->u.rumble.strong_magnitude;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
@@ -228,7 +235,7 @@ static void twl6040_vibra_close(struct input_dev *input)
mutex_unlock(&info->mutex);
}
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int twl6040_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 99d58764ef0..003587c71f4 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -23,13 +23,6 @@
#include "psmouse.h"
#include "alps.h"
-#undef DEBUG
-#ifdef DEBUG
-#define dbg(format, arg...) printk(KERN_INFO "alps.c: " format "\n", ## arg)
-#else
-#define dbg(format, arg...) do {} while (0)
-#endif
-
#define ALPS_OLDPROTO 0x01 /* old style input */
#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -297,10 +290,10 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
psmouse->packet[4] |
psmouse->packet[5]) & 0x80) ||
(!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
- dbg("refusing packet %x %x %x %x "
- "(suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5], psmouse->packet[6]);
+ psmouse_dbg(psmouse,
+ "refusing packet %x %x %x %x (suspected interleaved ps/2)\n",
+ psmouse->packet[3], psmouse->packet[4],
+ psmouse->packet[5], psmouse->packet[6]);
return PSMOUSE_BAD_DATA;
}
@@ -319,13 +312,13 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
* There is also possibility that we got 6-byte ALPS
* packet followed by 3-byte packet from trackpoint. We
* can not distinguish between these 2 scenarios but
- * becase the latter is unlikely to happen in course of
+ * because the latter is unlikely to happen in course of
* normal operation (user would need to press all
* buttons on the pad and start moving trackpoint
* without touching the pad surface) we assume the former.
* Even if we are wrong the worst thing that would happen is
* that the cursor would jump but we should not get protocol
- * desynchronization.
+ * de-synchronization.
*/
alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
@@ -361,10 +354,10 @@ static void alps_flush_packet(unsigned long data)
if ((psmouse->packet[3] |
psmouse->packet[4] |
psmouse->packet[5]) & 0x80) {
- dbg("refusing packet %x %x %x "
- "(suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5]);
+ psmouse_dbg(psmouse,
+ "refusing packet %x %x %x (suspected interleaved ps/2)\n",
+ psmouse->packet[3], psmouse->packet[4],
+ psmouse->packet[5]);
} else {
alps_process_packet(psmouse);
}
@@ -396,16 +389,18 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
}
if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
- dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
- psmouse->packet[0], model->mask0, model->byte0);
+ psmouse_dbg(psmouse,
+ "refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
+ psmouse->packet[0], model->mask0, model->byte0);
return PSMOUSE_BAD_DATA;
}
/* Bytes 2 - 6 should have 0 in the highest bit */
if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
- dbg("refusing packet[%i] = %x\n",
- psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]);
+ psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
+ psmouse->pktcnt - 1,
+ psmouse->packet[psmouse->pktcnt - 1]);
return PSMOUSE_BAD_DATA;
}
@@ -439,7 +434,8 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
return NULL;
- dbg("E6 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]);
+ psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
+ param[0], param[1], param[2]);
if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
return NULL;
@@ -459,7 +455,8 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
return NULL;
- dbg("E7 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]);
+ psmouse_dbg(psmouse, "E7 report: %2.2x %2.2x %2.2x",
+ param[0], param[1], param[2]);
if (version) {
for (i = 0; i < ARRAY_SIZE(rates) && param[2] != rates[i]; i++)
@@ -527,7 +524,8 @@ static int alps_get_status(struct psmouse *psmouse, char *param)
ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
return -1;
- dbg("Status: %2.2x %2.2x %2.2x", param[0], param[1], param[2]);
+ psmouse_dbg(psmouse, "Status: %2.2x %2.2x %2.2x",
+ param[0], param[1], param[2]);
return 0;
}
@@ -605,12 +603,12 @@ static int alps_hw_init(struct psmouse *psmouse)
}
if (alps_tap_mode(psmouse, true)) {
- printk(KERN_WARNING "alps.c: Failed to enable hardware tapping\n");
+ psmouse_warn(psmouse, "Failed to enable hardware tapping\n");
return -1;
}
if (alps_absolute_mode(psmouse)) {
- printk(KERN_ERR "alps.c: Failed to enable absolute mode\n");
+ psmouse_err(psmouse, "Failed to enable absolute mode\n");
return -1;
}
@@ -621,7 +619,7 @@ static int alps_hw_init(struct psmouse *psmouse)
/* ALPS needs stream mode, otherwise it won't report any data */
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSTREAM)) {
- printk(KERN_ERR "alps.c: Failed to enable stream mode\n");
+ psmouse_err(psmouse, "Failed to enable stream mode\n");
return -1;
}
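
The alps.c changes above (and the hgpk.c, logips2pp.c and psmouse-base.c hunks
later in this diff) replace bare printk()/dbg() calls with psmouse_dbg(),
psmouse_warn() and psmouse_err(), which prefix each message with the device it
came from. Those helpers are declared in psmouse.h and are not part of this
hunk; the self-contained userspace sketch below only illustrates the prefixed
logging pattern they implement (struct fake_psmouse and fake_psmouse_log() are
names invented for this example).

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for struct psmouse: just enough state to identify the device. */
struct fake_psmouse {
	const char *name;
	const char *phys;
};

/* Route every message through one helper that prepends the device identity. */
static void fake_psmouse_log(const struct fake_psmouse *psmouse,
			     const char *level, const char *fmt, ...)
{
	va_list args;

	printf("psmouse %s: %s at %s: ", level, psmouse->name, psmouse->phys);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

#define fake_psmouse_dbg(psmouse, fmt, ...) \
	fake_psmouse_log(psmouse, "debug", fmt, ##__VA_ARGS__)

int main(void)
{
	struct fake_psmouse psmouse = {
		"AlpsPS/2 ALPS GlidePoint", "isa0060/serio1/input0"
	};

	fake_psmouse_dbg(&psmouse,
			 "refusing packet %x %x %x (suspected interleaved ps/2)\n",
			 0x83, 0x81, 0x80);
	return 0;
}
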
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 32503565faf..09b93b11a27 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -10,8 +10,6 @@
* Trademarks are the property of their respective owners.
*/
-#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
-
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -25,13 +23,10 @@
#define elantech_debug(fmt, ...) \
do { \
if (etd->debug) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ psmouse_printk(KERN_DEBUG, psmouse, \
+ fmt, ##__VA_ARGS__); \
} while (0)
-static bool force_elantech;
-module_param_named(force_elantech, force_elantech, bool, 0644);
-MODULE_PARM_DESC(force_elantech, "Force the Elantech PS/2 protocol extension to be used, 1 = enabled, 0 = disabled (default).");
-
/*
* Send a Synaptics style sliced query command
*/
@@ -40,7 +35,7 @@ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
{
if (psmouse_sliced_command(psmouse, c) ||
ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- pr_err("synaptics_send_cmd query 0x%02x failed.\n", c);
+ psmouse_err(psmouse, "%s query 0x%02x failed.\n", __func__, c);
return -1;
}
@@ -69,7 +64,7 @@ static int elantech_ps2_command(struct psmouse *psmouse,
} while (tries > 0);
if (rc)
- pr_err("ps2 command 0x%02x failed.\n", command);
+ psmouse_err(psmouse, "ps2 command 0x%02x failed.\n", command);
return rc;
}
@@ -84,7 +79,7 @@ static int elantech_read_reg(struct psmouse *psmouse, unsigned char reg,
unsigned char param[3];
int rc = 0;
- if (reg < 0x10 || reg > 0x26)
+ if (reg < 0x07 || reg > 0x26)
return -1;
if (reg > 0x11 && reg < 0x20)
@@ -108,12 +103,24 @@ static int elantech_read_reg(struct psmouse *psmouse, unsigned char reg,
rc = -1;
}
break;
+
+ case 3 ... 4:
+ if (elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, ETP_REGISTER_READWRITE) ||
+ elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, reg) ||
+ elantech_ps2_command(psmouse, param, PSMOUSE_CMD_GETINFO)) {
+ rc = -1;
+ }
+ break;
}
if (rc)
- pr_err("failed to read register 0x%02x.\n", reg);
- else
+ psmouse_err(psmouse, "failed to read register 0x%02x.\n", reg);
+ else if (etd->hw_version != 4)
*val = param[0];
+ else
+ *val = param[1];
return rc;
}
@@ -127,7 +134,7 @@ static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
struct elantech_data *etd = psmouse->private;
int rc = 0;
- if (reg < 0x10 || reg > 0x26)
+ if (reg < 0x07 || reg > 0x26)
return -1;
if (reg > 0x11 && reg < 0x20)
@@ -154,11 +161,38 @@ static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
rc = -1;
}
break;
+
+ case 3:
+ if (elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, ETP_REGISTER_READWRITE) ||
+ elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, reg) ||
+ elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, val) ||
+ elantech_ps2_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11)) {
+ rc = -1;
+ }
+ break;
+
+ case 4:
+ if (elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, ETP_REGISTER_READWRITE) ||
+ elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, reg) ||
+ elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, ETP_REGISTER_READWRITE) ||
+ elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ elantech_ps2_command(psmouse, NULL, val) ||
+ elantech_ps2_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11)) {
+ rc = -1;
+ }
+ break;
}
if (rc)
- pr_err("failed to write register 0x%02x with value 0x%02x.\n",
- reg, val);
+ psmouse_err(psmouse,
+ "failed to write register 0x%02x with value 0x%02x.\n",
+ reg, val);
return rc;
}
@@ -166,13 +200,13 @@ static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
/*
* Dump a complete mouse movement packet to the syslog
*/
-static void elantech_packet_dump(unsigned char *packet, int size)
+static void elantech_packet_dump(struct psmouse *psmouse)
{
int i;
- printk(KERN_DEBUG pr_fmt("PS/2 packet ["));
- for (i = 0; i < size; i++)
- printk("%s0x%02x ", (i) ? ", " : " ", packet[i]);
+ psmouse_printk(KERN_DEBUG, psmouse, "PS/2 packet [");
+ for (i = 0; i < psmouse->pktsize; i++)
+ printk("%s0x%02x ", i ? ", " : " ", psmouse->packet[i]);
printk("]\n");
}
@@ -223,7 +257,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
input_report_abs(dev, ABS_X,
((packet[1] & 0x0c) << 6) | packet[2]);
input_report_abs(dev, ABS_Y,
- ETP_YMAX_V1 - (((packet[1] & 0x03) << 8) | packet[3]));
+ etd->y_max - (((packet[1] & 0x03) << 8) | packet[3]));
}
input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
@@ -233,7 +267,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
if (etd->fw_version < 0x020000 &&
- (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
+ (etd->capabilities[0] & ETP_CAP_HAS_ROCKER)) {
/* rocker up */
input_report_key(dev, BTN_FORWARD, packet[0] & 0x40);
/* rocker down */
@@ -273,11 +307,11 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
struct elantech_data *etd = psmouse->private;
struct input_dev *dev = psmouse->dev;
unsigned char *packet = psmouse->packet;
- unsigned int fingers, x1 = 0, y1 = 0, x2 = 0, y2 = 0, width = 0, pres = 0;
+ unsigned int fingers, x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+ unsigned int width = 0, pres = 0;
/* byte 0: n1 n0 . . . . R L */
fingers = (packet[0] & 0xc0) >> 6;
- input_report_key(dev, BTN_TOUCH, fingers != 0);
switch (fingers) {
case 3:
@@ -290,18 +324,15 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
/* pass through... */
case 1:
/*
- * byte 1: . . . . . x10 x9 x8
+ * byte 1: . . . . x11 x10 x9 x8
* byte 2: x7 x6 x5 x4 x4 x2 x1 x0
*/
- x1 = ((packet[1] & 0x07) << 8) | packet[2];
+ x1 = ((packet[1] & 0x0f) << 8) | packet[2];
/*
- * byte 4: . . . . . . y9 y8
+ * byte 4: . . . . y11 y10 y9 y8
* byte 5: y7 y6 y5 y4 y3 y2 y1 y0
*/
- y1 = ETP_YMAX_V2 - (((packet[4] & 0x03) << 8) | packet[5]);
-
- input_report_abs(dev, ABS_X, x1);
- input_report_abs(dev, ABS_Y, y1);
+ y1 = etd->y_max - (((packet[4] & 0x0f) << 8) | packet[5]);
pres = (packet[1] & 0xf0) | ((packet[4] & 0xf0) >> 4);
width = ((packet[0] & 0x30) >> 2) | ((packet[3] & 0x30) >> 4);
@@ -314,22 +345,18 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
* byte 0: . . ay8 ax8 . . . .
* byte 1: ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0
*/
- x1 = ((packet[0] & 0x10) << 4) | packet[1];
+ x1 = (((packet[0] & 0x10) << 4) | packet[1]) << 2;
/* byte 2: ay7 ay6 ay5 ay4 ay3 ay2 ay1 ay0 */
- y1 = ETP_2FT_YMAX - (((packet[0] & 0x20) << 3) | packet[2]);
+ y1 = etd->y_max -
+ ((((packet[0] & 0x20) << 3) | packet[2]) << 2);
/*
* byte 3: . . by8 bx8 . . . .
* byte 4: bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0
*/
- x2 = ((packet[3] & 0x10) << 4) | packet[4];
+ x2 = (((packet[3] & 0x10) << 4) | packet[4]) << 2;
/* byte 5: by7 by8 by5 by4 by3 by2 by1 by0 */
- y2 = ETP_2FT_YMAX - (((packet[3] & 0x20) << 3) | packet[5]);
- /*
- * For compatibility with the X Synaptics driver scale up
- * one coordinate and report as ordinary mouse movent
- */
- input_report_abs(dev, ABS_X, x1 << 2);
- input_report_abs(dev, ABS_Y, y1 << 2);
+ y2 = etd->y_max -
+ ((((packet[3] & 0x20) << 3) | packet[5]) << 2);
/* Unknown so just report sensible values */
pres = 127;
@@ -337,6 +364,11 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
break;
}
+ input_report_key(dev, BTN_TOUCH, fingers != 0);
+ if (fingers != 0) {
+ input_report_abs(dev, ABS_X, x1);
+ input_report_abs(dev, ABS_Y, y1);
+ }
elantech_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
@@ -352,7 +384,208 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
input_sync(dev);
}
-static int elantech_check_parity_v1(struct psmouse *psmouse)
+/*
+ * Interpret complete data packets and report absolute mode input events for
+ * hardware version 3. (12 byte packets for two fingers)
+ */
+static void elantech_report_absolute_v3(struct psmouse *psmouse,
+ int packet_type)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ unsigned int fingers = 0, x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+ unsigned int width = 0, pres = 0;
+
+ /* byte 0: n1 n0 . . . . R L */
+ fingers = (packet[0] & 0xc0) >> 6;
+
+ switch (fingers) {
+ case 3:
+ case 1:
+ /*
+ * byte 1: . . . . x11 x10 x9 x8
+ * byte 2: x7 x6 x5 x4 x4 x2 x1 x0
+ */
+ x1 = ((packet[1] & 0x0f) << 8) | packet[2];
+ /*
+ * byte 4: . . . . y11 y10 y9 y8
+ * byte 5: y7 y6 y5 y4 y3 y2 y1 y0
+ */
+ y1 = etd->y_max - (((packet[4] & 0x0f) << 8) | packet[5]);
+ break;
+
+ case 2:
+ if (packet_type == PACKET_V3_HEAD) {
+ /*
+ * byte 1: . . . . ax11 ax10 ax9 ax8
+ * byte 2: ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0
+ */
+ etd->mt[0].x = ((packet[1] & 0x0f) << 8) | packet[2];
+ /*
+ * byte 4: . . . . ay11 ay10 ay9 ay8
+ * byte 5: ay7 ay6 ay5 ay4 ay3 ay2 ay1 ay0
+ */
+ etd->mt[0].y = etd->y_max -
+ (((packet[4] & 0x0f) << 8) | packet[5]);
+ /*
+ * wait for next packet
+ */
+ return;
+ }
+
+ /* packet_type == PACKET_V3_TAIL */
+ x1 = etd->mt[0].x;
+ y1 = etd->mt[0].y;
+ x2 = ((packet[1] & 0x0f) << 8) | packet[2];
+ y2 = etd->y_max - (((packet[4] & 0x0f) << 8) | packet[5]);
+ break;
+ }
+
+ pres = (packet[1] & 0xf0) | ((packet[4] & 0xf0) >> 4);
+ width = ((packet[0] & 0x30) >> 2) | ((packet[3] & 0x30) >> 4);
+
+ input_report_key(dev, BTN_TOUCH, fingers != 0);
+ if (fingers != 0) {
+ input_report_abs(dev, ABS_X, x1);
+ input_report_abs(dev, ABS_Y, y1);
+ }
+ elantech_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+ input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
+ input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
+ input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ input_report_abs(dev, ABS_PRESSURE, pres);
+ input_report_abs(dev, ABS_TOOL_WIDTH, width);
+
+ input_sync(dev);
+}
+
+static void elantech_input_sync_v4(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ unsigned char *packet = psmouse->packet;
+
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ input_mt_report_pointer_emulation(dev, true);
+ input_sync(dev);
+}
+
+static void process_packet_status_v4(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ unsigned char *packet = psmouse->packet;
+ unsigned fingers;
+ int i;
+
+ /* notify finger state change */
+ fingers = packet[1] & 0x1f;
+ for (i = 0; i < ETP_MAX_FINGERS; i++) {
+ if ((fingers & (1 << i)) == 0) {
+ input_mt_slot(dev, i);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, false);
+ }
+ }
+
+ elantech_input_sync_v4(psmouse);
+}
+
+static void process_packet_head_v4(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ int id = ((packet[3] & 0xe0) >> 5) - 1;
+ int pres, traces;
+
+ if (id < 0)
+ return;
+
+ etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
+ etd->mt[id].y = etd->y_max - (((packet[4] & 0x0f) << 8) | packet[5]);
+ pres = (packet[1] & 0xf0) | ((packet[4] & 0xf0) >> 4);
+ traces = (packet[0] & 0xf0) >> 4;
+
+ input_mt_slot(dev, id);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
+
+ input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
+ input_report_abs(dev, ABS_MT_PRESSURE, pres);
+ input_report_abs(dev, ABS_MT_TOUCH_MAJOR, traces * etd->width);
+ /* report this for backwards compatibility */
+ input_report_abs(dev, ABS_TOOL_WIDTH, traces);
+
+ elantech_input_sync_v4(psmouse);
+}
+
+static void process_packet_motion_v4(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ int weight, delta_x1 = 0, delta_y1 = 0, delta_x2 = 0, delta_y2 = 0;
+ int id, sid;
+
+ id = ((packet[0] & 0xe0) >> 5) - 1;
+ if (id < 0)
+ return;
+
+ sid = ((packet[3] & 0xe0) >> 5) - 1;
+ weight = (packet[0] & 0x10) ? ETP_WEIGHT_VALUE : 1;
+ /*
+ * Motion packets give us the x and y deltas of specific fingers,
+ * in two's complement. Let the compiler do the sign conversion,
+ * and widen the values to int so the weighted sums cannot overflow.
+ */
+ delta_x1 = (signed char)packet[1];
+ delta_y1 = (signed char)packet[2];
+ delta_x2 = (signed char)packet[4];
+ delta_y2 = (signed char)packet[5];
+
+ etd->mt[id].x += delta_x1 * weight;
+ etd->mt[id].y -= delta_y1 * weight;
+ input_mt_slot(dev, id);
+ input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
+
+ if (sid >= 0) {
+ etd->mt[sid].x += delta_x2 * weight;
+ etd->mt[sid].y -= delta_y2 * weight;
+ input_mt_slot(dev, sid);
+ input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[sid].x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[sid].y);
+ }
+
+ elantech_input_sync_v4(psmouse);
+}
+
+static void elantech_report_absolute_v4(struct psmouse *psmouse,
+ int packet_type)
+{
+ switch (packet_type) {
+ case PACKET_V4_STATUS:
+ process_packet_status_v4(psmouse);
+ break;
+
+ case PACKET_V4_HEAD:
+ process_packet_head_v4(psmouse);
+ break;
+
+ case PACKET_V4_MOTION:
+ process_packet_motion_v4(psmouse);
+ break;
+
+ case PACKET_UNKNOWN:
+ default:
+ /* impossible to get here */
+ break;
+ }
+}
+
+static int elantech_packet_check_v1(struct psmouse *psmouse)
{
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
@@ -376,31 +609,142 @@ static int elantech_check_parity_v1(struct psmouse *psmouse)
etd->parity[packet[3]] == p3;
}
+static int elantech_debounce_check_v2(struct psmouse *psmouse)
+{
+ /*
+ * When we encounter a packet that matches this exactly, the
+ * hardware is in its debounce state. Just ignore the whole packet.
+ */
+ const u8 debounce_packet[] = { 0x84, 0xff, 0xff, 0x02, 0xff, 0xff };
+ unsigned char *packet = psmouse->packet;
+
+ return !memcmp(packet, debounce_packet, sizeof(debounce_packet));
+}
+
+static int elantech_packet_check_v2(struct psmouse *psmouse)
+{
+ struct elantech_data *etd = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+
+ /*
+ * V2 hardware comes in two flavors: older ones that do not report
+ * pressure, and newer ones that report pressure and width. With newer
+ * ones, all packets (1, 2 and 3 finger touch) have the same constant
+ * bits. With older ones, 1/3 finger touch packets and 2 finger touch
+ * packets have different constant bits.
+ * In all three cases, if the constant bits are not exactly what we
+ * expect, the packet is considered invalid.
+ */
+ if (etd->reports_pressure)
+ return (packet[0] & 0x0c) == 0x04 &&
+ (packet[3] & 0x0f) == 0x02;
+
+ if ((packet[0] & 0xc0) == 0x80)
+ return (packet[0] & 0x0c) == 0x0c &&
+ (packet[3] & 0x0e) == 0x08;
+
+ return (packet[0] & 0x3c) == 0x3c &&
+ (packet[1] & 0xf0) == 0x00 &&
+ (packet[3] & 0x3e) == 0x38 &&
+ (packet[4] & 0xf0) == 0x00;
+}
+
+/*
+ * We check the constant bits to determine what packet type we get,
+ * so packet checking is mandatory for v3 and later hardware.
+ */
+static int elantech_packet_check_v3(struct psmouse *psmouse)
+{
+ const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
+ unsigned char *packet = psmouse->packet;
+
+ /*
+ * check debounce first, it has the same signature in byte 0
+ * and byte 3 as PACKET_V3_HEAD.
+ */
+ if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
+ return PACKET_DEBOUNCE;
+
+ if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
+ return PACKET_V3_HEAD;
+
+ if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
+ return PACKET_V3_TAIL;
+
+ return PACKET_UNKNOWN;
+}
+
+static int elantech_packet_check_v4(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+
+ if ((packet[0] & 0x0c) == 0x04 &&
+ (packet[3] & 0x1f) == 0x11)
+ return PACKET_V4_HEAD;
+
+ if ((packet[0] & 0x0c) == 0x04 &&
+ (packet[3] & 0x1f) == 0x12)
+ return PACKET_V4_MOTION;
+
+ if ((packet[0] & 0x0c) == 0x04 &&
+ (packet[3] & 0x1f) == 0x10)
+ return PACKET_V4_STATUS;
+
+ return PACKET_UNKNOWN;
+}
+
/*
* Process byte stream from mouse and handle complete packets
*/
static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
{
struct elantech_data *etd = psmouse->private;
+ int packet_type;
if (psmouse->pktcnt < psmouse->pktsize)
return PSMOUSE_GOOD_DATA;
if (etd->debug > 1)
- elantech_packet_dump(psmouse->packet, psmouse->pktsize);
+ elantech_packet_dump(psmouse);
switch (etd->hw_version) {
case 1:
- if (etd->paritycheck && !elantech_check_parity_v1(psmouse))
+ if (etd->paritycheck && !elantech_packet_check_v1(psmouse))
return PSMOUSE_BAD_DATA;
elantech_report_absolute_v1(psmouse);
break;
case 2:
- /* We don't know how to check parity in protocol v2 */
+ /* ignore debounce */
+ if (elantech_debounce_check_v2(psmouse))
+ return PSMOUSE_FULL_PACKET;
+
+ if (etd->paritycheck && !elantech_packet_check_v2(psmouse))
+ return PSMOUSE_BAD_DATA;
+
elantech_report_absolute_v2(psmouse);
break;
+
+ case 3:
+ packet_type = elantech_packet_check_v3(psmouse);
+ /* ignore debounce */
+ if (packet_type == PACKET_DEBOUNCE)
+ return PSMOUSE_FULL_PACKET;
+
+ if (packet_type == PACKET_UNKNOWN)
+ return PSMOUSE_BAD_DATA;
+
+ elantech_report_absolute_v3(psmouse, packet_type);
+ break;
+
+ case 4:
+ packet_type = elantech_packet_check_v4(psmouse);
+ if (packet_type == PACKET_UNKNOWN)
+ return PSMOUSE_BAD_DATA;
+
+ elantech_report_absolute_v4(psmouse, packet_type);
+ break;
}
return PSMOUSE_FULL_PACKET;
@@ -435,15 +779,29 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
elantech_write_reg(psmouse, 0x11, etd->reg_11) ||
elantech_write_reg(psmouse, 0x21, etd->reg_21)) {
rc = -1;
- break;
}
+ break;
+
+ case 3:
+ etd->reg_10 = 0x0b;
+ if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
+ rc = -1;
+
+ break;
+
+ case 4:
+ etd->reg_07 = 0x01;
+ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
+ rc = -1;
+
+ goto skip_readback_reg_10; /* v4 has no reg 0x10 to read */
}
if (rc == 0) {
/*
* Read back reg 0x10. For hardware version 1 we must make
* sure the absolute mode bit is set. For hardware version 2
- * the touchpad is probably initalising and not ready until
+ * the touchpad is probably initializing and not ready until
* we read back the value we just wrote.
*/
do {
@@ -456,27 +814,115 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
} while (tries > 0);
if (rc) {
- pr_err("failed to read back register 0x10.\n");
+ psmouse_err(psmouse,
+ "failed to read back register 0x10.\n");
} else if (etd->hw_version == 1 &&
!(val & ETP_R10_ABSOLUTE_MODE)) {
- pr_err("touchpad refuses to switch to absolute mode.\n");
+ psmouse_err(psmouse,
+ "touchpad refuses to switch to absolute mode.\n");
rc = -1;
}
}
+ skip_readback_reg_10:
if (rc)
- pr_err("failed to initialise registers.\n");
+ psmouse_err(psmouse, "failed to initialise registers.\n");
return rc;
}
+static int elantech_set_range(struct psmouse *psmouse,
+ unsigned int *x_min, unsigned int *y_min,
+ unsigned int *x_max, unsigned int *y_max,
+ unsigned int *width)
+{
+ struct elantech_data *etd = psmouse->private;
+ unsigned char param[3];
+ unsigned char traces;
+
+ switch (etd->hw_version) {
+ case 1:
+ *x_min = ETP_XMIN_V1;
+ *y_min = ETP_YMIN_V1;
+ *x_max = ETP_XMAX_V1;
+ *y_max = ETP_YMAX_V1;
+ break;
+
+ case 2:
+ if (etd->fw_version == 0x020800 ||
+ etd->fw_version == 0x020b00 ||
+ etd->fw_version == 0x020030) {
+ *x_min = ETP_XMIN_V2;
+ *y_min = ETP_YMIN_V2;
+ *x_max = ETP_XMAX_V2;
+ *y_max = ETP_YMAX_V2;
+ } else {
+ int i;
+ int fixed_dpi;
+
+ i = (etd->fw_version > 0x020800 &&
+ etd->fw_version < 0x020900) ? 1 : 2;
+
+ if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ return -1;
+
+ fixed_dpi = param[1] & 0x10;
+
+ if (((etd->fw_version >> 16) == 0x14) && fixed_dpi) {
+ if (synaptics_send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
+ return -1;
+
+ *x_max = (etd->capabilities[1] - i) * param[1] / 2;
+ *y_max = (etd->capabilities[2] - i) * param[2] / 2;
+ } else if (etd->fw_version == 0x040216) {
+ *x_max = 819;
+ *y_max = 405;
+ } else if (etd->fw_version == 0x040219 || etd->fw_version == 0x040215) {
+ *x_max = 900;
+ *y_max = 500;
+ } else {
+ *x_max = (etd->capabilities[1] - i) * 64;
+ *y_max = (etd->capabilities[2] - i) * 64;
+ }
+ }
+ break;
+
+ case 3:
+ if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ return -1;
+
+ *x_max = (0x0f & param[0]) << 8 | param[1];
+ *y_max = (0xf0 & param[0]) << 4 | param[2];
+ break;
+
+ case 4:
+ if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ return -1;
+
+ *x_max = (0x0f & param[0]) << 8 | param[1];
+ *y_max = (0xf0 & param[0]) << 4 | param[2];
+ traces = etd->capabilities[1];
+ if ((traces < 2) || (traces > *x_max))
+ return -1;
+
+ *width = *x_max / (traces - 1);
+ break;
+ }
+
+ return 0;
+}
+
/*
* Set the appropriate event bits for the input subsystem
*/
-static void elantech_set_input_params(struct psmouse *psmouse)
+static int elantech_set_input_params(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
+ unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
+
+ if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
+ return -1;
__set_bit(EV_KEY, dev->evbit);
__set_bit(EV_ABS, dev->evbit);
@@ -494,30 +940,64 @@ static void elantech_set_input_params(struct psmouse *psmouse)
case 1:
/* Rocker button */
if (etd->fw_version < 0x020000 &&
- (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
+ (etd->capabilities[0] & ETP_CAP_HAS_ROCKER)) {
__set_bit(BTN_FORWARD, dev->keybit);
__set_bit(BTN_BACK, dev->keybit);
}
- input_set_abs_params(dev, ABS_X, ETP_XMIN_V1, ETP_XMAX_V1, 0, 0);
- input_set_abs_params(dev, ABS_Y, ETP_YMIN_V1, ETP_YMAX_V1, 0, 0);
+ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
+ input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
break;
case 2:
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
- input_set_abs_params(dev, ABS_X, ETP_XMIN_V2, ETP_XMAX_V2, 0, 0);
- input_set_abs_params(dev, ABS_Y, ETP_YMIN_V2, ETP_YMAX_V2, 0, 0);
+ __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
+ /* fall through */
+ case 3:
+ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
+ input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
if (etd->reports_pressure) {
input_set_abs_params(dev, ABS_PRESSURE, ETP_PMIN_V2,
ETP_PMAX_V2, 0, 0);
input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
ETP_WMAX_V2, 0, 0);
}
- __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
input_mt_init_slots(dev, 2);
- input_set_abs_params(dev, ABS_MT_POSITION_X, ETP_XMIN_V2, ETP_XMAX_V2, 0, 0);
- input_set_abs_params(dev, ABS_MT_POSITION_Y, ETP_YMIN_V2, ETP_YMAX_V2, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ break;
+
+ case 4:
+ __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
+ /* Needed for X to recognize the device as a touchpad. */
+ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
+ input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
+ /*
+ * The pressure and width ranges are the same as on v2 hardware;
+ * report ABS_PRESSURE and ABS_TOOL_WIDTH for compatibility.
+ */
+ input_set_abs_params(dev, ABS_PRESSURE, ETP_PMIN_V2,
+ ETP_PMAX_V2, 0, 0);
+ input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
+ ETP_WMAX_V2, 0, 0);
+ /* Multitouch capable pad, up to 5 fingers. */
+ input_mt_init_slots(dev, ETP_MAX_FINGERS);
+ input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
+ ETP_PMAX_V2, 0, 0);
+ /*
+ * The firmware reports how many trace lines the finger spans;
+ * convert that to surface units, as Protocol-B requires.
+ */
+ input_set_abs_params(dev, ABS_MT_TOUCH_MAJOR, 0,
+ ETP_WMAX_V2 * width, 0, 0);
break;
}
+
+ etd->y_max = y_max;
+ etd->width = width;
+
+ return 0;
}
struct elantech_attr_data {
@@ -587,6 +1067,7 @@ static ssize_t elantech_set_int_attr(struct psmouse *psmouse,
elantech_show_int_attr, \
elantech_set_int_attr)
+ELANTECH_INT_ATTR(reg_07, 0x07);
ELANTECH_INT_ATTR(reg_10, 0x10);
ELANTECH_INT_ATTR(reg_11, 0x11);
ELANTECH_INT_ATTR(reg_20, 0x20);
@@ -600,6 +1081,7 @@ ELANTECH_INT_ATTR(debug, 0);
ELANTECH_INT_ATTR(paritycheck, 0);
static struct attribute *elantech_attrs[] = {
+ &psmouse_attr_reg_07.dattr.attr,
&psmouse_attr_reg_10.dattr.attr,
&psmouse_attr_reg_11.dattr.attr,
&psmouse_attr_reg_20.dattr.attr,
@@ -651,7 +1133,7 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- pr_debug("sending Elantech magic knock failed.\n");
+ psmouse_dbg(psmouse, "sending Elantech magic knock failed.\n");
return -1;
}
@@ -659,9 +1141,11 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
* Report this in case there are Elantech models that use a different
* set of magic numbers
*/
- if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) {
- pr_debug("unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
- param[0], param[1], param[2]);
+ if (param[0] != 0x3c || param[1] != 0x03 ||
+ (param[2] != 0xc8 && param[2] != 0x00)) {
+ psmouse_dbg(psmouse,
+ "unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
+ param[0], param[1], param[2]);
return -1;
}
@@ -671,20 +1155,18 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
* to Elantech magic knock and there might be more.
*/
if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
- pr_debug("failed to query firmware version.\n");
+ psmouse_dbg(psmouse, "failed to query firmware version.\n");
return -1;
}
- pr_debug("Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
- param[0], param[1], param[2]);
+ psmouse_dbg(psmouse,
+ "Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
+ param[0], param[1], param[2]);
if (!elantech_is_signature_valid(param)) {
- if (!force_elantech) {
- pr_debug("Probably not a real Elantech touchpad. Aborting.\n");
- return -1;
- }
-
- pr_debug("Probably not a real Elantech touchpad. Enabling anyway due to force_elantech.\n");
+ psmouse_dbg(psmouse,
+ "Probably not a real Elantech touchpad. Aborting.\n");
+ return -1;
}
if (set_properties) {
@@ -715,7 +1197,8 @@ static int elantech_reconnect(struct psmouse *psmouse)
return -1;
if (elantech_set_absolute_mode(psmouse)) {
- pr_err("failed to put touchpad back into absolute mode.\n");
+ psmouse_err(psmouse,
+ "failed to put touchpad back into absolute mode.\n");
return -1;
}
@@ -723,6 +1206,48 @@ static int elantech_reconnect(struct psmouse *psmouse)
}
/*
+ * determine hardware version and set some properties according to it.
+ */
+static int elantech_set_properties(struct elantech_data *etd)
+{
+ int ver = (etd->fw_version & 0x0f0000) >> 16;
+
+ if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600)
+ etd->hw_version = 1;
+ else if (etd->fw_version < 0x150600)
+ etd->hw_version = 2;
+ else if (ver == 5)
+ etd->hw_version = 3;
+ else if (ver == 6)
+ etd->hw_version = 4;
+ else
+ return -1;
+
+ /*
+ * Turn on packet checking by default.
+ */
+ etd->paritycheck = 1;
+
+ /*
+ * This firmware suffers from misreporting coordinates when
+ * a touch action starts, causing the mouse cursor or scrolled page
+ * to jump. Enable a workaround.
+ */
+ etd->jumpy_cursor =
+ (etd->fw_version == 0x020022 || etd->fw_version == 0x020600);
+
+ if (etd->hw_version > 1) {
+ /* For now show extra debug information */
+ etd->debug = 1;
+
+ if (etd->fw_version >= 0x020800)
+ etd->reports_pressure = true;
+ }
+
+ return 0;
+}
+
+/*
* Initialize the touchpad and create sysfs entries
*/
int elantech_init(struct psmouse *psmouse)
@@ -743,70 +1268,53 @@ int elantech_init(struct psmouse *psmouse)
* Do the version query again so we can store the result
*/
if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
- pr_err("failed to query firmware version.\n");
+ psmouse_err(psmouse, "failed to query firmware version.\n");
goto init_fail;
}
-
etd->fw_version = (param[0] << 16) | (param[1] << 8) | param[2];
- /*
- * Assume every version greater than this is new EeePC style
- * hardware with 6 byte packets
- */
- if (etd->fw_version >= 0x020030) {
- etd->hw_version = 2;
- /* For now show extra debug information */
- etd->debug = 1;
- /* Don't know how to do parity checking for version 2 */
- etd->paritycheck = 0;
-
- if (etd->fw_version >= 0x020800)
- etd->reports_pressure = true;
-
- } else {
- etd->hw_version = 1;
- etd->paritycheck = 1;
- }
-
- pr_info("assuming hardware version %d, firmware version %d.%d.%d\n",
- etd->hw_version, param[0], param[1], param[2]);
-
- if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) {
- pr_err("failed to query capabilities.\n");
+ if (elantech_set_properties(etd)) {
+ psmouse_err(psmouse, "unknown hardware version, aborting...\n");
goto init_fail;
}
- pr_info("Synaptics capabilities query result 0x%02x, 0x%02x, 0x%02x.\n",
- param[0], param[1], param[2]);
- etd->capabilities = param[0];
+ psmouse_info(psmouse,
+ "assuming hardware version %d (with firmware version 0x%02x%02x%02x)\n",
+ etd->hw_version, param[0], param[1], param[2]);
- /*
- * This firmware suffers from misreporting coordinates when
- * a touch action starts causing the mouse cursor or scrolled page
- * to jump. Enable a workaround.
- */
- if (etd->fw_version == 0x020022 || etd->fw_version == 0x020600) {
- pr_info("firmware version 2.0.34/2.6.0 detected, enabling jumpy cursor workaround\n");
- etd->jumpy_cursor = true;
+ if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
+ etd->capabilities)) {
+ psmouse_err(psmouse, "failed to query capabilities.\n");
+ goto init_fail;
}
+ psmouse_info(psmouse,
+ "Synaptics capabilities query result 0x%02x, 0x%02x, 0x%02x.\n",
+ etd->capabilities[0], etd->capabilities[1],
+ etd->capabilities[2]);
if (elantech_set_absolute_mode(psmouse)) {
- pr_err("failed to put touchpad into absolute mode.\n");
+ psmouse_err(psmouse,
+ "failed to put touchpad into absolute mode.\n");
goto init_fail;
}
- elantech_set_input_params(psmouse);
+ if (elantech_set_input_params(psmouse)) {
+ psmouse_err(psmouse, "failed to query touchpad range.\n");
+ goto init_fail;
+ }
error = sysfs_create_group(&psmouse->ps2dev.serio->dev.kobj,
&elantech_attr_group);
if (error) {
- pr_err("failed to create sysfs attributes, error: %d.\n", error);
+ psmouse_err(psmouse,
+ "failed to create sysfs attributes, error: %d.\n",
+ error);
goto init_fail;
}
psmouse->protocol_handler = elantech_process_byte;
psmouse->disconnect = elantech_disconnect;
psmouse->reconnect = elantech_reconnect;
- psmouse->pktsize = etd->hw_version == 2 ? 6 : 4;
+ psmouse->pktsize = etd->hw_version > 1 ? 6 : 4;
return 0;
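
The v4 handlers added above decode per-finger packets purely by bit-masking;
process_packet_motion_v4() in particular treats bytes 1/2 and 4/5 as
two's-complement deltas and scales them by ETP_WEIGHT_VALUE when bit 4 of
byte 0 is set. Below is a standalone sketch of just that step, reusing the
struct finger_pos layout and constants introduced in elantech.h further down;
the packet bytes in main() are made up for illustration.

#include <stdio.h>

#define ETP_MAX_FINGERS  5
#define ETP_WEIGHT_VALUE 5

struct finger_pos {
	unsigned int x;
	unsigned int y;
};

/*
 * Sketch of the decoding done in process_packet_motion_v4(): finger ids sit
 * in the top three bits of bytes 0 and 3, deltas are signed bytes, and bit 4
 * of byte 0 selects the larger weighting factor.
 */
static void decode_motion_v4(const unsigned char packet[6],
			     struct finger_pos mt[ETP_MAX_FINGERS])
{
	int weight = (packet[0] & 0x10) ? ETP_WEIGHT_VALUE : 1;
	int id = ((packet[0] & 0xe0) >> 5) - 1;
	int sid = ((packet[3] & 0xe0) >> 5) - 1;

	if (id < 0)
		return;

	mt[id].x += (signed char)packet[1] * weight;
	mt[id].y -= (signed char)packet[2] * weight;

	if (sid >= 0) {
		mt[sid].x += (signed char)packet[4] * weight;
		mt[sid].y -= (signed char)packet[5] * weight;
	}
}

int main(void)
{
	struct finger_pos mt[ETP_MAX_FINGERS] = { { 500, 400 }, { 700, 400 } };
	/*
	 * Hypothetical motion packet: byte 0 = 0x24 (finger id 0, weight 1,
	 * low bits 0x04), byte 3 = 0x52 (second finger id 1, low bits 0x12,
	 * which elantech_packet_check_v4() classifies as PACKET_V4_MOTION),
	 * deltas dx1 = +3, dy1 = -2, dx2 = -1, dy2 = +4.
	 */
	const unsigned char packet[6] = { 0x24, 0x03, 0xfe, 0x52, 0xff, 0x04 };

	decode_motion_v4(packet, mt);
	printf("finger 0: (%u, %u)\nfinger 1: (%u, %u)\n",
	       mt[0].x, mt[0].y, mt[1].x, mt[1].y);
	return 0;
}
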
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index fabb2b99615..9e5f1aabea7 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -16,14 +16,17 @@
/*
* Command values for Synaptics style queries
*/
+#define ETP_FW_ID_QUERY 0x00
#define ETP_FW_VERSION_QUERY 0x01
#define ETP_CAPABILITIES_QUERY 0x02
+#define ETP_SAMPLE_QUERY 0x03
/*
* Command values for register reading or writing
*/
#define ETP_REGISTER_READ 0x10
#define ETP_REGISTER_WRITE 0x11
+#define ETP_REGISTER_READWRITE 0x00
/*
* Hardware version 2 custom PS/2 command value
@@ -66,16 +69,13 @@
#define ETP_YMAX_V1 (384 - ETP_EDGE_FUZZ_V1)
/*
- * It seems the resolution for hardware version 2 doubled.
- * Hence the X and Y ranges are doubled too.
- * The bezel around the pad also appears to be smaller
+ * The resolution of older v2 hardware is double that of v1, hence the
+ * doubled ranges below. (Newer v2 firmware provides a query command.)
*/
-#define ETP_EDGE_FUZZ_V2 8
-
-#define ETP_XMIN_V2 ( 0 + ETP_EDGE_FUZZ_V2)
-#define ETP_XMAX_V2 (1152 - ETP_EDGE_FUZZ_V2)
-#define ETP_YMIN_V2 ( 0 + ETP_EDGE_FUZZ_V2)
-#define ETP_YMAX_V2 ( 768 - ETP_EDGE_FUZZ_V2)
+#define ETP_XMIN_V2 0
+#define ETP_XMAX_V2 1152
+#define ETP_YMIN_V2 0
+#define ETP_YMAX_V2 768
#define ETP_PMIN_V2 0
#define ETP_PMAX_V2 255
@@ -83,17 +83,37 @@
#define ETP_WMAX_V2 15
/*
- * For two finger touches the coordinate of each finger gets reported
- * separately but with reduced resolution.
+ * v3 hardware has 2 kinds of packet types,
+ * v4 hardware has 3.
+ */
+#define PACKET_UNKNOWN 0x01
+#define PACKET_DEBOUNCE 0x02
+#define PACKET_V3_HEAD 0x03
+#define PACKET_V3_TAIL 0x04
+#define PACKET_V4_HEAD 0x05
+#define PACKET_V4_MOTION 0x06
+#define PACKET_V4_STATUS 0x07
+
+/*
+ * track up to 5 fingers for v4 hardware
+ */
+#define ETP_MAX_FINGERS 5
+
+/*
+ * weight value for v4 hardware
*/
-#define ETP_2FT_FUZZ 4
+#define ETP_WEIGHT_VALUE 5
-#define ETP_2FT_XMIN ( 0 + ETP_2FT_FUZZ)
-#define ETP_2FT_XMAX (288 - ETP_2FT_FUZZ)
-#define ETP_2FT_YMIN ( 0 + ETP_2FT_FUZZ)
-#define ETP_2FT_YMAX (192 - ETP_2FT_FUZZ)
+/*
+ * The base position for one finger, v4 hardware
+ */
+struct finger_pos {
+ unsigned int x;
+ unsigned int y;
+};
struct elantech_data {
+ unsigned char reg_07;
unsigned char reg_10;
unsigned char reg_11;
unsigned char reg_20;
@@ -104,13 +124,16 @@ struct elantech_data {
unsigned char reg_25;
unsigned char reg_26;
unsigned char debug;
- unsigned char capabilities;
+ unsigned char capabilities[3];
bool paritycheck;
bool jumpy_cursor;
bool reports_pressure;
unsigned char hw_version;
unsigned int fw_version;
unsigned int single_finger_reports;
+ unsigned int y_max;
+ unsigned int width;
+ struct finger_pos mt[ETP_MAX_FINGERS];
unsigned char parity[256];
};
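
For the v3/v4 range query handled by elantech_set_range() above, the
ETP_FW_ID_QUERY response packs x_max bits 11..8 into the low nibble of
param[0], y_max bits 11..8 into the high nibble, and the low eight bits into
param[1] and param[2]; on v4 the trace count from capabilities[1] then yields
the per-trace width. A short sketch of that arithmetic (the param values and
trace count below are hypothetical):

#include <stdio.h>

/* Same bit arithmetic as the v3/v4 branches of elantech_set_range(). */
static void decode_fw_id(const unsigned char param[3],
			 unsigned int *x_max, unsigned int *y_max)
{
	*x_max = (0x0f & param[0]) << 8 | param[1];
	*y_max = (0xf0 & param[0]) << 4 | param[2];
}

int main(void)
{
	const unsigned char param[3] = { 0x9b, 0x0f, 0xe2 };	/* hypothetical */
	unsigned int x_max, y_max, traces = 7;	/* capabilities[1], assumed */

	decode_fw_id(param, &x_max, &y_max);
	printf("x_max=%u y_max=%u width=%u\n",
	       x_max, y_max, x_max / (traces - 1));	/* 2831, 2530, 471 */
	return 0;
}
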
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 4d17d9f3320..0470dd46b56 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -136,10 +136,10 @@ static int hgpk_discard_decay_hack(struct psmouse *psmouse, int x, int y)
/* discard if too big, or half that but > 4 times the prev delta */
if (avx > recalib_delta ||
(avx > recalib_delta / 2 && ((avx / 4) > priv->xlast))) {
- hgpk_err(psmouse, "detected %dpx jump in x\n", x);
+ psmouse_warn(psmouse, "detected %dpx jump in x\n", x);
priv->xbigj = avx;
} else if (approx_half(avx, priv->xbigj)) {
- hgpk_err(psmouse, "detected secondary %dpx jump in x\n", x);
+ psmouse_warn(psmouse, "detected secondary %dpx jump in x\n", x);
priv->xbigj = avx;
priv->xsaw_secondary++;
} else {
@@ -151,10 +151,10 @@ static int hgpk_discard_decay_hack(struct psmouse *psmouse, int x, int y)
if (avy > recalib_delta ||
(avy > recalib_delta / 2 && ((avy / 4) > priv->ylast))) {
- hgpk_err(psmouse, "detected %dpx jump in y\n", y);
+ psmouse_warn(psmouse, "detected %dpx jump in y\n", y);
priv->ybigj = avy;
} else if (approx_half(avy, priv->ybigj)) {
- hgpk_err(psmouse, "detected secondary %dpx jump in y\n", y);
+ psmouse_warn(psmouse, "detected secondary %dpx jump in y\n", y);
priv->ybigj = avy;
priv->ysaw_secondary++;
} else {
@@ -168,7 +168,7 @@ static int hgpk_discard_decay_hack(struct psmouse *psmouse, int x, int y)
priv->ylast = avy;
if (do_recal && jumpy_delay) {
- hgpk_err(psmouse, "scheduling recalibration\n");
+ psmouse_warn(psmouse, "scheduling recalibration\n");
psmouse_queue_work(psmouse, &priv->recalib_wq,
msecs_to_jiffies(jumpy_delay));
}
@@ -260,8 +260,8 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
* movement, it is probably a case of the user moving the
* cursor very slowly across the screen. */
if (abs(priv->x_tally) < 3 && abs(priv->y_tally) < 3) {
- hgpk_err(psmouse, "packet spew detected (%d,%d)\n",
- priv->x_tally, priv->y_tally);
+ psmouse_warn(psmouse, "packet spew detected (%d,%d)\n",
+ priv->x_tally, priv->y_tally);
priv->spew_flag = RECALIBRATING;
psmouse_queue_work(psmouse, &priv->recalib_wq,
msecs_to_jiffies(spew_delay));
@@ -333,12 +333,12 @@ static bool hgpk_is_byte_valid(struct psmouse *psmouse, unsigned char *packet)
}
if (!valid)
- hgpk_dbg(psmouse,
- "bad data, mode %d (%d) %02x %02x %02x %02x %02x %02x\n",
- priv->mode, pktcnt,
- psmouse->packet[0], psmouse->packet[1],
- psmouse->packet[2], psmouse->packet[3],
- psmouse->packet[4], psmouse->packet[5]);
+ psmouse_dbg(psmouse,
+ "bad data, mode %d (%d) %02x %02x %02x %02x %02x %02x\n",
+ priv->mode, pktcnt,
+ psmouse->packet[0], psmouse->packet[1],
+ psmouse->packet[2], psmouse->packet[3],
+ psmouse->packet[4], psmouse->packet[5]);
return valid;
}
@@ -361,19 +361,20 @@ static void hgpk_process_advanced_packet(struct psmouse *psmouse)
input_report_abs(idev, ABS_PRESSURE, z);
if (tpdebug)
- hgpk_dbg(psmouse, "pd=%d fd=%d z=%d",
- pt_down, finger_down, z);
+ psmouse_dbg(psmouse, "pd=%d fd=%d z=%d",
+ pt_down, finger_down, z);
} else {
/*
* PenTablet mode does not report pressure, so we don't
* report it here
*/
if (tpdebug)
- hgpk_dbg(psmouse, "pd=%d ", down);
+ psmouse_dbg(psmouse, "pd=%d ", down);
}
if (tpdebug)
- hgpk_dbg(psmouse, "l=%d r=%d x=%d y=%d\n", left, right, x, y);
+ psmouse_dbg(psmouse, "l=%d r=%d x=%d y=%d\n",
+ left, right, x, y);
input_report_key(idev, BTN_TOUCH, down);
input_report_key(idev, BTN_LEFT, left);
@@ -395,7 +396,7 @@ static void hgpk_process_advanced_packet(struct psmouse *psmouse)
if (x == priv->abs_x && y == priv->abs_y) {
if (++priv->dupe_count > SPEW_WATCH_COUNT) {
if (tpdebug)
- hgpk_dbg(psmouse, "hard spew detected\n");
+ psmouse_dbg(psmouse, "hard spew detected\n");
priv->spew_flag = RECALIBRATING;
psmouse_queue_work(psmouse, &priv->recalib_wq,
msecs_to_jiffies(spew_delay));
@@ -412,7 +413,7 @@ static void hgpk_process_advanced_packet(struct psmouse *psmouse)
int y_diff = priv->abs_y - y;
if (hgpk_discard_decay_hack(psmouse, x_diff, y_diff)) {
if (tpdebug)
- hgpk_dbg(psmouse, "discarding\n");
+ psmouse_dbg(psmouse, "discarding\n");
goto done;
}
hgpk_spewing_hack(psmouse, left, right, x_diff, y_diff);
@@ -437,20 +438,21 @@ static void hgpk_process_simple_packet(struct psmouse *psmouse)
int y = ((packet[0] << 3) & 0x100) - packet[2];
if (packet[0] & 0xc0)
- hgpk_dbg(psmouse,
- "overflow -- 0x%02x 0x%02x 0x%02x\n",
- packet[0], packet[1], packet[2]);
+ psmouse_dbg(psmouse,
+ "overflow -- 0x%02x 0x%02x 0x%02x\n",
+ packet[0], packet[1], packet[2]);
if (hgpk_discard_decay_hack(psmouse, x, y)) {
if (tpdebug)
- hgpk_dbg(psmouse, "discarding\n");
+ psmouse_dbg(psmouse, "discarding\n");
return;
}
hgpk_spewing_hack(psmouse, left, right, x, y);
if (tpdebug)
- hgpk_dbg(psmouse, "l=%d r=%d x=%d y=%d\n", left, right, x, y);
+ psmouse_dbg(psmouse, "l=%d r=%d x=%d y=%d\n",
+ left, right, x, y);
input_report_key(dev, BTN_LEFT, left);
input_report_key(dev, BTN_RIGHT, right);
@@ -482,9 +484,8 @@ static psmouse_ret_t hgpk_process_byte(struct psmouse *psmouse)
* ugh, got a packet inside our recalibration
* window, schedule another recalibration.
*/
- hgpk_dbg(psmouse,
- "packet inside calibration window, "
- "queueing another recalibration\n");
+ psmouse_dbg(psmouse,
+ "packet inside calibration window, queueing another recalibration\n");
psmouse_queue_work(psmouse, &priv->recalib_wq,
msecs_to_jiffies(post_interrupt_delay));
}
@@ -628,7 +629,7 @@ static int hgpk_reset_device(struct psmouse *psmouse, bool recalibrate)
err = hgpk_select_mode(psmouse);
if (err) {
- hgpk_err(psmouse, "failed to select mode\n");
+ psmouse_err(psmouse, "failed to select mode\n");
return err;
}
@@ -648,11 +649,11 @@ static int hgpk_force_recalibrate(struct psmouse *psmouse)
return 0;
if (!autorecal) {
- hgpk_dbg(psmouse, "recalibrations disabled, ignoring\n");
+ psmouse_dbg(psmouse, "recalibration disabled, ignoring\n");
return 0;
}
- hgpk_dbg(psmouse, "recalibrating touchpad..\n");
+ psmouse_dbg(psmouse, "recalibrating touchpad..\n");
/* we don't want to race with the irq handler, nor with resyncs */
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
@@ -675,7 +676,7 @@ static int hgpk_force_recalibrate(struct psmouse *psmouse)
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
if (tpdebug)
- hgpk_dbg(psmouse, "touchpad reactivated\n");
+ psmouse_dbg(psmouse, "touchpad reactivated\n");
/*
* If we get packets right away after recalibrating, it's likely
@@ -727,16 +728,16 @@ static int hgpk_toggle_powersave(struct psmouse *psmouse, int enable)
err = hgpk_reset_device(psmouse, false);
if (err) {
- hgpk_err(psmouse, "Failed to reset device!\n");
+ psmouse_err(psmouse, "Failed to reset device!\n");
return err;
}
/* should be all set, enable the touchpad */
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
- hgpk_dbg(psmouse, "Touchpad powered up.\n");
+ psmouse_dbg(psmouse, "Touchpad powered up.\n");
} else {
- hgpk_dbg(psmouse, "Powering off touchpad.\n");
+ psmouse_dbg(psmouse, "Powering off touchpad.\n");
if (ps2_command(ps2dev, NULL, 0xec) ||
ps2_command(ps2dev, NULL, 0xec) ||
@@ -923,7 +924,7 @@ static void hgpk_recalib_work(struct work_struct *work)
struct psmouse *psmouse = priv->psmouse;
if (hgpk_force_recalibrate(psmouse))
- hgpk_err(psmouse, "recalibration failed!\n");
+ psmouse_err(psmouse, "recalibration failed!\n");
}
static int hgpk_register(struct psmouse *psmouse)
@@ -947,14 +948,15 @@ static int hgpk_register(struct psmouse *psmouse)
err = device_create_file(&psmouse->ps2dev.serio->dev,
&psmouse_attr_powered.dattr);
if (err) {
- hgpk_err(psmouse, "Failed creating 'powered' sysfs node\n");
+ psmouse_err(psmouse, "Failed creating 'powered' sysfs node\n");
return err;
}
err = device_create_file(&psmouse->ps2dev.serio->dev,
&psmouse_attr_hgpk_mode.dattr);
if (err) {
- hgpk_err(psmouse, "Failed creating 'hgpk_mode' sysfs node\n");
+ psmouse_err(psmouse,
+ "Failed creating 'hgpk_mode' sysfs node\n");
goto err_remove_powered;
}
@@ -963,8 +965,8 @@ static int hgpk_register(struct psmouse *psmouse)
err = device_create_file(&psmouse->ps2dev.serio->dev,
&psmouse_attr_recalibrate.dattr);
if (err) {
- hgpk_err(psmouse,
- "Failed creating 'recalibrate' sysfs node\n");
+ psmouse_err(psmouse,
+ "Failed creating 'recalibrate' sysfs node\n");
goto err_remove_mode;
}
}
@@ -1027,13 +1029,13 @@ static enum hgpk_model_t hgpk_get_model(struct psmouse *psmouse)
return -EIO;
}
- hgpk_dbg(psmouse, "ID: %02x %02x %02x\n", param[0], param[1], param[2]);
+ psmouse_dbg(psmouse, "ID: %02x %02x %02x\n", param[0], param[1], param[2]);
/* HGPK signature: 0x67, 0x00, 0x<model> */
if (param[0] != 0x67 || param[1] != 0x00)
return -ENODEV;
- hgpk_info(psmouse, "OLPC touchpad revision 0x%x\n", param[2]);
+ psmouse_info(psmouse, "OLPC touchpad revision 0x%x\n", param[2]);
return param[2];
}
diff --git a/drivers/input/mouse/hgpk.h b/drivers/input/mouse/hgpk.h
index 311c0e87fcb..dd686771cfe 100644
--- a/drivers/input/mouse/hgpk.h
+++ b/drivers/input/mouse/hgpk.h
@@ -46,17 +46,6 @@ struct hgpk_data {
int xsaw_secondary, ysaw_secondary; /* jumpiness detection */
};
-#define hgpk_dbg(psmouse, format, arg...) \
- dev_dbg(&(psmouse)->ps2dev.serio->dev, format, ## arg)
-#define hgpk_err(psmouse, format, arg...) \
- dev_err(&(psmouse)->ps2dev.serio->dev, format, ## arg)
-#define hgpk_info(psmouse, format, arg...) \
- dev_info(&(psmouse)->ps2dev.serio->dev, format, ## arg)
-#define hgpk_warn(psmouse, format, arg...) \
- dev_warn(&(psmouse)->ps2dev.serio->dev, format, ## arg)
-#define hgpk_notice(psmouse, format, arg...) \
- dev_notice(&(psmouse)->ps2dev.serio->dev, format, ## arg)
-
#ifdef CONFIG_MOUSE_PS2_OLPC
void hgpk_module_init(void);
int hgpk_detect(struct psmouse *psmouse, bool set_properties);
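
The hgpk.c changes above are mostly logging conversions, but the surrounding
context also shows the jump heuristic in hgpk_discard_decay_hack(): a delta is
discarded when it exceeds recalib_delta outright, or exceeds half of it while
being more than four times the previous delta. A tiny sketch of that predicate;
recalib_delta is a module parameter not visible in this hunk, so the value 100
below is only an assumption for illustration.

#include <stdio.h>

/* Same shape as the test at the top of hgpk_discard_decay_hack(). */
static int looks_like_jump(int av, int last, int recalib_delta)
{
	return av > recalib_delta ||
	       (av > recalib_delta / 2 && (av / 4) > last);
}

int main(void)
{
	int recalib_delta = 100;	/* assumed; really a module parameter */

	printf("%d\n", looks_like_jump(120, 5, recalib_delta));	/* 1: too big */
	printf("%d\n", looks_like_jump(60, 5, recalib_delta));	/* 1: > half and 4x prev */
	printf("%d\n", looks_like_jump(60, 30, recalib_delta));	/* 0: plausible movement */
	return 0;
}
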
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 83bcaba96b8..2c4db636de6 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -169,8 +169,8 @@ static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse)
if (relative_packet) {
if (!dev2)
- printk(KERN_WARNING "lifebook.c: got relative packet "
- "but no relative device set up\n");
+ psmouse_warn(psmouse,
+ "got relative packet but no relative device set up\n");
} else {
if (lifebook_use_6byte_proto) {
input_report_abs(dev1, ABS_X,
@@ -212,7 +212,7 @@ static int lifebook_absolute_mode(struct psmouse *psmouse)
/*
* Enable absolute output -- ps2_command fails always but if
- * you leave this call out the touchsreen will never send
+ * you leave this call out the touchscreen will never send
* absolute coordinates
*/
param = lifebook_use_6byte_proto ? 0x08 : 0x07;
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index c9983aee908..faac2c3bef7 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -82,11 +82,11 @@ static psmouse_ret_t ps2pp_process_byte(struct psmouse *psmouse)
packet[0] = packet[2] | 0x08;
break;
-#ifdef DEBUG
default:
- printk(KERN_WARNING "psmouse.c: Received PS2++ packet #%x, but don't know how to handle.\n",
- (packet[1] >> 4) | (packet[0] & 0x30));
-#endif
+ psmouse_dbg(psmouse,
+ "Received PS2++ packet #%x, but don't know how to handle.\n",
+ (packet[1] >> 4) | (packet[0] & 0x30));
+ break;
}
} else {
/* Standard PS/2 motion data */
@@ -382,7 +382,7 @@ int ps2pp_init(struct psmouse *psmouse, bool set_properties)
}
} else {
- printk(KERN_WARNING "logips2pp: Detected unknown logitech mouse model %d\n", model);
+ psmouse_warn(psmouse, "Detected unknown Logitech mouse model %d\n", model);
}
if (set_properties) {
@@ -400,9 +400,9 @@ int ps2pp_init(struct psmouse *psmouse, bool set_properties)
error = device_create_file(&psmouse->ps2dev.serio->dev,
&psmouse_attr_smartscroll.dattr);
if (error) {
- printk(KERN_ERR
- "logips2pp.c: failed to create smartscroll "
- "sysfs attribute, error: %d\n", error);
+ psmouse_err(psmouse,
+ "failed to create smartscroll sysfs attribute, error: %d\n",
+ error);
return -1;
}
}
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 3f74baee102..9f352fbd7b4 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -11,6 +11,9 @@
* the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define psmouse_fmt(fmt) fmt
+
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -251,11 +254,14 @@ static int psmouse_handle_byte(struct psmouse *psmouse)
switch (rc) {
case PSMOUSE_BAD_DATA:
if (psmouse->state == PSMOUSE_ACTIVATED) {
- printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
- psmouse->name, psmouse->phys, psmouse->pktcnt);
+ psmouse_warn(psmouse,
+ "%s at %s lost sync at byte %d\n",
+ psmouse->name, psmouse->phys,
+ psmouse->pktcnt);
if (++psmouse->out_of_sync_cnt == psmouse->resetafter) {
__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
- printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
+ psmouse_notice(psmouse,
+ "issuing reconnect request\n");
serio_reconnect(psmouse->ps2dev.serio);
return -1;
}
@@ -267,8 +273,9 @@ static int psmouse_handle_byte(struct psmouse *psmouse)
psmouse->pktcnt = 0;
if (psmouse->out_of_sync_cnt) {
psmouse->out_of_sync_cnt = 0;
- printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
- psmouse->name, psmouse->phys);
+ psmouse_notice(psmouse,
+ "%s at %s - driver resynced.\n",
+ psmouse->name, psmouse->phys);
}
break;
@@ -295,9 +302,10 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
((flags & SERIO_PARITY) && !psmouse->ignore_parity))) {
if (psmouse->state == PSMOUSE_ACTIVATED)
- printk(KERN_WARNING "psmouse.c: bad data from KBC -%s%s\n",
- flags & SERIO_TIMEOUT ? " timeout" : "",
- flags & SERIO_PARITY ? " bad parity" : "");
+ psmouse_warn(psmouse,
+ "bad data from KBC -%s%s\n",
+ flags & SERIO_TIMEOUT ? " timeout" : "",
+ flags & SERIO_PARITY ? " bad parity" : "");
ps2_cmd_aborted(&psmouse->ps2dev);
goto out;
}
@@ -315,8 +323,8 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
if (psmouse->state == PSMOUSE_ACTIVATED &&
psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) {
- printk(KERN_INFO "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n",
- psmouse->name, psmouse->phys, psmouse->pktcnt);
+ psmouse_info(psmouse, "%s at %s lost synchronization, throwing %d bytes away.\n",
+ psmouse->name, psmouse->phys, psmouse->pktcnt);
psmouse->badbyte = psmouse->packet[0];
__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
psmouse_queue_work(psmouse, &psmouse->resync_work, 0);
@@ -943,7 +951,8 @@ static int psmouse_probe(struct psmouse *psmouse)
*/
if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_DIS))
- printk(KERN_WARNING "psmouse.c: Failed to reset mouse on %s\n", ps2dev->serio->phys);
+ psmouse_warn(psmouse, "Failed to reset mouse on %s\n",
+ ps2dev->serio->phys);
return 0;
}
@@ -1005,8 +1014,8 @@ static void psmouse_initialize(struct psmouse *psmouse)
static void psmouse_activate(struct psmouse *psmouse)
{
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE))
- printk(KERN_WARNING "psmouse.c: Failed to enable mouse on %s\n",
- psmouse->ps2dev.serio->phys);
+ psmouse_warn(psmouse, "Failed to enable mouse on %s\n",
+ psmouse->ps2dev.serio->phys);
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
}
@@ -1020,14 +1029,14 @@ static void psmouse_activate(struct psmouse *psmouse)
static void psmouse_deactivate(struct psmouse *psmouse)
{
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
- printk(KERN_WARNING "psmouse.c: Failed to deactivate mouse on %s\n",
- psmouse->ps2dev.serio->phys);
+ psmouse_warn(psmouse, "Failed to deactivate mouse on %s\n",
+ psmouse->ps2dev.serio->phys);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
}
/*
- * psmouse_poll() - default poll hanlder. Everyone except for ALPS uses it.
+ * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
*/
static int psmouse_poll(struct psmouse *psmouse)
@@ -1115,14 +1124,15 @@ static void psmouse_resync(struct work_struct *work)
}
if (!enabled) {
- printk(KERN_WARNING "psmouse.c: failed to re-enable mouse on %s\n",
- psmouse->ps2dev.serio->phys);
+ psmouse_warn(psmouse, "failed to re-enable mouse on %s\n",
+ psmouse->ps2dev.serio->phys);
failed = true;
}
if (failed) {
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
- printk(KERN_INFO "psmouse.c: resync failed, issuing reconnect request\n");
+ psmouse_info(psmouse,
+ "resync failed, issuing reconnect request\n");
serio_reconnect(serio);
} else
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
@@ -1155,8 +1165,8 @@ static void psmouse_cleanup(struct serio *serio)
* Disable stream mode so cleanup routine can proceed undisturbed.
*/
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
- printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
- psmouse->ps2dev.serio->phys);
+ psmouse_warn(psmouse, "Failed to disable mouse on %s\n",
+ psmouse->ps2dev.serio->phys);
if (psmouse->cleanup)
psmouse->cleanup(psmouse);
@@ -1400,7 +1410,8 @@ static int psmouse_reconnect(struct serio *serio)
int rc = -1;
if (!drv || !psmouse) {
- printk(KERN_DEBUG "psmouse: reconnect request, but serio is disconnected, ignoring...\n");
+ psmouse_dbg(psmouse,
+ "reconnect request, but serio is disconnected, ignoring...\n");
return -1;
}
@@ -1427,8 +1438,9 @@ static int psmouse_reconnect(struct serio *serio)
goto out;
}
- /* ok, the device type (and capabilities) match the old one,
- * we can continue using it, complete intialization
+ /*
+ * OK, the device type (and capabilities) match the old one,
+ * we can continue using it, complete initialization
*/
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
@@ -1586,9 +1598,8 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
while (!list_empty(&serio->children)) {
if (++retry > 3) {
- printk(KERN_WARNING
- "psmouse: failed to destroy children ports, "
- "protocol change aborted.\n");
+ psmouse_warn(psmouse,
+ "failed to destroy children ports, protocol change aborted.\n");
input_free_device(new_dev);
return -EIO;
}
@@ -1715,7 +1726,7 @@ static int __init psmouse_init(void)
kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
if (!kpsmoused_wq) {
- printk(KERN_ERR "psmouse: failed to create kpsmoused workqueue\n");
+ pr_err("failed to create kpsmoused workqueue\n");
return -ENOMEM;
}
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 593e910bfc7..9b84b0c4e37 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -150,4 +150,29 @@ static struct psmouse_attribute psmouse_attr_##_name = { \
static ssize_t _set(struct psmouse *, void *, const char *, size_t); \
__PSMOUSE_DEFINE_ATTR_VAR(_name, _mode, _data, NULL, _set, true)
+#ifndef psmouse_fmt
+#define psmouse_fmt(fmt) KBUILD_BASENAME ": " fmt
+#endif
+
+#define psmouse_dbg(psmouse, format, ...) \
+ dev_dbg(&(psmouse)->ps2dev.serio->dev, \
+ psmouse_fmt(format), ##__VA_ARGS__)
+#define psmouse_info(psmouse, format, ...) \
+ dev_info(&(psmouse)->ps2dev.serio->dev, \
+ psmouse_fmt(format), ##__VA_ARGS__)
+#define psmouse_warn(psmouse, format, ...) \
+ dev_warn(&(psmouse)->ps2dev.serio->dev, \
+ psmouse_fmt(format), ##__VA_ARGS__)
+#define psmouse_err(psmouse, format, ...) \
+ dev_err(&(psmouse)->ps2dev.serio->dev, \
+ psmouse_fmt(format), ##__VA_ARGS__)
+#define psmouse_notice(psmouse, format, ...) \
+ dev_notice(&(psmouse)->ps2dev.serio->dev, \
+ psmouse_fmt(format), ##__VA_ARGS__)
+#define psmouse_printk(level, psmouse, format, ...) \
+ dev_printk(level, \
+ &(psmouse)->ps2dev.serio->dev, \
+ psmouse_fmt(format), ##__VA_ARGS__)
+
+
#endif /* _PSMOUSE_H */
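The psmouse_* helpers added above all follow the same pattern: a per-file psmouse_fmt() prefix is pasted onto the format string at preprocessing time, and the variadic arguments are forwarded unchanged to the matching dev_*() call (psmouse-base.c redefines psmouse_fmt() to a bare fmt because it already sets pr_fmt). A minimal standalone sketch of that pattern follows; my_fmt/my_warn are illustrative names only, not kernel API, and fprintf stands in for dev_warn:

#include <stdio.h>

/* Per-file prefix, overridable before inclusion -- mirrors psmouse_fmt() */
#ifndef my_fmt
#define my_fmt(fmt) "psmouse: " fmt
#endif

/* Variadic wrapper: prefix is concatenated at compile time,
 * remaining arguments are forwarded unchanged. */
#define my_warn(format, ...) \
        fprintf(stderr, my_fmt(format), ##__VA_ARGS__)

int main(void)
{
        my_warn("%s lost sync at byte %d\n", "PS/2 mouse", 3);
        /* prints: psmouse: PS/2 mouse lost sync at byte 3 */
        return 0;
}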
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 6c5d84fcdea..ee3b0ca9d59 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -183,7 +183,7 @@ static int __devinit pxa930_trkball_probe(struct platform_device *pdev)
/* held the module in reset, will be enabled in open() */
pxa930_trkball_disable(trkball);
- error = request_irq(irq, pxa930_trkball_interrupt, IRQF_DISABLED,
+ error = request_irq(irq, pxa930_trkball_interrupt, 0,
pdev->name, trkball);
if (error) {
dev_err(&pdev->dev, "failed to request irq: %d\n", error);
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 2fc887a5106..c5b12d2e955 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -607,11 +607,12 @@ static void fsp_packet_debug(unsigned char packet[])
ps2_packet_cnt++;
jiffies_msec = jiffies_to_msecs(jiffies);
- printk(KERN_DEBUG "%08dms PS/2 packets: %02x, %02x, %02x, %02x\n",
- jiffies_msec, packet[0], packet[1], packet[2], packet[3]);
+ psmouse_dbg(psmouse,
+ "%08dms PS/2 packets: %02x, %02x, %02x, %02x\n",
+ jiffies_msec, packet[0], packet[1], packet[2], packet[3]);
if (jiffies_msec - ps2_last_second > 1000) {
- printk(KERN_DEBUG "PS/2 packets/sec = %d\n", ps2_packet_cnt);
+ psmouse_dbg(psmouse, "PS/2 packets/sec = %d\n", ps2_packet_cnt);
ps2_packet_cnt = 0;
ps2_last_second = jiffies_msec;
}
@@ -820,9 +821,9 @@ int fsp_init(struct psmouse *psmouse)
return -ENODEV;
}
- printk(KERN_INFO
- "Finger Sensing Pad, hw: %d.%d.%d, sw: %s, buttons: %d\n",
- ver >> 4, ver & 0x0F, rev, fsp_drv_ver, buttons & 7);
+ psmouse_info(psmouse,
+ "Finger Sensing Pad, hw: %d.%d.%d, sw: %s, buttons: %d\n",
+ ver >> 4, ver & 0x0F, rev, fsp_drv_ver, buttons & 7);
psmouse->private = priv = kzalloc(sizeof(struct fsp_data), GFP_KERNEL);
if (!priv)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5538fc657af..c080b828e5d 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -44,6 +44,16 @@
#define YMIN_NOMINAL 1408
#define YMAX_NOMINAL 4448
+/*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+ * opposite from what userspace expects.
+ * This function is used to invert y before reporting.
+ */
+static int synaptics_invert_y(int y)
+{
+ return YMAX_NOMINAL + YMIN_NOMINAL - y;
+}
+
/*****************************************************************************
* Stuff we need even when we do not want native Synaptics support
@@ -157,8 +167,8 @@ static int synaptics_capability(struct psmouse *psmouse)
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 1) {
if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB, cap)) {
- printk(KERN_ERR "Synaptics claims to have extended capabilities,"
- " but I'm not able to read them.\n");
+ psmouse_warn(psmouse,
+ "device claims to have extended capabilities, but I'm not able to read them.\n");
} else {
priv->ext_cap = (cap[0] << 16) | (cap[1] << 8) | cap[2];
@@ -173,8 +183,8 @@ static int synaptics_capability(struct psmouse *psmouse)
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 4) {
if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB_0C, cap)) {
- printk(KERN_ERR "Synaptics claims to have extended capability 0x0c,"
- " but I'm not able to read it.\n");
+ psmouse_warn(psmouse,
+ "device claims to have extended capability 0x0c, but I'm not able to read it.\n");
} else {
priv->ext_cap_0c = (cap[0] << 16) | (cap[1] << 8) | cap[2];
}
@@ -222,8 +232,8 @@ static int synaptics_resolution(struct psmouse *psmouse)
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
- printk(KERN_ERR "Synaptics claims to have max coordinates"
- " query, but I'm not able to read it.\n");
+ psmouse_warn(psmouse,
+ "device claims to have max coordinates query, but I'm not able to read it.\n");
} else {
priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
@@ -233,8 +243,8 @@ static int synaptics_resolution(struct psmouse *psmouse)
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 &&
SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) {
if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
- printk(KERN_ERR "Synaptics claims to have min coordinates"
- " query, but I'm not able to read it.\n");
+ psmouse_warn(psmouse,
+ "device claims to have min coordinates query, but I'm not able to read it.\n");
} else {
priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
@@ -294,7 +304,8 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
static unsigned char param = 0xc8;
struct synaptics_data *priv = psmouse->private;
- if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+ if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
return 0;
if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
@@ -377,7 +388,8 @@ static void synaptics_pt_activate(struct psmouse *psmouse)
priv->mode &= ~SYN_BIT_FOUR_BYTE_CLIENT;
if (synaptics_mode_cmd(psmouse, priv->mode))
- printk(KERN_INFO "synaptics: failed to switch guest protocol\n");
+ psmouse_warn(psmouse,
+ "failed to switch guest protocol\n");
}
}
@@ -387,7 +399,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!serio) {
- printk(KERN_ERR "synaptics: not enough memory to allocate pass-through port\n");
+ psmouse_err(psmouse,
+ "not enough memory for pass-through port\n");
return;
}
@@ -401,7 +414,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
psmouse->pt_activate = synaptics_pt_activate;
- printk(KERN_INFO "serio: %s port at %s\n", serio->name, psmouse->phys);
+ psmouse_info(psmouse, "serio: %s port at %s\n",
+ serio->name, psmouse->phys);
serio_register_port(serio);
}
@@ -409,6 +423,44 @@ static void synaptics_pt_create(struct psmouse *psmouse)
* Functions to interpret the absolute mode packets
****************************************************************************/
+static void synaptics_mt_state_set(struct synaptics_mt_state *state, int count,
+ int sgm, int agm)
+{
+ state->count = count;
+ state->sgm = sgm;
+ state->agm = agm;
+}
+
+static void synaptics_parse_agm(const unsigned char buf[],
+ struct synaptics_data *priv,
+ struct synaptics_hw_state *hw)
+{
+ struct synaptics_hw_state *agm = &priv->agm;
+ int agm_packet_type;
+
+ agm_packet_type = (buf[5] & 0x30) >> 4;
+ switch (agm_packet_type) {
+ case 1:
+ /* Gesture packet: (x, y, z) half resolution */
+ agm->w = hw->w;
+ agm->x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
+ agm->y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
+ agm->z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
+ break;
+
+ case 2:
+ /* AGM-CONTACT packet: (count, sgm, agm) */
+ synaptics_mt_state_set(&agm->mt_state, buf[1], buf[2], buf[4]);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Record that at least one AGM has been received since last SGM */
+ priv->agm_pending = true;
+}
+
static int synaptics_parse_hw_state(const unsigned char buf[],
struct synaptics_data *priv,
struct synaptics_hw_state *hw)
@@ -442,11 +494,10 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
}
- if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
- /* Gesture packet: (x, y, z) at half resolution */
- priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
- priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
- priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
+ if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+ hw->w == 2) {
+ synaptics_parse_agm(buf, priv, hw);
return 1;
}
@@ -502,8 +553,7 @@ static void synaptics_report_semi_mt_slot(struct input_dev *dev, int slot,
input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
if (active) {
input_report_abs(dev, ABS_MT_POSITION_X, x);
- input_report_abs(dev, ABS_MT_POSITION_Y,
- YMAX_NOMINAL + YMIN_NOMINAL - y);
+ input_report_abs(dev, ABS_MT_POSITION_Y, synaptics_invert_y(y));
}
}
@@ -526,6 +576,388 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
}
}
+static void synaptics_report_buttons(struct psmouse *psmouse,
+ const struct synaptics_hw_state *hw)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct synaptics_data *priv = psmouse->private;
+ int i;
+
+ input_report_key(dev, BTN_LEFT, hw->left);
+ input_report_key(dev, BTN_RIGHT, hw->right);
+
+ if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
+ input_report_key(dev, BTN_MIDDLE, hw->middle);
+
+ if (SYN_CAP_FOUR_BUTTON(priv->capabilities)) {
+ input_report_key(dev, BTN_FORWARD, hw->up);
+ input_report_key(dev, BTN_BACK, hw->down);
+ }
+
+ for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
+ input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
+}
+
+static void synaptics_report_slot(struct input_dev *dev, int slot,
+ const struct synaptics_hw_state *hw)
+{
+ input_mt_slot(dev, slot);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, (hw != NULL));
+ if (!hw)
+ return;
+
+ input_report_abs(dev, ABS_MT_POSITION_X, hw->x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, synaptics_invert_y(hw->y));
+ input_report_abs(dev, ABS_MT_PRESSURE, hw->z);
+}
+
+static void synaptics_report_mt_data(struct psmouse *psmouse,
+ struct synaptics_mt_state *mt_state,
+ const struct synaptics_hw_state *sgm)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct synaptics_data *priv = psmouse->private;
+ struct synaptics_hw_state *agm = &priv->agm;
+ struct synaptics_mt_state *old = &priv->mt_state;
+
+ switch (mt_state->count) {
+ case 0:
+ synaptics_report_slot(dev, 0, NULL);
+ synaptics_report_slot(dev, 1, NULL);
+ break;
+ case 1:
+ if (mt_state->sgm == -1) {
+ synaptics_report_slot(dev, 0, NULL);
+ synaptics_report_slot(dev, 1, NULL);
+ } else if (mt_state->sgm == 0) {
+ synaptics_report_slot(dev, 0, sgm);
+ synaptics_report_slot(dev, 1, NULL);
+ } else {
+ synaptics_report_slot(dev, 0, NULL);
+ synaptics_report_slot(dev, 1, sgm);
+ }
+ break;
+ default:
+ /*
+ * If the finger slot contained in SGM is valid, and either
+ * hasn't changed, or is new, then report SGM in MTB slot 0.
+ * Otherwise, empty MTB slot 0.
+ */
+ if (mt_state->sgm != -1 &&
+ (mt_state->sgm == old->sgm || old->sgm == -1))
+ synaptics_report_slot(dev, 0, sgm);
+ else
+ synaptics_report_slot(dev, 0, NULL);
+
+ /*
+ * If the finger slot contained in AGM is valid, and either
+ * hasn't changed, or is new, then report AGM in MTB slot 1.
+ * Otherwise, empty MTB slot 1.
+ */
+ if (mt_state->agm != -1 &&
+ (mt_state->agm == old->agm || old->agm == -1))
+ synaptics_report_slot(dev, 1, agm);
+ else
+ synaptics_report_slot(dev, 1, NULL);
+ break;
+ }
+
+ /* Don't use active slot count to generate BTN_TOOL events. */
+ input_mt_report_pointer_emulation(dev, false);
+
+ /* Send the number of fingers reported by touchpad itself. */
+ input_mt_report_finger_count(dev, mt_state->count);
+
+ synaptics_report_buttons(psmouse, sgm);
+
+ input_sync(dev);
+}
+
+/* Handle case where mt_state->count = 0 */
+static void synaptics_image_sensor_0f(struct synaptics_data *priv,
+ struct synaptics_mt_state *mt_state)
+{
+ synaptics_mt_state_set(mt_state, 0, -1, -1);
+ priv->mt_state_lost = false;
+}
+
+/* Handle case where mt_state->count = 1 */
+static void synaptics_image_sensor_1f(struct synaptics_data *priv,
+ struct synaptics_mt_state *mt_state)
+{
+ struct synaptics_hw_state *agm = &priv->agm;
+ struct synaptics_mt_state *old = &priv->mt_state;
+
+ /*
+ * If the last AGM was (0,0,0), and there is only one finger left,
+ * then we absolutely know that SGM contains slot 0, and all other
+ * fingers have been removed.
+ */
+ if (priv->agm_pending && agm->z == 0) {
+ synaptics_mt_state_set(mt_state, 1, 0, -1);
+ priv->mt_state_lost = false;
+ return;
+ }
+
+ switch (old->count) {
+ case 0:
+ synaptics_mt_state_set(mt_state, 1, 0, -1);
+ break;
+ case 1:
+ /*
+ * If mt_state_lost, then the previous transition was 3->1,
+ * and SGM now contains either slot 0 or 1, but we don't know
+ * which. So, we just assume that the SGM now contains slot 1.
+ *
+ * If pending AGM and either:
+ * (a) the previous SGM slot contains slot 0, or
+ * (b) there was no SGM slot
+ * then, the SGM now contains slot 1
+ *
+ * Case (a) happens with very rapid "drum roll" gestures, where
+ * slot 0 finger is lifted and a new slot 1 finger touches
+ * within one reporting interval.
+ *
+ * Case (b) happens if initially two or more fingers tap
+ * briefly, and all but one lift before the end of the first
+ * reporting interval.
+ *
+ * (In both these cases, slot 0 will become empty, so SGM
+ * contains slot 1 with the new finger)
+ *
+ * Else, if there was no previous SGM, it now contains slot 0.
+ *
+ * Otherwise, SGM still contains the same slot.
+ */
+ if (priv->mt_state_lost ||
+ (priv->agm_pending && old->sgm <= 0))
+ synaptics_mt_state_set(mt_state, 1, 1, -1);
+ else if (old->sgm == -1)
+ synaptics_mt_state_set(mt_state, 1, 0, -1);
+ break;
+ case 2:
+ /*
+ * If mt_state_lost, we don't know which finger SGM contains.
+ *
+ * So, report 1 finger, but with both slots empty.
+ * We will use slot 1 on subsequent 1->1
+ */
+ if (priv->mt_state_lost) {
+ synaptics_mt_state_set(mt_state, 1, -1, -1);
+ break;
+ }
+ /*
+ * Since the last AGM was NOT (0,0,0), it was the finger in
+ * slot 0 that has been removed.
+ * So, SGM now contains previous AGM's slot, and AGM is now
+ * empty.
+ */
+ synaptics_mt_state_set(mt_state, 1, old->agm, -1);
+ break;
+ case 3:
+ /*
+ * Since last AGM was not (0,0,0), we don't know which finger
+ * is left.
+ *
+ * So, report 1 finger, but with both slots empty.
+ * We will use slot 1 on subsequent 1->1
+ */
+ synaptics_mt_state_set(mt_state, 1, -1, -1);
+ priv->mt_state_lost = true;
+ break;
+ case 4:
+ case 5:
+ /* mt_state was updated by AGM-CONTACT packet */
+ break;
+ }
+}
+
+/* Handle case where mt_state->count = 2 */
+static void synaptics_image_sensor_2f(struct synaptics_data *priv,
+ struct synaptics_mt_state *mt_state)
+{
+ struct synaptics_mt_state *old = &priv->mt_state;
+
+ switch (old->count) {
+ case 0:
+ synaptics_mt_state_set(mt_state, 2, 0, 1);
+ break;
+ case 1:
+ /*
+ * If previous SGM contained slot 1 or higher, SGM now contains
+ * slot 0 (the newly touching finger) and AGM contains SGM's
+ * previous slot.
+ *
+ * Otherwise, SGM still contains slot 0 and AGM now contains
+ * slot 1.
+ */
+ if (old->sgm >= 1)
+ synaptics_mt_state_set(mt_state, 2, 0, old->sgm);
+ else
+ synaptics_mt_state_set(mt_state, 2, 0, 1);
+ break;
+ case 2:
+ /*
+ * If mt_state_lost, SGM now contains either finger 1 or 2, but
+ * we don't know which.
+ * So, we just assume that the SGM contains slot 0 and AGM 1.
+ */
+ if (priv->mt_state_lost)
+ synaptics_mt_state_set(mt_state, 2, 0, 1);
+ /*
+ * Otherwise, use the same mt_state, since it either hasn't
+ * changed, or was updated by a recently received AGM-CONTACT
+ * packet.
+ */
+ break;
+ case 3:
+ /*
+ * 3->2 transitions have two unsolvable problems:
+ * 1) no indication is given which finger was removed
+ * 2) no way to tell if agm packet was for finger 3
+ * before 3->2, or finger 2 after 3->2.
+ *
+ * So, report 2 fingers, but empty all slots.
+ * We will guess slots [0,1] on subsequent 2->2.
+ */
+ synaptics_mt_state_set(mt_state, 2, -1, -1);
+ priv->mt_state_lost = true;
+ break;
+ case 4:
+ case 5:
+ /* mt_state was updated by AGM-CONTACT packet */
+ break;
+ }
+}
+
+/* Handle case where mt_state->count = 3 */
+static void synaptics_image_sensor_3f(struct synaptics_data *priv,
+ struct synaptics_mt_state *mt_state)
+{
+ struct synaptics_mt_state *old = &priv->mt_state;
+
+ switch (old->count) {
+ case 0:
+ synaptics_mt_state_set(mt_state, 3, 0, 2);
+ break;
+ case 1:
+ /*
+ * If previous SGM contained slot 2 or higher, SGM now contains
+ * slot 0 (one of the newly touching fingers) and AGM contains
+ * SGM's previous slot.
+ *
+ * Otherwise, SGM now contains slot 0 and AGM contains slot 2.
+ */
+ if (old->sgm >= 2)
+ synaptics_mt_state_set(mt_state, 3, 0, old->sgm);
+ else
+ synaptics_mt_state_set(mt_state, 3, 0, 2);
+ break;
+ case 2:
+ /*
+ * If the AGM previously contained slot 3 or higher, then the
+ * newly touching finger is in the lowest available slot.
+ *
+ * If SGM was previously 1 or higher, then the new SGM is
+ * now slot 0 (with a new finger), otherwise, the new finger
+ * is now in a hidden slot between 0 and AGM's slot.
+ *
+ * In all such cases, the SGM now contains slot 0, and the AGM
+ * continues to contain the same slot as before.
+ */
+ if (old->agm >= 3) {
+ synaptics_mt_state_set(mt_state, 3, 0, old->agm);
+ break;
+ }
+
+ /*
+ * After some 3->1 and all 3->2 transitions, we lose track
+ * of which slot is reported by SGM and AGM.
+ *
+ * For 2->3 in this state, report 3 fingers, but empty all
+ * slots, and we will guess (0,2) on a subsequent 0->3.
+ *
+ * To userspace, the resulting transition will look like:
+ * 2:[0,1] -> 3:[-1,-1] -> 3:[0,2]
+ */
+ if (priv->mt_state_lost) {
+ synaptics_mt_state_set(mt_state, 3, -1, -1);
+ break;
+ }
+
+ /*
+ * If the (SGM,AGM) really previously contained slots (0, 1),
+ * then we cannot know what slot was just reported by the AGM,
+ * because the 2->3 transition can occur either before or after
+ * the AGM packet. Thus, this most recent AGM could contain
+ * either the same old slot 1 or the new slot 2.
+ * Subsequent AGMs will be reporting slot 2.
+ *
+ * To userspace, the resulting transition will look like:
+ * 2:[0,1] -> 3:[0,-1] -> 3:[0,2]
+ */
+ synaptics_mt_state_set(mt_state, 3, 0, -1);
+ break;
+ case 3:
+ /*
+ * If, for whatever reason, the previous AGM was invalid,
+ * assume SGM now contains slot 0, AGM now contains slot 2.
+ */
+ if (old->agm <= 2)
+ synaptics_mt_state_set(mt_state, 3, 0, 2);
+ /*
+ * mt_state either hasn't changed, or was updated by a recently
+ * received AGM-CONTACT packet.
+ */
+ break;
+
+ case 4:
+ case 5:
+ /* mt_state was updated by AGM-CONTACT packet */
+ break;
+ }
+}
+
+/* Handle case where mt_state->count = 4, or = 5 */
+static void synaptics_image_sensor_45f(struct synaptics_data *priv,
+ struct synaptics_mt_state *mt_state)
+{
+ /* mt_state was updated correctly by AGM-CONTACT packet */
+ priv->mt_state_lost = false;
+}
+
+static void synaptics_image_sensor_process(struct psmouse *psmouse,
+ struct synaptics_hw_state *sgm)
+{
+ struct synaptics_data *priv = psmouse->private;
+ struct synaptics_hw_state *agm = &priv->agm;
+ struct synaptics_mt_state mt_state;
+
+ /* Initialize using current mt_state (as updated by last agm) */
+ mt_state = agm->mt_state;
+
+ /*
+ * Update mt_state using the new finger count and current mt_state.
+ */
+ if (sgm->z == 0)
+ synaptics_image_sensor_0f(priv, &mt_state);
+ else if (sgm->w >= 4)
+ synaptics_image_sensor_1f(priv, &mt_state);
+ else if (sgm->w == 0)
+ synaptics_image_sensor_2f(priv, &mt_state);
+ else if (sgm->w == 1 && mt_state.count <= 3)
+ synaptics_image_sensor_3f(priv, &mt_state);
+ else
+ synaptics_image_sensor_45f(priv, &mt_state);
+
+ /* Send resulting input events to user space */
+ synaptics_report_mt_data(psmouse, &mt_state, sgm);
+
+ /* Store updated mt_state */
+ priv->mt_state = agm->mt_state = mt_state;
+ priv->agm_pending = false;
+}
+
/*
* called for each full received packet from the touchpad
*/
@@ -536,11 +968,15 @@ static void synaptics_process_packet(struct psmouse *psmouse)
struct synaptics_hw_state hw;
int num_fingers;
int finger_width;
- int i;
if (synaptics_parse_hw_state(psmouse->packet, priv, &hw))
return;
+ if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) {
+ synaptics_image_sensor_process(psmouse, &hw);
+ return;
+ }
+
if (hw.scroll) {
priv->scroll += hw.scroll;
@@ -586,7 +1022,8 @@ static void synaptics_process_packet(struct psmouse *psmouse)
}
if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
- synaptics_report_semi_mt_data(dev, &hw, &priv->mt, num_fingers);
+ synaptics_report_semi_mt_data(dev, &hw, &priv->agm,
+ num_fingers);
/* Post events
* BTN_TOUCH has to be first as mousedev relies on it when doing
@@ -597,7 +1034,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
if (num_fingers > 0) {
input_report_abs(dev, ABS_X, hw.x);
- input_report_abs(dev, ABS_Y, YMAX_NOMINAL + YMIN_NOMINAL - hw.y);
+ input_report_abs(dev, ABS_Y, synaptics_invert_y(hw.y));
}
input_report_abs(dev, ABS_PRESSURE, hw.z);
@@ -605,35 +1042,25 @@ static void synaptics_process_packet(struct psmouse *psmouse)
input_report_abs(dev, ABS_TOOL_WIDTH, finger_width);
input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1);
- input_report_key(dev, BTN_LEFT, hw.left);
- input_report_key(dev, BTN_RIGHT, hw.right);
-
if (SYN_CAP_MULTIFINGER(priv->capabilities)) {
input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3);
}
- if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
- input_report_key(dev, BTN_MIDDLE, hw.middle);
-
- if (SYN_CAP_FOUR_BUTTON(priv->capabilities)) {
- input_report_key(dev, BTN_FORWARD, hw.up);
- input_report_key(dev, BTN_BACK, hw.down);
- }
-
- for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
- input_report_key(dev, BTN_0 + i, hw.ext_buttons & (1 << i));
+ synaptics_report_buttons(psmouse, &hw);
input_sync(dev);
}
-static int synaptics_validate_byte(unsigned char packet[], int idx, unsigned char pkt_type)
+static int synaptics_validate_byte(struct psmouse *psmouse,
+ int idx, unsigned char pkt_type)
{
static const unsigned char newabs_mask[] = { 0xC8, 0x00, 0x00, 0xC8, 0x00 };
static const unsigned char newabs_rel_mask[] = { 0xC0, 0x00, 0x00, 0xC0, 0x00 };
static const unsigned char newabs_rslt[] = { 0x80, 0x00, 0x00, 0xC0, 0x00 };
static const unsigned char oldabs_mask[] = { 0xC0, 0x60, 0x00, 0xC0, 0x60 };
static const unsigned char oldabs_rslt[] = { 0xC0, 0x00, 0x00, 0x80, 0x00 };
+ const char *packet = psmouse->packet;
if (idx < 0 || idx > 4)
return 0;
@@ -651,7 +1078,7 @@ static int synaptics_validate_byte(unsigned char packet[], int idx, unsigned cha
return (packet[idx] & oldabs_mask[idx]) == oldabs_rslt[idx];
default:
- printk(KERN_ERR "synaptics: unknown packet type %d\n", pkt_type);
+ psmouse_err(psmouse, "unknown packet type %d\n", pkt_type);
return 0;
}
}
@@ -661,8 +1088,8 @@ static unsigned char synaptics_detect_pkt_type(struct psmouse *psmouse)
int i;
for (i = 0; i < 5; i++)
- if (!synaptics_validate_byte(psmouse->packet, i, SYN_NEWABS_STRICT)) {
- printk(KERN_INFO "synaptics: using relaxed packet validation\n");
+ if (!synaptics_validate_byte(psmouse, i, SYN_NEWABS_STRICT)) {
+ psmouse_info(psmouse, "using relaxed packet validation\n");
return SYN_NEWABS_RELAXED;
}
@@ -687,46 +1114,56 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
return PSMOUSE_FULL_PACKET;
}
- return synaptics_validate_byte(psmouse->packet, psmouse->pktcnt - 1, priv->pkt_type) ?
+ return synaptics_validate_byte(psmouse, psmouse->pktcnt - 1, priv->pkt_type) ?
PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA;
}
/*****************************************************************************
* Driver initialization/cleanup functions
****************************************************************************/
-static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
+static void set_abs_position_params(struct input_dev *dev,
+ struct synaptics_data *priv, int x_code,
+ int y_code)
{
- int i;
+ int x_min = priv->x_min ?: XMIN_NOMINAL;
+ int x_max = priv->x_max ?: XMAX_NOMINAL;
+ int y_min = priv->y_min ?: YMIN_NOMINAL;
+ int y_max = priv->y_max ?: YMAX_NOMINAL;
int fuzz = SYN_CAP_REDUCED_FILTERING(priv->ext_cap_0c) ?
SYN_REDUCED_FILTER_FUZZ : 0;
+ input_set_abs_params(dev, x_code, x_min, x_max, fuzz, 0);
+ input_set_abs_params(dev, y_code, y_min, y_max, fuzz, 0);
+ input_abs_set_res(dev, x_code, priv->x_res);
+ input_abs_set_res(dev, y_code, priv->y_res);
+}
+
+static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
+{
+ int i;
+
__set_bit(INPUT_PROP_POINTER, dev->propbit);
__set_bit(EV_ABS, dev->evbit);
- input_set_abs_params(dev, ABS_X,
- priv->x_min ?: XMIN_NOMINAL,
- priv->x_max ?: XMAX_NOMINAL,
- fuzz, 0);
- input_set_abs_params(dev, ABS_Y,
- priv->y_min ?: YMIN_NOMINAL,
- priv->y_max ?: YMAX_NOMINAL,
- fuzz, 0);
+ set_abs_position_params(dev, priv, ABS_X, ABS_Y);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
- if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) {
+ if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) {
+ input_mt_init_slots(dev, 2);
+ set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
+ ABS_MT_POSITION_Y);
+ /* Image sensors can report per-contact pressure */
+ input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ /* Image sensors can signal 4 and 5 finger clicks */
+ __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, dev->keybit);
+ } else if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) {
+ /* Non-image sensors with AGM use semi-mt */
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
input_mt_init_slots(dev, 2);
- input_set_abs_params(dev, ABS_MT_POSITION_X,
- priv->x_min ?: XMIN_NOMINAL,
- priv->x_max ?: XMAX_NOMINAL,
- fuzz, 0);
- input_set_abs_params(dev, ABS_MT_POSITION_Y,
- priv->y_min ?: YMIN_NOMINAL,
- priv->y_max ?: YMAX_NOMINAL,
- fuzz, 0);
-
- input_abs_set_res(dev, ABS_MT_POSITION_X, priv->x_res);
- input_abs_set_res(dev, ABS_MT_POSITION_Y, priv->y_res);
+ set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
+ ABS_MT_POSITION_Y);
}
if (SYN_CAP_PALMDETECT(priv->capabilities))
@@ -759,9 +1196,6 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
__clear_bit(REL_X, dev->relbit);
__clear_bit(REL_Y, dev->relbit);
- input_abs_set_res(dev, ABS_X, priv->x_res);
- input_abs_set_res(dev, ABS_Y, priv->y_res);
-
if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
/* Clickpads report only left button */
@@ -793,21 +1227,21 @@ static int synaptics_reconnect(struct psmouse *psmouse)
return -1;
if (retry > 1)
- printk(KERN_DEBUG "Synaptics reconnected after %d tries\n",
- retry);
+ psmouse_dbg(psmouse, "reconnected after %d tries\n", retry);
if (synaptics_query_hardware(psmouse)) {
- printk(KERN_ERR "Unable to query Synaptics hardware.\n");
+ psmouse_err(psmouse, "Unable to query device.\n");
return -1;
}
if (synaptics_set_absolute_mode(psmouse)) {
- printk(KERN_ERR "Unable to initialize Synaptics hardware.\n");
+ psmouse_err(psmouse, "Unable to initialize device.\n");
return -1;
}
if (synaptics_set_advanced_gesture_mode(psmouse)) {
- printk(KERN_ERR "Advanced gesture mode reconnect failed.\n");
+ psmouse_err(psmouse,
+ "Advanced gesture mode reconnect failed.\n");
return -1;
}
@@ -815,12 +1249,12 @@ static int synaptics_reconnect(struct psmouse *psmouse)
old_priv.model_id != priv->model_id ||
old_priv.capabilities != priv->capabilities ||
old_priv.ext_cap != priv->ext_cap) {
- printk(KERN_ERR "Synaptics hardware appears to be different: "
- "id(%ld-%ld), model(%ld-%ld), caps(%lx-%lx), ext(%lx-%lx).\n",
- old_priv.identity, priv->identity,
- old_priv.model_id, priv->model_id,
- old_priv.capabilities, priv->capabilities,
- old_priv.ext_cap, priv->ext_cap);
+ psmouse_err(psmouse,
+ "hardware appears to be different: id(%ld-%ld), model(%ld-%ld), caps(%lx-%lx), ext(%lx-%lx).\n",
+ old_priv.identity, priv->identity,
+ old_priv.model_id, priv->model_id,
+ old_priv.capabilities, priv->capabilities,
+ old_priv.ext_cap, priv->ext_cap);
return -1;
}
@@ -901,7 +1335,8 @@ int synaptics_init(struct psmouse *psmouse)
* just fine.
*/
if (broken_olpc_ec) {
- printk(KERN_INFO "synaptics: OLPC XO detected, not enabling Synaptics protocol.\n");
+ psmouse_info(psmouse,
+ "OLPC XO detected, not enabling Synaptics protocol.\n");
return -ENODEV;
}
@@ -912,26 +1347,28 @@ int synaptics_init(struct psmouse *psmouse)
psmouse_reset(psmouse);
if (synaptics_query_hardware(psmouse)) {
- printk(KERN_ERR "Unable to query Synaptics hardware.\n");
+ psmouse_err(psmouse, "Unable to query device.\n");
goto init_fail;
}
if (synaptics_set_absolute_mode(psmouse)) {
- printk(KERN_ERR "Unable to initialize Synaptics hardware.\n");
+ psmouse_err(psmouse, "Unable to initialize device.\n");
goto init_fail;
}
if (synaptics_set_advanced_gesture_mode(psmouse)) {
- printk(KERN_ERR "Advanced gesture mode init failed.\n");
+ psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
goto init_fail;
}
priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
- printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx\n",
- SYN_ID_MODEL(priv->identity),
- SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
- priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c);
+ psmouse_info(psmouse,
+ "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx\n",
+ SYN_ID_MODEL(priv->identity),
+ SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
+ priv->model_id,
+ priv->capabilities, priv->ext_cap, priv->ext_cap_0c);
set_input_params(psmouse->dev, priv);
@@ -963,8 +1400,9 @@ int synaptics_init(struct psmouse *psmouse)
* the same rate as a standard PS/2 mouse).
*/
if (psmouse->rate >= 80 && impaired_toshiba_kbc) {
- printk(KERN_INFO "synaptics: Toshiba %s detected, limiting rate to 40pps.\n",
- dmi_get_system_info(DMI_PRODUCT_NAME));
+ psmouse_info(psmouse,
+ "Toshiba %s detected, limiting rate to 40pps.\n",
+ dmi_get_system_info(DMI_PRODUCT_NAME));
psmouse->rate = 40;
}
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index ca040aa80fa..622aea8dd7e 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -74,6 +74,8 @@
* 2 0x04 reduced filtering firmware does less filtering on
* position data, driver should watch
* for noise.
+ * 2 0x08 image sensor image sensor tracks 5 fingers, but only
+ * reports 2.
* 2 0x20 report min query 0x0f gives min coord reported
*/
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
@@ -82,6 +84,7 @@
#define SYN_CAP_MIN_DIMENSIONS(ex0c) ((ex0c) & 0x002000)
#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
+#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
/* synaptics modes query bits */
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -112,9 +115,18 @@
#define SYN_REDUCED_FILTER_FUZZ 8
/*
- * A structure to describe the state of the touchpad hardware (buttons and pad)
+ * A structure to describe which internal touchpad finger slots are being
+ * reported in raw packets.
*/
+struct synaptics_mt_state {
+ int count; /* num fingers being tracked */
+ int sgm; /* which slot is reported by sgm pkt */
+ int agm; /* which slot is reported by agm pkt */
+};
+/*
+ * A structure to describe the state of the touchpad hardware (buttons and pad)
+ */
struct synaptics_hw_state {
int x;
int y;
@@ -127,6 +139,9 @@ struct synaptics_hw_state {
unsigned int down:1;
unsigned char ext_buttons;
signed char scroll;
+
+ /* As reported in last AGM-CONTACT packets */
+ struct synaptics_mt_state mt_state;
};
struct synaptics_data {
@@ -146,7 +161,15 @@ struct synaptics_data {
struct serio *pt_port; /* Pass-through serio port */
- struct synaptics_hw_state mt; /* current gesture packet */
+ struct synaptics_mt_state mt_state; /* Current mt finger state */
+ bool mt_state_lost; /* mt_state may be incorrect */
+
+ /*
+ * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
+ * contains position data for a second contact, at half resolution.
+ */
+ struct synaptics_hw_state agm;
+ bool agm_pending; /* new AGM packet received */
};
void synaptics_module_init(void);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index cba3c84d2f2..4b755cb5b38 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -570,7 +570,7 @@ static int __devinit synaptics_i2c_probe(struct i2c_client *client,
"Requesting IRQ: %d\n", touch->client->irq);
ret = request_irq(touch->client->irq, synaptics_i2c_irq,
- IRQF_DISABLED|IRQ_TYPE_EDGE_FALLING,
+ IRQ_TYPE_EDGE_FALLING,
DRIVER_NAME, touch);
if (ret) {
dev_warn(&touch->client->dev,
@@ -619,7 +619,7 @@ static int __devexit synaptics_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int synaptics_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index b7ba4597f7f..4d4cd142bbb 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -9,6 +9,7 @@
* the Free Software Foundation.
*/
+#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
@@ -33,15 +34,16 @@ struct serio_raw {
unsigned int tail, head;
char name[16];
- unsigned int refcnt;
+ struct kref kref;
struct serio *serio;
struct miscdevice dev;
wait_queue_head_t wait;
- struct list_head list;
+ struct list_head client_list;
struct list_head node;
+ bool dead;
};
-struct serio_raw_list {
+struct serio_raw_client {
struct fasync_struct *fasync;
struct serio_raw *serio_raw;
struct list_head node;
@@ -49,7 +51,6 @@ struct serio_raw_list {
static DEFINE_MUTEX(serio_raw_mutex);
static LIST_HEAD(serio_raw_list);
-static unsigned int serio_raw_no;
/*********************************************************************
* Interface with userspace (file operations) *
@@ -57,9 +58,9 @@ static unsigned int serio_raw_no;
static int serio_raw_fasync(int fd, struct file *file, int on)
{
- struct serio_raw_list *list = file->private_data;
+ struct serio_raw_client *client = file->private_data;
- return fasync_helper(fd, file, on, &list->fasync);
+ return fasync_helper(fd, file, on, &client->fasync);
}
static struct serio_raw *serio_raw_locate(int minor)
@@ -77,8 +78,8 @@ static struct serio_raw *serio_raw_locate(int minor)
static int serio_raw_open(struct inode *inode, struct file *file)
{
struct serio_raw *serio_raw;
- struct serio_raw_list *list;
- int retval = 0;
+ struct serio_raw_client *client;
+ int retval;
retval = mutex_lock_interruptible(&serio_raw_mutex);
if (retval)
@@ -90,60 +91,61 @@ static int serio_raw_open(struct inode *inode, struct file *file)
goto out;
}
- if (!serio_raw->serio) {
+ if (serio_raw->dead) {
retval = -ENODEV;
goto out;
}
- list = kzalloc(sizeof(struct serio_raw_list), GFP_KERNEL);
- if (!list) {
+ client = kzalloc(sizeof(struct serio_raw_client), GFP_KERNEL);
+ if (!client) {
retval = -ENOMEM;
goto out;
}
- list->serio_raw = serio_raw;
- file->private_data = list;
+ client->serio_raw = serio_raw;
+ file->private_data = client;
+
+ kref_get(&serio_raw->kref);
- serio_raw->refcnt++;
- list_add_tail(&list->node, &serio_raw->list);
+ serio_pause_rx(serio_raw->serio);
+ list_add_tail(&client->node, &serio_raw->client_list);
+ serio_continue_rx(serio_raw->serio);
out:
mutex_unlock(&serio_raw_mutex);
return retval;
}
-static int serio_raw_cleanup(struct serio_raw *serio_raw)
+static void serio_raw_free(struct kref *kref)
{
- if (--serio_raw->refcnt == 0) {
- misc_deregister(&serio_raw->dev);
- list_del_init(&serio_raw->node);
- kfree(serio_raw);
+ struct serio_raw *serio_raw =
+ container_of(kref, struct serio_raw, kref);
- return 1;
- }
-
- return 0;
+ put_device(&serio_raw->serio->dev);
+ kfree(serio_raw);
}
static int serio_raw_release(struct inode *inode, struct file *file)
{
- struct serio_raw_list *list = file->private_data;
- struct serio_raw *serio_raw = list->serio_raw;
+ struct serio_raw_client *client = file->private_data;
+ struct serio_raw *serio_raw = client->serio_raw;
- mutex_lock(&serio_raw_mutex);
+ serio_pause_rx(serio_raw->serio);
+ list_del(&client->node);
+ serio_continue_rx(serio_raw->serio);
- serio_raw_cleanup(serio_raw);
+ kfree(client);
+
+ kref_put(&serio_raw->kref, serio_raw_free);
- mutex_unlock(&serio_raw_mutex);
return 0;
}
-static int serio_raw_fetch_byte(struct serio_raw *serio_raw, char *c)
+static bool serio_raw_fetch_byte(struct serio_raw *serio_raw, char *c)
{
- unsigned long flags;
- int empty;
+ bool empty;
- spin_lock_irqsave(&serio_raw->serio->lock, flags);
+ serio_pause_rx(serio_raw->serio);
empty = serio_raw->head == serio_raw->tail;
if (!empty) {
@@ -151,30 +153,31 @@ static int serio_raw_fetch_byte(struct serio_raw *serio_raw, char *c)
serio_raw->tail = (serio_raw->tail + 1) % SERIO_RAW_QUEUE_LEN;
}
- spin_unlock_irqrestore(&serio_raw->serio->lock, flags);
+ serio_continue_rx(serio_raw->serio);
return !empty;
}
-static ssize_t serio_raw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t serio_raw_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
{
- struct serio_raw_list *list = file->private_data;
- struct serio_raw *serio_raw = list->serio_raw;
+ struct serio_raw_client *client = file->private_data;
+ struct serio_raw *serio_raw = client->serio_raw;
char uninitialized_var(c);
ssize_t retval = 0;
- if (!serio_raw->serio)
+ if (serio_raw->dead)
return -ENODEV;
if (serio_raw->head == serio_raw->tail && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
- retval = wait_event_interruptible(list->serio_raw->wait,
- serio_raw->head != serio_raw->tail || !serio_raw->serio);
+ retval = wait_event_interruptible(serio_raw->wait,
+ serio_raw->head != serio_raw->tail || serio_raw->dead);
if (retval)
return retval;
- if (!serio_raw->serio)
+ if (serio_raw->dead)
return -ENODEV;
while (retval < count && serio_raw_fetch_byte(serio_raw, &c)) {
@@ -186,9 +189,11 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer, size_t cou
return retval;
}
-static ssize_t serio_raw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
- struct serio_raw_list *list = file->private_data;
+ struct serio_raw_client *client = file->private_data;
+ struct serio_raw *serio_raw = client->serio_raw;
ssize_t written = 0;
int retval;
unsigned char c;
@@ -197,7 +202,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer, siz
if (retval)
return retval;
- if (!list->serio_raw->serio) {
+ if (serio_raw->dead) {
retval = -ENODEV;
goto out;
}
@@ -210,7 +215,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer, siz
retval = -EFAULT;
goto out;
}
- if (serio_write(list->serio_raw->serio, c)) {
+ if (serio_write(serio_raw->serio, c)) {
retval = -EIO;
goto out;
}
@@ -224,46 +229,49 @@ out:
static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
{
- struct serio_raw_list *list = file->private_data;
+ struct serio_raw_client *client = file->private_data;
+ struct serio_raw *serio_raw = client->serio_raw;
+ unsigned int mask;
- poll_wait(file, &list->serio_raw->wait, wait);
+ poll_wait(file, &serio_raw->wait, wait);
- if (list->serio_raw->head != list->serio_raw->tail)
+ mask = serio_raw->dead ? POLLHUP | POLLERR : POLLOUT | POLLWRNORM;
+ if (serio_raw->head != serio_raw->tail)
return POLLIN | POLLRDNORM;
return 0;
}
static const struct file_operations serio_raw_fops = {
- .owner = THIS_MODULE,
- .open = serio_raw_open,
- .release = serio_raw_release,
- .read = serio_raw_read,
- .write = serio_raw_write,
- .poll = serio_raw_poll,
- .fasync = serio_raw_fasync,
- .llseek = noop_llseek,
+ .owner = THIS_MODULE,
+ .open = serio_raw_open,
+ .release = serio_raw_release,
+ .read = serio_raw_read,
+ .write = serio_raw_write,
+ .poll = serio_raw_poll,
+ .fasync = serio_raw_fasync,
+ .llseek = noop_llseek,
};
/*********************************************************************
- * Interface with serio port *
+ * Interface with serio port *
*********************************************************************/
static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
unsigned int dfl)
{
struct serio_raw *serio_raw = serio_get_drvdata(serio);
- struct serio_raw_list *list;
+ struct serio_raw_client *client;
unsigned int head = serio_raw->head;
- /* we are holding serio->lock here so we are prootected */
+ /* we are holding serio->lock here so we are protected */
serio_raw->queue[head] = data;
head = (head + 1) % SERIO_RAW_QUEUE_LEN;
if (likely(head != serio_raw->tail)) {
serio_raw->head = head;
- list_for_each_entry(list, &serio_raw->list, node)
- kill_fasync(&list->fasync, SIGIO, POLL_IN);
+ list_for_each_entry(client, &serio_raw->client_list, node)
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
wake_up_interruptible(&serio_raw->wait);
}
@@ -272,29 +280,37 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
{
+ static atomic_t serio_raw_no = ATOMIC_INIT(0);
struct serio_raw *serio_raw;
int err;
- if (!(serio_raw = kzalloc(sizeof(struct serio_raw), GFP_KERNEL))) {
- printk(KERN_ERR "serio_raw.c: can't allocate memory for a device\n");
+ serio_raw = kzalloc(sizeof(struct serio_raw), GFP_KERNEL);
+ if (!serio_raw) {
+ dev_dbg(&serio->dev, "can't allocate memory for a device\n");
return -ENOMEM;
}
- mutex_lock(&serio_raw_mutex);
+ snprintf(serio_raw->name, sizeof(serio_raw->name),
+ "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
+ kref_init(&serio_raw->kref);
+ INIT_LIST_HEAD(&serio_raw->client_list);
+ init_waitqueue_head(&serio_raw->wait);
- snprintf(serio_raw->name, sizeof(serio_raw->name), "serio_raw%d", serio_raw_no++);
- serio_raw->refcnt = 1;
serio_raw->serio = serio;
- INIT_LIST_HEAD(&serio_raw->list);
- init_waitqueue_head(&serio_raw->wait);
+ get_device(&serio->dev);
serio_set_drvdata(serio, serio_raw);
err = serio_open(serio, drv);
if (err)
- goto out_free;
+ goto err_free;
+
+ err = mutex_lock_killable(&serio_raw_mutex);
+ if (err)
+ goto err_close;
list_add_tail(&serio_raw->node, &serio_raw_list);
+ mutex_unlock(&serio_raw_mutex);
serio_raw->dev.minor = PSMOUSE_MINOR;
serio_raw->dev.name = serio_raw->name;
@@ -308,23 +324,23 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
}
if (err) {
- printk(KERN_INFO "serio_raw: failed to register raw access device for %s\n",
+ dev_err(&serio->dev,
+ "failed to register raw access device for %s\n",
serio->phys);
- goto out_close;
+ goto err_unlink;
}
- printk(KERN_INFO "serio_raw: raw access enabled on %s (%s, minor %d)\n",
- serio->phys, serio_raw->name, serio_raw->dev.minor);
- goto out;
+ dev_info(&serio->dev, "raw access enabled on %s (%s, minor %d)\n",
+ serio->phys, serio_raw->name, serio_raw->dev.minor);
+ return 0;
-out_close:
- serio_close(serio);
+err_unlink:
list_del_init(&serio_raw->node);
-out_free:
+err_close:
+ serio_close(serio);
+err_free:
serio_set_drvdata(serio, NULL);
- kfree(serio_raw);
-out:
- mutex_unlock(&serio_raw_mutex);
+ kref_put(&serio_raw->kref, serio_raw_free);
return err;
}
@@ -334,7 +350,8 @@ static int serio_raw_reconnect(struct serio *serio)
struct serio_driver *drv = serio->drv;
if (!drv || !serio_raw) {
- printk(KERN_DEBUG "serio_raw: reconnect request, but serio is disconnected, ignoring...\n");
+ dev_dbg(&serio->dev,
+ "reconnect request, but serio is disconnected, ignoring...\n");
return -1;
}
@@ -345,22 +362,40 @@ static int serio_raw_reconnect(struct serio *serio)
return 0;
}
+/*
+ * Wake up users waiting for IO so they can disconnect from
+ * dead device.
+ */
+static void serio_raw_hangup(struct serio_raw *serio_raw)
+{
+ struct serio_raw_client *client;
+
+ serio_pause_rx(serio_raw->serio);
+ list_for_each_entry(client, &serio_raw->client_list, node)
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+ serio_continue_rx(serio_raw->serio);
+
+ wake_up_interruptible(&serio_raw->wait);
+}
+
+
static void serio_raw_disconnect(struct serio *serio)
{
- struct serio_raw *serio_raw;
+ struct serio_raw *serio_raw = serio_get_drvdata(serio);
+
+ misc_deregister(&serio_raw->dev);
mutex_lock(&serio_raw_mutex);
+ serio_raw->dead = true;
+ list_del_init(&serio_raw->node);
+ mutex_unlock(&serio_raw_mutex);
- serio_raw = serio_get_drvdata(serio);
+ serio_raw_hangup(serio_raw);
serio_close(serio);
- serio_set_drvdata(serio, NULL);
-
- serio_raw->serio = NULL;
- if (!serio_raw_cleanup(serio_raw))
- wake_up_interruptible(&serio_raw->wait);
+ kref_put(&serio_raw->kref, serio_raw_free);
- mutex_unlock(&serio_raw_mutex);
+ serio_set_drvdata(serio, NULL);
}
static struct serio_device_id serio_raw_serio_ids[] = {
@@ -391,7 +426,7 @@ static struct serio_driver serio_raw_drv = {
.connect = serio_raw_connect,
.reconnect = serio_raw_reconnect,
.disconnect = serio_raw_disconnect,
- .manual_bind = 1,
+ .manual_bind = true,
};
static int __init serio_raw_init(void)
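The serio_raw changes above replace the hand-rolled refcnt with a kref, so the structure is freed only when both the disconnect path and the last open client have dropped their references, whichever happens last. A minimal userspace model of that lifetime rule follows; it uses a plain integer counter (not thread-safe), and obj_get/obj_put/obj_release are illustrative stand-ins for kref_get/kref_put and the release callback:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;           /* stand-in for struct kref */
        char name[16];
};

static void obj_get(struct obj *o)
{
        o->refcount++;
}

/* Like kref_put(): invoke the release callback when the last ref drops. */
static void obj_put(struct obj *o, void (*release)(struct obj *))
{
        if (--o->refcount == 0)
                release(o);
}

static void obj_release(struct obj *o)
{
        printf("releasing %s\n", o->name);
        free(o);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;                 /* kref_init() in connect() */
        snprintf(o->name, sizeof(o->name), "serio_raw0");

        obj_get(o);                      /* a client open()s the device   */
        obj_put(o, obj_release);         /* disconnect() drops driver ref */
        obj_put(o, obj_release);         /* last client release frees it  */
        return 0;
}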
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index fdb6a3976f9..75fb040a343 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -15,6 +15,7 @@
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/module.h>
#include <linux/slab.h>
MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 23317bd09c8..0783864a7dc 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -11,7 +11,7 @@
* Copyright (c) 2000 Daniel Egger <egger@suse.de>
* Copyright (c) 2001 Frederic Lepied <flepied@mandrakesoft.com>
* Copyright (c) 2004 Panagiotis Issaris <panagiotis.issaris@mech.kuleuven.ac.be>
- * Copyright (c) 2002-2009 Ping Cheng <pingc@wacom.com>
+ * Copyright (c) 2002-2011 Ping Cheng <pingc@wacom.com>
*
* ChangeLog:
* v0.1 (vp) - Initial release
@@ -93,7 +93,7 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.52"
+#define DRIVER_VERSION "v1.53"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB Wacom tablet driver"
#define DRIVER_LICENSE "GPL"
@@ -114,6 +114,12 @@ struct wacom {
struct mutex lock;
bool open;
char phys[32];
+ struct wacom_led {
+ u8 select[2]; /* status led selector (0..3) */
+ u8 llv; /* status led brightness no button (1..127) */
+ u8 hlv; /* status led brightness button pressed (1..127) */
+ u8 img_lum; /* OLED matrix display brightness */
+ } led;
};
extern const struct usb_device_id wacom_ids[];
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 958b4eb6369..1c1b7b43cf9 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -48,27 +48,49 @@ struct hid_descriptor {
/* defines to get/set USB message */
#define USB_REQ_GET_REPORT 0x01
#define USB_REQ_SET_REPORT 0x09
+
#define WAC_HID_FEATURE_REPORT 0x03
#define WAC_MSG_RETRIES 5
-static int usb_get_report(struct usb_interface *intf, unsigned char type,
- unsigned char id, void *buf, int size)
+#define WAC_CMD_LED_CONTROL 0x20
+#define WAC_CMD_ICON_START 0x21
+#define WAC_CMD_ICON_XFER 0x23
+#define WAC_CMD_RETRIES 10
+
+static int wacom_get_report(struct usb_interface *intf, u8 type, u8 id,
+ void *buf, size_t size, unsigned int retries)
{
- return usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- USB_REQ_GET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- (type << 8) + id, intf->altsetting[0].desc.bInterfaceNumber,
- buf, size, 100);
+ struct usb_device *dev = interface_to_usbdev(intf);
+ int retval;
+
+ do {
+ retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ USB_REQ_GET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ (type << 8) + id,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ buf, size, 100);
+ } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
+
+ return retval;
}
-static int usb_set_report(struct usb_interface *intf, unsigned char type,
- unsigned char id, void *buf, int size)
+static int wacom_set_report(struct usb_interface *intf, u8 type, u8 id,
+ void *buf, size_t size, unsigned int retries)
{
- return usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- USB_REQ_SET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- (type << 8) + id, intf->altsetting[0].desc.bInterfaceNumber,
- buf, size, 1000);
+ struct usb_device *dev = interface_to_usbdev(intf);
+ int retval;
+
+ do {
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ USB_REQ_SET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ (type << 8) + id,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ buf, size, 1000);
+ } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
+
+ return retval;
}
static void wacom_sys_irq(struct urb *urb)
@@ -319,23 +341,23 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
rep_data[2] = 0;
rep_data[3] = 0;
report_id = 3;
- error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, 4);
+ error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 4, 1);
if (error >= 0)
- error = usb_get_report(intf,
- WAC_HID_FEATURE_REPORT, report_id,
- rep_data, 4);
+ error = wacom_get_report(intf,
+ WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 4, 1);
} while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
} else if (features->type != TABLETPC) {
do {
rep_data[0] = 2;
rep_data[1] = 2;
- error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, 2);
+ error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 2, 1);
if (error >= 0)
- error = usb_get_report(intf,
- WAC_HID_FEATURE_REPORT, report_id,
- rep_data, 2);
+ error = wacom_get_report(intf,
+ WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 2, 1);
} while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES);
}
@@ -454,6 +476,275 @@ static void wacom_remove_shared_data(struct wacom_wac *wacom)
}
}
+static int wacom_led_control(struct wacom *wacom)
+{
+ unsigned char *buf;
+ int retval, led = 0;
+
+ buf = kzalloc(9, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (wacom->wacom_wac.features.type == WACOM_21UX2)
+ led = (wacom->led.select[1] << 4) | 0x40;
+
+ led |= wacom->led.select[0] | 0x4;
+
+ buf[0] = WAC_CMD_LED_CONTROL;
+ buf[1] = led;
+ buf[2] = wacom->led.llv;
+ buf[3] = wacom->led.hlv;
+ buf[4] = wacom->led.img_lum;
+
+ retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_LED_CONTROL,
+ buf, 9, WAC_CMD_RETRIES);
+ kfree(buf);
+
+ return retval;
+}
+
+static int wacom_led_putimage(struct wacom *wacom, int button_id, const void *img)
+{
+ unsigned char *buf;
+ int i, retval;
+
+ buf = kzalloc(259, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Send 'start' command */
+ buf[0] = WAC_CMD_ICON_START;
+ buf[1] = 1;
+ retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_START,
+ buf, 2, WAC_CMD_RETRIES);
+ if (retval < 0)
+ goto out;
+
+ buf[0] = WAC_CMD_ICON_XFER;
+ buf[1] = button_id & 0x07;
+ for (i = 0; i < 4; i++) {
+ buf[2] = i;
+ memcpy(buf + 3, img + i * 256, 256);
+
+ retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_XFER,
+ buf, 259, WAC_CMD_RETRIES);
+ if (retval < 0)
+ break;
+ }
+
+ /* Send 'stop' */
+ buf[0] = WAC_CMD_ICON_START;
+ buf[1] = 0;
+ wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_START,
+ buf, 2, WAC_CMD_RETRIES);
+
+out:
+ kfree(buf);
+ return retval;
+}
+
+static ssize_t wacom_led_select_store(struct device *dev, int set_id,
+ const char *buf, size_t count)
+{
+ struct wacom *wacom = dev_get_drvdata(dev);
+ unsigned int id;
+ int err;
+
+ err = kstrtouint(buf, 10, &id);
+ if (err)
+ return err;
+
+ mutex_lock(&wacom->lock);
+
+ wacom->led.select[set_id] = id & 0x3;
+ err = wacom_led_control(wacom);
+
+ mutex_unlock(&wacom->lock);
+
+ return err < 0 ? err : count;
+}
+
+#define DEVICE_LED_SELECT_ATTR(SET_ID) \
+static ssize_t wacom_led##SET_ID##_select_store(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ return wacom_led_select_store(dev, SET_ID, buf, count); \
+} \
+static ssize_t wacom_led##SET_ID##_select_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wacom *wacom = dev_get_drvdata(dev); \
+ return snprintf(buf, PAGE_SIZE, "%d\n", wacom->led.select[SET_ID]); \
+} \
+static DEVICE_ATTR(status_led##SET_ID##_select, S_IWUSR | S_IRUSR, \
+ wacom_led##SET_ID##_select_show, \
+ wacom_led##SET_ID##_select_store)
+
+DEVICE_LED_SELECT_ATTR(0);
+DEVICE_LED_SELECT_ATTR(1);
+
+static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest,
+ const char *buf, size_t count)
+{
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ mutex_lock(&wacom->lock);
+
+ *dest = value & 0x7f;
+ err = wacom_led_control(wacom);
+
+ mutex_unlock(&wacom->lock);
+
+ return err < 0 ? err : count;
+}
+
+#define DEVICE_LUMINANCE_ATTR(name, field) \
+static ssize_t wacom_##name##_luminance_store(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct wacom *wacom = dev_get_drvdata(dev); \
+ \
+ return wacom_luminance_store(wacom, &wacom->led.field, \
+ buf, count); \
+} \
+static DEVICE_ATTR(name##_luminance, S_IWUSR, \
+ NULL, wacom_##name##_luminance_store)
+
+DEVICE_LUMINANCE_ATTR(status0, llv);
+DEVICE_LUMINANCE_ATTR(status1, hlv);
+DEVICE_LUMINANCE_ATTR(buttons, img_lum);
+
+static ssize_t wacom_button_image_store(struct device *dev, int button_id,
+ const char *buf, size_t count)
+{
+ struct wacom *wacom = dev_get_drvdata(dev);
+ int err;
+
+ if (count != 1024)
+ return -EINVAL;
+
+ mutex_lock(&wacom->lock);
+
+ err = wacom_led_putimage(wacom, button_id, buf);
+
+ mutex_unlock(&wacom->lock);
+
+ return err < 0 ? err : count;
+}
+
+#define DEVICE_BTNIMG_ATTR(BUTTON_ID) \
+static ssize_t wacom_btnimg##BUTTON_ID##_store(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ return wacom_button_image_store(dev, BUTTON_ID, buf, count); \
+} \
+static DEVICE_ATTR(button##BUTTON_ID##_rawimg, S_IWUSR, \
+ NULL, wacom_btnimg##BUTTON_ID##_store)
+
+DEVICE_BTNIMG_ATTR(0);
+DEVICE_BTNIMG_ATTR(1);
+DEVICE_BTNIMG_ATTR(2);
+DEVICE_BTNIMG_ATTR(3);
+DEVICE_BTNIMG_ATTR(4);
+DEVICE_BTNIMG_ATTR(5);
+DEVICE_BTNIMG_ATTR(6);
+DEVICE_BTNIMG_ATTR(7);
+
+static struct attribute *cintiq_led_attrs[] = {
+ &dev_attr_status_led0_select.attr,
+ &dev_attr_status_led1_select.attr,
+ NULL
+};
+
+static struct attribute_group cintiq_led_attr_group = {
+ .name = "wacom_led",
+ .attrs = cintiq_led_attrs,
+};
+
+static struct attribute *intuos4_led_attrs[] = {
+ &dev_attr_status0_luminance.attr,
+ &dev_attr_status1_luminance.attr,
+ &dev_attr_status_led0_select.attr,
+ &dev_attr_buttons_luminance.attr,
+ &dev_attr_button0_rawimg.attr,
+ &dev_attr_button1_rawimg.attr,
+ &dev_attr_button2_rawimg.attr,
+ &dev_attr_button3_rawimg.attr,
+ &dev_attr_button4_rawimg.attr,
+ &dev_attr_button5_rawimg.attr,
+ &dev_attr_button6_rawimg.attr,
+ &dev_attr_button7_rawimg.attr,
+ NULL
+};
+
+static struct attribute_group intuos4_led_attr_group = {
+ .name = "wacom_led",
+ .attrs = intuos4_led_attrs,
+};
+
+static int wacom_initialize_leds(struct wacom *wacom)
+{
+ int error;
+
+ /* Initialize default values */
+ switch (wacom->wacom_wac.features.type) {
+ case INTUOS4:
+ case INTUOS4L:
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 10;
+ wacom->led.hlv = 20;
+ wacom->led.img_lum = 10;
+ error = sysfs_create_group(&wacom->intf->dev.kobj,
+ &intuos4_led_attr_group);
+ break;
+
+ case WACOM_21UX2:
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 0;
+ wacom->led.hlv = 0;
+ wacom->led.img_lum = 0;
+
+ error = sysfs_create_group(&wacom->intf->dev.kobj,
+ &cintiq_led_attr_group);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (error) {
+ dev_err(&wacom->intf->dev,
+ "cannot create sysfs group err: %d\n", error);
+ return error;
+ }
+ wacom_led_control(wacom);
+
+ return 0;
+}
+
+static void wacom_destroy_leds(struct wacom *wacom)
+{
+ switch (wacom->wacom_wac.features.type) {
+ case INTUOS4:
+ case INTUOS4L:
+ sysfs_remove_group(&wacom->intf->dev.kobj,
+ &intuos4_led_attr_group);
+ break;
+
+ case WACOM_21UX2:
+ sysfs_remove_group(&wacom->intf->dev.kobj,
+ &cintiq_led_attr_group);
+ break;
+ }
+}
+
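The LED controls added above are exposed through a named sysfs attribute group on the USB interface device, so they appear under a wacom_led/ subdirectory. A minimal sketch of that device-attribute-group pattern, with illustrative names only:

/* Sketch of the sysfs attribute-group pattern (illustrative names). */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 0);
}
static DEVICE_ATTR(demo, S_IRUSR, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};

static struct attribute_group demo_attr_group = {
	.name	= "wacom_led",		/* creates the subdirectory */
	.attrs	= demo_attrs,
};

/* probe:  sysfs_create_group(&intf->dev.kobj, &demo_attr_group);
 * remove: sysfs_remove_group(&intf->dev.kobj, &demo_attr_group); */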
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -542,16 +833,21 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
wacom->irq->transfer_dma = wacom->data_dma;
wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- error = input_register_device(input_dev);
+ error = wacom_initialize_leds(wacom);
if (error)
goto fail4;
+ error = input_register_device(input_dev);
+ if (error)
+ goto fail5;
+
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(intf, features);
usb_set_intfdata(intf, wacom);
return 0;
+ fail5: wacom_destroy_leds(wacom);
fail4: wacom_remove_shared_data(wacom_wac);
fail3: usb_free_urb(wacom->irq);
fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
@@ -568,6 +864,7 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_kill_urb(wacom->irq);
input_unregister_device(wacom->wacom_wac.input);
+ wacom_destroy_leds(wacom);
usb_free_urb(wacom->irq);
usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
wacom->wacom_wac.data, wacom->data_dma);
@@ -590,17 +887,16 @@ static int wacom_resume(struct usb_interface *intf)
{
struct wacom *wacom = usb_get_intfdata(intf);
struct wacom_features *features = &wacom->wacom_wac.features;
- int rv;
+ int rv = 0;
mutex_lock(&wacom->lock);
/* switch to wacom mode first */
wacom_query_tablet_data(intf, features);
+ wacom_led_control(wacom);
- if (wacom->open)
- rv = usb_submit_urb(wacom->irq, GFP_NOIO);
- else
- rv = 0;
+ if (wacom->open && usb_submit_urb(wacom->irq, GFP_NOIO) < 0)
+ rv = -EIO;
mutex_unlock(&wacom->lock);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 9dea71849f4..da0d8761e77 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -874,7 +874,15 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
x = le16_to_cpup((__le16 *)&data[2]);
y = le16_to_cpup((__le16 *)&data[4]);
p = le16_to_cpup((__le16 *)&data[6]);
- d = data[8];
+ /*
+ * Convert the raw value (distance until out of proximity) into
+ * distance from the tablet surface. The raw value exceeds
+ * distance_max once the pen is touching and applying pressure,
+ * so do not report a negative distance.
+ */
+ if (data[8] <= wacom->features.distance_max)
+ d = wacom->features.distance_max - data[8];
+
pen = data[1] & 0x01;
btn1 = data[1] & 0x02;
btn2 = data[1] & 0x04;
@@ -1030,8 +1038,6 @@ void wacom_setup_device_quirks(struct wacom_features *features)
features->y_max <<= 5;
features->x_fuzz <<= 5;
features->y_fuzz <<= 5;
- features->pressure_max = 256;
- features->pressure_fuzz = 16;
features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES;
}
}
@@ -1241,14 +1247,14 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
0, features->y_max,
features->y_fuzz, 0);
- input_set_abs_params(input_dev, ABS_MT_PRESSURE,
- 0, features->pressure_max,
- features->pressure_fuzz, 0);
} else if (features->device_type == BTN_TOOL_PEN) {
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
+ input_set_abs_params(input_dev, ABS_DISTANCE, 0,
+ features->distance_max,
+ 0, 0);
}
break;
}
@@ -1469,37 +1475,37 @@ static const struct wacom_features wacom_features_0x47 =
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD0 =
{ "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD1 =
{ "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD2 =
{ "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD3 =
- { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD4 =
{ "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD5 =
- { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD6 =
{ "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD7 =
{ "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD8 =
- { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xDA =
{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static struct wacom_features wacom_features_0xDB =
- { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
- 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index cabd9e54863..3488ffe1fa0 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -651,6 +651,18 @@ config TOUCHSCREEN_TOUCHIT213
To compile this driver as a module, choose M here: the
module will be called touchit213.
+config TOUCHSCREEN_TSC_SERIO
+ tristate "TSC-10/25/40 serial touchscreen support"
+ select SERIO
+ help
+ Say Y here if you have a TSC-10, 25 or 40 serial touchscreen connected
+ to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsc40.
+
config TOUCHSCREEN_TSC2005
tristate "TSC2005 based touchscreens"
depends on SPI_MASTER && GENERIC_HARDIRQS
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 282d6f76ae2..f957676035a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
+obj-$(CONFIG_TOUCHSCREEN_TSC_SERIO) += tsc40.o
obj-$(CONFIG_TOUCHSCREEN_TSC2005) += tsc2005.o
obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 714d4e0f9f9..400131df677 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -45,6 +45,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/ad7877.h>
+#include <linux/module.h>
#include <asm/irq.h>
#define TS_PEN_UP_TIMEOUT msecs_to_jiffies(100)
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index 4e4e58cec6c..c789b974c79 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -16,7 +16,7 @@
#define AD7879_DEVID 0x79 /* AD7879-1/AD7889-1 */
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ad7879_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -36,9 +36,9 @@ static int ad7879_i2c_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
-#endif
/* All registers are word-sized.
* AD7879 uses a high-byte first convention.
@@ -119,9 +119,7 @@ static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &ad7879_i2c_pm,
-#endif
},
.probe = ad7879_i2c_probe,
.remove = __devexit_p(ad7879_i2c_remove),
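The hunk above works because SIMPLE_DEV_PM_OPS() always defines the dev_pm_ops structure and only references the suspend/resume callbacks when CONFIG_PM_SLEEP is set, so the #ifdef can cover just the callbacks and the .pm assignment needs no guard. A minimal sketch of the pattern, with illustrative names:

/* Sketch: SIMPLE_DEV_PM_OPS with callbacks guarded by CONFIG_PM_SLEEP. */
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev)  { return 0; }
#endif

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

/* .driver = { .pm = &demo_pm_ops },  -- valid with or without PM_SLEEP */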
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index ddf732f3caf..b1643c8fa7c 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -9,6 +9,7 @@
#include <linux/input.h> /* BUS_SPI */
#include <linux/pm.h>
#include <linux/spi/spi.h>
+#include <linux/module.h>
#include "ad7879.h"
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 131f9d1c921..3b2e9ed2aee 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -33,6 +33,7 @@
#include <linux/gpio.h>
#include <linux/spi/ad7879.h>
+#include <linux/module.h>
#include "ad7879.h"
#define AD7879_REG_ZEROS 0
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index d507b9b6780..de31ec6fe9e 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -31,6 +31,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include <asm/irq.h>
/*
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index f5d66859f23..a596c2775d1 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -910,12 +910,17 @@ static ssize_t mxt_object_show(struct device *dev,
for (i = 0; i < data->info.object_num; i++) {
object = data->object_table + i;
- count += sprintf(buf + count,
- "Object Table Element %d(Type %d)\n",
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "Object[%d] (Type %d)\n",
i + 1, object->type);
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
if (!mxt_object_readable(object->type)) {
- count += sprintf(buf + count, "\n");
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\n");
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
continue;
}
@@ -925,11 +930,15 @@ static ssize_t mxt_object_show(struct device *dev,
if (error)
return error;
- count += sprintf(buf + count,
- " Byte %d: 0x%x (%d)\n", j, val, val);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\t[%2d]: %02x (%d)\n", j, val, val);
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
}
- count += sprintf(buf + count, "\n");
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
}
return count;
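The rewritten show routine above bounds every write with the space left in the PAGE_SIZE sysfs buffer and bails out once it is full. A shorter way to get the same guarantee is scnprintf(), which returns the number of bytes actually written rather than the would-be length; sketched here as an alternative, not as this driver's code:

/* Sketch: bounded sysfs show using scnprintf() (illustrative names). */
static ssize_t demo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;
	int i;

	for (i = 0; i < 16; i++) {
		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "Object[%d]\n", i);
		if (count >= PAGE_SIZE - 1)
			break;		/* buffer full, stop early */
	}

	return count;
}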
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index 432c69be6ac..122a8788365 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -229,7 +229,7 @@ static int __devinit atmel_tsadcc_probe(struct platform_device *pdev)
goto err_release_mem;
}
- err = request_irq(ts_dev->irq, atmel_tsadcc_interrupt, IRQF_DISABLED,
+ err = request_irq(ts_dev->irq, atmel_tsadcc_interrupt, 0,
pdev->dev.driver->name, ts_dev);
if (err) {
dev_err(&pdev->dev, "failed to allocate irq.\n");
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 1507ce108d5..902c7214e88 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -13,6 +13,7 @@
#include <linux/input/bu21013.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#define PEN_DOWN_INTR 0
#define MAX_FINGERS 2
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index 211811ae552..6107e563e68 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -396,14 +396,14 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE);
if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler,
- IRQF_SHARED | IRQF_DISABLED, "h3600_action", ts->dev)) {
+ IRQF_SHARED, "h3600_action", ts->dev)) {
printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
err = -EBUSY;
goto fail1;
}
if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
- IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", ts->dev)) {
+ IRQF_SHARED, "h3600_suspend", ts->dev)) {
printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
err = -EBUSY;
goto fail2;
diff --git a/drivers/input/touchscreen/hp680_ts_input.c b/drivers/input/touchscreen/hp680_ts_input.c
index dd4e8f020b9..639a6044183 100644
--- a/drivers/input/touchscreen/hp680_ts_input.c
+++ b/drivers/input/touchscreen/hp680_ts_input.c
@@ -93,7 +93,7 @@ static int __init hp680_ts_init(void)
hp680_ts_dev->phys = "hp680_ts/input0";
if (request_irq(HP680_TS_IRQ, hp680_ts_interrupt,
- IRQF_DISABLED, MODNAME, 0) < 0) {
+ 0, MODNAME, 0) < 0) {
printk(KERN_ERR "hp680_touchscreen.c: Can't allocate irq %d\n",
HP680_TS_IRQ);
err = -EBUSY;
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 4b0a061811f..50076c2d59e 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -127,7 +127,7 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev)
error = request_irq(IRQ_GPIO9,
jornada720_ts_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING,
"HP7XX Touchscreen driver", pdev);
if (error) {
printk(KERN_INFO "HP7XX TS : Unable to acquire irq!\n");
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index dcf803f5a1f..0a484ed5295 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -276,7 +276,7 @@ static int __devinit lpc32xx_ts_probe(struct platform_device *pdev)
input_set_drvdata(input, tsc);
error = request_irq(tsc->irq, lpc32xx_ts_interrupt,
- IRQF_DISABLED, pdev->name, tsc);
+ 0, pdev->name, tsc);
if (error) {
dev_err(&pdev->dev, "failed requesting interrupt\n");
goto err_put_clock;
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index c5bc62d85bb..ede02743eac 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(sample_tolerance,
struct mc13783_ts_priv {
struct input_dev *idev;
- struct mc13783 *mc13783;
+ struct mc13xxx *mc13xxx;
struct delayed_work work;
struct workqueue_struct *workq;
unsigned int sample[4];
@@ -45,7 +45,7 @@ static irqreturn_t mc13783_ts_handler(int irq, void *data)
{
struct mc13783_ts_priv *priv = data;
- mc13783_irq_ack(priv->mc13783, irq);
+ mc13xxx_irq_ack(priv->mc13xxx, irq);
/*
* Kick off reading coordinates. Note that if work happens already
@@ -121,10 +121,10 @@ static void mc13783_ts_work(struct work_struct *work)
{
struct mc13783_ts_priv *priv =
container_of(work, struct mc13783_ts_priv, work.work);
- unsigned int mode = MC13783_ADC_MODE_TS;
+ unsigned int mode = MC13XXX_ADC_MODE_TS;
unsigned int channel = 12;
- if (mc13783_adc_do_conversion(priv->mc13783,
+ if (mc13xxx_adc_do_conversion(priv->mc13xxx,
mode, channel, priv->sample) == 0)
mc13783_ts_report_sample(priv);
}
@@ -134,21 +134,21 @@ static int mc13783_ts_open(struct input_dev *dev)
struct mc13783_ts_priv *priv = input_get_drvdata(dev);
int ret;
- mc13783_lock(priv->mc13783);
+ mc13xxx_lock(priv->mc13xxx);
- mc13783_irq_ack(priv->mc13783, MC13783_IRQ_TS);
+ mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_TS);
- ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_TS,
+ ret = mc13xxx_irq_request(priv->mc13xxx, MC13XXX_IRQ_TS,
mc13783_ts_handler, MC13783_TS_NAME, priv);
if (ret)
goto out;
- ret = mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
- MC13783_ADC0_TSMOD_MASK, MC13783_ADC0_TSMOD0);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, MC13XXX_ADC0,
+ MC13XXX_ADC0_TSMOD_MASK, MC13XXX_ADC0_TSMOD0);
if (ret)
- mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
+ mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_TS, priv);
out:
- mc13783_unlock(priv->mc13783);
+ mc13xxx_unlock(priv->mc13xxx);
return ret;
}
@@ -156,11 +156,11 @@ static void mc13783_ts_close(struct input_dev *dev)
{
struct mc13783_ts_priv *priv = input_get_drvdata(dev);
- mc13783_lock(priv->mc13783);
- mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
- MC13783_ADC0_TSMOD_MASK, 0);
- mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
- mc13783_unlock(priv->mc13783);
+ mc13xxx_lock(priv->mc13xxx);
+ mc13xxx_reg_rmw(priv->mc13xxx, MC13XXX_ADC0,
+ MC13XXX_ADC0_TSMOD_MASK, 0);
+ mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_TS, priv);
+ mc13xxx_unlock(priv->mc13xxx);
cancel_delayed_work_sync(&priv->work);
}
@@ -177,7 +177,7 @@ static int __init mc13783_ts_probe(struct platform_device *pdev)
goto err_free_mem;
INIT_DELAYED_WORK(&priv->work, mc13783_ts_work);
- priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
+ priv->mc13xxx = dev_get_drvdata(pdev->dev.parent);
priv->idev = idev;
/*
diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c
index c7f9cebebbb..4c012fb2b01 100644
--- a/drivers/input/touchscreen/penmount.c
+++ b/drivers/input/touchscreen/penmount.c
@@ -2,6 +2,7 @@
* Penmount serial touchscreen driver
*
* Copyright (c) 2006 Rick Koch <n1gp@hotmail.com>
+ * Copyright (c) 2011 John Sung <penmount.touch@gmail.com>
*
* Based on ELO driver (drivers/input/touchscreen/elo.c)
* Copyright (c) 2004 Vojtech Pavlik
@@ -18,12 +19,14 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/init.h>
-#define DRIVER_DESC "Penmount serial touchscreen driver"
+#define DRIVER_DESC "PenMount serial touchscreen driver"
MODULE_AUTHOR("Rick Koch <n1gp@hotmail.com>");
+MODULE_AUTHOR("John Sung <penmount.touch@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
@@ -31,7 +34,19 @@ MODULE_LICENSE("GPL");
* Definitions & global arrays.
*/
-#define PM_MAX_LENGTH 5
+#define PM_MAX_LENGTH 6
+#define PM_MAX_MTSLOT 16
+#define PM_3000_MTSLOT 2
+#define PM_6250_MTSLOT 12
+
+/*
+ * Multi-touch slot
+ */
+
+struct mt_slot {
+ unsigned short x, y;
+ bool active; /* is the touch valid? */
+};
/*
* Per-touchscreen data.
@@ -43,25 +58,119 @@ struct pm {
int idx;
unsigned char data[PM_MAX_LENGTH];
char phys[32];
+ unsigned char packetsize;
+ unsigned char maxcontacts;
+ struct mt_slot slots[PM_MAX_MTSLOT];
+ void (*parse_packet)(struct pm *);
};
-static irqreturn_t pm_interrupt(struct serio *serio,
- unsigned char data, unsigned int flags)
+/*
+ * pm_mtevent() sends mt events and also emulates pointer movement
+ */
+
+static void pm_mtevent(struct pm *pm, struct input_dev *input)
+{
+ int i;
+
+ for (i = 0; i < pm->maxcontacts; ++i) {
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER,
+ pm->slots[i].active);
+ if (pm->slots[i].active) {
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, pm->slots[i].x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, pm->slots[i].y);
+ }
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+ input_sync(input);
+}
+
+/*
+ * pm_checkpacket() checks if the data packet is valid
+ */
+
+static bool pm_checkpacket(unsigned char *packet)
+{
+ int total = 0;
+ int i;
+
+ for (i = 0; i < 5; i++)
+ total += packet[i];
+
+ return packet[5] == (unsigned char)~(total & 0xff);
+}
+
+static void pm_parse_9000(struct pm *pm)
{
- struct pm *pm = serio_get_drvdata(serio);
struct input_dev *dev = pm->dev;
- pm->data[pm->idx] = data;
+ if ((pm->data[0] & 0x80) && pm->packetsize == ++pm->idx) {
+ input_report_abs(dev, ABS_X, pm->data[1] * 128 + pm->data[2]);
+ input_report_abs(dev, ABS_Y, pm->data[3] * 128 + pm->data[4]);
+ input_report_key(dev, BTN_TOUCH, !!(pm->data[0] & 0x40));
+ input_sync(dev);
+ pm->idx = 0;
+ }
+}
- if (pm->data[0] & 0x80) {
- if (PM_MAX_LENGTH == ++pm->idx) {
- input_report_abs(dev, ABS_X, pm->data[2] * 128 + pm->data[1]);
- input_report_abs(dev, ABS_Y, pm->data[4] * 128 + pm->data[3]);
- input_report_key(dev, BTN_TOUCH, !!(pm->data[0] & 0x40));
+static void pm_parse_6000(struct pm *pm)
+{
+ struct input_dev *dev = pm->dev;
+
+ if ((pm->data[0] & 0xbf) == 0x30 && pm->packetsize == ++pm->idx) {
+ if (pm_checkpacket(pm->data)) {
+ input_report_abs(dev, ABS_X,
+ pm->data[2] * 256 + pm->data[1]);
+ input_report_abs(dev, ABS_Y,
+ pm->data[4] * 256 + pm->data[3]);
+ input_report_key(dev, BTN_TOUCH, pm->data[0] & 0x40);
input_sync(dev);
- pm->idx = 0;
}
+ pm->idx = 0;
+ }
+}
+
+static void pm_parse_3000(struct pm *pm)
+{
+ struct input_dev *dev = pm->dev;
+
+ if ((pm->data[0] & 0xce) == 0x40 && pm->packetsize == ++pm->idx) {
+ if (pm_checkpacket(pm->data)) {
+ int slotnum = pm->data[0] & 0x0f;
+ pm->slots[slotnum].active = pm->data[0] & 0x30;
+ pm->slots[slotnum].x = pm->data[2] * 256 + pm->data[1];
+ pm->slots[slotnum].y = pm->data[4] * 256 + pm->data[3];
+ pm_mtevent(pm, dev);
+ }
+ pm->idx = 0;
+ }
+}
+
+static void pm_parse_6250(struct pm *pm)
+{
+ struct input_dev *dev = pm->dev;
+
+ if ((pm->data[0] & 0xb0) == 0x30 && pm->packetsize == ++pm->idx) {
+ if (pm_checkpacket(pm->data)) {
+ int slotnum = pm->data[0] & 0x0f;
+ pm->slots[slotnum].active = pm->data[0] & 0x40;
+ pm->slots[slotnum].x = pm->data[2] * 256 + pm->data[1];
+ pm->slots[slotnum].y = pm->data[4] * 256 + pm->data[3];
+ pm_mtevent(pm, dev);
+ }
+ pm->idx = 0;
}
+}
+
+static irqreturn_t pm_interrupt(struct serio *serio,
+ unsigned char data, unsigned int flags)
+{
+ struct pm *pm = serio_get_drvdata(serio);
+
+ pm->data[pm->idx] = data;
+
+ pm->parse_packet(pm);
return IRQ_HANDLED;
}
@@ -74,17 +183,17 @@ static void pm_disconnect(struct serio *serio)
{
struct pm *pm = serio_get_drvdata(serio);
- input_get_device(pm->dev);
- input_unregister_device(pm->dev);
serio_close(serio);
- serio_set_drvdata(serio, NULL);
- input_put_device(pm->dev);
+
+ input_unregister_device(pm->dev);
kfree(pm);
+
+ serio_set_drvdata(serio, NULL);
}
/*
* pm_connect() is the routine that is called when someone adds a
- * new serio device that supports Gunze protocol and registers it as
+ * new serio device that supports PenMount protocol and registers it as
* an input device.
*/
@@ -92,6 +201,7 @@ static int pm_connect(struct serio *serio, struct serio_driver *drv)
{
struct pm *pm;
struct input_dev *input_dev;
+ int max_x, max_y;
int err;
pm = kzalloc(sizeof(struct pm), GFP_KERNEL);
@@ -104,8 +214,9 @@ static int pm_connect(struct serio *serio, struct serio_driver *drv)
pm->serio = serio;
pm->dev = input_dev;
snprintf(pm->phys, sizeof(pm->phys), "%s/input0", serio->phys);
+ pm->maxcontacts = 1;
- input_dev->name = "Penmount Serial TouchScreen";
+ input_dev->name = "PenMount Serial TouchScreen";
input_dev->phys = pm->phys;
input_dev->id.bustype = BUS_RS232;
input_dev->id.vendor = SERIO_PENMOUNT;
@@ -113,10 +224,52 @@ static int pm_connect(struct serio *serio, struct serio_driver *drv)
input_dev->id.version = 0x0100;
input_dev->dev.parent = &serio->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(pm->dev, ABS_X, 0, 0x3ff, 0, 0);
- input_set_abs_params(pm->dev, ABS_Y, 0, 0x3ff, 0, 0);
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ switch (serio->id.id) {
+ default:
+ case 0:
+ pm->packetsize = 5;
+ pm->parse_packet = pm_parse_9000;
+ input_dev->id.product = 0x9000;
+ max_x = max_y = 0x3ff;
+ break;
+
+ case 1:
+ pm->packetsize = 6;
+ pm->parse_packet = pm_parse_6000;
+ input_dev->id.product = 0x6000;
+ max_x = max_y = 0x3ff;
+ break;
+
+ case 2:
+ pm->packetsize = 6;
+ pm->parse_packet = pm_parse_3000;
+ input_dev->id.product = 0x3000;
+ max_x = max_y = 0x7ff;
+ pm->maxcontacts = PM_3000_MTSLOT;
+ break;
+
+ case 3:
+ pm->packetsize = 6;
+ pm->parse_packet = pm_parse_6250;
+ input_dev->id.product = 0x6250;
+ max_x = max_y = 0x3ff;
+ pm->maxcontacts = PM_6250_MTSLOT;
+ break;
+ }
+
+ input_set_abs_params(pm->dev, ABS_X, 0, max_x, 0, 0);
+ input_set_abs_params(pm->dev, ABS_Y, 0, max_y, 0, 0);
+
+ if (pm->maxcontacts > 1) {
+ input_mt_init_slots(pm->dev, pm->maxcontacts);
+ input_set_abs_params(pm->dev,
+ ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(pm->dev,
+ ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+ }
serio_set_drvdata(serio, pm);
@@ -155,7 +308,7 @@ MODULE_DEVICE_TABLE(serio, pm_serio_ids);
static struct serio_driver pm_drv = {
.driver = {
- .name = "penmountlpc",
+ .name = "serio-penmount",
},
.description = DRIVER_DESC,
.id_table = pm_serio_ids,
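pm_mtevent() above uses the MT slots protocol: each contact is reported in its own slot, and pointer emulation keeps single-touch userspace working. A minimal sketch of the setup/report pairing, using the two-argument input_mt_init_slots() of this kernel generation; names are illustrative and error handling is omitted:

/* Sketch of MT-slot setup and reporting (illustrative names). */
#include <linux/input.h>
#include <linux/input/mt.h>

static void demo_mt_setup(struct input_dev *dev, int contacts, int max_xy)
{
	input_mt_init_slots(dev, contacts);	/* return value ignored here */
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
}

static void demo_mt_report(struct input_dev *dev, int slot, bool down,
			   int x, int y)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, down);
	if (down) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
	input_mt_report_pointer_emulation(dev, true);
	input_sync(dev);
}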
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 8feb7f3c8be..64ce697a345 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -328,7 +328,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
ts.shift = info->oversampling_shift;
ts.features = platform_get_device_id(pdev)->driver_data;
- ret = request_irq(ts.irq_tc, stylus_irq, IRQF_DISABLED,
+ ret = request_irq(ts.irq_tc, stylus_irq, 0,
"s3c2410_ts_pen", ts.input);
if (ret) {
dev_err(dev, "cannot get TC interrupt\n");
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index fadc11545b1..1f674cb6c55 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -66,7 +66,6 @@ struct ts_event {
struct tsc2007 {
struct input_dev *input;
char phys[32];
- struct delayed_work work;
struct i2c_client *client;
@@ -76,9 +75,11 @@ struct tsc2007 {
unsigned long poll_delay;
unsigned long poll_period;
- bool pendown;
int irq;
+ wait_queue_head_t wait;
+ bool stopped;
+
int (*get_pendown_state)(void);
void (*clear_penirq)(void);
};
@@ -141,25 +142,8 @@ static u32 tsc2007_calculate_pressure(struct tsc2007 *tsc, struct ts_event *tc)
return rt;
}
-static void tsc2007_send_up_event(struct tsc2007 *tsc)
+static bool tsc2007_is_pen_down(struct tsc2007 *ts)
{
- struct input_dev *input = tsc->input;
-
- dev_dbg(&tsc->client->dev, "UP\n");
-
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_PRESSURE, 0);
- input_sync(input);
-}
-
-static void tsc2007_work(struct work_struct *work)
-{
- struct tsc2007 *ts =
- container_of(to_delayed_work(work), struct tsc2007, work);
- bool debounced = false;
- struct ts_event tc;
- u32 rt;
-
/*
* NOTE: We can't rely on the pressure to determine the pen down
* state, even though this controller has a pressure sensor.
@@ -170,79 +154,82 @@ static void tsc2007_work(struct work_struct *work)
* The only safe way to check for the pen up condition is in the
* work function by reading the pen signal state (it's a GPIO
* and IRQ). Unfortunately such callback is not always available,
- * in that case we have rely on the pressure anyway.
+ * in that case we assume that the pen is down and expect the caller
+ * to fall back on the pressure reading.
*/
- if (ts->get_pendown_state) {
- if (unlikely(!ts->get_pendown_state())) {
- tsc2007_send_up_event(ts);
- ts->pendown = false;
- goto out;
- }
- dev_dbg(&ts->client->dev, "pen is still down\n");
- }
+ if (!ts->get_pendown_state)
+ return true;
+
+ return ts->get_pendown_state();
+}
+
+static irqreturn_t tsc2007_soft_irq(int irq, void *handle)
+{
+ struct tsc2007 *ts = handle;
+ struct input_dev *input = ts->input;
+ struct ts_event tc;
+ u32 rt;
- tsc2007_read_values(ts, &tc);
+ while (!ts->stopped && tsc2007_is_pen_down(ts)) {
- rt = tsc2007_calculate_pressure(ts, &tc);
- if (rt > ts->max_rt) {
- /*
- * Sample found inconsistent by debouncing or pressure is
- * beyond the maximum. Don't report it to user space,
- * repeat at least once more the measurement.
- */
- dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt);
- debounced = true;
- goto out;
+ /* pen is down, continue with the measurement */
+ tsc2007_read_values(ts, &tc);
- }
+ rt = tsc2007_calculate_pressure(ts, &tc);
- if (rt) {
- struct input_dev *input = ts->input;
+ if (rt == 0 && !ts->get_pendown_state) {
+ /*
+ * If pressure reported is 0 and we don't have
+ * callback to check pendown state, we have to
+ * assume that pen was lifted up.
+ */
+ break;
+ }
- if (!ts->pendown) {
- dev_dbg(&ts->client->dev, "DOWN\n");
+ if (rt <= ts->max_rt) {
+ dev_dbg(&ts->client->dev,
+ "DOWN point(%4d,%4d), pressure (%4u)\n",
+ tc.x, tc.y, rt);
input_report_key(input, BTN_TOUCH, 1);
- ts->pendown = true;
+ input_report_abs(input, ABS_X, tc.x);
+ input_report_abs(input, ABS_Y, tc.y);
+ input_report_abs(input, ABS_PRESSURE, rt);
+
+ input_sync(input);
+
+ } else {
+ /*
+ * The sample was found inconsistent by debouncing, or the
+ * pressure is beyond the maximum. Don't report it to user
+ * space; repeat the measurement at least once more.
+ */
+ dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt);
}
- input_report_abs(input, ABS_X, tc.x);
- input_report_abs(input, ABS_Y, tc.y);
- input_report_abs(input, ABS_PRESSURE, rt);
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(ts->poll_period));
+ }
- input_sync(input);
+ dev_dbg(&ts->client->dev, "UP\n");
- dev_dbg(&ts->client->dev, "point(%4d,%4d), pressure (%4u)\n",
- tc.x, tc.y, rt);
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_sync(input);
- } else if (!ts->get_pendown_state && ts->pendown) {
- /*
- * We don't have callback to check pendown state, so we
- * have to assume that since pressure reported is 0 the
- * pen was lifted up.
- */
- tsc2007_send_up_event(ts);
- ts->pendown = false;
- }
+ if (ts->clear_penirq)
+ ts->clear_penirq();
- out:
- if (ts->pendown || debounced)
- schedule_delayed_work(&ts->work,
- msecs_to_jiffies(ts->poll_period));
- else
- enable_irq(ts->irq);
+ return IRQ_HANDLED;
}
-static irqreturn_t tsc2007_irq(int irq, void *handle)
+static irqreturn_t tsc2007_hard_irq(int irq, void *handle)
{
struct tsc2007 *ts = handle;
- if (!ts->get_pendown_state || likely(ts->get_pendown_state())) {
- disable_irq_nosync(ts->irq);
- schedule_delayed_work(&ts->work,
- msecs_to_jiffies(ts->poll_delay));
- }
+ if (!ts->get_pendown_state || likely(ts->get_pendown_state()))
+ return IRQ_WAKE_THREAD;
if (ts->clear_penirq)
ts->clear_penirq();
@@ -250,17 +237,40 @@ static irqreturn_t tsc2007_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static void tsc2007_free_irq(struct tsc2007 *ts)
+static void tsc2007_stop(struct tsc2007 *ts)
{
- free_irq(ts->irq, ts);
- if (cancel_delayed_work_sync(&ts->work)) {
- /*
- * Work was pending, therefore we need to enable
- * IRQ here to balance the disable_irq() done in the
- * interrupt handler.
- */
- enable_irq(ts->irq);
+ ts->stopped = true;
+ mb();
+ wake_up(&ts->wait);
+
+ disable_irq(ts->irq);
+}
+
+static int tsc2007_open(struct input_dev *input_dev)
+{
+ struct tsc2007 *ts = input_get_drvdata(input_dev);
+ int err;
+
+ ts->stopped = false;
+ mb();
+
+ enable_irq(ts->irq);
+
+ /* Prepare for touch readings - power down ADC and enable PENIRQ */
+ err = tsc2007_xfer(ts, PWRDOWN);
+ if (err < 0) {
+ tsc2007_stop(ts);
+ return err;
}
+
+ return 0;
+}
+
+static void tsc2007_close(struct input_dev *input_dev)
+{
+ struct tsc2007 *ts = input_get_drvdata(input_dev);
+
+ tsc2007_stop(ts);
}
static int __devinit tsc2007_probe(struct i2c_client *client,
@@ -290,7 +300,7 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
ts->client = client;
ts->irq = client->irq;
ts->input = input_dev;
- INIT_DELAYED_WORK(&ts->work, tsc2007_work);
+ init_waitqueue_head(&ts->wait);
ts->model = pdata->model;
ts->x_plate_ohms = pdata->x_plate_ohms;
@@ -300,6 +310,12 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
ts->get_pendown_state = pdata->get_pendown_state;
ts->clear_penirq = pdata->clear_penirq;
+ if (pdata->x_plate_ohms == 0) {
+ dev_err(&client->dev, "x_plate_ohms is not set up in platform data");
+ err = -EINVAL;
+ goto err_free_mem;
+ }
+
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
@@ -307,6 +323,11 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
input_dev->phys = ts->phys;
input_dev->id.bustype = BUS_I2C;
+ input_dev->open = tsc2007_open;
+ input_dev->close = tsc2007_close;
+
+ input_set_drvdata(input_dev, ts);
+
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
@@ -318,17 +339,14 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
if (pdata->init_platform_hw)
pdata->init_platform_hw();
- err = request_irq(ts->irq, tsc2007_irq, 0,
- client->dev.driver->name, ts);
+ err = request_threaded_irq(ts->irq, tsc2007_hard_irq, tsc2007_soft_irq,
+ IRQF_ONESHOT, client->dev.driver->name, ts);
if (err < 0) {
dev_err(&client->dev, "irq %d busy?\n", ts->irq);
goto err_free_mem;
}
- /* Prepare for touch readings - power down ADC and enable PENIRQ */
- err = tsc2007_xfer(ts, PWRDOWN);
- if (err < 0)
- goto err_free_irq;
+ tsc2007_stop(ts);
err = input_register_device(input_dev);
if (err)
@@ -339,7 +357,7 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
return 0;
err_free_irq:
- tsc2007_free_irq(ts);
+ free_irq(ts->irq, ts);
if (pdata->exit_platform_hw)
pdata->exit_platform_hw();
err_free_mem:
@@ -353,7 +371,7 @@ static int __devexit tsc2007_remove(struct i2c_client *client)
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
- tsc2007_free_irq(ts);
+ free_irq(ts->irq, ts);
if (pdata->exit_platform_hw)
pdata->exit_platform_hw();
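The tsc2007 changes above replace the delayed-work polling loop with a threaded interrupt: the hard handler only checks the pen-down state and returns IRQ_WAKE_THREAD, the thread polls the ADC until the pen goes up, and IRQF_ONESHOT keeps the line masked until the thread returns. A minimal sketch of that split, with illustrative names:

/* Sketch of the hard-irq / threaded-irq split (illustrative names). */
#include <linux/interrupt.h>

static irqreturn_t demo_hard_irq(int irq, void *handle)
{
	/* runs in hard-irq context: keep it short, just decide */
	return IRQ_WAKE_THREAD;		/* defer the real work */
}

static irqreturn_t demo_thread_fn(int irq, void *handle)
{
	/* runs in process context: may sleep, do I2C transfers, poll */
	return IRQ_HANDLED;
}

/* IRQF_ONESHOT keeps the interrupt masked until demo_thread_fn() returns:
 *	err = request_threaded_irq(irq, demo_hard_irq, demo_thread_fn,
 *				   IRQF_ONESHOT, "demo", priv);
 */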
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
new file mode 100644
index 00000000000..29d5ed4dd31
--- /dev/null
+++ b/drivers/input/touchscreen/tsc40.c
@@ -0,0 +1,184 @@
+/*
+ * TSC-40 serial touchscreen driver. It should be compatible with
+ * TSC-10 and 25.
+ *
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * License: GPLv2 as published by the FSF.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/init.h>
+
+#define PACKET_LENGTH 5
+struct tsc_ser {
+ struct input_dev *dev;
+ struct serio *serio;
+ u32 idx;
+ unsigned char data[PACKET_LENGTH];
+ char phys[32];
+};
+
+static void tsc_process_data(struct tsc_ser *ptsc)
+{
+ struct input_dev *dev = ptsc->dev;
+ u8 *data = ptsc->data;
+ u32 x;
+ u32 y;
+
+ x = ((data[1] & 0x03) << 8) | data[2];
+ y = ((data[3] & 0x03) << 8) | data[4];
+
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ input_report_key(dev, BTN_TOUCH, 1);
+
+ input_sync(dev);
+}
+
+static irqreturn_t tsc_interrupt(struct serio *serio,
+ unsigned char data, unsigned int flags)
+{
+ struct tsc_ser *ptsc = serio_get_drvdata(serio);
+ struct input_dev *dev = ptsc->dev;
+
+ ptsc->data[ptsc->idx] = data;
+ switch (ptsc->idx++) {
+ case 0:
+ if (unlikely((data & 0x3e) != 0x10)) {
+ dev_dbg(&serio->dev,
+ "unsynchronized packet start (0x%02x)\n", data);
+ ptsc->idx = 0;
+ } else if (!(data & 0x01)) {
+ input_report_key(dev, BTN_TOUCH, 0);
+ input_sync(dev);
+ ptsc->idx = 0;
+ }
+ break;
+
+ case 1:
+ case 3:
+ if (unlikely(data & 0xfc)) {
+ dev_dbg(&serio->dev,
+ "unsynchronized data 0x%02x at offset %d\n",
+ data, ptsc->idx - 1);
+ ptsc->idx = 0;
+ }
+ break;
+
+ case 4:
+ tsc_process_data(ptsc);
+ ptsc->idx = 0;
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tsc_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct tsc_ser *ptsc;
+ struct input_dev *input_dev;
+ int error;
+
+ ptsc = kzalloc(sizeof(struct tsc_ser), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ptsc || !input_dev) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+
+ ptsc->serio = serio;
+ ptsc->dev = input_dev;
+ snprintf(ptsc->phys, sizeof(ptsc->phys), "%s/input0", serio->phys);
+
+ input_dev->name = "TSC-10/25/40 Serial TouchScreen";
+ input_dev->phys = ptsc->phys;
+ input_dev->id.bustype = BUS_RS232;
+ input_dev->id.vendor = SERIO_TSC40;
+ input_dev->id.product = 40;
+ input_dev->id.version = 0x0001;
+ input_dev->dev.parent = &serio->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+ input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0);
+ input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0);
+ input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0);
+
+ serio_set_drvdata(serio, ptsc);
+
+ error = serio_open(serio, drv);
+ if (error)
+ goto fail2;
+
+ error = input_register_device(ptsc->dev);
+ if (error)
+ goto fail3;
+
+ return 0;
+
+fail3:
+ serio_close(serio);
+fail2:
+ serio_set_drvdata(serio, NULL);
+fail1:
+ input_free_device(input_dev);
+ kfree(ptsc);
+ return error;
+}
+
+static void tsc_disconnect(struct serio *serio)
+{
+ struct tsc_ser *ptsc = serio_get_drvdata(serio);
+
+ serio_close(serio);
+
+ input_unregister_device(ptsc->dev);
+ kfree(ptsc);
+
+ serio_set_drvdata(serio, NULL);
+}
+
+static struct serio_device_id tsc_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_TSC40,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(serio, tsc_serio_ids);
+
+#define DRIVER_DESC "TSC-10/25/40 serial touchscreen driver"
+
+static struct serio_driver tsc_drv = {
+ .driver = {
+ .name = "tsc40",
+ },
+ .description = DRIVER_DESC,
+ .id_table = tsc_serio_ids,
+ .interrupt = tsc_interrupt,
+ .connect = tsc_connect,
+ .disconnect = tsc_disconnect,
+};
+
+static int __init tsc_ser_init(void)
+{
+ return serio_register_driver(&tsc_drv);
+}
+module_init(tsc_ser_init);
+
+static void __exit tsc_exit(void)
+{
+ serio_unregister_driver(&tsc_drv);
+}
+module_exit(tsc_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 7a45d68c351..217aa51135c 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -279,7 +279,7 @@ static int __devinit w90x900ts_probe(struct platform_device *pdev)
w90p910_ts->irq_num = platform_get_irq(pdev, 0);
if (request_irq(w90p910_ts->irq_num, w90p910_ts_interrupt,
- IRQF_DISABLED, "w90p910ts", w90p910_ts)) {
+ 0, "w90p910ts", w90p910_ts)) {
err = -EBUSY;
goto fail4;
}
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 9941d39df43..1569a3934ab 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -367,6 +367,20 @@ static int w8001_command(struct w8001 *w8001, unsigned char command,
return rc;
}
+static int w8001_open(struct input_dev *dev)
+{
+ struct w8001 *w8001 = input_get_drvdata(dev);
+
+ return w8001_command(w8001, W8001_CMD_START, false);
+}
+
+static void w8001_close(struct input_dev *dev)
+{
+ struct w8001 *w8001 = input_get_drvdata(dev);
+
+ w8001_command(w8001, W8001_CMD_STOP, false);
+}
+
static int w8001_setup(struct w8001 *w8001)
{
struct input_dev *dev = w8001->dev;
@@ -476,7 +490,7 @@ static int w8001_setup(struct w8001 *w8001)
strlcat(w8001->name, " Touchscreen", sizeof(w8001->name));
- return w8001_command(w8001, W8001_CMD_START, false);
+ return 0;
}
/*
@@ -487,12 +501,12 @@ static void w8001_disconnect(struct serio *serio)
{
struct w8001 *w8001 = serio_get_drvdata(serio);
- input_get_device(w8001->dev);
- input_unregister_device(w8001->dev);
serio_close(serio);
- serio_set_drvdata(serio, NULL);
- input_put_device(w8001->dev);
+
+ input_unregister_device(w8001->dev);
kfree(w8001);
+
+ serio_set_drvdata(serio, NULL);
}
/*
@@ -536,6 +550,11 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
input_dev->id.version = 0x0100;
input_dev->dev.parent = &serio->dev;
+ input_dev->open = w8001_open;
+ input_dev->close = w8001_close;
+
+ input_set_drvdata(input_dev, w8001);
+
err = input_register_device(w8001->dev);
if (err)
goto fail3;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 7d7eaa15e77..5414253b185 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -112,4 +112,23 @@ config IRQ_REMAP
To use x2apic mode in the CPUs which support x2APIC enhancements or
to support platforms with CPUs having > 8 bit APIC ID, say Y.
+# OMAP IOMMU support
+config OMAP_IOMMU
+ bool "OMAP IOMMU Support"
+ depends on ARCH_OMAP
+ select IOMMU_API
+
+config OMAP_IOVMM
+ tristate "OMAP IO Virtual Memory Manager Support"
+ depends on OMAP_IOMMU
+
+config OMAP_IOMMU_DEBUG
+ tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
+ depends on OMAP_IOVMM && DEBUG_FS
+ help
+ Select this to see extensive information about
+ the internal state of OMAP IOMMU/IOVMM in debugfs.
+
+ Say N unless you know you need this.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 6394994a2b9..2f4448794bc 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -4,3 +4,6 @@ obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
+obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
+obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
+obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0e4227f457a..4ee277a8521 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1283,7 +1283,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
- dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+ dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
}
update_domain(&dma_dom->domain);
@@ -2495,7 +2495,7 @@ static unsigned device_dma_ops_init(void)
void __init amd_iommu_init_api(void)
{
- register_iommu(&amd_iommu_ops);
+ bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
}
int __init amd_iommu_init_dma_ops(void)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index be1953c239b..c0c7820d4c4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -3642,7 +3643,7 @@ int __init intel_iommu_init(void)
init_iommu_pm_ops();
- register_iommu(&intel_iommu_ops);
+ bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
bus_register_notifier(&pci_bus_type, &device_nb);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6e6b6a11b3c..2fb2963df55 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
@@ -23,32 +25,78 @@
#include <linux/errno.h>
#include <linux/iommu.h>
-static struct iommu_ops *iommu_ops;
+static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
+{
+}
-void register_iommu(struct iommu_ops *ops)
+/**
+ * bus_set_iommu - set iommu-callbacks for the bus
+ * @bus: bus.
+ * @ops: the callbacks provided by the iommu-driver
+ *
+ * This function is called by an iommu driver to set the iommu methods
+ * used for a particular bus. Drivers for devices on that bus can use
+ * the iommu-api after these ops are registered.
+ * This special function is needed because IOMMUs are usually devices on
+ * the bus itself, so the iommu drivers are not initialized when the bus
+ * is set up. With this function the iommu-driver can set the iommu-ops
+ * afterwards.
+ */
+int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
- if (iommu_ops)
- BUG();
+ if (bus->iommu_ops != NULL)
+ return -EBUSY;
+
+ bus->iommu_ops = ops;
+
+ /* Do IOMMU specific setup for this bus-type */
+ iommu_bus_init(bus, ops);
- iommu_ops = ops;
+ return 0;
}
+EXPORT_SYMBOL_GPL(bus_set_iommu);
-bool iommu_found(void)
+bool iommu_present(struct bus_type *bus)
{
- return iommu_ops != NULL;
+ return bus->iommu_ops != NULL;
}
-EXPORT_SYMBOL_GPL(iommu_found);
+EXPORT_SYMBOL_GPL(iommu_present);
-struct iommu_domain *iommu_domain_alloc(void)
+/**
+ * iommu_set_fault_handler() - set a fault handler for an iommu domain
+ * @domain: iommu domain
+ * @handler: fault handler
+ *
+ * This function should be used by IOMMU users that want to be notified
+ * whenever an IOMMU fault happens.
+ *
+ * The fault handler itself should return 0 on success, and an appropriate
+ * error code otherwise.
+ */
+void iommu_set_fault_handler(struct iommu_domain *domain,
+ iommu_fault_handler_t handler)
+{
+ BUG_ON(!domain);
+
+ domain->handler = handler;
+}
+EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
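A sketch of how a client would use the new fault-reporting hook; the handler prototype (domain, device, faulting iova, flags) is assumed from this series and the names are illustrative:

/* Sketch: registering an IOMMU fault handler (prototype assumed). */
static int demo_iommu_fault(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags)
{
	dev_err(dev, "iommu fault at 0x%lx, flags 0x%x\n", iova, flags);
	return -ENOSYS;		/* not handled here */
}

/*	iommu_set_fault_handler(domain, demo_iommu_fault);	*/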
+
+struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
struct iommu_domain *domain;
int ret;
+ if (bus == NULL || bus->iommu_ops == NULL)
+ return NULL;
+
domain = kmalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
- ret = iommu_ops->domain_init(domain);
+ domain->ops = bus->iommu_ops;
+
+ ret = domain->ops->domain_init(domain);
if (ret)
goto out_free;
@@ -63,62 +111,78 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
- iommu_ops->domain_destroy(domain);
+ if (likely(domain->ops->domain_destroy != NULL))
+ domain->ops->domain_destroy(domain);
+
kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
- return iommu_ops->attach_dev(domain, dev);
+ if (unlikely(domain->ops->attach_dev == NULL))
+ return -ENODEV;
+
+ return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
- iommu_ops->detach_dev(domain, dev);
+ if (unlikely(domain->ops->detach_dev == NULL))
+ return;
+
+ domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova)
{
- return iommu_ops->iova_to_phys(domain, iova);
+ if (unlikely(domain->ops->iova_to_phys == NULL))
+ return 0;
+
+ return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
{
- return iommu_ops->domain_has_cap(domain, cap);
+ if (unlikely(domain->ops->domain_has_cap == NULL))
+ return 0;
+
+ return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot)
{
- unsigned long invalid_mask;
size_t size;
- size = 0x1000UL << gfp_order;
- invalid_mask = size - 1;
+ if (unlikely(domain->ops->map == NULL))
+ return -ENODEV;
- BUG_ON((iova | paddr) & invalid_mask);
+ size = PAGE_SIZE << gfp_order;
- return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
+ BUG_ON(!IS_ALIGNED(iova | paddr, size));
+
+ return domain->ops->map(domain, iova, paddr, gfp_order, prot);
}
EXPORT_SYMBOL_GPL(iommu_map);
int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
{
- unsigned long invalid_mask;
size_t size;
- size = 0x1000UL << gfp_order;
- invalid_mask = size - 1;
+ if (unlikely(domain->ops->unmap == NULL))
+ return -ENODEV;
+
+ size = PAGE_SIZE << gfp_order;
- BUG_ON(iova & invalid_mask);
+ BUG_ON(!IS_ALIGNED(iova, size));
- return iommu_ops->unmap(domain, iova, gfp_order);
+ return domain->ops->unmap(domain, iova, gfp_order);
}
EXPORT_SYMBOL_GPL(iommu_unmap);
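With iommu_ops now attached to a bus_type, an IOMMU driver registers its callbacks against the bus its devices live on (as the AMD, Intel and MSM conversions in this patch do), and API users name the bus when allocating a domain. A minimal sketch, with an illustrative ops structure:

/* Sketch of per-bus IOMMU registration and use (illustrative names). */
#include <linux/iommu.h>
#include <linux/pci.h>

static struct iommu_ops demo_iommu_ops;	/* .domain_init, .attach_dev, ... */

static int demo_iommu_register(void)
{
	return bus_set_iommu(&pci_bus_type, &demo_iommu_ops);
}

static struct iommu_domain *demo_get_domain(void)
{
	if (!iommu_present(&pci_bus_type))
		return NULL;

	return iommu_domain_alloc(&pci_bus_type);
}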
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 1a584e077c6..5865dd2e28f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -543,6 +543,13 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
}
ret = __flush_iotlb(domain);
+
+ /*
+ * the IOMMU API requires us to return the order of the unmapped
+ * page (on success).
+ */
+ if (!ret)
+ ret = order;
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
@@ -721,7 +728,7 @@ static void __init setup_iommu_tex_classes(void)
static int __init msm_iommu_init(void)
{
setup_iommu_tex_classes();
- register_iommu(&msm_iommu_ops);
+ bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
return 0;
}
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
new file mode 100644
index 00000000000..288da5c1499
--- /dev/null
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -0,0 +1,419 @@
+/*
+ * omap iommu: debugfs interface
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+
+#include <plat/iommu.h>
+#include <plat/iovmm.h>
+
+#include <plat/iopgtable.h>
+
+#define MAXCOLUMN 100 /* for short messages */
+
+static DEFINE_MUTEX(iommu_debug_lock);
+
+static struct dentry *iommu_debug_root;
+
+static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u32 ver = omap_iommu_arch_version();
+ char buf[MAXCOLUMN], *p = buf;
+
+ p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf, ver & 0xf);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+}
+
+static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct omap_iommu *obj = file->private_data;
+ char *p, *buf;
+ ssize_t bytes;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ mutex_lock(&iommu_debug_lock);
+
+ bytes = omap_iommu_dump_ctx(obj, p, count);
+ bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
+
+ mutex_unlock(&iommu_debug_lock);
+ kfree(buf);
+
+ return bytes;
+}
+
+static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct omap_iommu *obj = file->private_data;
+ char *p, *buf;
+ ssize_t bytes, rest;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ mutex_lock(&iommu_debug_lock);
+
+ p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
+ p += sprintf(p, "-----------------------------------------\n");
+ rest = count - (p - buf);
+ p += omap_dump_tlb_entries(obj, p, rest);
+
+ bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+
+ mutex_unlock(&iommu_debug_lock);
+ kfree(buf);
+
+ return bytes;
+}
+
+static ssize_t debug_write_pagetable(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct iotlb_entry e;
+ struct cr_regs cr;
+ int err;
+ struct omap_iommu *obj = file->private_data;
+ char buf[MAXCOLUMN], *p = buf;
+
+ count = min(count, sizeof(buf));
+
+ mutex_lock(&iommu_debug_lock);
+ if (copy_from_user(p, userbuf, count)) {
+ mutex_unlock(&iommu_debug_lock);
+ return -EFAULT;
+ }
+
+ sscanf(p, "%x %x", &cr.cam, &cr.ram);
+ if (!cr.cam || !cr.ram) {
+ mutex_unlock(&iommu_debug_lock);
+ return -EINVAL;
+ }
+
+ omap_iotlb_cr_to_e(&cr, &e);
+ err = omap_iopgtable_store_entry(obj, &e);
+ if (err)
+ dev_err(obj->dev, "%s: fail to store cr\n", __func__);
+
+ mutex_unlock(&iommu_debug_lock);
+ return count;
+}
+
+#define dump_ioptable_entry_one(lv, da, val) \
+ ({ \
+ int __err = 0; \
+ ssize_t bytes; \
+ const int maxcol = 22; \
+ const char *str = "%d: %08x %08x\n"; \
+ bytes = snprintf(p, maxcol, str, lv, da, val); \
+ p += bytes; \
+ len -= bytes; \
+ if (len < maxcol) \
+ __err = -ENOMEM; \
+ __err; \
+ })
+
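+/*
+ * NOTE: dump_ioptable_entry_one() expands inside dump_ioptable() below and
+ * relies on its local variables 'p' and 'len' being in scope.
+ */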
+static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+ int i;
+ u32 *iopgd;
+ char *p = buf;
+
+ spin_lock(&obj->page_table_lock);
+
+ iopgd = iopgd_offset(obj, 0);
+ for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
+ int j, err;
+ u32 *iopte;
+ u32 da;
+
+ if (!*iopgd)
+ continue;
+
+ if (!(*iopgd & IOPGD_TABLE)) {
+ da = i << IOPGD_SHIFT;
+
+ err = dump_ioptable_entry_one(1, da, *iopgd);
+ if (err)
+ goto out;
+ continue;
+ }
+
+ iopte = iopte_offset(iopgd, 0);
+
+ for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
+ if (!*iopte)
+ continue;
+
+ da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
+ err = dump_ioptable_entry_one(2, da, *iopte);
+ if (err)
+ goto out;
+ }
+ }
+out:
+ spin_unlock(&obj->page_table_lock);
+
+ return p - buf;
+}
+
+static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct omap_iommu *obj = file->private_data;
+ char *p, *buf;
+ size_t bytes;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
+ p += sprintf(p, "-----------------------------------------\n");
+
+ mutex_lock(&iommu_debug_lock);
+
+ bytes = PAGE_SIZE - (p - buf);
+ p += dump_ioptable(obj, p, bytes);
+
+ bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+
+ mutex_unlock(&iommu_debug_lock);
+ free_page((unsigned long)buf);
+
+ return bytes;
+}
+
+static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct omap_iommu *obj = file->private_data;
+ char *p, *buf;
+ struct iovm_struct *tmp;
+ int i = 0;
+ ssize_t bytes;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
+ "No", "start", "end", "size", "flags");
+ p += sprintf(p, "-------------------------------------------------\n");
+
+ mutex_lock(&iommu_debug_lock);
+
+ list_for_each_entry(tmp, &obj->mmap, list) {
+ size_t len;
+ const char *str = "%3d %08x-%08x %6x %8x\n";
+ const int maxcol = 39;
+
+ len = tmp->da_end - tmp->da_start;
+ p += snprintf(p, maxcol, str,
+ i, tmp->da_start, tmp->da_end, len, tmp->flags);
+
+ if (PAGE_SIZE - (p - buf) < maxcol)
+ break;
+ i++;
+ }
+
+ bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+
+ mutex_unlock(&iommu_debug_lock);
+ free_page((unsigned long)buf);
+
+ return bytes;
+}
+
+static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct omap_iommu *obj = file->private_data;
+ char *p, *buf;
+ struct iovm_struct *area;
+ ssize_t bytes;
+
+ count = min_t(ssize_t, count, PAGE_SIZE);
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ mutex_lock(&iommu_debug_lock);
+
+ area = omap_find_iovm_area(obj, (u32)*ppos);
+ if (IS_ERR(area)) {
+ bytes = -EINVAL;
+ goto err_out;
+ }
+ memcpy(p, area->va, count);
+ p += count;
+
+ bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+err_out:
+ mutex_unlock(&iommu_debug_lock);
+ free_page((unsigned long)buf);
+
+ return bytes;
+}
+
+static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct omap_iommu *obj = file->private_data;
+ struct iovm_struct *area;
+ char *p, *buf;
+
+ count = min_t(size_t, count, PAGE_SIZE);
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ mutex_lock(&iommu_debug_lock);
+
+ if (copy_from_user(p, userbuf, count)) {
+ count = -EFAULT;
+ goto err_out;
+ }
+
+ area = omap_find_iovm_area(obj, (u32)*ppos);
+ if (IS_ERR(area)) {
+ count = -EINVAL;
+ goto err_out;
+ }
+ memcpy(area->va, p, count);
+err_out:
+ mutex_unlock(&iommu_debug_lock);
+ free_page((unsigned long)buf);
+
+ return count;
+}
+
+static int debug_open_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUG_FOPS(name) \
+ static const struct file_operations debug_##name##_fops = { \
+ .open = debug_open_generic, \
+ .read = debug_read_##name, \
+ .write = debug_write_##name, \
+ .llseek = generic_file_llseek, \
+ };
+
+#define DEBUG_FOPS_RO(name) \
+ static const struct file_operations debug_##name##_fops = { \
+ .open = debug_open_generic, \
+ .read = debug_read_##name, \
+ .llseek = generic_file_llseek, \
+ };
+
+DEBUG_FOPS_RO(ver);
+DEBUG_FOPS_RO(regs);
+DEBUG_FOPS_RO(tlb);
+DEBUG_FOPS(pagetable);
+DEBUG_FOPS_RO(mmap);
+DEBUG_FOPS(mem);
+
+#define __DEBUG_ADD_FILE(attr, mode) \
+ { \
+ struct dentry *dent; \
+ dent = debugfs_create_file(#attr, mode, parent, \
+ obj, &debug_##attr##_fops); \
+ if (!dent) \
+ return -ENOMEM; \
+ }
+
+#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
+#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
+
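+/*
+ * NOTE: the DEBUG_ADD_FILE*() helpers expand inside iommu_debug_register()
+ * below and rely on its local variables 'obj' and 'parent' being in scope.
+ */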
+static int iommu_debug_register(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_iommu *obj = platform_get_drvdata(pdev);
+ struct dentry *d, *parent;
+
+ if (!obj || !obj->dev)
+ return -EINVAL;
+
+ d = debugfs_create_dir(obj->name, iommu_debug_root);
+ if (!d)
+ return -ENOMEM;
+ parent = d;
+
+ d = debugfs_create_u8("nr_tlb_entries", 0400, parent,
+ (u8 *)&obj->nr_tlb_entries);
+ if (!d)
+ return -ENOMEM;
+
+ DEBUG_ADD_FILE_RO(ver);
+ DEBUG_ADD_FILE_RO(regs);
+ DEBUG_ADD_FILE_RO(tlb);
+ DEBUG_ADD_FILE(pagetable);
+ DEBUG_ADD_FILE_RO(mmap);
+ DEBUG_ADD_FILE(mem);
+
+ return 0;
+}
+
+static int __init iommu_debug_init(void)
+{
+ struct dentry *d;
+ int err;
+
+ d = debugfs_create_dir("iommu", NULL);
+ if (!d)
+ return -ENOMEM;
+ iommu_debug_root = d;
+
+ err = omap_foreach_iommu_device(d, iommu_debug_register);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(iommu_debug_root);
+ return err;
+}
+module_init(iommu_debug_init)
+
+static void __exit iommu_debugfs_exit(void)
+{
+ debugfs_remove_recursive(iommu_debug_root);
+}
+module_exit(iommu_debugfs_exit)
+
+MODULE_DESCRIPTION("omap iommu: debugfs interface");
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
new file mode 100644
index 00000000000..8f32b2bf758
--- /dev/null
+++ b/drivers/iommu/omap-iommu.c
@@ -0,0 +1,1245 @@
+/*
+ * omap iommu: tlb and pagetable primitives
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
+ * Paul Mundt and Toshihiro Kobayashi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+
+#include <plat/iommu.h>
+
+#include <plat/iopgtable.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr) \
+ for (__i = 0; \
+ (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
+ __i++)
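+
+/*
+ * The iterator above walks the first @n TLB entries of @obj, reading each
+ * one into @cr via __iotlb_read_cr(); see load_iotlb_entry() and
+ * flush_iotlb_page() below for callers.
+ */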
+
+/**
+ * struct omap_iommu_domain - omap iommu domain
+ * @pgtable: the page table
+ * @iommu_dev: an omap iommu device attached to this domain. only a single
+ * iommu device can be attached for now.
+ * @lock: domain lock, should be taken when attaching/detaching
+ */
+struct omap_iommu_domain {
+ u32 *pgtable;
+ struct omap_iommu *iommu_dev;
+ spinlock_t lock;
+};
+
+/* accommodate the difference between omap1 and omap2/3 */
+static const struct iommu_functions *arch_iommu;
+
+static struct platform_driver omap_iommu_driver;
+static struct kmem_cache *iopte_cachep;
+
+/**
+ * omap_install_iommu_arch - Install architecture specific iommu functions
+ * @ops: a pointer to architecture specific iommu functions
+ *
+ * There are several kinds of iommu algorithms (tlb, pagetable) among the
+ * omap series. This interface installs such an iommu algorithm.
+ **/
+int omap_install_iommu_arch(const struct iommu_functions *ops)
+{
+ if (arch_iommu)
+ return -EBUSY;
+
+ arch_iommu = ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
+
+/**
+ * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
+ * @ops: a pointer to architecture specific iommu functions
+ *
+ * This interface uninstalls the iommu algorithm installed previously.
+ **/
+void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
+{
+ if (arch_iommu != ops)
+ pr_err("%s: not your arch\n", __func__);
+
+ arch_iommu = NULL;
+}
+EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
+
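+/*
+ * Usage sketch (illustrative only; 'my_arch_iommu_ops' and the callback
+ * names are made up, the real callbacks are provided by the platform code):
+ *
+ *     static const struct iommu_functions my_arch_iommu_ops = {
+ *             .version   = 2,
+ *             .enable    = my_arch_iommu_enable,
+ *             .disable   = my_arch_iommu_disable,
+ *             ...
+ *     };
+ *
+ *     omap_install_iommu_arch(&my_arch_iommu_ops);    at module init
+ *     omap_uninstall_iommu_arch(&my_arch_iommu_ops);  at module exit
+ */
+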
+/**
+ * omap_iommu_save_ctx - Save registers for pm off-mode support
+ * @obj: target iommu
+ **/
+void omap_iommu_save_ctx(struct omap_iommu *obj)
+{
+ arch_iommu->save_ctx(obj);
+}
+EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
+
+/**
+ * omap_iommu_restore_ctx - Restore registers for pm off-mode support
+ * @obj: target iommu
+ **/
+void omap_iommu_restore_ctx(struct omap_iommu *obj)
+{
+ arch_iommu->restore_ctx(obj);
+}
+EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
+
+/**
+ * omap_iommu_arch_version - Return running iommu arch version
+ **/
+u32 omap_iommu_arch_version(void)
+{
+ return arch_iommu->version;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
+
+static int iommu_enable(struct omap_iommu *obj)
+{
+ int err;
+
+ if (!obj)
+ return -EINVAL;
+
+ if (!arch_iommu)
+ return -ENODEV;
+
+ clk_enable(obj->clk);
+
+ err = arch_iommu->enable(obj);
+
+ clk_disable(obj->clk);
+ return err;
+}
+
+static void iommu_disable(struct omap_iommu *obj)
+{
+ if (!obj)
+ return;
+
+ clk_enable(obj->clk);
+
+ arch_iommu->disable(obj);
+
+ clk_disable(obj->clk);
+}
+
+/*
+ * TLB operations
+ */
+void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
+{
+ BUG_ON(!cr || !e);
+
+ arch_iommu->cr_to_e(cr, e);
+}
+EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
+
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+ if (!cr)
+ return -EINVAL;
+
+ return arch_iommu->cr_valid(cr);
+}
+
+static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
+ struct iotlb_entry *e)
+{
+ if (!e)
+ return NULL;
+
+ return arch_iommu->alloc_cr(obj, e);
+}
+
+static u32 iotlb_cr_to_virt(struct cr_regs *cr)
+{
+ return arch_iommu->cr_to_virt(cr);
+}
+
+static u32 get_iopte_attr(struct iotlb_entry *e)
+{
+ return arch_iommu->get_pte_attr(e);
+}
+
+static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
+{
+ return arch_iommu->fault_isr(obj, da);
+}
+
+static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
+{
+ u32 val;
+
+ val = iommu_read_reg(obj, MMU_LOCK);
+
+ l->base = MMU_LOCK_BASE(val);
+ l->vict = MMU_LOCK_VICT(val);
+
+}
+
+static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
+{
+ u32 val;
+
+ val = (l->base << MMU_LOCK_BASE_SHIFT);
+ val |= (l->vict << MMU_LOCK_VICT_SHIFT);
+
+ iommu_write_reg(obj, val, MMU_LOCK);
+}
+
+static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
+{
+ arch_iommu->tlb_read_cr(obj, cr);
+}
+
+static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
+{
+ arch_iommu->tlb_load_cr(obj, cr);
+
+ iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+ iommu_write_reg(obj, 1, MMU_LD_TLB);
+}
+
+/**
+ * iotlb_dump_cr - Dump an iommu tlb entry into buf
+ * @obj: target iommu
+ * @cr: contents of cam and ram register
+ * @buf: output buffer
+ **/
+static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+ char *buf)
+{
+ BUG_ON(!cr || !buf);
+
+ return arch_iommu->dump_cr(obj, cr, buf);
+}
+
+/* only used in iotlb iteration for-loop */
+static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
+{
+ struct cr_regs cr;
+ struct iotlb_lock l;
+
+ iotlb_lock_get(obj, &l);
+ l.vict = n;
+ iotlb_lock_set(obj, &l);
+ iotlb_read_cr(obj, &cr);
+
+ return cr;
+}
+
+/**
+ * load_iotlb_entry - Set an iommu tlb entry
+ * @obj: target iommu
+ * @e: an iommu tlb entry info
+ **/
+#ifdef PREFETCH_IOTLB
+static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+{
+ int err = 0;
+ struct iotlb_lock l;
+ struct cr_regs *cr;
+
+ if (!obj || !obj->nr_tlb_entries || !e)
+ return -EINVAL;
+
+ clk_enable(obj->clk);
+
+ iotlb_lock_get(obj, &l);
+ if (l.base == obj->nr_tlb_entries) {
+ dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
+ err = -EBUSY;
+ goto out;
+ }
+ if (!e->prsvd) {
+ int i;
+ struct cr_regs tmp;
+
+ for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
+ if (!iotlb_cr_valid(&tmp))
+ break;
+
+ if (i == obj->nr_tlb_entries) {
+ dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
+ err = -EBUSY;
+ goto out;
+ }
+
+ iotlb_lock_get(obj, &l);
+ } else {
+ l.vict = l.base;
+ iotlb_lock_set(obj, &l);
+ }
+
+ cr = iotlb_alloc_cr(obj, e);
+ if (IS_ERR(cr)) {
+ clk_disable(obj->clk);
+ return PTR_ERR(cr);
+ }
+
+ iotlb_load_cr(obj, cr);
+ kfree(cr);
+
+ if (e->prsvd)
+ l.base++;
+ /* increment victim for next tlb load */
+ if (++l.vict == obj->nr_tlb_entries)
+ l.vict = l.base;
+ iotlb_lock_set(obj, &l);
+out:
+ clk_disable(obj->clk);
+ return err;
+}
+
+#else /* !PREFETCH_IOTLB */
+
+static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+{
+ return 0;
+}
+
+#endif /* !PREFETCH_IOTLB */
+
+static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+{
+ return load_iotlb_entry(obj, e);
+}
+
+/**
+ * flush_iotlb_page - Clear an iommu tlb entry
+ * @obj: target iommu
+ * @da: iommu device virtual address
+ *
+ * Clear an iommu tlb entry which includes 'da' address.
+ **/
+static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
+{
+ int i;
+ struct cr_regs cr;
+
+ clk_enable(obj->clk);
+
+ for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
+ u32 start;
+ size_t bytes;
+
+ if (!iotlb_cr_valid(&cr))
+ continue;
+
+ start = iotlb_cr_to_virt(&cr);
+ bytes = iopgsz_to_bytes(cr.cam & 3);
+
+ if ((start <= da) && (da < start + bytes)) {
+ dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
+ __func__, start, da, bytes);
+ iotlb_load_cr(obj, &cr);
+ iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+ }
+ }
+ clk_disable(obj->clk);
+
+ if (i == obj->nr_tlb_entries)
+ dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
+}
+
+/**
+ * flush_iotlb_all - Clear all iommu tlb entries
+ * @obj: target iommu
+ **/
+static void flush_iotlb_all(struct omap_iommu *obj)
+{
+ struct iotlb_lock l;
+
+ clk_enable(obj->clk);
+
+ l.base = 0;
+ l.vict = 0;
+ iotlb_lock_set(obj, &l);
+
+ iommu_write_reg(obj, 1, MMU_GFLUSH);
+
+ clk_disable(obj->clk);
+}
+
+#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
+
+ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
+{
+ if (!obj || !buf)
+ return -EINVAL;
+
+ clk_enable(obj->clk);
+
+ bytes = arch_iommu->dump_ctx(obj, buf, bytes);
+
+ clk_disable(obj->clk);
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
+
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
+{
+ int i;
+ struct iotlb_lock saved;
+ struct cr_regs tmp;
+ struct cr_regs *p = crs;
+
+ clk_enable(obj->clk);
+ iotlb_lock_get(obj, &saved);
+
+ for_each_iotlb_cr(obj, num, i, tmp) {
+ if (!iotlb_cr_valid(&tmp))
+ continue;
+ *p++ = tmp;
+ }
+
+ iotlb_lock_set(obj, &saved);
+ clk_disable(obj->clk);
+
+ return p - crs;
+}
+
+/**
+ * omap_dump_tlb_entries - dump cr arrays to given buffer
+ * @obj: target iommu
+ * @buf: output buffer
+ **/
+size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
+{
+ int i, num;
+ struct cr_regs *cr;
+ char *p = buf;
+
+ num = bytes / sizeof(*cr);
+ num = min(obj->nr_tlb_entries, num);
+
+ cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+ if (!cr)
+ return 0;
+
+ num = __dump_tlb_entries(obj, cr, num);
+ for (i = 0; i < num; i++)
+ p += iotlb_dump_cr(obj, cr + i, p);
+ kfree(cr);
+
+ return p - buf;
+}
+EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
+
+int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
+{
+ return driver_for_each_device(&omap_iommu_driver.driver,
+ NULL, data, fn);
+}
+EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
+
+#endif /* CONFIG_OMAP_IOMMU_DEBUG || CONFIG_OMAP_IOMMU_DEBUG_MODULE */
+
+/*
+ * H/W pagetable operations
+ */
+static void flush_iopgd_range(u32 *first, u32 *last)
+{
+ /* FIXME: L2 cache should be taken care of if it exists */
+ do {
+ asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
+ : : "r" (first));
+ first += L1_CACHE_BYTES / sizeof(*first);
+ } while (first <= last);
+}
+
+static void flush_iopte_range(u32 *first, u32 *last)
+{
+ /* FIXME: L2 cache should be taken care of if it exists */
+ do {
+ asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
+ : : "r" (first));
+ first += L1_CACHE_BYTES / sizeof(*first);
+ } while (first <= last);
+}
+
+static void iopte_free(u32 *iopte)
+{
+ /* Note: freed iopte's must be clean ready for re-use */
+ kmem_cache_free(iopte_cachep, iopte);
+}
+
+static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
+{
+ u32 *iopte;
+
+ /* a table already exists */
+ if (*iopgd)
+ goto pte_ready;
+
+ /*
+ * do the allocation outside the page table lock
+ */
+ spin_unlock(&obj->page_table_lock);
+ iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
+ spin_lock(&obj->page_table_lock);
+
+ if (!*iopgd) {
+ if (!iopte)
+ return ERR_PTR(-ENOMEM);
+
+ *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
+ flush_iopgd_range(iopgd, iopgd);
+
+ dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
+ } else {
+ /* We raced; free the redundant table */
+ iopte_free(iopte);
+ }
+
+pte_ready:
+ iopte = iopte_offset(iopgd, da);
+
+ dev_vdbg(obj->dev,
+ "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
+ __func__, da, iopgd, *iopgd, iopte, *iopte);
+
+ return iopte;
+}
+
+static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
+{
+ u32 *iopgd = iopgd_offset(obj, da);
+
+ if ((da | pa) & ~IOSECTION_MASK) {
+ dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
+ __func__, da, pa, IOSECTION_SIZE);
+ return -EINVAL;
+ }
+
+ *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
+ flush_iopgd_range(iopgd, iopgd);
+ return 0;
+}
+
+static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
+{
+ u32 *iopgd = iopgd_offset(obj, da);
+ int i;
+
+ if ((da | pa) & ~IOSUPER_MASK) {
+ dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
+ __func__, da, pa, IOSUPER_SIZE);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 16; i++)
+ *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
+ flush_iopgd_range(iopgd, iopgd + 15);
+ return 0;
+}
+
+static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
+{
+ u32 *iopgd = iopgd_offset(obj, da);
+ u32 *iopte = iopte_alloc(obj, iopgd, da);
+
+ if (IS_ERR(iopte))
+ return PTR_ERR(iopte);
+
+ *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
+ flush_iopte_range(iopte, iopte);
+
+ dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
+ __func__, da, pa, iopte, *iopte);
+
+ return 0;
+}
+
+static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
+{
+ u32 *iopgd = iopgd_offset(obj, da);
+ u32 *iopte = iopte_alloc(obj, iopgd, da);
+ int i;
+
+ if ((da | pa) & ~IOLARGE_MASK) {
+ dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
+ __func__, da, pa, IOLARGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(iopte))
+ return PTR_ERR(iopte);
+
+ for (i = 0; i < 16; i++)
+ *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
+ flush_iopte_range(iopte, iopte + 15);
+ return 0;
+}
+
+static int
+iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
+{
+ int (*fn)(struct omap_iommu *, u32, u32, u32);
+ u32 prot;
+ int err;
+
+ if (!obj || !e)
+ return -EINVAL;
+
+ switch (e->pgsz) {
+ case MMU_CAM_PGSZ_16M:
+ fn = iopgd_alloc_super;
+ break;
+ case MMU_CAM_PGSZ_1M:
+ fn = iopgd_alloc_section;
+ break;
+ case MMU_CAM_PGSZ_64K:
+ fn = iopte_alloc_large;
+ break;
+ case MMU_CAM_PGSZ_4K:
+ fn = iopte_alloc_page;
+ break;
+ default:
+ fn = NULL;
+ BUG();
+ break;
+ }
+
+ prot = get_iopte_attr(e);
+
+ spin_lock(&obj->page_table_lock);
+ err = fn(obj, e->da, e->pa, prot);
+ spin_unlock(&obj->page_table_lock);
+
+ return err;
+}
+
+/**
+ * omap_iopgtable_store_entry - Make an iommu pte entry
+ * @obj: target iommu
+ * @e: an iommu tlb entry info
+ **/
+int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+{
+ int err;
+
+ flush_iotlb_page(obj, e->da);
+ err = iopgtable_store_entry_core(obj, e);
+ if (!err)
+ prefetch_iotlb_entry(obj, e);
+ return err;
+}
+EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
+
+/**
+ * iopgtable_lookup_entry - Lookup an iommu pte entry
+ * @obj: target iommu
+ * @da: iommu device virtual address
+ * @ppgd: iommu pgd entry pointer to be returned
+ * @ppte: iommu pte entry pointer to be returned
+ **/
+static void
+iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
+{
+ u32 *iopgd, *iopte = NULL;
+
+ iopgd = iopgd_offset(obj, da);
+ if (!*iopgd)
+ goto out;
+
+ if (iopgd_is_table(*iopgd))
+ iopte = iopte_offset(iopgd, da);
+out:
+ *ppgd = iopgd;
+ *ppte = iopte;
+}
+
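+/*
+ * Clear the L1/L2 entry covering @da and return the number of bytes that
+ * were mapped there (0 if nothing was mapped). Called with
+ * obj->page_table_lock held by iopgtable_clear_entry().
+ */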
+static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
+{
+ size_t bytes;
+ u32 *iopgd = iopgd_offset(obj, da);
+ int nent = 1;
+
+ if (!*iopgd)
+ return 0;
+
+ if (iopgd_is_table(*iopgd)) {
+ int i;
+ u32 *iopte = iopte_offset(iopgd, da);
+
+ bytes = IOPTE_SIZE;
+ if (*iopte & IOPTE_LARGE) {
+ nent *= 16;
+ /* rewind to the 1st entry */
+ iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
+ }
+ bytes *= nent;
+ memset(iopte, 0, nent * sizeof(*iopte));
+ flush_iopte_range(iopte, iopte + nent - 1);
+
+ /*
+ * do table walk to check if this table is necessary or not
+ */
+ iopte = iopte_offset(iopgd, 0);
+ for (i = 0; i < PTRS_PER_IOPTE; i++)
+ if (iopte[i])
+ goto out;
+
+ iopte_free(iopte);
+ nent = 1; /* for the next L1 entry */
+ } else {
+ bytes = IOPGD_SIZE;
+ if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
+ nent *= 16;
+ /* rewind to the 1st entry */
+ iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
+ }
+ bytes *= nent;
+ }
+ memset(iopgd, 0, nent * sizeof(*iopgd));
+ flush_iopgd_range(iopgd, iopgd + nent - 1);
+out:
+ return bytes;
+}
+
+/**
+ * iopgtable_clear_entry - Remove an iommu pte entry
+ * @obj: target iommu
+ * @da: iommu device virtual address
+ **/
+static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
+{
+ size_t bytes;
+
+ spin_lock(&obj->page_table_lock);
+
+ bytes = iopgtable_clear_entry_core(obj, da);
+ flush_iotlb_page(obj, da);
+
+ spin_unlock(&obj->page_table_lock);
+
+ return bytes;
+}
+
+static void iopgtable_clear_entry_all(struct omap_iommu *obj)
+{
+ int i;
+
+ spin_lock(&obj->page_table_lock);
+
+ for (i = 0; i < PTRS_PER_IOPGD; i++) {
+ u32 da;
+ u32 *iopgd;
+
+ da = i << IOPGD_SHIFT;
+ iopgd = iopgd_offset(obj, da);
+
+ if (!*iopgd)
+ continue;
+
+ if (iopgd_is_table(*iopgd))
+ iopte_free(iopte_offset(iopgd, 0));
+
+ *iopgd = 0;
+ flush_iopgd_range(iopgd, iopgd);
+ }
+
+ flush_iotlb_all(obj);
+
+ spin_unlock(&obj->page_table_lock);
+}
+
+/*
+ * Device IOMMU generic operations
+ */
+static irqreturn_t iommu_fault_handler(int irq, void *data)
+{
+ u32 da, errs;
+ u32 *iopgd, *iopte;
+ struct omap_iommu *obj = data;
+ struct iommu_domain *domain = obj->domain;
+
+ if (!obj->refcount)
+ return IRQ_NONE;
+
+ clk_enable(obj->clk);
+ errs = iommu_report_fault(obj, &da);
+ clk_disable(obj->clk);
+ if (errs == 0)
+ return IRQ_HANDLED;
+
+ /* Fault callback or TLB/PTE Dynamic loading */
+ if (!report_iommu_fault(domain, obj->dev, da, 0))
+ return IRQ_HANDLED;
+
+ iommu_disable(obj);
+
+ iopgd = iopgd_offset(obj, da);
+
+ if (!iopgd_is_table(*iopgd)) {
+ dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
+ "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
+ return IRQ_NONE;
+ }
+
+ iopte = iopte_offset(iopgd, da);
+
+ dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
+ "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
+ iopte, *iopte);
+
+ return IRQ_NONE;
+}
+
+static int device_match_by_alias(struct device *dev, void *data)
+{
+ struct omap_iommu *obj = to_iommu(dev);
+ const char *name = data;
+
+ pr_debug("%s: %s %s\n", __func__, obj->name, name);
+
+ return strcmp(obj->name, name) == 0;
+}
+
+/**
+ * omap_find_iommu_device() - find an omap iommu device by name
+ * @name: name of the iommu device
+ *
+ * The generic iommu API requires the caller to provide the device
+ * it wishes to attach to a certain iommu domain.
+ *
+ * Drivers generally should not bother with this as it should just
+ * be taken care of by the DMA-API using dev_archdata.
+ *
+ * This function is provided as an interim solution until the latter
+ * materializes, and omap3isp is fully migrated to the DMA-API.
+ */
+struct device *omap_find_iommu_device(const char *name)
+{
+ return driver_find_device(&omap_iommu_driver.driver, NULL,
+ (void *)name,
+ device_match_by_alias);
+}
+EXPORT_SYMBOL_GPL(omap_find_iommu_device);
+
+/**
+ * omap_iommu_attach() - attach iommu device to an iommu domain
+ * @dev: target omap iommu device
+ * @iopgd: page table
+ **/
+static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+{
+ int err = -ENOMEM;
+ struct omap_iommu *obj = to_iommu(dev);
+
+ spin_lock(&obj->iommu_lock);
+
+ /* an iommu device can only be attached once */
+ if (++obj->refcount > 1) {
+ dev_err(dev, "%s: already attached!\n", obj->name);
+ err = -EBUSY;
+ goto err_enable;
+ }
+
+ obj->iopgd = iopgd;
+ err = iommu_enable(obj);
+ if (err)
+ goto err_enable;
+ flush_iotlb_all(obj);
+
+ if (!try_module_get(obj->owner))
+ goto err_module;
+
+ spin_unlock(&obj->iommu_lock);
+
+ dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
+ return obj;
+
+err_module:
+ if (obj->refcount == 1)
+ iommu_disable(obj);
+err_enable:
+ obj->refcount--;
+ spin_unlock(&obj->iommu_lock);
+ return ERR_PTR(err);
+}
+
+/**
+ * omap_iommu_detach - release iommu device
+ * @obj: target iommu
+ **/
+static void omap_iommu_detach(struct omap_iommu *obj)
+{
+ if (!obj || IS_ERR(obj))
+ return;
+
+ spin_lock(&obj->iommu_lock);
+
+ if (--obj->refcount == 0)
+ iommu_disable(obj);
+
+ module_put(obj->owner);
+
+ obj->iopgd = NULL;
+
+ spin_unlock(&obj->iommu_lock);
+
+ dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
+}
+
+/*
+ * OMAP Device MMU(IOMMU) detection
+ */
+static int __devinit omap_iommu_probe(struct platform_device *pdev)
+{
+ int err = -ENODEV;
+ int irq;
+ struct omap_iommu *obj;
+ struct resource *res;
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdev->num_resources != 2)
+ return -EINVAL;
+
+ obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->clk = clk_get(&pdev->dev, pdata->clk_name);
+ if (IS_ERR(obj->clk))
+ goto err_clk;
+
+ obj->nr_tlb_entries = pdata->nr_tlb_entries;
+ obj->name = pdata->name;
+ obj->dev = &pdev->dev;
+ obj->ctx = (void *)obj + sizeof(*obj);
+ obj->da_start = pdata->da_start;
+ obj->da_end = pdata->da_end;
+
+ spin_lock_init(&obj->iommu_lock);
+ mutex_init(&obj->mmap_lock);
+ spin_lock_init(&obj->page_table_lock);
+ INIT_LIST_HEAD(&obj->mmap);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENODEV;
+ goto err_mem;
+ }
+
+ res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!res) {
+ err = -EIO;
+ goto err_mem;
+ }
+
+ obj->regbase = ioremap(res->start, resource_size(res));
+ if (!obj->regbase) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = -ENODEV;
+ goto err_irq;
+ }
+ err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
+ dev_name(&pdev->dev), obj);
+ if (err < 0)
+ goto err_irq;
+ platform_set_drvdata(pdev, obj);
+
+ dev_info(&pdev->dev, "%s registered\n", obj->name);
+ return 0;
+
+err_irq:
+ iounmap(obj->regbase);
+err_ioremap:
+ release_mem_region(res->start, resource_size(res));
+err_mem:
+ clk_put(obj->clk);
+err_clk:
+ kfree(obj);
+ return err;
+}
+
+static int __devexit omap_iommu_remove(struct platform_device *pdev)
+{
+ int irq;
+ struct resource *res;
+ struct omap_iommu *obj = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ iopgtable_clear_entry_all(obj);
+
+ irq = platform_get_irq(pdev, 0);
+ free_irq(irq, obj);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+ iounmap(obj->regbase);
+
+ clk_put(obj->clk);
+ dev_info(&pdev->dev, "%s removed\n", obj->name);
+ kfree(obj);
+ return 0;
+}
+
+static struct platform_driver omap_iommu_driver = {
+ .probe = omap_iommu_probe,
+ .remove = __devexit_p(omap_iommu_remove),
+ .driver = {
+ .name = "omap-iommu",
+ },
+};
+
+static void iopte_cachep_ctor(void *iopte)
+{
+ clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
+}
+
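+/*
+ * The map/unmap callbacks below work on page "orders": bytes is
+ * PAGE_SIZE << order and (da | pa) must be aligned to that size. As an
+ * illustration (names and values are placeholders), a caller would map a
+ * single 1MB section with:
+ *
+ *     iommu_map(domain, da, pa, get_order(SZ_1M), prot);
+ *
+ * with both da and pa aligned to 1MB.
+ */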
+static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
+ phys_addr_t pa, int order, int prot)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu *oiommu = omap_domain->iommu_dev;
+ struct device *dev = oiommu->dev;
+ size_t bytes = PAGE_SIZE << order;
+ struct iotlb_entry e;
+ int omap_pgsz;
+ u32 ret, flags;
+
+ /* we only support mapping a single iommu page for now */
+ omap_pgsz = bytes_to_iopgsz(bytes);
+ if (omap_pgsz < 0) {
+ dev_err(dev, "invalid size to map: %d\n", bytes);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+
+ flags = omap_pgsz | prot;
+
+ iotlb_init_entry(&e, da, pa, flags);
+
+ ret = omap_iopgtable_store_entry(oiommu, &e);
+ if (ret)
+ dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
+
+ return ret;
+}
+
+static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+ int order)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu *oiommu = omap_domain->iommu_dev;
+ struct device *dev = oiommu->dev;
+ size_t unmap_size;
+
+ dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
+
+ unmap_size = iopgtable_clear_entry(oiommu, da);
+
+ return unmap_size ? get_order(unmap_size) : -EINVAL;
+}
+
+static int
+omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu *oiommu;
+ int ret = 0;
+
+ spin_lock(&omap_domain->lock);
+
+ /* only a single device is supported per domain for now */
+ if (omap_domain->iommu_dev) {
+ dev_err(dev, "iommu domain is already attached\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* get a handle to and enable the omap iommu */
+ oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+ if (IS_ERR(oiommu)) {
+ ret = PTR_ERR(oiommu);
+ dev_err(dev, "can't get omap iommu: %d\n", ret);
+ goto out;
+ }
+
+ omap_domain->iommu_dev = oiommu;
+ oiommu->domain = domain;
+
+out:
+ spin_unlock(&omap_domain->lock);
+ return ret;
+}
+
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu *oiommu = to_iommu(dev);
+
+ spin_lock(&omap_domain->lock);
+
+ /* only a single device is supported per domain for now */
+ if (omap_domain->iommu_dev != oiommu) {
+ dev_err(dev, "invalid iommu device\n");
+ goto out;
+ }
+
+ iopgtable_clear_entry_all(oiommu);
+
+ omap_iommu_detach(oiommu);
+
+ omap_domain->iommu_dev = NULL;
+
+out:
+ spin_unlock(&omap_domain->lock);
+}
+
+static int omap_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct omap_iommu_domain *omap_domain;
+
+ omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
+ if (!omap_domain) {
+ pr_err("kzalloc failed\n");
+ goto out;
+ }
+
+ omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
+ if (!omap_domain->pgtable) {
+ pr_err("kzalloc failed\n");
+ goto fail_nomem;
+ }
+
+ /*
+ * should never fail, but please keep this around to ensure
+ * we keep the hardware happy
+ */
+ BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+
+ clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
+ spin_lock_init(&omap_domain->lock);
+
+ domain->priv = omap_domain;
+
+ return 0;
+
+fail_nomem:
+ kfree(omap_domain);
+out:
+ return -ENOMEM;
+}
+
+/* assume device was already detached */
+static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+
+ domain->priv = NULL;
+
+ kfree(omap_domain->pgtable);
+ kfree(omap_domain);
+}
+
+static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long da)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu *oiommu = omap_domain->iommu_dev;
+ struct device *dev = oiommu->dev;
+ u32 *pgd, *pte;
+ phys_addr_t ret = 0;
+
+ iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
+
+ if (pte) {
+ if (iopte_is_small(*pte))
+ ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
+ else if (iopte_is_large(*pte))
+ ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
+ else
+ dev_err(dev, "bogus pte 0x%x", *pte);
+ } else {
+ if (iopgd_is_section(*pgd))
+ ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
+ else if (iopgd_is_super(*pgd))
+ ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
+ else
+ dev_err(dev, "bogus pgd 0x%x", *pgd);
+ }
+
+ return ret;
+}
+
+static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap)
+{
+ return 0;
+}
+
+static struct iommu_ops omap_iommu_ops = {
+ .domain_init = omap_iommu_domain_init,
+ .domain_destroy = omap_iommu_domain_destroy,
+ .attach_dev = omap_iommu_attach_dev,
+ .detach_dev = omap_iommu_detach_dev,
+ .map = omap_iommu_map,
+ .unmap = omap_iommu_unmap,
+ .iova_to_phys = omap_iommu_iova_to_phys,
+ .domain_has_cap = omap_iommu_domain_has_cap,
+};
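+
+/*
+ * Sketch of a client's path through the generic IOMMU API once these ops
+ * are registered (error handling omitted; 'dev', 'da', 'pa', 'bytes' and
+ * 'prot' are placeholders):
+ *
+ *     struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
+ *
+ *     iommu_attach_device(domain, dev);
+ *     iommu_map(domain, da, pa, get_order(bytes), prot);
+ *     ...
+ *     iommu_unmap(domain, da, get_order(bytes));
+ *     iommu_detach_device(domain, dev);
+ *     iommu_domain_free(domain);
+ */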
+
+static int __init omap_iommu_init(void)
+{
+ struct kmem_cache *p;
+ const unsigned long flags = SLAB_HWCACHE_ALIGN;
+ size_t align = 1 << 10; /* L2 pagetable alignment */
+
+ p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
+ iopte_cachep_ctor);
+ if (!p)
+ return -ENOMEM;
+ iopte_cachep = p;
+
+ bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+
+ return platform_driver_register(&omap_iommu_driver);
+}
+module_init(omap_iommu_init);
+
+static void __exit omap_iommu_exit(void)
+{
+ kmem_cache_destroy(iopte_cachep);
+
+ platform_driver_unregister(&omap_iommu_driver);
+}
+module_exit(omap_iommu_exit);
+
+MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
+MODULE_ALIAS("platform:omap-iommu");
+MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
new file mode 100644
index 00000000000..46be456fcc0
--- /dev/null
+++ b/drivers/iommu/omap-iovmm.c
@@ -0,0 +1,743 @@
+/*
+ * omap iommu: simple virtual address space management
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/iommu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+#include <plat/iommu.h>
+#include <plat/iovmm.h>
+
+#include <plat/iopgtable.h>
+
+static struct kmem_cache *iovm_area_cachep;
+
+/* return the offset of the first scatterlist entry in a sg table */
+static unsigned int sgtable_offset(const struct sg_table *sgt)
+{
+ if (!sgt || !sgt->nents)
+ return 0;
+
+ return sgt->sgl->offset;
+}
+
+/* return total bytes of sg buffers */
+static size_t sgtable_len(const struct sg_table *sgt)
+{
+ unsigned int i, total = 0;
+ struct scatterlist *sg;
+
+ if (!sgt)
+ return 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes;
+
+ bytes = sg->length + sg->offset;
+
+ if (!iopgsz_ok(bytes)) {
+ pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
+ __func__, i, bytes, sg->offset);
+ return 0;
+ }
+
+ if (i && sg->offset) {
+ pr_err("%s: sg[%d] offset not allowed in internal "
+ "entries\n", __func__, i);
+ return 0;
+ }
+
+ total += bytes;
+ }
+
+ return total;
+}
+#define sgtable_ok(x) (!!sgtable_len(x))
+
+static unsigned max_alignment(u32 addr)
+{
+ int i;
+ unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+ for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
+ ;
+ return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
+}
+
+/*
+ * calculate the optimal number of sg elements from total bytes based on
+ * iommu superpages
+ */
+static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
+{
+ unsigned nr_entries = 0, ent_sz;
+
+ if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
+ pr_err("%s: wrong size %08x\n", __func__, bytes);
+ return 0;
+ }
+
+ while (bytes) {
+ ent_sz = max_alignment(da | pa);
+ ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
+ nr_entries++;
+ da += ent_sz;
+ pa += ent_sz;
+ bytes -= ent_sz;
+ }
+
+ return nr_entries;
+}
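+
+/*
+ * Example: a physically contiguous, 16MB-aligned region of 17MB yields two
+ * entries (one 16MB supersection followed by one 1MB section), whereas a
+ * region with only 4KB alignment degrades to one entry per 4KB page.
+ */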
+
+/* allocate and initialize sg_table header (a kind of 'superblock') */
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
+ u32 da, u32 pa)
+{
+ unsigned int nr_entries;
+ int err;
+ struct sg_table *sgt;
+
+ if (!bytes)
+ return ERR_PTR(-EINVAL);
+
+ if (!IS_ALIGNED(bytes, PAGE_SIZE))
+ return ERR_PTR(-EINVAL);
+
+ if (flags & IOVMF_LINEAR) {
+ nr_entries = sgtable_nents(bytes, da, pa);
+ if (!nr_entries)
+ return ERR_PTR(-EINVAL);
+ } else
+ nr_entries = bytes / PAGE_SIZE;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
+ if (err) {
+ kfree(sgt);
+ return ERR_PTR(err);
+ }
+
+ pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
+
+ return sgt;
+}
+
+/* free sg_table header (a kind of superblock) */
+static void sgtable_free(struct sg_table *sgt)
+{
+ if (!sgt)
+ return;
+
+ sg_free_table(sgt);
+ kfree(sgt);
+
+ pr_debug("%s: sgt:%p\n", __func__, sgt);
+}
+
+/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
+static void *vmap_sg(const struct sg_table *sgt)
+{
+ u32 va;
+ size_t total;
+ unsigned int i;
+ struct scatterlist *sg;
+ struct vm_struct *new;
+ const struct mem_type *mtype;
+
+ mtype = get_mem_type(MT_DEVICE);
+ if (!mtype)
+ return ERR_PTR(-EINVAL);
+
+ total = sgtable_len(sgt);
+ if (!total)
+ return ERR_PTR(-EINVAL);
+
+ new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+ va = (u32)new->addr;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes;
+ u32 pa;
+ int err;
+
+ pa = sg_phys(sg) - sg->offset;
+ bytes = sg->length + sg->offset;
+
+ BUG_ON(bytes != PAGE_SIZE);
+
+ err = ioremap_page(va, pa, mtype);
+ if (err)
+ goto err_out;
+
+ va += bytes;
+ }
+
+ flush_cache_vmap((unsigned long)new->addr,
+ (unsigned long)(new->addr + total));
+ return new->addr;
+
+err_out:
+ WARN_ON(1); /* FIXME: cleanup some mpu mappings */
+ vunmap(new->addr);
+ return ERR_PTR(-EAGAIN);
+}
+
+static inline void vunmap_sg(const void *va)
+{
+ vunmap(va);
+}
+
+static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
+ const u32 da)
+{
+ struct iovm_struct *tmp;
+
+ list_for_each_entry(tmp, &obj->mmap, list) {
+ if ((da >= tmp->da_start) && (da < tmp->da_end)) {
+ size_t len;
+
+ len = tmp->da_end - tmp->da_start;
+
+ dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
+ __func__, tmp->da_start, da, tmp->da_end, len,
+ tmp->flags);
+
+ return tmp;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * omap_find_iovm_area - find iovma which includes @da
+ * @da: iommu device virtual address
+ *
+ * Find the existing iovma starting at @da
+ */
+struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
+{
+ struct iovm_struct *area;
+
+ mutex_lock(&obj->mmap_lock);
+ area = __find_iovm_area(obj, da);
+ mutex_unlock(&obj->mmap_lock);
+
+ return area;
+}
+EXPORT_SYMBOL_GPL(omap_find_iovm_area);
+
+/*
+ * This finds the hole(area) which fits the requested address and len
+ * in iovmas mmap, and returns the new allocated iovma.
+ */
+static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
+ size_t bytes, u32 flags)
+{
+ struct iovm_struct *new, *tmp;
+ u32 start, prev_end, alignment;
+
+ if (!obj || !bytes)
+ return ERR_PTR(-EINVAL);
+
+ start = da;
+ alignment = PAGE_SIZE;
+
+ if (~flags & IOVMF_DA_FIXED) {
+ /* Don't map address 0 */
+ start = obj->da_start ? obj->da_start : alignment;
+
+ if (flags & IOVMF_LINEAR)
+ alignment = iopgsz_max(bytes);
+ start = roundup(start, alignment);
+ } else if (start < obj->da_start || start > obj->da_end ||
+ obj->da_end - start < bytes) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ tmp = NULL;
+ if (list_empty(&obj->mmap))
+ goto found;
+
+ prev_end = 0;
+ list_for_each_entry(tmp, &obj->mmap, list) {
+
+ if (prev_end > start)
+ break;
+
+ if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
+ goto found;
+
+ if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
+ start = roundup(tmp->da_end + 1, alignment);
+
+ prev_end = tmp->da_end;
+ }
+
+ if ((start >= prev_end) && (obj->da_end - start >= bytes))
+ goto found;
+
+ dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
+ __func__, da, bytes, flags);
+
+ return ERR_PTR(-EINVAL);
+
+found:
+ new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ new->iommu = obj;
+ new->da_start = start;
+ new->da_end = start + bytes;
+ new->flags = flags;
+
+ /*
+ * keep ascending order of iovmas
+ */
+ if (tmp)
+ list_add_tail(&new->list, &tmp->list);
+ else
+ list_add(&new->list, &obj->mmap);
+
+ dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
+ __func__, new->da_start, start, new->da_end, bytes, flags);
+
+ return new;
+}
+
+static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
+{
+ size_t bytes;
+
+ BUG_ON(!obj || !area);
+
+ bytes = area->da_end - area->da_start;
+
+ dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
+ __func__, area->da_start, area->da_end, bytes, area->flags);
+
+ list_del(&area->list);
+ kmem_cache_free(iovm_area_cachep, area);
+}
+
+/**
+ * omap_da_to_va - convert (d) to (v)
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Returns mpu virtual addr which corresponds to a given device virtual addr
+ */
+void *omap_da_to_va(struct omap_iommu *obj, u32 da)
+{
+ void *va = NULL;
+ struct iovm_struct *area;
+
+ mutex_lock(&obj->mmap_lock);
+
+ area = __find_iovm_area(obj, da);
+ if (!area) {
+ dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
+ goto out;
+ }
+ va = area->va;
+out:
+ mutex_unlock(&obj->mmap_lock);
+
+ return va;
+}
+EXPORT_SYMBOL_GPL(omap_da_to_va);
+
+static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
+{
+ unsigned int i;
+ struct scatterlist *sg;
+ void *va = _va;
+ void *va_end;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ struct page *pg;
+ const size_t bytes = PAGE_SIZE;
+
+ /*
+ * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
+ */
+ pg = vmalloc_to_page(va);
+ BUG_ON(!pg);
+ sg_set_page(sg, pg, bytes, 0);
+
+ va += bytes;
+ }
+
+ va_end = _va + PAGE_SIZE * i;
+}
+
+static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
+{
+ /*
+ * Actually this is not necessary at all; it only exists for
+ * consistency and code readability.
+ */
+ BUG_ON(!sgt);
+}
+
+/* create 'da' <-> 'pa' mapping from 'sgt' */
+static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
+ const struct sg_table *sgt, u32 flags)
+{
+ int err;
+ unsigned int i, j;
+ struct scatterlist *sg;
+ u32 da = new->da_start;
+ int order;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ BUG_ON(!sgtable_ok(sgt));
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa;
+ size_t bytes;
+
+ pa = sg_phys(sg) - sg->offset;
+ bytes = sg->length + sg->offset;
+
+ flags &= ~IOVMF_PGSZ_MASK;
+
+ if (bytes_to_iopgsz(bytes) < 0)
+ goto err_out;
+
+ order = get_order(bytes);
+
+ pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
+ i, da, pa, bytes);
+
+ err = iommu_map(domain, da, pa, order, flags);
+ if (err)
+ goto err_out;
+
+ da += bytes;
+ }
+ return 0;
+
+err_out:
+ da = new->da_start;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes;
+
+ bytes = sg->length + sg->offset;
+ order = get_order(bytes);
+
+ /* ignore failures.. we're already handling one */
+ iommu_unmap(domain, da, order);
+
+ da += bytes;
+ }
+ return err;
+}
+
+/* release 'da' <-> 'pa' mapping */
+static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
+ struct iovm_struct *area)
+{
+ u32 start;
+ size_t total = area->da_end - area->da_start;
+ const struct sg_table *sgt = area->sgt;
+ struct scatterlist *sg;
+ int i, err;
+
+ BUG_ON(!sgtable_ok(sgt));
+ BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
+
+ start = area->da_start;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes;
+ int order;
+
+ bytes = sg->length + sg->offset;
+ order = get_order(bytes);
+
+ err = iommu_unmap(domain, start, order);
+ if (err < 0)
+ break;
+
+ dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
+ __func__, start, bytes, area->flags);
+
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+ total -= bytes;
+ start += bytes;
+ }
+ BUG_ON(total);
+}
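+
+/*
+ * Note: the loop above relies on each sg element having been mapped as a
+ * single iommu entry by map_iovm_area(), so get_order(bytes) reproduces
+ * the order that was originally passed to iommu_map().
+ */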
+
+/* template function for all unmapping */
+static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
+ struct omap_iommu *obj, const u32 da,
+ void (*fn)(const void *), u32 flags)
+{
+ struct sg_table *sgt = NULL;
+ struct iovm_struct *area;
+
+ if (!IS_ALIGNED(da, PAGE_SIZE)) {
+ dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
+ return NULL;
+ }
+
+ mutex_lock(&obj->mmap_lock);
+
+ area = __find_iovm_area(obj, da);
+ if (!area) {
+ dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
+ goto out;
+ }
+
+ if ((area->flags & flags) != flags) {
+ dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
+ area->flags);
+ goto out;
+ }
+ sgt = (struct sg_table *)area->sgt;
+
+ unmap_iovm_area(domain, obj, area);
+
+ fn(area->va);
+
+ dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
+ area->da_start, da, area->da_end,
+ area->da_end - area->da_start, area->flags);
+
+ free_iovm_area(obj, area);
+out:
+ mutex_unlock(&obj->mmap_lock);
+
+ return sgt;
+}
+
+static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
+ u32 da, const struct sg_table *sgt, void *va,
+ size_t bytes, u32 flags)
+{
+ int err = -ENOMEM;
+ struct iovm_struct *new;
+
+ mutex_lock(&obj->mmap_lock);
+
+ new = alloc_iovm_area(obj, da, bytes, flags);
+ if (IS_ERR(new)) {
+ err = PTR_ERR(new);
+ goto err_alloc_iovma;
+ }
+ new->va = va;
+ new->sgt = sgt;
+
+ if (map_iovm_area(domain, new, sgt, new->flags))
+ goto err_map;
+
+ mutex_unlock(&obj->mmap_lock);
+
+ dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
+ __func__, new->da_start, bytes, new->flags, va);
+
+ return new->da_start;
+
+err_map:
+ free_iovm_area(obj, new);
+err_alloc_iovma:
+ mutex_unlock(&obj->mmap_lock);
+ return err;
+}
+
+static inline u32
+__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
+ u32 da, const struct sg_table *sgt,
+ void *va, size_t bytes, u32 flags)
+{
+ return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
+}
+
+/**
+ * omap_iommu_vmap - (d)-(p)-(v) address mapper
+ * @obj: objective iommu
+ * @sgt: address of scatter gather table
+ * @flags: iovma and page property
+ *
+ * Creates a 1-n-1 mapping with the given @sgt and returns @da.
+ * All @sgt elements must be io page size aligned.
+ */
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+ const struct sg_table *sgt, u32 flags)
+{
+ size_t bytes;
+ void *va = NULL;
+
+ if (!obj || !obj->dev || !sgt)
+ return -EINVAL;
+
+ bytes = sgtable_len(sgt);
+ if (!bytes)
+ return -EINVAL;
+ bytes = PAGE_ALIGN(bytes);
+
+ if (flags & IOVMF_MMIO) {
+ va = vmap_sg(sgt);
+ if (IS_ERR(va))
+ return PTR_ERR(va);
+ }
+
+ flags |= IOVMF_DISCONT;
+ flags |= IOVMF_MMIO;
+
+ da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
+ if (IS_ERR_VALUE(da))
+ vunmap_sg(va);
+
+ return da + sgtable_offset(sgt);
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vmap);
+
+/**
+ * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Free the iommu virtually contiguous memory area starting at
+ * @da, which was returned by 'omap_iommu_vmap()'.
+ */
+struct sg_table *
+omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
+{
+ struct sg_table *sgt;
+ /*
+ * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
+ * Just return 'sgt' to the caller to free.
+ */
+ da &= PAGE_MASK;
+ sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
+ IOVMF_DISCONT | IOVMF_MMIO);
+ if (!sgt)
+ dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+ return sgt;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
+
+/**
+ * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
+ * @obj: objective iommu
+ * @da: contiguous iommu virtual memory
+ * @bytes: allocation size
+ * @flags: iovma and page property
+ *
+ * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
+ * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
+ */
+u32
+omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+ size_t bytes, u32 flags)
+{
+ void *va;
+ struct sg_table *sgt;
+
+ if (!obj || !obj->dev || !bytes)
+ return -EINVAL;
+
+ bytes = PAGE_ALIGN(bytes);
+
+ va = vmalloc(bytes);
+ if (!va)
+ return -ENOMEM;
+
+ flags |= IOVMF_DISCONT;
+ flags |= IOVMF_ALLOC;
+
+ sgt = sgtable_alloc(bytes, flags, da, 0);
+ if (IS_ERR(sgt)) {
+ da = PTR_ERR(sgt);
+ goto err_sgt_alloc;
+ }
+ sgtable_fill_vmalloc(sgt, va);
+
+ da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
+ if (IS_ERR_VALUE(da))
+ goto err_iommu_vmap;
+
+ return da;
+
+err_iommu_vmap:
+ sgtable_drain_vmalloc(sgt);
+ sgtable_free(sgt);
+err_sgt_alloc:
+ vfree(va);
+ return da;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
+
+/**
+ * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, as obtained from 'omap_iommu_vmalloc()'.
+ */
+void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+ const u32 da)
+{
+ struct sg_table *sgt;
+
+ sgt = unmap_vm_area(domain, obj, da, vfree,
+ IOVMF_DISCONT | IOVMF_ALLOC);
+ if (!sgt)
+ dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+ sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vfree);
+
+static int __init iovmm_init(void)
+{
+ const unsigned long flags = SLAB_HWCACHE_ALIGN;
+ struct kmem_cache *p;
+
+ p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
+ flags, NULL);
+ if (!p)
+ return -ENOMEM;
+ iovm_area_cachep = p;
+
+ return 0;
+}
+module_init(iovmm_init);
+
+static void __exit iovmm_exit(void)
+{
+ kmem_cache_destroy(iovm_area_cachep);
+}
+module_exit(iovmm_exit);
+
+MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 4fb601670de..a233ed53913 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -5,7 +5,7 @@
menuconfig ISDN
bool "ISDN support"
depends on NET
- depends on !S390
+ depends on !S390 && !UML
---help---
ISDN ("Integrated Services Digital Network", called RNIS in France)
is a fully digital telephone service that can be used for voice and
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index ea2dff602e4..8d51cd1bf67 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -14,6 +14,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
+#include <linux/export.h>
static char *state2str(unsigned short state)
{
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index c3b1dc3a13a..fddae72e3f9 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -16,6 +16,7 @@
#include "gigaset.h"
#include <linux/crc-ccitt.h>
#include <linux/bitrev.h>
+#include <linux/export.h>
/* check if byte must be stuffed/escaped
* I'm not sure which data should be encoded.
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 658e75f18d0..6d5ceeece9f 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -17,6 +17,7 @@
#include <linux/isdn/capilli.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
+#include <linux/export.h>
/* missing from kernelcapi.h */
#define CapiNcpiNotSupportedByProtocol 0x0001
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
index bd0b1eaa757..19b1c779d50 100644
--- a/drivers/isdn/gigaset/dummyll.c
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -11,6 +11,7 @@
* =====================================================================
*/
+#include <linux/export.h>
#include "gigaset.h"
void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index ba74646cf0e..6d12623c1db 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -13,6 +13,7 @@
* =====================================================================
*/
+#include <linux/export.h>
#include "gigaset.h"
/* ========================================================== */
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 9bec8b96996..04231cb2f03 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -15,6 +15,7 @@
#include "gigaset.h"
#include <linux/isdnif.h>
+#include <linux/export.h>
#define SBUFSIZE 4096 /* sk_buff payload size */
#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index e35058bcd7b..ee0a549a933 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -14,6 +14,7 @@
#include "gigaset.h"
#include <linux/gigaset_dev.h>
#include <linux/tty_flip.h>
+#include <linux/module.h>
/*** our ioctls ***/
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index d13fa5b119f..7034af28d46 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/mISDNhw.h>
+#include <linux/module.h>
#include "isar.h"
#define ISAR_REV "2.1"
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index 37e685eafd2..c4897e1075d 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -65,7 +65,7 @@ hisax_findcard(int driverid)
return (struct IsdnCardState *) 0;
}
-static __attribute__((format(printf, 3, 4))) void
+static __printf(3, 4) void
link_debug(struct Channel *chanp, int direction, char *fmt, ...)
{
va_list args;
@@ -1068,7 +1068,7 @@ init_d_st(struct Channel *chanp)
return 0;
}
-static __attribute__((format(printf, 2, 3))) void
+static __printf(2, 3) void
callc_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 0a5c42a3f12..aff45a11a92 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1287,9 +1287,9 @@ int jiftime(char *s, long mark);
int HiSax_command(isdn_ctrl * ic);
int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
-__attribute__((format(printf, 3, 4)))
+__printf(3, 4)
void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
-__attribute__((format(printf, 3, 0)))
+__printf(3, 0)
void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
void HiSax_reportcard(int cardnr, int sel);
int QuickHex(char *txt, u_char * p, int cnt);
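The __printf(a, b) markers introduced in this hunk are the kernel's shorthand for GCC's printf-format attribute: argument a is the position of the format string, argument b the first variadic argument to check. A minimal user-space sketch of the same mechanism (the macro body matches what the kernel shorthand expands to; everything else here is illustrative):

#include <stdarg.h>
#include <stdio.h>

/* same expansion the kernel shorthand provides */
#define __printf(a, b) __attribute__((format(printf, a, b)))

/* format string is parameter 2, variadic arguments start at parameter 3 */
static __printf(2, 3) void chan_debug(int chan, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printf("ch%d: ", chan);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	chan_debug(1, "state %d\n", 42);
	/* chan_debug(1, "state %d\n", "oops"); would now warn at compile time */
	return 0;
}

With the attribute attached, mismatched conversion specifiers are caught at compile time, exactly as the open-coded __attribute__ lines above already did; the conversion only shortens them.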
diff --git a/drivers/isdn/hisax/isdnl1.h b/drivers/isdn/hisax/isdnl1.h
index 425d86116f2..66ddcab19bb 100644
--- a/drivers/isdn/hisax/isdnl1.h
+++ b/drivers/isdn/hisax/isdnl1.h
@@ -21,7 +21,7 @@
#define B_XMTBUFREADY 1
#define B_ACKPENDING 2
-__attribute__((format(printf, 2, 3)))
+__printf(2, 3)
void debugl1(struct IsdnCardState *cs, char *fmt, ...);
void DChannel_proc_xmt(struct IsdnCardState *cs);
void DChannel_proc_rcv(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index ad291f21b20..1c24e4457b6 100644
--- a/drivers/isdn/hisax/isdnl3.c
+++ b/drivers/isdn/hisax/isdnl3.c
@@ -66,7 +66,7 @@ static char *strL3Event[] =
"EV_TIMEOUT",
};
-static __attribute__((format(printf, 2, 3))) void
+static __printf(2, 3) void
l3m_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
index b0d9ab1f21c..6a8acf65777 100644
--- a/drivers/isdn/hisax/l3dss1.c
+++ b/drivers/isdn/hisax/l3dss1.c
@@ -353,7 +353,7 @@ l3dss1_parse_facility(struct PStack *st, struct l3_process *pc,
{ l3dss1_dummy_invoke(st, cr, id, ident, p, nlen);
return;
}
-#ifdef HISAX_DE_AOC
+#ifdef CONFIG_DE_AOC
{
#define FOO1(s,a,b) \
@@ -422,9 +422,9 @@ l3dss1_parse_facility(struct PStack *st, struct l3_process *pc,
#undef FOO1
}
-#else /* not HISAX_DE_AOC */
+#else /* not CONFIG_DE_AOC */
l3_debug(st, "invoke break");
-#endif /* not HISAX_DE_AOC */
+#endif /* not CONFIG_DE_AOC */
break;
case 2: /* return result */
/* if no process available handle separately */
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index 44082637a09..db247b79e56 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -167,7 +167,7 @@ static struct FsmNode L1FnList[] __initdata =
{ST_L1_F8, EV_IND_RSY, l1_ignore},
};
-static __attribute__((format(printf, 2, 3)))
+static __printf(2, 3)
void l1m_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
@@ -270,7 +270,7 @@ static char *strDoutEvent[] =
"EV_DOUT_UNDERRUN",
};
-static __attribute__((format(printf, 2, 3)))
+static __printf(2, 3)
void dout_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index e5546cb3ac6..2c26b64ebbe 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -750,6 +750,7 @@ isdn_tty_modem_hup(modem_info * info, int local)
* supplementary service (CAPI 2.0 part III)
*/
#include <linux/isdn/capicmd.h>
+#include <linux/module.h>
int
isdn_tty_capi_facility(capi_msg *cm) {
diff --git a/drivers/isdn/mISDN/clock.c b/drivers/isdn/mISDN/clock.c
index 1fa629b3b94..7418f2d811d 100644
--- a/drivers/isdn/mISDN/clock.c
+++ b/drivers/isdn/mISDN/clock.c
@@ -38,6 +38,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/mISDNif.h>
+#include <linux/export.h>
#include "core.h"
static u_int *debug;
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
index 9c7c6451bf3..b8f18bd09e4 100644
--- a/drivers/isdn/mISDN/dsp_audio.c
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
+#include <linux/export.h>
#include "core.h"
#include "dsp.h"
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 621f3100709..b6c9a588934 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -30,6 +30,7 @@
#include <linux/string.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
+#include <linux/export.h>
#include "dsp.h"
#include "dsp_hwec.h"
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 8e325227b4c..738ea8dd0ad 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -17,6 +17,7 @@
#include <linux/mISDNif.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "core.h"
static u_int *debug;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 807c875f1c2..ff203a42186 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -113,14 +113,6 @@ config LEDS_WRAP
help
This option enables support for the PCEngines WRAP programmable LEDs.
-config LEDS_ALIX2
- tristate "LED Support for ALIX.2 and ALIX.3 series"
- depends on LEDS_CLASS
- depends on X86 && !GPIO_CS5535 && !CS5535_GPIO
- help
- This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
- You have to set leds-alix2.force=1 for boards with Award BIOS.
-
config LEDS_COBALT_QUBE
tristate "LED Support for the Cobalt Qube series front LED"
depends on LEDS_CLASS
@@ -383,6 +375,18 @@ config LEDS_ASIC3
cannot be used. This driver supports hardware blinking with an on+off
period from 62ms to 125s. Say Y to enable LEDs on the HP iPAQ hx4700.
+config LEDS_RENESAS_TPU
+ bool "LED support for Renesas TPU"
+ depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
+ help
+ This option enables build of the LED TPU platform driver,
+ suitable to drive any TPU channel on newer Renesas SoCs.
+ The driver controls the GPIO pin connected to the LED via
+ the GPIO framework and expects the LED to be connected to
+ a pin that can be driven both in GPIO mode and as a TPU
+ pin function; the latter is needed for brightness control.
+ Brightness control is supported but hardware blinking is not.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index bbfd2e367dc..e4f6bf56888 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
-obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o
obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
@@ -43,6 +42,7 @@ obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
+obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/dell-led.c b/drivers/leds/dell-led.c
index 52590296af3..e5c57389efd 100644
--- a/drivers/leds/dell-led.c
+++ b/drivers/leds/dell-led.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/leds.h>
#include <linux/slab.h>
+#include <linux/module.h>
MODULE_AUTHOR("Louis Davis/Jim Dailey");
MODULE_DESCRIPTION("Dell LED Control Driver");
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index dc3d3d83191..661b692573e 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -267,9 +267,14 @@ void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
+ del_timer_sync(&led_cdev->blink_timer);
+
if (led_cdev->blink_set &&
- !led_cdev->blink_set(led_cdev, delay_on, delay_off))
+ !led_cdev->blink_set(led_cdev, delay_on, delay_off)) {
+ led_cdev->blink_delay_on = *delay_on;
+ led_cdev->blink_delay_off = *delay_off;
return;
+ }
/* blink with 1 Hz as default if nothing specified */
if (!*delay_on && !*delay_off)
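The led-class.c hunk stops any running software-blink timer before consulting the driver and records the delays that a successful hardware blink_set hook accepted, so later reads of blink_delay_on/off reflect what the hardware is actually doing. A compact user-space sketch of that try-hardware-then-fall-back flow (the struct and the software-timer stand-in are simplifications, not the kernel API):

#include <stdio.h>

struct led {
	unsigned long blink_delay_on, blink_delay_off;
	/* returns 0 when the hardware accepts (and possibly adjusts) the delays */
	int (*blink_set)(struct led *led, unsigned long *on, unsigned long *off);
};

static void led_blink_set(struct led *led, unsigned long *on, unsigned long *off)
{
	/* the kernel calls del_timer_sync() here to stop any software blink */

	if (led->blink_set && !led->blink_set(led, on, off)) {
		led->blink_delay_on = *on;	/* remember what the hardware took */
		led->blink_delay_off = *off;
		return;
	}

	if (!*on && !*off)			/* blink at 1 Hz by default */
		*on = *off = 500;

	/* ...a software timer would be armed with *on / *off here... */
	led->blink_delay_on = *on;
	led->blink_delay_off = *off;
}

static int no_hw_blink(struct led *led, unsigned long *on, unsigned long *off)
{
	(void)led; (void)on; (void)off;
	return -1;	/* hardware blinking not supported */
}

int main(void)
{
	struct led led = { 0, 0, no_hw_blink };
	unsigned long on = 0, off = 0;

	led_blink_set(&led, &on, &off);
	printf("software blink: %lu ms on / %lu ms off\n",
	       led.blink_delay_on, led.blink_delay_off);
	return 0;
}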
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 4bebae73334..6f1ff93d7ce 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -261,9 +261,12 @@ void led_trigger_register_simple(const char *name, struct led_trigger **tp)
if (trigger) {
trigger->name = name;
err = led_trigger_register(trigger);
- if (err < 0)
+ if (err < 0) {
+ kfree(trigger);
+ trigger = NULL;
printk(KERN_WARNING "LED trigger %s failed to register"
" (%d)\n", name, err);
+ }
} else
printk(KERN_WARNING "LED trigger %s failed to register"
" (no memory)\n", name);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 0d4c16678ac..0810604dc70 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mfd/88pm860x.h>
+#include <linux/module.h>
#define LED_PWM_SHIFT (3)
#define LED_PWM_MASK (0x1F)
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
deleted file mode 100644
index f59ffadf512..00000000000
--- a/drivers/leds/leds-alix2.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * LEDs driver for PCEngines ALIX.2 and ALIX.3
- *
- * Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-
-static int force = 0;
-module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
-
-#define MSR_LBAR_GPIO 0x5140000C
-#define CS5535_GPIO_SIZE 256
-
-static u32 gpio_base;
-
-static struct pci_device_id divil_pci[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
- { } /* NULL entry */
-};
-MODULE_DEVICE_TABLE(pci, divil_pci);
-
-struct alix_led {
- struct led_classdev cdev;
- unsigned short port;
- unsigned int on_value;
- unsigned int off_value;
-};
-
-static void alix_led_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct alix_led *led_dev =
- container_of(led_cdev, struct alix_led, cdev);
-
- if (brightness)
- outl(led_dev->on_value, gpio_base + led_dev->port);
- else
- outl(led_dev->off_value, gpio_base + led_dev->port);
-}
-
-static struct alix_led alix_leds[] = {
- {
- .cdev = {
- .name = "alix:1",
- .brightness_set = alix_led_set,
- },
- .port = 0x00,
- .on_value = 1 << 22,
- .off_value = 1 << 6,
- },
- {
- .cdev = {
- .name = "alix:2",
- .brightness_set = alix_led_set,
- },
- .port = 0x80,
- .on_value = 1 << 25,
- .off_value = 1 << 9,
- },
- {
- .cdev = {
- .name = "alix:3",
- .brightness_set = alix_led_set,
- },
- .port = 0x80,
- .on_value = 1 << 27,
- .off_value = 1 << 11,
- },
-};
-
-static int __init alix_led_probe(struct platform_device *pdev)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(alix_leds); i++) {
- alix_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = led_classdev_register(&pdev->dev, &alix_leds[i].cdev);
- if (ret < 0)
- goto fail;
- }
- return 0;
-
-fail:
- while (--i >= 0)
- led_classdev_unregister(&alix_leds[i].cdev);
- return ret;
-}
-
-static int alix_led_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(alix_leds); i++)
- led_classdev_unregister(&alix_leds[i].cdev);
- return 0;
-}
-
-static struct platform_driver alix_led_driver = {
- .remove = alix_led_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int __init alix_present(unsigned long bios_phys,
- const char *alix_sig,
- size_t alix_sig_len)
-{
- const size_t bios_len = 0x00010000;
- const char *bios_virt;
- const char *scan_end;
- const char *p;
- char name[64];
-
- if (force) {
- printk(KERN_NOTICE "%s: forced to skip BIOS test, "
- "assume system has ALIX.2 style LEDs\n",
- KBUILD_MODNAME);
- return 1;
- }
-
- bios_virt = phys_to_virt(bios_phys);
- scan_end = bios_virt + bios_len - (alix_sig_len + 2);
- for (p = bios_virt; p < scan_end; p++) {
- const char *tail;
- char *a;
-
- if (memcmp(p, alix_sig, alix_sig_len) != 0)
- continue;
-
- memcpy(name, p, sizeof(name));
-
- /* remove the first \0 character from string */
- a = strchr(name, '\0');
- if (a)
- *a = ' ';
-
- /* cut the string at a newline */
- a = strchr(name, '\r');
- if (a)
- *a = '\0';
-
- tail = p + alix_sig_len;
- if ((tail[0] == '2' || tail[0] == '3')) {
- printk(KERN_INFO
- "%s: system is recognized as \"%s\"\n",
- KBUILD_MODNAME, name);
- return 1;
- }
- }
-
- return 0;
-}
-
-static struct platform_device *pdev;
-
-static int __init alix_pci_led_init(void)
-{
- u32 low, hi;
-
- if (pci_dev_present(divil_pci) == 0) {
- printk(KERN_WARNING KBUILD_MODNAME": DIVIL not found\n");
- return -ENODEV;
- }
-
- /* Grab the GPIO I/O range */
- rdmsr(MSR_LBAR_GPIO, low, hi);
-
- /* Check the mask and whether GPIO is enabled (sanity check) */
- if (hi != 0x0000f001) {
- printk(KERN_WARNING KBUILD_MODNAME": GPIO not enabled\n");
- return -ENODEV;
- }
-
- /* Mask off the IO base address */
- gpio_base = low & 0x0000ff00;
-
- if (!request_region(gpio_base, CS5535_GPIO_SIZE, KBUILD_MODNAME)) {
- printk(KERN_ERR KBUILD_MODNAME": can't allocate I/O for GPIO\n");
- return -ENODEV;
- }
-
- /* Set GPIO function to output */
- outl(1 << 6, gpio_base + 0x04);
- outl(1 << 9, gpio_base + 0x84);
- outl(1 << 11, gpio_base + 0x84);
-
- return 0;
-}
-
-static int __init alix_led_init(void)
-{
- int ret = -ENODEV;
- const char tinybios_sig[] = "PC Engines ALIX.";
- const char coreboot_sig[] = "PC Engines\0ALIX.";
-
- if (alix_present(0xf0000, tinybios_sig, sizeof(tinybios_sig) - 1) ||
- alix_present(0x500, coreboot_sig, sizeof(coreboot_sig) - 1))
- ret = alix_pci_led_init();
-
- if (ret < 0)
- return ret;
-
- pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
- if (!IS_ERR(pdev)) {
- ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
- if (ret)
- platform_device_unregister(pdev);
- } else
- ret = PTR_ERR(pdev);
-
- return ret;
-}
-
-static void __exit alix_led_exit(void)
-{
- platform_device_unregister(pdev);
- platform_driver_unregister(&alix_led_driver);
- release_region(gpio_base, CS5535_GPIO_SIZE);
-}
-
-module_init(alix_led_init);
-module_exit(alix_led_exit);
-
-MODULE_AUTHOR("Constantin Baranov <const@mimas.ru>");
-MODULE_DESCRIPTION("PCEngines ALIX.2 and ALIX.3 LED driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 22f847c890c..48d9fe61bdf 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -14,6 +14,7 @@
#include <linux/mfd/asic3.h>
#include <linux/mfd/core.h>
+#include <linux/module.h>
/*
* The HTC ASIC3 LED GPIOs are inputs, not outputs.
@@ -107,9 +108,10 @@ static int __devinit asic3_led_probe(struct platform_device *pdev)
}
led->cdev->name = led->name;
- led->cdev->default_trigger = led->default_trigger;
+ led->cdev->flags = LED_CORE_SUSPENDRESUME;
led->cdev->brightness_set = brightness_set;
led->cdev->blink_set = blink_set;
+ led->cdev->default_trigger = led->default_trigger;
ret = led_classdev_register(&pdev->dev, led->cdev);
if (ret < 0)
@@ -136,12 +138,44 @@ static int __devexit asic3_led_remove(struct platform_device *pdev)
return mfd_cell_disable(pdev);
}
+static int asic3_led_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int ret;
+
+ ret = 0;
+ if (cell->suspend)
+ ret = (*cell->suspend)(pdev);
+
+ return ret;
+}
+
+static int asic3_led_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int ret;
+
+ ret = 0;
+ if (cell->resume)
+ ret = (*cell->resume)(pdev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops asic3_led_pm_ops = {
+ .suspend = asic3_led_suspend,
+ .resume = asic3_led_resume,
+};
+
static struct platform_driver asic3_led_driver = {
.probe = asic3_led_probe,
.remove = __devexit_p(asic3_led_remove),
.driver = {
.name = "leds-asic3",
.owner = THIS_MODULE,
+ .pm = &asic3_led_pm_ops,
},
};
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index c941d906bba..109c875ea23 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -4,6 +4,7 @@
#include <linux/io.h>
#include <linux/atmel_pwm.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct pwmled {
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index 438d4838463..aac1c073fe7 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/export.h>
#define LED_WEB 0x04
#define LED_POWER_OFF 0x08
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index d11d05be0de..49aceffaa5b 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <asm/io.h>
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 3d8bc327a68..399a86f2013 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -18,6 +18,7 @@
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/module.h>
#include <asm/gpio.h>
@@ -121,7 +122,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
}
led_dat->cdev.brightness_set = gpio_led_set;
if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
- state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
+ state = !!gpio_get_value_cansleep(led_dat->gpio) ^ led_dat->active_low;
else
state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 3dd7090a9a9..0630e4f4b28 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -18,6 +18,7 @@
#include <linux/led-lm3530.h>
#include <linux/types.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#define LM3530_LED_DEV "lcd-backlight"
#define LM3530_NAME "lm3530-led"
@@ -421,7 +422,6 @@ err_class_register:
err_reg_init:
regulator_put(drvdata->regulator);
err_regulator_get:
- i2c_set_clientdata(client, NULL);
kfree(drvdata);
err_out:
return err;
@@ -449,7 +449,7 @@ MODULE_DEVICE_TABLE(i2c, lm3530_id);
static struct i2c_driver lm3530_i2c_driver = {
.probe = lm3530_probe,
- .remove = lm3530_remove,
+ .remove = __devexit_p(lm3530_remove),
.id_table = lm3530_id,
.driver = {
.name = LM3530_NAME,
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 1f7c10f6b7f..80ba048889d 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/device.h>
#include <linux/leds.h>
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 9fc122c81f0..cb641f1b334 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -97,6 +97,9 @@
/* Status */
#define LP5521_EXT_CLK_USED 0x08
+/* default R channel current register value */
+#define LP5521_REG_R_CURR_DEFAULT 0xAF
+
struct lp5521_engine {
int id;
u8 mode;
@@ -175,14 +178,14 @@ static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
mode = LP5521_CMD_DIRECT;
ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
+ if (ret < 0)
+ return ret;
/* set mode only for this engine */
engine_state &= ~(engine->engine_mask);
mode &= engine->engine_mask;
engine_state |= mode;
- ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
-
- return ret;
+ return lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
}
static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
@@ -643,6 +646,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
struct lp5521_chip *chip;
struct lp5521_platform_data *pdata;
int ret, i, led;
+ u8 buf;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -681,6 +685,20 @@ static int __devinit lp5521_probe(struct i2c_client *client,
* Exact value is not available. 10 - 20ms
* appears to be enough for reset.
*/
+
+ /*
+ * Make sure that the chip is reset by reading back the r channel
+ * current reg. This dummy read is required on some platforms -
+ * otherwise further access to the R G B channels in the
+ * LP5521_REG_ENABLE register will not have any effect - strange!
+ */
+ lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
+ if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ dev_err(&client->dev, "error in resetting chip\n");
+ goto fail2;
+ }
+ usleep_range(10000, 20000);
+
ret = lp5521_detect(client);
if (ret) {
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 2579678f97a..53f67b8ce55 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct lt3593_led_data {
struct led_classdev cdev;
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index f369e56d654..b3393a9f213 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -21,13 +21,13 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
-#include <linux/mfd/mc13783.h>
+#include <linux/mfd/mc13xxx.h>
#include <linux/slab.h>
struct mc13783_led {
struct led_classdev cdev;
struct work_struct work;
- struct mc13783 *master;
+ struct mc13xxx *master;
enum led_brightness new_brightness;
int id;
};
@@ -111,11 +111,11 @@ static void mc13783_led_work(struct work_struct *work)
break;
}
- mc13783_lock(led->master);
+ mc13xxx_lock(led->master);
- mc13783_reg_rmw(led->master, reg, mask, value);
+ mc13xxx_reg_rmw(led->master, reg, mask, value);
- mc13783_unlock(led->master);
+ mc13xxx_unlock(led->master);
}
static void mc13783_led_set(struct led_classdev *led_cdev,
@@ -172,23 +172,23 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
break;
}
- mc13783_lock(led->master);
+ mc13xxx_lock(led->master);
- ret = mc13783_reg_rmw(led->master, reg, mask << shift,
+ ret = mc13xxx_reg_rmw(led->master, reg, mask << shift,
value << shift);
- mc13783_unlock(led->master);
+ mc13xxx_unlock(led->master);
return ret;
}
static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13xxx *dev = dev_get_drvdata(pdev->dev.parent);
int ret = 0;
int reg = 0;
- mc13783_lock(dev);
+ mc13xxx_lock(dev);
if (pdata->flags & MC13783_LED_TC1HALF)
reg |= MC13783_LED_C1_TC1HALF_BIT;
@@ -196,7 +196,7 @@ static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
if (pdata->flags & MC13783_LED_SLEWLIMTC)
reg |= MC13783_LED_Cx_SLEWLIM_BIT;
- ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg);
+ ret = mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg);
if (ret)
goto out;
@@ -206,7 +206,7 @@ static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
if (pdata->flags & MC13783_LED_SLEWLIMBL)
reg |= MC13783_LED_Cx_SLEWLIM_BIT;
- ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg);
+ ret = mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg);
if (ret)
goto out;
@@ -216,7 +216,7 @@ static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
if (pdata->flags & MC13783_LED_TRIODE_TC1)
reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
- ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg);
+ ret = mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg);
if (ret)
goto out;
@@ -226,7 +226,7 @@ static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
if (pdata->flags & MC13783_LED_TRIODE_TC2)
reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
- ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg);
+ ret = mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg);
if (ret)
goto out;
@@ -236,7 +236,7 @@ static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
if (pdata->flags & MC13783_LED_TRIODE_TC3)
reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
- ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg);
+ ret = mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg);
if (ret)
goto out;
@@ -255,17 +255,17 @@ static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
reg |= (pdata->abref & MC13783_LED_C0_ABREF_MASK) <<
MC13783_LED_C0_ABREF;
- ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg);
+ ret = mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg);
out:
- mc13783_unlock(dev);
+ mc13xxx_unlock(dev);
return ret;
}
static int __devinit mc13783_led_probe(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct mc13783_led_platform_data *led_cur;
+ struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13xxx_led_platform_data *led_cur;
struct mc13783_led *led, *led_dat;
int ret, i;
int init_led = 0;
@@ -351,9 +351,9 @@ err_free:
static int __devexit mc13783_led_remove(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mc13783_led *led = platform_get_drvdata(pdev);
- struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ struct mc13xxx *dev = dev_get_drvdata(pdev->dev.parent);
int i;
for (i = 0; i < pdata->num_leds; i++) {
@@ -361,16 +361,16 @@ static int __devexit mc13783_led_remove(struct platform_device *pdev)
cancel_work_sync(&led[i].work);
}
- mc13783_lock(dev);
+ mc13xxx_lock(dev);
- mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0);
- mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0);
- mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0);
- mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0);
- mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0);
- mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0);
+ mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0);
+ mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0);
+ mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0);
+ mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0);
+ mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0);
+ mc13xxx_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0);
- mc13783_unlock(dev);
+ mc13xxx_unlock(dev);
kfree(led);
return 0;
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index 93987a12da4..f117f7326c5 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -18,6 +18,7 @@
#include <asm/io.h>
#include <linux/nsc_gpio.h>
#include <linux/scx200_gpio.h>
+#include <linux/module.h>
#define DRVNAME "net48xx-led"
#define NET48XX_ERROR_LED_GPIO 20
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 7e764b8365e..0555d4709a7 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -16,6 +16,7 @@
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <asm/geode.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index f77d48d0b3e..37b7d0cfe58 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/leds.h>
+#include <linux/module.h>
#include <mach/leds-ns2.h>
/*
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
new file mode 100644
index 00000000000..3ee540eb127
--- /dev/null
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -0,0 +1,357 @@
+/*
+ * LED control using Renesas TPU
+ *
+ * Copyright (C) 2011 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/printk.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+
+enum r_tpu_pin { R_TPU_PIN_UNUSED, R_TPU_PIN_GPIO, R_TPU_PIN_GPIO_FN };
+enum r_tpu_timer { R_TPU_TIMER_UNUSED, R_TPU_TIMER_ON };
+
+struct r_tpu_priv {
+ struct led_classdev ldev;
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct platform_device *pdev;
+ enum r_tpu_pin pin_state;
+ enum r_tpu_timer timer_state;
+ unsigned long min_rate;
+ unsigned int refresh_rate;
+ struct work_struct work;
+ enum led_brightness new_brightness;
+};
+
+static DEFINE_SPINLOCK(r_tpu_lock);
+
+#define TSTR -1 /* Timer start register (shared register) */
+#define TCR 0 /* Timer control register (+0x00) */
+#define TMDR 1 /* Timer mode register (+0x04) */
+#define TIOR 2 /* Timer I/O control register (+0x08) */
+#define TIER 3 /* Timer interrupt enable register (+0x0c) */
+#define TSR 4 /* Timer status register (+0x10) */
+#define TCNT 5 /* Timer counter (+0x14) */
+#define TGRA 6 /* Timer general register A (+0x18) */
+#define TGRB 7 /* Timer general register B (+0x1c) */
+#define TGRC 8 /* Timer general register C (+0x20) */
+#define TGRD 9 /* Timer general register D (+0x24) */
+
+static inline unsigned short r_tpu_read(struct r_tpu_priv *p, int reg_nr)
+{
+ struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs = reg_nr << 2;
+
+ if (reg_nr == TSTR)
+ return ioread16(base - cfg->channel_offset);
+
+ return ioread16(base + offs);
+}
+
+static inline void r_tpu_write(struct r_tpu_priv *p, int reg_nr,
+ unsigned short value)
+{
+ struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs = reg_nr << 2;
+
+ if (reg_nr == TSTR) {
+ iowrite16(value, base - cfg->channel_offset);
+ return;
+ }
+
+ iowrite16(value, base + offs);
+}
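The two accessors above turn a symbolic register index into a byte offset with reg_nr << 2 and special-case TSTR, which sits channel_offset bytes below the channel base because it is shared by all timer channels. A stand-alone check of the offset arithmetic (indices and expected offsets are copied from the register table above):

#include <stdio.h>

#define TCR	0	/* expected +0x00 */
#define TMDR	1	/* expected +0x04 */
#define TIOR	2	/* expected +0x08 */
#define TGRA	6	/* expected +0x18 */
#define TGRB	7	/* expected +0x1c */

int main(void)
{
	const int regs[] = { TCR, TMDR, TIOR, TGRA, TGRB };
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("reg %d -> channel base + 0x%02lx\n",
		       regs[i], (unsigned long)regs[i] << 2);
	return 0;
}

TGRB (index 7) ends up at channel base + 0x1c, matching the annotation in the #define table.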
+
+static void r_tpu_start_stop_ch(struct r_tpu_priv *p, int start)
+{
+ struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
+ unsigned long flags, value;
+
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&r_tpu_lock, flags);
+ value = r_tpu_read(p, TSTR);
+
+ if (start)
+ value |= 1 << cfg->timer_bit;
+ else
+ value &= ~(1 << cfg->timer_bit);
+
+ r_tpu_write(p, TSTR, value);
+ spin_unlock_irqrestore(&r_tpu_lock, flags);
+}
+
+static int r_tpu_enable(struct r_tpu_priv *p, enum led_brightness brightness)
+{
+ struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
+ int prescaler[] = { 1, 4, 16, 64 };
+ int k, ret;
+ unsigned long rate, tmp;
+
+ if (p->timer_state == R_TPU_TIMER_ON)
+ return 0;
+
+ /* wake up device and enable clock */
+ pm_runtime_get_sync(&p->pdev->dev);
+ ret = clk_enable(p->clk);
+ if (ret) {
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
+ return ret;
+ }
+
+ /* make sure channel is disabled */
+ r_tpu_start_stop_ch(p, 0);
+
+ /* get clock rate after enabling it */
+ rate = clk_get_rate(p->clk);
+
+ /* pick the lowest acceptable rate */
+ for (k = 0; k < ARRAY_SIZE(prescaler); k++)
+ if ((rate / prescaler[k]) < p->min_rate)
+ break;
+
+ if (!k) {
+ dev_err(&p->pdev->dev, "clock rate mismatch\n");
+ goto err0;
+ }
+ dev_dbg(&p->pdev->dev, "rate = %lu, prescaler %u\n",
+ rate, prescaler[k - 1]);
+
+ /* clear TCNT on TGRB match, count on rising edge, set prescaler */
+ r_tpu_write(p, TCR, 0x0040 | (k - 1));
+
+ /* output 0 until TGRA, output 1 until TGRB */
+ r_tpu_write(p, TIOR, 0x0002);
+
+ rate /= prescaler[k - 1] * p->refresh_rate;
+ r_tpu_write(p, TGRB, rate);
+ dev_dbg(&p->pdev->dev, "TRGB = 0x%04lx\n", rate);
+
+ tmp = (cfg->max_brightness - brightness) * rate;
+ r_tpu_write(p, TGRA, tmp / cfg->max_brightness);
+ dev_dbg(&p->pdev->dev, "TRGA = 0x%04lx\n", tmp / cfg->max_brightness);
+
+ /* PWM mode */
+ r_tpu_write(p, TMDR, 0x0002);
+
+ /* enable channel */
+ r_tpu_start_stop_ch(p, 1);
+
+ p->timer_state = R_TPU_TIMER_ON;
+ return 0;
+ err0:
+ clk_disable(p->clk);
+ pm_runtime_put_sync(&p->pdev->dev);
+ return -ENOTSUPP;
+}
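r_tpu_enable() selects the largest prescaler whose divided clock still meets min_rate (max_brightness * refresh_rate), then writes the PWM period into TGRB and the low-to-high switch point into TGRA. A worked example with assumed numbers, none of which come from the patch (33 MHz peripheral clock, max_brightness 255, refresh_rate 100 Hz, brightness 128):

#include <stdio.h>

int main(void)
{
	const int prescaler[] = { 1, 4, 16, 64 };
	unsigned long rate = 33000000;		/* assumed peripheral clock, Hz */
	unsigned long max_brightness = 255;	/* assumed LED range */
	unsigned long refresh_rate = 100;	/* assumed PWM refresh, Hz */
	unsigned long brightness = 128;
	unsigned long min_rate = max_brightness * refresh_rate;
	unsigned long period, tgra;
	int k;

	/* same selection loop as r_tpu_enable(): largest still-acceptable prescaler */
	for (k = 0; k < 4; k++)
		if ((rate / prescaler[k]) < min_rate)
			break;
	if (!k) {
		printf("clock rate mismatch\n");
		return 1;
	}

	period = rate / (prescaler[k - 1] * refresh_rate);		/* -> TGRB */
	tgra = (max_brightness - brightness) * period / max_brightness;/* -> TGRA */

	printf("prescaler %d, TGRB %lu, TGRA %lu, duty %lu%%\n",
	       prescaler[k - 1], period, tgra, (period - tgra) * 100 / period);
	return 0;
}

With these inputs the 64x prescaler is chosen, TGRB is 5156 counts and TGRA about 2567, i.e. roughly a 50% duty cycle for the mid-scale brightness, which is what the output-0-until-TGRA, output-1-until-TGRB setting in r_tpu_enable() produces.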
+
+static void r_tpu_disable(struct r_tpu_priv *p)
+{
+ if (p->timer_state == R_TPU_TIMER_UNUSED)
+ return;
+
+ /* disable channel */
+ r_tpu_start_stop_ch(p, 0);
+
+ /* stop clock and mark device as idle */
+ clk_disable(p->clk);
+ pm_runtime_put_sync(&p->pdev->dev);
+
+ p->timer_state = R_TPU_TIMER_UNUSED;
+}
+
+static void r_tpu_set_pin(struct r_tpu_priv *p, enum r_tpu_pin new_state,
+ enum led_brightness brightness)
+{
+ struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
+
+ if (p->pin_state == new_state) {
+ if (p->pin_state == R_TPU_PIN_GPIO)
+ gpio_set_value(cfg->pin_gpio, brightness);
+ return;
+ }
+
+ if (p->pin_state == R_TPU_PIN_GPIO)
+ gpio_free(cfg->pin_gpio);
+
+ if (p->pin_state == R_TPU_PIN_GPIO_FN)
+ gpio_free(cfg->pin_gpio_fn);
+
+ if (new_state == R_TPU_PIN_GPIO) {
+ gpio_request(cfg->pin_gpio, cfg->name);
+ gpio_direction_output(cfg->pin_gpio, !!brightness);
+ }
+ if (new_state == R_TPU_PIN_GPIO_FN)
+ gpio_request(cfg->pin_gpio_fn, cfg->name);
+
+ p->pin_state = new_state;
+}
+
+static void r_tpu_work(struct work_struct *work)
+{
+ struct r_tpu_priv *p = container_of(work, struct r_tpu_priv, work);
+ enum led_brightness brightness = p->new_brightness;
+
+ r_tpu_disable(p);
+
+ /* off and maximum are handled as GPIO pins, in between PWM */
+ if ((brightness == 0) || (brightness == p->ldev.max_brightness))
+ r_tpu_set_pin(p, R_TPU_PIN_GPIO, brightness);
+ else {
+ r_tpu_set_pin(p, R_TPU_PIN_GPIO_FN, 0);
+ r_tpu_enable(p, brightness);
+ }
+}
+
+static void r_tpu_set_brightness(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct r_tpu_priv *p = container_of(ldev, struct r_tpu_priv, ldev);
+ p->new_brightness = brightness;
+ schedule_work(&p->work);
+}
+
+static int __devinit r_tpu_probe(struct platform_device *pdev)
+{
+ struct led_renesas_tpu_config *cfg = pdev->dev.platform_data;
+ struct r_tpu_priv *p;
+ struct resource *res;
+ int ret = -ENXIO;
+
+ if (!cfg) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ goto err0;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ goto err1;
+ }
+
+ /* map memory, let mapbase point to our channel */
+ p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (p->mapbase == NULL) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ goto err1;
+ }
+
+ /* get hold of clock */
+ p->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err2;
+ }
+
+ p->pdev = pdev;
+ p->pin_state = R_TPU_PIN_UNUSED;
+ p->timer_state = R_TPU_TIMER_UNUSED;
+ p->refresh_rate = cfg->refresh_rate ? cfg->refresh_rate : 100;
+ r_tpu_set_pin(p, R_TPU_PIN_GPIO, LED_OFF);
+ platform_set_drvdata(pdev, p);
+
+ INIT_WORK(&p->work, r_tpu_work);
+
+ p->ldev.name = cfg->name;
+ p->ldev.brightness = LED_OFF;
+ p->ldev.max_brightness = cfg->max_brightness;
+ p->ldev.brightness_set = r_tpu_set_brightness;
+ p->ldev.flags |= LED_CORE_SUSPENDRESUME;
+ ret = led_classdev_register(&pdev->dev, &p->ldev);
+ if (ret < 0)
+ goto err3;
+
+ /* max_brightness may be updated by the LED core code */
+ p->min_rate = p->ldev.max_brightness * p->refresh_rate;
+
+ pm_runtime_enable(&pdev->dev);
+ return 0;
+
+ err3:
+ r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
+ clk_put(p->clk);
+ err2:
+ iounmap(p->mapbase);
+ err1:
+ kfree(p);
+ err0:
+ return ret;
+}
+
+static int __devexit r_tpu_remove(struct platform_device *pdev)
+{
+ struct r_tpu_priv *p = platform_get_drvdata(pdev);
+
+ r_tpu_set_brightness(&p->ldev, LED_OFF);
+ led_classdev_unregister(&p->ldev);
+ cancel_work_sync(&p->work);
+ r_tpu_disable(p);
+ r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
+
+ pm_runtime_disable(&pdev->dev);
+ clk_put(p->clk);
+
+ iounmap(p->mapbase);
+ kfree(p);
+ return 0;
+}
+
+static struct platform_driver r_tpu_device_driver = {
+ .probe = r_tpu_probe,
+ .remove = __devexit_p(r_tpu_remove),
+ .driver = {
+ .name = "leds-renesas-tpu",
+ }
+};
+
+static int __init r_tpu_init(void)
+{
+ return platform_driver_register(&r_tpu_device_driver);
+}
+
+static void __exit r_tpu_exit(void)
+{
+ platform_driver_unregister(&r_tpu_device_driver);
+}
+
+module_init(r_tpu_init);
+module_exit(r_tpu_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas TPU LED Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index a77771dc2e9..29f8b0f0e2c 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -17,6 +17,7 @@
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index ef5c24140a4..b1eb34c3e81 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -18,6 +18,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/status.h>
+#include <linux/module.h>
struct wm831x_status {
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index f14edd82cb0..4a127657835 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -17,6 +17,7 @@
#include <linux/mfd/wm8350/pmic.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/module.h>
/* Microamps */
static const int isink_cur[] = {
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 2982c86ac4c..6e21e654bb0 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <asm/io.h>
#include <linux/scx200_gpio.h>
+#include <linux/module.h>
#define DRVNAME "wrap-led"
#define WRAP_POWER_LED_GPIO 2
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 2535933c49f..b5fdcb78a75 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -232,6 +232,13 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
}
}
+ /*
+ * All long-lived kernel loops need to check with this horrible
+ * thing called the freezer. If the Host is trying to suspend,
+ * it stops us.
+ */
+ try_to_freeze();
+
/* Check for signals */
if (signal_pending(current))
return -ERESTARTSYS;
@@ -246,13 +253,6 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
try_deliver_interrupt(cpu, irq, more);
/*
- * All long-lived kernel loops need to check with this horrible
- * thing called the freezer. If the Host is trying to suspend,
- * it stops us.
- */
- try_to_freeze();
-
- /*
* Just make absolutely sure the Guest is still alive. One of
* those hypercalls could have been fatal, for example.
*/
@@ -313,7 +313,7 @@ static int __init init(void)
int err;
/* Lguest can't run under Xen, VMI or itself. It does Tricky Stuff. */
- if (paravirt_enabled()) {
+ if (get_kernel_rpl() != 0) {
printk("lguest is afraid of being a guest\n");
return -EPERM;
}
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 5289ffa2e50..0dc30ffde5a 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/paravirt.h>
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index f97e625241a..ff4a0bc9904 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -13,6 +13,7 @@
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "lg.h"
/*L:056
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 817f37a875c..c9570fcf1cc 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -159,7 +159,7 @@ int macii_init(void)
err = macii_init_via();
if (err) goto out;
- err = request_irq(IRQ_MAC_ADB, macii_interrupt, IRQ_FLG_LOCK, "ADB",
+ err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
macii_interrupt);
if (err) goto out;
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index 9ab5b0c34f0..34d02a91b29 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -122,8 +122,8 @@ maciisi_init(void)
return err;
}
- if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, IRQ_FLG_LOCK | IRQ_FLG_FAST,
- "ADB", maciisi_interrupt)) {
+ if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB",
+ maciisi_interrupt)) {
printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB);
return -EAGAIN;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index f75a66e7d31..faa4741df6d 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -208,6 +208,16 @@ config DM_DEBUG
If unsure, say N.
+config DM_BUFIO
+ tristate
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ ---help---
+ This interface allows you to do buffered I/O on a device and acts
+ as a cache, holding recently-read blocks in memory and performing
+ delayed writes.
+
+source "drivers/md/persistent-data/Kconfig"
+
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
@@ -233,6 +243,32 @@ config DM_SNAPSHOT
---help---
Allow volume managers to take writable snapshots of a device.
+config DM_THIN_PROVISIONING
+ tristate "Thin provisioning target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ select DM_PERSISTENT_DATA
+ ---help---
+ Provides thin provisioning and snapshots that share a data store.
+
+config DM_DEBUG_BLOCK_STACK_TRACING
+ boolean "Keep stack trace of thin provisioning block lock holders"
+ depends on STACKTRACE_SUPPORT && DM_THIN_PROVISIONING
+ select STACKTRACE
+ ---help---
+ Enable this for messages that may help debug problems with the
+ block manager locking used by thin provisioning.
+
+ If unsure, say N.
+
+config DM_DEBUG_SPACE_MAPS
+ boolean "Extra validation for thin provisioning space maps"
+ depends on DM_THIN_PROVISIONING
+ ---help---
+ Enable this for messages that may help debug problems with the
+ space maps used by thin provisioning.
+
+ If unsure, say N.
+
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 448838b1f92..046860c7a16 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -10,6 +10,7 @@ dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-mirror-y += dm-raid1.o
dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
+dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
+obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
@@ -34,10 +36,12 @@ obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
+obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
+obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
new file mode 100644
index 00000000000..0a6806f80ab
--- /dev/null
+++ b/drivers/md/dm-bufio.c
@@ -0,0 +1,1700 @@
+/*
+ * Copyright (C) 2009-2011 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-bufio.h"
+
+#include <linux/device-mapper.h>
+#include <linux/dm-io.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/shrinker.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "bufio"
+
+/*
+ * Memory management policy:
+ * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
+ * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
+ * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
+ * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
+ * dirty buffers.
+ */
+#define DM_BUFIO_MIN_BUFFERS 8
+
+#define DM_BUFIO_MEMORY_PERCENT 2
+#define DM_BUFIO_VMALLOC_PERCENT 25
+#define DM_BUFIO_WRITEBACK_PERCENT 75
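These constants set the global memory policy spelled out in the comment above: cap the cache at DM_BUFIO_MEMORY_PERCENT of main memory or DM_BUFIO_VMALLOC_PERCENT of the vmalloc arena, whichever is smaller, and start background writeback once DM_BUFIO_WRITEBACK_PERCENT of the buffers are dirty. A minimal sketch of how the min-of-two-percentages cap works out, assuming 4 GiB of RAM and a 128 MiB vmalloc area (both figures are illustrative; the patch computes the real default elsewhere):

#include <stdio.h>

int main(void)
{
	unsigned long long mem = 4ULL << 30;		/* assumed RAM: 4 GiB */
	unsigned long long vm = 128ULL << 20;		/* assumed vmalloc: 128 MiB */
	unsigned long long by_mem = mem * 2 / 100;	/* DM_BUFIO_MEMORY_PERCENT */
	unsigned long long by_vm = vm * 25 / 100;	/* DM_BUFIO_VMALLOC_PERCENT */
	unsigned long long cap = by_mem < by_vm ? by_mem : by_vm;

	printf("cache cap = min(%llu MiB, %llu MiB) = %llu MiB\n",
	       by_mem >> 20, by_vm >> 20, cap >> 20);
	return 0;
}

On 32-bit machines the vmalloc-based limit is usually the one that bites, which is why the comment calls it out separately.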
+
+/*
+ * Check buffer ages in this interval (seconds)
+ */
+#define DM_BUFIO_WORK_TIMER_SECS 10
+
+/*
+ * Free buffers when they are older than this (seconds)
+ */
+#define DM_BUFIO_DEFAULT_AGE_SECS 60
+
+/*
+ * The number of bvec entries that are embedded directly in the buffer.
+ * If the chunk size is larger, dm-io is used to do the io.
+ */
+#define DM_BUFIO_INLINE_VECS 16
+
+/*
+ * Buffer hash
+ */
+#define DM_BUFIO_HASH_BITS 20
+#define DM_BUFIO_HASH(block) \
+ ((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
+ ((1 << DM_BUFIO_HASH_BITS) - 1))
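DM_BUFIO_HASH folds the upper bits of the block number onto the lower ones and masks the result, yielding an index into the 2^20-entry cache_hash table. A stand-alone check of the macro (the block numbers are arbitrary test values):

#include <stdio.h>

#define DM_BUFIO_HASH_BITS	20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))

int main(void)
{
	const unsigned long long blocks[] = { 0, 1, 0x100000ULL, 0xdeadbeefULL };
	unsigned int i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		printf("block %#9llx -> bucket %#llx\n", blocks[i],
		       (unsigned long long)DM_BUFIO_HASH(blocks[i]));
	return 0;
}

Blocks 1 and 0x100000 land in the same bucket, which shows the XOR fold rather than a flaw: the table has only 2^20 entries, so distinct blocks necessarily collide and are chained through hash_list.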
+
+/*
+ * Don't try to use kmem_cache_alloc for blocks larger than this.
+ * For explanation, see alloc_buffer_data below.
+ */
+#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
+#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
+
+/*
+ * dm_buffer->list_mode
+ */
+#define LIST_CLEAN 0
+#define LIST_DIRTY 1
+#define LIST_SIZE 2
+
+/*
+ * Linking of buffers:
+ * All buffers are linked to cache_hash with their hash_list field.
+ *
+ * Clean buffers that are not being written (B_WRITING not set)
+ * are linked to lru[LIST_CLEAN] with their lru_list field.
+ *
+ * Dirty and clean buffers that are being written are linked to
+ * lru[LIST_DIRTY] with their lru_list field. When the write
+ * finishes, the buffer cannot be relinked immediately (because we
+ * are in an interrupt context and relinking requires process
+ * context), so some clean-not-writing buffers can be held on
+ * dirty_lru too. They are later added to lru in the process
+ * context.
+ */
+struct dm_bufio_client {
+ struct mutex lock;
+
+ struct list_head lru[LIST_SIZE];
+ unsigned long n_buffers[LIST_SIZE];
+
+ struct block_device *bdev;
+ unsigned block_size;
+ unsigned char sectors_per_block_bits;
+ unsigned char pages_per_block_bits;
+ unsigned char blocks_per_page_bits;
+ unsigned aux_size;
+ void (*alloc_callback)(struct dm_buffer *);
+ void (*write_callback)(struct dm_buffer *);
+
+ struct dm_io_client *dm_io;
+
+ struct list_head reserved_buffers;
+ unsigned need_reserved_buffers;
+
+ struct hlist_head *cache_hash;
+ wait_queue_head_t free_buffer_wait;
+
+ int async_write_error;
+
+ struct list_head client_list;
+ struct shrinker shrinker;
+};
+
+/*
+ * Buffer state bits.
+ */
+#define B_READING 0
+#define B_WRITING 1
+#define B_DIRTY 2
+
+/*
+ * Describes how the block was allocated:
+ * kmem_cache_alloc(), __get_free_pages() or vmalloc().
+ * See the comment at alloc_buffer_data.
+ */
+enum data_mode {
+ DATA_MODE_SLAB = 0,
+ DATA_MODE_GET_FREE_PAGES = 1,
+ DATA_MODE_VMALLOC = 2,
+ DATA_MODE_LIMIT = 3
+};
+
+struct dm_buffer {
+ struct hlist_node hash_list;
+ struct list_head lru_list;
+ sector_t block;
+ void *data;
+ enum data_mode data_mode;
+ unsigned char list_mode; /* LIST_* */
+ unsigned hold_count;
+ int read_error;
+ int write_error;
+ unsigned long state;
+ unsigned long last_accessed;
+ struct dm_bufio_client *c;
+ struct bio bio;
+ struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
+};
+
+/*----------------------------------------------------------------*/
+
+static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
+static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
+
+static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
+{
+ unsigned ret = c->blocks_per_page_bits - 1;
+
+ BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
+
+ return ret;
+}
+
+#define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
+#define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
+
+#define dm_bufio_in_request() (!!current->bio_list)
+
+static void dm_bufio_lock(struct dm_bufio_client *c)
+{
+ mutex_lock_nested(&c->lock, dm_bufio_in_request());
+}
+
+static int dm_bufio_trylock(struct dm_bufio_client *c)
+{
+ return mutex_trylock(&c->lock);
+}
+
+static void dm_bufio_unlock(struct dm_bufio_client *c)
+{
+ mutex_unlock(&c->lock);
+}
+
+/*
+ * FIXME Move to sched.h?
+ */
+#ifdef CONFIG_PREEMPT_VOLUNTARY
+# define dm_bufio_cond_resched() \
+do { \
+ if (unlikely(need_resched())) \
+ _cond_resched(); \
+} while (0)
+#else
+# define dm_bufio_cond_resched() do { } while (0)
+#endif
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Default cache size: available memory divided by the ratio.
+ */
+static unsigned long dm_bufio_default_cache_size;
+
+/*
+ * Total cache size set by the user.
+ */
+static unsigned long dm_bufio_cache_size;
+
+/*
+ * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
+ * at any time. If it disagrees, the user has changed cache size.
+ */
+static unsigned long dm_bufio_cache_size_latch;
+
+static DEFINE_SPINLOCK(param_spinlock);
+
+/*
+ * Buffers are freed after this timeout
+ */
+static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+
+static unsigned long dm_bufio_peak_allocated;
+static unsigned long dm_bufio_allocated_kmem_cache;
+static unsigned long dm_bufio_allocated_get_free_pages;
+static unsigned long dm_bufio_allocated_vmalloc;
+static unsigned long dm_bufio_current_allocated;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
+ */
+static unsigned long dm_bufio_cache_size_per_client;
+
+/*
+ * The current number of clients.
+ */
+static int dm_bufio_client_count;
+
+/*
+ * The list of all clients.
+ */
+static LIST_HEAD(dm_bufio_all_clients);
+
+/*
+ * This mutex protects dm_bufio_cache_size_latch,
+ * dm_bufio_cache_size_per_client and dm_bufio_client_count
+ */
+static DEFINE_MUTEX(dm_bufio_clients_lock);
+
+/*----------------------------------------------------------------*/
+
+static void adjust_total_allocated(enum data_mode data_mode, long diff)
+{
+ static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
+ &dm_bufio_allocated_kmem_cache,
+ &dm_bufio_allocated_get_free_pages,
+ &dm_bufio_allocated_vmalloc,
+ };
+
+ spin_lock(&param_spinlock);
+
+ *class_ptr[data_mode] += diff;
+
+ dm_bufio_current_allocated += diff;
+
+ if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
+ dm_bufio_peak_allocated = dm_bufio_current_allocated;
+
+ spin_unlock(&param_spinlock);
+}
+
+/*
+ * Change the number of clients and recalculate per-client limit.
+ */
+static void __cache_size_refresh(void)
+{
+ BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
+ BUG_ON(dm_bufio_client_count < 0);
+
+ dm_bufio_cache_size_latch = dm_bufio_cache_size;
+
+ barrier();
+
+ /*
+ * Use default if set to 0 and report the actual cache size used.
+ */
+ if (!dm_bufio_cache_size_latch) {
+ (void)cmpxchg(&dm_bufio_cache_size, 0,
+ dm_bufio_default_cache_size);
+ dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
+ }
+
+ dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
+ (dm_bufio_client_count ? : 1);
+}
+
+/*
+ * Allocating buffer data.
+ *
+ * Small buffers are allocated with kmem_cache, to use space optimally.
+ *
+ * For large buffers, we choose between get_free_pages and vmalloc.
+ * Each has advantages and disadvantages.
+ *
+ * __get_free_pages can randomly fail if the memory is fragmented.
+ * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
+ * as low as 128M) so using it for caching is not appropriate.
+ *
+ * If the allocation may fail we use __get_free_pages. Memory fragmentation
+ * won't have a fatal effect here, but it just causes flushes of some other
+ * buffers and more I/O will be performed. Don't use __get_free_pages if it
+ * always fails (i.e. order >= MAX_ORDER).
+ *
+ * If the allocation shouldn't fail we use __vmalloc. This is only for the
+ * initial reserve allocation, so there's no risk of wasting all vmalloc
+ * space.
+ */
+static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ enum data_mode *data_mode)
+{
+ if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
+ *data_mode = DATA_MODE_SLAB;
+ return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
+ }
+
+ if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
+ gfp_mask & __GFP_NORETRY) {
+ *data_mode = DATA_MODE_GET_FREE_PAGES;
+ return (void *)__get_free_pages(gfp_mask,
+ c->pages_per_block_bits);
+ }
+
+ *data_mode = DATA_MODE_VMALLOC;
+ return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+}
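alloc_buffer_data() implements the three-way policy from the comment block above: slab for blocks up to half a page, __get_free_pages for larger blocks when the caller passed __GFP_NORETRY (so a failure is tolerable), and __vmalloc otherwise. A quick stand-alone table of which path a given block size would take, assuming a 4 KiB page and MAX_ORDER 11 (typical x86 values, not taken from the patch):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define MAX_ORDER	11
#define SLAB_LIMIT	(PAGE_SIZE >> 1)
#define GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/* may_fail stands in for gfp_mask & __GFP_NORETRY in the kernel code */
static const char *mode(unsigned long block_size, int may_fail)
{
	if (block_size <= SLAB_LIMIT)
		return "kmem_cache_alloc";
	if (block_size <= GFP_LIMIT && may_fail)
		return "__get_free_pages";
	return "__vmalloc";
}

int main(void)
{
	const unsigned long sizes[] = { 512, 4096, 65536, 8UL << 20 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%8lu bytes: %-16s / %s (must-not-fail path)\n",
		       sizes[i], mode(sizes[i], 1), mode(sizes[i], 0));
	return 0;
}

The must-not-fail path is only taken for the initial reserve allocation, which is why the comment notes there is no risk of exhausting vmalloc space.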
+
+/*
+ * Free buffer's data.
+ */
+static void free_buffer_data(struct dm_bufio_client *c,
+ void *data, enum data_mode data_mode)
+{
+ switch (data_mode) {
+ case DATA_MODE_SLAB:
+ kmem_cache_free(DM_BUFIO_CACHE(c), data);
+ break;
+
+ case DATA_MODE_GET_FREE_PAGES:
+ free_pages((unsigned long)data, c->pages_per_block_bits);
+ break;
+
+ case DATA_MODE_VMALLOC:
+ vfree(data);
+ break;
+
+ default:
+ DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
+ data_mode);
+ BUG();
+ }
+}
+
+/*
+ * Allocate buffer and its data.
+ */
+static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
+{
+ struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
+ gfp_mask);
+
+ if (!b)
+ return NULL;
+
+ b->c = c;
+
+ b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
+ if (!b->data) {
+ kfree(b);
+ return NULL;
+ }
+
+ adjust_total_allocated(b->data_mode, (long)c->block_size);
+
+ return b;
+}
+
+/*
+ * Free buffer and its data.
+ */
+static void free_buffer(struct dm_buffer *b)
+{
+ struct dm_bufio_client *c = b->c;
+
+ adjust_total_allocated(b->data_mode, -(long)c->block_size);
+
+ free_buffer_data(c, b->data, b->data_mode);
+ kfree(b);
+}
+
+/*
+ * Link buffer to the hash list and clean or dirty queue.
+ */
+static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
+{
+ struct dm_bufio_client *c = b->c;
+
+ c->n_buffers[dirty]++;
+ b->block = block;
+ b->list_mode = dirty;
+ list_add(&b->lru_list, &c->lru[dirty]);
+ hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
+ b->last_accessed = jiffies;
+}
+
+/*
+ * Unlink buffer from the hash list and dirty or clean queue.
+ */
+static void __unlink_buffer(struct dm_buffer *b)
+{
+ struct dm_bufio_client *c = b->c;
+
+ BUG_ON(!c->n_buffers[b->list_mode]);
+
+ c->n_buffers[b->list_mode]--;
+ hlist_del(&b->hash_list);
+ list_del(&b->lru_list);
+}
+
+/*
+ * Place the buffer to the head of dirty or clean LRU queue.
+ */
+static void __relink_lru(struct dm_buffer *b, int dirty)
+{
+ struct dm_bufio_client *c = b->c;
+
+ BUG_ON(!c->n_buffers[b->list_mode]);
+
+ c->n_buffers[b->list_mode]--;
+ c->n_buffers[dirty]++;
+ b->list_mode = dirty;
+ list_del(&b->lru_list);
+ list_add(&b->lru_list, &c->lru[dirty]);
+}
+
+/*----------------------------------------------------------------
+ * Submit I/O on the buffer.
+ *
+ * Bio interface is faster but it has some problems:
+ * the vector list is limited (increasing this limit increases
+ * memory-consumption per buffer, so it is not viable);
+ *
+ * the memory must be direct-mapped, not vmalloced;
+ *
+ * the I/O driver can reject requests spuriously if it thinks that
+ * the requests are too big for the device or if they cross a
+ * controller-defined memory boundary.
+ *
+ * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
+ * it is not vmalloced, try using the bio interface.
+ *
+ * If the buffer is big, if it is vmalloced or if the underlying device
+ * rejects the bio because it is too large, use dm-io layer to do the I/O.
+ * The dm-io layer splits the I/O into multiple requests, avoiding the above
+ * shortcomings.
+ *--------------------------------------------------------------*/
+
+/*
+ * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
+ * that the request was handled directly with bio interface.
+ */
+static void dmio_complete(unsigned long error, void *context)
+{
+ struct dm_buffer *b = context;
+
+ b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
+}
+
+static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
+ bio_end_io_t *end_io)
+{
+ int r;
+ struct dm_io_request io_req = {
+ .bi_rw = rw,
+ .notify.fn = dmio_complete,
+ .notify.context = b,
+ .client = b->c->dm_io,
+ };
+ struct dm_io_region region = {
+ .bdev = b->c->bdev,
+ .sector = block << b->c->sectors_per_block_bits,
+ .count = b->c->block_size >> SECTOR_SHIFT,
+ };
+
+ if (b->data_mode != DATA_MODE_VMALLOC) {
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = b->data;
+ } else {
+ io_req.mem.type = DM_IO_VMA;
+ io_req.mem.ptr.vma = b->data;
+ }
+
+ b->bio.bi_end_io = end_io;
+
+ r = dm_io(&io_req, 1, &region, NULL);
+ if (r)
+ end_io(&b->bio, r);
+}
+
+static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
+ bio_end_io_t *end_io)
+{
+ char *ptr;
+ int len;
+
+ bio_init(&b->bio);
+ b->bio.bi_io_vec = b->bio_vec;
+ b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+ b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+ b->bio.bi_bdev = b->c->bdev;
+ b->bio.bi_end_io = end_io;
+
+ /*
+ * We assume that if len >= PAGE_SIZE ptr is page-aligned.
+ * If len < PAGE_SIZE the buffer doesn't cross page boundary.
+ */
+ ptr = b->data;
+ len = b->c->block_size;
+
+ if (len >= PAGE_SIZE)
+ BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
+ else
+ BUG_ON((unsigned long)ptr & (len - 1));
+
+ do {
+ if (!bio_add_page(&b->bio, virt_to_page(ptr),
+ len < PAGE_SIZE ? len : PAGE_SIZE,
+ virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
+ BUG_ON(b->c->block_size <= PAGE_SIZE);
+ use_dmio(b, rw, block, end_io);
+ return;
+ }
+
+ len -= PAGE_SIZE;
+ ptr += PAGE_SIZE;
+ } while (len > 0);
+
+ submit_bio(rw, &b->bio);
+}
+
+static void submit_io(struct dm_buffer *b, int rw, sector_t block,
+ bio_end_io_t *end_io)
+{
+ if (rw == WRITE && b->c->write_callback)
+ b->c->write_callback(b);
+
+ if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
+ b->data_mode != DATA_MODE_VMALLOC)
+ use_inline_bio(b, rw, block, end_io);
+ else
+ use_dmio(b, rw, block, end_io);
+}
+
+/*----------------------------------------------------------------
+ * Writing dirty buffers
+ *--------------------------------------------------------------*/
+
+/*
+ * The endio routine for write.
+ *
+ * Set the error, clear B_WRITING bit and wake anyone who was waiting on
+ * it.
+ */
+static void write_endio(struct bio *bio, int error)
+{
+ struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
+
+ b->write_error = error;
+ if (error) {
+ struct dm_bufio_client *c = b->c;
+ (void)cmpxchg(&c->async_write_error, 0, error);
+ }
+
+ BUG_ON(!test_bit(B_WRITING, &b->state));
+
+ smp_mb__before_clear_bit();
+ clear_bit(B_WRITING, &b->state);
+ smp_mb__after_clear_bit();
+
+ wake_up_bit(&b->state, B_WRITING);
+}
+
+/*
+ * This function is called when wait_on_bit is actually waiting.
+ */
+static int do_io_schedule(void *word)
+{
+ io_schedule();
+
+ return 0;
+}
+
+/*
+ * Initiate a write on a dirty buffer, but don't wait for it.
+ *
+ * - If the buffer is not dirty, exit.
+ * - If there is some previous write going on, wait for it to finish (we
+ * can't have two writes on the same buffer simultaneously).
+ * - Submit our write and don't wait on it. We set B_WRITING indicating
+ * that there is a write in progress.
+ */
+static void __write_dirty_buffer(struct dm_buffer *b)
+{
+ if (!test_bit(B_DIRTY, &b->state))
+ return;
+
+ clear_bit(B_DIRTY, &b->state);
+ wait_on_bit_lock(&b->state, B_WRITING,
+ do_io_schedule, TASK_UNINTERRUPTIBLE);
+
+ submit_io(b, WRITE, b->block, write_endio);
+}
+
+/*
+ * Wait until any activity on the buffer finishes. Possibly write the
+ * buffer if it is dirty. When this function finishes, there is no I/O
+ * running on the buffer and the buffer is not dirty.
+ */
+static void __make_buffer_clean(struct dm_buffer *b)
+{
+ BUG_ON(b->hold_count);
+
+ if (!b->state) /* fast case */
+ return;
+
+ wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+ __write_dirty_buffer(b);
+ wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+}
+
+/*
+ * Find some buffer that is not held by anybody, clean it, unlink it and
+ * return it.
+ */
+static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
+{
+ struct dm_buffer *b;
+
+ list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
+ BUG_ON(test_bit(B_WRITING, &b->state));
+ BUG_ON(test_bit(B_DIRTY, &b->state));
+
+ if (!b->hold_count) {
+ __make_buffer_clean(b);
+ __unlink_buffer(b);
+ return b;
+ }
+ dm_bufio_cond_resched();
+ }
+
+ list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
+ BUG_ON(test_bit(B_READING, &b->state));
+
+ if (!b->hold_count) {
+ __make_buffer_clean(b);
+ __unlink_buffer(b);
+ return b;
+ }
+ dm_bufio_cond_resched();
+ }
+
+ return NULL;
+}
+
+/*
+ * Wait until some other thread frees a buffer or releases its hold count
+ * on some buffer.
+ *
+ * This function is entered with c->lock held, drops it and regains it
+ * before exiting.
+ */
+static void __wait_for_free_buffer(struct dm_bufio_client *c)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue(&c->free_buffer_wait, &wait);
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ dm_bufio_unlock(c);
+
+ io_schedule();
+
+ set_task_state(current, TASK_RUNNING);
+ remove_wait_queue(&c->free_buffer_wait, &wait);
+
+ dm_bufio_lock(c);
+}
+
+/*
+ * Allocate a new buffer. If the allocation is not possible, wait until
+ * some other thread frees a buffer.
+ *
+ * May drop the lock and regain it.
+ */
+static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c)
+{
+ struct dm_buffer *b;
+
+ /*
+ * dm-bufio is resistant to allocation failures (it just keeps
+ * one buffer reserved in case all the allocations fail).
+ * So set flags to not try too hard:
+ * GFP_NOIO: don't recurse into the I/O layer
+ * __GFP_NORETRY: don't retry and rather return failure
+ * __GFP_NOMEMALLOC: don't use emergency reserves
+ * __GFP_NOWARN: don't print a warning in case of failure
+ *
+ * For debugging, if we set the cache size to 1, no new buffers will
+ * be allocated.
+ */
+ while (1) {
+ if (dm_bufio_cache_size_latch != 1) {
+ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (b)
+ return b;
+ }
+
+ if (!list_empty(&c->reserved_buffers)) {
+ b = list_entry(c->reserved_buffers.next,
+ struct dm_buffer, lru_list);
+ list_del(&b->lru_list);
+ c->need_reserved_buffers++;
+
+ return b;
+ }
+
+ b = __get_unclaimed_buffer(c);
+ if (b)
+ return b;
+
+ __wait_for_free_buffer(c);
+ }
+}
+
+static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c)
+{
+ struct dm_buffer *b = __alloc_buffer_wait_no_callback(c);
+
+ if (c->alloc_callback)
+ c->alloc_callback(b);
+
+ return b;
+}
+
+/*
+ * Free a buffer and wake other threads waiting for free buffers.
+ */
+static void __free_buffer_wake(struct dm_buffer *b)
+{
+ struct dm_bufio_client *c = b->c;
+
+ if (!c->need_reserved_buffers)
+ free_buffer(b);
+ else {
+ list_add(&b->lru_list, &c->reserved_buffers);
+ c->need_reserved_buffers--;
+ }
+
+ wake_up(&c->free_buffer_wait);
+}
+
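+/*
+ * Start writeback of dirty buffers asynchronously, moving buffers that have
+ * already been cleaned to the clean list. If no_wait is set, stop as soon as
+ * a buffer with a write already in progress is encountered.
+ */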
+static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
+{
+ struct dm_buffer *b, *tmp;
+
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
+ BUG_ON(test_bit(B_READING, &b->state));
+
+ if (!test_bit(B_DIRTY, &b->state) &&
+ !test_bit(B_WRITING, &b->state)) {
+ __relink_lru(b, LIST_CLEAN);
+ continue;
+ }
+
+ if (no_wait && test_bit(B_WRITING, &b->state))
+ return;
+
+ __write_dirty_buffer(b);
+ dm_bufio_cond_resched();
+ }
+}
+
+/*
+ * Get writeback threshold and buffer limit for a given client.
+ */
+static void __get_memory_limit(struct dm_bufio_client *c,
+ unsigned long *threshold_buffers,
+ unsigned long *limit_buffers)
+{
+ unsigned long buffers;
+
+ if (dm_bufio_cache_size != dm_bufio_cache_size_latch) {
+ mutex_lock(&dm_bufio_clients_lock);
+ __cache_size_refresh();
+ mutex_unlock(&dm_bufio_clients_lock);
+ }
+
+ buffers = dm_bufio_cache_size_per_client >>
+ (c->sectors_per_block_bits + SECTOR_SHIFT);
+
+ if (buffers < DM_BUFIO_MIN_BUFFERS)
+ buffers = DM_BUFIO_MIN_BUFFERS;
+
+ *limit_buffers = buffers;
+ *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+}
+
+/*
+ * Check if we're over the watermark.
+ * If we're over "limit_buffers", free unclaimed buffers until we drop below
+ * the limit (or run out of buffers that nobody holds).
+ * If the number of dirty buffers exceeds "threshold_buffers", start
+ * asynchronous writeback.
+ */
+static void __check_watermark(struct dm_bufio_client *c)
+{
+ unsigned long threshold_buffers, limit_buffers;
+
+ __get_memory_limit(c, &threshold_buffers, &limit_buffers);
+
+ while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
+ limit_buffers) {
+
+ struct dm_buffer *b = __get_unclaimed_buffer(c);
+
+ if (!b)
+ return;
+
+ __free_buffer_wake(b);
+ dm_bufio_cond_resched();
+ }
+
+ if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
+ __write_dirty_buffers_async(c, 1);
+}
+
+/*
+ * Find a buffer in the hash.
+ */
+static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
+{
+ struct dm_buffer *b;
+ struct hlist_node *hn;
+
+ hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
+ hash_list) {
+ dm_bufio_cond_resched();
+ if (b->block == block)
+ return b;
+ }
+
+ return NULL;
+}
+
+/*----------------------------------------------------------------
+ * Getting a buffer
+ *--------------------------------------------------------------*/
+
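+/*
+ * How __bufio_new() treats a block that is not yet in the cache:
+ * NF_FRESH - allocate a buffer but don't read it (the caller overwrites it),
+ * NF_READ  - allocate a buffer and read it from disk,
+ * NF_GET   - don't allocate anything; return NULL if the block isn't cached.
+ */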
+enum new_flag {
+ NF_FRESH = 0,
+ NF_READ = 1,
+ NF_GET = 2
+};
+
+static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
+ enum new_flag nf, struct dm_buffer **bp,
+ int *need_submit)
+{
+ struct dm_buffer *b, *new_b = NULL;
+
+ *need_submit = 0;
+
+ b = __find(c, block);
+ if (b) {
+ b->hold_count++;
+ __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
+ test_bit(B_WRITING, &b->state));
+ return b;
+ }
+
+ if (nf == NF_GET)
+ return NULL;
+
+ new_b = __alloc_buffer_wait(c);
+
+ /*
+ * We've had a period where the mutex was unlocked, so need to
+ * recheck the hash table.
+ */
+ b = __find(c, block);
+ if (b) {
+ __free_buffer_wake(new_b);
+ b->hold_count++;
+ __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
+ test_bit(B_WRITING, &b->state));
+ return b;
+ }
+
+ __check_watermark(c);
+
+ b = new_b;
+ b->hold_count = 1;
+ b->read_error = 0;
+ b->write_error = 0;
+ __link_buffer(b, block, LIST_CLEAN);
+
+ if (nf == NF_FRESH) {
+ b->state = 0;
+ return b;
+ }
+
+ b->state = 1 << B_READING;
+ *need_submit = 1;
+
+ return b;
+}
+
+/*
+ * The endio routine for reading: set the error, clear the bit and wake up
+ * anyone waiting on the buffer.
+ */
+static void read_endio(struct bio *bio, int error)
+{
+ struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
+
+ b->read_error = error;
+
+ BUG_ON(!test_bit(B_READING, &b->state));
+
+ smp_mb__before_clear_bit();
+ clear_bit(B_READING, &b->state);
+ smp_mb__after_clear_bit();
+
+ wake_up_bit(&b->state, B_READING);
+}
+
+/*
+ * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
+ * functions is similar except that dm_bufio_new doesn't read the
+ * buffer from the disk (assuming that the caller overwrites all the data
+ * and uses dm_bufio_mark_buffer_dirty to write new data back).
+ */
+static void *new_read(struct dm_bufio_client *c, sector_t block,
+ enum new_flag nf, struct dm_buffer **bp)
+{
+ int need_submit;
+ struct dm_buffer *b;
+
+ dm_bufio_lock(c);
+ b = __bufio_new(c, block, nf, bp, &need_submit);
+ dm_bufio_unlock(c);
+
+ if (!b || IS_ERR(b))
+ return b;
+
+ if (need_submit)
+ submit_io(b, READ, b->block, read_endio);
+
+ wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+
+ if (b->read_error) {
+ int error = b->read_error;
+
+ dm_bufio_release(b);
+
+ return ERR_PTR(error);
+ }
+
+ *bp = b;
+
+ return b->data;
+}
+
+void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp)
+{
+ return new_read(c, block, NF_GET, bp);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get);
+
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp)
+{
+ BUG_ON(dm_bufio_in_request());
+
+ return new_read(c, block, NF_READ, bp);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_read);
+
+void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp)
+{
+ BUG_ON(dm_bufio_in_request());
+
+ return new_read(c, block, NF_FRESH, bp);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_new);
+
+void dm_bufio_release(struct dm_buffer *b)
+{
+ struct dm_bufio_client *c = b->c;
+
+ dm_bufio_lock(c);
+
+ BUG_ON(test_bit(B_READING, &b->state));
+ BUG_ON(!b->hold_count);
+
+ b->hold_count--;
+ if (!b->hold_count) {
+ wake_up(&c->free_buffer_wait);
+
+ /*
+ * If there were errors on the buffer, and the buffer is not
+ * to be written, free the buffer. There is no point in caching an
+ * invalid buffer.
+ */
+ if ((b->read_error || b->write_error) &&
+ !test_bit(B_WRITING, &b->state) &&
+ !test_bit(B_DIRTY, &b->state)) {
+ __unlink_buffer(b);
+ __free_buffer_wake(b);
+ }
+ }
+
+ dm_bufio_unlock(c);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_release);
+
+void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
+{
+ struct dm_bufio_client *c = b->c;
+
+ dm_bufio_lock(c);
+
+ if (!test_and_set_bit(B_DIRTY, &b->state))
+ __relink_lru(b, LIST_DIRTY);
+
+ dm_bufio_unlock(c);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
+
+void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
+{
+ BUG_ON(dm_bufio_in_request());
+
+ dm_bufio_lock(c);
+ __write_dirty_buffers_async(c, 0);
+ dm_bufio_unlock(c);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
+
+/*
+ * For performance, it is essential that the buffers are written asynchronously
+ * and simultaneously (so that the block layer can merge the writes) and then
+ * waited upon.
+ *
+ * Finally, we flush hardware disk cache.
+ */
+int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
+{
+ int a, f;
+ unsigned long buffers_processed = 0;
+ struct dm_buffer *b, *tmp;
+
+ dm_bufio_lock(c);
+ __write_dirty_buffers_async(c, 0);
+
+again:
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
+ int dropped_lock = 0;
+
+ if (buffers_processed < c->n_buffers[LIST_DIRTY])
+ buffers_processed++;
+
+ BUG_ON(test_bit(B_READING, &b->state));
+
+ if (test_bit(B_WRITING, &b->state)) {
+ if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
+ dropped_lock = 1;
+ b->hold_count++;
+ dm_bufio_unlock(c);
+ wait_on_bit(&b->state, B_WRITING,
+ do_io_schedule,
+ TASK_UNINTERRUPTIBLE);
+ dm_bufio_lock(c);
+ b->hold_count--;
+ } else
+ wait_on_bit(&b->state, B_WRITING,
+ do_io_schedule,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ if (!test_bit(B_DIRTY, &b->state) &&
+ !test_bit(B_WRITING, &b->state))
+ __relink_lru(b, LIST_CLEAN);
+
+ dm_bufio_cond_resched();
+
+ /*
+ * If we dropped the lock, the list is no longer consistent,
+ * so we must restart the search.
+ *
+ * In the most common case, the buffer just processed is
+ * relinked to the clean list, so we won't loop scanning the
+ * same buffer again and again.
+ *
+ * This may livelock if there is another thread simultaneously
+ * dirtying buffers, so we count the number of buffers walked
+ * and if it exceeds the total number of buffers, it means that
+ * someone is doing some writes simultaneously with us. In that
+ * case we stop dropping the lock and wait with it held.
+ */
+ if (dropped_lock)
+ goto again;
+ }
+ wake_up(&c->free_buffer_wait);
+ dm_bufio_unlock(c);
+
+ a = xchg(&c->async_write_error, 0);
+ f = dm_bufio_issue_flush(c);
+ if (a)
+ return a;
+
+ return f;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
+
+/*
+ * Use dm-io to send an empty barrier to flush the device.
+ */
+int dm_bufio_issue_flush(struct dm_bufio_client *c)
+{
+ struct dm_io_request io_req = {
+ .bi_rw = REQ_FLUSH,
+ .mem.type = DM_IO_KMEM,
+ .mem.ptr.addr = NULL,
+ .client = c->dm_io,
+ };
+ struct dm_io_region io_reg = {
+ .bdev = c->bdev,
+ .sector = 0,
+ .count = 0,
+ };
+
+ BUG_ON(dm_bufio_in_request());
+
+ return dm_io(&io_req, 1, &io_reg, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
+
+/*
+ * We first delete any other buffer that may be at that new location.
+ *
+ * Then, we write the buffer to the original location if it was dirty.
+ *
+ * Then, if we are the only one who is holding the buffer, relink the buffer
+ * in the hash queue for the new location.
+ *
+ * If there was someone else holding the buffer, we write it to the new
+ * location but not relink it, because that other user needs to have the buffer
+ * at the same place.
+ */
+void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
+{
+ struct dm_bufio_client *c = b->c;
+ struct dm_buffer *new;
+
+ BUG_ON(dm_bufio_in_request());
+
+ dm_bufio_lock(c);
+
+retry:
+ new = __find(c, new_block);
+ if (new) {
+ if (new->hold_count) {
+ __wait_for_free_buffer(c);
+ goto retry;
+ }
+
+ /*
+ * FIXME: Is there any point waiting for a write that's going
+ * to be overwritten in a bit?
+ */
+ __make_buffer_clean(new);
+ __unlink_buffer(new);
+ __free_buffer_wake(new);
+ }
+
+ BUG_ON(!b->hold_count);
+ BUG_ON(test_bit(B_READING, &b->state));
+
+ __write_dirty_buffer(b);
+ if (b->hold_count == 1) {
+ wait_on_bit(&b->state, B_WRITING,
+ do_io_schedule, TASK_UNINTERRUPTIBLE);
+ set_bit(B_DIRTY, &b->state);
+ __unlink_buffer(b);
+ __link_buffer(b, new_block, LIST_DIRTY);
+ } else {
+ sector_t old_block;
+ wait_on_bit_lock(&b->state, B_WRITING,
+ do_io_schedule, TASK_UNINTERRUPTIBLE);
+ /*
+ * Relink buffer to "new_block" so that write_callback
+ * sees "new_block" as a block number.
+ * After the write, link the buffer back to old_block.
+ * All this must be done in bufio lock, so that block number
+ * change isn't visible to other threads.
+ */
+ old_block = b->block;
+ __unlink_buffer(b);
+ __link_buffer(b, new_block, b->list_mode);
+ submit_io(b, WRITE, new_block, write_endio);
+ wait_on_bit(&b->state, B_WRITING,
+ do_io_schedule, TASK_UNINTERRUPTIBLE);
+ __unlink_buffer(b);
+ __link_buffer(b, old_block, b->list_mode);
+ }
+
+ dm_bufio_unlock(c);
+ dm_bufio_release(b);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_release_move);
+
+unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
+{
+ return c->block_size;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
+
+sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
+{
+ return i_size_read(c->bdev->bd_inode) >>
+ (SECTOR_SHIFT + c->sectors_per_block_bits);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
+
+sector_t dm_bufio_get_block_number(struct dm_buffer *b)
+{
+ return b->block;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
+
+void *dm_bufio_get_block_data(struct dm_buffer *b)
+{
+ return b->data;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
+
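+/*
+ * The auxiliary data (aux_size bytes requested at client creation) is
+ * allocated by alloc_buffer() immediately after struct dm_buffer, so it
+ * starts right past the structure.
+ */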
+void *dm_bufio_get_aux_data(struct dm_buffer *b)
+{
+ return b + 1;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
+
+struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
+{
+ return b->c;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_client);
+
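+/*
+ * Write out and free all buffers belonging to a client; called from
+ * dm_bufio_client_destroy(). Buffers that are still held are reported as
+ * leaked.
+ */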
+static void drop_buffers(struct dm_bufio_client *c)
+{
+ struct dm_buffer *b;
+ int i;
+
+ BUG_ON(dm_bufio_in_request());
+
+ /*
+ * An optimization so that the buffers are not written one-by-one.
+ */
+ dm_bufio_write_dirty_buffers_async(c);
+
+ dm_bufio_lock(c);
+
+ while ((b = __get_unclaimed_buffer(c)))
+ __free_buffer_wake(b);
+
+ for (i = 0; i < LIST_SIZE; i++)
+ list_for_each_entry(b, &c->lru[i], lru_list)
+ DMERR("leaked buffer %llx, hold count %u, list %d",
+ (unsigned long long)b->block, b->hold_count, i);
+
+ for (i = 0; i < LIST_SIZE; i++)
+ BUG_ON(!list_empty(&c->lru[i]));
+
+ dm_bufio_unlock(c);
+}
+
+/*
+ * Test if the buffer is unused and too old, and if so reclaim it.
+ * If __GFP_IO is not set, we must not do any I/O because we hold
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted
+ * to a different bufio client.
+ */
+static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
+ unsigned long max_jiffies)
+{
+ if (jiffies - b->last_accessed < max_jiffies)
+ return 1;
+
+ if (!(gfp & __GFP_IO)) {
+ if (test_bit(B_READING, &b->state) ||
+ test_bit(B_WRITING, &b->state) ||
+ test_bit(B_DIRTY, &b->state))
+ return 1;
+ }
+
+ if (b->hold_count)
+ return 1;
+
+ __make_buffer_clean(b);
+ __unlink_buffer(b);
+ __free_buffer_wake(b);
+
+ return 0;
+}
+
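+/*
+ * Walk both LRU lists from the oldest end, reclaiming up to nr_to_scan
+ * unheld buffers (max_jiffies is passed as 0, so the age check is a no-op
+ * and only the hold count and gfp constraints matter).
+ */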
+static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ struct shrink_control *sc)
+{
+ int l;
+ struct dm_buffer *b, *tmp;
+
+ for (l = 0; l < LIST_SIZE; l++) {
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
+ if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
+ !--nr_to_scan)
+ return;
+ dm_bufio_cond_resched();
+ }
+}
+
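+/*
+ * Shrinker callback: take the client lock (or only try to, for allocations
+ * that may not do I/O), reclaim up to sc->nr_to_scan buffers and return the
+ * number of buffers still cached, capped at INT_MAX.
+ */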
+static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct dm_bufio_client *c =
+ container_of(shrinker, struct dm_bufio_client, shrinker);
+ unsigned long r;
+ unsigned long nr_to_scan = sc->nr_to_scan;
+
+ if (sc->gfp_mask & __GFP_IO)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
+ return !nr_to_scan ? 0 : -1;
+
+ if (nr_to_scan)
+ __scan(c, nr_to_scan, sc);
+
+ r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+ if (r > INT_MAX)
+ r = INT_MAX;
+
+ dm_bufio_unlock(c);
+
+ return r;
+}
+
+/*
+ * Create the buffering interface
+ */
+struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+ unsigned reserved_buffers, unsigned aux_size,
+ void (*alloc_callback)(struct dm_buffer *),
+ void (*write_callback)(struct dm_buffer *))
+{
+ int r;
+ struct dm_bufio_client *c;
+ unsigned i;
+
+ BUG_ON(block_size < 1 << SECTOR_SHIFT ||
+ (block_size & (block_size - 1)));
+
+ c = kmalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ r = -ENOMEM;
+ goto bad_client;
+ }
+ c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
+ if (!c->cache_hash) {
+ r = -ENOMEM;
+ goto bad_hash;
+ }
+
+ c->bdev = bdev;
+ c->block_size = block_size;
+ c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
+ c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
+ ffs(block_size) - 1 - PAGE_SHIFT : 0;
+ c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
+ PAGE_SHIFT - (ffs(block_size) - 1) : 0);
+
+ c->aux_size = aux_size;
+ c->alloc_callback = alloc_callback;
+ c->write_callback = write_callback;
+
+ for (i = 0; i < LIST_SIZE; i++) {
+ INIT_LIST_HEAD(&c->lru[i]);
+ c->n_buffers[i] = 0;
+ }
+
+ for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
+ INIT_HLIST_HEAD(&c->cache_hash[i]);
+
+ mutex_init(&c->lock);
+ INIT_LIST_HEAD(&c->reserved_buffers);
+ c->need_reserved_buffers = reserved_buffers;
+
+ init_waitqueue_head(&c->free_buffer_wait);
+ c->async_write_error = 0;
+
+ c->dm_io = dm_io_client_create();
+ if (IS_ERR(c->dm_io)) {
+ r = PTR_ERR(c->dm_io);
+ goto bad_dm_io;
+ }
+
+ mutex_lock(&dm_bufio_clients_lock);
+ if (c->blocks_per_page_bits) {
+ if (!DM_BUFIO_CACHE_NAME(c)) {
+ DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
+ if (!DM_BUFIO_CACHE_NAME(c)) {
+ r = -ENOMEM;
+ mutex_unlock(&dm_bufio_clients_lock);
+ goto bad_cache;
+ }
+ }
+
+ if (!DM_BUFIO_CACHE(c)) {
+ DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
+ c->block_size,
+ c->block_size, 0, NULL);
+ if (!DM_BUFIO_CACHE(c)) {
+ r = -ENOMEM;
+ mutex_unlock(&dm_bufio_clients_lock);
+ goto bad_cache;
+ }
+ }
+ }
+ mutex_unlock(&dm_bufio_clients_lock);
+
+ while (c->need_reserved_buffers) {
+ struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
+
+ if (!b) {
+ r = -ENOMEM;
+ goto bad_buffer;
+ }
+ __free_buffer_wake(b);
+ }
+
+ mutex_lock(&dm_bufio_clients_lock);
+ dm_bufio_client_count++;
+ list_add(&c->client_list, &dm_bufio_all_clients);
+ __cache_size_refresh();
+ mutex_unlock(&dm_bufio_clients_lock);
+
+ c->shrinker.shrink = shrink;
+ c->shrinker.seeks = 1;
+ c->shrinker.batch = 0;
+ register_shrinker(&c->shrinker);
+
+ return c;
+
+bad_buffer:
+bad_cache:
+ while (!list_empty(&c->reserved_buffers)) {
+ struct dm_buffer *b = list_entry(c->reserved_buffers.next,
+ struct dm_buffer, lru_list);
+ list_del(&b->lru_list);
+ free_buffer(b);
+ }
+ dm_io_client_destroy(c->dm_io);
+bad_dm_io:
+ vfree(c->cache_hash);
+bad_hash:
+ kfree(c);
+bad_client:
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_client_create);
+
+/*
+ * Free the buffering interface.
+ * It is required that there are no references on any buffers.
+ */
+void dm_bufio_client_destroy(struct dm_bufio_client *c)
+{
+ unsigned i;
+
+ drop_buffers(c);
+
+ unregister_shrinker(&c->shrinker);
+
+ mutex_lock(&dm_bufio_clients_lock);
+
+ list_del(&c->client_list);
+ dm_bufio_client_count--;
+ __cache_size_refresh();
+
+ mutex_unlock(&dm_bufio_clients_lock);
+
+ for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
+ BUG_ON(!hlist_empty(&c->cache_hash[i]));
+
+ BUG_ON(c->need_reserved_buffers);
+
+ while (!list_empty(&c->reserved_buffers)) {
+ struct dm_buffer *b = list_entry(c->reserved_buffers.next,
+ struct dm_buffer, lru_list);
+ list_del(&b->lru_list);
+ free_buffer(b);
+ }
+
+ for (i = 0; i < LIST_SIZE; i++)
+ if (c->n_buffers[i])
+ DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
+
+ for (i = 0; i < LIST_SIZE; i++)
+ BUG_ON(c->n_buffers[i]);
+
+ dm_io_client_destroy(c->dm_io);
+ vfree(c->cache_hash);
+ kfree(c);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
+
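+/*
+ * Called periodically from the delayed work below to evict clean buffers
+ * that have not been accessed for longer than dm_bufio_max_age seconds.
+ */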
+static void cleanup_old_buffers(void)
+{
+ unsigned long max_age = dm_bufio_max_age;
+ struct dm_bufio_client *c;
+
+ barrier();
+
+ if (max_age > ULONG_MAX / HZ)
+ max_age = ULONG_MAX / HZ;
+
+ mutex_lock(&dm_bufio_clients_lock);
+ list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
+ if (!dm_bufio_trylock(c))
+ continue;
+
+ while (!list_empty(&c->lru[LIST_CLEAN])) {
+ struct dm_buffer *b;
+ b = list_entry(c->lru[LIST_CLEAN].prev,
+ struct dm_buffer, lru_list);
+ if (__cleanup_old_buffer(b, 0, max_age * HZ))
+ break;
+ dm_bufio_cond_resched();
+ }
+
+ dm_bufio_unlock(c);
+ dm_bufio_cond_resched();
+ }
+ mutex_unlock(&dm_bufio_clients_lock);
+}
+
+static struct workqueue_struct *dm_bufio_wq;
+static struct delayed_work dm_bufio_work;
+
+static void work_fn(struct work_struct *w)
+{
+ cleanup_old_buffers();
+
+ queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
+ DM_BUFIO_WORK_TIMER_SECS * HZ);
+}
+
+/*----------------------------------------------------------------
+ * Module setup
+ *--------------------------------------------------------------*/
+
+/*
+ * This is called only once for the whole dm_bufio module.
+ * It initializes memory limit.
+ */
+static int __init dm_bufio_init(void)
+{
+ __u64 mem;
+
+ memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
+ memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
+
+ mem = (__u64)((totalram_pages - totalhigh_pages) *
+ DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+
+ if (mem > ULONG_MAX)
+ mem = ULONG_MAX;
+
+#ifdef CONFIG_MMU
+ /*
+ * Get the size of vmalloc space the same way as VMALLOC_TOTAL
+ * in fs/proc/internal.h
+ */
+ if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
+ mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+#endif
+
+ dm_bufio_default_cache_size = mem;
+
+ mutex_lock(&dm_bufio_clients_lock);
+ __cache_size_refresh();
+ mutex_unlock(&dm_bufio_clients_lock);
+
+ dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+ if (!dm_bufio_wq)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
+ queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
+ DM_BUFIO_WORK_TIMER_SECS * HZ);
+
+ return 0;
+}
+
+/*
+ * This is called once when unloading the dm_bufio module.
+ */
+static void __exit dm_bufio_exit(void)
+{
+ int bug = 0;
+ int i;
+
+ cancel_delayed_work_sync(&dm_bufio_work);
+ destroy_workqueue(dm_bufio_wq);
+
+ for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
+ struct kmem_cache *kc = dm_bufio_caches[i];
+
+ if (kc)
+ kmem_cache_destroy(kc);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
+ kfree(dm_bufio_cache_names[i]);
+
+ if (dm_bufio_client_count) {
+ DMCRIT("%s: dm_bufio_client_count leaked: %d",
+ __func__, dm_bufio_client_count);
+ bug = 1;
+ }
+
+ if (dm_bufio_current_allocated) {
+ DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
+ __func__, dm_bufio_current_allocated);
+ bug = 1;
+ }
+
+ if (dm_bufio_allocated_get_free_pages) {
+ DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
+ __func__, dm_bufio_allocated_get_free_pages);
+ bug = 1;
+ }
+
+ if (dm_bufio_allocated_vmalloc) {
+ DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
+ __func__, dm_bufio_allocated_vmalloc);
+ bug = 1;
+ }
+
+ if (bug)
+ BUG();
+}
+
+module_init(dm_bufio_init)
+module_exit(dm_bufio_exit)
+
+module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
+
+module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
+
+module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
+
+module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
+MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
+
+module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
+MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
+
+module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
+MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
+
+module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
+MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
+
+MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
+MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h
new file mode 100644
index 00000000000..5c4c3a04e38
--- /dev/null
+++ b/drivers/md/dm-bufio.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2009-2011 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_BUFIO_H
+#define DM_BUFIO_H
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+
+/*----------------------------------------------------------------*/
+
+struct dm_bufio_client;
+struct dm_buffer;
+
+/*
+ * Create a buffered IO cache on a given device
+ */
+struct dm_bufio_client *
+dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+ unsigned reserved_buffers, unsigned aux_size,
+ void (*alloc_callback)(struct dm_buffer *),
+ void (*write_callback)(struct dm_buffer *));
+
+/*
+ * Release a buffered IO cache.
+ */
+void dm_bufio_client_destroy(struct dm_bufio_client *c);
+
+/*
+ * WARNING: to avoid deadlocks, these conditions are observed:
+ *
+ * - At most one thread can hold at most "reserved_buffers" buffers
+ * simultaneously.
+ * - Every other thread can hold at most one buffer.
+ * - Threads which call only dm_bufio_get can hold an unlimited number of
+ * buffers.
+ */
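+
+/*
+ * Illustrative usage sketch (not a prescriptive sequence; "c" is a client
+ * obtained from dm_bufio_client_create(), "new_contents" is a hypothetical
+ * caller-supplied buffer and error handling is trimmed):
+ *
+ *     struct dm_buffer *bp;
+ *     void *data = dm_bufio_read(c, block, &bp);
+ *
+ *     if (IS_ERR(data))
+ *             return PTR_ERR(data);
+ *     memcpy(data, new_contents, dm_bufio_get_block_size(c));
+ *     dm_bufio_mark_buffer_dirty(bp);
+ *     dm_bufio_release(bp);
+ *     return dm_bufio_write_dirty_buffers(c);
+ */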
+
+/*
+ * Read a given block from disk. Returns a pointer to the data and, via
+ * *bp, a pointer to the dm_buffer that can be used to release the buffer
+ * or to make it dirty.
+ */
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but return the buffer from the cache without
+ * reading it. If the buffer is not in the cache, return NULL.
+ */
+void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but don't read anything from the disk. It is
+ * expected that the caller initializes the buffer and marks it dirty.
+ */
+void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Release a reference obtained with dm_bufio_{read,get,new}. The data
+ * pointer and dm_buffer pointer are no longer valid after this call.
+ */
+void dm_bufio_release(struct dm_buffer *b);
+
+/*
+ * Mark a buffer dirty. It should be called after the buffer is modified.
+ *
+ * In case of memory pressure, the buffer may be written after
+ * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
+ * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
+ * the actual writing may occur earlier.
+ */
+void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
+
+/*
+ * Initiate writing of dirty buffers, without waiting for completion.
+ */
+void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
+
+/*
+ * Write all dirty buffers. Guarantees that all dirty buffers created prior
+ * to this call are on disk when this call exits.
+ */
+int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
+
+/*
+ * Send an empty write barrier to the device to flush hardware disk cache.
+ */
+int dm_bufio_issue_flush(struct dm_bufio_client *c);
+
+/*
+ * Like dm_bufio_release but also move the buffer to the new
+ * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
+ */
+void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
+
+unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_block_number(struct dm_buffer *b);
+void *dm_bufio_get_block_data(struct dm_buffer *b);
+void *dm_bufio_get_aux_data(struct dm_buffer *b);
+struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
+
+/*----------------------------------------------------------------*/
+
+#endif
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 0bdb201c2c2..042e7199656 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include <linux/slab.h>
#define DM_MSG_PREFIX "snapshot exception stores"
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2e9a3ca37bd..31c2dc25886 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1215,6 +1215,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
struct hash_cell *hc;
struct dm_table *t;
struct mapped_device *md;
+ struct target_type *immutable_target_type;
md = find_device(param);
if (!md)
@@ -1230,6 +1231,16 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto out;
}
+ immutable_target_type = dm_get_immutable_target_type(md);
+ if (immutable_target_type &&
+ (immutable_target_type != dm_table_get_immutable_target_type(t))) {
+ DMWARN("can't replace immutable target type %s",
+ immutable_target_type->name);
+ dm_table_destroy(t);
+ r = -EINVAL;
+ goto out;
+ }
+
/* Protect md->type and md->queue against concurrent table loads. */
dm_lock_md_type(md);
if (dm_get_md_type(md) == DM_TYPE_NONE)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 32ac70861d6..bed444c93d8 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -66,6 +66,8 @@ struct dm_kcopyd_client {
struct list_head pages_jobs;
};
+static struct page_list zero_page_list;
+
static void wake(struct dm_kcopyd_client *kc)
{
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
@@ -254,6 +256,9 @@ int __init dm_kcopyd_init(void)
if (!_job_cache)
return -ENOMEM;
+ zero_page_list.next = &zero_page_list;
+ zero_page_list.page = ZERO_PAGE(0);
+
return 0;
}
@@ -322,7 +327,7 @@ static int run_complete_job(struct kcopyd_job *job)
dm_kcopyd_notify_fn fn = job->fn;
struct dm_kcopyd_client *kc = job->kc;
- if (job->pages)
+ if (job->pages && job->pages != &zero_page_list)
kcopyd_put_pages(kc, job->pages);
/*
* If this is the master job, the sub jobs have already
@@ -484,6 +489,8 @@ static void dispatch_job(struct kcopyd_job *job)
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->complete_jobs, job);
+ else if (job->pages == &zero_page_list)
+ push(&kc->io_jobs, job);
else
push(&kc->pages_jobs, job);
wake(kc);
@@ -592,14 +599,20 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->flags = flags;
job->read_err = 0;
job->write_err = 0;
- job->rw = READ;
-
- job->source = *from;
job->num_dests = num_dests;
memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
- job->pages = NULL;
+ if (from) {
+ job->source = *from;
+ job->pages = NULL;
+ job->rw = READ;
+ } else {
+ memset(&job->source, 0, sizeof job->source);
+ job->source.count = job->dests[0].count;
+ job->pages = &zero_page_list;
+ job->rw = WRITE;
+ }
job->fn = fn;
job->context = context;
@@ -617,6 +630,14 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
}
EXPORT_SYMBOL(dm_kcopyd_copy);
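+/*
+ * Zero the destination regions. Implemented as a copy with a NULL source:
+ * dispatch_job() recognizes the zero_page_list and sends the job straight to
+ * the I/O path, writing ZERO_PAGE data to every destination.
+ */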
+int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+ unsigned num_dests, struct dm_io_region *dests,
+ unsigned flags, dm_kcopyd_notify_fn fn, void *context)
+{
+ return dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
+}
+EXPORT_SYMBOL(dm_kcopyd_zero);
+
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
dm_kcopyd_notify_fn fn, void *context)
{
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 1021c898601..9429159d9ee 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -9,6 +9,7 @@
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
+#include <linux/module.h>
#include "dm-log-userspace-transfer.h"
@@ -30,6 +31,7 @@ struct flush_entry {
struct log_c {
struct dm_target *ti;
+ struct dm_dev *log_dev;
uint32_t region_size;
region_t region_count;
uint64_t luid;
@@ -146,7 +148,7 @@ static int build_constructor_string(struct dm_target *ti,
* <UUID> <other args>
* Where 'other args' is the userspace implementation specific log
* arguments. An example might be:
- * <UUID> clustered_disk <arg count> <log dev> <region_size> [[no]sync]
+ * <UUID> clustered-disk <arg count> <log dev> <region_size> [[no]sync]
*
* So, this module will strip off the <UUID> for identification purposes
* when communicating with userspace about a log; but will pass on everything
@@ -161,13 +163,15 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
struct log_c *lc = NULL;
uint64_t rdata;
size_t rdata_size = sizeof(rdata);
+ char *devices_rdata = NULL;
+ size_t devices_rdata_size = DM_NAME_LEN;
if (argc < 3) {
DMWARN("Too few arguments to userspace dirty log");
return -EINVAL;
}
- lc = kmalloc(sizeof(*lc), GFP_KERNEL);
+ lc = kzalloc(sizeof(*lc), GFP_KERNEL);
if (!lc) {
DMWARN("Unable to allocate userspace log context.");
return -ENOMEM;
@@ -195,9 +199,19 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
return str_size;
}
- /* Send table string */
+ devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
+ if (!devices_rdata) {
+ DMERR("Failed to allocate memory for device information");
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Send table string and get back any opened device.
+ */
r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
- ctr_str, str_size, NULL, NULL);
+ ctr_str, str_size,
+ devices_rdata, &devices_rdata_size);
if (r < 0) {
if (r == -ESRCH)
@@ -220,7 +234,20 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
lc->region_size = (uint32_t)rdata;
lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
+ if (devices_rdata_size) {
+ if (devices_rdata[devices_rdata_size - 1] != '\0') {
+ DMERR("DM_ULOG_CTR device return string not properly terminated");
+ r = -EINVAL;
+ goto out;
+ }
+ r = dm_get_device(ti, devices_rdata,
+ dm_table_get_mode(ti->table), &lc->log_dev);
+ if (r)
+ DMERR("Failed to register %s with device-mapper",
+ devices_rdata);
+ }
out:
+ kfree(devices_rdata);
if (r) {
kfree(lc);
kfree(ctr_str);
@@ -241,6 +268,9 @@ static void userspace_dtr(struct dm_dirty_log *log)
NULL, 0,
NULL, NULL);
+ if (lc->log_dev)
+ dm_put_device(lc->ti, lc->log_dev);
+
kfree(lc->usr_argv_str);
kfree(lc);
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index 42c04f04a0c..fa0ccc585cb 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -10,6 +10,7 @@
*/
#include <linux/device-mapper.h>
+#include <linux/module.h>
#include "dm-path-selector.h"
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 37a37266a1e..c2907d836e4 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -6,6 +6,7 @@
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include "md.h"
#include "raid1.h"
@@ -1017,30 +1018,56 @@ static int raid_status(struct dm_target *ti, status_type_t type,
struct raid_set *rs = ti->private;
unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
unsigned sz = 0;
- int i;
+ int i, array_in_sync = 0;
sector_t sync;
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
- for (i = 0; i < rs->md.raid_disks; i++) {
- if (test_bit(Faulty, &rs->dev[i].rdev.flags))
- DMEMIT("D");
- else if (test_bit(In_sync, &rs->dev[i].rdev.flags))
- DMEMIT("A");
- else
- DMEMIT("a");
- }
-
if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
sync = rs->md.curr_resync_completed;
else
sync = rs->md.recovery_cp;
- if (sync > rs->md.resync_max_sectors)
+ if (sync >= rs->md.resync_max_sectors) {
+ array_in_sync = 1;
sync = rs->md.resync_max_sectors;
+ } else {
+ /*
+ * The array may be doing an initial sync, or it may
+ * be rebuilding individual components. If all the
+ * devices are In_sync, then it is the array that is
+ * being initialized.
+ */
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+ array_in_sync = 1;
+ }
+ /*
+ * Status characters:
+ * 'D' = Dead/Failed device
+ * 'a' = Alive but not in-sync
+ * 'A' = Alive and in-sync
+ */
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if (test_bit(Faulty, &rs->dev[i].rdev.flags))
+ DMEMIT("D");
+ else if (!array_in_sync ||
+ !test_bit(In_sync, &rs->dev[i].rdev.flags))
+ DMEMIT("a");
+ else
+ DMEMIT("A");
+ }
+ /*
+ * In-sync ratio:
+ * The in-sync ratio shows the progress of:
+ * - Initializing the array
+ * - Rebuilding a subset of devices of the array
+ * The user can distinguish between the two by referring
+ * to the status characters.
+ */
DMEMIT(" %llu/%llu",
(unsigned long long) sync,
(unsigned long long) rs->md.resync_max_sectors);
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 24752f449be..27f1d423b76 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -14,6 +14,7 @@
#include "dm-path-selector.h"
#include <linux/slab.h>
+#include <linux/module.h>
#define DM_MSG_PREFIX "multipath round-robin"
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 9c6c2e47ad6..59883bd7821 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -12,6 +12,7 @@
#include "dm-path-selector.h"
#include <linux/slab.h>
+#include <linux/module.h>
#define DM_MSG_PREFIX "multipath service-time"
#define ST_MIN_IO 1
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index d1f1d701710..3ac415675b6 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index a0898a66a2f..1ce9a2586e4 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bc04518e9d8..8e913213014 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -54,7 +54,9 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;
+ struct target_type *immutable_target_type;
unsigned integrity_supported:1;
+ unsigned singleton:1;
/*
* Indicates the rw permissions for the new logical
@@ -740,6 +742,12 @@ int dm_table_add_target(struct dm_table *t, const char *type,
char **argv;
struct dm_target *tgt;
+ if (t->singleton) {
+ DMERR("%s: target type %s must appear alone in table",
+ dm_device_name(t->md), t->targets->type->name);
+ return -EINVAL;
+ }
+
if ((r = check_space(t)))
return r;
@@ -758,6 +766,36 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return -EINVAL;
}
+ if (dm_target_needs_singleton(tgt->type)) {
+ if (t->num_targets) {
+ DMERR("%s: target type %s must appear alone in table",
+ dm_device_name(t->md), type);
+ return -EINVAL;
+ }
+ t->singleton = 1;
+ }
+
+ if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
+ DMERR("%s: target type %s may not be included in read-only tables",
+ dm_device_name(t->md), type);
+ return -EINVAL;
+ }
+
+ if (t->immutable_target_type) {
+ if (t->immutable_target_type != tgt->type) {
+ DMERR("%s: immutable target type %s cannot be mixed with other target types",
+ dm_device_name(t->md), t->immutable_target_type->name);
+ return -EINVAL;
+ }
+ } else if (dm_target_is_immutable(tgt->type)) {
+ if (t->num_targets) {
+ DMERR("%s: immutable target type %s cannot be mixed with other target types",
+ dm_device_name(t->md), tgt->type->name);
+ return -EINVAL;
+ }
+ t->immutable_target_type = tgt->type;
+ }
+
tgt->table = t;
tgt->begin = start;
tgt->len = len;
@@ -915,6 +953,11 @@ unsigned dm_table_get_type(struct dm_table *t)
return t->type;
}
+struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
+{
+ return t->immutable_target_type;
+}
+
bool dm_table_request_based(struct dm_table *t)
{
return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
@@ -1299,6 +1342,31 @@ static bool dm_table_discard_zeroes_data(struct dm_table *t)
return 1;
}
+static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && blk_queue_nonrot(q);
+}
+
+static bool dm_table_is_nonrot(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /* Ensure that all underlying device are non-rotational. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+ return 0;
+ }
+
+ return 1;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1324,6 +1392,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_discard_zeroes_data(t))
q->limits.discard_zeroes_data = 0;
+ if (dm_table_is_nonrot(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
new file mode 100644
index 00000000000..59c4f0446ff
--- /dev/null
+++ b/drivers/md/dm-thin-metadata.c
@@ -0,0 +1,1391 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-thin-metadata.h"
+#include "persistent-data/dm-btree.h"
+#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-disk.h"
+#include "persistent-data/dm-transaction-manager.h"
+
+#include <linux/list.h>
+#include <linux/device-mapper.h>
+#include <linux/workqueue.h>
+
+/*--------------------------------------------------------------------------
+ * As far as the metadata goes, there is:
+ *
+ * - A superblock in block zero, taking up fewer than 512 bytes for
+ * atomic writes.
+ *
+ * - A space map managing the metadata blocks.
+ *
+ * - A space map managing the data blocks.
+ *
+ * - A btree mapping our internal thin dev ids onto struct disk_device_details.
+ *
+ * - A hierarchical btree, with 2 levels which effectively maps (thin
+ * dev id, virtual block) -> block_time. Block time is a 64-bit
+ * field holding the time in the low 24 bits, and the block in the
+ * remaining upper 40 bits.
+ *
+ * BTrees consist solely of btree_nodes, each of which fills a block. Some
+ * are internal nodes, whose values are __le64s pointing to other
+ * nodes. Leaf nodes can store data of any reasonable size (i.e. much
+ * smaller than the block size). The nodes consist of the header,
+ * followed by an array of keys, followed by an array of values. We have
+ * to binary search on the keys so they're all held together to help the
+ * cpu cache.
+ *
+ * Space maps have 2 btrees:
+ *
+ * - One maps a uint64_t onto a struct index_entry, which points to a
+ * bitmap block and records details such as how many free entries there
+ * are.
+ *
+ * - The bitmap blocks have a header (for the checksum). Then the rest
+ * of the block is pairs of bits. With the meaning being:
+ *
+ * 0 - ref count is 0
+ * 1 - ref count is 1
+ * 2 - ref count is 2
+ * 3 - ref count is higher than 2
+ *
+ * - If the count is higher than 2 then the ref count is entered in a
+ * second btree that directly maps the block_address to a uint32_t ref
+ * count.
+ *
+ * The space map metadata variant doesn't have a bitmaps btree. Instead
+ * it has a single block's worth of index_entries. This avoids
+ * recursive issues with the bitmap btree needing to allocate space in
+ * order to insert. With a small data block size such as 64k the
+ * metadata can support data devices that are hundreds of terabytes in size.
+ *
+ * The space maps allocate space linearly from front to back. Space that
+ * is freed in a transaction is never recycled within that transaction.
+ * To try and avoid fragmenting _free_ space the allocator always goes
+ * back and fills in gaps.
+ *
+ * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
+ * from the block manager.
+ *--------------------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "thin metadata"
+
+#define THIN_SUPERBLOCK_MAGIC 27022010
+#define THIN_SUPERBLOCK_LOCATION 0
+#define THIN_VERSION 1
+#define THIN_METADATA_CACHE_SIZE 64
+#define SECTOR_TO_BLOCK_SHIFT 3
+
+/* This should be plenty */
+#define SPACE_MAP_ROOT_SIZE 128
+
+/*
+ * Little endian on-disk superblock and device details.
+ */
+struct thin_disk_superblock {
+ __le32 csum; /* Checksum of superblock except for this field. */
+ __le32 flags;
+ __le64 blocknr; /* This block number, dm_block_t. */
+
+ __u8 uuid[16];
+ __le64 magic;
+ __le32 version;
+ __le32 time;
+
+ __le64 trans_id;
+
+ /*
+ * Root held by userspace transactions.
+ */
+ __le64 held_root;
+
+ __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+
+ /*
+ * 2-level btree mapping (dev_id, (dev block, time)) -> data block
+ */
+ __le64 data_mapping_root;
+
+ /*
+ * Device detail root mapping dev_id -> device_details
+ */
+ __le64 device_details_root;
+
+ __le32 data_block_size; /* In 512-byte sectors. */
+
+ __le32 metadata_block_size; /* In 512-byte sectors. */
+ __le64 metadata_nr_blocks;
+
+ __le32 compat_flags;
+ __le32 compat_ro_flags;
+ __le32 incompat_flags;
+} __packed;
+
+struct disk_device_details {
+ __le64 mapped_blocks;
+ __le64 transaction_id; /* When created. */
+ __le32 creation_time;
+ __le32 snapshotted_time;
+} __packed;
+
+struct dm_pool_metadata {
+ struct hlist_node hash;
+
+ struct block_device *bdev;
+ struct dm_block_manager *bm;
+ struct dm_space_map *metadata_sm;
+ struct dm_space_map *data_sm;
+ struct dm_transaction_manager *tm;
+ struct dm_transaction_manager *nb_tm;
+
+ /*
+ * Two-level btree.
+ * First level holds thin_dev_t.
+ * Second level holds mappings.
+ */
+ struct dm_btree_info info;
+
+ /*
+ * Non-blocking version of the above.
+ */
+ struct dm_btree_info nb_info;
+
+ /*
+ * Just the top level for deleting whole devices.
+ */
+ struct dm_btree_info tl_info;
+
+ /*
+ * Just the bottom level for creating new devices.
+ */
+ struct dm_btree_info bl_info;
+
+ /*
+ * Describes the device details btree.
+ */
+ struct dm_btree_info details_info;
+
+ struct rw_semaphore root_lock;
+ uint32_t time;
+ int need_commit;
+ dm_block_t root;
+ dm_block_t details_root;
+ struct list_head thin_devices;
+ uint64_t trans_id;
+ unsigned long flags;
+ sector_t data_block_size;
+};
+
+struct dm_thin_device {
+ struct list_head list;
+ struct dm_pool_metadata *pmd;
+ dm_thin_id id;
+
+ int open_count;
+ int changed;
+ uint64_t mapped_blocks;
+ uint64_t transaction_id;
+ uint32_t creation_time;
+ uint32_t snapshotted_time;
+};
+
+/*----------------------------------------------------------------
+ * superblock validator
+ *--------------------------------------------------------------*/
+
+#define SUPERBLOCK_CSUM_XOR 160774
+
+static void sb_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct thin_disk_superblock *disk_super = dm_block_data(b);
+
+ disk_super->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+}
+
+static int sb_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct thin_disk_superblock *disk_super = dm_block_data(b);
+ __le32 csum_le;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
+ DMERR("sb_check failed: blocknr %llu: "
+ "wanted %llu", le64_to_cpu(disk_super->blocknr),
+ (unsigned long long)dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
+ DMERR("sb_check failed: magic %llu: "
+ "wanted %llu", le64_to_cpu(disk_super->magic),
+ (unsigned long long)THIN_SUPERBLOCK_MAGIC);
+ return -EILSEQ;
+ }
+
+ csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+ if (csum_le != disk_super->csum) {
+ DMERR("sb_check failed: csum %u: wanted %u",
+ le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator sb_validator = {
+ .name = "superblock",
+ .prepare_for_write = sb_prepare_for_write,
+ .check = sb_check
+};
+
+/*----------------------------------------------------------------
+ * Methods for the btree value types
+ *--------------------------------------------------------------*/
+
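+/*
+ * Pack/unpack helpers for the block_time values described at the top of this
+ * file: the low 24 bits hold the time, the upper bits hold the block number.
+ * For example, block 5 at time 3 packs to (5 << 24) | 3.
+ */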
+static uint64_t pack_block_time(dm_block_t b, uint32_t t)
+{
+ return (b << 24) | t;
+}
+
+static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
+{
+ *b = v >> 24;
+ *t = v & ((1 << 24) - 1);
+}
+
+static void data_block_inc(void *context, void *value_le)
+{
+ struct dm_space_map *sm = context;
+ __le64 v_le;
+ uint64_t b;
+ uint32_t t;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ unpack_block_time(le64_to_cpu(v_le), &b, &t);
+ dm_sm_inc_block(sm, b);
+}
+
+static void data_block_dec(void *context, void *value_le)
+{
+ struct dm_space_map *sm = context;
+ __le64 v_le;
+ uint64_t b;
+ uint32_t t;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ unpack_block_time(le64_to_cpu(v_le), &b, &t);
+ dm_sm_dec_block(sm, b);
+}
+
+static int data_block_equal(void *context, void *value1_le, void *value2_le)
+{
+ __le64 v1_le, v2_le;
+ uint64_t b1, b2;
+ uint32_t t;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
+ unpack_block_time(le64_to_cpu(v2_le), &b2, &t);
+
+ return b1 == b2;
+}
+
+static void subtree_inc(void *context, void *value)
+{
+ struct dm_btree_info *info = context;
+ __le64 root_le;
+ uint64_t root;
+
+ memcpy(&root_le, value, sizeof(root_le));
+ root = le64_to_cpu(root_le);
+ dm_tm_inc(info->tm, root);
+}
+
+static void subtree_dec(void *context, void *value)
+{
+ struct dm_btree_info *info = context;
+ __le64 root_le;
+ uint64_t root;
+
+ memcpy(&root_le, value, sizeof(root_le));
+ root = le64_to_cpu(root_le);
+ if (dm_btree_del(info, root))
+ DMERR("btree delete failed\n");
+}
+
+static int subtree_equal(void *context, void *value1_le, void *value2_le)
+{
+ __le64 v1_le, v2_le;
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+
+ return v1_le == v2_le;
+}
+
+/*----------------------------------------------------------------*/
+
+static int superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+{
+ int r;
+ unsigned i;
+ struct dm_block *b;
+ __le64 *data_le, zero = cpu_to_le64(0);
+ unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
+
+ /*
+ * We can't use a validator here - it may be all zeroes.
+ */
+ r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
+ if (r)
+ return r;
+
+ data_le = dm_block_data(b);
+ *result = 1;
+ for (i = 0; i < block_size; i++) {
+ if (data_le[i] != zero) {
+ *result = 0;
+ break;
+ }
+ }
+
+ return dm_bm_unlock(b);
+}
+
+static int init_pmd(struct dm_pool_metadata *pmd,
+ struct dm_block_manager *bm,
+ dm_block_t nr_blocks, int create)
+{
+ int r;
+ struct dm_space_map *sm, *data_sm;
+ struct dm_transaction_manager *tm;
+ struct dm_block *sblock;
+
+ if (create) {
+ r = dm_tm_create_with_sm(bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &tm, &sm, &sblock);
+ if (r < 0) {
+ DMERR("tm_create_with_sm failed");
+ return r;
+ }
+
+ data_sm = dm_sm_disk_create(tm, nr_blocks);
+ if (IS_ERR(data_sm)) {
+ DMERR("sm_disk_create failed");
+ r = PTR_ERR(data_sm);
+ goto bad;
+ }
+ } else {
+ struct thin_disk_superblock *disk_super = NULL;
+ size_t space_map_root_offset =
+ offsetof(struct thin_disk_superblock, metadata_space_map_root);
+
+ r = dm_tm_open_with_sm(bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, space_map_root_offset,
+ SPACE_MAP_ROOT_SIZE, &tm, &sm, &sblock);
+ if (r < 0) {
+ DMERR("tm_open_with_sm failed");
+ return r;
+ }
+
+ disk_super = dm_block_data(sblock);
+ data_sm = dm_sm_disk_open(tm, disk_super->data_space_map_root,
+ sizeof(disk_super->data_space_map_root));
+ if (IS_ERR(data_sm)) {
+ DMERR("sm_disk_open failed");
+ r = PTR_ERR(data_sm);
+ goto bad;
+ }
+ }
+
+ r = dm_tm_unlock(tm, sblock);
+ if (r < 0) {
+ DMERR("couldn't unlock superblock");
+ goto bad_data_sm;
+ }
+
+ pmd->bm = bm;
+ pmd->metadata_sm = sm;
+ pmd->data_sm = data_sm;
+ pmd->tm = tm;
+ pmd->nb_tm = dm_tm_create_non_blocking_clone(tm);
+ if (!pmd->nb_tm) {
+ DMERR("could not create clone tm");
+ r = -ENOMEM;
+ goto bad_data_sm;
+ }
+
+ pmd->info.tm = tm;
+ pmd->info.levels = 2;
+ pmd->info.value_type.context = pmd->data_sm;
+ pmd->info.value_type.size = sizeof(__le64);
+ pmd->info.value_type.inc = data_block_inc;
+ pmd->info.value_type.dec = data_block_dec;
+ pmd->info.value_type.equal = data_block_equal;
+
+ memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
+ pmd->nb_info.tm = pmd->nb_tm;
+
+ pmd->tl_info.tm = tm;
+ pmd->tl_info.levels = 1;
+ pmd->tl_info.value_type.context = &pmd->info;
+ pmd->tl_info.value_type.size = sizeof(__le64);
+ pmd->tl_info.value_type.inc = subtree_inc;
+ pmd->tl_info.value_type.dec = subtree_dec;
+ pmd->tl_info.value_type.equal = subtree_equal;
+
+ pmd->bl_info.tm = tm;
+ pmd->bl_info.levels = 1;
+ pmd->bl_info.value_type.context = pmd->data_sm;
+ pmd->bl_info.value_type.size = sizeof(__le64);
+ pmd->bl_info.value_type.inc = data_block_inc;
+ pmd->bl_info.value_type.dec = data_block_dec;
+ pmd->bl_info.value_type.equal = data_block_equal;
+
+ pmd->details_info.tm = tm;
+ pmd->details_info.levels = 1;
+ pmd->details_info.value_type.context = NULL;
+ pmd->details_info.value_type.size = sizeof(struct disk_device_details);
+ pmd->details_info.value_type.inc = NULL;
+ pmd->details_info.value_type.dec = NULL;
+ pmd->details_info.value_type.equal = NULL;
+
+ pmd->root = 0;
+
+ init_rwsem(&pmd->root_lock);
+ pmd->time = 0;
+ pmd->need_commit = 0;
+ pmd->details_root = 0;
+ pmd->trans_id = 0;
+ pmd->flags = 0;
+ INIT_LIST_HEAD(&pmd->thin_devices);
+
+ return 0;
+
+bad_data_sm:
+ dm_sm_destroy(data_sm);
+bad:
+ dm_tm_destroy(tm);
+ dm_sm_destroy(sm);
+
+ return r;
+}
+
+static int __begin_transaction(struct dm_pool_metadata *pmd)
+{
+ int r;
+ u32 features;
+ struct thin_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * __commit_transaction() resets need_commit.
+ */
+ WARN_ON(pmd->need_commit);
+
+ /*
+ * We re-read the superblock every time. Shouldn't need to do this
+ * really.
+ */
+ r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ pmd->time = le32_to_cpu(disk_super->time);
+ pmd->root = le64_to_cpu(disk_super->data_mapping_root);
+ pmd->details_root = le64_to_cpu(disk_super->device_details_root);
+ pmd->trans_id = le64_to_cpu(disk_super->trans_id);
+ pmd->flags = le32_to_cpu(disk_super->flags);
+ pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
+
+ features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+ DMERR("could not access metadata due to "
+ "unsupported optional features (%lx).",
+ (unsigned long)features);
+ r = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Check for read-only metadata to skip the following RDWR checks.
+ */
+ if (get_disk_ro(pmd->bdev->bd_disk))
+ goto out;
+
+ features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
+ if (features) {
+ DMERR("could not access metadata RDWR due to "
+ "unsupported optional features (%lx).",
+ (unsigned long)features);
+ r = -EINVAL;
+ }
+
+out:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __write_changed_details(struct dm_pool_metadata *pmd)
+{
+ int r;
+ struct dm_thin_device *td, *tmp;
+ struct disk_device_details details;
+ uint64_t key;
+
+ list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
+ if (!td->changed)
+ continue;
+
+ key = td->id;
+
+ details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
+ details.transaction_id = cpu_to_le64(td->transaction_id);
+ details.creation_time = cpu_to_le32(td->creation_time);
+ details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
+ __dm_bless_for_disk(&details);
+
+ r = dm_btree_insert(&pmd->details_info, pmd->details_root,
+ &key, &details, &pmd->details_root);
+ if (r)
+ return r;
+
+ if (td->open_count)
+ td->changed = 0;
+ else {
+ list_del(&td->list);
+ kfree(td);
+ }
+
+ pmd->need_commit = 1;
+ }
+
+ return 0;
+}
+
+static int __commit_transaction(struct dm_pool_metadata *pmd)
+{
+ /*
+ * FIXME: Associated pool should be made read-only on failure.
+ */
+ int r;
+ size_t metadata_len, data_len;
+ struct thin_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * The superblock must fit within a single 512-byte sector.
+ */
+ BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+
+ r = __write_changed_details(pmd);
+ if (r < 0)
+ goto out;
+
+ if (!pmd->need_commit)
+ goto out;
+
+ r = dm_sm_commit(pmd->data_sm);
+ if (r < 0)
+ goto out;
+
+ r = dm_tm_pre_commit(pmd->tm);
+ if (r < 0)
+ goto out;
+
+ r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ goto out;
+
+ r = dm_sm_root_size(pmd->data_sm, &data_len);
+ if (r < 0)
+ goto out;
+
+ r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r)
+ goto out;
+
+ disk_super = dm_block_data(sblock);
+ disk_super->time = cpu_to_le32(pmd->time);
+ disk_super->data_mapping_root = cpu_to_le64(pmd->root);
+ disk_super->device_details_root = cpu_to_le64(pmd->details_root);
+ disk_super->trans_id = cpu_to_le64(pmd->trans_id);
+ disk_super->flags = cpu_to_le32(pmd->flags);
+
+ r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0)
+ goto out_locked;
+
+ r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
+ data_len);
+ if (r < 0)
+ goto out_locked;
+
+ r = dm_tm_commit(pmd->tm, sblock);
+ if (!r)
+ pmd->need_commit = 0;
+
+out:
+ return r;
+
+out_locked:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
+ sector_t data_block_size)
+{
+ int r;
+ struct thin_disk_superblock *disk_super;
+ struct dm_pool_metadata *pmd;
+ sector_t bdev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ struct dm_block_manager *bm;
+ int create;
+ struct dm_block *sblock;
+
+ pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
+ if (!pmd) {
+ DMERR("could not allocate metadata struct");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Max number of held locks:
+ * 3 for btree insert +
+ * 2 for btree lookup used within space map
+ */
+ bm = dm_block_manager_create(bdev, THIN_METADATA_BLOCK_SIZE,
+ THIN_METADATA_CACHE_SIZE, 5);
+ if (!bm) {
+ DMERR("could not create block manager");
+ kfree(pmd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ r = superblock_all_zeroes(bm, &create);
+ if (r) {
+ dm_block_manager_destroy(bm);
+ kfree(pmd);
+ return ERR_PTR(r);
+ }
+
+ r = init_pmd(pmd, bm, 0, create);
+ if (r) {
+ dm_block_manager_destroy(bm);
+ kfree(pmd);
+ return ERR_PTR(r);
+ }
+ pmd->bdev = bdev;
+
+ if (!create) {
+ r = __begin_transaction(pmd);
+ if (r < 0)
+ goto bad;
+ return pmd;
+ }
+
+ /*
+ * Create.
+ */
+ r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r)
+ goto bad;
+
+ disk_super = dm_block_data(sblock);
+ disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
+ disk_super->version = cpu_to_le32(THIN_VERSION);
+ disk_super->time = 0;
+ disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
+ disk_super->data_block_size = cpu_to_le32(data_block_size);
+
+ r = dm_bm_unlock(sblock);
+ if (r < 0)
+ goto bad;
+
+ r = dm_btree_empty(&pmd->info, &pmd->root);
+ if (r < 0)
+ goto bad;
+
+ r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
+ if (r < 0) {
+ DMERR("couldn't create devices root");
+ goto bad;
+ }
+
+ pmd->flags = 0;
+ pmd->need_commit = 1;
+ r = dm_pool_commit_metadata(pmd);
+ if (r < 0) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ goto bad;
+ }
+
+ return pmd;
+
+bad:
+ if (dm_pool_metadata_close(pmd) < 0)
+ DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
+ return ERR_PTR(r);
+}
+
+int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+{
+ int r;
+ unsigned open_devices = 0;
+ struct dm_thin_device *td, *tmp;
+
+ down_read(&pmd->root_lock);
+ list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
+ if (td->open_count)
+ open_devices++;
+ else {
+ list_del(&td->list);
+ kfree(td);
+ }
+ }
+ up_read(&pmd->root_lock);
+
+ if (open_devices) {
+ DMERR("attempt to close pmd when %u device(s) are still open",
+ open_devices);
+ return -EBUSY;
+ }
+
+ r = __commit_transaction(pmd);
+ if (r < 0)
+ DMWARN("%s: __commit_transaction() failed, error = %d",
+ __func__, r);
+
+ dm_tm_destroy(pmd->tm);
+ dm_tm_destroy(pmd->nb_tm);
+ dm_block_manager_destroy(pmd->bm);
+ dm_sm_destroy(pmd->metadata_sm);
+ dm_sm_destroy(pmd->data_sm);
+ kfree(pmd);
+
+ return 0;
+}
+
+static int __open_device(struct dm_pool_metadata *pmd,
+ dm_thin_id dev, int create,
+ struct dm_thin_device **td)
+{
+ int r, changed = 0;
+ struct dm_thin_device *td2;
+ uint64_t key = dev;
+ struct disk_device_details details_le;
+
+ /*
+ * Check the device isn't already open.
+ */
+ list_for_each_entry(td2, &pmd->thin_devices, list)
+ if (td2->id == dev) {
+ td2->open_count++;
+ *td = td2;
+ return 0;
+ }
+
+ /*
+ * Check the device exists.
+ */
+ r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
+ &key, &details_le);
+ if (r) {
+ if (r != -ENODATA || !create)
+ return r;
+
+ changed = 1;
+ details_le.mapped_blocks = 0;
+ details_le.transaction_id = cpu_to_le64(pmd->trans_id);
+ details_le.creation_time = cpu_to_le32(pmd->time);
+ details_le.snapshotted_time = cpu_to_le32(pmd->time);
+ }
+
+ *td = kmalloc(sizeof(**td), GFP_NOIO);
+ if (!*td)
+ return -ENOMEM;
+
+ (*td)->pmd = pmd;
+ (*td)->id = dev;
+ (*td)->open_count = 1;
+ (*td)->changed = changed;
+ (*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
+ (*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
+ (*td)->creation_time = le32_to_cpu(details_le.creation_time);
+ (*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);
+
+ list_add(&(*td)->list, &pmd->thin_devices);
+
+ return 0;
+}
+
+static void __close_device(struct dm_thin_device *td)
+{
+ --td->open_count;
+}
+
+static int __create_thin(struct dm_pool_metadata *pmd,
+ dm_thin_id dev)
+{
+ int r;
+ dm_block_t dev_root;
+ uint64_t key = dev;
+ struct disk_device_details details_le;
+ struct dm_thin_device *td;
+ __le64 value;
+
+ r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
+ &key, &details_le);
+ if (!r)
+ return -EEXIST;
+
+ /*
+ * Create an empty btree for the mappings.
+ */
+ r = dm_btree_empty(&pmd->bl_info, &dev_root);
+ if (r)
+ return r;
+
+ /*
+ * Insert it into the main mapping tree.
+ */
+ value = cpu_to_le64(dev_root);
+ __dm_bless_for_disk(&value);
+ r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
+ if (r) {
+ dm_btree_del(&pmd->bl_info, dev_root);
+ return r;
+ }
+
+ r = __open_device(pmd, dev, 1, &td);
+ if (r) {
+ /* __open_device() does not set up td on failure, so don't close it */
+ dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
+ dm_btree_del(&pmd->bl_info, dev_root);
+ return r;
+ }
+ td->changed = 1;
+ __close_device(td);
+
+ return r;
+}
+
+int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __create_thin(pmd, dev);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+static int __set_snapshot_details(struct dm_pool_metadata *pmd,
+ struct dm_thin_device *snap,
+ dm_thin_id origin, uint32_t time)
+{
+ int r;
+ struct dm_thin_device *td;
+
+ r = __open_device(pmd, origin, 0, &td);
+ if (r)
+ return r;
+
+ td->changed = 1;
+ td->snapshotted_time = time;
+
+ snap->mapped_blocks = td->mapped_blocks;
+ snap->snapshotted_time = time;
+ __close_device(td);
+
+ return 0;
+}
+
+static int __create_snap(struct dm_pool_metadata *pmd,
+ dm_thin_id dev, dm_thin_id origin)
+{
+ int r;
+ dm_block_t origin_root;
+ uint64_t key = origin, dev_key = dev;
+ struct dm_thin_device *td;
+ struct disk_device_details details_le;
+ __le64 value;
+
+ /* check this device is unused */
+ r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
+ &dev_key, &details_le);
+ if (!r)
+ return -EEXIST;
+
+ /* find the mapping tree for the origin */
+ r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
+ if (r)
+ return r;
+ origin_root = le64_to_cpu(value);
+
+ /* clone the origin, an inc will do */
+ dm_tm_inc(pmd->tm, origin_root);
+
+ /* insert into the main mapping tree */
+ value = cpu_to_le64(origin_root);
+ __dm_bless_for_disk(&value);
+ key = dev;
+ r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
+ if (r) {
+ dm_tm_dec(pmd->tm, origin_root);
+ return r;
+ }
+
+ pmd->time++;
+
+ r = __open_device(pmd, dev, 1, &td);
+ if (r)
+ goto bad;
+
+ r = __set_snapshot_details(pmd, td, origin, pmd->time);
+ __close_device(td);
+
+ if (r)
+ goto bad;
+
+ return 0;
+
+bad:
+ dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
+ dm_btree_remove(&pmd->details_info, pmd->details_root,
+ &key, &pmd->details_root);
+ return r;
+}
+
+int dm_pool_create_snap(struct dm_pool_metadata *pmd,
+ dm_thin_id dev,
+ dm_thin_id origin)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __create_snap(pmd, dev, origin);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
+{
+ int r;
+ uint64_t key = dev;
+ struct dm_thin_device *td;
+
+ /* TODO: failure should mark the transaction invalid */
+ r = __open_device(pmd, dev, 0, &td);
+ if (r)
+ return r;
+
+ if (td->open_count > 1) {
+ __close_device(td);
+ return -EBUSY;
+ }
+
+ list_del(&td->list);
+ kfree(td);
+ r = dm_btree_remove(&pmd->details_info, pmd->details_root,
+ &key, &pmd->details_root);
+ if (r)
+ return r;
+
+ r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
+ if (r)
+ return r;
+
+ pmd->need_commit = 1;
+
+ return 0;
+}
+
+int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
+ dm_thin_id dev)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __delete_device(pmd, dev);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
+ uint64_t current_id,
+ uint64_t new_id)
+{
+ down_write(&pmd->root_lock);
+ if (pmd->trans_id != current_id) {
+ up_write(&pmd->root_lock);
+ DMERR("mismatched transaction id");
+ return -EINVAL;
+ }
+
+ pmd->trans_id = new_id;
+ pmd->need_commit = 1;
+ up_write(&pmd->root_lock);
+
+ return 0;
+}
+
+int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
+ uint64_t *result)
+{
+ down_read(&pmd->root_lock);
+ *result = pmd->trans_id;
+ up_read(&pmd->root_lock);
+
+ return 0;
+}
+
+static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
+{
+ int r;
+ struct thin_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ *result = le64_to_cpu(disk_super->held_root);
+
+ return dm_bm_unlock(sblock);
+}
+
+int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
+{
+ int r;
+
+ down_read(&pmd->root_lock);
+ r = __get_held_metadata_root(pmd, result);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
+ struct dm_thin_device **td)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __open_device(pmd, dev, 0, td);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_close_thin_device(struct dm_thin_device *td)
+{
+ down_write(&td->pmd->root_lock);
+ __close_device(td);
+ up_write(&td->pmd->root_lock);
+
+ return 0;
+}
+
+dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
+{
+ return td->id;
+}
+
+static int __snapshotted_since(struct dm_thin_device *td, uint32_t time)
+{
+ return td->snapshotted_time > time;
+}
+
+int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
+ int can_block, struct dm_thin_lookup_result *result)
+{
+ int r;
+ uint64_t block_time = 0;
+ __le64 value;
+ struct dm_pool_metadata *pmd = td->pmd;
+ dm_block_t keys[2] = { td->id, block };
+
+ if (can_block) {
+ down_read(&pmd->root_lock);
+ r = dm_btree_lookup(&pmd->info, pmd->root, keys, &value);
+ if (!r)
+ block_time = le64_to_cpu(value);
+ up_read(&pmd->root_lock);
+
+ } else if (down_read_trylock(&pmd->root_lock)) {
+ r = dm_btree_lookup(&pmd->nb_info, pmd->root, keys, &value);
+ if (!r)
+ block_time = le64_to_cpu(value);
+ up_read(&pmd->root_lock);
+
+ } else
+ return -EWOULDBLOCK;
+
+ if (!r) {
+ dm_block_t exception_block;
+ uint32_t exception_time;
+ unpack_block_time(block_time, &exception_block,
+ &exception_time);
+ result->block = exception_block;
+ result->shared = __snapshotted_since(td, exception_time);
+ }
+
+ return r;
+}
+
+static int __insert(struct dm_thin_device *td, dm_block_t block,
+ dm_block_t data_block)
+{
+ int r, inserted;
+ __le64 value;
+ struct dm_pool_metadata *pmd = td->pmd;
+ dm_block_t keys[2] = { td->id, block };
+
+ pmd->need_commit = 1;
+ value = cpu_to_le64(pack_block_time(data_block, pmd->time));
+ __dm_bless_for_disk(&value);
+
+ r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
+ &pmd->root, &inserted);
+ if (r)
+ return r;
+
+ if (inserted) {
+ td->mapped_blocks++;
+ td->changed = 1;
+ }
+
+ return 0;
+}
+
+int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
+ dm_block_t data_block)
+{
+ int r;
+
+ down_write(&td->pmd->root_lock);
+ r = __insert(td, block, data_block);
+ up_write(&td->pmd->root_lock);
+
+ return r;
+}
+
+static int __remove(struct dm_thin_device *td, dm_block_t block)
+{
+ int r;
+ struct dm_pool_metadata *pmd = td->pmd;
+ dm_block_t keys[2] = { td->id, block };
+
+ r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
+ if (r)
+ return r;
+
+ pmd->need_commit = 1;
+
+ return 0;
+}
+
+int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
+{
+ int r;
+
+ down_write(&td->pmd->root_lock);
+ r = __remove(td, block);
+ up_write(&td->pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+
+ r = dm_sm_new_block(pmd->data_sm, result);
+ pmd->need_commit = 1;
+
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+
+ r = __commit_transaction(pmd);
+ if (r <= 0)
+ goto out;
+
+ /*
+ * Open the next transaction.
+ */
+ r = __begin_transaction(pmd);
+out:
+ up_write(&pmd->root_lock);
+ return r;
+}
+
+int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
+{
+ int r;
+
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_nr_free(pmd->data_sm, result);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
+{
+ int r;
+
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
+{
+ int r;
+
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result)
+{
+ down_read(&pmd->root_lock);
+ *result = pmd->data_block_size;
+ up_read(&pmd->root_lock);
+
+ return 0;
+}
+
+int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
+{
+ int r;
+
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_nr_blocks(pmd->data_sm, result);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
+{
+ struct dm_pool_metadata *pmd = td->pmd;
+
+ down_read(&pmd->root_lock);
+ *result = td->mapped_blocks;
+ up_read(&pmd->root_lock);
+
+ return 0;
+}
+
+static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
+{
+ int r;
+ __le64 value_le;
+ dm_block_t thin_root;
+ struct dm_pool_metadata *pmd = td->pmd;
+
+ r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
+ if (r)
+ return r;
+
+ thin_root = le64_to_cpu(value_le);
+
+ return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
+}
+
+int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
+ dm_block_t *result)
+{
+ int r;
+ struct dm_pool_metadata *pmd = td->pmd;
+
+ down_read(&pmd->root_lock);
+ r = __highest_block(td, result);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
+static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+ int r;
+ dm_block_t old_count;
+
+ r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+ if (r)
+ return r;
+
+ if (new_count == old_count)
+ return 0;
+
+ if (new_count < old_count) {
+ DMERR("cannot reduce size of data device");
+ return -EINVAL;
+ }
+
+ r = dm_sm_extend(pmd->data_sm, new_count - old_count);
+ if (!r)
+ pmd->need_commit = 1;
+
+ return r;
+}
+
+int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __resize_data_dev(pmd, new_count);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
new file mode 100644
index 00000000000..859c1689687
--- /dev/null
+++ b/drivers/md/dm-thin-metadata.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2010-2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_THIN_METADATA_H
+#define DM_THIN_METADATA_H
+
+#include "persistent-data/dm-block-manager.h"
+
+#define THIN_METADATA_BLOCK_SIZE 4096
+
+/*----------------------------------------------------------------*/
+
+struct dm_pool_metadata;
+struct dm_thin_device;
+
+/*
+ * Device identifier
+ */
+typedef uint64_t dm_thin_id;
+
+/*
+ * Reopens or creates a new, empty metadata volume.
+ */
+struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
+ sector_t data_block_size);
+
+int dm_pool_metadata_close(struct dm_pool_metadata *pmd);
+
+/*
+ * Compat feature flags. Any incompat flags beyond the ones
+ * specified below will prevent use of the thin metadata.
+ */
+#define THIN_FEATURE_COMPAT_SUPP 0UL
+#define THIN_FEATURE_COMPAT_RO_SUPP 0UL
+#define THIN_FEATURE_INCOMPAT_SUPP 0UL
+
+/*
+ * Device creation/deletion.
+ */
+int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev);
+
+/*
+ * An internal snapshot.
+ *
+ * You can only snapshot a quiesced origin, i.e. one that is either
+ * suspended or not instanced at all.
+ */
+int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_thin_id dev,
+ dm_thin_id origin);
+
+/*
+ * Deletes a virtual device from the metadata. It _is_ safe to call this
+ * when that device is open. Operations on that device will just start
+ * failing. You still need to call close() on the device.
+ */
+int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
+ dm_thin_id dev);
+
+/*
+ * Commits _all_ metadata changes: device creation, deletion, mapping
+ * updates.
+ */
+int dm_pool_commit_metadata(struct dm_pool_metadata *pmd);
+
+/*
+ * Set/get userspace transaction id.
+ */
+int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
+ uint64_t current_id,
+ uint64_t new_id);
+
+int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
+ uint64_t *result);
+
+/*
+ * Hold/get root for userspace transaction.
+ */
+int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd);
+
+int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
+ dm_block_t *result);
+
+/*
+ * Actions on a single virtual device.
+ */
+
+/*
+ * A device may be opened more than once; each open must be matched by a close.
+ */
+int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
+ struct dm_thin_device **td);
+
+int dm_pool_close_thin_device(struct dm_thin_device *td);
+
+dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);
+
+struct dm_thin_lookup_result {
+ dm_block_t block;
+ int shared;
+};
+
+/*
+ * Returns:
+ * -EWOULDBLOCK iff @can_block is unset and the lookup would block.
+ * -ENODATA iff that mapping is not present.
+ * 0 success
+ */
+int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
+ int can_block, struct dm_thin_lookup_result *result);
+
+/*
+ * Obtain an unused block.
+ */
+int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);
+
+/*
+ * Insert or remove block.
+ */
+int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
+ dm_block_t data_block);
+
+int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
+
+/*
+ * Queries.
+ */
+int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
+ dm_block_t *highest_mapped);
+
+int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result);
+
+int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd,
+ dm_block_t *result);
+
+int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
+ dm_block_t *result);
+
+int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
+ dm_block_t *result);
+
+int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
+
+int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
+
+/*
+ * Returns -EINVAL if the new size is smaller than the current size, since
+ * already allocated blocks would be lost.
+ */
+int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+
+/*----------------------------------------------------------------*/
+
+#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
new file mode 100644
index 00000000000..c3087575fef
--- /dev/null
+++ b/drivers/md/dm-thin.c
@@ -0,0 +1,2428 @@
+/*
+ * Copyright (C) 2011 Red Hat UK.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-thin-metadata.h"
+
+#include <linux/device-mapper.h>
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define DM_MSG_PREFIX "thin"
+
+/*
+ * Tunable constants
+ */
+#define ENDIO_HOOK_POOL_SIZE 10240
+#define DEFERRED_SET_SIZE 64
+#define MAPPING_POOL_SIZE 1024
+#define PRISON_CELLS 1024
+
+/*
+ * The block size of the device holding pool data must be
+ * between 64KB and 1GB.
+ */
+#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
+
+/*
+ * The metadata device is currently limited in size. The limitation is
+ * checked lower down in dm-space-map-metadata, but we also check it here
+ * so we can fail early.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about 16k metadata blocks.
+ */
+#define METADATA_DEV_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
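+/* i.e. 255 * 16384 metadata blocks of 4k each, roughly 16G of metadata. */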
+
+/*
+ * Device id is restricted to 24 bits.
+ */
+#define MAX_DEV_ID ((1 << 24) - 1)
+
+/*
+ * How do we handle breaking sharing of data blocks?
+ * =================================================
+ *
+ * We use a standard copy-on-write btree to store the mappings for the
+ * devices (note I'm talking about copy-on-write of the metadata here, not
+ * the data). When you take an internal snapshot you clone the root node
+ * of the origin btree. After this there is no concept of an origin or a
+ * snapshot. They are just two device trees that happen to point to the
+ * same data blocks.
+ *
+ * When we get a write in we decide if it's to a shared data block using
+ * some timestamp magic. If it is, we have to break sharing.
+ *
+ * Let's say we write to a shared block in what was the origin. The
+ * steps are:
+ *
+ * i) plug further io to this physical block (see bio_prison code).
+ *
+ * ii) quiesce any read io to that shared data block. Obviously
+ * including all devices that share this block. (see deferred_set code)
+ *
+ * iii) copy the data block to a newly allocated block. This step can be
+ * skipped if the io covers the whole block (schedule_copy).
+ *
+ * iv) insert the new mapping into the origin's btree
+ * (process_prepared_mappings). This act of inserting breaks some
+ * sharing of btree nodes between the two devices. Breaking sharing only
+ * affects the btree of that specific device. Btrees for the other
+ * devices that share the block never change. The btree for the origin
+ * device as it was after the last commit is untouched, i.e. we're using
+ * persistent data structures in the functional programming sense.
+ *
+ * v) unplug io to this physical block, including the io that triggered
+ * the breaking of sharing.
+ *
+ * Steps (ii) and (iii) occur in parallel.
+ *
+ * The metadata _doesn't_ need to be committed before the io continues. We
+ * get away with this because the io is always written to a _new_ block.
+ * If there's a crash, then:
+ *
+ * - The origin mapping will point to the old origin block (the shared
+ * one). This will contain the data as it was before the io that triggered
+ * the breaking of sharing came in.
+ *
+ * - The snap mapping still points to the old block. As it would after
+ * the commit.
+ *
+ * The downside of this scheme is that the timestamp magic isn't perfect:
+ * it will continue to think a data block in the snapshot device is shared
+ * even after the write to the origin has broken sharing. I suspect data
+ * blocks will typically be shared by many different devices, so we're
+ * breaking sharing n + 1 times, rather than n, where n is the number of
+ * devices that reference this data block. At the moment I think the
+ * benefits far, far outweigh the disadvantages.
+ */
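+/*
+ * Concretely, each mapping records the pool time at which it was
+ * inserted and each thin device records the time it was last
+ * snapshotted; a mapping is treated as shared if it is older than the
+ * device's snapshotted_time (see dm_thin_find_block() in
+ * dm-thin-metadata.c).
+ */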
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Sometimes we can't deal with a bio straight away. We put them in prison
+ * where they can't cause any mischief. Bios are put in a cell identified
+ * by a key, multiple bios can be in the same cell. When the cell is
+ * subsequently unlocked the bios become available.
+ */
+struct bio_prison;
+
+struct cell_key {
+ int virtual;
+ dm_thin_id dev;
+ dm_block_t block;
+};
+
+struct cell {
+ struct hlist_node list;
+ struct bio_prison *prison;
+ struct cell_key key;
+ unsigned count;
+ struct bio_list bios;
+};
+
+struct bio_prison {
+ spinlock_t lock;
+ mempool_t *cell_pool;
+
+ unsigned nr_buckets;
+ unsigned hash_mask;
+ struct hlist_head *cells;
+};
+
+static uint32_t calc_nr_buckets(unsigned nr_cells)
+{
+ uint32_t n = 128;
+
+ nr_cells /= 4;
+ nr_cells = min(nr_cells, 8192u);
+
+ while (n < nr_cells)
+ n <<= 1;
+
+ return n;
+}
+
+/*
+ * @nr_cells should be the number of cells you want in use _concurrently_.
+ * Don't confuse it with the number of distinct keys.
+ */
+static struct bio_prison *prison_create(unsigned nr_cells)
+{
+ unsigned i;
+ uint32_t nr_buckets = calc_nr_buckets(nr_cells);
+ size_t len = sizeof(struct bio_prison) +
+ (sizeof(struct hlist_head) * nr_buckets);
+ struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
+
+ if (!prison)
+ return NULL;
+
+ spin_lock_init(&prison->lock);
+ prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
+ sizeof(struct cell));
+ if (!prison->cell_pool) {
+ kfree(prison);
+ return NULL;
+ }
+
+ prison->nr_buckets = nr_buckets;
+ prison->hash_mask = nr_buckets - 1;
+ prison->cells = (struct hlist_head *) (prison + 1);
+ for (i = 0; i < nr_buckets; i++)
+ INIT_HLIST_HEAD(prison->cells + i);
+
+ return prison;
+}
+
+static void prison_destroy(struct bio_prison *prison)
+{
+ mempool_destroy(prison->cell_pool);
+ kfree(prison);
+}
+
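+/*
+ * Only the block number takes part in the hash; the dev and virtual
+ * fields are only compared once a bucket has been chosen (keys_equal()).
+ */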
+static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
+{
+ const unsigned long BIG_PRIME = 4294967291UL;
+ uint64_t hash = key->block * BIG_PRIME;
+
+ return (uint32_t) (hash & prison->hash_mask);
+}
+
+static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
+{
+ return (lhs->virtual == rhs->virtual) &&
+ (lhs->dev == rhs->dev) &&
+ (lhs->block == rhs->block);
+}
+
+static struct cell *__search_bucket(struct hlist_head *bucket,
+ struct cell_key *key)
+{
+ struct cell *cell;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry(cell, tmp, bucket, list)
+ if (keys_equal(&cell->key, key))
+ return cell;
+
+ return NULL;
+}
+
+/*
+ * This may block if a new cell needs allocating. You must ensure that
+ * cells will be unlocked even if the calling thread is blocked.
+ *
+ * Returns the number of entries in the cell prior to the new addition
+ * or < 0 on failure.
+ */
+static int bio_detain(struct bio_prison *prison, struct cell_key *key,
+ struct bio *inmate, struct cell **ref)
+{
+ int r;
+ unsigned long flags;
+ uint32_t hash = hash_key(prison, key);
+ struct cell *uninitialized_var(cell), *cell2 = NULL;
+
+ BUG_ON(hash > prison->nr_buckets);
+
+ spin_lock_irqsave(&prison->lock, flags);
+ cell = __search_bucket(prison->cells + hash, key);
+
+ if (!cell) {
+ /*
+ * Allocate a new cell
+ */
+ spin_unlock_irqrestore(&prison->lock, flags);
+ cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
+ spin_lock_irqsave(&prison->lock, flags);
+
+ /*
+ * We've been unlocked, so we have to double check that
+ * nobody else has inserted this cell in the meantime.
+ */
+ cell = __search_bucket(prison->cells + hash, key);
+
+ if (!cell) {
+ cell = cell2;
+ cell2 = NULL;
+
+ cell->prison = prison;
+ memcpy(&cell->key, key, sizeof(cell->key));
+ cell->count = 0;
+ bio_list_init(&cell->bios);
+ hlist_add_head(&cell->list, prison->cells + hash);
+ }
+ }
+
+ r = cell->count++;
+ bio_list_add(&cell->bios, inmate);
+ spin_unlock_irqrestore(&prison->lock, flags);
+
+ if (cell2)
+ mempool_free(cell2, prison->cell_pool);
+
+ *ref = cell;
+
+ return r;
+}
+
+/*
+ * @inmates must have been initialised prior to this call
+ */
+static void __cell_release(struct cell *cell, struct bio_list *inmates)
+{
+ struct bio_prison *prison = cell->prison;
+
+ hlist_del(&cell->list);
+
+ if (inmates)
+ bio_list_merge(inmates, &cell->bios);
+
+ mempool_free(cell, prison->cell_pool);
+}
+
+static void cell_release(struct cell *cell, struct bio_list *bios)
+{
+ unsigned long flags;
+ struct bio_prison *prison = cell->prison;
+
+ spin_lock_irqsave(&prison->lock, flags);
+ __cell_release(cell, bios);
+ spin_unlock_irqrestore(&prison->lock, flags);
+}
+
+/*
+ * There are a couple of places where we put a bio into a cell briefly
+ * before taking it out again. In these situations we know that no other
+ * bio may be in the cell. This function releases the cell, and also does
+ * a sanity check.
+ */
+static void cell_release_singleton(struct cell *cell, struct bio *bio)
+{
+ struct bio_prison *prison = cell->prison;
+ struct bio_list bios;
+ struct bio *b;
+ unsigned long flags;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&prison->lock, flags);
+ __cell_release(cell, &bios);
+ spin_unlock_irqrestore(&prison->lock, flags);
+
+ b = bio_list_pop(&bios);
+ BUG_ON(b != bio);
+ BUG_ON(!bio_list_empty(&bios));
+}
+
+static void cell_error(struct cell *cell)
+{
+ struct bio_prison *prison = cell->prison;
+ struct bio_list bios;
+ struct bio *bio;
+ unsigned long flags;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&prison->lock, flags);
+ __cell_release(cell, &bios);
+ spin_unlock_irqrestore(&prison->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We use the deferred set to keep track of pending reads to shared blocks.
+ * We do this to ensure the new mapping caused by a write isn't performed
+ * until these prior reads have completed. Otherwise the insertion of the
+ * new mapping could free the old block that the read bios are mapped to.
+ */
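+/*
+ * The set is a small ring of entries. In-flight reads take a reference
+ * on the current entry (ds_inc()/ds_dec()), while deferred work is
+ * queued on the current entry, advancing the ring when possible
+ * (ds_add_work()). As references drain, __sweep() splices the work
+ * items of fully quiesced entries back onto the caller's list.
+ */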
+
+struct deferred_set;
+struct deferred_entry {
+ struct deferred_set *ds;
+ unsigned count;
+ struct list_head work_items;
+};
+
+struct deferred_set {
+ spinlock_t lock;
+ unsigned current_entry;
+ unsigned sweeper;
+ struct deferred_entry entries[DEFERRED_SET_SIZE];
+};
+
+static void ds_init(struct deferred_set *ds)
+{
+ int i;
+
+ spin_lock_init(&ds->lock);
+ ds->current_entry = 0;
+ ds->sweeper = 0;
+ for (i = 0; i < DEFERRED_SET_SIZE; i++) {
+ ds->entries[i].ds = ds;
+ ds->entries[i].count = 0;
+ INIT_LIST_HEAD(&ds->entries[i].work_items);
+ }
+}
+
+static struct deferred_entry *ds_inc(struct deferred_set *ds)
+{
+ unsigned long flags;
+ struct deferred_entry *entry;
+
+ spin_lock_irqsave(&ds->lock, flags);
+ entry = ds->entries + ds->current_entry;
+ entry->count++;
+ spin_unlock_irqrestore(&ds->lock, flags);
+
+ return entry;
+}
+
+static unsigned ds_next(unsigned index)
+{
+ return (index + 1) % DEFERRED_SET_SIZE;
+}
+
+static void __sweep(struct deferred_set *ds, struct list_head *head)
+{
+ while ((ds->sweeper != ds->current_entry) &&
+ !ds->entries[ds->sweeper].count) {
+ list_splice_init(&ds->entries[ds->sweeper].work_items, head);
+ ds->sweeper = ds_next(ds->sweeper);
+ }
+
+ if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
+ list_splice_init(&ds->entries[ds->sweeper].work_items, head);
+}
+
+static void ds_dec(struct deferred_entry *entry, struct list_head *head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&entry->ds->lock, flags);
+ BUG_ON(!entry->count);
+ --entry->count;
+ __sweep(entry->ds, head);
+ spin_unlock_irqrestore(&entry->ds->lock, flags);
+}
+
+/*
+ * Returns 1 if the work was deferred, or 0 if there was nothing pending
+ * so the job can be processed immediately.
+ */
+static int ds_add_work(struct deferred_set *ds, struct list_head *work)
+{
+ int r = 1;
+ unsigned long flags;
+ unsigned next_entry;
+
+ spin_lock_irqsave(&ds->lock, flags);
+ if ((ds->sweeper == ds->current_entry) &&
+ !ds->entries[ds->current_entry].count)
+ r = 0;
+ else {
+ list_add(work, &ds->entries[ds->current_entry].work_items);
+ next_entry = ds_next(ds->current_entry);
+ if (!ds->entries[next_entry].count)
+ ds->current_entry = next_entry;
+ }
+ spin_unlock_irqrestore(&ds->lock, flags);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Key building.
+ */
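+/*
+ * Data keys identify a physical block in the pool's data device;
+ * virtual keys identify a block within a thin device's address space.
+ */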
+static void build_data_key(struct dm_thin_device *td,
+ dm_block_t b, struct cell_key *key)
+{
+ key->virtual = 0;
+ key->dev = dm_thin_dev_id(td);
+ key->block = b;
+}
+
+static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
+ struct cell_key *key)
+{
+ key->virtual = 1;
+ key->dev = dm_thin_dev_id(td);
+ key->block = b;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * A pool device ties together a metadata device and a data device. It
+ * also provides the interface for creating and destroying internal
+ * devices.
+ */
+struct new_mapping;
+struct pool {
+ struct list_head list;
+ struct dm_target *ti; /* Only set if a pool target is bound */
+
+ struct mapped_device *pool_md;
+ struct block_device *md_dev;
+ struct dm_pool_metadata *pmd;
+
+ uint32_t sectors_per_block;
+ unsigned block_shift;
+ dm_block_t offset_mask;
+ dm_block_t low_water_blocks;
+
+ unsigned zero_new_blocks:1;
+ unsigned low_water_triggered:1; /* A dm event has been sent */
+ unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
+
+ struct bio_prison *prison;
+ struct dm_kcopyd_client *copier;
+
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ unsigned ref_count;
+
+ spinlock_t lock;
+ struct bio_list deferred_bios;
+ struct bio_list deferred_flush_bios;
+ struct list_head prepared_mappings;
+
+ struct bio_list retry_on_resume_list;
+
+ struct deferred_set ds; /* FIXME: move to thin_c */
+
+ struct new_mapping *next_mapping;
+ mempool_t *mapping_pool;
+ mempool_t *endio_hook_pool;
+};
+
+/*
+ * Target context for a pool.
+ */
+struct pool_c {
+ struct dm_target *ti;
+ struct pool *pool;
+ struct dm_dev *data_dev;
+ struct dm_dev *metadata_dev;
+ struct dm_target_callbacks callbacks;
+
+ dm_block_t low_water_blocks;
+ unsigned zero_new_blocks:1;
+};
+
+/*
+ * Target context for a thin.
+ */
+struct thin_c {
+ struct dm_dev *pool_dev;
+ dm_thin_id dev_id;
+
+ struct pool *pool;
+ struct dm_thin_device *td;
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * A global list of pools that uses a struct mapped_device as a key.
+ */
+static struct dm_thin_pool_table {
+ struct mutex mutex;
+ struct list_head pools;
+} dm_thin_pool_table;
+
+static void pool_table_init(void)
+{
+ mutex_init(&dm_thin_pool_table.mutex);
+ INIT_LIST_HEAD(&dm_thin_pool_table.pools);
+}
+
+static void __pool_table_insert(struct pool *pool)
+{
+ BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
+ list_add(&pool->list, &dm_thin_pool_table.pools);
+}
+
+static void __pool_table_remove(struct pool *pool)
+{
+ BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
+ list_del(&pool->list);
+}
+
+static struct pool *__pool_table_lookup(struct mapped_device *md)
+{
+ struct pool *pool = NULL, *tmp;
+
+ BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
+
+ list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
+ if (tmp->pool_md == md) {
+ pool = tmp;
+ break;
+ }
+ }
+
+ return pool;
+}
+
+static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
+{
+ struct pool *pool = NULL, *tmp;
+
+ BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
+
+ list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
+ if (tmp->md_dev == md_dev) {
+ pool = tmp;
+ break;
+ }
+ }
+
+ return pool;
+}
+
+/*----------------------------------------------------------------*/
+
+static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
+{
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+ bio_list_merge(&bios, master);
+ bio_list_init(master);
+
+ while ((bio = bio_list_pop(&bios))) {
+ if (dm_get_mapinfo(bio)->ptr == tc)
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ else
+ bio_list_add(master, bio);
+ }
+}
+
+static void requeue_io(struct thin_c *tc)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ __requeue_bio_list(tc, &pool->deferred_bios);
+ __requeue_bio_list(tc, &pool->retry_on_resume_list);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+/*
+ * This section of code contains the logic for processing a thin device's IO.
+ * Much of the code depends on pool object resources (lists, workqueues, etc)
+ * but most is exclusively called from the thin target rather than the thin-pool
+ * target.
+ */
+
+static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
+{
+ return bio->bi_sector >> tc->pool->block_shift;
+}
+
+static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
+{
+ struct pool *pool = tc->pool;
+
+ bio->bi_bdev = tc->pool_dev->bdev;
+ bio->bi_sector = (block << pool->block_shift) +
+ (bio->bi_sector & pool->offset_mask);
+}
+
+static void remap_and_issue(struct thin_c *tc, struct bio *bio,
+ dm_block_t block)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ remap(tc, bio, block);
+
+ /*
+ * Batch together any FUA/FLUSH bios we find and then issue
+ * a single commit for them in process_deferred_bios().
+ */
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_add(&pool->deferred_flush_bios, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ } else
+ generic_make_request(bio);
+}
+
+/*
+ * wake_worker() is used when new work is queued and when pool_resume is
+ * ready to continue deferred IO processing.
+ */
+static void wake_worker(struct pool *pool)
+{
+ queue_work(pool->wq, &pool->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Bio endio functions.
+ */
+struct endio_hook {
+ struct thin_c *tc;
+ bio_end_io_t *saved_bi_end_io;
+ struct deferred_entry *entry;
+};
+
+struct new_mapping {
+ struct list_head list;
+
+ int prepared;
+
+ struct thin_c *tc;
+ dm_block_t virt_block;
+ dm_block_t data_block;
+ struct cell *cell;
+ int err;
+
+ /*
+ * If the bio covers the whole area of a block then we can avoid
+ * zeroing or copying. Instead this bio is hooked. The bio will
+ * still be in the cell, so care has to be taken to avoid issuing
+ * the bio twice.
+ */
+ struct bio *bio;
+ bio_end_io_t *saved_bi_end_io;
+};
+
+static void __maybe_add_mapping(struct new_mapping *m)
+{
+ struct pool *pool = m->tc->pool;
+
+ if (list_empty(&m->list) && m->prepared) {
+ list_add(&m->list, &pool->prepared_mappings);
+ wake_worker(pool);
+ }
+}
+
+static void copy_complete(int read_err, unsigned long write_err, void *context)
+{
+ unsigned long flags;
+ struct new_mapping *m = context;
+ struct pool *pool = m->tc->pool;
+
+ m->err = read_err || write_err ? -EIO : 0;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ m->prepared = 1;
+ __maybe_add_mapping(m);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static void overwrite_endio(struct bio *bio, int err)
+{
+ unsigned long flags;
+ struct new_mapping *m = dm_get_mapinfo(bio)->ptr;
+ struct pool *pool = m->tc->pool;
+
+ m->err = err;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ m->prepared = 1;
+ __maybe_add_mapping(m);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static void shared_read_endio(struct bio *bio, int err)
+{
+ struct list_head mappings;
+ struct new_mapping *m, *tmp;
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ unsigned long flags;
+ struct pool *pool = h->tc->pool;
+
+ bio->bi_end_io = h->saved_bi_end_io;
+ bio_endio(bio, err);
+
+ INIT_LIST_HEAD(&mappings);
+ ds_dec(h->entry, &mappings);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry_safe(m, tmp, &mappings, list) {
+ list_del(&m->list);
+ INIT_LIST_HEAD(&m->list);
+ __maybe_add_mapping(m);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ mempool_free(h, pool->endio_hook_pool);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Workqueue.
+ */
+
+/*
+ * Prepared mapping jobs.
+ */
+
+/*
+ * This sends the bios in the cell back to the deferred_bios list.
+ */
+static void cell_defer(struct thin_c *tc, struct cell *cell,
+ dm_block_t data_block)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ cell_release(cell, &pool->deferred_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+}
+
+/*
+ * Same as cell_defer above, except it omits one particular detainee,
+ * a write bio that covers the block and has already been processed.
+ */
+static void cell_defer_except(struct thin_c *tc, struct cell *cell,
+ struct bio *exception)
+{
+ struct bio_list bios;
+ struct bio *bio;
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ bio_list_init(&bios);
+ cell_release(cell, &bios);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ while ((bio = bio_list_pop(&bios)))
+ if (bio != exception)
+ bio_list_add(&pool->deferred_bios, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+}
+
+static void process_prepared_mapping(struct new_mapping *m)
+{
+ struct thin_c *tc = m->tc;
+ struct bio *bio;
+ int r;
+
+ bio = m->bio;
+ if (bio)
+ bio->bi_end_io = m->saved_bi_end_io;
+
+ if (m->err) {
+ cell_error(m->cell);
+ return;
+ }
+
+ /*
+ * Commit the prepared block into the mapping btree.
+ * Any I/O for this block arriving after this point will get
+ * remapped to it directly.
+ */
+ r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
+ if (r) {
+ DMERR("dm_thin_insert_block() failed");
+ cell_error(m->cell);
+ return;
+ }
+
+ /*
+ * Release any bios held while the block was being provisioned.
+ * If we are processing a write bio that completely covers the block,
+ * we already processed it so can ignore it now when processing
+ * the bios in the cell.
+ */
+ if (bio) {
+ cell_defer_except(tc, m->cell, bio);
+ bio_endio(bio, 0);
+ } else
+ cell_defer(tc, m->cell, m->data_block);
+
+ list_del(&m->list);
+ mempool_free(m, tc->pool->mapping_pool);
+}
+
+static void process_prepared_mappings(struct pool *pool)
+{
+ unsigned long flags;
+ struct list_head maps;
+ struct new_mapping *m, *tmp;
+
+ INIT_LIST_HEAD(&maps);
+ spin_lock_irqsave(&pool->lock, flags);
+ list_splice_init(&pool->prepared_mappings, &maps);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ list_for_each_entry_safe(m, tmp, &maps, list)
+ process_prepared_mapping(m);
+}
+
+/*
+ * Deferred bio jobs.
+ */
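+/*
+ * Returns non-zero for a write bio that starts on a block boundary and
+ * covers exactly one whole block, in which case provisioning can skip
+ * the copy/zero step and simply remap the bio (see schedule_copy() and
+ * schedule_zero()).
+ */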
+static int io_overwrites_block(struct pool *pool, struct bio *bio)
+{
+ return ((bio_data_dir(bio) == WRITE) &&
+ !(bio->bi_sector & pool->offset_mask)) &&
+ (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
+}
+
+static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
+ bio_end_io_t *fn)
+{
+ *save = bio->bi_end_io;
+ bio->bi_end_io = fn;
+}
+
+static int ensure_next_mapping(struct pool *pool)
+{
+ if (pool->next_mapping)
+ return 0;
+
+ pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
+
+ return pool->next_mapping ? 0 : -ENOMEM;
+}
+
+static struct new_mapping *get_next_mapping(struct pool *pool)
+{
+ struct new_mapping *r = pool->next_mapping;
+
+ BUG_ON(!pool->next_mapping);
+
+ pool->next_mapping = NULL;
+
+ return r;
+}
+
+static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
+ dm_block_t data_origin, dm_block_t data_dest,
+ struct cell *cell, struct bio *bio)
+{
+ int r;
+ struct pool *pool = tc->pool;
+ struct new_mapping *m = get_next_mapping(pool);
+
+ INIT_LIST_HEAD(&m->list);
+ m->prepared = 0;
+ m->tc = tc;
+ m->virt_block = virt_block;
+ m->data_block = data_dest;
+ m->cell = cell;
+ m->err = 0;
+ m->bio = NULL;
+
+ ds_add_work(&pool->ds, &m->list);
+
+ /*
+ * IO to pool_dev remaps to the pool target's data_dev.
+ *
+ * If the whole block of data is being overwritten, we can issue the
+ * bio immediately. Otherwise we use kcopyd to clone the data first.
+ */
+ if (io_overwrites_block(pool, bio)) {
+ m->bio = bio;
+ save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ dm_get_mapinfo(bio)->ptr = m;
+ remap_and_issue(tc, bio, data_dest);
+ } else {
+ struct dm_io_region from, to;
+
+ from.bdev = tc->pool_dev->bdev;
+ from.sector = data_origin * pool->sectors_per_block;
+ from.count = pool->sectors_per_block;
+
+ to.bdev = tc->pool_dev->bdev;
+ to.sector = data_dest * pool->sectors_per_block;
+ to.count = pool->sectors_per_block;
+
+ r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
+ 0, copy_complete, m);
+ if (r < 0) {
+ mempool_free(m, pool->mapping_pool);
+ DMERR("dm_kcopyd_copy() failed");
+ cell_error(cell);
+ }
+ }
+}
+
+static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
+ dm_block_t data_block, struct cell *cell,
+ struct bio *bio)
+{
+ struct pool *pool = tc->pool;
+ struct new_mapping *m = get_next_mapping(pool);
+
+ INIT_LIST_HEAD(&m->list);
+ m->prepared = 0;
+ m->tc = tc;
+ m->virt_block = virt_block;
+ m->data_block = data_block;
+ m->cell = cell;
+ m->err = 0;
+ m->bio = NULL;
+
+ /*
+ * If the whole block of data is being overwritten or we are not
+ * zeroing pre-existing data, we can issue the bio immediately.
+ * Otherwise we use kcopyd to zero the data first.
+ */
+ if (!pool->zero_new_blocks)
+ process_prepared_mapping(m);
+
+ else if (io_overwrites_block(pool, bio)) {
+ m->bio = bio;
+ save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ dm_get_mapinfo(bio)->ptr = m;
+ remap_and_issue(tc, bio, data_block);
+
+ } else {
+ int r;
+ struct dm_io_region to;
+
+ to.bdev = tc->pool_dev->bdev;
+ to.sector = data_block * pool->sectors_per_block;
+ to.count = pool->sectors_per_block;
+
+ r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
+ if (r < 0) {
+ mempool_free(m, pool->mapping_pool);
+ DMERR("dm_kcopyd_zero() failed");
+ cell_error(cell);
+ }
+ }
+}
+
+static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+{
+ int r;
+ dm_block_t free_blocks;
+ unsigned long flags;
+ struct pool *pool = tc->pool;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+ if (r)
+ return r;
+
+ if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
+ DMWARN("%s: reached low water mark, sending event.",
+ dm_device_name(pool->pool_md));
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->low_water_triggered = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+ dm_table_event(pool->ti->table);
+ }
+
+ if (!free_blocks) {
+ if (pool->no_free_space)
+ return -ENOSPC;
+ else {
+ /*
+ * Try to commit to see if that will free up some
+ * more space.
+ */
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ return r;
+ }
+
+ r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+ if (r)
+ return r;
+
+ /*
+ * If we still have no space we set a flag to avoid
+ * doing all this checking and return -ENOSPC.
+ */
+ if (!free_blocks) {
+ DMWARN("%s: no free space available.",
+ dm_device_name(pool->pool_md));
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->no_free_space = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return -ENOSPC;
+ }
+ }
+ }
+
+ r = dm_pool_alloc_data_block(pool->pmd, result);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/*
+ * If we have run out of space, queue bios until the device is
+ * resumed, presumably after having been reloaded with more space.
+ */
+static void retry_on_resume(struct bio *bio)
+{
+ struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_add(&pool->retry_on_resume_list, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static void no_space(struct cell *cell)
+{
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+ cell_release(cell, &bios);
+
+ while ((bio = bio_list_pop(&bios)))
+ retry_on_resume(bio);
+}
+
+static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
+ struct cell_key *key,
+ struct dm_thin_lookup_result *lookup_result,
+ struct cell *cell)
+{
+ int r;
+ dm_block_t data_block;
+
+ r = alloc_data_block(tc, &data_block);
+ switch (r) {
+ case 0:
+ schedule_copy(tc, block, lookup_result->block,
+ data_block, cell, bio);
+ break;
+
+ case -ENOSPC:
+ no_space(cell);
+ break;
+
+ default:
+ DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ cell_error(cell);
+ break;
+ }
+}
+
+static void process_shared_bio(struct thin_c *tc, struct bio *bio,
+ dm_block_t block,
+ struct dm_thin_lookup_result *lookup_result)
+{
+ struct cell *cell;
+ struct pool *pool = tc->pool;
+ struct cell_key key;
+
+ /*
+ * If cell is already occupied, then sharing is already in the process
+ * of being broken so we have nothing further to do here.
+ */
+ build_data_key(tc->td, lookup_result->block, &key);
+ if (bio_detain(pool->prison, &key, bio, &cell))
+ return;
+
+ if (bio_data_dir(bio) == WRITE)
+ break_sharing(tc, bio, block, &key, lookup_result, cell);
+ else {
+ struct endio_hook *h;
+ h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+
+ h->tc = tc;
+ h->entry = ds_inc(&pool->ds);
+ save_and_set_endio(bio, &h->saved_bi_end_io, shared_read_endio);
+ dm_get_mapinfo(bio)->ptr = h;
+
+ cell_release_singleton(cell, bio);
+ remap_and_issue(tc, bio, lookup_result->block);
+ }
+}
+
+static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
+ struct cell *cell)
+{
+ int r;
+ dm_block_t data_block;
+
+ /*
+ * Remap empty bios (flushes) immediately, without provisioning.
+ */
+ if (!bio->bi_size) {
+ cell_release_singleton(cell, bio);
+ remap_and_issue(tc, bio, 0);
+ return;
+ }
+
+ /*
+ * Fill read bios with zeroes and complete them immediately.
+ */
+ if (bio_data_dir(bio) == READ) {
+ zero_fill_bio(bio);
+ cell_release_singleton(cell, bio);
+ bio_endio(bio, 0);
+ return;
+ }
+
+ r = alloc_data_block(tc, &data_block);
+ switch (r) {
+ case 0:
+ schedule_zero(tc, block, data_block, cell, bio);
+ break;
+
+ case -ENOSPC:
+ no_space(cell);
+ break;
+
+ default:
+ DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ cell_error(cell);
+ break;
+ }
+}
+
+static void process_bio(struct thin_c *tc, struct bio *bio)
+{
+ int r;
+ dm_block_t block = get_bio_block(tc, bio);
+ struct cell *cell;
+ struct cell_key key;
+ struct dm_thin_lookup_result lookup_result;
+
+ /*
+ * If cell is already occupied, then the block is already
+ * being provisioned so we have nothing further to do here.
+ */
+ build_virtual_key(tc->td, block, &key);
+ if (bio_detain(tc->pool->prison, &key, bio, &cell))
+ return;
+
+ r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
+ switch (r) {
+ case 0:
+ /*
+ * We can release this cell now. This thread is the only
+ * one that puts bios into a cell, and we know there were
+ * no preceding bios.
+ */
+ /*
+ * TODO: this will probably have to change when discard goes
+ * back in.
+ */
+ cell_release_singleton(cell, bio);
+
+ if (lookup_result.shared)
+ process_shared_bio(tc, bio, block, &lookup_result);
+ else
+ remap_and_issue(tc, bio, lookup_result.block);
+ break;
+
+ case -ENODATA:
+ provision_block(tc, bio, block, cell);
+ break;
+
+ default:
+ DMERR("dm_thin_find_block() failed, error = %d", r);
+ bio_io_error(bio);
+ break;
+ }
+}
+
+static void process_deferred_bios(struct pool *pool)
+{
+ unsigned long flags;
+ struct bio *bio;
+ struct bio_list bios;
+ int r;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_merge(&bios, &pool->deferred_bios);
+ bio_list_init(&pool->deferred_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ while ((bio = bio_list_pop(&bios))) {
+ struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
+ /*
+ * If we've got no free new_mapping structs, and processing
+ * this bio might require one, we pause until there are some
+ * prepared mappings to process.
+ */
+ if (ensure_next_mapping(pool)) {
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_merge(&pool->deferred_bios, &bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ break;
+ }
+ process_bio(tc, bio);
+ }
+
+ /*
+ * If there are any deferred flush bios, we must commit
+ * the metadata before issuing them.
+ */
+ bio_list_init(&bios);
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_merge(&bios, &pool->deferred_flush_bios);
+ bio_list_init(&pool->deferred_flush_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (bio_list_empty(&bios))
+ return;
+
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+ return;
+ }
+
+ while ((bio = bio_list_pop(&bios)))
+ generic_make_request(bio);
+}
+
+static void do_worker(struct work_struct *ws)
+{
+ struct pool *pool = container_of(ws, struct pool, worker);
+
+ process_prepared_mappings(pool);
+ process_deferred_bios(pool);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Mapping functions.
+ */
+
+/*
+ * Called only while mapping a thin bio to hand it over to the workqueue.
+ */
+static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
+{
+ unsigned long flags;
+ struct pool *pool = tc->pool;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_add(&pool->deferred_bios, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+}
+
+/*
+ * Non-blocking function called from the thin target's map function.
+ */
+static int thin_bio_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ int r;
+ struct thin_c *tc = ti->private;
+ dm_block_t block = get_bio_block(tc, bio);
+ struct dm_thin_device *td = tc->td;
+ struct dm_thin_lookup_result result;
+
+ /*
+ * Save the thin context for easy access from the deferred bio later.
+ */
+ map_context->ptr = tc;
+
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+ thin_defer_bio(tc, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ r = dm_thin_find_block(td, block, 0, &result);
+
+ /*
+ * Note that we defer readahead too.
+ */
+ switch (r) {
+ case 0:
+ if (unlikely(result.shared)) {
+ /*
+ * We have a race condition here between the
+ * result.shared value returned by the lookup and
+ * snapshot creation, which may cause new
+ * sharing.
+ *
+ * To avoid this always quiesce the origin before
+ * taking the snap. You want to do this anyway to
+ * ensure a consistent application view
+ * (i.e. lockfs).
+ *
+ * More distant ancestors are irrelevant. The
+ * shared flag will be set in their case.
+ */
+ thin_defer_bio(tc, bio);
+ r = DM_MAPIO_SUBMITTED;
+ } else {
+ remap(tc, bio, result.block);
+ r = DM_MAPIO_REMAPPED;
+ }
+ break;
+
+ case -ENODATA:
+ /*
+ * In future, the failed dm_thin_find_block above could
+ * provide the hint to load the metadata into cache.
+ */
+ case -EWOULDBLOCK:
+ thin_defer_bio(tc, bio);
+ r = DM_MAPIO_SUBMITTED;
+ break;
+ }
+
+ return r;
+}
+
+static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
+{
+ int r;
+ unsigned long flags;
+ struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
+
+ spin_lock_irqsave(&pt->pool->lock, flags);
+ r = !bio_list_empty(&pt->pool->retry_on_resume_list);
+ spin_unlock_irqrestore(&pt->pool->lock, flags);
+
+ if (!r) {
+ struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+ r = bdi_congested(&q->backing_dev_info, bdi_bits);
+ }
+
+ return r;
+}
+
+static void __requeue_bios(struct pool *pool)
+{
+ bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
+ bio_list_init(&pool->retry_on_resume_list);
+}
+
+/*----------------------------------------------------------------
+ * Binding of control targets to a pool object
+ *--------------------------------------------------------------*/
+static int bind_control_target(struct pool *pool, struct dm_target *ti)
+{
+ struct pool_c *pt = ti->private;
+
+ pool->ti = ti;
+ pool->low_water_blocks = pt->low_water_blocks;
+ pool->zero_new_blocks = pt->zero_new_blocks;
+
+ return 0;
+}
+
+static void unbind_control_target(struct pool *pool, struct dm_target *ti)
+{
+ if (pool->ti == ti)
+ pool->ti = NULL;
+}
+
+/*----------------------------------------------------------------
+ * Pool creation
+ *--------------------------------------------------------------*/
+static void __pool_destroy(struct pool *pool)
+{
+ __pool_table_remove(pool);
+
+ if (dm_pool_metadata_close(pool->pmd) < 0)
+ DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
+
+ prison_destroy(pool->prison);
+ dm_kcopyd_client_destroy(pool->copier);
+
+ if (pool->wq)
+ destroy_workqueue(pool->wq);
+
+ if (pool->next_mapping)
+ mempool_free(pool->next_mapping, pool->mapping_pool);
+ mempool_destroy(pool->mapping_pool);
+ mempool_destroy(pool->endio_hook_pool);
+ kfree(pool);
+}
+
+static struct pool *pool_create(struct mapped_device *pool_md,
+ struct block_device *metadata_dev,
+ unsigned long block_size, char **error)
+{
+ int r;
+ void *err_p;
+ struct pool *pool;
+ struct dm_pool_metadata *pmd;
+
+ pmd = dm_pool_metadata_open(metadata_dev, block_size);
+ if (IS_ERR(pmd)) {
+ *error = "Error creating metadata object";
+ return (struct pool *)pmd;
+ }
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ *error = "Error allocating memory for pool";
+ err_p = ERR_PTR(-ENOMEM);
+ goto bad_pool;
+ }
+
+ pool->pmd = pmd;
+ pool->sectors_per_block = block_size;
+ pool->block_shift = ffs(block_size) - 1;
+ pool->offset_mask = block_size - 1;
+ pool->low_water_blocks = 0;
+ pool->zero_new_blocks = 1;
+ pool->prison = prison_create(PRISON_CELLS);
+ if (!pool->prison) {
+ *error = "Error creating pool's bio prison";
+ err_p = ERR_PTR(-ENOMEM);
+ goto bad_prison;
+ }
+
+ pool->copier = dm_kcopyd_client_create();
+ if (IS_ERR(pool->copier)) {
+ r = PTR_ERR(pool->copier);
+ *error = "Error creating pool's kcopyd client";
+ err_p = ERR_PTR(r);
+ goto bad_kcopyd_client;
+ }
+
+ /*
+ * Create a single-threaded workqueue that will service all devices
+ * that use this metadata.
+ */
+ pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!pool->wq) {
+ *error = "Error creating pool's workqueue";
+ err_p = ERR_PTR(-ENOMEM);
+ goto bad_wq;
+ }
+
+ INIT_WORK(&pool->worker, do_worker);
+ spin_lock_init(&pool->lock);
+ bio_list_init(&pool->deferred_bios);
+ bio_list_init(&pool->deferred_flush_bios);
+ INIT_LIST_HEAD(&pool->prepared_mappings);
+ pool->low_water_triggered = 0;
+ pool->no_free_space = 0;
+ bio_list_init(&pool->retry_on_resume_list);
+ ds_init(&pool->ds);
+
+ pool->next_mapping = NULL;
+ pool->mapping_pool =
+ mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+ if (!pool->mapping_pool) {
+ *error = "Error creating pool's mapping mempool";
+ err_p = ERR_PTR(-ENOMEM);
+ goto bad_mapping_pool;
+ }
+
+ pool->endio_hook_pool =
+ mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+ if (!pool->endio_hook_pool) {
+ *error = "Error creating pool's endio_hook mempool";
+ err_p = ERR_PTR(-ENOMEM);
+ goto bad_endio_hook_pool;
+ }
+ pool->ref_count = 1;
+ pool->pool_md = pool_md;
+ pool->md_dev = metadata_dev;
+ __pool_table_insert(pool);
+
+ return pool;
+
+bad_endio_hook_pool:
+ mempool_destroy(pool->mapping_pool);
+bad_mapping_pool:
+ destroy_workqueue(pool->wq);
+bad_wq:
+ dm_kcopyd_client_destroy(pool->copier);
+bad_kcopyd_client:
+ prison_destroy(pool->prison);
+bad_prison:
+ kfree(pool);
+bad_pool:
+ if (dm_pool_metadata_close(pmd))
+ DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
+
+ return err_p;
+}
+
+static void __pool_inc(struct pool *pool)
+{
+ BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
+ pool->ref_count++;
+}
+
+static void __pool_dec(struct pool *pool)
+{
+ BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
+ BUG_ON(!pool->ref_count);
+ if (!--pool->ref_count)
+ __pool_destroy(pool);
+}
+
+static struct pool *__pool_find(struct mapped_device *pool_md,
+ struct block_device *metadata_dev,
+ unsigned long block_size, char **error)
+{
+ struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
+
+ if (pool) {
+ if (pool->pool_md != pool_md)
+ return ERR_PTR(-EBUSY);
+ __pool_inc(pool);
+
+ } else {
+ pool = __pool_table_lookup(pool_md);
+ if (pool) {
+ if (pool->md_dev != metadata_dev)
+ return ERR_PTR(-EINVAL);
+ __pool_inc(pool);
+
+ } else
+ pool = pool_create(pool_md, metadata_dev, block_size, error);
+ }
+
+ return pool;
+}
+
+/*----------------------------------------------------------------
+ * Pool target methods
+ *--------------------------------------------------------------*/
+static void pool_dtr(struct dm_target *ti)
+{
+ struct pool_c *pt = ti->private;
+
+ mutex_lock(&dm_thin_pool_table.mutex);
+
+ unbind_control_target(pt->pool, ti);
+ __pool_dec(pt->pool);
+ dm_put_device(ti, pt->metadata_dev);
+ dm_put_device(ti, pt->data_dev);
+ kfree(pt);
+
+ mutex_unlock(&dm_thin_pool_table.mutex);
+}
+
+struct pool_features {
+ unsigned zero_new_blocks:1;
+};
+
+static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
+ struct dm_target *ti)
+{
+ int r;
+ unsigned argc;
+ const char *arg_name;
+
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of pool feature arguments"},
+ };
+
+ /*
+ * No feature arguments supplied.
+ */
+ if (!as->argc)
+ return 0;
+
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
+ if (r)
+ return -EINVAL;
+
+ while (argc && !r) {
+ arg_name = dm_shift_arg(as);
+ argc--;
+
+ if (!strcasecmp(arg_name, "skip_block_zeroing")) {
+ pf->zero_new_blocks = 0;
+ continue;
+ }
+
+ ti->error = "Unrecognised pool feature requested";
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+/*
+ * thin-pool <metadata dev> <data dev>
+ * <data block size (sectors)>
+ * <low water mark (blocks)>
+ * [<#feature args> [<arg>]*]
+ *
+ * Optional feature arguments are:
+ * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
+ */
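+/*
+ * Illustrative example only: the device names and sizes below are
+ * hypothetical and not part of this patch. A pool built from the
+ * parameters above might be loaded with dmsetup as:
+ *
+ *   dmsetup create pool --table "0 20971520 thin-pool /dev/mapper/meta \
+ *      /dev/mapper/data 128 32768 1 skip_block_zeroing"
+ *
+ * i.e. 128-sector data blocks, a low water mark of 32768 blocks and
+ * block zeroing disabled via the single feature argument.
+ */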
+static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r;
+ struct pool_c *pt;
+ struct pool *pool;
+ struct pool_features pf;
+ struct dm_arg_set as;
+ struct dm_dev *data_dev;
+ unsigned long block_size;
+ dm_block_t low_water_blocks;
+ struct dm_dev *metadata_dev;
+ sector_t metadata_dev_size;
+
+ /*
+ * FIXME Remove validation from scope of lock.
+ */
+ mutex_lock(&dm_thin_pool_table.mutex);
+
+ if (argc < 4) {
+ ti->error = "Invalid argument count";
+ r = -EINVAL;
+ goto out_unlock;
+ }
+ as.argc = argc;
+ as.argv = argv;
+
+ r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
+ if (r) {
+ ti->error = "Error opening metadata block device";
+ goto out_unlock;
+ }
+
+ metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ if (metadata_dev_size > METADATA_DEV_MAX_SECTORS) {
+ ti->error = "Metadata device is too large";
+ r = -EINVAL;
+ goto out_metadata;
+ }
+
+ r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
+ if (r) {
+ ti->error = "Error getting data device";
+ goto out_metadata;
+ }
+
+ if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
+ block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+ !is_power_of_2(block_size)) {
+ ti->error = "Invalid block size";
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
+ ti->error = "Invalid low water mark";
+ r = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Set default pool features.
+ */
+ memset(&pf, 0, sizeof(pf));
+ pf.zero_new_blocks = 1;
+
+ dm_consume_args(&as, 4);
+ r = parse_pool_features(&as, &pf, ti);
+ if (r)
+ goto out;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
+ block_size, &ti->error);
+ if (IS_ERR(pool)) {
+ r = PTR_ERR(pool);
+ goto out_free_pt;
+ }
+
+ pt->pool = pool;
+ pt->ti = ti;
+ pt->metadata_dev = metadata_dev;
+ pt->data_dev = data_dev;
+ pt->low_water_blocks = low_water_blocks;
+ pt->zero_new_blocks = pf.zero_new_blocks;
+ ti->num_flush_requests = 1;
+ ti->num_discard_requests = 0;
+ ti->private = pt;
+
+ pt->callbacks.congested_fn = pool_is_congested;
+ dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+
+ mutex_unlock(&dm_thin_pool_table.mutex);
+
+ return 0;
+
+out_free_pt:
+ kfree(pt);
+out:
+ dm_put_device(ti, data_dev);
+out_metadata:
+ dm_put_device(ti, metadata_dev);
+out_unlock:
+ mutex_unlock(&dm_thin_pool_table.mutex);
+
+ return r;
+}
+
+static int pool_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ int r;
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+ unsigned long flags;
+
+ /*
+ * As this is a singleton target, ti->begin is always zero.
+ */
+ spin_lock_irqsave(&pool->lock, flags);
+ bio->bi_bdev = pt->data_dev->bdev;
+ r = DM_MAPIO_REMAPPED;
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return r;
+}
+
+/*
+ * Retrieves the number of blocks of the data device from
+ * the superblock and compares it to the actual device size,
+ * thus resizing the data device in case it has grown.
+ *
+ * This copes both with opening a preallocated data device in the ctr
+ * followed by a resume
+ * -and-
+ * calling the resume method individually after userspace has
+ * grown the data device in reaction to a table event.
+ */
+static int pool_preresume(struct dm_target *ti)
+{
+ int r;
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+ dm_block_t data_size, sb_data_size;
+
+ /*
+ * Take control of the pool object.
+ */
+ r = bind_control_target(pool, ti);
+ if (r)
+ return r;
+
+ data_size = ti->len >> pool->block_shift;
+ r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
+ if (r) {
+ DMERR("failed to retrieve data device size");
+ return r;
+ }
+
+ if (data_size < sb_data_size) {
+ DMERR("pool target too small, is %llu blocks (expected %llu)",
+ data_size, sb_data_size);
+ return -EINVAL;
+
+ } else if (data_size > sb_data_size) {
+ r = dm_pool_resize_data_dev(pool->pmd, data_size);
+ if (r) {
+ DMERR("failed to resize data device");
+ return r;
+ }
+
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static void pool_resume(struct dm_target *ti)
+{
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->low_water_triggered = 0;
+ pool->no_free_space = 0;
+ __requeue_bios(pool);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+}
+
+static void pool_postsuspend(struct dm_target *ti)
+{
+ int r;
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+ flush_workqueue(pool->wq);
+
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r < 0) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ /* FIXME: invalidate device? error the next FUA or FLUSH bio? */
+ }
+}
+
+static int check_arg_count(unsigned argc, unsigned args_required)
+{
+ if (argc != args_required) {
+ DMWARN("Message received with %u arguments instead of %u.",
+ argc, args_required);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
+{
+ if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
+ *dev_id <= MAX_DEV_ID)
+ return 0;
+
+ if (warning)
+ DMWARN("Message received with invalid device id: %s", arg);
+
+ return -EINVAL;
+}
+
+static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ dm_thin_id dev_id;
+ int r;
+
+ r = check_arg_count(argc, 2);
+ if (r)
+ return r;
+
+ r = read_dev_id(argv[1], &dev_id, 1);
+ if (r)
+ return r;
+
+ r = dm_pool_create_thin(pool->pmd, dev_id);
+ if (r) {
+ DMWARN("Creation of new thinly-provisioned device with id %s failed.",
+ argv[1]);
+ return r;
+ }
+
+ return 0;
+}
+
+static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ dm_thin_id dev_id;
+ dm_thin_id origin_dev_id;
+ int r;
+
+ r = check_arg_count(argc, 3);
+ if (r)
+ return r;
+
+ r = read_dev_id(argv[1], &dev_id, 1);
+ if (r)
+ return r;
+
+ r = read_dev_id(argv[2], &origin_dev_id, 1);
+ if (r)
+ return r;
+
+ r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
+ if (r) {
+ DMWARN("Creation of new snapshot %s of device %s failed.",
+ argv[1], argv[2]);
+ return r;
+ }
+
+ return 0;
+}
+
+static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ dm_thin_id dev_id;
+ int r;
+
+ r = check_arg_count(argc, 2);
+ if (r)
+ return r;
+
+ r = read_dev_id(argv[1], &dev_id, 1);
+ if (r)
+ return r;
+
+ r = dm_pool_delete_thin_device(pool->pmd, dev_id);
+ if (r)
+ DMWARN("Deletion of thin device %s failed.", argv[1]);
+
+ return r;
+}
+
+static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ dm_thin_id old_id, new_id;
+ int r;
+
+ r = check_arg_count(argc, 3);
+ if (r)
+ return r;
+
+ if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
+ DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
+ return -EINVAL;
+ }
+
+ if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
+ DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
+ return -EINVAL;
+ }
+
+ r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
+ if (r) {
+ DMWARN("Failed to change transaction id from %s to %s.",
+ argv[1], argv[2]);
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Messages supported:
+ * create_thin <dev_id>
+ * create_snap <dev_id> <origin_id>
+ * delete <dev_id>
+ * set_transaction_id <current_trans_id> <new_trans_id>
+ */
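+/*
+ * Illustrative usage only: the device name and ids below are
+ * hypothetical. Messages are sent to an active pool with dmsetup, e.g.
+ *
+ *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
+ *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
+ */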
+static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r = -EINVAL;
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+ if (!strcasecmp(argv[0], "create_thin"))
+ r = process_create_thin_mesg(argc, argv, pool);
+
+ else if (!strcasecmp(argv[0], "create_snap"))
+ r = process_create_snap_mesg(argc, argv, pool);
+
+ else if (!strcasecmp(argv[0], "delete"))
+ r = process_delete_mesg(argc, argv, pool);
+
+ else if (!strcasecmp(argv[0], "set_transaction_id"))
+ r = process_set_transaction_id_mesg(argc, argv, pool);
+
+ else
+ DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
+
+ if (!r) {
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r)
+ DMERR("%s message: dm_pool_commit_metadata() failed, error = %d",
+ argv[0], r);
+ }
+
+ return r;
+}
+
+/*
+ * Status line is:
+ * <transaction id> <used metadata blocks>/<total metadata blocks>
+ * <used data blocks>/<total data blocks> <held metadata root>
+ */
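+/*
+ * For example (values purely illustrative), a pool might report:
+ *
+ *   1 24/4096 10240/409600 -
+ *
+ * i.e. transaction id 1, 24 of 4096 metadata blocks used, 10240 of
+ * 409600 data blocks used and no held metadata root.
+ */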
+static int pool_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+{
+ int r;
+ unsigned sz = 0;
+ uint64_t transaction_id;
+ dm_block_t nr_free_blocks_data;
+ dm_block_t nr_free_blocks_metadata;
+ dm_block_t nr_blocks_data;
+ dm_block_t nr_blocks_metadata;
+ dm_block_t held_root;
+ char buf[BDEVNAME_SIZE];
+ char buf2[BDEVNAME_SIZE];
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ r = dm_pool_get_metadata_transaction_id(pool->pmd,
+ &transaction_id);
+ if (r)
+ return r;
+
+ r = dm_pool_get_free_metadata_block_count(pool->pmd,
+ &nr_free_blocks_metadata);
+ if (r)
+ return r;
+
+ r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
+ if (r)
+ return r;
+
+ r = dm_pool_get_free_block_count(pool->pmd,
+ &nr_free_blocks_data);
+ if (r)
+ return r;
+
+ r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
+ if (r)
+ return r;
+
+ r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+ if (r)
+ return r;
+
+ DMEMIT("%llu %llu/%llu %llu/%llu ",
+ (unsigned long long)transaction_id,
+ (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ (unsigned long long)nr_blocks_metadata,
+ (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
+ (unsigned long long)nr_blocks_data);
+
+ if (held_root)
+ DMEMIT("%llu", held_root);
+ else
+ DMEMIT("-");
+
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %s %lu %llu ",
+ format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
+ format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
+ (unsigned long)pool->sectors_per_block,
+ (unsigned long long)pt->low_water_blocks);
+
+ DMEMIT("%u ", !pool->zero_new_blocks);
+
+ if (!pool->zero_new_blocks)
+ DMEMIT("skip_block_zeroing ");
+ break;
+ }
+
+ return 0;
+}
+
+static int pool_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct pool_c *pt = ti->private;
+
+ return fn(ti, pt->data_dev, 0, ti->len, data);
+}
+
+static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct pool_c *pt = ti->private;
+ struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = pt->data_dev->bdev;
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+}
+
+static struct target_type pool_target = {
+ .name = "thin-pool",
+ .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
+ DM_TARGET_IMMUTABLE,
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = pool_ctr,
+ .dtr = pool_dtr,
+ .map = pool_map,
+ .postsuspend = pool_postsuspend,
+ .preresume = pool_preresume,
+ .resume = pool_resume,
+ .message = pool_message,
+ .status = pool_status,
+ .merge = pool_merge,
+ .iterate_devices = pool_iterate_devices,
+ .io_hints = pool_io_hints,
+};
+
+/*----------------------------------------------------------------
+ * Thin target methods
+ *--------------------------------------------------------------*/
+static void thin_dtr(struct dm_target *ti)
+{
+ struct thin_c *tc = ti->private;
+
+ mutex_lock(&dm_thin_pool_table.mutex);
+
+ __pool_dec(tc->pool);
+ dm_pool_close_thin_device(tc->td);
+ dm_put_device(ti, tc->pool_dev);
+ kfree(tc);
+
+ mutex_unlock(&dm_thin_pool_table.mutex);
+}
+
+/*
+ * Thin target parameters:
+ *
+ * <pool_dev> <dev_id>
+ *
+ * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
+ * dev_id: the internal device identifier
+ */
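+/*
+ * Illustrative example only (names and sizes are hypothetical): a
+ * 2097152-sector thin volume backed by device id 0 of the pool above
+ * could be activated with:
+ *
+ *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
+ */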
+static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r;
+ struct thin_c *tc;
+ struct dm_dev *pool_dev;
+ struct mapped_device *pool_md;
+
+ mutex_lock(&dm_thin_pool_table.mutex);
+
+ if (argc != 2) {
+ ti->error = "Invalid argument count";
+ r = -EINVAL;
+ goto out_unlock;
+ }
+
+ tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
+ if (!tc) {
+ ti->error = "Out of memory";
+ r = -ENOMEM;
+ goto out_unlock;
+ }
+
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
+ if (r) {
+ ti->error = "Error opening pool device";
+ goto bad_pool_dev;
+ }
+ tc->pool_dev = pool_dev;
+
+ if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
+ ti->error = "Invalid device id";
+ r = -EINVAL;
+ goto bad_common;
+ }
+
+ pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
+ if (!pool_md) {
+ ti->error = "Couldn't get pool mapped device";
+ r = -EINVAL;
+ goto bad_common;
+ }
+
+ tc->pool = __pool_table_lookup(pool_md);
+ if (!tc->pool) {
+ ti->error = "Couldn't find pool object";
+ r = -EINVAL;
+ goto bad_pool_lookup;
+ }
+ __pool_inc(tc->pool);
+
+ r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
+ if (r) {
+ ti->error = "Couldn't open thin internal device";
+ goto bad_thin_open;
+ }
+
+ ti->split_io = tc->pool->sectors_per_block;
+ ti->num_flush_requests = 1;
+ ti->num_discard_requests = 0;
+ ti->discards_supported = 0;
+
+ dm_put(pool_md);
+
+ mutex_unlock(&dm_thin_pool_table.mutex);
+
+ return 0;
+
+bad_thin_open:
+ __pool_dec(tc->pool);
+bad_pool_lookup:
+ dm_put(pool_md);
+bad_common:
+ dm_put_device(ti, tc->pool_dev);
+bad_pool_dev:
+ kfree(tc);
+out_unlock:
+ mutex_unlock(&dm_thin_pool_table.mutex);
+
+ return r;
+}
+
+static int thin_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ bio->bi_sector -= ti->begin;
+
+ return thin_bio_map(ti, bio, map_context);
+}
+
+static void thin_postsuspend(struct dm_target *ti)
+{
+ if (dm_noflush_suspending(ti))
+ requeue_io((struct thin_c *)ti->private);
+}
+
+/*
+ * <nr mapped sectors> <highest mapped sector>
+ */
+static int thin_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+{
+ int r;
+ ssize_t sz = 0;
+ dm_block_t mapped, highest;
+ char buf[BDEVNAME_SIZE];
+ struct thin_c *tc = ti->private;
+
+ if (!tc->td)
+ DMEMIT("-");
+ else {
+ switch (type) {
+ case STATUSTYPE_INFO:
+ r = dm_thin_get_mapped_count(tc->td, &mapped);
+ if (r)
+ return r;
+
+ r = dm_thin_get_highest_mapped_block(tc->td, &highest);
+ if (r < 0)
+ return r;
+
+ DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
+ if (r)
+ DMEMIT("%llu", ((highest + 1) *
+ tc->pool->sectors_per_block) - 1);
+ else
+ DMEMIT("-");
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %lu",
+ format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
+ (unsigned long) tc->dev_id);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int thin_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ dm_block_t blocks;
+ struct thin_c *tc = ti->private;
+
+ /*
+ * We can't call dm_pool_get_data_dev_size() since that blocks. So
+ * we follow a more convoluted path through to the pool's target.
+ */
+ if (!tc->pool->ti)
+ return 0; /* nothing is bound */
+
+ blocks = tc->pool->ti->len >> tc->pool->block_shift;
+ if (blocks)
+ return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
+
+ return 0;
+}
+
+static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct thin_c *tc = ti->private;
+
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, tc->pool->sectors_per_block << SECTOR_SHIFT);
+}
+
+static struct target_type thin_target = {
+ .name = "thin",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = thin_ctr,
+ .dtr = thin_dtr,
+ .map = thin_map,
+ .postsuspend = thin_postsuspend,
+ .status = thin_status,
+ .iterate_devices = thin_iterate_devices,
+ .io_hints = thin_io_hints,
+};
+
+/*----------------------------------------------------------------*/
+
+static int __init dm_thin_init(void)
+{
+ int r;
+
+ pool_table_init();
+
+ r = dm_register_target(&thin_target);
+ if (r)
+ return r;
+
+ r = dm_register_target(&pool_target);
+ if (r)
+ dm_unregister_target(&thin_target);
+
+ return r;
+}
+
+static void dm_thin_exit(void)
+{
+ dm_unregister_target(&thin_target);
+ dm_unregister_target(&pool_target);
+}
+
+module_init(dm_thin_init);
+module_exit(dm_thin_exit);
+
+MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 6b1e3b61b25..8efe033bab5 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
+#include <linux/export.h>
#include "dm.h"
#include "dm-uevent.h"
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 52b39f335bb..4720f68f817 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -25,6 +25,16 @@
#define DM_MSG_PREFIX "core"
+#ifdef CONFIG_PRINTK
+/*
+ * ratelimit state to be used in DMXXX_LIMIT().
+ */
+DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+EXPORT_SYMBOL(dm_ratelimit_state);
+#endif
+
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
@@ -130,6 +140,8 @@ struct mapped_device {
/* Protect queue and type against concurrent access. */
struct mutex type_lock;
+ struct target_type *immutable_target_type;
+
struct gendisk *disk;
char name[16];
@@ -180,9 +192,6 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;
- /* For saving the address of __make_request for request based dm */
- make_request_fn *saved_make_request_fn;
-
/* sysfs handle */
struct kobject kobj;
@@ -1391,7 +1400,7 @@ out:
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
@@ -1412,19 +1421,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
queue_io(md, bio);
else
bio_io_error(bio);
- return 0;
+ return;
}
__split_and_process_bio(md, bio);
up_read(&md->io_lock);
- return 0;
-}
-
-static int dm_make_request(struct request_queue *q, struct bio *bio)
-{
- struct mapped_device *md = q->queuedata;
-
- return md->saved_make_request_fn(q, bio); /* call __make_request() */
+ return;
}
static int dm_request_based(struct mapped_device *md)
@@ -1432,14 +1434,14 @@ static int dm_request_based(struct mapped_device *md)
return blk_queue_stackable(md->queue);
}
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
if (dm_request_based(md))
- return dm_make_request(q, bio);
-
- return _dm_request(q, bio);
+ blk_queue_bio(q, bio);
+ else
+ _dm_request(q, bio);
}
void dm_dispatch_request(struct request *rq)
@@ -2086,6 +2088,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
write_lock_irqsave(&md->map_lock, flags);
old_map = md->map;
md->map = t;
+ md->immutable_target_type = dm_table_get_immutable_target_type(t);
+
dm_table_set_restrictions(t, q, limits);
if (merge_is_optional)
set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
@@ -2156,6 +2160,11 @@ unsigned dm_get_md_type(struct mapped_device *md)
return md->type;
}
+struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
+{
+ return md->immutable_target_type;
+}
+
/*
* Fully initialize a request-based queue (->elevator, ->request_fn, etc).
*/
@@ -2172,7 +2181,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
return 0;
md->queue = q;
- md->saved_make_request_fn = md->queue->make_request_fn;
dm_init_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
@@ -2231,6 +2239,7 @@ struct mapped_device *dm_get_md(dev_t dev)
return md;
}
+EXPORT_SYMBOL_GPL(dm_get_md);
void *dm_get_mdptr(struct mapped_device *md)
{
@@ -2316,7 +2325,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
while (1) {
set_current_state(interruptible);
- smp_mb();
if (!md_in_flight(md))
break;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 6745dbd278a..b7dacd59d8d 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -60,6 +60,7 @@ int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
int dm_table_any_busy_target(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
+struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_supports_discards(struct dm_table *t);
int dm_table_alloc_md_mempools(struct dm_table *t);
@@ -72,6 +73,7 @@ void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
+struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
int dm_setup_md_queue(struct mapped_device *md);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 60816b132c2..feb2c3c7bb4 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -63,6 +63,7 @@
#define MaxFault 50
#include <linux/blkdev.h>
+#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
@@ -169,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(struct mddev *mddev, struct bio *bio)
+static void make_request(struct mddev *mddev, struct bio *bio)
{
struct faulty_conf *conf = mddev->private;
int failit = 0;
@@ -181,7 +182,7 @@ static int make_request(struct mddev *mddev, struct bio *bio)
* just fail immediately
*/
bio_endio(bio, -EIO);
- return 0;
+ return;
}
if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +212,15 @@ static int make_request(struct mddev *mddev, struct bio *bio)
}
if (failit) {
struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
b->bi_bdev = conf->rdev->bdev;
b->bi_private = bio;
b->bi_end_io = faulty_fail;
- generic_make_request(b);
- return 0;
- } else {
+ bio = b;
+ } else
bio->bi_bdev = conf->rdev->bdev;
- return 1;
- }
+
+ generic_make_request(bio);
}
static void status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 10c5844460c..c3273efd08c 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -19,6 +19,7 @@
#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "linear.h"
@@ -264,14 +265,14 @@ static int linear_stop (struct mddev *mddev)
return 0;
}
-static int linear_make_request (struct mddev *mddev, struct bio *bio)
+static void linear_make_request(struct mddev *mddev, struct bio *bio)
{
struct dev_info *tmp_dev;
sector_t start_sector;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
- return 0;
+ return;
}
rcu_read_lock();
@@ -293,7 +294,7 @@ static int linear_make_request (struct mddev *mddev, struct bio *bio)
(unsigned long long)start_sector);
rcu_read_unlock();
bio_io_error(bio);
- return 0;
+ return;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
tmp_dev->end_sector)) {
@@ -307,20 +308,17 @@ static int linear_make_request (struct mddev *mddev, struct bio *bio)
bp = bio_split(bio, end_sector - bio->bi_sector);
- if (linear_make_request(mddev, &bp->bio1))
- generic_make_request(&bp->bio1);
- if (linear_make_request(mddev, &bp->bio2))
- generic_make_request(&bp->bio2);
+ linear_make_request(mddev, &bp->bio1);
+ linear_make_request(mddev, &bp->bio2);
bio_pair_release(bp);
- return 0;
+ return;
}
bio->bi_bdev = tmp_dev->rdev->bdev;
bio->bi_sector = bio->bi_sector - start_sector
+ tmp_dev->rdev->data_offset;
rcu_read_unlock();
-
- return 1;
+ generic_make_request(bio);
}
static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 266e82ebaf1..84acfe7d10e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -44,6 +44,7 @@
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
+#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
@@ -332,18 +333,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
-static int md_make_request(struct request_queue *q, struct bio *bio)
+static void md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
- int rv;
int cpu;
unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
- return 0;
+ return;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
@@ -368,7 +368,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
* go away inside make_request
*/
sectors = bio_sectors(bio);
- rv = mddev->pers->make_request(mddev, bio);
+ mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -377,8 +377,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
-
- return rv;
}
/* mddev_suspend makes sure no new requests are submitted
@@ -477,8 +475,7 @@ static void md_submit_flush_data(struct work_struct *ws)
bio_endio(bio, 0);
else {
bio->bi_rw &= ~REQ_FLUSH;
- if (mddev->pers->make_request(mddev, bio))
- generic_make_request(bio);
+ mddev->pers->make_request(mddev, bio);
}
mddev->flush_bio = NULL;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 51c1d91557e..cf742d9306e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -419,7 +419,7 @@ struct md_personality
int level;
struct list_head list;
struct module *owner;
- int (*make_request)(struct mddev *mddev, struct bio *bio);
+ void (*make_request)(struct mddev *mddev, struct bio *bio);
int (*run)(struct mddev *mddev);
int (*stop)(struct mddev *mddev);
void (*status)(struct seq_file *seq, struct mddev *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d32c785e17d..5899246fa37 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -20,6 +20,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -106,7 +107,7 @@ static void multipath_end_request(struct bio *bio, int error)
rdev_dec_pending(rdev, conf->mddev);
}
-static int multipath_make_request(struct mddev *mddev, struct bio * bio)
+static void multipath_make_request(struct mddev *mddev, struct bio * bio)
{
struct mpconf *conf = mddev->private;
struct multipath_bh * mp_bh;
@@ -114,7 +115,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio)
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
- return 0;
+ return;
}
mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +127,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio)
if (mp_bh->path < 0) {
bio_endio(bio, -EIO);
mempool_free(mp_bh, conf->pool);
- return 0;
+ return;
}
multipath = conf->multipaths + mp_bh->path;
@@ -137,7 +138,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
- return 0;
+ return;
}
static void multipath_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
new file mode 100644
index 00000000000..ceb359050a5
--- /dev/null
+++ b/drivers/md/persistent-data/Kconfig
@@ -0,0 +1,8 @@
+config DM_PERSISTENT_DATA
+ tristate
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ select LIBCRC32C
+ select DM_BUFIO
+ ---help---
+ Library providing immutable on-disk data structure support for
+ device-mapper targets such as the thin provisioning target.
diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile
new file mode 100644
index 00000000000..cfa95f66223
--- /dev/null
+++ b/drivers/md/persistent-data/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
+dm-persistent-data-objs := \
+ dm-block-manager.o \
+ dm-space-map-checker.o \
+ dm-space-map-common.o \
+ dm-space-map-disk.o \
+ dm-space-map-metadata.o \
+ dm-transaction-manager.o \
+ dm-btree.o \
+ dm-btree-remove.o \
+ dm-btree-spine.o
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
new file mode 100644
index 00000000000..0317ecdc6e5
--- /dev/null
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#include "dm-block-manager.h"
+#include "dm-persistent-data-internal.h"
+#include "../dm-bufio.h"
+
+#include <linux/crc32c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/device-mapper.h>
+#include <linux/stacktrace.h>
+
+#define DM_MSG_PREFIX "block manager"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This is a read/write semaphore with a couple of differences.
+ *
+ * i) There is a restriction on the number of concurrent read locks that
+ * may be held at once. This is just an implementation detail.
+ *
+ * ii) Recursive locking attempts are detected and return -EINVAL. A stack
+ * trace is also emitted for the previous lock acquisition.
+ *
+ * iii) Priority is given to write locks.
+ */
+#define MAX_HOLDERS 4
+#define MAX_STACK 10
+
+typedef unsigned long stack_entries[MAX_STACK];
+
+struct block_lock {
+ spinlock_t lock;
+ __s32 count;
+ struct list_head waiters;
+ struct task_struct *holders[MAX_HOLDERS];
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+ struct stack_trace traces[MAX_HOLDERS];
+ stack_entries entries[MAX_HOLDERS];
+#endif
+};
+
+struct waiter {
+ struct list_head list;
+ struct task_struct *task;
+ int wants_write;
+};
+
+static unsigned __find_holder(struct block_lock *lock,
+ struct task_struct *task)
+{
+ unsigned i;
+
+ for (i = 0; i < MAX_HOLDERS; i++)
+ if (lock->holders[i] == task)
+ break;
+
+ BUG_ON(i == MAX_HOLDERS);
+ return i;
+}
+
+/* call this *after* you increment lock->count */
+static void __add_holder(struct block_lock *lock, struct task_struct *task)
+{
+ unsigned h = __find_holder(lock, NULL);
+#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+ struct stack_trace *t;
+#endif
+
+ get_task_struct(task);
+ lock->holders[h] = task;
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+ t = lock->traces + h;
+ t->nr_entries = 0;
+ t->max_entries = MAX_STACK;
+ t->entries = lock->entries[h];
+ t->skip = 2;
+ save_stack_trace(t);
+#endif
+}
+
+/* call this *before* you decrement lock->count */
+static void __del_holder(struct block_lock *lock, struct task_struct *task)
+{
+ unsigned h = __find_holder(lock, task);
+ lock->holders[h] = NULL;
+ put_task_struct(task);
+}
+
+static int __check_holder(struct block_lock *lock)
+{
+ unsigned i;
+#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+ static struct stack_trace t;
+ static stack_entries entries;
+#endif
+
+ for (i = 0; i < MAX_HOLDERS; i++) {
+ if (lock->holders[i] == current) {
+ DMERR("recursive lock detected in pool metadata");
+#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+ DMERR("previously held here:");
+ print_stack_trace(lock->traces + i, 4);
+
+ DMERR("subsequent aquisition attempted here:");
+ t.nr_entries = 0;
+ t.max_entries = MAX_STACK;
+ t.entries = entries;
+ t.skip = 3;
+ save_stack_trace(&t);
+ print_stack_trace(&t, 4);
+#endif
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void __wait(struct waiter *w)
+{
+ for (;;) {
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+
+ if (!w->task)
+ break;
+
+ schedule();
+ }
+
+ set_task_state(current, TASK_RUNNING);
+}
+
+static void __wake_waiter(struct waiter *w)
+{
+ struct task_struct *task;
+
+ list_del(&w->list);
+ task = w->task;
+ smp_mb();
+ w->task = NULL;
+ wake_up_process(task);
+}
+
+/*
+ * We either wake a few readers or a single writer.
+ */
+static void __wake_many(struct block_lock *lock)
+{
+ struct waiter *w, *tmp;
+
+ BUG_ON(lock->count < 0);
+ list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
+ if (lock->count >= MAX_HOLDERS)
+ return;
+
+ if (w->wants_write) {
+ if (lock->count > 0)
+ return; /* still read locked */
+
+ lock->count = -1;
+ __add_holder(lock, w->task);
+ __wake_waiter(w);
+ return;
+ }
+
+ lock->count++;
+ __add_holder(lock, w->task);
+ __wake_waiter(w);
+ }
+}
+
+static void bl_init(struct block_lock *lock)
+{
+ int i;
+
+ spin_lock_init(&lock->lock);
+ lock->count = 0;
+ INIT_LIST_HEAD(&lock->waiters);
+ for (i = 0; i < MAX_HOLDERS; i++)
+ lock->holders[i] = NULL;
+}
+
+static int __available_for_read(struct block_lock *lock)
+{
+ return lock->count >= 0 &&
+ lock->count < MAX_HOLDERS &&
+ list_empty(&lock->waiters);
+}
+
+static int bl_down_read(struct block_lock *lock)
+{
+ int r;
+ struct waiter w;
+
+ spin_lock(&lock->lock);
+ r = __check_holder(lock);
+ if (r) {
+ spin_unlock(&lock->lock);
+ return r;
+ }
+
+ if (__available_for_read(lock)) {
+ lock->count++;
+ __add_holder(lock, current);
+ spin_unlock(&lock->lock);
+ return 0;
+ }
+
+ get_task_struct(current);
+
+ w.task = current;
+ w.wants_write = 0;
+ list_add_tail(&w.list, &lock->waiters);
+ spin_unlock(&lock->lock);
+
+ __wait(&w);
+ put_task_struct(current);
+ return 0;
+}
+
+static int bl_down_read_nonblock(struct block_lock *lock)
+{
+ int r;
+
+ spin_lock(&lock->lock);
+ r = __check_holder(lock);
+ if (r)
+ goto out;
+
+ if (__available_for_read(lock)) {
+ lock->count++;
+ __add_holder(lock, current);
+ r = 0;
+ } else
+ r = -EWOULDBLOCK;
+
+out:
+ spin_unlock(&lock->lock);
+ return r;
+}
+
+static void bl_up_read(struct block_lock *lock)
+{
+ spin_lock(&lock->lock);
+ BUG_ON(lock->count <= 0);
+ __del_holder(lock, current);
+ --lock->count;
+ if (!list_empty(&lock->waiters))
+ __wake_many(lock);
+ spin_unlock(&lock->lock);
+}
+
+static int bl_down_write(struct block_lock *lock)
+{
+ int r;
+ struct waiter w;
+
+ spin_lock(&lock->lock);
+ r = __check_holder(lock);
+ if (r) {
+ spin_unlock(&lock->lock);
+ return r;
+ }
+
+ if (lock->count == 0 && list_empty(&lock->waiters)) {
+ lock->count = -1;
+ __add_holder(lock, current);
+ spin_unlock(&lock->lock);
+ return 0;
+ }
+
+ get_task_struct(current);
+ w.task = current;
+ w.wants_write = 1;
+
+ /*
+ * Writers are given priority. We know there's only one mutator in the
+ * system, so we ignore the ordering reversal.
+ */
+ list_add(&w.list, &lock->waiters);
+ spin_unlock(&lock->lock);
+
+ __wait(&w);
+ put_task_struct(current);
+
+ return 0;
+}
+
+static void bl_up_write(struct block_lock *lock)
+{
+ spin_lock(&lock->lock);
+ __del_holder(lock, current);
+ lock->count = 0;
+ if (!list_empty(&lock->waiters))
+ __wake_many(lock);
+ spin_unlock(&lock->lock);
+}
+
+static void report_recursive_bug(dm_block_t b, int r)
+{
+ if (r == -EINVAL)
+ DMERR("recursive acquisition of block %llu requested.",
+ (unsigned long long) b);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Block manager is currently implemented using dm-bufio. struct
+ * dm_block_manager and struct dm_block map directly onto a couple of
+ * structs in the bufio interface. I want to retain the freedom to move
+ * away from bufio in the future. So these structs are just cast within
+ * this .c file, rather than making it through to the public interface.
+ */
+static struct dm_buffer *to_buffer(struct dm_block *b)
+{
+ return (struct dm_buffer *) b;
+}
+
+static struct dm_bufio_client *to_bufio(struct dm_block_manager *bm)
+{
+ return (struct dm_bufio_client *) bm;
+}
+
+dm_block_t dm_block_location(struct dm_block *b)
+{
+ return dm_bufio_get_block_number(to_buffer(b));
+}
+EXPORT_SYMBOL_GPL(dm_block_location);
+
+void *dm_block_data(struct dm_block *b)
+{
+ return dm_bufio_get_block_data(to_buffer(b));
+}
+EXPORT_SYMBOL_GPL(dm_block_data);
+
+struct buffer_aux {
+ struct dm_block_validator *validator;
+ struct block_lock lock;
+ int write_locked;
+};
+
+static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
+{
+ struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+ aux->validator = NULL;
+ bl_init(&aux->lock);
+}
+
+static void dm_block_manager_write_callback(struct dm_buffer *buf)
+{
+ struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+ if (aux->validator) {
+ aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
+ dm_bufio_get_block_size(dm_bufio_get_client(buf)));
+ }
+}
+
+/*----------------------------------------------------------------
+ * Public interface
+ *--------------------------------------------------------------*/
+struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
+ unsigned block_size,
+ unsigned cache_size,
+ unsigned max_held_per_thread)
+{
+ return (struct dm_block_manager *)
+ dm_bufio_client_create(bdev, block_size, max_held_per_thread,
+ sizeof(struct buffer_aux),
+ dm_block_manager_alloc_callback,
+ dm_block_manager_write_callback);
+}
+EXPORT_SYMBOL_GPL(dm_block_manager_create);
+
+void dm_block_manager_destroy(struct dm_block_manager *bm)
+{
+ return dm_bufio_client_destroy(to_bufio(bm));
+}
+EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
+
+unsigned dm_bm_block_size(struct dm_block_manager *bm)
+{
+ return dm_bufio_get_block_size(to_bufio(bm));
+}
+EXPORT_SYMBOL_GPL(dm_bm_block_size);
+
+dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
+{
+ return dm_bufio_get_device_size(to_bufio(bm));
+}
+
+static int dm_bm_validate_buffer(struct dm_block_manager *bm,
+ struct dm_buffer *buf,
+ struct buffer_aux *aux,
+ struct dm_block_validator *v)
+{
+ if (unlikely(!aux->validator)) {
+ int r;
+ if (!v)
+ return 0;
+ r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(to_bufio(bm)));
+ if (unlikely(r))
+ return r;
+ aux->validator = v;
+ } else {
+ if (unlikely(aux->validator != v)) {
+ DMERR("validator mismatch (old=%s vs new=%s) for block %llu",
+ aux->validator->name, v ? v->name : "NULL",
+ (unsigned long long)
+ dm_bufio_get_block_number(buf));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ struct buffer_aux *aux;
+ void *p;
+ int r;
+
+ p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
+ if (unlikely(IS_ERR(p)))
+ return PTR_ERR(p);
+
+ aux = dm_bufio_get_aux_data(to_buffer(*result));
+ r = bl_down_read(&aux->lock);
+ if (unlikely(r)) {
+ dm_bufio_release(to_buffer(*result));
+ report_recursive_bug(b, r);
+ return r;
+ }
+
+ aux->write_locked = 0;
+
+ r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
+ if (unlikely(r)) {
+ bl_up_read(&aux->lock);
+ dm_bufio_release(to_buffer(*result));
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bm_read_lock);
+
+int dm_bm_write_lock(struct dm_block_manager *bm,
+ dm_block_t b, struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ struct buffer_aux *aux;
+ void *p;
+ int r;
+
+ p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
+ if (unlikely(IS_ERR(p)))
+ return PTR_ERR(p);
+
+ aux = dm_bufio_get_aux_data(to_buffer(*result));
+ r = bl_down_write(&aux->lock);
+ if (r) {
+ dm_bufio_release(to_buffer(*result));
+ report_recursive_bug(b, r);
+ return r;
+ }
+
+ aux->write_locked = 1;
+
+ r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
+ if (unlikely(r)) {
+ bl_up_write(&aux->lock);
+ dm_bufio_release(to_buffer(*result));
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bm_write_lock);
+
+int dm_bm_read_try_lock(struct dm_block_manager *bm,
+ dm_block_t b, struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ struct buffer_aux *aux;
+ void *p;
+ int r;
+
+ p = dm_bufio_get(to_bufio(bm), b, (struct dm_buffer **) result);
+ if (unlikely(IS_ERR(p)))
+ return PTR_ERR(p);
+ if (unlikely(!p))
+ return -EWOULDBLOCK;
+
+ aux = dm_bufio_get_aux_data(to_buffer(*result));
+ r = bl_down_read_nonblock(&aux->lock);
+ if (r < 0) {
+ dm_bufio_release(to_buffer(*result));
+ report_recursive_bug(b, r);
+ return r;
+ }
+ aux->write_locked = 0;
+
+ r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
+ if (unlikely(r)) {
+ bl_up_read(&aux->lock);
+ dm_bufio_release(to_buffer(*result));
+ return r;
+ }
+
+ return 0;
+}
+
+int dm_bm_write_lock_zero(struct dm_block_manager *bm,
+ dm_block_t b, struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ int r;
+ struct buffer_aux *aux;
+ void *p;
+
+ p = dm_bufio_new(to_bufio(bm), b, (struct dm_buffer **) result);
+ if (unlikely(IS_ERR(p)))
+ return PTR_ERR(p);
+
+ memset(p, 0, dm_bm_block_size(bm));
+
+ aux = dm_bufio_get_aux_data(to_buffer(*result));
+ r = bl_down_write(&aux->lock);
+ if (r) {
+ dm_bufio_release(to_buffer(*result));
+ return r;
+ }
+
+ aux->write_locked = 1;
+ aux->validator = v;
+
+ return 0;
+}
+
+int dm_bm_unlock(struct dm_block *b)
+{
+ struct buffer_aux *aux;
+ aux = dm_bufio_get_aux_data(to_buffer(b));
+
+ if (aux->write_locked) {
+ dm_bufio_mark_buffer_dirty(to_buffer(b));
+ bl_up_write(&aux->lock);
+ } else
+ bl_up_read(&aux->lock);
+
+ dm_bufio_release(to_buffer(b));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bm_unlock);
+
+int dm_bm_unlock_move(struct dm_block *b, dm_block_t n)
+{
+ struct buffer_aux *aux;
+
+ aux = dm_bufio_get_aux_data(to_buffer(b));
+
+ if (aux->write_locked) {
+ dm_bufio_mark_buffer_dirty(to_buffer(b));
+ bl_up_write(&aux->lock);
+ } else
+ bl_up_read(&aux->lock);
+
+ dm_bufio_release_move(to_buffer(b), n);
+ return 0;
+}
+
+int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
+ struct dm_block *superblock)
+{
+ int r;
+
+ r = dm_bufio_write_dirty_buffers(to_bufio(bm));
+ if (unlikely(r))
+ return r;
+ r = dm_bufio_issue_flush(to_bufio(bm));
+ if (unlikely(r))
+ return r;
+
+ dm_bm_unlock(superblock);
+
+ r = dm_bufio_write_dirty_buffers(to_bufio(bm));
+ if (unlikely(r))
+ return r;
+ r = dm_bufio_issue_flush(to_bufio(bm));
+ if (unlikely(r))
+ return r;
+
+ return 0;
+}
+
+u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
+{
+ return crc32c(~(u32) 0, data, len) ^ init_xor;
+}
+EXPORT_SYMBOL_GPL(dm_bm_checksum);
+
+/*----------------------------------------------------------------*/
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_DESCRIPTION("Immutable metadata library for dm");
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
new file mode 100644
index 00000000000..924833d2dfa
--- /dev/null
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_BLOCK_MANAGER_H
+#define _LINUX_DM_BLOCK_MANAGER_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Block number.
+ */
+typedef uint64_t dm_block_t;
+struct dm_block;
+
+dm_block_t dm_block_location(struct dm_block *b);
+void *dm_block_data(struct dm_block *b);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * @max_held_per_thread should be the maximum number of locks, read or
+ * write, that an individual thread holds at any one time.
+ */
+struct dm_block_manager;
+struct dm_block_manager *dm_block_manager_create(
+ struct block_device *bdev, unsigned block_size,
+ unsigned cache_size, unsigned max_held_per_thread);
+void dm_block_manager_destroy(struct dm_block_manager *bm);
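+
+/*
+ * Illustrative sketch only: bringing up a block manager for 4KiB blocks.
+ * The cache size and the per-thread lock limit below are arbitrary
+ * example values, not recommendations made by this interface.
+ */
+static inline struct dm_block_manager *
+example_create_bm(struct block_device *bdev)
+{
+	/* 4096 byte blocks, a cache of 1024, at most 3 locks per thread */
+	return dm_block_manager_create(bdev, 4096, 1024, 3);
+}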
+
+unsigned dm_bm_block_size(struct dm_block_manager *bm);
+dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The validator allows the caller to verify newly-read data and modify
+ * the data just before writing, e.g. to calculate checksums. It's
+ * important to be consistent with your use of validators. The only time
+ * you can change validators is if you call dm_bm_write_lock_zero.
+ */
+struct dm_block_validator {
+ const char *name;
+ void (*prepare_for_write)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);
+
+ /*
+ * Return 0 if the checksum is valid or < 0 on error.
+ */
+ int (*check)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);
+};
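+
+/*
+ * A minimal validator sketch (illustrative only): it stamps a magic
+ * number into the first 8 bytes on write and rejects blocks without it
+ * on read.  A real validator would normally checksum the block as well.
+ * The two functions would be wired up as .prepare_for_write and .check.
+ */
+#define EXAMPLE_VALIDATOR_MAGIC 0x6d61676963626c6bULL	/* arbitrary value */
+
+static inline void example_prepare_for_write(struct dm_block_validator *v,
+					     struct dm_block *b,
+					     size_t block_size)
+{
+	*(__le64 *) dm_block_data(b) = cpu_to_le64(EXAMPLE_VALIDATOR_MAGIC);
+}
+
+static inline int example_check(struct dm_block_validator *v,
+				struct dm_block *b, size_t block_size)
+{
+	if (le64_to_cpu(*(__le64 *) dm_block_data(b)) != EXAMPLE_VALIDATOR_MAGIC)
+		return -EILSEQ;
+
+	return 0;
+}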
+
+/*----------------------------------------------------------------*/
+
+/*
+ * You can have multiple concurrent readers or a single writer holding a
+ * block lock.
+ */
+
+/*
+ * dm_bm_read_lock() and dm_bm_write_lock() lock a block and return,
+ * through @result, a pointer to memory that holds a copy of that block.
+ * If you have write-locked the block then any changes you make to memory
+ * pointed to by @result will be written back to the disk sometime after
+ * dm_bm_unlock is called.
+ */
+int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **result);
+
+int dm_bm_write_lock(struct dm_block_manager *bm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **result);
+
+/*
+ * The *_try_lock variants return -EWOULDBLOCK if the block isn't
+ * available immediately.
+ */
+int dm_bm_read_try_lock(struct dm_block_manager *bm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **result);
+
+/*
+ * Use dm_bm_write_lock_zero() when you know you're going to
+ * overwrite the block completely. It saves a disk read.
+ */
+int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **result);
+
+int dm_bm_unlock(struct dm_block *b);
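+
+/*
+ * Illustrative sketch only: read-modify-write of a single block.  The
+ * block number and the validator are assumed to come from the caller.
+ */
+static inline int example_bump_first_byte(struct dm_block_manager *bm,
+					  dm_block_t where,
+					  struct dm_block_validator *v)
+{
+	struct dm_block *b;
+	int r;
+
+	r = dm_bm_write_lock(bm, where, v, &b);
+	if (r)
+		return r;
+
+	/* mutate the cached copy; it is written back some time after unlock */
+	((u8 *) dm_block_data(b))[0]++;
+
+	return dm_bm_unlock(b);
+}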
+
+/*
+ * An optimisation; we often want to copy a block's contents to a new
+ * block, e.g. as part of the shadowing operation.  It's far better for
+ * bufio to do this move behind the scenes than for us to hold two locks
+ * and memcpy the data.
+ */
+int dm_bm_unlock_move(struct dm_block *b, dm_block_t n);
+
+/*
+ * It's a common idiom to have a superblock that should be committed last.
+ *
+ * @superblock should be write-locked on entry. It will be unlocked during
+ * this function. All dirty blocks are guaranteed to be written and flushed
+ * before the superblock.
+ *
+ * This method always blocks.
+ */
+int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
+ struct dm_block *superblock);
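+
+/*
+ * A commit sketch (illustrative only, assuming the superblock lives in
+ * block 0): update the superblock last and let dm_bm_flush_and_unlock()
+ * order it after all the other dirty blocks.
+ */
+static inline int example_commit(struct dm_block_manager *bm,
+				 struct dm_block_validator *v)
+{
+	struct dm_block *sblock;
+	int r;
+
+	r = dm_bm_write_lock(bm, 0, v, &sblock);
+	if (r)
+		return r;
+
+	/* ... fill in the superblock fields via dm_block_data(sblock) ... */
+
+	return dm_bm_flush_and_unlock(bm, sblock);
+}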
+
+u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_BLOCK_MANAGER_H */
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
new file mode 100644
index 00000000000..d279c768f8f
--- /dev/null
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_BTREE_INTERNAL_H
+#define DM_BTREE_INTERNAL_H
+
+#include "dm-btree.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We'll need 2 accessor functions for n->csum and n->blocknr
+ * if dm-btree-spine.c is ever to manipulate them directly.
+ */
+
+enum node_flags {
+ INTERNAL_NODE = 1,
+ LEAF_NODE = 1 << 1
+};
+
+/*
+ * Every btree node begins with this structure. Make sure it's a multiple
+ * of 8 bytes in size, otherwise the 64-bit keys will be misaligned.
+ */
+struct node_header {
+ __le32 csum;
+ __le32 flags;
+ __le64 blocknr; /* Block this node is supposed to live in. */
+
+ __le32 nr_entries;
+ __le32 max_entries;
+ __le32 value_size;
+ __le32 padding;
+} __packed;
+
+struct node {
+ struct node_header header;
+ __le64 keys[0];
+} __packed;
+
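+/*
+ * Layout sketch: a node is the 32 byte header above, then max_entries
+ * 64-bit keys, then max_entries values packed back to back, so value i
+ * starts at offset
+ *
+ *	32 + max_entries * 8 + i * value_size
+ *
+ * Keeping the header a multiple of 8 bytes keeps the key array (and the
+ * values of 8 byte value types) naturally aligned.
+ */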
+
+void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ struct dm_btree_value_type *vt);
+
+int new_block(struct dm_btree_info *info, struct dm_block **result);
+int unlock_block(struct dm_btree_info *info, struct dm_block *b);
+
+/*
+ * Spines keep track of the rolling locks. There are 2 variants, read-only
+ * and one that uses shadowing. These are separate structs to allow the
+ * type checker to spot misuse, for example accidentally calling read_lock
+ * on a shadow spine.
+ */
+struct ro_spine {
+ struct dm_btree_info *info;
+
+ int count;
+ struct dm_block *nodes[2];
+};
+
+void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
+int exit_ro_spine(struct ro_spine *s);
+int ro_step(struct ro_spine *s, dm_block_t new_child);
+struct node *ro_node(struct ro_spine *s);
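+
+/*
+ * Illustrative sketch only: a read-only walk that steps onto the root
+ * node and inspects it.  Real walkers keep calling ro_step() with child
+ * block numbers read out of the current node.
+ */
+static inline int example_count_root_entries(struct dm_btree_info *info,
+					     dm_block_t root,
+					     uint32_t *result)
+{
+	struct ro_spine spine;
+	int r;
+
+	init_ro_spine(&spine, info);
+	r = ro_step(&spine, root);
+	if (!r)
+		*result = le32_to_cpu(ro_node(&spine)->header.nr_entries);
+	exit_ro_spine(&spine);
+
+	return r;
+}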
+
+struct shadow_spine {
+ struct dm_btree_info *info;
+
+ int count;
+ struct dm_block *nodes[2];
+
+ dm_block_t root;
+};
+
+void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
+int exit_shadow_spine(struct shadow_spine *s);
+
+int shadow_step(struct shadow_spine *s, dm_block_t b,
+ struct dm_btree_value_type *vt);
+
+/*
+ * The spine must have at least one entry before calling this.
+ */
+struct dm_block *shadow_current(struct shadow_spine *s);
+
+/*
+ * The spine must have at least two entries before calling this.
+ */
+struct dm_block *shadow_parent(struct shadow_spine *s);
+
+int shadow_has_parent(struct shadow_spine *s);
+
+int shadow_root(struct shadow_spine *s);
+
+/*
+ * Some inlines.
+ */
+static inline __le64 *key_ptr(struct node *n, uint32_t index)
+{
+ return n->keys + index;
+}
+
+static inline void *value_base(struct node *n)
+{
+ return &n->keys[le32_to_cpu(n->header.max_entries)];
+}
+
+/*
+ * FIXME: Now that value size is stored in node we don't need the third parm.
+ */
+static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
+{
+ BUG_ON(value_size != le32_to_cpu(n->header.value_size));
+ return value_base(n) + (value_size * index);
+}
+
+/*
+ * Assumes the values are suitably-aligned and converts to core format.
+ */
+static inline uint64_t value64(struct node *n, uint32_t index)
+{
+ __le64 *values_le = value_base(n);
+
+ return le64_to_cpu(values_le[index]);
+}
+
+/*
+ * Searching for a key within a single node.
+ */
+int lower_bound(struct node *n, uint64_t key);
+
+extern struct dm_block_validator btree_node_validator;
+
+#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
new file mode 100644
index 00000000000..023fbc2d389
--- /dev/null
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree.h"
+#include "dm-btree-internal.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+
+/*
+ * Removing an entry from a btree
+ * ==============================
+ *
+ * A very important constraint for our btree is that no node, except the
+ * root, may have fewer than a certain number of entries.
+ * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
+ *
+ * Ensuring this is complicated by the way we want to only ever hold the
+ * locks on 2 nodes concurrently, and only change nodes in a top to bottom
+ * fashion.
+ *
+ * Each node may have a left or right sibling.  When descending the spine,
+ * if a node contains only MIN_ENTRIES then we try to increase this to at
+ * least MIN_ENTRIES + 1. We do this in the following ways:
+ *
+ * [A] No siblings => this can only happen if the node is the root, in which
+ *	case we copy the child's contents over the root.
+ *
+ * [B] No left sibling
+ * ==> rebalance(node, right sibling)
+ *
+ * [C] No right sibling
+ * ==> rebalance(left sibling, node)
+ *
+ * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
+ *	==> delete node adding its contents to left and right
+ *
+ * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
+ * ==> rebalance(left, node, right)
+ *
+ * After these operations it's possible that our original node no
+ * longer contains the desired subtree.  For this reason the rebalancing
+ * is performed on the children of the current node.  This also avoids
+ * having a special case for the root.
+ *
+ * Once this rebalancing has occurred we can then step into the child node
+ * (for internal nodes), or delete the entry (for leaf nodes).
+ */
+
+/*
+ * Some little utilities for moving node data around.
+ */
+static void node_shift(struct node *n, int shift)
+{
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(n->header.value_size);
+
+ if (shift < 0) {
+ shift = -shift;
+ BUG_ON(shift > nr_entries);
+ BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift, value_size));
+ memmove(key_ptr(n, 0),
+ key_ptr(n, shift),
+ (nr_entries - shift) * sizeof(__le64));
+ memmove(value_ptr(n, 0, value_size),
+ value_ptr(n, shift, value_size),
+ (nr_entries - shift) * value_size);
+ } else {
+ BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
+ memmove(key_ptr(n, shift),
+ key_ptr(n, 0),
+ nr_entries * sizeof(__le64));
+ memmove(value_ptr(n, shift, value_size),
+ value_ptr(n, 0, value_size),
+ nr_entries * value_size);
+ }
+}
+
+static void node_copy(struct node *left, struct node *right, int shift)
+{
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(left->header.value_size);
+ BUG_ON(value_size != le32_to_cpu(right->header.value_size));
+
+ if (shift < 0) {
+ shift = -shift;
+ BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
+ memcpy(key_ptr(left, nr_left),
+ key_ptr(right, 0),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(left, nr_left, value_size),
+ value_ptr(right, 0, value_size),
+ shift * value_size);
+ } else {
+ BUG_ON(shift > le32_to_cpu(right->header.max_entries));
+ memcpy(key_ptr(right, 0),
+ key_ptr(left, nr_left - shift),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(right, 0, value_size),
+ value_ptr(left, nr_left - shift, value_size),
+ shift * value_size);
+ }
+}
+
+/*
+ * Delete a specific entry from a leaf node.
+ */
+static void delete_at(struct node *n, unsigned index)
+{
+ unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned nr_to_copy = nr_entries - (index + 1);
+ uint32_t value_size = le32_to_cpu(n->header.value_size);
+ BUG_ON(index >= nr_entries);
+
+ if (nr_to_copy) {
+ memmove(key_ptr(n, index),
+ key_ptr(n, index + 1),
+ nr_to_copy * sizeof(__le64));
+
+ memmove(value_ptr(n, index, value_size),
+ value_ptr(n, index + 1, value_size),
+ nr_to_copy * value_size);
+ }
+
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+}
+
+static unsigned del_threshold(struct node *n)
+{
+ return le32_to_cpu(n->header.max_entries) / 3;
+}
+
+static unsigned merge_threshold(struct node *n)
+{
+ /*
+ * The extra one is because we know we're potentially going to
+ * delete an entry.
+ */
+ return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
+}
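+
+/*
+ * Worked example (illustrative): with max_entries = 252 the thresholds
+ * above give del_threshold = 84 and merge_threshold = 169.  A child
+ * holding 84 entries or fewer is rebalanced on the way down, and two
+ * siblings are merged only if together they hold at most 169 entries;
+ * the extra entry is allowed because an entry is about to be deleted
+ * anyway.
+ */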
+
+struct child {
+ unsigned index;
+ struct dm_block *block;
+ struct node *n;
+};
+
+static struct dm_btree_value_type le64_type = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL
+};
+
+static int init_child(struct dm_btree_info *info, struct node *parent,
+ unsigned index, struct child *result)
+{
+ int r, inc;
+ dm_block_t root;
+
+ result->index = index;
+ root = value64(parent, index);
+
+ r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
+ &result->block, &inc);
+ if (r)
+ return r;
+
+ result->n = dm_block_data(result->block);
+
+ if (inc)
+ inc_children(info->tm, result->n, &le64_type);
+
+ *((__le64 *) value_ptr(parent, index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(result->block));
+
+ return 0;
+}
+
+static int exit_child(struct dm_btree_info *info, struct child *c)
+{
+ return dm_tm_unlock(info->tm, c->block);
+}
+
+static void shift(struct node *left, struct node *right, int count)
+{
+ if (!count)
+ return;
+
+ if (count > 0) {
+ node_shift(right, count);
+ node_copy(left, right, count);
+ } else {
+ node_copy(left, right, count);
+ node_shift(right, count);
+ }
+
+ left->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
+ BUG_ON(le32_to_cpu(left->header.nr_entries) > le32_to_cpu(left->header.max_entries));
+
+ right->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
+ BUG_ON(le32_to_cpu(right->header.nr_entries) > le32_to_cpu(right->header.max_entries));
+}
+
+static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+
+ if (nr_left + nr_right <= merge_threshold(left)) {
+ /*
+ * Merge
+ */
+ node_copy(left, right, -nr_right);
+ left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
+ delete_at(parent, r->index);
+
+ /*
+		 * We need to decrement the right block, but not its
+ * children, since they're still referenced by left.
+ */
+ dm_tm_dec(info->tm, dm_block_location(r->block));
+ } else {
+ /*
+ * Rebalance.
+ */
+ unsigned target_left = (nr_left + nr_right) / 2;
+ unsigned shift_ = nr_left - target_left;
+ BUG_ON(le32_to_cpu(left->header.max_entries) <= nr_left - shift_);
+ BUG_ON(le32_to_cpu(right->header.max_entries) <= nr_right + shift_);
+ shift(left, right, nr_left - target_left);
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+}
+
+static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent;
+ struct child left, right;
+
+ parent = dm_block_data(shadow_current(s));
+
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+ r = init_child(info, parent, left_index + 1, &right);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ __rebalance2(info, parent, &left, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ return exit_child(info, &right);
+}
+
+static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *c, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *center = c->n;
+ struct node *right = r->n;
+
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+
+ unsigned target;
+
+ BUG_ON(left->header.max_entries != center->header.max_entries);
+ BUG_ON(center->header.max_entries != right->header.max_entries);
+
+ if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
+ /*
+ * Delete center node:
+ *
+ * We dump as many entries from center as possible into
+ * left, then the rest in right, then rebalance2. This
+ * wastes some cpu, but I want something simple atm.
+ */
+ unsigned shift = min(max_entries - nr_left, nr_center);
+
+ BUG_ON(nr_left + shift > max_entries);
+ node_copy(left, center, -shift);
+ left->header.nr_entries = cpu_to_le32(nr_left + shift);
+
+ if (shift != nr_center) {
+ shift = nr_center - shift;
+ BUG_ON((nr_right + shift) >= max_entries);
+ node_shift(right, shift);
+ node_copy(center, right, shift);
+ right->header.nr_entries = cpu_to_le32(nr_right + shift);
+ }
+ *key_ptr(parent, r->index) = right->keys[0];
+
+ delete_at(parent, c->index);
+ r->index--;
+
+ dm_tm_dec(info->tm, dm_block_location(c->block));
+ __rebalance2(info, parent, l, r);
+
+ return;
+ }
+
+ /*
+ * Rebalance
+ */
+ target = (nr_left + nr_center + nr_right) / 3;
+ BUG_ON(target > max_entries);
+
+ /*
+ * Adjust the left node
+ */
+ shift(left, center, nr_left - target);
+
+ /*
+ * Adjust the right node
+ */
+ shift(center, right, target - nr_right);
+ *key_ptr(parent, c->index) = center->keys[0];
+ *key_ptr(parent, r->index) = right->keys[0];
+}
+
+static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent = dm_block_data(shadow_current(s));
+ struct child left, center, right;
+
+ /*
+ * FIXME: fill out an array?
+ */
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+ r = init_child(info, parent, left_index + 1, &center);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ r = init_child(info, parent, left_index + 2, &right);
+ if (r) {
+ exit_child(info, &left);
+ exit_child(info, &center);
+ return r;
+ }
+
+ __rebalance3(info, parent, &left, &center, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+ exit_child(info, &center);
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &center);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &right);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int get_nr_entries(struct dm_transaction_manager *tm,
+ dm_block_t b, uint32_t *result)
+{
+ int r;
+ struct dm_block *block;
+ struct node *n;
+
+ r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+ if (r)
+ return r;
+
+ n = dm_block_data(block);
+ *result = le32_to_cpu(n->header.nr_entries);
+
+ return dm_tm_unlock(tm, block);
+}
+
+static int rebalance_children(struct shadow_spine *s,
+ struct dm_btree_info *info, uint64_t key)
+{
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+ struct node *n;
+
+ n = dm_block_data(shadow_current(s));
+
+ if (le32_to_cpu(n->header.nr_entries) == 1) {
+ struct dm_block *child;
+ dm_block_t b = value64(n, 0);
+
+ r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
+ if (r)
+ return r;
+
+ memcpy(n, dm_block_data(child),
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+ r = dm_tm_unlock(info->tm, child);
+ if (r)
+ return r;
+
+ dm_tm_dec(info->tm, dm_block_location(child));
+ return 0;
+ }
+
+ i = lower_bound(n, key);
+ if (i < 0)
+ return -ENODATA;
+
+ r = get_nr_entries(info->tm, value64(n, i), &child_entries);
+ if (r)
+ return r;
+
+ if (child_entries > del_threshold(n))
+ return 0;
+
+ has_left_sibling = i > 0;
+ has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
+
+ if (!has_left_sibling)
+ r = rebalance2(s, info, i);
+
+ else if (!has_right_sibling)
+ r = rebalance2(s, info, i - 1);
+
+ else
+ r = rebalance3(s, info, i - 1);
+
+ return r;
+}
+
+static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+{
+ int i = lower_bound(n, key);
+
+ if ((i < 0) ||
+ (i >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[i]) != key))
+ return -ENODATA;
+
+ *index = i;
+
+ return 0;
+}
+
+/*
+ * Prepares for removal from one level of the hierarchy. The caller must
+ * call delete_at() to remove the entry at index.
+ */
+static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ struct dm_btree_value_type *vt, dm_block_t root,
+ uint64_t key, unsigned *index)
+{
+ int i = *index, r;
+ struct node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+ if (r < 0)
+ break;
+
+ /*
+ * We have to patch up the parent node, ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+ if (shadow_has_parent(s)) {
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+ memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(__le64)),
+ &location, sizeof(__le64));
+ }
+
+ n = dm_block_data(shadow_current(s));
+
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ r = rebalance_children(s, info, key);
+ if (r)
+ break;
+
+ n = dm_block_data(shadow_current(s));
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ i = lower_bound(n, key);
+
+ /*
+ * We know the key is present, or else
+ * rebalance_children would have returned
+ * -ENODATA
+ */
+ root = value64(n, i);
+ }
+
+ return r;
+}
+
+int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+{
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+ struct node *n;
+
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = remove_raw(&spine, info,
+ (level == last_level ?
+ &info->value_type : &le64_type),
+ root, keys[level], (unsigned *)&index);
+ if (r < 0)
+ break;
+
+ n = dm_block_data(shadow_current(&spine));
+ if (level != last_level) {
+ root = value64(n, index);
+ continue;
+ }
+
+ BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));
+
+ if (info->value_type.dec)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+
+ delete_at(n, index);
+ }
+
+ *new_root = shadow_root(&spine);
+ exit_shadow_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_remove);
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
new file mode 100644
index 00000000000..d9a7912ee8e
--- /dev/null
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree-internal.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree spine"
+
+/*----------------------------------------------------------------*/
+
+#define BTREE_CSUM_XOR 121107
+
+static int node_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size);
+
+static void node_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+
+ h->blocknr = cpu_to_le64(dm_block_location(b));
+ h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
+ block_size - sizeof(__le32),
+ BTREE_CSUM_XOR));
+
+	BUG_ON(node_check(v, b, block_size));
+}
+
+static int node_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+ size_t value_size;
+ __le32 csum_disk;
+ uint32_t flags;
+
+ if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
+ DMERR("node_check failed blocknr %llu wanted %llu",
+ le64_to_cpu(h->blocknr), dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags,
+ block_size - sizeof(__le32),
+ BTREE_CSUM_XOR));
+ if (csum_disk != h->csum) {
+ DMERR("node_check failed csum %u wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
+ return -EILSEQ;
+ }
+
+ value_size = le32_to_cpu(h->value_size);
+
+ if (sizeof(struct node_header) +
+ (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
+ DMERR("node_check failed: max_entries too large");
+ return -EILSEQ;
+ }
+
+ if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
+ DMERR("node_check failed, too many entries");
+ return -EILSEQ;
+ }
+
+ /*
+ * The node must be either INTERNAL or LEAF.
+ */
+ flags = le32_to_cpu(h->flags);
+ if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
+		DMERR("node_check failed, node is neither INTERNAL nor LEAF");
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+struct dm_block_validator btree_node_validator = {
+ .name = "btree_node",
+ .prepare_for_write = node_prepare_for_write,
+ .check = node_check
+};
+
+/*----------------------------------------------------------------*/
+
+static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+ struct dm_block **result)
+{
+ return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
+}
+
+static int bn_shadow(struct dm_btree_info *info, dm_block_t orig,
+ struct dm_btree_value_type *vt,
+ struct dm_block **result)
+{
+ int r, inc;
+
+ r = dm_tm_shadow_block(info->tm, orig, &btree_node_validator,
+ result, &inc);
+ if (!r && inc)
+ inc_children(info->tm, dm_block_data(*result), vt);
+
+ return r;
+}
+
+int new_block(struct dm_btree_info *info, struct dm_block **result)
+{
+ return dm_tm_new_block(info->tm, &btree_node_validator, result);
+}
+
+int unlock_block(struct dm_btree_info *info, struct dm_block *b)
+{
+ return dm_tm_unlock(info->tm, b);
+}
+
+/*----------------------------------------------------------------*/
+
+void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info)
+{
+ s->info = info;
+ s->count = 0;
+ s->nodes[0] = NULL;
+ s->nodes[1] = NULL;
+}
+
+int exit_ro_spine(struct ro_spine *s)
+{
+ int r = 0, i;
+
+ for (i = 0; i < s->count; i++) {
+ int r2 = unlock_block(s->info, s->nodes[i]);
+ if (r2 < 0)
+ r = r2;
+ }
+
+ return r;
+}
+
+int ro_step(struct ro_spine *s, dm_block_t new_child)
+{
+ int r;
+
+ if (s->count == 2) {
+ r = unlock_block(s->info, s->nodes[0]);
+ if (r < 0)
+ return r;
+ s->nodes[0] = s->nodes[1];
+ s->count--;
+ }
+
+ r = bn_read_lock(s->info, new_child, s->nodes + s->count);
+ if (!r)
+ s->count++;
+
+ return r;
+}
+
+struct node *ro_node(struct ro_spine *s)
+{
+ struct dm_block *block;
+
+ BUG_ON(!s->count);
+ block = s->nodes[s->count - 1];
+
+ return dm_block_data(block);
+}
+
+/*----------------------------------------------------------------*/
+
+void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
+{
+ s->info = info;
+ s->count = 0;
+}
+
+int exit_shadow_spine(struct shadow_spine *s)
+{
+ int r = 0, i;
+
+ for (i = 0; i < s->count; i++) {
+ int r2 = unlock_block(s->info, s->nodes[i]);
+ if (r2 < 0)
+ r = r2;
+ }
+
+ return r;
+}
+
+int shadow_step(struct shadow_spine *s, dm_block_t b,
+ struct dm_btree_value_type *vt)
+{
+ int r;
+
+ if (s->count == 2) {
+ r = unlock_block(s->info, s->nodes[0]);
+ if (r < 0)
+ return r;
+ s->nodes[0] = s->nodes[1];
+ s->count--;
+ }
+
+ r = bn_shadow(s->info, b, vt, s->nodes + s->count);
+ if (!r) {
+ if (!s->count)
+ s->root = dm_block_location(s->nodes[0]);
+
+ s->count++;
+ }
+
+ return r;
+}
+
+struct dm_block *shadow_current(struct shadow_spine *s)
+{
+ BUG_ON(!s->count);
+
+ return s->nodes[s->count - 1];
+}
+
+struct dm_block *shadow_parent(struct shadow_spine *s)
+{
+ BUG_ON(s->count != 2);
+
+ return s->count == 2 ? s->nodes[0] : NULL;
+}
+
+int shadow_has_parent(struct shadow_spine *s)
+{
+ return s->count >= 2;
+}
+
+int shadow_root(struct shadow_spine *s)
+{
+ return s->root;
+}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
new file mode 100644
index 00000000000..bd1e7ffbe26
--- /dev/null
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree-internal.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
+
+/*----------------------------------------------------------------
+ * Array manipulation
+ *--------------------------------------------------------------*/
+static void memcpy_disk(void *dest, const void *src, size_t len)
+ __dm_written_to_disk(src)
+{
+ memcpy(dest, src, len);
+ __dm_unbless_for_disk(src);
+}
+
+static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+ unsigned index, void *elt)
+ __dm_written_to_disk(elt)
+{
+ if (index < nr_elts)
+ memmove(base + (elt_size * (index + 1)),
+ base + (elt_size * index),
+ (nr_elts - index) * elt_size);
+
+ memcpy_disk(base + (elt_size * index), elt, elt_size);
+}
+
+/*----------------------------------------------------------------*/
+
+/* makes the assumption that no two keys are the same. */
+static int bsearch(struct node *n, uint64_t key, int want_hi)
+{
+ int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
+
+ while (hi - lo > 1) {
+ int mid = lo + ((hi - lo) / 2);
+ uint64_t mid_key = le64_to_cpu(n->keys[mid]);
+
+ if (mid_key == key)
+ return mid;
+
+ if (mid_key < key)
+ lo = mid;
+ else
+ hi = mid;
+ }
+
+ return want_hi ? hi : lo;
+}
+
+int lower_bound(struct node *n, uint64_t key)
+{
+ return bsearch(n, key, 0);
+}
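+
+/*
+ * Worked example (illustrative): for a node whose keys are 10, 20, 30,
+ * lower_bound() returns 1 for key 25 (the slot holding 20, the largest
+ * key <= 25), 2 for key 30 (an exact match) and -1 for key 5, since no
+ * key is small enough.
+ */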
+
+void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ struct dm_btree_value_type *vt)
+{
+ unsigned i;
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
+ for (i = 0; i < nr_entries; i++)
+ dm_tm_inc(tm, value64(n, i));
+ else if (vt->inc)
+ for (i = 0; i < nr_entries; i++)
+ vt->inc(vt->context,
+ value_ptr(n, i, vt->size));
+}
+
+static int insert_at(size_t value_size, struct node *node, unsigned index,
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
+{
+ uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+ __le64 key_le = cpu_to_le64(key);
+
+ if (index > nr_entries ||
+ index >= le32_to_cpu(node->header.max_entries)) {
+ DMERR("too many entries in btree node for insert");
+ __dm_unbless_for_disk(value);
+ return -ENOMEM;
+ }
+
+ __dm_bless_for_disk(&key_le);
+
+ array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
+ array_insert(value_base(node), value_size, nr_entries, index, value);
+ node->header.nr_entries = cpu_to_le32(nr_entries + 1);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We want 3n entries (for some n). This works more nicely for repeated
+ * insert/remove loops than (2n + 1).
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t block_size)
+{
+ uint32_t total, n;
+ size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */
+
+ block_size -= sizeof(struct node_header);
+ total = block_size / elt_size;
+ n = total / 3; /* rounds down */
+
+ return 3 * n;
+}
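+
+/*
+ * Worked example (illustrative): with a 4096 byte block and 8 byte
+ * values, block_size - sizeof(struct node_header) = 4096 - 32 = 4064,
+ * each entry needs 8 + 8 = 16 bytes, so total = 254, n = 84 and a node
+ * holds up to 3 * 84 = 252 entries.
+ */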
+
+int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
+{
+ int r;
+ struct dm_block *b;
+ struct node *n;
+ size_t block_size;
+ uint32_t max_entries;
+
+ r = new_block(info, &b);
+ if (r < 0)
+ return r;
+
+ block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
+ max_entries = calc_max_entries(info->value_type.size, block_size);
+
+ n = dm_block_data(b);
+ memset(n, 0, block_size);
+ n->header.flags = cpu_to_le32(LEAF_NODE);
+ n->header.nr_entries = cpu_to_le32(0);
+ n->header.max_entries = cpu_to_le32(max_entries);
+ n->header.value_size = cpu_to_le32(info->value_type.size);
+
+ *root = dm_block_location(b);
+ return unlock_block(info, b);
+}
+EXPORT_SYMBOL_GPL(dm_btree_empty);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Deletion uses a recursive algorithm; since we have limited stack space
+ * we explicitly manage our own stack on the heap.
+ */
+#define MAX_SPINE_DEPTH 64
+struct frame {
+ struct dm_block *b;
+ struct node *n;
+ unsigned level;
+ unsigned nr_children;
+ unsigned current_child;
+};
+
+struct del_stack {
+ struct dm_transaction_manager *tm;
+ int top;
+ struct frame spine[MAX_SPINE_DEPTH];
+};
+
+static int top_frame(struct del_stack *s, struct frame **f)
+{
+ if (s->top < 0) {
+ DMERR("btree deletion stack empty");
+ return -EINVAL;
+ }
+
+ *f = s->spine + s->top;
+
+ return 0;
+}
+
+static int unprocessed_frames(struct del_stack *s)
+{
+ return s->top >= 0;
+}
+
+static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
+{
+ int r;
+ uint32_t ref_count;
+
+ if (s->top >= MAX_SPINE_DEPTH - 1) {
+ DMERR("btree deletion stack out of memory");
+ return -ENOMEM;
+ }
+
+ r = dm_tm_ref(s->tm, b, &ref_count);
+ if (r)
+ return r;
+
+ if (ref_count > 1)
+ /*
+		 * This is a shared node, so we can just decrement its
+ * reference counter and leave the children.
+ */
+ dm_tm_dec(s->tm, b);
+
+ else {
+ struct frame *f = s->spine + ++s->top;
+
+ r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
+ if (r) {
+ s->top--;
+ return r;
+ }
+
+ f->n = dm_block_data(f->b);
+ f->level = level;
+ f->nr_children = le32_to_cpu(f->n->header.nr_entries);
+ f->current_child = 0;
+ }
+
+ return 0;
+}
+
+static void pop_frame(struct del_stack *s)
+{
+ struct frame *f = s->spine + s->top--;
+
+ dm_tm_dec(s->tm, dm_block_location(f->b));
+ dm_tm_unlock(s->tm, f->b);
+}
+
+int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+{
+ int r;
+ struct del_stack *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+ s->tm = info->tm;
+ s->top = -1;
+
+ r = push_frame(s, root, 1);
+ if (r)
+ goto out;
+
+ while (unprocessed_frames(s)) {
+ uint32_t flags;
+ struct frame *f;
+ dm_block_t b;
+
+ r = top_frame(s, &f);
+ if (r)
+ goto out;
+
+ if (f->current_child >= f->nr_children) {
+ pop_frame(s);
+ continue;
+ }
+
+ flags = le32_to_cpu(f->n->header.flags);
+ if (flags & INTERNAL_NODE) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level);
+ if (r)
+ goto out;
+
+ } else if (f->level != (info->levels - 1)) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level + 1);
+ if (r)
+ goto out;
+
+ } else {
+ if (info->value_type.dec) {
+ unsigned i;
+
+ for (i = 0; i < f->nr_children; i++)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(f->n, i, info->value_type.size));
+ }
+ f->current_child = f->nr_children;
+ }
+ }
+
+out:
+ kfree(s);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_del);
+
+/*----------------------------------------------------------------*/
+
+static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+ int (*search_fn)(struct node *, uint64_t),
+ uint64_t *result_key, void *v, size_t value_size)
+{
+ int i, r;
+ uint32_t flags, nr_entries;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ i = search_fn(ro_node(s), key);
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (i < 0 || i >= nr_entries)
+ return -ENODATA;
+
+ if (flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (!(flags & LEAF_NODE));
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ memcpy(v, value_ptr(ro_node(s), i, value_size), value_size);
+
+ return 0;
+}
+
+int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value_le)
+{
+ unsigned level, last_level = info->levels - 1;
+ int r = -ENODATA;
+ uint64_t rkey;
+ __le64 internal_value_le;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ size_t size;
+ void *value_p;
+
+ if (level == last_level) {
+ value_p = value_le;
+ size = info->value_type.size;
+
+ } else {
+ value_p = &internal_value_le;
+ size = sizeof(uint64_t);
+ }
+
+ r = btree_lookup_raw(&spine, root, keys[level],
+ lower_bound, &rkey,
+ value_p, size);
+
+ if (!r) {
+ if (rkey != keys[level]) {
+ exit_ro_spine(&spine);
+ return -ENODATA;
+ }
+ } else {
+ exit_ro_spine(&spine);
+ return r;
+ }
+
+ root = le64_to_cpu(internal_value_le);
+ }
+ exit_ro_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_lookup);
+
+/*
+ * Splits a node by creating a sibling node and shifting half the node's
+ * contents across. Assumes there is a parent node, and it has room for
+ * another child.
+ *
+ * Before:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * |
+ * v
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * | |
+ * v +------+
+ * +---------+ |
+ * | A* +++ | v
+ * +---------+ +-------+
+ * | B +++ |
+ * +-------+
+ *
+ * Where A* is a shadow of A.
+ */
+static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ unsigned parent_index, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *parent;
+ struct node *ln, *rn, *pn;
+ __le64 location;
+
+ left = shadow_current(s);
+
+ r = new_block(s->info, &right);
+ if (r < 0)
+ return r;
+
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
+
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+
+ rn->header.flags = ln->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = ln->header.max_entries;
+ rn->header.value_size = ln->header.value_size;
+ memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
+
+ size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
+ sizeof(uint64_t) : s->info->value_type.size;
+ memcpy(value_ptr(rn, 0, size), value_ptr(ln, nr_left, size),
+ size * nr_right);
+
+ /*
+ * Patch up the parent
+ */
+ parent = shadow_parent(s);
+
+ pn = dm_block_data(parent);
+ location = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(pn, parent_index, sizeof(__le64)),
+ &location, sizeof(__le64));
+
+ location = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&location);
+
+ r = insert_at(sizeof(__le64), pn, parent_index + 1,
+ le64_to_cpu(rn->keys[0]), &location);
+ if (r)
+ return r;
+
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+
+ return 0;
+}
+
+/*
+ * Splits a node by creating two new children beneath the given node.
+ *
+ * Before:
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +------------+
+ * | A (shadow) |
+ * +------------+
+ * | |
+ * +------+ +----+
+ * | |
+ * v v
+ * +-------+ +-------+
+ * | B +++ | | C +++ |
+ * +-------+ +-------+
+ */
+static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *new_parent;
+ struct node *pn, *ln, *rn;
+ __le64 val;
+
+ new_parent = shadow_current(s);
+
+ r = new_block(s->info, &left);
+ if (r < 0)
+ return r;
+
+ r = new_block(s->info, &right);
+ if (r < 0) {
+ /* FIXME: put left */
+ return r;
+ }
+
+ pn = dm_block_data(new_parent);
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+
+ rn->header.flags = pn->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = pn->header.max_entries;
+ rn->header.value_size = pn->header.value_size;
+
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
+
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+ memcpy(value_ptr(ln, 0, size), value_ptr(pn, 0, size), nr_left * size);
+ memcpy(value_ptr(rn, 0, size), value_ptr(pn, nr_left, size),
+ nr_right * size);
+
+ /* new_parent should just point to l and r now */
+ pn->header.flags = cpu_to_le32(INTERNAL_NODE);
+ pn->header.nr_entries = cpu_to_le32(2);
+ pn->header.max_entries = cpu_to_le32(
+ calc_max_entries(sizeof(__le64),
+ dm_bm_block_size(
+ dm_tm_get_bm(s->info->tm))));
+ pn->header.value_size = cpu_to_le32(sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&val);
+ pn->keys[0] = ln->keys[0];
+ memcpy_disk(value_ptr(pn, 0, sizeof(__le64)), &val, sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&val);
+ pn->keys[1] = rn->keys[0];
+ memcpy_disk(value_ptr(pn, 1, sizeof(__le64)), &val, sizeof(__le64));
+
+ /*
+ * rejig the spine. This is ugly, since it knows too
+ * much about the spine
+ */
+ if (s->nodes[0] != new_parent) {
+ unlock_block(s->info, s->nodes[0]);
+ s->nodes[0] = new_parent;
+ }
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+ s->count = 2;
+
+ return 0;
+}
+
+static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ struct dm_btree_value_type *vt,
+ uint64_t key, unsigned *index)
+{
+ int r, i = *index, top = 1;
+ struct node *node;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+ if (r < 0)
+ return r;
+
+ node = dm_block_data(shadow_current(s));
+
+ /*
+ * We have to patch up the parent node, ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary. */
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ &location, sizeof(__le64));
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ if (node->header.nr_entries == node->header.max_entries) {
+ if (top)
+ r = btree_split_beneath(s, key);
+ else
+ r = btree_split_sibling(s, root, i, key);
+
+ if (r < 0)
+ return r;
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ i = lower_bound(node, key);
+
+ if (le32_to_cpu(node->header.flags) & LEAF_NODE)
+ break;
+
+ if (i < 0) {
+ /* change the bounds on the lowest key */
+ node->keys[0] = cpu_to_le64(key);
+ i = 0;
+ }
+
+ root = value64(node, i);
+ top = 0;
+ }
+
+ if (i < 0 || le64_to_cpu(node->keys[i]) != key)
+ i++;
+
+ *index = i;
+ return 0;
+}
+
+static int insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ int r, need_insert;
+ unsigned level, index = -1, last_level = info->levels - 1;
+ dm_block_t block = root;
+ struct shadow_spine spine;
+ struct node *n;
+ struct dm_btree_value_type le64_type;
+
+ le64_type.context = NULL;
+ le64_type.size = sizeof(__le64);
+ le64_type.inc = NULL;
+ le64_type.dec = NULL;
+ le64_type.equal = NULL;
+
+ init_shadow_spine(&spine, info);
+
+ for (level = 0; level < (info->levels - 1); level++) {
+ r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ dm_block_t new_tree;
+ __le64 new_le;
+
+ r = dm_btree_empty(info, &new_tree);
+ if (r < 0)
+ goto bad;
+
+ new_le = cpu_to_le64(new_tree);
+ __dm_bless_for_disk(&new_le);
+
+ r = insert_at(sizeof(uint64_t), n, index,
+ keys[level], &new_le);
+ if (r)
+ goto bad;
+ }
+
+ if (level < last_level)
+ block = value64(n, index);
+ }
+
+ r = btree_insert_raw(&spine, block, &info->value_type,
+ keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ if (inserted)
+ *inserted = 1;
+
+ r = insert_at(info->value_type.size, n, index,
+ keys[level], value);
+ if (r)
+ goto bad_unblessed;
+ } else {
+ if (inserted)
+ *inserted = 0;
+
+ if (info->value_type.dec &&
+ (!info->value_type.equal ||
+ !info->value_type.equal(
+ info->value_type.context,
+ value_ptr(n, index, info->value_type.size),
+ value))) {
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+ }
+ memcpy_disk(value_ptr(n, index, info->value_type.size),
+ value, info->value_type.size);
+ }
+
+ *new_root = shadow_root(&spine);
+ exit_shadow_spine(&spine);
+
+ return 0;
+
+bad:
+ __dm_unbless_for_disk(value);
+bad_unblessed:
+ exit_shadow_spine(&spine);
+ return r;
+}
+
+int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert);
+
+int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, inserted);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
+
+/*----------------------------------------------------------------*/
+
+static int find_highest_key(struct ro_spine *s, dm_block_t block,
+ uint64_t *result_key, dm_block_t *next_block)
+{
+ int i, r;
+ uint32_t flags;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ i = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (!i)
+ return -ENODATA;
+ else
+ i--;
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ if (next_block || flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (flags & INTERNAL_NODE);
+
+ if (next_block)
+ *next_block = block;
+ return 0;
+}
+
+int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys)
+{
+ int r = 0, count = 0, level;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = find_highest_key(&spine, root, result_keys + level,
+ level == info->levels - 1 ? NULL : &root);
+ if (r == -ENODATA) {
+ r = 0;
+ break;
+
+ } else if (r)
+ break;
+
+ count++;
+ }
+ exit_ro_spine(&spine);
+
+ return r ? r : count;
+}
+EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
new file mode 100644
index 00000000000..ae02c84410f
--- /dev/null
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_BTREE_H
+#define _LINUX_DM_BTREE_H
+
+#include "dm-block-manager.h"
+
+struct dm_transaction_manager;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Annotations used to check that on-disk metadata is handled as
+ * little-endian.
+ */
+#ifdef __CHECKER__
+# define __dm_written_to_disk(x) __releases(x)
+# define __dm_reads_from_disk(x) __acquires(x)
+# define __dm_bless_for_disk(x) __acquire(x)
+# define __dm_unbless_for_disk(x) __release(x)
+#else
+# define __dm_written_to_disk(x)
+# define __dm_reads_from_disk(x)
+# define __dm_bless_for_disk(x)
+# define __dm_unbless_for_disk(x)
+#endif
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Manipulates hierarchical B+ trees with 64-bit keys and arbitrary-sized
+ * values.
+ */
+
+/*
+ * Information about the values stored within the btree.
+ */
+struct dm_btree_value_type {
+ void *context;
+
+ /*
+ * The size in bytes of each value.
+ */
+ uint32_t size;
+
+ /*
+ * Any of these methods can be safely set to NULL if you do not
+ * need the corresponding feature.
+ */
+
+ /*
+ * The btree is making a duplicate of the value, for instance
+ * because previously-shared btree nodes have now diverged.
+	 * The @value argument is the new copy that the inc method may
+	 * modify.  (Probably it just wants to increment a reference count
+	 * somewhere.)  This method is _not_ called for insertion of a new
+	 * value: it is assumed the ref count is already 1.
+ */
+ void (*inc)(void *context, void *value);
+
+ /*
+ * This value is being deleted. The btree takes care of freeing
+	 * the memory pointed to by @value.  Often the dec method just
+ * needs to decrement a reference count somewhere.
+ */
+ void (*dec)(void *context, void *value);
+
+ /*
+ * A test for equality between two values. When a value is
+ * overwritten with a new one, the old one has the dec method
+ * called _unless_ the new and old value are deemed equal.
+ */
+ int (*equal)(void *context, void *value1, void *value2);
+};
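+
+/*
+ * A minimal value type sketch (illustrative only): plain 64-bit
+ * little-endian values that need no reference counting, so every
+ * callback is left NULL.
+ */
+static inline void example_init_le64_value_type(struct dm_btree_value_type *vt)
+{
+	vt->context = NULL;
+	vt->size = sizeof(__le64);
+	vt->inc = NULL;
+	vt->dec = NULL;
+	vt->equal = NULL;
+}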
+
+/*
+ * The shape and contents of a btree.
+ */
+struct dm_btree_info {
+ struct dm_transaction_manager *tm;
+
+ /*
+ * Number of nested btrees. (Not the depth of a single tree.)
+ */
+ unsigned levels;
+ struct dm_btree_value_type value_type;
+};
+
+/*
+ * Set up an empty tree. O(1).
+ */
+int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root);
+
+/*
+ * Delete a tree. O(n) - this is the slow one! It can also block, so
+ * please don't call it on an IO path.
+ */
+int dm_btree_del(struct dm_btree_info *info, dm_block_t root);
+
+/*
+ * All the lookup functions return -ENODATA if the key cannot be found.
+ */
+
+/*
+ * Tries to find a key that matches exactly. O(ln(n))
+ */
+int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value_le);
+
+/*
+ * Insertion (or overwrite an existing value). O(ln(n))
+ */
+int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value);
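+
+/*
+ * Illustrative sketch only, assuming @info describes a single-level
+ * tree (levels = 1) holding 8 byte little-endian values: create an
+ * empty tree, insert one mapping and look it back up.
+ */
+static inline int example_btree_roundtrip(struct dm_btree_info *info,
+					  dm_block_t *root)
+{
+	uint64_t key = 1;
+	__le64 value = cpu_to_le64(100), value_out;
+	int r;
+
+	r = dm_btree_empty(info, root);
+	if (r)
+		return r;
+
+	__dm_bless_for_disk(&value);	/* keep sparse happy about endianness */
+	r = dm_btree_insert(info, *root, &key, &value, root);
+	if (r)
+		return r;
+
+	return dm_btree_lookup(info, *root, &key, &value_out);
+}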
+
+/*
+ * A variant of insert that indicates whether it actually inserted or just
+ * overwrote. Useful if you're keeping track of the number of entries in a
+ * tree.
+ */
+int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value);
+
+/*
+ * Remove a key if present.  This doesn't remove empty subtrees.  Normally
+ * subtrees represent a separate entity, like a snapshot map, so this is
+ * correct behaviour. O(ln(n)).
+ */
+int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root);
+
+/*
+ * Returns < 0 on failure. Otherwise the number of key entries that have
+ * been filled out.  Remember that trees can have zero entries, and as
+ * such have no highest key.
+ */
+int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys);
+
+#endif /* _LINUX_DM_BTREE_H */
diff --git a/drivers/md/persistent-data/dm-persistent-data-internal.h b/drivers/md/persistent-data/dm-persistent-data-internal.h
new file mode 100644
index 00000000000..c49e26fff36
--- /dev/null
+++ b/drivers/md/persistent-data/dm-persistent-data-internal.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _DM_PERSISTENT_DATA_INTERNAL_H
+#define _DM_PERSISTENT_DATA_INTERNAL_H
+
+#include "dm-block-manager.h"
+
+static inline unsigned dm_hash_block(dm_block_t b, unsigned hash_mask)
+{
+ const unsigned BIG_PRIME = 4294967291UL;
+
+ return (((unsigned) b) * BIG_PRIME) & hash_mask;
+}
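+
+/*
+ * Illustrative sketch only: callers size their hash tables to a power
+ * of two and pass the matching mask, e.g. 63 for a 64 bucket table.
+ */
+static inline unsigned example_hash_bucket(dm_block_t b)
+{
+	return dm_hash_block(b, 64 - 1);
+}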
+
+#endif /* _DM_PERSISTENT_DATA_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
new file mode 100644
index 00000000000..50ed53bf4aa
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-space-map-checker.h"
+
+#include <linux/device-mapper.h>
+#include <linux/export.h>
+
+#ifdef CONFIG_DM_DEBUG_SPACE_MAPS
+
+#define DM_MSG_PREFIX "space map checker"
+
+/*----------------------------------------------------------------*/
+
+struct count_array {
+ dm_block_t nr;
+ dm_block_t nr_free;
+
+ uint32_t *counts;
+};
+
+static int ca_get_count(struct count_array *ca, dm_block_t b, uint32_t *count)
+{
+ if (b >= ca->nr)
+ return -EINVAL;
+
+ *count = ca->counts[b];
+ return 0;
+}
+
+static int ca_count_more_than_one(struct count_array *ca, dm_block_t b, int *r)
+{
+ if (b >= ca->nr)
+ return -EINVAL;
+
+ *r = ca->counts[b] > 1;
+ return 0;
+}
+
+static int ca_set_count(struct count_array *ca, dm_block_t b, uint32_t count)
+{
+ uint32_t old_count;
+
+ if (b >= ca->nr)
+ return -EINVAL;
+
+ old_count = ca->counts[b];
+
+ if (!count && old_count)
+ ca->nr_free++;
+
+ else if (count && !old_count)
+ ca->nr_free--;
+
+ ca->counts[b] = count;
+ return 0;
+}
+
+static int ca_inc_block(struct count_array *ca, dm_block_t b)
+{
+ if (b >= ca->nr)
+ return -EINVAL;
+
+ ca_set_count(ca, b, ca->counts[b] + 1);
+ return 0;
+}
+
+static int ca_dec_block(struct count_array *ca, dm_block_t b)
+{
+ if (b >= ca->nr)
+ return -EINVAL;
+
+ BUG_ON(ca->counts[b] == 0);
+ ca_set_count(ca, b, ca->counts[b] - 1);
+ return 0;
+}
+
+static int ca_create(struct count_array *ca, struct dm_space_map *sm)
+{
+ int r;
+ dm_block_t nr_blocks;
+
+ r = dm_sm_get_nr_blocks(sm, &nr_blocks);
+ if (r)
+ return r;
+
+ ca->nr = nr_blocks;
+ ca->nr_free = nr_blocks;
+ ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
+ if (!ca->counts)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ca_load(struct count_array *ca, struct dm_space_map *sm)
+{
+ int r;
+ uint32_t count;
+ dm_block_t nr_blocks, i;
+
+ r = dm_sm_get_nr_blocks(sm, &nr_blocks);
+ if (r)
+ return r;
+
+ BUG_ON(ca->nr != nr_blocks);
+
+ DMWARN("Loading debug space map from disk. This may take some time");
+ for (i = 0; i < nr_blocks; i++) {
+ r = dm_sm_get_count(sm, i, &count);
+ if (r) {
+ DMERR("load failed");
+ return r;
+ }
+
+ ca_set_count(ca, i, count);
+ }
+ DMWARN("Load complete");
+
+ return 0;
+}
+
+static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
+{
+ dm_block_t nr_blocks = ca->nr + extra_blocks;
+ uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+ if (!counts)
+ return -ENOMEM;
+
+ memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+ kfree(ca->counts);
+ ca->nr = nr_blocks;
+ ca->nr_free += extra_blocks;
+ ca->counts = counts;
+ return 0;
+}
+
+static int ca_commit(struct count_array *old, struct count_array *new)
+{
+ if (old->nr != new->nr) {
+ BUG_ON(old->nr > new->nr);
+ ca_extend(old, new->nr - old->nr);
+ }
+
+ BUG_ON(old->nr != new->nr);
+ old->nr_free = new->nr_free;
+ memcpy(old->counts, new->counts, sizeof(*old->counts) * old->nr);
+ return 0;
+}
+
+static void ca_destroy(struct count_array *ca)
+{
+ kfree(ca->counts);
+}
+
+/*----------------------------------------------------------------*/
+
+struct sm_checker {
+ struct dm_space_map sm;
+
+ struct count_array old_counts;
+ struct count_array counts;
+
+ struct dm_space_map *real_sm;
+};
+
+static void sm_checker_destroy(struct dm_space_map *sm)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+
+ dm_sm_destroy(smc->real_sm);
+ ca_destroy(&smc->old_counts);
+ ca_destroy(&smc->counts);
+ kfree(smc);
+}
+
+static int sm_checker_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int r = dm_sm_get_nr_blocks(smc->real_sm, count);
+ if (!r)
+ BUG_ON(smc->old_counts.nr != *count);
+ return r;
+}
+
+static int sm_checker_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int r = dm_sm_get_nr_free(smc->real_sm, count);
+ if (!r) {
+ /*
+ * Slow, but we know it's correct.
+ */
+ dm_block_t b, n = 0;
+ for (b = 0; b < smc->old_counts.nr; b++)
+ if (smc->old_counts.counts[b] == 0 &&
+ smc->counts.counts[b] == 0)
+ n++;
+
+ if (n != *count)
+ DMERR("free block counts differ, checker %u, sm-disk:%u",
+ (unsigned) n, (unsigned) *count);
+ }
+ return r;
+}
+
+static int sm_checker_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int r = dm_sm_new_block(smc->real_sm, b);
+
+ if (!r) {
+ BUG_ON(*b >= smc->old_counts.nr);
+ BUG_ON(smc->old_counts.counts[*b] != 0);
+ BUG_ON(*b >= smc->counts.nr);
+ BUG_ON(smc->counts.counts[*b] != 0);
+ ca_set_count(&smc->counts, *b, 1);
+ }
+
+ return r;
+}
+
+static int sm_checker_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int r = dm_sm_inc_block(smc->real_sm, b);
+ int r2 = ca_inc_block(&smc->counts, b);
+ BUG_ON(r != r2);
+ return r;
+}
+
+static int sm_checker_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int r = dm_sm_dec_block(smc->real_sm, b);
+ int r2 = ca_dec_block(&smc->counts, b);
+ BUG_ON(r != r2);
+ return r;
+}
+
+static int sm_checker_get_count(struct dm_space_map *sm, dm_block_t b, uint32_t *result)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ uint32_t result2 = 0;
+ int r = dm_sm_get_count(smc->real_sm, b, result);
+ int r2 = ca_get_count(&smc->counts, b, &result2);
+
+ BUG_ON(r != r2);
+ if (!r)
+ BUG_ON(*result != result2);
+ return r;
+}
+
+static int sm_checker_count_more_than_one(struct dm_space_map *sm, dm_block_t b, int *result)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int result2 = 0;
+ int r = dm_sm_count_is_more_than_one(smc->real_sm, b, result);
+ int r2 = ca_count_more_than_one(&smc->counts, b, &result2);
+
+ BUG_ON(r != r2);
+ if (!r)
+ BUG_ON(!(*result) && result2);
+ return r;
+}
+
+static int sm_checker_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ uint32_t old_rc;
+ int r = dm_sm_set_count(smc->real_sm, b, count);
+ int r2;
+
+ BUG_ON(b >= smc->counts.nr);
+ old_rc = smc->counts.counts[b];
+ r2 = ca_set_count(&smc->counts, b, count);
+ BUG_ON(r != r2);
+
+ return r;
+}
+
+static int sm_checker_commit(struct dm_space_map *sm)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int r;
+
+ r = dm_sm_commit(smc->real_sm);
+ if (r)
+ return r;
+
+ r = ca_commit(&smc->old_counts, &smc->counts);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int sm_checker_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ int r = dm_sm_extend(smc->real_sm, extra_blocks);
+ if (r)
+ return r;
+
+ return ca_extend(&smc->counts, extra_blocks);
+}
+
+static int sm_checker_root_size(struct dm_space_map *sm, size_t *result)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ return dm_sm_root_size(smc->real_sm, result);
+}
+
+static int sm_checker_copy_root(struct dm_space_map *sm, void *copy_to_here_le, size_t len)
+{
+ struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
+ return dm_sm_copy_root(smc->real_sm, copy_to_here_le, len);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_space_map ops_ = {
+ .destroy = sm_checker_destroy,
+ .get_nr_blocks = sm_checker_get_nr_blocks,
+ .get_nr_free = sm_checker_get_nr_free,
+ .inc_block = sm_checker_inc_block,
+ .dec_block = sm_checker_dec_block,
+ .new_block = sm_checker_new_block,
+ .get_count = sm_checker_get_count,
+ .count_is_more_than_one = sm_checker_count_more_than_one,
+ .set_count = sm_checker_set_count,
+ .commit = sm_checker_commit,
+ .extend = sm_checker_extend,
+ .root_size = sm_checker_root_size,
+ .copy_root = sm_checker_copy_root
+};
+
+struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+{
+ int r;
+ struct sm_checker *smc;
+
+ if (!sm)
+ return NULL;
+
+ smc = kmalloc(sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+ return NULL;
+
+ memcpy(&smc->sm, &ops_, sizeof(smc->sm));
+ r = ca_create(&smc->old_counts, sm);
+ if (r) {
+ kfree(smc);
+ return NULL;
+ }
+
+ r = ca_create(&smc->counts, sm);
+ if (r) {
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+ return NULL;
+ }
+
+ smc->real_sm = sm;
+
+ r = ca_load(&smc->counts, sm);
+ if (r) {
+ ca_destroy(&smc->counts);
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+ return NULL;
+ }
+
+ r = ca_commit(&smc->old_counts, &smc->counts);
+ if (r) {
+ ca_destroy(&smc->counts);
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+ return NULL;
+ }
+
+ return &smc->sm;
+}
+EXPORT_SYMBOL_GPL(dm_sm_checker_create);
+
+struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
+{
+ int r;
+ struct sm_checker *smc;
+
+ if (!sm)
+ return NULL;
+
+ smc = kmalloc(sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+ return NULL;
+
+ memcpy(&smc->sm, &ops_, sizeof(smc->sm));
+ r = ca_create(&smc->old_counts, sm);
+ if (r) {
+ kfree(smc);
+ return NULL;
+ }
+
+ r = ca_create(&smc->counts, sm);
+ if (r) {
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+ return NULL;
+ }
+
+ smc->real_sm = sm;
+ return &smc->sm;
+}
+EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh);
+
+/*----------------------------------------------------------------*/
+
+#else
+
+struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+{
+ return sm;
+}
+EXPORT_SYMBOL_GPL(dm_sm_checker_create);
+
+struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
+{
+ return sm;
+}
+EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh);
+
+/*----------------------------------------------------------------*/
+
+#endif
diff --git a/drivers/md/persistent-data/dm-space-map-checker.h b/drivers/md/persistent-data/dm-space-map-checker.h
new file mode 100644
index 00000000000..444dccf6688
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-checker.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef SNAPSHOTS_SPACE_MAP_CHECKER_H
+#define SNAPSHOTS_SPACE_MAP_CHECKER_H
+
+#include "dm-space-map.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This space map wraps a real on-disk space map, and verifies all of its
+ * operations. It uses a lot of memory, so only use it if you have a specific
+ * problem that you're debugging.
+ *
+ * Ownership of @sm passes to the checker.
+ */
+struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm);
+struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm);
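+
+/*
+ * Illustrative sketch only: "real_sm" is assumed to be a previously
+ * constructed on-disk space map whose operations you want cross-checked
+ * against an in-core copy of the reference counts:
+ *
+ *	struct dm_space_map *sm = dm_sm_checker_create(real_sm);
+ *	if (!sm)
+ *		return -ENOMEM;
+ *
+ * When the checker is not configured in, both functions simply return
+ * the space map they were given.
+ */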
+
+/*----------------------------------------------------------------*/
+
+#endif
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
new file mode 100644
index 00000000000..df2494c06cd
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-space-map-common.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/bitops.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "space map common"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Index validator.
+ */
+#define INDEX_CSUM_XOR 160478
+
+static void index_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_metadata_index *mi_le = dm_block_data(b);
+
+ mi_le->blocknr = cpu_to_le64(dm_block_location(b));
+ mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
+ block_size - sizeof(__le32),
+ INDEX_CSUM_XOR));
+}
+
+static int index_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_metadata_index *mi_le = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
+ DMERR("index_check failed blocknr %llu wanted %llu",
+ le64_to_cpu(mi_le->blocknr), dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
+ block_size - sizeof(__le32),
+ INDEX_CSUM_XOR));
+ if (csum_disk != mi_le->csum) {
+ DMERR("index_check failed csum %u wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator index_validator = {
+ .name = "index",
+ .prepare_for_write = index_prepare_for_write,
+ .check = index_check
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Bitmap validator
+ */
+#define BITMAP_CSUM_XOR 240779
+
+static void bitmap_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+
+ disk_header->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
+ block_size - sizeof(__le32),
+ BITMAP_CSUM_XOR));
+}
+
+static int bitmap_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
+ DMERR("bitmap check failed blocknr %llu wanted %llu",
+ le64_to_cpu(disk_header->blocknr), dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
+ block_size - sizeof(__le32),
+ BITMAP_CSUM_XOR));
+ if (csum_disk != disk_header->csum) {
+ DMERR("bitmap check failed csum %u wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator dm_sm_bitmap_validator = {
+ .name = "sm_bitmap",
+ .prepare_for_write = bitmap_prepare_for_write,
+ .check = bitmap_check
+};
+
+/*----------------------------------------------------------------*/
+
+#define ENTRIES_PER_WORD 32
+#define ENTRIES_SHIFT 5
+
+static void *dm_bitmap_data(struct dm_block *b)
+{
+ return dm_block_data(b) + sizeof(struct disk_bitmap_header);
+}
+
+#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
+
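+/*
+ * A 64-bit word holds 32 two-bit entries. Returns non-zero iff every
+ * entry in the word containing entry b is in use (non-zero), which lets
+ * sm_find_free() skip whole words at a time.
+ */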
+static unsigned bitmap_word_used(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ uint64_t bits = le64_to_cpu(*w_le);
+ uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;
+
+ return !(~bits & mask);
+}
+
+static unsigned sm_lookup_bitmap(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+ unsigned hi, lo;
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+ hi = !!test_bit_le(b, (void *) w_le);
+ lo = !!test_bit_le(b + 1, (void *) w_le);
+ return (hi << 1) | lo;
+}
+
+static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+
+ if (val & 2)
+ __set_bit_le(b, (void *) w_le);
+ else
+ __clear_bit_le(b, (void *) w_le);
+
+ if (val & 1)
+ __set_bit_le(b + 1, (void *) w_le);
+ else
+ __clear_bit_le(b + 1, (void *) w_le);
+}
+
+static int sm_find_free(void *addr, unsigned begin, unsigned end,
+ unsigned *result)
+{
+ while (begin < end) {
+ if (!(begin & (ENTRIES_PER_WORD - 1)) &&
+ bitmap_word_used(addr, begin)) {
+ begin += ENTRIES_PER_WORD;
+ continue;
+ }
+
+ if (!sm_lookup_bitmap(addr, begin)) {
+ *result = begin;
+ return 0;
+ }
+
+ begin++;
+ }
+
+ return -ENOSPC;
+}
+
+/*----------------------------------------------------------------*/
+
+static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
+{
+ ll->tm = tm;
+
+ ll->bitmap_info.tm = tm;
+ ll->bitmap_info.levels = 1;
+
+ /*
+ * Because the new bitmap blocks are created via a shadow
+ * operation, the old entry has already had its reference count
+ * decremented and we don't need the btree to do any bookkeeping.
+ */
+ ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
+ ll->bitmap_info.value_type.inc = NULL;
+ ll->bitmap_info.value_type.dec = NULL;
+ ll->bitmap_info.value_type.equal = NULL;
+
+ ll->ref_count_info.tm = tm;
+ ll->ref_count_info.levels = 1;
+ ll->ref_count_info.value_type.size = sizeof(uint32_t);
+ ll->ref_count_info.value_type.inc = NULL;
+ ll->ref_count_info.value_type.dec = NULL;
+ ll->ref_count_info.value_type.equal = NULL;
+
+ ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));
+
+ if (ll->block_size > (1 << 30)) {
+ DMERR("block size too big to hold bitmaps");
+ return -EINVAL;
+ }
+
+ ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
+ ENTRIES_PER_BYTE;
+ ll->nr_blocks = 0;
+ ll->bitmap_root = 0;
+ ll->ref_count_root = 0;
+
+ return 0;
+}
+
+int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+{
+ int r;
+ dm_block_t i, nr_blocks, nr_indexes;
+ unsigned old_blocks, blocks;
+
+ nr_blocks = ll->nr_blocks + extra_blocks;
+ old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
+ blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);
+
+ nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
+ if (nr_indexes > ll->max_entries(ll)) {
+ DMERR("space map too large");
+ return -EINVAL;
+ }
+
+ for (i = old_blocks; i < blocks; i++) {
+ struct dm_block *b;
+ struct disk_index_entry idx;
+
+ r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
+ if (r < 0)
+ return r;
+ idx.blocknr = cpu_to_le64(dm_block_location(b));
+
+ r = dm_tm_unlock(ll->tm, b);
+ if (r < 0)
+ return r;
+
+ idx.nr_free = cpu_to_le32(ll->entries_per_block);
+ idx.none_free_before = 0;
+
+ r = ll->save_ie(ll, i, &idx);
+ if (r < 0)
+ return r;
+ }
+
+ ll->nr_blocks = nr_blocks;
+ return 0;
+}
+
+int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+ int r;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ struct dm_block *blk;
+
+ b = do_div(index, ll->entries_per_block);
+ r = ll->load_ie(ll, index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ *result = sm_lookup_bitmap(dm_bitmap_data(blk), b);
+
+ return dm_tm_unlock(ll->tm, blk);
+}
+
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+ __le32 le_rc;
+ int r = sm_ll_lookup_bitmap(ll, b, result);
+
+ if (r)
+ return r;
+
+ if (*result != 3)
+ return r;
+
+ r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
+ if (r < 0)
+ return r;
+
+ *result = le32_to_cpu(le_rc);
+
+ return r;
+}
+
+int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ dm_block_t end, dm_block_t *result)
+{
+ int r;
+ struct disk_index_entry ie_disk;
+ dm_block_t i, index_begin = begin;
+ dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);
+
+ /*
+ * FIXME: Use shifts
+ */
+ begin = do_div(index_begin, ll->entries_per_block);
+ end = do_div(end, ll->entries_per_block);
+
+ for (i = index_begin; i < index_end; i++, begin = 0) {
+ struct dm_block *blk;
+ unsigned position;
+ uint32_t bit_end;
+
+ r = ll->load_ie(ll, i, &ie_disk);
+ if (r < 0)
+ return r;
+
+ if (le32_to_cpu(ie_disk.nr_free) == 0)
+ continue;
+
+ r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ bit_end = (i == index_end - 1) ? end : ll->entries_per_block;
+
+ r = sm_find_free(dm_bitmap_data(blk),
+ max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
+ bit_end, &position);
+ if (r == -ENOSPC) {
+ /*
+ * This might happen because we started searching
+ * part way through the bitmap.
+ */
+ dm_tm_unlock(ll->tm, blk);
+ continue;
+
+ } else if (r < 0) {
+ dm_tm_unlock(ll->tm, blk);
+ return r;
+ }
+
+ r = dm_tm_unlock(ll->tm, blk);
+ if (r < 0)
+ return r;
+
+ *result = i * ll->entries_per_block + (dm_block_t) position;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+ uint32_t ref_count, enum allocation_event *ev)
+{
+ int r;
+ uint32_t bit, old;
+ struct dm_block *nb;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ void *bm_le;
+ int inc;
+
+ bit = do_div(index, ll->entries_per_block);
+ r = ll->load_ie(ll, index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &nb, &inc);
+ if (r < 0) {
+ DMERR("dm_tm_shadow_block() failed");
+ return r;
+ }
+ ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));
+
+ bm_le = dm_bitmap_data(nb);
+ old = sm_lookup_bitmap(bm_le, bit);
+
+ if (ref_count <= 2) {
+ sm_set_bitmap(bm_le, bit, ref_count);
+
+ r = dm_tm_unlock(ll->tm, nb);
+ if (r < 0)
+ return r;
+
+#if 0
+ /* FIXME: dm_btree_remove doesn't handle this yet */
+ if (old > 2) {
+ r = dm_btree_remove(&ll->ref_count_info,
+ ll->ref_count_root,
+ &b, &ll->ref_count_root);
+ if (r)
+ return r;
+ }
+#endif
+
+ } else {
+ __le32 le_rc = cpu_to_le32(ref_count);
+
+ sm_set_bitmap(bm_le, bit, 3);
+ r = dm_tm_unlock(ll->tm, nb);
+ if (r < 0)
+ return r;
+
+ __dm_bless_for_disk(&le_rc);
+ r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
+ &b, &le_rc, &ll->ref_count_root);
+ if (r < 0) {
+ DMERR("ref count insert failed");
+ return r;
+ }
+ }
+
+ if (ref_count && !old) {
+ *ev = SM_ALLOC;
+ ll->nr_allocated++;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1);
+ if (le32_to_cpu(ie_disk.none_free_before) == bit)
+ ie_disk.none_free_before = cpu_to_le32(bit + 1);
+
+ } else if (old && !ref_count) {
+ *ev = SM_FREE;
+ ll->nr_allocated--;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1);
+ ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
+ }
+
+ return ll->save_ie(ll, index, &ie_disk);
+}
+
+int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ int r;
+ uint32_t rc;
+
+ r = sm_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ return sm_ll_insert(ll, b, rc + 1, ev);
+}
+
+int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ int r;
+ uint32_t rc;
+
+ r = sm_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ if (!rc)
+ return -EINVAL;
+
+ return sm_ll_insert(ll, b, rc - 1, ev);
+}
+
+int sm_ll_commit(struct ll_disk *ll)
+{
+ return ll->commit(ll);
+}
+
+/*----------------------------------------------------------------*/
+
+static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
+ struct disk_index_entry *ie)
+{
+ memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
+ return 0;
+}
+
+static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
+ struct disk_index_entry *ie)
+{
+ memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
+ return 0;
+}
+
+static int metadata_ll_init_index(struct ll_disk *ll)
+{
+ int r;
+ struct dm_block *b;
+
+ r = dm_tm_new_block(ll->tm, &index_validator, &b);
+ if (r < 0)
+ return r;
+
+ memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
+ ll->bitmap_root = dm_block_location(b);
+
+ return dm_tm_unlock(ll->tm, b);
+}
+
+static int metadata_ll_open(struct ll_disk *ll)
+{
+ int r;
+ struct dm_block *block;
+
+ r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
+ &index_validator, &block);
+ if (r)
+ return r;
+
+ memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
+ return dm_tm_unlock(ll->tm, block);
+}
+
+static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
+{
+ return MAX_METADATA_BITMAPS;
+}
+
+static int metadata_ll_commit(struct ll_disk *ll)
+{
+ int r, inc;
+ struct dm_block *b;
+
+ r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
+ if (r)
+ return r;
+
+ memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
+ ll->bitmap_root = dm_block_location(b);
+
+ return dm_tm_unlock(ll->tm, b);
+}
+
+int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
+{
+ int r;
+
+ r = sm_ll_init(ll, tm);
+ if (r < 0)
+ return r;
+
+ ll->load_ie = metadata_ll_load_ie;
+ ll->save_ie = metadata_ll_save_ie;
+ ll->init_index = metadata_ll_init_index;
+ ll->open_index = metadata_ll_open;
+ ll->max_entries = metadata_ll_max_entries;
+ ll->commit = metadata_ll_commit;
+
+ ll->nr_blocks = 0;
+ ll->nr_allocated = 0;
+
+ r = ll->init_index(ll);
+ if (r < 0)
+ return r;
+
+ r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct disk_sm_root *smr = root_le;
+
+ if (len < sizeof(struct disk_sm_root)) {
+ DMERR("sm_metadata root too small");
+ return -ENOMEM;
+ }
+
+ r = sm_ll_init(ll, tm);
+ if (r < 0)
+ return r;
+
+ ll->load_ie = metadata_ll_load_ie;
+ ll->save_ie = metadata_ll_save_ie;
+ ll->init_index = metadata_ll_init_index;
+ ll->open_index = metadata_ll_open;
+ ll->max_entries = metadata_ll_max_entries;
+ ll->commit = metadata_ll_commit;
+
+ ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
+ ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
+ ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
+ ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
+
+ return ll->open_index(ll);
+}
+
+/*----------------------------------------------------------------*/
+
+static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
+ struct disk_index_entry *ie)
+{
+ return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
+}
+
+static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
+ struct disk_index_entry *ie)
+{
+ __dm_bless_for_disk(ie);
+ return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
+ &index, ie, &ll->bitmap_root);
+}
+
+static int disk_ll_init_index(struct ll_disk *ll)
+{
+ return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
+}
+
+static int disk_ll_open(struct ll_disk *ll)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
+{
+ return -1ULL;
+}
+
+static int disk_ll_commit(struct ll_disk *ll)
+{
+ return 0;
+}
+
+int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
+{
+ int r;
+
+ r = sm_ll_init(ll, tm);
+ if (r < 0)
+ return r;
+
+ ll->load_ie = disk_ll_load_ie;
+ ll->save_ie = disk_ll_save_ie;
+ ll->init_index = disk_ll_init_index;
+ ll->open_index = disk_ll_open;
+ ll->max_entries = disk_ll_max_entries;
+ ll->commit = disk_ll_commit;
+
+ ll->nr_blocks = 0;
+ ll->nr_allocated = 0;
+
+ r = ll->init_index(ll);
+ if (r < 0)
+ return r;
+
+ r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct disk_sm_root *smr = root_le;
+
+ if (len < sizeof(struct disk_sm_root)) {
+		DMERR("sm_disk root too small");
+ return -ENOMEM;
+ }
+
+ r = sm_ll_init(ll, tm);
+ if (r < 0)
+ return r;
+
+ ll->load_ie = disk_ll_load_ie;
+ ll->save_ie = disk_ll_save_ie;
+ ll->init_index = disk_ll_init_index;
+ ll->open_index = disk_ll_open;
+ ll->max_entries = disk_ll_max_entries;
+ ll->commit = disk_ll_commit;
+
+ ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
+ ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
+ ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
+ ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
+
+ return ll->open_index(ll);
+}
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
new file mode 100644
index 00000000000..8f220821a9a
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_SPACE_MAP_COMMON_H
+#define DM_SPACE_MAP_COMMON_H
+
+#include "dm-btree.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Low level disk format
+ *
+ * Bitmap btree
+ * ------------
+ *
+ * Each value stored in the btree is an index_entry. This points to a
+ * block that is used as a bitmap. Within the bitmap we hold 2 bits per
+ * entry, which represent UNUSED = 0, REF_COUNT = 1, REF_COUNT = 2 and
+ * REF_COUNT = many.
+ *
+ * Refcount btree
+ * --------------
+ *
+ * Any entry that has a ref count higher than 2 gets entered in the ref
+ * count tree. The leaf values for this tree are the 32-bit ref counts.
+ */
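+
+/*
+ * Worked example, assuming a 4096-byte metadata block (the real entry
+ * count comes from sm_ll_init()): one bitmap block holds
+ * (4096 - sizeof(struct disk_bitmap_header)) * 4 = 16320 entries. A
+ * block with a reference count of 0, 1 or 2 is stored directly in its
+ * 2-bit entry; a count of, say, 5 is stored as the entry value 3 plus a
+ * 32-bit value of 5 keyed by the block number in the ref count btree.
+ */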
+
+struct disk_index_entry {
+ __le64 blocknr;
+ __le32 nr_free;
+ __le32 none_free_before;
+} __packed;
+
+
+#define MAX_METADATA_BITMAPS 255
+struct disk_metadata_index {
+ __le32 csum;
+ __le32 padding;
+ __le64 blocknr;
+
+ struct disk_index_entry index[MAX_METADATA_BITMAPS];
+} __packed;
+
+struct ll_disk;
+
+typedef int (*load_ie_fn)(struct ll_disk *ll, dm_block_t index, struct disk_index_entry *result);
+typedef int (*save_ie_fn)(struct ll_disk *ll, dm_block_t index, struct disk_index_entry *ie);
+typedef int (*init_index_fn)(struct ll_disk *ll);
+typedef int (*open_index_fn)(struct ll_disk *ll);
+typedef dm_block_t (*max_index_entries_fn)(struct ll_disk *ll);
+typedef int (*commit_fn)(struct ll_disk *ll);
+
+struct ll_disk {
+ struct dm_transaction_manager *tm;
+ struct dm_btree_info bitmap_info;
+ struct dm_btree_info ref_count_info;
+
+ uint32_t block_size;
+ uint32_t entries_per_block;
+ dm_block_t nr_blocks;
+ dm_block_t nr_allocated;
+
+ /*
+ * bitmap_root may be a btree root or a simple index.
+ */
+ dm_block_t bitmap_root;
+
+ dm_block_t ref_count_root;
+
+ struct disk_metadata_index mi_le;
+ load_ie_fn load_ie;
+ save_ie_fn save_ie;
+ init_index_fn init_index;
+ open_index_fn open_index;
+ max_index_entries_fn max_entries;
+ commit_fn commit;
+};
+
+struct disk_sm_root {
+ __le64 nr_blocks;
+ __le64 nr_allocated;
+ __le64 bitmap_root;
+ __le64 ref_count_root;
+} __packed;
+
+#define ENTRIES_PER_BYTE 4
+
+struct disk_bitmap_header {
+ __le32 csum;
+ __le32 not_used;
+ __le64 blocknr;
+} __packed;
+
+enum allocation_event {
+ SM_NONE,
+ SM_ALLOC,
+ SM_FREE,
+};
+
+/*----------------------------------------------------------------*/
+
+int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks);
+int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result);
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
+int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ dm_block_t end, dm_block_t *result);
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
+int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
+int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
+int sm_ll_commit(struct ll_disk *ll);
+
+int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm);
+int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
+ void *root_le, size_t len);
+
+int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm);
+int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
+ void *root_le, size_t len);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_SPACE_MAP_COMMON_H */
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
new file mode 100644
index 00000000000..fc469ba9f62
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-space-map-checker.h"
+#include "dm-space-map-common.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "space map disk"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Space map interface.
+ */
+struct sm_disk {
+ struct dm_space_map sm;
+
+ struct ll_disk ll;
+ struct ll_disk old_ll;
+
+ dm_block_t begin;
+ dm_block_t nr_allocated_this_transaction;
+};
+
+static void sm_disk_destroy(struct dm_space_map *sm)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ kfree(smd);
+}
+
+static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return sm_ll_extend(&smd->ll, extra_blocks);
+}
+
+static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+ *count = smd->old_ll.nr_blocks;
+
+ return 0;
+}
+
+static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+ *count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction;
+
+ return 0;
+}
+
+static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+ return sm_ll_lookup(&smd->ll, b, result);
+}
+
+static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
+ int *result)
+{
+ int r;
+ uint32_t count;
+
+ r = sm_disk_get_count(sm, b, &count);
+ if (r)
+ return r;
+
+	*result = count > 1;
+
+	return 0;
+}
+
+static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t count)
+{
+ int r;
+ uint32_t old_count;
+ enum allocation_event ev;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ r = sm_ll_insert(&smd->ll, b, count, &ev);
+ if (!r) {
+ switch (ev) {
+ case SM_NONE:
+ break;
+
+ case SM_ALLOC:
+ /*
+			 * This _must_ be free in the prior transaction,
+			 * otherwise we've lost atomicity.
+ */
+ smd->nr_allocated_this_transaction++;
+ break;
+
+ case SM_FREE:
+ /*
+ * It's only free if it's also free in the last
+ * transaction.
+ */
+ r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+ if (r)
+ return r;
+
+ if (!old_count)
+ smd->nr_allocated_this_transaction--;
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ int r;
+ enum allocation_event ev;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ r = sm_ll_inc(&smd->ll, b, &ev);
+ if (!r && (ev == SM_ALLOC))
+ /*
+		 * This _must_ be free in the prior transaction,
+		 * otherwise we've lost atomicity.
+ */
+ smd->nr_allocated_this_transaction++;
+
+ return r;
+}
+
+static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ int r;
+ uint32_t old_count;
+ enum allocation_event ev;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ r = sm_ll_dec(&smd->ll, b, &ev);
+ if (!r && (ev == SM_FREE)) {
+ /*
+ * It's only free if it's also free in the last
+ * transaction.
+ */
+ r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+ if (r)
+ return r;
+
+ if (!old_count)
+ smd->nr_allocated_this_transaction--;
+ }
+
+ return r;
+}
+
+static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ int r;
+ enum allocation_event ev;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ /* FIXME: we should loop round a couple of times */
+ r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
+ if (r)
+ return r;
+
+ smd->begin = *b + 1;
+ r = sm_ll_inc(&smd->ll, *b, &ev);
+ if (!r) {
+ BUG_ON(ev != SM_ALLOC);
+ smd->nr_allocated_this_transaction++;
+ }
+
+ return r;
+}
+
+static int sm_disk_commit(struct dm_space_map *sm)
+{
+ int r;
+ dm_block_t nr_free;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ r = sm_disk_get_nr_free(sm, &nr_free);
+ if (r)
+ return r;
+
+ r = sm_ll_commit(&smd->ll);
+ if (r)
+ return r;
+
+ memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
+ smd->begin = 0;
+ smd->nr_allocated_this_transaction = 0;
+
+ r = sm_disk_get_nr_free(sm, &nr_free);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int sm_disk_root_size(struct dm_space_map *sm, size_t *result)
+{
+ *result = sizeof(struct disk_sm_root);
+
+ return 0;
+}
+
+static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+ struct disk_sm_root root_le;
+
+ root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks);
+ root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated);
+ root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root);
+ root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root);
+
+ if (max < sizeof(root_le))
+ return -ENOSPC;
+
+ memcpy(where_le, &root_le, sizeof(root_le));
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_space_map ops = {
+ .destroy = sm_disk_destroy,
+ .extend = sm_disk_extend,
+ .get_nr_blocks = sm_disk_get_nr_blocks,
+ .get_nr_free = sm_disk_get_nr_free,
+ .get_count = sm_disk_get_count,
+ .count_is_more_than_one = sm_disk_count_is_more_than_one,
+ .set_count = sm_disk_set_count,
+ .inc_block = sm_disk_inc_block,
+ .dec_block = sm_disk_dec_block,
+ .new_block = sm_disk_new_block,
+ .commit = sm_disk_commit,
+ .root_size = sm_disk_root_size,
+ .copy_root = sm_disk_copy_root
+};
+
+static struct dm_space_map *dm_sm_disk_create_real(
+ struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ smd->begin = 0;
+ smd->nr_allocated_this_transaction = 0;
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = sm_ll_new_disk(&smd->ll, tm);
+ if (r)
+ goto bad;
+
+ r = sm_ll_extend(&smd->ll, nr_blocks);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+
+struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
+{
+ struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
+ return dm_sm_checker_create_fresh(sm);
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_create);
+
+static struct dm_space_map *dm_sm_disk_open_real(
+ struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ smd->begin = 0;
+ smd->nr_allocated_this_transaction = 0;
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = sm_ll_open_disk(&smd->ll, tm, root_le, len);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+
+struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ return dm_sm_checker_create(
+ dm_sm_disk_open_real(tm, root_le, len));
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_open);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-space-map-disk.h b/drivers/md/persistent-data/dm-space-map-disk.h
new file mode 100644
index 00000000000..447a0a9a2d9
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-disk.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_SPACE_MAP_DISK_H
+#define _LINUX_DM_SPACE_MAP_DISK_H
+
+#include "dm-block-manager.h"
+
+struct dm_space_map;
+struct dm_transaction_manager;
+
+/*
+ * Unfortunately we have to use two-phase construction due to the cycle
+ * between the tm and sm.
+ */
+struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks);
+
+struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
+ void *root, size_t len);
+
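+/*
+ * Illustrative sketch only: "tm" is assumed to come from
+ * dm_tm_create_with_sm() and nr_blocks is the size of the data device
+ * in blocks.
+ *
+ *	struct dm_space_map *data_sm = dm_sm_disk_create(tm, nr_blocks);
+ *	if (IS_ERR_OR_NULL(data_sm))
+ *		-- handle the error --
+ */
+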
+#endif /* _LINUX_DM_SPACE_MAP_DISK_H */
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
new file mode 100644
index 00000000000..e89ae5e7a51
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-space-map.h"
+#include "dm-space-map-common.h"
+#include "dm-space-map-metadata.h"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "space map metadata"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Space map interface.
+ *
+ * The low level disk format is written using the standard btree and
+ * transaction manager. This means that performing disk operations may
+ * cause us to recurse into the space map in order to allocate new blocks.
+ * For this reason we have a pool of pre-allocated blocks large enough to
+ * service any metadata_ll_disk operation.
+ */
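+
+/*
+ * Concretely: a call such as sm_ll_inc() may need to shadow a bitmap
+ * block or insert into the ref count btree, and those transaction
+ * manager operations call back into this space map to adjust reference
+ * counts. The in()/out() helpers below track that recursion; nested
+ * inc/dec requests are queued as block_ops and replayed by out() once
+ * the outermost operation has finished.
+ */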
+
+/*
+ * FIXME: we should calculate this based on the size of the device.
+ * Only the metadata space map needs this functionality.
+ */
+#define MAX_RECURSIVE_ALLOCATIONS 1024
+
+enum block_op_type {
+ BOP_INC,
+ BOP_DEC
+};
+
+struct block_op {
+ enum block_op_type type;
+ dm_block_t block;
+};
+
+struct sm_metadata {
+ struct dm_space_map sm;
+
+ struct ll_disk ll;
+ struct ll_disk old_ll;
+
+ dm_block_t begin;
+
+ unsigned recursion_count;
+ unsigned allocated_this_transaction;
+ unsigned nr_uncommitted;
+ struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+};
+
+static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
+{
+ struct block_op *op;
+
+ if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
+ DMERR("too many recursive allocations");
+ return -ENOMEM;
+ }
+
+ op = smm->uncommitted + smm->nr_uncommitted++;
+ op->type = type;
+ op->block = b;
+
+ return 0;
+}
+
+static int commit_bop(struct sm_metadata *smm, struct block_op *op)
+{
+ int r = 0;
+ enum allocation_event ev;
+
+ switch (op->type) {
+ case BOP_INC:
+ r = sm_ll_inc(&smm->ll, op->block, &ev);
+ break;
+
+ case BOP_DEC:
+ r = sm_ll_dec(&smm->ll, op->block, &ev);
+ break;
+ }
+
+ return r;
+}
+
+static void in(struct sm_metadata *smm)
+{
+ smm->recursion_count++;
+}
+
+static int out(struct sm_metadata *smm)
+{
+ int r = 0;
+
+ /*
+ * If we're not recursing then very bad things are happening.
+ */
+ if (!smm->recursion_count) {
+ DMERR("lost track of recursion depth");
+ return -ENOMEM;
+ }
+
+ if (smm->recursion_count == 1 && smm->nr_uncommitted) {
+ while (smm->nr_uncommitted && !r) {
+ smm->nr_uncommitted--;
+ r = commit_bop(smm, smm->uncommitted +
+ smm->nr_uncommitted);
+ if (r)
+ break;
+ }
+ }
+
+ smm->recursion_count--;
+
+ return r;
+}
+
+/*
+ * When using the out() function above, we often want to combine an error
+ * code for the operation run in the recursive context with that from
+ * out().
+ */
+static int combine_errors(int r1, int r2)
+{
+ return r1 ? r1 : r2;
+}
+
+static int recursing(struct sm_metadata *smm)
+{
+ return smm->recursion_count;
+}
+
+static void sm_metadata_destroy(struct dm_space_map *sm)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ kfree(smm);
+}
+
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ DMERR("doesn't support extend");
+ return -EINVAL;
+}
+
+static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ *count = smm->ll.nr_blocks;
+
+ return 0;
+}
+
+static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ *count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
+ smm->allocated_this_transaction;
+
+ return 0;
+}
+
+static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+{
+ int r, i;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ unsigned adjustment = 0;
+
+ /*
+ * We may have some uncommitted adjustments to add. This list
+ * should always be really short.
+ */
+ for (i = 0; i < smm->nr_uncommitted; i++) {
+ struct block_op *op = smm->uncommitted + i;
+
+ if (op->block != b)
+ continue;
+
+ switch (op->type) {
+ case BOP_INC:
+ adjustment++;
+ break;
+
+ case BOP_DEC:
+ adjustment--;
+ break;
+ }
+ }
+
+ r = sm_ll_lookup(&smm->ll, b, result);
+ if (r)
+ return r;
+
+ *result += adjustment;
+
+ return 0;
+}
+
+static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ dm_block_t b, int *result)
+{
+ int r, i, adjustment = 0;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ uint32_t rc;
+
+ /*
+ * We may have some uncommitted adjustments to add. This list
+ * should always be really short.
+ */
+ for (i = 0; i < smm->nr_uncommitted; i++) {
+ struct block_op *op = smm->uncommitted + i;
+
+ if (op->block != b)
+ continue;
+
+ switch (op->type) {
+ case BOP_INC:
+ adjustment++;
+ break;
+
+ case BOP_DEC:
+ adjustment--;
+ break;
+ }
+ }
+
+ if (adjustment > 1) {
+ *result = 1;
+ return 0;
+ }
+
+ r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
+ if (r)
+ return r;
+
+ if (rc == 3)
+ /*
+ * We err on the side of caution, and always return true.
+ */
+ *result = 1;
+ else
+ *result = rc + adjustment > 1;
+
+ return 0;
+}
+
+static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t count)
+{
+ int r, r2;
+ enum allocation_event ev;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ if (smm->recursion_count) {
+ DMERR("cannot recurse set_count()");
+ return -EINVAL;
+ }
+
+ in(smm);
+ r = sm_ll_insert(&smm->ll, b, count, &ev);
+ r2 = out(smm);
+
+ return combine_errors(r, r2);
+}
+
+static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ int r, r2 = 0;
+ enum allocation_event ev;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ if (recursing(smm))
+ r = add_bop(smm, BOP_INC, b);
+ else {
+ in(smm);
+ r = sm_ll_inc(&smm->ll, b, &ev);
+ r2 = out(smm);
+ }
+
+ return combine_errors(r, r2);
+}
+
+static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ int r, r2 = 0;
+ enum allocation_event ev;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ if (recursing(smm))
+ r = add_bop(smm, BOP_DEC, b);
+ else {
+ in(smm);
+ r = sm_ll_dec(&smm->ll, b, &ev);
+ r2 = out(smm);
+ }
+
+ return combine_errors(r, r2);
+}
+
+static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
+{
+ int r, r2 = 0;
+ enum allocation_event ev;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
+ if (r)
+ return r;
+
+ smm->begin = *b + 1;
+
+ if (recursing(smm))
+ r = add_bop(smm, BOP_INC, *b);
+ else {
+ in(smm);
+ r = sm_ll_inc(&smm->ll, *b, &ev);
+ r2 = out(smm);
+ }
+
+ if (!r)
+ smm->allocated_this_transaction++;
+
+ return combine_errors(r, r2);
+}
+
+static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ int r = sm_metadata_new_block_(sm, b);
+ if (r)
+ DMERR("out of metadata space");
+ return r;
+}
+
+static int sm_metadata_commit(struct dm_space_map *sm)
+{
+ int r;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ r = sm_ll_commit(&smm->ll);
+ if (r)
+ return r;
+
+ memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
+ smm->begin = 0;
+ smm->allocated_this_transaction = 0;
+
+ return 0;
+}
+
+static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
+{
+ *result = sizeof(struct disk_sm_root);
+
+ return 0;
+}
+
+static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ struct disk_sm_root root_le;
+
+ root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks);
+ root_le.nr_allocated = cpu_to_le64(smm->ll.nr_allocated);
+ root_le.bitmap_root = cpu_to_le64(smm->ll.bitmap_root);
+ root_le.ref_count_root = cpu_to_le64(smm->ll.ref_count_root);
+
+ if (max < sizeof(root_le))
+ return -ENOSPC;
+
+ memcpy(where_le, &root_le, sizeof(root_le));
+
+ return 0;
+}
+
+static struct dm_space_map ops = {
+ .destroy = sm_metadata_destroy,
+ .extend = sm_metadata_extend,
+ .get_nr_blocks = sm_metadata_get_nr_blocks,
+ .get_nr_free = sm_metadata_get_nr_free,
+ .get_count = sm_metadata_get_count,
+ .count_is_more_than_one = sm_metadata_count_is_more_than_one,
+ .set_count = sm_metadata_set_count,
+ .inc_block = sm_metadata_inc_block,
+ .dec_block = sm_metadata_dec_block,
+ .new_block = sm_metadata_new_block,
+ .commit = sm_metadata_commit,
+ .root_size = sm_metadata_root_size,
+ .copy_root = sm_metadata_copy_root
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * When a new space map is created that manages its own space, we use
+ * this tiny bootstrap allocator.
+ */
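+
+/*
+ * dm_sm_metadata_create() below points smm->sm at bootstrap_ops while
+ * sm_ll_new_metadata() and sm_ll_extend() build the on-disk structures,
+ * then switches back to the real ops and increments the counts of the
+ * blocks the bootstrap allocator handed out.
+ */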
+static void sm_bootstrap_destroy(struct dm_space_map *sm)
+{
+}
+
+static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+	DMERR("bootstrap doesn't support extend");
+
+ return -EINVAL;
+}
+
+static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+	*count = smm->ll.nr_blocks;
+
+	return 0;
+}
+
+static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ *count = smm->ll.nr_blocks - smm->begin;
+
+ return 0;
+}
+
+static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+	*result = (b < smm->begin) ? 1 : 0;
+
+	return 0;
+}
+
+static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
+ dm_block_t b, int *result)
+{
+ *result = 0;
+
+ return 0;
+}
+
+static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t count)
+{
+	DMERR("bootstrap doesn't support set_count");
+
+ return -EINVAL;
+}
+
+static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ /*
+ * We know the entire device is unused.
+ */
+ if (smm->begin == smm->ll.nr_blocks)
+ return -ENOSPC;
+
+ *b = smm->begin++;
+
+ return 0;
+}
+
+static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ return add_bop(smm, BOP_INC, b);
+}
+
+static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ return add_bop(smm, BOP_DEC, b);
+}
+
+static int sm_bootstrap_commit(struct dm_space_map *sm)
+{
+ return 0;
+}
+
+static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
+{
+	DMERR("bootstrap doesn't support root_size");
+
+ return -EINVAL;
+}
+
+static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
+ size_t max)
+{
+	DMERR("bootstrap doesn't support copy_root");
+
+ return -EINVAL;
+}
+
+static struct dm_space_map bootstrap_ops = {
+ .destroy = sm_bootstrap_destroy,
+ .extend = sm_bootstrap_extend,
+ .get_nr_blocks = sm_bootstrap_get_nr_blocks,
+ .get_nr_free = sm_bootstrap_get_nr_free,
+ .get_count = sm_bootstrap_get_count,
+ .count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
+ .set_count = sm_bootstrap_set_count,
+ .inc_block = sm_bootstrap_inc_block,
+ .dec_block = sm_bootstrap_dec_block,
+ .new_block = sm_bootstrap_new_block,
+ .commit = sm_bootstrap_commit,
+ .root_size = sm_bootstrap_root_size,
+ .copy_root = sm_bootstrap_copy_root
+};
+
+/*----------------------------------------------------------------*/
+
+struct dm_space_map *dm_sm_metadata_init(void)
+{
+ struct sm_metadata *smm;
+
+ smm = kmalloc(sizeof(*smm), GFP_KERNEL);
+ if (!smm)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
+
+ return &smm->sm;
+}
+
+int dm_sm_metadata_create(struct dm_space_map *sm,
+ struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks,
+ dm_block_t superblock)
+{
+ int r;
+ dm_block_t i;
+ enum allocation_event ev;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ smm->begin = superblock + 1;
+ smm->recursion_count = 0;
+ smm->allocated_this_transaction = 0;
+ smm->nr_uncommitted = 0;
+
+ memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+
+ r = sm_ll_new_metadata(&smm->ll, tm);
+ if (r)
+ return r;
+
+ r = sm_ll_extend(&smm->ll, nr_blocks);
+ if (r)
+ return r;
+
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
+
+ /*
+ * Now we need to update the newly created data structures with the
+ * allocated blocks that they were built from.
+ */
+ for (i = superblock; !r && i < smm->begin; i++)
+ r = sm_ll_inc(&smm->ll, i, &ev);
+
+ if (r)
+ return r;
+
+ return sm_metadata_commit(sm);
+}
+
+int dm_sm_metadata_open(struct dm_space_map *sm,
+ struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ r = sm_ll_open_metadata(&smm->ll, tm, root_le, len);
+ if (r)
+ return r;
+
+ smm->begin = 0;
+ smm->recursion_count = 0;
+ smm->allocated_this_transaction = 0;
+ smm->nr_uncommitted = 0;
+
+ memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
+ return 0;
+}
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h
new file mode 100644
index 00000000000..39bba0801cf
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map-metadata.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_SPACE_MAP_METADATA_H
+#define DM_SPACE_MAP_METADATA_H
+
+#include "dm-transaction-manager.h"
+
+/*
+ * Unfortunately we have to use two-phase construction due to the cycle
+ * between the tm and sm.
+ */
+struct dm_space_map *dm_sm_metadata_init(void);
+
+/*
+ * Create a fresh space map.
+ */
+int dm_sm_metadata_create(struct dm_space_map *sm,
+ struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks,
+ dm_block_t superblock);
+
+/*
+ * Open from a previously-recorded root.
+ */
+int dm_sm_metadata_open(struct dm_space_map *sm,
+ struct dm_transaction_manager *tm,
+ void *root_le, size_t len);
+
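+/*
+ * A minimal sketch of the two-phase sequence, illustrative only; in this
+ * tree the real caller is dm_tm_create_internal() in
+ * dm-transaction-manager.c, and "bm" and "sb_location" are assumed to
+ * already exist:
+ *
+ *	struct dm_space_map *sm = dm_sm_metadata_init();
+ *	struct dm_transaction_manager *tm;
+ *
+ *	-- create tm on top of bm, with sm servicing its allocations --
+ *
+ *	r = dm_sm_metadata_create(sm, tm, dm_bm_nr_blocks(bm), sb_location);
+ */
+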
+#endif /* DM_SPACE_MAP_METADATA_H */
diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
new file mode 100644
index 00000000000..1cbfc6b1638
--- /dev/null
+++ b/drivers/md/persistent-data/dm-space-map.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_SPACE_MAP_H
+#define _LINUX_DM_SPACE_MAP_H
+
+#include "dm-block-manager.h"
+
+/*
+ * struct dm_space_map keeps a record of how many times each block in a device
+ * is referenced. It needs to be fixed on disk as part of the transaction.
+ */
+struct dm_space_map {
+ void (*destroy)(struct dm_space_map *sm);
+
+ /*
+ * You must commit before allocating the newly added space.
+ */
+ int (*extend)(struct dm_space_map *sm, dm_block_t extra_blocks);
+
+ /*
+ * Extensions do not appear in this count until after commit has
+ * been called.
+ */
+ int (*get_nr_blocks)(struct dm_space_map *sm, dm_block_t *count);
+
+ /*
+ * Space maps must never allocate a block from the previous
+	 * transaction, in case we need to roll back. This complicates the
+	 * semantics of get_nr_free(): it should return the number of blocks
+	 * that are available for allocation _now_. For instance, you may
+ * have blocks with a zero reference count that will not be
+ * available for allocation until after the next commit.
+ */
+ int (*get_nr_free)(struct dm_space_map *sm, dm_block_t *count);
+
+ int (*get_count)(struct dm_space_map *sm, dm_block_t b, uint32_t *result);
+ int (*count_is_more_than_one)(struct dm_space_map *sm, dm_block_t b,
+ int *result);
+ int (*set_count)(struct dm_space_map *sm, dm_block_t b, uint32_t count);
+
+ int (*commit)(struct dm_space_map *sm);
+
+ int (*inc_block)(struct dm_space_map *sm, dm_block_t b);
+ int (*dec_block)(struct dm_space_map *sm, dm_block_t b);
+
+ /*
+ * new_block will increment the returned block.
+ */
+ int (*new_block)(struct dm_space_map *sm, dm_block_t *b);
+
+ /*
+ * The root contains all the information needed to fix the space map.
+ * Generally this info is small, so squirrel it away in a disk block
+ * along with other info.
+ */
+ int (*root_size)(struct dm_space_map *sm, size_t *result);
+ int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+};
+
+/*----------------------------------------------------------------*/
+
+static inline void dm_sm_destroy(struct dm_space_map *sm)
+{
+ sm->destroy(sm);
+}
+
+static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ return sm->extend(sm, extra_blocks);
+}
+
+static inline int dm_sm_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ return sm->get_nr_blocks(sm, count);
+}
+
+static inline int dm_sm_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ return sm->get_nr_free(sm, count);
+}
+
+static inline int dm_sm_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+{
+ return sm->get_count(sm, b, result);
+}
+
+static inline int dm_sm_count_is_more_than_one(struct dm_space_map *sm,
+ dm_block_t b, int *result)
+{
+ return sm->count_is_more_than_one(sm, b, result);
+}
+
+static inline int dm_sm_set_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t count)
+{
+ return sm->set_count(sm, b, count);
+}
+
+static inline int dm_sm_commit(struct dm_space_map *sm)
+{
+ return sm->commit(sm);
+}
+
+static inline int dm_sm_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ return sm->inc_block(sm, b);
+}
+
+static inline int dm_sm_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ return sm->dec_block(sm, b);
+}
+
+static inline int dm_sm_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ return sm->new_block(sm, b);
+}
+
+static inline int dm_sm_root_size(struct dm_space_map *sm, size_t *result)
+{
+ return sm->root_size(sm, result);
+}
+
+static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le, size_t len)
+{
+ return sm->copy_root(sm, copy_to_here_le, len);
+}
+
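+/*
+ * Illustrative sketch only, not part of the interface: a typical
+ * transaction allocates a block, takes an extra reference, commits and
+ * then saves the root. "root_buf" is assumed to be at least
+ * dm_sm_root_size() bytes long.
+ */
+static inline int dm_sm_example_transaction(struct dm_space_map *sm,
+					    void *root_buf, size_t root_buf_len)
+{
+	dm_block_t b;
+	int r;
+
+	r = dm_sm_new_block(sm, &b);	/* b now has a reference count of 1 */
+	if (r)
+		return r;
+
+	r = dm_sm_inc_block(sm, b);	/* and now a count of 2 */
+	if (r)
+		return r;
+
+	r = dm_sm_commit(sm);		/* make the new counts durable */
+	if (r)
+		return r;
+
+	/* squirrel the root away alongside other metadata */
+	return dm_sm_copy_root(sm, root_buf, root_buf_len);
+}
+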
+#endif /* _LINUX_DM_SPACE_MAP_H */
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
new file mode 100644
index 00000000000..6f8d38747d7
--- /dev/null
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#include "dm-transaction-manager.h"
+#include "dm-space-map.h"
+#include "dm-space-map-checker.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map-metadata.h"
+#include "dm-persistent-data-internal.h"
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "transaction manager"
+
+/*----------------------------------------------------------------*/
+
+struct shadow_info {
+ struct hlist_node hlist;
+ dm_block_t where;
+};
+
+/*
+ * It would be nice if we scaled with the size of the transaction.
+ */
+#define HASH_SIZE 256
+#define HASH_MASK (HASH_SIZE - 1)
+
+struct dm_transaction_manager {
+ int is_clone;
+ struct dm_transaction_manager *real;
+
+ struct dm_block_manager *bm;
+ struct dm_space_map *sm;
+
+ spinlock_t lock;
+ struct hlist_head buckets[HASH_SIZE];
+};
+
+/*----------------------------------------------------------------*/
+
+static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ int r = 0;
+ unsigned bucket = dm_hash_block(b, HASH_MASK);
+ struct shadow_info *si;
+ struct hlist_node *n;
+
+ spin_lock(&tm->lock);
+ hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+ if (si->where == b) {
+ r = 1;
+ break;
+ }
+ spin_unlock(&tm->lock);
+
+ return r;
+}
+
+/*
+ * This can silently fail if there's no memory. We're ok with this since
+ * creating redundant shadows causes no harm.
+ */
+static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ unsigned bucket;
+ struct shadow_info *si;
+
+ si = kmalloc(sizeof(*si), GFP_NOIO);
+ if (si) {
+ si->where = b;
+ bucket = dm_hash_block(b, HASH_MASK);
+ spin_lock(&tm->lock);
+ hlist_add_head(&si->hlist, tm->buckets + bucket);
+ spin_unlock(&tm->lock);
+ }
+}
+
+static void wipe_shadow_table(struct dm_transaction_manager *tm)
+{
+ struct shadow_info *si;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ int i;
+
+ spin_lock(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++) {
+ bucket = tm->buckets + i;
+ hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+ kfree(si);
+
+ INIT_HLIST_HEAD(bucket);
+ }
+
+ spin_unlock(&tm->lock);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
+ struct dm_space_map *sm)
+{
+ int i;
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (!tm)
+ return ERR_PTR(-ENOMEM);
+
+ tm->is_clone = 0;
+ tm->real = NULL;
+ tm->bm = bm;
+ tm->sm = sm;
+
+ spin_lock_init(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++)
+ INIT_HLIST_HEAD(tm->buckets + i);
+
+ return tm;
+}
+
+struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
+{
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (tm) {
+ tm->is_clone = 1;
+ tm->real = real;
+ }
+
+ return tm;
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+void dm_tm_destroy(struct dm_transaction_manager *tm)
+{
+ kfree(tm);
+}
+EXPORT_SYMBOL_GPL(dm_tm_destroy);
+
+int dm_tm_pre_commit(struct dm_transaction_manager *tm)
+{
+ int r;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_commit(tm->sm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
+
+int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ wipe_shadow_table(tm);
+
+ return dm_bm_flush_and_unlock(tm->bm, root);
+}
+EXPORT_SYMBOL_GPL(dm_tm_commit);
+
+int dm_tm_new_block(struct dm_transaction_manager *tm,
+ struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ int r;
+ dm_block_t new_block;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_new_block(tm->sm, &new_block);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
+ if (r < 0) {
+ dm_sm_dec_block(tm->sm, new_block);
+ return r;
+ }
+
+ /*
+ * New blocks count as shadows in that they don't need to be
+ * shadowed again.
+ */
+ insert_shadow(tm, new_block);
+
+ return 0;
+}
+
+static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ int r;
+ dm_block_t new;
+ struct dm_block *orig_block;
+
+ r = dm_sm_new_block(tm->sm, &new);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_dec_block(tm->sm, orig);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_unlock_move(orig_block, new);
+ if (r < 0) {
+ dm_bm_unlock(orig_block);
+ return r;
+ }
+
+ return dm_bm_write_lock(tm->bm, new, v, result);
+}
+
+int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v, struct dm_block **result,
+ int *inc_children)
+{
+ int r;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
+ if (r < 0)
+ return r;
+
+ if (is_shadow(tm, orig) && !*inc_children)
+ return dm_bm_write_lock(tm->bm, orig, v, result);
+
+ r = __shadow_block(tm, orig, v, result);
+ if (r < 0)
+ return r;
+ insert_shadow(tm, dm_block_location(*result));
+
+ return r;
+}
+
+int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **blk)
+{
+ if (tm->is_clone)
+ return dm_bm_read_try_lock(tm->real->bm, b, v, blk);
+
+ return dm_bm_read_lock(tm->bm, b, v, blk);
+}
+
+int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+{
+ return dm_bm_unlock(b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_unlock);
+
+void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_inc_block(tm->sm, b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_inc);
+
+void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_dec_block(tm->sm, b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_dec);
+
+int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
+ uint32_t *result)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ return dm_sm_get_count(tm->sm, b, result);
+}
+
+struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
+{
+ return tm->bm;
+}
+
+/*----------------------------------------------------------------*/
+
+static int dm_tm_create_internal(struct dm_block_manager *bm,
+ dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm,
+ struct dm_block **sblock,
+ int create)
+{
+ int r;
+ struct dm_space_map *inner;
+
+ inner = dm_sm_metadata_init();
+ if (IS_ERR(inner))
+ return PTR_ERR(inner);
+
+ *tm = dm_tm_create(bm, inner);
+ if (IS_ERR(*tm)) {
+ dm_sm_destroy(inner);
+ return PTR_ERR(*tm);
+ }
+
+ if (create) {
+ r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_create(inner, *tm, dm_bm_nr_blocks(bm),
+ sb_location);
+ if (r) {
+ DMERR("couldn't create metadata space map");
+ goto bad2;
+ }
+
+		*sm = dm_sm_checker_create(inner);
+		if (!*sm) {
+			r = -ENOMEM;
+			goto bad2;
+		}
+
+ } else {
+ r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_open(inner, *tm,
+ dm_block_data(*sblock) + root_offset,
+ root_max_len);
+ if (r) {
+ DMERR("couldn't open metadata space map");
+ goto bad2;
+ }
+
+		*sm = dm_sm_checker_create(inner);
+		if (!*sm) {
+			r = -ENOMEM;
+			goto bad2;
+		}
+ }
+
+ return 0;
+
+bad2:
+ dm_tm_unlock(*tm, *sblock);
+bad1:
+ dm_tm_destroy(*tm);
+ dm_sm_destroy(inner);
+ return r;
+}
+
+int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator,
+ 0, 0, tm, sm, sblock, 1);
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
+
+int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
+ root_max_len, tm, sm, sblock, 0);
+}
+EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
new file mode 100644
index 00000000000..6da784871db
--- /dev/null
+++ b/drivers/md/persistent-data/dm-transaction-manager.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2011 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_TRANSACTION_MANAGER_H
+#define _LINUX_DM_TRANSACTION_MANAGER_H
+
+#include "dm-block-manager.h"
+
+struct dm_transaction_manager;
+struct dm_space_map;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This manages the scope of a transaction. It also enforces immutability
+ * of the on-disk data structures by limiting access to writeable blocks.
+ *
+ * Clients should not fiddle with the block manager directly.
+ */
+
+void dm_tm_destroy(struct dm_transaction_manager *tm);
+
+/*
+ * The non-blocking version of a transaction manager is intended for use in
+ * fast path code that needs to do lookups, e.g. a dm mapping function.
+ * You create the non-blocking variant from a normal tm.  The interface is
+ * the same, except that most functions will just return -EWOULDBLOCK.
+ * Methods that may block but return void (viz. dm_tm_inc and dm_tm_dec)
+ * must not be called on a clone.  Call dm_tm_destroy() as you would with a
+ * normal tm when you've finished with it.  You may not destroy the original
+ * prior to its clones.
+ */
+struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real);
+
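
For illustration only (not part of the patch): a fast-path lookup might use the clone roughly as below. The example_fast_path_lookup() name is made up, and the sketch assumes "dm-transaction-manager.h" is included.

static int example_fast_path_lookup(struct dm_transaction_manager *real_tm,
				    dm_block_t b,
				    struct dm_block_validator *v)
{
	struct dm_transaction_manager *nb_tm;
	struct dm_block *blk;
	int r;

	nb_tm = dm_tm_create_non_blocking_clone(real_tm);
	if (!nb_tm)
		return -ENOMEM;

	/* On a clone this takes a try-lock and may return -EWOULDBLOCK. */
	r = dm_tm_read_lock(nb_tm, b, v, &blk);
	if (!r) {
		/* ... inspect dm_block_data(blk) ... */
		dm_tm_unlock(nb_tm, blk);
	}

	/* Destroy the clone before the real tm, never the other way round. */
	dm_tm_destroy(nb_tm);
	return r;
}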
+/*
+ * We use a 2-phase commit here.
+ *
+ * i) In the first phase the block manager is told to start flushing, and
+ * the changes to the space map are written to disk. You should interrogate
+ * your particular space map to get details of its root node etc. to be
+ * included in your superblock.
+ *
+ * ii) @root will be committed last. You shouldn't use more than the
+ * first 512 bytes of @root if you wish the transaction to survive a power
+ * failure. You *must* have a write lock held on @root for both stage (i)
+ * and (ii). The commit will drop the write lock.
+ */
+int dm_tm_pre_commit(struct dm_transaction_manager *tm);
+int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root);
+
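
A rough sketch of this sequence (illustrative only, not part of the patch; the superblock layout and root copying are left to the caller, and example_commit() is a made-up name):

static int example_commit(struct dm_transaction_manager *tm,
			  dm_block_t sb_location,
			  struct dm_block_validator *sb_validator)
{
	struct dm_block *sblock;
	int r;

	/* Hold the superblock write lock across both phases. */
	r = dm_bm_write_lock(dm_tm_get_bm(tm), sb_location,
			     sb_validator, &sblock);
	if (r < 0)
		return r;

	/* Phase (i): flush outstanding data and commit the space map. */
	r = dm_tm_pre_commit(tm);
	if (r < 0) {
		dm_bm_unlock(sblock);
		return r;
	}

	/* ... copy the space map root etc. into dm_block_data(sblock) ... */

	/* Phase (ii): write the superblock last; this drops the write lock. */
	return dm_tm_commit(tm, sblock);
}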
+/*
+ * These methods are the only way to get hold of a writeable block.
+ */
+
+/*
+ * dm_tm_new_block() is pretty self-explanatory.  Make sure you do actually
+ * write to the whole of @data before you unlock, otherwise you could get
+ * a data leak.  (The other option is for dm_tm_new_block() to zero new
+ * blocks before handing them out, which will be redundant in most, if not
+ * all, cases.)
+ * The current implementation zeroes the new block and returns with the
+ * write lock held.
+ */
+int dm_tm_new_block(struct dm_transaction_manager *tm,
+ struct dm_block_validator *v,
+ struct dm_block **result);
+
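
A minimal allocation sketch (illustrative only, not part of the patch; example_alloc_block() is a made-up name):

static int example_alloc_block(struct dm_transaction_manager *tm,
			       struct dm_block_validator *v,
			       dm_block_t *result)
{
	struct dm_block *b;
	int r;

	/* Returns a zeroed block with the write lock held. */
	r = dm_tm_new_block(tm, v, &b);
	if (r < 0)
		return r;

	/* ... fill in the whole of dm_block_data(b) before unlocking ... */

	*result = dm_block_location(b);
	return dm_tm_unlock(tm, b);
}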
+/*
+ * dm_tm_shadow_block() allocates a new block and copies the data from @orig
+ * to it.  It then decrements the reference count on the original block.  Use
+ * this to update the contents of a block in a data structure; don't
+ * confuse this with a clone - you shouldn't access the orig block after
+ * this operation. Because the tm knows the scope of the transaction it
+ * can optimise requests for a shadow of a shadow to a no-op. Don't forget
+ * to unlock when you've finished with the shadow.
+ *
+ * The @inc_children flag is used to tell the caller whether it needs to
+ * adjust reference counts for children. (Data in the block may refer to
+ * other blocks.)
+ *
+ * Shadowing implicitly drops a reference on @orig so you must not have
+ * it locked when you call this.
+ */
+int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v,
+ struct dm_block **result, int *inc_children);
+
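
To make the @inc_children handling concrete, a hypothetical update path might look like this (illustrative only, not part of the patch; the child walk is left as a comment and example_update_block() is a made-up name):

static int example_update_block(struct dm_transaction_manager *tm,
				dm_block_t orig,
				struct dm_block_validator *v,
				dm_block_t *new_location)
{
	struct dm_block *shadow;
	int r, inc_children;

	/* @orig must not be locked here; shadowing drops a reference on it. */
	r = dm_tm_shadow_block(tm, orig, v, &shadow, &inc_children);
	if (r < 0)
		return r;

	if (inc_children) {
		/*
		 * First shadow of this block in the transaction: bump the
		 * reference count of every block it points to, e.g.
		 * dm_tm_inc(tm, child);
		 */
	}

	/* ... modify dm_block_data(shadow) ... */

	*new_location = dm_block_location(shadow);
	return dm_tm_unlock(tm, shadow);
}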
+/*
+ * Read access. You can lock any block you want. If there's a write lock
+ * on it outstanding then it'll block.
+ */
+int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **result);
+
+int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
+
+/*
+ * Functions for altering the reference count of a block directly.
+ */
+void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b);
+
+void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b);
+
+int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
+ uint32_t *result);
+
+struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm);
+
+/*
+ * A little utility that ties the knot by producing a transaction manager
+ * that has a space map managed by the transaction manager...
+ *
+ * Returns a tm that has an open transaction to write the new disk sm.
+ * Caller should store the new sm root and commit.
+ */
+int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock);
+
+int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock);
+
+#endif /* _LINUX_DM_TRANSACTION_MANAGER_H */
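
Putting the pieces together, formatting a fresh metadata area might look roughly like this (illustrative only, not part of the patch; assumes "dm-space-map.h" for dm_sm_destroy(), trims most error unwinding, and example_format_metadata() is a made-up name):

static int example_format_metadata(struct dm_block_manager *bm,
				   dm_block_t sb_location,
				   struct dm_block_validator *sb_validator)
{
	struct dm_transaction_manager *tm;
	struct dm_space_map *sm;
	struct dm_block *sblock;
	int r;

	/* Returns with an open transaction and @sblock write locked. */
	r = dm_tm_create_with_sm(bm, sb_location, sb_validator,
				 &tm, &sm, &sblock);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(tm);
	if (r < 0) {
		dm_tm_unlock(tm, sblock);
		goto out;
	}

	/* ... store the space map root in dm_block_data(sblock) ... */

	r = dm_tm_commit(tm, sblock);	/* drops the superblock lock */
out:
	dm_sm_destroy(sm);
	dm_tm_destroy(tm);
	return r;
}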
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0eb08a4df75..7294bd115e3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
@@ -468,7 +469,7 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
}
}
-static int raid0_make_request(struct mddev *mddev, struct bio *bio)
+static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
unsigned int chunk_sects;
sector_t sector_offset;
@@ -477,7 +478,7 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
- return 0;
+ return;
}
chunk_sects = mddev->chunk_sectors;
@@ -497,13 +498,10 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio)
else
bp = bio_split(bio, chunk_sects -
sector_div(sector, chunk_sects));
- if (raid0_make_request(mddev, &bp->bio1))
- generic_make_request(&bp->bio1);
- if (raid0_make_request(mddev, &bp->bio2))
- generic_make_request(&bp->bio2);
-
+ raid0_make_request(mddev, &bp->bio1);
+ raid0_make_request(mddev, &bp->bio2);
bio_pair_release(bp);
- return 0;
+ return;
}
sector_offset = bio->bi_sector;
@@ -513,10 +511,9 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio)
bio->bi_bdev = tmp_dev->bdev;
bio->bi_sector = sector_offset + zone->dev_start +
tmp_dev->data_offset;
- /*
- * Let the main block layer submit the IO and resolve recursion:
- */
- return 1;
+
+ generic_make_request(bio);
+ return;
bad_map:
printk("md/raid0:%s: make_request bug: can't convert block across chunks"
@@ -525,7 +522,7 @@ bad_map:
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
- return 0;
+ return;
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4602fc57c96..ede2461e79c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
+#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
@@ -807,7 +808,7 @@ do_sync_io:
pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}
-static int make_request(struct mddev *mddev, struct bio * bio)
+static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r1conf *conf = mddev->private;
struct mirror_info *mirror;
@@ -892,7 +893,7 @@ read_again:
if (rdisk < 0) {
/* couldn't find anywhere to read from */
raid_end_bio_io(r1_bio);
- return 0;
+ return;
}
mirror = conf->mirrors + rdisk;
@@ -950,7 +951,7 @@ read_again:
goto read_again;
} else
generic_make_request(read_bio);
- return 0;
+ return;
}
/*
@@ -1151,8 +1152,6 @@ read_again:
if (do_sync || !bitmap || !plugged)
md_wakeup_thread(mddev->thread);
-
- return 0;
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2193,7 +2192,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
bio->bi_next = NULL;
bio->bi_flags &= ~(BIO_POOL_MASK-1);
bio->bi_flags |= 1 << BIO_UPTODATE;
- bio->bi_comp_cpu = -1;
bio->bi_rw = READ;
bio->bi_vcnt = 0;
bio->bi_idx = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 132c18ef866..685ddf325ee 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
+#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
@@ -842,7 +843,7 @@ static void unfreeze_array(struct r10conf *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(struct mddev *mddev, struct bio * bio)
+static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r10conf *conf = mddev->private;
struct mirror_info *mirror;
@@ -861,7 +862,7 @@ static int make_request(struct mddev *mddev, struct bio * bio)
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
- return 0;
+ return;
}
/* If this request crosses a chunk boundary, we need to
@@ -893,10 +894,8 @@ static int make_request(struct mddev *mddev, struct bio * bio)
conf->nr_waiting++;
spin_unlock_irq(&conf->resync_lock);
- if (make_request(mddev, &bp->bio1))
- generic_make_request(&bp->bio1);
- if (make_request(mddev, &bp->bio2))
- generic_make_request(&bp->bio2);
+ make_request(mddev, &bp->bio1);
+ make_request(mddev, &bp->bio2);
spin_lock_irq(&conf->resync_lock);
conf->nr_waiting--;
@@ -904,14 +903,14 @@ static int make_request(struct mddev *mddev, struct bio * bio)
spin_unlock_irq(&conf->resync_lock);
bio_pair_release(bp);
- return 0;
+ return;
bad_map:
printk("md/raid10:%s: make_request bug: can't convert block across chunks"
" or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
- return 0;
+ return;
}
md_write_start(mddev, bio);
@@ -954,7 +953,7 @@ read_again:
slot = r10_bio->read_slot;
if (disk < 0) {
raid_end_bio_io(r10_bio);
- return 0;
+ return;
}
mirror = conf->mirrors + disk;
@@ -1002,7 +1001,7 @@ read_again:
goto read_again;
} else
generic_make_request(read_bio);
- return 0;
+ return;
}
/*
@@ -1176,7 +1175,6 @@ retry_write:
if (do_sync || !mddev->bitmap || !plugged)
md_wakeup_thread(mddev->thread);
- return 0;
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1355,7 +1353,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
struct mirror_info *p = &conf->mirrors[mirror];
if (p->recovery_disabled == mddev->recovery_disabled)
continue;
- if (!p->rdev)
+ if (p->rdev)
continue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f6fe053a5be..297e2609217 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -47,6 +47,7 @@
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
+#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
@@ -3109,7 +3110,7 @@ static void handle_stripe(struct stripe_head *sh)
struct r5dev *pdev, *qdev;
clear_bit(STRIPE_HANDLE, &sh->state);
- if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
+ if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
/* already being handled, ensure it gets handled
* again when current action finishes */
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3158,10 +3159,14 @@ static void handle_stripe(struct stripe_head *sh)
/* check if the array has lost more than max_degraded devices and,
* if so, some requests might need to be failed.
*/
- if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
- handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
- if (s.failed > conf->max_degraded && s.syncing)
- handle_failed_sync(conf, sh, &s);
+ if (s.failed > conf->max_degraded) {
+ sh->check_state = 0;
+ sh->reconstruct_state = 0;
+ if (s.to_read+s.to_write+s.written)
+ handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
+ if (s.syncing)
+ handle_failed_sync(conf, sh, &s);
+ }
/*
* might be able to return some write requests if the parity blocks
@@ -3370,7 +3375,7 @@ finish:
return_io(s.return_bi);
- clear_bit(STRIPE_ACTIVE, &sh->state);
+ clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
}
static void raid5_activate_delayed(struct r5conf *conf)
@@ -3688,7 +3693,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
return sh;
}
-static int make_request(struct mddev *mddev, struct bio * bi)
+static void make_request(struct mddev *mddev, struct bio * bi)
{
struct r5conf *conf = mddev->private;
int dd_idx;
@@ -3701,7 +3706,7 @@ static int make_request(struct mddev *mddev, struct bio * bi)
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
- return 0;
+ return;
}
md_write_start(mddev, bi);
@@ -3709,7 +3714,7 @@ static int make_request(struct mddev *mddev, struct bio * bi)
if (rw == READ &&
mddev->reshape_position == MaxSector &&
chunk_aligned_read(mddev,bi))
- return 0;
+ return;
logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3844,8 +3849,6 @@ static int make_request(struct mddev *mddev, struct bio * bi)
bio_endio(bi, 0);
}
-
- return 0;
}
static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 9af2140b57a..d6b1cf66042 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -18,7 +18,10 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146.h>
+#include <linux/module.h>
LIST_HEAD(saa7146_devices);
DEFINE_MUTEX(saa7146_devices_lock);
@@ -35,10 +38,9 @@ static void dump_registers(struct saa7146_dev* dev)
{
int i = 0;
- INFO((" @ %li jiffies:\n",jiffies));
- for(i = 0; i <= 0x148; i+=4) {
- printk("0x%03x: 0x%08x\n",i,saa7146_read(dev,i));
- }
+ pr_info(" @ %li jiffies:\n", jiffies);
+ for (i = 0; i <= 0x148; i += 4)
+ pr_info("0x%03x: 0x%08x\n", i, saa7146_read(dev, i));
}
#endif
@@ -72,9 +74,8 @@ static inline int saa7146_wait_for_debi_done_sleep(struct saa7146_dev *dev,
if (saa7146_read(dev, MC2) & 2)
break;
if (err) {
- printk(KERN_ERR "%s: %s timed out while waiting for "
- "registers getting programmed\n",
- dev->name, __func__);
+ pr_err("%s: %s timed out while waiting for registers getting programmed\n",
+ dev->name, __func__);
return -ETIMEDOUT;
}
msleep(1);
@@ -88,8 +89,8 @@ static inline int saa7146_wait_for_debi_done_sleep(struct saa7146_dev *dev,
break;
saa7146_read(dev, MC2);
if (err) {
- DEB_S(("%s: %s timed out while waiting for transfer "
- "completion\n", dev->name, __func__));
+ DEB_S("%s: %s timed out while waiting for transfer completion\n",
+ dev->name, __func__);
return -ETIMEDOUT;
}
msleep(1);
@@ -109,9 +110,8 @@ static inline int saa7146_wait_for_debi_done_busyloop(struct saa7146_dev *dev,
if (saa7146_read(dev, MC2) & 2)
break;
if (!loops--) {
- printk(KERN_ERR "%s: %s timed out while waiting for "
- "registers getting programmed\n",
- dev->name, __func__);
+ pr_err("%s: %s timed out while waiting for registers getting programmed\n",
+ dev->name, __func__);
return -ETIMEDOUT;
}
udelay(1);
@@ -124,8 +124,8 @@ static inline int saa7146_wait_for_debi_done_busyloop(struct saa7146_dev *dev,
break;
saa7146_read(dev, MC2);
if (!loops--) {
- DEB_S(("%s: %s timed out while waiting for transfer "
- "completion\n", dev->name, __func__));
+ DEB_S("%s: %s timed out while waiting for transfer completion\n",
+ dev->name, __func__);
return -ETIMEDOUT;
}
udelay(5);
@@ -264,7 +264,9 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
ptr = pt->cpu;
for (i = 0; i < sglen; i++, list++) {
/*
- printk("i:%d, adr:0x%08x, len:%d, offset:%d\n", i,sg_dma_address(list), sg_dma_len(list), list->offset);
+ pr_debug("i:%d, adr:0x%08x, len:%d, offset:%d\n",
+ i, sg_dma_address(list), sg_dma_len(list),
+ list->offset);
*/
for (p = 0; p * 4096 < list->length; p++, ptr++) {
*ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
@@ -281,9 +283,9 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
/*
ptr = pt->cpu;
- printk("offset: %d\n",pt->offset);
+ pr_debug("offset: %d\n", pt->offset);
for(i=0;i<5;i++) {
- printk("ptr1 %d: 0x%08x\n",i,ptr[i]);
+ pr_debug("ptr1 %d: 0x%08x\n", i, ptr[i]);
}
*/
return 0;
@@ -314,7 +316,7 @@ static irqreturn_t interrupt_hw(int irq, void *dev_id)
}
}
if (0 != (isr & (MASK_27))) {
- DEB_INT(("irq: RPS0 (0x%08x).\n",isr));
+ DEB_INT("irq: RPS0 (0x%08x)\n", isr);
if (dev->vv_data && dev->vv_callback)
dev->vv_callback(dev,isr);
isr &= ~MASK_27;
@@ -333,14 +335,15 @@ static irqreturn_t interrupt_hw(int irq, void *dev_id)
} else {
u32 psr = saa7146_read(dev, PSR);
u32 ssr = saa7146_read(dev, SSR);
- printk(KERN_WARNING "%s: unexpected i2c irq: isr %08x psr %08x ssr %08x\n",
- dev->name, isr, psr, ssr);
+ pr_warn("%s: unexpected i2c irq: isr %08x psr %08x ssr %08x\n",
+ dev->name, isr, psr, ssr);
}
isr &= ~(MASK_16|MASK_17);
}
if( 0 != isr ) {
- ERR(("warning: interrupt enabled, but not handled properly.(0x%08x)\n",isr));
- ERR(("disabling interrupt source(s)!\n"));
+ ERR("warning: interrupt enabled, but not handled properly.(0x%08x)\n",
+ isr);
+ ERR("disabling interrupt source(s)!\n");
SAA7146_IER_DISABLE(dev,isr);
}
saa7146_write(dev, ISR, ack_isr);
@@ -360,15 +363,15 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
/* clear out mem for sure */
dev = kzalloc(sizeof(struct saa7146_dev), GFP_KERNEL);
if (!dev) {
- ERR(("out of memory.\n"));
+ ERR("out of memory\n");
goto out;
}
- DEB_EE(("pci:%p\n",pci));
+ DEB_EE("pci:%p\n", pci);
err = pci_enable_device(pci);
if (err < 0) {
- ERR(("pci_enable_device() failed.\n"));
+ ERR("pci_enable_device() failed\n");
goto err_free;
}
@@ -389,7 +392,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
dev->mem = ioremap(pci_resource_start(pci, 0),
pci_resource_len(pci, 0));
if (!dev->mem) {
- ERR(("ioremap() failed.\n"));
+ ERR("ioremap() failed\n");
err = -ENODEV;
goto err_release;
}
@@ -414,7 +417,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED,
dev->name, dev);
if (err < 0) {
- ERR(("request_irq() failed.\n"));
+ ERR("request_irq() failed\n");
goto err_unmap;
}
@@ -444,7 +447,9 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
/* create a nice device name */
sprintf(dev->name, "saa7146 (%d)", saa7146_num);
- INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
+ pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n",
+ dev->mem, dev->revision, pci->irq,
+ pci->subsystem_vendor, pci->subsystem_device);
dev->ext = ext;
mutex_init(&dev->v4l2_lock);
@@ -464,12 +469,12 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
err = -ENODEV;
if (ext->probe && ext->probe(dev)) {
- DEB_D(("ext->probe() failed for %p. skipping device.\n",dev));
+ DEB_D("ext->probe() failed for %p. skipping device.\n", dev);
goto err_free_i2c;
}
if (ext->attach(dev, pci_ext)) {
- DEB_D(("ext->attach() failed for %p. skipping device.\n",dev));
+ DEB_D("ext->attach() failed for %p. skipping device.\n", dev);
goto err_free_i2c;
}
/* V4L extensions will set the pci drvdata to the v4l2_device in the
@@ -521,7 +526,7 @@ static void saa7146_remove_one(struct pci_dev *pdev)
{ NULL, 0 }
}, *p;
- DEB_EE(("dev:%p\n",dev));
+ DEB_EE("dev:%p\n", dev);
dev->ext->detach(dev);
/* Zero the PCI drvdata after use. */
@@ -552,21 +557,21 @@ static void saa7146_remove_one(struct pci_dev *pdev)
int saa7146_register_extension(struct saa7146_extension* ext)
{
- DEB_EE(("ext:%p\n",ext));
+ DEB_EE("ext:%p\n", ext);
ext->driver.name = ext->name;
ext->driver.id_table = ext->pci_tbl;
ext->driver.probe = saa7146_init_one;
ext->driver.remove = saa7146_remove_one;
- printk("saa7146: register extension '%s'.\n",ext->name);
+ pr_info("register extension '%s'\n", ext->name);
return pci_register_driver(&ext->driver);
}
int saa7146_unregister_extension(struct saa7146_extension* ext)
{
- DEB_EE(("ext:%p\n",ext));
- printk("saa7146: unregister extension '%s'.\n",ext->name);
+ DEB_EE("ext:%p\n", ext);
+ pr_info("unregister extension '%s'\n", ext->name);
pci_unregister_driver(&ext->driver);
return 0;
}
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 1bd3dd762c6..71f8e018e56 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -1,4 +1,7 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146_vv.h>
+#include <linux/module.h>
/****************************************************************************/
/* resource management functions, shamelessly stolen from saa7134 driver */
@@ -9,21 +12,23 @@ int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit)
struct saa7146_vv *vv = dev->vv_data;
if (fh->resources & bit) {
- DEB_D(("already allocated! want: 0x%02x, cur:0x%02x\n",bit,vv->resources));
+ DEB_D("already allocated! want: 0x%02x, cur:0x%02x\n",
+ bit, vv->resources);
/* have it already allocated */
return 1;
}
/* is it free? */
if (vv->resources & bit) {
- DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
+ DEB_D("locked! vv->resources:0x%02x, we want:0x%02x\n",
+ vv->resources, bit);
/* no, someone else uses it */
return 0;
}
/* it's free, grab it */
- fh->resources |= bit;
+ fh->resources |= bit;
vv->resources |= bit;
- DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
+ DEB_D("res: get 0x%02x, cur:0x%02x\n", bit, vv->resources);
return 1;
}
@@ -34,9 +39,9 @@ void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits)
BUG_ON((fh->resources & bits) != bits);
- fh->resources &= ~bits;
+ fh->resources &= ~bits;
vv->resources &= ~bits;
- DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
+ DEB_D("res: put 0x%02x, cur:0x%02x\n", bits, vv->resources);
}
@@ -47,7 +52,7 @@ void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q,
struct saa7146_buf *buf)
{
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
- DEB_EE(("dev:%p, buf:%p\n",dev,buf));
+ DEB_EE("dev:%p, buf:%p\n", dev, buf);
BUG_ON(in_interrupt());
@@ -66,18 +71,19 @@ int saa7146_buffer_queue(struct saa7146_dev *dev,
struct saa7146_buf *buf)
{
assert_spin_locked(&dev->slock);
- DEB_EE(("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf));
+ DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf);
BUG_ON(!q);
if (NULL == q->curr) {
q->curr = buf;
- DEB_D(("immediately activating buffer %p\n", buf));
+ DEB_D("immediately activating buffer %p\n", buf);
buf->activate(dev,buf,NULL);
} else {
list_add_tail(&buf->vb.queue,&q->queue);
buf->vb.state = VIDEOBUF_QUEUED;
- DEB_D(("adding buffer %p to queue. (active buffer present)\n", buf));
+ DEB_D("adding buffer %p to queue. (active buffer present)\n",
+ buf);
}
return 0;
}
@@ -87,14 +93,14 @@ void saa7146_buffer_finish(struct saa7146_dev *dev,
int state)
{
assert_spin_locked(&dev->slock);
- DEB_EE(("dev:%p, dmaq:%p, state:%d\n", dev, q, state));
- DEB_EE(("q->curr:%p\n",q->curr));
+ DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state);
+ DEB_EE("q->curr:%p\n", q->curr);
BUG_ON(!q->curr);
/* finish current buffer */
if (NULL == q->curr) {
- DEB_D(("aiii. no current buffer\n"));
+ DEB_D("aiii. no current buffer\n");
return;
}
@@ -112,7 +118,7 @@ void saa7146_buffer_next(struct saa7146_dev *dev,
BUG_ON(!q);
- DEB_INT(("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi));
+ DEB_INT("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi);
assert_spin_locked(&dev->slock);
if (!list_empty(&q->queue)) {
@@ -122,10 +128,11 @@ void saa7146_buffer_next(struct saa7146_dev *dev,
if (!list_empty(&q->queue))
next = list_entry(q->queue.next,struct saa7146_buf, vb.queue);
q->curr = buf;
- DEB_INT(("next buffer: buf:%p, prev:%p, next:%p\n", buf, q->queue.prev,q->queue.next));
+ DEB_INT("next buffer: buf:%p, prev:%p, next:%p\n",
+ buf, q->queue.prev, q->queue.next);
buf->activate(dev,buf,next);
} else {
- DEB_INT(("no next buffer. stopping.\n"));
+ DEB_INT("no next buffer. stopping.\n");
if( 0 != vbi ) {
/* turn off video-dma3 */
saa7146_write(dev,MC1, MASK_20);
@@ -162,11 +169,11 @@ void saa7146_buffer_timeout(unsigned long data)
struct saa7146_dev *dev = q->dev;
unsigned long flags;
- DEB_EE(("dev:%p, dmaq:%p\n", dev, q));
+ DEB_EE("dev:%p, dmaq:%p\n", dev, q);
spin_lock_irqsave(&dev->slock,flags);
if (q->curr) {
- DEB_D(("timeout on %p\n", q->curr));
+ DEB_D("timeout on %p\n", q->curr);
saa7146_buffer_finish(dev,q,VIDEOBUF_ERROR);
}
@@ -194,12 +201,12 @@ static int fops_open(struct file *file)
enum v4l2_buf_type type;
- DEB_EE(("file:%p, dev:%s\n", file, video_device_node_name(vdev)));
+ DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
if (mutex_lock_interruptible(&saa7146_devices_lock))
return -ERESTARTSYS;
- DEB_D(("using: %p\n",dev));
+ DEB_D("using: %p\n", dev);
type = vdev->vfl_type == VFL_TYPE_GRABBER
? V4L2_BUF_TYPE_VIDEO_CAPTURE
@@ -207,7 +214,7 @@ static int fops_open(struct file *file)
/* check if an extension is registered */
if( NULL == dev->ext ) {
- DEB_S(("no extension registered for this device.\n"));
+ DEB_S("no extension registered for this device\n");
result = -ENODEV;
goto out;
}
@@ -215,7 +222,7 @@ static int fops_open(struct file *file)
/* allocate per open data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
if (NULL == fh) {
- DEB_S(("cannot allocate memory for per open data.\n"));
+ DEB_S("cannot allocate memory for per open data\n");
result = -ENOMEM;
goto out;
}
@@ -225,13 +232,13 @@ static int fops_open(struct file *file)
fh->type = type;
if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- DEB_S(("initializing vbi...\n"));
+ DEB_S("initializing vbi...\n");
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
result = saa7146_vbi_uops.open(dev,file);
if (dev->ext_vv_data->vbi_fops.open)
dev->ext_vv_data->vbi_fops.open(file);
} else {
- DEB_S(("initializing video...\n"));
+ DEB_S("initializing video...\n");
result = saa7146_video_uops.open(dev,file);
}
@@ -259,7 +266,7 @@ static int fops_release(struct file *file)
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
- DEB_EE(("file:%p\n", file));
+ DEB_EE("file:%p\n", file);
if (mutex_lock_interruptible(&saa7146_devices_lock))
return -ERESTARTSYS;
@@ -289,12 +296,14 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
- DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",file, vma));
+ DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",
+ file, vma);
q = &fh->video_q;
break;
}
case V4L2_BUF_TYPE_VBI_CAPTURE: {
- DEB_EE(("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n",file, vma));
+ DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n",
+ file, vma);
q = &fh->vbi_q;
break;
}
@@ -312,14 +321,14 @@ static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
struct videobuf_buffer *buf = NULL;
struct videobuf_queue *q;
- DEB_EE(("file:%p, poll:%p\n",file, wait));
+ DEB_EE("file:%p, poll:%p\n", file, wait);
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
if( 0 == fh->vbi_q.streaming )
return videobuf_poll_stream(file, &fh->vbi_q, wait);
q = &fh->vbi_q;
} else {
- DEB_D(("using video queue.\n"));
+ DEB_D("using video queue\n");
q = &fh->video_q;
}
@@ -327,17 +336,17 @@ static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
if (!buf) {
- DEB_D(("buf == NULL!\n"));
+ DEB_D("buf == NULL!\n");
return POLLERR;
}
poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) {
- DEB_D(("poll succeeded!\n"));
+ DEB_D("poll succeeded!\n");
return POLLIN|POLLRDNORM;
}
- DEB_D(("nothing to poll for, buf->state:%d\n",buf->state));
+ DEB_D("nothing to poll for, buf->state:%d\n", buf->state);
return 0;
}
@@ -346,18 +355,20 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof
struct saa7146_fh *fh = file->private_data;
switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
-// DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun", file, data, (unsigned long)count));
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+/*
+ DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun",
+ file, data, (unsigned long)count);
+*/
return saa7146_video_uops.read(file,data,count,ppos);
- }
- case V4L2_BUF_TYPE_VBI_CAPTURE: {
-// DEB_EE(("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n", file, data, (unsigned long)count));
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+/*
+ DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n",
+ file, data, (unsigned long)count);
+*/
if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
return saa7146_vbi_uops.read(file,data,count,ppos);
- else
- return -EINVAL;
- }
- break;
+ return -EINVAL;
default:
BUG();
return 0;
@@ -398,22 +409,22 @@ static void vv_callback(struct saa7146_dev *dev, unsigned long status)
{
u32 isr = status;
- DEB_INT(("dev:%p, isr:0x%08x\n",dev,(u32)status));
+ DEB_INT("dev:%p, isr:0x%08x\n", dev, (u32)status);
if (0 != (isr & (MASK_27))) {
- DEB_INT(("irq: RPS0 (0x%08x).\n",isr));
+ DEB_INT("irq: RPS0 (0x%08x)\n", isr);
saa7146_video_uops.irq_done(dev,isr);
}
if (0 != (isr & (MASK_28))) {
u32 mc2 = saa7146_read(dev, MC2);
if( 0 != (mc2 & MASK_15)) {
- DEB_INT(("irq: RPS1 vbi workaround (0x%08x).\n",isr));
+ DEB_INT("irq: RPS1 vbi workaround (0x%08x)\n", isr);
wake_up(&dev->vv_data->vbi_wq);
saa7146_write(dev,MC2, MASK_31);
return;
}
- DEB_INT(("irq: RPS1 (0x%08x).\n",isr));
+ DEB_INT("irq: RPS1 (0x%08x)\n", isr);
saa7146_vbi_uops.irq_done(dev,isr);
}
}
@@ -429,13 +440,13 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
if (vv == NULL) {
- ERR(("out of memory. aborting.\n"));
+ ERR("out of memory. aborting.\n");
return -ENOMEM;
}
ext_vv->ops = saa7146_video_ioctl_ops;
ext_vv->core_ops = &saa7146_video_ioctl_ops;
- DEB_EE(("dev:%p\n",dev));
+ DEB_EE("dev:%p\n", dev);
/* set default values for video parts of the saa7146 */
saa7146_write(dev, BCS_CTRL, 0x80400040);
@@ -450,7 +461,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle);
if( NULL == vv->d_clipping.cpu_addr ) {
- ERR(("out of memory. aborting.\n"));
+ ERR("out of memory. aborting.\n");
kfree(vv);
return -1;
}
@@ -471,7 +482,7 @@ int saa7146_vv_release(struct saa7146_dev* dev)
{
struct saa7146_vv *vv = dev->vv_data;
- DEB_EE(("dev:%p\n",dev));
+ DEB_EE("dev:%p\n", dev);
v4l2_device_unregister(&dev->v4l2_dev);
pci_free_consistent(dev->pci, SAA7146_CLIPPING_MEM, vv->d_clipping.cpu_addr, vv->d_clipping.dma_handle);
@@ -490,7 +501,7 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
int err;
int i;
- DEB_EE(("dev:%p, name:'%s', type:%d\n",dev,name,type));
+ DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type);
// released by vfd->release
vfd = video_device_alloc();
@@ -509,13 +520,13 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
err = video_register_device(vfd, type, -1);
if (err < 0) {
- ERR(("cannot register v4l2 device. skipping.\n"));
+ ERR("cannot register v4l2 device. skipping.\n");
video_device_release(vfd);
return err;
}
- INFO(("%s: registered device %s [v4l2]\n",
- dev->name, video_device_node_name(vfd)));
+ pr_info("%s: registered device %s [v4l2]\n",
+ dev->name, video_device_node_name(vfd));
*vid = vfd;
return 0;
@@ -524,7 +535,7 @@ EXPORT_SYMBOL_GPL(saa7146_register_device);
int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev)
{
- DEB_EE(("dev:%p\n",dev));
+ DEB_EE("dev:%p\n", dev);
video_unregister_device(*vid);
*vid = NULL;
diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
index 1d1d8d20075..bc1f545c95c 100644
--- a/drivers/media/common/saa7146_hlp.c
+++ b/drivers/media/common/saa7146_hlp.c
@@ -1,4 +1,7 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
+#include <linux/export.h>
#include <media/saa7146_vv.h>
static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
@@ -711,8 +714,8 @@ static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa71
int depth = sfmt->depth;
- DEB_CAP(("[size=%dx%d,fields=%s]\n",
- width,height,v4l2_field_names[field]));
+ DEB_CAP("[size=%dx%d,fields=%s]\n",
+ width, height, v4l2_field_names[field]);
if( bytesperline != 0) {
vdma1.pitch = bytesperline*2;
@@ -837,8 +840,8 @@ static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa71
BUG_ON(0 == buf->pt[1].dma);
BUG_ON(0 == buf->pt[2].dma);
- DEB_CAP(("[size=%dx%d,fields=%s]\n",
- width,height,v4l2_field_names[field]));
+ DEB_CAP("[size=%dx%d,fields=%s]\n",
+ width, height, v4l2_field_names[field]);
/* fixme: look at bytesperline! */
@@ -998,12 +1001,12 @@ void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struc
struct saa7146_vv *vv = dev->vv_data;
u32 vdma1_prot_addr;
- DEB_CAP(("buf:%p, next:%p\n",buf,next));
+ DEB_CAP("buf:%p, next:%p\n", buf, next);
vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
if( 0 == vdma1_prot_addr ) {
/* clear out beginning of streaming bit (rps register 0)*/
- DEB_CAP(("forcing sync to new frame\n"));
+ DEB_CAP("forcing sync to new frame\n");
saa7146_write(dev, MC2, MASK_27 );
}
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
index b2ba9dc0dd6..22027198129 100644
--- a/drivers/media/common/saa7146_i2c.c
+++ b/drivers/media/common/saa7146_i2c.c
@@ -1,8 +1,10 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146_vv.h>
static u32 saa7146_i2c_func(struct i2c_adapter *adapter)
{
-//fm DEB_I2C(("'%s'.\n", adapter->name));
+ /* DEB_I2C("'%s'\n", adapter->name); */
return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_QUICK
@@ -14,9 +16,7 @@ static u32 saa7146_i2c_func(struct i2c_adapter *adapter)
static inline u32 saa7146_i2c_status(struct saa7146_dev *dev)
{
u32 iicsta = saa7146_read(dev, I2C_STATUS);
-/*
- DEB_I2C(("status: 0x%08x\n",iicsta));
-*/
+ /* DEB_I2C("status: 0x%08x\n", iicsta); */
return iicsta;
}
@@ -39,10 +39,11 @@ static int saa7146_i2c_msg_prepare(const struct i2c_msg *m, int num, __le32 *op)
plus one extra byte to address the device */
mem = 1 + ((mem-1) / 3);
- /* we assume that op points to a memory of at least SAA7146_I2C_MEM bytes
- size. if we exceed this limit... */
- if ( (4*mem) > SAA7146_I2C_MEM ) {
-//fm DEB_I2C(("cannot prepare i2c-message.\n"));
+	/* we assume that op points to memory of at least
+	 * SAA7146_I2C_MEM bytes in size. if we exceed this limit...
+ */
+ if ((4 * mem) > SAA7146_I2C_MEM) {
+ /* DEB_I2C("cannot prepare i2c-message\n"); */
return -ENOMEM;
}
@@ -123,7 +124,7 @@ static int saa7146_i2c_reset(struct saa7146_dev *dev)
if ( 0 != ( status & SAA7146_I2C_BUSY) ) {
/* yes, kill ongoing operation */
- DEB_I2C(("busy_state detected.\n"));
+ DEB_I2C("busy_state detected\n");
/* set "ABORT-OPERATION"-bit (bit 7)*/
saa7146_write(dev, I2C_STATUS, (dev->i2c_bitrate | MASK_07));
@@ -141,7 +142,7 @@ static int saa7146_i2c_reset(struct saa7146_dev *dev)
if ( dev->i2c_bitrate != status ) {
- DEB_I2C(("error_state detected. status:0x%08x\n",status));
+ DEB_I2C("error_state detected. status:0x%08x\n", status);
/* Repeat the abort operation. This seems to be necessary
after serious protocol errors caused by e.g. the SAA7740 */
@@ -164,7 +165,7 @@ static int saa7146_i2c_reset(struct saa7146_dev *dev)
/* if any error is still present, a fatal error has occurred ... */
status = saa7146_i2c_status(dev);
if ( dev->i2c_bitrate != status ) {
- DEB_I2C(("fatal error. status:0x%08x\n",status));
+ DEB_I2C("fatal error. status:0x%08x\n", status);
return -1;
}
@@ -181,7 +182,8 @@ static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int shor
unsigned long timeout;
/* write out i2c-command */
- DEB_I2C(("before: 0x%08x (status: 0x%08x), %d\n",*dword,saa7146_read(dev, I2C_STATUS), dev->i2c_op));
+ DEB_I2C("before: 0x%08x (status: 0x%08x), %d\n",
+ *dword, saa7146_read(dev, I2C_STATUS), dev->i2c_op);
if( 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags)) {
@@ -202,7 +204,7 @@ static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int shor
/* a signal arrived */
return -ERESTARTSYS;
- printk(KERN_WARNING "%s %s [irq]: timed out waiting for end of xfer\n",
+ pr_warn("%s %s [irq]: timed out waiting for end of xfer\n",
dev->name, __func__);
return -EIO;
}
@@ -220,7 +222,7 @@ static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int shor
break;
}
if (time_after(jiffies,timeout)) {
- printk(KERN_WARNING "%s %s: timed out waiting for MC2\n",
+ pr_warn("%s %s: timed out waiting for MC2\n",
dev->name, __func__);
return -EIO;
}
@@ -237,7 +239,7 @@ static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int shor
/* this is normal when probing the bus
* (no answer from nonexistent device...)
*/
- printk(KERN_WARNING "%s %s [poll]: timed out waiting for end of xfer\n",
+ pr_warn("%s %s [poll]: timed out waiting for end of xfer\n",
dev->name, __func__);
return -EIO;
}
@@ -257,24 +259,24 @@ static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int shor
if ( 0 == (status & SAA7146_I2C_ERR) ||
0 == (status & SAA7146_I2C_BUSY) ) {
/* it may take some time until ERR goes high - ignore */
- DEB_I2C(("unexpected i2c status %04x\n", status));
+ DEB_I2C("unexpected i2c status %04x\n", status);
}
if( 0 != (status & SAA7146_I2C_SPERR) ) {
- DEB_I2C(("error due to invalid start/stop condition.\n"));
+ DEB_I2C("error due to invalid start/stop condition\n");
}
if( 0 != (status & SAA7146_I2C_DTERR) ) {
- DEB_I2C(("error in data transmission.\n"));
+ DEB_I2C("error in data transmission\n");
}
if( 0 != (status & SAA7146_I2C_DRERR) ) {
- DEB_I2C(("error when receiving data.\n"));
+ DEB_I2C("error when receiving data\n");
}
if( 0 != (status & SAA7146_I2C_AL) ) {
- DEB_I2C(("error because arbitration lost.\n"));
+ DEB_I2C("error because arbitration lost\n");
}
/* we handle address-errors here */
if( 0 != (status & SAA7146_I2C_APERR) ) {
- DEB_I2C(("error in address phase.\n"));
+ DEB_I2C("error in address phase\n");
return -EREMOTEIO;
}
@@ -284,7 +286,7 @@ static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int shor
/* read back data, just in case we were reading ... */
*dword = cpu_to_le32(saa7146_read(dev, I2C_TRANSFER));
- DEB_I2C(("after: 0x%08x\n",*dword));
+ DEB_I2C("after: 0x%08x\n", *dword);
return 0;
}
@@ -299,7 +301,7 @@ static int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *m
return -ERESTARTSYS;
for(i=0;i<num;i++) {
- DEB_I2C(("msg:%d/%d\n",i+1,num));
+ DEB_I2C("msg:%d/%d\n", i+1, num);
}
/* prepare the message(s), get number of u32s to transfer */
@@ -316,7 +318,7 @@ static int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *m
/* reset the i2c-device if necessary */
err = saa7146_i2c_reset(dev);
if ( 0 > err ) {
- DEB_I2C(("could not reset i2c-device.\n"));
+ DEB_I2C("could not reset i2c-device\n");
goto out;
}
@@ -336,7 +338,7 @@ static int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *m
address error and trust the saa7146 address error detection. */
if (-EREMOTEIO == err && 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags))
goto out;
- DEB_I2C(("error while sending message(s). starting again.\n"));
+ DEB_I2C("error while sending message(s). starting again\n");
break;
}
}
@@ -356,13 +358,13 @@ static int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *m
/* if any things had to be read, get the results */
if ( 0 != saa7146_i2c_msg_cleanup(msgs, num, buffer)) {
- DEB_I2C(("could not cleanup i2c-message.\n"));
+ DEB_I2C("could not cleanup i2c-message\n");
err = -1;
goto out;
}
/* return the number of delivered messages */
- DEB_I2C(("transmission successful. (msg:%d).\n",err));
+ DEB_I2C("transmission successful. (msg:%d)\n", err);
out:
/* another bug in revision 0: the i2c-registers get uploaded randomly by other
uploads, so we better clear them out before continuing */
@@ -370,7 +372,7 @@ out:
__le32 zero = 0;
saa7146_i2c_reset(dev);
if( 0 != saa7146_i2c_writeout(dev, &zero, short_delay)) {
- INFO(("revision 0 error. this should never happen.\n"));
+ pr_info("revision 0 error. this should never happen\n");
}
}
@@ -400,7 +402,7 @@ static struct i2c_algorithm saa7146_algo = {
int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate)
{
- DEB_EE(("bitrate: 0x%08x\n",bitrate));
+ DEB_EE("bitrate: 0x%08x\n", bitrate);
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24));
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index afe85801d6c..b2e71834373 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -14,7 +14,7 @@ static int vbi_workaround(struct saa7146_dev *dev)
DECLARE_WAITQUEUE(wait, current);
- DEB_VBI(("dev:%p\n",dev));
+ DEB_VBI("dev:%p\n", dev);
/* once again, a bug in the saa7146: the brs acquisition
is buggy and especially the BXO-counter does not work
@@ -40,14 +40,14 @@ static int vbi_workaround(struct saa7146_dev *dev)
WRITE_RPS1(0xc000008c);
/* wait for vbi_a or vbi_b*/
if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
- DEB_D(("...using port b\n"));
+ DEB_D("...using port b\n");
WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_E_FID_B);
WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_O_FID_B);
/*
WRITE_RPS1(CMD_PAUSE | MASK_09);
*/
} else {
- DEB_D(("...using port a\n"));
+ DEB_D("...using port a\n");
WRITE_RPS1(CMD_PAUSE | MASK_10);
}
/* upload brs */
@@ -103,7 +103,7 @@ static int vbi_workaround(struct saa7146_dev *dev)
schedule();
- DEB_VBI(("brs bug workaround %d/1.\n",i));
+ DEB_VBI("brs bug workaround %d/1\n", i);
remove_wait_queue(&vv->vbi_wq, &wait);
current->state = TASK_RUNNING;
@@ -116,7 +116,8 @@ static int vbi_workaround(struct saa7146_dev *dev)
if(signal_pending(current)) {
- DEB_VBI(("aborted (rps:0x%08x).\n",saa7146_read(dev,RPS_ADDR1)));
+ DEB_VBI("aborted (rps:0x%08x)\n",
+ saa7146_read(dev, RPS_ADDR1));
/* stop rps1 for sure */
saa7146_write(dev, MC1, MASK_29);
@@ -207,7 +208,7 @@ static int buffer_activate(struct saa7146_dev *dev,
struct saa7146_vv *vv = dev->vv_data;
buf->vb.state = VIDEOBUF_ACTIVE;
- DEB_VBI(("dev:%p, buf:%p, next:%p\n",dev,buf,next));
+ DEB_VBI("dev:%p, buf:%p, next:%p\n", dev, buf, next);
saa7146_set_vbi_capture(dev,buf,next);
mod_timer(&vv->vbi_q.timeout, jiffies+BUFFER_TIMEOUT);
@@ -228,10 +229,10 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,e
llength = vbi_pixel_to_capture;
size = lines * llength;
- DEB_VBI(("vb:%p\n",vb));
+ DEB_VBI("vb:%p\n", vb);
if (0 != buf->vb.baddr && buf->vb.bsize < size) {
- DEB_VBI(("size mismatch.\n"));
+ DEB_VBI("size mismatch\n");
return -EINVAL;
}
@@ -263,7 +264,7 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,e
return 0;
oops:
- DEB_VBI(("error out.\n"));
+ DEB_VBI("error out\n");
saa7146_dma_free(dev,q,buf);
return err;
@@ -279,7 +280,7 @@ static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned
*size = lines * llength;
*count = 2;
- DEB_VBI(("count:%d, size:%d\n",*count,*size));
+ DEB_VBI("count:%d, size:%d\n", *count, *size);
return 0;
}
@@ -292,7 +293,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_vv *vv = dev->vv_data;
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
- DEB_VBI(("vb:%p\n",vb));
+ DEB_VBI("vb:%p\n", vb);
saa7146_buffer_queue(dev,&vv->vbi_q,buf);
}
@@ -303,7 +304,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_dev *dev = fh->dev;
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
- DEB_VBI(("vb:%p\n",vb));
+ DEB_VBI("vb:%p\n", vb);
saa7146_dma_free(dev,q,buf);
}
@@ -321,7 +322,7 @@ static void vbi_stop(struct saa7146_fh *fh, struct file *file)
struct saa7146_dev *dev = fh->dev;
struct saa7146_vv *vv = dev->vv_data;
unsigned long flags;
- DEB_VBI(("dev:%p, fh:%p\n",dev, fh));
+ DEB_VBI("dev:%p, fh:%p\n", dev, fh);
spin_lock_irqsave(&dev->slock,flags);
@@ -354,14 +355,14 @@ static void vbi_read_timeout(unsigned long data)
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
- DEB_VBI(("dev:%p, fh:%p\n",dev, fh));
+ DEB_VBI("dev:%p, fh:%p\n", dev, fh);
vbi_stop(fh, file);
}
static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
- DEB_VBI(("dev:%p\n",dev));
+ DEB_VBI("dev:%p\n", dev);
INIT_LIST_HEAD(&vv->vbi_q.queue);
@@ -380,11 +381,11 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
u32 arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);
int ret = 0;
- DEB_VBI(("dev:%p, fh:%p\n",dev,fh));
+ DEB_VBI("dev:%p, fh:%p\n", dev, fh);
ret = saa7146_res_get(fh, RESOURCE_DMA3_BRS);
if (0 == ret) {
- DEB_S(("cannot get vbi RESOURCE_DMA3_BRS resource\n"));
+ DEB_S("cannot get vbi RESOURCE_DMA3_BRS resource\n");
return -EBUSY;
}
@@ -425,7 +426,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
saa7146_write(dev, BRS_CTRL, 0x00000001);
if (0 != (ret = vbi_workaround(dev))) {
- DEB_VBI(("vbi workaround failed!\n"));
+ DEB_VBI("vbi workaround failed!\n");
/* return ret;*/
}
}
@@ -439,7 +440,7 @@ static void vbi_close(struct saa7146_dev *dev, struct file *file)
{
struct saa7146_fh *fh = file->private_data;
struct saa7146_vv *vv = dev->vv_data;
- DEB_VBI(("dev:%p, fh:%p\n",dev,fh));
+ DEB_VBI("dev:%p, fh:%p\n", dev, fh);
if( fh == vv->vbi_streaming ) {
vbi_stop(fh, file);
@@ -453,13 +454,13 @@ static void vbi_irq_done(struct saa7146_dev *dev, unsigned long status)
spin_lock(&dev->slock);
if (vv->vbi_q.curr) {
- DEB_VBI(("dev:%p, curr:%p\n",dev,vv->vbi_q.curr));
+ DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_q.curr);
/* this must be += 2, one count for each field */
vv->vbi_fieldcount+=2;
vv->vbi_q.curr->vb.field_count = vv->vbi_fieldcount;
saa7146_buffer_finish(dev,&vv->vbi_q,VIDEOBUF_DONE);
} else {
- DEB_VBI(("dev:%p\n",dev));
+ DEB_VBI("dev:%p\n", dev);
}
saa7146_buffer_next(dev,&vv->vbi_q,1);
@@ -473,7 +474,7 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
struct saa7146_vv *vv = dev->vv_data;
ssize_t ret = 0;
- DEB_VBI(("dev:%p, fh:%p\n",dev,fh));
+ DEB_VBI("dev:%p, fh:%p\n", dev, fh);
if( NULL == vv->vbi_streaming ) {
// fixme: check if dma3 is available
@@ -482,7 +483,8 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
}
if( fh != vv->vbi_streaming ) {
- DEB_VBI(("open %p is already using vbi capture.",vv->vbi_streaming));
+ DEB_VBI("open %p is already using vbi capture\n",
+ vv->vbi_streaming);
return -EBUSY;
}
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 9aafa4e969a..ce30533fd97 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1,5 +1,8 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146_vv.h>
#include <media/v4l2-chip-ident.h>
+#include <linux/module.h>
static int max_memory = 32;
@@ -94,7 +97,7 @@ struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fou
}
}
- DEB_D(("unknown pixelformat:'%4.4s'\n",(char *)&fourcc));
+ DEB_D("unknown pixelformat:'%4.4s'\n", (char *)&fourcc);
return NULL;
}
@@ -107,32 +110,32 @@ int saa7146_start_preview(struct saa7146_fh *fh)
struct v4l2_format fmt;
int ret = 0, err = 0;
- DEB_EE(("dev:%p, fh:%p\n",dev,fh));
+ DEB_EE("dev:%p, fh:%p\n", dev, fh);
/* check if we have overlay information */
if( NULL == fh->ov.fh ) {
- DEB_D(("no overlay data available. try S_FMT first.\n"));
+ DEB_D("no overlay data available. try S_FMT first.\n");
return -EAGAIN;
}
/* check if streaming capture is running */
if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D(("streaming capture is active.\n"));
+ DEB_D("streaming capture is active\n");
return -EBUSY;
}
/* check if overlay is running */
if (IS_OVERLAY_ACTIVE(fh) != 0) {
if (vv->video_fh == fh) {
- DEB_D(("overlay is already active.\n"));
+ DEB_D("overlay is already active\n");
return 0;
}
- DEB_D(("overlay is already active in another open.\n"));
+ DEB_D("overlay is already active in another open\n");
return -EBUSY;
}
if (0 == saa7146_res_get(fh, RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP)) {
- DEB_D(("cannot get necessary overlay resources\n"));
+ DEB_D("cannot get necessary overlay resources\n");
return -EBUSY;
}
@@ -145,13 +148,13 @@ int saa7146_start_preview(struct saa7146_fh *fh)
fh->ov.win = fmt.fmt.win;
vv->ov_data = &fh->ov;
- DEB_D(("%dx%d+%d+%d %s field=%s\n",
- fh->ov.win.w.width,fh->ov.win.w.height,
- fh->ov.win.w.left,fh->ov.win.w.top,
- vv->ov_fmt->name,v4l2_field_names[fh->ov.win.field]));
+ DEB_D("%dx%d+%d+%d %s field=%s\n",
+ fh->ov.win.w.width, fh->ov.win.w.height,
+ fh->ov.win.w.left, fh->ov.win.w.top,
+ vv->ov_fmt->name, v4l2_field_names[fh->ov.win.field]);
if (0 != (ret = saa7146_enable_overlay(fh))) {
- DEB_D(("enabling overlay failed: %d\n",ret));
+ DEB_D("enabling overlay failed: %d\n", ret);
saa7146_res_free(vv->video_fh, RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP);
return ret;
}
@@ -168,22 +171,22 @@ int saa7146_stop_preview(struct saa7146_fh *fh)
struct saa7146_dev *dev = fh->dev;
struct saa7146_vv *vv = dev->vv_data;
- DEB_EE(("dev:%p, fh:%p\n",dev,fh));
+ DEB_EE("dev:%p, fh:%p\n", dev, fh);
/* check if streaming capture is running */
if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D(("streaming capture is active.\n"));
+ DEB_D("streaming capture is active\n");
return -EBUSY;
}
/* check if overlay is running at all */
if ((vv->video_status & STATUS_OVERLAY) == 0) {
- DEB_D(("no active overlay.\n"));
+ DEB_D("no active overlay\n");
return 0;
}
if (vv->video_fh != fh) {
- DEB_D(("overlay is active, but in another open.\n"));
+ DEB_D("overlay is active, but in another open\n");
return -EBUSY;
}
@@ -268,7 +271,7 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
int length = dma->sglen;
struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
- DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length));
+ DEB_EE("dev:%p, buf:%p, sg_len:%d\n", dev, buf, length);
if( 0 != IS_PLANAR(sfmt->trans)) {
struct saa7146_pgtable *pt1 = &buf->pt[0];
@@ -288,7 +291,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
m3 = ((size+(size/2)+PAGE_SIZE)/PAGE_SIZE)-1;
o1 = size%PAGE_SIZE;
o2 = (size+(size/4))%PAGE_SIZE;
- DEB_CAP(("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",size,m1,m2,m3,o1,o2));
+ DEB_CAP("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",
+ size, m1, m2, m3, o1, o2);
break;
}
case 16: {
@@ -298,7 +302,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
m3 = ((2*size+PAGE_SIZE)/PAGE_SIZE)-1;
o1 = size%PAGE_SIZE;
o2 = (size+(size/2))%PAGE_SIZE;
- DEB_CAP(("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",size,m1,m2,m3,o1,o2));
+ DEB_CAP("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",
+ size, m1, m2, m3, o1, o2);
break;
}
default: {
@@ -387,23 +392,23 @@ static int video_begin(struct saa7146_fh *fh)
unsigned int resource;
int ret = 0, err = 0;
- DEB_EE(("dev:%p, fh:%p\n",dev,fh));
+ DEB_EE("dev:%p, fh:%p\n", dev, fh);
if ((vv->video_status & STATUS_CAPTURE) != 0) {
if (vv->video_fh == fh) {
- DEB_S(("already capturing.\n"));
+ DEB_S("already capturing\n");
return 0;
}
- DEB_S(("already capturing in another open.\n"));
+ DEB_S("already capturing in another open\n");
return -EBUSY;
}
if ((vv->video_status & STATUS_OVERLAY) != 0) {
- DEB_S(("warning: suspending overlay video for streaming capture.\n"));
+ DEB_S("warning: suspending overlay video for streaming capture\n");
vv->ov_suspend = vv->video_fh;
err = saa7146_stop_preview(vv->video_fh); /* side effect: video_status is now 0, video_fh is NULL */
if (0 != err) {
- DEB_D(("suspending video failed. aborting\n"));
+ DEB_D("suspending video failed. aborting\n");
return err;
}
}
@@ -420,7 +425,7 @@ static int video_begin(struct saa7146_fh *fh)
ret = saa7146_res_get(fh, resource);
if (0 == ret) {
- DEB_S(("cannot get capture resource %d\n",resource));
+ DEB_S("cannot get capture resource %d\n", resource);
if (vv->ov_suspend != NULL) {
saa7146_start_preview(vv->ov_suspend);
vv->ov_suspend = NULL;
@@ -448,15 +453,15 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
unsigned long flags;
unsigned int resource;
u32 dmas = 0;
- DEB_EE(("dev:%p, fh:%p\n",dev,fh));
+ DEB_EE("dev:%p, fh:%p\n", dev, fh);
if ((vv->video_status & STATUS_CAPTURE) != STATUS_CAPTURE) {
- DEB_S(("not capturing.\n"));
+ DEB_S("not capturing\n");
return 0;
}
if (vv->video_fh != fh) {
- DEB_S(("capturing, but in another open.\n"));
+ DEB_S("capturing, but in another open\n");
return -EBUSY;
}
@@ -530,7 +535,7 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
struct saa7146_vv *vv = dev->vv_data;
struct saa7146_format *fmt;
- DEB_EE(("VIDIOC_S_FBUF\n"));
+ DEB_EE("VIDIOC_S_FBUF\n");
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -542,13 +547,13 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
/* planar formats are not allowed for overlay video, clipping and video dma would clash */
if (fmt->flags & FORMAT_IS_PLANAR)
- DEB_S(("planar pixelformat '%4.4s' not allowed for overlay\n",
- (char *)&fmt->pixelformat));
+ DEB_S("planar pixelformat '%4.4s' not allowed for overlay\n",
+ (char *)&fmt->pixelformat);
/* check if overlay is running */
if (IS_OVERLAY_ACTIVE(fh) != 0) {
if (vv->video_fh != fh) {
- DEB_D(("refusing to change framebuffer informations while overlay is active in another open.\n"));
+ DEB_D("refusing to change framebuffer informations while overlay is active in another open\n");
return -EBUSY;
}
}
@@ -559,7 +564,7 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
if (vv->ov_fb.fmt.bytesperline < vv->ov_fb.fmt.width) {
vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
- DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
+ DEB_D("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline);
}
return 0;
}
@@ -588,7 +593,7 @@ static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *
if (ctrl == NULL)
return -EINVAL;
- DEB_EE(("VIDIOC_QUERYCTRL: id:%d\n", c->id));
+ DEB_EE("VIDIOC_QUERYCTRL: id:%d\n", c->id);
*c = *ctrl;
return 0;
}
@@ -607,25 +612,25 @@ static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
case V4L2_CID_BRIGHTNESS:
value = saa7146_read(dev, BCS_CTRL);
c->value = 0xff & (value >> 24);
- DEB_D(("V4L2_CID_BRIGHTNESS: %d\n", c->value));
+ DEB_D("V4L2_CID_BRIGHTNESS: %d\n", c->value);
break;
case V4L2_CID_CONTRAST:
value = saa7146_read(dev, BCS_CTRL);
c->value = 0x7f & (value >> 16);
- DEB_D(("V4L2_CID_CONTRAST: %d\n", c->value));
+ DEB_D("V4L2_CID_CONTRAST: %d\n", c->value);
break;
case V4L2_CID_SATURATION:
value = saa7146_read(dev, BCS_CTRL);
c->value = 0x7f & (value >> 0);
- DEB_D(("V4L2_CID_SATURATION: %d\n", c->value));
+ DEB_D("V4L2_CID_SATURATION: %d\n", c->value);
break;
case V4L2_CID_VFLIP:
c->value = vv->vflip;
- DEB_D(("V4L2_CID_VFLIP: %d\n", c->value));
+ DEB_D("V4L2_CID_VFLIP: %d\n", c->value);
break;
case V4L2_CID_HFLIP:
c->value = vv->hflip;
- DEB_D(("V4L2_CID_HFLIP: %d\n", c->value));
+ DEB_D("V4L2_CID_HFLIP: %d\n", c->value);
break;
default:
return -EINVAL;
@@ -641,7 +646,7 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
ctrl = ctrl_by_id(c->id);
if (NULL == ctrl) {
- DEB_D(("unknown control %d\n", c->id));
+ DEB_D("unknown control %d\n", c->id);
return -EINVAL;
}
@@ -686,14 +691,14 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
case V4L2_CID_HFLIP:
/* fixme: we can support changing VFLIP and HFLIP here... */
if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
+ DEB_D("V4L2_CID_HFLIP while active capture\n");
return -EBUSY;
}
vv->hflip = c->value;
break;
case V4L2_CID_VFLIP:
if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
+ DEB_D("V4L2_CID_VFLIP while active capture\n");
return -EBUSY;
}
vv->vflip = c->value;
@@ -748,7 +753,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_forma
int maxw, maxh;
int calc_bpl;
- DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh));
+ DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh);
fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat);
if (NULL == fmt)
@@ -777,7 +782,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_forma
vv->last_field = V4L2_FIELD_INTERLACED;
break;
default:
- DEB_D(("no known field mode '%d'.\n", field));
+ DEB_D("no known field mode '%d'\n", field);
return -EINVAL;
}
@@ -796,8 +801,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_forma
f->fmt.pix.bytesperline = calc_bpl;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
- DEB_D(("w:%d, h:%d, bytesperline:%d, sizeimage:%d\n", f->fmt.pix.width,
- f->fmt.pix.height, f->fmt.pix.bytesperline, f->fmt.pix.sizeimage));
+ DEB_D("w:%d, h:%d, bytesperline:%d, sizeimage:%d\n",
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
return 0;
}
@@ -811,22 +817,23 @@ static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh, struct v4l2_f
enum v4l2_field field;
int maxw, maxh;
- DEB_EE(("dev:%p\n", dev));
+ DEB_EE("dev:%p\n", dev);
if (NULL == vv->ov_fb.base) {
- DEB_D(("no fb base set.\n"));
+ DEB_D("no fb base set\n");
return -EINVAL;
}
if (NULL == vv->ov_fmt) {
- DEB_D(("no fb fmt set.\n"));
+ DEB_D("no fb fmt set\n");
return -EINVAL;
}
if (win->w.width < 48 || win->w.height < 32) {
- DEB_D(("min width/height. (%d,%d)\n", win->w.width, win->w.height));
+ DEB_D("min width/height. (%d,%d)\n",
+ win->w.width, win->w.height);
return -EINVAL;
}
if (win->clipcount > 16) {
- DEB_D(("clipcount too big.\n"));
+ DEB_D("clipcount too big\n");
return -EINVAL;
}
@@ -848,7 +855,7 @@ static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh, struct v4l2_f
case V4L2_FIELD_INTERLACED:
break;
default:
- DEB_D(("no known field mode '%d'.\n", field));
+ DEB_D("no known field mode '%d'\n", field);
return -EINVAL;
}
@@ -868,16 +875,17 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_forma
struct saa7146_vv *vv = dev->vv_data;
int err;
- DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh));
+ DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh);
if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_EE(("streaming capture is active\n"));
+ DEB_EE("streaming capture is active\n");
return -EBUSY;
}
err = vidioc_try_fmt_vid_cap(file, fh, f);
if (0 != err)
return err;
fh->video_fmt = f->fmt.pix;
- DEB_EE(("set to pixelformat '%4.4s'\n", (char *)&fh->video_fmt.pixelformat));
+ DEB_EE("set to pixelformat '%4.4s'\n",
+ (char *)&fh->video_fmt.pixelformat);
return 0;
}
@@ -888,7 +896,7 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f
struct saa7146_vv *vv = dev->vv_data;
int err;
- DEB_EE(("V4L2_BUF_TYPE_VIDEO_OVERLAY: dev:%p, fh:%p\n", dev, fh));
+ DEB_EE("V4L2_BUF_TYPE_VIDEO_OVERLAY: dev:%p, fh:%p\n", dev, fh);
err = vidioc_try_fmt_vid_overlay(file, fh, f);
if (0 != err)
return err;
@@ -931,7 +939,7 @@ static int vidioc_g_std(struct file *file, void *fh, v4l2_std_id *norm)
if (e->index < 0 )
return -EINVAL;
if( e->index < dev->ext_vv_data->num_stds ) {
- DEB_EE(("VIDIOC_ENUMSTD: index:%d\n",e->index));
+ DEB_EE("VIDIOC_ENUMSTD: index:%d\n", e->index);
v4l2_video_std_construct(e, dev->ext_vv_data->stds[e->index].id, dev->ext_vv_data->stds[e->index].name);
return 0;
}
@@ -946,10 +954,10 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
int found = 0;
int err, i;
- DEB_EE(("VIDIOC_S_STD\n"));
+ DEB_EE("VIDIOC_S_STD\n");
if ((vv->video_status & STATUS_CAPTURE) == STATUS_CAPTURE) {
- DEB_D(("cannot change video standard while streaming capture is active\n"));
+ DEB_D("cannot change video standard while streaming capture is active\n");
return -EBUSY;
}
@@ -957,7 +965,7 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
vv->ov_suspend = vv->video_fh;
err = saa7146_stop_preview(vv->video_fh); /* side effect: video_status is now 0, video_fh is NULL */
if (0 != err) {
- DEB_D(("suspending video failed. aborting\n"));
+ DEB_D("suspending video failed. aborting\n");
return err;
}
}
@@ -978,11 +986,11 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
}
if (!found) {
- DEB_EE(("VIDIOC_S_STD: standard not found.\n"));
+ DEB_EE("VIDIOC_S_STD: standard not found\n");
return -EINVAL;
}
- DEB_EE(("VIDIOC_S_STD: set to standard to '%s'\n", vv->standard->name));
+ DEB_EE("VIDIOC_S_STD: set to standard to '%s'\n", vv->standard->name);
return 0;
}
@@ -990,7 +998,7 @@ static int vidioc_overlay(struct file *file, void *fh, unsigned int on)
{
int err;
- DEB_D(("VIDIOC_OVERLAY on:%d\n", on));
+ DEB_D("VIDIOC_OVERLAY on:%d\n", on);
if (on)
err = saa7146_start_preview(fh);
else
@@ -1047,7 +1055,7 @@ static int vidioc_streamon(struct file *file, void *__fh, enum v4l2_buf_type typ
struct saa7146_fh *fh = __fh;
int err;
- DEB_D(("VIDIOC_STREAMON, type:%d\n", type));
+ DEB_D("VIDIOC_STREAMON, type:%d\n", type);
err = video_begin(fh);
if (err)
@@ -1066,18 +1074,18 @@ static int vidioc_streamoff(struct file *file, void *__fh, enum v4l2_buf_type ty
struct saa7146_vv *vv = dev->vv_data;
int err;
- DEB_D(("VIDIOC_STREAMOFF, type:%d\n", type));
+ DEB_D("VIDIOC_STREAMOFF, type:%d\n", type);
/* ugly: we need to copy some checks from video_end(),
because videobuf_streamoff() relies on the capture running.
check and fix this */
if ((vv->video_status & STATUS_CAPTURE) != STATUS_CAPTURE) {
- DEB_S(("not capturing.\n"));
+ DEB_S("not capturing\n");
return 0;
}
if (vv->video_fh != fh) {
- DEB_S(("capturing, but in another open.\n"));
+ DEB_S("capturing, but in another open\n");
return -EBUSY;
}
@@ -1087,7 +1095,7 @@ static int vidioc_streamoff(struct file *file, void *__fh, enum v4l2_buf_type ty
else if (type == V4L2_BUF_TYPE_VBI_CAPTURE)
err = videobuf_streamoff(&fh->vbi_q);
if (0 != err) {
- DEB_D(("warning: videobuf_streamoff() failed.\n"));
+ DEB_D("warning: videobuf_streamoff() failed\n");
video_end(fh, file);
} else {
err = video_end(fh, file);
@@ -1174,25 +1182,27 @@ static int buffer_prepare(struct videobuf_queue *q,
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
int size,err = 0;
- DEB_CAP(("vbuf:%p\n",vb));
+ DEB_CAP("vbuf:%p\n", vb);
/* sanity checks */
if (fh->video_fmt.width < 48 ||
fh->video_fmt.height < 32 ||
fh->video_fmt.width > vv->standard->h_max_out ||
fh->video_fmt.height > vv->standard->v_max_out) {
- DEB_D(("w (%d) / h (%d) out of bounds.\n",fh->video_fmt.width,fh->video_fmt.height));
+ DEB_D("w (%d) / h (%d) out of bounds\n",
+ fh->video_fmt.width, fh->video_fmt.height);
return -EINVAL;
}
size = fh->video_fmt.sizeimage;
if (0 != buf->vb.baddr && buf->vb.bsize < size) {
- DEB_D(("size mismatch.\n"));
+ DEB_D("size mismatch\n");
return -EINVAL;
}
- DEB_CAP(("buffer_prepare [size=%dx%d,bytes=%d,fields=%s]\n",
- fh->video_fmt.width,fh->video_fmt.height,size,v4l2_field_names[fh->video_fmt.field]));
+ DEB_CAP("buffer_prepare [size=%dx%d,bytes=%d,fields=%s]\n",
+ fh->video_fmt.width, fh->video_fmt.height,
+ size, v4l2_field_names[fh->video_fmt.field]);
if (buf->vb.width != fh->video_fmt.width ||
buf->vb.bytesperline != fh->video_fmt.bytesperline ||
buf->vb.height != fh->video_fmt.height ||
@@ -1238,7 +1248,7 @@ static int buffer_prepare(struct videobuf_queue *q,
return 0;
oops:
- DEB_D(("error out.\n"));
+ DEB_D("error out\n");
saa7146_dma_free(dev,q,buf);
return err;
@@ -1259,7 +1269,7 @@ static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned
*count = (max_memory*1048576) / *size;
}
- DEB_CAP(("%d buffers, %d bytes each.\n",*count,*size));
+ DEB_CAP("%d buffers, %d bytes each\n", *count, *size);
return 0;
}
@@ -1272,7 +1282,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_vv *vv = dev->vv_data;
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
- DEB_CAP(("vbuf:%p\n",vb));
+ DEB_CAP("vbuf:%p\n", vb);
saa7146_buffer_queue(fh->dev,&vv->video_q,buf);
}
@@ -1283,7 +1293,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_dev *dev = fh->dev;
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
- DEB_CAP(("vbuf:%p\n",vb));
+ DEB_CAP("vbuf:%p\n", vb);
saa7146_dma_free(dev,q,buf);
@@ -1347,18 +1357,14 @@ static void video_close(struct saa7146_dev *dev, struct file *file)
struct saa7146_fh *fh = file->private_data;
struct saa7146_vv *vv = dev->vv_data;
struct videobuf_queue *q = &fh->video_q;
- int err;
- if (IS_CAPTURE_ACTIVE(fh) != 0) {
- err = video_end(fh, file);
- } else if (IS_OVERLAY_ACTIVE(fh) != 0) {
- err = saa7146_stop_preview(fh);
- }
+ if (IS_CAPTURE_ACTIVE(fh) != 0)
+ video_end(fh, file);
+ else if (IS_OVERLAY_ACTIVE(fh) != 0)
+ saa7146_stop_preview(fh);
videobuf_stop(q);
-
/* hmm, why is this function declared void? */
- /* return err */
}
@@ -1368,7 +1374,7 @@ static void video_irq_done(struct saa7146_dev *dev, unsigned long st)
struct saa7146_dmaqueue *q = &vv->video_q;
spin_lock(&dev->slock);
- DEB_CAP(("called.\n"));
+ DEB_CAP("called\n");
/* only finish the buffer if we have one... */
if( NULL != q->curr ) {
@@ -1386,15 +1392,15 @@ static ssize_t video_read(struct file *file, char __user *data, size_t count, lo
struct saa7146_vv *vv = dev->vv_data;
ssize_t ret = 0;
- DEB_EE(("called.\n"));
+ DEB_EE("called\n");
if ((vv->video_status & STATUS_CAPTURE) != 0) {
/* fixme: should we allow read() captures while streaming capture? */
if (vv->video_fh == fh) {
- DEB_S(("already capturing.\n"));
+ DEB_S("already capturing\n");
return -EBUSY;
}
- DEB_S(("already capturing in another open.\n"));
+ DEB_S("already capturing in another open\n");
return -EBUSY;
}
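
The DEB_D/DEB_EE/DEB_S/DEB_CAP conversions above all make the same change: the old double-parenthesis calling convention is replaced by ordinary variadic macros. A minimal sketch of the two styles, assuming a simple printf-backed debug flag rather than the driver's real macro definitions:

#include <stdio.h>

static int debug = 1;

/* old style: one macro argument, so callers need double parentheses */
#define DEB_OLD(x)	do { if (debug) printf x; } while (0)

/* new style: variadic macro, called exactly like printf
 * (##__VA_ARGS__ is a GNU extension, as used throughout the kernel) */
#define DEB_NEW(fmt, ...) \
	do { if (debug) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
	int w = 720, h = 576;

	DEB_OLD(("old: %dx%d\n", w, h));	/* note the extra parentheses */
	DEB_NEW("new: %dx%d\n", w, h);		/* reads like a normal call */
	return 0;
}

The old form needs the double parentheses because the whole argument list is smuggled through as a single macro argument; the variadic form reads and greps like a regular function call, which is what the hunks above switch to.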
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index 20d24fca2cf..196c12a55f9 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -28,5 +28,5 @@ obj-$(CONFIG_MEDIA_TUNER_MAX2165) += max2165.o
obj-$(CONFIG_MEDIA_TUNER_TDA18218) += tda18218.o
obj-$(CONFIG_MEDIA_TUNER_TDA18212) += tda18212.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/common/tuners/mt20xx.c b/drivers/media/common/tuners/mt20xx.c
index d0e70e10a71..0e74e97e0d1 100644
--- a/drivers/media/common/tuners/mt20xx.c
+++ b/drivers/media/common/tuners/mt20xx.c
@@ -430,11 +430,10 @@ static void mt2050_set_antenna(struct dvb_frontend *fe, unsigned char antenna)
{
struct microtune_priv *priv = fe->tuner_priv;
unsigned char buf[2];
- int ret;
buf[0] = 6;
buf[1] = antenna ? 0x11 : 0x10;
- ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,2);
+ tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
tuner_dbg("mt2050: enabled antenna connector %d\n", antenna);
}
@@ -574,21 +573,20 @@ static int mt2050_init(struct dvb_frontend *fe)
{
struct microtune_priv *priv = fe->tuner_priv;
unsigned char buf[2];
- int ret;
- buf[0]=6;
- buf[1]=0x10;
- ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,2); // power
+ buf[0] = 6;
+ buf[1] = 0x10;
+ tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); /* power */
- buf[0]=0x0f;
- buf[1]=0x0f;
- ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,2); // m1lo
+ buf[0] = 0x0f;
+ buf[1] = 0x0f;
+ tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); /* m1lo */
- buf[0]=0x0d;
- ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,1);
- tuner_i2c_xfer_recv(&priv->i2c_props,buf,1);
+ buf[0] = 0x0d;
+ tuner_i2c_xfer_send(&priv->i2c_props, buf, 1);
+ tuner_i2c_xfer_recv(&priv->i2c_props, buf, 1);
- tuner_dbg("mt2050: sro is %x\n",buf[0]);
+ tuner_dbg("mt2050: sro is %x\n", buf[0]);
memcpy(&fe->ops.tuner_ops, &mt2050_tuner_ops, sizeof(struct dvb_tuner_ops));
diff --git a/drivers/media/common/tuners/mxl5005s.c b/drivers/media/common/tuners/mxl5005s.c
index 56fe75c94de..54be9e6faaa 100644
--- a/drivers/media/common/tuners/mxl5005s.c
+++ b/drivers/media/common/tuners/mxl5005s.c
@@ -309,7 +309,6 @@ static u16 MXL_ControlWrite_Group(struct dvb_frontend *fe, u16 controlNum,
static u16 MXL_SetGPIO(struct dvb_frontend *fe, u8 GPIO_Num, u8 GPIO_Val);
static u16 MXL_GetInitRegister(struct dvb_frontend *fe, u8 *RegNum,
u8 *RegVal, int *count);
-static u32 MXL_GetXtalInt(u32 Xtal_Freq);
static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq);
static void MXL_SynthIFLO_Calc(struct dvb_frontend *fe);
static void MXL_SynthRFTGLO_Calc(struct dvb_frontend *fe);
@@ -2307,14 +2306,6 @@ static u16 MXL_IFSynthInit(struct dvb_frontend *fe)
return status ;
}
-static u32 MXL_GetXtalInt(u32 Xtal_Freq)
-{
- if ((Xtal_Freq % 1000000) == 0)
- return (Xtal_Freq / 10000);
- else
- return (((Xtal_Freq / 1000000) + 1)*100);
-}
-
static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq)
{
struct mxl5005s_state *state = fe->tuner_priv;
@@ -2324,13 +2315,10 @@ static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq)
u32 Kdbl_RF = 2;
u32 tg_divval;
u32 tg_lo;
- u32 Xtal_Int;
u32 Fref_TG;
u32 Fvco;
- Xtal_Int = MXL_GetXtalInt(state->Fxtal);
-
state->RF_IN = RF_Freq;
MXL_SynthRFTGLO_Calc(fe);
@@ -2779,6 +2767,16 @@ static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq)
tg_lo = (((Fmax/10 - Fvco)/100)*32) / ((Fmax-Fmin)/1000)+8;
/* below equation is same as above but much harder to debug.
+ *
+ * static u32 MXL_GetXtalInt(u32 Xtal_Freq)
+ * {
+ * if ((Xtal_Freq % 1000000) == 0)
+ * return (Xtal_Freq / 10000);
+ * else
+ * return (((Xtal_Freq / 1000000) + 1)*100);
+ * }
+ *
+ * u32 Xtal_Int = MXL_GetXtalInt(state->Fxtal);
* tg_lo = ( ((Fmax/10000 * Xtal_Int)/100) -
* ((state->TG_LO/10000)*divider_val *
* (state->Fxtal/10000)/100) )*32/((Fmax-Fmin)/10000 *
diff --git a/drivers/media/common/tuners/tda18212.c b/drivers/media/common/tuners/tda18212.c
index 1f1db20d46b..e29cc2bc113 100644
--- a/drivers/media/common/tuners/tda18212.c
+++ b/drivers/media/common/tuners/tda18212.c
@@ -18,7 +18,20 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include "tda18212_priv.h"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "tda18212.h"
+
+struct tda18212_priv {
+ struct tda18212_config *cfg;
+ struct i2c_adapter *i2c;
+};
+
+#define dbg(fmt, arg...) \
+do { \
+ if (debug) \
+ pr_info("%s: " fmt, __func__, ##arg); \
+} while (0)
static int debug;
module_param(debug, int, 0644);
@@ -46,7 +59,8 @@ static int tda18212_wr_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
if (ret == 1) {
ret = 0;
} else {
- warn("i2c wr failed ret:%d reg:%02x len:%d", ret, reg, len);
+ pr_warn("i2c wr failed ret:%d reg:%02x len:%d\n",
+ ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -77,7 +91,8 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
memcpy(val, buf, len);
ret = 0;
} else {
- warn("i2c rd failed ret:%d reg:%02x len:%d", ret, reg, len);
+ pr_warn("i2c rd failed ret:%d reg:%02x len:%d\n",
+ ret, reg, len);
ret = -EREMOTEIO;
}
@@ -129,8 +144,8 @@ static int tda18212_set_params(struct dvb_frontend *fe,
{ 0x92, 0x53, 0x03 }, /* DVB-C */
};
- dbg("%s: delsys=%d RF=%d BW=%d", __func__,
- c->delivery_system, c->frequency, c->bandwidth_hz);
+ dbg("delsys=%d RF=%d BW=%d\n",
+ c->delivery_system, c->frequency, c->bandwidth_hz);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
@@ -196,7 +211,7 @@ exit:
return ret;
error:
- dbg("%s: failed:%d", __func__, ret);
+ dbg("failed:%d\n", ret);
goto exit;
}
@@ -245,13 +260,13 @@ struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
- dbg("%s: ret:%d chip ID:%02x", __func__, ret, val);
+ dbg("ret:%d chip ID:%02x\n", ret, val);
if (ret || val != 0xc7) {
kfree(priv);
return NULL;
}
- info("NXP TDA18212HN successfully identified.");
+ pr_info("NXP TDA18212HN successfully identified\n");
memcpy(&fe->ops.tuner_ops, &tda18212_tuner_ops,
sizeof(struct dvb_tuner_ops));
diff --git a/drivers/media/common/tuners/tda18271-common.c b/drivers/media/common/tuners/tda18271-common.c
index aae40e52af5..39c645787b6 100644
--- a/drivers/media/common/tuners/tda18271-common.c
+++ b/drivers/media/common/tuners/tda18271-common.c
@@ -676,10 +676,28 @@ fail:
return ret;
}
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+int _tda_printk(struct tda18271_priv *state, const char *level,
+ const char *func, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int rtn;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (state)
+ rtn = printk("%s%s: [%d-%04x|%c] %pV",
+ level, func, i2c_adapter_id(state->i2c_props.adap),
+ state->i2c_props.addr,
+ (state->role == TDA18271_MASTER) ? 'M' : 'S',
+ &vaf);
+ else
+ rtn = printk("%s%s: %pV", level, func, &vaf);
+
+ va_end(args);
+
+ return rtn;
+}
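
_tda_printk() above forwards the caller's format and arguments through struct va_format and the kernel's %pV specifier, so the adapter/address prefix and the message come out of a single printk(). A rough userspace analog of that va_list forwarding (plain printf has no %pV, so this sketch prints the prefix and the body in two calls; names are illustrative only):

#include <stdarg.h>
#include <stdio.h>

/* print "<level><func>: " followed by the caller's format string */
__attribute__((format(printf, 3, 4)))
static int demo_printk(const char *level, const char *func,
		       const char *fmt, ...)
{
	va_list args;
	int n;

	n = printf("%s%s: ", level, func);

	va_start(args, fmt);
	n += vprintf(fmt, args);
	va_end(args);

	return n;
}

#define demo_dbg(fmt, ...) demo_printk("<7>", __func__, fmt, ##__VA_ARGS__)

int main(void)
{
	demo_dbg("chip ID:%02x\n", 0xc7);
	return 0;
}

The format(printf, 3, 4) attribute mirrors the format(printf, 4, 5) annotation added in tda18271-priv.h below: it lets the compiler type-check the forwarded format string even though the actual printing happens one call deeper.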
diff --git a/drivers/media/common/tuners/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index 57022e88e33..63cc4004e21 100644
--- a/drivers/media/common/tuners/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
@@ -1230,7 +1230,7 @@ static int tda18271_set_config(struct dvb_frontend *fe, void *priv_cfg)
return 0;
}
-static struct dvb_tuner_ops tda18271_tuner_ops = {
+static const struct dvb_tuner_ops tda18271_tuner_ops = {
.info = {
.name = "NXP TDA18271HD",
.frequency_min = 45000000,
diff --git a/drivers/media/common/tuners/tda18271-priv.h b/drivers/media/common/tuners/tda18271-priv.h
index 9589ab0576d..94340f47562 100644
--- a/drivers/media/common/tuners/tda18271-priv.h
+++ b/drivers/media/common/tuners/tda18271-priv.h
@@ -136,29 +136,26 @@ extern int tda18271_debug;
#define DBG_ADV 8
#define DBG_CAL 16
-#define tda_printk(st, kern, fmt, arg...) do {\
- if (st) { \
- struct tda18271_priv *state = st; \
- printk(kern "%s: [%d-%04x|%s] " fmt, __func__, \
- i2c_adapter_id(state->i2c_props.adap), \
- state->i2c_props.addr, \
- (state->role == TDA18271_MASTER) \
- ? "M" : "S", ##arg); \
- } else \
- printk(kern "%s: " fmt, __func__, ##arg); \
+__attribute__((format(printf, 4, 5)))
+int _tda_printk(struct tda18271_priv *state, const char *level,
+ const char *func, const char *fmt, ...);
+
+#define tda_printk(st, lvl, fmt, arg...) \
+ _tda_printk(st, lvl, __func__, fmt, ##arg)
+
+#define tda_dprintk(st, lvl, fmt, arg...) \
+do { \
+ if (tda18271_debug & lvl) \
+ tda_printk(st, KERN_DEBUG, fmt, ##arg); \
} while (0)
-#define tda_dprintk(st, lvl, fmt, arg...) do {\
- if (tda18271_debug & lvl) \
- tda_printk(st, KERN_DEBUG, fmt, ##arg); } while (0)
-
-#define tda_info(fmt, arg...) printk(KERN_INFO fmt, ##arg)
-#define tda_warn(fmt, arg...) tda_printk(priv, KERN_WARNING, fmt, ##arg)
-#define tda_err(fmt, arg...) tda_printk(priv, KERN_ERR, fmt, ##arg)
-#define tda_dbg(fmt, arg...) tda_dprintk(priv, DBG_INFO, fmt, ##arg)
-#define tda_map(fmt, arg...) tda_dprintk(priv, DBG_MAP, fmt, ##arg)
-#define tda_reg(fmt, arg...) tda_dprintk(priv, DBG_REG, fmt, ##arg)
-#define tda_cal(fmt, arg...) tda_dprintk(priv, DBG_CAL, fmt, ##arg)
+#define tda_info(fmt, arg...) pr_info(fmt, ##arg)
+#define tda_warn(fmt, arg...) tda_printk(priv, KERN_WARNING, fmt, ##arg)
+#define tda_err(fmt, arg...) tda_printk(priv, KERN_ERR, fmt, ##arg)
+#define tda_dbg(fmt, arg...) tda_dprintk(priv, DBG_INFO, fmt, ##arg)
+#define tda_map(fmt, arg...) tda_dprintk(priv, DBG_MAP, fmt, ##arg)
+#define tda_reg(fmt, arg...) tda_dprintk(priv, DBG_REG, fmt, ##arg)
+#define tda_cal(fmt, arg...) tda_dprintk(priv, DBG_CAL, fmt, ##arg)
#define tda_fail(ret) \
({ \
diff --git a/drivers/media/common/tuners/tda827x.c b/drivers/media/common/tuners/tda827x.c
index b21b6ea68b2..e0d5b43772b 100644
--- a/drivers/media/common/tuners/tda827x.c
+++ b/drivers/media/common/tuners/tda827x.c
@@ -176,7 +176,7 @@ static int tda827xo_set_params(struct dvb_frontend *fe,
if_freq = 5000000;
break;
}
- tuner_freq = params->frequency + if_freq;
+ tuner_freq = params->frequency;
i = 0;
while (tda827x_table[i].lomax < tuner_freq) {
@@ -185,6 +185,8 @@ static int tda827xo_set_params(struct dvb_frontend *fe,
i++;
}
+ tuner_freq += if_freq;
+
N = ((tuner_freq + 125000) / 250000) << (tda827x_table[i].spd + 2);
buf[0] = 0;
buf[1] = (N>>8) | 0x40;
@@ -540,7 +542,7 @@ static int tda827xa_set_params(struct dvb_frontend *fe,
if_freq = 5000000;
break;
}
- tuner_freq = params->frequency + if_freq;
+ tuner_freq = params->frequency;
if (fe->ops.info.type == FE_QAM) {
dprintk("%s select tda827xa_dvbc\n", __func__);
@@ -554,6 +556,8 @@ static int tda827xa_set_params(struct dvb_frontend *fe,
i++;
}
+ tuner_freq += if_freq;
+
N = ((tuner_freq + 31250) / 62500) << frequency_map[i].spd;
buf[0] = 0; // subaddress
buf[1] = N >> 8;
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index 94a603a6084..e13683bab6b 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -5,6 +5,7 @@
*/
#include <linux/i2c.h>
+#include <linux/module.h>
#include <media/tuner.h>
#include <media/tuner-types.h>
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 16fba6b5961..3acbaa04e1b 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -614,6 +614,13 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
p += len;
size -= len;
}
+
+ /* silently fail if the frontend doesn't support I2C flush */
+ rc = do_tuner_callback(fe, XC2028_I2C_FLUSH, 0);
+ if ((rc < 0) && (rc != -EINVAL)) {
+ tuner_err("error executing flush: %d\n", rc);
+ return rc;
+ }
}
return 0;
}
@@ -933,11 +940,16 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
* that xc2028 will be in a safe state.
* Maybe this might also be needed for DTV.
*/
- if (new_type == V4L2_TUNER_ANALOG_TV) {
+ switch (new_type) {
+ case V4L2_TUNER_ANALOG_TV:
rc = send_seq(priv, {0x00, 0x00});
- /* Analog modes require offset = 0 */
- } else {
+ /* Analog mode requires offset = 0 */
+ break;
+ case V4L2_TUNER_RADIO:
+ /* Radio mode requires offset = 0 */
+ break;
+ case V4L2_TUNER_DIGITAL_TV:
/*
* Digital modes require an offset to adjust to the
* proper frequency. The offset depends on what
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index 9778c96a500..9ebfb2d0ff1 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -54,6 +54,7 @@ struct xc2028_config {
/* xc2028 commands for callback */
#define XC2028_TUNER_RESET 0
#define XC2028_RESET_CLK 1
+#define XC2028_I2C_FLUSH 2
#if defined(CONFIG_MEDIA_TUNER_XC2028) || (defined(CONFIG_MEDIA_TUNER_XC2028_MODULE) && defined(MODULE))
extern struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
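
The new XC2028_I2C_FLUSH callback used by load_firmware() above is optional: a return of -EINVAL from the board callback means "not implemented" and is silently ignored, while any other negative value aborts the firmware load. A compact sketch of that convention (the callback body and command number here are placeholders, not the driver's real values):

#include <errno.h>
#include <stdio.h>

/* a backend may leave optional hooks unimplemented and return -EINVAL */
static int do_callback(int cmd, int arg)
{
	(void)cmd;
	(void)arg;
	return -EINVAL;			/* pretend the hook is not supported */
}

static int load_step(void)
{
	int rc = do_callback(2 /* flush */, 0);

	if (rc < 0 && rc != -EINVAL) {
		fprintf(stderr, "flush failed: %d\n", rc);
		return rc;		/* real error: abort the load */
	}
	return 0;			/* success, or harmless "unsupported" */
}

int main(void)
{
	return load_step() ? 1 : 0;
}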
diff --git a/drivers/media/dvb/b2c2/Makefile b/drivers/media/dvb/b2c2/Makefile
index b97cf7208a1..3d04a8dba99 100644
--- a/drivers/media/dvb/b2c2/Makefile
+++ b/drivers/media/dvb/b2c2/Makefile
@@ -12,5 +12,5 @@ obj-$(CONFIG_DVB_B2C2_FLEXCOP_PCI) += b2c2-flexcop-pci.o
b2c2-flexcop-usb-objs = flexcop-usb.o
obj-$(CONFIG_DVB_B2C2_FLEXCOP_USB) += b2c2-flexcop-usb.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
-EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+ccflags-y += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+ccflags-y += -Idrivers/media/common/tuners/
diff --git a/drivers/media/dvb/bt8xx/Makefile b/drivers/media/dvb/bt8xx/Makefile
index d98f1d49ffa..0713b3af205 100644
--- a/drivers/media/dvb/bt8xx/Makefile
+++ b/drivers/media/dvb/bt8xx/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
-EXTRA_CFLAGS += -Idrivers/media/video/bt8xx
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video/bt8xx
+ccflags-y += -Idrivers/media/common/tuners
diff --git a/drivers/media/dvb/ddbridge/Makefile b/drivers/media/dvb/ddbridge/Makefile
index de4fe193c3e..38019bafb86 100644
--- a/drivers/media/dvb/ddbridge/Makefile
+++ b/drivers/media/dvb/ddbridge/Makefile
@@ -6,9 +6,9 @@ ddbridge-objs := ddbridge-core.o
obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
-EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+ccflags-y += -Idrivers/media/dvb/dvb-core/
+ccflags-y += -Idrivers/media/dvb/frontends/
+ccflags-y += -Idrivers/media/common/tuners/
# For the staging CI driver cxd2099
-EXTRA_CFLAGS += -Idrivers/staging/cxd2099/
+ccflags-y += -Idrivers/staging/media/cxd2099/
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index 573d540f213..ba9a643b9c6 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -507,15 +507,14 @@ static u32 ddb_input_avail(struct ddb_input *input)
return 0;
}
-static size_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
+static ssize_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
{
struct ddb *dev = input->port->dev;
u32 left = count;
- u32 idx, off, free, stat = input->stat;
+ u32 idx, free, stat = input->stat;
int ret;
idx = (stat >> 11) & 0x1f;
- off = (stat & 0x7ff) << 7;
while (left) {
if (input->cbuf == idx)
@@ -525,6 +524,8 @@ static size_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
free = left;
ret = copy_to_user(buf, input->vbuf[input->cbuf] +
input->coff, free);
+ if (ret)
+ return -EFAULT;
input->coff += free;
if (input->coff == input->dma_buf_size) {
input->coff = 0;
@@ -939,6 +940,8 @@ static ssize_t ts_read(struct file *file, char *buf,
break;
}
read = ddb_input_read(input, buf, left);
+ if (read < 0)
+ return read;
left -= read;
buf += read;
}
@@ -1438,7 +1441,7 @@ static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ddb *dev = file->private_data;
void *parg = (void *)arg;
- int res = -EFAULT;
+ int res;
switch (cmd) {
case IOCTL_DDB_FLASHIO:
@@ -1447,29 +1450,29 @@ static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
u8 *rbuf, *wbuf;
if (copy_from_user(&fio, parg, sizeof(fio)))
- break;
- if (fio.write_len + fio.read_len > 1028) {
- printk(KERN_ERR "IOBUF too small\n");
- return -ENOMEM;
- }
+ return -EFAULT;
+
+ if (fio.write_len > 1028 || fio.read_len > 1028)
+ return -EINVAL;
+ if (fio.write_len + fio.read_len > 1028)
+ return -EINVAL;
+
wbuf = &dev->iobuf[0];
- if (!wbuf)
- return -ENOMEM;
rbuf = wbuf + fio.write_len;
- if (copy_from_user(wbuf, fio.write_buf, fio.write_len)) {
- vfree(wbuf);
- break;
- }
- res = flashio(dev, wbuf, fio.write_len,
- rbuf, fio.read_len);
+
+ if (copy_from_user(wbuf, fio.write_buf, fio.write_len))
+ return -EFAULT;
+ res = flashio(dev, wbuf, fio.write_len, rbuf, fio.read_len);
+ if (res)
+ return res;
if (copy_to_user(fio.read_buf, rbuf, fio.read_len))
- res = -EFAULT;
+ return -EFAULT;
break;
}
default:
- break;
+ return -ENOTTY;
}
- return res;
+ return 0;
}
static const struct file_operations ddb_fops = {
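
The reworked IOCTL_DDB_FLASHIO path above validates each user-supplied length before summing them, which closes the integer-overflow hole in the old "write_len + read_len > 1028" check. The validation logic in isolation (buffer size as in the hunk; the helper name is made up for the sketch):

#include <stdint.h>
#include <stdio.h>

#define IOBUF_SIZE 1028u

/* returns 0 if both lengths fit the shared buffer, -1 otherwise */
static int validate_lengths(uint32_t write_len, uint32_t read_len)
{
	/* bound each length first, so the sum below cannot wrap around */
	if (write_len > IOBUF_SIZE || read_len > IOBUF_SIZE)
		return -1;
	if (write_len + read_len > IOBUF_SIZE)
		return -1;
	return 0;
}

int main(void)
{
	/* 0xffffffff + 2 wraps to 1 and would pass an unchecked sum */
	printf("%d\n", validate_lengths(0xffffffffu, 2u));	/* -1 */
	printf("%d\n", validate_lengths(1000u, 28u));		/*  0 */
	return 0;
}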
diff --git a/drivers/media/dvb/dm1105/Makefile b/drivers/media/dvb/dm1105/Makefile
index 8ac28b0546a..95a008b71fe 100644
--- a/drivers/media/dvb/dm1105/Makefile
+++ b/drivers/media/dvb/dm1105/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_DM1105) += dm1105.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index efe9c30605e..2c0acdb4d81 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -149,30 +149,25 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status)
dprintk ("%s\n", __func__);
- if (mutex_lock_interruptible (&events->mtx))
- return;
+ if ((status & FE_HAS_LOCK) && fe->ops.get_frontend)
+ fe->ops.get_frontend(fe, &fepriv->parameters_out);
- wp = (events->eventw + 1) % MAX_EVENT;
+ mutex_lock(&events->mtx);
+ wp = (events->eventw + 1) % MAX_EVENT;
if (wp == events->eventr) {
events->overflow = 1;
events->eventr = (events->eventr + 1) % MAX_EVENT;
}
e = &events->events[events->eventw];
-
- if (status & FE_HAS_LOCK)
- if (fe->ops.get_frontend)
- fe->ops.get_frontend(fe, &fepriv->parameters_out);
-
+ e->status = status;
e->parameters = fepriv->parameters_out;
events->eventw = wp;
mutex_unlock(&events->mtx);
- e->status = status;
-
wake_up_interruptible (&events->wait_queue);
}
@@ -207,19 +202,24 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
return ret;
}
- if (mutex_lock_interruptible (&events->mtx))
- return -ERESTARTSYS;
-
- memcpy (event, &events->events[events->eventr],
- sizeof(struct dvb_frontend_event));
-
+ mutex_lock(&events->mtx);
+ *event = events->events[events->eventr];
events->eventr = (events->eventr + 1) % MAX_EVENT;
-
mutex_unlock(&events->mtx);
return 0;
}
+static void dvb_frontend_clear_events(struct dvb_frontend *fe)
+{
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dvb_fe_events *events = &fepriv->events;
+
+ mutex_lock(&events->mtx);
+ events->eventr = events->eventw;
+ mutex_unlock(&events->mtx);
+}
+
static void dvb_frontend_init(struct dvb_frontend *fe)
{
dprintk ("DVB: initialising adapter %i frontend %i (%s)...\n",
@@ -537,7 +537,6 @@ static int dvb_frontend_thread(void *data)
{
struct dvb_frontend *fe = data;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- unsigned long timeout;
fe_status_t s;
enum dvbfe_algo algo;
@@ -558,7 +557,7 @@ static int dvb_frontend_thread(void *data)
while (1) {
up(&fepriv->sem); /* is locked when we enter the thread... */
restart:
- timeout = wait_event_interruptible_timeout(fepriv->wait_queue,
+ wait_event_interruptible_timeout(fepriv->wait_queue,
dvb_frontend_should_wakeup(fe) || kthread_should_stop()
|| freezing(current),
fepriv->delay);
@@ -577,12 +576,10 @@ restart:
if (fepriv->reinitialise) {
dvb_frontend_init(fe);
- if (fepriv->tone != -1) {
+ if (fe->ops.set_tone && fepriv->tone != -1)
fe->ops.set_tone(fe, fepriv->tone);
- }
- if (fepriv->voltage != -1) {
+ if (fe->ops.set_voltage && fepriv->voltage != -1)
fe->ops.set_voltage(fe, fepriv->voltage);
- }
fepriv->reinitialise = 0;
}
@@ -1019,6 +1016,29 @@ static int is_legacy_delivery_system(fe_delivery_system_t s)
return 0;
}
+/* Initialize the cache with some default values derived from the
+ * legacy frontend_info structure.
+ */
+static void dtv_property_cache_init(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
+{
+ switch (fe->ops.info.type) {
+ case FE_QPSK:
+ c->modulation = QPSK; /* implied for DVB-S in legacy API */
+ c->rolloff = ROLLOFF_35;/* implied for DVB-S */
+ c->delivery_system = SYS_DVBS;
+ break;
+ case FE_QAM:
+ c->delivery_system = SYS_DVBC_ANNEX_AC;
+ break;
+ case FE_OFDM:
+ c->delivery_system = SYS_DVBT;
+ break;
+ case FE_ATSC:
+ break;
+ }
+}
+
/* Synchronise the legacy tuning parameters into the cache, so that demodulator
* drivers can use a single set_frontend tuning function, regardless of whether
* it's being used for the legacy or new API, reducing code and complexity.
@@ -1032,17 +1052,13 @@ static void dtv_property_cache_sync(struct dvb_frontend *fe,
switch (fe->ops.info.type) {
case FE_QPSK:
- c->modulation = QPSK; /* implied for DVB-S in legacy API */
- c->rolloff = ROLLOFF_35;/* implied for DVB-S */
c->symbol_rate = p->u.qpsk.symbol_rate;
c->fec_inner = p->u.qpsk.fec_inner;
- c->delivery_system = SYS_DVBS;
break;
case FE_QAM:
c->symbol_rate = p->u.qam.symbol_rate;
c->fec_inner = p->u.qam.fec_inner;
c->modulation = p->u.qam.modulation;
- c->delivery_system = SYS_DVBC_ANNEX_AC;
break;
case FE_OFDM:
if (p->u.ofdm.bandwidth == BANDWIDTH_6_MHZ)
@@ -1060,7 +1076,6 @@ static void dtv_property_cache_sync(struct dvb_frontend *fe,
c->transmission_mode = p->u.ofdm.transmission_mode;
c->guard_interval = p->u.ofdm.guard_interval;
c->hierarchy = p->u.ofdm.hierarchy_information;
- c->delivery_system = SYS_DVBT;
break;
case FE_ATSC:
c->modulation = p->u.vsb.modulation;
@@ -1132,16 +1147,13 @@ static void dtv_property_adv_params_sync(struct dvb_frontend *fe)
p->frequency = c->frequency;
p->inversion = c->inversion;
- switch(c->modulation) {
- case PSK_8:
- case APSK_16:
- case APSK_32:
- case QPSK:
+ if (c->delivery_system == SYS_DSS ||
+ c->delivery_system == SYS_DVBS ||
+ c->delivery_system == SYS_DVBS2 ||
+ c->delivery_system == SYS_ISDBS ||
+ c->delivery_system == SYS_TURBO) {
p->u.qpsk.symbol_rate = c->symbol_rate;
p->u.qpsk.fec_inner = c->fec_inner;
- break;
- default:
- break;
}
/* Fake out a generic DVB-T request so we pass validation in the ioctl */
@@ -1824,9 +1836,17 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
memcpy (&fepriv->parameters_in, parg,
sizeof (struct dvb_frontend_parameters));
+ dtv_property_cache_init(fe, c);
dtv_property_cache_sync(fe, c, &fepriv->parameters_in);
}
+ /*
+ * Initialize output parameters to match the values given by
+ * the user. FE_SET_FRONTEND triggers an initial frontend event
+ * with status = 0, which copies output parameters to userspace.
+ */
+ fepriv->parameters_out = fepriv->parameters_in;
+
memset(&fetunesettings, 0, sizeof(struct dvb_frontend_tune_settings));
memcpy(&fetunesettings.parameters, parg,
sizeof (struct dvb_frontend_parameters));
@@ -1884,8 +1904,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
/* Request the search algorithm to search */
fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
- dvb_frontend_wakeup(fe);
+ dvb_frontend_clear_events(fe);
dvb_frontend_add_event(fe, 0);
+ dvb_frontend_wakeup(fe);
fepriv->status = 0;
err = 0;
break;
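
The dvb_frontend_add_event()/dvb_frontend_get_event() changes above follow a common ring-buffer discipline: take the mutex unconditionally (the old interruptible lock could silently drop an event), fill the slot completely before advancing the write index, and overwrite the oldest entry on overflow. A self-contained pthread sketch of that scheme, with illustrative sizes and field names rather than the dvb-core ones:

#include <pthread.h>
#include <stdio.h>

#define MAX_EVENT 8

struct event { int status; };

static struct event ring[MAX_EVENT];
static int eventr, eventw, overflow;
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

/* producer: never fails; overwrites the oldest event when full */
static void add_event(int status)
{
	pthread_mutex_lock(&mtx);
	int wp = (eventw + 1) % MAX_EVENT;
	if (wp == eventr) {			/* ring full */
		overflow = 1;
		eventr = (eventr + 1) % MAX_EVENT;
	}
	ring[eventw].status = status;		/* fill before publishing */
	eventw = wp;
	pthread_mutex_unlock(&mtx);
}

/* consumer: copies one event out under the same lock */
static int get_event(struct event *out)
{
	pthread_mutex_lock(&mtx);
	if (eventr == eventw) {
		pthread_mutex_unlock(&mtx);
		return -1;			/* empty */
	}
	*out = ring[eventr];
	eventr = (eventr + 1) % MAX_EVENT;
	pthread_mutex_unlock(&mtx);
	return 0;
}

int main(void)
{
	struct event e;

	for (int i = 0; i < 10; i++)
		add_event(i);
	while (get_event(&e) == 0)
		printf("status %d\n", e.status);
	printf("overflow=%d\n", overflow);
	return 0;
}

One slot is always kept empty so "full" and "empty" can be told apart by the indices alone, which is why the ring holds at most MAX_EVENT - 1 events, exactly as in the dvb-core code.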
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index 5590eb6eb40..67bbfa72801 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -209,6 +209,7 @@ struct dvb_tuner_ops {
int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency);
int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth);
+ int (*get_if_frequency)(struct dvb_frontend *fe, u32 *frequency);
#define TUNER_STATUS_LOCKED 1
#define TUNER_STATUS_STEREO 2
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 5d73dec8ac0..58257165761 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -258,6 +258,19 @@ config DVB_USB_AF9005_REMOTE
Say Y here to support the default remote control decoding for the
Afatech AF9005 based receiver.
+config DVB_USB_PCTV452E
+ tristate "Pinnacle PCTV HDTV Pro USB device/TT Connect S2-3600"
+ depends on DVB_USB
+ select TTPCI_EEPROM
+ select DVB_LNBP22 if !DVB_FE_CUSTOMISE
+ select DVB_STB0899 if !DVB_FE_CUSTOMISE
+ select DVB_STB6100 if !DVB_FE_CUSTOMISE
+ help
+ Support for external USB adapter designed by Pinnacle,
+ shipped under the brand name 'PCTV HDTV Pro USB'.
+ Also supports TT Connect S2-3600/3650 cards.
+ Say Y if you own such a device and want to use it.
+
config DVB_USB_DW2102
tristate "DvbWorld & TeVii DVB-S/S2 USB2.0 support"
depends on DVB_USB
@@ -374,3 +387,18 @@ config DVB_USB_TECHNISAT_USB2
select DVB_STV6110x if !DVB_FE_CUSTOMISE
help
Say Y here to support the Technisat USB2 DVB-S/S2 device
+
+config DVB_USB_IT913X
+ tristate "it913x driver"
+ depends on DVB_USB
+ select DVB_IT913X_FE
+ help
+ Say Y here to support the it913x device
+
+config DVB_USB_MXL111SF
+ tristate "MxL111SF DTV USB2.0 support"
+ depends on DVB_USB
+ select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
+ select VIDEO_TVEEPROM
+ help
+ Say Y here to support the MxL111SF USB2.0 DTV receiver.
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 4bac13da0c3..26c8b9e5705 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -64,6 +64,9 @@ obj-$(CONFIG_DVB_USB_AF9005_REMOTE) += dvb-usb-af9005-remote.o
dvb-usb-anysee-objs = anysee.o
obj-$(CONFIG_DVB_USB_ANYSEE) += dvb-usb-anysee.o
+dvb-usb-pctv452e-objs = pctv452e.o
+obj-$(CONFIG_DVB_USB_PCTV452E) += dvb-usb-pctv452e.o
+
dvb-usb-dw2102-objs = dw2102.o
obj-$(CONFIG_DVB_USB_DW2102) += dvb-usb-dw2102.o
@@ -94,7 +97,16 @@ obj-$(CONFIG_DVB_USB_LME2510) += dvb-usb-lmedm04.o
dvb-usb-technisat-usb2-objs = technisat-usb2.o
obj-$(CONFIG_DVB_USB_TECHNISAT_USB2) += dvb-usb-technisat-usb2.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+dvb-usb-it913x-objs := it913x.o
+obj-$(CONFIG_DVB_USB_IT913X) += dvb-usb-it913x.o
+
+dvb-usb-mxl111sf-objs = mxl111sf.o mxl111sf-phy.o mxl111sf-i2c.o mxl111sf-gpio.o
+obj-$(CONFIG_DVB_USB_MXL111SF) += dvb-usb-mxl111sf.o
+obj-$(CONFIG_DVB_USB_MXL111SF) += mxl111sf-demod.o
+obj-$(CONFIG_DVB_USB_MXL111SF) += mxl111sf-tuner.o
+
+ccflags-y += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
# due to tuner-xc3028
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/ttpci
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index b95a95e1784..2aef3c89e9f 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -127,6 +127,8 @@ static struct dvb_usb_device_properties a800_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.streaming_ctrl = dibusb2_0_streaming_ctrl,
@@ -147,7 +149,7 @@ static struct dvb_usb_device_properties a800_properties = {
}
}
},
-
+ }},
.size_of_priv = sizeof(struct dibusb_state),
},
},
diff --git a/drivers/media/dvb/dvb-usb/af9005-fe.c b/drivers/media/dvb/dvb-usb/af9005-fe.c
index 6ad94745bbd..3263e9749d0 100644
--- a/drivers/media/dvb/dvb-usb/af9005-fe.c
+++ b/drivers/media/dvb/dvb-usb/af9005-fe.c
@@ -63,11 +63,9 @@ static int af9005_write_word_agc(struct dvb_usb_device *d, u16 reghi,
u16 reglo, u8 pos, u8 len, u16 value)
{
int ret;
- u8 temp;
if ((ret = af9005_write_ofdm_register(d, reglo, (u8) (value & 0xff))))
return ret;
- temp = (u8) ((value & 0x0300) >> 8);
return af9005_write_register_bits(d, reghi, pos, len,
(u8) ((value & 0x300) >> 8));
}
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
index 0351c0e52dd..bd51a764351 100644
--- a/drivers/media/dvb/dvb-usb/af9005.c
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -815,7 +815,7 @@ static int af9005_frontend_attach(struct dvb_usb_adapter *adap)
debug_dump(buf, 8, printk);
}
}
- adap->fe = af9005_fe_attach(adap->dev);
+ adap->fe_adap[0].fe = af9005_fe_attach(adap->dev);
return 0;
}
@@ -999,6 +999,8 @@ static struct dvb_usb_device_properties af9005_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps =
DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -1018,6 +1020,7 @@ static struct dvb_usb_device_properties af9005_properties = {
}
}
},
+ }},
}
},
.power_ctrl = af9005_power_ctrl,
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index d7ad05fc383..c6c275bac08 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -744,29 +744,31 @@ static const struct af9015_rc_setup af9015_rc_setup_hashes[] = {
};
static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
- { (USB_VID_TERRATEC << 16) + USB_PID_TERRATEC_CINERGY_T_STICK_RC,
+ { (USB_VID_TERRATEC << 16) | USB_PID_TERRATEC_CINERGY_T_STICK_RC,
RC_MAP_TERRATEC_SLIM_2 },
- { (USB_VID_TERRATEC << 16) + USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC,
+ { (USB_VID_TERRATEC << 16) | USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC,
RC_MAP_TERRATEC_SLIM },
- { (USB_VID_VISIONPLUS << 16) + USB_PID_AZUREWAVE_AD_TU700,
+ { (USB_VID_VISIONPLUS << 16) | USB_PID_AZUREWAVE_AD_TU700,
RC_MAP_AZUREWAVE_AD_TU700 },
- { (USB_VID_VISIONPLUS << 16) + USB_PID_TINYTWIN,
+ { (USB_VID_VISIONPLUS << 16) | USB_PID_TINYTWIN,
RC_MAP_AZUREWAVE_AD_TU700 },
- { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGI_VOX_MINI_III,
+ { (USB_VID_MSI_2 << 16) | USB_PID_MSI_DIGI_VOX_MINI_III,
RC_MAP_MSI_DIGIVOX_III },
- { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGIVOX_DUO,
+ { (USB_VID_MSI_2 << 16) | USB_PID_MSI_DIGIVOX_DUO,
RC_MAP_MSI_DIGIVOX_III },
- { (USB_VID_LEADTEK << 16) + USB_PID_WINFAST_DTV_DONGLE_GOLD,
+ { (USB_VID_LEADTEK << 16) | USB_PID_WINFAST_DTV_DONGLE_GOLD,
RC_MAP_LEADTEK_Y04G0051 },
- { (USB_VID_AVERMEDIA << 16) + USB_PID_AVERMEDIA_VOLAR_X,
+ { (USB_VID_LEADTEK << 16) | USB_PID_WINFAST_DTV2000DS,
+ RC_MAP_LEADTEK_Y04G0051 },
+ { (USB_VID_AVERMEDIA << 16) | USB_PID_AVERMEDIA_VOLAR_X,
RC_MAP_AVERMEDIA_M135A },
- { (USB_VID_AFATECH << 16) + USB_PID_TREKSTOR_DVBT,
+ { (USB_VID_AFATECH << 16) | USB_PID_TREKSTOR_DVBT,
RC_MAP_TREKSTOR },
- { (USB_VID_KWORLD_2 << 16) + USB_PID_TINYTWIN_2,
+ { (USB_VID_KWORLD_2 << 16) | USB_PID_TINYTWIN_2,
RC_MAP_DIGITALNOW_TINYTWIN },
- { (USB_VID_GTEK << 16) + USB_PID_TINYTWIN_3,
+ { (USB_VID_GTEK << 16) | USB_PID_TINYTWIN_3,
RC_MAP_DIGITALNOW_TINYTWIN },
- { (USB_VID_KWORLD_2 << 16) + USB_PID_SVEON_STV22,
+ { (USB_VID_KWORLD_2 << 16) | USB_PID_SVEON_STV22,
RC_MAP_MSI_DIGIVOX_III },
{ }
};
@@ -859,13 +861,13 @@ static int af9015_read_config(struct usb_device *udev)
for (i = 0; i < af9015_properties_count; i++) {
/* USB1.1 set smaller buffersize and disable 2nd adapter */
if (udev->speed == USB_SPEED_FULL) {
- af9015_properties[i].adapter[0].stream.u.bulk.buffersize
+ af9015_properties[i].adapter[0].fe[0].stream.u.bulk.buffersize
= TS_USB11_FRAME_SIZE;
/* disable 2nd adapter because we don't have
PID-filters */
af9015_config.dual_mode = 0;
} else {
- af9015_properties[i].adapter[0].stream.u.bulk.buffersize
+ af9015_properties[i].adapter[0].fe[0].stream.u.bulk.buffersize
= TS_USB20_FRAME_SIZE;
}
}
@@ -1111,10 +1113,10 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
}
/* attach demodulator */
- adap->fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
+ adap->fe_adap[0].fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
&adap->dev->i2c_adap);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static struct mt2060_config af9015_mt2060_config = {
@@ -1188,49 +1190,49 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
switch (af9015_af9013_config[adap->id].tuner) {
case AF9013_TUNER_MT2060:
case AF9013_TUNER_MT2060_2:
- ret = dvb_attach(mt2060_attach, adap->fe, &adap->dev->i2c_adap,
+ ret = dvb_attach(mt2060_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&af9015_mt2060_config,
af9015_config.mt2060_if1[adap->id])
== NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_QT1010:
case AF9013_TUNER_QT1010A:
- ret = dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap,
+ ret = dvb_attach(qt1010_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&af9015_qt1010_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18271:
- ret = dvb_attach(tda18271_attach, adap->fe, 0xc0,
+ ret = dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0xc0,
&adap->dev->i2c_adap,
&af9015_tda18271_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18218:
- ret = dvb_attach(tda18218_attach, adap->fe,
+ ret = dvb_attach(tda18218_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap,
&af9015_tda18218_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5003D:
- ret = dvb_attach(mxl5005s_attach, adap->fe,
+ ret = dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap,
&af9015_mxl5003_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
- ret = dvb_attach(mxl5005s_attach, adap->fe,
+ ret = dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap,
&af9015_mxl5005_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_ENV77H11D5:
- ret = dvb_attach(dvb_pll_attach, adap->fe, 0xc0,
+ ret = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0xc0,
&adap->dev->i2c_adap,
DVB_PLL_TDA665X) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MC44S803:
- ret = dvb_attach(mc44s803_attach, adap->fe,
+ ret = dvb_attach(mc44s803_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap,
&af9015_mc44s803_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5007T:
- ret = dvb_attach(mxl5007t_attach, adap->fe,
+ ret = dvb_attach(mxl5007t_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap,
0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
break;
@@ -1304,6 +1306,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -1319,8 +1323,11 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.count = 6,
.endpoint = 0x84,
},
+ }},
},
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach =
af9015_af9013_frontend_attach,
.tuner_attach = af9015_tuner_attach,
@@ -1335,6 +1342,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
}
}
},
+ }},
}
},
@@ -1432,6 +1440,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -1447,8 +1457,11 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.count = 6,
.endpoint = 0x84,
},
+ }},
},
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach =
af9015_af9013_frontend_attach,
.tuner_attach = af9015_tuner_attach,
@@ -1463,6 +1476,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
}
}
},
+ }},
}
},
@@ -1549,6 +1563,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -1564,8 +1580,11 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.count = 6,
.endpoint = 0x84,
},
+ }},
},
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach =
af9015_af9013_frontend_attach,
.tuner_attach = af9015_tuner_attach,
@@ -1580,6 +1599,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
}
}
},
+ }},
}
},
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 2cbf19a52e3..5f2278b73ee 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -446,6 +446,114 @@ static struct isl6423_config anysee_isl6423_config = {
* IOE[5] STV0903 1=enabled
*/
+static int anysee_frontend_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct anysee_state *state = adap->dev->priv;
+ int ret;
+
+ deb_info("%s: fe=%d onoff=%d\n", __func__, fe->id, onoff);
+
+ /* no frontend sleep control */
+ if (onoff == 0)
+ return 0;
+
+ switch (state->hw) {
+ case ANYSEE_HW_507FA: /* 15 */
+ /* E30 Combo Plus */
+ /* E30 C Plus */
+
+ if ((fe->id ^ dvb_usb_anysee_delsys) == 0) {
+ /* disable DVB-T demod on IOD[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 0),
+ 0x01);
+ if (ret)
+ goto error;
+
+ /* enable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
+ 0x20);
+ if (ret)
+ goto error;
+
+ /* enable DVB-C tuner on IOE[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0),
+ 0x01);
+ if (ret)
+ goto error;
+ } else {
+ /* disable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
+ 0x20);
+ if (ret)
+ goto error;
+
+ /* enable DVB-T demod on IOD[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0),
+ 0x01);
+ if (ret)
+ goto error;
+
+ /* enable DVB-T tuner on IOE[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0),
+ 0x01);
+ if (ret)
+ goto error;
+ }
+
+ break;
+ case ANYSEE_HW_508TC: /* 18 */
+ case ANYSEE_HW_508PTC: /* 21 */
+ /* E7 TC */
+ /* E7 PTC */
+
+ if ((fe->id ^ dvb_usb_anysee_delsys) == 0) {
+ /* disable DVB-T demod on IOD[6] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6),
+ 0x40);
+ if (ret)
+ goto error;
+
+ /* enable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
+ 0x20);
+ if (ret)
+ goto error;
+
+ /* enable IF route on IOE[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0),
+ 0x01);
+ if (ret)
+ goto error;
+ } else {
+ /* disable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
+ 0x20);
+ if (ret)
+ goto error;
+
+ /* enable DVB-T demod on IOD[6] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 6),
+ 0x40);
+ if (ret)
+ goto error;
+
+ /* enable IF route on IOE[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0),
+ 0x01);
+ if (ret)
+ goto error;
+ }
+
+ break;
+ default:
+ ret = 0;
+ }
+
+error:
+ return ret;
+}
+
static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
{
int ret;
@@ -466,41 +574,54 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
}
};
- /* Check which hardware we have.
- * We must do this call two times to get reliable values (hw bug).
- */
- ret = anysee_get_hw_info(adap->dev, hw_info);
- if (ret)
- goto error;
+ /* detect hardware only once */
+ if (adap->fe_adap[0].fe == NULL) {
+ /* Check which hardware we have.
+ * We must do this call two times to get reliable values (hw bug).
+ */
+ ret = anysee_get_hw_info(adap->dev, hw_info);
+ if (ret)
+ goto error;
- ret = anysee_get_hw_info(adap->dev, hw_info);
- if (ret)
- goto error;
+ ret = anysee_get_hw_info(adap->dev, hw_info);
+ if (ret)
+ goto error;
- /* Meaning of these info bytes are guessed. */
- info("firmware version:%d.%d hardware id:%d",
- hw_info[1], hw_info[2], hw_info[0]);
+ /* Meaning of these info bytes is guessed. */
+ info("firmware version:%d.%d hardware id:%d",
+ hw_info[1], hw_info[2], hw_info[0]);
- state->hw = hw_info[0];
+ state->hw = hw_info[0];
+ }
+
+ /* set current frontend ID for devices having two frontends */
+ if (adap->fe_adap[0].fe)
+ state->fe_id++;
switch (state->hw) {
case ANYSEE_HW_507T: /* 2 */
/* E30 */
+ if (state->fe_id)
+ break;
+
/* attach demod */
- adap->fe = dvb_attach(mt352_attach, &anysee_mt352_config,
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach, &anysee_mt352_config,
&adap->dev->i2c_adap);
- if (adap->fe)
+ if (adap->fe_adap[0].fe)
break;
/* attach demod */
- adap->fe = dvb_attach(zl10353_attach, &anysee_zl10353_config,
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &anysee_zl10353_config,
&adap->dev->i2c_adap);
break;
case ANYSEE_HW_507CD: /* 6 */
/* E30 Plus */
+ if (state->fe_id)
+ break;
+
/* enable DVB-T demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01);
if (ret)
@@ -512,33 +633,39 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach demod */
- adap->fe = dvb_attach(zl10353_attach, &anysee_zl10353_config,
- &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
+ &anysee_zl10353_config, &adap->dev->i2c_adap);
break;
case ANYSEE_HW_507DC: /* 10 */
/* E30 C Plus */
+ if (state->fe_id)
+ break;
+
/* enable DVB-C demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01);
if (ret)
goto error;
/* attach demod */
- adap->fe = dvb_attach(tda10023_attach, &anysee_tda10023_config,
- &adap->dev->i2c_adap, 0x48);
+ adap->fe_adap[0].fe = dvb_attach(tda10023_attach,
+ &anysee_tda10023_config, &adap->dev->i2c_adap, 0x48);
break;
case ANYSEE_HW_507SI: /* 11 */
/* E30 S2 Plus */
+ if (state->fe_id)
+ break;
+
/* enable DVB-S/S2 demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01);
if (ret)
goto error;
/* attach demod */
- adap->fe = dvb_attach(cx24116_attach, &anysee_cx24116_config,
+ adap->fe_adap[0].fe = dvb_attach(cx24116_attach, &anysee_cx24116_config,
&adap->dev->i2c_adap);
break;
@@ -564,55 +691,59 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
if (ret)
goto error;
- if (dvb_usb_anysee_delsys) {
- /* disable DVB-C demod on IOD[5] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
- 0x20);
+ if ((state->fe_id ^ dvb_usb_anysee_delsys) == 0) {
+ /* disable DVB-T demod on IOD[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 0),
+ 0x01);
if (ret)
goto error;
- /* enable DVB-T demod on IOD[0] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0),
- 0x01);
+ /* enable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
+ 0x20);
if (ret)
goto error;
/* attach demod */
if (tmp == 0xc7) {
/* TDA18212 config */
- adap->fe = dvb_attach(zl10353_attach,
- &anysee_zl10353_tda18212_config2,
- &adap->dev->i2c_adap);
+ adap->fe_adap[state->fe_id].fe = dvb_attach(
+ tda10023_attach,
+ &anysee_tda10023_tda18212_config,
+ &adap->dev->i2c_adap, 0x48);
} else {
/* PLL config */
- adap->fe = dvb_attach(zl10353_attach,
- &anysee_zl10353_config,
- &adap->dev->i2c_adap);
+ adap->fe_adap[state->fe_id].fe = dvb_attach(
+ tda10023_attach,
+ &anysee_tda10023_config,
+ &adap->dev->i2c_adap, 0x48);
}
} else {
- /* disable DVB-T demod on IOD[0] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 0),
- 0x01);
+ /* disable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
+ 0x20);
if (ret)
goto error;
- /* enable DVB-C demod on IOD[5] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
- 0x20);
+ /* enable DVB-T demod on IOD[0] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0),
+ 0x01);
if (ret)
goto error;
/* attach demod */
if (tmp == 0xc7) {
/* TDA18212 config */
- adap->fe = dvb_attach(tda10023_attach,
- &anysee_tda10023_tda18212_config,
- &adap->dev->i2c_adap, 0x48);
+ adap->fe_adap[state->fe_id].fe = dvb_attach(
+ zl10353_attach,
+ &anysee_zl10353_tda18212_config2,
+ &adap->dev->i2c_adap);
} else {
/* PLL config */
- adap->fe = dvb_attach(tda10023_attach,
- &anysee_tda10023_config,
- &adap->dev->i2c_adap, 0x48);
+ adap->fe_adap[state->fe_id].fe = dvb_attach(
+ zl10353_attach,
+ &anysee_zl10353_config,
+ &adap->dev->i2c_adap);
}
}
@@ -627,52 +758,40 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
if (ret)
goto error;
- if (dvb_usb_anysee_delsys) {
- /* disable DVB-C demod on IOD[5] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
- 0x20);
- if (ret)
- goto error;
-
- /* enable DVB-T demod on IOD[6] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 6),
+ if ((state->fe_id ^ dvb_usb_anysee_delsys) == 0) {
+ /* disable DVB-T demod on IOD[6] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6),
0x40);
if (ret)
goto error;
- /* enable IF route on IOE[0] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0),
- 0x01);
+ /* enable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
+ 0x20);
if (ret)
goto error;
/* attach demod */
- adap->fe = dvb_attach(zl10353_attach,
- &anysee_zl10353_tda18212_config,
- &adap->dev->i2c_adap);
+ adap->fe_adap[state->fe_id].fe = dvb_attach(tda10023_attach,
+ &anysee_tda10023_tda18212_config,
+ &adap->dev->i2c_adap, 0x48);
} else {
- /* disable DVB-T demod on IOD[6] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6),
- 0x40);
- if (ret)
- goto error;
-
- /* enable DVB-C demod on IOD[5] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
+ /* disable DVB-C demod on IOD[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
0x20);
if (ret)
goto error;
- /* enable IF route on IOE[0] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0),
- 0x01);
+ /* enable DVB-T demod on IOD[6] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 6),
+ 0x40);
if (ret)
goto error;
/* attach demod */
- adap->fe = dvb_attach(tda10023_attach,
- &anysee_tda10023_tda18212_config,
- &adap->dev->i2c_adap, 0x48);
+ adap->fe_adap[state->fe_id].fe = dvb_attach(zl10353_attach,
+ &anysee_zl10353_tda18212_config,
+ &adap->dev->i2c_adap);
}
break;
@@ -681,6 +800,9 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
/* E7 S2 */
/* E7 PS2 */
+ if (state->fe_id)
+ break;
+
/* enable transport stream on IOA[7] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
if (ret)
@@ -692,13 +814,13 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach demod */
- adap->fe = dvb_attach(stv0900_attach, &anysee_stv0900_config,
+ adap->fe_adap[0].fe = dvb_attach(stv0900_attach, &anysee_stv0900_config,
&adap->dev->i2c_adap, 0);
break;
}
- if (!adap->fe) {
+ if (!adap->fe_adap[0].fe) {
/* we have no frontend :-( */
ret = -ENODEV;
err("Unsupported Anysee version. " \
@@ -713,14 +835,14 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
struct anysee_state *state = adap->dev->priv;
struct dvb_frontend *fe;
int ret;
- deb_info("%s:\n", __func__);
+ deb_info("%s: fe=%d\n", __func__, state->fe_id);
switch (state->hw) {
case ANYSEE_HW_507T: /* 2 */
/* E30 */
/* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe, (0xc2 >> 1),
+ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc2 >> 1),
NULL, DVB_PLL_THOMSON_DTT7579);
break;
@@ -728,7 +850,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
/* E30 Plus */
/* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe, (0xc2 >> 1),
+ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc2 >> 1),
&adap->dev->i2c_adap, DVB_PLL_THOMSON_DTT7579);
break;
@@ -736,7 +858,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
/* E30 C Plus */
/* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe, (0xc0 >> 1),
+ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc0 >> 1),
&adap->dev->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A);
break;
@@ -744,28 +866,14 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
/* E30 S2 Plus */
/* attach LNB controller */
- fe = dvb_attach(isl6423_attach, adap->fe, &adap->dev->i2c_adap,
- &anysee_isl6423_config);
+ fe = dvb_attach(isl6423_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &anysee_isl6423_config);
break;
case ANYSEE_HW_507FA: /* 15 */
/* E30 Combo Plus */
/* E30 C Plus */
- if (dvb_usb_anysee_delsys) {
- /* enable DVB-T tuner on IOE[0] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0),
- 0x01);
- if (ret)
- goto error;
- } else {
- /* enable DVB-C tuner on IOE[0] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0),
- 0x01);
- if (ret)
- goto error;
- }
-
/* Try first attach TDA18212 silicon tuner on IOE[4], if that
* fails attach old simple PLL. */
@@ -775,8 +883,8 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach tuner */
- fe = dvb_attach(tda18212_attach, adap->fe, &adap->dev->i2c_adap,
- &anysee_tda18212_config);
+ fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
+ &adap->dev->i2c_adap, &anysee_tda18212_config);
if (fe)
break;
@@ -786,8 +894,9 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe, (0xc0 >> 1),
- &adap->dev->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A);
+ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[state->fe_id].fe,
+ (0xc0 >> 1), &adap->dev->i2c_adap,
+ DVB_PLL_SAMSUNG_DTOS403IH102A);
break;
case ANYSEE_HW_508TC: /* 18 */
@@ -801,8 +910,8 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach tuner */
- fe = dvb_attach(tda18212_attach, adap->fe, &adap->dev->i2c_adap,
- &anysee_tda18212_config);
+ fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
+ &adap->dev->i2c_adap, &anysee_tda18212_config);
break;
case ANYSEE_HW_508S2: /* 19 */
@@ -811,12 +920,12 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
/* E7 PS2 */
/* attach tuner */
- fe = dvb_attach(stv6110_attach, adap->fe,
+ fe = dvb_attach(stv6110_attach, adap->fe_adap[0].fe,
&anysee_stv6110_config, &adap->dev->i2c_adap);
if (fe) {
/* attach LNB controller */
- fe = dvb_attach(isl6423_attach, adap->fe,
+ fe = dvb_attach(isl6423_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &anysee_isl6423_config);
}
@@ -918,6 +1027,23 @@ static struct dvb_usb_device_properties anysee_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 2,
+ .frontend_ctrl = anysee_frontend_ctrl,
+ .fe = {{
+ .streaming_ctrl = anysee_streaming_ctrl,
+ .frontend_attach = anysee_frontend_attach,
+ .tuner_attach = anysee_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = (16*512),
+ }
+ }
+ },
+ }, {
.streaming_ctrl = anysee_streaming_ctrl,
.frontend_attach = anysee_frontend_attach,
.tuner_attach = anysee_tuner_attach,
@@ -931,6 +1057,7 @@ static struct dvb_usb_device_properties anysee_properties = {
}
}
},
+ }},
}
},
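
The net effect on the anysee adapter description is easier to see outside the hunk context. Below is a minimal sketch of the resulting shape, assuming the dvb-usb properties layout used in the hunks above; stream settings are elided and the struct name is invented, so this is illustrative only, not a drop-in replacement:

	/* sketch only: two frontend slots sharing the same callbacks */
	static struct dvb_usb_device_properties anysee_properties_sketch = {
		.num_adapters = 1,
		.adapter = {
			{
				.num_frontends = 2,
				.frontend_ctrl = anysee_frontend_ctrl,
				.fe = {{
					.streaming_ctrl  = anysee_streaming_ctrl,
					.frontend_attach = anysee_frontend_attach,
					.tuner_attach    = anysee_tuner_attach,
				}, {
					.streaming_ctrl  = anysee_streaming_ctrl,
					.frontend_attach = anysee_frontend_attach,
					.tuner_attach    = anysee_tuner_attach,
				}},
			}
		},
	};

anysee_frontend_attach() is then called once per slot; state->fe_id records which slot is currently being attached, which is why most hardware cases in the switch above simply break out when fe_id is non-zero.
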
diff --git a/drivers/media/dvb/dvb-usb/anysee.h b/drivers/media/dvb/dvb-usb/anysee.h
index ad6ccd1ea2d..57ee500b8c0 100644
--- a/drivers/media/dvb/dvb-usb/anysee.h
+++ b/drivers/media/dvb/dvb-usb/anysee.h
@@ -59,6 +59,7 @@ enum cmd {
struct anysee_state {
u8 hw; /* PCB ID */
u8 seq;
+ u8 fe_id:1; /* frontend ID */
};
#define ANYSEE_HW_507T 2 /* E30 */
diff --git a/drivers/media/dvb/dvb-usb/au6610.c b/drivers/media/dvb/dvb-usb/au6610.c
index 2351077ff2b..b77994967b9 100644
--- a/drivers/media/dvb/dvb-usb/au6610.c
+++ b/drivers/media/dvb/dvb-usb/au6610.c
@@ -140,9 +140,9 @@ static struct zl10353_config au6610_zl10353_config = {
static int au6610_zl10353_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe = dvb_attach(zl10353_attach, &au6610_zl10353_config,
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &au6610_zl10353_config,
&adap->dev->i2c_adap);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -ENODEV;
return 0;
@@ -155,7 +155,7 @@ static struct qt1010_config au6610_qt1010_config = {
static int au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap)
{
return dvb_attach(qt1010_attach,
- adap->fe, &adap->dev->i2c_adap,
+ adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&au6610_qt1010_config) == NULL ? -ENODEV : 0;
}
@@ -204,6 +204,8 @@ static struct dvb_usb_device_properties au6610_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = au6610_zl10353_frontend_attach,
.tuner_attach = au6610_qt1010_tuner_attach,
@@ -219,6 +221,7 @@ static struct dvb_usb_device_properties au6610_properties = {
}
}
},
+ }},
}
},
diff --git a/drivers/media/dvb/dvb-usb/az6027.c b/drivers/media/dvb/dvb-usb/az6027.c
index 57e2444d51a..bf67b4dfd82 100644
--- a/drivers/media/dvb/dvb-usb/az6027.c
+++ b/drivers/media/dvb/dvb-usb/az6027.c
@@ -40,7 +40,6 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_1[] = {
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
- { STB0899_DISFIFO , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x99 },
{ STB0899_DISF22RX , 0xa8 },
@@ -782,7 +781,6 @@ static int az6027_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
u8 buf;
- int ret;
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct i2c_msg i2c_msg = {
@@ -800,17 +798,17 @@ static int az6027_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
switch (voltage) {
case SEC_VOLTAGE_13:
buf = 1;
- ret = i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1);
+ i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1);
break;
case SEC_VOLTAGE_18:
buf = 2;
- ret = i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1);
+ i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1);
break;
case SEC_VOLTAGE_OFF:
buf = 0;
- ret = i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1);
+ i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1);
break;
default:
@@ -910,16 +908,16 @@ static int az6027_frontend_attach(struct dvb_usb_adapter *adap)
az6027_frontend_reset(adap);
deb_info("adap = %p, dev = %p\n", adap, adap->dev);
- adap->fe = stb0899_attach(&az6027_stb0899_config, &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = stb0899_attach(&az6027_stb0899_config, &adap->dev->i2c_adap);
- if (adap->fe) {
+ if (adap->fe_adap[0].fe) {
deb_info("found STB0899 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb0899_config.demod_address);
- if (stb6100_attach(adap->fe, &az6027_stb6100_config, &adap->dev->i2c_adap)) {
+ if (stb6100_attach(adap->fe_adap[0].fe, &az6027_stb6100_config, &adap->dev->i2c_adap)) {
deb_info("found STB6100 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb6100_config.tuner_address);
- adap->fe->ops.set_voltage = az6027_set_voltage;
+ adap->fe_adap[0].fe->ops.set_voltage = az6027_set_voltage;
az6027_ci_init(adap);
} else {
- adap->fe = NULL;
+ adap->fe_adap[0].fe = NULL;
}
} else
warn("no front-end attached\n");
@@ -954,7 +952,6 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int i = 0, j = 0, len = 0;
- int ret;
u16 index;
u16 value;
int length;
@@ -990,7 +987,7 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
value = msg[i].addr + (msg[i].len << 8);
length = msg[i + 1].len + 6;
- ret = az6027_usb_in_op(d, req, value, index, data, length);
+ az6027_usb_in_op(d, req, value, index, data, length);
len = msg[i + 1].len;
for (j = 0; j < len; j++)
msg[i + 1].buf[j] = data[j + 5];
@@ -1017,7 +1014,7 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
index = 0x0;
value = msg[i].addr;
length = msg[i].len + 6;
- ret = az6027_usb_in_op(d, req, value, index, data, length);
+ az6027_usb_in_op(d, req, value, index, data, length);
len = msg[i].len;
for (j = 0; j < len; j++)
msg[i].buf[j] = data[j + 5];
@@ -1106,6 +1103,8 @@ static struct dvb_usb_device_properties az6027_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = az6027_streaming_ctrl,
.frontend_attach = az6027_frontend_attach,
@@ -1120,6 +1119,7 @@ static struct dvb_usb_device_properties az6027_properties = {
}
}
},
+ }},
}
},
/*
diff --git a/drivers/media/dvb/dvb-usb/ce6230.c b/drivers/media/dvb/dvb-usb/ce6230.c
index 6d1a3041540..57afb5a9157 100644
--- a/drivers/media/dvb/dvb-usb/ce6230.c
+++ b/drivers/media/dvb/dvb-usb/ce6230.c
@@ -186,9 +186,9 @@ static struct zl10353_config ce6230_zl10353_config = {
static int ce6230_zl10353_frontend_attach(struct dvb_usb_adapter *adap)
{
deb_info("%s:\n", __func__);
- adap->fe = dvb_attach(zl10353_attach, &ce6230_zl10353_config,
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &ce6230_zl10353_config,
&adap->dev->i2c_adap);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -ENODEV;
return 0;
}
@@ -214,7 +214,7 @@ static int ce6230_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap)
{
int ret;
deb_info("%s:\n", __func__);
- ret = dvb_attach(mxl5005s_attach, adap->fe, &adap->dev->i2c_adap,
+ ret = dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&ce6230_mxl5003s_config) == NULL ? -ENODEV : 0;
return ret;
}
@@ -273,6 +273,8 @@ static struct dvb_usb_device_properties ce6230_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = ce6230_zl10353_frontend_attach,
.tuner_attach = ce6230_mxl5003s_tuner_attach,
.stream = {
@@ -285,6 +287,7 @@ static struct dvb_usb_device_properties ce6230_properties = {
}
}
},
+ }},
}
},
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index 16f2ce2bc15..f9d905002ec 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -69,7 +69,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
char state[3];
int ret;
- adap->fe = cinergyt2_fe_attach(adap->dev);
+ adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
sizeof(state), 0);
@@ -198,6 +198,8 @@ static struct dvb_usb_device_properties cinergyt2_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cinergyt2_streaming_ctrl,
.frontend_attach = cinergyt2_frontend_attach,
@@ -212,6 +214,7 @@ static struct dvb_usb_device_properties cinergyt2_properties = {
}
}
},
+ }},
}
},
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index acb5fb2d2e7..9f2a02c4837 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -347,7 +347,7 @@ static void cxusb_d680_dmb_drain_message(struct dvb_usb_device *d)
static void cxusb_d680_dmb_drain_video(struct dvb_usb_device *d)
{
- struct usb_data_stream_properties *p = &d->props.adapter[0].stream;
+ struct usb_data_stream_properties *p = &d->props.adapter[0].fe[0].stream;
const int timeout = 100;
const int junk_len = p->u.bulk.buffersize;
u8 *junk;
@@ -725,7 +725,7 @@ static struct max2165_config mygica_d689_max2165_cfg = {
/* Callbacks for DVB USB */
static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(simple_tuner_attach, adap->fe,
+ dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3);
return 0;
@@ -733,27 +733,27 @@ static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
static int cxusb_dee1601_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe, 0x61,
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61,
NULL, DVB_PLL_THOMSON_DTT7579);
return 0;
}
static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, DVB_PLL_LG_Z201);
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_LG_Z201);
return 0;
}
static int cxusb_dtt7579_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe, 0x60,
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
NULL, DVB_PLL_THOMSON_DTT7579);
return 0;
}
static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(simple_tuner_attach, adap->fe,
+ dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, 0x61, TUNER_LG_TDVS_H06XF);
return 0;
}
@@ -795,9 +795,9 @@ static int cxusb_dvico_xc3028_tuner_attach(struct dvb_usb_adapter *adap)
};
/* FIXME: generalize & move to common area */
- adap->fe->callback = dvico_bluebird_xc2028_callback;
+ adap->fe_adap[0].fe->callback = dvico_bluebird_xc2028_callback;
- fe = dvb_attach(xc2028_attach, adap->fe, &cfg);
+ fe = dvb_attach(xc2028_attach, adap->fe_adap[0].fe, &cfg);
if (fe == NULL || fe->ops.tuner_ops.set_config == NULL)
return -EIO;
@@ -808,7 +808,7 @@ static int cxusb_dvico_xc3028_tuner_attach(struct dvb_usb_adapter *adap)
static int cxusb_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(mxl5005s_attach, adap->fe,
+ dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &aver_a868r_tuner);
return 0;
}
@@ -816,7 +816,7 @@ static int cxusb_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap)
static int cxusb_d680_dmb_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dvb_frontend *fe;
- fe = dvb_attach(mxl5005s_attach, adap->fe,
+ fe = dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &d680_dmb_tuner);
return (fe == NULL) ? -EIO : 0;
}
@@ -824,7 +824,7 @@ static int cxusb_d680_dmb_tuner_attach(struct dvb_usb_adapter *adap)
static int cxusb_mygica_d689_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dvb_frontend *fe;
- fe = dvb_attach(max2165_attach, adap->fe,
+ fe = dvb_attach(max2165_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &mygica_d689_max2165_cfg);
return (fe == NULL) ? -EIO : 0;
}
@@ -837,8 +837,9 @@ static int cxusb_cx22702_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, &b, 1);
- if ((adap->fe = dvb_attach(cx22702_attach, &cxusb_cx22702_config,
- &adap->dev->i2c_adap)) != NULL)
+ adap->fe_adap[0].fe = dvb_attach(cx22702_attach, &cxusb_cx22702_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL)
return 0;
return -EIO;
@@ -851,8 +852,10 @@ static int cxusb_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0);
- if ((adap->fe = dvb_attach(lgdt330x_attach, &cxusb_lgdt3303_config,
- &adap->dev->i2c_adap)) != NULL)
+ adap->fe_adap[0].fe = dvb_attach(lgdt330x_attach,
+ &cxusb_lgdt3303_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL)
return 0;
return -EIO;
@@ -860,9 +863,9 @@ static int cxusb_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap)
static int cxusb_aver_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe = dvb_attach(lgdt330x_attach, &cxusb_aver_lgdt3303_config,
+ adap->fe_adap[0].fe = dvb_attach(lgdt330x_attach, &cxusb_aver_lgdt3303_config,
&adap->dev->i2c_adap);
- if (adap->fe != NULL)
+ if (adap->fe_adap[0].fe != NULL)
return 0;
return -EIO;
@@ -876,8 +879,9 @@ static int cxusb_mt352_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0);
- if ((adap->fe = dvb_attach(mt352_attach, &cxusb_mt352_config,
- &adap->dev->i2c_adap)) != NULL)
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach, &cxusb_mt352_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL)
return 0;
return -EIO;
@@ -890,11 +894,15 @@ static int cxusb_dee1601_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0);
- if (((adap->fe = dvb_attach(mt352_attach, &cxusb_dee1601_config,
- &adap->dev->i2c_adap)) != NULL) ||
- ((adap->fe = dvb_attach(zl10353_attach,
- &cxusb_zl10353_dee1601_config,
- &adap->dev->i2c_adap)) != NULL))
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach, &cxusb_dee1601_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL)
+ return 0;
+
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
+ &cxusb_zl10353_dee1601_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL)
return 0;
return -EIO;
@@ -917,9 +925,11 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_bluebird_gpio_pulse(adap->dev, 0x01, 1);
cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
- if ((adap->fe = dvb_attach(zl10353_attach,
- &cxusb_zl10353_xc3028_config_no_i2c_gate,
- &adap->dev->i2c_adap)) == NULL)
+ adap->fe_adap[0].fe =
+ dvb_attach(zl10353_attach,
+ &cxusb_zl10353_xc3028_config_no_i2c_gate,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) == NULL)
return -EIO;
/* try to determine if there is no IR decoder on the I2C bus */
@@ -1031,9 +1041,9 @@ static int cxusb_dualdig4_rev2_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&cxusb_dualdig4_rev2_config);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
return 0;
@@ -1084,15 +1094,15 @@ static int cxusb_dualdig4_rev2_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
struct i2c_adapter *tun_i2c =
- dib7000p_get_i2c_master(adap->fe,
+ dib7000p_get_i2c_master(adap->fe_adap[0].fe,
DIBX000_I2C_INTERFACE_TUNER, 1);
- if (dvb_attach(dib0070_attach, adap->fe, tun_i2c,
+ if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c,
&dib7070p_dib0070_config) == NULL)
return -ENODEV;
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib7070_set_param_override;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7070_set_param_override;
return 0;
}
@@ -1108,14 +1118,16 @@ static int cxusb_nano2_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_bluebird_gpio_pulse(adap->dev, 0x01, 1);
cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
- if ((adap->fe = dvb_attach(zl10353_attach,
- &cxusb_zl10353_xc3028_config,
- &adap->dev->i2c_adap)) != NULL)
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
+ &cxusb_zl10353_xc3028_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL)
return 0;
- if ((adap->fe = dvb_attach(mt352_attach,
- &cxusb_mt352_xc3028_config,
- &adap->dev->i2c_adap)) != NULL)
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach,
+ &cxusb_mt352_xc3028_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL)
return 0;
return -EIO;
@@ -1150,7 +1162,7 @@ static int cxusb_d680_dmb_frontend_attach(struct dvb_usb_adapter *adap)
usb_clear_halt(d->udev,
usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.adapter[0].stream.endpoint));
+ usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
/* Drain USB pipes to avoid hang after reboot */
for (n = 0; n < 5; n++) {
@@ -1172,8 +1184,8 @@ static int cxusb_d680_dmb_frontend_attach(struct dvb_usb_adapter *adap)
msleep(100);
/* Attach frontend */
- adap->fe = dvb_attach(lgs8gxx_attach, &d680_lgs8gl5_cfg, &d->i2c_adap);
- if (adap->fe == NULL)
+ adap->fe_adap[0].fe = dvb_attach(lgs8gxx_attach, &d680_lgs8gl5_cfg, &d->i2c_adap);
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
return 0;
@@ -1207,7 +1219,7 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
usb_clear_halt(d->udev,
usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.adapter[0].stream.endpoint));
+ usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
/* Reset the tuner */
@@ -1223,9 +1235,9 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
msleep(100);
/* Attach frontend */
- adap->fe = dvb_attach(atbm8830_attach, &mygica_d689_atbm8830_cfg,
+ adap->fe_adap[0].fe = dvb_attach(atbm8830_attach, &mygica_d689_atbm8830_cfg,
&d->i2c_adap);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
return 0;
@@ -1383,6 +1395,8 @@ static struct dvb_usb_device_properties cxusb_medion_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_cx22702_frontend_attach,
.tuner_attach = cxusb_fmd1216me_tuner_attach,
@@ -1397,7 +1411,7 @@ static struct dvb_usb_device_properties cxusb_medion_properties = {
}
}
},
-
+ }},
},
},
.power_ctrl = cxusb_power_ctrl,
@@ -1429,6 +1443,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_lgdt3303_frontend_attach,
.tuner_attach = cxusb_lgh064f_tuner_attach,
@@ -1444,6 +1460,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
}
}
},
+ }},
},
},
@@ -1483,6 +1500,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_dee1601_frontend_attach,
.tuner_attach = cxusb_dee1601_tuner_attach,
@@ -1497,6 +1516,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
}
}
},
+ }},
},
},
@@ -1544,6 +1564,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_mt352_frontend_attach,
.tuner_attach = cxusb_lgz201_tuner_attach,
@@ -1559,6 +1581,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
}
}
},
+ }},
},
},
.power_ctrl = cxusb_bluebird_power_ctrl,
@@ -1596,6 +1619,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_mt352_frontend_attach,
.tuner_attach = cxusb_dtt7579_tuner_attach,
@@ -1611,6 +1636,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
}
}
},
+ }},
},
},
.power_ctrl = cxusb_bluebird_power_ctrl,
@@ -1645,6 +1671,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_dualdig4_frontend_attach,
.tuner_attach = cxusb_dvico_xc3028_tuner_attach,
@@ -1659,6 +1687,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
}
}
},
+ }},
},
},
@@ -1695,6 +1724,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_nano2_frontend_attach,
.tuner_attach = cxusb_dvico_xc3028_tuner_attach,
@@ -1709,6 +1740,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
}
}
},
+ }},
},
},
@@ -1747,6 +1779,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_nano2_frontend_attach,
.tuner_attach = cxusb_dvico_xc3028_tuner_attach,
@@ -1761,6 +1795,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
}
}
},
+ }},
},
},
@@ -1796,6 +1831,8 @@ static struct dvb_usb_device_properties cxusb_aver_a868r_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_aver_streaming_ctrl,
.frontend_attach = cxusb_aver_lgdt3303_frontend_attach,
.tuner_attach = cxusb_mxl5003s_tuner_attach,
@@ -1810,7 +1847,7 @@ static struct dvb_usb_device_properties cxusb_aver_a868r_properties = {
}
}
},
-
+ }},
},
},
.power_ctrl = cxusb_aver_power_ctrl,
@@ -1839,10 +1876,12 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
.num_adapters = 1,
.adapter = {
{
+ .size_of_priv = sizeof(struct dib0700_adapter_state),
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
.frontend_attach = cxusb_dualdig4_rev2_frontend_attach,
.tuner_attach = cxusb_dualdig4_rev2_tuner_attach,
- .size_of_priv = sizeof(struct dib0700_adapter_state),
/* parameter for the MPEG2-data transfer */
.stream = {
.type = USB_BULK,
@@ -1854,6 +1893,7 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
}
}
},
+ }},
},
},
@@ -1889,6 +1929,8 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_d680_dmb_streaming_ctrl,
.frontend_attach = cxusb_d680_dmb_frontend_attach,
.tuner_attach = cxusb_d680_dmb_tuner_attach,
@@ -1904,6 +1946,7 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
}
}
},
+ }},
},
},
@@ -1940,6 +1983,8 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = cxusb_d680_dmb_streaming_ctrl,
.frontend_attach = cxusb_mygica_d689_frontend_attach,
.tuner_attach = cxusb_mygica_d689_tuner_attach,
@@ -1955,6 +2000,7 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
}
}
},
+ }},
},
},
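
The mechanical part of the cxusb.c conversion follows one pattern throughout: the attach result is stored in the first fe_adap slot and the assignment is lifted out of the if condition. A minimal sketch of that pattern, reusing the zl10353 names from the hunks above (illustrative only, not an additional callback added by the patch):

	static int cxusb_example_frontend_attach(struct dvb_usb_adapter *adap)
	{
		/* store the demod in frontend slot 0 instead of adap->fe */
		adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
						 &cxusb_zl10353_xc3028_config,
						 &adap->dev->i2c_adap);
		if (adap->fe_adap[0].fe != NULL)
			return 0;

		return -EIO;
	}
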
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 5eb91b4f8fd..156cbfc9c79 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -30,6 +30,11 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
struct dib0700_state *st = d->priv;
int ret;
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
+
ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
REQUEST_GET_VERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
@@ -46,6 +51,7 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
if (fwtype != NULL)
*fwtype = (st->buf[12] << 24) | (st->buf[13] << 16) |
(st->buf[14] << 8) | st->buf[15];
+ mutex_unlock(&d->usb_mutex);
return ret;
}
@@ -108,7 +114,12 @@ int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen
int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val)
{
struct dib0700_state *st = d->priv;
- s16 ret;
+ int ret;
+
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
st->buf[0] = REQUEST_SET_GPIO;
st->buf[1] = gpio;
@@ -116,6 +127,7 @@ int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_
ret = dib0700_ctrl_wr(d, st->buf, 3);
+ mutex_unlock(&d->usb_mutex);
return ret;
}
@@ -125,6 +137,11 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
int ret;
if (st->fw_version >= 0x10201) {
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
+
st->buf[0] = REQUEST_SET_USB_XFER_LEN;
st->buf[1] = (nb_ts_packets >> 8) & 0xff;
st->buf[2] = nb_ts_packets & 0xff;
@@ -132,6 +149,7 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
deb_info("set the USB xfer len to %i Ts packet\n", nb_ts_packets);
ret = dib0700_ctrl_wr(d, st->buf, 3);
+ mutex_unlock(&d->usb_mutex);
} else {
deb_info("this firmware does not allow to change the USB xfer len\n");
ret = -EIO;
@@ -208,6 +226,10 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
} else {
/* Write request */
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
st->buf[0] = REQUEST_NEW_I2C_WRITE;
st->buf[1] = msg[i].addr << 1;
st->buf[2] = (en_start << 7) | (en_stop << 6) |
@@ -227,6 +249,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
USB_TYPE_VENDOR | USB_DIR_OUT,
0, 0, st->buf, msg[i].len + 4,
USB_CTRL_GET_TIMEOUT);
+ mutex_unlock(&d->usb_mutex);
if (result < 0) {
deb_info("i2c write error (status = %d)\n", result);
break;
@@ -249,6 +272,10 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
for (i = 0; i < num; i++) {
/* fill in the address */
@@ -279,6 +306,7 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
break;
}
}
+ mutex_unlock(&d->usb_mutex);
mutex_unlock(&d->i2c_mutex);
return i;
@@ -337,7 +365,12 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
u16 pll_loopdiv, u16 free_div, u16 dsuScaler)
{
struct dib0700_state *st = d->priv;
- s16 ret;
+ int ret;
+
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
st->buf[0] = REQUEST_SET_CLOCK;
st->buf[1] = (en_pll << 7) | (pll_src << 6) |
@@ -352,6 +385,7 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
st->buf[9] = dsuScaler & 0xff; /* LSB */
ret = dib0700_ctrl_wr(d, st->buf, 10);
+ mutex_unlock(&d->usb_mutex);
return ret;
}
@@ -360,10 +394,16 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
{
struct dib0700_state *st = d->priv;
u16 divider;
+ int ret;
if (scl_kHz == 0)
return -EINVAL;
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
+
st->buf[0] = REQUEST_SET_I2C_PARAM;
divider = (u16) (30000 / scl_kHz);
st->buf[1] = 0;
@@ -379,7 +419,11 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
deb_info("setting I2C speed: %04x %04x %04x (%d kHz).",
(st->buf[2] << 8) | (st->buf[3]), (st->buf[4] << 8) |
st->buf[5], (st->buf[6] << 8) | st->buf[7], scl_kHz);
- return dib0700_ctrl_wr(d, st->buf, 8);
+
+ ret = dib0700_ctrl_wr(d, st->buf, 8);
+ mutex_unlock(&d->usb_mutex);
+
+ return ret;
}
@@ -484,13 +528,13 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
for (adap_num = 0; adap_num < dib0700_devices[i].num_adapters;
adap_num++) {
if (fw_version >= 0x10201) {
- dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = 188*nb_packet_buffer_size;
+ dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = 188*nb_packet_buffer_size;
} else {
/* for fw version older than 1.20.1,
* the buffersize has to be n times 512 */
- dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = ((188*nb_packet_buffer_size+188/2)/512)*512;
- if (dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize < 512)
- dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = 512;
+ dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = ((188*nb_packet_buffer_size+188/2)/512)*512;
+ if (dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize < 512)
+ dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = 512;
}
}
}
@@ -515,6 +559,11 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
}
+ if (mutex_lock_interruptible(&adap->dev->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
+
st->buf[0] = REQUEST_ENABLE_VIDEO;
/* this bit gives a kind of command,
* rather than enabling something or not */
@@ -530,25 +579,28 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
deb_info("modifying (%d) streaming state for %d\n", onoff, adap->id);
st->channel_state &= ~0x3;
- if ((adap->stream.props.endpoint != 2)
- && (adap->stream.props.endpoint != 3)) {
- deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->stream.props.endpoint);
+ if ((adap->fe_adap[0].stream.props.endpoint != 2)
+ && (adap->fe_adap[0].stream.props.endpoint != 3)) {
+ deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->fe_adap[0].stream.props.endpoint);
if (onoff)
st->channel_state |= 1 << (adap->id);
else
st->channel_state |= 1 << ~(adap->id);
} else {
if (onoff)
- st->channel_state |= 1 << (adap->stream.props.endpoint-2);
+ st->channel_state |= 1 << (adap->fe_adap[0].stream.props.endpoint-2);
else
- st->channel_state |= 1 << (3-adap->stream.props.endpoint);
+ st->channel_state |= 1 << (3-adap->fe_adap[0].stream.props.endpoint);
}
st->buf[2] |= st->channel_state;
deb_info("data for streaming: %x %x\n", st->buf[1], st->buf[2]);
- return dib0700_ctrl_wr(adap->dev, st->buf, 4);
+ ret = dib0700_ctrl_wr(adap->dev, st->buf, 4);
+ mutex_unlock(&adap->dev->usb_mutex);
+
+ return ret;
}
int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
@@ -557,6 +609,11 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
struct dib0700_state *st = d->priv;
int new_proto, ret;
+ if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+ err("could not acquire lock");
+ return 0;
+ }
+
st->buf[0] = REQUEST_SET_RC;
st->buf[1] = 0;
st->buf[2] = 0;
@@ -567,23 +624,29 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
else if (rc_type == RC_TYPE_NEC)
new_proto = 0;
else if (rc_type == RC_TYPE_RC6) {
- if (st->fw_version < 0x10200)
- return -EINVAL;
+ if (st->fw_version < 0x10200) {
+ ret = -EINVAL;
+ goto out;
+ }
new_proto = 2;
- } else
- return -EINVAL;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
st->buf[1] = new_proto;
ret = dib0700_ctrl_wr(d, st->buf, 3);
if (ret < 0) {
err("ir protocol setup failed");
- return ret;
+ goto out;
}
d->props.rc.core.protocol = rc_type;
+out:
+ mutex_unlock(&d->usb_mutex);
return ret;
}
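
The dib0700_core.c changes all wrap accesses to the shared st->buf request buffer in the device's usb_mutex. A minimal sketch of that pattern, using only calls visible in the hunks above; the request bytes are elided and, as in the patch, a failed lock is logged and reported as 0:

	static int dib0700_example_request(struct dvb_usb_device *d, u8 gpio)
	{
		struct dib0700_state *st = d->priv;
		int ret;

		if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
			err("could not acquire lock");
			return 0;
		}

		/* build the request in the shared buffer while holding the lock */
		st->buf[0] = REQUEST_SET_GPIO;
		st->buf[1] = gpio;
		st->buf[2] = 0;	/* direction/value bits elided in this sketch */

		ret = dib0700_ctrl_wr(d, st->buf, 3);

		mutex_unlock(&d->usb_mutex);
		return ret;
	}
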
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index d0ea5b64f6b..f313182eb9d 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -101,7 +101,7 @@ static int bristol_frontend_attach(struct dvb_usb_adapter *adap)
}
}
st->mt2060_if1[adap->id] = 1220;
- return (adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap,
+ return (adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap,
(10 + adap->id) << 1, &bristol_dib3000mc_config[adap->id])) == NULL ? -ENODEV : 0;
}
@@ -118,15 +118,16 @@ static int eeprom_read(struct i2c_adapter *adap,u8 adrs,u8 *pval)
static int bristol_tuner_attach(struct dvb_usb_adapter *adap)
{
struct i2c_adapter *prim_i2c = &adap->dev->i2c_adap;
- struct i2c_adapter *tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe, 1);
+ struct i2c_adapter *tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1);
s8 a;
int if1=1220;
if (adap->dev->udev->descriptor.idVendor == cpu_to_le16(USB_VID_HAUPPAUGE) &&
adap->dev->udev->descriptor.idProduct == cpu_to_le16(USB_PID_HAUPPAUGE_NOVA_T_500_2)) {
if (!eeprom_read(prim_i2c,0x59 + adap->id,&a)) if1=1220+a;
}
- return dvb_attach(mt2060_attach,adap->fe, tun_i2c,&bristol_mt2060_config[adap->id],
- if1) == NULL ? -ENODEV : 0;
+ return dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c,
+ &bristol_mt2060_config[adap->id], if1) == NULL ?
+ -ENODEV : 0;
}
/* STK7700D: Pinnacle/Terratec/Hauppauge Dual DVB-T Diversity */
@@ -279,10 +280,12 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap)
}
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap,0x80+(adap->id << 1),
- &stk7700d_dib7000p_mt2266_config[adap->id]);
+ adap->fe_adap[0].fe =
+ dvb_attach(dib7000p_attach, &adap->dev->i2c_adap,
+ 0x80 + (adap->id << 1),
+ &stk7700d_dib7000p_mt2266_config[adap->id]);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
@@ -306,17 +309,19 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
}
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap,0x80+(adap->id << 1),
- &stk7700d_dib7000p_mt2266_config[adap->id]);
+ adap->fe_adap[0].fe =
+ dvb_attach(dib7000p_attach, &adap->dev->i2c_adap,
+ 0x80 + (adap->id << 1),
+ &stk7700d_dib7000p_mt2266_config[adap->id]);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap)
{
struct i2c_adapter *tun_i2c;
- tun_i2c = dib7000p_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
- return dvb_attach(mt2266_attach, adap->fe, tun_i2c,
+ tun_i2c = dib7000p_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ return dvb_attach(mt2266_attach, adap->fe_adap[0].fe, tun_i2c,
&stk7700d_mt2266_config[adap->id]) == NULL ? -ENODEV : 0;
}
@@ -396,8 +401,8 @@ static int stk7700ph_xc3028_callback(void *ptr, int component,
switch (command) {
case XC2028_TUNER_RESET:
/* Send the tuner in then out of reset */
- dib7000p_set_gpio(adap->fe, 8, 0, 0); msleep(10);
- dib7000p_set_gpio(adap->fe, 8, 0, 1);
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 0); msleep(10);
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
break;
case XC2028_RESET_CLK:
break;
@@ -447,25 +452,25 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&stk7700ph_dib7700_xc3028_config);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
{
struct i2c_adapter *tun_i2c;
- tun_i2c = dib7000p_get_i2c_master(adap->fe,
+ tun_i2c = dib7000p_get_i2c_master(adap->fe_adap[0].fe,
DIBX000_I2C_INTERFACE_TUNER, 1);
stk7700ph_xc3028_config.i2c_adap = tun_i2c;
/* FIXME: generalize & move to common area */
- adap->fe->callback = stk7700ph_xc3028_callback;
+ adap->fe_adap[0].fe->callback = stk7700ph_xc3028_callback;
- return dvb_attach(xc2028_attach, adap->fe, &stk7700ph_xc3028_config)
+ return dvb_attach(xc2028_attach, adap->fe_adap[0].fe, &stk7700ph_xc3028_config)
== NULL ? -ENODEV : 0;
}
@@ -685,12 +690,12 @@ static int stk7700p_frontend_attach(struct dvb_usb_adapter *adap)
st->mt2060_if1[0] = 1220;
if (dib7000pc_detection(&adap->dev->i2c_adap)) {
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000p_config);
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000p_config);
st->is_dib7000pc = 1;
} else
- adap->fe = dvb_attach(dib7000m_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000m_config);
+ adap->fe_adap[0].fe = dvb_attach(dib7000m_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000m_config);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static struct mt2060_config stk7700p_mt2060_config = {
@@ -709,11 +714,11 @@ static int stk7700p_tuner_attach(struct dvb_usb_adapter *adap)
if (!eeprom_read(prim_i2c,0x58,&a)) if1=1220+a;
}
if (st->is_dib7000pc)
- tun_i2c = dib7000p_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ tun_i2c = dib7000p_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
else
- tun_i2c = dib7000m_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ tun_i2c = dib7000m_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
- return dvb_attach(mt2060_attach, adap->fe, tun_i2c, &stk7700p_mt2060_config,
+ return dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk7700p_mt2060_config,
if1) == NULL ? -ENODEV : 0;
}
@@ -843,33 +848,33 @@ static int dib7770_set_param_override(struct dvb_frontend *fe,
static int dib7770p_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib7000p_get_i2c_master(adap->fe,
+ struct i2c_adapter *tun_i2c = dib7000p_get_i2c_master(adap->fe_adap[0].fe,
DIBX000_I2C_INTERFACE_TUNER, 1);
- if (dvb_attach(dib0070_attach, adap->fe, tun_i2c,
- &dib7770p_dib0070_config) == NULL)
+ if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c,
+ &dib7770p_dib0070_config) == NULL)
return -ENODEV;
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib7770_set_param_override;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7770_set_param_override;
return 0;
}
static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib7000p_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ struct i2c_adapter *tun_i2c = dib7000p_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
if (adap->id == 0) {
- if (dvb_attach(dib0070_attach, adap->fe, tun_i2c, &dib7070p_dib0070_config[0]) == NULL)
+ if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib7070p_dib0070_config[0]) == NULL)
return -ENODEV;
} else {
- if (dvb_attach(dib0070_attach, adap->fe, tun_i2c, &dib7070p_dib0070_config[1]) == NULL)
+ if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib7070p_dib0070_config[1]) == NULL)
return -ENODEV;
}
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib7070_set_param_override;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7070_set_param_override;
return 0;
}
@@ -878,26 +883,26 @@ static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
{
struct dib0700_state *st = adapter->dev->priv;
if (st->is_dib7000pc)
- return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
- return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
+ return dib7000p_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff);
+ return dib7000m_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff);
}
static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
{
struct dib0700_state *st = adapter->dev->priv;
if (st->is_dib7000pc)
- return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
- return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
+ return dib7000p_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff);
+ return dib7000m_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff);
}
static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
{
- return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
+ return dib7000p_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff);
}
static int stk70x0p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
{
- return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
+ return dib7000p_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff);
}
static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = {
@@ -955,9 +960,9 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&dib7070p_dib7000p_config);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
/* STK7770P */
@@ -1007,9 +1012,9 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&dib7770p_dib7000p_config);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
/* DIB807x generic */
@@ -1225,34 +1230,34 @@ static int dib807x_set_param_override(struct dvb_frontend *fe,
static int dib807x_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe,
+ struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe_adap[0].fe,
DIBX000_I2C_INTERFACE_TUNER, 1);
if (adap->id == 0) {
- if (dvb_attach(dib0070_attach, adap->fe, tun_i2c,
+ if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c,
&dib807x_dib0070_config[0]) == NULL)
return -ENODEV;
} else {
- if (dvb_attach(dib0070_attach, adap->fe, tun_i2c,
+ if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c,
&dib807x_dib0070_config[1]) == NULL)
return -ENODEV;
}
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib807x_set_param_override;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib807x_set_param_override;
return 0;
}
static int stk80xx_pid_filter(struct dvb_usb_adapter *adapter, int index,
u16 pid, int onoff)
{
- return dib8000_pid_filter(adapter->fe, index, pid, onoff);
+ return dib8000_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff);
}
static int stk80xx_pid_filter_ctrl(struct dvb_usb_adapter *adapter,
int onoff)
{
- return dib8000_pid_filter_ctrl(adapter->fe, onoff);
+ return dib8000_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff);
}
/* STK807x */
@@ -1276,10 +1281,10 @@ static int stk807x_frontend_attach(struct dvb_usb_adapter *adap)
dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
0x80);
- adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
+ adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
&dib807x_dib8000_config[0]);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
/* STK807xPVR */
@@ -1305,10 +1310,10 @@ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap)
/* initialize IC 0 */
dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x80);
- adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
+ adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
&dib807x_dib8000_config[0]);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
@@ -1316,10 +1321,10 @@ static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
/* initialize IC 1 */
dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x82);
- adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82,
+ adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82,
&dib807x_dib8000_config[1]);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
/* STK8096GP */
@@ -1546,13 +1551,13 @@ static int dib8096_set_param_override(struct dvb_frontend *fe,
static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
- if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
return -ENODEV;
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib8096_set_param_override;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override;
return 0;
}
@@ -1575,30 +1580,30 @@ static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80);
- adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
+ adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
struct i2c_adapter *tun_i2c;
- struct dvb_frontend *fe_slave = dib8000_get_slave_frontend(adap->fe, 1);
+ struct dvb_frontend *fe_slave = dib8000_get_slave_frontend(adap->fe_adap[0].fe, 1);
if (fe_slave) {
tun_i2c = dib8000_get_i2c_master(fe_slave, DIBX000_I2C_INTERFACE_TUNER, 1);
if (dvb_attach(dib0090_register, fe_slave, tun_i2c, &dib809x_dib0090_config) == NULL)
return -ENODEV;
- fe_slave->dvb = adap->fe->dvb;
+ fe_slave->dvb = adap->fe_adap[0].fe->dvb;
fe_slave->ops.tuner_ops.set_params = dib8096_set_param_override;
}
- tun_i2c = dib8000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
- if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+ tun_i2c = dib8000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
return -ENODEV;
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib8096_set_param_override;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override;
return 0;
}
@@ -1626,12 +1631,12 @@ static int nim8096md_frontend_attach(struct dvb_usb_adapter *adap)
dib8000_i2c_enumeration(&adap->dev->i2c_adap, 2, 18, 0x80);
- adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
- if (adap->fe == NULL)
+ adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
+ if (adap->fe_adap[0].fe == NULL)
return -ENODEV;
fe_slave = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]);
- dib8000_set_slave_frontend(adap->fe, fe_slave);
+ dib8000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
return fe_slave == NULL ? -ENODEV : 0;
}
@@ -1639,12 +1644,12 @@ static int nim8096md_frontend_attach(struct dvb_usb_adapter *adap)
/* STK9090M */
static int dib90x0_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
{
- return dib9000_fw_pid_filter(adapter->fe, index, pid, onoff);
+ return dib9000_fw_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff);
}
static int dib90x0_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
{
- return dib9000_fw_pid_filter_ctrl(adapter->fe, onoff);
+ return dib9000_fw_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff);
}
static int dib90x0_tuner_reset(struct dvb_frontend *fe, int onoff)
@@ -1856,15 +1861,15 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
stk9090m_config.microcode_B_fe_size = state->frontend_firmware->size;
stk9090m_config.microcode_B_fe_buffer = state->frontend_firmware->data;
- adap->fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
+ adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *state = adap->priv;
- struct i2c_adapter *i2c = dib9000_get_tuner_interface(adap->fe);
+ struct i2c_adapter *i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
u16 data_dib190[10] = {
1, 0x1374,
2, 0x01a2,
@@ -1873,13 +1878,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
8, 0x0486,
};
- if (dvb_attach(dib0090_fw_register, adap->fe, i2c, &dib9090_dib0090_config) == NULL)
+ if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
return -ENODEV;
- i2c = dib9000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
return -ENODEV;
dib0700_set_i2c_speed(adap->dev, 2000);
- if (dib9000_firmware_post_pll_init(adap->fe) < 0)
+ if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0)
return -ENODEV;
release_firmware(state->frontend_firmware);
return 0;
@@ -1925,16 +1930,16 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
nim9090md_config[1].microcode_B_fe_buffer = state->frontend_firmware->data;
dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
- adap->fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
+ adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -ENODEV;
- i2c = dib9000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
- dib9000_set_slave_frontend(adap->fe, fe_slave);
+ dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
return fe_slave == NULL ? -ENODEV : 0;
}
@@ -1951,26 +1956,26 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
0, 0x00ef,
8, 0x0406,
};
- i2c = dib9000_get_tuner_interface(adap->fe);
- if (dvb_attach(dib0090_fw_register, adap->fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
+ i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
+ if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
return -ENODEV;
- i2c = dib9000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
return -ENODEV;
dib0700_set_i2c_speed(adap->dev, 2000);
- if (dib9000_firmware_post_pll_init(adap->fe) < 0)
+ if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0)
return -ENODEV;
- fe_slave = dib9000_get_slave_frontend(adap->fe, 1);
+ fe_slave = dib9000_get_slave_frontend(adap->fe_adap[0].fe, 1);
if (fe_slave != NULL) {
- i2c = dib9000_get_component_bus_interface(adap->fe);
+ i2c = dib9000_get_component_bus_interface(adap->fe_adap[0].fe);
dib9000_set_i2c_adapter(fe_slave, i2c);
i2c = dib9000_get_tuner_interface(fe_slave);
if (dvb_attach(dib0090_fw_register, fe_slave, i2c, &nim9090md_dib0090_config[1]) == NULL)
return -ENODEV;
- fe_slave->dvb = adap->fe->dvb;
- dib9000_fw_set_component_bus_speed(adap->fe, 2000);
+ fe_slave->dvb = adap->fe_adap[0].fe->dvb;
+ dib9000_fw_set_component_bus_speed(adap->fe_adap[0].fe, 2000);
if (dib9000_firmware_post_pll_init(fe_slave) < 0)
return -ENODEV;
}
@@ -2393,23 +2398,23 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", __func__);
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int nim7090_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe);
+ struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe_adap[0].fe);
- if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &nim7090_dib0090_config) == NULL)
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &nim7090_dib0090_config) == NULL)
return -ENODEV;
- dib7000p_set_gpio(adap->fe, 8, 0, 1);
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup;
return 0;
}
@@ -2439,11 +2444,11 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
}
dib0700_set_i2c_speed(adap->dev, 340);
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]);
- if (adap->fe == NULL)
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]);
+ if (adap->fe_adap[0].fe == NULL)
return -ENODEV;
- dib7090_slave_reset(adap->fe);
+ dib7090_slave_reset(adap->fe_adap[0].fe);
return 0;
}
@@ -2452,50 +2457,50 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
{
struct i2c_adapter *i2c;
- if (adap->dev->adapter[0].fe == NULL) {
+ if (adap->dev->adapter[0].fe_adap[0].fe == NULL) {
err("the master dib7090 has to be initialized first");
return -ENODEV; /* the master device has not been initialized */
}
- i2c = dib7000p_get_i2c_master(adap->dev->adapter[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
+ i2c = dib7000p_get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
if (dib7000p_i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", __func__);
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, i2c, 0x92, &tfe7090pvr_dib7000p_config[1]);
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, i2c, 0x92, &tfe7090pvr_dib7000p_config[1]);
dib0700_set_i2c_speed(adap->dev, 200);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int tfe7090pvr_tuner0_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe);
+ struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe_adap[0].fe);
- if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &tfe7090pvr_dib0090_config[0]) == NULL)
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7090pvr_dib0090_config[0]) == NULL)
return -ENODEV;
- dib7000p_set_gpio(adap->fe, 8, 0, 1);
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup;
return 0;
}
static int tfe7090pvr_tuner1_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
- struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe);
+ struct i2c_adapter *tun_i2c = dib7090_get_i2c_tuner(adap->fe_adap[0].fe);
- if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &tfe7090pvr_dib0090_config[1]) == NULL)
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7090pvr_dib0090_config[1]) == NULL)
return -ENODEV;
- dib7000p_set_gpio(adap->fe, 8, 0, 1);
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
- st->set_param_save = adap->fe->ops.tuner_ops.set_params;
- adap->fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup;
return 0;
}
@@ -2555,14 +2560,14 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, &stk7070pd_dib7000p_config[0]);
- return adap->fe == NULL ? -ENODEV : 0;
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, &stk7070pd_dib7000p_config[0]);
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
{
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x82, &stk7070pd_dib7000p_config[1]);
- return adap->fe == NULL ? -ENODEV : 0;
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x82, &stk7070pd_dib7000p_config[1]);
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
/* S5H1411 */
@@ -2617,9 +2622,9 @@ static int s5h1411_frontend_attach(struct dvb_usb_adapter *adap)
dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1);
/* GPIOs are initialized, do the attach */
- adap->fe = dvb_attach(s5h1411_attach, &pinnacle_801e_config,
+ adap->fe_adap[0].fe = dvb_attach(s5h1411_attach, &pinnacle_801e_config,
&adap->dev->i2c_adap);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int dib0700_xc5000_tuner_callback(void *priv, int component,
@@ -2649,9 +2654,9 @@ static struct xc5000_config s5h1411_xc5000_tunerconfig = {
static int xc5000_tuner_attach(struct dvb_usb_adapter *adap)
{
/* FIXME: generalize & move to common area */
- adap->fe->callback = dib0700_xc5000_tuner_callback;
+ adap->fe_adap[0].fe->callback = dib0700_xc5000_tuner_callback;
- return dvb_attach(xc5000_attach, adap->fe, &adap->dev->i2c_adap,
+ return dvb_attach(xc5000_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&s5h1411_xc5000_tunerconfig)
== NULL ? -ENODEV : 0;
}
@@ -2663,9 +2668,9 @@ static int dib0700_xc4000_tuner_callback(void *priv, int component,
if (command == XC4000_TUNER_RESET) {
/* Reset the tuner */
- dib7000p_set_gpio(adap->fe, 8, 0, 0);
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 0);
msleep(10);
- dib7000p_set_gpio(adap->fe, 8, 0, 1);
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
} else {
err("xc4000: unknown tuner callback command: %d\n", command);
return -EINVAL;
@@ -2771,11 +2776,11 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x12,
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x12,
&pctv_340e_config);
st->is_dib7000pc = 1;
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static struct xc4000_config dib7000p_xc4000_tunerconfig = {
@@ -2791,7 +2796,7 @@ static int xc4000_tuner_attach(struct dvb_usb_adapter *adap)
struct i2c_adapter *tun_i2c;
/* The xc4000 is not on the main i2c bus */
- tun_i2c = dib7000p_get_i2c_master(adap->fe,
+ tun_i2c = dib7000p_get_i2c_master(adap->fe_adap[0].fe,
DIBX000_I2C_INTERFACE_TUNER, 1);
if (tun_i2c == NULL) {
printk(KERN_ERR "Could not reach tuner i2c bus\n");
@@ -2799,9 +2804,9 @@ static int xc4000_tuner_attach(struct dvb_usb_adapter *adap)
}
/* Setup the reset callback */
- adap->fe->callback = dib0700_xc4000_tuner_callback;
+ adap->fe_adap[0].fe->callback = dib0700_xc4000_tuner_callback;
- return dvb_attach(xc4000_attach, adap->fe, tun_i2c,
+ return dvb_attach(xc4000_attach, adap->fe_adap[0].fe, tun_i2c,
&dib7000p_xc4000_tunerconfig)
== NULL ? -ENODEV : 0;
}
@@ -2857,16 +2862,16 @@ static int lgdt3305_frontend_attach(struct dvb_usb_adapter *adap)
dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
msleep(30);
- adap->fe = dvb_attach(lgdt3305_attach,
+ adap->fe_adap[0].fe = dvb_attach(lgdt3305_attach,
&hcw_lgdt3305_config,
&adap->dev->i2c_adap);
- return adap->fe == NULL ? -ENODEV : 0;
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
static int mxl5007t_tuner_attach(struct dvb_usb_adapter *adap)
{
- return dvb_attach(mxl5007t_attach, adap->fe,
+ return dvb_attach(mxl5007t_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, 0x60,
&hcw_mxl5007t_config) == NULL ? -ENODEV : 0;
}
@@ -2989,6 +2994,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk7700p_pid_filter,
@@ -2997,6 +3004,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = stk7700p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ }},
},
},
@@ -3050,15 +3058,21 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = bristol_frontend_attach,
.tuner_attach = bristol_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ }},
}, {
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = bristol_frontend_attach,
.tuner_attach = bristol_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ }},
}
},
@@ -3084,6 +3098,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3092,7 +3108,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = stk7700d_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ }},
}, {
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3101,6 +3120,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = stk7700d_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ }},
}
},
@@ -3143,6 +3163,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3151,6 +3173,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = stk7700d_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ }},
},
},
@@ -3185,6 +3208,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3193,7 +3218,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib7070p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct dib0700_adapter_state),
},
},
@@ -3261,6 +3286,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3269,7 +3296,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib7070p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct dib0700_adapter_state),
},
},
@@ -3305,6 +3332,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3313,9 +3342,11 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib7070p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct dib0700_adapter_state),
}, {
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3324,7 +3355,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib7070p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
-
+ }},
.size_of_priv = sizeof(struct dib0700_adapter_state),
}
},
@@ -3373,6 +3404,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3381,9 +3414,11 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib7070p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct dib0700_adapter_state),
}, {
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3392,7 +3427,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib7070p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
-
+ }},
.size_of_priv = sizeof(struct dib0700_adapter_state),
}
},
@@ -3420,6 +3455,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3428,7 +3465,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = stk7700ph_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct
dib0700_adapter_state),
},
@@ -3488,11 +3525,13 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = s5h1411_frontend_attach,
.tuner_attach = xc5000_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct
dib0700_adapter_state),
},
@@ -3524,11 +3563,13 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = lgdt3305_frontend_attach,
.tuner_attach = mxl5007t_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct
dib0700_adapter_state),
},
@@ -3550,6 +3591,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
@@ -3558,7 +3601,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib7770p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3600,6 +3643,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk80xx_pid_filter,
@@ -3608,7 +3653,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib807x_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3644,6 +3689,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk80xx_pid_filter,
@@ -3652,11 +3699,13 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib807x_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.pid_filter = stk80xx_pid_filter,
@@ -3665,7 +3714,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib807x_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3693,6 +3742,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -3702,7 +3753,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib809x_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3730,6 +3781,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -3739,7 +3792,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = dib9090_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3767,6 +3820,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -3776,7 +3831,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = nim8096md_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3804,6 +3859,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -3813,7 +3870,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = nim9090md_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3841,6 +3898,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -3850,7 +3909,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = nim7090_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3878,6 +3937,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 2,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -3887,11 +3948,13 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = tfe7090pvr_tuner0_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -3901,7 +3964,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.tuner_attach = tfe7090pvr_tuner1_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv =
sizeof(struct dib0700_adapter_state),
},
@@ -3929,11 +3992,13 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = pctv340e_frontend_attach,
.tuner_attach = xc4000_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
-
+ }},
.size_of_priv = sizeof(struct
dib0700_adapter_state),
},
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 4c2a689c820..a76bbb29ca3 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -23,7 +23,7 @@ int dibusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
if (st->ops.fifo_ctrl != NULL)
- if (st->ops.fifo_ctrl(adap->fe,onoff)) {
+ if (st->ops.fifo_ctrl(adap->fe_adap[0].fe, onoff)) {
err("error while controlling the fifo of the demod.");
return -ENODEV;
}
@@ -37,7 +37,8 @@ int dibusb_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onof
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
if (st->ops.pid_ctrl != NULL)
- st->ops.pid_ctrl(adap->fe,index,pid,onoff);
+ st->ops.pid_ctrl(adap->fe_adap[0].fe,
+ index, pid, onoff);
}
return 0;
}
@@ -48,7 +49,7 @@ int dibusb_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
if (st->ops.pid_parse != NULL)
- if (st->ops.pid_parse(adap->fe,onoff) < 0)
+ if (st->ops.pid_parse(adap->fe_adap[0].fe, onoff) < 0)
err("could not handle pid_parser");
}
return 0;
@@ -254,8 +255,16 @@ int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
msleep(1000);
}
- if ((adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000P_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL ||
- (adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000MC_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL) {
+ adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
+ &adap->dev->i2c_adap,
+ DEFAULT_DIB3000P_I2C_ADDRESS,
+ &mod3000p_dib3000p_config);
+ if ((adap->fe_adap[0].fe) == NULL)
+ adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
+ &adap->dev->i2c_adap,
+ DEFAULT_DIB3000MC_I2C_ADDRESS,
+ &mod3000p_dib3000p_config);
+ if ((adap->fe_adap[0].fe) != NULL) {
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
st->ops.pid_parse = dib3000mc_pid_parse;
@@ -309,15 +318,15 @@ int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
}
}
- tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe, 1);
- if (dvb_attach(mt2060_attach, adap->fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
+ tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1);
+ if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
/* not found - use panasonic pll parameters */
- if (dvb_attach(dvb_pll_attach, adap->fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
+ if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
return -ENOMEM;
} else {
st->mt2060_present = 1;
/* set the correct parameters for the dib3000p */
- dib3000mc_set_config(adap->fe, &stk3000p_dib3000p_config);
+ dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config);
}
return 0;
}
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 04d91bdd356..7270791f834 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -31,11 +31,12 @@ static int dibusb_dib3000mb_frontend_attach(struct dvb_usb_adapter *adap)
demod_cfg.demod_address = 0x8;
- if ((adap->fe = dvb_attach(dib3000mb_attach, &demod_cfg,
- &adap->dev->i2c_adap, &st->ops)) == NULL)
+ adap->fe_adap[0].fe = dvb_attach(dib3000mb_attach, &demod_cfg,
+ &adap->dev->i2c_adap, &st->ops);
+ if ((adap->fe_adap[0].fe) == NULL)
return -ENODEV;
- adap->fe->ops.i2c_gate_ctrl = dib3000mb_i2c_gate_ctrl;
+ adap->fe_adap[0].fe->ops.i2c_gate_ctrl = dib3000mb_i2c_gate_ctrl;
return 0;
}
@@ -46,7 +47,7 @@ static int dibusb_thomson_tuner_attach(struct dvb_usb_adapter *adap)
st->tuner_addr = 0x61;
- dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, &adap->dev->i2c_adap,
DVB_PLL_TUA6010XS);
return 0;
}
@@ -57,7 +58,7 @@ static int dibusb_panasonic_tuner_attach(struct dvb_usb_adapter *adap)
st->tuner_addr = 0x60;
- dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap,
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap,
DVB_PLL_TDA665X);
return 0;
}
@@ -78,16 +79,16 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap)
/* the Panasonic sits on I2C address 0x60, the Thomson on 0x61 */
msg[0].addr = msg[1].addr = st->tuner_addr = 0x60;
- if (adap->fe->ops.i2c_gate_ctrl)
- adap->fe->ops.i2c_gate_ctrl(adap->fe,1);
+ if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl)
+ adap->fe_adap[0].fe->ops.i2c_gate_ctrl(adap->fe_adap[0].fe, 1);
if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
err("tuner i2c write failed.");
ret = -EREMOTEIO;
}
- if (adap->fe->ops.i2c_gate_ctrl)
- adap->fe->ops.i2c_gate_ctrl(adap->fe,0);
+ if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl)
+ adap->fe_adap[0].fe->ops.i2c_gate_ctrl(adap->fe_adap[0].fe, 0);
if (b2[0] == 0xfe) {
info("This device has the Thomson Cable onboard. Which is default.");
@@ -185,6 +186,8 @@ static struct dvb_usb_device_properties dibusb1_1_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 16,
@@ -205,6 +208,7 @@ static struct dvb_usb_device_properties dibusb1_1_properties = {
}
}
},
+ }},
.size_of_priv = sizeof(struct dibusb_state),
}
},
@@ -272,6 +276,8 @@ static struct dvb_usb_device_properties dibusb1_1_an2235_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF | DVB_USB_ADAP_HAS_PID_FILTER,
.pid_filter_count = 16,
@@ -292,6 +298,7 @@ static struct dvb_usb_device_properties dibusb1_1_an2235_properties = {
}
}
},
+ }},
.size_of_priv = sizeof(struct dibusb_state),
},
},
@@ -338,6 +345,8 @@ static struct dvb_usb_device_properties dibusb2_0b_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 16,
@@ -358,6 +367,7 @@ static struct dvb_usb_device_properties dibusb2_0b_properties = {
}
}
},
+ }},
.size_of_priv = sizeof(struct dibusb_state),
}
},
@@ -398,6 +408,8 @@ static struct dvb_usb_device_properties artec_t1_usb2_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 16,
@@ -417,6 +429,7 @@ static struct dvb_usb_device_properties artec_t1_usb2_properties = {
}
}
},
+ }},
.size_of_priv = sizeof(struct dibusb_state),
}
},
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index c1d9094b61e..9c165e2569d 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -57,6 +57,8 @@ static struct dvb_usb_device_properties dibusb_mc_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.streaming_ctrl = dibusb2_0_streaming_ctrl,
@@ -76,6 +78,7 @@ static struct dvb_usb_device_properties dibusb_mc_properties = {
}
}
},
+ }},
.size_of_priv = sizeof(struct dibusb_state),
}
},
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index f6344cdd360..f7184111aa6 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -137,11 +137,16 @@ static int digitv_frontend_attach(struct dvb_usb_adapter *adap)
{
struct digitv_state *st = adap->dev->priv;
- if ((adap->fe = dvb_attach(mt352_attach, &digitv_mt352_config, &adap->dev->i2c_adap)) != NULL) {
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach, &digitv_mt352_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL) {
st->is_nxt6000 = 0;
return 0;
}
- if ((adap->fe = dvb_attach(nxt6000_attach, &digitv_nxt6000_config, &adap->dev->i2c_adap)) != NULL) {
+ adap->fe_adap[0].fe = dvb_attach(nxt6000_attach,
+ &digitv_nxt6000_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) != NULL) {
st->is_nxt6000 = 1;
return 0;
}
@@ -152,11 +157,11 @@ static int digitv_tuner_attach(struct dvb_usb_adapter *adap)
{
struct digitv_state *st = adap->dev->priv;
- if (!dvb_attach(dvb_pll_attach, adap->fe, 0x60, NULL, DVB_PLL_TDED4))
+ if (!dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, NULL, DVB_PLL_TDED4))
return -ENODEV;
if (st->is_nxt6000)
- adap->fe->ops.tuner_ops.set_params = digitv_nxt6000_tuner_set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = digitv_nxt6000_tuner_set_params;
return 0;
}
@@ -292,6 +297,8 @@ static struct dvb_usb_device_properties digitv_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = digitv_frontend_attach,
.tuner_attach = digitv_tuner_attach,
@@ -306,6 +313,7 @@ static struct dvb_usb_device_properties digitv_properties = {
}
}
},
+ }},
}
},
.identify_state = digitv_identify_state,
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index ecd86eca254..106dfd55ff9 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -90,7 +90,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe = dtt200u_fe_attach(adap->dev);
+ adap->fe_adap[0].fe = dtt200u_fe_attach(adap->dev);
return 0;
}
@@ -140,6 +140,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_NEED_PID_FILTERING,
.pid_filter_count = 15,
@@ -157,6 +159,7 @@ static struct dvb_usb_device_properties dtt200u_properties = {
}
}
},
+ }},
}
},
.power_ctrl = dtt200u_power_ctrl,
@@ -187,6 +190,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_NEED_PID_FILTERING,
.pid_filter_count = 15,
@@ -204,6 +209,7 @@ static struct dvb_usb_device_properties wt220u_properties = {
}
}
},
+ }},
}
},
.power_ctrl = dtt200u_power_ctrl,
@@ -234,6 +240,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_NEED_PID_FILTERING,
.pid_filter_count = 15,
@@ -251,6 +259,7 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
}
}
},
+ }},
}
},
.power_ctrl = dtt200u_power_ctrl,
@@ -281,6 +290,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_NEED_PID_FILTERING,
.pid_filter_count = 15,
@@ -298,6 +309,7 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
}
}
},
+ }},
}
},
.power_ctrl = dtt200u_power_ctrl,
diff --git a/drivers/media/dvb/dvb-usb/dtv5100.c b/drivers/media/dvb/dvb-usb/dtv5100.c
index 078ce92ca43..7373132163d 100644
--- a/drivers/media/dvb/dvb-usb/dtv5100.c
+++ b/drivers/media/dvb/dvb-usb/dtv5100.c
@@ -115,13 +115,13 @@ static struct zl10353_config dtv5100_zl10353_config = {
static int dtv5100_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe = dvb_attach(zl10353_attach, &dtv5100_zl10353_config,
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &dtv5100_zl10353_config,
&adap->dev->i2c_adap);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
/* disable i2c gate, or it won't work... is this safe? */
- adap->fe->ops.i2c_gate_ctrl = NULL;
+ adap->fe_adap[0].fe->ops.i2c_gate_ctrl = NULL;
return 0;
}
@@ -133,7 +133,7 @@ static struct qt1010_config dtv5100_qt1010_config = {
static int dtv5100_tuner_attach(struct dvb_usb_adapter *adap)
{
return dvb_attach(qt1010_attach,
- adap->fe, &adap->dev->i2c_adap,
+ adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&dtv5100_qt1010_config) == NULL ? -ENODEV : 0;
}
@@ -180,6 +180,8 @@ static struct dvb_usb_device_properties dtv5100_properties = {
.num_adapters = 1,
.adapter = {{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = dtv5100_frontend_attach,
.tuner_attach = dtv5100_tuner_attach,
@@ -193,6 +195,7 @@ static struct dvb_usb_device_properties dtv5100_properties = {
}
}
},
+ }},
} },
.i2c_algo = &dtv5100_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c b/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
index b3cb626ed56..ba4a7517354 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
@@ -17,15 +17,20 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
if (adap == NULL)
return -ENODEV;
+ if ((adap->active_fe < 0) ||
+ (adap->active_fe >= adap->num_frontends_initialized)) {
+ return -EINVAL;
+ }
+
newfeedcount = adap->feedcount + (onoff ? 1 : -1);
/* stop feed before setting a new pid if there will be no pid anymore */
if (newfeedcount == 0) {
deb_ts("stop feeding\n");
- usb_urb_kill(&adap->stream);
+ usb_urb_kill(&adap->fe_adap[adap->active_fe].stream);
- if (adap->props.streaming_ctrl != NULL) {
- ret = adap->props.streaming_ctrl(adap, 0);
+ if (adap->props.fe[adap->active_fe].streaming_ctrl != NULL) {
+ ret = adap->props.fe[adap->active_fe].streaming_ctrl(adap, 0);
if (ret < 0) {
err("error while stopping stream.");
return ret;
@@ -36,36 +41,37 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
adap->feedcount = newfeedcount;
/* activate the pid on the device specific pid_filter */
- deb_ts("setting pid (%s): %5d %04x at index %d '%s'\n",adap->pid_filtering ?
- "yes" : "no", dvbdmxfeed->pid,dvbdmxfeed->pid,dvbdmxfeed->index,onoff ?
- "on" : "off");
- if (adap->props.caps & DVB_USB_ADAP_HAS_PID_FILTER &&
- adap->pid_filtering &&
- adap->props.pid_filter != NULL)
- adap->props.pid_filter(adap, dvbdmxfeed->index, dvbdmxfeed->pid,onoff);
+ deb_ts("setting pid (%s): %5d %04x at index %d '%s'\n",
+ adap->fe_adap[adap->active_fe].pid_filtering ?
+ "yes" : "no", dvbdmxfeed->pid, dvbdmxfeed->pid,
+ dvbdmxfeed->index, onoff ? "on" : "off");
+ if (adap->props.fe[adap->active_fe].caps & DVB_USB_ADAP_HAS_PID_FILTER &&
+ adap->fe_adap[adap->active_fe].pid_filtering &&
+ adap->props.fe[adap->active_fe].pid_filter != NULL)
+ adap->props.fe[adap->active_fe].pid_filter(adap, dvbdmxfeed->index, dvbdmxfeed->pid, onoff);
/* start the feed if this was the first feed and there is still a feed
* for reception.
*/
if (adap->feedcount == onoff && adap->feedcount > 0) {
deb_ts("submitting all URBs\n");
- usb_urb_submit(&adap->stream);
+ usb_urb_submit(&adap->fe_adap[adap->active_fe].stream);
deb_ts("controlling pid parser\n");
- if (adap->props.caps & DVB_USB_ADAP_HAS_PID_FILTER &&
- adap->props.caps &
+ if (adap->props.fe[adap->active_fe].caps & DVB_USB_ADAP_HAS_PID_FILTER &&
+ adap->props.fe[adap->active_fe].caps &
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF &&
- adap->props.pid_filter_ctrl != NULL) {
- ret = adap->props.pid_filter_ctrl(adap,
- adap->pid_filtering);
+ adap->props.fe[adap->active_fe].pid_filter_ctrl != NULL) {
+ ret = adap->props.fe[adap->active_fe].pid_filter_ctrl(adap,
+ adap->fe_adap[adap->active_fe].pid_filtering);
if (ret < 0) {
err("could not handle pid_parser");
return ret;
}
}
deb_ts("start feeding\n");
- if (adap->props.streaming_ctrl != NULL) {
- ret = adap->props.streaming_ctrl(adap, 1);
+ if (adap->props.fe[adap->active_fe].streaming_ctrl != NULL) {
+ ret = adap->props.fe[adap->active_fe].streaming_ctrl(adap, 1);
if (ret < 0) {
err("error while enabling fifo.");
return ret;
@@ -90,6 +96,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums)
{
+ int i;
int ret = dvb_register_adapter(&adap->dvb_adap, adap->dev->desc->name,
adap->dev->owner, &adap->dev->udev->dev,
adapter_nums);
@@ -112,7 +119,12 @@ int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums)
adap->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
adap->demux.priv = adap;
- adap->demux.feednum = adap->demux.filternum = adap->max_feed_count;
+ adap->demux.filternum = 0;
+ for (i = 0; i < adap->props.num_frontends; i++) {
+ if (adap->demux.filternum < adap->fe_adap[i].max_feed_count)
+ adap->demux.filternum = adap->fe_adap[i].max_feed_count;
+ }
+ adap->demux.feednum = adap->demux.filternum;
adap->demux.start_feed = dvb_usb_start_feed;
adap->demux.stop_feed = dvb_usb_stop_feed;
adap->demux.write_to_decoder = NULL;
@@ -156,14 +168,33 @@ int dvb_usb_adapter_dvb_exit(struct dvb_usb_adapter *adap)
return 0;
}
+static int dvb_usb_set_active_fe(struct dvb_frontend *fe, int onoff)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+
+ int ret = (adap->props.frontend_ctrl) ?
+ adap->props.frontend_ctrl(fe, onoff) : 0;
+
+ if (ret < 0) {
+ err("frontend_ctrl request failed");
+ return ret;
+ }
+ if (onoff)
+ adap->active_fe = fe->id;
+
+ return 0;
+}
+
static int dvb_usb_fe_wakeup(struct dvb_frontend *fe)
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
dvb_usb_device_power_ctrl(adap->dev, 1);
- if (adap->fe_init)
- adap->fe_init(fe);
+ dvb_usb_set_active_fe(fe, 1);
+
+ if (adap->fe_adap[fe->id].fe_init)
+ adap->fe_adap[fe->id].fe_init(fe);
return 0;
}
@@ -172,45 +203,81 @@ static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
- if (adap->fe_sleep)
- adap->fe_sleep(fe);
+ if (adap->fe_adap[fe->id].fe_sleep)
+ adap->fe_adap[fe->id].fe_sleep(fe);
+
+ dvb_usb_set_active_fe(fe, 0);
return dvb_usb_device_power_ctrl(adap->dev, 0);
}
int dvb_usb_adapter_frontend_init(struct dvb_usb_adapter *adap)
{
- if (adap->props.frontend_attach == NULL) {
- err("strange: '%s' #%d doesn't want to attach a frontend.",adap->dev->desc->name, adap->id);
- return 0;
- }
+ int ret, i;
- /* re-assign sleep and wakeup functions */
- if (adap->props.frontend_attach(adap) == 0 && adap->fe != NULL) {
- adap->fe_init = adap->fe->ops.init; adap->fe->ops.init = dvb_usb_fe_wakeup;
- adap->fe_sleep = adap->fe->ops.sleep; adap->fe->ops.sleep = dvb_usb_fe_sleep;
+ /* register all given adapter frontends */
+ for (i = 0; i < adap->props.num_frontends; i++) {
- if (dvb_register_frontend(&adap->dvb_adap, adap->fe)) {
- err("Frontend registration failed.");
- dvb_frontend_detach(adap->fe);
- adap->fe = NULL;
- return -ENODEV;
+ if (adap->props.fe[i].frontend_attach == NULL) {
+ err("strange: '%s' #%d,%d "
+ "doesn't want to attach a frontend.",
+ adap->dev->desc->name, adap->id, i);
+
+ return 0;
+ }
+
+ ret = adap->props.fe[i].frontend_attach(adap);
+ if (ret || adap->fe_adap[i].fe == NULL) {
+ /* only print error when there is no FE at all */
+ if (i == 0)
+ err("no frontend was attached by '%s'",
+ adap->dev->desc->name);
+
+ return 0;
+ }
+
+ adap->fe_adap[i].fe->id = i;
+
+ /* re-assign sleep and wakeup functions */
+ adap->fe_adap[i].fe_init = adap->fe_adap[i].fe->ops.init;
+ adap->fe_adap[i].fe->ops.init = dvb_usb_fe_wakeup;
+ adap->fe_adap[i].fe_sleep = adap->fe_adap[i].fe->ops.sleep;
+ adap->fe_adap[i].fe->ops.sleep = dvb_usb_fe_sleep;
+
+ if (dvb_register_frontend(&adap->dvb_adap, adap->fe_adap[i].fe)) {
+ err("Frontend %d registration failed.", i);
+ dvb_frontend_detach(adap->fe_adap[i].fe);
+ adap->fe_adap[i].fe = NULL;
+ /* In the error case, do not try to register more FEs,
+ * while leaving already registered FEs alive. */
+ if (i == 0)
+ return -ENODEV;
+ else
+ return 0;
}
/* only attach the tuner if the demod is there */
- if (adap->props.tuner_attach != NULL)
- adap->props.tuner_attach(adap);
- } else
- err("no frontend was attached by '%s'",adap->dev->desc->name);
+ if (adap->props.fe[i].tuner_attach != NULL)
+ adap->props.fe[i].tuner_attach(adap);
+
+ adap->num_frontends_initialized++;
+ }
return 0;
}
int dvb_usb_adapter_frontend_exit(struct dvb_usb_adapter *adap)
{
- if (adap->fe != NULL) {
- dvb_unregister_frontend(adap->fe);
- dvb_frontend_detach(adap->fe);
+ int i = adap->num_frontends_initialized - 1;
+
+ /* unregister all given adapter frontends */
+ for (; i >= 0; i--) {
+ if (adap->fe_adap[i].fe != NULL) {
+ dvb_unregister_frontend(adap->fe_adap[i].fe);
+ dvb_frontend_detach(adap->fe_adap[i].fe);
+ }
}
+ adap->num_frontends_initialized = 0;
+
return 0;
}
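
As a quick orientation for the hunks above: the dvb-usb core no longer keeps a single adap->fe, but an array adap->fe_adap[] with one slot per frontend, and the registration loop assigns fe->id so the wakeup/sleep wrappers can locate the matching slot. A minimal driver-side sketch under the new scheme might look as follows; example_demod_attach and example_config are placeholder names, not symbols from this patch.

/* sketch only -- not part of the patch; placeholder demod/config names */
static int example_frontend_attach(struct dvb_usb_adapter *adap)
{
	/* store the demod in fe_adap[0] instead of the old adap->fe */
	adap->fe_adap[0].fe = dvb_attach(example_demod_attach,
					 &example_config,
					 &adap->dev->i2c_adap);
	return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
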
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 2a79b8fb3e8..2d08c9b5128 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -37,6 +37,7 @@
#define USB_VID_HAUPPAUGE 0x2040
#define USB_VID_HYPER_PALTEK 0x1025
#define USB_VID_INTEL 0x8086
+#define USB_VID_ITETECH 0x048d
#define USB_VID_KWORLD 0xeb2a
#define USB_VID_KWORLD_2 0x1b80
#define USB_VID_KYE 0x0458
@@ -126,6 +127,7 @@
#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
#define USB_PID_INTEL_CE9500 0x9500
+#define USB_PID_ITETECH_IT9135 0x9135
#define USB_PID_KWORLD_399U 0xe399
#define USB_PID_KWORLD_399U_2 0xe400
#define USB_PID_KWORLD_395U 0xe396
@@ -136,6 +138,7 @@
#define USB_PID_KWORLD_PC160_2T 0xc160
#define USB_PID_KWORLD_PC160_T 0xc161
#define USB_PID_KWORLD_UB383_T 0xe383
+#define USB_PID_KWORLD_UB499_2T_T09 0xe409
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
@@ -240,6 +243,9 @@
#define USB_PID_PCTV_200E 0x020e
#define USB_PID_PCTV_400E 0x020f
#define USB_PID_PCTV_450E 0x0222
+#define USB_PID_PCTV_452E 0x021f
+#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
+#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
#define USB_PID_NEBULA_DIGITV 0x0201
#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-init.c b/drivers/media/dvb/dvb-usb/dvb-usb-init.c
index 2e3ea0fa28e..169196ec2d4 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-init.c
@@ -29,7 +29,7 @@ MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID
static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
{
struct dvb_usb_adapter *adap;
- int ret, n;
+ int ret, n, o;
for (n = 0; n < d->props.num_adapters; n++) {
adap = &d->adapter[n];
@@ -38,31 +38,42 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
memcpy(&adap->props, &d->props.adapter[n], sizeof(struct dvb_usb_adapter_properties));
+ for (o = 0; o < adap->props.num_frontends; o++) {
+ struct dvb_usb_adapter_fe_properties *props = &adap->props.fe[o];
/* speed - when running at FULL speed we need a HW PID filter */
- if (d->udev->speed == USB_SPEED_FULL && !(adap->props.caps & DVB_USB_ADAP_HAS_PID_FILTER)) {
+ if (d->udev->speed == USB_SPEED_FULL && !(props->caps & DVB_USB_ADAP_HAS_PID_FILTER)) {
err("This USB2.0 device cannot be run on a USB1.1 port. (it lacks a hardware PID filter)");
return -ENODEV;
}
- if ((d->udev->speed == USB_SPEED_FULL && adap->props.caps & DVB_USB_ADAP_HAS_PID_FILTER) ||
- (adap->props.caps & DVB_USB_ADAP_NEED_PID_FILTERING)) {
- info("will use the device's hardware PID filter (table count: %d).", adap->props.pid_filter_count);
- adap->pid_filtering = 1;
- adap->max_feed_count = adap->props.pid_filter_count;
+ if ((d->udev->speed == USB_SPEED_FULL && props->caps & DVB_USB_ADAP_HAS_PID_FILTER) ||
+ (props->caps & DVB_USB_ADAP_NEED_PID_FILTERING)) {
+ info("will use the device's hardware PID filter (table count: %d).", props->pid_filter_count);
+ adap->fe_adap[o].pid_filtering = 1;
+ adap->fe_adap[o].max_feed_count = props->pid_filter_count;
} else {
info("will pass the complete MPEG2 transport stream to the software demuxer.");
- adap->pid_filtering = 0;
- adap->max_feed_count = 255;
+ adap->fe_adap[o].pid_filtering = 0;
+ adap->fe_adap[o].max_feed_count = 255;
}
- if (!adap->pid_filtering &&
+ if (!adap->fe_adap[o].pid_filtering &&
dvb_usb_force_pid_filter_usage &&
- adap->props.caps & DVB_USB_ADAP_HAS_PID_FILTER) {
+ props->caps & DVB_USB_ADAP_HAS_PID_FILTER) {
info("pid filter enabled by module option.");
- adap->pid_filtering = 1;
- adap->max_feed_count = adap->props.pid_filter_count;
+ adap->fe_adap[o].pid_filtering = 1;
+ adap->fe_adap[o].max_feed_count = props->pid_filter_count;
}
+ if (props->size_of_priv > 0) {
+ adap->fe_adap[o].priv = kzalloc(props->size_of_priv, GFP_KERNEL);
+ if (adap->fe_adap[o].priv == NULL) {
+ err("no memory for priv for adapter %d fe %d.", n, o);
+ return -ENOMEM;
+ }
+ }
+ }
+
if (adap->props.size_of_priv > 0) {
adap->priv = kzalloc(adap->props.size_of_priv, GFP_KERNEL);
if (adap->priv == NULL) {
@@ -77,6 +88,10 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
return ret;
}
+ /* use exclusive FE lock if there are multiple shared FEs */
+ if (adap->fe_adap[1].fe)
+ adap->dvb_adap.mfe_shared = 1;
+
d->num_adapters_initialized++;
d->state |= DVB_USB_STATE_DVB;
}
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index bb46ba6a357..53a5c30b51b 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -82,16 +82,28 @@ static void dvb_usb_data_complete_204(struct usb_data_stream *stream, u8 *buffer
int dvb_usb_adapter_stream_init(struct dvb_usb_adapter *adap)
{
- adap->stream.udev = adap->dev->udev;
- if (adap->props.caps & DVB_USB_ADAP_RECEIVES_204_BYTE_TS)
- adap->stream.complete = dvb_usb_data_complete_204;
- else
- adap->stream.complete = dvb_usb_data_complete;
- adap->stream.user_priv = adap;
- return usb_urb_init(&adap->stream, &adap->props.stream);
+ int i, ret = 0;
+ for (i = 0; i < adap->props.num_frontends; i++) {
+
+ adap->fe_adap[i].stream.udev = adap->dev->udev;
+ if (adap->props.fe[i].caps & DVB_USB_ADAP_RECEIVES_204_BYTE_TS)
+ adap->fe_adap[i].stream.complete =
+ dvb_usb_data_complete_204;
+ else
+ adap->fe_adap[i].stream.complete = dvb_usb_data_complete;
+ adap->fe_adap[i].stream.user_priv = adap;
+ ret = usb_urb_init(&adap->fe_adap[i].stream,
+ &adap->props.fe[i].stream);
+ if (ret < 0)
+ break;
+ }
+ return ret;
}
int dvb_usb_adapter_stream_exit(struct dvb_usb_adapter *adap)
{
- return usb_urb_exit(&adap->stream);
+ int i;
+ for (i = 0; i < adap->props.num_frontends; i++)
+ usb_urb_exit(&adap->fe_adap[i].stream);
+ return 0;
}
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 7d35d078342..6d7d13f9ce6 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -124,6 +124,8 @@ struct usb_data_stream_properties {
* @caps: capabilities of the DVB USB device.
* @pid_filter_count: number of PID filter position in the optional hardware
* PID-filter.
+ * @num_frontends: number of frontends of the DVB USB adapter.
+ * @frontend_ctrl: called to power on/off active frontend.
* @streaming_ctrl: called to start and stop the MPEG2-TS streaming of the
* device (not URB submitting/killing).
* @pid_filter_ctrl: called to en/disable the PID filter, if any.
@@ -134,7 +136,7 @@ struct usb_data_stream_properties {
* pll_desc and pll_init_buf of struct dvb_usb_device).
* @stream: configuration of the USB streaming
*/
-struct dvb_usb_adapter_properties {
+struct dvb_usb_adapter_fe_properties {
#define DVB_USB_ADAP_HAS_PID_FILTER 0x01
#define DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF 0x02
#define DVB_USB_ADAP_NEED_PID_FILTERING 0x04
@@ -152,9 +154,18 @@ struct dvb_usb_adapter_properties {
struct usb_data_stream_properties stream;
int size_of_priv;
+};
+
+#define MAX_NO_OF_FE_PER_ADAP 2
+struct dvb_usb_adapter_properties {
+ int size_of_priv;
+ int (*frontend_ctrl) (struct dvb_frontend *, int);
int (*fe_ioctl_override) (struct dvb_frontend *,
unsigned int, void *, unsigned int);
+
+ int num_frontends;
+ struct dvb_usb_adapter_fe_properties fe[MAX_NO_OF_FE_PER_ADAP];
};
/**
@@ -345,6 +356,20 @@ struct usb_data_stream {
*
* @stream: the usb data stream.
*/
+struct dvb_usb_fe_adapter {
+ struct dvb_frontend *fe;
+
+ int (*fe_init) (struct dvb_frontend *);
+ int (*fe_sleep) (struct dvb_frontend *);
+
+ struct usb_data_stream stream;
+
+ int pid_filtering;
+ int max_feed_count;
+
+ void *priv;
+};
+
struct dvb_usb_adapter {
struct dvb_usb_device *dev;
struct dvb_usb_adapter_properties props;
@@ -356,20 +381,16 @@ struct dvb_usb_adapter {
u8 id;
int feedcount;
- int pid_filtering;
/* dvb */
struct dvb_adapter dvb_adap;
struct dmxdev dmxdev;
struct dvb_demux demux;
struct dvb_net dvb_net;
- struct dvb_frontend *fe;
- int max_feed_count;
-
- int (*fe_init) (struct dvb_frontend *);
- int (*fe_sleep) (struct dvb_frontend *);
- struct usb_data_stream stream;
+ struct dvb_usb_fe_adapter fe_adap[MAX_NO_OF_FE_PER_ADAP];
+ int active_fe;
+ int num_frontends_initialized;
void *priv;
};
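
For reference, the split of dvb_usb_adapter_properties into per-frontend dvb_usb_adapter_fe_properties shown above is what drives the repeated .num_frontends / .fe = {{ ... }} edits in the device tables earlier in this patch. A hedged sketch of a single-frontend adapter entry under the new layout (all example_* names are placeholders, not symbols from this patch):

/* sketch only -- not part of the patch; example_* names are placeholders */
static struct dvb_usb_device_properties example_properties = {
	.num_adapters = 1,
	.adapter = {
		{
			/* per-frontend fields now live inside .fe[] */
			.num_frontends = 1,
			.fe = {{
				.frontend_attach = example_frontend_attach,
				.tuner_attach = example_tuner_attach,
			}},
			.size_of_priv = sizeof(struct example_state),
		}
	},
};
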
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 058b2318abe..f103ec1fe82 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -992,18 +992,18 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
struct dvb_tuner_ops *tuner_ops = NULL;
if (demod_probe & 4) {
- d->fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config,
+ d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config,
&d->dev->i2c_adap, 0);
- if (d->fe != NULL) {
- if (dvb_attach(stb6100_attach, d->fe,
+ if (d->fe_adap[0].fe != NULL) {
+ if (dvb_attach(stb6100_attach, d->fe_adap[0].fe,
&dw2104a_stb6100_config,
&d->dev->i2c_adap)) {
- tuner_ops = &d->fe->ops.tuner_ops;
+ tuner_ops = &d->fe_adap[0].fe->ops.tuner_ops;
tuner_ops->set_frequency = stb6100_set_freq;
tuner_ops->get_frequency = stb6100_get_freq;
tuner_ops->set_bandwidth = stb6100_set_bandw;
tuner_ops->get_bandwidth = stb6100_get_bandw;
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached STV0900+STB6100!\n");
return 0;
}
@@ -1011,13 +1011,13 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
}
if (demod_probe & 2) {
- d->fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config,
+ d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config,
&d->dev->i2c_adap, 0);
- if (d->fe != NULL) {
- if (dvb_attach(stv6110_attach, d->fe,
+ if (d->fe_adap[0].fe != NULL) {
+ if (dvb_attach(stv6110_attach, d->fe_adap[0].fe,
&dw2104_stv6110_config,
&d->dev->i2c_adap)) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached STV0900+STV6110A!\n");
return 0;
}
@@ -1025,19 +1025,19 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
}
if (demod_probe & 1) {
- d->fe = dvb_attach(cx24116_attach, &dw2104_config,
+ d->fe_adap[0].fe = dvb_attach(cx24116_attach, &dw2104_config,
&d->dev->i2c_adap);
- if (d->fe != NULL) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ if (d->fe_adap[0].fe != NULL) {
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached cx24116!\n");
return 0;
}
}
- d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
+ d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
&d->dev->i2c_adap);
- if (d->fe != NULL) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ if (d->fe_adap[0].fe != NULL) {
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached DS3000!\n");
return 0;
}
@@ -1053,22 +1053,22 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
{
if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) {
/*dw2102_properties.adapter->tuner_attach = NULL;*/
- d->fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config,
+ d->fe_adap[0].fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config,
&d->dev->i2c_adap);
- if (d->fe != NULL) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ if (d->fe_adap[0].fe != NULL) {
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached si21xx!\n");
return 0;
}
}
if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) {
- d->fe = dvb_attach(stv0288_attach, &earda_config,
+ d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config,
&d->dev->i2c_adap);
- if (d->fe != NULL) {
- if (dvb_attach(stb6000_attach, d->fe, 0x61,
+ if (d->fe_adap[0].fe != NULL) {
+ if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61,
&d->dev->i2c_adap)) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached stv0288!\n");
return 0;
}
@@ -1077,10 +1077,10 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) {
/*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/
- d->fe = dvb_attach(stv0299_attach, &sharp_z0194a_config,
+ d->fe_adap[0].fe = dvb_attach(stv0299_attach, &sharp_z0194a_config,
&d->dev->i2c_adap);
- if (d->fe != NULL) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ if (d->fe_adap[0].fe != NULL) {
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached stv0299!\n");
return 0;
}
@@ -1090,9 +1090,9 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
{
- d->fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config,
+ d->fe_adap[0].fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config,
&d->dev->i2c_adap, 0x48);
- if (d->fe != NULL) {
+ if (d->fe_adap[0].fe != NULL) {
info("Attached tda10023!\n");
return 0;
}
@@ -1101,12 +1101,12 @@ static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
static int zl100313_frontend_attach(struct dvb_usb_adapter *d)
{
- d->fe = dvb_attach(mt312_attach, &zl313_config,
+ d->fe_adap[0].fe = dvb_attach(mt312_attach, &zl313_config,
&d->dev->i2c_adap);
- if (d->fe != NULL) {
- if (dvb_attach(zl10039_attach, d->fe, 0x60,
+ if (d->fe_adap[0].fe != NULL) {
+ if (dvb_attach(zl10039_attach, d->fe_adap[0].fe, 0x60,
&d->dev->i2c_adap)) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached zl100313+zl10039!\n");
return 0;
}
@@ -1119,16 +1119,16 @@ static int stv0288_frontend_attach(struct dvb_usb_adapter *d)
{
u8 obuf[] = {7, 1};
- d->fe = dvb_attach(stv0288_attach, &earda_config,
+ d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config,
&d->dev->i2c_adap);
- if (d->fe == NULL)
+ if (d->fe_adap[0].fe == NULL)
return -EIO;
- if (NULL == dvb_attach(stb6000_attach, d->fe, 0x61, &d->dev->i2c_adap))
+ if (NULL == dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap))
return -EIO;
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
@@ -1143,14 +1143,14 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
struct s6x0_state *st = (struct s6x0_state *)d->dev->priv;
u8 obuf[] = {7, 1};
- d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
+ d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
&d->dev->i2c_adap);
- if (d->fe == NULL)
+ if (d->fe_adap[0].fe == NULL)
return -EIO;
- st->old_set_voltage = d->fe->ops.set_voltage;
- d->fe->ops.set_voltage = s660_set_voltage;
+ st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage;
+ d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage;
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
@@ -1163,12 +1163,12 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
{
u8 obuf[] = {7, 1};
- d->fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config,
+ d->fe_adap[0].fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config,
&d->dev->i2c_adap, 0);
- if (d->fe == NULL)
+ if (d->fe_adap[0].fe == NULL)
return -EIO;
- d->fe->ops.set_voltage = dw210x_set_voltage;
+ d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
@@ -1204,9 +1204,9 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
err("command 0x51 transfer failed.");
- d->fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
+ d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
&d->dev->i2c_adap);
- if (d->fe == NULL)
+ if (d->fe_adap[0].fe == NULL)
return -EIO;
info("Attached DS3000!\n");
@@ -1216,14 +1216,14 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe, 0x60,
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
&adap->dev->i2c_adap, DVB_PLL_OPERA1);
return 0;
}
static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe, 0x60,
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
&adap->dev->i2c_adap, DVB_PLL_TUA6034);
return 0;
@@ -1535,7 +1535,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
DW210X_READ_MSG);
if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) {
dw2102_properties.i2c_algo = &dw2102_i2c_algo;
- dw2102_properties.adapter->tuner_attach = &dw2102_tuner_attach;
+ dw2102_properties.adapter->fe[0].tuner_attach = &dw2102_tuner_attach;
break;
} else {
/* check STV0288 frontend */
@@ -1591,6 +1591,8 @@ static struct dvb_usb_device_properties dw2102_properties = {
.read_mac_address = dw210x_read_mac_address,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = dw2102_frontend_attach,
.stream = {
.type = USB_BULK,
@@ -1602,6 +1604,7 @@ static struct dvb_usb_device_properties dw2102_properties = {
}
}
},
+ }},
}
},
.num_device_descs = 3,
@@ -1642,6 +1645,8 @@ static struct dvb_usb_device_properties dw2104_properties = {
.read_mac_address = dw210x_read_mac_address,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = dw2104_frontend_attach,
.stream = {
.type = USB_BULK,
@@ -1653,6 +1658,7 @@ static struct dvb_usb_device_properties dw2104_properties = {
}
}
},
+ }},
}
},
.num_device_descs = 2,
@@ -1689,6 +1695,8 @@ static struct dvb_usb_device_properties dw3101_properties = {
.read_mac_address = dw210x_read_mac_address,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = dw3101_frontend_attach,
.tuner_attach = dw3101_tuner_attach,
.stream = {
@@ -1701,6 +1709,7 @@ static struct dvb_usb_device_properties dw3101_properties = {
}
}
},
+ }},
}
},
.num_device_descs = 1,
@@ -1733,6 +1742,8 @@ static struct dvb_usb_device_properties s6x0_properties = {
.read_mac_address = s6x0_read_mac_address,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = zl100313_frontend_attach,
.stream = {
.type = USB_BULK,
@@ -1744,6 +1755,7 @@ static struct dvb_usb_device_properties s6x0_properties = {
}
}
},
+ }},
}
},
.num_device_descs = 1,
@@ -1810,6 +1822,8 @@ static struct dvb_usb_device_properties su3000_properties = {
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = su3000_streaming_ctrl,
.frontend_attach = su3000_frontend_attach,
.stream = {
@@ -1822,6 +1836,7 @@ static struct dvb_usb_device_properties su3000_properties = {
}
}
}
+ }},
}
},
.num_device_descs = 3,
@@ -1855,7 +1870,7 @@ static int dw2102_probe(struct usb_interface *intf,
p1100->devices[0] = d1100;
p1100->rc.legacy.rc_map_table = rc_map_tbs_table;
p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
- p1100->adapter->frontend_attach = stv0288_frontend_attach;
+ p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach;
s660 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!s660) {
@@ -1869,7 +1884,7 @@ static int dw2102_probe(struct usb_interface *intf,
s660->devices[0] = d660;
s660->devices[1] = d480_1;
s660->devices[2] = d480_2;
- s660->adapter->frontend_attach = ds3000_frontend_attach;
+ s660->adapter->fe[0].frontend_attach = ds3000_frontend_attach;
p7500 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!p7500) {
@@ -1883,7 +1898,7 @@ static int dw2102_probe(struct usb_interface *intf,
p7500->devices[0] = d7500;
p7500->rc.legacy.rc_map_table = rc_map_tbs_table;
p7500->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
- p7500->adapter->frontend_attach = prof_7500_frontend_attach;
+ p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach;
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
THIS_MODULE, NULL, adapter_nr) ||
diff --git a/drivers/media/dvb/dvb-usb/ec168.c b/drivers/media/dvb/dvb-usb/ec168.c
index 1ba3e5dbee1..78442fe4aa5 100644
--- a/drivers/media/dvb/dvb-usb/ec168.c
+++ b/drivers/media/dvb/dvb-usb/ec168.c
@@ -200,9 +200,9 @@ static struct ec100_config ec168_ec100_config = {
static int ec168_ec100_frontend_attach(struct dvb_usb_adapter *adap)
{
deb_info("%s:\n", __func__);
- adap->fe = dvb_attach(ec100_attach, &ec168_ec100_config,
+ adap->fe_adap[0].fe = dvb_attach(ec100_attach, &ec168_ec100_config,
&adap->dev->i2c_adap);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -ENODEV;
return 0;
@@ -228,7 +228,7 @@ static struct mxl5005s_config ec168_mxl5003s_config = {
static int ec168_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap)
{
deb_info("%s:\n", __func__);
- return dvb_attach(mxl5005s_attach, adap->fe, &adap->dev->i2c_adap,
+ return dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&ec168_mxl5003s_config) == NULL ? -ENODEV : 0;
}
@@ -382,6 +382,8 @@ static struct dvb_usb_device_properties ec168_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = ec168_streaming_ctrl,
.frontend_attach = ec168_ec100_frontend_attach,
.tuner_attach = ec168_mxl5003s_tuner_attach,
@@ -395,6 +397,7 @@ static struct dvb_usb_device_properties ec168_properties = {
}
}
},
+ }},
}
},
diff --git a/drivers/media/dvb/dvb-usb/friio.c b/drivers/media/dvb/dvb-usb/friio.c
index 76159aed9bb..b092dc2137c 100644
--- a/drivers/media/dvb/dvb-usb/friio.c
+++ b/drivers/media/dvb/dvb-usb/friio.c
@@ -403,8 +403,8 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
if (friio_initialize(adap->dev) < 0)
return -EIO;
- adap->fe = jdvbt90502_attach(adap->dev);
- if (adap->fe == NULL)
+ adap->fe_adap[0].fe = jdvbt90502_attach(adap->dev);
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
return 0;
@@ -473,6 +473,8 @@ static struct dvb_usb_device_properties friio_properties = {
/* caps:0 => no pid filter, 188B TS packet */
/* GL861 has a HW pid filter, but no info available. */
{
+ .num_frontends = 1,
+ .fe = {{
.caps = 0,
.frontend_attach = friio_frontend_attach,
@@ -490,6 +492,7 @@ static struct dvb_usb_device_properties friio_properties = {
}
}
},
+ }},
}
},
.i2c_algo = &gl861_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/gl861.c b/drivers/media/dvb/dvb-usb/gl861.c
index 6f596ed4176..63681df244c 100644
--- a/drivers/media/dvb/dvb-usb/gl861.c
+++ b/drivers/media/dvb/dvb-usb/gl861.c
@@ -103,9 +103,9 @@ static struct zl10353_config gl861_zl10353_config = {
static int gl861_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe = dvb_attach(zl10353_attach, &gl861_zl10353_config,
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &gl861_zl10353_config,
&adap->dev->i2c_adap);
- if (adap->fe == NULL)
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
return 0;
@@ -118,7 +118,7 @@ static struct qt1010_config gl861_qt1010_config = {
static int gl861_tuner_attach(struct dvb_usb_adapter *adap)
{
return dvb_attach(qt1010_attach,
- adap->fe, &adap->dev->i2c_adap,
+ adap->fe_adap[0].fe, &adap->dev->i2c_adap,
&gl861_qt1010_config) == NULL ? -ENODEV : 0;
}
@@ -167,6 +167,8 @@ static struct dvb_usb_device_properties gl861_properties = {
.num_adapters = 1,
.adapter = {{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = gl861_frontend_attach,
.tuner_attach = gl861_tuner_attach,
@@ -181,6 +183,7 @@ static struct dvb_usb_device_properties gl861_properties = {
}
}
},
+ }},
} },
.i2c_algo = &gl861_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
index 60d11e57e7d..5426267980c 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
@@ -144,19 +144,25 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend* fe,
cmd[6] = (freq >> 16) & 0xff;
cmd[7] = (freq >> 24) & 0xff;
+ /* backwards compatibility: DVB-S + 8-PSK were used for Turbo-FEC */
+ if (c->delivery_system == SYS_DVBS && c->modulation == PSK_8)
+ c->delivery_system = SYS_TURBO;
+
switch (c->delivery_system) {
case SYS_DVBS:
- /* Allow QPSK and 8PSK (even for DVB-S) */
- if (c->modulation != QPSK && c->modulation != PSK_8) {
+ if (c->modulation != QPSK) {
deb_fe("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
c->fec_inner = FEC_AUTO;
break;
- case SYS_DVBS2:
+ case SYS_DVBS2: /* kept for backwards compatibility */
deb_fe("%s: DVB-S2 delivery system selected\n", __func__);
break;
+ case SYS_TURBO:
+ deb_fe("%s: Turbo-FEC delivery system selected\n", __func__);
+ break;
default:
deb_fe("%s: unsupported delivery system selected (%d)\n",
@@ -189,7 +195,10 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend* fe,
default:
cmd[9] = 5; break;
}
- cmd[8] = ADV_MOD_DVB_QPSK;
+ if (c->delivery_system == SYS_TURBO)
+ cmd[8] = ADV_MOD_TURBO_QPSK;
+ else
+ cmd[8] = ADV_MOD_DVB_QPSK;
break;
case PSK_8: /* PSK_8 is for compatibility with DN */
cmd[8] = ADV_MOD_TURBO_8PSK;
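The two hunks above fold the legacy "DVB-S + 8PSK" convention into the explicit SYS_TURBO delivery system before the tuning command is built. A minimal editorial sketch of that remapping, factored into a hypothetical helper (SYS_DVBS, SYS_TURBO, PSK_8 and struct dtv_frontend_properties come from the in-kernel DVB frontend API; the function name is invented and not part of the patch):

static void gp8psk_remap_turbo_compat(struct dtv_frontend_properties *c)
{
	/* Older userspace tuned Turbo-FEC by requesting DVB-S with 8PSK,
	 * so translate that combination to the explicit Turbo system. */
	if (c->delivery_system == SYS_DVBS && c->modulation == PSK_8)
		c->delivery_system = SYS_TURBO;
}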
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 1cb3d9a66e0..5f71284703d 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -230,7 +230,7 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe = gp8psk_fe_attach(adap->dev);
+ adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev);
return 0;
}
@@ -268,6 +268,8 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = gp8psk_streaming_ctrl,
.frontend_attach = gp8psk_frontend_attach,
/* parameter for the MPEG2-data transfer */
@@ -281,6 +283,7 @@ static struct dvb_usb_device_properties gp8psk_properties = {
}
}
},
+ }},
}
},
.power_ctrl = gp8psk_power_ctrl,
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
new file mode 100644
index 00000000000..c4622618714
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -0,0 +1,702 @@
+/* DVB USB compliant linux driver for IT9137
+ *
+ * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com)
+ * IT9137 (C) ITE Tech Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/dvb/it9137.txt for firmware information
+ *
+ */
+#define DVB_USB_LOG_PREFIX "it913x"
+
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <media/rc-core.h>
+
+#include "dvb-usb.h"
+#include "it913x-fe.h"
+
+/* debug */
+static int dvb_usb_it913x_debug;
+#define l_dprintk(var, level, args...) do { \
+ if ((var >= level)) \
+ printk(KERN_DEBUG DVB_USB_LOG_PREFIX ": " args); \
+} while (0)
+
+#define deb_info(level, args...) l_dprintk(dvb_usb_it913x_debug, level, args)
+#define debug_data_snipet(level, name, p) \
+ deb_info(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
+ *p, *(p+1), *(p+2), *(p+3), *(p+4), \
+ *(p+5), *(p+6), *(p+7));
+
+
+module_param_named(debug, dvb_usb_it913x_debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."
+ DVB_USB_DEBUG_STATUS);
+
+static int pid_filter;
+module_param_named(pid, pid_filter, int, 0644);
+MODULE_PARM_DESC(pid, "set default PID filtering: 0=on 1=off");
+
+int cmd_counter;
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+struct it913x_state {
+ u8 id;
+};
+
+struct ite_config {
+ u8 chip_ver;
+ u16 chip_type;
+ u32 firmware;
+ u8 tuner_id_0;
+ u8 tuner_id_1;
+ u8 dual_mode;
+};
+
+struct ite_config it913x_config;
+
+static int it913x_bulk_write(struct usb_device *dev,
+ u8 *snd, int len, u8 pipe)
+{
+ int ret, actual_l;
+
+ ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
+ snd, len , &actual_l, 100);
+ return ret;
+}
+
+static int it913x_bulk_read(struct usb_device *dev,
+ u8 *rev, int len, u8 pipe)
+{
+ int ret, actual_l;
+
+ ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
+ rev, len , &actual_l, 200);
+ return ret;
+}
+
+static u16 check_sum(u8 *p, u8 len)
+{
+ u16 sum = 0;
+ u8 i = 1;
+ while (i < len)
+ sum += (i++ & 1) ? (*p++) << 8 : *p++;
+ return ~sum;
+}
+
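/* Editorial note, not part of the patch: check_sum() consumes len - 1 bytes,
 * adding the 1st, 3rd, 5th, ... bytes into the high byte of the sum and the
 * 2nd, 4th, ... bytes into the low byte, then returns the bitwise complement.
 * Example: p = {0x01, 0x02, 0x03}, len = 3 gives
 *   sum    = (0x01 << 8) + 0x02 = 0x0102
 *   result = ~0x0102            = 0xfefd
 * it913x_io() below passes buf_size, which is one larger than the payload,
 * so exactly the bytes after the length byte are covered. */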
+static int it913x_io(struct usb_device *udev, u8 mode, u8 pro,
+ u8 cmd, u32 reg, u8 addr, u8 *data, u8 len)
+{
+ int ret = 0, i, buf_size = 1;
+ u8 *buff;
+ u8 rlen;
+ u16 chk_sum;
+
+ buff = kzalloc(256, GFP_KERNEL);
+ if (!buff) {
+ info("USB Buffer Failed");
+ return -ENOMEM;
+ }
+
+ buff[buf_size++] = pro;
+ buff[buf_size++] = cmd;
+ buff[buf_size++] = cmd_counter;
+
+ switch (mode) {
+ case READ_LONG:
+ case WRITE_LONG:
+ buff[buf_size++] = len;
+ buff[buf_size++] = 2;
+ buff[buf_size++] = (reg >> 24);
+ buff[buf_size++] = (reg >> 16) & 0xff;
+ buff[buf_size++] = (reg >> 8) & 0xff;
+ buff[buf_size++] = reg & 0xff;
+ break;
+ case READ_SHORT:
+ buff[buf_size++] = addr;
+ break;
+ case WRITE_SHORT:
+ buff[buf_size++] = len;
+ buff[buf_size++] = addr;
+ buff[buf_size++] = (reg >> 8) & 0xff;
+ buff[buf_size++] = reg & 0xff;
+ break;
+ case READ_DATA:
+ case WRITE_DATA:
+ break;
+ case WRITE_CMD:
+ mode = 7;
+ break;
+ default:
+ kfree(buff);
+ return -EINVAL;
+ }
+
+ if (mode & 1) {
+ for (i = 0; i < len ; i++)
+ buff[buf_size++] = data[i];
+ }
+ chk_sum = check_sum(&buff[1], buf_size);
+
+ buff[buf_size++] = chk_sum >> 8;
+ buff[0] = buf_size;
+ buff[buf_size++] = (chk_sum & 0xff);
+
+ ret = it913x_bulk_write(udev, buff, buf_size , 0x02);
+
+ ret |= it913x_bulk_read(udev, buff, (mode & 1) ?
+ 5 : len + 5 , 0x01);
+
+ rlen = (mode & 0x1) ? 0x1 : len;
+
+ if (mode & 1)
+ ret |= buff[2];
+ else
+ memcpy(data, &buff[3], rlen);
+
+ cmd_counter++;
+
+ kfree(buff);
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
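/* Editorial summary, pieced together from it913x_io() above and not part of
 * the patch. The command packet sent to bulk endpoint 0x02 looks like:
 *
 *   [0]  packet length, filled in just before the last checksum byte, so it
 *        counts everything except that final byte
 *   [1]  pro (processor / device selector)
 *   [2]  cmd
 *   [3]  cmd_counter (sequence number)
 *   ...  mode-specific header:
 *          READ_LONG / WRITE_LONG : len, 0x02, 32-bit reg (big-endian)
 *          READ_SHORT             : addr
 *          WRITE_SHORT            : len, addr, 16-bit reg (big-endian)
 *   ...  payload, only for modes with the low bit set (apparently the
 *        write-type commands)
 *   ...  16-bit checksum from check_sum(), big-endian
 *
 * The reply on endpoint 0x01 is 5 bytes for those write-type commands, with
 * a status code in byte 2, and len + 5 bytes for reads, with the data copied
 * from byte 3 onwards. */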
+static int it913x_wr_reg(struct usb_device *udev, u8 pro, u32 reg , u8 data)
+{
+ int ret;
+ u8 b[1];
+ b[0] = data;
+ ret = it913x_io(udev, WRITE_LONG, pro,
+ CMD_DEMOD_WRITE, reg, 0, b, sizeof(b));
+
+ return ret;
+}
+
+static int it913x_read_reg(struct usb_device *udev, u32 reg)
+{
+ int ret;
+ u8 data[1];
+
+ ret = it913x_io(udev, READ_LONG, DEV_0,
+ CMD_DEMOD_READ, reg, 0, &data[0], 1);
+
+ return (ret < 0) ? ret : data[0];
+}
+
+static u32 it913x_query(struct usb_device *udev, u8 pro)
+{
+ int ret;
+ u8 data[4];
+ ret = it913x_io(udev, READ_LONG, pro, CMD_DEMOD_READ,
+ 0x1222, 0, &data[0], 3);
+
+ it913x_config.chip_ver = data[0];
+ it913x_config.chip_type = (u16)(data[2] << 8) + data[1];
+
+ info("Chip Version=%02x Chip Type=%04x", it913x_config.chip_ver,
+ it913x_config.chip_type);
+
+ ret |= it913x_io(udev, READ_SHORT, pro,
+ CMD_QUERYINFO, 0, 0x1, &data[0], 4);
+
+ it913x_config.firmware = (data[0] << 24) + (data[1] << 16) +
+ (data[2] << 8) + data[3];
+
+ return (ret < 0) ? 0 : it913x_config.firmware;
+}
+
+static int it913x_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ int ret = 0;
+ u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
+
+ if (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0)
+ return -EAGAIN;
+ deb_info(1, "PID_C (%02x)", onoff);
+
+ if (!onoff)
+ ret = it913x_wr_reg(adap->dev->udev, pro, PID_RST, 0x1);
+
+ mutex_unlock(&adap->dev->i2c_mutex);
+ return ret;
+}
+
+static int it913x_pid_filter(struct dvb_usb_adapter *adap,
+ int index, u16 pid, int onoff)
+{
+ struct usb_device *udev = adap->dev->udev;
+ int ret = 0;
+ u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
+
+ if (pid_filter > 0)
+ return 0;
+
+ if (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0)
+ return -EAGAIN;
+ deb_info(1, "PID_F (%02x)", onoff);
+ if (onoff) {
+ ret = it913x_wr_reg(udev, pro, PID_EN, 0x1);
+
+ ret |= it913x_wr_reg(udev, pro, PID_LSB, (u8)(pid & 0xff));
+
+ ret |= it913x_wr_reg(udev, pro, PID_MSB, (u8)(pid >> 8));
+
+ ret |= it913x_wr_reg(udev, pro, PID_INX_EN, (u8)onoff);
+
+ ret |= it913x_wr_reg(udev, pro, PID_INX, (u8)(index & 0x1f));
+
+ }
+
+ mutex_unlock(&adap->dev->i2c_mutex);
+ return 0;
+}
+
+
+static int it913x_return_status(struct usb_device *udev)
+{
+ u32 firm = 0;
+
+ firm = it913x_query(udev, DEV_0);
+ if (firm > 0)
+ info("Firmware Version %d", firm);
+
+ return (firm > 0) ? firm : 0;
+}
+
+static int it913x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ static u8 data[256];
+ int ret;
+ u32 reg;
+ u8 pro;
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ debug_data_snipet(1, "Message out", msg[0].buf);
+ deb_info(2, "num of messages %d address %02x", num, msg[0].addr);
+
+ pro = (msg[0].addr & 0x2) ? DEV_0_DMOD : 0x0;
+ pro |= (msg[0].addr & 0x20) ? DEV_1 : DEV_0;
+ memcpy(data, msg[0].buf, msg[0].len);
+ reg = (data[0] << 24) + (data[1] << 16) +
+ (data[2] << 8) + data[3];
+ if (num == 2) {
+ ret = it913x_io(d->udev, READ_LONG, pro,
+ CMD_DEMOD_READ, reg, 0, data, msg[1].len);
+ memcpy(msg[1].buf, data, msg[1].len);
+ } else
+ ret = it913x_io(d->udev, WRITE_LONG, pro, CMD_DEMOD_WRITE,
+ reg, 0, &data[4], msg[0].len - 4);
+
+ mutex_unlock(&d->i2c_mutex);
+
+ return ret;
+}
+
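For reference, an editorial usage sketch of the I2C transfer handler above: the first four bytes of a write buffer carry the 32-bit demod register, the rest is data, and a read is a write message followed by a read message. The slave address and register below are illustrative only, and the helper assumes the generic <linux/i2c.h> message API rather than anything specific to this driver.

static int example_it913x_demod_read(struct i2c_adapter *i2c, u8 *val)
{
	u8 wbuf[4] = { 0x00, 0x00, 0x49, 0xc5 };	/* register 0x000049c5 */
	struct i2c_msg msg[2] = {
		{ .addr = 0x1a, .flags = 0, .buf = wbuf, .len = sizeof(wbuf) },
		{ .addr = 0x1a, .flags = I2C_M_RD, .buf = val, .len = 1 },
	};

	/* address bit 0x02 selects the demod register space (DEV_0_DMOD);
	 * bit 0x20 would pick the second device on dual-tuner sticks */
	return (i2c_transfer(i2c, msg, 2) == 2) ? 0 : -EIO;
}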
+static u32 it913x_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm it913x_i2c_algo = {
+ .master_xfer = it913x_i2c_xfer,
+ .functionality = it913x_i2c_func,
+};
+
+/* Callbacks for DVB USB */
+#define IT913X_POLL 250
+static int it913x_rc_query(struct dvb_usb_device *d)
+{
+ u8 ibuf[4];
+ int ret;
+ u32 key;
+ /* Avoid conflict with frontends */
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ ret = it913x_io(d->udev, READ_LONG, PRO_LINK, CMD_IR_GET,
+ 0, 0, &ibuf[0], sizeof(ibuf));
+
+ if ((ibuf[2] + ibuf[3]) == 0xff) {
+ key = ibuf[2];
+ key += ibuf[0] << 8;
+ deb_info(1, "INT Key =%08x", key);
+ if (d->rc_dev != NULL)
+ rc_keydown(d->rc_dev, key, 0);
+ }
+ mutex_unlock(&d->i2c_mutex);
+
+ return ret;
+}
+static int it913x_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
+{
+ int ret = 0, firm_no;
+ u8 reg, remote;
+
+ firm_no = it913x_return_status(udev);
+
+ /* check for dual mode */
+ it913x_config.dual_mode = it913x_read_reg(udev, 0x49c5);
+
+ /* TODO different remotes */
+ remote = it913x_read_reg(udev, 0x49ac); /* Remote */
+ if (remote == 0)
+ props->rc.core.rc_codes = NULL;
+
+ /* TODO at the moment tuner_id is always assigned to 0x38 */
+ it913x_config.tuner_id_0 = it913x_read_reg(udev, 0x49d0);
+
+ info("Dual mode=%x Remote=%x Tuner Type=%x", it913x_config.dual_mode
+ , remote, it913x_config.tuner_id_0);
+
+ if (firm_no > 0) {
+ *cold = 0;
+ return 0;
+ }
+
+ if (it913x_config.dual_mode) {
+ it913x_config.tuner_id_1 = it913x_read_reg(udev, 0x49e0);
+ ret = it913x_wr_reg(udev, DEV_0, GPIOH1_EN, 0x1);
+ ret |= it913x_wr_reg(udev, DEV_0, GPIOH1_ON, 0x1);
+ ret |= it913x_wr_reg(udev, DEV_0, GPIOH1_O, 0x1);
+ msleep(50);
+ ret |= it913x_wr_reg(udev, DEV_0, GPIOH1_O, 0x0);
+ msleep(50);
+ reg = it913x_read_reg(udev, GPIOH1_O);
+ if (reg == 0) {
+ ret |= it913x_wr_reg(udev, DEV_0, GPIOH1_O, 0x1);
+ ret |= it913x_return_status(udev);
+ if (ret != 0)
+ ret = it913x_wr_reg(udev, DEV_0,
+ GPIOH1_O, 0x0);
+ }
+ props->num_adapters = 2;
+ } else
+ props->num_adapters = 1;
+
+ reg = it913x_read_reg(udev, IO_MUX_POWER_CLK);
+
+ if (it913x_config.dual_mode) {
+ ret |= it913x_wr_reg(udev, DEV_0, 0x4bfb, CHIP2_I2C_ADDR);
+ ret |= it913x_wr_reg(udev, DEV_0, CLK_O_EN, 0x1);
+ } else {
+ ret |= it913x_wr_reg(udev, DEV_0, 0x4bfb, 0x0);
+ ret |= it913x_wr_reg(udev, DEV_0, CLK_O_EN, 0x0);
+ }
+
+ *cold = 1;
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int it913x_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ int ret = 0;
+ u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
+
+ if (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0)
+ return -EAGAIN;
+ deb_info(1, "STM (%02x)", onoff);
+
+ if (!onoff)
+ ret = it913x_wr_reg(adap->dev->udev, pro, PID_RST, 0x1);
+
+
+ mutex_unlock(&adap->dev->i2c_mutex);
+
+ return ret;
+}
+
+
+static int it913x_download_firmware(struct usb_device *udev,
+ const struct firmware *fw)
+{
+ int ret = 0, i;
+ u8 packet_size, dlen;
+ u8 *fw_data;
+
+ packet_size = 0x29;
+
+ ret = it913x_wr_reg(udev, DEV_0, I2C_CLK, I2C_CLK_100);
+
+ info("FRM Starting Firmware Download");
+ /* This uses scatter write; the firmware header bytes follow: */
+ /* 03 XX 00 XX = chip number? */
+
+ for (i = 0; i < fw->size; i += packet_size) {
+ if (i > 0)
+ packet_size = 0x39;
+ fw_data = (u8 *)(fw->data + i);
+ dlen = ((i + packet_size) > fw->size)
+ ? (fw->size - i) : packet_size;
+ ret |= it913x_io(udev, WRITE_DATA, DEV_0,
+ CMD_SCATTER_WRITE, 0, 0, fw_data, dlen);
+ udelay(1000);
+ }
+
+ ret |= it913x_io(udev, WRITE_CMD, DEV_0,
+ CMD_BOOT, 0, 0, NULL, 0);
+
+ msleep(100);
+
+ if (ret < 0)
+ info("FRM Firmware Download Failed (%04x)", ret);
+ else
+ info("FRM Firmware Download Completed - Resetting Device");
+
+ ret |= it913x_return_status(udev);
+
+ msleep(30);
+
+ ret |= it913x_wr_reg(udev, DEV_0, I2C_CLK, I2C_CLK_400);
+
+ /* Tuner function */
+ if (it913x_config.dual_mode)
+ ret |= it913x_wr_reg(udev, DEV_0_DMOD , 0xec4c, 0xa0);
+
+ ret |= it913x_wr_reg(udev, DEV_0, PADODPU, 0x0);
+ ret |= it913x_wr_reg(udev, DEV_0, AGC_O_D, 0x0);
+ if (it913x_config.dual_mode) {
+ ret |= it913x_wr_reg(udev, DEV_1, PADODPU, 0x0);
+ ret |= it913x_wr_reg(udev, DEV_1, AGC_O_D, 0x0);
+ }
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int it913x_name(struct dvb_usb_adapter *adap)
+{
+ const char *desc = adap->dev->desc->name;
+ char *fe_name[] = {"_1", "_2", "_3", "_4"};
+ char *name = adap->fe_adap[0].fe->ops.info.name;
+
+ strlcpy(name, desc, 128);
+ strlcat(name, fe_name[adap->id], 128);
+
+ return 0;
+}
+
+static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct usb_device *udev = adap->dev->udev;
+ int ret = 0;
+ u8 adf = it913x_read_reg(udev, IO_MUX_POWER_CLK);
+ u8 adap_addr = I2C_BASE_ADDR + (adap->id << 5);
+ u16 ep_size = adap->props.fe[0].stream.u.bulk.buffersize;
+ u8 tuner_id, tuner_type;
+
+ if (adap->id == 0)
+ tuner_id = it913x_config.tuner_id_0;
+ else
+ tuner_id = it913x_config.tuner_id_1;
+
+ /* TODO: we always use IT9137; possible tuner references are listed here */
+ /* Documentation suggests the exact tuner ID is a don't care */
+ switch (tuner_id) {
+ case 0x51:
+ case 0x52:
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ default:
+ case 0x38:
+ tuner_type = IT9137;
+ }
+
+ adap->fe_adap[0].fe = dvb_attach(it913x_fe_attach,
+ &adap->dev->i2c_adap, adap_addr, adf, tuner_type);
+
+ if (adap->id == 0 && adap->fe_adap[0].fe) {
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2_SW_RST, 0x1);
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2IF2_SW_RST, 0x1);
+ ret = it913x_wr_reg(udev, DEV_0, EP0_TX_EN, 0x0f);
+ ret = it913x_wr_reg(udev, DEV_0, EP0_TX_NAK, 0x1b);
+ ret = it913x_wr_reg(udev, DEV_0, EP0_TX_EN, 0x2f);
+ ret = it913x_wr_reg(udev, DEV_0, EP4_TX_LEN_LSB,
+ ep_size & 0xff);
+ ret = it913x_wr_reg(udev, DEV_0, EP4_TX_LEN_MSB, ep_size >> 8);
+ ret = it913x_wr_reg(udev, DEV_0, EP4_MAX_PKT, 0x80);
+ } else if (adap->id == 1 && adap->fe_adap[0].fe) {
+ ret = it913x_wr_reg(udev, DEV_0, EP0_TX_EN, 0x6f);
+ ret = it913x_wr_reg(udev, DEV_0, EP5_TX_LEN_LSB,
+ ep_size & 0xff);
+ ret = it913x_wr_reg(udev, DEV_0, EP5_TX_LEN_MSB, ep_size >> 8);
+ ret = it913x_wr_reg(udev, DEV_0, EP5_MAX_PKT, 0x80);
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2IF2_EN, 0x1);
+ ret = it913x_wr_reg(udev, DEV_1_DMOD, MP2IF_SERIAL, 0x1);
+ ret = it913x_wr_reg(udev, DEV_1, TOP_HOSTB_SER_MODE, 0x1);
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, TSIS_ENABLE, 0x1);
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2_SW_RST, 0x0);
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2IF2_SW_RST, 0x0);
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2IF2_HALF_PSB, 0x0);
+ ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2IF_STOP_EN, 0x1);
+ ret = it913x_wr_reg(udev, DEV_1_DMOD, MPEG_FULL_SPEED, 0x0);
+ ret = it913x_wr_reg(udev, DEV_1_DMOD, MP2IF_STOP_EN, 0x0);
+ } else
+ return -ENODEV;
+
+ ret = it913x_name(adap);
+
+ return ret;
+}
+
+/* DVB USB Driver */
+static struct dvb_usb_device_properties it913x_properties;
+
+static int it913x_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ cmd_counter = 0;
+ if (0 == dvb_usb_device_init(intf, &it913x_properties,
+ THIS_MODULE, NULL, adapter_nr)) {
+ info("DEV registering device driver");
+ return 0;
+ }
+
+ info("DEV it913x Error");
+ return -ENODEV;
+
+}
+
+static struct usb_device_id it913x_table[] = {
+ { USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09) },
+ { USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135) },
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, it913x_table);
+
+static struct dvb_usb_device_properties it913x_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = it913x_download_firmware,
+ .firmware = "dvb-usb-it9137-01.fw",
+ .no_reconnect = 1,
+ .size_of_priv = sizeof(struct it913x_state),
+ .num_adapters = 2,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER|
+ DVB_USB_ADAP_NEED_PID_FILTERING|
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .streaming_ctrl = it913x_streaming_ctrl,
+ .pid_filter_count = 31,
+ .pid_filter = it913x_pid_filter,
+ .pid_filter_ctrl = it913x_pid_filter_ctrl,
+ .frontend_attach = it913x_frontend_attach,
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 10,
+ .endpoint = 0x04,
+ .u = {/* Keep Low if PID filter on */
+ .bulk = {
+ .buffersize = 3584,
+
+ }
+ }
+ }
+ }},
+ },
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER|
+ DVB_USB_ADAP_NEED_PID_FILTERING|
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .streaming_ctrl = it913x_streaming_ctrl,
+ .pid_filter_count = 31,
+ .pid_filter = it913x_pid_filter,
+ .pid_filter_ctrl = it913x_pid_filter_ctrl,
+ .frontend_attach = it913x_frontend_attach,
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 5,
+ .endpoint = 0x05,
+ .u = {
+ .bulk = {
+ .buffersize = 3584,
+
+ }
+ }
+ }
+ }},
+ }
+ },
+ .identify_state = it913x_identify_state,
+ .rc.core = {
+ .protocol = RC_TYPE_NEC,
+ .module_name = "it913x",
+ .rc_query = it913x_rc_query,
+ .rc_interval = IT913X_POLL,
+ .allowed_protos = RC_TYPE_NEC,
+ .rc_codes = RC_MAP_KWORLD_315U,
+ },
+ .i2c_algo = &it913x_i2c_algo,
+ .num_device_descs = 2,
+ .devices = {
+ { "Kworld UB499-2T T09(IT9137)",
+ { &it913x_table[0], NULL },
+ },
+ { "ITE 9135 Generic",
+ { &it913x_table[1], NULL },
+ },
+ }
+};
+
+static struct usb_driver it913x_driver = {
+ .name = "it913x",
+ .probe = it913x_probe,
+ .disconnect = dvb_usb_device_exit,
+ .id_table = it913x_table,
+};
+
+/* module stuff */
+static int __init it913x_module_init(void)
+{
+ int result = usb_register(&it913x_driver);
+ if (result) {
+ err("usb_register failed. Error number %d", result);
+ return result;
+ }
+
+ return 0;
+}
+
+static void __exit it913x_module_exit(void)
+{
+ /* deregister this driver from the USB subsystem */
+ usb_deregister(&it913x_driver);
+}
+
+module_init(it913x_module_init);
+module_exit(it913x_module_exit);
+
+MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
+MODULE_DESCRIPTION("it913x USB 2 Driver");
+MODULE_VERSION("1.07");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 37b146961ae..b9228240f5c 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -162,7 +162,7 @@ static int lme2510_usb_talk(struct dvb_usb_device *d,
int ret = 0;
if (st->usb_buffer == NULL) {
- st->usb_buffer = kmalloc(512, GFP_KERNEL);
+ st->usb_buffer = kmalloc(64, GFP_KERNEL);
if (st->usb_buffer == NULL) {
info("MEM Error no memory");
return -ENOMEM;
@@ -175,8 +175,8 @@ static int lme2510_usb_talk(struct dvb_usb_device *d,
if (ret < 0)
return -EAGAIN;
- /* the read/write capped at 512 */
- memcpy(buff, wbuf, (wlen > 512) ? 512 : wlen);
+ /* the read/write capped at 64 */
+ memcpy(buff, wbuf, (wlen < 64) ? wlen : 64);
ret |= usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, 0x01));
@@ -186,8 +186,8 @@ static int lme2510_usb_talk(struct dvb_usb_device *d,
ret |= usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, 0x01));
- ret |= lme2510_bulk_read(d->udev, buff, (rlen > 512) ?
- 512 : rlen , 0x01);
+ ret |= lme2510_bulk_read(d->udev, buff, (rlen < 64) ?
+ rlen : 64 , 0x01);
if (rlen > 0)
memcpy(rbuf, buff, rlen);
@@ -333,7 +333,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
if (lme_int->lme_urb == NULL)
return -ENOMEM;
- lme_int->buffer = usb_alloc_coherent(adap->dev->udev, 5000, GFP_ATOMIC,
+ lme_int->buffer = usb_alloc_coherent(adap->dev->udev, 128, GFP_ATOMIC,
&lme_int->lme_urb->transfer_dma);
if (lme_int->buffer == NULL)
@@ -343,10 +343,10 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
adap->dev->udev,
usb_rcvintpipe(adap->dev->udev, 0xa),
lme_int->buffer,
- 4096,
+ 128,
lme2510_int_response,
adap,
- 11);
+ 8);
lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -580,7 +580,7 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct lme2510_state *st = d->priv;
- static u8 obuf[64], ibuf[512];
+ static u8 obuf[64], ibuf[64];
int i, read, read_o;
u16 len;
u8 gate = st->i2c_gate;
@@ -621,7 +621,7 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
len = msg[i].len+3;
}
- if (lme2510_msg(d, obuf, len, ibuf, 512) < 0) {
+ if (lme2510_msg(d, obuf, len, ibuf, 64) < 0) {
deb_info(1, "i2c transfer failed.");
return -EAGAIN;
}
@@ -941,7 +941,7 @@ static int lme_name(struct dvb_usb_adapter *adap)
const char *desc = adap->dev->desc->name;
char *fe_name[] = {"", " LG TDQY-P001F", " SHARP:BS2F7HZ7395",
" SHARP:BS2F7HZ0194"};
- char *name = adap->fe->ops.info.name;
+ char *name = adap->fe_adap[0].fe->ops.info.name;
strlcpy(name, desc, 128);
strlcat(name, fe_name[st->tuner_config], 128);
@@ -958,10 +958,10 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
st->i2c_talk_onoff = 1;
st->i2c_gate = 4;
- adap->fe = dvb_attach(tda10086_attach, &tda10086_config,
+ adap->fe_adap[0].fe = dvb_attach(tda10086_attach, &tda10086_config,
&adap->dev->i2c_adap);
- if (adap->fe) {
+ if (adap->fe_adap[0].fe) {
info("TUN Found Frontend TDA10086");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 4;
@@ -975,9 +975,9 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
}
st->i2c_gate = 4;
- adap->fe = dvb_attach(stv0299_attach, &sharp_z0194_config,
+ adap->fe_adap[0].fe = dvb_attach(stv0299_attach, &sharp_z0194_config,
&adap->dev->i2c_adap);
- if (adap->fe) {
+ if (adap->fe_adap[0].fe) {
info("FE Found Stv0299");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 5;
@@ -991,9 +991,9 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
}
st->i2c_gate = 5;
- adap->fe = dvb_attach(stv0288_attach, &lme_config,
+ adap->fe_adap[0].fe = dvb_attach(stv0288_attach, &lme_config,
&adap->dev->i2c_adap);
- if (adap->fe) {
+ if (adap->fe_adap[0].fe) {
info("FE Found Stv0288");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 5;
@@ -1010,15 +1010,15 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
end: if (ret) {
- if (adap->fe) {
- dvb_frontend_detach(adap->fe);
- adap->fe = NULL;
+ if (adap->fe_adap[0].fe) {
+ dvb_frontend_detach(adap->fe_adap[0].fe);
+ adap->fe_adap[0].fe = NULL;
}
adap->dev->props.rc.core.rc_codes = NULL;
return -ENODEV;
}
- adap->fe->ops.set_voltage = dm04_lme2510_set_voltage;
+ adap->fe_adap[0].fe->ops.set_voltage = dm04_lme2510_set_voltage;
ret = lme_name(adap);
return ret;
}
@@ -1031,17 +1031,17 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
switch (st->tuner_config) {
case TUNER_LG:
- if (dvb_attach(tda826x_attach, adap->fe, 0xc0,
+ if (dvb_attach(tda826x_attach, adap->fe_adap[0].fe, 0xc0,
&adap->dev->i2c_adap, 1))
ret = st->tuner_config;
break;
case TUNER_S7395:
- if (dvb_attach(ix2505v_attach , adap->fe, &lme_tuner,
+ if (dvb_attach(ix2505v_attach , adap->fe_adap[0].fe, &lme_tuner,
&adap->dev->i2c_adap))
ret = st->tuner_config;
break;
case TUNER_S0194:
- if (dvb_attach(dvb_pll_attach , adap->fe, 0xc0,
+ if (dvb_attach(dvb_pll_attach , adap->fe_adap[0].fe, 0xc0,
&adap->dev->i2c_adap, DVB_PLL_OPERA1))
ret = st->tuner_config;
break;
@@ -1145,6 +1145,8 @@ static struct dvb_usb_device_properties lme2510_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER|
DVB_USB_ADAP_NEED_PID_FILTERING|
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -1166,6 +1168,7 @@ static struct dvb_usb_device_properties lme2510_properties = {
}
}
}
+ }},
}
},
.rc.core = {
@@ -1193,6 +1196,8 @@ static struct dvb_usb_device_properties lme2510c_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER|
DVB_USB_ADAP_NEED_PID_FILTERING|
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -1214,6 +1219,7 @@ static struct dvb_usb_device_properties lme2510c_properties = {
}
}
}
+ }},
}
},
.rc.core = {
@@ -1241,7 +1247,7 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
void *buffer = NULL;
if (adap != NULL) {
- lme2510_kill_urb(&adap->stream);
+ lme2510_kill_urb(&adap->fe_adap[0].stream);
adap->feedcount = 0;
}
@@ -1255,7 +1261,7 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
if (st->lme_urb != NULL) {
usb_kill_urb(st->lme_urb);
- usb_free_coherent(d->udev, 5000, st->buffer,
+ usb_free_coherent(d->udev, 128, st->buffer,
st->lme_urb->transfer_dma);
info("Interrupt Service Stopped");
}
@@ -1306,5 +1312,5 @@ module_exit(lme2510_module_exit);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.88");
+MODULE_VERSION("1.90");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index 9456792f219..a1e1287c949 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -86,12 +86,12 @@ static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq)
}
for (i = 0; i < d->props.num_adapters; i++)
- flags |= d->adapter[i].props.caps;
+ flags |= d->adapter[i].props.fe[0].caps;
/* Some devices(Dposh) might crash if we attempt touch at all. */
if (flags & DVB_USB_ADAP_HAS_PID_FILTER) {
for (i = 0; i < d->props.num_adapters; i++) {
- epi = d->adapter[i].props.stream.endpoint - 0x81;
+ epi = d->adapter[i].props.fe[0].stream.endpoint - 0x81;
if (epi < 0 || epi >= M9206_MAX_ADAPTERS) {
printk(KERN_INFO "m920x: Unexpected adapter endpoint!\n");
@@ -292,7 +292,7 @@ static int m920x_update_filters(struct dvb_usb_adapter *adap)
struct m920x_state *m = adap->dev->priv;
int enabled = m->filtering_enabled[adap->id];
int i, ret = 0, filter = 0;
- int ep = adap->props.stream.endpoint;
+ int ep = adap->props.fe[0].stream.endpoint;
for (i = 0; i < M9206_MAX_FILTERS; i++)
if (m->filters[adap->id][i] == 8192)
@@ -501,9 +501,10 @@ static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
- if ((adap->fe = dvb_attach(mt352_attach,
- &m920x_mt352_config,
- &adap->dev->i2c_adap)) == NULL)
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach,
+ &m920x_mt352_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) == NULL)
return -EIO;
return 0;
@@ -513,9 +514,10 @@ static int m920x_tda10046_08_frontend_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
- if ((adap->fe = dvb_attach(tda10046_attach,
- &m920x_tda10046_08_config,
- &adap->dev->i2c_adap)) == NULL)
+ adap->fe_adap[0].fe = dvb_attach(tda10046_attach,
+ &m920x_tda10046_08_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) == NULL)
return -EIO;
return 0;
@@ -525,9 +527,10 @@ static int m920x_tda10046_0b_frontend_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
- if ((adap->fe = dvb_attach(tda10046_attach,
- &m920x_tda10046_0b_config,
- &adap->dev->i2c_adap)) == NULL)
+ adap->fe_adap[0].fe = dvb_attach(tda10046_attach,
+ &m920x_tda10046_0b_config,
+ &adap->dev->i2c_adap);
+ if ((adap->fe_adap[0].fe) == NULL)
return -EIO;
return 0;
@@ -537,7 +540,7 @@ static int m920x_qt1010_tuner_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
- if (dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap, &m920x_qt1010_config) == NULL)
+ if (dvb_attach(qt1010_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &m920x_qt1010_config) == NULL)
return -ENODEV;
return 0;
@@ -547,7 +550,7 @@ static int m920x_tda8275_60_tuner_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
- if (dvb_attach(tda827x_attach, adap->fe, 0x60, &adap->dev->i2c_adap, NULL) == NULL)
+ if (dvb_attach(tda827x_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, NULL) == NULL)
return -ENODEV;
return 0;
@@ -557,7 +560,7 @@ static int m920x_tda8275_61_tuner_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
- if (dvb_attach(tda827x_attach, adap->fe, 0x61, &adap->dev->i2c_adap, NULL) == NULL)
+ if (dvb_attach(tda827x_attach, adap->fe_adap[0].fe, 0x61, &adap->dev->i2c_adap, NULL) == NULL)
return -ENODEV;
return 0;
@@ -565,7 +568,7 @@ static int m920x_tda8275_61_tuner_attach(struct dvb_usb_adapter *adap)
static int m920x_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(simple_tuner_attach, adap->fe,
+ dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3);
return 0;
@@ -807,6 +810,9 @@ static struct dvb_usb_device_properties megasky_properties = {
.identify_state = m920x_identify_state,
.num_adapters = 1,
.adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -827,6 +833,7 @@ static struct dvb_usb_device_properties megasky_properties = {
}
}
},
+ }},
}},
.i2c_algo = &m920x_i2c_algo,
@@ -851,6 +858,9 @@ static struct dvb_usb_device_properties digivox_mini_ii_properties = {
.identify_state = m920x_identify_state,
.num_adapters = 1,
.adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -871,6 +881,7 @@ static struct dvb_usb_device_properties digivox_mini_ii_properties = {
}
}
},
+ }},
}},
.i2c_algo = &m920x_i2c_algo,
@@ -910,6 +921,9 @@ static struct dvb_usb_device_properties tvwalkertwin_properties = {
.identify_state = m920x_identify_state,
.num_adapters = 2,
.adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -929,7 +943,11 @@ static struct dvb_usb_device_properties tvwalkertwin_properties = {
.buffersize = 512,
}
}
+ }},
}},{
+ .num_frontends = 1,
+ .fe = {{
+
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -949,6 +967,7 @@ static struct dvb_usb_device_properties tvwalkertwin_properties = {
.buffersize = 512,
}
}
+ }},
},
}},
.i2c_algo = &m920x_i2c_algo,
@@ -974,6 +993,8 @@ static struct dvb_usb_device_properties dposh_properties = {
.identify_state = m920x_identify_state,
.num_adapters = 1,
.adapter = {{
+ .num_frontends = 1,
+ .fe = {{
/* Hardware pid filters don't work with this device/firmware */
.frontend_attach = m920x_mt352_frontend_attach,
@@ -989,6 +1010,7 @@ static struct dvb_usb_device_properties dposh_properties = {
}
}
},
+ }},
}},
.i2c_algo = &m920x_i2c_algo,
@@ -1019,6 +1041,9 @@ static struct dvb_usb_device_properties pinnacle_pctv310e_properties = {
.identify_state = m920x_identify_state,
.num_adapters = 1,
.adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
@@ -1041,6 +1066,7 @@ static struct dvb_usb_device_properties pinnacle_pctv310e_properties = {
}
}
},
+ }},
} },
.i2c_algo = &m920x_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-demod.c b/drivers/media/dvb/dvb-usb/mxl111sf-demod.c
new file mode 100644
index 00000000000..d1f58371c71
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-demod.c
@@ -0,0 +1,614 @@
+/*
+ * mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "mxl111sf-demod.h"
+#include "mxl111sf-reg.h"
+
+/* debug */
+static int mxl111sf_demod_debug;
+module_param_named(debug, mxl111sf_demod_debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
+
+#define mxl_dbg(fmt, arg...) \
+ if (mxl111sf_demod_debug) \
+ mxl_printk(KERN_DEBUG, fmt, ##arg)
+
+/* ------------------------------------------------------------------------ */
+
+struct mxl111sf_demod_state {
+ struct mxl111sf_state *mxl_state;
+
+ struct mxl111sf_demod_config *cfg;
+
+ struct dvb_frontend fe;
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl111sf_demod_read_reg(struct mxl111sf_demod_state *state,
+ u8 addr, u8 *data)
+{
+ return (state->cfg->read_reg) ?
+ state->cfg->read_reg(state->mxl_state, addr, data) :
+ -EINVAL;
+}
+
+static int mxl111sf_demod_write_reg(struct mxl111sf_demod_state *state,
+ u8 addr, u8 data)
+{
+ return (state->cfg->write_reg) ?
+ state->cfg->write_reg(state->mxl_state, addr, data) :
+ -EINVAL;
+}
+
+static
+int mxl111sf_demod_program_regs(struct mxl111sf_demod_state *state,
+ struct mxl111sf_reg_ctrl_info *ctrl_reg_info)
+{
+ return (state->cfg->program_regs) ?
+ state->cfg->program_regs(state->mxl_state, ctrl_reg_info) :
+ -EINVAL;
+}
+
+/* ------------------------------------------------------------------------ */
+/* TPS */
+
+static
+int mxl1x1sf_demod_get_tps_code_rate(struct mxl111sf_demod_state *state,
+ fe_code_rate_t *code_rate)
+{
+ u8 val;
+ int ret = mxl111sf_demod_read_reg(state, V6_CODE_RATE_TPS_REG, &val);
+ /* bit<2:0> - 000:1/2, 001:2/3, 010:3/4, 011:5/6, 100:7/8 */
+ if (mxl_fail(ret))
+ goto fail;
+
+ switch (val & V6_CODE_RATE_TPS_MASK) {
+ case 0:
+ *code_rate = FEC_1_2;
+ break;
+ case 1:
+ *code_rate = FEC_2_3;
+ break;
+ case 2:
+ *code_rate = FEC_3_4;
+ break;
+ case 3:
+ *code_rate = FEC_5_6;
+ break;
+ case 4:
+ *code_rate = FEC_7_8;
+ break;
+ }
+fail:
+ return ret;
+}
+
+static
+int mxl1x1sf_demod_get_tps_constellation(struct mxl111sf_demod_state *state,
+ fe_modulation_t *constellation)
+{
+ u8 val;
+ int ret = mxl111sf_demod_read_reg(state, V6_MODORDER_TPS_REG, &val);
+ /* Constellation, 00 : QPSK, 01 : 16QAM, 10:64QAM */
+ if (mxl_fail(ret))
+ goto fail;
+
+ switch ((val & V6_PARAM_CONSTELLATION_MASK) >> 4) {
+ case 0:
+ *constellation = QPSK;
+ break;
+ case 1:
+ *constellation = QAM_16;
+ break;
+ case 2:
+ *constellation = QAM_64;
+ break;
+ }
+fail:
+ return ret;
+}
+
+static
+int mxl1x1sf_demod_get_tps_guard_fft_mode(struct mxl111sf_demod_state *state,
+ fe_transmit_mode_t *fft_mode)
+{
+ u8 val;
+ int ret = mxl111sf_demod_read_reg(state, V6_MODE_TPS_REG, &val);
+ /* FFT Mode, 00:2K, 01:8K, 10:4K */
+ if (mxl_fail(ret))
+ goto fail;
+
+ switch ((val & V6_PARAM_FFT_MODE_MASK) >> 2) {
+ case 0:
+ *fft_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ *fft_mode = TRANSMISSION_MODE_8K;
+ break;
+ case 2:
+ *fft_mode = TRANSMISSION_MODE_4K;
+ break;
+ }
+fail:
+ return ret;
+}
+
+static
+int mxl1x1sf_demod_get_tps_guard_interval(struct mxl111sf_demod_state *state,
+ fe_guard_interval_t *guard)
+{
+ u8 val;
+ int ret = mxl111sf_demod_read_reg(state, V6_CP_TPS_REG, &val);
+ /* 00:1/32, 01:1/16, 10:1/8, 11:1/4 */
+ if (mxl_fail(ret))
+ goto fail;
+
+ switch ((val & V6_PARAM_GI_MASK) >> 4) {
+ case 0:
+ *guard = GUARD_INTERVAL_1_32;
+ break;
+ case 1:
+ *guard = GUARD_INTERVAL_1_16;
+ break;
+ case 2:
+ *guard = GUARD_INTERVAL_1_8;
+ break;
+ case 3:
+ *guard = GUARD_INTERVAL_1_4;
+ break;
+ }
+fail:
+ return ret;
+}
+
+static
+int mxl1x1sf_demod_get_tps_hierarchy(struct mxl111sf_demod_state *state,
+ fe_hierarchy_t *hierarchy)
+{
+ u8 val;
+ int ret = mxl111sf_demod_read_reg(state, V6_TPS_HIERACHY_REG, &val);
+ /* bit<6:4> - 000:Non hierarchy, 001:1, 010:2, 011:4 */
+ if (mxl_fail(ret))
+ goto fail;
+
+ switch ((val & V6_TPS_HIERARCHY_INFO_MASK) >> 6) {
+ case 0:
+ *hierarchy = HIERARCHY_NONE;
+ break;
+ case 1:
+ *hierarchy = HIERARCHY_1;
+ break;
+ case 2:
+ *hierarchy = HIERARCHY_2;
+ break;
+ case 3:
+ *hierarchy = HIERARCHY_4;
+ break;
+ }
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+/* LOCKS */
+
+static
+int mxl1x1sf_demod_get_sync_lock_status(struct mxl111sf_demod_state *state,
+ int *sync_lock)
+{
+ u8 val = 0;
+ int ret = mxl111sf_demod_read_reg(state, V6_SYNC_LOCK_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+ *sync_lock = (val & SYNC_LOCK_MASK) >> 4;
+fail:
+ return ret;
+}
+
+static
+int mxl1x1sf_demod_get_rs_lock_status(struct mxl111sf_demod_state *state,
+ int *rs_lock)
+{
+ u8 val = 0;
+ int ret = mxl111sf_demod_read_reg(state, V6_RS_LOCK_DET_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+ *rs_lock = (val & RS_LOCK_DET_MASK) >> 3;
+fail:
+ return ret;
+}
+
+static
+int mxl1x1sf_demod_get_tps_lock_status(struct mxl111sf_demod_state *state,
+ int *tps_lock)
+{
+ u8 val = 0;
+ int ret = mxl111sf_demod_read_reg(state, V6_TPS_LOCK_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+ *tps_lock = (val & V6_PARAM_TPS_LOCK_MASK) >> 6;
+fail:
+ return ret;
+}
+
+static
+int mxl1x1sf_demod_get_fec_lock_status(struct mxl111sf_demod_state *state,
+ int *fec_lock)
+{
+ u8 val = 0;
+ int ret = mxl111sf_demod_read_reg(state, V6_IRQ_STATUS_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+ *fec_lock = (val & IRQ_MASK_FEC_LOCK) >> 4;
+fail:
+ return ret;
+}
+
+#if 0
+static
+int mxl1x1sf_demod_get_cp_lock_status(struct mxl111sf_demod_state *state,
+ int *cp_lock)
+{
+ u8 val = 0;
+ int ret = mxl111sf_demod_read_reg(state, V6_CP_LOCK_DET_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+ *cp_lock = (val & V6_CP_LOCK_DET_MASK) >> 2;
+fail:
+ return ret;
+}
+#endif
+
+static int mxl1x1sf_demod_reset_irq_status(struct mxl111sf_demod_state *state)
+{
+ return mxl111sf_demod_write_reg(state, 0x0e, 0xff);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl111sf_demod_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *param)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+ int ret = 0;
+
+ struct mxl111sf_reg_ctrl_info phy_pll_patch[] = {
+ {0x00, 0xff, 0x01}, /* change page to 1 */
+ {0x40, 0xff, 0x05},
+ {0x40, 0xff, 0x01},
+ {0x41, 0xff, 0xca},
+ {0x41, 0xff, 0xc0},
+ {0x00, 0xff, 0x00}, /* change page to 0 */
+ {0, 0, 0}
+ };
+
+ mxl_dbg("()");
+
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe, param);
+ if (mxl_fail(ret))
+ goto fail;
+ msleep(50);
+ }
+ ret = mxl111sf_demod_program_regs(state, phy_pll_patch);
+ mxl_fail(ret);
+ msleep(50);
+ ret = mxl1x1sf_demod_reset_irq_status(state);
+ mxl_fail(ret);
+ msleep(100);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#if 0
+/* resets TS Packet error count */
+/* After setting 7th bit of V5_PER_COUNT_RESET_REG, it should be reset to 0. */
+static
+int mxl1x1sf_demod_reset_packet_error_count(struct mxl111sf_demod_state *state)
+{
+ struct mxl111sf_reg_ctrl_info reset_per_count[] = {
+ {0x20, 0x01, 0x01},
+ {0x20, 0x01, 0x00},
+ {0, 0, 0}
+ };
+ return mxl111sf_demod_program_regs(state, reset_per_count);
+}
+#endif
+
+/* returns TS Packet error count */
+/* PER Count = FEC_PER_COUNT * (2 ** (FEC_PER_SCALE * 4)) */
+static int mxl111sf_demod_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+ u32 fec_per_count, fec_per_scale;
+ u8 val;
+ int ret;
+
+ *ucblocks = 0;
+
+ /* FEC_PER_COUNT Register */
+ ret = mxl111sf_demod_read_reg(state, V6_FEC_PER_COUNT_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+
+ fec_per_count = val;
+
+ /* FEC_PER_SCALE Register */
+ ret = mxl111sf_demod_read_reg(state, V6_FEC_PER_SCALE_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+
+ val &= V6_FEC_PER_SCALE_MASK;
+ val *= 4;
+
+ fec_per_scale = 1 << val;
+
+ fec_per_count *= fec_per_scale;
+
+ *ucblocks = fec_per_count;
+fail:
+ return ret;
+}
+
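/* Editorial example of the PER formula above, with invented register values:
 * FEC_PER_COUNT = 18 and FEC_PER_SCALE = 2 give
 *   ucblocks = 18 * 2^(2 * 4) = 18 * 256 = 4608 uncorrected blocks. */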
+#ifdef MXL111SF_DEMOD_ENABLE_CALCULATIONS
+/* FIXME: leaving this enabled breaks the build on some architectures,
+ * and we shouldn't have any floating point math in the kernel, anyway.
+ *
+ * These macros need to be re-written, but it's harmless to simply
+ * return zero for now. */
+#define CALCULATE_BER(avg_errors, count) \
+ ((u32)(avg_errors * 4)/(count*64*188*8))
+#define CALCULATE_SNR(data) \
+ ((u32)((10 * (u32)data / 64) - 2.5))
+#else
+#define CALCULATE_BER(avg_errors, count) 0
+#define CALCULATE_SNR(data) 0
+#endif
+
+static int mxl111sf_demod_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+ u8 val1, val2, val3;
+ int ret;
+
+ *ber = 0;
+
+ ret = mxl111sf_demod_read_reg(state, V6_RS_AVG_ERRORS_LSB_REG, &val1);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_demod_read_reg(state, V6_RS_AVG_ERRORS_MSB_REG, &val2);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_demod_read_reg(state, V6_N_ACCUMULATE_REG, &val3);
+ if (mxl_fail(ret))
+ goto fail;
+
+ *ber = CALCULATE_BER((val1 | (val2 << 8)), val3);
+fail:
+ return ret;
+}
+
+static int mxl111sf_demod_calc_snr(struct mxl111sf_demod_state *state,
+ u16 *snr)
+{
+ u8 val1, val2;
+ int ret;
+
+ *snr = 0;
+
+ ret = mxl111sf_demod_read_reg(state, V6_SNR_RB_LSB_REG, &val1);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_demod_read_reg(state, V6_SNR_RB_MSB_REG, &val2);
+ if (mxl_fail(ret))
+ goto fail;
+
+ *snr = CALCULATE_SNR(val1 | ((val2 & 0x03) << 8));
+fail:
+ return ret;
+}
+
+static int mxl111sf_demod_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+
+ int ret = mxl111sf_demod_calc_snr(state, snr);
+ if (mxl_fail(ret))
+ goto fail;
+
+ *snr /= 10; /* 0.1 dB */
+fail:
+ return ret;
+}
+
+static int mxl111sf_demod_read_status(struct dvb_frontend *fe,
+ fe_status_t *status)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+ int ret, locked, cr_lock, sync_lock, fec_lock;
+
+ *status = 0;
+
+ ret = mxl1x1sf_demod_get_rs_lock_status(state, &locked);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_demod_get_tps_lock_status(state, &cr_lock);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_demod_get_sync_lock_status(state, &sync_lock);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_demod_get_fec_lock_status(state, &fec_lock);
+ if (mxl_fail(ret))
+ goto fail;
+
+ if (locked)
+ *status |= FE_HAS_SIGNAL;
+ if (cr_lock)
+ *status |= FE_HAS_CARRIER;
+ if (sync_lock)
+ *status |= FE_HAS_SYNC;
+ if (fec_lock) /* false positives? */
+ *status |= FE_HAS_VITERBI;
+
+ if ((locked) && (cr_lock) && (sync_lock))
+ *status |= FE_HAS_LOCK;
+fail:
+ return ret;
+}
+
+static int mxl111sf_demod_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+ fe_modulation_t constellation;
+ u16 snr;
+
+ mxl111sf_demod_calc_snr(state, &snr);
+ mxl1x1sf_demod_get_tps_constellation(state, &constellation);
+
+ switch (constellation) {
+ case QPSK:
+ *signal_strength = (snr >= 1300) ?
+ min(65535, snr * 44) : snr * 38;
+ break;
+ case QAM_16:
+ *signal_strength = (snr >= 1500) ?
+ min(65535, snr * 38) : snr * 33;
+ break;
+ case QAM_64:
+ *signal_strength = (snr >= 2000) ?
+ min(65535, snr * 29) : snr * 25;
+ break;
+ default:
+ *signal_strength = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
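/* Editorial example of the scaling above: for QPSK with a reported SNR value
 * of 1300, signal_strength = min(65535, 1300 * 44) = 57200; below the 1300
 * threshold the gentler 38x slope is used instead. */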
+static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+
+ mxl_dbg("()");
+#if 0
+ p->inversion = /* FIXME */ ? INVERSION_ON : INVERSION_OFF;
+#endif
+ if (fe->ops.tuner_ops.get_bandwidth)
+ fe->ops.tuner_ops.get_bandwidth(fe, &p->u.ofdm.bandwidth);
+ if (fe->ops.tuner_ops.get_frequency)
+ fe->ops.tuner_ops.get_frequency(fe, &p->frequency);
+ mxl1x1sf_demod_get_tps_code_rate(state, &p->u.ofdm.code_rate_HP);
+ mxl1x1sf_demod_get_tps_code_rate(state, &p->u.ofdm.code_rate_LP);
+ mxl1x1sf_demod_get_tps_constellation(state, &p->u.ofdm.constellation);
+ mxl1x1sf_demod_get_tps_guard_fft_mode(state,
+ &p->u.ofdm.transmission_mode);
+ mxl1x1sf_demod_get_tps_guard_interval(state,
+ &p->u.ofdm.guard_interval);
+ mxl1x1sf_demod_get_tps_hierarchy(state,
+ &p->u.ofdm.hierarchy_information);
+
+ return 0;
+}
+
+static
+int mxl111sf_demod_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *tune)
+{
+ tune->min_delay_ms = 1000;
+ return 0;
+}
+
+static void mxl111sf_demod_release(struct dvb_frontend *fe)
+{
+ struct mxl111sf_demod_state *state = fe->demodulator_priv;
+ mxl_dbg("()");
+ kfree(state);
+ fe->demodulator_priv = NULL;
+}
+
+static struct dvb_frontend_ops mxl111sf_demod_ops = {
+
+ .info = {
+ .name = "MaxLinear MxL111SF DVB-T demodulator",
+ .type = FE_OFDM,
+ .frequency_min = 177000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 166666,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_HIERARCHY_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER
+ },
+ .release = mxl111sf_demod_release,
+#if 0
+ .init = mxl111sf_init,
+ .i2c_gate_ctrl = mxl111sf_i2c_gate_ctrl,
+#endif
+ .set_frontend = mxl111sf_demod_set_frontend,
+ .get_frontend = mxl111sf_demod_get_frontend,
+ .get_tune_settings = mxl111sf_demod_get_tune_settings,
+ .read_status = mxl111sf_demod_read_status,
+ .read_signal_strength = mxl111sf_demod_read_signal_strength,
+ .read_ber = mxl111sf_demod_read_ber,
+ .read_snr = mxl111sf_demod_read_snr,
+ .read_ucblocks = mxl111sf_demod_read_ucblocks,
+};
+
+struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
+ struct mxl111sf_demod_config *cfg)
+{
+ struct mxl111sf_demod_state *state = NULL;
+
+ mxl_dbg("()");
+
+ state = kzalloc(sizeof(struct mxl111sf_demod_state), GFP_KERNEL);
+ if (state == NULL)
+ return NULL;
+
+ state->mxl_state = mxl_state;
+ state->cfg = cfg;
+
+ memcpy(&state->fe.ops, &mxl111sf_demod_ops,
+ sizeof(struct dvb_frontend_ops));
+
+ state->fe.demodulator_priv = state;
+ return &state->fe;
+}
+EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
+
+MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
+MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1");
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-demod.h b/drivers/media/dvb/dvb-usb/mxl111sf-demod.h
new file mode 100644
index 00000000000..432706ae527
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-demod.h
@@ -0,0 +1,55 @@
+/*
+ * mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MXL111SF_DEMOD_H__
+#define __MXL111SF_DEMOD_H__
+
+#include "dvb_frontend.h"
+#include "mxl111sf.h"
+
+struct mxl111sf_demod_config {
+ int (*read_reg)(struct mxl111sf_state *state, u8 addr, u8 *data);
+ int (*write_reg)(struct mxl111sf_state *state, u8 addr, u8 data);
+ int (*program_regs)(struct mxl111sf_state *state,
+ struct mxl111sf_reg_ctrl_info *ctrl_reg_info);
+};
+
+#if defined(CONFIG_DVB_USB_MXL111SF) || \
+ (defined(CONFIG_DVB_USB_MXL111SF_MODULE) && defined(MODULE))
+extern
+struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
+ struct mxl111sf_demod_config *cfg);
+#else
+static inline
+struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
+ struct mxl111sf_demod_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_USB_MXL111SF */
+
+#endif /* __MXL111SF_DEMOD_H__ */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-gpio.c b/drivers/media/dvb/dvb-usb/mxl111sf-gpio.c
new file mode 100644
index 00000000000..e4121cb8f5e
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-gpio.c
@@ -0,0 +1,763 @@
+/*
+ * mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "mxl111sf-gpio.h"
+#include "mxl111sf-i2c.h"
+#include "mxl111sf.h"
+
+/* ------------------------------------------------------------------------- */
+
+#define MXL_GPIO_MUX_REG_0 0x84
+#define MXL_GPIO_MUX_REG_1 0x89
+#define MXL_GPIO_MUX_REG_2 0x82
+
+#define MXL_GPIO_DIR_INPUT 0
+#define MXL_GPIO_DIR_OUTPUT 1
+
+
+static int mxl111sf_set_gpo_state(struct mxl111sf_state *state, u8 pin, u8 val)
+{
+ int ret;
+ u8 tmp;
+
+ mxl_debug_adv("(%d, %d)", pin, val);
+
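+ /* GPO pins 1-7 are driven via register 0x19; pin 0 and pins 8-10
+    are driven via register 0x30 (pin 0 is remapped onto that bank) */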
+ if ((pin > 0) && (pin < 8)) {
+ ret = mxl111sf_read_reg(state, 0x19, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ tmp &= ~(1 << (pin - 1));
+ tmp |= (val << (pin - 1));
+ ret = mxl111sf_write_reg(state, 0x19, tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ } else if (pin <= 10) {
+ if (pin == 0)
+ pin += 7;
+ ret = mxl111sf_read_reg(state, 0x30, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ tmp &= ~(1 << (pin - 3));
+ tmp |= (val << (pin - 3));
+ ret = mxl111sf_write_reg(state, 0x30, tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ } else
+ ret = -EINVAL;
+fail:
+ return ret;
+}
+
+static int mxl111sf_get_gpi_state(struct mxl111sf_state *state, u8 pin, u8 *val)
+{
+ int ret;
+ u8 tmp;
+
+ mxl_debug("(0x%02x)", pin);
+
+ *val = 0;
+
+ switch (pin) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ ret = mxl111sf_read_reg(state, 0x23, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ *val = (tmp >> (pin + 4)) & 0x01;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ ret = mxl111sf_read_reg(state, 0x2f, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ *val = (tmp >> pin) & 0x01;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ ret = mxl111sf_read_reg(state, 0x22, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ *val = (tmp >> (pin - 3)) & 0x01;
+ break;
+ default:
+ return -EINVAL; /* invalid pin */
+ }
+fail:
+ return ret;
+}
+
+struct mxl_gpio_cfg {
+ u8 pin;
+ u8 dir;
+ u8 val;
+};
+
+static int mxl111sf_config_gpio_pins(struct mxl111sf_state *state,
+ struct mxl_gpio_cfg *gpio_cfg)
+{
+ int ret;
+ u8 tmp;
+
+ mxl_debug_adv("(%d, %d)", gpio_cfg->pin, gpio_cfg->dir);
+
+ switch (gpio_cfg->pin) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ ret = mxl111sf_read_reg(state, MXL_GPIO_MUX_REG_0, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ tmp &= ~(1 << (gpio_cfg->pin + 4));
+ tmp |= (gpio_cfg->dir << (gpio_cfg->pin + 4));
+ ret = mxl111sf_write_reg(state, MXL_GPIO_MUX_REG_0, tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ ret = mxl111sf_read_reg(state, MXL_GPIO_MUX_REG_1, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ tmp &= ~(1 << gpio_cfg->pin);
+ tmp |= (gpio_cfg->dir << gpio_cfg->pin);
+ ret = mxl111sf_write_reg(state, MXL_GPIO_MUX_REG_1, tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ ret = mxl111sf_read_reg(state, MXL_GPIO_MUX_REG_2, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ tmp &= ~(1 << (gpio_cfg->pin - 3));
+ tmp |= (gpio_cfg->dir << (gpio_cfg->pin - 3));
+ ret = mxl111sf_write_reg(state, MXL_GPIO_MUX_REG_2, tmp);
+ if (mxl_fail(ret))
+ goto fail;
+ break;
+ default:
+ return -EINVAL; /* invalid pin */
+ }
+
+ ret = (MXL_GPIO_DIR_OUTPUT == gpio_cfg->dir) ?
+ mxl111sf_set_gpo_state(state,
+ gpio_cfg->pin, gpio_cfg->val) :
+ mxl111sf_get_gpi_state(state,
+ gpio_cfg->pin, &gpio_cfg->val);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+static int mxl111sf_hw_do_set_gpio(struct mxl111sf_state *state,
+ int gpio, int direction, int val)
+{
+ struct mxl_gpio_cfg gpio_config = {
+ .pin = gpio,
+ .dir = direction,
+ .val = val,
+ };
+
+ mxl_debug("(%d, %d, %d)", gpio, direction, val);
+
+ return mxl111sf_config_gpio_pins(state, &gpio_config);
+}
+
+/* ------------------------------------------------------------------------- */
+
+#define PIN_MUX_MPEG_MODE_MASK 0x40 /* 0x17 <6> */
+#define PIN_MUX_MPEG_PAR_EN_MASK 0x01 /* 0x18 <0> */
+#define PIN_MUX_MPEG_SER_EN_MASK 0x02 /* 0x18 <1> */
+#define PIN_MUX_MPG_IN_MUX_MASK 0x80 /* 0x3D <7> */
+#define PIN_MUX_BT656_ENABLE_MASK 0x04 /* 0x12 <2> */
+#define PIN_MUX_I2S_ENABLE_MASK 0x40 /* 0x15 <6> */
+#define PIN_MUX_SPI_MODE_MASK 0x10 /* 0x3D <4> */
+#define PIN_MUX_MCLK_EN_CTRL_MASK 0x10 /* 0x82 <4> */
+#define PIN_MUX_MPSYN_EN_CTRL_MASK 0x20 /* 0x82 <5> */
+#define PIN_MUX_MDVAL_EN_CTRL_MASK 0x40 /* 0x82 <6> */
+#define PIN_MUX_MPERR_EN_CTRL_MASK 0x80 /* 0x82 <7> */
+#define PIN_MUX_MDAT_EN_0_MASK 0x10 /* 0x84 <4> */
+#define PIN_MUX_MDAT_EN_1_MASK 0x20 /* 0x84 <5> */
+#define PIN_MUX_MDAT_EN_2_MASK 0x40 /* 0x84 <6> */
+#define PIN_MUX_MDAT_EN_3_MASK 0x80 /* 0x84 <7> */
+#define PIN_MUX_MDAT_EN_4_MASK 0x10 /* 0x89 <4> */
+#define PIN_MUX_MDAT_EN_5_MASK 0x20 /* 0x89 <5> */
+#define PIN_MUX_MDAT_EN_6_MASK 0x40 /* 0x89 <6> */
+#define PIN_MUX_MDAT_EN_7_MASK 0x80 /* 0x89 <7> */
+
+int mxl111sf_config_pin_mux_modes(struct mxl111sf_state *state,
+ enum mxl111sf_mux_config pin_mux_config)
+{
+ u8 r12, r15, r17, r18, r3D, r82, r84, r89;
+ int ret;
+
+ mxl_debug("(%d)", pin_mux_config);
+
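+ /* cache all pin-mux related registers, patch them for the
+    requested configuration, then write them all back below */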
+ ret = mxl111sf_read_reg(state, 0x17, &r17);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_read_reg(state, 0x18, &r18);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_read_reg(state, 0x12, &r12);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_read_reg(state, 0x15, &r15);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_read_reg(state, 0x82, &r82);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_read_reg(state, 0x84, &r84);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_read_reg(state, 0x89, &r89);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_read_reg(state, 0x3D, &r3D);
+ if (mxl_fail(ret))
+ goto fail;
+
+ switch (pin_mux_config) {
+ case PIN_MUX_TS_OUT_PARALLEL:
+ /* mpeg_mode = 1 */
+ r17 |= PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 1 */
+ r18 |= PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 0 */
+ r15 &= ~PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 1 */
+ r82 |= PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 1 */
+ r82 |= PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 1 */
+ r82 |= PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 1 */
+ r82 |= PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0xF */
+ r84 |= 0xF0;
+ /* mdat_en_ctrl[7:4] = 0xF */
+ r89 |= 0xF0;
+ break;
+ case PIN_MUX_TS_OUT_SERIAL:
+ /* mpeg_mode = 1 */
+ r17 |= PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 1 */
+ r18 |= PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 0 */
+ r15 &= ~PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 1 */
+ r82 |= PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 1 */
+ r82 |= PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 1 */
+ r82 |= PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 1 */
+ r82 |= PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0xF */
+ r84 |= 0xF0;
+ /* mdat_en_ctrl[7:4] = 0xF */
+ r89 |= 0xF0;
+ break;
+ case PIN_MUX_GPIO_MODE:
+ /* mpeg_mode = 0 */
+ r17 &= ~PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 0 */
+ r15 &= ~PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ case PIN_MUX_TS_SERIAL_IN_MODE_0:
+ /* mpeg_mode = 0 */
+ r17 &= ~PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 1 */
+ r18 |= PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 0 */
+ r15 &= ~PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ case PIN_MUX_TS_SERIAL_IN_MODE_1:
+ /* mpeg_mode = 0 */
+ r17 &= ~PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 1 */
+ r18 |= PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 1 */
+ r3D |= PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 0 */
+ r15 &= ~PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ case PIN_MUX_TS_SPI_IN_MODE_1:
+ /* mpeg_mode = 0 */
+ r17 &= ~PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 1 */
+ r18 |= PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 1 */
+ r3D |= PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 1 */
+ r15 |= PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 1 */
+ r3D |= PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ case PIN_MUX_TS_SPI_IN_MODE_0:
+ /* mpeg_mode = 0 */
+ r17 &= ~PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 1 */
+ r18 |= PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 1 */
+ r15 |= PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 1 */
+ r3D |= PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ case PIN_MUX_TS_PARALLEL_IN:
+ /* mpeg_mode = 0 */
+ r17 &= ~PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 1 */
+ r18 |= PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 0 */
+ r15 &= ~PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ case PIN_MUX_BT656_I2S_MODE:
+ /* mpeg_mode = 0 */
+ r17 &= ~PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 1 */
+ r12 |= PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 1 */
+ r15 |= PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ case PIN_MUX_DEFAULT:
+ default:
+ /* mpeg_mode = 1 */
+ r17 |= PIN_MUX_MPEG_MODE_MASK;
+ /* mpeg_par_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_PAR_EN_MASK;
+ /* mpeg_ser_en = 0 */
+ r18 &= ~PIN_MUX_MPEG_SER_EN_MASK;
+ /* mpg_in_mux = 0 */
+ r3D &= ~PIN_MUX_MPG_IN_MUX_MASK;
+ /* bt656_enable = 0 */
+ r12 &= ~PIN_MUX_BT656_ENABLE_MASK;
+ /* i2s_enable = 0 */
+ r15 &= ~PIN_MUX_I2S_ENABLE_MASK;
+ /* spi_mode = 0 */
+ r3D &= ~PIN_MUX_SPI_MODE_MASK;
+ /* mclk_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MCLK_EN_CTRL_MASK;
+ /* mperr_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPERR_EN_CTRL_MASK;
+ /* mdval_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MDVAL_EN_CTRL_MASK;
+ /* mpsyn_en_ctrl = 0 */
+ r82 &= ~PIN_MUX_MPSYN_EN_CTRL_MASK;
+ /* mdat_en_ctrl[3:0] = 0x0 */
+ r84 &= 0x0F;
+ /* mdat_en_ctrl[7:4] = 0x0 */
+ r89 &= 0x0F;
+ break;
+ }
+
+ ret = mxl111sf_write_reg(state, 0x17, r17);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x18, r18);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x12, r12);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x15, r15);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x82, r82);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x84, r84);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x89, r89);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x3D, r3D);
+ if (mxl_fail(ret))
+ goto fail;
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static int mxl111sf_hw_set_gpio(struct mxl111sf_state *state, int gpio, int val)
+{
+ return mxl111sf_hw_do_set_gpio(state, gpio, MXL_GPIO_DIR_OUTPUT, val);
+}
+
+static int mxl111sf_hw_gpio_initialize(struct mxl111sf_state *state)
+{
+ u8 gpioval = 0x07; /* write protect enabled, signal LEDs off */
+ int i, ret;
+
+ mxl_debug("()");
+
+ for (i = 3; i < 8; i++) {
+ ret = mxl111sf_hw_set_gpio(state, i, (gpioval >> i) & 0x01);
+ if (mxl_fail(ret))
+ break;
+ }
+
+ return ret;
+}
+
+#define PCA9534_I2C_ADDR (0x40 >> 1)
+static int pca9534_set_gpio(struct mxl111sf_state *state, int gpio, int val)
+{
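+ /* PCA9534 register 1 is the output port register: read it,
+    update the requested bit, then write it back */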
+ u8 w[2] = { 1, 0 };
+ u8 r = 0;
+ struct i2c_msg msg[] = {
+ { .addr = PCA9534_I2C_ADDR,
+ .flags = 0, .buf = w, .len = 1 },
+ { .addr = PCA9534_I2C_ADDR,
+ .flags = I2C_M_RD, .buf = &r, .len = 1 },
+ };
+
+ mxl_debug("(%d, %d)", gpio, val);
+
+ /* read current GPIO levels from flip-flop */
+ i2c_transfer(&state->d->i2c_adap, msg, 2);
+
+ /* prepare write buffer with current GPIO levels */
+ msg[0].len = 2;
+#if 0
+ w[0] = 1;
+#endif
+ w[1] = r;
+
+ /* clear the desired GPIO */
+ w[1] &= ~(1 << gpio);
+
+ /* set the desired GPIO value */
+ w[1] |= ((val ? 1 : 0) << gpio);
+
+ /* write new GPIO levels to flip-flop */
+ i2c_transfer(&state->d->i2c_adap, &msg[0], 1);
+
+ return 0;
+}
+
+static int pca9534_init_port_expander(struct mxl111sf_state *state)
+{
+ u8 w[2] = { 1, 0x07 }; /* write protect enabled, signal LEDs off */
+
+ struct i2c_msg msg = {
+ .addr = PCA9534_I2C_ADDR,
+ .flags = 0, .buf = w, .len = 2
+ };
+
+ mxl_debug("()");
+
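+ /* preset the expander's output port register (reg 1) before
+    switching its pins to outputs below */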
+ i2c_transfer(&state->d->i2c_adap, &msg, 1);
+
+ /* configure all pins as outputs */
+ w[0] = 3;
+ w[1] = 0;
+
+ i2c_transfer(&state->d->i2c_adap, &msg, 1);
+
+ return 0;
+}
+
+int mxl111sf_set_gpio(struct mxl111sf_state *state, int gpio, int val)
+{
+ mxl_debug("(%d, %d)", gpio, val);
+
+ switch (state->gpio_port_expander) {
+ default:
+ mxl_printk(KERN_ERR,
+ "gpio_port_expander undefined, assuming PCA9534");
+ /* fall-thru */
+ case mxl111sf_PCA9534:
+ return pca9534_set_gpio(state, gpio, val);
+ case mxl111sf_gpio_hw:
+ return mxl111sf_hw_set_gpio(state, gpio, val);
+ }
+}
+
+static int mxl111sf_probe_port_expander(struct mxl111sf_state *state)
+{
+ int ret;
+ u8 w = 1;
+ u8 r = 0;
+ struct i2c_msg msg[] = {
+ { .flags = 0, .buf = &w, .len = 1 },
+ { .flags = I2C_M_RD, .buf = &r, .len = 1 },
+ };
+
+ mxl_debug("()");
+
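+ /* probe for a port expander at 7-bit address 0x38 first, then at
+    0x20; fall back to the chip's own GPIO pins if neither responds */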
+ msg[0].addr = 0x70 >> 1;
+ msg[1].addr = 0x70 >> 1;
+
+ /* attempt a register read; an ACK here means an expander is present */
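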
+ ret = i2c_transfer(&state->d->i2c_adap, msg, 2);
+ if (ret == 2) {
+ state->port_expander_addr = msg[0].addr;
+ state->gpio_port_expander = mxl111sf_PCA9534;
+ mxl_debug("found port expander at 0x%02x",
+ state->port_expander_addr);
+ return 0;
+ }
+
+ msg[0].addr = 0x40 >> 1;
+ msg[1].addr = 0x40 >> 1;
+
+ ret = i2c_transfer(&state->d->i2c_adap, msg, 2);
+ if (ret == 2) {
+ state->port_expander_addr = msg[0].addr;
+ state->gpio_port_expander = mxl111sf_PCA9534;
+ mxl_debug("found port expander at 0x%02x",
+ state->port_expander_addr);
+ return 0;
+ }
+ state->port_expander_addr = 0xff;
+ state->gpio_port_expander = mxl111sf_gpio_hw;
+ mxl_debug("using hardware gpio");
+ return 0;
+}
+
+int mxl111sf_init_port_expander(struct mxl111sf_state *state)
+{
+ mxl_debug("()");
+
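+ /* a port_expander_addr of 0x00 means we have not probed yet */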
+ if (0x00 == state->port_expander_addr)
+ mxl111sf_probe_port_expander(state);
+
+ switch (state->gpio_port_expander) {
+ default:
+ mxl_printk(KERN_ERR,
+ "gpio_port_expander undefined, assuming PCA9534");
+ /* fall-thru */
+ case mxl111sf_PCA9534:
+ return pca9534_init_port_expander(state);
+ case mxl111sf_gpio_hw:
+ return mxl111sf_hw_gpio_initialize(state);
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+int mxl111sf_gpio_mode_switch(struct mxl111sf_state *state, unsigned int mode)
+{
+/* GPO:
+ * 3 - ATSC/MH# | 1 = ATSC transport, 0 = MH transport | default 0
+ * 4 - ATSC_RST## | 1 = ATSC enable, 0 = ATSC Reset | default 0
+ * 5 - ATSC_EN | 1 = ATSC power enable, 0 = ATSC power off | default 0
+ * 6 - MH_RESET# | 1 = MH enable, 0 = MH Reset | default 0
+ * 7 - MH_EN | 1 = MH power enable, 0 = MH power off | default 0
+ */
+ mxl_debug("(%d)", mode);
+
+ switch (mode) {
+ case MXL111SF_GPIO_MOD_MH:
+ mxl111sf_set_gpio(state, 4, 0);
+ mxl111sf_set_gpio(state, 5, 0);
+ msleep(50);
+ mxl111sf_set_gpio(state, 7, 1);
+ msleep(50);
+ mxl111sf_set_gpio(state, 6, 1);
+ msleep(50);
+
+ mxl111sf_set_gpio(state, 3, 0);
+ break;
+ case MXL111SF_GPIO_MOD_ATSC:
+ mxl111sf_set_gpio(state, 6, 0);
+ mxl111sf_set_gpio(state, 7, 0);
+ msleep(50);
+ mxl111sf_set_gpio(state, 5, 1);
+ msleep(50);
+ mxl111sf_set_gpio(state, 4, 1);
+ msleep(50);
+ mxl111sf_set_gpio(state, 3, 1);
+ break;
+ default: /* DVBT / STANDBY */
+ mxl111sf_init_port_expander(state);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-gpio.h b/drivers/media/dvb/dvb-usb/mxl111sf-gpio.h
new file mode 100644
index 00000000000..0220f54299a
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-gpio.h
@@ -0,0 +1,56 @@
+/*
+ * mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _DVB_USB_MXL111SF_GPIO_H_
+#define _DVB_USB_MXL111SF_GPIO_H_
+
+#include "mxl111sf.h"
+
+int mxl111sf_set_gpio(struct mxl111sf_state *state, int gpio, int val);
+int mxl111sf_init_port_expander(struct mxl111sf_state *state);
+
+#define MXL111SF_GPIO_MOD_DVBT 0
+#define MXL111SF_GPIO_MOD_MH 1
+#define MXL111SF_GPIO_MOD_ATSC 2
+int mxl111sf_gpio_mode_switch(struct mxl111sf_state *state, unsigned int mode);
+
+enum mxl111sf_mux_config {
+ PIN_MUX_DEFAULT = 0,
+ PIN_MUX_TS_OUT_PARALLEL,
+ PIN_MUX_TS_OUT_SERIAL,
+ PIN_MUX_GPIO_MODE,
+ PIN_MUX_TS_SERIAL_IN_MODE_0,
+ PIN_MUX_TS_SERIAL_IN_MODE_1,
+ PIN_MUX_TS_SPI_IN_MODE_0,
+ PIN_MUX_TS_SPI_IN_MODE_1,
+ PIN_MUX_TS_PARALLEL_IN,
+ PIN_MUX_BT656_I2S_MODE,
+};
+
+int mxl111sf_config_pin_mux_modes(struct mxl111sf_state *state,
+ enum mxl111sf_mux_config pin_mux_config);
+
+#endif /* _DVB_USB_MXL111SF_GPIO_H_ */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
new file mode 100644
index 00000000000..34434557ef6
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
@@ -0,0 +1,850 @@
+/*
+ * mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "mxl111sf-i2c.h"
+#include "mxl111sf.h"
+
+/* SW-I2C ----------------------------------------------------------------- */
+
+#define SW_I2C_ADDR 0x1a
+#define SW_I2C_EN 0x02
+#define SW_SCL_OUT 0x04
+#define SW_SDA_OUT 0x08
+#define SW_SDA_IN 0x04
+
+#define SW_I2C_BUSY_ADDR 0x2f
+#define SW_I2C_BUSY 0x02
+
+static int mxl111sf_i2c_bitbang_sendbyte(struct mxl111sf_state *state,
+ u8 byte)
+{
+ int i, ret;
+ u8 data = 0;
+
+ mxl_i2c("(0x%02x)", byte);
+
+ ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &data);
+ if (mxl_fail(ret))
+ goto fail;
+
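+ /* clock the byte out MSB first: drive SDA, then pulse SCL high and low */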
+ for (i = 0; i < 8; i++) {
+
+ data = (byte & (0x80 >> i)) ? SW_SDA_OUT : 0;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | data);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | data | SW_SCL_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | data);
+ if (mxl_fail(ret))
+ goto fail;
+ }
+
+ /* last bit was 0 so we need to release SDA */
+ if (!(byte & 1)) {
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+ }
+
+ /* CLK high for ACK readback */
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &data);
+ if (mxl_fail(ret))
+ goto fail;
+
+ /* drop the CLK after getting ACK, SDA will go high right away */
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ if (data & SW_SDA_IN)
+ ret = -EIO;
+fail:
+ return ret;
+}
+
+static int mxl111sf_i2c_bitbang_recvbyte(struct mxl111sf_state *state,
+ u8 *pbyte)
+{
+ int i, ret;
+ u8 byte = 0;
+ u8 data = 0;
+
+ mxl_i2c("()");
+
+ *pbyte = 0;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
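+ /* sample SDA while SCL is high, MSB first */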
+ for (i = 0; i < 8; i++) {
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN |
+ SW_SCL_OUT | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &data);
+ if (mxl_fail(ret))
+ goto fail;
+
+ if (data & SW_SDA_IN)
+ byte |= (0x80 >> i);
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+ }
+ *pbyte = byte;
+fail:
+ return ret;
+}
+
+static int mxl111sf_i2c_start(struct mxl111sf_state *state)
+{
+ int ret;
+
+ mxl_i2c("()");
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SCL_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN); /* start */
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+static int mxl111sf_i2c_stop(struct mxl111sf_state *state)
+{
+ int ret;
+
+ mxl_i2c("()");
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN); /* stop */
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SCL_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_SCL_OUT | SW_SDA_OUT);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+static int mxl111sf_i2c_ack(struct mxl111sf_state *state)
+{
+ int ret;
+ u8 b = 0;
+
+ mxl_i2c("()");
+
+ ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &b);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN);
+ if (mxl_fail(ret))
+ goto fail;
+
+ /* pull SDA low */
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SCL_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SDA_OUT);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+static int mxl111sf_i2c_nack(struct mxl111sf_state *state)
+{
+ int ret;
+
+ mxl_i2c("()");
+
+ /* SDA high to signal last byte read from slave */
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
+ 0x10 | SW_I2C_EN | SW_SDA_OUT);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl111sf_i2c_sw_xfer_msg(struct mxl111sf_state *state,
+ struct i2c_msg *msg)
+{
+ int i, ret;
+
+ mxl_i2c("()");
+
+ if (msg->flags & I2C_M_RD) {
+
+ ret = mxl111sf_i2c_start(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_i2c_bitbang_sendbyte(state,
+ (msg->addr << 1) | 0x01);
+ if (mxl_fail(ret)) {
+ mxl111sf_i2c_stop(state);
+ goto fail;
+ }
+
+ for (i = 0; i < msg->len; i++) {
+ ret = mxl111sf_i2c_bitbang_recvbyte(state,
+ &msg->buf[i]);
+ if (mxl_fail(ret)) {
+ mxl111sf_i2c_stop(state);
+ goto fail;
+ }
+
+ if (i < msg->len - 1)
+ mxl111sf_i2c_ack(state);
+ }
+
+ mxl111sf_i2c_nack(state);
+
+ ret = mxl111sf_i2c_stop(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ } else {
+
+ ret = mxl111sf_i2c_start(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_i2c_bitbang_sendbyte(state,
+ (msg->addr << 1) & 0xfe);
+ if (mxl_fail(ret)) {
+ mxl111sf_i2c_stop(state);
+ goto fail;
+ }
+
+ for (i = 0; i < msg->len; i++) {
+ ret = mxl111sf_i2c_bitbang_sendbyte(state,
+ msg->buf[i]);
+ if (mxl_fail(ret)) {
+ mxl111sf_i2c_stop(state);
+ goto fail;
+ }
+ }
+
+ /* FIXME: we only want to do this on the last transaction */
+ mxl111sf_i2c_stop(state);
+ }
+fail:
+ return ret;
+}
+
+/* HW-I2C ----------------------------------------------------------------- */
+
+#define USB_WRITE_I2C_CMD 0x99
+#define USB_READ_I2C_CMD 0xdd
+#define USB_END_I2C_CMD 0xfe
+
+#define USB_WRITE_I2C_CMD_LEN 26
+#define USB_READ_I2C_CMD_LEN 24
+
+#define I2C_MUX_REG 0x30
+#define I2C_CONTROL_REG 0x00
+#define I2C_SLAVE_ADDR_REG 0x08
+#define I2C_DATA_REG 0x0c
+#define I2C_INT_STATUS_REG 0x10
+
+static int mxl111sf_i2c_send_data(struct mxl111sf_state *state,
+ u8 index, u8 *wdata)
+{
+ int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+ &wdata[1], 25, NULL, 0);
+ mxl_fail(ret);
+
+ return ret;
+}
+
+static int mxl111sf_i2c_get_data(struct mxl111sf_state *state,
+ u8 index, u8 *wdata, u8 *rdata)
+{
+ int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+ &wdata[1], 25, rdata, 24);
+ mxl_fail(ret);
+
+ return ret;
+}
+
+static u8 mxl111sf_i2c_check_status(struct mxl111sf_state *state)
+{
+ u8 status = 0;
+ u8 buf[26];
+
+ mxl_i2c_adv("()");
+
+ buf[0] = USB_READ_I2C_CMD;
+ buf[1] = 0x00;
+
+ buf[2] = I2C_INT_STATUS_REG;
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+
+ buf[5] = USB_END_I2C_CMD;
+
+ mxl111sf_i2c_get_data(state, 0, buf, buf);
+
+ if (buf[1] & 0x04)
+ status = 1;
+
+ return status;
+}
+
+static u8 mxl111sf_i2c_check_fifo(struct mxl111sf_state *state)
+{
+ u8 status = 0;
+ u8 buf[26];
+
+ mxl_i2c("()");
+
+ buf[0] = USB_READ_I2C_CMD;
+ buf[1] = 0x00;
+
+ buf[2] = I2C_MUX_REG;
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+
+ buf[5] = I2C_INT_STATUS_REG;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+ buf[8] = USB_END_I2C_CMD;
+
+ mxl111sf_i2c_get_data(state, 0, buf, buf);
+
+ if (0x08 == (buf[1] & 0x08))
+ status = 1;
+
+ if ((buf[5] & 0x02) == 0x02)
+ mxl_i2c("(buf[5] & 0x02) == 0x02"); /* FIXME */
+
+ return status;
+}
+
+static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
+ u8 count, u8 *rbuf)
+{
+ u8 i2c_w_data[26];
+ u8 i2c_r_data[24];
+ u8 i = 0;
+ u8 fifo_status = 0;
+ int status = 0;
+
+ mxl_i2c("read %d bytes", count);
+
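+ /* poll the FIFO status (up to 5 attempts) before re-reading
+    the data registers */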
+ while ((fifo_status == 0) && (i++ < 5))
+ fifo_status = mxl111sf_i2c_check_fifo(state);
+
+ i2c_w_data[0] = 0xDD;
+ i2c_w_data[1] = 0x00;
+
+ for (i = 2; i < 26; i++)
+ i2c_w_data[i] = 0xFE;
+
+ for (i = 0; i < count; i++) {
+ i2c_w_data[2+(i*3)] = 0x0C;
+ i2c_w_data[3+(i*3)] = 0x00;
+ i2c_w_data[4+(i*3)] = 0x00;
+ }
+
+ mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data);
+
+ /* Check for I2C NACK status */
+ if (mxl111sf_i2c_check_status(state) == 1) {
+ mxl_i2c("error!");
+ } else {
+ for (i = 0; i < count; i++) {
+ rbuf[i] = i2c_r_data[(i*3)+1];
+ mxl_i2c("%02x\t %02x",
+ i2c_r_data[(i*3)+1],
+ i2c_r_data[(i*3)+2]);
+ }
+
+ status = 1;
+ }
+
+ return status;
+}
+
+#define HWI2C400 1
+static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
+ struct i2c_msg *msg)
+{
+ int i, k, ret = 0;
+ u16 index = 0;
+ u8 buf[26];
+ u8 i2c_r_data[24];
+ u16 block_len;
+ u16 left_over_len;
+ u8 rd_status[8];
+ u8 ret_status;
+ u8 readbuff[26];
+
+ mxl_i2c("addr: 0x%02x, read buff len: %d, write buff len: %d",
+ msg->addr, (msg->flags & I2C_M_RD) ? msg->len : 0,
+ (!(msg->flags & I2C_M_RD)) ? msg->len : 0);
+
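+ /* command packets are 26 bytes: a command byte, a pad byte, up to
+    eight 3-byte (register, value, pad) entries, and an unused tail
+    pre-filled with USB_END_I2C_CMD to terminate the list */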
+ for (index = 0; index < 26; index++)
+ buf[index] = USB_END_I2C_CMD;
+
+ /* command to indicate data payload is destined for I2C interface */
+ buf[0] = USB_WRITE_I2C_CMD;
+ buf[1] = 0x00;
+
+ /* enable I2C interface */
+ buf[2] = I2C_MUX_REG;
+ buf[3] = 0x80;
+ buf[4] = 0x00;
+
+ /* enable I2C interface */
+ buf[5] = I2C_MUX_REG;
+ buf[6] = 0x81;
+ buf[7] = 0x00;
+
+ /* set Timeout register on I2C interface */
+ buf[8] = 0x14;
+ buf[9] = 0xff;
+ buf[10] = 0x00;
+#if 0
+ /* enable Interrupts on I2C interface */
+ buf[8] = 0x24;
+ buf[9] = 0xF7;
+ buf[10] = 0x00;
+#endif
+ buf[11] = 0x24;
+ buf[12] = 0xF7;
+ buf[13] = 0x00;
+
+ ret = mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* write data on I2C bus */
+ if (!(msg->flags & I2C_M_RD) && (msg->len > 0)) {
+ mxl_i2c("%d\t%02x", msg->len, msg->buf[0]);
+
+ /* control register on I2C interface to initialize I2C bus */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0x5E;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+
+ /* I2C Slave device Address */
+ buf[5] = I2C_SLAVE_ADDR_REG;
+ buf[6] = (msg->addr);
+ buf[7] = 0x00;
+ buf[8] = USB_END_I2C_CMD;
+ ret = mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* check for slave device status */
+ if (mxl111sf_i2c_check_status(state) == 1) {
+ mxl_i2c("NACK writing slave address %02x",
+ msg->addr);
+ /* if NACK, stop I2C bus and exit */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0x4E;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* The I2C interface transfers data in blocks of up to 8 bytes;
+    work out how many full blocks and how many left-over bytes
+    need to be programmed */
+ block_len = (msg->len / 8);
+ left_over_len = (msg->len % 8);
+ index = 0;
+
+ mxl_i2c("block_len %d, left_over_len %d",
+ block_len, left_over_len);
+
+ for (index = 0; index < block_len; index++) {
+ for (i = 0; i < 8; i++) {
+ /* write data on I2C interface */
+ buf[2+(i*3)] = I2C_DATA_REG;
+ buf[3+(i*3)] = msg->buf[(index*8)+i];
+ buf[4+(i*3)] = 0x00;
+ }
+
+ ret = mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* check for I2C NACK status */
+ if (mxl111sf_i2c_check_status(state) == 1) {
+ mxl_i2c("NACK writing slave address %02x",
+ msg->addr);
+
+ /* if NACK, stop I2C bus and exit */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0x4E;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+ ret = -EIO;
+ goto exit;
+ }
+
+ }
+
+ if (left_over_len) {
+ for (k = 0; k < 26; k++)
+ buf[k] = USB_END_I2C_CMD;
+
+ buf[0] = 0x99;
+ buf[1] = 0x00;
+
+ for (i = 0; i < left_over_len; i++) {
+ buf[2+(i*3)] = I2C_DATA_REG;
+ buf[3+(i*3)] = msg->buf[(index*8)+i];
+ mxl_i2c("index = %d %d data %d",
+ index, i, msg->buf[(index*8)+i]);
+ buf[4+(i*3)] = 0x00;
+ }
+ ret = mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* check for I2C NACK status */
+ if (mxl111sf_i2c_check_status(state) == 1) {
+ mxl_i2c("NACK writing slave address %02x",
+ msg->addr);
+
+ /* if NACK, stop I2C bus and exit */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0x4E;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+ ret = -EIO;
+ goto exit;
+ }
+
+ }
+
+ /* issue I2C STOP after write */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0x4E;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+
+ }
+
+ /* read data from I2C bus */
+ if ((msg->flags & I2C_M_RD) && (msg->len > 0)) {
+ mxl_i2c("read buf len %d", msg->len);
+
+ /* set up the control register to start
+    the I2C read transaction */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0xDF;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+
+ /* I2C xfer length */
+ buf[5] = 0x14;
+ buf[6] = (msg->len & 0xFF);
+ buf[7] = 0;
+
+ /* I2C slave device Address */
+ buf[8] = I2C_SLAVE_ADDR_REG;
+ buf[9] = msg->addr;
+ buf[10] = 0x00;
+ buf[11] = USB_END_I2C_CMD;
+ ret = mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* check for I2C NACK status */
+ if (mxl111sf_i2c_check_status(state) == 1) {
+ mxl_i2c("NACK reading slave address %02x",
+ msg->addr);
+
+ /* if NACK, stop I2C bus and exit */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0xC7;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* The I2C interface transfers data in blocks of up to 8 bytes;
+    work out how many full blocks and how many left-over bytes
+    need to be programmed */
+ block_len = ((msg->len) / 8);
+ left_over_len = ((msg->len) % 8);
+ index = 0;
+
+ mxl_i2c("block_len %d, left_over_len %d",
+ block_len, left_over_len);
+
+ /* command to read data from I2C interface */
+ buf[0] = USB_READ_I2C_CMD;
+ buf[1] = 0x00;
+
+ for (index = 0; index < block_len; index++) {
+ /* setup I2C read request packet on I2C interface */
+ for (i = 0; i < 8; i++) {
+ buf[2+(i*3)] = I2C_DATA_REG;
+ buf[3+(i*3)] = 0x00;
+ buf[4+(i*3)] = 0x00;
+ }
+
+ ret = mxl111sf_i2c_get_data(state, 0, buf, i2c_r_data);
+
+ /* check for I2C NACK status */
+ if (mxl111sf_i2c_check_status(state) == 1) {
+ mxl_i2c("NACK reading slave address %02x",
+ msg->addr);
+
+ /* if NACK, stop I2C bus and exit */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0xC7;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* copy data from i2c data payload to read buffer */
+ for (i = 0; i < 8; i++) {
+ rd_status[i] = i2c_r_data[(i*3)+2];
+
+ if (rd_status[i] == 0x04) {
+ if (i < 7) {
+ mxl_i2c("i2c fifo empty!"
+ " @ %d", i);
+ msg->buf[(index*8)+i] =
+ i2c_r_data[(i*3)+1];
+ /* read again */
+ ret_status =
+ mxl111sf_i2c_readagain(
+ state, 8-(i+1),
+ readbuff);
+ if (ret_status == 1) {
+ for (k = 0;
+ k < 8-(i+1);
+ k++) {
+
+ msg->buf[(index*8)+(k+i+1)] =
+ readbuff[k];
+ mxl_i2c("read data: %02x\t %02x",
+ msg->buf[(index*8)+(k+i)],
+ (index*8)+(k+i));
+ mxl_i2c("read data: %02x\t %02x",
+ msg->buf[(index*8)+(k+i+1)],
+ readbuff[k]);
+
+ }
+ goto stop_copy;
+ } else {
+ mxl_i2c("readagain "
+ "ERROR!");
+ }
+ } else {
+ msg->buf[(index*8)+i] =
+ i2c_r_data[(i*3)+1];
+ }
+ } else {
+ msg->buf[(index*8)+i] =
+ i2c_r_data[(i*3)+1];
+ }
+ }
+stop_copy:
+ ;
+
+ }
+
+ if (left_over_len) {
+ for (k = 0; k < 26; k++)
+ buf[k] = USB_END_I2C_CMD;
+
+ buf[0] = 0xDD;
+ buf[1] = 0x00;
+
+ for (i = 0; i < left_over_len; i++) {
+ buf[2+(i*3)] = I2C_DATA_REG;
+ buf[3+(i*3)] = 0x00;
+ buf[4+(i*3)] = 0x00;
+ }
+ ret = mxl111sf_i2c_get_data(state, 0, buf,
+ i2c_r_data);
+
+ /* check for I2C NACK status */
+ if (mxl111sf_i2c_check_status(state) == 1) {
+ mxl_i2c("NACK reading slave address %02x",
+ msg->addr);
+
+ /* if NACK, stop I2C bus and exit */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0xC7;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+ ret = -EIO;
+ goto exit;
+ }
+
+ for (i = 0; i < left_over_len; i++) {
+ msg->buf[(block_len*8)+i] =
+ i2c_r_data[(i*3)+1];
+ mxl_i2c("read data: %02x\t %02x",
+ i2c_r_data[(i*3)+1],
+ i2c_r_data[(i*3)+2]);
+ }
+ }
+
+ /* tell the I2C interface to issue a NACK
+    after the next I2C read operation */
+ buf[0] = USB_WRITE_I2C_CMD;
+ buf[1] = 0x00;
+
+ /* control register */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0x17;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+
+ buf[5] = USB_END_I2C_CMD;
+ ret = mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* control register */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0xC7;
+ buf[4] = (HWI2C400) ? 0x03 : 0x0D;
+
+ }
+exit:
+ /* STOP and disable I2C MUX */
+ buf[0] = USB_WRITE_I2C_CMD;
+ buf[1] = 0x00;
+
+ /* de-initialize the I2C bus */
+ buf[5] = USB_END_I2C_CMD;
+ mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* Control Register */
+ buf[2] = I2C_CONTROL_REG;
+ buf[3] = 0xDF;
+ buf[4] = 0x03;
+
+ /* disable I2C interface */
+ buf[5] = I2C_MUX_REG;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+
+ /* de-initialize the I2C bus */
+ buf[8] = USB_END_I2C_CMD;
+ mxl111sf_i2c_send_data(state, 0, buf);
+
+ /* disable I2C interface */
+ buf[2] = I2C_MUX_REG;
+ buf[3] = 0x81;
+ buf[4] = 0x00;
+
+ /* disable I2C interface */
+ buf[5] = I2C_MUX_REG;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+
+ /* disable I2C interface */
+ buf[8] = I2C_MUX_REG;
+ buf[9] = 0x00;
+ buf[10] = 0x00;
+
+ buf[11] = USB_END_I2C_CMD;
+ mxl111sf_i2c_send_data(state, 0, buf);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msg[], int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct mxl111sf_state *state = d->priv;
+ int hwi2c = (state->chip_rev > MXL111SF_V6);
+ int i, ret;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ for (i = 0; i < num; i++) {
+ ret = (hwi2c) ?
+ mxl111sf_i2c_hw_xfer_msg(state, &msg[i]) :
+ mxl111sf_i2c_sw_xfer_msg(state, &msg[i]);
+ if (mxl_fail(ret)) {
+ mxl_debug_adv("failed with error %d on i2c "
+ "transaction %d of %d, %sing %d bytes "
+ "to/from 0x%02x", ret, i+1, num,
+ (msg[i].flags & I2C_M_RD) ?
+ "read" : "writ",
+ msg[i].len, msg[i].addr);
+
+ break;
+ }
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+
+ return i == num ? num : -EREMOTEIO;
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.h b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.h
new file mode 100644
index 00000000000..a57a45ffb9e
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.h
@@ -0,0 +1,35 @@
+/*
+ * mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _DVB_USB_MXL111SF_I2C_H_
+#define _DVB_USB_MXL111SF_I2C_H_
+
+#include <linux/i2c.h>
+
+int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msg[], int num);
+
+#endif /* _DVB_USB_MXL111SF_I2C_H_ */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
new file mode 100644
index 00000000000..b741b3a7a32
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
@@ -0,0 +1,343 @@
+/*
+ * mxl111sf-phy.c - driver for the MaxLinear MXL111SF
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "mxl111sf-phy.h"
+#include "mxl111sf-reg.h"
+
+int mxl111sf_init_tuner_demod(struct mxl111sf_state *state)
+{
+ struct mxl111sf_reg_ctrl_info mxl_111_overwrite_default[] = {
+ {0x07, 0xff, 0x0c},
+ {0x58, 0xff, 0x9d},
+ {0x09, 0xff, 0x00},
+ {0x06, 0xff, 0x06},
+ {0xc8, 0xff, 0x40}, /* ED_LE_WIN_OLD = 0 */
+ {0x8d, 0x01, 0x01}, /* NEGATE_Q */
+ {0x32, 0xff, 0xac}, /* DIG_RFREFSELECT = 12 */
+ {0x42, 0xff, 0x43}, /* DIG_REG_AMP = 4 */
+ {0x74, 0xff, 0xc4}, /* SSPUR_FS_PRIO = 4 */
+ {0x71, 0xff, 0xe6}, /* SPUR_ROT_PRIO_VAL = 1 */
+ {0x83, 0xff, 0x64}, /* INF_FILT1_THD_SC = 100 */
+ {0x85, 0xff, 0x64}, /* INF_FILT2_THD_SC = 100 */
+ {0x88, 0xff, 0xf0}, /* INF_THD = 240 */
+ {0x6f, 0xf0, 0xb0}, /* DFE_DLY = 11 */
+ {0x00, 0xff, 0x01}, /* Change to page 1 */
+ {0x81, 0xff, 0x11}, /* DSM_FERR_BYPASS = 1 */
+ {0xf4, 0xff, 0x07}, /* DIG_FREQ_CORR = 1 */
+ {0xd4, 0x1f, 0x0f}, /* SPUR_TEST_NOISE_TH = 15 */
+ {0xd6, 0xff, 0x0c}, /* SPUR_TEST_NOISE_PAPR = 12 */
+ {0x00, 0xff, 0x00}, /* Change to page 0 */
+ {0, 0, 0}
+ };
+
+ mxl_debug("()");
+
+ return mxl111sf_ctrl_program_regs(state, mxl_111_overwrite_default);
+}
+
+int mxl1x1sf_soft_reset(struct mxl111sf_state *state)
+{
+ int ret;
+ mxl_debug("()");
+
+ ret = mxl111sf_write_reg(state, 0xff, 0x00); /* AIC */
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_write_reg(state, 0x02, 0x01); /* get out of reset */
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+int mxl1x1sf_set_device_mode(struct mxl111sf_state *state, int mode)
+{
+ int ret;
+
+ mxl_debug("(%s)", MXL_SOC_MODE == mode ?
+ "MXL_SOC_MODE" : "MXL_TUNER_MODE");
+
+ /* set device mode */
+ ret = mxl111sf_write_reg(state, 0x03,
+ MXL_SOC_MODE == mode ? 0x01 : 0x00);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg_mask(state,
+ 0x7d, 0x40, MXL_SOC_MODE == mode ?
+ 0x00 : /* enable impulse noise filter,
+ INF_BYP = 0 */
+ 0x40); /* disable impulse noise filter,
+ INF_BYP = 1 */
+ if (mxl_fail(ret))
+ goto fail;
+
+ state->device_mode = mode;
+fail:
+ return ret;
+}
+
+/* power up tuner */
+int mxl1x1sf_top_master_ctrl(struct mxl111sf_state *state, int onoff)
+{
+ mxl_debug("(%d)", onoff);
+
+ return mxl111sf_write_reg(state, 0x01, onoff ? 0x01 : 0x00);
+}
+
+int mxl111sf_disable_656_port(struct mxl111sf_state *state)
+{
+ mxl_debug("()");
+
+ return mxl111sf_write_reg_mask(state, 0x12, 0x04, 0x00);
+}
+
+int mxl111sf_enable_usb_output(struct mxl111sf_state *state)
+{
+ mxl_debug("()");
+
+ return mxl111sf_write_reg_mask(state, 0x17, 0x40, 0x00);
+}
+
+/* initialize TSIF as input port of MxL1X1SF for MPEG2 data transfer */
+int mxl111sf_config_mpeg_in(struct mxl111sf_state *state,
+ unsigned int parallel_serial,
+ unsigned int msb_lsb_1st,
+ unsigned int clock_phase,
+ unsigned int mpeg_valid_pol,
+ unsigned int mpeg_sync_pol)
+{
+ int ret;
+ u8 mode, tmp;
+
+ mxl_debug("(%u,%u,%u,%u,%u)", parallel_serial, msb_lsb_1st,
+ clock_phase, mpeg_valid_pol, mpeg_sync_pol);
+
+ /* Enable PIN MUX */
+ ret = mxl111sf_write_reg(state, V6_PIN_MUX_MODE_REG, V6_ENABLE_PIN_MUX);
+ mxl_fail(ret);
+
+ /* Configure MPEG Clock phase */
+ mxl111sf_read_reg(state, V6_MPEG_IN_CLK_INV_REG, &mode);
+
+ if (clock_phase == TSIF_NORMAL)
+ mode &= ~V6_INVERTED_CLK_PHASE;
+ else
+ mode |= V6_INVERTED_CLK_PHASE;
+
+ ret = mxl111sf_write_reg(state, V6_MPEG_IN_CLK_INV_REG, mode);
+ mxl_fail(ret);
+
+ /* Configure data input mode, MPEG Valid polarity, MPEG Sync polarity
+ * Get current configuration */
+ ret = mxl111sf_read_reg(state, V6_MPEG_IN_CTRL_REG, &mode);
+ mxl_fail(ret);
+
+ /* Data Input mode */
+ if (parallel_serial == TSIF_INPUT_PARALLEL) {
+ /* Disable serial mode */
+ mode &= ~V6_MPEG_IN_DATA_SERIAL;
+
+ /* Enable Parallel mode */
+ mode |= V6_MPEG_IN_DATA_PARALLEL;
+ } else {
+ /* Disable Parallel mode */
+ mode &= ~V6_MPEG_IN_DATA_PARALLEL;
+
+ /* Enable Serial Mode */
+ mode |= V6_MPEG_IN_DATA_SERIAL;
+
+ /* If serial interface is chosen, configure
+ MSB or LSB order in transmission */
+ ret = mxl111sf_read_reg(state,
+ V6_MPEG_INOUT_BIT_ORDER_CTRL_REG,
+ &tmp);
+ mxl_fail(ret);
+
+ if (msb_lsb_1st == MPEG_SER_MSB_FIRST_ENABLED)
+ tmp |= V6_MPEG_SER_MSB_FIRST;
+ else
+ tmp &= ~V6_MPEG_SER_MSB_FIRST;
+
+ ret = mxl111sf_write_reg(state,
+ V6_MPEG_INOUT_BIT_ORDER_CTRL_REG,
+ tmp);
+ mxl_fail(ret);
+ }
+
+ /* MPEG Sync polarity */
+ if (mpeg_sync_pol == TSIF_NORMAL)
+ mode &= ~V6_INVERTED_MPEG_SYNC;
+ else
+ mode |= V6_INVERTED_MPEG_SYNC;
+
+ /* MPEG Valid polarity */
+ if (mpeg_valid_pol == 0)
+ mode &= ~V6_INVERTED_MPEG_VALID;
+ else
+ mode |= V6_INVERTED_MPEG_VALID;
+
+ ret = mxl111sf_write_reg(state, V6_MPEG_IN_CTRL_REG, mode);
+ mxl_fail(ret);
+
+ return ret;
+}
+
+int mxl111sf_init_i2s_port(struct mxl111sf_state *state, u8 sample_size)
+{
+ static struct mxl111sf_reg_ctrl_info init_i2s[] = {
+ {0x1b, 0xff, 0x1e}, /* pin mux mode, Choose 656/I2S input */
+ {0x15, 0x60, 0x60}, /* Enable I2S */
+ {0x17, 0xe0, 0x20}, /* Input, MPEG MODE USB,
+ Inverted 656 Clock, I2S_SOFT_RESET,
+ 0 : Normal operation, 1 : Reset State */
+#if 0
+ {0x12, 0x01, 0x00}, /* AUDIO_IRQ_CLR (Overflow Indicator) */
+#endif
+ {0x00, 0xff, 0x02}, /* Change to Control Page */
+ {0x26, 0x0d, 0x0d}, /* I2S_MODE & BT656_SRC_SEL for FPGA only */
+ {0x00, 0xff, 0x00},
+ {0, 0, 0}
+ };
+ int ret;
+
+ mxl_debug("(0x%02x)", sample_size);
+
+ ret = mxl111sf_ctrl_program_regs(state, init_i2s);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, V6_I2S_NUM_SAMPLES_REG, sample_size);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+int mxl111sf_disable_i2s_port(struct mxl111sf_state *state)
+{
+ static struct mxl111sf_reg_ctrl_info disable_i2s[] = {
+ {0x15, 0x40, 0x00},
+ {0, 0, 0}
+ };
+
+ mxl_debug("()");
+
+ return mxl111sf_ctrl_program_regs(state, disable_i2s);
+}
+
+int mxl111sf_config_i2s(struct mxl111sf_state *state,
+ u8 msb_start_pos, u8 data_width)
+{
+ int ret;
+ u8 tmp;
+
+ mxl_debug("(0x%02x, 0x%02x)", msb_start_pos, data_width);
+
+ ret = mxl111sf_read_reg(state, V6_I2S_STREAM_START_BIT_REG, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+
+ tmp &= 0xe0;
+ tmp |= msb_start_pos;
+ ret = mxl111sf_write_reg(state, V6_I2S_STREAM_START_BIT_REG, tmp);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_read_reg(state, V6_I2S_STREAM_END_BIT_REG, &tmp);
+ if (mxl_fail(ret))
+ goto fail;
+
+ tmp &= 0xe0;
+ tmp |= data_width;
+ ret = mxl111sf_write_reg(state, V6_I2S_STREAM_END_BIT_REG, tmp);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff)
+{
+ u8 val;
+ int ret;
+
+ mxl_debug("(%d)", onoff);
+
+ ret = mxl111sf_write_reg(state, 0x00, 0x02);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_read_reg(state, V8_SPI_MODE_REG, &val);
+ if (mxl_fail(ret))
+ goto fail;
+
+ if (onoff)
+ val |= 0x04;
+ else
+ val &= ~0x04;
+
+ ret = mxl111sf_write_reg(state, V8_SPI_MODE_REG, val);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_write_reg(state, 0x00, 0x00);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+int mxl111sf_idac_config(struct mxl111sf_state *state,
+ u8 control_mode, u8 current_setting,
+ u8 current_value, u8 hysteresis_value)
+{
+ int ret;
+ u8 val;
+ /* current value will be set for both automatic & manual IDAC control */
+ val = current_value;
+
+ if (control_mode == IDAC_MANUAL_CONTROL) {
+ /* enable manual control of IDAC */
+ val |= IDAC_MANUAL_CONTROL_BIT_MASK;
+
+ if (current_setting == IDAC_CURRENT_SINKING_ENABLE)
+ /* enable current sinking in manual mode */
+ val |= IDAC_CURRENT_SINKING_BIT_MASK;
+ else
+ /* disable current sinking in manual mode */
+ val &= ~IDAC_CURRENT_SINKING_BIT_MASK;
+ } else {
+ /* disable manual control of IDAC */
+ val &= ~IDAC_MANUAL_CONTROL_BIT_MASK;
+
+ /* set hysteresis value reg: 0x0B<5:0> */
+ ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG,
+ (hysteresis_value & 0x3F));
+ mxl_fail(ret);
+ }
+
+ ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val);
+ mxl_fail(ret);
+
+ return ret;
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.h b/drivers/media/dvb/dvb-usb/mxl111sf-phy.h
new file mode 100644
index 00000000000..f0756071d34
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.h
@@ -0,0 +1,53 @@
+/*
+ * mxl111sf-phy.h - driver for the MaxLinear MXL111SF
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _DVB_USB_MXL111SF_PHY_H_
+#define _DVB_USB_MXL111SF_PHY_H_
+
+#include "mxl111sf.h"
+
+int mxl1x1sf_soft_reset(struct mxl111sf_state *state);
+int mxl1x1sf_set_device_mode(struct mxl111sf_state *state, int mode);
+int mxl1x1sf_top_master_ctrl(struct mxl111sf_state *state, int onoff);
+int mxl111sf_disable_656_port(struct mxl111sf_state *state);
+int mxl111sf_init_tuner_demod(struct mxl111sf_state *state);
+int mxl111sf_enable_usb_output(struct mxl111sf_state *state);
+int mxl111sf_config_mpeg_in(struct mxl111sf_state *state,
+ unsigned int parallel_serial,
+ unsigned int msb_lsb_1st,
+ unsigned int clock_phase,
+ unsigned int mpeg_valid_pol,
+ unsigned int mpeg_sync_pol);
+int mxl111sf_config_i2s(struct mxl111sf_state *state,
+ u8 msb_start_pos, u8 data_width);
+int mxl111sf_init_i2s_port(struct mxl111sf_state *state, u8 sample_size);
+int mxl111sf_disable_i2s_port(struct mxl111sf_state *state);
+int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff);
+int mxl111sf_idac_config(struct mxl111sf_state *state,
+ u8 control_mode, u8 current_setting,
+ u8 current_value, u8 hysteresis_value);
+
+#endif /* _DVB_USB_MXL111SF_PHY_H_ */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-reg.h b/drivers/media/dvb/dvb-usb/mxl111sf-reg.h
new file mode 100644
index 00000000000..17831b0fb9d
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-reg.h
@@ -0,0 +1,179 @@
+/*
+ * mxl111sf-reg.h - driver for the MaxLinear MXL111SF
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _DVB_USB_MXL111SF_REG_H_
+#define _DVB_USB_MXL111SF_REG_H_
+
+#define CHIP_ID_REG 0xFC
+#define TOP_CHIP_REV_ID_REG 0xFA
+
+#define V6_SNR_RB_LSB_REG 0x27
+#define V6_SNR_RB_MSB_REG 0x28
+
+#define V6_N_ACCUMULATE_REG 0x11
+#define V6_RS_AVG_ERRORS_LSB_REG 0x2C
+#define V6_RS_AVG_ERRORS_MSB_REG 0x2D
+
+#define V6_IRQ_STATUS_REG 0x24
+#define IRQ_MASK_FEC_LOCK 0x10
+
+#define V6_SYNC_LOCK_REG 0x28
+#define SYNC_LOCK_MASK 0x10
+
+#define V6_RS_LOCK_DET_REG 0x28
+#define RS_LOCK_DET_MASK 0x08
+
+#define V6_INITACQ_NODETECT_REG 0x20
+#define V6_FORCE_NFFT_CPSIZE_REG 0x20
+
+#define V6_CODE_RATE_TPS_REG 0x29
+#define V6_CODE_RATE_TPS_MASK 0x07
+
+
+#define V6_CP_LOCK_DET_REG 0x28
+#define V6_CP_LOCK_DET_MASK 0x04
+
+#define V6_TPS_HIERACHY_REG 0x29
+#define V6_TPS_HIERARCHY_INFO_MASK 0x40
+
+#define V6_MODORDER_TPS_REG 0x2A
+#define V6_PARAM_CONSTELLATION_MASK 0x30
+
+#define V6_MODE_TPS_REG 0x2A
+#define V6_PARAM_FFT_MODE_MASK 0x0C
+
+
+#define V6_CP_TPS_REG 0x29
+#define V6_PARAM_GI_MASK 0x30
+
+#define V6_TPS_LOCK_REG 0x2A
+#define V6_PARAM_TPS_LOCK_MASK 0x40
+
+#define V6_FEC_PER_COUNT_REG 0x2E
+#define V6_FEC_PER_SCALE_REG 0x2B
+#define V6_FEC_PER_SCALE_MASK 0x03
+#define V6_FEC_PER_CLR_REG 0x20
+#define V6_FEC_PER_CLR_MASK 0x01
+
+#define V6_PIN_MUX_MODE_REG 0x1B
+#define V6_ENABLE_PIN_MUX 0x1E
+
+#define V6_I2S_NUM_SAMPLES_REG 0x16
+
+#define V6_MPEG_IN_CLK_INV_REG 0x17
+#define V6_MPEG_IN_CTRL_REG 0x18
+
+#define V6_INVERTED_CLK_PHASE 0x20
+#define V6_MPEG_IN_DATA_PARALLEL 0x01
+#define V6_MPEG_IN_DATA_SERIAL 0x02
+
+#define V6_INVERTED_MPEG_SYNC 0x04
+#define V6_INVERTED_MPEG_VALID 0x08
+
+#define TSIF_INPUT_PARALLEL 0
+#define TSIF_INPUT_SERIAL 1
+#define TSIF_NORMAL 0
+
+#define V6_MPEG_INOUT_BIT_ORDER_CTRL_REG 0x19
+#define V6_MPEG_SER_MSB_FIRST 0x80
+#define MPEG_SER_MSB_FIRST_ENABLED 0x01
+
+#define V6_656_I2S_BUFF_STATUS_REG 0x2F
+#define V6_656_OVERFLOW_MASK_BIT 0x08
+#define V6_I2S_OVERFLOW_MASK_BIT 0x01
+
+#define V6_I2S_STREAM_START_BIT_REG 0x14
+#define V6_I2S_STREAM_END_BIT_REG 0x15
+#define I2S_RIGHT_JUSTIFIED 0
+#define I2S_LEFT_JUSTIFIED 1
+#define I2S_DATA_FORMAT 2
+
+#define V6_TUNER_LOOP_THRU_CONTROL_REG 0x09
+#define V6_ENABLE_LOOP_THRU 0x01
+
+#define TOTAL_NUM_IF_OUTPUT_FREQ 16
+
+#define TUNER_NORMAL_IF_SPECTRUM 0x0
+#define TUNER_INVERT_IF_SPECTRUM 0x10
+
+#define V6_TUNER_IF_SEL_REG 0x06
+#define V6_TUNER_IF_FCW_REG 0x3C
+#define V6_TUNER_IF_FCW_BYP_REG 0x3D
+#define V6_RF_LOCK_STATUS_REG 0x23
+
+#define NUM_DIG_TV_CHANNEL 1000
+
+#define V6_DIG_CLK_FREQ_SEL_REG 0x07
+#define V6_REF_SYNTH_INT_REG 0x5C
+#define V6_REF_SYNTH_REMAIN_REG 0x58
+#define V6_DIG_RFREFSELECT_REG 0x32
+#define V6_XTAL_CLK_OUT_GAIN_REG 0x31
+#define V6_TUNER_LOOP_THRU_CTRL_REG 0x09
+#define V6_DIG_XTAL_ENABLE_REG 0x06
+#define V6_DIG_XTAL_BIAS_REG 0x66
+#define V6_XTAL_CAP_REG 0x08
+
+#define V6_GPO_CTRL_REG 0x18
+#define MXL_GPO_0 0x00
+#define MXL_GPO_1 0x01
+#define V6_GPO_0_MASK 0x10
+#define V6_GPO_1_MASK 0x20
+
+#define V6_111SF_GPO_CTRL_REG 0x19
+#define MXL_111SF_GPO_1 0x00
+#define MXL_111SF_GPO_2 0x01
+#define MXL_111SF_GPO_3 0x02
+#define MXL_111SF_GPO_4 0x03
+#define MXL_111SF_GPO_5 0x04
+#define MXL_111SF_GPO_6 0x05
+#define MXL_111SF_GPO_7 0x06
+
+#define MXL_111SF_GPO_0_MASK 0x01
+#define MXL_111SF_GPO_1_MASK 0x02
+#define MXL_111SF_GPO_2_MASK 0x04
+#define MXL_111SF_GPO_3_MASK 0x08
+#define MXL_111SF_GPO_4_MASK 0x10
+#define MXL_111SF_GPO_5_MASK 0x20
+#define MXL_111SF_GPO_6_MASK 0x40
+
+#define V6_ATSC_CONFIG_REG 0x0A
+
+#define MXL_MODE_REG 0x03
+#define START_TUNE_REG 0x1C
+
+#define V6_IDAC_HYSTERESIS_REG 0x0B
+#define V6_IDAC_SETTINGS_REG 0x0C
+#define IDAC_MANUAL_CONTROL 1
+#define IDAC_CURRENT_SINKING_ENABLE 1
+#define IDAC_MANUAL_CONTROL_BIT_MASK 0x80
+#define IDAC_CURRENT_SINKING_BIT_MASK 0x40
+
+#define V8_SPI_MODE_REG 0xE9
+
+#define V6_DIG_RF_PWR_LSB_REG 0x46
+#define V6_DIG_RF_PWR_MSB_REG 0x47
+
+#endif /* _DVB_USB_MXL111SF_REG_H_ */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
new file mode 100644
index 00000000000..a6341058c4e
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
@@ -0,0 +1,476 @@
+/*
+ * mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "mxl111sf-tuner.h"
+#include "mxl111sf-phy.h"
+#include "mxl111sf-reg.h"
+
+/* debug */
+static int mxl111sf_tuner_debug;
+module_param_named(debug, mxl111sf_tuner_debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
+
+#define mxl_dbg(fmt, arg...) \
+ if (mxl111sf_tuner_debug) \
+ mxl_printk(KERN_DEBUG, fmt, ##arg)
+
+/* ------------------------------------------------------------------------ */
+
+struct mxl111sf_tuner_state {
+ struct mxl111sf_state *mxl_state;
+
+ struct mxl111sf_tuner_config *cfg;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+static int mxl111sf_tuner_read_reg(struct mxl111sf_tuner_state *state,
+ u8 addr, u8 *data)
+{
+ return (state->cfg->read_reg) ?
+ state->cfg->read_reg(state->mxl_state, addr, data) :
+ -EINVAL;
+}
+
+static int mxl111sf_tuner_write_reg(struct mxl111sf_tuner_state *state,
+ u8 addr, u8 data)
+{
+ return (state->cfg->write_reg) ?
+ state->cfg->write_reg(state->mxl_state, addr, data) :
+ -EINVAL;
+}
+
+static int mxl111sf_tuner_program_regs(struct mxl111sf_tuner_state *state,
+ struct mxl111sf_reg_ctrl_info *ctrl_reg_info)
+{
+ return (state->cfg->program_regs) ?
+ state->cfg->program_regs(state->mxl_state, ctrl_reg_info) :
+ -EINVAL;
+}
+
+static int mxl1x1sf_tuner_top_master_ctrl(struct mxl111sf_tuner_state *state,
+ int onoff)
+{
+ return (state->cfg->top_master_ctrl) ?
+ state->cfg->top_master_ctrl(state->mxl_state, onoff) :
+ -EINVAL;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct mxl111sf_reg_ctrl_info mxl_phy_tune_rf[] = {
+ {0x1d, 0x7f, 0x00}, /* channel bandwidth section 1/2/3,
+ DIG_MODEINDEX, _A, _CSF, */
+ {0x1e, 0xff, 0x00}, /* channel frequency (lo and fractional) */
+ {0x1f, 0xff, 0x00}, /* channel frequency (hi for integer portion) */
+ {0, 0, 0}
+};
+
+/* ------------------------------------------------------------------------ */
+
+static struct mxl111sf_reg_ctrl_info *mxl111sf_calc_phy_tune_regs(u32 freq,
+ u8 bw)
+{
+ u8 filt_bw;
+
+ /* set channel bandwidth */
+ switch (bw) {
+ case 0: /* ATSC */
+ filt_bw = 25;
+ break;
+ case 1: /* QAM */
+ filt_bw = 69;
+ break;
+ case 6:
+ filt_bw = 21;
+ break;
+ case 7:
+ filt_bw = 42;
+ break;
+ case 8:
+ filt_bw = 63;
+ break;
+ default:
+ err("%s: invalid bandwidth setting!", __func__);
+ return NULL;
+ }
+
+ /* calculate RF channel */
+ freq /= 1000000;
+
+ freq *= 64;
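+ /* freq now holds the channel frequency in 1/64 MHz steps; it is
+ * split across the lo/hi tuning registers below */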
+#if 0
+ /* do round */
+ freq += 0.5;
+#endif
+ /* set bandwidth */
+ mxl_phy_tune_rf[0].data = filt_bw;
+
+ /* set RF */
+ mxl_phy_tune_rf[1].data = (freq & 0xff);
+ mxl_phy_tune_rf[2].data = (freq >> 8) & 0xff;
+
+ /* start tune */
+ return mxl_phy_tune_rf;
+}
+
+static int mxl1x1sf_tuner_set_if_output_freq(struct mxl111sf_tuner_state *state)
+{
+ int ret;
+ u8 ctrl;
+#if 0
+ u16 iffcw;
+ u32 if_freq;
+#endif
+ mxl_dbg("(IF polarity = %d, IF freq = 0x%02x)",
+ state->cfg->invert_spectrum, state->cfg->if_freq);
+
+ /* set IF polarity */
+ ctrl = state->cfg->invert_spectrum;
+
+ ctrl |= state->cfg->if_freq;
+
+ ret = mxl111sf_tuner_write_reg(state, V6_TUNER_IF_SEL_REG, ctrl);
+ if (mxl_fail(ret))
+ goto fail;
+
+#if 0
+ if_freq /= 1000000;
+
+ /* do round */
+ if_freq += 0.5;
+
+ if (MXL_IF_LO == state->cfg->if_freq) {
+ ctrl = 0x08;
+ iffcw = (u16)(if_freq / (108 * 4096));
+ } else if (MXL_IF_HI == state->cfg->if_freq) {
+ ctrl = 0x08;
+ iffcw = (u16)(if_freq / (216 * 4096));
+ } else {
+ ctrl = 0;
+ iffcw = 0;
+ }
+
+ ctrl |= (iffcw >> 8);
+#endif
+ ret = mxl111sf_tuner_read_reg(state, V6_TUNER_IF_FCW_BYP_REG, &ctrl);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ctrl &= 0xf0;
+ ctrl |= 0x90;
+
+ ret = mxl111sf_tuner_write_reg(state, V6_TUNER_IF_FCW_BYP_REG, ctrl);
+ if (mxl_fail(ret))
+ goto fail;
+
+#if 0
+ ctrl = iffcw & 0x00ff;
+#endif
+ ret = mxl111sf_tuner_write_reg(state, V6_TUNER_IF_FCW_REG, ctrl);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+static int mxl1x1sf_tune_rf(struct dvb_frontend *fe, u32 freq, u8 bw)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ static struct mxl111sf_reg_ctrl_info *reg_ctrl_array;
+ int ret;
+ u8 mxl_mode;
+
+ mxl_dbg("(freq = %d, bw = 0x%x)", freq, bw);
+
+ /* stop tune */
+ ret = mxl111sf_tuner_write_reg(state, START_TUNE_REG, 0);
+ if (mxl_fail(ret))
+ goto fail;
+
+ /* check device mode */
+ ret = mxl111sf_tuner_read_reg(state, MXL_MODE_REG, &mxl_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ /* Fill out registers for channel tune */
+ reg_ctrl_array = mxl111sf_calc_phy_tune_regs(freq, bw);
+ if (!reg_ctrl_array)
+ return -EINVAL;
+
+ ret = mxl111sf_tuner_program_regs(state, reg_ctrl_array);
+ if (mxl_fail(ret))
+ goto fail;
+
+ if ((mxl_mode & MXL_DEV_MODE_MASK) == MXL_TUNER_MODE) {
+ /* IF tuner mode only */
+ mxl1x1sf_tuner_top_master_ctrl(state, 0);
+ mxl1x1sf_tuner_top_master_ctrl(state, 1);
+ mxl1x1sf_tuner_set_if_output_freq(state);
+ }
+
+ ret = mxl111sf_tuner_write_reg(state, START_TUNE_REG, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ if (state->cfg->ant_hunt)
+ state->cfg->ant_hunt(fe);
+fail:
+ return ret;
+}
+
+static int mxl1x1sf_tuner_get_lock_status(struct mxl111sf_tuner_state *state,
+ int *rf_synth_lock,
+ int *ref_synth_lock)
+{
+ int ret;
+ u8 data;
+
+ *rf_synth_lock = 0;
+ *ref_synth_lock = 0;
+
+ ret = mxl111sf_tuner_read_reg(state, V6_RF_LOCK_STATUS_REG, &data);
+ if (mxl_fail(ret))
+ goto fail;
+
+ *ref_synth_lock = ((data & 0x03) == 0x03) ? 1 : 0;
+ *rf_synth_lock = ((data & 0x0c) == 0x0c) ? 1 : 0;
+fail:
+ return ret;
+}
+
+#if 0
+static int mxl1x1sf_tuner_loop_thru_ctrl(struct mxl111sf_tuner_state *state,
+ int onoff)
+{
+ return mxl111sf_tuner_write_reg(state, V6_TUNER_LOOP_THRU_CTRL_REG,
+ onoff ? 1 : 0);
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl111sf_tuner_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ int ret;
+ u8 bw;
+
+ mxl_dbg("()");
+
+ if (fe->ops.info.type == FE_ATSC) {
+ switch (params->u.vsb.modulation) {
+ case VSB_8:
+ case VSB_16:
+ bw = 0; /* ATSC */
+ break;
+ case QAM_64:
+ case QAM_256:
+ bw = 1; /* US CABLE */
+ break;
+ default:
+ err("%s: modulation not set!", __func__);
+ return -EINVAL;
+ }
+ } else if (fe->ops.info.type == FE_OFDM) {
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ bw = 6;
+ break;
+ case BANDWIDTH_7_MHZ:
+ bw = 7;
+ break;
+ case BANDWIDTH_8_MHZ:
+ bw = 8;
+ break;
+ default:
+ err("%s: bandwidth not set!", __func__);
+ return -EINVAL;
+ }
+ } else {
+ err("%s: modulation type not supported!", __func__);
+ return -EINVAL;
+ }
+ ret = mxl1x1sf_tune_rf(fe, params->frequency, bw);
+ if (mxl_fail(ret))
+ goto fail;
+
+ state->frequency = params->frequency;
+ state->bandwidth = (fe->ops.info.type == FE_OFDM) ?
+ params->u.ofdm.bandwidth : 0;
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#if 0
+static int mxl111sf_tuner_init(struct dvb_frontend *fe)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ int ret;
+
+ /* wake from standby handled by usb driver */
+
+ return ret;
+}
+
+static int mxl111sf_tuner_sleep(struct dvb_frontend *fe)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ int ret;
+
+ /* enter standby mode handled by usb driver */
+
+ return ret;
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl111sf_tuner_get_status(struct dvb_frontend *fe, u32 *status)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ int rf_locked, ref_locked, ret;
+
+ *status = 0;
+
+ ret = mxl1x1sf_tuner_get_lock_status(state, &rf_locked, &ref_locked);
+ if (mxl_fail(ret))
+ goto fail;
+ mxl_info("%s%s", rf_locked ? "rf locked " : "",
+ ref_locked ? "ref locked" : "");
+
+ if ((rf_locked) || (ref_locked))
+ *status |= TUNER_STATUS_LOCKED;
+fail:
+ return ret;
+}
+
+static int mxl111sf_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ u8 val1, val2;
+ int ret;
+
+ *strength = 0;
+
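+ /* reg 0x00 appears to act as a register page select: set to 0x02 for
+ * the RF power readback, restored to 0x00 before returning */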
+ ret = mxl111sf_tuner_write_reg(state, 0x00, 0x02);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_tuner_read_reg(state, V6_DIG_RF_PWR_LSB_REG, &val1);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_tuner_read_reg(state, V6_DIG_RF_PWR_MSB_REG, &val2);
+ if (mxl_fail(ret))
+ goto fail;
+
+ *strength = val1 | ((val2 & 0x07) << 8);
+fail:
+ ret = mxl111sf_tuner_write_reg(state, 0x00, 0x00);
+ mxl_fail(ret);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl111sf_tuner_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ *frequency = state->frequency;
+ return 0;
+}
+
+static int mxl111sf_tuner_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ *bandwidth = state->bandwidth;
+ return 0;
+}
+
+static int mxl111sf_tuner_release(struct dvb_frontend *fe)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+ mxl_dbg("()");
+ kfree(state);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
+ .info = {
+ .name = "MaxLinear MxL111SF",
+#if 0
+ .frequency_min = ,
+ .frequency_max = ,
+ .frequency_step = ,
+#endif
+ },
+#if 0
+ .init = mxl111sf_tuner_init,
+ .sleep = mxl111sf_tuner_sleep,
+#endif
+ .set_params = mxl111sf_tuner_set_params,
+ .get_status = mxl111sf_tuner_get_status,
+ .get_rf_strength = mxl111sf_get_rf_strength,
+ .get_frequency = mxl111sf_tuner_get_frequency,
+ .get_bandwidth = mxl111sf_tuner_get_bandwidth,
+ .release = mxl111sf_tuner_release,
+};
+
+struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+ struct mxl111sf_state *mxl_state,
+ struct mxl111sf_tuner_config *cfg)
+{
+ struct mxl111sf_tuner_state *state = NULL;
+
+ mxl_dbg("()");
+
+ state = kzalloc(sizeof(struct mxl111sf_tuner_state), GFP_KERNEL);
+ if (state == NULL)
+ return NULL;
+
+ state->mxl_state = mxl_state;
+ state->cfg = cfg;
+
+ memcpy(&fe->ops.tuner_ops, &mxl111sf_tuner_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ fe->tuner_priv = state;
+ return fe;
+}
+EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
+
+MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
+MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1");
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.h b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.h
new file mode 100644
index 00000000000..ff333960b18
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.h
@@ -0,0 +1,89 @@
+/*
+ * mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MXL111SF_TUNER_H__
+#define __MXL111SF_TUNER_H__
+
+#include "dvb_frontend.h"
+
+#include "mxl111sf.h"
+
+enum mxl_if_freq {
+#if 0
+ MXL_IF_LO = 0x00, /* other IF < 9MHz */
+#endif
+ MXL_IF_4_0 = 0x01, /* 4.0 MHz */
+ MXL_IF_4_5 = 0x02, /* 4.5 MHz */
+ MXL_IF_4_57 = 0x03, /* 4.57 MHz */
+ MXL_IF_5_0 = 0x04, /* 5.0 MHz */
+ MXL_IF_5_38 = 0x05, /* 5.38 MHz */
+ MXL_IF_6_0 = 0x06, /* 6.0 MHz */
+ MXL_IF_6_28 = 0x07, /* 6.28 MHz */
+ MXL_IF_7_2 = 0x08, /* 7.2 MHz */
+ MXL_IF_35_25 = 0x09, /* 35.25 MHz */
+ MXL_IF_36 = 0x0a, /* 36 MHz */
+ MXL_IF_36_15 = 0x0b, /* 36.15 MHz */
+ MXL_IF_44 = 0x0c, /* 44 MHz */
+#if 0
+ MXL_IF_HI = 0x0f, /* other IF > 35 MHz and < 45 MHz */
+#endif
+};
+
+struct mxl111sf_tuner_config {
+ enum mxl_if_freq if_freq;
+ unsigned int invert_spectrum:1;
+
+ int (*read_reg)(struct mxl111sf_state *state, u8 addr, u8 *data);
+ int (*write_reg)(struct mxl111sf_state *state, u8 addr, u8 data);
+ int (*program_regs)(struct mxl111sf_state *state,
+ struct mxl111sf_reg_ctrl_info *ctrl_reg_info);
+ int (*top_master_ctrl)(struct mxl111sf_state *state, int onoff);
+ int (*ant_hunt)(struct dvb_frontend *fe);
+};
+
+/* ------------------------------------------------------------------------ */
+
+#if defined(CONFIG_DVB_USB_MXL111SF) || \
+ (defined(CONFIG_DVB_USB_MXL111SF_MODULE) && defined(MODULE))
+extern
+struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+ struct mxl111sf_state *mxl_state,
+ struct mxl111sf_tuner_config *cfg);
+#else
+static inline
+struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+ struct mxl111sf_state *mxl_state,
+ struct mxl111sf_tuner_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* __MXL111SF_TUNER_H__ */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
+
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
new file mode 100644
index 00000000000..b5c98da5d9e
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
@@ -0,0 +1,1086 @@
+/*
+ * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/i2c.h>
+
+#include "mxl111sf.h"
+#include "mxl111sf-reg.h"
+#include "mxl111sf-phy.h"
+#include "mxl111sf-i2c.h"
+#include "mxl111sf-gpio.h"
+
+#include "mxl111sf-demod.h"
+#include "mxl111sf-tuner.h"
+
+#include "lgdt3305.h"
+
+int dvb_usb_mxl111sf_debug;
+module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debugging level "
+ "(1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
+
+int dvb_usb_mxl111sf_isoc;
+module_param_named(isoc, dvb_usb_mxl111sf_isoc, int, 0644);
+MODULE_PARM_DESC(isoc, "enable usb isoc xfer (0=bulk, 1=isoc).");
+
+#define ANT_PATH_AUTO 0
+#define ANT_PATH_EXTERNAL 1
+#define ANT_PATH_INTERNAL 2
+
+int dvb_usb_mxl111sf_rfswitch =
+#if 0
+ ANT_PATH_AUTO;
+#else
+ ANT_PATH_EXTERNAL;
+#endif
+
+module_param_named(rfswitch, dvb_usb_mxl111sf_rfswitch, int, 0644);
+MODULE_PARM_DESC(rfswitch, "force rf switch position (0=auto, 1=ext, 2=int).");
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+#define deb_info(args...) dprintk(dvb_usb_mxl111sf_debug, 0x13, args)
+#define deb_reg(args...) dprintk(dvb_usb_mxl111sf_debug, 0x08, args)
+#define deb_adv(args...) dprintk(dvb_usb_mxl111sf_debug, MXL_ADV_DBG, args)
+
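+/* control messages are framed as [cmd | payload]; when no read buffer is
+ * supplied, the transfer is treated as write-only */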
+int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+ u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ int wo = (rbuf == NULL || rlen == 0); /* write-only */
+ int ret;
+ u8 sndbuf[1+wlen];
+
+ deb_adv("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen);
+
+ memset(sndbuf, 0, 1+wlen);
+
+ sndbuf[0] = cmd;
+ memcpy(&sndbuf[1], wbuf, wlen);
+
+ ret = (wo) ? dvb_usb_generic_write(d, sndbuf, 1+wlen) :
+ dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0);
+ mxl_fail(ret);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define MXL_CMD_REG_READ 0xaa
+#define MXL_CMD_REG_WRITE 0x55
+
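+/* a register read returns two bytes: the echoed address followed by the value */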
+int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_READ, &addr, 1, buf, 2);
+ if (mxl_fail(ret)) {
+ mxl_debug("error reading reg: 0x%02x", addr);
+ goto fail;
+ }
+
+ if (buf[0] == addr)
+ *data = buf[1];
+ else {
+ err("invalid response reading reg: 0x%02x != 0x%02x, 0x%02x",
+ addr, buf[0], buf[1]);
+ ret = -EINVAL;
+ }
+
+ deb_reg("R: (0x%02x, 0x%02x)\n", addr, *data);
+fail:
+ return ret;
+}
+
+int mxl111sf_write_reg(struct mxl111sf_state *state, u8 addr, u8 data)
+{
+ u8 buf[] = { addr, data };
+ int ret;
+
+ deb_reg("W: (0x%02x, 0x%02x)\n", addr, data);
+
+ ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_WRITE, buf, 2, NULL, 0);
+ if (mxl_fail(ret))
+ err("error writing reg: 0x%02x, val: 0x%02x", addr, data);
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+int mxl111sf_write_reg_mask(struct mxl111sf_state *state,
+ u8 addr, u8 mask, u8 data)
+{
+ int ret;
+ u8 val;
+
+ if (mask != 0xff) {
+ ret = mxl111sf_read_reg(state, addr, &val);
+#if 1
+ /* don't know why this usually errors out on the first try */
+ if (mxl_fail(ret))
+ err("error writing addr: 0x%02x, mask: 0x%02x, "
+ "data: 0x%02x, retrying...", addr, mask, data);
+
+ ret = mxl111sf_read_reg(state, addr, &val);
+#endif
+ if (mxl_fail(ret))
+ goto fail;
+ }
+ val &= ~mask;
+ val |= data;
+
+ ret = mxl111sf_write_reg(state, addr, val);
+ mxl_fail(ret);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+int mxl111sf_ctrl_program_regs(struct mxl111sf_state *state,
+ struct mxl111sf_reg_ctrl_info *ctrl_reg_info)
+{
+ int i, ret = 0;
+
+ for (i = 0; ctrl_reg_info[i].addr |
+ ctrl_reg_info[i].mask |
+ ctrl_reg_info[i].data; i++) {
+
+ ret = mxl111sf_write_reg_mask(state,
+ ctrl_reg_info[i].addr,
+ ctrl_reg_info[i].mask,
+ ctrl_reg_info[i].data);
+ if (mxl_fail(ret)) {
+ err("failed on reg #%d (0x%02x)", i,
+ ctrl_reg_info[i].addr);
+ break;
+ }
+ }
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl1x1sf_get_chip_info(struct mxl111sf_state *state)
+{
+ int ret;
+ u8 id, ver;
+ char *mxl_chip, *mxl_rev;
+
+ if ((state->chip_id) && (state->chip_ver))
+ return 0;
+
+ ret = mxl111sf_read_reg(state, CHIP_ID_REG, &id);
+ if (mxl_fail(ret))
+ goto fail;
+ state->chip_id = id;
+
+ ret = mxl111sf_read_reg(state, TOP_CHIP_REV_ID_REG, &ver);
+ if (mxl_fail(ret))
+ goto fail;
+ state->chip_ver = ver;
+
+ switch (id) {
+ case 0x61:
+ mxl_chip = "MxL101SF";
+ break;
+ case 0x63:
+ mxl_chip = "MxL111SF";
+ break;
+ default:
+ mxl_chip = "UNKNOWN MxL1X1";
+ break;
+ }
+ switch (ver) {
+ case 0x36:
+ state->chip_rev = MXL111SF_V6;
+ mxl_rev = "v6";
+ break;
+ case 0x08:
+ state->chip_rev = MXL111SF_V8_100;
+ mxl_rev = "v8_100";
+ break;
+ case 0x18:
+ state->chip_rev = MXL111SF_V8_200;
+ mxl_rev = "v8_200";
+ break;
+ default:
+ state->chip_rev = 0;
+ mxl_rev = "UNKNOWN REVISION";
+ break;
+ }
+ info("%s detected, %s (0x%x)", mxl_chip, mxl_rev, ver);
+fail:
+ return ret;
+}
+
+#define get_chip_info(state) \
+({ \
+ int ___ret; \
+ ___ret = mxl1x1sf_get_chip_info(state); \
+ if (mxl_fail(___ret)) { \
+ mxl_debug("failed to get chip info" \
+ " on first probe attempt"); \
+ ___ret = mxl1x1sf_get_chip_info(state); \
+ if (mxl_fail(___ret)) \
+ err("failed to get chip info during probe"); \
+ else \
+ mxl_debug("probe needed a retry " \
+ "in order to succeed."); \
+ } \
+ ___ret; \
+})
+
+/* ------------------------------------------------------------------------ */
+
+static int mxl111sf_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+ /* power control depends on which adapter is being woken;
+ * handle it in mxl111sf_adap_fe_init instead */
+ return 0;
+}
+
+static int mxl111sf_adap_fe_init(struct dvb_frontend *fe)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe->id].priv;
+
+ int err;
+
+ /* exit if we didn't initialize the driver yet */
+ if (!state->chip_id) {
+ mxl_debug("driver not yet initialized, exit.");
+ goto fail;
+ }
+
+ deb_info("%s()\n", __func__);
+
+ mutex_lock(&state->fe_lock);
+
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ err = mxl1x1sf_soft_reset(state);
+ mxl_fail(err);
+ err = mxl111sf_init_tuner_demod(state);
+ mxl_fail(err);
+ err = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+
+ mxl_fail(err);
+ mxl111sf_enable_usb_output(state);
+ mxl_fail(err);
+ mxl1x1sf_top_master_ctrl(state, 1);
+ mxl_fail(err);
+
+ if ((MXL111SF_GPIO_MOD_DVBT != adap_state->gpio_mode) &&
+ (state->chip_rev > MXL111SF_V6)) {
+ mxl111sf_config_pin_mux_modes(state,
+ PIN_MUX_TS_SPI_IN_MODE_1);
+ mxl_fail(err);
+ }
+ err = mxl111sf_init_port_expander(state);
+ if (!mxl_fail(err)) {
+ state->gpio_mode = adap_state->gpio_mode;
+ err = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ mxl_fail(err);
+#if 0
+ err = fe->ops.init(fe);
+#endif
+ msleep(100); /* add a short delay after enabling
+ * the demod before touching it */
+ }
+
+ return (adap_state->fe_init) ? adap_state->fe_init(fe) : 0;
+fail:
+ return -ENODEV;
+}
+
+static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe->id].priv;
+ int err;
+
+ /* exit if we didn't initialize the driver yet */
+ if (!state->chip_id) {
+ mxl_debug("driver not yet initialized, exit.");
+ goto fail;
+ }
+
+ deb_info("%s()\n", __func__);
+
+ err = (adap_state->fe_sleep) ? adap_state->fe_sleep(fe) : 0;
+
+ mutex_unlock(&state->fe_lock);
+
+ return err;
+fail:
+ return -ENODEV;
+}
+
+
+static int mxl111sf_ep6_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[adap->active_fe].priv;
+ int ret = 0;
+ u8 tmp;
+
+ deb_info("%s(%d)\n", __func__, onoff);
+
+ if (onoff) {
+ ret = mxl111sf_enable_usb_output(state);
+ mxl_fail(ret);
+ ret = mxl111sf_config_mpeg_in(state, 1, 1,
+ adap_state->ep6_clockphase,
+ 0, 0);
+ mxl_fail(ret);
+ } else {
+ ret = mxl111sf_disable_656_port(state);
+ mxl_fail(ret);
+ }
+
+ mxl111sf_read_reg(state, 0x12, &tmp);
+ tmp &= ~0x04;
+ mxl111sf_write_reg(state, 0x12, tmp);
+
+ return ret;
+}
+
+static int mxl111sf_ep4_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int ret = 0;
+
+ deb_info("%s(%d)\n", __func__, onoff);
+
+ if (onoff) {
+ ret = mxl111sf_enable_usb_output(state);
+ mxl_fail(ret);
+ }
+
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct lgdt3305_config hauppauge_lgdt3305_config = {
+ .i2c_addr = 0xb2 >> 1,
+ .mpeg_mode = LGDT3305_MPEG_SERIAL,
+ .tpclk_edge = LGDT3305_TPCLK_RISING_EDGE,
+ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .qam_if_khz = 6000,
+ .vsb_if_khz = 6000,
+};
+
+static int mxl111sf_lgdt3305_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 2 : 1;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_ATSC;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_TUNER_MODE;
+ adap_state->ep6_clockphase = 1;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_init_port_expander(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ adap->fe_adap[fe_id].fe = dvb_attach(lgdt3305_attach,
+ &hauppauge_lgdt3305_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
+static struct mxl111sf_demod_config mxl_demod_config = {
+ .read_reg = mxl111sf_read_reg,
+ .write_reg = mxl111sf_write_reg,
+ .program_regs = mxl111sf_ctrl_program_regs,
+};
+
+static int mxl111sf_attach_demod(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 1 : 2;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_DVBT;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_SOC_MODE;
+ adap_state->ep6_clockphase = 1;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ /* don't care if this fails */
+ mxl111sf_init_port_expander(state);
+
+ adap->fe_adap[fe_id].fe = dvb_attach(mxl111sf_demod_attach, state,
+ &mxl_demod_config);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
+static inline int mxl111sf_set_ant_path(struct mxl111sf_state *state,
+ int antpath)
+{
+ return mxl111sf_idac_config(state, 1, 1,
+ (antpath == ANT_PATH_INTERNAL) ?
+ 0x3f : 0x00, 0);
+}
+
+#define DbgAntHunt(x, pwr0, pwr1, pwr2, pwr3) \
+ err("%s(%d) FINAL input set to %s rxPwr:%d|%d|%d|%d\n", \
+ __func__, __LINE__, \
+ (ANT_PATH_EXTERNAL == x) ? "EXTERNAL" : "INTERNAL", \
+ pwr0, pwr1, pwr2, pwr3)
+
+#define ANT_HUNT_SLEEP 90
+#define ANT_EXT_TWEAK 0
+
+static int mxl111sf_ant_hunt(struct dvb_frontend *fe)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+
+ int antctrl = dvb_usb_mxl111sf_rfswitch;
+
+ u16 rxPwrA, rxPwr0, rxPwr1, rxPwr2;
+
+ /* FIXME: must force EXTERNAL for QAM - done elsewhere */
+ mxl111sf_set_ant_path(state, antctrl == ANT_PATH_AUTO ?
+ ANT_PATH_EXTERNAL : antctrl);
+
+ if (antctrl == ANT_PATH_AUTO) {
+#if 0
+ msleep(ANT_HUNT_SLEEP);
+#endif
+ fe->ops.tuner_ops.get_rf_strength(fe, &rxPwrA);
+
+ mxl111sf_set_ant_path(state, ANT_PATH_EXTERNAL);
+ msleep(ANT_HUNT_SLEEP);
+ fe->ops.tuner_ops.get_rf_strength(fe, &rxPwr0);
+
+ mxl111sf_set_ant_path(state, ANT_PATH_EXTERNAL);
+ msleep(ANT_HUNT_SLEEP);
+ fe->ops.tuner_ops.get_rf_strength(fe, &rxPwr1);
+
+ mxl111sf_set_ant_path(state, ANT_PATH_INTERNAL);
+ msleep(ANT_HUNT_SLEEP);
+ fe->ops.tuner_ops.get_rf_strength(fe, &rxPwr2);
+
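+ /* keep the external antenna unless the internal path measured more
+ * power; ANT_EXT_TWEAK biases the comparison toward external */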
+ if (rxPwr1+ANT_EXT_TWEAK >= rxPwr2) {
+ /* return with EXTERNAL enabled */
+ mxl111sf_set_ant_path(state, ANT_PATH_EXTERNAL);
+ DbgAntHunt(ANT_PATH_EXTERNAL, rxPwrA,
+ rxPwr0, rxPwr1, rxPwr2);
+ } else {
+ /* return with INTERNAL enabled */
+ DbgAntHunt(ANT_PATH_INTERNAL, rxPwrA,
+ rxPwr0, rxPwr1, rxPwr2);
+ }
+ }
+ return 0;
+}
+
+static struct mxl111sf_tuner_config mxl_tuner_config = {
+ .if_freq = MXL_IF_6_0, /* applies to external IF output, only */
+ .invert_spectrum = 0,
+ .read_reg = mxl111sf_read_reg,
+ .write_reg = mxl111sf_write_reg,
+ .program_regs = mxl111sf_ctrl_program_regs,
+ .top_master_ctrl = mxl1x1sf_top_master_ctrl,
+ .ant_hunt = mxl111sf_ant_hunt,
+};
+
+static int mxl111sf_attach_tuner(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+
+ deb_adv("%s()\n", __func__);
+
+ if (NULL != dvb_attach(mxl111sf_tuner_attach,
+ adap->fe_adap[fe_id].fe, state,
+ &mxl_tuner_config))
+ return 0;
+
+ return -EIO;
+}
+
+static int mxl111sf_fe_ioctl_override(struct dvb_frontend *fe,
+ unsigned int cmd, void *parg,
+ unsigned int stage)
+{
+ int err = 0;
+
+ switch (stage) {
+ case DVB_FE_IOCTL_PRE:
+
+ switch (cmd) {
+ case FE_READ_SIGNAL_STRENGTH:
+ err = fe->ops.tuner_ops.get_rf_strength(fe, parg);
+ /* If no error occurs, prevent dvb-core from handling
+ * this IOCTL, otherwise return the error */
+ if (0 == err)
+ err = 1;
+ break;
+ }
+ break;
+
+ case DVB_FE_IOCTL_POST:
+ /* no post-ioctl handling required */
+ break;
+ }
+ return err;
+}
+
+static u32 mxl111sf_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+struct i2c_algorithm mxl111sf_i2c_algo = {
+ .master_xfer = mxl111sf_i2c_xfer,
+ .functionality = mxl111sf_i2c_func,
+#ifdef NEED_ALGO_CONTROL
+ .algo_control = dummy_algo_control,
+#endif
+};
+
+static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties;
+
+static int mxl111sf_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct dvb_usb_device *d = NULL;
+
+ deb_adv("%s()\n", __func__);
+
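+ /* try the isoc property sets first when the isoc module option is
+ * enabled, then fall back to the bulk property sets */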
+ if (((dvb_usb_mxl111sf_isoc) &&
+ (0 == dvb_usb_device_init(intf,
+ &mxl111sf_dvbt_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_atsc_isoc_properties,
+ THIS_MODULE, &d, adapter_nr))) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_dvbt_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_atsc_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) || 0) {
+
+ struct mxl111sf_state *state = d->priv;
+ static u8 eeprom[256];
+ struct i2c_client c;
+ int ret;
+
+ ret = get_chip_info(state);
+ if (mxl_fail(ret))
+ err("failed to get chip info during probe");
+
+ mutex_init(&state->fe_lock);
+
+ if (state->chip_rev > MXL111SF_V6)
+ mxl111sf_config_pin_mux_modes(state,
+ PIN_MUX_TS_SPI_IN_MODE_1);
+
+ c.adapter = &d->i2c_adap;
+ c.addr = 0xa0 >> 1;
+
+ ret = tveeprom_read(&c, eeprom, sizeof(eeprom));
+ if (mxl_fail(ret))
+ return 0;
+ tveeprom_hauppauge_analog(&c, &state->tv,
+ (0x84 == eeprom[0xa0]) ?
+ eeprom + 0xa0 : eeprom + 0x80);
+#if 0
+ switch (state->tv.model) {
+ case 117001:
+ case 126001:
+ case 138001:
+ break;
+ default:
+ printk(KERN_WARNING "%s: warning: "
+ "unknown hauppauge model #%d\n",
+ __func__, state->tv.model);
+ }
+#endif
+ return 0;
+ }
+ err("Your device is not yet supported by this driver. "
+ "See kernellabs.com for more info");
+ return -EINVAL;
+}
+
+static struct usb_device_id mxl111sf_table[] = {
+/* 0 */ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc600) }, /* ATSC+ IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc601) }, /* ATSC */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc602) }, /* + */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc603) }, /* ATSC+ */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc604) }, /* DVBT */
+/* 5 */ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc609) }, /* ATSC IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc60a) }, /* + IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc60b) }, /* ATSC+ IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc60c) }, /* DVBT IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc653) }, /* ATSC+ */
+/*10 */ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc65b) }, /* ATSC+ IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb700) }, /* ATSC+ sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb701) }, /* ATSC sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb702) }, /* + sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb703) }, /* ATSC+ sw */
+/*15 */ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb704) }, /* DVBT sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb753) }, /* ATSC+ sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb763) }, /* ATSC+ no */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb764) }, /* DVBT no */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd853) }, /* ATSC+ sw */
+/*20 */ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd854) }, /* DVBT sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd863) }, /* ATSC+ no */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd864) }, /* DVBT no */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd8d3) }, /* ATSC+ sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd8d4) }, /* DVBT sw */
+/*25 */ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd8e3) }, /* ATSC+ no */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd8e4) }, /* DVBT no */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xd8ff) }, /* ATSC+ */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc612) }, /* + */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc613) }, /* ATSC+ */
+/*30 */ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc61a) }, /* + IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xc61b) }, /* ATSC+ IR */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb757) }, /* ATSC+DVBT sw */
+ { USB_DEVICE(USB_VID_HAUPPAUGE, 0xb767) }, /* ATSC+DVBT no */
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, mxl111sf_table);
+
+
+#define MXL111SF_EP4_BULK_STREAMING_CONFIG \
+ .streaming_ctrl = mxl111sf_ep4_streaming_ctrl, \
+ .stream = { \
+ .type = USB_BULK, \
+ .count = 5, \
+ .endpoint = 0x04, \
+ .u = { \
+ .bulk = { \
+ .buffersize = 8192, \
+ } \
+ } \
+ }
+
+/* FIXME: works for v6 but not v8 silicon */
+#define MXL111SF_EP4_ISOC_STREAMING_CONFIG \
+ .streaming_ctrl = mxl111sf_ep4_streaming_ctrl, \
+ .stream = { \
+ .type = USB_ISOC, \
+ .count = 5, \
+ .endpoint = 0x04, \
+ .u = { \
+ .isoc = { \
+ .framesperurb = 96, \
+ /* FIXME: v6 SILICON: */ \
+ .framesize = 564, \
+ .interval = 1, \
+ } \
+ } \
+ }
+
+#define MXL111SF_EP6_BULK_STREAMING_CONFIG \
+ .streaming_ctrl = mxl111sf_ep6_streaming_ctrl, \
+ .stream = { \
+ .type = USB_BULK, \
+ .count = 5, \
+ .endpoint = 0x06, \
+ .u = { \
+ .bulk = { \
+ .buffersize = 8192, \
+ } \
+ } \
+ }
+
+/* FIXME */
+#define MXL111SF_EP6_ISOC_STREAMING_CONFIG \
+ .streaming_ctrl = mxl111sf_ep6_streaming_ctrl, \
+ .stream = { \
+ .type = USB_ISOC, \
+ .count = 5, \
+ .endpoint = 0x06, \
+ .u = { \
+ .isoc = { \
+ .framesperurb = 24, \
+ .framesize = 3072, \
+ .interval = 1, \
+ } \
+ } \
+ }
+
+#define MXL111SF_DEFAULT_DEVICE_PROPERTIES \
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER, \
+ .usb_ctrl = DEVICE_SPECIFIC, \
+ /* use usb alt setting 1 for EP4 ISOC transfer (dvb-t), \
+ EP6 BULK transfer (atsc/qam), \
+ use usb alt setting 2 for EP4 BULK transfer (dvb-t), \
+ EP6 ISOC transfer (atsc/qam), \
+ */ \
+ .power_ctrl = mxl111sf_power_ctrl, \
+ .i2c_algo = &mxl111sf_i2c_algo, \
+ .generic_bulk_ctrl_endpoint = MXL_EP2_REG_WRITE, \
+ .generic_bulk_ctrl_endpoint_response = MXL_EP1_REG_READ, \
+ .size_of_priv = sizeof(struct mxl111sf_state)
+
+static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .size_of_priv = sizeof(struct mxl111sf_adap_state),
+
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ } },
+ },
+ },
+ .num_device_descs = 4,
+ .devices = {
+ { "Hauppauge 126xxx DVBT (bulk)",
+ { NULL },
+ { &mxl111sf_table[4], &mxl111sf_table[8],
+ NULL },
+ },
+ { "Hauppauge 117xxx DVBT (bulk)",
+ { NULL },
+ { &mxl111sf_table[15], &mxl111sf_table[18],
+ NULL },
+ },
+ { "Hauppauge 138xxx DVBT (bulk)",
+ { NULL },
+ { &mxl111sf_table[20], &mxl111sf_table[22],
+ &mxl111sf_table[24], &mxl111sf_table[26],
+ NULL },
+ },
+ { "Hauppauge 126xxx (tp-bulk)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .size_of_priv = sizeof(struct mxl111sf_adap_state),
+
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ } },
+ },
+ },
+ .num_device_descs = 4,
+ .devices = {
+ { "Hauppauge 126xxx DVBT (isoc)",
+ { NULL },
+ { &mxl111sf_table[4], &mxl111sf_table[8],
+ NULL },
+ },
+ { "Hauppauge 117xxx DVBT (isoc)",
+ { NULL },
+ { &mxl111sf_table[15], &mxl111sf_table[18],
+ NULL },
+ },
+ { "Hauppauge 138xxx DVBT (isoc)",
+ { NULL },
+ { &mxl111sf_table[20], &mxl111sf_table[22],
+ &mxl111sf_table[24], &mxl111sf_table[26],
+ NULL },
+ },
+ { "Hauppauge 126xxx (tp-isoc)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .size_of_priv = sizeof(struct mxl111sf_adap_state),
+
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ },
+ {
+ .size_of_priv = sizeof(struct mxl111sf_adap_state),
+
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 6,
+ .devices = {
+ { "Hauppauge 126xxx ATSC (bulk)",
+ { NULL },
+ { &mxl111sf_table[1], &mxl111sf_table[5],
+ NULL },
+ },
+ { "Hauppauge 117xxx ATSC (bulk)",
+ { NULL },
+ { &mxl111sf_table[12],
+ NULL },
+ },
+ { "Hauppauge 126xxx ATSC+ (bulk)",
+ { NULL },
+ { &mxl111sf_table[0], &mxl111sf_table[3],
+ &mxl111sf_table[7], &mxl111sf_table[9],
+ &mxl111sf_table[10], NULL },
+ },
+ { "Hauppauge 117xxx ATSC+ (bulk)",
+ { NULL },
+ { &mxl111sf_table[11], &mxl111sf_table[14],
+ &mxl111sf_table[16], &mxl111sf_table[17],
+ &mxl111sf_table[32], &mxl111sf_table[33],
+ NULL },
+ },
+ { "Hauppauge Mercury (tp-bulk)",
+ { NULL },
+ { &mxl111sf_table[19], &mxl111sf_table[21],
+ &mxl111sf_table[23], &mxl111sf_table[25],
+ &mxl111sf_table[27], NULL },
+ },
+ { "Hauppauge WinTV-Aero-M",
+ { NULL },
+ { &mxl111sf_table[29], &mxl111sf_table[31],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .size_of_priv = sizeof(struct mxl111sf_adap_state),
+
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .size_of_priv = sizeof(struct mxl111sf_adap_state),
+
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 6,
+ .devices = {
+ { "Hauppauge 126xxx ATSC (isoc)",
+ { NULL },
+ { &mxl111sf_table[1], &mxl111sf_table[5],
+ NULL },
+ },
+ { "Hauppauge 117xxx ATSC (isoc)",
+ { NULL },
+ { &mxl111sf_table[12],
+ NULL },
+ },
+ { "Hauppauge 126xxx ATSC+ (isoc)",
+ { NULL },
+ { &mxl111sf_table[0], &mxl111sf_table[3],
+ &mxl111sf_table[7], &mxl111sf_table[9],
+ &mxl111sf_table[10], NULL },
+ },
+ { "Hauppauge 117xxx ATSC+ (isoc)",
+ { NULL },
+ { &mxl111sf_table[11], &mxl111sf_table[14],
+ &mxl111sf_table[16], &mxl111sf_table[17],
+ &mxl111sf_table[32], &mxl111sf_table[33],
+ NULL },
+ },
+ { "Hauppauge Mercury (tp-isoc)",
+ { NULL },
+ { &mxl111sf_table[19], &mxl111sf_table[21],
+ &mxl111sf_table[23], &mxl111sf_table[25],
+ &mxl111sf_table[27], NULL },
+ },
+ { "Hauppauge WinTV-Aero-M (tp-isoc)",
+ { NULL },
+ { &mxl111sf_table[29], &mxl111sf_table[31],
+ NULL },
+ },
+ }
+};
+
+static struct usb_driver mxl111sf_driver = {
+ .name = "dvb_usb_mxl111sf",
+ .probe = mxl111sf_probe,
+ .disconnect = dvb_usb_device_exit,
+ .id_table = mxl111sf_table,
+};
+
+static int __init mxl111sf_module_init(void)
+{
+ int result = usb_register(&mxl111sf_driver);
+ if (result) {
+ err("usb_register failed. Error number %d", result);
+ return result;
+ }
+
+ return 0;
+}
+
+static void __exit mxl111sf_module_exit(void)
+{
+ usb_deregister(&mxl111sf_driver);
+}
+
+module_init(mxl111sf_module_init);
+module_exit(mxl111sf_module_exit);
+
+MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.h b/drivers/media/dvb/dvb-usb/mxl111sf.h
new file mode 100644
index 00000000000..364d89f826b
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+
+#ifndef _DVB_USB_MXL111SF_H_
+#define _DVB_USB_MXL111SF_H_
+
+#ifdef DVB_USB_LOG_PREFIX
+#undef DVB_USB_LOG_PREFIX
+#endif
+#define DVB_USB_LOG_PREFIX "mxl111sf"
+#include "dvb-usb.h"
+#include <media/tveeprom.h>
+
+#define MXL_EP1_REG_READ 1
+#define MXL_EP2_REG_WRITE 2
+#define MXL_EP3_INTERRUPT 3
+#define MXL_EP4_MPEG2 4
+#define MXL_EP5_I2S 5
+#define MXL_EP6_656 6
+#define MXL_EP6_MPEG2 6
+
+#ifdef USING_ENUM_mxl111sf_current_mode
+enum mxl111sf_current_mode {
+ mxl_mode_dvbt = MXL_EP4_MPEG2,
+ mxl_mode_mh = MXL_EP5_I2S,
+ mxl_mode_atsc = MXL_EP6_MPEG2,
+};
+#endif
+
+enum mxl111sf_gpio_port_expander {
+ mxl111sf_gpio_hw,
+ mxl111sf_PCA9534,
+};
+
+struct mxl111sf_state {
+ struct dvb_usb_device *d;
+
+ enum mxl111sf_gpio_port_expander gpio_port_expander;
+ u8 port_expander_addr;
+
+ u8 chip_id;
+ u8 chip_ver;
+#define MXL111SF_V6 1
+#define MXL111SF_V8_100 2
+#define MXL111SF_V8_200 3
+ u8 chip_rev;
+
+#ifdef USING_ENUM_mxl111sf_current_mode
+ enum mxl111sf_current_mode current_mode;
+#endif
+
+#define MXL_TUNER_MODE 0
+#define MXL_SOC_MODE 1
+#define MXL_DEV_MODE_MASK 0x01
+#if 1
+ int device_mode;
+#endif
+ /* use usb alt setting 1 for EP4 ISOC transfer (dvb-t),
+ EP5 BULK transfer (atsc-mh),
+ EP6 BULK transfer (atsc/qam),
+ use usb alt setting 2 for EP4 BULK transfer (dvb-t),
+ EP5 ISOC transfer (atsc-mh),
+ EP6 ISOC transfer (atsc/qam),
+ */
+ int alt_mode;
+ int gpio_mode;
+ struct tveeprom tv;
+
+ struct mutex fe_lock;
+};
+
+struct mxl111sf_adap_state {
+ int alt_mode;
+ int gpio_mode;
+ int device_mode;
+ int ep6_clockphase;
+ int (*fe_init)(struct dvb_frontend *);
+ int (*fe_sleep)(struct dvb_frontend *);
+};
+
+int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data);
+int mxl111sf_write_reg(struct mxl111sf_state *state, u8 addr, u8 data);
+
+struct mxl111sf_reg_ctrl_info {
+ u8 addr;
+ u8 mask;
+ u8 data;
+};
+
+int mxl111sf_write_reg_mask(struct mxl111sf_state *state,
+ u8 addr, u8 mask, u8 data);
+int mxl111sf_ctrl_program_regs(struct mxl111sf_state *state,
+ struct mxl111sf_reg_ctrl_info *ctrl_reg_info);
+
+/* needed for hardware i2c functions in mxl111sf-i2c.c:
+ * mxl111sf_i2c_send_data / mxl111sf_i2c_get_data */
+int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+ u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen);
+
+#define mxl_printk(kern, fmt, arg...) \
+ printk(kern "%s: " fmt "\n", __func__, ##arg)
+
+#define mxl_info(fmt, arg...) \
+ mxl_printk(KERN_INFO, fmt, ##arg)
+
+extern int dvb_usb_mxl111sf_debug;
+#define mxl_debug(fmt, arg...) \
+ if (dvb_usb_mxl111sf_debug) \
+ mxl_printk(KERN_DEBUG, fmt, ##arg)
+
+#define MXL_I2C_DBG 0x04
+#define MXL_ADV_DBG 0x10
+#define mxl_debug_adv(fmt, arg...) \
+ if (dvb_usb_mxl111sf_debug & MXL_ADV_DBG) \
+ mxl_printk(KERN_DEBUG, fmt, ##arg)
+
+#define mxl_i2c(fmt, arg...) \
+ if (dvb_usb_mxl111sf_debug & MXL_I2C_DBG) \
+ mxl_printk(KERN_DEBUG, fmt, ##arg)
+
+#define mxl_i2c_adv(fmt, arg...) \
+ if ((dvb_usb_mxl111sf_debug & (MXL_I2C_DBG | MXL_ADV_DBG)) == \
+ (MXL_I2C_DBG | MXL_ADV_DBG)) \
+ mxl_printk(KERN_DEBUG, fmt, ##arg)
+
+/* The following allows the mxl_fail() macro defined below to work
+ * in external modules, such as mxl111sf-tuner.ko, even though
+ * dvb_usb_mxl111sf_debug is not defined within those modules */
+#if (defined(__MXL111SF_TUNER_H__)) || (defined(__MXL111SF_DEMOD_H__))
+#define MXL_ADV_DEBUG_ENABLED MXL_ADV_DBG
+#else
+#define MXL_ADV_DEBUG_ENABLED dvb_usb_mxl111sf_debug
+#endif
+
+#define mxl_fail(ret) \
+({ \
+ int __ret; \
+ __ret = (ret < 0); \
+ if ((__ret) && (MXL_ADV_DEBUG_ENABLED & MXL_ADV_DBG)) \
+ mxl_printk(KERN_ERR, "error %d on line %d", \
+ ret, __LINE__); \
+ __ret; \
+})
+
+#endif /* _DVB_USB_MXL111SF_H_ */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index bc350e982b7..21384da6570 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -166,6 +166,8 @@ static struct dvb_usb_device_properties nova_t_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
@@ -186,7 +188,7 @@ static struct dvb_usb_device_properties nova_t_properties = {
}
}
},
-
+ }},
.size_of_priv = sizeof(struct dibusb_state),
}
},
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 2e4fab7215f..98fd9a6092b 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -263,10 +263,10 @@ static struct stv0299_config opera1_stv0299_config = {
static int opera1_frontend_attach(struct dvb_usb_adapter *d)
{
- if ((d->fe =
- dvb_attach(stv0299_attach, &opera1_stv0299_config,
- &d->dev->i2c_adap)) != NULL) {
- d->fe->ops.set_voltage = opera1_set_voltage;
+ d->fe_adap[0].fe = dvb_attach(stv0299_attach, &opera1_stv0299_config,
+ &d->dev->i2c_adap);
+ if ((d->fe_adap[0].fe) != NULL) {
+ d->fe_adap[0].fe->ops.set_voltage = opera1_set_voltage;
return 0;
}
info("not attached stv0299");
@@ -276,7 +276,7 @@ static int opera1_frontend_attach(struct dvb_usb_adapter *d)
static int opera1_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(
- dvb_pll_attach, adap->fe, 0xc0>>1,
+ dvb_pll_attach, adap->fe_adap[0].fe, 0xc0>>1,
&adap->dev->i2c_adap, DVB_PLL_OPERA1
);
return 0;
@@ -516,6 +516,8 @@ static struct dvb_usb_device_properties opera1_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = opera1_frontend_attach,
.streaming_ctrl = opera1_streaming_ctrl,
.tuner_attach = opera1_tuner_attach,
@@ -535,6 +537,7 @@ static struct dvb_usb_device_properties opera1_properties = {
}
}
},
+ }},
}
},
.num_device_descs = 1,
diff --git a/drivers/media/dvb/dvb-usb/pctv452e.c b/drivers/media/dvb/dvb-usb/pctv452e.c
new file mode 100644
index 00000000000..f9aec5cb6e7
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/pctv452e.c
@@ -0,0 +1,1079 @@
+/*
+ * PCTV 452e DVB driver
+ *
+ * Copyright (c) 2006-2008 Dominik Kuhlen <dkuhlen@gmx.net>
+ *
+ * TT connect S2-3650-CI Common Interface support, MAC readout
+ * Copyright (C) 2008 Michael H. Schimek <mschimek@gmx.at>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/* dvb usb framework */
+#define DVB_USB_LOG_PREFIX "pctv452e"
+#include "dvb-usb.h"
+
+/* Demodulator */
+#include "stb0899_drv.h"
+#include "stb0899_reg.h"
+#include "stb0899_cfg.h"
+/* Tuner */
+#include "stb6100.h"
+#include "stb6100_cfg.h"
+/* FE Power */
+#include "lnbp22.h"
+
+#include "dvb_ca_en50221.h"
+#include "ttpci-eeprom.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+#define ISOC_INTERFACE_ALTERNATIVE 3
+
+#define SYNC_BYTE_OUT 0xaa
+#define SYNC_BYTE_IN 0x55
+
+/* guessed: (copied from ttusb-budget) */
+#define PCTV_CMD_RESET 0x15
+/* command to poll IR receiver */
+#define PCTV_CMD_IR 0x1b
+/* command to send I2C */
+#define PCTV_CMD_I2C 0x31
+
+#define I2C_ADDR_STB0899 (0xd0 >> 1)
+#define I2C_ADDR_STB6100 (0xc0 >> 1)
+#define I2C_ADDR_LNBP22 (0x10 >> 1)
+#define I2C_ADDR_24C16 (0xa0 >> 1)
+#define I2C_ADDR_24C64 (0xa2 >> 1)
+
+
+/* pctv452e sends us this amount of data for each issued usb-command */
+#define PCTV_ANSWER_LEN 64
+/* Wait up to 1000ms for device */
+#define PCTV_TIMEOUT 1000
+
+
+#define PCTV_LED_GPIO STB0899_GPIO01
+#define PCTV_LED_GREEN 0x82
+#define PCTV_LED_ORANGE 0x02
+
+#define ci_dbg(format, arg...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG DVB_USB_LOG_PREFIX \
+ ": " format "\n" , ## arg); \
+} while (0)
+
+enum {
+ TT3650_CMD_CI_TEST = 0x40,
+ TT3650_CMD_CI_RD_CTRL,
+ TT3650_CMD_CI_WR_CTRL,
+ TT3650_CMD_CI_RD_ATTR,
+ TT3650_CMD_CI_WR_ATTR,
+ TT3650_CMD_CI_RESET,
+ TT3650_CMD_CI_SET_VIDEO_PORT
+};
+
+
+static struct stb0899_postproc pctv45e_postproc[] = {
+ { PCTV_LED_GPIO, STB0899_GPIOPULLUP },
+ { 0, 0 }
+};
+
+/*
+ * stores all private variables for communication with the PCTV452e DVB-S2
+ */
+struct pctv452e_state {
+ struct dvb_ca_en50221 ca;
+ struct mutex ca_mutex;
+
+ u8 c; /* transaction counter, wraps around... */
+ u8 initialized; /* set to 1 if 0x15 has been sent */
+ u16 last_rc_key;
+};
+
+static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
+ unsigned int write_len, unsigned int read_len)
+{
+ struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 buf[64];
+ u8 id;
+ unsigned int rlen;
+ int ret;
+
+ BUG_ON(NULL == data && 0 != (write_len | read_len));
+ BUG_ON(write_len > 64 - 4);
+ BUG_ON(read_len > 64 - 4);
+
+ id = state->c++;
+
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = cmd;
+ buf[3] = write_len;
+
+ memcpy(buf + 4, data, write_len);
+
+ rlen = (read_len > 0) ? 64 : 0;
+ ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+ buf, rlen, /* delay_ms */ 0);
+ if (0 != ret)
+ goto failed;
+
+ ret = -EIO;
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ goto failed;
+
+ memcpy(data, buf + 4, read_len);
+
+ return 0;
+
+failed:
+ err("CI error %d; %02X %02X %02X -> %02X %02X %02X.",
+ ret, SYNC_BYTE_OUT, id, cmd, buf[0], buf[1], buf[2]);
+
+ return ret;
+}
+
+static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca,
+ u8 cmd, u8 *data, unsigned int write_len,
+ unsigned int read_len)
+{
+ struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ int ret;
+
+ mutex_lock(&state->ca_mutex);
+ ret = tt3650_ci_msg(d, cmd, data, write_len, read_len);
+ mutex_unlock(&state->ca_mutex);
+
+ return ret;
+}
+
+static int tt3650_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
+ int slot, int address)
+{
+ u8 buf[3];
+ int ret;
+
+ if (0 != slot)
+ return -EINVAL;
+
+ buf[0] = (address >> 8) & 0x0F;
+ buf[1] = address;
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_ATTR, buf, 2, 3);
+
+ ci_dbg("%s %04x -> %d 0x%02x",
+ __func__, address, ret, buf[2]);
+
+ if (ret < 0)
+ return ret;
+
+ return buf[2];
+}
+
+static int tt3650_ci_write_attribute_mem(struct dvb_ca_en50221 *ca,
+ int slot, int address, u8 value)
+{
+ u8 buf[3];
+
+ ci_dbg("%s %d 0x%04x 0x%02x",
+ __func__, slot, address, value);
+
+ if (0 != slot)
+ return -EINVAL;
+
+ buf[0] = (address >> 8) & 0x0F;
+ buf[1] = address;
+ buf[2] = value;
+
+ return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_ATTR, buf, 3, 3);
+}
+
+static int tt3650_ci_read_cam_control(struct dvb_ca_en50221 *ca,
+ int slot,
+ u8 address)
+{
+ u8 buf[2];
+ int ret;
+
+ if (0 != slot)
+ return -EINVAL;
+
+ buf[0] = address & 3;
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_CTRL, buf, 1, 2);
+
+ ci_dbg("%s 0x%02x -> %d 0x%02x",
+ __func__, address, ret, buf[1]);
+
+ if (ret < 0)
+ return ret;
+
+ return buf[1];
+}
+
+static int tt3650_ci_write_cam_control(struct dvb_ca_en50221 *ca,
+ int slot,
+ u8 address,
+ u8 value)
+{
+ u8 buf[2];
+
+ ci_dbg("%s %d 0x%02x 0x%02x",
+ __func__, slot, address, value);
+
+ if (0 != slot)
+ return -EINVAL;
+
+ buf[0] = address;
+ buf[1] = value;
+
+ return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_CTRL, buf, 2, 2);
+}
+
+static int tt3650_ci_set_video_port(struct dvb_ca_en50221 *ca,
+ int slot,
+ int enable)
+{
+ u8 buf[1];
+ int ret;
+
+ ci_dbg("%s %d %d", __func__, slot, enable);
+
+ if (0 != slot)
+ return -EINVAL;
+
+ enable = !!enable;
+ buf[0] = enable;
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ if (enable != buf[0]) {
+ err("CI not %sabled.", enable ? "en" : "dis");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tt3650_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
+{
+ return tt3650_ci_set_video_port(ca, slot, /* enable */ 0);
+}
+
+static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
+{
+ return tt3650_ci_set_video_port(ca, slot, /* enable */ 1);
+}
+
+static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 buf[1];
+ int ret;
+
+ ci_dbg("%s %d", __func__, slot);
+
+ if (0 != slot)
+ return -EINVAL;
+
+ buf[0] = 0;
+
+ mutex_lock(&state->ca_mutex);
+
+ ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1);
+ if (0 != ret)
+ goto failed;
+
+ msleep(500);
+
+ buf[0] = 1;
+
+ ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1);
+ if (0 != ret)
+ goto failed;
+
+ msleep(500);
+
+ buf[0] = 0; /* FTA */
+
+ ret = tt3650_ci_msg(d, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1);
+
+ failed:
+ mutex_unlock(&state->ca_mutex);
+
+ return ret;
+}
+
+static int tt3650_ci_poll_slot_status(struct dvb_ca_en50221 *ca,
+ int slot,
+ int open)
+{
+ u8 buf[1];
+ int ret;
+
+ if (0 != slot)
+ return -EINVAL;
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_TEST, buf, 0, 1);
+ if (0 != ret)
+ return ret;
+
+ if (1 == buf[0])
+ return DVB_CA_EN50221_POLL_CAM_PRESENT |
+ DVB_CA_EN50221_POLL_CAM_READY;
+
+ return 0;
+
+}
+
+static void tt3650_ci_uninit(struct dvb_usb_device *d)
+{
+ struct pctv452e_state *state;
+
+ ci_dbg("%s", __func__);
+
+ if (NULL == d)
+ return;
+
+ state = (struct pctv452e_state *)d->priv;
+ if (NULL == state)
+ return;
+
+ if (NULL == state->ca.data)
+ return;
+
+ /* Error ignored. */
+ tt3650_ci_set_video_port(&state->ca, /* slot */ 0, /* enable */ 0);
+
+ dvb_ca_en50221_release(&state->ca);
+
+ memset(&state->ca, 0, sizeof(state->ca));
+}
+
+static int tt3650_ci_init(struct dvb_usb_adapter *a)
+{
+ struct dvb_usb_device *d = a->dev;
+ struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ int ret;
+
+ ci_dbg("%s", __func__);
+
+ mutex_init(&state->ca_mutex);
+
+ state->ca.owner = THIS_MODULE;
+ state->ca.read_attribute_mem = tt3650_ci_read_attribute_mem;
+ state->ca.write_attribute_mem = tt3650_ci_write_attribute_mem;
+ state->ca.read_cam_control = tt3650_ci_read_cam_control;
+ state->ca.write_cam_control = tt3650_ci_write_cam_control;
+ state->ca.slot_reset = tt3650_ci_slot_reset;
+ state->ca.slot_shutdown = tt3650_ci_slot_shutdown;
+ state->ca.slot_ts_enable = tt3650_ci_slot_ts_enable;
+ state->ca.poll_slot_status = tt3650_ci_poll_slot_status;
+ state->ca.data = d;
+
+ ret = dvb_ca_en50221_init(&a->dvb_adap,
+ &state->ca,
+ /* flags */ 0,
+ /* n_slots */ 1);
+ if (0 != ret) {
+ err("Cannot initialize CI: Error %d.", ret);
+ memset(&state->ca, 0, sizeof(state->ca));
+ return ret;
+ }
+
+ info("CI initialized.");
+
+ return 0;
+}
+
+#define CMD_BUFFER_SIZE 0x28
+static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
+ const u8 *snd_buf, u8 snd_len,
+ u8 *rcv_buf, u8 rcv_len)
+{
+ struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 buf[64];
+ u8 id;
+ int ret;
+
+ id = state->c++;
+
+ ret = -EINVAL;
+ if (snd_len > 64 - 7 || rcv_len > 64 - 7)
+ goto failed;
+
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = PCTV_CMD_I2C;
+ buf[3] = snd_len + 3;
+ buf[4] = addr << 1;
+ buf[5] = snd_len;
+ buf[6] = rcv_len;
+
+ memcpy(buf + 7, snd_buf, snd_len);
+
+ ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+ buf, /* rcv_len */ 64,
+ /* delay_ms */ 0);
+ if (ret < 0)
+ goto failed;
+
+ /* TT USB protocol error. */
+ ret = -EIO;
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ goto failed;
+
+ /* I2C device didn't respond as expected. */
+ ret = -EREMOTEIO;
+ if (buf[5] < snd_len || buf[6] < rcv_len)
+ goto failed;
+
+ memcpy(rcv_buf, buf + 7, rcv_len);
+
+ return rcv_len;
+
+failed:
+ err("I2C error %d; %02X %02X %02X %02X %02X -> "
+ "%02X %02X %02X %02X %02X.",
+ ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
+ buf[0], buf[1], buf[4], buf[5], buf[6]);
+
+ return ret;
+}
+
+static int pctv452e_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msg,
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adapter);
+ int i;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ for (i = 0; i < num; i++) {
+ u8 addr, snd_len, rcv_len, *snd_buf, *rcv_buf;
+ int ret;
+
+ if (msg[i].flags & I2C_M_RD) {
+ addr = msg[i].addr;
+ snd_buf = NULL;
+ snd_len = 0;
+ rcv_buf = msg[i].buf;
+ rcv_len = msg[i].len;
+ } else {
+ addr = msg[i].addr;
+ snd_buf = msg[i].buf;
+ snd_len = msg[i].len;
+ rcv_buf = NULL;
+ rcv_len = 0;
+ }
+
+ ret = pctv452e_i2c_msg(d, addr, snd_buf, snd_len, rcv_buf,
+ rcv_len);
+ if (ret < rcv_len)
+ break;
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+ return i;
+}
+
+static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
+{
+ struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 };
+ u8 rx[PCTV_ANSWER_LEN];
+ int ret;
+
+ info("%s: %d\n", __func__, i);
+
+ if (!i)
+ return 0;
+
+ if (state->initialized)
+ return 0;
+
+ /* hmm, where should this go? */
+ ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
+ if (ret != 0)
+ info("%s: Warning set interface returned: %d\n",
+ __func__, ret);
+
+ /* this is a one-time initialization, don't know where to put it */
+ b0[1] = state->c++;
+ /* reset board */
+ ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ if (ret)
+ return ret;
+
+ b0[1] = state->c++;
+ b0[4] = 1;
+ /* reset board (again?) */
+ ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ if (ret)
+ return ret;
+
+ state->initialized = 1;
+
+ return 0;
+}
+
+static int pctv452e_rc_query(struct dvb_usb_device *d)
+{
+ struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 b[CMD_BUFFER_SIZE];
+ u8 rx[PCTV_ANSWER_LEN];
+ int ret, i;
+ u8 id = state->c++;
+
+ /* prepare command header */
+ b[0] = SYNC_BYTE_OUT;
+ b[1] = id;
+ b[2] = PCTV_CMD_IR;
+ b[3] = 0;
+
+ /* send ir request */
+ ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
+ if (ret != 0)
+ return ret;
+
+ if (debug > 3) {
+ info("%s: read: %2d: %02x %02x %02x: ", __func__,
+ ret, rx[0], rx[1], rx[2]);
+ for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", rx[i+3]);
+
+ info("\n");
+ }
+
+ if ((rx[3] == 9) && (rx[12] & 0x01)) {
+ /* got a "press" event */
+ state->last_rc_key = (rx[7] << 8) | rx[6];
+ if (debug > 2)
+ info("%s: cmd=0x%02x sys=0x%02x\n",
+ __func__, rx[6], rx[7]);
+
+ rc_keydown(d->rc_dev, state->last_rc_key, 0);
+ } else if (state->last_rc_key) {
+ rc_keyup(d->rc_dev);
+ state->last_rc_key = 0;
+ }
+
+ return 0;
+}
+
+static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
+{
+ const u8 mem_addr[] = { 0x1f, 0xcc };
+ u8 encoded_mac[20];
+ int ret;
+
+ ret = -EAGAIN;
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ goto failed;
+
+ ret = pctv452e_i2c_msg(d, I2C_ADDR_24C16,
+ mem_addr + 1, /* snd_len */ 1,
+ encoded_mac, /* rcv_len */ 20);
+ if (-EREMOTEIO == ret)
+ /* Caution! A 24C16 interprets 0xA2 0x1F 0xCC as a
+ byte write if /WC is low. */
+ ret = pctv452e_i2c_msg(d, I2C_ADDR_24C64,
+ mem_addr, 2,
+ encoded_mac, 20);
+
+ mutex_unlock(&d->i2c_mutex);
+
+ if (20 != ret)
+ goto failed;
+
+ ret = ttpci_eeprom_decode_mac(mac, encoded_mac);
+ if (0 != ret)
+ goto failed;
+
+ return 0;
+
+failed:
+ memset(mac, 0, 6);
+
+ return ret;
+}
+
+static const struct stb0899_s1_reg pctv452e_init_dev[] = {
+ { STB0899_DISCNTRL1, 0x26 },
+ { STB0899_DISCNTRL2, 0x80 },
+ { STB0899_DISRX_ST0, 0x04 },
+ { STB0899_DISRX_ST1, 0x20 },
+ { STB0899_DISPARITY, 0x00 },
+ { STB0899_DISFIFO, 0x00 },
+ { STB0899_DISF22, 0x99 },
+ { STB0899_DISF22RX, 0x85 }, /* 0xa8 */
+ { STB0899_ACRPRESC, 0x11 },
+ { STB0899_ACRDIV1, 0x0a },
+ { STB0899_ACRDIV2, 0x05 },
+ { STB0899_DACR1, 0x00 },
+ { STB0899_DACR2, 0x00 },
+ { STB0899_OUTCFG, 0x00 },
+ { STB0899_MODECFG, 0x00 }, /* Inversion */
+ { STB0899_IRQMSK_3, 0xf3 },
+ { STB0899_IRQMSK_2, 0xfc },
+ { STB0899_IRQMSK_1, 0xff },
+ { STB0899_IRQMSK_0, 0xff },
+ { STB0899_I2CCFG, 0x88 },
+ { STB0899_I2CRPT, 0x58 },
+ { STB0899_GPIO00CFG, 0x82 },
+ { STB0899_GPIO01CFG, 0x82 }, /* LED: 0x02 green, 0x82 orange */
+ { STB0899_GPIO02CFG, 0x82 },
+ { STB0899_GPIO03CFG, 0x82 },
+ { STB0899_GPIO04CFG, 0x82 },
+ { STB0899_GPIO05CFG, 0x82 },
+ { STB0899_GPIO06CFG, 0x82 },
+ { STB0899_GPIO07CFG, 0x82 },
+ { STB0899_GPIO08CFG, 0x82 },
+ { STB0899_GPIO09CFG, 0x82 },
+ { STB0899_GPIO10CFG, 0x82 },
+ { STB0899_GPIO11CFG, 0x82 },
+ { STB0899_GPIO12CFG, 0x82 },
+ { STB0899_GPIO13CFG, 0x82 },
+ { STB0899_GPIO14CFG, 0x82 },
+ { STB0899_GPIO15CFG, 0x82 },
+ { STB0899_GPIO16CFG, 0x82 },
+ { STB0899_GPIO17CFG, 0x82 },
+ { STB0899_GPIO18CFG, 0x82 },
+ { STB0899_GPIO19CFG, 0x82 },
+ { STB0899_GPIO20CFG, 0x82 },
+ { STB0899_SDATCFG, 0xb8 },
+ { STB0899_SCLTCFG, 0xba },
+ { STB0899_AGCRFCFG, 0x1c }, /* 0x11 DVB-S; 0x1c DVB-S2 (1c, rjkm) */
+ { STB0899_GPIO22, 0x82 },
+ { STB0899_GPIO21, 0x91 },
+ { STB0899_DIRCLKCFG, 0x82 },
+ { STB0899_CLKOUT27CFG, 0x7e },
+ { STB0899_STDBYCFG, 0x82 },
+ { STB0899_CS0CFG, 0x82 },
+ { STB0899_CS1CFG, 0x82 },
+ { STB0899_DISEQCOCFG, 0x20 },
+ { STB0899_NCOARSE, 0x15 }, /* 0x15 27MHz, F/3 198MHz, F/6 108MHz */
+ { STB0899_SYNTCTRL, 0x00 }, /* 0x00 CLKI, 0x02 XTALI */
+ { STB0899_FILTCTRL, 0x00 },
+ { STB0899_SYSCTRL, 0x00 },
+ { STB0899_STOPCLK1, 0x20 }, /* orig: 0x00 budget-ci: 0x20 */
+ { STB0899_STOPCLK2, 0x00 },
+ { STB0899_INTBUFCTRL, 0x0a },
+ { STB0899_AGC2I1, 0x00 },
+ { STB0899_AGC2I2, 0x00 },
+ { STB0899_AGCIQIN, 0x00 },
+ { STB0899_TSTRES, 0x40 }, /* rjkm */
+ { 0xffff, 0xff },
+};
+
+static const struct stb0899_s1_reg pctv452e_init_s1_demod[] = {
+ { STB0899_DEMOD, 0x00 },
+ { STB0899_RCOMPC, 0xc9 },
+ { STB0899_AGC1CN, 0x01 },
+ { STB0899_AGC1REF, 0x10 },
+ { STB0899_RTC, 0x23 },
+ { STB0899_TMGCFG, 0x4e },
+ { STB0899_AGC2REF, 0x34 },
+ { STB0899_TLSR, 0x84 },
+ { STB0899_CFD, 0xf7 },
+ { STB0899_ACLC, 0x87 },
+ { STB0899_BCLC, 0x94 },
+ { STB0899_EQON, 0x41 },
+ { STB0899_LDT, 0xf1 },
+ { STB0899_LDT2, 0xe3 },
+ { STB0899_EQUALREF, 0xb4 },
+ { STB0899_TMGRAMP, 0x10 },
+ { STB0899_TMGTHD, 0x30 },
+ { STB0899_IDCCOMP, 0xfd },
+ { STB0899_QDCCOMP, 0xff },
+ { STB0899_POWERI, 0x0c },
+ { STB0899_POWERQ, 0x0f },
+ { STB0899_RCOMP, 0x6c },
+ { STB0899_AGCIQIN, 0x80 },
+ { STB0899_AGC2I1, 0x06 },
+ { STB0899_AGC2I2, 0x00 },
+ { STB0899_TLIR, 0x30 },
+ { STB0899_RTF, 0x7f },
+ { STB0899_DSTATUS, 0x00 },
+ { STB0899_LDI, 0xbc },
+ { STB0899_CFRM, 0xea },
+ { STB0899_CFRL, 0x31 },
+ { STB0899_NIRM, 0x2b },
+ { STB0899_NIRL, 0x80 },
+ { STB0899_ISYMB, 0x1d },
+ { STB0899_QSYMB, 0xa6 },
+ { STB0899_SFRH, 0x2f },
+ { STB0899_SFRM, 0x68 },
+ { STB0899_SFRL, 0x40 },
+ { STB0899_SFRUPH, 0x2f },
+ { STB0899_SFRUPM, 0x68 },
+ { STB0899_SFRUPL, 0x40 },
+ { STB0899_EQUAI1, 0x02 },
+ { STB0899_EQUAQ1, 0xff },
+ { STB0899_EQUAI2, 0x04 },
+ { STB0899_EQUAQ2, 0x05 },
+ { STB0899_EQUAI3, 0x02 },
+ { STB0899_EQUAQ3, 0xfd },
+ { STB0899_EQUAI4, 0x03 },
+ { STB0899_EQUAQ4, 0x07 },
+ { STB0899_EQUAI5, 0x08 },
+ { STB0899_EQUAQ5, 0xf5 },
+ { STB0899_DSTATUS2, 0x00 },
+ { STB0899_VSTATUS, 0x00 },
+ { STB0899_VERROR, 0x86 },
+ { STB0899_IQSWAP, 0x2a },
+ { STB0899_ECNT1M, 0x00 },
+ { STB0899_ECNT1L, 0x00 },
+ { STB0899_ECNT2M, 0x00 },
+ { STB0899_ECNT2L, 0x00 },
+ { STB0899_ECNT3M, 0x0a },
+ { STB0899_ECNT3L, 0xad },
+ { STB0899_FECAUTO1, 0x06 },
+ { STB0899_FECM, 0x01 },
+ { STB0899_VTH12, 0xb0 },
+ { STB0899_VTH23, 0x7a },
+ { STB0899_VTH34, 0x58 },
+ { STB0899_VTH56, 0x38 },
+ { STB0899_VTH67, 0x34 },
+ { STB0899_VTH78, 0x24 },
+ { STB0899_PRVIT, 0xff },
+ { STB0899_VITSYNC, 0x19 },
+ { STB0899_RSULC, 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
+ { STB0899_TSULC, 0x42 },
+ { STB0899_RSLLC, 0x41 },
+ { STB0899_TSLPL, 0x12 },
+ { STB0899_TSCFGH, 0x0c },
+ { STB0899_TSCFGM, 0x00 },
+ { STB0899_TSCFGL, 0x00 },
+ { STB0899_TSOUT, 0x69 }, /* 0x0d for CAM */
+ { STB0899_RSSYNCDEL, 0x00 },
+ { STB0899_TSINHDELH, 0x02 },
+ { STB0899_TSINHDELM, 0x00 },
+ { STB0899_TSINHDELL, 0x00 },
+ { STB0899_TSLLSTKM, 0x1b },
+ { STB0899_TSLLSTKL, 0xb3 },
+ { STB0899_TSULSTKM, 0x00 },
+ { STB0899_TSULSTKL, 0x00 },
+ { STB0899_PCKLENUL, 0xbc },
+ { STB0899_PCKLENLL, 0xcc },
+ { STB0899_RSPCKLEN, 0xbd },
+ { STB0899_TSSTATUS, 0x90 },
+ { STB0899_ERRCTRL1, 0xb6 },
+ { STB0899_ERRCTRL2, 0x95 },
+ { STB0899_ERRCTRL3, 0x8d },
+ { STB0899_DMONMSK1, 0x27 },
+ { STB0899_DMONMSK0, 0x03 },
+ { STB0899_DEMAPVIT, 0x5c },
+ { STB0899_PLPARM, 0x19 },
+ { STB0899_PDELCTRL, 0x48 },
+ { STB0899_PDELCTRL2, 0x00 },
+ { STB0899_BBHCTRL1, 0x00 },
+ { STB0899_BBHCTRL2, 0x00 },
+ { STB0899_HYSTTHRESH, 0x77 },
+ { STB0899_MATCSTM, 0x00 },
+ { STB0899_MATCSTL, 0x00 },
+ { STB0899_UPLCSTM, 0x00 },
+ { STB0899_UPLCSTL, 0x00 },
+ { STB0899_DFLCSTM, 0x00 },
+ { STB0899_DFLCSTL, 0x00 },
+ { STB0899_SYNCCST, 0x00 },
+ { STB0899_SYNCDCSTM, 0x00 },
+ { STB0899_SYNCDCSTL, 0x00 },
+ { STB0899_ISI_ENTRY, 0x00 },
+ { STB0899_ISI_BIT_EN, 0x00 },
+ { STB0899_MATSTRM, 0xf0 },
+ { STB0899_MATSTRL, 0x02 },
+ { STB0899_UPLSTRM, 0x45 },
+ { STB0899_UPLSTRL, 0x60 },
+ { STB0899_DFLSTRM, 0xe3 },
+ { STB0899_DFLSTRL, 0x00 },
+ { STB0899_SYNCSTR, 0x47 },
+ { STB0899_SYNCDSTRM, 0x05 },
+ { STB0899_SYNCDSTRL, 0x18 },
+ { STB0899_CFGPDELSTATUS1, 0x19 },
+ { STB0899_CFGPDELSTATUS2, 0x2b },
+ { STB0899_BBFERRORM, 0x00 },
+ { STB0899_BBFERRORL, 0x01 },
+ { STB0899_UPKTERRORM, 0x00 },
+ { STB0899_UPKTERRORL, 0x00 },
+ { 0xffff, 0xff },
+};
+
+static struct stb0899_config stb0899_config = {
+ .init_dev = pctv452e_init_dev,
+ .init_s2_demod = stb0899_s2_init_2,
+ .init_s1_demod = pctv452e_init_s1_demod,
+ .init_s2_fec = stb0899_s2_init_4,
+ .init_tst = stb0899_s1_init_5,
+
+ .demod_address = I2C_ADDR_STB0899, /* I2C Address */
+ .block_sync_mode = STB0899_SYNC_FORCED, /* ? */
+
+ .xtal_freq = 27000000, /* Assume Hz ? */
+ .inversion = IQ_SWAP_ON, /* ? */
+
+ .lo_clk = 76500000,
+ .hi_clk = 99000000,
+
+ .ts_output_mode = 0, /* Use parallel mode */
+ .clock_polarity = 0,
+ .data_clk_parity = 0,
+ .fec_mode = 0,
+
+ .esno_ave = STB0899_DVBS2_ESNO_AVE,
+ .esno_quant = STB0899_DVBS2_ESNO_QUANT,
+ .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
+ .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
+ .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
+ .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
+ .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
+ .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
+ .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
+
+ .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
+ .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
+ .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
+ .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
+
+ .tuner_get_frequency = stb6100_get_frequency,
+ .tuner_set_frequency = stb6100_set_frequency,
+ .tuner_set_bandwidth = stb6100_set_bandwidth,
+ .tuner_get_bandwidth = stb6100_get_bandwidth,
+ .tuner_set_rfsiggain = NULL,
+
+ /* helper for switching LED green/orange */
+ .postproc = pctv45e_postproc
+};
+
+static struct stb6100_config stb6100_config = {
+ .tuner_address = I2C_ADDR_STB6100,
+ .refclock = 27000000
+};
+
+
+static struct i2c_algorithm pctv452e_i2c_algo = {
+ .master_xfer = pctv452e_i2c_xfer,
+ .functionality = pctv452e_i2c_func
+};
+
+static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
+{
+ struct usb_device_id *id;
+
+ a->fe_adap[0].fe = dvb_attach(stb0899_attach, &stb0899_config,
+ &a->dev->i2c_adap);
+ if (!a->fe_adap[0].fe)
+ return -ENODEV;
+ if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
+ &a->dev->i2c_adap)) == 0)
+ err("Cannot attach lnbp22\n");
+
+ id = a->dev->desc->warm_ids[0];
+ if (USB_VID_TECHNOTREND == id->idVendor
+ && USB_PID_TECHNOTREND_CONNECT_S2_3650_CI == id->idProduct)
+ /* Error ignored. */
+ tt3650_ci_init(a);
+
+ return 0;
+}
+
+static int pctv452e_tuner_attach(struct dvb_usb_adapter *a)
+{
+ if (!a->fe_adap[0].fe)
+ return -ENODEV;
+ if (dvb_attach(stb6100_attach, a->fe_adap[0].fe, &stb6100_config,
+ &a->dev->i2c_adap) == 0) {
+ err("%s failed\n", __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct usb_device_id pctv452e_usb_table[] = {
+ {USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_452E)},
+ {USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_S2_3600)},
+ {USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_S2_3650_CI)},
+ {}
+};
+MODULE_DEVICE_TABLE(usb, pctv452e_usb_table);
+
+static struct dvb_usb_device_properties pctv452e_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER, /* more ? */
+ .usb_ctrl = DEVICE_SPECIFIC,
+
+ .size_of_priv = sizeof(struct pctv452e_state),
+
+ .power_ctrl = pctv452e_power_ctrl,
+
+ .rc.core = {
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .allowed_protos = RC_TYPE_UNKNOWN,
+ .rc_query = pctv452e_rc_query,
+ .rc_interval = 100,
+ },
+
+ .num_adapters = 1,
+ .adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+ .frontend_attach = pctv452e_frontend_attach,
+ .tuner_attach = pctv452e_tuner_attach,
+
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_ISOC,
+ .count = 4,
+ .endpoint = 0x02,
+ .u = {
+ .isoc = {
+ .framesperurb = 4,
+ .framesize = 940,
+ .interval = 1
+ }
+ }
+ },
+ } },
+ } },
+
+ .i2c_algo = &pctv452e_i2c_algo,
+
+ .generic_bulk_ctrl_endpoint = 1, /* allow generic rw function */
+
+ .num_device_descs = 1,
+ .devices = {
+ { .name = "PCTV HDTV USB",
+ .cold_ids = { NULL, NULL }, /* this is a warm only device */
+ .warm_ids = { &pctv452e_usb_table[0], NULL }
+ },
+ { 0 },
+ }
+};
+
+static struct dvb_usb_device_properties tt_connect_s2_3600_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER, /* more ? */
+ .usb_ctrl = DEVICE_SPECIFIC,
+
+ .size_of_priv = sizeof(struct pctv452e_state),
+
+ .power_ctrl = pctv452e_power_ctrl,
+ .read_mac_address = pctv452e_read_mac_address,
+
+ .rc.core = {
+ .rc_codes = RC_MAP_TT_1500,
+ .allowed_protos = RC_TYPE_UNKNOWN,
+ .rc_query = pctv452e_rc_query,
+ .rc_interval = 100,
+ },
+
+ .num_adapters = 1,
+ .adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+ .frontend_attach = pctv452e_frontend_attach,
+ .tuner_attach = pctv452e_tuner_attach,
+
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_ISOC,
+ .count = 7,
+ .endpoint = 0x02,
+ .u = {
+ .isoc = {
+ .framesperurb = 4,
+ .framesize = 940,
+ .interval = 1
+ }
+ }
+ },
+
+ } },
+ } },
+
+ .i2c_algo = &pctv452e_i2c_algo,
+
+ .generic_bulk_ctrl_endpoint = 1, /* allow generic rw function */
+
+ .num_device_descs = 2,
+ .devices = {
+ { .name = "Technotrend TT Connect S2-3600",
+ .cold_ids = { NULL, NULL }, /* this is a warm only device */
+ .warm_ids = { &pctv452e_usb_table[1], NULL }
+ },
+ { .name = "Technotrend TT Connect S2-3650-CI",
+ .cold_ids = { NULL, NULL },
+ .warm_ids = { &pctv452e_usb_table[2], NULL }
+ },
+ { 0 },
+ }
+};
+
+static void pctv452e_usb_disconnect(struct usb_interface *intf)
+{
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+
+ tt3650_ci_uninit(d);
+ dvb_usb_device_exit(intf);
+}
+
+static int pctv452e_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ if (0 == dvb_usb_device_init(intf, &pctv452e_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, &tt_connect_s2_3600_properties,
+ THIS_MODULE, NULL, adapter_nr))
+ return 0;
+
+ return -ENODEV;
+}
+
+static struct usb_driver pctv452e_usb_driver = {
+ .name = "pctv452e",
+ .probe = pctv452e_usb_probe,
+ .disconnect = pctv452e_usb_disconnect,
+ .id_table = pctv452e_usb_table,
+};
+
+static int __init pctv452e_usb_init(void)
+{
+ int ret = usb_register(&pctv452e_usb_driver);
+ if (ret)
+ err("%s: usb_register failed! Error %d", __FILE__, ret);
+
+ return ret;
+}
+
+static void __exit pctv452e_usb_exit(void)
+{
+ usb_deregister(&pctv452e_usb_driver);
+}
+
+module_init(pctv452e_usb_init);
+module_exit(pctv452e_usb_exit);
+
+MODULE_AUTHOR("Dominik Kuhlen <dkuhlen@gmx.net>");
+MODULE_AUTHOR("Andre Weidemann <Andre.Weidemann@web.de>");
+MODULE_AUTHOR("Michael H. Schimek <mschimek@gmx.at>");
+MODULE_DESCRIPTION("Pinnacle PCTV HDTV USB DVB / TT connect S2-3600 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
index 473b95ed4d5..0998fe96195 100644
--- a/drivers/media/dvb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -292,7 +292,7 @@ static void technisat_usb2_green_led_control(struct work_struct *work)
{
struct technisat_usb2_state *state =
container_of(work, struct technisat_usb2_state, green_led_work.work);
- struct dvb_frontend *fe = state->dev->adapter[0].fe;
+ struct dvb_frontend *fe = state->dev->adapter[0].fe_adap[0].fe;
if (state->power_state == 0)
goto schedule;
@@ -505,14 +505,14 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
struct usb_device *udev = a->dev->udev;
int ret;
- a->fe = dvb_attach(stv090x_attach, &technisat_usb2_stv090x_config,
+ a->fe_adap[0].fe = dvb_attach(stv090x_attach, &technisat_usb2_stv090x_config,
&a->dev->i2c_adap, STV090x_DEMODULATOR_0);
- if (a->fe) {
+ if (a->fe_adap[0].fe) {
struct stv6110x_devctl *ctl;
ctl = dvb_attach(stv6110x_attach,
- a->fe,
+ a->fe_adap[0].fe,
&technisat_usb2_stv6110x_config,
&a->dev->i2c_adap);
@@ -532,8 +532,8 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
/* call the init function once to initialize
tuner's clock output divider and demod's
master clock */
- if (a->fe->ops.init)
- a->fe->ops.init(a->fe);
+ if (a->fe_adap[0].fe->ops.init)
+ a->fe_adap[0].fe->ops.init(a->fe_adap[0].fe);
if (mutex_lock_interruptible(&a->dev->i2c_mutex) < 0)
return -EAGAIN;
@@ -548,20 +548,20 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
if (ret != 0)
err("could not set IF_CLK to external");
- a->fe->ops.set_voltage = technisat_usb2_set_voltage;
+ a->fe_adap[0].fe->ops.set_voltage = technisat_usb2_set_voltage;
/* if everything was successful assign a nice name to the frontend */
- strlcpy(a->fe->ops.info.name, a->dev->desc->name,
- sizeof(a->fe->ops.info.name));
+ strlcpy(a->fe_adap[0].fe->ops.info.name, a->dev->desc->name,
+ sizeof(a->fe_adap[0].fe->ops.info.name));
} else {
- dvb_frontend_detach(a->fe);
- a->fe = NULL;
+ dvb_frontend_detach(a->fe_adap[0].fe);
+ a->fe_adap[0].fe = NULL;
}
}
technisat_usb2_set_led_timer(a->dev, 1, 1);
- return a->fe == NULL ? -ENODEV : 0;
+ return a->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
/* Remote control */
@@ -697,6 +697,8 @@ static struct dvb_usb_device_properties technisat_usb2_devices = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = technisat_usb2_frontend_attach,
.stream = {
@@ -711,7 +713,7 @@ static struct dvb_usb_device_properties technisat_usb2_devices = {
}
}
},
-
+ }},
.size_of_priv = 0,
},
},
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index 0d4709ff9cb..ea4eab8b396 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -30,18 +30,43 @@
#include "tda826x.h"
#include "tda10086.h"
#include "tda1002x.h"
+#include "tda10048.h"
#include "tda827x.h"
#include "lnbp21.h"
+/* CA */
+#include "dvb_ca_en50221.h"
/* debug */
static int dvb_usb_ttusb2_debug;
#define deb_info(args...) dprintk(dvb_usb_ttusb2_debug,0x01,args)
module_param_named(debug,dvb_usb_ttusb2_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))." DVB_USB_DEBUG_STATUS);
+static int dvb_usb_ttusb2_debug_ci;
+module_param_named(debug_ci,dvb_usb_ttusb2_debug_ci, int, 0644);
+MODULE_PARM_DESC(debug_ci, "set debugging ci." DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+#define ci_dbg(format, arg...) \
+do { \
+ if (dvb_usb_ttusb2_debug_ci) \
+ printk(KERN_DEBUG DVB_USB_LOG_PREFIX \
+ ": %s " format "\n" , __func__, ## arg); \
+} while (0)
+
+enum {
+ TT3650_CMD_CI_TEST = 0x40,
+ TT3650_CMD_CI_RD_CTRL,
+ TT3650_CMD_CI_WR_CTRL,
+ TT3650_CMD_CI_RD_ATTR,
+ TT3650_CMD_CI_WR_ATTR,
+ TT3650_CMD_CI_RESET,
+ TT3650_CMD_CI_SET_VIDEO_PORT
+};
+
struct ttusb2_state {
+ struct dvb_ca_en50221 ca;
+ struct mutex ca_mutex;
u8 id;
u16 last_rc_key;
};
@@ -78,11 +103,260 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
return 0;
}
+/* ci */
+static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len)
+{
+ int ret;
+ u8 rx[60];/* (64 -4) */
+ ret = ttusb2_msg(d, cmd, data, write_len, rx, read_len);
+ if (!ret)
+ memcpy(data, rx, read_len);
+ return ret;
+}
+
+static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len)
+{
+ struct dvb_usb_device *d = ca->data;
+ struct ttusb2_state *state = d->priv;
+ int ret;
+
+ mutex_lock(&state->ca_mutex);
+ ret = tt3650_ci_msg(d, cmd, data, write_len, read_len);
+ mutex_unlock(&state->ca_mutex);
+
+ return ret;
+}
+
+static int tt3650_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
+{
+ u8 buf[3];
+ int ret = 0;
+
+ if (slot)
+ return -EINVAL;
+
+ buf[0] = (address >> 8) & 0x0F;
+ buf[1] = address;
+
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_ATTR, buf, 2, 3);
+
+ ci_dbg("%04x -> %d 0x%02x", address, ret, buf[2]);
+
+ if (ret < 0)
+ return ret;
+
+ return buf[2];
+}
+
+static int tt3650_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
+{
+ u8 buf[3];
+
+ ci_dbg("%d 0x%04x 0x%02x", slot, address, value);
+
+ if (slot)
+ return -EINVAL;
+
+ buf[0] = (address >> 8) & 0x0F;
+ buf[1] = address;
+ buf[2] = value;
+
+ return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_ATTR, buf, 3, 3);
+}
+
+static int tt3650_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
+{
+ u8 buf[2];
+ int ret;
+
+ if (slot)
+ return -EINVAL;
+
+ buf[0] = address & 3;
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_CTRL, buf, 1, 2);
+
+ ci_dbg("0x%02x -> %d 0x%02x", address, ret, buf[1]);
+
+ if (ret < 0)
+ return ret;
+
+ return buf[1];
+}
+
+static int tt3650_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
+{
+ u8 buf[2];
+
+ ci_dbg("%d 0x%02x 0x%02x", slot, address, value);
+
+ if (slot)
+ return -EINVAL;
+
+ buf[0] = address;
+ buf[1] = value;
+
+ return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_CTRL, buf, 2, 2);
+}
+
+static int tt3650_ci_set_video_port(struct dvb_ca_en50221 *ca, int slot, int enable)
+{
+ u8 buf[1];
+ int ret;
+
+ ci_dbg("%d %d", slot, enable);
+
+ if (slot)
+ return -EINVAL;
+
+ buf[0] = enable;
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ if (enable != buf[0]) {
+ err("CI not %sabled.", enable ? "en" : "dis");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tt3650_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
+{
+ return tt3650_ci_set_video_port(ca, slot, 0);
+}
+
+static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
+{
+ return tt3650_ci_set_video_port(ca, slot, 1);
+}
+
+static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct dvb_usb_device *d = ca->data;
+ struct ttusb2_state *state = d->priv;
+ u8 buf[1];
+ int ret;
+
+ ci_dbg("%d", slot);
+
+ if (slot)
+ return -EINVAL;
+
+ buf[0] = 0;
+
+ mutex_lock(&state->ca_mutex);
+
+ ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1);
+ if (ret)
+ goto failed;
+
+ msleep(500);
+
+ buf[0] = 1;
+
+ ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1);
+ if (ret)
+ goto failed;
+
+ msleep(500);
+
+ buf[0] = 0; /* FTA */
+
+ ret = tt3650_ci_msg(d, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1);
+
+ msleep(1100);
+
+ failed:
+ mutex_unlock(&state->ca_mutex);
+
+ return ret;
+}
+
+static int tt3650_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
+{
+ u8 buf[1];
+ int ret;
+
+ if (slot)
+ return -EINVAL;
+
+ ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_TEST, buf, 0, 1);
+ if (ret)
+ return ret;
+
+ if (1 == buf[0]) {
+ return DVB_CA_EN50221_POLL_CAM_PRESENT |
+ DVB_CA_EN50221_POLL_CAM_READY;
+ }
+ return 0;
+}
+
+static void tt3650_ci_uninit(struct dvb_usb_device *d)
+{
+ struct ttusb2_state *state;
+
+ ci_dbg("");
+
+ if (NULL == d)
+ return;
+
+ state = d->priv;
+ if (NULL == state)
+ return;
+
+ if (NULL == state->ca.data)
+ return;
+
+ dvb_ca_en50221_release(&state->ca);
+
+ memset(&state->ca, 0, sizeof(state->ca));
+}
+
+static int tt3650_ci_init(struct dvb_usb_adapter *a)
+{
+ struct dvb_usb_device *d = a->dev;
+ struct ttusb2_state *state = d->priv;
+ int ret;
+
+ ci_dbg("");
+
+ mutex_init(&state->ca_mutex);
+
+ state->ca.owner = THIS_MODULE;
+ state->ca.read_attribute_mem = tt3650_ci_read_attribute_mem;
+ state->ca.write_attribute_mem = tt3650_ci_write_attribute_mem;
+ state->ca.read_cam_control = tt3650_ci_read_cam_control;
+ state->ca.write_cam_control = tt3650_ci_write_cam_control;
+ state->ca.slot_reset = tt3650_ci_slot_reset;
+ state->ca.slot_shutdown = tt3650_ci_slot_shutdown;
+ state->ca.slot_ts_enable = tt3650_ci_slot_ts_enable;
+ state->ca.poll_slot_status = tt3650_ci_poll_slot_status;
+ state->ca.data = d;
+
+ ret = dvb_ca_en50221_init(&a->dvb_adap,
+ &state->ca,
+ /* flags */ 0,
+ /* n_slots */ 1);
+ if (ret) {
+ err("Cannot initialize CI: Error %d.", ret);
+ memset(&state->ca, 0, sizeof(state->ca));
+ return ret;
+ }
+
+ info("CI initialized.");
+
+ return 0;
+}
+
static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
static u8 obuf[60], ibuf[60];
- int i,read;
+ int i, write_read, read;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
@@ -91,28 +365,35 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
warn("more than 2 i2c messages at a time is not handled yet. TODO.");
for (i = 0; i < num; i++) {
- read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
+ write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
+ read = msg[i].flags & I2C_M_RD;
- obuf[0] = (msg[i].addr << 1) | read;
- obuf[1] = msg[i].len;
+ obuf[0] = (msg[i].addr << 1) | (write_read | read);
+ if (read)
+ obuf[1] = 0;
+ else
+ obuf[1] = msg[i].len;
/* read request */
- if (read)
+ if (write_read)
obuf[2] = msg[i+1].len;
+ else if (read)
+ obuf[2] = msg[i].len;
else
obuf[2] = 0;
- memcpy(&obuf[3],msg[i].buf,msg[i].len);
+ memcpy(&obuf[3], msg[i].buf, msg[i].len);
if (ttusb2_msg(d, CMD_I2C_XFER, obuf, msg[i].len+3, ibuf, obuf[2] + 3) < 0) {
err("i2c transfer failed.");
break;
}
- if (read) {
- memcpy(msg[i+1].buf,&ibuf[3],msg[i+1].len);
+ if (write_read) {
+ memcpy(msg[i+1].buf, &ibuf[3], msg[i+1].len);
i++;
- }
+ } else if (read)
+ memcpy(msg[i].buf, &ibuf[3], msg[i].len);
}
mutex_unlock(&d->i2c_mutex);
@@ -190,12 +471,31 @@ static struct tda10023_config tda10023_config = {
.deltaf = 0xa511,
};
+static struct tda10048_config tda10048_config = {
+ .demod_address = 0x10 >> 1,
+ .output_mode = TDA10048_PARALLEL_OUTPUT,
+ .inversion = TDA10048_INVERSION_ON,
+ .dtv6_if_freq_khz = TDA10048_IF_4000,
+ .dtv7_if_freq_khz = TDA10048_IF_4500,
+ .dtv8_if_freq_khz = TDA10048_IF_5000,
+ .clk_freq_khz = TDA10048_CLK_16000,
+ .no_firmware = 1,
+ .set_pll = true,
+ .pll_m = 5,
+ .pll_n = 3,
+ .pll_p = 0,
+};
+
+static struct tda827x_config tda827x_config = {
+ .config = 0,
+};
+
static int ttusb2_frontend_tda10086_attach(struct dvb_usb_adapter *adap)
{
if (usb_set_interface(adap->dev->udev,0,3) < 0)
err("set interface to alts=3 failed");
- if ((adap->fe = dvb_attach(tda10086_attach, &tda10086_config, &adap->dev->i2c_adap)) == NULL) {
+ if ((adap->fe_adap[0].fe = dvb_attach(tda10086_attach, &tda10086_config, &adap->dev->i2c_adap)) == NULL) {
deb_info("TDA10086 attach failed\n");
return -ENODEV;
}
@@ -203,20 +503,57 @@ static int ttusb2_frontend_tda10086_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int ttusb2_ct3650_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+
+ return adap->fe_adap[0].fe->ops.i2c_gate_ctrl(adap->fe_adap[0].fe, enable);
+}
+
static int ttusb2_frontend_tda10023_attach(struct dvb_usb_adapter *adap)
{
if (usb_set_interface(adap->dev->udev, 0, 3) < 0)
err("set interface to alts=3 failed");
- if ((adap->fe = dvb_attach(tda10023_attach, &tda10023_config, &adap->dev->i2c_adap, 0x48)) == NULL) {
- deb_info("TDA10023 attach failed\n");
- return -ENODEV;
+
+ if (adap->fe_adap[0].fe == NULL) {
+ /* FE 0 DVB-C */
+ adap->fe_adap[0].fe = dvb_attach(tda10023_attach,
+ &tda10023_config, &adap->dev->i2c_adap, 0x48);
+
+ if (adap->fe_adap[0].fe == NULL) {
+ deb_info("TDA10023 attach failed\n");
+ return -ENODEV;
+ }
+ tt3650_ci_init(adap);
+ } else {
+ adap->fe_adap[1].fe = dvb_attach(tda10048_attach,
+ &tda10048_config, &adap->dev->i2c_adap);
+
+ if (adap->fe_adap[1].fe == NULL) {
+ deb_info("TDA10048 attach failed\n");
+ return -ENODEV;
+ }
+
+ /* tuner is behind TDA10023 I2C-gate */
+ adap->fe_adap[1].fe->ops.i2c_gate_ctrl = ttusb2_ct3650_i2c_gate_ctrl;
+
}
+
return 0;
}
static int ttusb2_tuner_tda827x_attach(struct dvb_usb_adapter *adap)
{
- if (dvb_attach(tda827x_attach, adap->fe, 0x61, &adap->dev->i2c_adap, NULL) == NULL) {
+ struct dvb_frontend *fe;
+
+ /* MFE: select the correct FE to attach the tuner to, since this is called twice */
+ if (adap->fe_adap[1].fe == NULL)
+ fe = adap->fe_adap[0].fe;
+ else
+ fe = adap->fe_adap[1].fe;
+
+ /* attach tuner */
+ if (dvb_attach(tda827x_attach, fe, 0x61, &adap->dev->i2c_adap, &tda827x_config) == NULL) {
printk(KERN_ERR "%s: No tda827x found!\n", __func__);
return -ENODEV;
}
@@ -225,12 +562,12 @@ static int ttusb2_tuner_tda827x_attach(struct dvb_usb_adapter *adap)
static int ttusb2_tuner_tda826x_attach(struct dvb_usb_adapter *adap)
{
- if (dvb_attach(tda826x_attach, adap->fe, 0x60, &adap->dev->i2c_adap, 0) == NULL) {
+ if (dvb_attach(tda826x_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, 0) == NULL) {
deb_info("TDA8263 attach failed\n");
return -ENODEV;
}
- if (dvb_attach(lnbp21_attach, adap->fe, &adap->dev->i2c_adap, 0, 0) == NULL) {
+ if (dvb_attach(lnbp21_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, 0, 0) == NULL) {
deb_info("LNBP21 attach failed\n");
return -ENODEV;
}
@@ -242,6 +579,14 @@ static struct dvb_usb_device_properties ttusb2_properties;
static struct dvb_usb_device_properties ttusb2_properties_s2400;
static struct dvb_usb_device_properties ttusb2_properties_ct3650;
+static void ttusb2_usb_disconnect(struct usb_interface *intf)
+{
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+
+ tt3650_ci_uninit(d);
+ dvb_usb_device_exit(intf);
+}
+
static int ttusb2_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -277,6 +622,8 @@ static struct dvb_usb_device_properties ttusb2_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = NULL, // ttusb2_streaming_ctrl,
.frontend_attach = ttusb2_frontend_tda10086_attach,
@@ -295,6 +642,7 @@ static struct dvb_usb_device_properties ttusb2_properties = {
}
}
}
+ }},
}
},
@@ -329,6 +677,8 @@ static struct dvb_usb_device_properties ttusb2_properties_s2400 = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = NULL,
.frontend_attach = ttusb2_frontend_tda10086_attach,
@@ -347,6 +697,7 @@ static struct dvb_usb_device_properties ttusb2_properties_s2400 = {
}
}
}
+ }},
}
},
@@ -383,6 +734,27 @@ static struct dvb_usb_device_properties ttusb2_properties_ct3650 = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 2,
+ .fe = {{
+ .streaming_ctrl = NULL,
+
+ .frontend_attach = ttusb2_frontend_tda10023_attach,
+ .tuner_attach = ttusb2_tuner_tda827x_attach,
+
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_ISOC,
+ .count = 5,
+ .endpoint = 0x02,
+ .u = {
+ .isoc = {
+ .framesperurb = 4,
+ .framesize = 940,
+ .interval = 1,
+ }
+ }
+ }
+ }, {
.streaming_ctrl = NULL,
.frontend_attach = ttusb2_frontend_tda10023_attach,
@@ -401,6 +773,7 @@ static struct dvb_usb_device_properties ttusb2_properties_ct3650 = {
}
}
}
+ }},
},
},
@@ -422,7 +795,7 @@ static struct dvb_usb_device_properties ttusb2_properties_ct3650 = {
static struct usb_driver ttusb2_driver = {
.name = "dvb_usb_ttusb2",
.probe = ttusb2_probe,
- .disconnect = dvb_usb_device_exit,
+ .disconnect = ttusb2_usb_disconnect,
.id_table = ttusb2_table,
};
diff --git a/drivers/media/dvb/dvb-usb/umt-010.c b/drivers/media/dvb/dvb-usb/umt-010.c
index 118aab1a3e5..463673a5c2b 100644
--- a/drivers/media/dvb/dvb-usb/umt-010.c
+++ b/drivers/media/dvb/dvb-usb/umt-010.c
@@ -60,14 +60,14 @@ static int umt_mt352_frontend_attach(struct dvb_usb_adapter *adap)
umt_config.demod_init = umt_mt352_demod_init;
umt_config.demod_address = 0xf;
- adap->fe = dvb_attach(mt352_attach, &umt_config, &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach, &umt_config, &adap->dev->i2c_adap);
return 0;
}
static int umt_tuner_attach (struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, DVB_PLL_TUA6034);
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_TUA6034);
return 0;
}
@@ -100,6 +100,8 @@ static struct dvb_usb_device_properties umt_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.streaming_ctrl = dibusb2_0_streaming_ctrl,
.frontend_attach = umt_mt352_frontend_attach,
.tuner_attach = umt_tuner_attach,
@@ -115,7 +117,7 @@ static struct dvb_usb_device_properties umt_properties = {
}
}
},
-
+ }},
.size_of_priv = sizeof(struct dibusb_state),
}
},
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index 86d68933b6b..d62ee0f5a16 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -148,7 +148,7 @@ static int usb_bulk_urb_init(struct usb_data_stream *stream)
if (!stream->urb_list[i]) {
deb_mem("not enough memory for urb_alloc_urb!.\n");
for (j = 0; j < i; j++)
- usb_free_urb(stream->urb_list[i]);
+ usb_free_urb(stream->urb_list[j]);
return -ENOMEM;
}
usb_fill_bulk_urb( stream->urb_list[i], stream->udev,
@@ -181,7 +181,7 @@ static int usb_isoc_urb_init(struct usb_data_stream *stream)
if (!stream->urb_list[i]) {
deb_mem("not enough memory for urb_alloc_urb!\n");
for (j = 0; j < i; j++)
- usb_free_urb(stream->urb_list[i]);
+ usb_free_urb(stream->urb_list[j]);
return -ENOMEM;
}
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index 54355f84a98..45e31f22481 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -320,7 +320,7 @@ static int vp702x_frontend_attach(struct dvb_usb_adapter *adap)
vp702x_init_pid_filter(adap);
- adap->fe = vp702x_fe_attach(adap->dev);
+ adap->fe_adap[0].fe = vp702x_fe_attach(adap->dev);
vp702x_usb_out_op(adap->dev, SET_TUNER_POWER_REQ, 1, 7, NULL, 0);
return 0;
@@ -383,6 +383,8 @@ static struct dvb_usb_device_properties vp702x_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.caps = DVB_USB_ADAP_RECEIVES_204_BYTE_TS,
.streaming_ctrl = vp702x_streaming_ctrl,
@@ -399,6 +401,7 @@ static struct dvb_usb_device_properties vp702x_properties = {
}
}
},
+ }},
.size_of_priv = sizeof(struct vp702x_adapter_state),
}
},
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 536c16c943b..90873af5682 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -214,7 +214,7 @@ static int vp7045_frontend_attach(struct dvb_usb_adapter *adap)
/* Dump the EEPROM */
/* vp7045_read_eeprom(d,buf, 255, FX2_ID_ADDR); */
- adap->fe = vp7045_fe_attach(adap->dev);
+ adap->fe_adap[0].fe = vp7045_fe_attach(adap->dev);
return 0;
}
@@ -245,6 +245,8 @@ static struct dvb_usb_device_properties vp7045_properties = {
.num_adapters = 1,
.adapter = {
{
+ .num_frontends = 1,
+ .fe = {{
.frontend_attach = vp7045_frontend_attach,
/* parameter for the MPEG2-data transfer */
.stream = {
@@ -257,6 +259,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
}
}
},
+ }},
}
},
.power_ctrl = vp7045_power_ctrl,
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 32e08e35152..4a2d2e6c91a 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -236,6 +236,13 @@ config DVB_MB86A16
A DVB-S/DSS Direct Conversion receiver.
Say Y when you want to support this frontend.
+config DVB_TDA10071
+ tristate "NXP TDA10071"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ Say Y when you want to support this frontend.
+
comment "DVB-T (terrestrial) frontends"
depends on DVB_CORE
@@ -600,6 +607,16 @@ config DVB_LNBP21
help
An SEC control chip.
+config DVB_LNBP22
+ tristate "LNBP22 SEC controllers"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ LNB power supply and control voltage
+ regulator chip with step-up converter
+ and I2C interface.
+ Say Y when you want to support this chip.
+
config DVB_ISL6405
tristate "ISL6405 SEC controller"
depends on DVB_CORE && I2C
@@ -621,6 +638,11 @@ config DVB_ISL6423
help
A SEC controller chip from Intersil
+config DVB_A8293
+ tristate "Allegro A8293"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+
config DVB_LGS8GL5
tristate "Silicon Legend LGS-8GL5 demodulator (OFDM)"
depends on DVB_CORE && I2C
@@ -661,6 +683,14 @@ config DVB_IX2505V
help
A DVB-S tuner module. Say Y when you want to support this frontend.
+config DVB_IT913X_FE
+ tristate "it913x frontend and it9137 tuner"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-T tuner module.
+ Say Y when you want to support this frontend.
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 6a6ba053ead..f639f678155 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -2,8 +2,8 @@
# Makefile for the kernel DVB frontend device drivers.
#
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
-EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+ccflags-y += -Idrivers/media/dvb/dvb-core/
+ccflags-y += -Idrivers/media/common/tuners/
stb0899-objs = stb0899_drv.o stb0899_algo.o
stv0900-objs = stv0900_core.o stv0900_sw.o
@@ -52,6 +52,7 @@ obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
+obj-$(CONFIG_DVB_LNBP22) += lnbp22.o
obj-$(CONFIG_DVB_ISL6405) += isl6405.o
obj-$(CONFIG_DVB_ISL6421) += isl6421.o
obj-$(CONFIG_DVB_TDA10086) += tda10086.o
@@ -91,4 +92,7 @@ obj-$(CONFIG_DVB_STV0367) += stv0367.o
obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
obj-$(CONFIG_DVB_DRXK) += drxk.o
obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
+obj-$(CONFIG_DVB_IT913X_FE) += it913x-fe.o
+obj-$(CONFIG_DVB_A8293) += a8293.o
+obj-$(CONFIG_DVB_TDA10071) += tda10071.o
diff --git a/drivers/media/dvb/frontends/a8293.c b/drivers/media/dvb/frontends/a8293.c
new file mode 100644
index 00000000000..bb56497e940
--- /dev/null
+++ b/drivers/media/dvb/frontends/a8293.c
@@ -0,0 +1,184 @@
+/*
+ * Allegro A8293 SEC driver
+ *
+ * Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "dvb_frontend.h"
+#include "a8293.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+#define LOG_PREFIX "a8293"
+
+#undef dbg
+#define dbg(f, arg...) \
+ if (debug) \
+ printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+
+struct a8293_priv {
+ struct i2c_adapter *i2c;
+ const struct a8293_config *cfg;
+ u8 reg[2];
+};
+
+static int a8293_i2c(struct a8293_priv *priv, u8 *val, int len, bool rd)
+{
+ int ret;
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ if (rd)
+ msg[0].flags = I2C_M_RD;
+ else
+ msg[0].flags = 0;
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ warn("i2c failed=%d rd=%d", ret, rd);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+static int a8293_wr(struct a8293_priv *priv, u8 *val, int len)
+{
+ return a8293_i2c(priv, val, len, 0);
+}
+
+static int a8293_rd(struct a8293_priv *priv, u8 *val, int len)
+{
+ return a8293_i2c(priv, val, len, 1);
+}
+
+static int a8293_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t fe_sec_voltage)
+{
+ struct a8293_priv *priv = fe->sec_priv;
+ int ret;
+
+ dbg("%s: fe_sec_voltage=%d", __func__, fe_sec_voltage);
+
+ switch (fe_sec_voltage) {
+ case SEC_VOLTAGE_OFF:
+ /* ENB=0 */
+ priv->reg[0] = 0x10;
+ break;
+ case SEC_VOLTAGE_13:
+ /* VSEL0=1, VSEL1=0, VSEL2=0, VSEL3=0, ENB=1*/
+ priv->reg[0] = 0x31;
+ break;
+ case SEC_VOLTAGE_18:
+ /* VSEL0=0, VSEL1=0, VSEL2=0, VSEL3=1, ENB=1*/
+ priv->reg[0] = 0x38;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ };
+
+ ret = a8293_wr(priv, &priv->reg[0], 1);
+ if (ret)
+ goto err;
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static void a8293_release_sec(struct dvb_frontend *fe)
+{
+ dbg("%s:", __func__);
+
+ a8293_set_voltage(fe, SEC_VOLTAGE_OFF);
+
+ kfree(fe->sec_priv);
+ fe->sec_priv = NULL;
+}
+
+struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct a8293_config *cfg)
+{
+ int ret;
+ struct a8293_priv *priv = NULL;
+ u8 buf[2];
+
+ /* allocate memory for the internal priv */
+ priv = kzalloc(sizeof(struct a8293_priv), GFP_KERNEL);
+ if (priv == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* setup the priv */
+ priv->i2c = i2c;
+ priv->cfg = cfg;
+ fe->sec_priv = priv;
+
+ /* check if the SEC is there */
+ ret = a8293_rd(priv, buf, 2);
+ if (ret)
+ goto err;
+
+ /* ENB=0 */
+ priv->reg[0] = 0x10;
+ ret = a8293_wr(priv, &priv->reg[0], 1);
+ if (ret)
+ goto err;
+
+ /* TMODE=0, TGATE=1 */
+ priv->reg[1] = 0x82;
+ ret = a8293_wr(priv, &priv->reg[1], 1);
+ if (ret)
+ goto err;
+
+ info("Allegro A8293 SEC attached.");
+
+ fe->ops.release_sec = a8293_release_sec;
+
+ /* override frontend ops */
+ fe->ops.set_voltage = a8293_set_voltage;
+
+ return fe;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ kfree(priv);
+ return NULL;
+}
+EXPORT_SYMBOL(a8293_attach);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Allegro A8293 SEC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/tda18212_priv.h b/drivers/media/dvb/frontends/a8293.h
index 9adff9356b7..ed29e5504f7 100644
--- a/drivers/media/common/tuners/tda18212_priv.h
+++ b/drivers/media/dvb/frontends/a8293.h
@@ -1,5 +1,5 @@
/*
- * NXP TDA18212HN silicon tuner driver
+ * Allegro A8293 SEC driver
*
* Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
*
@@ -18,27 +18,24 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#ifndef TDA18212_PRIV_H
-#define TDA18212_PRIV_H
+#ifndef A8293_H
+#define A8293_H
-#include "tda18212.h"
-
-#define LOG_PREFIX "tda18212"
-
-#undef dbg
-#define dbg(f, arg...) \
- if (debug) \
- printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef err
-#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
-#undef info
-#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef warn
-#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
-
-struct tda18212_priv {
- struct tda18212_config *cfg;
- struct i2c_adapter *i2c;
+struct a8293_config {
+ u8 i2c_addr;
};
+#if defined(CONFIG_DVB_A8293) || \
+ (defined(CONFIG_DVB_A8293_MODULE) && defined(MODULE))
+extern struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct a8293_config *cfg);
+#else
+static inline struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct a8293_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
#endif
+
+#endif /* A8293_H */
diff --git a/drivers/media/dvb/frontends/cxd2820r.h b/drivers/media/dvb/frontends/cxd2820r.h
index 2906582dc94..03cab7b547f 100644
--- a/drivers/media/dvb/frontends/cxd2820r.h
+++ b/drivers/media/dvb/frontends/cxd2820r.h
@@ -93,9 +93,6 @@ extern struct dvb_frontend *cxd2820r_attach(
struct i2c_adapter *i2c,
struct dvb_frontend *fe
);
-extern struct i2c_adapter *cxd2820r_get_tuner_i2c_adapter(
- struct dvb_frontend *fe
-);
#else
static inline struct dvb_frontend *cxd2820r_attach(
const struct cxd2820r_config *config,
@@ -106,12 +103,6 @@ static inline struct dvb_frontend *cxd2820r_attach(
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-static inline struct i2c_adapter *cxd2820r_get_tuner_i2c_adapter(
- struct dvb_frontend *fe
-)
-{
- return NULL;
-}
#endif
diff --git a/drivers/media/dvb/frontends/cxd2820r_c.c b/drivers/media/dvb/frontends/cxd2820r_c.c
index 3c07d400731..b85f5011e34 100644
--- a/drivers/media/dvb/frontends/cxd2820r_c.c
+++ b/drivers/media/dvb/frontends/cxd2820r_c.c
@@ -335,4 +335,3 @@ int cxd2820r_get_tune_settings_c(struct dvb_frontend *fe,
return 0;
}
-
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index d416e85589e..036480f967b 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -727,72 +727,22 @@ static void cxd2820r_release(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
dbg("%s", __func__);
- if (fe->ops.info.type == FE_OFDM) {
- i2c_del_adapter(&priv->tuner_i2c_adapter);
+ if (fe->ops.info.type == FE_OFDM)
kfree(priv);
- }
return;
}
-static u32 cxd2820r_tuner_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C;
-}
-
-static int cxd2820r_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg msg[], int num)
-{
- struct cxd2820r_priv *priv = i2c_get_adapdata(i2c_adap);
- int ret;
- u8 *obuf = kmalloc(msg[0].len + 2, GFP_KERNEL);
- struct i2c_msg msg2[2] = {
- {
- .addr = priv->cfg.i2c_address,
- .flags = 0,
- .len = msg[0].len + 2,
- .buf = obuf,
- }, {
- .addr = priv->cfg.i2c_address,
- .flags = I2C_M_RD,
- .len = msg[1].len,
- .buf = msg[1].buf,
- }
- };
-
- if (!obuf)
- return -ENOMEM;
-
- obuf[0] = 0x09;
- obuf[1] = (msg[0].addr << 1);
- if (num == 2) { /* I2C read */
- obuf[1] = (msg[0].addr << 1) | I2C_M_RD; /* I2C RD flag */
- msg2[0].len = msg[0].len + 2 - 1; /* '-1' maybe HW bug ? */
- }
- memcpy(&obuf[2], msg[0].buf, msg[0].len);
-
- ret = i2c_transfer(priv->i2c, msg2, num);
- if (ret < 0)
- warn("tuner i2c failed ret:%d", ret);
-
- kfree(obuf);
-
- return ret;
-}
-
-static struct i2c_algorithm cxd2820r_tuner_i2c_algo = {
- .master_xfer = cxd2820r_tuner_i2c_xfer,
- .functionality = cxd2820r_tuner_i2c_func,
-};
-
-struct i2c_adapter *cxd2820r_get_tuner_i2c_adapter(struct dvb_frontend *fe)
+static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- return &priv->tuner_i2c_adapter;
+ dbg("%s: %d", __func__, enable);
+
+ /* Bit 0 of reg 0xdb in bank 0x00 controls I2C repeater */
+ return cxd2820r_wr_reg_mask(priv, 0xdb, enable ? 1 : 0, 0x1);
}
-EXPORT_SYMBOL(cxd2820r_get_tuner_i2c_adapter);
-static struct dvb_frontend_ops cxd2820r_ops[2];
+static const struct dvb_frontend_ops cxd2820r_ops[2];
struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
struct i2c_adapter *i2c, struct dvb_frontend *fe)
@@ -831,18 +781,6 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
priv->fe[0].demodulator_priv = priv;
priv->fe[1].demodulator_priv = priv;
- /* create tuner i2c adapter */
- strlcpy(priv->tuner_i2c_adapter.name,
- "CXD2820R tuner I2C adapter",
- sizeof(priv->tuner_i2c_adapter.name));
- priv->tuner_i2c_adapter.algo = &cxd2820r_tuner_i2c_algo;
- priv->tuner_i2c_adapter.algo_data = NULL;
- i2c_set_adapdata(&priv->tuner_i2c_adapter, priv);
- if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) {
- err("tuner I2C bus could not be initialized");
- goto error;
- }
-
return &priv->fe[0];
} else {
@@ -858,7 +796,7 @@ error:
}
EXPORT_SYMBOL(cxd2820r_attach);
-static struct dvb_frontend_ops cxd2820r_ops[2] = {
+static const struct dvb_frontend_ops cxd2820r_ops[2] = {
{
/* DVB-T/T2 */
.info = {
@@ -883,6 +821,7 @@ static struct dvb_frontend_ops cxd2820r_ops[2] = {
.sleep = cxd2820r_sleep,
.get_tune_settings = cxd2820r_get_tune_settings,
+ .i2c_gate_ctrl = cxd2820r_i2c_gate_ctrl,
.get_frontend = cxd2820r_get_frontend,
@@ -911,6 +850,7 @@ static struct dvb_frontend_ops cxd2820r_ops[2] = {
.sleep = cxd2820r_sleep,
.get_tune_settings = cxd2820r_get_tune_settings,
+ .i2c_gate_ctrl = cxd2820r_i2c_gate_ctrl,
.set_frontend = cxd2820r_set_frontend,
.get_frontend = cxd2820r_get_frontend,
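
The cxd2820r hunks above drop the demodulator's private tuner I2C adapter and route tuner traffic through an i2c_gate_ctrl callback instead: the bridge opens the repeater gate (bit 0 of register 0xdb), talks to the tuner on the shared bus, then closes the gate again. The following is only a minimal, self-contained C sketch of that control flow, with invented names and plain function pointers standing in for dvb_frontend_ops; it is not taken from the patch.

#include <stdio.h>

/* Hypothetical stand-in for a demod that can gate tuner I2C traffic. */
struct demod {
	int gate_open;                      /* models bit 0 of reg 0xdb      */
	int (*i2c_gate_ctrl)(struct demod *d, int enable);
};

static int gate_ctrl(struct demod *d, int enable)
{
	d->gate_open = enable ? 1 : 0;      /* the real driver writes a register */
	printf("gate %s\n", enable ? "opened" : "closed");
	return 0;
}

/* Tuner access always brackets the transfer with gate open/close. */
static int tuner_write(struct demod *d, unsigned char reg, unsigned char val)
{
	int ret = 0;

	if (d->i2c_gate_ctrl)
		d->i2c_gate_ctrl(d, 1);
	if (!d->gate_open)
		ret = -1;                   /* tuner unreachable behind a closed gate */
	else
		printf("tuner reg 0x%02x <= 0x%02x\n", reg, val);
	if (d->i2c_gate_ctrl)
		d->i2c_gate_ctrl(d, 0);
	return ret;
}

int main(void)
{
	struct demod d = { .gate_open = 0, .i2c_gate_ctrl = gate_ctrl };

	return tuner_write(&d, 0x10, 0xab) ? 1 : 0;
}
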
diff --git a/drivers/media/dvb/frontends/cxd2820r_priv.h b/drivers/media/dvb/frontends/cxd2820r_priv.h
index 0c0ebc9d5c4..95539134efd 100644
--- a/drivers/media/dvb/frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb/frontends/cxd2820r_priv.h
@@ -50,7 +50,6 @@ struct cxd2820r_priv {
struct i2c_adapter *i2c;
struct dvb_frontend fe[2];
struct cxd2820r_config cfg;
- struct i2c_adapter tuner_i2c_adapter;
struct mutex fe_lock; /* FE lock */
int active_fe:2; /* FE lock, -1=NONE, 0=DVB-T/T2, 1=DVB-C */
diff --git a/drivers/media/dvb/frontends/cxd2820r_t.c b/drivers/media/dvb/frontends/cxd2820r_t.c
index 6582564c930..a04f9c81010 100644
--- a/drivers/media/dvb/frontends/cxd2820r_t.c
+++ b/drivers/media/dvb/frontends/cxd2820r_t.c
@@ -446,4 +446,3 @@ int cxd2820r_get_tune_settings_t(struct dvb_frontend *fe,
return 0;
}
-
diff --git a/drivers/media/dvb/frontends/cxd2820r_t2.c b/drivers/media/dvb/frontends/cxd2820r_t2.c
index c47b35c8acf..6548588309f 100644
--- a/drivers/media/dvb/frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb/frontends/cxd2820r_t2.c
@@ -420,4 +420,3 @@ int cxd2820r_get_tune_settings_t2(struct dvb_frontend *fe,
return 0;
}
-
diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
index 1d47d4da7d4..dc1cb17a6ea 100644
--- a/drivers/media/dvb/frontends/dib0070.c
+++ b/drivers/media/dvb/frontends/dib0070.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/mutex.h>
#include "dvb_frontend.h"
@@ -78,10 +79,18 @@ struct dib0070_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[3];
u8 i2c_read_buffer[2];
+ struct mutex i2c_buffer_lock;
};
-static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
+static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
state->i2c_write_buffer[0] = reg;
memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
@@ -96,13 +105,23 @@ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
printk(KERN_WARNING "DiB0070 I2C read failed\n");
- return 0;
- }
- return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ ret = 0;
+ } else
+ ret = (state->i2c_read_buffer[0] << 8)
+ | state->i2c_read_buffer[1];
+
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
{
+ int ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
state->i2c_write_buffer[0] = reg;
state->i2c_write_buffer[1] = val >> 8;
state->i2c_write_buffer[2] = val & 0xff;
@@ -115,9 +134,12 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
printk(KERN_WARNING "DiB0070 I2C write failed\n");
- return -EREMOTEIO;
- }
- return 0;
+ ret = -EREMOTEIO;
+ } else
+ ret = 0;
+
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
#define HARD_RESET(state) do { \
@@ -734,6 +756,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
state->cfg = cfg;
state->i2c = i2c;
state->fe = fe;
+ mutex_init(&state->i2c_buffer_lock);
fe->tuner_priv = state;
if (dib0070_reset(fe) != 0)
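
The dib0070 change above serializes access to the per-device i2c_write_buffer/i2c_read_buffer scratch space with a mutex, since concurrent callers sharing those buffers would otherwise corrupt each other's messages. The same locking pattern appears in dib0090, dib7000m/p, dib8000 and dibx000_common below. A rough userspace analogue, using pthreads instead of the kernel mutex API and a simulated bus, purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Shared scratch buffers, as in the driver state, guarded by one lock. */
struct dev_state {
	unsigned char wr_buf[3];
	unsigned char rd_buf[2];
	pthread_mutex_t buf_lock;
};

/* Simulated bus transfer: echoes the register index back as data. */
static void bus_xfer(struct dev_state *s)
{
	s->rd_buf[0] = 0;
	s->rd_buf[1] = s->wr_buf[0];
}

static unsigned read_reg(struct dev_state *s, unsigned char reg)
{
	unsigned val;

	pthread_mutex_lock(&s->buf_lock);  /* the kernel code uses mutex_lock_interruptible() */
	s->wr_buf[0] = reg;
	bus_xfer(s);
	val = (s->rd_buf[0] << 8) | s->rd_buf[1];
	pthread_mutex_unlock(&s->buf_lock);
	return val;
}

int main(void)
{
	struct dev_state s;

	memset(&s, 0, sizeof(s));
	pthread_mutex_init(&s.buf_lock, NULL);
	printf("reg 0x12 = 0x%04x\n", read_reg(&s, 0x12));
	pthread_mutex_destroy(&s.buf_lock);
	return 0;
}
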
diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
index c9c935ae41e..b174d1c7858 100644
--- a/drivers/media/dvb/frontends/dib0090.c
+++ b/drivers/media/dvb/frontends/dib0090.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/mutex.h>
#include "dvb_frontend.h"
@@ -196,6 +197,7 @@ struct dib0090_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[3];
u8 i2c_read_buffer[2];
+ struct mutex i2c_buffer_lock;
};
struct dib0090_fw_state {
@@ -208,10 +210,18 @@ struct dib0090_fw_state {
struct i2c_msg msg;
u8 i2c_write_buffer[2];
u8 i2c_read_buffer[2];
+ struct mutex i2c_buffer_lock;
};
static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
state->i2c_write_buffer[0] = reg;
memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
@@ -226,14 +236,24 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
printk(KERN_WARNING "DiB0090 I2C read failed\n");
- return 0;
- }
+ ret = 0;
+ } else
+ ret = (state->i2c_read_buffer[0] << 8)
+ | state->i2c_read_buffer[1];
- return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
{
+ int ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
state->i2c_write_buffer[0] = reg & 0xff;
state->i2c_write_buffer[1] = val >> 8;
state->i2c_write_buffer[2] = val & 0xff;
@@ -246,13 +266,23 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
printk(KERN_WARNING "DiB0090 I2C write failed\n");
- return -EREMOTEIO;
- }
- return 0;
+ ret = -EREMOTEIO;
+ } else
+ ret = 0;
+
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
state->i2c_write_buffer[0] = reg;
memset(&state->msg, 0, sizeof(struct i2c_msg));
@@ -262,13 +292,24 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
state->msg.len = 2;
if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
printk(KERN_WARNING "DiB0090 I2C read failed\n");
- return 0;
- }
- return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ ret = 0;
+ } else
+ ret = (state->i2c_read_buffer[0] << 8)
+ | state->i2c_read_buffer[1];
+
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
{
+ int ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
state->i2c_write_buffer[0] = val >> 8;
state->i2c_write_buffer[1] = val & 0xff;
@@ -279,9 +320,12 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
state->msg.len = 2;
if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
printk(KERN_WARNING "DiB0090 I2C write failed\n");
- return -EREMOTEIO;
- }
- return 0;
+ ret = -EREMOTEIO;
+ } else
+ ret = 0;
+
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
#define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0)
@@ -2440,6 +2484,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
st->config = config;
st->i2c = i2c;
st->fe = fe;
+ mutex_init(&st->i2c_buffer_lock);
fe->tuner_priv = st;
if (config->wbd == NULL)
@@ -2471,6 +2516,7 @@ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_ada
st->config = config;
st->i2c = i2c;
st->fe = fe;
+ mutex_init(&st->i2c_buffer_lock);
fe->tuner_priv = st;
if (dib0090_fw_reset_digital(fe, st->config) != 0)
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index 79cb1c20df2..dbb76d75c93 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/mutex.h>
#include "dvb_frontend.h"
@@ -55,6 +56,7 @@ struct dib7000m_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[4];
u8 i2c_read_buffer[2];
+ struct mutex i2c_buffer_lock;
};
enum dib7000m_power_mode {
@@ -69,6 +71,13 @@ enum dib7000m_power_mode {
static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
state->i2c_write_buffer[0] = (reg >> 8) | 0x80;
state->i2c_write_buffer[1] = reg & 0xff;
@@ -85,11 +94,21 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
dprintk("i2c read error on %d",reg);
- return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ mutex_unlock(&state->i2c_buffer_lock);
+
+ return ret;
}
static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
{
+ int ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
state->i2c_write_buffer[1] = reg & 0xff;
state->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -101,7 +120,10 @@ static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 4;
- return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+ ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
+ -EREMOTEIO : 0);
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
static void dib7000m_write_tab(struct dib7000m_state *state, u16 *buf)
{
@@ -1385,6 +1407,7 @@ struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
demod = &st->demod;
demod->demodulator_priv = st;
memcpy(&st->demod.ops, &dib7000m_ops, sizeof(struct dvb_frontend_ops));
+ mutex_init(&st->i2c_buffer_lock);
st->timf_default = cfg->bw->timf;
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index a64a538ba36..ce8534ff142 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/mutex.h>
#include "dvb_math.h"
#include "dvb_frontend.h"
@@ -68,6 +69,7 @@ struct dib7000p_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[4];
u8 i2c_read_buffer[2];
+ struct mutex i2c_buffer_lock;
};
enum dib7000p_power_mode {
@@ -81,6 +83,13 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
state->i2c_write_buffer[0] = reg >> 8;
state->i2c_write_buffer[1] = reg & 0xff;
@@ -97,11 +106,20 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
dprintk("i2c read error on %d", reg);
- return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
{
+ int ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
state->i2c_write_buffer[1] = reg & 0xff;
state->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -113,7 +131,10 @@ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 4;
- return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+ ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
+ -EREMOTEIO : 0);
+ mutex_unlock(&state->i2c_buffer_lock);
+ return ret;
}
static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf)
@@ -1577,8 +1598,8 @@ int dib7000pc_detection(struct i2c_adapter *i2c_adap)
return -ENOMEM;
rx = kzalloc(2*sizeof(u8), GFP_KERNEL);
if (!rx) {
- goto rx_memory_error;
ret = -ENOMEM;
+ goto rx_memory_error;
}
msg[0].buf = tx;
@@ -1646,6 +1667,7 @@ int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defau
return -ENOMEM;
dpst->i2c_adap = i2c;
+ mutex_init(&dpst->i2c_buffer_lock);
for (k = no_of_demods - 1; k >= 0; k--) {
dpst->cfg = cfg[k];
@@ -2324,6 +2346,7 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
demod = &st->demod;
demod->demodulator_priv = st;
memcpy(&st->demod.ops, &dib7000p_ops, sizeof(struct dvb_frontend_ops));
+ mutex_init(&st->i2c_buffer_lock);
dib7000p_write_word(st, 1287, 0x0003); /* sram lead in, rdy */
@@ -2333,8 +2356,9 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
st->version = dib7000p_read_word(st, 897);
/* FIXME: make sure the dev.parent field is initialized, or else
- request_firmware() will hit an OOPS (this should be moved somewhere
- more common) */
+ request_firmware() will hit an OOPS (this should be moved somewhere
+ more common) */
+ st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
/* FIXME: make sure the dev.parent field is initialized, or else
request_firmware() will hit an OOPS (this should be moved somewhere
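
Besides the locking, the dib7000p diff fixes a small ordering bug in dib7000pc_detection(): the error code must be assigned before jumping to the cleanup label, otherwise the function unwinds with a stale return value. A minimal sketch of that goto-cleanup idiom, with made-up allocation sizes, just to show the order of operations:

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	unsigned char *tx, *rx;
	int ret = 0;

	tx = malloc(2);
	if (!tx)
		return -1;

	rx = malloc(2);
	if (!rx) {
		ret = -1;          /* set the error code first ...          */
		goto out_free_tx;  /* ... then jump to the unwind path      */
	}

	/* ... use tx/rx ... */
	free(rx);
out_free_tx:
	free(tx);
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}
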
diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
index 7d2ea112ae2..fe284d5292f 100644
--- a/drivers/media/dvb/frontends/dib8000.c
+++ b/drivers/media/dvb/frontends/dib8000.c
@@ -10,6 +10,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/mutex.h>
+
#include "dvb_math.h"
#include "dvb_frontend.h"
@@ -37,6 +39,7 @@ struct i2c_device {
u8 addr;
u8 *i2c_write_buffer;
u8 *i2c_read_buffer;
+ struct mutex *i2c_buffer_lock;
};
struct dib8000_state {
@@ -77,6 +80,7 @@ struct dib8000_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[4];
u8 i2c_read_buffer[2];
+ struct mutex i2c_buffer_lock;
};
enum dib8000_power_mode {
@@ -86,24 +90,39 @@ enum dib8000_power_mode {
static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
{
+ u16 ret;
struct i2c_msg msg[2] = {
- {.addr = i2c->addr >> 1, .flags = 0,
- .buf = i2c->i2c_write_buffer, .len = 2},
- {.addr = i2c->addr >> 1, .flags = I2C_M_RD,
- .buf = i2c->i2c_read_buffer, .len = 2},
+ {.addr = i2c->addr >> 1, .flags = 0, .len = 2},
+ {.addr = i2c->addr >> 1, .flags = I2C_M_RD, .len = 2},
};
+ if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
+ msg[0].buf = i2c->i2c_write_buffer;
msg[0].buf[0] = reg >> 8;
msg[0].buf[1] = reg & 0xff;
+ msg[1].buf = i2c->i2c_read_buffer;
if (i2c_transfer(i2c->adap, msg, 2) != 2)
dprintk("i2c read error on %d", reg);
- return (msg[1].buf[0] << 8) | msg[1].buf[1];
+ ret = (msg[1].buf[0] << 8) | msg[1].buf[1];
+ mutex_unlock(i2c->i2c_buffer_lock);
+ return ret;
}
static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
state->i2c_write_buffer[0] = reg >> 8;
state->i2c_write_buffer[1] = reg & 0xff;
@@ -120,7 +139,10 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
dprintk("i2c read error on %d", reg);
- return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ mutex_unlock(&state->i2c_buffer_lock);
+
+ return ret;
}
static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
@@ -135,22 +157,35 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
{
- struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0,
- .buf = i2c->i2c_write_buffer, .len = 4};
+ struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0, .len = 4};
int ret = 0;
+ if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
+ msg.buf = i2c->i2c_write_buffer;
msg.buf[0] = (reg >> 8) & 0xff;
msg.buf[1] = reg & 0xff;
msg.buf[2] = (val >> 8) & 0xff;
msg.buf[3] = val & 0xff;
ret = i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+ mutex_unlock(i2c->i2c_buffer_lock);
return ret;
}
static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
{
+ int ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
state->i2c_write_buffer[1] = reg & 0xff;
state->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -162,7 +197,11 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 4;
- return i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+ ret = (i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ?
+ -EREMOTEIO : 0);
+ mutex_unlock(&state->i2c_buffer_lock);
+
+ return ret;
}
static const s16 coeff_2k_sb_1seg_dqpsk[8] = {
@@ -2434,8 +2473,15 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
if (!client.i2c_read_buffer) {
dprintk("%s: not enough memory", __func__);
ret = -ENOMEM;
- goto error_memory;
+ goto error_memory_read;
+ }
+ client.i2c_buffer_lock = kzalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (!client.i2c_buffer_lock) {
+ dprintk("%s: not enough memory", __func__);
+ ret = -ENOMEM;
+ goto error_memory_lock;
}
+ mutex_init(client.i2c_buffer_lock);
for (k = no_of_demods - 1; k >= 0; k--) {
/* designated i2c address */
@@ -2476,8 +2522,10 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
}
error:
+ kfree(client.i2c_buffer_lock);
+error_memory_lock:
kfree(client.i2c_read_buffer);
-error_memory:
+error_memory_read:
kfree(client.i2c_write_buffer);
return ret;
@@ -2581,6 +2629,8 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
state->i2c.addr = i2c_addr;
state->i2c.i2c_write_buffer = state->i2c_write_buffer;
state->i2c.i2c_read_buffer = state->i2c_read_buffer;
+ mutex_init(&state->i2c_buffer_lock);
+ state->i2c.i2c_buffer_lock = &state->i2c_buffer_lock;
state->gpio_val = cfg->gpio_val;
state->gpio_dir = cfg->gpio_dir;
diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
index a0855883b5c..660f80661ed 100644
--- a/drivers/media/dvb/frontends/dib9000.c
+++ b/drivers/media/dvb/frontends/dib9000.c
@@ -38,6 +38,15 @@ struct i2c_device {
#define DibInitLock(lock) mutex_init(lock)
#define DibFreeLock(lock)
+struct dib9000_pid_ctrl {
+#define DIB9000_PID_FILTER_CTRL 0
+#define DIB9000_PID_FILTER 1
+ u8 cmd;
+ u8 id;
+ u16 pid;
+ u8 onoff;
+};
+
struct dib9000_state {
struct i2c_device i2c;
@@ -99,6 +108,10 @@ struct dib9000_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[255];
u8 i2c_read_buffer[255];
+ DIB_LOCK demod_lock;
+ u8 get_frontend_internal;
+ struct dib9000_pid_ctrl pid_ctrl[10];
+ s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */
};
static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1167,8 +1180,8 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- goto error;
ret = -EIO;
+ goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION,
@@ -1743,19 +1756,56 @@ EXPORT_SYMBOL(dib9000_set_gpio);
int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
struct dib9000_state *state = fe->demodulator_priv;
- u16 val = dib9000_read_word(state, 294 + 1) & 0xffef;
+ u16 val;
+ int ret;
+
+ if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
+ /* postpone the pid filtering cmd */
+ dprintk("pid filter cmd postpone");
+ state->pid_ctrl_index++;
+ state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
+ state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
+ return 0;
+ }
+
+ DibAcquireLock(&state->demod_lock);
+
+ val = dib9000_read_word(state, 294 + 1) & 0xffef;
val |= (onoff & 0x1) << 4;
dprintk("PID filter enabled %d", onoff);
- return dib9000_write_word(state, 294 + 1, val);
+ ret = dib9000_write_word(state, 294 + 1, val);
+ DibReleaseLock(&state->demod_lock);
+ return ret;
+
}
EXPORT_SYMBOL(dib9000_fw_pid_filter_ctrl);
int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib9000_state *state = fe->demodulator_priv;
+ int ret;
+
+ if (state->pid_ctrl_index != -2) {
+ /* postpone the pid filtering cmd */
+ dprintk("pid filter postpone");
+ if (state->pid_ctrl_index < 9) {
+ state->pid_ctrl_index++;
+ state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
+ state->pid_ctrl[state->pid_ctrl_index].id = id;
+ state->pid_ctrl[state->pid_ctrl_index].pid = pid;
+ state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
+ } else
+ dprintk("can not add any more pid ctrl cmd");
+ return 0;
+ }
+
+ DibAcquireLock(&state->demod_lock);
dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
- return dib9000_write_word(state, 300 + 1 + id, onoff ? (1 << 13) | pid : 0);
+ ret = dib9000_write_word(state, 300 + 1 + id,
+ onoff ? (1 << 13) | pid : 0);
+ DibReleaseLock(&state->demod_lock);
+ return ret;
}
EXPORT_SYMBOL(dib9000_fw_pid_filter);
@@ -1778,6 +1828,7 @@ static void dib9000_release(struct dvb_frontend *demod)
DibFreeLock(&state->platform.risc.mbx_lock);
DibFreeLock(&state->platform.risc.mem_lock);
DibFreeLock(&state->platform.risc.mem_mbx_lock);
+ DibFreeLock(&state->demod_lock);
dibx000_exit_i2c_master(&st->i2c_master);
i2c_del_adapter(&st->tuner_adap);
@@ -1795,14 +1846,19 @@ static int dib9000_sleep(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend;
- int ret;
+ int ret = 0;
+ DibAcquireLock(&state->demod_lock);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]);
if (ret < 0)
- return ret;
+ goto error;
}
- return dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
+ ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
+
+error:
+ DibReleaseLock(&state->demod_lock);
+ return ret;
}
static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
@@ -1816,7 +1872,10 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend, sub_index_frontend;
fe_status_t stat;
- int ret;
+ int ret = 0;
+
+ if (state->get_frontend_internal == 0)
+ DibAcquireLock(&state->demod_lock);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
@@ -1846,14 +1905,15 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
state->fe[index_frontend]->dtv_property_cache.rolloff;
}
}
- return 0;
+ ret = 0;
+ goto return_value;
}
}
/* get the channel from master chip */
ret = dib9000_fw_get_channel(fe, fep);
if (ret != 0)
- return ret;
+ goto return_value;
/* synchronize the cache with the other frontends */
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
@@ -1866,8 +1926,12 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP;
state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff;
}
+ ret = 0;
- return 0;
+return_value:
+ if (state->get_frontend_internal == 0)
+ DibReleaseLock(&state->demod_lock);
+ return ret;
}
static int dib9000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
@@ -1912,6 +1976,10 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
dprintk("dib9000: must specify bandwidth ");
return 0;
}
+
+ state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
+ DibAcquireLock(&state->demod_lock);
+
fe->dtv_property_cache.delivery_system = SYS_DVBT;
/* set the master status */
@@ -1974,13 +2042,18 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
/* check the tune result */
if (exit_condition == 1) { /* tune failed */
dprintk("tune failed");
+ DibReleaseLock(&state->demod_lock);
+ /* tune failed; put all the pid filtering cmd to junk */
+ state->pid_ctrl_index = -1;
return 0;
}
dprintk("tune success on frontend%i", index_frontend_success);
/* synchronize all the channel cache */
+ state->get_frontend_internal = 1;
dib9000_get_frontend(state->fe[0], fep);
+ state->get_frontend_internal = 0;
/* retune the other frontends with the found channel */
channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
@@ -2025,6 +2098,28 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
/* turn off the diversity for the last frontend */
dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);
+ DibReleaseLock(&state->demod_lock);
+ if (state->pid_ctrl_index >= 0) {
+ u8 index_pid_filter_cmd;
+ u8 pid_ctrl_index = state->pid_ctrl_index;
+
+ state->pid_ctrl_index = -2;
+ for (index_pid_filter_cmd = 0;
+ index_pid_filter_cmd <= pid_ctrl_index;
+ index_pid_filter_cmd++) {
+ if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER_CTRL)
+ dib9000_fw_pid_filter_ctrl(state->fe[0],
+ state->pid_ctrl[index_pid_filter_cmd].onoff);
+ else if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER)
+ dib9000_fw_pid_filter(state->fe[0],
+ state->pid_ctrl[index_pid_filter_cmd].id,
+ state->pid_ctrl[index_pid_filter_cmd].pid,
+ state->pid_ctrl[index_pid_filter_cmd].onoff);
+ }
+ }
+ /* do not postpone any more the pid filtering */
+ state->pid_ctrl_index = -2;
+
return 0;
}
@@ -2041,6 +2136,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
u8 index_frontend;
u16 lock = 0, lock_slave = 0;
+ DibAcquireLock(&state->demod_lock);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
lock_slave |= dib9000_read_lock(state->fe[index_frontend]);
@@ -2059,6 +2155,8 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
if ((lock & 0x0008) || (lock_slave & 0x0008))
*stat |= FE_HAS_LOCK;
+ DibReleaseLock(&state->demod_lock);
+
return 0;
}
@@ -2066,10 +2164,15 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 *c;
+ int ret = 0;
+ DibAcquireLock(&state->demod_lock);
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
- if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
- return -EIO;
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ ret = -EIO;
+ goto error;
+ }
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
state->i2c_read_buffer, 16 * 2);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
@@ -2077,7 +2180,10 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
c = (u16 *)state->i2c_read_buffer;
*ber = c[10] << 16 | c[11];
- return 0;
+
+error:
+ DibReleaseLock(&state->demod_lock);
+ return ret;
}
static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
@@ -2086,7 +2192,9 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
u8 index_frontend;
u16 *c = (u16 *)state->i2c_read_buffer;
u16 val;
+ int ret = 0;
+ DibAcquireLock(&state->demod_lock);
*strength = 0;
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val);
@@ -2097,8 +2205,10 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
}
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
- if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
- return -EIO;
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
+ ret = -EIO;
+ goto error;
+ }
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
@@ -2107,7 +2217,10 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
*strength = 65535;
else
*strength += val;
- return 0;
+
+error:
+ DibReleaseLock(&state->demod_lock);
+ return ret;
}
static u32 dib9000_get_snr(struct dvb_frontend *fe)
@@ -2151,6 +2264,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
u8 index_frontend;
u32 snr_master;
+ DibAcquireLock(&state->demod_lock);
snr_master = dib9000_get_snr(fe);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
snr_master += dib9000_get_snr(state->fe[index_frontend]);
@@ -2161,6 +2275,8 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
} else
*snr = 0;
+ DibReleaseLock(&state->demod_lock);
+
return 0;
}
@@ -2168,15 +2284,22 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 *c = (u16 *)state->i2c_read_buffer;
+ int ret = 0;
+ DibAcquireLock(&state->demod_lock);
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
- if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
- return -EIO;
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
+ ret = -EIO;
+ goto error;
+ }
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
*unc = c[12];
- return 0;
+
+error:
+ DibReleaseLock(&state->demod_lock);
+ return ret;
}
int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
@@ -2322,6 +2445,10 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
DibInitLock(&st->platform.risc.mbx_lock);
DibInitLock(&st->platform.risc.mem_lock);
DibInitLock(&st->platform.risc.mem_mbx_lock);
+ DibInitLock(&st->demod_lock);
+ st->get_frontend_internal = 0;
+
+ st->pid_ctrl_index = -2;
st->fe[0] = fe;
fe->demodulator_priv = st;
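
The dib9000 hunks above add a demod_lock plus a small pid_ctrl[] queue: while set_frontend holds the lock, PID-filter requests are recorded rather than executed, and they are replayed once tuning finishes. The following is a compact C model of that postpone-and-replay idea with invented names and a ten-entry queue mirroring the driver's array; it is a sketch of the technique, not the driver code.

#include <stdio.h>

enum cmd_type { CMD_FILTER_CTRL, CMD_FILTER };

struct pid_cmd { enum cmd_type cmd; int id, pid, onoff; };

struct demod {
	int tuning;              /* while set, commands are queued   */
	int qlen;                /* number of postponed commands     */
	struct pid_cmd q[10];
};

static void apply(const struct pid_cmd *c)
{
	if (c->cmd == CMD_FILTER_CTRL)
		printf("filter ctrl onoff=%d\n", c->onoff);
	else
		printf("filter id=%d pid=%d onoff=%d\n", c->id, c->pid, c->onoff);
}

static void pid_filter(struct demod *d, int id, int pid, int onoff)
{
	struct pid_cmd c = { CMD_FILTER, id, pid, onoff };

	if (d->tuning && d->qlen < 10) {   /* postpone while tuning */
		d->q[d->qlen++] = c;
		return;
	}
	apply(&c);
}

static void tune_done(struct demod *d)
{
	int i;

	d->tuning = 0;                     /* replay what was queued */
	for (i = 0; i < d->qlen; i++)
		apply(&d->q[i]);
	d->qlen = 0;
}

int main(void)
{
	struct demod d = { .tuning = 1 };

	pid_filter(&d, 0, 0x100, 1);       /* queued */
	pid_filter(&d, 1, 0x101, 1);       /* queued */
	tune_done(&d);                     /* replayed here */
	return 0;
}
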
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
index dc5d17a6757..43be7238311 100644
--- a/drivers/media/dvb/frontends/dibx000_common.c
+++ b/drivers/media/dvb/frontends/dibx000_common.c
@@ -1,4 +1,6 @@
#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
#include "dibx000_common.h"
@@ -10,6 +12,13 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
{
+ int ret;
+
+ if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
mst->i2c_write_buffer[0] = (reg >> 8) & 0xff;
mst->i2c_write_buffer[1] = reg & 0xff;
mst->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -21,11 +30,21 @@ static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
mst->msg[0].buf = mst->i2c_write_buffer;
mst->msg[0].len = 4;
- return i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
+ ret = i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
+ mutex_unlock(&mst->i2c_buffer_lock);
+
+ return ret;
}
static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
mst->i2c_write_buffer[0] = reg >> 8;
mst->i2c_write_buffer[1] = reg & 0xff;
@@ -42,7 +61,10 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
dprintk("i2c read error on %d", reg);
- return (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
+ ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
+ mutex_unlock(&mst->i2c_buffer_lock);
+
+ return ret;
}
static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
@@ -257,6 +279,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msg[], int num)
{
struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+ int ret;
if (num > 32) {
dprintk("%s: too much I2C message to be transmitted (%i).\
@@ -264,10 +287,15 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
return -ENOMEM;
}
- memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
-
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
+ if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+
+ memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
+
/* open the gate */
dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
mst->msg[0].addr = mst->i2c_addr;
@@ -282,7 +310,11 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
mst->msg[num + 1].len = 4;
- return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+ ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
+ num : -EIO);
+
+ mutex_unlock(&mst->i2c_buffer_lock);
+ return ret;
}
static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
@@ -294,6 +326,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msg[], int num)
{
struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+ int ret;
if (num > 32) {
dprintk("%s: too much I2C message to be transmitted (%i).\
@@ -301,10 +334,14 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
return -ENOMEM;
}
- memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
-
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
+ if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+ memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
+
/* open the gate */
dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
mst->msg[0].addr = mst->i2c_addr;
@@ -319,7 +356,10 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
mst->msg[num + 1].len = 4;
- return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+ ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
+ num : -EIO);
+ mutex_unlock(&mst->i2c_buffer_lock);
+ return ret;
}
static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
@@ -390,8 +430,18 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
struct i2c_adapter *i2c_adap, u8 i2c_addr)
{
- u8 tx[4];
- struct i2c_msg m = {.addr = i2c_addr >> 1,.buf = tx,.len = 4 };
+ int ret;
+
+ mutex_init(&mst->i2c_buffer_lock);
+ if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return -EINVAL;
+ }
+ memset(mst->msg, 0, sizeof(struct i2c_msg));
+ mst->msg[0].addr = i2c_addr >> 1;
+ mst->msg[0].flags = 0;
+ mst->msg[0].buf = mst->i2c_write_buffer;
+ mst->msg[0].len = 4;
mst->device_rev = device_rev;
mst->i2c_adap = i2c_adap;
@@ -431,9 +481,12 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
"DiBX000: could not initialize the master i2c_adapter\n");
/* initialize the i2c-master by closing the gate */
- dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
+ dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0);
+
+ ret = (i2c_transfer(i2c_adap, mst->msg, 1) == 1);
+ mutex_unlock(&mst->i2c_buffer_lock);
- return i2c_transfer(i2c_adap, &m, 1) == 1;
+ return ret;
}
EXPORT_SYMBOL(dibx000_init_i2c_master);
diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
index f031165c045..5e011474be4 100644
--- a/drivers/media/dvb/frontends/dibx000_common.h
+++ b/drivers/media/dvb/frontends/dibx000_common.h
@@ -33,6 +33,7 @@ struct dibx000_i2c_master {
struct i2c_msg msg[34];
u8 i2c_write_buffer[8];
u8 i2c_read_buffer[2];
+ struct mutex i2c_buffer_lock;
};
extern int dibx000_init_i2c_master(struct dibx000_i2c_master *mst,
diff --git a/drivers/media/dvb/frontends/drxd_hard.c b/drivers/media/dvb/frontends/drxd_hard.c
index 2238bf0be95..88e46f4cdbb 100644
--- a/drivers/media/dvb/frontends/drxd_hard.c
+++ b/drivers/media/dvb/frontends/drxd_hard.c
@@ -889,10 +889,15 @@ static int ReadIFAgc(struct drxd_state *state, u32 * pValue)
u32 R2 = state->if_agc_cfg.R2;
u32 R3 = state->if_agc_cfg.R3;
- u32 Vmax = (3300 * R2) / (R1 + R2);
- u32 Rpar = (R2 * R3) / (R3 + R2);
- u32 Vmin = (3300 * Rpar) / (R1 + Rpar);
- u32 Vout = Vmin + ((Vmax - Vmin) * Value) / 1024;
+ u32 Vmax, Rpar, Vmin, Vout;
+
+ if (R2 == 0 && (R1 == 0 || R3 == 0))
+ return 0;
+
+ Vmax = (3300 * R2) / (R1 + R2);
+ Rpar = (R2 * R3) / (R3 + R2);
+ Vmin = (3300 * Rpar) / (R1 + Rpar);
+ Vout = Vmin + ((Vmax - Vmin) * Value) / 1024;
*pValue = Vout;
}
@@ -926,16 +931,15 @@ static int DownloadMicrocode(struct drxd_state *state,
const u8 *pMCImage, u32 Length)
{
u8 *pSrc;
- u16 Flags;
u32 Address;
u16 nBlocks;
u16 BlockSize;
- u16 BlockCRC;
u32 offset = 0;
int i, status = 0;
pSrc = (u8 *) pMCImage;
- Flags = (pSrc[0] << 8) | pSrc[1];
+ /* We're not using Flags */
+ /* Flags = (pSrc[0] << 8) | pSrc[1]; */
pSrc += sizeof(u16);
offset += sizeof(u16);
nBlocks = (pSrc[0] << 8) | pSrc[1];
@@ -952,11 +956,13 @@ static int DownloadMicrocode(struct drxd_state *state,
pSrc += sizeof(u16);
offset += sizeof(u16);
- Flags = (pSrc[0] << 8) | pSrc[1];
+ /* We're not using Flags */
+ /* u16 Flags = (pSrc[0] << 8) | pSrc[1]; */
pSrc += sizeof(u16);
offset += sizeof(u16);
- BlockCRC = (pSrc[0] << 8) | pSrc[1];
+ /* We're not using BlockCRC */
+ /* u16 BlockCRC = (pSrc[0] << 8) | pSrc[1]; */
pSrc += sizeof(u16);
offset += sizeof(u16);
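
The drxd ReadIFAgc() hunk declares the locals first and returns early for resistor configurations the driver treats as unset, so the divider arithmetic is never reached with a zero divisor. The same computation and guard, recast as a standalone C function with illustrative resistor values (not taken from any real board):

#include <stdio.h>

static int if_agc_voltage(unsigned R1, unsigned R2, unsigned R3,
			  unsigned value, unsigned *out)
{
	unsigned Vmax, Rpar, Vmin;

	if (R2 == 0 && (R1 == 0 || R3 == 0))
		return 0;                  /* skip the divides, leave *out untouched */

	Vmax = (3300 * R2) / (R1 + R2);
	Rpar = (R2 * R3) / (R3 + R2);
	Vmin = (3300 * Rpar) / (R1 + Rpar);
	*out = Vmin + ((Vmax - Vmin) * value) / 1024;
	return 1;
}

int main(void)
{
	unsigned v = 0;

	if (if_agc_voltage(820, 470, 330, 512, &v))
		printf("Vout = %u mV\n", v);
	return 0;
}
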
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index 41b083820da..f6431ef827d 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -6211,6 +6211,14 @@ static int drxk_set_parameters(struct dvb_frontend *fe,
u32 IF;
dprintk(1, "\n");
+
+ if (!fe->ops.tuner_ops.get_if_frequency) {
+ printk(KERN_ERR
+ "drxk: Error: get_if_frequency() not defined at tuner. Can't work without it!\n");
+ return -EINVAL;
+ }
+
+
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (fe->ops.tuner_ops.set_params)
@@ -6218,7 +6226,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
state->param = *p;
- fe->ops.tuner_ops.get_frequency(fe, &IF);
+ fe->ops.tuner_ops.get_if_frequency(fe, &IF);
Start(state, 0, IF);
/* printk(KERN_DEBUG "drxk: %s IF=%d done\n", __func__, IF); */
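
The drxk hunk above refuses to tune when the attached tuner does not implement get_if_frequency(), and then calls that hook instead of get_frequency(), since the demodulator needs the intermediate frequency rather than the RF frequency. Checking an optional ops pointer before dereferencing it is the general pattern; a small sketch with invented names and an arbitrary IF value:

#include <stdio.h>

struct tuner_ops {
	/* optional: a given tuner driver may leave this NULL */
	int (*get_if_frequency)(unsigned *hz);
};

static int fake_get_if(unsigned *hz)
{
	*hz = 36150000;                    /* arbitrary IF for the example */
	return 0;
}

static int start_demod(const struct tuner_ops *ops)
{
	unsigned ifreq;

	if (!ops->get_if_frequency) {      /* bail out early, as drxk now does */
		fprintf(stderr, "tuner lacks get_if_frequency(), cannot tune\n");
		return -1;
	}
	ops->get_if_frequency(&ifreq);
	printf("programming demod for IF %u Hz\n", ifreq);
	return 0;
}

int main(void)
{
	struct tuner_ops with = { .get_if_frequency = fake_get_if };
	struct tuner_ops without = { 0 };

	start_demod(&with);                /* tunes */
	start_demod(&without);             /* rejected up front */
	return 0;
}
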
diff --git a/drivers/media/dvb/frontends/it913x-fe-priv.h b/drivers/media/dvb/frontends/it913x-fe-priv.h
new file mode 100644
index 00000000000..1c6fb4b6625
--- /dev/null
+++ b/drivers/media/dvb/frontends/it913x-fe-priv.h
@@ -0,0 +1,336 @@
+
+struct it913xset { u32 pro;
+ u32 address;
+ u8 reg[15];
+ u8 count;
+};
+
+struct adctable { u32 adcFrequency;
+ u32 bandwidth;
+ u32 coeff_1_2048;
+ u32 coeff_1_4096;
+ u32 coeff_1_8191;
+ u32 coeff_1_8192;
+ u32 coeff_1_8193;
+ u32 coeff_2_2k;
+ u32 coeff_2_4k;
+ u32 coeff_2_8k;
+ u16 bfsfcw_fftinx_ratio;
+ u16 fftinx_bfsfcw_ratio;
+};
+
+/* clock and coeff tables only table 3 is used with IT9137*/
+/* TODO other tables relate AF9035 may be removed */
+static struct adctable tab1[] = {
+ { 20156250, BANDWIDTH_6_MHZ,
+ 0x02b8ba6e, 0x015c5d37, 0x00ae340d, 0x00ae2e9b, 0x00ae292a,
+ 0x015c5d37, 0x00ae2e9b, 0x0057174e, 0x02f1, 0x015c },
+ { 20156250, BANDWIDTH_7_MHZ,
+ 0x032cd980, 0x01966cc0, 0x00cb3cba, 0x00cb3660, 0x00cb3007,
+ 0x01966cc0, 0x00cb3660, 0x00659b30, 0x0285, 0x0196 },
+ { 20156250, BANDWIDTH_8_MHZ,
+ 0x03a0f893, 0x01d07c49, 0x00e84567, 0x00e83e25, 0x00e836e3,
+ 0x01d07c49, 0x00e83e25, 0x00741f12, 0x0234, 0x01d0 },
+ { 20156250, BANDWIDTH_5_MHZ,
+ 0x02449b5c, 0x01224dae, 0x00912b60, 0x009126d7, 0x0091224e,
+ 0x01224dae, 0x009126d7, 0x0048936b, 0x0387, 0x0122 }
+};
+
+static struct adctable tab2[] = {
+ { 20187500, BANDWIDTH_6_MHZ,
+ 0x02b7a654, 0x015bd32a, 0x00adef04, 0x00ade995, 0x00ade426,
+ 0x015bd32a, 0x00ade995, 0x0056f4ca, 0x02f2, 0x015c },
+ { 20187500, BANDWIDTH_7_MHZ,
+ 0x032b9761, 0x0195cbb1, 0x00caec30, 0x00cae5d8, 0x00cadf81,
+ 0x0195cbb1, 0x00cae5d8, 0x006572ec, 0x0286, 0x0196 },
+ { 20187500, BANDWIDTH_8_MHZ,
+ 0x039f886f, 0x01cfc438, 0x00e7e95b, 0x00e7e21c, 0x00e7dadd,
+ 0x01cfc438, 0x00e7e21c, 0x0073f10e, 0x0235, 0x01d0 },
+ { 20187500, BANDWIDTH_5_MHZ,
+ 0x0243b546, 0x0121daa3, 0x0090f1d9, 0x0090ed51, 0x0090e8ca,
+ 0x0121daa3, 0x0090ed51, 0x004876a9, 0x0388, 0x0122 }
+
+};
+
+static struct adctable tab3[] = {
+ { 20250000, BANDWIDTH_6_MHZ,
+ 0x02b580ad, 0x015ac057, 0x00ad6597, 0x00ad602b, 0x00ad5ac1,
+ 0x015ac057, 0x00ad602b, 0x0056b016, 0x02f4, 0x015b },
+ { 20250000, BANDWIDTH_7_MHZ,
+ 0x03291620, 0x01948b10, 0x00ca4bda, 0x00ca4588, 0x00ca3f36,
+ 0x01948b10, 0x00ca4588, 0x006522c4, 0x0288, 0x0195 },
+ { 20250000, BANDWIDTH_8_MHZ,
+ 0x039cab92, 0x01ce55c9, 0x00e7321e, 0x00e72ae4, 0x00e723ab,
+ 0x01ce55c9, 0x00e72ae4, 0x00739572, 0x0237, 0x01ce },
+ { 20250000, BANDWIDTH_5_MHZ,
+ 0x0241eb3b, 0x0120f59e, 0x00907f53, 0x00907acf, 0x0090764b,
+ 0x0120f59e, 0x00907acf, 0x00483d67, 0x038b, 0x0121 }
+
+};
+
+static struct adctable tab4[] = {
+ { 20583333, BANDWIDTH_6_MHZ,
+ 0x02aa4598, 0x015522cc, 0x00aa96bb, 0x00aa9166, 0x00aa8c12,
+ 0x015522cc, 0x00aa9166, 0x005548b3, 0x0300, 0x0155 },
+ { 20583333, BANDWIDTH_7_MHZ,
+ 0x031bfbdc, 0x018dfdee, 0x00c7052f, 0x00c6fef7, 0x00c6f8bf,
+ 0x018dfdee, 0x00c6fef7, 0x00637f7b, 0x0293, 0x018e },
+ { 20583333, BANDWIDTH_8_MHZ,
+ 0x038db21f, 0x01c6d910, 0x00e373a3, 0x00e36c88, 0x00e3656d,
+ 0x01c6d910, 0x00e36c88, 0x0071b644, 0x0240, 0x01c7 },
+ { 20583333, BANDWIDTH_5_MHZ,
+ 0x02388f54, 0x011c47aa, 0x008e2846, 0x008e23d5, 0x008e1f64,
+ 0x011c47aa, 0x008e23d5, 0x004711ea, 0x039a, 0x011c }
+
+};
+
+static struct adctable tab5[] = {
+ { 20416667, BANDWIDTH_6_MHZ,
+ 0x02afd765, 0x0157ebb3, 0x00abfb39, 0x00abf5d9, 0x00abf07a,
+ 0x0157ebb3, 0x00abf5d9, 0x0055faed, 0x02fa, 0x0158 },
+ { 20416667, BANDWIDTH_7_MHZ,
+ 0x03227b4b, 0x01913da6, 0x00c8a518, 0x00c89ed3, 0x00c8988e,
+ 0x01913da6, 0x00c89ed3, 0x00644f69, 0x028d, 0x0191 },
+ { 20416667, BANDWIDTH_8_MHZ,
+ 0x03951f32, 0x01ca8f99, 0x00e54ef7, 0x00e547cc, 0x00e540a2,
+ 0x01ca8f99, 0x00e547cc, 0x0072a3e6, 0x023c, 0x01cb },
+ { 20416667, BANDWIDTH_5_MHZ,
+ 0x023d337f, 0x011e99c0, 0x008f515a, 0x008f4ce0, 0x008f4865,
+ 0x011e99c0, 0x008f4ce0, 0x0047a670, 0x0393, 0x011f }
+
+};
+
+static struct adctable tab6[] = {
+ { 20480000, BANDWIDTH_6_MHZ,
+ 0x02adb6db, 0x0156db6e, 0x00ab7312, 0x00ab6db7, 0x00ab685c,
+ 0x0156db6e, 0x00ab6db7, 0x0055b6db, 0x02fd, 0x0157 },
+ { 20480000, BANDWIDTH_7_MHZ,
+ 0x03200000, 0x01900000, 0x00c80640, 0x00c80000, 0x00c7f9c0,
+ 0x01900000, 0x00c80000, 0x00640000, 0x028f, 0x0190 },
+ { 20480000, BANDWIDTH_8_MHZ,
+ 0x03924925, 0x01c92492, 0x00e4996e, 0x00e49249, 0x00e48b25,
+ 0x01c92492, 0x00e49249, 0x00724925, 0x023d, 0x01c9 },
+ { 20480000, BANDWIDTH_5_MHZ,
+ 0x023b6db7, 0x011db6db, 0x008edfe5, 0x008edb6e, 0x008ed6f7,
+ 0x011db6db, 0x008edb6e, 0x00476db7, 0x0396, 0x011e }
+};
+
+static struct adctable tab7[] = {
+ { 20500000, BANDWIDTH_6_MHZ,
+ 0x02ad0b99, 0x015685cc, 0x00ab4840, 0x00ab42e6, 0x00ab3d8c,
+ 0x015685cc, 0x00ab42e6, 0x0055a173, 0x02fd, 0x0157 },
+ { 20500000, BANDWIDTH_7_MHZ,
+ 0x031f3832, 0x018f9c19, 0x00c7d44b, 0x00c7ce0c, 0x00c7c7ce,
+ 0x018f9c19, 0x00c7ce0c, 0x0063e706, 0x0290, 0x0190 },
+ { 20500000, BANDWIDTH_8_MHZ,
+ 0x039164cb, 0x01c8b266, 0x00e46056, 0x00e45933, 0x00e45210,
+ 0x01c8b266, 0x00e45933, 0x00722c99, 0x023e, 0x01c9 },
+ { 20500000, BANDWIDTH_5_MHZ,
+ 0x023adeff, 0x011d6f80, 0x008ebc36, 0x008eb7c0, 0x008eb34a,
+ 0x011d6f80, 0x008eb7c0, 0x00475be0, 0x0396, 0x011d }
+
+};
+
+static struct adctable tab8[] = {
+ { 20625000, BANDWIDTH_6_MHZ,
+ 0x02a8e4bd, 0x0154725e, 0x00aa3e81, 0x00aa392f, 0x00aa33de,
+ 0x0154725e, 0x00aa392f, 0x00551c98, 0x0302, 0x0154 },
+ { 20625000, BANDWIDTH_7_MHZ,
+ 0x031a6032, 0x018d3019, 0x00c69e41, 0x00c6980c, 0x00c691d8,
+ 0x018d3019, 0x00c6980c, 0x00634c06, 0x0294, 0x018d },
+ { 20625000, BANDWIDTH_8_MHZ,
+ 0x038bdba6, 0x01c5edd3, 0x00e2fe02, 0x00e2f6ea, 0x00e2efd2,
+ 0x01c5edd3, 0x00e2f6ea, 0x00717b75, 0x0242, 0x01c6 },
+ { 20625000, BANDWIDTH_5_MHZ,
+ 0x02376948, 0x011bb4a4, 0x008ddec1, 0x008dda52, 0x008dd5e3,
+ 0x011bb4a4, 0x008dda52, 0x0046ed29, 0x039c, 0x011c }
+
+};
+
+struct table {
+ u32 xtal;
+ struct adctable *table;
+};
+
+static struct table fe_clockTable[] = {
+ {12000000, tab3}, /* FPGA */
+ {16384000, tab6}, /* 16.38MHz */
+ {20480000, tab6}, /* 20.48MHz */
+ {36000000, tab3}, /* 36.00MHz */
+ {30000000, tab1}, /* 30.00MHz */
+ {26000000, tab4}, /* 26.00MHz */
+ {28000000, tab5}, /* 28.00MHz */
+ {32000000, tab7}, /* 32.00MHz */
+ {34000000, tab2}, /* 34.00MHz */
+ {24000000, tab1}, /* 24.00MHz */
+ {22000000, tab8}, /* 22.00MHz */
+ {12000000, tab3} /* 12.00MHz */
+};
+
+/* fe get */
+fe_code_rate_t fe_code[] = {
+ FEC_1_2,
+ FEC_2_3,
+ FEC_3_4,
+ FEC_5_6,
+ FEC_7_8,
+ FEC_NONE,
+};
+
+fe_guard_interval_t fe_gi[] = {
+ GUARD_INTERVAL_1_32,
+ GUARD_INTERVAL_1_16,
+ GUARD_INTERVAL_1_8,
+ GUARD_INTERVAL_1_4,
+};
+
+fe_hierarchy_t fe_hi[] = {
+ HIERARCHY_NONE,
+ HIERARCHY_1,
+ HIERARCHY_2,
+ HIERARCHY_4,
+};
+
+fe_transmit_mode_t fe_mode[] = {
+ TRANSMISSION_MODE_2K,
+ TRANSMISSION_MODE_8K,
+ TRANSMISSION_MODE_4K,
+};
+
+fe_modulation_t fe_con[] = {
+ QPSK,
+ QAM_16,
+ QAM_64,
+};
+
+/* Standard demodulator functions */
+static struct it913xset set_solo_fe[] = {
+ {PRO_LINK, DVBT_INTEN, {0x04}, 0x01},
+ {PRO_LINK, DVBT_ENABLE, {0x05}, 0x01},
+ {PRO_DMOD, MP2IF_MPEG_PAR_MODE, {0x00}, 0x01},
+ {PRO_LINK, HOSTB_MPEG_SER_MODE, {0x00}, 0x01},
+ {PRO_LINK, HOSTB_MPEG_PAR_MODE, {0x00}, 0x01},
+ {PRO_DMOD, DCA_UPPER_CHIP, {0x00}, 0x01},
+ {PRO_LINK, HOSTB_DCA_UPPER, {0x00}, 0x01},
+ {PRO_DMOD, DCA_LOWER_CHIP, {0x00}, 0x01},
+ {PRO_LINK, HOSTB_DCA_LOWER, {0x00}, 0x01},
+ {PRO_DMOD, DCA_PLATCH, {0x00}, 0x01},
+ {PRO_DMOD, DCA_FPGA_LATCH, {0x00}, 0x01},
+ {PRO_DMOD, DCA_STAND_ALONE, {0x01}, 0x01},
+ {PRO_DMOD, DCA_ENABLE, {0x00}, 0x01},
+ {PRO_DMOD, MP2IF_MPEG_PAR_MODE, {0x00}, 0x01},
+ {PRO_DMOD, BFS_FCW, {0x00, 0x00, 0x00}, 0x03},
+ {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
+};
+
+
+static struct it913xset init_1[] = {
+ {PRO_LINK, LOCK3_OUT, {0x01}, 0x01},
+ {PRO_LINK, PADMISCDRSR, {0x01}, 0x01},
+ {PRO_LINK, PADMISCDR2, {0x00}, 0x01},
+ {PRO_LINK, PADMISCDR4, {0x00}, 0x01}, /* Power up */
+ {PRO_LINK, PADMISCDR8, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+/* ---------IT9137 0x38 tuner init---------- */
+static struct it913xset it9137_set[] = {
+ {PRO_DMOD, 0x0043, {0x00}, 0x01},
+ {PRO_DMOD, 0x0046, {0x38}, 0x01},
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0x0068, {0x0a}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
+ {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0xc8, 0x01}, 0x05},
+ {PRO_DMOD, 0x007e, {0x04, 0x00}, 0x02},
+ {PRO_DMOD, 0x0081, { 0x0a, 0x12, 0x02, 0x0a, 0x03, 0xc8, 0xb8,
+ 0xd0, 0xc3, 0x01 }, 0x0a},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
+ {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
+ {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
+ {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b3, {0x02, 0x32}, 0x02},
+ {PRO_DMOD, 0x00b6, {0x14}, 0x01},
+ {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05}, 0x03},
+ {PRO_DMOD, 0x00c4, {0x00}, 0x01},
+ {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
+ {PRO_DMOD, 0x00cc, {0x2e, 0x51, 0x33}, 0x03},
+ {PRO_DMOD, 0x00f3, {0x05, 0x8c, 0x8c}, 0x03},
+ {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
+ {PRO_DMOD, 0x00fc, { 0x02, 0x02, 0x02, 0x09, 0x50, 0x7b, 0x77,
+ 0x00, 0x02, 0xc8, 0x05, 0x7b }, 0x0c},
+ {PRO_DMOD, 0x0109, {0x02}, 0x01},
+ {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
+ {PRO_DMOD, 0x011a, {0xc8, 0x7b, 0xbc, 0xa0}, 0x04},
+ {PRO_DMOD, 0x0122, {0x02, 0x18, 0xc3}, 0x03},
+ {PRO_DMOD, 0x0127, {0x00, 0x07}, 0x02},
+ {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
+ {PRO_DMOD, 0x0137, {0x01, 0x00, 0x07, 0x00, 0x06}, 0x05},
+ {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xc8}, 0x04},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
+ {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
+ {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
+ {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
+ {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
+ {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
+ {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
+ {PRO_DMOD, 0xf5df, {0xfb, 0x00}, 0x02},
+ {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
+ {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
+ {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
+ {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
+ 0x1f }, 0x08},
+ {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
+ {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {PRO_LINK, GPIOH5_EN, {0x01}, 0x01},
+ {PRO_LINK, GPIOH5_ON, {0x01}, 0x01},
+ {PRO_LINK, GPIOH5_O, {0x00}, 0x01},
+ {PRO_LINK, GPIOH5_O, {0x01}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
+};
+
+static struct it913xset it9137_tuner_off[] = {
+ {PRO_DMOD, 0xfba8, {0x01}, 0x01}, /* Tuner Clock Off */
+ {PRO_DMOD, 0xec40, {0x00}, 0x01}, /* Power Down Tuner */
+ {PRO_DMOD, 0xec02, {0x3f, 0x1f, 0x3f, 0x3f}, 0x04},
+ {PRO_DMOD, 0xec3f, {0x01}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
+};
+
+static struct it913xset set_it9137_template[] = {
+ {PRO_DMOD, 0xee06, {0x00}, 0x01},
+ {PRO_DMOD, 0xec56, {0x00}, 0x01},
+ {PRO_DMOD, 0xec4c, {0x00}, 0x01},
+ {PRO_DMOD, 0xec4d, {0x00}, 0x01},
+ {PRO_DMOD, 0xec4e, {0x00}, 0x01},
+ {PRO_DMOD, 0xec4f, {0x00}, 0x01},
+ {PRO_DMOD, 0xec50, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
+};
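
it913x-fe-priv.h above is mostly data: tables of {property, address, payload, count} register writes ending in a 0xff terminating entry, which the driver's script loader walks and sends to the chip. A stripped-down loader over the same shape of table, with a hypothetical write function and a short demo table, is sketched below; only the table layout and the terminator convention are taken from the header above.

#include <stdio.h>

/* Same shape as struct it913xset: a register write plus a byte count,
 * with pro == 0xff marking the end of the table. */
struct regset {
	unsigned pro;
	unsigned address;
	unsigned char reg[15];
	unsigned char count;
};

static const struct regset demo_script[] = {
	{ 0x01, 0x0043, { 0x00 }, 1 },
	{ 0x01, 0x009b, { 0x3c, 0x28 }, 2 },
	{ 0xff, 0x0000, { 0x00 }, 0 },     /* terminating entry */
};

/* Stand-in for it913x_write(): just prints what would go on the bus. */
static int write_regs(const struct regset *s)
{
	unsigned i;

	printf("write pro=%u addr=0x%04x:", s->pro, s->address);
	for (i = 0; i < s->count; i++)
		printf(" %02x", s->reg[i]);
	printf("\n");
	return 0;
}

static int run_script(const struct regset *script)
{
	int i;

	for (i = 0; script[i].pro != 0xff; i++)
		if (write_regs(&script[i]) < 0)
			return -1;
	return 0;
}

int main(void)
{
	return run_script(demo_script);
}
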
diff --git a/drivers/media/dvb/frontends/it913x-fe.c b/drivers/media/dvb/frontends/it913x-fe.c
new file mode 100644
index 00000000000..d4bd24eb470
--- /dev/null
+++ b/drivers/media/dvb/frontends/it913x-fe.c
@@ -0,0 +1,839 @@
+/*
+ * Driver for it913x-fe Frontend
+ *
+ * with support for on chip it9137 integral tuner
+ *
+ * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com)
+ * IT9137 Copyright (C) ITE Tech Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "dvb_frontend.h"
+#include "it913x-fe.h"
+#include "it913x-fe-priv.h"
+
+static int it913x_debug;
+
+module_param_named(debug, it913x_debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
+
+#define dprintk(level, args...) do { \
+ if (level & it913x_debug) \
+ printk(KERN_DEBUG "it913x-fe: " args); \
+} while (0)
+
+#define deb_info(args...) dprintk(0x01, args)
+#define debug_data_snipet(level, name, p) \
+ dprintk(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
+ *p, *(p+1), *(p+2), *(p+3), *(p+4), \
+ *(p+5), *(p+6), *(p+7));
+
+struct it913x_fe_state {
+ struct dvb_frontend frontend;
+ struct i2c_adapter *i2c_adap;
+ u8 i2c_addr;
+ u32 frequency;
+ u8 adf;
+ u32 crystalFrequency;
+ u32 adcFrequency;
+ u8 tuner_type;
+ struct adctable *table;
+ fe_status_t it913x_status;
+ u16 tun_xtal;
+ u8 tun_fdiv;
+ u8 tun_clk_mode;
+ u32 tun_fn_min;
+};
+
+static int it913x_read_reg(struct it913x_fe_state *state,
+ u32 reg, u8 *data, u8 count)
+{
+ int ret;
+ u8 pro = PRO_DMOD; /* All reads from demodulator */
+ u8 b[4];
+ struct i2c_msg msg[2] = {
+ { .addr = state->i2c_addr + (pro << 1), .flags = 0,
+ .buf = b, .len = sizeof(b) },
+ { .addr = state->i2c_addr + (pro << 1), .flags = I2C_M_RD,
+ .buf = data, .len = count }
+ };
+ b[0] = (u8) reg >> 24;
+ b[1] = (u8)(reg >> 16) & 0xff;
+ b[2] = (u8)(reg >> 8) & 0xff;
+ b[3] = (u8) reg & 0xff;
+
+ ret = i2c_transfer(state->i2c_adap, msg, 2);
+
+ return ret;
+}
+
+static int it913x_read_reg_u8(struct it913x_fe_state *state, u32 reg)
+{
+ int ret;
+ u8 b[1];
+ ret = it913x_read_reg(state, reg, &b[0], sizeof(b));
+ return (ret < 0) ? -ENODEV : b[0];
+}
+
+static int it913x_write(struct it913x_fe_state *state,
+ u8 pro, u32 reg, u8 buf[], u8 count)
+{
+ u8 b[256];
+ struct i2c_msg msg[1] = {
+ { .addr = state->i2c_addr + (pro << 1), .flags = 0,
+ .buf = b, .len = count + 4 }
+ };
+ int ret;
+
+ b[0] = (u8) reg >> 24;
+ b[1] = (u8)(reg >> 16) & 0xff;
+ b[2] = (u8)(reg >> 8) & 0xff;
+ b[3] = (u8) reg & 0xff;
+ memcpy(&b[4], buf, count);
+
+ ret = i2c_transfer(state->i2c_adap, msg, 1);
+
+ if (ret < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int it913x_write_reg(struct it913x_fe_state *state,
+ u8 pro, u32 reg, u32 data)
+{
+ int ret;
+ u8 b[4];
+ u8 s;
+
+ b[0] = data >> 24;
+ b[1] = (data >> 16) & 0xff;
+ b[2] = (data >> 8) & 0xff;
+ b[3] = data & 0xff;
+ /* expand write as needed */
+ if (data < 0x100)
+ s = 3;
+ else if (data < 0x1000)
+ s = 2;
+ else if (data < 0x100000)
+ s = 1;
+ else
+ s = 0;
+
+ ret = it913x_write(state, pro, reg, &b[s], sizeof(b) - s);
+
+ return ret;
+}
+
+static int it913x_fe_script_loader(struct it913x_fe_state *state,
+ struct it913xset *loadscript)
+{
+ int ret, i;
+ if (loadscript == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < 1000; ++i) {
+ if (loadscript[i].pro == 0xff)
+ break;
+ ret = it913x_write(state, loadscript[i].pro,
+ loadscript[i].address,
+ loadscript[i].reg, loadscript[i].count);
+ if (ret < 0)
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int it913x_init_tuner(struct it913x_fe_state *state)
+{
+ int ret, i, reg;
+ u8 val, nv_val;
+ u8 nv[] = {48, 32, 24, 16, 12, 8, 6, 4, 2};
+ u8 b[2];
+
+ reg = it913x_read_reg_u8(state, 0xec86);
+ switch (reg) {
+ case 0:
+ state->tun_clk_mode = reg;
+ state->tun_xtal = 2000;
+ state->tun_fdiv = 3;
+ val = 16;
+ break;
+ case -ENODEV:
+ return -ENODEV;
+ case 1:
+ default:
+ state->tun_clk_mode = reg;
+ state->tun_xtal = 640;
+ state->tun_fdiv = 1;
+ val = 6;
+ break;
+ }
+
+ reg = it913x_read_reg_u8(state, 0xed03);
+
+ if (reg < 0)
+ return -ENODEV;
+ else if (reg < sizeof(nv))
+ nv_val = nv[reg];
+ else
+ nv_val = 2;
+
+ for (i = 0; i < 50; i++) {
+ ret = it913x_read_reg(state, 0xed23, &b[0], sizeof(b));
+ reg = (b[1] << 8) + b[0];
+ if (reg > 0)
+ break;
+ if (ret < 0)
+ return -ENODEV;
+ udelay(2000);
+ }
+ state->tun_fn_min = state->tun_xtal * reg;
+ state->tun_fn_min /= (state->tun_fdiv * nv_val);
+ deb_info("Tuner fn_min %d", state->tun_fn_min);
+
+ for (i = 0; i < 50; i++) {
+ reg = it913x_read_reg_u8(state, 0xec82);
+ if (reg > 0)
+ break;
+ if (reg < 0)
+ return -ENODEV;
+ udelay(2000);
+ }
+
+ return it913x_write_reg(state, PRO_DMOD, 0xed81, val);
+}
+
+static int it9137_set_tuner(struct it913x_fe_state *state,
+ enum fe_bandwidth bandwidth, u32 frequency_m)
+{
+ struct it913xset *set_tuner = set_it9137_template;
+ int ret, reg;
+ u32 frequency = frequency_m / 1000;
+ u32 freq, temp_f, tmp;
+ u16 iqik_m_cal;
+ u16 n_div;
+ u8 n;
+ u8 l_band;
+ u8 lna_band;
+ u8 bw;
+
+ deb_info("Tuner Frequency %d Bandwidth %d", frequency, bandwidth);
+
+ if (frequency >= 51000 && frequency <= 440000) {
+ l_band = 0;
+ lna_band = 0;
+ } else if (frequency > 440000 && frequency <= 484000) {
+ l_band = 1;
+ lna_band = 1;
+ } else if (frequency > 484000 && frequency <= 533000) {
+ l_band = 1;
+ lna_band = 2;
+ } else if (frequency > 533000 && frequency <= 587000) {
+ l_band = 1;
+ lna_band = 3;
+ } else if (frequency > 587000 && frequency <= 645000) {
+ l_band = 1;
+ lna_band = 4;
+ } else if (frequency > 645000 && frequency <= 710000) {
+ l_band = 1;
+ lna_band = 5;
+ } else if (frequency > 710000 && frequency <= 782000) {
+ l_band = 1;
+ lna_band = 6;
+ } else if (frequency > 782000 && frequency <= 860000) {
+ l_band = 1;
+ lna_band = 7;
+ } else if (frequency > 1450000 && frequency <= 1492000) {
+ l_band = 1;
+ lna_band = 0;
+ } else if (frequency > 1660000 && frequency <= 1685000) {
+ l_band = 1;
+ lna_band = 1;
+ } else
+ return -EINVAL;
+ set_tuner[0].reg[0] = lna_band;
+
+ if (bandwidth == BANDWIDTH_5_MHZ)
+ bw = 0;
+ else if (bandwidth == BANDWIDTH_6_MHZ)
+ bw = 2;
+ else if (bandwidth == BANDWIDTH_7_MHZ)
+ bw = 4;
+ else if (bandwidth == BANDWIDTH_8_MHZ)
+ bw = 6;
+ else
+ bw = 6;
+
+ set_tuner[1].reg[0] = bw;
+ set_tuner[2].reg[0] = 0xa0 | (l_band << 3);
+
+ if (frequency > 53000 && frequency <= 74000) {
+ n_div = 48;
+ n = 0;
+ } else if (frequency > 74000 && frequency <= 111000) {
+ n_div = 32;
+ n = 1;
+ } else if (frequency > 111000 && frequency <= 148000) {
+ n_div = 24;
+ n = 2;
+ } else if (frequency > 148000 && frequency <= 222000) {
+ n_div = 16;
+ n = 3;
+ } else if (frequency > 222000 && frequency <= 296000) {
+ n_div = 12;
+ n = 4;
+ } else if (frequency > 296000 && frequency <= 445000) {
+ n_div = 8;
+ n = 5;
+ } else if (frequency > 445000 && frequency <= state->tun_fn_min) {
+ n_div = 6;
+ n = 6;
+ } else if (frequency > state->tun_fn_min && frequency <= 950000) {
+ n_div = 4;
+ n = 7;
+ } else if (frequency > 1450000 && frequency <= 1680000) {
+ n_div = 2;
+ n = 0;
+ } else
+ return -EINVAL;
+
+	reg = it913x_read_reg_u8(state, 0xed81);
+	if (reg < 0)
+		return -ENODEV;
+	iqik_m_cal = (u16)reg * n_div;
+
+ if (reg < 0x20) {
+ if (state->tun_clk_mode == 0)
+ iqik_m_cal = (iqik_m_cal * 9) >> 5;
+ else
+ iqik_m_cal >>= 1;
+ } else {
+ iqik_m_cal = 0x40 - iqik_m_cal;
+ if (state->tun_clk_mode == 0)
+ iqik_m_cal = ~((iqik_m_cal * 9) >> 5);
+ else
+ iqik_m_cal = ~(iqik_m_cal >> 1);
+ }
+
+ temp_f = frequency * (u32)n_div * (u32)state->tun_fdiv;
+ freq = temp_f / state->tun_xtal;
+ tmp = freq * state->tun_xtal;
+
+ if ((temp_f - tmp) >= (state->tun_xtal >> 1))
+ freq++;
+
+ freq += (u32) n << 13;
+	/* Frequency OMEGA_IQIK_M_CAL_MID */
+ temp_f = freq + (u32)iqik_m_cal;
+
+ set_tuner[3].reg[0] = temp_f & 0xff;
+ set_tuner[4].reg[0] = (temp_f >> 8) & 0xff;
+
+ deb_info("High Frequency = %04x", temp_f);
+
+ /* Lower frequency */
+ set_tuner[5].reg[0] = freq & 0xff;
+ set_tuner[6].reg[0] = (freq >> 8) & 0xff;
+
+ deb_info("low Frequency = %04x", freq);
+
+ ret = it913x_fe_script_loader(state, set_tuner);
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
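+/*
+ * Bandwidth selection programs REG_BW and then bursts 36 bytes into
+ * demod registers 0x0001-0x0024: eight 4-byte coefficients (MSB first,
+ * top byte masked to 2 bits, halved when ADC_X_2 reads 1) followed by
+ * the two 16-bit ratios, LSB first.
+ */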
+static int it913x_fe_select_bw(struct it913x_fe_state *state,
+ enum fe_bandwidth bandwidth, u32 adcFrequency)
+{
+ int ret, i;
+ u8 buffer[256];
+ u32 coeff[8];
+ u16 bfsfcw_fftinx_ratio;
+ u16 fftinx_bfsfcw_ratio;
+ u8 count;
+ u8 bw;
+ u8 adcmultiplier;
+
+ deb_info("Bandwidth %d Adc %d", bandwidth, adcFrequency);
+
+ if (bandwidth == BANDWIDTH_5_MHZ)
+ bw = 3;
+ else if (bandwidth == BANDWIDTH_6_MHZ)
+ bw = 0;
+ else if (bandwidth == BANDWIDTH_7_MHZ)
+ bw = 1;
+ else if (bandwidth == BANDWIDTH_8_MHZ)
+ bw = 2;
+ else
+ bw = 2;
+
+ ret = it913x_write_reg(state, PRO_DMOD, REG_BW, bw);
+
+ if (state->table == NULL)
+ return -EINVAL;
+
+ /* In write order */
+ coeff[0] = state->table[bw].coeff_1_2048;
+ coeff[1] = state->table[bw].coeff_2_2k;
+ coeff[2] = state->table[bw].coeff_1_8191;
+ coeff[3] = state->table[bw].coeff_1_8192;
+ coeff[4] = state->table[bw].coeff_1_8193;
+ coeff[5] = state->table[bw].coeff_2_8k;
+ coeff[6] = state->table[bw].coeff_1_4096;
+ coeff[7] = state->table[bw].coeff_2_4k;
+ bfsfcw_fftinx_ratio = state->table[bw].bfsfcw_fftinx_ratio;
+ fftinx_bfsfcw_ratio = state->table[bw].fftinx_bfsfcw_ratio;
+
+ /* ADC multiplier */
+ ret = it913x_read_reg_u8(state, ADC_X_2);
+ if (ret < 0)
+ return -EINVAL;
+
+ adcmultiplier = ret;
+
+ count = 0;
+
+ /* Build Buffer for COEFF Registers */
+ for (i = 0; i < 8; i++) {
+ if (adcmultiplier == 1)
+ coeff[i] /= 2;
+ buffer[count++] = (coeff[i] >> 24) & 0x3;
+ buffer[count++] = (coeff[i] >> 16) & 0xff;
+ buffer[count++] = (coeff[i] >> 8) & 0xff;
+ buffer[count++] = coeff[i] & 0xff;
+ }
+
+ /* bfsfcw_fftinx_ratio register 0x21-0x22 */
+ buffer[count++] = bfsfcw_fftinx_ratio & 0xff;
+ buffer[count++] = (bfsfcw_fftinx_ratio >> 8) & 0xff;
+ /* fftinx_bfsfcw_ratio register 0x23-0x24 */
+ buffer[count++] = fftinx_bfsfcw_ratio & 0xff;
+ buffer[count++] = (fftinx_bfsfcw_ratio >> 8) & 0xff;
+	/* start at COEFF_1_2048 and write through to fftinx_bfsfcw_ratio */
+ ret = it913x_write(state, PRO_DMOD, COEFF_1_2048, buffer, count);
+
+ for (i = 0; i < 42; i += 8)
+ debug_data_snipet(0x1, "Buffer", &buffer[i]);
+
+ return ret;
+}
+
+static int it913x_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret, i;
+ fe_status_t old_status = state->it913x_status;
+ *status = 0;
+
+ if (state->it913x_status == 0) {
+ ret = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS);
+ if (ret == 0x1) {
+ *status |= FE_HAS_SIGNAL;
+ for (i = 0; i < 40; i++) {
+ ret = it913x_read_reg_u8(state, MP2IF_SYNC_LK);
+ if (ret == 0x1)
+ break;
+ msleep(25);
+ }
+ if (ret == 0x1)
+ *status |= FE_HAS_CARRIER
+ | FE_HAS_VITERBI
+ | FE_HAS_SYNC;
+ state->it913x_status = *status;
+ }
+ }
+
+ if (state->it913x_status & FE_HAS_SYNC) {
+ ret = it913x_read_reg_u8(state, TPSD_LOCK);
+ if (ret == 0x1)
+ *status |= FE_HAS_LOCK
+ | state->it913x_status;
+ else
+ state->it913x_status = 0;
+ if (old_status != state->it913x_status)
+ ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, ret);
+ }
+
+ return 0;
+}
+
+static int it913x_fe_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret = it913x_read_reg_u8(state, SIGNAL_LEVEL);
+	/* SIGNAL_LEVEL always reads 100%; use FE_HAS_SIGNAL as the switch */
+ if (state->it913x_status & FE_HAS_SIGNAL)
+ ret = (ret * 0xff) / 0x64;
+ else
+ ret = 0x0;
+ ret |= ret << 0x8;
+ *strength = ret;
+ return 0;
+}
+
+static int it913x_fe_read_snr(struct dvb_frontend *fe, u16* snr)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret = it913x_read_reg_u8(state, SIGNAL_QUALITY);
+ ret = (ret * 0xff) / 0x64;
+ ret |= (ret << 0x8);
+ *snr = ~ret;
+ return 0;
+}
+
+static int it913x_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ *ber = 0;
+ return 0;
+}
+
+static int it913x_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ *ucblocks = 0;
+ return 0;
+}
+
+static int it913x_fe_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u8 reg[8];
+
+ ret = it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg));
+
+ if (reg[3] < 3)
+ p->u.ofdm.constellation = fe_con[reg[3]];
+
+ if (reg[0] < 3)
+ p->u.ofdm.transmission_mode = fe_mode[reg[0]];
+
+ if (reg[1] < 4)
+ p->u.ofdm.guard_interval = fe_gi[reg[1]];
+
+ if (reg[2] < 4)
+ p->u.ofdm.hierarchy_information = fe_hi[reg[2]];
+
+ p->u.ofdm.code_rate_HP = (reg[6] < 6) ? fe_code[reg[6]] : FEC_NONE;
+ p->u.ofdm.code_rate_LP = (reg[7] < 6) ? fe_code[reg[7]] : FEC_NONE;
+
+ return 0;
+}
+
+static int it913x_fe_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret, i;
+ u8 empty_ch, last_ch;
+
+ state->it913x_status = 0;
+
+	/* Set bw */
+ ret = it913x_fe_select_bw(state, p->u.ofdm.bandwidth,
+ state->adcFrequency);
+
+ /* Training Mode Off */
+ ret = it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0);
+
+ /* Clear Empty Channel */
+ ret = it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0);
+
+ /* Clear bits */
+ ret = it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0);
+ /* LED on */
+ ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
+	/* Select Band */
+ if ((p->frequency >= 51000000) && (p->frequency <= 230000000))
+ i = 0;
+ else if ((p->frequency >= 350000000) && (p->frequency <= 900000000))
+ i = 1;
+ else if ((p->frequency >= 1450000000) && (p->frequency <= 1680000000))
+ i = 2;
+ else
+ return -EOPNOTSUPP;
+
+ ret = it913x_write_reg(state, PRO_DMOD, FREE_BAND, i);
+
+ deb_info("Frontend Set Tuner Type %02x", state->tuner_type);
+ switch (state->tuner_type) {
+ case IT9137: /* Tuner type 0x38 */
+ ret = it9137_set_tuner(state,
+ p->u.ofdm.bandwidth, p->frequency);
+ break;
+ default:
+ if (fe->ops.tuner_ops.set_params) {
+ fe->ops.tuner_ops.set_params(fe, p);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+ break;
+ }
+ /* LED off */
+ ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
+ /* Trigger ofsm */
+ ret = it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
+ last_ch = 2;
+ for (i = 0; i < 40; ++i) {
+ empty_ch = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS);
+ if (last_ch == 1 && empty_ch == 1)
+ break;
+ if (last_ch == 2 && empty_ch == 2)
+ return 0;
+ last_ch = empty_ch;
+ msleep(25);
+ }
+ for (i = 0; i < 40; ++i) {
+ if (it913x_read_reg_u8(state, D_TPSD_LOCK) == 1)
+ break;
+ msleep(25);
+ }
+
+ state->frequency = p->frequency;
+ return 0;
+}
+
+static int it913x_fe_suspend(struct it913x_fe_state *state)
+{
+ int ret, i;
+ u8 b;
+
+ ret = it913x_write_reg(state, PRO_DMOD, SUSPEND_FLAG, 0x1);
+
+ ret |= it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
+
+ for (i = 0; i < 128; i++) {
+ ret = it913x_read_reg(state, SUSPEND_FLAG, &b, 1);
+ if (ret < 0)
+ return -ENODEV;
+ if (b == 0)
+ break;
+
+ }
+
+ ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x8);
+ /* Turn LED off */
+ ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
+
+ ret |= it913x_fe_script_loader(state, it9137_tuner_off);
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+/*
+ * Power sequence
+ * Power up:   tuner on -> frontend suspend off -> tuner clk on
+ * Power down: frontend suspend on -> tuner clk off -> tuner off
+ */
+
+static int it913x_fe_sleep(struct dvb_frontend *fe)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ return it913x_fe_suspend(state);
+}
+
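+/*
+ * compute_div(a, b, x) returns a / b as an unsigned fixed-point value
+ * with x fractional bits, i.e. roughly (a << x) / b: compute_div(3, 2, 3)
+ * gives 12 (1.5 in Q3). With x = 19, as used below, a 20.25 MHz clock
+ * would encode as 20.25 * 2^19 = 0x00a20000.
+ */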
+static u32 compute_div(u32 a, u32 b, u32 x)
+{
+ u32 res = 0;
+ u32 c = 0;
+ u32 i = 0;
+
+ if (a > b) {
+ c = a / b;
+ a = a - c * b;
+ }
+
+ for (i = 0; i < x; i++) {
+ if (a >= b) {
+ res += 1;
+ a -= b;
+ }
+ a <<= 1;
+ res <<= 1;
+ }
+
+ res = (c << x) + res;
+
+ return res;
+}
+
+static int it913x_fe_start(struct it913x_fe_state *state)
+{
+ struct it913xset *set_fe;
+ struct it913xset *set_mode;
+ int ret;
+ u8 adf = (state->adf & 0xf);
+ u32 adc, xtal;
+ u8 b[4];
+
+ ret = it913x_init_tuner(state);
+
+ if (adf < 12) {
+		state->crystalFrequency = fe_clockTable[adf].xtal;
+ state->table = fe_clockTable[adf].table;
+ state->adcFrequency = state->table->adcFrequency;
+
+ adc = compute_div(state->adcFrequency, 1000000ul, 19ul);
+ xtal = compute_div(state->crystalFrequency, 1000000ul, 19ul);
+
+ } else
+ return -EINVAL;
+
+ deb_info("Xtal Freq :%d Adc Freq :%d Adc %08x Xtal %08x",
+ state->crystalFrequency, state->adcFrequency, adc, xtal);
+
+ /* Set LED indicator on GPIOH3 */
+ ret = it913x_write_reg(state, PRO_LINK, GPIOH3_EN, 0x1);
+ ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_ON, 0x1);
+ ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
+
+ ret |= it913x_write_reg(state, PRO_LINK, 0xf641, state->tuner_type);
+ ret |= it913x_write_reg(state, PRO_DMOD, 0xf5ca, 0x01);
+ ret |= it913x_write_reg(state, PRO_DMOD, 0xf715, 0x01);
+
+ b[0] = xtal & 0xff;
+ b[1] = (xtal >> 8) & 0xff;
+ b[2] = (xtal >> 16) & 0xff;
+ b[3] = (xtal >> 24);
+	ret |= it913x_write(state, PRO_DMOD, XTAL_CLK, b, 4);
+
+ b[0] = adc & 0xff;
+ b[1] = (adc >> 8) & 0xff;
+ b[2] = (adc >> 16) & 0xff;
+ ret |= it913x_write(state, PRO_DMOD, ADC_FREQ, b, 3);
+
+ switch (state->tuner_type) {
+ case IT9137: /* Tuner type 0x38 */
+ set_fe = it9137_set;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set the demod */
+ ret = it913x_fe_script_loader(state, set_fe);
+ /* Always solo frontend */
+ set_mode = set_solo_fe;
+ ret |= it913x_fe_script_loader(state, set_mode);
+
+ ret |= it913x_fe_suspend(state);
+	return (ret < 0) ? -ENODEV : 0;
+}
+
+static int it913x_fe_init(struct dvb_frontend *fe)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret = 0;
+	/* Power up tuner - common to all versions */
+ ret = it913x_write_reg(state, PRO_DMOD, 0xec40, 0x1);
+
+ ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x0);
+
+ ret |= it913x_fe_script_loader(state, init_1);
+
+ switch (state->tuner_type) {
+ case IT9137:
+ ret |= it913x_write_reg(state, PRO_DMOD, 0xfba8, 0x0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static void it913x_fe_release(struct dvb_frontend *fe)
+{
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ kfree(state);
+}
+
+static struct dvb_frontend_ops it913x_fe_ofdm_ops;
+
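+/*
+ * Rough usage sketch from a USB adapter driver ("adap" and the
+ * EEPROM-derived "adf" value are placeholders for whatever the caller
+ * really has at hand):
+ *
+ *	fe = it913x_fe_attach(&adap->i2c_adap, I2C_BASE_ADDR, adf, IT9137);
+ *	if (fe == NULL)
+ *		return -ENODEV;
+ */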
+struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap,
+ u8 i2c_addr, u8 adf, u8 type)
+{
+ struct it913x_fe_state *state = NULL;
+ int ret;
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct it913x_fe_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ state->i2c_adap = i2c_adap;
+ state->i2c_addr = i2c_addr;
+ state->adf = adf;
+ state->tuner_type = type;
+
+ ret = it913x_fe_start(state);
+ if (ret < 0)
+ goto error;
+
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &it913x_fe_ofdm_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
+
+ return &state->frontend;
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(it913x_fe_attach);
+
+static struct dvb_frontend_ops it913x_fe_ofdm_ops = {
+
+ .info = {
+ .name = "it913x-fe DVB-T",
+ .type = FE_OFDM,
+ .frequency_min = 51000000,
+ .frequency_max = 1680000000,
+ .frequency_stepsize = 62500,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .release = it913x_fe_release,
+
+ .init = it913x_fe_init,
+ .sleep = it913x_fe_sleep,
+
+ .set_frontend = it913x_fe_set_frontend,
+ .get_frontend = it913x_fe_get_frontend,
+
+ .read_status = it913x_fe_read_status,
+ .read_signal_strength = it913x_fe_read_signal_strength,
+ .read_snr = it913x_fe_read_snr,
+ .read_ber = it913x_fe_read_ber,
+ .read_ucblocks = it913x_fe_read_ucblocks,
+};
+
+MODULE_DESCRIPTION("it913x Frontend and it9137 tuner");
+MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+MODULE_VERSION("1.07");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/it913x-fe.h b/drivers/media/dvb/frontends/it913x-fe.h
new file mode 100644
index 00000000000..9d97f32e690
--- /dev/null
+++ b/drivers/media/dvb/frontends/it913x-fe.h
@@ -0,0 +1,196 @@
+/*
+ * Driver for it913x Frontend
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef IT913X_FE_H
+#define IT913X_FE_H
+
+#include <linux/dvb/frontend.h>
+#include "dvb_frontend.h"
+#if defined(CONFIG_DVB_IT913X_FE) || (defined(CONFIG_DVB_IT913X_FE_MODULE) && \
+defined(MODULE))
+extern struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap,
+ u8 i2c_addr, u8 adf, u8 type);
+#else
+static inline struct dvb_frontend *it913x_fe_attach(
+ struct i2c_adapter *i2c_adap, u8 i2c_addr, u8 adf, u8 type)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_IT913X_FE */
+
+#define I2C_BASE_ADDR 0x10
+#define DEV_0 0x0
+#define DEV_1 0x10
+#define PRO_LINK 0x0
+#define PRO_DMOD 0x1
+#define DEV_0_DMOD (PRO_DMOD << 0x7)
+#define DEV_1_DMOD (DEV_0_DMOD | DEV_1)
+#define CHIP2_I2C_ADDR 0x3a
+
+#define AFE_MEM0 0xfb24
+
+#define MP2_SW_RST 0xf99d
+#define MP2IF2_SW_RST 0xf9a4
+
+#define PADODPU 0xd827
+#define THIRDODPU 0xd828
+#define AGC_O_D 0xd829
+
+#define EP0_TX_EN 0xdd11
+#define EP0_TX_NAK 0xdd13
+#define EP4_TX_LEN_LSB 0xdd88
+#define EP4_TX_LEN_MSB 0xdd89
+#define EP4_MAX_PKT 0xdd0c
+#define EP5_TX_LEN_LSB 0xdd8a
+#define EP5_TX_LEN_MSB 0xdd8b
+#define EP5_MAX_PKT 0xdd0d
+
+#define IO_MUX_POWER_CLK 0xd800
+#define CLK_O_EN 0xd81a
+#define I2C_CLK 0xf103
+#define I2C_CLK_100 0x7
+#define I2C_CLK_400 0x1a
+
+#define D_TPSD_LOCK 0xf5a9
+#define MP2IF2_EN 0xf9a3
+#define MP2IF_SERIAL 0xf985
+#define TSIS_ENABLE 0xf9cd
+#define MP2IF2_HALF_PSB 0xf9a5
+#define MP2IF_STOP_EN 0xf9b5
+#define MPEG_FULL_SPEED 0xf990
+#define TOP_HOSTB_SER_MODE 0xd91c
+
+#define PID_RST 0xf992
+#define PID_EN 0xf993
+#define PID_INX_EN 0xf994
+#define PID_INX 0xf995
+#define PID_LSB 0xf996
+#define PID_MSB 0xf997
+
+#define MP2IF_MPEG_PAR_MODE 0xf986
+#define DCA_UPPER_CHIP 0xf731
+#define DCA_LOWER_CHIP 0xf732
+#define DCA_PLATCH 0xf730
+#define DCA_FPGA_LATCH 0xf778
+#define DCA_STAND_ALONE 0xf73c
+#define DCA_ENABLE 0xf776
+
+#define DVBT_INTEN 0xf41f
+#define DVBT_ENABLE 0xf41a
+#define HOSTB_DCA_LOWER 0xd91f
+#define HOSTB_MPEG_PAR_MODE 0xd91b
+#define HOSTB_MPEG_SER_MODE 0xd91c
+#define HOSTB_MPEG_SER_DO7 0xd91d
+#define HOSTB_DCA_UPPER 0xd91e
+#define PADMISCDR2 0xd830
+#define PADMISCDR4 0xd831
+#define PADMISCDR8 0xd832
+#define PADMISCDRSR 0xd833
+#define LOCK3_OUT 0xd8fd
+
+#define GPIOH1_O 0xd8af
+#define GPIOH1_EN 0xd8b0
+#define GPIOH1_ON 0xd8b1
+#define GPIOH3_O 0xd8b3
+#define GPIOH3_EN 0xd8b4
+#define GPIOH3_ON 0xd8b5
+#define GPIOH5_O 0xd8bb
+#define GPIOH5_EN 0xd8bc
+#define GPIOH5_ON 0xd8bd
+
+#define REG_TPSD_TX_MODE 0xf900
+#define REG_TPSD_GI 0xf901
+#define REG_TPSD_HIER 0xf902
+#define REG_TPSD_CONST 0xf903
+#define REG_BW 0xf904
+#define REG_PRIV 0xf905
+#define REG_TPSD_HP_CODE 0xf906
+#define REG_TPSD_LP_CODE 0xf907
+
+#define MP2IF_SYNC_LK 0xf999
+#define ADC_FREQ 0xf1cd
+
+#define TRIGGER_OFSM 0x0000
+/* COEFF registers run from 0x0001 to 0x0020 */
+#define COEFF_1_2048 0x0001
+#define XTAL_CLK 0x0025
+#define BFS_FCW 0x0029
+#define TPSD_LOCK 0x003c
+#define TRAINING_MODE 0x0040
+#define ADC_X_2 0x0045
+#define TUNER_ID 0x0046
+#define EMPTY_CHANNEL_STATUS 0x0047
+#define SIGNAL_LEVEL 0x0048
+#define SIGNAL_QUALITY 0x0049
+#define EST_SIGNAL_LEVEL 0x004a
+#define FREE_BAND 0x004b
+#define SUSPEND_FLAG 0x004c
+/* Built-in tuners */
+#define IT9137 0x38
+
+enum {
+ CMD_DEMOD_READ = 0,
+ CMD_DEMOD_WRITE,
+ CMD_TUNER_READ,
+ CMD_TUNER_WRITE,
+ CMD_REG_EEPROM_READ,
+ CMD_REG_EEPROM_WRITE,
+ CMD_DATA_READ,
+ CMD_VAR_READ = 8,
+ CMD_VAR_WRITE,
+ CMD_PLATFORM_GET,
+ CMD_PLATFORM_SET,
+ CMD_IP_CACHE,
+ CMD_IP_ADD,
+ CMD_IP_REMOVE,
+ CMD_PID_ADD,
+ CMD_PID_REMOVE,
+ CMD_SIPSI_GET,
+ CMD_SIPSI_MPE_RESET,
+ CMD_H_PID_ADD = 0x15,
+ CMD_H_PID_REMOVE,
+ CMD_ABORT,
+ CMD_IR_GET,
+ CMD_IR_SET,
+ CMD_FW_DOWNLOAD = 0x21,
+ CMD_QUERYINFO,
+ CMD_BOOT,
+ CMD_FW_DOWNLOAD_BEGIN,
+ CMD_FW_DOWNLOAD_END,
+ CMD_RUN_CODE,
+ CMD_SCATTER_READ = 0x28,
+ CMD_SCATTER_WRITE,
+ CMD_GENERIC_READ,
+ CMD_GENERIC_WRITE
+};
+
+enum {
+ READ_LONG,
+ WRITE_LONG,
+ READ_SHORT,
+ WRITE_SHORT,
+ READ_DATA,
+ WRITE_DATA,
+ WRITE_CMD,
+};
+
+#endif /* IT913X_FE_H */
diff --git a/drivers/media/dvb/frontends/lnbp22.c b/drivers/media/dvb/frontends/lnbp22.c
new file mode 100644
index 00000000000..84ad0390a4a
--- /dev/null
+++ b/drivers/media/dvb/frontends/lnbp22.c
@@ -0,0 +1,148 @@
+/*
+ * lnbp22.c - driver for lnb supply and control ic lnbp22
+ *
+ * Copyright (C) 2006 Dominik Kuhlen
+ * Based on lnbp21 driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ *
+ *
+ * the project's page is at http://www.linuxtv.org
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include "dvb_frontend.h"
+#include "lnbp22.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+
+#define dprintk(lvl, arg...) do { if (debug >= (lvl)) printk(arg); } while (0)
+
+struct lnbp22 {
+ u8 config[4];
+ struct i2c_adapter *i2c;
+};
+
+static int lnbp22_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
+{
+ struct lnbp22 *lnbp22 = (struct lnbp22 *)fe->sec_priv;
+ struct i2c_msg msg = {
+ .addr = 0x08,
+ .flags = 0,
+ .buf = (char *)&lnbp22->config,
+ .len = sizeof(lnbp22->config),
+ };
+
+ dprintk(1, "%s: %d (18V=%d 13V=%d)\n", __func__, voltage,
+ SEC_VOLTAGE_18, SEC_VOLTAGE_13);
+
+ lnbp22->config[3] = 0x60; /* Power down */
+ switch (voltage) {
+ case SEC_VOLTAGE_OFF:
+ break;
+ case SEC_VOLTAGE_13:
+ lnbp22->config[3] |= LNBP22_EN;
+ break;
+ case SEC_VOLTAGE_18:
+ lnbp22->config[3] |= (LNBP22_EN | LNBP22_VSEL);
+ break;
+ default:
+ return -EINVAL;
+	}
+
+	dprintk(1, "%s: 0x%02x\n", __func__, lnbp22->config[3]);
+ return (i2c_transfer(lnbp22->i2c, &msg, 1) == 1) ? 0 : -EIO;
+}
+
+static int lnbp22_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
+{
+ struct lnbp22 *lnbp22 = (struct lnbp22 *) fe->sec_priv;
+ struct i2c_msg msg = {
+ .addr = 0x08,
+ .flags = 0,
+ .buf = (char *)&lnbp22->config,
+ .len = sizeof(lnbp22->config),
+ };
+
+ dprintk(1, "%s: %d\n", __func__, (int)arg);
+ if (arg)
+ lnbp22->config[3] |= LNBP22_LLC;
+ else
+ lnbp22->config[3] &= ~LNBP22_LLC;
+
+ return (i2c_transfer(lnbp22->i2c, &msg, 1) == 1) ? 0 : -EIO;
+}
+
+static void lnbp22_release(struct dvb_frontend *fe)
+{
+ dprintk(1, "%s\n", __func__);
+ /* LNBP power off */
+ lnbp22_set_voltage(fe, SEC_VOLTAGE_OFF);
+
+ /* free data */
+ kfree(fe->sec_priv);
+ fe->sec_priv = NULL;
+}
+
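+/*
+ * Rough usage sketch from a card driver, once the demod frontend "fe"
+ * has been attached ("adapter->i2c" is a placeholder for the board's
+ * i2c adapter):
+ *
+ *	if (!lnbp22_attach(fe, &adapter->i2c))
+ *		printk(KERN_INFO "no LNBP22 found\n");
+ */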
+struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c)
+{
+ struct lnbp22 *lnbp22 = kmalloc(sizeof(struct lnbp22), GFP_KERNEL);
+ if (!lnbp22)
+ return NULL;
+
+ /* default configuration */
+ lnbp22->config[0] = 0x00; /* ? */
+ lnbp22->config[1] = 0x28; /* ? */
+ lnbp22->config[2] = 0x48; /* ? */
+ lnbp22->config[3] = 0x60; /* Power down */
+ lnbp22->i2c = i2c;
+ fe->sec_priv = lnbp22;
+
+ /* detect if it is present or not */
+ if (lnbp22_set_voltage(fe, SEC_VOLTAGE_OFF)) {
+ dprintk(0, "%s LNBP22 not found\n", __func__);
+ kfree(lnbp22);
+ fe->sec_priv = NULL;
+ return NULL;
+ }
+
+ /* install release callback */
+ fe->ops.release_sec = lnbp22_release;
+
+ /* override frontend ops */
+ fe->ops.set_voltage = lnbp22_set_voltage;
+ fe->ops.enable_high_lnb_voltage = lnbp22_enable_high_lnb_voltage;
+
+ return fe;
+}
+EXPORT_SYMBOL(lnbp22_attach);
+
+MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22");
+MODULE_AUTHOR("Dominik Kuhlen");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/lnbp22.h b/drivers/media/dvb/frontends/lnbp22.h
new file mode 100644
index 00000000000..63e2dec7e68
--- /dev/null
+++ b/drivers/media/dvb/frontends/lnbp22.h
@@ -0,0 +1,57 @@
+/*
+ * lnbp22.h - driver for lnb supply and control ic lnbp22
+ *
+ * Copyright (C) 2006 Dominik Kuhlen
+ * Based on lnbp21.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ *
+ *
+ * the project's page is at http://www.linuxtv.org
+ */
+
+#ifndef _LNBP22_H
+#define _LNBP22_H
+
+/* Enable */
+#define LNBP22_EN 0x10
+/* Voltage selection */
+#define LNBP22_VSEL 0x02
+/* Plus 1 Volt Bit */
+#define LNBP22_LLC 0x01
+
+#include <linux/dvb/frontend.h>
+
+#if defined(CONFIG_DVB_LNBP22) || \
+ (defined(CONFIG_DVB_LNBP22_MODULE) && defined(MODULE))
+/*
+ * Attach an LNBP22 LNB supply/control IC to the given frontend,
+ * using the given i2c adapter.
+ */
+extern struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_LNBP22 */
+
+#endif /* _LNBP22_H */
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c
index d70eee00f33..117a56926dc 100644
--- a/drivers/media/dvb/frontends/stb0899_algo.c
+++ b/drivers/media/dvb/frontends/stb0899_algo.c
@@ -358,6 +358,9 @@ static enum stb0899_status stb0899_check_data(struct stb0899_state *state)
else
dataTime = 500;
+ /* clear previous failed END_LOOPVIT */
+ stb0899_read_reg(state, STB0899_VSTATUS);
+
stb0899_write_reg(state, STB0899_DSTATUS2, 0x00); /* force search loop */
while (1) {
/* WARNING! VIT LOCKED has to be tested before VIT_END_LOOOP */
diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c
index 37a222d9ddb..8408ef877b4 100644
--- a/drivers/media/dvb/frontends/stb0899_drv.c
+++ b/drivers/media/dvb/frontends/stb0899_drv.c
@@ -706,7 +706,7 @@ static int stb0899_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_ma
stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
for (i = 0; i < cmd->msg_len; i++) {
/* wait for FIFO empty */
- if (stb0899_wait_diseqc_fifo_empty(state, 10) < 0)
+ if (stb0899_wait_diseqc_fifo_empty(state, 100) < 0)
return -ETIMEDOUT;
stb0899_write_reg(state, STB0899_DISFIFO, cmd->msg[i]);
@@ -1426,9 +1426,9 @@ static void stb0899_set_iterations(struct stb0899_state *state)
if (iter_scale > config->ldpc_max_iter)
iter_scale = config->ldpc_max_iter;
- reg = STB0899_READ_S2REG(STB0899_S2DEMOD, MAX_ITER);
+ reg = STB0899_READ_S2REG(STB0899_S2FEC, MAX_ITER);
STB0899_SETFIELD_VAL(MAX_ITERATIONS, reg, iter_scale);
- stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_MAX_ITER, STB0899_OFF0_MAX_ITER, reg);
+ stb0899_write_s2reg(state, STB0899_S2FEC, STB0899_BASE_MAX_ITER, STB0899_OFF0_MAX_ITER, reg);
}
static enum dvbfe_search stb0899_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
diff --git a/drivers/media/dvb/frontends/stv0288.c b/drivers/media/dvb/frontends/stv0288.c
index 8e0cfadba68..0aa3962ff18 100644
--- a/drivers/media/dvb/frontends/stv0288.c
+++ b/drivers/media/dvb/frontends/stv0288.c
@@ -127,6 +127,11 @@ static int stv0288_set_symbolrate(struct dvb_frontend *fe, u32 srate)
if ((srate < 1000000) || (srate > 45000000))
return -EINVAL;
+ stv0288_writeregI(state, 0x22, 0);
+ stv0288_writeregI(state, 0x23, 0);
+ stv0288_writeregI(state, 0x2b, 0xff);
+ stv0288_writeregI(state, 0x2c, 0xf7);
+
temp = (unsigned int)srate / 1000;
temp = temp * 32768;
@@ -461,6 +466,7 @@ static int stv0288_set_frontend(struct dvb_frontend *fe,
char tm;
unsigned char tda[3];
+ u8 reg, time_out = 0;
dprintk("%s : FE_SET_FRONTEND\n", __func__);
@@ -488,22 +494,29 @@ static int stv0288_set_frontend(struct dvb_frontend *fe,
/* Carrier lock control register */
stv0288_writeregI(state, 0x15, 0xc5);
- tda[0] = 0x2b; /* CFRM */
tda[2] = 0x0; /* CFRL */
- for (tm = -6; tm < 7;) {
+ for (tm = -9; tm < 7;) {
/* Viterbi status */
- if (stv0288_readreg(state, 0x24) & 0x8)
- break;
-
- tda[2] += 40;
- if (tda[2] < 40)
+ reg = stv0288_readreg(state, 0x24);
+ if (reg & 0x8)
+ break;
+ if (reg & 0x80) {
+ time_out++;
+ if (time_out > 10)
+ break;
+ tda[2] += 40;
+ if (tda[2] < 40)
+ tm++;
+ } else {
tm++;
+ tda[2] = 0;
+ time_out = 0;
+ }
tda[1] = (unsigned char)tm;
stv0288_writeregI(state, 0x2b, tda[1]);
stv0288_writeregI(state, 0x2c, tda[2]);
udelay(30);
}
-
state->tuner_frequency = c->frequency;
state->fec_inner = FEC_AUTO;
state->symbol_rate = c->symbol_rate;
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 52d8712411e..ebda41936b9 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -3463,9 +3463,15 @@ static enum dvbfe_search stv090x_search(struct dvb_frontend *fe, struct dvb_fron
static int stv090x_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct stv090x_state *state = fe->demodulator_priv;
- u32 reg;
+ u32 reg, dstatus;
u8 search_state;
+ *status = 0;
+
+ dstatus = STV090x_READ_DEMOD(state, DSTATUS);
+ if (STV090x_GETFIELD_Px(dstatus, CAR_LOCK_FIELD))
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
+
reg = STV090x_READ_DEMOD(state, DMDSTATE);
search_state = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
@@ -3474,41 +3480,30 @@ static int stv090x_read_status(struct dvb_frontend *fe, enum fe_status *status)
case 1: /* first PLH detected */
default:
dprintk(FE_DEBUG, 1, "Status: Unlocked (Searching ..)");
- *status = 0;
break;
case 2: /* DVB-S2 mode */
dprintk(FE_DEBUG, 1, "Delivery system: DVB-S2");
- reg = STV090x_READ_DEMOD(state, DSTATUS);
- if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
+ if (STV090x_GETFIELD_Px(dstatus, LOCK_DEFINITIF_FIELD)) {
reg = STV090x_READ_DEMOD(state, PDELSTATUS1);
if (STV090x_GETFIELD_Px(reg, PKTDELIN_LOCK_FIELD)) {
+ *status |= FE_HAS_VITERBI;
reg = STV090x_READ_DEMOD(state, TSSTATUS);
- if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD)) {
- *status = FE_HAS_SIGNAL |
- FE_HAS_CARRIER |
- FE_HAS_VITERBI |
- FE_HAS_SYNC |
- FE_HAS_LOCK;
- }
+ if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD))
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
}
}
break;
case 3: /* DVB-S1/legacy mode */
dprintk(FE_DEBUG, 1, "Delivery system: DVB-S");
- reg = STV090x_READ_DEMOD(state, DSTATUS);
- if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
+ if (STV090x_GETFIELD_Px(dstatus, LOCK_DEFINITIF_FIELD)) {
reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
if (STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD)) {
+ *status |= FE_HAS_VITERBI;
reg = STV090x_READ_DEMOD(state, TSSTATUS);
- if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD)) {
- *status = FE_HAS_SIGNAL |
- FE_HAS_CARRIER |
- FE_HAS_VITERBI |
- FE_HAS_SYNC |
- FE_HAS_LOCK;
- }
+ if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD))
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
}
}
break;
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 93f6a75c238..7f105946a43 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -206,15 +206,16 @@ static struct init_tab {
static struct pll_tab {
u32 clk_freq_khz;
u32 if_freq_khz;
- u8 m, n, p;
} pll_tab[] = {
- { TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 },
- { TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 },
- { TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 },
- { TDA10048_CLK_16000, TDA10048_IF_3800, 10, 3, 0 },
- { TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 },
- { TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 },
- { TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 },
+ { TDA10048_CLK_4000, TDA10048_IF_36130 },
+ { TDA10048_CLK_16000, TDA10048_IF_3300 },
+ { TDA10048_CLK_16000, TDA10048_IF_3500 },
+ { TDA10048_CLK_16000, TDA10048_IF_3800 },
+ { TDA10048_CLK_16000, TDA10048_IF_4000 },
+ { TDA10048_CLK_16000, TDA10048_IF_4300 },
+ { TDA10048_CLK_16000, TDA10048_IF_4500 },
+ { TDA10048_CLK_16000, TDA10048_IF_5000 },
+ { TDA10048_CLK_16000, TDA10048_IF_36130 },
};
static int tda10048_writereg(struct tda10048_state *state, u8 reg, u8 data)
@@ -460,9 +461,6 @@ static int tda10048_set_if(struct dvb_frontend *fe, enum fe_bandwidth bw)
state->freq_if_hz = pll_tab[i].if_freq_khz * 1000;
state->xtal_hz = pll_tab[i].clk_freq_khz * 1000;
- state->pll_mfactor = pll_tab[i].m;
- state->pll_nfactor = pll_tab[i].n;
- state->pll_pfactor = pll_tab[i].p;
break;
}
}
@@ -781,6 +779,10 @@ static int tda10048_init(struct dvb_frontend *fe)
dprintk(1, "%s()\n", __func__);
+ /* PLL */
+ init_tab[4].data = (u8)(state->pll_mfactor);
+ init_tab[5].data = (u8)(state->pll_nfactor) | 0x40;
+
/* Apply register defaults */
for (i = 0; i < ARRAY_SIZE(init_tab); i++)
tda10048_writereg(state, init_tab[i].reg, init_tab[i].data);
@@ -1123,7 +1125,7 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
/* setup the state and clone the config */
memcpy(&state->config, config, sizeof(*config));
state->i2c = i2c;
- state->fwloaded = 0;
+ state->fwloaded = config->no_firmware;
state->bandwidth = BANDWIDTH_8_MHZ;
/* check if the demod is present */
@@ -1135,6 +1137,17 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
+ /* set pll */
+ if (config->set_pll) {
+ state->pll_mfactor = config->pll_m;
+ state->pll_nfactor = config->pll_n;
+ state->pll_pfactor = config->pll_p;
+ } else {
+ state->pll_mfactor = 10;
+ state->pll_nfactor = 3;
+ state->pll_pfactor = 0;
+ }
+
 	/* Establish any defaults the user didn't pass */
tda10048_establish_defaults(&state->frontend);
diff --git a/drivers/media/dvb/frontends/tda10048.h b/drivers/media/dvb/frontends/tda10048.h
index 8828ceaf74b..fb2ef5ac948 100644
--- a/drivers/media/dvb/frontends/tda10048.h
+++ b/drivers/media/dvb/frontends/tda10048.h
@@ -51,6 +51,7 @@ struct tda10048_config {
#define TDA10048_IF_4300 4300
#define TDA10048_IF_4500 4500
#define TDA10048_IF_4750 4750
+#define TDA10048_IF_5000 5000
#define TDA10048_IF_36130 36130
u16 dtv6_if_freq_khz;
u16 dtv7_if_freq_khz;
@@ -62,6 +63,13 @@ struct tda10048_config {
/* Disable I2C gate access */
u8 disable_gate_access;
+
+ bool no_firmware;
+
+ bool set_pll;
+ u8 pll_m;
+ u8 pll_p;
+ u8 pll_n;
};
#if defined(CONFIG_DVB_TDA10048) || \
diff --git a/drivers/media/dvb/frontends/tda10071.c b/drivers/media/dvb/frontends/tda10071.c
new file mode 100644
index 00000000000..0c37434d19e
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda10071.c
@@ -0,0 +1,1269 @@
+/*
+ * NXP TDA10071 + Conexant CX24118A DVB-S/S2 demodulator + tuner driver
+ *
+ * Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "tda10071_priv.h"
+
+int tda10071_debug;
+module_param_named(debug, tda10071_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+
+static struct dvb_frontend_ops tda10071_ops;
+
+/* write multiple registers */
+static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
+ int len)
+{
+ int ret;
+ u8 buf[len+1];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.i2c_address,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ warn("i2c wr failed=%d reg=%02x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+ return ret;
+}
+
+/* read multiple registers */
+static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
+ int len)
+{
+ int ret;
+ u8 buf[len];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg.i2c_address,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->cfg.i2c_address,
+ .flags = I2C_M_RD,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+ ret = 0;
+ } else {
+ warn("i2c rd failed=%d reg=%02x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+ return ret;
+}
+
+/* write single register */
+static int tda10071_wr_reg(struct tda10071_priv *priv, u8 reg, u8 val)
+{
+ return tda10071_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int tda10071_rd_reg(struct tda10071_priv *priv, u8 reg, u8 *val)
+{
+ return tda10071_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+ u8 tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = tda10071_rd_regs(priv, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ tmp &= ~mask;
+ val |= tmp;
+ }
+
+ return tda10071_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register with mask */
+int tda10071_rd_reg_mask(struct tda10071_priv *priv, u8 reg, u8 *val, u8 mask)
+{
+ int ret, i;
+ u8 tmp;
+
+ ret = tda10071_rd_regs(priv, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = tmp >> i;
+
+ return 0;
+}
+
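+/*
+ * The two mask helpers above give read-modify-write style access:
+ * tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01) clears only bit 0 of
+ * register 0x47, and tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x02)
+ * returns bit 1 of register 0x47 shifted down into bit 0 of tmp.
+ */
+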
+/* execute firmware command */
+static int tda10071_cmd_execute(struct tda10071_priv *priv,
+ struct tda10071_cmd *cmd)
+{
+ int ret, i;
+ u8 tmp;
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ /* write cmd and args for firmware */
+ ret = tda10071_wr_regs(priv, 0x00, cmd->args, cmd->len);
+ if (ret)
+ goto error;
+
+ /* start cmd execution */
+ ret = tda10071_wr_reg(priv, 0x1f, 1);
+ if (ret)
+ goto error;
+
+	/* wait for cmd execution to terminate */
+ for (i = 1000, tmp = 1; i && tmp; i--) {
+ ret = tda10071_rd_reg(priv, 0x1f, &tmp);
+ if (ret)
+ goto error;
+
+ usleep_range(200, 5000);
+ }
+
+ dbg("%s: loop=%d", __func__, i);
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
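+/*
+ * tda10071_cmd_execute() above implements the firmware mailbox: the
+ * opcode and arguments are written to registers 0x00.., register 0x1f
+ * is set to 1 to start the command, and 0x1f is polled until the
+ * firmware clears it. The smallest command is a bare opcode, e.g.:
+ *
+ *	cmd.args[0x00] = CMD_GET_FW_VERSION;
+ *	cmd.len = 0x01;
+ *	ret = tda10071_cmd_execute(priv, &cmd);
+ */
+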
+static int tda10071_set_tone(struct dvb_frontend *fe,
+ fe_sec_tone_mode_t fe_sec_tone_mode)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret;
+ u8 tone;
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ dbg("%s: tone_mode=%d", __func__, fe_sec_tone_mode);
+
+ switch (fe_sec_tone_mode) {
+ case SEC_TONE_ON:
+ tone = 1;
+ break;
+ case SEC_TONE_OFF:
+ tone = 0;
+ break;
+ default:
+ dbg("%s: invalid fe_sec_tone_mode", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ cmd.args[0x00] = CMD_LNB_PCB_CONFIG;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = 0x00;
+ cmd.args[0x03] = 0x00;
+ cmd.args[0x04] = tone;
+ cmd.len = 0x05;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t fe_sec_voltage)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret;
+ u8 voltage;
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ dbg("%s: voltage=%d", __func__, fe_sec_voltage);
+
+ switch (fe_sec_voltage) {
+ case SEC_VOLTAGE_13:
+ voltage = 0;
+ break;
+ case SEC_VOLTAGE_18:
+ voltage = 1;
+ break;
+ case SEC_VOLTAGE_OFF:
+ voltage = 0;
+ break;
+ default:
+ dbg("%s: invalid fe_sec_voltage", __func__);
+ ret = -EINVAL;
+ goto error;
+	}
+
+ cmd.args[0x00] = CMD_LNB_SET_DC_LEVEL;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = voltage;
+ cmd.len = 0x03;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *diseqc_cmd)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret, i;
+ u8 tmp;
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ dbg("%s: msg_len=%d", __func__, diseqc_cmd->msg_len);
+
+ if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 16) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* wait LNB TX */
+ for (i = 500, tmp = 0; i && !tmp; i--) {
+ ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
+ if (ret)
+ goto error;
+
+ usleep_range(10000, 20000);
+ }
+
+ dbg("%s: loop=%d", __func__, i);
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_LNB_SEND_DISEQC;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = 0;
+ cmd.args[0x03] = 0;
+ cmd.args[0x04] = 2;
+ cmd.args[0x05] = 0;
+ cmd.args[0x06] = diseqc_cmd->msg_len;
+ memcpy(&cmd.args[0x07], diseqc_cmd->msg, diseqc_cmd->msg_len);
+ cmd.len = 0x07 + diseqc_cmd->msg_len;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
+ struct dvb_diseqc_slave_reply *reply)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret, i;
+ u8 tmp;
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ dbg("%s:", __func__);
+
+ /* wait LNB RX */
+ for (i = 500, tmp = 0; i && !tmp; i--) {
+ ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x02);
+ if (ret)
+ goto error;
+
+ usleep_range(10000, 20000);
+ }
+
+ dbg("%s: loop=%d", __func__, i);
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* reply len */
+ ret = tda10071_rd_reg(priv, 0x46, &tmp);
+ if (ret)
+ goto error;
+
+	reply->msg_len = tmp & 0x1f; /* [4:0] */
+ if (reply->msg_len > sizeof(reply->msg))
+ reply->msg_len = sizeof(reply->msg); /* truncate API max */
+
+ /* read reply */
+ cmd.args[0x00] = CMD_LNB_UPDATE_REPLY;
+ cmd.args[0x01] = 0;
+ cmd.len = 0x02;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ ret = tda10071_rd_regs(priv, cmd.len, reply->msg, reply->msg_len);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
+ fe_sec_mini_cmd_t fe_sec_mini_cmd)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret, i;
+ u8 tmp, burst;
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ dbg("%s: fe_sec_mini_cmd=%d", __func__, fe_sec_mini_cmd);
+
+ switch (fe_sec_mini_cmd) {
+ case SEC_MINI_A:
+ burst = 0;
+ break;
+ case SEC_MINI_B:
+ burst = 1;
+ break;
+ default:
+ dbg("%s: invalid fe_sec_mini_cmd", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* wait LNB TX */
+ for (i = 500, tmp = 0; i && !tmp; i--) {
+ ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
+ if (ret)
+ goto error;
+
+ usleep_range(10000, 20000);
+ }
+
+ dbg("%s: loop=%d", __func__, i);
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_LNB_SEND_TONEBURST;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = burst;
+ cmd.len = 0x03;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 tmp;
+
+ *status = 0;
+
+ if (!priv->warm) {
+ ret = 0;
+ goto error;
+ }
+
+ ret = tda10071_rd_reg(priv, 0x39, &tmp);
+ if (ret)
+ goto error;
+
+ if (tmp & 0x01) /* tuner PLL */
+ *status |= FE_HAS_SIGNAL;
+ if (tmp & 0x02) /* demod PLL */
+ *status |= FE_HAS_CARRIER;
+	if (tmp & 0x04) /* viterbi or LDPC */
+ *status |= FE_HAS_VITERBI;
+ if (tmp & 0x08) /* RS or BCH */
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
+
+ priv->fe_status = *status;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 buf[2];
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ *snr = 0;
+ ret = 0;
+ goto error;
+ }
+
+ ret = tda10071_rd_regs(priv, 0x3a, buf, 2);
+ if (ret)
+ goto error;
+
+ /* Es/No dBx10 */
+ *snr = buf[0] << 8 | buf[1];
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
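+/*
+ * Signal strength: the raw power estimate in register 0x50 is clamped
+ * to 181..236 (about -75..-20 dBm, 1 dB per step) and scaled linearly
+ * to 0x0000-0xffff; a reading of 208 (about -48 dBm), for example,
+ * maps to (208 - 181) * 0xffff / 55 = 32171.
+ */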
+static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret;
+ u8 tmp;
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ *strength = 0;
+ ret = 0;
+ goto error;
+ }
+
+ cmd.args[0x00] = CMD_GET_AGCACC;
+ cmd.args[0x01] = 0;
+ cmd.len = 0x02;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ /* input power estimate dBm */
+ ret = tda10071_rd_reg(priv, 0x50, &tmp);
+ if (ret)
+ goto error;
+
+ if (tmp < 181)
+ tmp = 181; /* -75 dBm */
+ else if (tmp > 236)
+ tmp = 236; /* -20 dBm */
+
+ /* scale value to 0x0000-0xffff */
+ *strength = (tmp-181) * 0xffff / (236-181);
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret, i, len;
+ u8 tmp, reg, buf[8];
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ *ber = priv->ber = 0;
+ ret = 0;
+ goto error;
+ }
+
+ switch (priv->delivery_system) {
+ case SYS_DVBS:
+ reg = 0x4c;
+ len = 8;
+ i = 1;
+ break;
+ case SYS_DVBS2:
+ reg = 0x4d;
+ len = 4;
+ i = 0;
+ break;
+ default:
+ *ber = priv->ber = 0;
+ return 0;
+ }
+
+ ret = tda10071_rd_reg(priv, reg, &tmp);
+ if (ret)
+ goto error;
+
+ if (priv->meas_count[i] == tmp) {
+ dbg("%s: meas not ready=%02x", __func__, tmp);
+ *ber = priv->ber;
+ return 0;
+ } else {
+ priv->meas_count[i] = tmp;
+ }
+
+ cmd.args[0x00] = CMD_BER_UPDATE_COUNTERS;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = i;
+ cmd.len = 0x03;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ ret = tda10071_rd_regs(priv, cmd.len, buf, len);
+ if (ret)
+ goto error;
+
+ if (priv->delivery_system == SYS_DVBS) {
+ *ber = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+ priv->ucb += (buf[4] << 8) | buf[5];
+ } else {
+ *ber = (buf[0] << 8) | buf[1];
+ }
+ priv->ber = *ber;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ int ret = 0;
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ *ucblocks = 0;
+ goto error;
+ }
+
+ /* UCB is updated when BER is read. Assume BER is read anyway. */
+
+ *ucblocks = priv->ucb;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i;
+ u8 mode, rolloff, pilot, inversion, div;
+
+ dbg("%s: delivery_system=%d modulation=%d frequency=%d " \
+ "symbol_rate=%d inversion=%d pilot=%d rolloff=%d", __func__,
+ c->delivery_system, c->modulation, c->frequency,
+ c->symbol_rate, c->inversion, c->pilot, c->rolloff);
+
+ priv->delivery_system = SYS_UNDEFINED;
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ switch (c->inversion) {
+ case INVERSION_OFF:
+ inversion = 1;
+ break;
+ case INVERSION_ON:
+ inversion = 0;
+ break;
+ case INVERSION_AUTO:
+ /* 2 = auto; try first on then off
+ * 3 = auto; try first off then on */
+ inversion = 3;
+ break;
+ default:
+ dbg("%s: invalid inversion", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ rolloff = 0;
+ pilot = 2;
+ break;
+ case SYS_DVBS2:
+ switch (c->rolloff) {
+ case ROLLOFF_20:
+ rolloff = 2;
+ break;
+ case ROLLOFF_25:
+ rolloff = 1;
+ break;
+ case ROLLOFF_35:
+ rolloff = 0;
+ break;
+ case ROLLOFF_AUTO:
+ default:
+ dbg("%s: invalid rolloff", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (c->pilot) {
+ case PILOT_OFF:
+ pilot = 0;
+ break;
+ case PILOT_ON:
+ pilot = 1;
+ break;
+ case PILOT_AUTO:
+ pilot = 2;
+ break;
+ default:
+ dbg("%s: invalid pilot", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+ break;
+ default:
+ dbg("%s: invalid delivery_system", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
+ if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
+ c->modulation == TDA10071_MODCOD[i].modulation &&
+ c->fec_inner == TDA10071_MODCOD[i].fec) {
+ mode = TDA10071_MODCOD[i].val;
+ dbg("%s: mode found=%02x", __func__, mode);
+ break;
+ }
+ }
+
+ if (mode == 0xff) {
+ dbg("%s: invalid parameter combination", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (c->symbol_rate <= 5000000)
+ div = 14;
+ else
+ div = 4;
+
+ ret = tda10071_wr_reg(priv, 0x81, div);
+ if (ret)
+ goto error;
+
+ ret = tda10071_wr_reg(priv, 0xe3, div);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_CHANGE_CHANNEL;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = mode;
+ cmd.args[0x03] = (c->frequency >> 16) & 0xff;
+ cmd.args[0x04] = (c->frequency >> 8) & 0xff;
+ cmd.args[0x05] = (c->frequency >> 0) & 0xff;
+ cmd.args[0x06] = ((c->symbol_rate / 1000) >> 8) & 0xff;
+ cmd.args[0x07] = ((c->symbol_rate / 1000) >> 0) & 0xff;
+ cmd.args[0x08] = (tda10071_ops.info.frequency_tolerance >> 8) & 0xff;
+ cmd.args[0x09] = (tda10071_ops.info.frequency_tolerance >> 0) & 0xff;
+ cmd.args[0x0a] = rolloff;
+ cmd.args[0x0b] = inversion;
+ cmd.args[0x0c] = pilot;
+ cmd.args[0x0d] = 0x00;
+ cmd.args[0x0e] = 0x00;
+ cmd.len = 0x0f;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ priv->delivery_system = c->delivery_system;
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i;
+ u8 buf[5], tmp;
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ ret = tda10071_rd_regs(priv, 0x30, buf, 5);
+ if (ret)
+ goto error;
+
+ tmp = buf[0] & 0x3f;
+ for (i = 0; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
+ if (tmp == TDA10071_MODCOD[i].val) {
+ c->modulation = TDA10071_MODCOD[i].modulation;
+ c->fec_inner = TDA10071_MODCOD[i].fec;
+ c->delivery_system = TDA10071_MODCOD[i].delivery_system;
+ }
+ }
+
+ switch ((buf[1] >> 0) & 0x01) {
+ case 0:
+ c->inversion = INVERSION_OFF;
+ break;
+ case 1:
+ c->inversion = INVERSION_ON;
+ break;
+ }
+
+ switch ((buf[1] >> 7) & 0x01) {
+ case 0:
+ c->pilot = PILOT_OFF;
+ break;
+ case 1:
+ c->pilot = PILOT_ON;
+ break;
+ }
+
+ c->frequency = (buf[2] << 16) | (buf[3] << 8) | (buf[4] << 0);
+
+ ret = tda10071_rd_regs(priv, 0x52, buf, 3);
+ if (ret)
+ goto error;
+
+ c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_init(struct dvb_frontend *fe)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret, i, len, remaining, fw_size;
+ const struct firmware *fw;
+ u8 *fw_file = TDA10071_DEFAULT_FIRMWARE;
+ u8 tmp, buf[4];
+ struct tda10071_reg_val_mask tab[] = {
+ { 0xcd, 0x00, 0x07 },
+ { 0x80, 0x00, 0x02 },
+ { 0xcd, 0x00, 0xc0 },
+ { 0xce, 0x00, 0x1b },
+ { 0x9d, 0x00, 0x01 },
+ { 0x9d, 0x00, 0x02 },
+ { 0x9e, 0x00, 0x01 },
+ { 0x87, 0x00, 0x80 },
+ { 0xce, 0x00, 0x08 },
+ { 0xce, 0x00, 0x10 },
+ };
+ struct tda10071_reg_val_mask tab2[] = {
+ { 0xf1, 0x70, 0xff },
+ { 0x88, priv->cfg.pll_multiplier, 0x3f },
+ { 0x89, 0x00, 0x10 },
+ { 0x89, 0x10, 0x10 },
+ { 0xc0, 0x01, 0x01 },
+ { 0xc0, 0x00, 0x01 },
+ { 0xe0, 0xff, 0xff },
+ { 0xe0, 0x00, 0xff },
+ { 0x96, 0x1e, 0x7e },
+ { 0x8b, 0x08, 0x08 },
+ { 0x8b, 0x00, 0x08 },
+ { 0x8f, 0x1a, 0x7e },
+ { 0x8c, 0x68, 0xff },
+ { 0x8d, 0x08, 0xff },
+ { 0x8e, 0x4c, 0xff },
+ { 0x8f, 0x01, 0x01 },
+ { 0x8b, 0x04, 0x04 },
+ { 0x8b, 0x00, 0x04 },
+ { 0x87, 0x05, 0x07 },
+ { 0x80, 0x00, 0x20 },
+ { 0xc8, 0x01, 0xff },
+ { 0xb4, 0x47, 0xff },
+ { 0xb5, 0x9c, 0xff },
+ { 0xb6, 0x7d, 0xff },
+ { 0xba, 0x00, 0x03 },
+ { 0xb7, 0x47, 0xff },
+ { 0xb8, 0x9c, 0xff },
+ { 0xb9, 0x7d, 0xff },
+ { 0xba, 0x00, 0x0c },
+ { 0xc8, 0x00, 0xff },
+ { 0xcd, 0x00, 0x04 },
+ { 0xcd, 0x00, 0x20 },
+ { 0xe8, 0x02, 0xff },
+ { 0xcf, 0x20, 0xff },
+ { 0x9b, 0xd7, 0xff },
+ { 0x9a, 0x01, 0x03 },
+ { 0xa8, 0x05, 0x0f },
+ { 0xa8, 0x65, 0xf0 },
+ { 0xa6, 0xa0, 0xf0 },
+ { 0x9d, 0x50, 0xfc },
+ { 0x9e, 0x20, 0xe0 },
+ { 0xa3, 0x1c, 0x7c },
+ { 0xd5, 0x03, 0x03 },
+ };
+
+ /* firmware status */
+ ret = tda10071_rd_reg(priv, 0x51, &tmp);
+ if (ret)
+ goto error;
+
+ if (!tmp) {
+ /* warm state - wake up device from sleep */
+ priv->warm = 1;
+
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = tda10071_wr_reg_mask(priv, tab[i].reg,
+ tab[i].val, tab[i].mask);
+ if (ret)
+ goto error;
+ }
+
+ cmd.args[0x00] = CMD_SET_SLEEP_MODE;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = 0;
+ cmd.len = 0x03;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+ } else {
+ /* cold state - try to download firmware */
+ priv->warm = 0;
+
+		/* request the firmware; this call blocks and may time out */
+ ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+ if (ret) {
+ err("did not find the firmware file. (%s) "
+ "Please see linux/Documentation/dvb/ for more" \
+ " details on firmware-problems. (%d)",
+ fw_file, ret);
+ goto error;
+ }
+
+ /* init */
+ for (i = 0; i < ARRAY_SIZE(tab2); i++) {
+ ret = tda10071_wr_reg_mask(priv, tab2[i].reg,
+ tab2[i].val, tab2[i].mask);
+ if (ret)
+ goto error_release_firmware;
+ }
+
+ /* download firmware */
+ ret = tda10071_wr_reg(priv, 0xe0, 0x7f);
+ if (ret)
+ goto error_release_firmware;
+
+ ret = tda10071_wr_reg(priv, 0xf7, 0x81);
+ if (ret)
+ goto error_release_firmware;
+
+ ret = tda10071_wr_reg(priv, 0xf8, 0x00);
+ if (ret)
+ goto error_release_firmware;
+
+ ret = tda10071_wr_reg(priv, 0xf9, 0x00);
+ if (ret)
+ goto error_release_firmware;
+
+ info("found a '%s' in cold state, will try to load a firmware",
+ tda10071_ops.info.name);
+
+ info("downloading firmware from file '%s'", fw_file);
+
+ /* do not download last byte */
+ fw_size = fw->size - 1;
+
+ for (remaining = fw_size; remaining > 0;
+ remaining -= (priv->cfg.i2c_wr_max - 1)) {
+ len = remaining;
+ if (len > (priv->cfg.i2c_wr_max - 1))
+ len = (priv->cfg.i2c_wr_max - 1);
+
+ ret = tda10071_wr_regs(priv, 0xfa,
+ (u8 *) &fw->data[fw_size - remaining], len);
+ if (ret) {
+ err("firmware download failed=%d", ret);
+ goto error_release_firmware;
+ }
+ }
+ release_firmware(fw);
+
+ ret = tda10071_wr_reg(priv, 0xf7, 0x0c);
+ if (ret)
+ goto error;
+
+ ret = tda10071_wr_reg(priv, 0xe0, 0x00);
+ if (ret)
+ goto error;
+
+ /* wait for the firmware to start */
+ msleep(250);
+
+ /* firmware status */
+ ret = tda10071_rd_reg(priv, 0x51, &tmp);
+ if (ret)
+ goto error;
+
+ if (tmp) {
+ info("firmware did not run");
+ ret = -EFAULT;
+ goto error;
+ } else {
+ priv->warm = 1;
+ }
+
+ cmd.args[0x00] = CMD_GET_FW_VERSION;
+ cmd.len = 0x01;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ ret = tda10071_rd_regs(priv, cmd.len, buf, 4);
+ if (ret)
+ goto error;
+
+ info("firmware version %d.%d.%d.%d",
+ buf[0], buf[1], buf[2], buf[3]);
+ info("found a '%s' in warm state.", tda10071_ops.info.name);
+
+ ret = tda10071_rd_regs(priv, 0x81, buf, 2);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_DEMOD_INIT;
+ cmd.args[0x01] = ((priv->cfg.xtal / 1000) >> 8) & 0xff;
+ cmd.args[0x02] = ((priv->cfg.xtal / 1000) >> 0) & 0xff;
+ cmd.args[0x03] = buf[0];
+ cmd.args[0x04] = buf[1];
+ cmd.args[0x05] = priv->cfg.pll_multiplier;
+ cmd.args[0x06] = priv->cfg.spec_inv;
+ cmd.args[0x07] = 0x00;
+ cmd.len = 0x08;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_TUNER_INIT;
+ cmd.args[0x01] = 0x00;
+ cmd.args[0x02] = 0x00;
+ cmd.args[0x03] = 0x00;
+ cmd.args[0x04] = 0x00;
+ cmd.args[0x05] = 0x14;
+ cmd.args[0x06] = 0x00;
+ cmd.args[0x07] = 0x03;
+ cmd.args[0x08] = 0x02;
+ cmd.args[0x09] = 0x02;
+ cmd.args[0x0a] = 0x00;
+ cmd.args[0x0b] = 0x00;
+ cmd.args[0x0c] = 0x00;
+ cmd.args[0x0d] = 0x00;
+ cmd.args[0x0e] = 0x00;
+ cmd.len = 0x0f;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_MPEG_CONFIG;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = priv->cfg.ts_mode;
+ cmd.args[0x03] = 0x00;
+ cmd.args[0x04] = 0x04;
+ cmd.args[0x05] = 0x00;
+ cmd.len = 0x06;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ ret = tda10071_wr_reg_mask(priv, 0xf0, 0x01, 0x01);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_LNB_CONFIG;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = 150;
+ cmd.args[0x03] = 3;
+ cmd.args[0x04] = 22;
+ cmd.args[0x05] = 1;
+ cmd.args[0x06] = 1;
+ cmd.args[0x07] = 30;
+ cmd.args[0x08] = 30;
+ cmd.args[0x09] = 30;
+ cmd.args[0x0a] = 30;
+ cmd.len = 0x0b;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ cmd.args[0x00] = CMD_BER_CONTROL;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = 14;
+ cmd.args[0x03] = 14;
+ cmd.len = 0x04;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+ }
+
+ return ret;
+error_release_firmware:
+ release_firmware(fw);
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_sleep(struct dvb_frontend *fe)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_cmd cmd;
+ int ret, i;
+ struct tda10071_reg_val_mask tab[] = {
+ { 0xcd, 0x07, 0x07 },
+ { 0x80, 0x02, 0x02 },
+ { 0xcd, 0xc0, 0xc0 },
+ { 0xce, 0x1b, 0x1b },
+ { 0x9d, 0x01, 0x01 },
+ { 0x9d, 0x02, 0x02 },
+ { 0x9e, 0x01, 0x01 },
+ { 0x87, 0x80, 0x80 },
+ { 0xce, 0x08, 0x08 },
+ { 0xce, 0x10, 0x10 },
+ };
+
+ if (!priv->warm) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ cmd.args[0x00] = CMD_SET_SLEEP_MODE;
+ cmd.args[0x01] = 0;
+ cmd.args[0x02] = 1;
+ cmd.len = 0x03;
+ ret = tda10071_cmd_execute(priv, &cmd);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = tda10071_wr_reg_mask(priv, tab[i].reg, tab[i].val,
+ tab[i].mask);
+ if (ret)
+ goto error;
+ }
+
+ return ret;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int tda10071_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ s->min_delay_ms = 8000;
+ s->step_size = 0;
+ s->max_drift = 0;
+
+ return 0;
+}
+
+static void tda10071_release(struct dvb_frontend *fe)
+{
+ struct tda10071_priv *priv = fe->demodulator_priv;
+ kfree(priv);
+}
+
+struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
+ struct i2c_adapter *i2c)
+{
+ int ret;
+ struct tda10071_priv *priv = NULL;
+ u8 tmp;
+
+ /* allocate memory for the internal priv */
+ priv = kzalloc(sizeof(struct tda10071_priv), GFP_KERNEL);
+ if (priv == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* setup the priv */
+ priv->i2c = i2c;
+ memcpy(&priv->cfg, config, sizeof(struct tda10071_config));
+
+ /* chip ID */
+ ret = tda10071_rd_reg(priv, 0xff, &tmp);
+ if (ret || tmp != 0x0f)
+ goto error;
+
+ /* chip type */
+ ret = tda10071_rd_reg(priv, 0xdd, &tmp);
+ if (ret || tmp != 0x00)
+ goto error;
+
+ /* chip version */
+ ret = tda10071_rd_reg(priv, 0xfe, &tmp);
+ if (ret || tmp != 0x01)
+ goto error;
+
+ /* create dvb_frontend */
+ memcpy(&priv->fe.ops, &tda10071_ops, sizeof(struct dvb_frontend_ops));
+ priv->fe.demodulator_priv = priv;
+
+ return &priv->fe;
+error:
+ dbg("%s: failed=%d", __func__, ret);
+ kfree(priv);
+ return NULL;
+}
+EXPORT_SYMBOL(tda10071_attach);
+
+static struct dvb_frontend_ops tda10071_ops = {
+ .info = {
+ .name = "NXP TDA10071",
+ .type = FE_QPSK,
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_tolerance = 5000,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_8_9 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_RECOVER |
+ FE_CAN_2G_MODULATION
+ },
+
+ .release = tda10071_release,
+
+ .get_tune_settings = tda10071_get_tune_settings,
+
+ .init = tda10071_init,
+ .sleep = tda10071_sleep,
+
+ .set_frontend = tda10071_set_frontend,
+ .get_frontend = tda10071_get_frontend,
+
+ .read_status = tda10071_read_status,
+ .read_snr = tda10071_read_snr,
+ .read_signal_strength = tda10071_read_signal_strength,
+ .read_ber = tda10071_read_ber,
+ .read_ucblocks = tda10071_read_ucblocks,
+
+ .diseqc_send_master_cmd = tda10071_diseqc_send_master_cmd,
+ .diseqc_recv_slave_reply = tda10071_diseqc_recv_slave_reply,
+ .diseqc_send_burst = tda10071_diseqc_send_burst,
+
+ .set_tone = tda10071_set_tone,
+ .set_voltage = tda10071_set_voltage,
+};
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("NXP TDA10071 DVB-S/S2 demodulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda10071.h b/drivers/media/dvb/frontends/tda10071.h
new file mode 100644
index 00000000000..21163c4b555
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda10071.h
@@ -0,0 +1,81 @@
+/*
+ * NXP TDA10071 + Conexant CX24118A DVB-S/S2 demodulator + tuner driver
+ *
+ * Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef TDA10071_H
+#define TDA10071_H
+
+#include <linux/dvb/frontend.h>
+
+struct tda10071_config {
+ /* Demodulator I2C address.
+ * Default: none, must set
+ * Values: 0x55,
+ */
+ u8 i2c_address;
+
+ /* Max bytes I2C provider can write at once.
+ * Note: Buffer is taken from the stack currently!
+ * Default: none, must set
+ * Values:
+ */
+ u16 i2c_wr_max;
+
+ /* TS output mode.
+ * Default: TDA10071_TS_SERIAL
+ * Values:
+ */
+#define TDA10071_TS_SERIAL 0
+#define TDA10071_TS_PARALLEL 1
+ u8 ts_mode;
+
+ /* Input spectrum inversion.
+ * Default: 0
+ * Values: 0, 1
+ */
+ bool spec_inv;
+
+ /* Xtal frequency Hz
+ * Default: none, must set
+ * Values:
+ */
+ u32 xtal;
+
+ /* PLL multiplier.
+ * Default: none, must set
+ * Values:
+ */
+ u8 pll_multiplier;
+};
+
+#if defined(CONFIG_DVB_TDA10071) || \
+ (defined(CONFIG_DVB_TDA10071_MODULE) && defined(MODULE))
+extern struct dvb_frontend *tda10071_attach(
+ const struct tda10071_config *config, struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *tda10071_attach(
+ const struct tda10071_config *config, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* TDA10071_H */
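
A minimal usage sketch, not part of the patch, for the configuration interface declared above: a hypothetical bridge driver fills in struct tda10071_config and calls tda10071_attach(). The i2c_address and ts_mode values come from the field comments above; the i2c_wr_max, xtal and pll_multiplier values and the adapter pointer are placeholders that depend on the actual board.

/* assumes "tda10071.h" and <linux/i2c.h> are already included by the bridge driver */
static const struct tda10071_config board_tda10071_config = {
	.i2c_address	= 0x55,			/* documented demod I2C address */
	.i2c_wr_max	= 64,			/* placeholder, bus/bridge dependent */
	.ts_mode	= TDA10071_TS_SERIAL,
	.spec_inv	= 0,
	.xtal		= 40444000,		/* placeholder, board dependent (Hz) */
	.pll_multiplier	= 20,			/* placeholder, board dependent */
};

static int board_frontend_attach(struct i2c_adapter *i2c_adap,
		struct dvb_frontend **fe)
{
	*fe = tda10071_attach(&board_tda10071_config, i2c_adap);
	if (*fe == NULL)
		return -ENODEV;	/* attach failed or driver disabled by Kconfig */
	return 0;
}
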
diff --git a/drivers/media/dvb/frontends/tda10071_priv.h b/drivers/media/dvb/frontends/tda10071_priv.h
new file mode 100644
index 00000000000..93c5e6317f0
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda10071_priv.h
@@ -0,0 +1,122 @@
+/*
+ * NXP TDA10071 + Conexant CX24118A DVB-S/S2 demodulator + tuner driver
+ *
+ * Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef TDA10071_PRIV
+#define TDA10071_PRIV
+
+#include "dvb_frontend.h"
+#include "tda10071.h"
+#include <linux/firmware.h>
+
+#define LOG_PREFIX "tda10071"
+
+#undef dbg
+#define dbg(f, arg...) \
+ if (tda10071_debug) \
+ printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+struct tda10071_priv {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend fe;
+ struct tda10071_config cfg;
+
+ u8 meas_count[2];
+ u32 ber;
+ u32 ucb;
+ fe_status_t fe_status;
+ fe_delivery_system_t delivery_system;
+ bool warm; /* FW running */
+};
+
+static struct tda10071_modcod {
+ fe_delivery_system_t delivery_system;
+ fe_modulation_t modulation;
+ fe_code_rate_t fec;
+ u8 val;
+} TDA10071_MODCOD[] = {
+ /* NBC-QPSK */
+ { SYS_DVBS2, QPSK, FEC_AUTO, 0x00 },
+ { SYS_DVBS2, QPSK, FEC_1_2, 0x04 },
+ { SYS_DVBS2, QPSK, FEC_3_5, 0x05 },
+ { SYS_DVBS2, QPSK, FEC_2_3, 0x06 },
+ { SYS_DVBS2, QPSK, FEC_3_4, 0x07 },
+ { SYS_DVBS2, QPSK, FEC_4_5, 0x08 },
+ { SYS_DVBS2, QPSK, FEC_5_6, 0x09 },
+ { SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
+ { SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
+ /* 8PSK */
+ { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
+ { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
+ { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
+ { SYS_DVBS2, PSK_8, FEC_5_6, 0x0f },
+ { SYS_DVBS2, PSK_8, FEC_8_9, 0x10 },
+ { SYS_DVBS2, PSK_8, FEC_9_10, 0x11 },
+ /* QPSK */
+ { SYS_DVBS, QPSK, FEC_AUTO, 0x2d },
+ { SYS_DVBS, QPSK, FEC_1_2, 0x2e },
+ { SYS_DVBS, QPSK, FEC_2_3, 0x2f },
+ { SYS_DVBS, QPSK, FEC_3_4, 0x30 },
+ { SYS_DVBS, QPSK, FEC_5_6, 0x31 },
+ { SYS_DVBS, QPSK, FEC_7_8, 0x32 },
+};
+
+struct tda10071_reg_val_mask {
+ u8 reg;
+ u8 val;
+ u8 mask;
+};
+
+/* firmware filename */
+#define TDA10071_DEFAULT_FIRMWARE "dvb-fe-tda10071.fw"
+
+/* firmware commands */
+#define CMD_DEMOD_INIT 0x10
+#define CMD_CHANGE_CHANNEL 0x11
+#define CMD_MPEG_CONFIG 0x13
+#define CMD_TUNER_INIT 0x15
+#define CMD_GET_AGCACC 0x1a
+
+#define CMD_LNB_CONFIG 0x20
+#define CMD_LNB_SEND_DISEQC 0x21
+#define CMD_LNB_SET_DC_LEVEL 0x22
+#define CMD_LNB_PCB_CONFIG 0x23
+#define CMD_LNB_SEND_TONEBURST 0x24
+#define CMD_LNB_UPDATE_REPLY 0x25
+
+#define CMD_GET_FW_VERSION 0x35
+#define CMD_SET_SLEEP_MODE 0x36
+#define CMD_BER_CONTROL 0x3e
+#define CMD_BER_UPDATE_COUNTERS 0x3f
+
+/* firmware command struct */
+#define TDA10071_ARGLEN 0x1e
+struct tda10071_cmd {
+ u8 args[TDA10071_ARGLEN];
+ u8 len;
+};
+
+#endif /* TDA10071_PRIV */
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
index 0384e8da4f5..1b1bf200c55 100644
--- a/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -1195,7 +1195,7 @@ static int GetSignalStrength(s32 *pSignalStrength, u32 RFAgc, u32 IFAgc)
}
#endif
-static int get_frequency(struct dvb_frontend *fe, u32 *frequency)
+static int get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct tda_state *state = fe->tuner_priv;
@@ -1222,7 +1222,7 @@ static struct dvb_tuner_ops tuner_ops = {
.sleep = sleep,
.set_params = set_params,
.release = release,
- .get_frequency = get_frequency,
+ .get_if_frequency = get_if_frequency,
.get_bandwidth = get_bandwidth,
};
diff --git a/drivers/media/dvb/mantis/Makefile b/drivers/media/dvb/mantis/Makefile
index 98dc5cd258a..ec8116dcb36 100644
--- a/drivers/media/dvb/mantis/Makefile
+++ b/drivers/media/dvb/mantis/Makefile
@@ -25,4 +25,4 @@ obj-$(CONFIG_MANTIS_CORE) += mantis_core.o
obj-$(CONFIG_DVB_MANTIS) += mantis.o
obj-$(CONFIG_DVB_HOPPER) += hopper.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+ccflags-y += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/mantis/hopper_cards.c b/drivers/media/dvb/mantis/hopper_cards.c
index 1402062f2c8..71622f65c03 100644
--- a/drivers/media/dvb/mantis/hopper_cards.c
+++ b/drivers/media/dvb/mantis/hopper_cards.c
@@ -65,7 +65,7 @@ static int devs;
static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
{
- u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
+ u32 stat = 0, mask = 0, lstat = 0;
u32 rst_stat = 0, rst_mask = 0;
struct mantis_pci *mantis;
@@ -80,7 +80,7 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
stat = mmread(MANTIS_INT_STAT);
mask = mmread(MANTIS_INT_MASK);
- mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
+ lstat = stat & ~MANTIS_INT_RISCSTAT;
if (!(stat & mask))
return IRQ_NONE;
@@ -126,7 +126,7 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
}
if (stat & MANTIS_INT_RISCI) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
- mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
+ mantis->busy_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
tasklet_schedule(&mantis->tasklet);
}
if (stat & MANTIS_INT_I2CDONE) {
diff --git a/drivers/media/dvb/mantis/mantis_cards.c b/drivers/media/dvb/mantis/mantis_cards.c
index 05cbb9d9572..c2bb90b3e52 100644
--- a/drivers/media/dvb/mantis/mantis_cards.c
+++ b/drivers/media/dvb/mantis/mantis_cards.c
@@ -73,7 +73,7 @@ static char *label[10] = {
static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
{
- u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
+ u32 stat = 0, mask = 0, lstat = 0;
u32 rst_stat = 0, rst_mask = 0;
struct mantis_pci *mantis;
@@ -88,7 +88,7 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
stat = mmread(MANTIS_INT_STAT);
mask = mmread(MANTIS_INT_MASK);
- mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
+ lstat = stat & ~MANTIS_INT_RISCSTAT;
if (!(stat & mask))
return IRQ_NONE;
@@ -134,7 +134,7 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
}
if (stat & MANTIS_INT_RISCI) {
dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
- mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
+ mantis->busy_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
tasklet_schedule(&mantis->tasklet);
}
if (stat & MANTIS_INT_I2CDONE) {
diff --git a/drivers/media/dvb/mantis/mantis_common.h b/drivers/media/dvb/mantis/mantis_common.h
index 49dbca145bb..f2410cf0a6b 100644
--- a/drivers/media/dvb/mantis/mantis_common.h
+++ b/drivers/media/dvb/mantis/mantis_common.h
@@ -123,11 +123,8 @@ struct mantis_pci {
unsigned int num;
/* RISC Core */
- u32 finished_block;
+ u32 busy_block;
u32 last_block;
- u32 line_bytes;
- u32 line_count;
- u32 risc_pos;
u8 *buf_cpu;
dma_addr_t buf_dma;
u32 *risc_cpu;
diff --git a/drivers/media/dvb/mantis/mantis_dma.c b/drivers/media/dvb/mantis/mantis_dma.c
index 46202a4012a..c61ca7d3dae 100644
--- a/drivers/media/dvb/mantis/mantis_dma.c
+++ b/drivers/media/dvb/mantis/mantis_dma.c
@@ -43,13 +43,17 @@
#define RISC_IRQ (0x01 << 24)
#define RISC_STATUS(status) ((((~status) & 0x0f) << 20) | ((status & 0x0f) << 16))
-#define RISC_FLUSH() (mantis->risc_pos = 0)
-#define RISC_INSTR(opcode) (mantis->risc_cpu[mantis->risc_pos++] = cpu_to_le32(opcode))
+#define RISC_FLUSH(risc_pos) (risc_pos = 0)
+#define RISC_INSTR(risc_pos, opcode) (mantis->risc_cpu[risc_pos++] = cpu_to_le32(opcode))
#define MANTIS_BUF_SIZE (64 * 1024)
-#define MANTIS_BLOCK_BYTES (MANTIS_BUF_SIZE >> 4)
-#define MANTIS_BLOCK_COUNT (1 << 4)
-#define MANTIS_RISC_SIZE PAGE_SIZE
+#define MANTIS_BLOCK_BYTES (MANTIS_BUF_SIZE / 4)
+#define MANTIS_DMA_TR_BYTES (2 * 1024) /* upper limit: 4095 bytes. */
+#define MANTIS_BLOCK_COUNT (MANTIS_BUF_SIZE / MANTIS_BLOCK_BYTES)
+
+#define MANTIS_DMA_TR_UNITS (MANTIS_BLOCK_BYTES / MANTIS_DMA_TR_BYTES)
+/* MANTIS_BUF_SIZE / MANTIS_DMA_TR_UNITS must not exceed MANTIS_RISC_SIZE (4k RISC cmd buffer) */
+#define MANTIS_RISC_SIZE PAGE_SIZE /* RISC program must fit here. */
int mantis_dma_exit(struct mantis_pci *mantis)
{
@@ -124,27 +128,6 @@ err:
return -ENOMEM;
}
-static inline int mantis_calc_lines(struct mantis_pci *mantis)
-{
- mantis->line_bytes = MANTIS_BLOCK_BYTES;
- mantis->line_count = MANTIS_BLOCK_COUNT;
-
- while (mantis->line_bytes > 4095) {
- mantis->line_bytes >>= 1;
- mantis->line_count <<= 1;
- }
-
- dprintk(MANTIS_DEBUG, 1, "Mantis RISC block bytes=[%d], line bytes=[%d], line count=[%d]",
- MANTIS_BLOCK_BYTES, mantis->line_bytes, mantis->line_count);
-
- if (mantis->line_count > 255) {
- dprintk(MANTIS_ERROR, 1, "Buffer size error");
- return -EINVAL;
- }
-
- return 0;
-}
-
int mantis_dma_init(struct mantis_pci *mantis)
{
int err = 0;
@@ -158,12 +141,6 @@ int mantis_dma_init(struct mantis_pci *mantis)
goto err;
}
- err = mantis_calc_lines(mantis);
- if (err < 0) {
- dprintk(MANTIS_ERROR, 1, "Mantis calc lines failed");
-
- goto err;
- }
return 0;
err:
@@ -174,31 +151,32 @@ EXPORT_SYMBOL_GPL(mantis_dma_init);
static inline void mantis_risc_program(struct mantis_pci *mantis)
{
u32 buf_pos = 0;
- u32 line;
+ u32 line, step;
+ u32 risc_pos;
dprintk(MANTIS_DEBUG, 1, "Mantis create RISC program");
- RISC_FLUSH();
-
- dprintk(MANTIS_DEBUG, 1, "risc len lines %u, bytes per line %u",
- mantis->line_count, mantis->line_bytes);
-
- for (line = 0; line < mantis->line_count; line++) {
- dprintk(MANTIS_DEBUG, 1, "RISC PROG line=[%d]", line);
- if (!(buf_pos % MANTIS_BLOCK_BYTES)) {
- RISC_INSTR(RISC_WRITE |
- RISC_IRQ |
- RISC_STATUS(((buf_pos / MANTIS_BLOCK_BYTES) +
- (MANTIS_BLOCK_COUNT - 1)) %
- MANTIS_BLOCK_COUNT) |
- mantis->line_bytes);
- } else {
- RISC_INSTR(RISC_WRITE | mantis->line_bytes);
- }
- RISC_INSTR(mantis->buf_dma + buf_pos);
- buf_pos += mantis->line_bytes;
+ RISC_FLUSH(risc_pos);
+
+ dprintk(MANTIS_DEBUG, 1, "risc len lines %u, bytes per line %u, bytes per DMA tr %u",
+ MANTIS_BLOCK_COUNT, MANTIS_BLOCK_BYTES, MANTIS_DMA_TR_BYTES);
+
+ for (line = 0; line < MANTIS_BLOCK_COUNT; line++) {
+ for (step = 0; step < MANTIS_DMA_TR_UNITS; step++) {
+ dprintk(MANTIS_DEBUG, 1, "RISC PROG line=[%d], step=[%d]", line, step);
+ if (step == 0) {
+ RISC_INSTR(risc_pos, RISC_WRITE |
+ RISC_IRQ |
+ RISC_STATUS(line) |
+ MANTIS_DMA_TR_BYTES);
+ } else {
+ RISC_INSTR(risc_pos, RISC_WRITE | MANTIS_DMA_TR_BYTES);
+ }
+ RISC_INSTR(risc_pos, mantis->buf_dma + buf_pos);
+ buf_pos += MANTIS_DMA_TR_BYTES;
+ }
}
- RISC_INSTR(RISC_JUMP);
- RISC_INSTR(mantis->risc_dma);
+ RISC_INSTR(risc_pos, RISC_JUMP);
+ RISC_INSTR(risc_pos, mantis->risc_dma);
}
void mantis_dma_start(struct mantis_pci *mantis)
@@ -210,7 +188,7 @@ void mantis_dma_start(struct mantis_pci *mantis)
mmwrite(mmread(MANTIS_GPIF_ADDR) | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
mmwrite(0, MANTIS_DMA_CTL);
- mantis->last_block = mantis->finished_block = 0;
+ mantis->last_block = mantis->busy_block = 0;
mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_RISCI, MANTIS_INT_MASK);
@@ -245,9 +223,9 @@ void mantis_dma_xfer(unsigned long data)
struct mantis_pci *mantis = (struct mantis_pci *) data;
struct mantis_hwconfig *config = mantis->hwconfig;
- while (mantis->last_block != mantis->finished_block) {
+ while (mantis->last_block != mantis->busy_block) {
dprintk(MANTIS_DEBUG, 1, "last block=[%d] finished block=[%d]",
- mantis->last_block, mantis->finished_block);
+ mantis->last_block, mantis->busy_block);
(config->ts_size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
(&mantis->demux, &mantis->buf_cpu[mantis->last_block * MANTIS_BLOCK_BYTES], MANTIS_BLOCK_BYTES);
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.c b/drivers/media/dvb/mantis/mantis_vp1041.c
index 38a436ca2fd..07aa887a4b4 100644
--- a/drivers/media/dvb/mantis/mantis_vp1041.c
+++ b/drivers/media/dvb/mantis/mantis_vp1041.c
@@ -51,7 +51,6 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_1[] = {
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
- { STB0899_DISFIFO , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x99 },
{ STB0899_DISF22RX , 0xa8 },
diff --git a/drivers/media/dvb/ngene/Makefile b/drivers/media/dvb/ngene/Makefile
index 2bc96874d04..13ebeffb705 100644
--- a/drivers/media/dvb/ngene/Makefile
+++ b/drivers/media/dvb/ngene/Makefile
@@ -6,9 +6,9 @@ ngene-objs := ngene-core.o ngene-i2c.o ngene-cards.o ngene-dvb.o
obj-$(CONFIG_DVB_NGENE) += ngene.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
-EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+ccflags-y += -Idrivers/media/dvb/dvb-core/
+ccflags-y += -Idrivers/media/dvb/frontends/
+ccflags-y += -Idrivers/media/common/tuners/
# For the staging CI driver cxd2099
-EXTRA_CFLAGS += -Idrivers/staging/cxd2099/
+ccflags-y += -Idrivers/staging/media/cxd2099/
diff --git a/drivers/media/dvb/pluto2/Makefile b/drivers/media/dvb/pluto2/Makefile
index 7ac128724df..700822350ec 100644
--- a/drivers/media/dvb/pluto2/Makefile
+++ b/drivers/media/dvb/pluto2/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_PLUTO2) += pluto2.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+ccflags-y += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/pt1/Makefile b/drivers/media/dvb/pt1/Makefile
index a66da17bbe3..d80d8e8e7c5 100644
--- a/drivers/media/dvb/pt1/Makefile
+++ b/drivers/media/dvb/pt1/Makefile
@@ -2,4 +2,4 @@ earth-pt1-objs := pt1.o va1j5jf8007s.o va1j5jf8007t.o
obj-$(CONFIG_DVB_PT1) += earth-pt1.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/dvb/dvb-core -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/siano/Makefile b/drivers/media/dvb/siano/Makefile
index c54140b5ab5..f233b57c86f 100644
--- a/drivers/media/dvb/siano/Makefile
+++ b/drivers/media/dvb/siano/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o
obj-$(CONFIG_SMS_USB_DRV) += smsusb.o
obj-$(CONFIG_SMS_SDIO_DRV) += smssdio.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
+ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index af121db88ea..680c781c8dd 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -19,6 +19,7 @@
#include "sms-cards.h"
#include "smsir.h"
+#include <linux/module.h>
static int sms_dbg;
module_param_named(cards_dbg, sms_dbg, int, 0644);
diff --git a/drivers/media/dvb/siano/smsendian.c b/drivers/media/dvb/siano/smsendian.c
index 457b6d02ef8..e2657c2f010 100644
--- a/drivers/media/dvb/siano/smsendian.c
+++ b/drivers/media/dvb/siano/smsendian.c
@@ -19,6 +19,7 @@
****************************************************************/
+#include <linux/export.h>
#include <asm/byteorder.h>
#include "smsendian.h"
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
index e57d38b0197..91f8c8291e2 100644
--- a/drivers/media/dvb/siano/smssdio.c
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -39,6 +39,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/module.h>
#include "smscoreapi.h"
#include "sms-cards.h"
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 0c8164a2cc3..51c7121b321 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -24,6 +24,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "smscoreapi.h"
#include "sms-cards.h"
diff --git a/drivers/media/dvb/ttpci/Makefile b/drivers/media/dvb/ttpci/Makefile
index 8a4d5bb20a5..f6e869372e3 100644
--- a/drivers/media/dvb/ttpci/Makefile
+++ b/drivers/media/dvb/ttpci/Makefile
@@ -17,5 +17,5 @@ obj-$(CONFIG_DVB_BUDGET_CI) += budget-ci.o
obj-$(CONFIG_DVB_BUDGET_PATCH) += budget-patch.o
obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+ccflags-y += -Idrivers/media/common/tuners
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index cdd31cae46c..ee8ee1d481f 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -25,6 +25,8 @@
* the project's page is at http://www.linuxtv.org/
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
@@ -253,7 +255,7 @@ static int av7110_dvb_c_switch(struct saa7146_fh *fh)
switch (av7110->current_input) {
case 1:
- dprintk(1, "switching SAA7113 to Analog Tuner Input.\n");
+ dprintk(1, "switching SAA7113 to Analog Tuner Input\n");
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0000); // loudspeaker source
msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0000); // headphone source
msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0000); // SCART 1 source
@@ -263,7 +265,7 @@ static int av7110_dvb_c_switch(struct saa7146_fh *fh)
if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
if (ves1820_writereg(dev, 0x09, 0x0f, 0x60))
- dprintk(1, "setting band in demodulator failed.\n");
+ dprintk(1, "setting band in demodulator failed\n");
} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9819 pin9(STD)
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9819 pin30(VIF)
@@ -272,17 +274,17 @@ static int av7110_dvb_c_switch(struct saa7146_fh *fh)
dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
break;
case 2:
- dprintk(1, "switching SAA7113 to Video AV CVBS Input.\n");
+ dprintk(1, "switching SAA7113 to Video AV CVBS Input\n");
if (i2c_writereg(av7110, 0x48, 0x02, 0xd2) != 1)
dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
break;
case 3:
- dprintk(1, "switching SAA7113 to Video AV Y/C Input.\n");
+ dprintk(1, "switching SAA7113 to Video AV Y/C Input\n");
if (i2c_writereg(av7110, 0x48, 0x02, 0xd9) != 1)
dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
break;
default:
- dprintk(1, "switching SAA7113 to Input: AV7110: SAA7113: invalid input.\n");
+ dprintk(1, "switching SAA7113 to Input: AV7110: SAA7113: invalid input\n");
}
} else {
adswitch = 0;
@@ -299,7 +301,7 @@ static int av7110_dvb_c_switch(struct saa7146_fh *fh)
if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
if (ves1820_writereg(dev, 0x09, 0x0f, 0x20))
- dprintk(1, "setting band in demodulator failed.\n");
+ dprintk(1, "setting band in demodulator failed\n");
} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
@@ -413,7 +415,7 @@ static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
- dprintk(2, "VIDIOC_G_FREQ: freq:0x%08x.\n", f->frequency);
+ dprintk(2, "VIDIOC_G_FREQ: freq:0x%08x\n", f->frequency);
if (!av7110->analog_tuner_flags || av7110->current_input != 1)
return -EINVAL;
@@ -429,7 +431,7 @@ static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
- dprintk(2, "VIDIOC_S_FREQUENCY: freq:0x%08x.\n", f->frequency);
+ dprintk(2, "VIDIOC_S_FREQUENCY: freq:0x%08x\n", f->frequency);
if (!av7110->analog_tuner_flags || av7110->current_input != 1)
return -EINVAL;
@@ -689,12 +691,12 @@ int av7110_init_analog_module(struct av7110 *av7110)
if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 &&
i2c_writereg(av7110, 0x80, 0x0, 0) == 1) {
- printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n",
+ pr_info("DVB-C analog module @ card %d detected, initializing MSP3400\n",
av7110->dvb_adapter.num);
av7110->adac_type = DVB_ADAC_MSP34x0;
} else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 &&
i2c_writereg(av7110, 0x84, 0x0, 0) == 1) {
- printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3415\n",
+ pr_info("DVB-C analog module @ card %d detected, initializing MSP3415\n",
av7110->dvb_adapter.num);
av7110->adac_type = DVB_ADAC_MSP34x5;
} else
@@ -715,7 +717,7 @@ int av7110_init_analog_module(struct av7110 *av7110)
msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART
if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) {
- INFO(("saa7113 not accessible.\n"));
+ pr_info("saa7113 not accessible\n");
} else {
u8 *i = saa7113_init_regs;
@@ -733,7 +735,7 @@ int av7110_init_analog_module(struct av7110 *av7110)
/* setup for DVB by default */
if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
if (ves1820_writereg(av7110->dev, 0x09, 0x0f, 0x20))
- dprintk(1, "setting band in demodulator failed.\n");
+ dprintk(1, "setting band in demodulator failed\n");
} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
@@ -797,7 +799,7 @@ int av7110_init_v4l(struct av7110 *av7110)
ret = saa7146_vv_init(dev, vv_data);
if (ret) {
- ERR(("cannot init capture device. skipping.\n"));
+ ERR("cannot init capture device. skipping\n");
return -ENODEV;
}
vv_data->ops.vidioc_enum_input = vidioc_enum_input;
@@ -814,12 +816,12 @@ int av7110_init_v4l(struct av7110 *av7110)
vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
- ERR(("cannot register capture device. skipping.\n"));
+ ERR("cannot register capture device. skipping\n");
saa7146_vv_release(dev);
return -ENODEV;
}
if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
- ERR(("cannot register vbi v4l2 device. skipping.\n"));
+ ERR("cannot register vbi v4l2 device. skipping\n");
return 0;
}
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index e957d7690bc..78d32f7e49f 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -33,6 +33,8 @@
* the project's page is at http://www.linuxtv.org/
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "budget.h"
#include "stv0299.h"
#include "stb0899_drv.h"
@@ -149,7 +151,7 @@ static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int ad
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 0xfff, 1, 0, 1);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
- printk(KERN_INFO "budget-av: cam ejected 1\n");
+ pr_info("cam ejected 1\n");
}
return result;
}
@@ -168,7 +170,7 @@ static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int a
result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 0xfff, 1, value, 0, 1);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
- printk(KERN_INFO "budget-av: cam ejected 2\n");
+ pr_info("cam ejected 2\n");
}
return result;
}
@@ -187,7 +189,7 @@ static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 addre
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 3, 1, 0, 0);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
- printk(KERN_INFO "budget-av: cam ejected 3\n");
+ pr_info("cam ejected 3\n");
return -ETIMEDOUT;
}
return result;
@@ -207,7 +209,7 @@ static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 addr
result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 3, 1, value, 0, 0);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
- printk(KERN_INFO "budget-av: cam ejected 5\n");
+ pr_info("cam ejected 5\n");
}
return result;
}
@@ -289,7 +291,7 @@ static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open
if (saa7146_read(saa, PSR) & MASK_06) {
if (budget_av->slot_status == SLOTSTATUS_NONE) {
budget_av->slot_status = SLOTSTATUS_PRESENT;
- printk(KERN_INFO "budget-av: cam inserted A\n");
+ pr_info("cam inserted A\n");
}
}
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
@@ -306,11 +308,11 @@ static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, 0, 1, 0, 1);
if ((result >= 0) && (budget_av->slot_status == SLOTSTATUS_NONE)) {
budget_av->slot_status = SLOTSTATUS_PRESENT;
- printk(KERN_INFO "budget-av: cam inserted B\n");
+ pr_info("cam inserted B\n");
} else if (result < 0) {
if (budget_av->slot_status != SLOTSTATUS_NONE) {
ciintf_slot_shutdown(ca, slot);
- printk(KERN_INFO "budget-av: cam ejected 5\n");
+ pr_info("cam ejected 5\n");
return 0;
}
}
@@ -365,11 +367,11 @@ static int ciintf_init(struct budget_av *budget_av)
if ((result = dvb_ca_en50221_init(&budget_av->budget.dvb_adapter,
&budget_av->ca, 0, 1)) != 0) {
- printk(KERN_ERR "budget-av: ci initialisation failed.\n");
+ pr_err("ci initialisation failed\n");
goto error;
}
- printk(KERN_INFO "budget-av: ci interface initialised.\n");
+ pr_info("ci interface initialised\n");
return 0;
error:
@@ -896,7 +898,6 @@ static const struct stb0899_s1_reg knc1_stb0899_s1_init_1[] = {
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
- { STB0899_DISFIFO , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x8c },
{ STB0899_DISF22RX , 0x9a },
@@ -1197,6 +1198,7 @@ static u8 read_pwm(struct budget_av *budget_av)
#define SUBID_DVBC_KNC1 0x0020
#define SUBID_DVBC_KNC1_PLUS 0x0021
#define SUBID_DVBC_KNC1_MK3 0x0022
+#define SUBID_DVBC_KNC1_TDA10024 0x0028
#define SUBID_DVBC_KNC1_PLUS_MK3 0x0023
#define SUBID_DVBC_CINERGY1200 0x1156
#define SUBID_DVBC_CINERGY1200_MK3 0x1176
@@ -1316,6 +1318,7 @@ static void frontend_init(struct budget_av *budget_av)
case SUBID_DVBC_EASYWATCH_MK3:
case SUBID_DVBC_CINERGY1200_MK3:
case SUBID_DVBC_KNC1_MK3:
+ case SUBID_DVBC_KNC1_TDA10024:
case SUBID_DVBC_KNC1_PLUS_MK3:
budget_av->reinitialise_demod = 1;
budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
@@ -1343,8 +1346,7 @@ static void frontend_init(struct budget_av *budget_av)
}
if (fe == NULL) {
- printk(KERN_ERR "budget-av: A frontend driver was not found "
- "for device [%04x:%04x] subsystem [%04x:%04x]\n",
+ pr_err("A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
saa->pci->vendor,
saa->pci->device,
saa->pci->subsystem_vendor,
@@ -1356,7 +1358,7 @@ static void frontend_init(struct budget_av *budget_av)
if (dvb_register_frontend(&budget_av->budget.dvb_adapter,
budget_av->budget.dvb_frontend)) {
- printk(KERN_ERR "budget-av: Frontend registration failed!\n");
+ pr_err("Frontend registration failed!\n");
dvb_frontend_detach(budget_av->budget.dvb_frontend);
budget_av->budget.dvb_frontend = NULL;
}
@@ -1414,7 +1416,7 @@ static struct v4l2_input knc1_inputs[KNC1_INPUTS] = {
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
- dprintk(1, "VIDIOC_ENUMINPUT %d.\n", i->index);
+ dprintk(1, "VIDIOC_ENUMINPUT %d\n", i->index);
if (i->index >= KNC1_INPUTS)
return -EINVAL;
memcpy(i, &knc1_inputs[i->index], sizeof(struct v4l2_input));
@@ -1428,7 +1430,7 @@ static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
*i = budget_av->cur_input;
- dprintk(1, "VIDIOC_G_INPUT %d.\n", *i);
+ dprintk(1, "VIDIOC_G_INPUT %d\n", *i);
return 0;
}
@@ -1437,7 +1439,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct budget_av *budget_av = (struct budget_av *)dev->ext_priv;
- dprintk(1, "VIDIOC_S_INPUT %d.\n", input);
+ dprintk(1, "VIDIOC_S_INPUT %d\n", input);
return saa7113_setinput(budget_av, input);
}
@@ -1476,7 +1478,7 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
if (0 != saa7146_vv_init(dev, &vv_data)) {
/* fixme: proper cleanup here */
- ERR(("cannot init vv subsystem.\n"));
+ ERR("cannot init vv subsystem\n");
return err;
}
vv_data.ops.vidioc_enum_input = vidioc_enum_input;
@@ -1485,7 +1487,7 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
/* fixme: proper cleanup here */
- ERR(("cannot register capture v4l2 device.\n"));
+ ERR("cannot register capture v4l2 device\n");
saa7146_vv_release(dev);
return err;
}
@@ -1502,13 +1504,12 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
mac = budget_av->budget.dvb_adapter.proposed_mac;
if (i2c_readregs(&budget_av->budget.i2c_adap, 0xa0, 0x30, mac, 6)) {
- printk(KERN_ERR "KNC1-%d: Could not read MAC from KNC1 card\n",
+ pr_err("KNC1-%d: Could not read MAC from KNC1 card\n",
budget_av->budget.dvb_adapter.num);
memset(mac, 0, 6);
} else {
- printk(KERN_INFO "KNC1-%d: MAC addr = %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
- budget_av->budget.dvb_adapter.num,
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ pr_info("KNC1-%d: MAC addr = %pM\n",
+ budget_av->budget.dvb_adapter.num, mac);
}
budget_av->budget.dvb_adapter.priv = budget_av;
@@ -1558,6 +1559,7 @@ MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
MAKE_BUDGET_INFO(knc1spx4, "KNC1 DVB-S Plus X4", BUDGET_KNC1SP);
MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
MAKE_BUDGET_INFO(knc1cmk3, "KNC1 DVB-C MK3", BUDGET_KNC1C_MK3);
+MAKE_BUDGET_INFO(knc1ctda10024, "KNC1 DVB-C TDA10024", BUDGET_KNC1C_TDA10024);
MAKE_BUDGET_INFO(knc1cpmk3, "KNC1 DVB-C Plus MK3", BUDGET_KNC1CP_MK3);
MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP);
MAKE_BUDGET_INFO(cin1200s, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
@@ -1587,6 +1589,7 @@ static struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
MAKE_EXTENSION_PCI(knc1cmk3, 0x1894, 0x0022),
+ MAKE_EXTENSION_PCI(knc1ctda10024, 0x1894, 0x0028),
MAKE_EXTENSION_PCI(knc1cpmk3, 0x1894, 0x0023),
MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030),
MAKE_EXTENSION_PCI(knc1tp, 0x1894, 0x0031),
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 926f299b522..ca02e972217 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -1053,7 +1053,6 @@ static const struct stb0899_s1_reg tt3200_stb0899_s1_init_1[] = {
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
- { STB0899_DISFIFO , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x8c },
{ STB0899_DISF22RX , 0x9a },
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index 37666d4edab..37d02fe0913 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -110,6 +110,7 @@ static int start_ts_capture(struct budget *budget)
break;
case BUDGET_CIN1200C_MK3:
case BUDGET_KNC1C_MK3:
+ case BUDGET_KNC1C_TDA10024:
case BUDGET_KNC1CP_MK3:
if (budget->video_port == BUDGET_VIDEO_PORTA) {
saa7146_write(dev, DD1_INIT, 0x06000200);
@@ -434,6 +435,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
case BUDGET_KNC1CP:
case BUDGET_CIN1200C:
case BUDGET_KNC1C_MK3:
+ case BUDGET_KNC1C_TDA10024:
case BUDGET_KNC1CP_MK3:
case BUDGET_CIN1200C_MK3:
budget->buffer_width = TS_WIDTH_DVBC;
diff --git a/drivers/media/dvb/ttpci/budget.h b/drivers/media/dvb/ttpci/budget.h
index 3ad0c6789ba..3d8a806c20b 100644
--- a/drivers/media/dvb/ttpci/budget.h
+++ b/drivers/media/dvb/ttpci/budget.h
@@ -104,6 +104,7 @@ static struct saa7146_pci_extension_data x_var = { \
#define BUDGET_KNC1C_MK3 16
#define BUDGET_KNC1CP_MK3 17
#define BUDGET_KNC1S2 18
+#define BUDGET_KNC1C_TDA10024 19
#define BUDGET_VIDEO_PORTA 0
#define BUDGET_VIDEO_PORTB 1
diff --git a/drivers/media/dvb/ttpci/ttpci-eeprom.c b/drivers/media/dvb/ttpci/ttpci-eeprom.c
index 7dd54b3026a..32d43156c54 100644
--- a/drivers/media/dvb/ttpci/ttpci-eeprom.c
+++ b/drivers/media/dvb/ttpci/ttpci-eeprom.c
@@ -85,6 +85,35 @@ static int getmac_tt(u8 * decodedMAC, u8 * encodedMAC)
return 0;
}
+int ttpci_eeprom_decode_mac(u8 *decodedMAC, u8 *encodedMAC)
+{
+ u8 xor[20] = { 0x72, 0x23, 0x68, 0x19, 0x5c, 0xa8, 0x71, 0x2c,
+ 0x54, 0xd3, 0x7b, 0xf1, 0x9E, 0x23, 0x16, 0xf6,
+ 0x1d, 0x36, 0x64, 0x78};
+ u8 data[20];
+ int i;
+
+ memcpy(data, encodedMAC, 20);
+
+ for (i = 0; i < 20; i++)
+ data[i] ^= xor[i];
+ for (i = 0; i < 10; i++)
+ data[i] = ((data[2 * i + 1] << 8) | data[2 * i])
+ >> ((data[2 * i + 1] >> 6) & 3);
+
+ if (check_mac_tt(data))
+ return -ENODEV;
+
+ decodedMAC[0] = data[2];
+ decodedMAC[1] = data[1];
+ decodedMAC[2] = data[0];
+ decodedMAC[3] = data[6];
+ decodedMAC[4] = data[5];
+ decodedMAC[5] = data[4];
+ return 0;
+}
+EXPORT_SYMBOL(ttpci_eeprom_decode_mac);
+
static int ttpci_eeprom_read_encodedMAC(struct i2c_adapter *adapter, u8 * encodedMAC)
{
int ret;
diff --git a/drivers/media/dvb/ttpci/ttpci-eeprom.h b/drivers/media/dvb/ttpci/ttpci-eeprom.h
index e2dc6cfe205..dcc33d5a5cb 100644
--- a/drivers/media/dvb/ttpci/ttpci-eeprom.h
+++ b/drivers/media/dvb/ttpci/ttpci-eeprom.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/i2c.h>
+extern int ttpci_eeprom_decode_mac(u8 *decodedMAC, u8 *encodedMAC);
extern int ttpci_eeprom_parse_mac(struct i2c_adapter *adapter, u8 *propsed_mac);
#endif
diff --git a/drivers/media/dvb/ttusb-budget/Makefile b/drivers/media/dvb/ttusb-budget/Makefile
index fbe2b9514c2..8d6c4acb7f1 100644
--- a/drivers/media/dvb/ttusb-budget/Makefile
+++ b/drivers/media/dvb/ttusb-budget/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_TTUSB_BUDGET) += dvb-ttusb-budget.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/ttusb-dec/Makefile b/drivers/media/dvb/ttusb-dec/Makefile
index 2d70a826939..ed28b5384d2 100644
--- a/drivers/media/dvb/ttusb-dec/Makefile
+++ b/drivers/media/dvb/ttusb-dec/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_TTUSB_DEC) += ttusb_dec.o ttusbdecfe.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
+ccflags-y += -Idrivers/media/dvb/dvb-core/
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 16b70b4412f..6edc9ba8120 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/media.h>
+#include <linux/export.h>
#include <media/media-device.h>
#include <media/media-devnode.h>
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 52798a111e1..ccd5f0d8a01 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -426,7 +426,6 @@ config RADIO_TIMBERDALE
config RADIO_WL1273
tristate "Texas Instruments WL1273 I2C FM Radio"
depends on I2C && VIDEO_V4L2
- select MFD_CORE
select MFD_WL1273_CORE
select FW_LOADER
---help---
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index f484a6e04eb..390daf94d84 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -27,4 +27,4 @@ obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_WL128X) += wl128x/
-EXTRA_CFLAGS += -Isound
+ccflags-y += -Isound
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index 444b4cf7e65..d1fab588506 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -92,10 +92,6 @@ static int radio_si4713_s_audout(struct file *file, void *priv,
static int radio_si4713_querycap(struct file *file, void *priv,
struct v4l2_capability *capability)
{
- struct radio_si4713_device *rsdev;
-
- rsdev = video_get_drvdata(video_devdata(file));
-
strlcpy(capability->driver, "radio-si4713", sizeof(capability->driver));
strlcpy(capability->card, "Silicon Labs Si4713 Modulator",
sizeof(capability->card));
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 95ddcc4845d..db20904d01f 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -128,8 +128,10 @@ struct tea5764_write_regs {
u16 rdsbbl; /* PAUSEDET & RDSBBL */
} __attribute__ ((packed));
-#ifndef RADIO_TEA5764_XTAL
+#ifdef CONFIG_RADIO_TEA5764_XTAL
#define RADIO_TEA5764_XTAL 1
+#else
+#define RADIO_TEA5764_XTAL 0
#endif
static int radio_nr = -1;
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index f17b540d68a..3e9209f84e0 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <media/timb_radio.h>
#define DRIVER_NAME "timb-radio"
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 46cacf84504..8aa4968d57b 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/wl1273-core.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -2109,7 +2110,7 @@ static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
V4L2_CID_TUNE_ANTENNA_CAPACITOR,
0, 255, 1, 255);
if (ctrl)
- ctrl->is_volatile = 1;
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
if (radio->ctrl_handler.error) {
r = radio->ctrl_handler.error;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 4cf537043f9..a6ad707fae9 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -395,7 +395,6 @@ int si470x_disconnect_check(struct si470x_device *radio)
static void si470x_int_in_callback(struct urb *urb)
{
struct si470x_device *radio = urb->context;
- unsigned char buf[RDS_REPORT_SIZE];
int retval;
unsigned char regnr;
unsigned char blocknum;
@@ -423,7 +422,6 @@ static void si470x_int_in_callback(struct urb *urb)
if (urb->actual_length > 0) {
/* Update RDS registers with URB data */
- buf[0] = RDS_REPORT;
for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++)
radio->registers[STATUSRSSI + regnr] =
get_unaligned_be16(&radio->int_in_buffer[
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index c9f4a8e65dc..27aba936fb2 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index ec1d52f3890..4f5c43d2566 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -28,6 +28,8 @@
*
*/
+#include <linux/export.h>
+
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
@@ -84,12 +86,14 @@ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
ret = copy_from_user(&rds, buf, sizeof(rds));
fmdbg("(%d)type: %d, text %s, af %d\n",
ret, rds.text_type, rds.text, rds.af_freq);
+ if (ret)
+ return -EFAULT;
fmdev = video_drvdata(file);
fm_tx_set_radio_text(fmdev, rds.text, rds.text_type);
fm_tx_set_af(fmdev, rds.af_freq);
- return 0;
+ return sizeof(rds);
}
static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
@@ -557,7 +561,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
255, 1, 255);
if (ctrl)
- ctrl->is_volatile = 1;
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
return 0;
}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 899f783d92f..aeb7f43dfb6 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -4,8 +4,8 @@ menuconfig RC_CORE
default INPUT
---help---
Enable support for Remote Controllers on Linux. This is
- needed in order to support several video capture adapters.
- Currently, all supported devices use InfraRed.
+ needed in order to support several video capture adapters,
+ standalone IR receivers/transmitters, and RF receivers.
Enable this option if you have a video capture board even
if you don't need IR, as otherwise, you may not be able to
@@ -108,6 +108,25 @@ config IR_LIRC_CODEC
Enable this option to pass raw IR to and from userspace via
the LIRC interface.
+config RC_ATI_REMOTE
+ tristate "ATI / X10 based USB RF remote controls"
+ depends on USB_ARCH_HAS_HCD
+ depends on RC_CORE
+ select USB
+ help
+ Say Y here if you want to use an X10 based USB remote control.
+ These are RF remotes with USB receivers.
+
+ Such devices include the ATI remote that comes with many of ATI's
+ All-In-Wonder video cards, the X10 "Lola" remote, NVIDIA RF remote,
+ Medion RF remote, and SnapStream FireFly remote.
+
+ This driver provides mouse pointer, left and right mouse buttons,
+ and maps all the other remote buttons to keypress events.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ati_remote.
+
config IR_ENE
tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
depends on PNP
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index f224db027c4..2156e786b55 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
# stand-alone IR receivers/transmitters
+obj-$(CONFIG_RC_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
diff --git a/drivers/input/misc/ati_remote.c b/drivers/media/rc/ati_remote.c
index bce57129afb..303f22ea04c 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -1,6 +1,7 @@
/*
* USB ATI Remote support
*
+ * Copyright (c) 2011 Anssi Hannula <anssi.hannula@iki.fi>
* Version 2.2.0 Copyright (c) 2004 Torrey Hoffman <thoffman@arnor.net>
* Version 2.1.1 Copyright (c) 2002 Vladimir Dergachev
*
@@ -90,9 +91,11 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/usb/input.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
+#include <media/rc-core.h>
/*
* Module and Version Information, Module Parameters
@@ -104,8 +107,9 @@
#define ATI_REMOTE_PRODUCT_ID 0x0004
#define NVIDIA_REMOTE_PRODUCT_ID 0x0005
#define MEDION_REMOTE_PRODUCT_ID 0x0006
+#define FIREFLY_REMOTE_PRODUCT_ID 0x0008
-#define DRIVER_VERSION "2.2.1"
+#define DRIVER_VERSION "2.2.1"
#define DRIVER_AUTHOR "Torrey Hoffman <thoffman@arnor.net>"
#define DRIVER_DESC "ATI/X10 RF USB Remote Control"
@@ -139,16 +143,21 @@ static int repeat_delay = REPEAT_DELAY;
module_param(repeat_delay, int, 0644);
MODULE_PARM_DESC(repeat_delay, "Delay before sending repeats, default = 500 msec");
+static bool mouse = true;
+module_param(mouse, bool, 0444);
+MODULE_PARM_DESC(mouse, "Enable mouse device, default = yes");
+
#define dbginfo(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0)
#undef err
#define err(format, arg...) printk(KERN_ERR format , ## arg)
static struct usb_device_id ati_remote_table[] = {
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID) },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID) },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID) },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID) },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID) },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_MEDION_X10 },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_SNAPSTREAM_FIREFLY },
{} /* Terminating entry */
};
@@ -167,6 +176,7 @@ static char init2[] = { 0x01, 0x00, 0x20, 0x14, 0x20, 0x20, 0x20 };
struct ati_remote {
struct input_dev *idev;
+ struct rc_dev *rdev;
struct usb_device *udev;
struct usb_interface *interface;
@@ -186,11 +196,16 @@ struct ati_remote {
unsigned int repeat_count;
- char name[NAME_BUFSIZE];
- char phys[NAME_BUFSIZE];
+ char rc_name[NAME_BUFSIZE];
+ char rc_phys[NAME_BUFSIZE];
+ char mouse_name[NAME_BUFSIZE];
+ char mouse_phys[NAME_BUFSIZE];
wait_queue_head_t wait;
int send_flags;
+
+ int users; /* 0-2, users are rc and input */
+ struct mutex open_mutex;
};
/* "Kinds" of messages sent from the hardware to the driver. */
@@ -233,64 +248,11 @@ static const struct {
{KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
{KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
- /* keyboard. */
- {KIND_FILTERED, 0xd2, 0x0d, EV_KEY, KEY_1, 1},
- {KIND_FILTERED, 0xd3, 0x0e, EV_KEY, KEY_2, 1},
- {KIND_FILTERED, 0xd4, 0x0f, EV_KEY, KEY_3, 1},
- {KIND_FILTERED, 0xd5, 0x10, EV_KEY, KEY_4, 1},
- {KIND_FILTERED, 0xd6, 0x11, EV_KEY, KEY_5, 1},
- {KIND_FILTERED, 0xd7, 0x12, EV_KEY, KEY_6, 1},
- {KIND_FILTERED, 0xd8, 0x13, EV_KEY, KEY_7, 1},
- {KIND_FILTERED, 0xd9, 0x14, EV_KEY, KEY_8, 1},
- {KIND_FILTERED, 0xda, 0x15, EV_KEY, KEY_9, 1},
- {KIND_FILTERED, 0xdc, 0x17, EV_KEY, KEY_0, 1},
- {KIND_FILTERED, 0xc5, 0x00, EV_KEY, KEY_A, 1},
- {KIND_FILTERED, 0xc6, 0x01, EV_KEY, KEY_B, 1},
- {KIND_FILTERED, 0xde, 0x19, EV_KEY, KEY_C, 1},
- {KIND_FILTERED, 0xe0, 0x1b, EV_KEY, KEY_D, 1},
- {KIND_FILTERED, 0xe6, 0x21, EV_KEY, KEY_E, 1},
- {KIND_FILTERED, 0xe8, 0x23, EV_KEY, KEY_F, 1},
-
- /* "special" keys */
- {KIND_FILTERED, 0xdd, 0x18, EV_KEY, KEY_KPENTER, 1}, /* "check" */
- {KIND_FILTERED, 0xdb, 0x16, EV_KEY, KEY_MENU, 1}, /* "menu" */
- {KIND_FILTERED, 0xc7, 0x02, EV_KEY, KEY_POWER, 1}, /* Power */
- {KIND_FILTERED, 0xc8, 0x03, EV_KEY, KEY_TV, 1}, /* TV */
- {KIND_FILTERED, 0xc9, 0x04, EV_KEY, KEY_DVD, 1}, /* DVD */
- {KIND_FILTERED, 0xca, 0x05, EV_KEY, KEY_WWW, 1}, /* WEB */
- {KIND_FILTERED, 0xcb, 0x06, EV_KEY, KEY_BOOKMARKS, 1}, /* "book" */
- {KIND_FILTERED, 0xcc, 0x07, EV_KEY, KEY_EDIT, 1}, /* "hand" */
- {KIND_FILTERED, 0xe1, 0x1c, EV_KEY, KEY_COFFEE, 1}, /* "timer" */
- {KIND_FILTERED, 0xe5, 0x20, EV_KEY, KEY_FRONT, 1}, /* "max" */
- {KIND_FILTERED, 0xe2, 0x1d, EV_KEY, KEY_LEFT, 1}, /* left */
- {KIND_FILTERED, 0xe4, 0x1f, EV_KEY, KEY_RIGHT, 1}, /* right */
- {KIND_FILTERED, 0xe7, 0x22, EV_KEY, KEY_DOWN, 1}, /* down */
- {KIND_FILTERED, 0xdf, 0x1a, EV_KEY, KEY_UP, 1}, /* up */
- {KIND_FILTERED, 0xe3, 0x1e, EV_KEY, KEY_OK, 1}, /* "OK" */
- {KIND_FILTERED, 0xce, 0x09, EV_KEY, KEY_VOLUMEDOWN, 1}, /* VOL + */
- {KIND_FILTERED, 0xcd, 0x08, EV_KEY, KEY_VOLUMEUP, 1}, /* VOL - */
- {KIND_FILTERED, 0xcf, 0x0a, EV_KEY, KEY_MUTE, 1}, /* MUTE */
- {KIND_FILTERED, 0xd0, 0x0b, EV_KEY, KEY_CHANNELUP, 1}, /* CH + */
- {KIND_FILTERED, 0xd1, 0x0c, EV_KEY, KEY_CHANNELDOWN, 1},/* CH - */
- {KIND_FILTERED, 0xec, 0x27, EV_KEY, KEY_RECORD, 1}, /* ( o) red */
- {KIND_FILTERED, 0xea, 0x25, EV_KEY, KEY_PLAY, 1}, /* ( >) */
- {KIND_FILTERED, 0xe9, 0x24, EV_KEY, KEY_REWIND, 1}, /* (<<) */
- {KIND_FILTERED, 0xeb, 0x26, EV_KEY, KEY_FORWARD, 1}, /* (>>) */
- {KIND_FILTERED, 0xed, 0x28, EV_KEY, KEY_STOP, 1}, /* ([]) */
- {KIND_FILTERED, 0xee, 0x29, EV_KEY, KEY_PAUSE, 1}, /* ('') */
- {KIND_FILTERED, 0xf0, 0x2b, EV_KEY, KEY_PREVIOUS, 1}, /* (<-) */
- {KIND_FILTERED, 0xef, 0x2a, EV_KEY, KEY_NEXT, 1}, /* (>+) */
- {KIND_FILTERED, 0xf2, 0x2D, EV_KEY, KEY_INFO, 1}, /* PLAYING */
- {KIND_FILTERED, 0xf3, 0x2E, EV_KEY, KEY_HOME, 1}, /* TOP */
- {KIND_FILTERED, 0xf4, 0x2F, EV_KEY, KEY_END, 1}, /* END */
- {KIND_FILTERED, 0xf5, 0x30, EV_KEY, KEY_SELECT, 1}, /* SELECT */
-
+ /* Non-mouse events are handled by rc-core */
{KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
};
/* Local function prototypes */
-static int ati_remote_open (struct input_dev *inputdev);
-static void ati_remote_close (struct input_dev *inputdev);
static int ati_remote_sendpacket (struct ati_remote *ati_remote, u16 cmd, unsigned char *data);
static void ati_remote_irq_out (struct urb *urb);
static void ati_remote_irq_in (struct urb *urb);
@@ -313,9 +275,10 @@ static struct usb_driver ati_remote_driver = {
static void ati_remote_dump(struct device *dev, unsigned char *data,
unsigned int len)
{
- if ((len == 1) && (data[0] != (unsigned char)0xff) && (data[0] != 0x00))
- dev_warn(dev, "Weird byte 0x%02x\n", data[0]);
- else if (len == 4)
+ if (len == 1) {
+ if (data[0] != (unsigned char)0xff && data[0] != 0x00)
+ dev_warn(dev, "Weird byte 0x%02x\n", data[0]);
+ } else if (len == 4)
dev_warn(dev, "Weird key %02x %02x %02x %02x\n",
data[0], data[1], data[2], data[3]);
else
@@ -326,29 +289,60 @@ static void ati_remote_dump(struct device *dev, unsigned char *data,
/*
* ati_remote_open
*/
-static int ati_remote_open(struct input_dev *inputdev)
+static int ati_remote_open(struct ati_remote *ati_remote)
{
- struct ati_remote *ati_remote = input_get_drvdata(inputdev);
+ int err = 0;
+
+ mutex_lock(&ati_remote->open_mutex);
+
+ if (ati_remote->users++ != 0)
+ goto out; /* one was already active */
/* On first open, submit the read urb which was set up previously. */
ati_remote->irq_urb->dev = ati_remote->udev;
if (usb_submit_urb(ati_remote->irq_urb, GFP_KERNEL)) {
dev_err(&ati_remote->interface->dev,
"%s: usb_submit_urb failed!\n", __func__);
- return -EIO;
+ err = -EIO;
}
- return 0;
+out: mutex_unlock(&ati_remote->open_mutex);
+ return err;
}
/*
* ati_remote_close
*/
-static void ati_remote_close(struct input_dev *inputdev)
+static void ati_remote_close(struct ati_remote *ati_remote)
+{
+ mutex_lock(&ati_remote->open_mutex);
+ if (--ati_remote->users == 0)
+ usb_kill_urb(ati_remote->irq_urb);
+ mutex_unlock(&ati_remote->open_mutex);
+}
+
+static int ati_remote_input_open(struct input_dev *inputdev)
{
struct ati_remote *ati_remote = input_get_drvdata(inputdev);
+ return ati_remote_open(ati_remote);
+}
- usb_kill_urb(ati_remote->irq_urb);
+static void ati_remote_input_close(struct input_dev *inputdev)
+{
+ struct ati_remote *ati_remote = input_get_drvdata(inputdev);
+ ati_remote_close(ati_remote);
+}
+
+static int ati_remote_rc_open(struct rc_dev *rdev)
+{
+ struct ati_remote *ati_remote = rdev->priv;
+ return ati_remote_open(ati_remote);
+}
+
+static void ati_remote_rc_close(struct rc_dev *rdev)
+{
+ struct ati_remote *ati_remote = rdev->priv;
+ ati_remote_close(ati_remote);
}
/*
@@ -413,10 +407,8 @@ static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
/*
* Decide if the table entry matches the remote input.
*/
- if ((((ati_remote_tbl[i].data1 & 0x0f) == (d1 & 0x0f))) &&
- ((((ati_remote_tbl[i].data1 >> 4) -
- (d1 >> 4) + rem) & 0x0f) == 0x0f) &&
- (ati_remote_tbl[i].data2 == d2))
+ if (ati_remote_tbl[i].data1 == d1 &&
+ ati_remote_tbl[i].data2 == d2)
return i;
}
@@ -468,8 +460,10 @@ static void ati_remote_input_report(struct urb *urb)
struct ati_remote *ati_remote = urb->context;
unsigned char *data= ati_remote->inbuf;
struct input_dev *dev = ati_remote->idev;
- int index, acc;
+ int index = -1;
+ int acc;
int remote_num;
+ unsigned char scancode[2];
/* Deal with strange looking inputs */
if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -481,26 +475,41 @@ static void ati_remote_input_report(struct urb *urb)
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
- if (channel_mask & (1 << (remote_num + 1))) {
+ if (channel_mask & (1 << (remote_num + 1))) {
dbginfo(&ati_remote->interface->dev,
"Masked input from channel 0x%02x: data %02x,%02x, mask= 0x%02lx\n",
remote_num, data[1], data[2], channel_mask);
return;
}
- /* Look up event code index in translation table */
- index = ati_remote_event_lookup(remote_num, data[1], data[2]);
- if (index < 0) {
- dev_warn(&ati_remote->interface->dev,
- "Unknown input from channel 0x%02x: data %02x,%02x\n",
- remote_num, data[1], data[2]);
- return;
- }
- dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; data %02x,%02x; index %d; keycode %d\n",
- remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+ scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
- if (ati_remote_tbl[index].kind == KIND_LITERAL) {
+ /*
+ * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
+ * so we have to clear them. The first bit is a bit tricky as the
+ * "non-toggled" state depends on remote_num, so we xor it with the
+ * second bit which is only used for toggle.
+ */
+ scancode[0] ^= (data[2] & 0x80);
+
+ scancode[1] = data[2] & ~0x80;
+
+ /* Look up event code index in mouse translation table. */
+ index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+
+ if (index >= 0) {
+ dbginfo(&ati_remote->interface->dev,
+ "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
+ remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+ if (!dev)
+ return; /* no mouse device */
+ } else
+ dbginfo(&ati_remote->interface->dev,
+ "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
+ remote_num, data[1], data[2], scancode[0], scancode[1]);
+
+
+ if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
input_event(dev, ati_remote_tbl[index].type,
ati_remote_tbl[index].code,
ati_remote_tbl[index].value);
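
A minimal standalone sketch of the scancode derivation above (plain userspace C; the sample bytes are hypothetical, chosen for channel 1 / remote_num == 0 so that both the plain and the 0x8080-toggled packet resolve to the 0xd20d KEY_1 entry of the rc-ati-x10 keymap added later in this series). The returned 16-bit value is what gets handed to rc_keydown_notimeout() further below.

#include <stdio.h>

static unsigned int make_scancode(unsigned char d1, unsigned char d2,
				  int remote_num)
{
	unsigned char hi, lo;

	/* strip the per-channel offset from the high nibble of d1 */
	hi = ((d1 - ((remote_num + 1) << 4)) & 0xf0) | (d1 & 0x0f);
	/* fold the 0x8080-style toggle: d2 bit 7 cancels the d1 bit 7 flip */
	hi ^= d2 & 0x80;
	lo = d2 & 0x7f;

	return (hi << 8) | lo;		/* same as (scancode[0] << 8) | scancode[1] */
}

int main(void)
{
	/* non-toggled and toggled variants of the same key press */
	printf("0x%04x\n", make_scancode(0xe2, 0x0d, 0));	/* prints 0xd20d */
	printf("0x%04x\n", make_scancode(0x62, 0x8d, 0));	/* prints 0xd20d */
	return 0;
}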
@@ -510,7 +519,7 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
- if (ati_remote_tbl[index].kind == KIND_FILTERED) {
+ if (index < 0 || ati_remote_tbl[index].kind == KIND_FILTERED) {
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
@@ -538,6 +547,20 @@ static void ati_remote_input_report(struct urb *urb)
msecs_to_jiffies(repeat_delay))))
return;
+ if (index < 0) {
+ /* Not a mouse event, hand it to rc-core. */
+ u32 rc_code = (scancode[0] << 8) | scancode[1];
+
+ /*
+ * We don't use the rc-core repeat handling yet as
+ * it would cause ghost repeats which would be a
+ * regression for this driver.
+ */
+ rc_keydown_notimeout(ati_remote->rdev, rc_code,
+ data[2]);
+ rc_keyup(ati_remote->rdev);
+ return;
+ }
input_event(dev, ati_remote_tbl[index].type,
ati_remote_tbl[index].code, 1);
@@ -630,7 +653,7 @@ static int ati_remote_alloc_buffers(struct usb_device *udev,
return -1;
ati_remote->outbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
- &ati_remote->outbuf_dma);
+ &ati_remote->outbuf_dma);
if (!ati_remote->outbuf)
return -1;
@@ -675,14 +698,33 @@ static void ati_remote_input_init(struct ati_remote *ati_remote)
input_set_drvdata(idev, ati_remote);
- idev->open = ati_remote_open;
- idev->close = ati_remote_close;
+ idev->open = ati_remote_input_open;
+ idev->close = ati_remote_input_close;
- idev->name = ati_remote->name;
- idev->phys = ati_remote->phys;
+ idev->name = ati_remote->mouse_name;
+ idev->phys = ati_remote->mouse_phys;
usb_to_input_id(ati_remote->udev, &idev->id);
- idev->dev.parent = &ati_remote->udev->dev;
+ idev->dev.parent = &ati_remote->interface->dev;
+}
+
+static void ati_remote_rc_init(struct ati_remote *ati_remote)
+{
+ struct rc_dev *rdev = ati_remote->rdev;
+
+ rdev->priv = ati_remote;
+ rdev->driver_type = RC_DRIVER_SCANCODE;
+ rdev->allowed_protos = RC_TYPE_OTHER;
+ rdev->driver_name = "ati_remote";
+
+ rdev->open = ati_remote_rc_open;
+ rdev->close = ati_remote_rc_close;
+
+ rdev->input_name = ati_remote->rc_name;
+ rdev->input_phys = ati_remote->rc_phys;
+
+ usb_to_input_id(ati_remote->udev, &rdev->input_id);
+ rdev->dev.parent = &ati_remote->interface->dev;
}
static int ati_remote_initialize(struct ati_remote *ati_remote)
@@ -735,6 +777,7 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
struct usb_endpoint_descriptor *endpoint_in, *endpoint_out;
struct ati_remote *ati_remote;
struct input_dev *input_dev;
+ struct rc_dev *rc_dev;
int err = -ENOMEM;
if (iface_host->desc.bNumEndpoints != 2) {
@@ -755,8 +798,8 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
}
ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ati_remote || !input_dev)
+ rc_dev = rc_allocate_device();
+ if (!ati_remote || !rc_dev)
goto fail1;
/* Allocate URB buffers, URBs */
@@ -766,44 +809,78 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
ati_remote->endpoint_in = endpoint_in;
ati_remote->endpoint_out = endpoint_out;
ati_remote->udev = udev;
- ati_remote->idev = input_dev;
+ ati_remote->rdev = rc_dev;
ati_remote->interface = interface;
- usb_make_path(udev, ati_remote->phys, sizeof(ati_remote->phys));
- strlcat(ati_remote->phys, "/input0", sizeof(ati_remote->phys));
+ usb_make_path(udev, ati_remote->rc_phys, sizeof(ati_remote->rc_phys));
+ strlcpy(ati_remote->mouse_phys, ati_remote->rc_phys,
+ sizeof(ati_remote->mouse_phys));
+
+ strlcat(ati_remote->rc_phys, "/input0", sizeof(ati_remote->rc_phys));
+ strlcat(ati_remote->mouse_phys, "/input1", sizeof(ati_remote->mouse_phys));
if (udev->manufacturer)
- strlcpy(ati_remote->name, udev->manufacturer, sizeof(ati_remote->name));
+ strlcpy(ati_remote->rc_name, udev->manufacturer,
+ sizeof(ati_remote->rc_name));
if (udev->product)
- snprintf(ati_remote->name, sizeof(ati_remote->name),
- "%s %s", ati_remote->name, udev->product);
+ snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name),
+ "%s %s", ati_remote->rc_name, udev->product);
- if (!strlen(ati_remote->name))
- snprintf(ati_remote->name, sizeof(ati_remote->name),
+ if (!strlen(ati_remote->rc_name))
+ snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name),
DRIVER_DESC "(%04x,%04x)",
le16_to_cpu(ati_remote->udev->descriptor.idVendor),
le16_to_cpu(ati_remote->udev->descriptor.idProduct));
- ati_remote_input_init(ati_remote);
+ snprintf(ati_remote->mouse_name, sizeof(ati_remote->mouse_name),
+ "%s mouse", ati_remote->rc_name);
+
+ if (id->driver_info)
+ rc_dev->map_name = (const char *)id->driver_info;
+ else
+ rc_dev->map_name = RC_MAP_ATI_X10;
+
+ ati_remote_rc_init(ati_remote);
+ mutex_init(&ati_remote->open_mutex);
/* Device Hardware Initialization - fills in ati_remote->idev from udev. */
err = ati_remote_initialize(ati_remote);
if (err)
goto fail3;
- /* Set up and register input device */
- err = input_register_device(ati_remote->idev);
+ /* Set up and register rc device */
+ err = rc_register_device(ati_remote->rdev);
if (err)
goto fail3;
+ /* use our delay for rc_dev */
+ ati_remote->rdev->input_dev->rep[REP_DELAY] = repeat_delay;
+
+ /* Set up and register mouse input device */
+ if (mouse) {
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ goto fail4;
+
+ ati_remote->idev = input_dev;
+ ati_remote_input_init(ati_remote);
+ err = input_register_device(input_dev);
+
+ if (err)
+ goto fail5;
+ }
+
usb_set_intfdata(interface, ati_remote);
return 0;
+ fail5: input_free_device(input_dev);
+ fail4: rc_unregister_device(rc_dev);
+ rc_dev = NULL;
fail3: usb_kill_urb(ati_remote->irq_urb);
usb_kill_urb(ati_remote->out_urb);
fail2: ati_remote_free_buffers(ati_remote);
- fail1: input_free_device(input_dev);
+ fail1: rc_free_device(rc_dev);
kfree(ati_remote);
return err;
}
@@ -824,7 +901,9 @@ static void ati_remote_disconnect(struct usb_interface *interface)
usb_kill_urb(ati_remote->irq_urb);
usb_kill_urb(ati_remote->out_urb);
- input_unregister_device(ati_remote->idev);
+ if (ati_remote->idev)
+ input_unregister_device(ati_remote->idev);
+ rc_unregister_device(ati_remote->rdev);
ati_remote_free_buffers(ati_remote);
kfree(ati_remote);
}
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 2b9c2569d74..cf10ecf5ace 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -30,6 +30,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
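
The pr_fmt() define added above is what allows the ene_notice()/ene_warn() wrappers to be dropped in the hunks below; a minimal sketch of the convention (hypothetical demo module — KBUILD_MODNAME expands to the module's own name, which is ene_ir for the driver itself):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/module.h>
#include <linux/printk.h>

static int __init prfmt_demo_init(void)
{
	/* expands to printk(KERN_WARNING pr_fmt("...")), i.e. "<modname>: ..." */
	pr_warn("device seems to be disabled\n");
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);

MODULE_LICENSE("GPL");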
@@ -118,31 +120,31 @@ static int ene_hw_detect(struct ene_device *dev)
dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;
if (hw_revision == 0xFF) {
- ene_warn("device seems to be disabled");
- ene_warn("send a mail to lirc-list@lists.sourceforge.net");
- ene_warn("please attach output of acpidump and dmidecode");
+ pr_warn("device seems to be disabled\n");
+ pr_warn("send a mail to lirc-list@lists.sourceforge.net\n");
+ pr_warn("please attach output of acpidump and dmidecode\n");
return -ENODEV;
}
- ene_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x",
- chip_major, chip_minor, old_ver, hw_revision);
+ pr_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
+ chip_major, chip_minor, old_ver, hw_revision);
- ene_notice("PLL freq = %d", dev->pll_freq);
+ pr_notice("PLL freq = %d\n", dev->pll_freq);
if (chip_major == 0x33) {
- ene_warn("chips 0x33xx aren't supported");
+ pr_warn("chips 0x33xx aren't supported\n");
return -ENODEV;
}
if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_C;
- ene_notice("KB3926C detected");
+ pr_notice("KB3926C detected\n");
} else if (old_ver == 0x24 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_B;
- ene_notice("KB3926B detected");
+ pr_notice("KB3926B detected\n");
} else {
dev->hw_revision = ENE_HW_D;
- ene_notice("KB3926D or higher detected");
+ pr_notice("KB3926D or higher detected\n");
}
/* detect features hardware supports */
@@ -152,7 +154,7 @@ static int ene_hw_detect(struct ene_device *dev)
fw_reg1 = ene_read_reg(dev, ENE_FW1);
fw_reg2 = ene_read_reg(dev, ENE_FW2);
- ene_notice("Firmware regs: %02x %02x", fw_reg1, fw_reg2);
+ pr_notice("Firmware regs: %02x %02x\n", fw_reg1, fw_reg2);
dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
@@ -161,30 +163,29 @@ static int ene_hw_detect(struct ene_device *dev)
if (dev->hw_learning_and_tx_capable)
dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);
- ene_notice("Hardware features:");
+ pr_notice("Hardware features:\n");
if (dev->hw_learning_and_tx_capable) {
- ene_notice("* Supports transmitting & learning mode");
- ene_notice(" This feature is rare and therefore,");
- ene_notice(" you are welcome to test it,");
- ene_notice(" and/or contact the author via:");
- ene_notice(" lirc-list@lists.sourceforge.net");
- ene_notice(" or maximlevitsky@gmail.com");
+ pr_notice("* Supports transmitting & learning mode\n");
+ pr_notice(" This feature is rare and therefore,\n");
+ pr_notice(" you are welcome to test it,\n");
+ pr_notice(" and/or contact the author via:\n");
+ pr_notice(" lirc-list@lists.sourceforge.net\n");
+ pr_notice(" or maximlevitsky@gmail.com\n");
- ene_notice("* Uses GPIO %s for IR raw input",
- dev->hw_use_gpio_0a ? "40" : "0A");
+ pr_notice("* Uses GPIO %s for IR raw input\n",
+ dev->hw_use_gpio_0a ? "40" : "0A");
if (dev->hw_fan_input)
- ene_notice("* Uses unused fan feedback input as source"
- " of demodulated IR data");
+ pr_notice("* Uses unused fan feedback input as source of demodulated IR data\n");
}
if (!dev->hw_fan_input)
- ene_notice("* Uses GPIO %s for IR demodulated input",
- dev->hw_use_gpio_0a ? "0A" : "40");
+ pr_notice("* Uses GPIO %s for IR demodulated input\n",
+ dev->hw_use_gpio_0a ? "0A" : "40");
if (dev->hw_extra_buffer)
- ene_notice("* Uses new style input buffer");
+ pr_notice("* Uses new style input buffer\n");
return 0;
}
@@ -215,13 +216,13 @@ static void ene_rx_setup_hw_buffer(struct ene_device *dev)
dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;
- ene_notice("Hardware uses 2 extended buffers:");
- ene_notice(" 0x%04x - len : %d", dev->extra_buf1_address,
- dev->extra_buf1_len);
- ene_notice(" 0x%04x - len : %d", dev->extra_buf2_address,
- dev->extra_buf2_len);
+ pr_notice("Hardware uses 2 extended buffers:\n");
+ pr_notice(" 0x%04x - len : %d\n",
+ dev->extra_buf1_address, dev->extra_buf1_len);
+ pr_notice(" 0x%04x - len : %d\n",
+ dev->extra_buf2_address, dev->extra_buf2_len);
- ene_notice("Total buffer len = %d", dev->buffer_len);
+ pr_notice("Total buffer len = %d\n", dev->buffer_len);
if (dev->buffer_len > 64 || dev->buffer_len < 16)
goto error;
@@ -240,7 +241,7 @@ static void ene_rx_setup_hw_buffer(struct ene_device *dev)
ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
return;
error:
- ene_warn("Error validating extra buffers, device probably won't work");
+ pr_warn("Error validating extra buffers, device probably won't work\n");
dev->hw_extra_buffer = false;
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
@@ -588,7 +589,7 @@ static void ene_tx_enable(struct ene_device *dev)
dbg("TX: Transmitter #2 is connected");
if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
- ene_warn("TX: transmitter cable isn't connected!");
+ pr_warn("TX: transmitter cable isn't connected!\n");
/* disable receive on revc */
if (dev->hw_revision == ENE_HW_C)
@@ -615,7 +616,7 @@ static void ene_tx_sample(struct ene_device *dev)
bool pulse = dev->tx_sample_pulse;
if (!dev->tx_buffer) {
- ene_warn("TX: BUG: attempt to transmit NULL buffer");
+ pr_warn("TX: BUG: attempt to transmit NULL buffer\n");
return;
}
@@ -1049,7 +1050,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev->hw_learning_and_tx_capable = true;
setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
(long unsigned int)dev);
- ene_warn("Simulation of TX activated");
+ pr_warn("Simulation of TX activated\n");
}
if (!dev->hw_learning_and_tx_capable)
@@ -1089,7 +1090,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (error < 0)
goto error;
- ene_notice("driver has been successfully loaded");
+ pr_notice("driver has been successfully loaded\n");
return 0;
error:
if (dev && dev->irq >= 0)
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index 017c209cdf8..fd108d90f75 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -182,20 +182,11 @@
#define ENE_HW_C 2 /* 3926C */
#define ENE_HW_D 3 /* 3926D or later */
-#define ene_printk(level, text, ...) \
- printk(level ENE_DRIVER_NAME ": " text "\n", ## __VA_ARGS__)
-
-#define ene_notice(text, ...) ene_printk(KERN_NOTICE, text, ## __VA_ARGS__)
-#define ene_warn(text, ...) ene_printk(KERN_WARNING, text, ## __VA_ARGS__)
-
-
-#define __dbg(level, format, ...) \
- do { \
- if (debug >= level) \
- printk(KERN_DEBUG ENE_DRIVER_NAME \
- ": " format "\n", ## __VA_ARGS__); \
- } while (0)
-
+#define __dbg(level, format, ...) \
+do { \
+ if (debug >= level) \
+ pr_debug(format "\n", ## __VA_ARGS__); \
+} while (0)
#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6bc35eeb653..6ed96465137 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
#include <linux/input.h>
#include <linux/usb.h>
@@ -516,19 +517,19 @@ static int send_packet(struct imon_context *ictx)
if (retval) {
ictx->tx.busy = false;
smp_rmb(); /* ensure later readers know we're not busy */
- pr_err("error submitting urb(%d)\n", retval);
+ pr_err_ratelimited("error submitting urb(%d)\n", retval);
} else {
/* Wait for transmission to complete (or abort) */
mutex_unlock(&ictx->lock);
retval = wait_for_completion_interruptible(
&ictx->tx.finished);
if (retval)
- pr_err("task interrupted\n");
+ pr_err_ratelimited("task interrupted\n");
mutex_lock(&ictx->lock);
retval = ictx->tx.status;
if (retval)
- pr_err("packet tx failed (%d)\n", retval);
+ pr_err_ratelimited("packet tx failed (%d)\n", retval);
}
kfree(control_req);
@@ -830,20 +831,20 @@ static ssize_t vfd_write(struct file *file, const char *buf,
ictx = file->private_data;
if (!ictx) {
- pr_err("no context for device\n");
+ pr_err_ratelimited("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->dev_present_intf0) {
- pr_err("no iMON device present\n");
+ pr_err_ratelimited("no iMON device present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes <= 0 || n_bytes > 32) {
- pr_err("invalid payload size\n");
+ pr_err_ratelimited("invalid payload size\n");
retval = -EINVAL;
goto exit;
}
@@ -869,7 +870,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
retval = send_packet(ictx);
if (retval) {
- pr_err("send packet failed for packet #%d\n", seq / 2);
+ pr_err_ratelimited("send packet #%d failed\n", seq / 2);
goto exit;
} else {
seq += 2;
@@ -883,7 +884,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
ictx->usb_tx_buf[7] = (unsigned char) seq;
retval = send_packet(ictx);
if (retval)
- pr_err("send packet failed for packet #%d\n", seq / 2);
+ pr_err_ratelimited("send packet #%d failed\n", seq / 2);
exit:
mutex_unlock(&ictx->lock);
@@ -912,20 +913,21 @@ static ssize_t lcd_write(struct file *file, const char *buf,
ictx = file->private_data;
if (!ictx) {
- pr_err("no context for device\n");
+ pr_err_ratelimited("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- pr_err("no iMON display present\n");
+ pr_err_ratelimited("no iMON display present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes != 8) {
- pr_err("invalid payload size: %d (expected 8)\n", (int)n_bytes);
+ pr_err_ratelimited("invalid payload size: %d (expected 8)\n",
+ (int)n_bytes);
retval = -EINVAL;
goto exit;
}
@@ -937,7 +939,7 @@ static ssize_t lcd_write(struct file *file, const char *buf,
retval = send_packet(ictx);
if (retval) {
- pr_err("send packet failed!\n");
+ pr_err_ratelimited("send packet failed!\n");
goto exit;
} else {
dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n",
@@ -1656,7 +1658,7 @@ static void usb_rx_callback_intf0(struct urb *urb)
return;
ictx = (struct imon_context *)urb->context;
- if (!ictx)
+ if (!ictx || !ictx->dev_present_intf0)
return;
switch (urb->status) {
@@ -1688,7 +1690,7 @@ static void usb_rx_callback_intf1(struct urb *urb)
return;
ictx = (struct imon_context *)urb->context;
- if (!ictx)
+ if (!ictx || !ictx->dev_present_intf1)
return;
switch (urb->status) {
@@ -2116,7 +2118,6 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
ictx->dev = dev;
ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf));
- ictx->dev_present_intf0 = true;
ictx->rx_urb_intf0 = rx_urb;
ictx->tx_urb = tx_urb;
ictx->rf_device = false;
@@ -2155,6 +2156,8 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto rdev_setup_failed;
}
+ ictx->dev_present_intf0 = true;
+
mutex_unlock(&ictx->lock);
return ictx;
@@ -2198,7 +2201,6 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
}
ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
- ictx->dev_present_intf1 = true;
ictx->rx_urb_intf1 = rx_urb;
ret = -ENODEV;
@@ -2227,6 +2229,8 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
goto urb_submit_failed;
}
+ ictx->dev_present_intf1 = true;
+
mutex_unlock(&ictx->lock);
return ictx;
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 624449afaa6..035668e27f6 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -13,6 +13,7 @@
*/
#include <linux/bitrev.h>
+#include <linux/module.h>
#include "rc-core-priv.h"
#define JVC_NBITS 16 /* dev(8) + func(8) */
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index e5eeec4da76..5faba2a2fdd 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/module.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
#include <media/rc-core.h>
@@ -98,7 +99,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
return 0;
}
-static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
+static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
size_t n, loff_t *ppos)
{
struct lirc_codec *lirc;
@@ -140,10 +141,11 @@ out:
}
static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
- unsigned long __user arg)
+ unsigned long arg)
{
struct lirc_codec *lirc;
struct rc_dev *dev;
+ u32 __user *argp = (u32 __user *)(arg);
int ret = 0;
__u32 val = 0, tmp;
@@ -156,7 +158,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
return -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
- ret = get_user(val, (__u32 *)arg);
+ ret = get_user(val, argp);
if (ret)
return ret;
}
@@ -265,7 +267,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
}
if (_IOC_DIR(cmd) & _IOC_READ)
- ret = put_user(val, (__u32 *)arg);
+ ret = put_user(val, argp);
return ret;
}
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 63ee722dbd0..17f8db00435 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -13,6 +13,7 @@
*/
#include <linux/bitrev.h>
+#include <linux/module.h>
#include "rc-core-priv.h"
#define NEC_NBITS 32
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 27808bb59eb..2e5cd3100b6 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -12,8 +12,10 @@
* GNU General Public License for more details.
*/
+#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
+#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h"
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index ebdba553991..9ab663a507a 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -21,6 +21,7 @@
*/
#include "rc-core-priv.h"
+#include <linux/module.h>
#define RC5_NBITS 14
#define RC5X_NBITS 20
diff --git a/drivers/media/rc/ir-rc5-sz-decoder.c b/drivers/media/rc/ir-rc5-sz-decoder.c
index 90aa8868629..ec8d4a2e2c5 100644
--- a/drivers/media/rc/ir-rc5-sz-decoder.c
+++ b/drivers/media/rc/ir-rc5-sz-decoder.c
@@ -21,6 +21,7 @@
*/
#include "rc-core-priv.h"
+#include <linux/module.h>
#define RC5_SZ_NBITS 15
#define RC5_UNIT 888888 /* ns */
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 755dafa3871..140fb67e2f8 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -13,6 +13,7 @@
*/
#include "rc-core-priv.h"
+#include <linux/module.h>
/*
* This decoder currently supports:
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index a92de80c48d..d5e2b50aff1 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -13,6 +13,7 @@
*/
#include <linux/bitrev.h>
+#include <linux/module.h>
#include "rc-core-priv.h"
#define SONY_UNIT 600000 /* ns */
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index b57fc83fb4d..36e4d5e8dd6 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-apac-viewcomp.o \
rc-asus-pc39.o \
rc-ati-tv-wonder-hd-600.o \
+ rc-ati-x10.o \
rc-avermedia-a16d.o \
rc-avermedia.o \
rc-avermedia-cardbus.o \
@@ -47,6 +48,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-lirc.o \
rc-lme2510.o \
rc-manli.o \
+ rc-medion-x10.o \
rc-msi-digivox-ii.o \
rc-msi-digivox-iii.o \
rc-msi-tvanywhere.o \
@@ -70,6 +72,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-hauppauge.o \
rc-rc6-mce.o \
rc-real-audio-220-32-keys.o \
+ rc-snapstream-firefly.o \
rc-streamzap.o \
rc-tbs-nec.o \
rc-technisat-usb2.o \
diff --git a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
index 9a8752fdcca..b0e42df7ff8 100644
--- a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
+++ b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* ADS Tech Instant TV DVB-T PCI Remote */
diff --git a/drivers/media/rc/keymaps/rc-alink-dtu-m.c b/drivers/media/rc/keymaps/rc-alink-dtu-m.c
index fe652e928dc..4e6ade8e616 100644
--- a/drivers/media/rc/keymaps/rc-alink-dtu-m.c
+++ b/drivers/media/rc/keymaps/rc-alink-dtu-m.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* A-Link DTU(m) slim remote, 6 rows, 3 columns. */
static struct rc_map_table alink_dtu_m[] = {
diff --git a/drivers/media/rc/keymaps/rc-anysee.c b/drivers/media/rc/keymaps/rc-anysee.c
index 884f1b51a8e..c735fe10a39 100644
--- a/drivers/media/rc/keymaps/rc-anysee.c
+++ b/drivers/media/rc/keymaps/rc-anysee.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table anysee[] = {
{ 0x0800, KEY_0 },
diff --git a/drivers/media/rc/keymaps/rc-apac-viewcomp.c b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
index 7af188209ff..8c92ff95f94 100644
--- a/drivers/media/rc/keymaps/rc-apac-viewcomp.c
+++ b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Attila Kondoros <attila.kondoros@chello.hu> */
diff --git a/drivers/media/rc/keymaps/rc-asus-pc39.c b/drivers/media/rc/keymaps/rc-asus-pc39.c
index b2481154491..2caf2117759 100644
--- a/drivers/media/rc/keymaps/rc-asus-pc39.c
+++ b/drivers/media/rc/keymaps/rc-asus-pc39.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Marc Fargas <telenieko@telenieko.com>
diff --git a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
index f766b24b015..2031224a202 100644
--- a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
+++ b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* ATI TV Wonder HD 600 USB
Devin Heitmueller <devin.heitmueller@gmail.com>
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
new file mode 100644
index 00000000000..e1b8b2605c4
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -0,0 +1,104 @@
+/*
+ * ATI X10 RF remote keytable
+ *
+ * Copyright (C) 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * This file is based on the static generic keytable previously found in
+ * ati_remote.c, which is
+ * Copyright (c) 2004 Torrey Hoffman <thoffman@arnor.net>
+ * Copyright (c) 2002 Vladimir Dergachev
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table ati_x10[] = {
+ { 0xd20d, KEY_1 },
+ { 0xd30e, KEY_2 },
+ { 0xd40f, KEY_3 },
+ { 0xd510, KEY_4 },
+ { 0xd611, KEY_5 },
+ { 0xd712, KEY_6 },
+ { 0xd813, KEY_7 },
+ { 0xd914, KEY_8 },
+ { 0xda15, KEY_9 },
+ { 0xdc17, KEY_0 },
+ { 0xc500, KEY_A },
+ { 0xc601, KEY_B },
+ { 0xde19, KEY_C },
+ { 0xe01b, KEY_D },
+ { 0xe621, KEY_E },
+ { 0xe823, KEY_F },
+
+ { 0xdd18, KEY_KPENTER }, /* "check" */
+ { 0xdb16, KEY_MENU }, /* "menu" */
+ { 0xc702, KEY_POWER }, /* Power */
+ { 0xc803, KEY_TV }, /* TV */
+ { 0xc904, KEY_DVD }, /* DVD */
+ { 0xca05, KEY_WWW }, /* WEB */
+ { 0xcb06, KEY_BOOKMARKS }, /* "book" */
+ { 0xcc07, KEY_EDIT }, /* "hand" */
+ { 0xe11c, KEY_COFFEE }, /* "timer" */
+ { 0xe520, KEY_FRONT }, /* "max" */
+ { 0xe21d, KEY_LEFT }, /* left */
+ { 0xe41f, KEY_RIGHT }, /* right */
+ { 0xe722, KEY_DOWN }, /* down */
+ { 0xdf1a, KEY_UP }, /* up */
+ { 0xe31e, KEY_OK }, /* "OK" */
+ { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
+ { 0xcd08, KEY_VOLUMEUP }, /* VOL - */
+ { 0xcf0a, KEY_MUTE }, /* MUTE */
+ { 0xd00b, KEY_CHANNELUP }, /* CH + */
+ { 0xd10c, KEY_CHANNELDOWN },/* CH - */
+ { 0xec27, KEY_RECORD }, /* ( o) red */
+ { 0xea25, KEY_PLAY }, /* ( >) */
+ { 0xe924, KEY_REWIND }, /* (<<) */
+ { 0xeb26, KEY_FORWARD }, /* (>>) */
+ { 0xed28, KEY_STOP }, /* ([]) */
+ { 0xee29, KEY_PAUSE }, /* ('') */
+ { 0xf02b, KEY_PREVIOUS }, /* (<-) */
+ { 0xef2a, KEY_NEXT }, /* (>+) */
+ { 0xf22d, KEY_INFO }, /* PLAYING */
+ { 0xf32e, KEY_HOME }, /* TOP */
+ { 0xf42f, KEY_END }, /* END */
+ { 0xf530, KEY_SELECT }, /* SELECT */
+};
+
+static struct rc_map_list ati_x10_map = {
+ .map = {
+ .scan = ati_x10,
+ .size = ARRAY_SIZE(ati_x10),
+ .rc_type = RC_TYPE_OTHER,
+ .name = RC_MAP_ATI_X10,
+ }
+};
+
+static int __init init_rc_map_ati_x10(void)
+{
+ return rc_map_register(&ati_x10_map);
+}
+
+static void __exit exit_rc_map_ati_x10(void)
+{
+ rc_map_unregister(&ati_x10_map);
+}
+
+module_init(init_rc_map_ati_x10)
+module_exit(exit_rc_map_ati_x10)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-a16d.c b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
index ec9beeebd41..894939ac17f 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-a16d.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table avermedia_a16d[] = {
{ 0x20, KEY_LIST},
diff --git a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
index 22f54d413a3..d2aaf5b9e39 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Oldrich Jedlicka <oldium.pro@seznam.cz> */
diff --git a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
index c25809d4c81..dc2baf06239 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Matt Jesson <dvb@jesson.eclipse.co.uk */
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index 3d2cbe4e5e4..04269d31fa1 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -10,6 +10,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Avermedia M135A with RM-JX and RM-K6 remote controls
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
index 8cd7f28808b..e83b1a1939b 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
@@ -9,6 +9,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Avermedia M733A with IR model RM-K6
diff --git a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
index 9d68af217d8..8344bcc595b 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Initial keytable is from Jose Alberto Reguero <jareguero@telefonica.net>
and Felipe Morales Moreno <felipe.morales.moreno@gmail.com> */
diff --git a/drivers/media/rc/keymaps/rc-avermedia.c b/drivers/media/rc/keymaps/rc-avermedia.c
index edfa71506d3..c6063dfcd50 100644
--- a/drivers/media/rc/keymaps/rc-avermedia.c
+++ b/drivers/media/rc/keymaps/rc-avermedia.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Alex Hermann <gaaf@gmx.net> */
diff --git a/drivers/media/rc/keymaps/rc-avertv-303.c b/drivers/media/rc/keymaps/rc-avertv-303.c
index 32e94988dc9..14f78451e64 100644
--- a/drivers/media/rc/keymaps/rc-avertv-303.c
+++ b/drivers/media/rc/keymaps/rc-avertv-303.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* AVERTV STUDIO 303 Remote */
diff --git a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
index c3f6d62ac89..ea7f2d0f31e 100644
--- a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
+++ b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table azurewave_ad_tu700[] = {
{ 0x0000, KEY_TAB }, /* Tab */
diff --git a/drivers/media/rc/keymaps/rc-behold-columbus.c b/drivers/media/rc/keymaps/rc-behold-columbus.c
index 8bf058f67f0..086b4b1f19e 100644
--- a/drivers/media/rc/keymaps/rc-behold-columbus.c
+++ b/drivers/media/rc/keymaps/rc-behold-columbus.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Beholder Intl. Ltd. 2008
* Dmitry Belimov d.belimov@google.com
diff --git a/drivers/media/rc/keymaps/rc-behold.c b/drivers/media/rc/keymaps/rc-behold.c
index c909a234c77..0877e348094 100644
--- a/drivers/media/rc/keymaps/rc-behold.c
+++ b/drivers/media/rc/keymaps/rc-behold.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Igor Kuznetsov <igk72@ya.ru>
diff --git a/drivers/media/rc/keymaps/rc-budget-ci-old.c b/drivers/media/rc/keymaps/rc-budget-ci-old.c
index 2f66e4310d2..8311e092c09 100644
--- a/drivers/media/rc/keymaps/rc-budget-ci-old.c
+++ b/drivers/media/rc/keymaps/rc-budget-ci-old.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* From reading the following remotes:
diff --git a/drivers/media/rc/keymaps/rc-cinergy-1400.c b/drivers/media/rc/keymaps/rc-cinergy-1400.c
index 284534b67e7..0c87fbaf99a 100644
--- a/drivers/media/rc/keymaps/rc-cinergy-1400.c
+++ b/drivers/media/rc/keymaps/rc-cinergy-1400.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Cinergy 1400 DVB-T */
diff --git a/drivers/media/rc/keymaps/rc-cinergy.c b/drivers/media/rc/keymaps/rc-cinergy.c
index cf3a6bfb190..309e9e3fb6f 100644
--- a/drivers/media/rc/keymaps/rc-cinergy.c
+++ b/drivers/media/rc/keymaps/rc-cinergy.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table cinergy[] = {
{ 0x00, KEY_0 },
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index 7a5f5300caf..4d13a7f2e5c 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -16,6 +16,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table dib0700_nec_table[] = {
/* Key codes for the Pixelview SBTVD remote */
diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
index 4af12e45dfb..ba81d9697cf 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
@@ -16,6 +16,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table dib0700_rc5_table[] = {
/* Key codes for the tiny Pinnacle remote*/
diff --git a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
index f68b450f559..bed78acb919 100644
--- a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
+++ b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table digitalnow_tinytwin[] = {
{ 0x0000, KEY_MUTE }, /* [symbol speaker] */
diff --git a/drivers/media/rc/keymaps/rc-digittrade.c b/drivers/media/rc/keymaps/rc-digittrade.c
index 21d49871f2a..a3b97a1fe22 100644
--- a/drivers/media/rc/keymaps/rc-digittrade.c
+++ b/drivers/media/rc/keymaps/rc-digittrade.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Digittrade DVB-T USB Stick remote controller. */
/* Imported from af9015.h.
diff --git a/drivers/media/rc/keymaps/rc-dm1105-nec.c b/drivers/media/rc/keymaps/rc-dm1105-nec.c
index d024fbf88bc..67fc9fb0c00 100644
--- a/drivers/media/rc/keymaps/rc-dm1105-nec.c
+++ b/drivers/media/rc/keymaps/rc-dm1105-nec.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* DVBWorld remotes
Igor M. Liplianin <liplianin@me.by>
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
index 82c0200029a..91ea91de917 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* DigitalNow DNTV Live DVB-T Remote */
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
index 015e99de06d..fd680d4d3eb 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* DigitalNow DNTV Live! DVB-T Pro Remote */
diff --git a/drivers/media/rc/keymaps/rc-em-terratec.c b/drivers/media/rc/keymaps/rc-em-terratec.c
index 269d4299da3..d1fcd64c0f9 100644
--- a/drivers/media/rc/keymaps/rc-em-terratec.c
+++ b/drivers/media/rc/keymaps/rc-em-terratec.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table em_terratec[] = {
{ 0x01, KEY_CHANNEL },
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
index e388698a069..2fe45e41fe4 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Encore ENLTV-FM v5.3
Mauro Carvalho Chehab <mchehab@infradead.org>
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv.c b/drivers/media/rc/keymaps/rc-encore-enltv.c
index e56ac6e9670..223de75a6d1 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
Juan Pablo Sormani <sorman@gmail.com> */
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c
index b6264f1bc4c..669cbff22b7 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv2.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the botton
Mauro Carvalho Chehab <mchehab@infradead.org> */
diff --git a/drivers/media/rc/keymaps/rc-evga-indtube.c b/drivers/media/rc/keymaps/rc-evga-indtube.c
index a2bf24f6dfb..2c647fc2591 100644
--- a/drivers/media/rc/keymaps/rc-evga-indtube.c
+++ b/drivers/media/rc/keymaps/rc-evga-indtube.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* EVGA inDtube
Devin Heitmueller <devin.heitmueller@gmail.com>
diff --git a/drivers/media/rc/keymaps/rc-eztv.c b/drivers/media/rc/keymaps/rc-eztv.c
index 1e8e5b2d6e3..76921445c1d 100644
--- a/drivers/media/rc/keymaps/rc-eztv.c
+++ b/drivers/media/rc/keymaps/rc-eztv.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Alfons Geser <a.geser@cox.net>
* updates from Job D. R. Borges <jobdrb@ig.com.br> */
diff --git a/drivers/media/rc/keymaps/rc-flydvb.c b/drivers/media/rc/keymaps/rc-flydvb.c
index a8b0f66edaa..3a6bba311b0 100644
--- a/drivers/media/rc/keymaps/rc-flydvb.c
+++ b/drivers/media/rc/keymaps/rc-flydvb.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table flydvb[] = {
{ 0x01, KEY_ZOOM }, /* Full Screen */
diff --git a/drivers/media/rc/keymaps/rc-flyvideo.c b/drivers/media/rc/keymaps/rc-flyvideo.c
index 5bbe6837175..bf9da584643 100644
--- a/drivers/media/rc/keymaps/rc-flyvideo.c
+++ b/drivers/media/rc/keymaps/rc-flyvideo.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table flyvideo[] = {
{ 0x0f, KEY_0 },
diff --git a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
index c80b25c65b5..2f0970fe783 100644
--- a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
+++ b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* DViCO FUSION HDTV MCE remote */
diff --git a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
index 068c9ead98d..0e98ec467c3 100644
--- a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
+++ b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* GADMEI UTV330+ RM008Z remote
Shine Liu <shinel@foxmail.com>
diff --git a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
index cdbbed46792..a2e2faa1d1b 100644
--- a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
+++ b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Remote control for the Genius TVGO A11MCE
diff --git a/drivers/media/rc/keymaps/rc-gotview7135.c b/drivers/media/rc/keymaps/rc-gotview7135.c
index a38bdde8c14..864614e1931 100644
--- a/drivers/media/rc/keymaps/rc-gotview7135.c
+++ b/drivers/media/rc/keymaps/rc-gotview7135.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Mike Baikov <mike@baikov.com> */
diff --git a/drivers/media/rc/keymaps/rc-hauppauge.c b/drivers/media/rc/keymaps/rc-hauppauge.c
index cd3db777977..e51c6163378 100644
--- a/drivers/media/rc/keymaps/rc-hauppauge.c
+++ b/drivers/media/rc/keymaps/rc-hauppauge.c
@@ -17,6 +17,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Hauppauge:the newer, gray remotes (seems there are multiple
diff --git a/drivers/media/rc/keymaps/rc-imon-mce.c b/drivers/media/rc/keymaps/rc-imon-mce.c
index 0ea2aa190d8..124c7228ba8 100644
--- a/drivers/media/rc/keymaps/rc-imon-mce.c
+++ b/drivers/media/rc/keymaps/rc-imon-mce.c
@@ -10,6 +10,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* mce-mode imon mce remote key table */
static struct rc_map_table imon_mce[] = {
diff --git a/drivers/media/rc/keymaps/rc-imon-pad.c b/drivers/media/rc/keymaps/rc-imon-pad.c
index 75d3843fdc3..999c6295c70 100644
--- a/drivers/media/rc/keymaps/rc-imon-pad.c
+++ b/drivers/media/rc/keymaps/rc-imon-pad.c
@@ -10,6 +10,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* standard imon remote key table, which isn't really entirely
diff --git a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
index 1f59e163f75..34540dfc3df 100644
--- a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
+++ b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* IO-DATA BCTV7E Remote */
diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c
index f31dc5c1ad9..4264a787c15 100644
--- a/drivers/media/rc/keymaps/rc-kaiomy.c
+++ b/drivers/media/rc/keymaps/rc-kaiomy.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Kaiomy TVnPC U2
Mauro Carvalho Chehab <mchehab@infradead.org>
diff --git a/drivers/media/rc/keymaps/rc-kworld-315u.c b/drivers/media/rc/keymaps/rc-kworld-315u.c
index 7f33edb4724..e48cd267dda 100644
--- a/drivers/media/rc/keymaps/rc-kworld-315u.c
+++ b/drivers/media/rc/keymaps/rc-kworld-315u.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Kworld 315U
*/
diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
index 7fa17a369f2..32998d6b787 100644
--- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
+++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Kworld Plus TV Analog Lite PCI IR
Mauro Carvalho Chehab <mchehab@infradead.org>
diff --git a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
index 8faa54ff16e..03d762d986e 100644
--- a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
+++ b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table leadtek_y04g0051[] = {
{ 0x0300, KEY_POWER2 },
diff --git a/drivers/media/rc/keymaps/rc-lirc.c b/drivers/media/rc/keymaps/rc-lirc.c
index e8e23e233c3..fbf08fa6f46 100644
--- a/drivers/media/rc/keymaps/rc-lirc.c
+++ b/drivers/media/rc/keymaps/rc-lirc.c
@@ -10,6 +10,7 @@
*/
#include <media/rc-core.h>
+#include <linux/module.h>
static struct rc_map_table lirc[] = {
{ },
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index 129d3f9a461..51f18bb50a3 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -10,6 +10,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table lme2510_rc[] = {
diff --git a/drivers/media/rc/keymaps/rc-manli.c b/drivers/media/rc/keymaps/rc-manli.c
index 23b2d04e7a9..e7038bb71bf 100644
--- a/drivers/media/rc/keymaps/rc-manli.c
+++ b/drivers/media/rc/keymaps/rc-manli.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Michael Tokarev <mjt@tls.msk.ru>
keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
new file mode 100644
index 00000000000..09e2cc01d11
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -0,0 +1,117 @@
+/*
+ * Medion X10 RF remote keytable
+ *
+ * Copyright (C) 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * This file is based on a keytable provided by
+ * Jan Losinski <losinski@wh2.tu-dresden.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table medion_x10[] = {
+ { 0xf12c, KEY_TV }, /* TV */
+ { 0xf22d, KEY_VCR }, /* VCR */
+ { 0xc904, KEY_DVD }, /* DVD */
+ { 0xcb06, KEY_AUDIO }, /* MUSIC */
+
+ { 0xf32e, KEY_RADIO }, /* RADIO */
+ { 0xca05, KEY_DIRECTORY }, /* PHOTO */
+ { 0xf42f, KEY_INFO }, /* TV-PREVIEW */
+ { 0xf530, KEY_LIST }, /* CHANNEL-LST */
+
+ { 0xe01b, KEY_SETUP }, /* SETUP */
+ { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+ { 0xcd08, KEY_VOLUMEDOWN }, /* VOL - */
+ { 0xce09, KEY_VOLUMEUP }, /* VOL + */
+ { 0xd00b, KEY_CHANNELUP }, /* CHAN + */
+ { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
+ { 0xc500, KEY_MUTE }, /* MUTE */
+
+ { 0xf732, KEY_RED }, /* red */
+ { 0xf833, KEY_GREEN }, /* green */
+ { 0xf934, KEY_YELLOW }, /* yellow */
+ { 0xfa35, KEY_BLUE }, /* blue */
+ { 0xdb16, KEY_TEXT }, /* TXT */
+
+ { 0xd20d, KEY_1 },
+ { 0xd30e, KEY_2 },
+ { 0xd40f, KEY_3 },
+ { 0xd510, KEY_4 },
+ { 0xd611, KEY_5 },
+ { 0xd712, KEY_6 },
+ { 0xd813, KEY_7 },
+ { 0xd914, KEY_8 },
+ { 0xda15, KEY_9 },
+ { 0xdc17, KEY_0 },
+ { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+ { 0xe520, KEY_DELETE }, /* DELETE */
+
+ { 0xfb36, KEY_KEYBOARD }, /* RENAME */
+ { 0xdd18, KEY_SCREEN }, /* SNAPSHOT */
+
+ { 0xdf1a, KEY_UP }, /* up */
+ { 0xe722, KEY_DOWN }, /* down */
+ { 0xe21d, KEY_LEFT }, /* left */
+ { 0xe41f, KEY_RIGHT }, /* right */
+ { 0xe31e, KEY_OK }, /* OK */
+
+ { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
+ { 0xfd38, KEY_EDIT }, /* EDIT IMAGE */
+
+ { 0xe924, KEY_REWIND }, /* rewind (<<) */
+ { 0xea25, KEY_PLAY }, /* play ( >) */
+ { 0xeb26, KEY_FORWARD }, /* forward (>>) */
+ { 0xec27, KEY_RECORD }, /* record ( o) */
+ { 0xed28, KEY_STOP }, /* stop ([]) */
+ { 0xee29, KEY_PAUSE }, /* pause ('') */
+
+ { 0xe621, KEY_PREVIOUS }, /* prev */
+ { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+ { 0xe823, KEY_NEXT }, /* next */
+ { 0xde19, KEY_MENU }, /* MENU */
+ { 0xff3a, KEY_LANGUAGE }, /* AUDIO */
+
+ { 0xc702, KEY_POWER }, /* POWER */
+};
+
+static struct rc_map_list medion_x10_map = {
+ .map = {
+ .scan = medion_x10,
+ .size = ARRAY_SIZE(medion_x10),
+ .rc_type = RC_TYPE_OTHER,
+ .name = RC_MAP_MEDION_X10,
+ }
+};
+
+static int __init init_rc_map_medion_x10(void)
+{
+ return rc_map_register(&medion_x10_map);
+}
+
+static void __exit exit_rc_map_medion_x10(void)
+{
+ rc_map_unregister(&medion_x10_map);
+}
+
+module_init(init_rc_map_medion_x10)
+module_exit(exit_rc_map_medion_x10)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
index 7b9a01b6e4c..c64e9e30045 100644
--- a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
+++ b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table msi_digivox_ii[] = {
{ 0x0002, KEY_2 },
diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
index ae9d06b3915..303a0b73175 100644
--- a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
+++ b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* MSI DIGIVOX mini III */
/* Uses NEC extended 0x61d6. */
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
index 8e9969d1239..c393d8a50bc 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
Keycodes for remote on the MSI TV@nywhere Plus. The controller IC on the card
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
index fdd213ff1ad..a7003d3a3c8 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* MSI TV@nywhere MASTER remote */
diff --git a/drivers/media/rc/keymaps/rc-nebula.c b/drivers/media/rc/keymaps/rc-nebula.c
index ddae20e9cd9..3f0ddd7afd3 100644
--- a/drivers/media/rc/keymaps/rc-nebula.c
+++ b/drivers/media/rc/keymaps/rc-nebula.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table nebula[] = {
{ 0x00, KEY_0 },
diff --git a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
index 26f114c5c0d..f3b86c8db67 100644
--- a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Terratec Cinergy Hybrid T USB XS FM
Mauro Carvalho Chehab <mchehab@redhat.com>
diff --git a/drivers/media/rc/keymaps/rc-norwood.c b/drivers/media/rc/keymaps/rc-norwood.c
index f9f2fa2819b..9e65f07157a 100644
--- a/drivers/media/rc/keymaps/rc-norwood.c
+++ b/drivers/media/rc/keymaps/rc-norwood.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Norwood Micro (non-Pro) TV Tuner
By Peter Naulls <peter@chocky.org>
diff --git a/drivers/media/rc/keymaps/rc-npgtech.c b/drivers/media/rc/keymaps/rc-npgtech.c
index 4aa588bf6d6..65d0cfc3c33 100644
--- a/drivers/media/rc/keymaps/rc-npgtech.c
+++ b/drivers/media/rc/keymaps/rc-npgtech.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table npgtech[] = {
{ 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
diff --git a/drivers/media/rc/keymaps/rc-pctv-sedna.c b/drivers/media/rc/keymaps/rc-pctv-sedna.c
index 7cdef6e6cc0..bf2cbdfe2e3 100644
--- a/drivers/media/rc/keymaps/rc-pctv-sedna.c
+++ b/drivers/media/rc/keymaps/rc-pctv-sedna.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Mapping for the 28 key remote control as seen at
http://www.sednacomputer.com/photo/cardbus-tv.jpg
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-color.c b/drivers/media/rc/keymaps/rc-pinnacle-color.c
index 23b8c505c6a..b46cd8fe643 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-color.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-color.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table pinnacle_color[] = {
{ 0x59, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-grey.c b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
index 6ba8c368d10..d525df9ad86 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-grey.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table pinnacle_grey[] = {
{ 0x3a, KEY_0 },
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index 8d558ae6345..a4603d03537 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Pinnacle PCTV HD 800i mini remote */
@@ -20,6 +21,7 @@ static struct rc_map_table pinnacle_pctv_hd[] = {
{ 0x0701, KEY_MENU }, /* Pinnacle logo */
{ 0x0739, KEY_POWER },
{ 0x0703, KEY_VOLUMEUP },
+ { 0x0705, KEY_OK },
{ 0x0709, KEY_VOLUMEDOWN },
{ 0x0706, KEY_CHANNELUP },
{ 0x070c, KEY_CHANNELDOWN },
diff --git a/drivers/media/rc/keymaps/rc-pixelview-002t.c b/drivers/media/rc/keymaps/rc-pixelview-002t.c
index e5ab071f635..33eb64333c6 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-002t.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-002t.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Keytable for 002-T IR remote provided together with Pixelview
diff --git a/drivers/media/rc/keymaps/rc-pixelview-mk12.c b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
index 125fc3949c1..21f4dd25c2e 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-mk12.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Keytable for MK-F12 IR remote provided together with Pixelview
diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c
index bd78d6ac1e1..f944ad2cac2 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-new.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-new.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
Mauro Carvalho Chehab <mchehab@infradead.org>
diff --git a/drivers/media/rc/keymaps/rc-pixelview.c b/drivers/media/rc/keymaps/rc-pixelview.c
index 06187e7db44..a6020eea7b9 100644
--- a/drivers/media/rc/keymaps/rc-pixelview.c
+++ b/drivers/media/rc/keymaps/rc-pixelview.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table pixelview[] = {
diff --git a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
index 5f9d546a86c..e74c571a5e4 100644
--- a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
+++ b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Remote control for Powercolor Real Angel 330
diff --git a/drivers/media/rc/keymaps/rc-proteus-2309.c b/drivers/media/rc/keymaps/rc-proteus-2309.c
index 8a3a643879d..adee8035ce9 100644
--- a/drivers/media/rc/keymaps/rc-proteus-2309.c
+++ b/drivers/media/rc/keymaps/rc-proteus-2309.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
diff --git a/drivers/media/rc/keymaps/rc-purpletv.c b/drivers/media/rc/keymaps/rc-purpletv.c
index ef90296bfd6..722597a20e4 100644
--- a/drivers/media/rc/keymaps/rc-purpletv.c
+++ b/drivers/media/rc/keymaps/rc-purpletv.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table purpletv[] = {
{ 0x03, KEY_POWER },
diff --git a/drivers/media/rc/keymaps/rc-pv951.c b/drivers/media/rc/keymaps/rc-pv951.c
index 5e8beee94de..0105d63c07a 100644
--- a/drivers/media/rc/keymaps/rc-pv951.c
+++ b/drivers/media/rc/keymaps/rc-pv951.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Mark Phalan <phalanm@o2.ie> */
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index c3907e211d3..753e43ec787 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -13,6 +13,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table rc6_mce[] = {
diff --git a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
index 6813d110211..073694d50f4 100644
--- a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
+++ b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Zogis Real Audio 220 - 32 keys IR */
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
new file mode 100644
index 00000000000..ef146520931
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -0,0 +1,107 @@
+/*
+ * SnapStream Firefly X10 RF remote keytable
+ *
+ * Copyright (C) 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table snapstream_firefly[] = {
+ { 0xf12c, KEY_ZOOM }, /* Maximize */
+ { 0xc702, KEY_CLOSE },
+
+ { 0xd20d, KEY_1 },
+ { 0xd30e, KEY_2 },
+ { 0xd40f, KEY_3 },
+ { 0xd510, KEY_4 },
+ { 0xd611, KEY_5 },
+ { 0xd712, KEY_6 },
+ { 0xd813, KEY_7 },
+ { 0xd914, KEY_8 },
+ { 0xda15, KEY_9 },
+ { 0xdc17, KEY_0 },
+ { 0xdb16, KEY_BACK },
+ { 0xdd18, KEY_KPENTER }, /* ent */
+
+ { 0xce09, KEY_VOLUMEUP },
+ { 0xcd08, KEY_VOLUMEDOWN },
+ { 0xcf0a, KEY_MUTE },
+ { 0xd00b, KEY_CHANNELUP },
+ { 0xd10c, KEY_CHANNELDOWN },
+ { 0xc500, KEY_VENDOR }, /* firefly */
+
+ { 0xf32e, KEY_INFO },
+ { 0xf42f, KEY_OPTION },
+
+ { 0xe21d, KEY_LEFT },
+ { 0xe41f, KEY_RIGHT },
+ { 0xe722, KEY_DOWN },
+ { 0xdf1a, KEY_UP },
+ { 0xe31e, KEY_OK },
+
+ { 0xe11c, KEY_MENU },
+ { 0xe520, KEY_EXIT },
+
+ { 0xec27, KEY_RECORD },
+ { 0xea25, KEY_PLAY },
+ { 0xed28, KEY_STOP },
+ { 0xe924, KEY_REWIND },
+ { 0xeb26, KEY_FORWARD },
+ { 0xee29, KEY_PAUSE },
+ { 0xf02b, KEY_PREVIOUS },
+ { 0xef2a, KEY_NEXT },
+
+ { 0xcb06, KEY_AUDIO }, /* Music */
+ { 0xca05, KEY_IMAGES }, /* Photos */
+ { 0xc904, KEY_DVD },
+ { 0xc803, KEY_TV },
+ { 0xcc07, KEY_VIDEO },
+
+ { 0xc601, KEY_HELP },
+ { 0xf22d, KEY_MODE }, /* Mouse */
+
+ { 0xde19, KEY_A },
+ { 0xe01b, KEY_B },
+ { 0xe621, KEY_C },
+ { 0xe823, KEY_D },
+};
+
+static struct rc_map_list snapstream_firefly_map = {
+ .map = {
+ .scan = snapstream_firefly,
+ .size = ARRAY_SIZE(snapstream_firefly),
+ .rc_type = RC_TYPE_OTHER,
+ .name = RC_MAP_SNAPSTREAM_FIREFLY,
+ }
+};
+
+static int __init init_rc_map_snapstream_firefly(void)
+{
+ return rc_map_register(&snapstream_firefly_map);
+}
+
+static void __exit exit_rc_map_snapstream_firefly(void)
+{
+ rc_map_unregister(&snapstream_firefly_map);
+}
+
+module_init(init_rc_map_snapstream_firefly)
+module_exit(exit_rc_map_snapstream_firefly)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
diff --git a/drivers/media/rc/keymaps/rc-streamzap.c b/drivers/media/rc/keymaps/rc-streamzap.c
index 92cc10d2f9c..f9a07578d98 100644
--- a/drivers/media/rc/keymaps/rc-streamzap.c
+++ b/drivers/media/rc/keymaps/rc-streamzap.c
@@ -10,6 +10,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table streamzap[] = {
/*
diff --git a/drivers/media/rc/keymaps/rc-tbs-nec.c b/drivers/media/rc/keymaps/rc-tbs-nec.c
index 7242ee66f6e..5039be782bc 100644
--- a/drivers/media/rc/keymaps/rc-tbs-nec.c
+++ b/drivers/media/rc/keymaps/rc-tbs-nec.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table tbs_nec[] = {
{ 0x84, KEY_POWER2}, /* power */
diff --git a/drivers/media/rc/keymaps/rc-technisat-usb2.c b/drivers/media/rc/keymaps/rc-technisat-usb2.c
index 4afe5774f19..f9733bb289d 100644
--- a/drivers/media/rc/keymaps/rc-technisat-usb2.c
+++ b/drivers/media/rc/keymaps/rc-technisat-usb2.c
@@ -30,6 +30,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table technisat_usb2[] = {
{0x0a0c, KEY_POWER},
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
index bc38e34b9fd..53629fb0151 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Terratec Cinergy Hybrid T USB XS
Devin Heitmueller <dheitmueller@linuxtv.org>
diff --git a/drivers/media/rc/keymaps/rc-terratec-slim-2.c b/drivers/media/rc/keymaps/rc-terratec-slim-2.c
index 44093918cf0..4c149ef712d 100644
--- a/drivers/media/rc/keymaps/rc-terratec-slim-2.c
+++ b/drivers/media/rc/keymaps/rc-terratec-slim-2.c
@@ -20,6 +20,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* TerraTec slim remote, 6 rows, 3 columns.
diff --git a/drivers/media/rc/keymaps/rc-terratec-slim.c b/drivers/media/rc/keymaps/rc-terratec-slim.c
index 1abafa5fd30..3d8a19cdb5a 100644
--- a/drivers/media/rc/keymaps/rc-terratec-slim.c
+++ b/drivers/media/rc/keymaps/rc-terratec-slim.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* TerraTec slim remote, 7 rows, 4 columns. */
/* Uses NEC extended 0x02bd. */
diff --git a/drivers/media/rc/keymaps/rc-tevii-nec.c b/drivers/media/rc/keymaps/rc-tevii-nec.c
index ef5ba3f3273..f2c3b75d858 100644
--- a/drivers/media/rc/keymaps/rc-tevii-nec.c
+++ b/drivers/media/rc/keymaps/rc-tevii-nec.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table tevii_nec[] = {
{ 0x0a, KEY_POWER2},
diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c
index 98ad085531f..454e0629569 100644
--- a/drivers/media/rc/keymaps/rc-tivo.c
+++ b/drivers/media/rc/keymaps/rc-tivo.c
@@ -9,6 +9,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/*
* Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand.c b/drivers/media/rc/keymaps/rc-total-media-in-hand.c
index 20ac4e19fb3..5b9f9ec1368 100644
--- a/drivers/media/rc/keymaps/rc-total-media-in-hand.c
+++ b/drivers/media/rc/keymaps/rc-total-media-in-hand.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Uses NEC extended 0x02bd */
static struct rc_map_table total_media_in_hand[] = {
diff --git a/drivers/media/rc/keymaps/rc-trekstor.c b/drivers/media/rc/keymaps/rc-trekstor.c
index f8190ead2e3..f9a2e0fabb9 100644
--- a/drivers/media/rc/keymaps/rc-trekstor.c
+++ b/drivers/media/rc/keymaps/rc-trekstor.c
@@ -19,6 +19,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* TrekStor DVB-T USB Stick remote controller. */
/* Imported from af9015.h.
diff --git a/drivers/media/rc/keymaps/rc-tt-1500.c b/drivers/media/rc/keymaps/rc-tt-1500.c
index 295f3738e30..caeff85603e 100644
--- a/drivers/media/rc/keymaps/rc-tt-1500.c
+++ b/drivers/media/rc/keymaps/rc-tt-1500.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* for the Technotrend 1500 bundled remotes (grey and black): */
diff --git a/drivers/media/rc/keymaps/rc-twinhan1027.c b/drivers/media/rc/keymaps/rc-twinhan1027.c
index 8bf8df64b08..509299b90c9 100644
--- a/drivers/media/rc/keymaps/rc-twinhan1027.c
+++ b/drivers/media/rc/keymaps/rc-twinhan1027.c
@@ -1,4 +1,5 @@
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table twinhan_vp1027[] = {
{ 0x16, KEY_POWER2 },
diff --git a/drivers/media/rc/keymaps/rc-videomate-m1f.c b/drivers/media/rc/keymaps/rc-videomate-m1f.c
index 4994d405c0a..3bd1de1f585 100644
--- a/drivers/media/rc/keymaps/rc-videomate-m1f.c
+++ b/drivers/media/rc/keymaps/rc-videomate-m1f.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table videomate_m1f[] = {
{ 0x01, KEY_POWER },
diff --git a/drivers/media/rc/keymaps/rc-videomate-s350.c b/drivers/media/rc/keymaps/rc-videomate-s350.c
index 9e474a6024e..8bfc3e8d909 100644
--- a/drivers/media/rc/keymaps/rc-videomate-s350.c
+++ b/drivers/media/rc/keymaps/rc-videomate-s350.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table videomate_s350[] = {
{ 0x00, KEY_TV},
diff --git a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
index 5f2a46e1f8f..390ce9431b3 100644
--- a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
+++ b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
static struct rc_map_table videomate_tv_pvr[] = {
{ 0x14, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
index bd8d021f40a..2852bf70506 100644
--- a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
+++ b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Leadtek Winfast TV USB II Deluxe remote
Magnus Alm <magnus.alm@gmail.com>
diff --git a/drivers/media/rc/keymaps/rc-winfast.c b/drivers/media/rc/keymaps/rc-winfast.c
index d8a34c14676..2df1cba2360 100644
--- a/drivers/media/rc/keymaps/rc-winfast.c
+++ b/drivers/media/rc/keymaps/rc-winfast.c
@@ -11,6 +11,7 @@
*/
#include <media/rc-map.h>
+#include <linux/module.h>
/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 85ff9a1ffb3..60d3c1e0971 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1,7 +1,7 @@
/*
* Driver for USB Windows Media Center Ed. eHome Infrared Transceivers
*
- * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ * Copyright (c) 2010-2011, Jarod Wilson <jarod@redhat.com>
*
* Based on the original lirc_mceusb and lirc_mceusb2 drivers, by Dan
* Conti, Martin Blatter and Daniel Melander, the latter of which was
@@ -15,6 +15,11 @@
* Jon Smirl, which included enhancements and simplifications to the
* incoming IR buffer parsing routines.
*
+ * Updated in July of 2011 with the aid of Microsoft's official
+ * remote/transceiver requirements and specification document, found at
+ * download.microsoft.com, title
+ * Windows-Media-Center-RC-IR-Collection-Green-Button-Specification-03-08-2011-V2.pdf
+ *
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,10 +42,11 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
+#include <linux/pm_wakeup.h>
#include <media/rc-core.h>
-#define DRIVER_VERSION "1.91"
-#define DRIVER_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
+#define DRIVER_VERSION "1.92"
+#define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
#define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
"device driver"
#define DRIVER_NAME "mceusb"
@@ -63,43 +69,90 @@
#define MCE_PULSE_MASK 0x7f /* Pulse mask */
#define MCE_MAX_PULSE_LENGTH 0x7f /* Longest transmittable pulse symbol */
-#define MCE_HW_CMD_HEADER 0xff /* MCE hardware command header */
-#define MCE_COMMAND_HEADER 0x9f /* MCE command header */
-#define MCE_COMMAND_MASK 0xe0 /* Mask out command bits */
-#define MCE_COMMAND_NULL 0x00 /* These show up various places... */
-/* if buf[i] & MCE_COMMAND_MASK == 0x80 and buf[i] != MCE_COMMAND_HEADER,
- * then we're looking at a raw IR data sample */
-#define MCE_COMMAND_IRDATA 0x80
-#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
-
-/* Sub-commands, which follow MCE_COMMAND_HEADER or MCE_HW_CMD_HEADER */
+/*
+ * The interface between the host and the IR hardware is command-response
+ * based. All commands and responses have a consistent format, where a lead
+ * byte always identifies the type of data following it. The lead byte has
+ * a port value in the 3 highest bits and a length value in the 5 lowest
+ * bits.
+ *
+ * The length field is overloaded, with a value of 11111 indicating that the
+ * following byte is a command or response code, and the length of the entire
+ * message is determined by the code. If the length field is not 11111, then
+ * it specifies the number of bytes of port data that follow.
+ */
+#define MCE_CMD 0x1f
+#define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */
+#define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */
+#define MCE_PORT_SER 0x6 /* 0xc0 thru 0xdf flush & 0x1f bytes */
+#define MCE_PORT_MASK 0xe0 /* Mask out command bits */
+
+/* Command port headers */
+#define MCE_CMD_PORT_IR 0x9f /* IR-related cmd/rsp */
+#define MCE_CMD_PORT_SYS 0xff /* System (non-IR) device cmd/rsp */
+
+/* Commands that set device state (2-4 bytes in length) */
+#define MCE_CMD_RESET 0xfe /* Reset device, 2 bytes */
+#define MCE_CMD_RESUME 0xaa /* Resume device after error, 2 bytes */
+#define MCE_CMD_SETIRCFS 0x06 /* Set tx carrier, 4 bytes */
+#define MCE_CMD_SETIRTIMEOUT 0x0c /* Set timeout, 4 bytes */
+#define MCE_CMD_SETIRTXPORTS 0x08 /* Set tx ports, 3 bytes */
+#define MCE_CMD_SETIRRXPORTEN 0x14 /* Set rx ports, 3 bytes */
+#define MCE_CMD_FLASHLED 0x23 /* Flash receiver LED, 2 bytes */
+
+/* Commands that query device state (all 2 bytes, unless noted) */
+#define MCE_CMD_GETIRCFS 0x07 /* Get carrier */
+#define MCE_CMD_GETIRTIMEOUT 0x0d /* Get timeout */
+#define MCE_CMD_GETIRTXPORTS 0x13 /* Get tx ports */
+#define MCE_CMD_GETIRRXPORTEN 0x15 /* Get rx ports */
+#define MCE_CMD_GETPORTSTATUS 0x11 /* Get tx port status, 3 bytes */
+#define MCE_CMD_GETIRNUMPORTS 0x16 /* Get number of ports */
+#define MCE_CMD_GETWAKESOURCE 0x17 /* Get wake source */
+#define MCE_CMD_GETEMVER 0x22 /* Get emulator interface version */
+#define MCE_CMD_GETDEVDETAILS 0x21 /* Get device details (em ver2 only) */
+#define MCE_CMD_GETWAKESUPPORT 0x20 /* Get wake details (em ver2 only) */
+#define MCE_CMD_GETWAKEVERSION 0x18 /* Get wake pattern (em ver2 only) */
+
+/* Misc commands */
+#define MCE_CMD_NOP 0xff /* No operation */
+
+/* Responses to commands (non-error cases) */
+#define MCE_RSP_EQIRCFS 0x06 /* tx carrier, 4 bytes */
+#define MCE_RSP_EQIRTIMEOUT 0x0c /* rx timeout, 4 bytes */
+#define MCE_RSP_GETWAKESOURCE 0x17 /* wake source, 3 bytes */
+#define MCE_RSP_EQIRTXPORTS 0x08 /* tx port mask, 3 bytes */
+#define MCE_RSP_EQIRRXPORTEN 0x14 /* rx port mask, 3 bytes */
+#define MCE_RSP_GETPORTSTATUS 0x11 /* tx port status, 7 bytes */
+#define MCE_RSP_EQIRRXCFCNT 0x15 /* rx carrier count, 4 bytes */
+#define MCE_RSP_EQIRNUMPORTS 0x16 /* number of ports, 4 bytes */
+#define MCE_RSP_EQWAKESUPPORT 0x20 /* wake capabilities, 3 bytes */
+#define MCE_RSP_EQWAKEVERSION 0x18 /* wake pattern details, 6 bytes */
+#define MCE_RSP_EQDEVDETAILS 0x21 /* device capabilities, 3 bytes */
+#define MCE_RSP_EQEMVER 0x22 /* emulator interface ver, 3 bytes */
+#define MCE_RSP_FLASHLED 0x23 /* success flashing LED, 2 bytes */
+
+/* Responses to error cases, must send MCE_CMD_RESUME to clear them */
+#define MCE_RSP_CMD_ILLEGAL 0xfe /* illegal command for port, 2 bytes */
+#define MCE_RSP_TX_TIMEOUT 0x81 /* tx timed out, 2 bytes */
+
+/* Misc commands/responses not defined in the MCE remote/transceiver spec */
#define MCE_CMD_SIG_END 0x01 /* End of signal */
#define MCE_CMD_PING 0x03 /* Ping device */
#define MCE_CMD_UNKNOWN 0x04 /* Unknown */
#define MCE_CMD_UNKNOWN2 0x05 /* Unknown */
-#define MCE_CMD_S_CARRIER 0x06 /* Set TX carrier frequency */
-#define MCE_CMD_G_CARRIER 0x07 /* Get TX carrier frequency */
-#define MCE_CMD_S_TXMASK 0x08 /* Set TX port bitmask */
#define MCE_CMD_UNKNOWN3 0x09 /* Unknown */
#define MCE_CMD_UNKNOWN4 0x0a /* Unknown */
#define MCE_CMD_G_REVISION 0x0b /* Get hw/sw revision */
-#define MCE_CMD_S_TIMEOUT 0x0c /* Set RX timeout value */
-#define MCE_CMD_G_TIMEOUT 0x0d /* Get RX timeout value */
#define MCE_CMD_UNKNOWN5 0x0e /* Unknown */
#define MCE_CMD_UNKNOWN6 0x0f /* Unknown */
-#define MCE_CMD_G_RXPORTSTS 0x11 /* Get RX port status */
-#define MCE_CMD_G_TXMASK 0x13 /* Set TX port bitmask */
-#define MCE_CMD_S_RXSENSOR 0x14 /* Set RX sensor (std/learning) */
-#define MCE_CMD_G_RXSENSOR 0x15 /* Get RX sensor (std/learning) */
-#define MCE_RSP_PULSE_COUNT 0x15 /* RX pulse count (only if learning) */
-#define MCE_CMD_TX_PORTS 0x16 /* Get number of TX ports */
-#define MCE_CMD_G_WAKESRC 0x17 /* Get wake source */
-#define MCE_CMD_UNKNOWN7 0x18 /* Unknown */
#define MCE_CMD_UNKNOWN8 0x19 /* Unknown */
#define MCE_CMD_UNKNOWN9 0x1b /* Unknown */
-#define MCE_CMD_DEVICE_RESET 0xaa /* Reset the hardware */
-#define MCE_RSP_CMD_INVALID 0xfe /* Invalid command issued */
+#define MCE_CMD_NULL 0x00 /* These show up various places... */
+/* if buf[i] & MCE_PORT_MASK == 0x80 and buf[i] != MCE_CMD_PORT_IR,
+ * then we're looking at a raw IR data sample */
+#define MCE_COMMAND_IRDATA 0x80
+#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
/* module parameters */
#ifdef CONFIG_USB_DEBUG
@@ -388,48 +441,37 @@ struct mceusb_dev {
char name[128];
char phys[64];
enum mceusb_model_type model;
+
+ bool need_reset; /* flag to issue a device resume cmd */
+ u8 emver; /* emulator interface version */
+ u8 num_txports; /* number of transmit ports */
+ u8 num_rxports; /* number of receive sensors */
+ u8 txports_cabled; /* bitmask of transmitters with cable */
+ u8 rxports_active; /* bitmask of active receive sensors */
};
-/*
- * MCE Device Command Strings
- * Device command responses vary from device to device...
- * - DEVICE_RESET resets the hardware to its default state
- * - GET_REVISION fetches the hardware/software revision, common
- * replies are ff 0b 45 ff 1b 08 and ff 0b 50 ff 1b 42
- * - GET_CARRIER_FREQ gets the carrier mode and frequency of the
- * device, with replies in the form of 9f 06 MM FF, where MM is 0-3,
- * meaning clk of 10000000, 2500000, 625000 or 156250, and FF is
- * ((clk / frequency) - 1)
- * - GET_RX_TIMEOUT fetches the receiver timeout in units of 50us,
- * response in the form of 9f 0c msb lsb
- * - GET_TX_BITMASK fetches the transmitter bitmask, replies in
- * the form of 9f 08 bm, where bm is the bitmask
- * - GET_RX_SENSOR fetches the RX sensor setting -- long-range
- * general use one or short-range learning one, in the form of
- * 9f 14 ss, where ss is either 01 for long-range or 02 for short
- * - SET_CARRIER_FREQ sets a new carrier mode and frequency
- * - SET_TX_BITMASK sets the transmitter bitmask
- * - SET_RX_TIMEOUT sets the receiver timeout
- * - SET_RX_SENSOR sets which receiver sensor to use
- */
-static char DEVICE_RESET[] = {MCE_COMMAND_NULL, MCE_HW_CMD_HEADER,
- MCE_CMD_DEVICE_RESET};
-static char GET_REVISION[] = {MCE_HW_CMD_HEADER, MCE_CMD_G_REVISION};
-static char GET_UNKNOWN[] = {MCE_HW_CMD_HEADER, MCE_CMD_UNKNOWN7};
-static char GET_UNKNOWN2[] = {MCE_COMMAND_HEADER, MCE_CMD_UNKNOWN2};
-static char GET_CARRIER_FREQ[] = {MCE_COMMAND_HEADER, MCE_CMD_G_CARRIER};
-static char GET_RX_TIMEOUT[] = {MCE_COMMAND_HEADER, MCE_CMD_G_TIMEOUT};
-static char GET_TX_BITMASK[] = {MCE_COMMAND_HEADER, MCE_CMD_G_TXMASK};
-static char GET_RX_SENSOR[] = {MCE_COMMAND_HEADER, MCE_CMD_G_RXSENSOR};
+/* MCE Device Command Strings, generally a port and command pair */
+static char DEVICE_RESUME[] = {MCE_CMD_NULL, MCE_CMD_PORT_SYS,
+ MCE_CMD_RESUME};
+static char GET_REVISION[] = {MCE_CMD_PORT_SYS, MCE_CMD_G_REVISION};
+static char GET_EMVER[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETEMVER};
+static char GET_WAKEVERSION[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETWAKEVERSION};
+static char FLASH_LED[] = {MCE_CMD_PORT_SYS, MCE_CMD_FLASHLED};
+static char GET_UNKNOWN2[] = {MCE_CMD_PORT_IR, MCE_CMD_UNKNOWN2};
+static char GET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRCFS};
+static char GET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTIMEOUT};
+static char GET_NUM_PORTS[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRNUMPORTS};
+static char GET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTXPORTS};
+static char GET_RX_SENSOR[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRRXPORTEN};
/* sub in desired values in lower byte or bytes for full command */
/* FIXME: make use of these for transmit.
-static char SET_CARRIER_FREQ[] = {MCE_COMMAND_HEADER,
- MCE_CMD_S_CARRIER, 0x00, 0x00};
-static char SET_TX_BITMASK[] = {MCE_COMMAND_HEADER, MCE_CMD_S_TXMASK, 0x00};
-static char SET_RX_TIMEOUT[] = {MCE_COMMAND_HEADER,
- MCE_CMD_S_TIMEOUT, 0x00, 0x00};
-static char SET_RX_SENSOR[] = {MCE_COMMAND_HEADER,
- MCE_CMD_S_RXSENSOR, 0x00};
+static char SET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR,
+ MCE_CMD_SETIRCFS, 0x00, 0x00};
+static char SET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00};
+static char SET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR,
+ MCE_CMD_SETIRTIMEOUT, 0x00, 0x00};
+static char SET_RX_SENSOR[] = {MCE_CMD_PORT_IR,
+ MCE_RSP_EQIRRXPORTEN, 0x00};
*/
static int mceusb_cmdsize(u8 cmd, u8 subcmd)
@@ -437,27 +479,33 @@ static int mceusb_cmdsize(u8 cmd, u8 subcmd)
int datasize = 0;
switch (cmd) {
- case MCE_COMMAND_NULL:
- if (subcmd == MCE_HW_CMD_HEADER)
+ case MCE_CMD_NULL:
+ if (subcmd == MCE_CMD_PORT_SYS)
datasize = 1;
break;
- case MCE_HW_CMD_HEADER:
+ case MCE_CMD_PORT_SYS:
switch (subcmd) {
+ case MCE_RSP_EQWAKEVERSION:
+ datasize = 4;
+ break;
case MCE_CMD_G_REVISION:
datasize = 2;
break;
+ case MCE_RSP_EQWAKESUPPORT:
+ datasize = 1;
+ break;
}
- case MCE_COMMAND_HEADER:
+ case MCE_CMD_PORT_IR:
switch (subcmd) {
case MCE_CMD_UNKNOWN:
- case MCE_CMD_S_CARRIER:
- case MCE_CMD_S_TIMEOUT:
- case MCE_RSP_PULSE_COUNT:
+ case MCE_RSP_EQIRCFS:
+ case MCE_RSP_EQIRTIMEOUT:
+ case MCE_RSP_EQIRRXCFCNT:
datasize = 2;
break;
case MCE_CMD_SIG_END:
- case MCE_CMD_S_TXMASK:
- case MCE_CMD_S_RXSENSOR:
+ case MCE_RSP_EQIRTXPORTS:
+ case MCE_RSP_EQIRRXPORTEN:
datasize = 1;
break;
}
@@ -470,9 +518,10 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
{
char codes[USB_BUFLEN * 3 + 1];
char inout[9];
- u8 cmd, subcmd, data1, data2;
+ u8 cmd, subcmd, data1, data2, data3, data4, data5;
struct device *dev = ir->dev;
int i, start, skip = 0;
+ u32 carrier, period;
if (!debug)
return;
@@ -500,18 +549,28 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
subcmd = buf[start + 1] & 0xff;
data1 = buf[start + 2] & 0xff;
data2 = buf[start + 3] & 0xff;
+ data3 = buf[start + 4] & 0xff;
+ data4 = buf[start + 5] & 0xff;
+ data5 = buf[start + 6] & 0xff;
switch (cmd) {
- case MCE_COMMAND_NULL:
- if ((subcmd == MCE_HW_CMD_HEADER) &&
- (data1 == MCE_CMD_DEVICE_RESET))
- dev_info(dev, "Device reset requested\n");
+ case MCE_CMD_NULL:
+ if (subcmd == MCE_CMD_NULL)
+ break;
+ if ((subcmd == MCE_CMD_PORT_SYS) &&
+ (data1 == MCE_CMD_RESUME))
+ dev_info(dev, "Device resume requested\n");
else
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
break;
- case MCE_HW_CMD_HEADER:
+ case MCE_CMD_PORT_SYS:
switch (subcmd) {
+ case MCE_RSP_EQEMVER:
+ if (!out)
+ dev_info(dev, "Emulator interface version %x\n",
+ data1);
+ break;
case MCE_CMD_G_REVISION:
if (len == 2)
dev_info(dev, "Get hw/sw rev?\n");
@@ -520,21 +579,35 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
"0x%02x 0x%02x\n", data1, data2,
buf[start + 4], buf[start + 5]);
break;
- case MCE_CMD_DEVICE_RESET:
- dev_info(dev, "Device reset requested\n");
+ case MCE_CMD_RESUME:
+ dev_info(dev, "Device resume requested\n");
+ break;
+ case MCE_RSP_CMD_ILLEGAL:
+ dev_info(dev, "Illegal PORT_SYS command\n");
break;
- case MCE_RSP_CMD_INVALID:
- dev_info(dev, "Previous command not supported\n");
+ case MCE_RSP_EQWAKEVERSION:
+ if (!out)
+ dev_info(dev, "Wake version, proto: 0x%02x, "
+ "payload: 0x%02x, address: 0x%02x, "
+ "version: 0x%02x\n",
+ data1, data2, data3, data4);
+ break;
+ case MCE_RSP_GETPORTSTATUS:
+ if (!out)
+ /* We use data1 + 1 here, to match hw labels */
+ dev_info(dev, "TX port %d: blaster is%s connected\n",
+ data1 + 1, data4 ? " not" : "");
+ break;
+ case MCE_CMD_FLASHLED:
+ dev_info(dev, "Attempting to flash LED\n");
break;
- case MCE_CMD_UNKNOWN7:
- case MCE_CMD_UNKNOWN9:
default:
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
break;
}
break;
- case MCE_COMMAND_HEADER:
+ case MCE_CMD_PORT_IR:
switch (subcmd) {
case MCE_CMD_SIG_END:
dev_info(dev, "End of signal\n");
@@ -546,47 +619,55 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
dev_info(dev, "Resp to 9f 05 of 0x%02x 0x%02x\n",
data1, data2);
break;
- case MCE_CMD_S_CARRIER:
- dev_info(dev, "%s carrier mode and freq of "
- "0x%02x 0x%02x\n", inout, data1, data2);
+ case MCE_RSP_EQIRCFS:
+ period = DIV_ROUND_CLOSEST(
+ (1 << data1 * 2) * (data2 + 1), 10);
+ if (!period)
+ break;
+ carrier = (1000 * 1000) / period;
+ dev_info(dev, "%s carrier of %u Hz (period %uus)\n",
+ inout, carrier, period);
break;
- case MCE_CMD_G_CARRIER:
+ case MCE_CMD_GETIRCFS:
dev_info(dev, "Get carrier mode and freq\n");
break;
- case MCE_CMD_S_TXMASK:
+ case MCE_RSP_EQIRTXPORTS:
dev_info(dev, "%s transmit blaster mask of 0x%02x\n",
inout, data1);
break;
- case MCE_CMD_S_TIMEOUT:
+ case MCE_RSP_EQIRTIMEOUT:
/* value is in units of 50us, so x*50/1000 ms */
+ period = ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000;
dev_info(dev, "%s receive timeout of %d ms\n",
- inout,
- ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000);
+ inout, period);
break;
- case MCE_CMD_G_TIMEOUT:
+ case MCE_CMD_GETIRTIMEOUT:
dev_info(dev, "Get receive timeout\n");
break;
- case MCE_CMD_G_TXMASK:
+ case MCE_CMD_GETIRTXPORTS:
dev_info(dev, "Get transmit blaster mask\n");
break;
- case MCE_CMD_S_RXSENSOR:
+ case MCE_RSP_EQIRRXPORTEN:
dev_info(dev, "%s %s-range receive sensor in use\n",
inout, data1 == 0x02 ? "short" : "long");
break;
- case MCE_CMD_G_RXSENSOR:
- /* aka MCE_RSP_PULSE_COUNT */
+ case MCE_CMD_GETIRRXPORTEN:
+ /* aka MCE_RSP_EQIRRXCFCNT */
if (out)
dev_info(dev, "Get receive sensor\n");
else if (ir->learning_enabled)
dev_info(dev, "RX pulse count: %d\n",
((data1 << 8) | data2));
break;
- case MCE_RSP_CMD_INVALID:
- dev_info(dev, "Error! Hardware is likely wedged...\n");
+ case MCE_RSP_EQIRNUMPORTS:
+ if (out)
+ break;
+ dev_info(dev, "Num TX ports: %x, num RX ports: %x\n",
+ data1, data2);
+ break;
+ case MCE_RSP_CMD_ILLEGAL:
+ dev_info(dev, "Illegal PORT_IR command\n");
break;
- case MCE_CMD_UNKNOWN2:
- case MCE_CMD_UNKNOWN3:
- case MCE_CMD_UNKNOWN5:
default:
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
@@ -599,8 +680,8 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
if (cmd == MCE_IRDATA_TRAILER)
dev_info(dev, "End of raw IR data\n");
- else if ((cmd != MCE_COMMAND_HEADER) &&
- ((cmd & MCE_COMMAND_MASK) == MCE_COMMAND_IRDATA))
+ else if ((cmd != MCE_CMD_PORT_IR) &&
+ ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
dev_info(dev, "Raw IR data, %d pulse/space samples\n", ir->rem);
}
@@ -616,9 +697,6 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
if (ir) {
len = urb->actual_length;
- mce_dbg(ir->dev, "callback called (status=%d len=%d)\n",
- urb->status, len);
-
mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
}
@@ -683,7 +761,16 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
{
+ int rsize = sizeof(DEVICE_RESUME);
+
+ if (ir->need_reset) {
+ ir->need_reset = false;
+ mce_request_packet(ir, DEVICE_RESUME, rsize, MCEUSB_TX);
+ msleep(10);
+ }
+
mce_request_packet(ir, data, size, MCEUSB_TX);
+ msleep(10);
}
static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
@@ -708,8 +795,8 @@ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
return -ENOMEM;
/* MCE tx init header */
- cmdbuf[cmdcount++] = MCE_COMMAND_HEADER;
- cmdbuf[cmdcount++] = MCE_CMD_S_TXMASK;
+ cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
+ cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
cmdbuf[cmdcount++] = ir->tx_mask;
/* Generate mce packet data */
@@ -795,8 +882,8 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
struct mceusb_dev *ir = dev->priv;
int clk = 10000000;
int prescaler = 0, divisor = 0;
- unsigned char cmdbuf[4] = { MCE_COMMAND_HEADER,
- MCE_CMD_S_CARRIER, 0x00, 0x00 };
+ unsigned char cmdbuf[4] = { MCE_CMD_PORT_IR,
+ MCE_CMD_SETIRCFS, 0x00, 0x00 };
/* Carrier has changed */
if (ir->carrier != carrier) {
@@ -844,17 +931,34 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
u8 lo = ir->buf_in[index + 2] & 0xff;
switch (ir->buf_in[index]) {
+ /* the one and only 5-byte return value command */
+ case MCE_RSP_GETPORTSTATUS:
+ if ((ir->buf_in[index + 4] & 0xff) == 0x00)
+ ir->txports_cabled |= 1 << hi;
+ break;
+
/* 2-byte return value commands */
- case MCE_CMD_S_TIMEOUT:
+ case MCE_RSP_EQIRTIMEOUT:
ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
break;
+ case MCE_RSP_EQIRNUMPORTS:
+ ir->num_txports = hi;
+ ir->num_rxports = lo;
+ break;
/* 1-byte return value commands */
- case MCE_CMD_S_TXMASK:
+ case MCE_RSP_EQEMVER:
+ ir->emver = hi;
+ break;
+ case MCE_RSP_EQIRTXPORTS:
ir->tx_mask = hi;
break;
- case MCE_CMD_S_RXSENSOR:
- ir->learning_enabled = (hi == 0x02);
+ case MCE_RSP_EQIRRXPORTEN:
+ ir->learning_enabled = ((hi & 0x02) == 0x02);
+ ir->rxports_active = hi;
+ break;
+ case MCE_RSP_CMD_ILLEGAL:
+ ir->need_reset = true;
break;
default:
break;
@@ -903,8 +1007,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
/* decode mce packets of the form (84),AA,BB,CC,DD */
/* IR data packets can span USB messages - rem */
ir->cmd = ir->buf_in[i];
- if ((ir->cmd == MCE_COMMAND_HEADER) ||
- ((ir->cmd & MCE_COMMAND_MASK) !=
+ if ((ir->cmd == MCE_CMD_PORT_IR) ||
+ ((ir->cmd & MCE_PORT_MASK) !=
MCE_COMMAND_IRDATA)) {
ir->parser_state = SUBCMD;
continue;
@@ -969,6 +1073,13 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
usb_submit_urb(urb, GFP_ATOMIC);
}
+static void mceusb_get_emulator_version(struct mceusb_dev *ir)
+{
+ /* If we get no reply or an illegal command reply, it's ver 1, says MS */
+ ir->emver = 1;
+ mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
+}
+
static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
@@ -1011,8 +1122,8 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
0x0000, 0x0100, NULL, 0, HZ * 3);
mce_dbg(dev, "%s - retC = %d\n", __func__, ret);
- /* device reset */
- mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
+ /* device resume */
+ mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
@@ -1022,23 +1133,36 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
- /* device reset */
- mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
+ /* device resume */
+ mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
- /* unknown what the next two actually return... */
- mce_async_out(ir, GET_UNKNOWN, sizeof(GET_UNKNOWN));
+ /* get wake version (protocol, key, address) */
+ mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
+
+ /* unknown what this one actually returns... */
mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
}
static void mceusb_get_parameters(struct mceusb_dev *ir)
{
+ int i;
+ unsigned char cmdbuf[3] = { MCE_CMD_PORT_SYS,
+ MCE_CMD_GETPORTSTATUS, 0x00 };
+
+ /* defaults, if the hardware doesn't support querying */
+ ir->num_txports = 2;
+ ir->num_rxports = 2;
+
+ /* get number of tx and rx ports */
+ mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
+
/* get the carrier and frequency */
mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
- if (!ir->flags.no_tx)
+ if (ir->num_txports && !ir->flags.no_tx)
/* get the transmitter bitmask */
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
@@ -1047,6 +1171,19 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
/* get receiver sensor setting */
mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
+
+ for (i = 0; i < ir->num_txports; i++) {
+ cmdbuf[2] = i;
+ mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ }
+}
+
+static void mceusb_flash_led(struct mceusb_dev *ir)
+{
+ if (ir->emver < 2)
+ return;
+
+ mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
}
static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
@@ -1220,6 +1357,9 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
mce_dbg(&intf->dev, "Flushing receive buffers\n");
mce_flush_rx_buffer(ir, maxp);
+ /* figure out which firmware/emulator version this hardware has */
+ mceusb_get_emulator_version(ir);
+
/* initialize device */
if (ir->flags.microsoft_gen1)
mceusb_gen1_init(ir);
@@ -1228,13 +1368,23 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
mceusb_get_parameters(ir);
+ mceusb_flash_led(ir);
+
if (!ir->flags.no_tx)
mceusb_set_tx_mask(ir->rc, MCE_DEFAULT_TX_MASK);
usb_set_intfdata(intf, ir);
- dev_info(&intf->dev, "Registered %s on usb%d:%d\n", name,
- dev->bus->busnum, dev->devnum);
+ /* enable wake via this device */
+ device_set_wakeup_capable(ir->dev, true);
+ device_set_wakeup_enable(ir->dev, true);
+
+ dev_info(&intf->dev, "Registered %s with mce emulator interface "
+ "version %x\n", name, ir->emver);
+ dev_info(&intf->dev, "%x tx ports (0x%x cabled) and "
+ "%x rx sensors (0x%x active)\n",
+ ir->num_txports, ir->txports_cabled,
+ ir->num_rxports, ir->rxports_active);
return 0;
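
The mceusb hunks above describe the new message layout only in comments: every command or response starts with a lead byte carrying a port number in its top three bits and a length in its low five bits (a length of 0x1f meaning "a command/response code follows"), and MCE_RSP_EQIRCFS reports the tx carrier as a prescaler/divisor pair. The minimal C sketch below only illustrates that decoding; the mce_demo_* helpers are made-up names, not part of mceusb.c, and they assume the MCE_PORT_MASK, MCE_PACKET_LENGTH_MASK and MCE_CMD definitions from the hunk above are in scope.

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */
#include <linux/types.h>

/* Split the lead byte into its port (top 3 bits) and length (low 5 bits). */
static inline u8 mce_demo_port(u8 lead)
{
	return (lead & MCE_PORT_MASK) >> 5;	/* 0x4 = IR port, 0x7 = SYS port */
}

static inline u8 mce_demo_length(u8 lead)
{
	/* equal to MCE_CMD (0x1f) when a command/response code follows */
	return lead & MCE_PACKET_LENGTH_MASK;
}

/* Carrier reported by MCE_RSP_EQIRCFS: data1 = prescaler, data2 = divisor,
 * same arithmetic as mceusb_dev_printdata() in the patch above. */
static inline u32 mce_demo_carrier_hz(u8 data1, u8 data2)
{
	u32 period_us = DIV_ROUND_CLOSEST((1 << data1 * 2) * (data2 + 1), 10);

	return period_us ? (1000 * 1000) / period_us : 0;
}

For example, a response of 9f 06 00 ff decodes as port 0x4 (IR) with a following code of MCE_RSP_EQIRCFS, and mce_demo_carrier_hz(0x00, 0xff) yields roughly a 38 kHz carrier, the usual consumer-IR value.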
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 04c2c722b6e..c6ca870e8b7 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -162,49 +162,49 @@ void ir_raw_init(void);
#ifdef CONFIG_IR_NEC_DECODER_MODULE
#define load_nec_decode() request_module("ir-nec-decoder")
#else
-#define load_nec_decode() 0
+static inline void load_nec_decode(void) { }
#endif
/* from ir-rc5-decoder.c */
#ifdef CONFIG_IR_RC5_DECODER_MODULE
#define load_rc5_decode() request_module("ir-rc5-decoder")
#else
-#define load_rc5_decode() 0
+static inline void load_rc5_decode(void) { }
#endif
/* from ir-rc6-decoder.c */
#ifdef CONFIG_IR_RC6_DECODER_MODULE
#define load_rc6_decode() request_module("ir-rc6-decoder")
#else
-#define load_rc6_decode() 0
+static inline void load_rc6_decode(void) { }
#endif
/* from ir-jvc-decoder.c */
#ifdef CONFIG_IR_JVC_DECODER_MODULE
#define load_jvc_decode() request_module("ir-jvc-decoder")
#else
-#define load_jvc_decode() 0
+static inline void load_jvc_decode(void) { }
#endif
/* from ir-sony-decoder.c */
#ifdef CONFIG_IR_SONY_DECODER_MODULE
#define load_sony_decode() request_module("ir-sony-decoder")
#else
-#define load_sony_decode() 0
+static inline void load_sony_decode(void) { }
#endif
/* from ir-mce_kbd-decoder.c */
#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
#define load_mce_kbd_decode() request_module("ir-mce_kbd-decoder")
#else
-#define load_mce_kbd_decode() 0
+static inline void load_mce_kbd_decode(void) { }
#endif
/* from ir-lirc-codec.c */
#ifdef CONFIG_IR_LIRC_CODEC_MODULE
#define load_lirc_codec() request_module("ir-lirc-codec")
#else
-#define load_lirc_codec() 0
+static inline void load_lirc_codec(void) { }
#endif
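
A brief illustration (not taken from the patch, and using the made-up names load_demo_decode/demo_caller) of why the stubs above were switched from 0-valued macros to empty static inlines: the old form expands a call such as load_demo_decode(); into the bare statement 0;, which bypasses type checking and can draw "statement with no effect" warnings, while an empty static inline stays type-checked and still compiles away.

#if 0	/* old form: a call expands to the bare statement "0;" */
#define load_demo_decode()	0
#else	/* new form: type-checked, generates no code */
static inline void load_demo_decode(void) { }
#endif

static inline void demo_caller(void)
{
	load_demo_decode();	/* no unused-value warning with the inline stub */
}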
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 51a23f48bc7..29f900065d8 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -18,6 +18,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/module.h>
#include "rc-core-priv.h"
/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
@@ -928,10 +929,6 @@ out:
static void rc_dev_release(struct device *device)
{
- struct rc_dev *dev = to_rc_dev(device);
-
- kfree(dev);
- module_put(THIS_MODULE);
}
#define ADD_HOTPLUG_VAR(fmt, val...) \
@@ -945,6 +942,9 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);
+ if (!dev || !dev->input_dev)
+ return -ENODEV;
+
if (dev->rc_map.name)
ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
if (dev->driver_name)
@@ -1013,10 +1013,16 @@ EXPORT_SYMBOL_GPL(rc_allocate_device);
void rc_free_device(struct rc_dev *dev)
{
- if (dev) {
+ if (!dev)
+ return;
+
+ if (dev->input_dev)
input_free_device(dev->input_dev);
- put_device(&dev->dev);
- }
+
+ put_device(&dev->dev);
+
+ kfree(dev);
+ module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(rc_free_device);
@@ -1143,14 +1149,18 @@ void rc_unregister_device(struct rc_dev *dev)
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_event_unregister(dev);
+ /* Freeing the table should also call the stop callback */
+ ir_free_table(&dev->rc_map);
+ IR_dprintk(1, "Freed keycode table\n");
+
input_unregister_device(dev->input_dev);
dev->input_dev = NULL;
- ir_free_table(&dev->rc_map);
- IR_dprintk(1, "Freed keycode table\n");
+ device_del(&dev->dev);
- device_unregister(&dev->dev);
+ rc_free_device(dev);
}
+
EXPORT_SYMBOL_GPL(rc_unregister_device);
/*
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index a1660447791..61287fcca61 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -195,11 +195,6 @@ struct redrat3_dev {
dma_addr_t dma_in;
dma_addr_t dma_out;
- /* true if write urb is busy */
- bool write_busy;
- /* wait for the write to finish */
- struct completion write_finished;
-
/* locks this structure */
struct mutex lock;
@@ -207,8 +202,6 @@ struct redrat3_dev {
struct timer_list rx_timeout;
u32 hw_timeout;
- /* Is the device currently receiving? */
- bool recv_in_progress;
/* is the detector enabled*/
bool det_enabled;
/* Is the device currently transmitting?*/
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index bec8abc965f..13f54b51194 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -41,6 +41,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/interrupt.h>
@@ -1155,12 +1157,12 @@ wbcir_init(void)
case IR_PROTOCOL_RC6:
break;
default:
- printk(KERN_ERR DRVNAME ": Invalid power-on protocol\n");
+ pr_err("Invalid power-on protocol\n");
}
ret = pnp_register_driver(&wbcir_driver);
if (ret)
- printk(KERN_ERR DRVNAME ": Unable to register driver\n");
+ pr_err("Unable to register driver\n");
return ret;
}
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f574dc012ca..b303a3f8a9f 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -467,6 +467,20 @@ config VIDEO_OV7670
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
+config VIDEO_MT9P031
+ tristate "Aptina MT9P031 support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Aptina
+ (Micron) mt9p031 5 Mpixel camera.
+
+config VIDEO_MT9T001
+ tristate "Aptina MT9T001 support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Aptina
+ (Micron) mt9t001 3 Mpixel camera.
+
config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
depends on I2C && VIDEO_V4L2
@@ -489,6 +503,27 @@ config VIDEO_TCM825X
This is a driver for the Toshiba TCM825x VGA camera sensor.
It is used for example in Nokia N800.
+config VIDEO_SR030PC30
+ tristate "Siliconfile SR030PC30 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This driver supports SR030PC30 VGA camera from Siliconfile
+
+config VIDEO_NOON010PC30
+ tristate "Siliconfile NOON010PC30 sensor support"
+ depends on I2C && VIDEO_V4L2 && EXPERIMENTAL && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This driver supports NOON010PC30 CIF camera from Siliconfile
+
+source "drivers/media/video/m5mols/Kconfig"
+
+config VIDEO_S5K6AA
+ tristate "Samsung S5K6AAFX sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for Samsung S5K6AA(FX) 1.3M
+ camera sensor with an embedded SoC image signal processor.
+
comment "Flash devices"
config VIDEO_ADP1653
@@ -708,6 +743,8 @@ source "drivers/media/video/cx88/Kconfig"
source "drivers/media/video/cx23885/Kconfig"
+source "drivers/media/video/cx25821/Kconfig"
+
source "drivers/media/video/au0828/Kconfig"
source "drivers/media/video/ivtv/Kconfig"
@@ -737,12 +774,6 @@ config VIDEO_M32R_AR_M64278
To compile this driver as a module, choose M here: the
module will be called arv.
-config VIDEO_SR030PC30
- tristate "SR030PC30 VGA camera sensor support"
- depends on I2C && VIDEO_V4L2
- ---help---
- This driver supports SR030PC30 VGA camera from Siliconfile
-
config VIDEO_VIA_CAMERA
tristate "VIAFB camera controller support"
depends on FB_VIA
@@ -753,18 +784,9 @@ config VIDEO_VIA_CAMERA
Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
with ov7670 sensors.
-config VIDEO_NOON010PC30
- tristate "NOON010PC30 CIF camera sensor support"
- depends on I2C && VIDEO_V4L2
- ---help---
- This driver supports NOON010PC30 CIF camera from Siliconfile
-
-source "drivers/media/video/m5mols/Kconfig"
-
config VIDEO_OMAP3
tristate "OMAP 3 Camera support (EXPERIMENTAL)"
- select OMAP_IOMMU
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
+ depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
---help---
Driver for an OMAP 3 camera controller.
@@ -949,8 +971,9 @@ config VIDEO_MX2
Interface
config VIDEO_SAMSUNG_S5P_FIMC
- tristate "Samsung S5P and EXYNOS4 camera host interface driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
+ tristate "Samsung S5P and EXYNOS4 camera interface driver (EXPERIMENTAL)"
+ depends on VIDEO_V4L2 && I2C && PLAT_S5P && PM_RUNTIME && \
+ VIDEO_V4L2_SUBDEV_API && EXPERIMENTAL
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
@@ -1004,6 +1027,8 @@ source "drivers/media/video/tlg2300/Kconfig"
source "drivers/media/video/cx231xx/Kconfig"
+source "drivers/media/video/tm6000/Kconfig"
+
source "drivers/media/video/usbvision/Kconfig"
source "drivers/media/video/et61x251/Kconfig"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 272390072ae..117f9c4b4cb 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -65,11 +65,14 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
+obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
+obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
+obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
@@ -102,9 +105,11 @@ obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_TLG2300) += tlg2300/
obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
+obj-$(CONFIG_VIDEO_CX25821) += cx25821/
obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
+obj-$(CONFIG_VIDEO_TM6000) += tm6000/
obj-$(CONFIG_VIDEO_MXB) += mxb.o
obj-$(CONFIG_VIDEO_HEXIUM_ORION) += hexium_orion.o
obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
@@ -190,6 +195,6 @@ obj-y += davinci/
obj-$(CONFIG_ARCH_OMAP) += omap/
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/adp1653.c b/drivers/media/video/adp1653.c
index be7befd6094..12eedf4d515 100644
--- a/drivers/media/video/adp1653.c
+++ b/drivers/media/video/adp1653.c
@@ -31,7 +31,9 @@
*/
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <media/adp1653.h>
@@ -258,7 +260,7 @@ static int adp1653_init_controls(struct adp1653_flash *flash)
if (flash->ctrls.error)
return flash->ctrls.error;
- fault->is_volatile = 1;
+ fault->flags |= V4L2_CTRL_FLAG_VOLATILE;
flash->subdev.ctrl_handler = &flash->ctrls;
return 0;
@@ -413,6 +415,10 @@ static int adp1653_probe(struct i2c_client *client,
struct adp1653_flash *flash;
int ret;
+ /* we couldn't work without platform data */
+ if (client->dev.platform_data == NULL)
+ return -ENODEV;
+
flash = kzalloc(sizeof(*flash), GFP_KERNEL);
if (flash == NULL)
return -ENOMEM;
@@ -425,12 +431,21 @@ static int adp1653_probe(struct i2c_client *client,
flash->subdev.internal_ops = &adp1653_internal_ops;
flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- adp1653_init_controls(flash);
+ ret = adp1653_init_controls(flash);
+ if (ret)
+ goto free_and_quit;
ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
if (ret < 0)
- kfree(flash);
+ goto free_and_quit;
+
+ flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+
+ return 0;
+free_and_quit:
+ v4l2_ctrl_handler_free(&flash->ctrls);
+ kfree(flash);
return ret;
}
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index d2327dbb473..206078eca85 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -61,6 +61,11 @@ static inline struct adv7175 *to_adv7175(struct v4l2_subdev *sd)
static char *inputs[] = { "pass_through", "play_back", "color_bar" };
+static enum v4l2_mbus_pixelcode adv7175_codes[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_UYVY8_1X16,
+};
+
/* ----------------------------------------------------------------------- */
static inline int adv7175_write(struct v4l2_subdev *sd, u8 reg, u8 value)
@@ -296,6 +301,60 @@ static int adv7175_s_routing(struct v4l2_subdev *sd,
return 0;
}
+static int adv7175_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(adv7175_codes))
+ return -EINVAL;
+
+ *code = adv7175_codes[index];
+ return 0;
+}
+
+static int adv7175_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ u8 val = adv7175_read(sd, 0x7);
+
+ if ((val & 0x40) == (1 << 6))
+ mf->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ else
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ mf->width = 0;
+ mf->height = 0;
+ mf->field = V4L2_FIELD_ANY;
+
+ return 0;
+}
+
+static int adv7175_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ u8 val = adv7175_read(sd, 0x7);
+ int ret;
+
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ val &= ~0x40;
+ break;
+
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ val |= 0x40;
+ break;
+
+ default:
+ v4l2_dbg(1, debug, sd,
+ "illegal v4l2_mbus_framefmt code: %d\n", mf->code);
+ return -EINVAL;
+ }
+
+ ret = adv7175_write(sd, 0x7, val);
+
+ return ret;
+}
+
static int adv7175_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -324,6 +383,9 @@ static const struct v4l2_subdev_core_ops adv7175_core_ops = {
static const struct v4l2_subdev_video_ops adv7175_video_ops = {
.s_std_output = adv7175_s_std_output,
.s_routing = adv7175_s_routing,
+ .s_mbus_fmt = adv7175_s_fmt,
+ .g_mbus_fmt = adv7175_g_fmt,
+ .enum_mbus_fmt = adv7175_enum_fmt,
};
static const struct v4l2_subdev_ops adv7175_ops = {
diff --git a/drivers/media/video/ak881x.c b/drivers/media/video/ak881x.c
index b388654d48c..53c496c00fb 100644
--- a/drivers/media/video/ak881x.c
+++ b/drivers/media/video/ak881x.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/ak881x.h>
#include <media/v4l2-chip-ident.h>
diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c
index 7b89f00501b..8c775c59e12 100644
--- a/drivers/media/video/atmel-isi.c
+++ b/drivers/media/video/atmel-isi.c
@@ -94,6 +94,7 @@ struct atmel_isi {
unsigned int irq;
struct isi_platform_data *pdata;
+ u16 width_flags; /* max 12 bits */
struct list_head video_buffer_list;
struct frame_buffer *active;
@@ -248,9 +249,9 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned long sizes[],
- void *alloc_ctxs[])
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -341,7 +342,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
/* Initialize the dma descriptor */
desc->p_fbd->fb_address =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
desc->p_fbd->next_fbd_address = 0;
set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
@@ -404,12 +405,13 @@ static void buffer_queue(struct vb2_buffer *vb)
if (isi->active == NULL) {
isi->active = buf;
- start_dma(isi, buf);
+ if (vb2_is_streaming(vb->vb2_queue))
+ start_dma(isi, buf);
}
spin_unlock_irqrestore(&isi->lock, flags);
}
-static int start_streaming(struct vb2_queue *vq)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -431,17 +433,26 @@ static int start_streaming(struct vb2_queue *vq)
ret = wait_event_interruptible(isi->vsync_wq,
isi->state != ISI_STATE_IDLE);
if (ret)
- return ret;
+ goto err;
- if (isi->state != ISI_STATE_READY)
- return -EIO;
+ if (isi->state != ISI_STATE_READY) {
+ ret = -EIO;
+ goto err;
+ }
spin_lock_irq(&isi->lock);
isi->state = ISI_STATE_WAIT_SOF;
isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+ if (count)
+ start_dma(isi, isi->active);
spin_unlock_irq(&isi->lock);
return 0;
+err:
+ isi->active = NULL;
+ isi->sequence = 0;
+ INIT_LIST_HEAD(&isi->video_buffer_list);
+ return ret;
}
/* abort streaming and wait for last buffer */
@@ -637,50 +648,42 @@ static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
-static unsigned long make_bus_param(struct atmel_isi *isi)
-{
- unsigned long flags;
- /*
- * Platform specified synchronization and pixel clock polarities are
- * only a recommendation and are only used during probing. Atmel ISI
- * camera interface only works in master mode, i.e., uses HSYNC and
- * VSYNC signals from the sensor
- */
- flags = SOCAM_MASTER |
- SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_HSYNC_ACTIVE_LOW |
- SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_VSYNC_ACTIVE_LOW |
- SOCAM_PCLK_SAMPLE_RISING |
- SOCAM_PCLK_SAMPLE_FALLING |
- SOCAM_DATA_ACTIVE_HIGH;
-
- if (isi->pdata->data_width_flags & ISI_DATAWIDTH_10)
- flags |= SOCAM_DATAWIDTH_10;
-
- if (isi->pdata->data_width_flags & ISI_DATAWIDTH_8)
- flags |= SOCAM_DATAWIDTH_8;
-
- if (flags & SOCAM_DATAWIDTH_MASK)
- return flags;
-
- return 0;
-}
+#define ISI_BUS_PARAM (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
static int isi_camera_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
- unsigned long camera_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
int ret;
- camera_flags = icd->ops->query_bus_param(icd);
- ret = soc_camera_bus_param_compatible(camera_flags,
- make_bus_param(isi));
- if (!ret)
- return -EINVAL;
- return 0;
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ ISI_BUS_PARAM);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, ISI_BUS_PARAM);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ }
+
+ if ((1 << (buswidth - 1)) & isi->width_flags)
+ return 0;
+ return -EINVAL;
}
@@ -802,59 +805,71 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
static int isi_camera_set_bus_param(struct soc_camera_device *icd, u32 pixfmt)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
- unsigned long bus_flags, camera_flags, common_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
int ret;
u32 cfg1 = 0;
- camera_flags = icd->ops->query_bus_param(icd);
-
- bus_flags = make_bus_param(isi);
- common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
- dev_dbg(icd->parent, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n",
- camera_flags, bus_flags, common_flags);
- if (!common_flags)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ ISI_BUS_PARAM);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, ISI_BUS_PARAM);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = ISI_BUS_PARAM;
+ }
+ dev_dbg(icd->parent, "Flags cam: 0x%x host: 0x%x common: 0x%lx\n",
+ cfg.flags, ISI_BUS_PARAM, common_flags);
/* Make choises, based on platform preferences */
- if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
if (isi->pdata->hsync_act_low)
- common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
if (isi->pdata->vsync_act_low)
- common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
- (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
if (isi->pdata->pclk_act_falling)
- common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
else
- common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
}
- ret = icd->ops->set_bus_param(icd, common_flags);
- if (ret < 0) {
- dev_dbg(icd->parent, "Camera set_bus_param(%lx) returned %d\n",
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
common_flags, ret);
return ret;
}
/* set bus param for ISI */
- if (common_flags & SOCAM_HSYNC_ACTIVE_LOW)
+ if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
- if (common_flags & SOCAM_VSYNC_ACTIVE_LOW)
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
- if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
if (isi->pdata->has_emb_sync)
@@ -973,6 +988,11 @@ static int __devinit atmel_isi_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ if (pdata->data_width_flags & ISI_DATAWIDTH_8)
+ isi->width_flags = 1 << 7;
+ if (pdata->data_width_flags & ISI_DATAWIDTH_10)
+ isi->width_flags |= 1 << 9;
+
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/media/video/au0828/Makefile b/drivers/media/video/au0828/Makefile
index 5c7f2f7d980..bd22223f8d9 100644
--- a/drivers/media/video/au0828/Makefile
+++ b/drivers/media/video/au0828/Makefile
@@ -2,8 +2,8 @@ au0828-objs := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o au0828-vid
obj-$(CONFIG_VIDEO_AU0828) += au0828.o
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
-EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
+ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index f87204461cb..859eabf5797 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -229,7 +229,7 @@ static int bt819_status(struct v4l2_subdev *sd, u32 *pstatus, v4l2_std_id *pstd)
if (pstd)
*pstd = std;
if (pstatus)
- *pstatus = status;
+ *pstatus = res;
v4l2_dbg(1, debug, sd, "get status %x\n", status);
return 0;
diff --git a/drivers/media/video/bt8xx/Makefile b/drivers/media/video/bt8xx/Makefile
index e415f6fc447..3f9a2b22d3d 100644
--- a/drivers/media/video/bt8xx/Makefile
+++ b/drivers/media/video/bt8xx/Makefile
@@ -8,6 +8,6 @@ bttv-objs := bttv-driver.o bttv-cards.o bttv-if.o \
obj-$(CONFIG_VIDEO_BT848) += bttv.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 5b15f63bf06..5939021d8eb 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -25,6 +25,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kmod.h>
@@ -2905,19 +2907,17 @@ void __devinit bttv_idcard(struct bttv *btv)
if (type != -1) {
/* found it */
- printk(KERN_INFO "bttv%d: detected: %s [card=%d], "
- "PCI subsystem ID is %04x:%04x\n",
- btv->c.nr,cards[type].name,cards[type].cardnr,
- btv->cardid & 0xffff,
- (btv->cardid >> 16) & 0xffff);
+ pr_info("%d: detected: %s [card=%d], PCI subsystem ID is %04x:%04x\n",
+ btv->c.nr, cards[type].name, cards[type].cardnr,
+ btv->cardid & 0xffff,
+ (btv->cardid >> 16) & 0xffff);
btv->c.type = cards[type].cardnr;
} else {
/* 404 */
- printk(KERN_INFO "bttv%d: subsystem: %04x:%04x (UNKNOWN)\n",
- btv->c.nr, btv->cardid & 0xffff,
- (btv->cardid >> 16) & 0xffff);
- printk(KERN_DEBUG "please mail id, board name and "
- "the correct card= insmod option to linux-media@vger.kernel.org\n");
+ pr_info("%d: subsystem: %04x:%04x (UNKNOWN)\n",
+ btv->c.nr, btv->cardid & 0xffff,
+ (btv->cardid >> 16) & 0xffff);
+ pr_debug("please mail id, board name and the correct card= insmod option to linux-media@vger.kernel.org\n");
}
}
@@ -2926,10 +2926,10 @@ void __devinit bttv_idcard(struct bttv *btv)
btv->c.type=card[btv->c.nr];
/* print which card config we are using */
- printk(KERN_INFO "bttv%d: using: %s [card=%d,%s]\n",btv->c.nr,
- bttv_tvcards[btv->c.type].name, btv->c.type,
- card[btv->c.nr] < bttv_num_tvcards
- ? "insmod option" : "autodetected");
+ pr_info("%d: using: %s [card=%d,%s]\n",
+ btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type,
+ card[btv->c.nr] < bttv_num_tvcards
+ ? "insmod option" : "autodetected");
/* overwrite gpio stuff ?? */
if (UNSET == audioall && UNSET == audiomux[0])
@@ -2948,12 +2948,13 @@ void __devinit bttv_idcard(struct bttv *btv)
}
}
bttv_tvcards[btv->c.type].gpiomask = (UNSET != gpiomask) ? gpiomask : gpiobits;
- printk(KERN_INFO "bttv%d: gpio config override: mask=0x%x, mux=",
- btv->c.nr,bttv_tvcards[btv->c.type].gpiomask);
+ pr_info("%d: gpio config override: mask=0x%x, mux=",
+ btv->c.nr, bttv_tvcards[btv->c.type].gpiomask);
for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) {
- printk("%s0x%x", i ? "," : "", bttv_tvcards[btv->c.type].gpiomux[i]);
+ pr_cont("%s0x%x",
+ i ? "," : "", bttv_tvcards[btv->c.type].gpiomux[i]);
}
- printk("\n");
+ pr_cont("\n");
}
/*
@@ -2974,8 +2975,8 @@ static void identify_by_eeprom(struct bttv *btv, unsigned char eeprom_data[256])
if (-1 != type) {
btv->c.type = type;
- printk("bttv%d: detected by eeprom: %s [card=%d]\n",
- btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type);
+ pr_info("%d: detected by eeprom: %s [card=%d]\n",
+ btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type);
}
}
@@ -3019,7 +3020,7 @@ static void flyvideo_gpio(struct bttv *btv)
tuner_type = 3; /* Philips SECAM(+PAL) FQ1216ME or FI1216MF */
break;
default:
- printk(KERN_INFO "bttv%d: FlyVideo_gpio: unknown tuner type.\n", btv->c.nr);
+ pr_info("%d: FlyVideo_gpio: unknown tuner type\n", btv->c.nr);
break;
}
@@ -3036,13 +3037,13 @@ static void flyvideo_gpio(struct bttv *btv)
if (is_capture_only)
tuner_type = TUNER_ABSENT; /* No tuner present */
- printk(KERN_INFO "bttv%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n",
- btv->c.nr, has_radio ? "yes" : "no ",
- has_remote ? "yes" : "no ", tuner_type, gpio);
- printk(KERN_INFO "bttv%d: FlyVideo LR90=%s tda9821/tda9820=%s capture_only=%s\n",
- btv->c.nr, is_lr90 ? "yes" : "no ",
- has_tda9820_tda9821 ? "yes" : "no ",
- is_capture_only ? "yes" : "no ");
+ pr_info("%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n",
+ btv->c.nr, has_radio ? "yes" : "no",
+ has_remote ? "yes" : "no", tuner_type, gpio);
+ pr_info("%d: FlyVideo LR90=%s tda9821/tda9820=%s capture_only=%s\n",
+ btv->c.nr, is_lr90 ? "yes" : "no",
+ has_tda9820_tda9821 ? "yes" : "no",
+ is_capture_only ? "yes" : "no");
if (tuner_type != UNSET) /* only set if known tuner autodetected, else let insmod option through */
btv->tuner_type = tuner_type;
@@ -3091,12 +3092,11 @@ static void miro_pinnacle_gpio(struct bttv *btv)
if (btv->c.type == BTTV_BOARD_PINNACLE)
btv->c.type = BTTV_BOARD_PINNACLEPRO;
}
- printk(KERN_INFO
- "bttv%d: miro: id=%d tuner=%d radio=%s stereo=%s\n",
- btv->c.nr, id+1, btv->tuner_type,
- !btv->has_radio ? "no" :
- (btv->has_matchbox ? "matchbox" : "fmtuner"),
- (-1 == msp) ? "no" : "yes");
+ pr_info("%d: miro: id=%d tuner=%d radio=%s stereo=%s\n",
+ btv->c.nr, id+1, btv->tuner_type,
+ !btv->has_radio ? "no" :
+ (btv->has_matchbox ? "matchbox" : "fmtuner"),
+ (-1 == msp) ? "no" : "yes");
} else {
/* new cards with microtune tuner */
id = 63 - id;
@@ -3138,9 +3138,8 @@ static void miro_pinnacle_gpio(struct bttv *btv)
}
if (-1 != msp)
btv->c.type = BTTV_BOARD_PINNACLEPRO;
- printk(KERN_INFO
- "bttv%d: pinnacle/mt: id=%d info=\"%s\" radio=%s\n",
- btv->c.nr, id, info, btv->has_radio ? "yes" : "no");
+ pr_info("%d: pinnacle/mt: id=%d info=\"%s\" radio=%s\n",
+ btv->c.nr, id, info, btv->has_radio ? "yes" : "no");
btv->tuner_type = TUNER_MT2032;
}
}
@@ -3202,7 +3201,7 @@ static void gvc1100_muxsel(struct bttv *btv, unsigned int input)
static void init_lmlbt4x(struct bttv *btv)
{
- printk(KERN_DEBUG "LMLBT4x init\n");
+ pr_debug("LMLBT4x init\n");
btwrite(0x000000, BT848_GPIO_REG_INP);
gpio_inout(0xffffff, 0x0006C0);
gpio_write(0x000000);
@@ -3246,7 +3245,7 @@ static void bttv_reset_audio(struct bttv *btv)
return;
if (bttv_debug)
- printk("bttv%d: BT878A ARESET\n",btv->c.nr);
+ pr_debug("%d: BT878A ARESET\n", btv->c.nr);
btwrite((1<<7), 0x058);
udelay(10);
btwrite( 0, 0x058);
@@ -3349,7 +3348,8 @@ void __devinit bttv_init_card2(struct bttv *btv)
case BTTV_BOARD_MAGICTVIEW061:
if (btv->cardid == 0x3002144f) {
btv->has_radio=1;
- printk("bttv%d: radio detected by subsystem id (CPH05x)\n",btv->c.nr);
+ pr_info("%d: radio detected by subsystem id (CPH05x)\n",
+ btv->c.nr);
}
break;
case BTTV_BOARD_STB2:
@@ -3438,17 +3438,16 @@ void __devinit bttv_init_card2(struct bttv *btv)
btv->tuner_type = tuner[btv->c.nr];
if (btv->tuner_type == TUNER_ABSENT)
- printk(KERN_INFO "bttv%d: tuner absent\n", btv->c.nr);
- else if(btv->tuner_type == UNSET)
- printk(KERN_WARNING "bttv%d: tuner type unset\n", btv->c.nr);
+ pr_info("%d: tuner absent\n", btv->c.nr);
+ else if (btv->tuner_type == UNSET)
+ pr_warn("%d: tuner type unset\n", btv->c.nr);
else
- printk(KERN_INFO "bttv%d: tuner type=%d\n", btv->c.nr,
- btv->tuner_type);
+ pr_info("%d: tuner type=%d\n", btv->c.nr, btv->tuner_type);
if (autoload != UNSET) {
- printk(KERN_WARNING "bttv%d: the autoload option is obsolete.\n", btv->c.nr);
- printk(KERN_WARNING "bttv%d: use option msp3400, tda7432 or tvaudio to\n", btv->c.nr);
- printk(KERN_WARNING "bttv%d: override which audio module should be used.\n", btv->c.nr);
+ pr_warn("%d: the autoload option is obsolete\n", btv->c.nr);
+ pr_warn("%d: use option msp3400, tda7432 or tvaudio to override which audio module should be used\n",
+ btv->c.nr);
}
if (UNSET == btv->tuner_type)
@@ -3541,8 +3540,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
}
default:
- printk(KERN_WARNING "bttv%d: unknown audiodev value!\n",
- btv->c.nr);
+ pr_warn("%d: unknown audiodev value!\n", btv->c.nr);
return;
}
@@ -3585,8 +3583,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
return;
no_audio:
- printk(KERN_WARNING "bttv%d: audio absent, no audio device found!\n",
- btv->c.nr);
+ pr_warn("%d: audio absent, no audio device found!\n", btv->c.nr);
}
@@ -3639,19 +3636,19 @@ static void modtec_eeprom(struct bttv *btv)
{
if( strncmp(&(eeprom_data[0x1e]),"Temic 4066 FY5",14) ==0) {
btv->tuner_type=TUNER_TEMIC_4066FY5_PAL_I;
- printk("bttv%d: Modtec: Tuner autodetected by eeprom: %s\n",
- btv->c.nr,&eeprom_data[0x1e]);
+ pr_info("%d: Modtec: Tuner autodetected by eeprom: %s\n",
+ btv->c.nr, &eeprom_data[0x1e]);
} else if (strncmp(&(eeprom_data[0x1e]),"Alps TSBB5",10) ==0) {
btv->tuner_type=TUNER_ALPS_TSBB5_PAL_I;
- printk("bttv%d: Modtec: Tuner autodetected by eeprom: %s\n",
- btv->c.nr,&eeprom_data[0x1e]);
+ pr_info("%d: Modtec: Tuner autodetected by eeprom: %s\n",
+ btv->c.nr, &eeprom_data[0x1e]);
} else if (strncmp(&(eeprom_data[0x1e]),"Philips FM1246",14) ==0) {
btv->tuner_type=TUNER_PHILIPS_NTSC;
- printk("bttv%d: Modtec: Tuner autodetected by eeprom: %s\n",
- btv->c.nr,&eeprom_data[0x1e]);
+ pr_info("%d: Modtec: Tuner autodetected by eeprom: %s\n",
+ btv->c.nr, &eeprom_data[0x1e]);
} else {
- printk("bttv%d: Modtec: Unknown TunerString: %s\n",
- btv->c.nr,&eeprom_data[0x1e]);
+ pr_info("%d: Modtec: Unknown TunerString: %s\n",
+ btv->c.nr, &eeprom_data[0x1e]);
}
}
@@ -3663,7 +3660,7 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
btv->tuner_type = tv.tuner_type;
btv->has_radio = tv.has_radio;
- printk("bttv%d: Hauppauge eeprom indicates model#%d\n",
+ pr_info("%d: Hauppauge eeprom indicates model#%d\n",
btv->c.nr, tv.model);
/*
@@ -3671,7 +3668,7 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
* type based on model #.
*/
if(tv.model == 64900) {
- printk("bttv%d: Switching board type from %s to %s\n",
+ pr_info("%d: Switching board type from %s to %s\n",
btv->c.nr,
bttv_tvcards[btv->c.type].name,
bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
@@ -3698,8 +3695,7 @@ static int terratec_active_radio_upgrade(struct bttv *btv)
freq=88000/62.5;
tea5757_write(btv, 5 * freq + 0x358); /* write 0x1ed8 */
if (0x1ed8 == tea5757_read(btv)) {
- printk("bttv%d: Terratec Active Radio Upgrade found.\n",
- btv->c.nr);
+ pr_info("%d: Terratec Active Radio Upgrade found\n", btv->c.nr);
btv->has_radio = 1;
btv->has_saa6588 = 1;
btv->has_matchbox = 1;
@@ -3771,13 +3767,12 @@ static int __devinit pvr_boot(struct bttv *btv)
rc = request_firmware(&fw_entry, "hcwamc.rbf", &btv->c.pci->dev);
if (rc != 0) {
- printk(KERN_WARNING "bttv%d: no altera firmware [via hotplug]\n",
- btv->c.nr);
+ pr_warn("%d: no altera firmware [via hotplug]\n", btv->c.nr);
return rc;
}
rc = pvr_altera_load(btv, fw_entry->data, fw_entry->size);
- printk(KERN_INFO "bttv%d: altera firmware upload %s\n",
- btv->c.nr, (rc < 0) ? "failed" : "ok");
+ pr_info("%d: altera firmware upload %s\n",
+ btv->c.nr, (rc < 0) ? "failed" : "ok");
release_firmware(fw_entry);
return rc;
}
@@ -3873,29 +3868,27 @@ static void __devinit osprey_eeprom(struct bttv *btv, const u8 ee[256])
break;
default:
/* unknown...leave generic, but get serial # */
- printk(KERN_INFO "bttv%d: "
- "osprey eeprom: unknown card type 0x%04x\n",
- btv->c.nr, type);
+ pr_info("%d: osprey eeprom: unknown card type 0x%04x\n",
+ btv->c.nr, type);
break;
}
serial = get_unaligned_be32((__be32 *)(ee+6));
}
- printk(KERN_INFO "bttv%d: osprey eeprom: card=%d '%s' serial=%u\n",
- btv->c.nr, cardid,
- cardid>0 ? bttv_tvcards[cardid].name : "Unknown", serial);
+ pr_info("%d: osprey eeprom: card=%d '%s' serial=%u\n",
+ btv->c.nr, cardid,
+ cardid > 0 ? bttv_tvcards[cardid].name : "Unknown", serial);
if (cardid<0 || btv->c.type == cardid)
return;
/* card type isn't set correctly */
if (card[btv->c.nr] < bttv_num_tvcards) {
- printk(KERN_WARNING "bttv%d: osprey eeprom: "
- "Not overriding user specified card type\n", btv->c.nr);
+ pr_warn("%d: osprey eeprom: Not overriding user specified card type\n",
+ btv->c.nr);
} else {
- printk(KERN_INFO "bttv%d: osprey eeprom: "
- "Changing card type from %d to %d\n", btv->c.nr,
- btv->c.type, cardid);
+ pr_info("%d: osprey eeprom: Changing card type from %d to %d\n",
+ btv->c.nr, btv->c.type, cardid);
btv->c.type = cardid;
}
}
@@ -3938,14 +3931,14 @@ static void __devinit avermedia_eeprom(struct bttv *btv)
if (tuner_format == 0x09)
tuner_type = TUNER_LG_NTSC_NEW_TAPC; /* TAPC-G702P */
- printk(KERN_INFO "bttv%d: Avermedia eeprom[0x%02x%02x]: tuner=",
+ pr_info("%d: Avermedia eeprom[0x%02x%02x]: tuner=",
btv->c.nr, eeprom_data[0x41], eeprom_data[0x42]);
if (tuner_type) {
btv->tuner_type = tuner_type;
- printk(KERN_CONT "%d", tuner_type);
+ pr_cont("%d", tuner_type);
} else
- printk(KERN_CONT "Unknown type");
- printk(KERN_CONT " radio:%s remote control:%s\n",
+ pr_cont("Unknown type");
+ pr_cont(" radio:%s remote control:%s\n",
tuner_tv_fm ? "yes" : "no",
btv->has_remote ? "yes" : "no");
}
@@ -3993,8 +3986,8 @@ static void __devinit boot_msp34xx(struct bttv *btv, int pin)
if (bttv_gpio)
bttv_gpio_tracking(btv,"msp34xx");
if (bttv_verbose)
- printk(KERN_INFO "bttv%d: Hauppauge/Voodoo msp34xx: reset line "
- "init [%d]\n", btv->c.nr, pin);
+ pr_info("%d: Hauppauge/Voodoo msp34xx: reset line init [%d]\n",
+ btv->c.nr, pin);
}
/* ----------------------------------------------------------------------- */
@@ -4034,7 +4027,7 @@ static void __devinit init_PXC200(struct bttv *btv)
btwrite(BT848_ADC_RESERVED|BT848_ADC_AGC_EN, BT848_ADC);
/* Initialise MAX517 DAC */
- printk(KERN_INFO "Setting DAC reference voltage level ...\n");
+ pr_info("Setting DAC reference voltage level ...\n");
bttv_I2CWrite(btv,0x5E,0,0x80,1);
/* Initialise 12C508 PIC */
@@ -4043,7 +4036,7 @@ static void __devinit init_PXC200(struct bttv *btv)
* argument so the numbers are different */
- printk(KERN_INFO "Initialising 12C508 PIC chip ...\n");
+ pr_info("Initialising 12C508 PIC chip ...\n");
/* First of all, enable the clock line. This is used in the PXC200-F */
val = btread(BT848_GPIO_DMA_CTL);
@@ -4062,13 +4055,12 @@ static void __devinit init_PXC200(struct bttv *btv)
for (i = 0; i < ARRAY_SIZE(vals); i++) {
tmp=bttv_I2CWrite(btv,0x1E,0,vals[i],1);
if (tmp != -1) {
- printk(KERN_INFO
- "I2C Write(%2.2x) = %i\nI2C Read () = %2.2x\n\n",
+ pr_info("I2C Write(%2.2x) = %i\nI2C Read () = %2.2x\n\n",
vals[i],tmp,bttv_I2CRead(btv,0x1F,NULL));
}
}
- printk(KERN_INFO "PXC200 Initialised.\n");
+ pr_info("PXC200 Initialised\n");
}
@@ -4107,8 +4099,7 @@ init_RTV24 (struct bttv *btv)
uint32_t dataRead = 0;
long watchdog_value = 0x0E;
- printk (KERN_INFO
- "bttv%d: Adlink RTV-24 initialisation in progress ...\n",
+ pr_info("%d: Adlink RTV-24 initialisation in progress ...\n",
btv->c.nr);
btwrite (0x00c3feff, BT848_GPIO_OUT_EN);
@@ -4122,8 +4113,7 @@ init_RTV24 (struct bttv *btv)
dataRead = btread (BT848_GPIO_DATA);
if ((((dataRead >> 18) & 0x01) != 0) || (((dataRead >> 19) & 0x01) != 1)) {
- printk (KERN_INFO
- "bttv%d: Adlink RTV-24 initialisation(1) ERROR_CPLD_Check_Failed (read %d)\n",
+ pr_info("%d: Adlink RTV-24 initialisation(1) ERROR_CPLD_Check_Failed (read %d)\n",
btv->c.nr, dataRead);
}
@@ -4136,15 +4126,13 @@ init_RTV24 (struct bttv *btv)
dataRead = btread (BT848_GPIO_DATA);
if ((((dataRead >> 18) & 0x01) != 0) || (((dataRead >> 19) & 0x01) != 0)) {
- printk (KERN_INFO
- "bttv%d: Adlink RTV-24 initialisation(2) ERROR_CPLD_Check_Failed (read %d)\n",
+ pr_info("%d: Adlink RTV-24 initialisation(2) ERROR_CPLD_Check_Failed (read %d)\n",
btv->c.nr, dataRead);
return;
}
- printk (KERN_INFO
- "bttv%d: Adlink RTV-24 initialisation complete.\n", btv->c.nr);
+ pr_info("%d: Adlink RTV-24 initialisation complete\n", btv->c.nr);
}
@@ -4261,22 +4249,25 @@ static int tea5757_read(struct bttv *btv)
while (bus_in(btv,btv->mbox_data) && time_before(jiffies, timeout))
schedule();
if (bus_in(btv,btv->mbox_data)) {
- printk(KERN_WARNING "bttv%d: tea5757: read timeout\n",btv->c.nr);
+ pr_warn("%d: tea5757: read timeout\n", btv->c.nr);
return -1;
}
- dprintk("bttv%d: tea5757:",btv->c.nr);
+ dprintk("%d: tea5757:", btv->c.nr);
for (i = 0; i < 24; i++) {
udelay(5);
bus_high(btv,btv->mbox_clk);
udelay(5);
- dprintk("%c",(bus_in(btv,btv->mbox_most) == 0)?'T':'-');
+ dprintk_cont("%c",
+ bus_in(btv, btv->mbox_most) == 0 ? 'T' : '-');
bus_low(btv,btv->mbox_clk);
value <<= 1;
value |= (bus_in(btv,btv->mbox_data) == 0)?0:1; /* MSB first */
- dprintk("%c", (bus_in(btv,btv->mbox_most) == 0)?'S':'M');
+ dprintk_cont("%c",
+ bus_in(btv, btv->mbox_most) == 0 ? 'S' : 'M');
}
- dprintk("\nbttv%d: tea5757: read 0x%X\n", btv->c.nr, value);
+ dprintk_cont("\n");
+ dprintk("%d: tea5757: read 0x%X\n", btv->c.nr, value);
return value;
}
@@ -4295,7 +4286,7 @@ static int tea5757_write(struct bttv *btv, int value)
if (bttv_gpio)
bttv_gpio_tracking(btv,"tea5757 write");
- dprintk("bttv%d: tea5757: write 0x%X\n", btv->c.nr, value);
+ dprintk("%d: tea5757: write 0x%X\n", btv->c.nr, value);
bus_low(btv,btv->mbox_clk);
bus_high(btv,btv->mbox_we);
for (i = 0; i < 25; i++) {
@@ -4547,7 +4538,7 @@ static void picolo_tetra_init(struct bttv *btv)
static void picolo_tetra_muxsel (struct bttv* btv, unsigned int input)
{
- dprintk (KERN_DEBUG "bttv%d : picolo_tetra_muxsel => input = %d\n",btv->c.nr,input);
+ dprintk("%d : picolo_tetra_muxsel => input = %d\n", btv->c.nr, input);
/*Just set the right path in the analog multiplexers : channel 1 -> 4 ==> Analog Mux ==> MUX0*/
/*GPIO[20]&GPIO[21] used to choose the right input*/
btwrite (input<<20,BT848_GPIO_DATA);
@@ -4592,7 +4583,7 @@ static void ivc120_muxsel(struct bttv *btv, unsigned int input)
int key = input % 4;
int matrix = input / 4;
- dprintk("bttv%d: ivc120_muxsel: Input - %02d | TDA - %02d | In - %02d\n",
+ dprintk("%d: ivc120_muxsel: Input - %02d | TDA - %02d | In - %02d\n",
btv->c.nr, input, matrix, key);
/* Handles the input selection on the TDA8540's */
@@ -4649,15 +4640,17 @@ static void PXC200_muxsel(struct bttv *btv, unsigned int input)
buf[1]=0;
rc=bttv_I2CWrite(btv,(PX_I2C_PIC<<1),buf[0],buf[1],1);
if (rc) {
- printk(KERN_DEBUG "bttv%d: PXC200_muxsel: pic cfg write failed:%d\n", btv->c.nr,rc);
+ pr_debug("%d: PXC200_muxsel: pic cfg write failed:%d\n",
+ btv->c.nr, rc);
/* not PXC ? do nothing */
- return;
+ return;
}
rc=bttv_I2CRead(btv,(PX_I2C_PIC<<1),NULL);
if (!(rc & PX_CFG_PXC200F)) {
- printk(KERN_DEBUG "bttv%d: PXC200_muxsel: not PXC200F rc:%d \n", btv->c.nr,rc);
- return;
+ pr_debug("%d: PXC200_muxsel: not PXC200F rc:%d\n",
+ btv->c.nr, rc);
+ return;
}
@@ -4696,7 +4689,7 @@ static void PXC200_muxsel(struct bttv *btv, unsigned int input)
else /* older device */
btand(~BT848_IFORM_MUXSEL,BT848_IFORM);
- printk(KERN_DEBUG "bttv%d: setting input channel to:%d\n", btv->c.nr,(int)mux);
+ pr_debug("%d: setting input channel to:%d\n", btv->c.nr, (int)mux);
}
static void phytec_muxsel(struct bttv *btv, unsigned int input)
@@ -4847,29 +4840,27 @@ void __init bttv_check_chipset(void)
/* print warnings about any quirks found */
if (triton1)
- printk(KERN_INFO "bttv: Host bridge needs ETBF enabled.\n");
+ pr_info("Host bridge needs ETBF enabled\n");
if (vsfx)
- printk(KERN_INFO "bttv: Host bridge needs VSFX enabled.\n");
+ pr_info("Host bridge needs VSFX enabled\n");
if (pcipci_fail) {
- printk(KERN_INFO "bttv: bttv and your chipset may not work "
- "together.\n");
+ pr_info("bttv and your chipset may not work together\n");
if (!no_overlay) {
- printk(KERN_INFO "bttv: overlay will be disabled.\n");
+ pr_info("overlay will be disabled\n");
no_overlay = 1;
} else {
- printk(KERN_INFO "bttv: overlay forced. Use this "
- "option at your own risk.\n");
+ pr_info("overlay forced. Use this option at your own risk.\n");
}
}
if (UNSET != latency)
- printk(KERN_INFO "bttv: pci latency fixup [%d]\n",latency);
+ pr_info("pci latency fixup [%d]\n", latency);
while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82441, dev))) {
unsigned char b;
pci_read_config_byte(dev, 0x53, &b);
if (bttv_debug)
- printk(KERN_INFO "bttv: Host bridge: 82441FX Natoma, "
- "bufcon=0x%02x\n",b);
+ pr_info("Host bridge: 82441FX Natoma, bufcon=0x%02x\n",
+ b);
}
}
@@ -4882,12 +4873,13 @@ int __devinit bttv_handle_chipset(struct bttv *btv)
if (bttv_verbose) {
if (triton1)
- printk(KERN_INFO "bttv%d: enabling ETBF (430FX/VP3 compatibilty)\n",btv->c.nr);
+ pr_info("%d: enabling ETBF (430FX/VP3 compatibility)\n",
+ btv->c.nr);
if (vsfx && btv->id >= 878)
- printk(KERN_INFO "bttv%d: enabling VSFX\n",btv->c.nr);
+ pr_info("%d: enabling VSFX\n", btv->c.nr);
if (UNSET != latency)
- printk(KERN_INFO "bttv%d: setting pci timer to %d\n",
- btv->c.nr,latency);
+ pr_info("%d: setting pci timer to %d\n",
+ btv->c.nr, latency);
}
if (btv->id < 878) {
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 14444de67d5..3dd06607aec 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -34,6 +34,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -942,8 +944,8 @@ static
void free_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bits)
{
if ((fh->resources & bits) != bits) {
- /* trying to free ressources not allocated by us ... */
- printk("bttv: BUG! (btres)\n");
+ /* trying to free resources not allocated by us ... */
+ pr_err("BUG! (btres)\n");
}
fh->resources &= ~bits;
btv->resources &= ~bits;
@@ -1000,7 +1002,7 @@ static void set_pll(struct bttv *btv)
return;
if (btv->pll.pll_ofreq == btv->pll.pll_current) {
- dprintk("bttv%d: PLL: no change required\n",btv->c.nr);
+ dprintk("%d: PLL: no change required\n", btv->c.nr);
return;
}
@@ -1008,21 +1010,23 @@ static void set_pll(struct bttv *btv)
/* no PLL needed */
if (btv->pll.pll_current == 0)
return;
- bttv_printk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
- btv->c.nr,btv->pll.pll_ifreq);
+ if (bttv_verbose)
+ pr_info("%d: PLL can sleep, using XTAL (%d)\n",
+ btv->c.nr, btv->pll.pll_ifreq);
btwrite(0x00,BT848_TGCTRL);
btwrite(0x00,BT848_PLL_XCI);
btv->pll.pll_current = 0;
return;
}
- bttv_printk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
- btv->pll.pll_ifreq, btv->pll.pll_ofreq);
+ if (bttv_verbose)
+ pr_info("%d: Setting PLL: %d => %d (needs up to 100ms)\n",
+ btv->c.nr,
+ btv->pll.pll_ifreq, btv->pll.pll_ofreq);
set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq);
for (i=0; i<10; i++) {
/* Let other people run while the PLL stabilizes */
- bttv_printk(".");
msleep(10);
if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) {
@@ -1030,12 +1034,14 @@ static void set_pll(struct bttv *btv)
} else {
btwrite(0x08,BT848_TGCTRL);
btv->pll.pll_current = btv->pll.pll_ofreq;
- bttv_printk(" ok\n");
+ if (bttv_verbose)
+ pr_info("PLL set ok\n");
return;
}
}
btv->pll.pll_current = -1;
- bttv_printk("failed\n");
+ if (bttv_verbose)
+ pr_info("Setting PLL failed\n");
return;
}
@@ -1047,7 +1053,7 @@ static void bt848A_set_timing(struct bttv *btv)
int fsc = bttv_tvnorms[btv->tvnorm].Fsc;
if (btv->input == btv->dig) {
- dprintk("bttv%d: load digital timing table (table_idx=%d)\n",
+ dprintk("%d: load digital timing table (table_idx=%d)\n",
btv->c.nr,table_idx);
/* timing change...reset timing generator address */
@@ -1076,7 +1082,7 @@ static void bt848_bright(struct bttv *btv, int bright)
{
int value;
- // printk("bttv: set bright: %d\n",bright); // DEBUG
+ // printk("set bright: %d\n", bright); // DEBUG
btv->bright = bright;
/* We want -128 to 127 we get 0-65535 */
@@ -1150,8 +1156,7 @@ video_mux(struct bttv *btv, unsigned int input)
}
mux = bttv_muxsel(btv, input);
btaor(mux<<5, ~(3<<5), BT848_IFORM);
- dprintk(KERN_DEBUG "bttv%d: video mux: input=%d mux=%d\n",
- btv->c.nr,input,mux);
+ dprintk("%d: video mux: input=%d mux=%d\n", btv->c.nr, input, mux);
/* card specific hook */
if(bttv_tvcards[btv->c.type].muxsel_hook)
@@ -1440,7 +1445,7 @@ static void bttv_reinit_bt848(struct bttv *btv)
unsigned long flags;
if (bttv_verbose)
- printk(KERN_INFO "bttv%d: reset, reinitialize\n",btv->c.nr);
+ pr_info("%d: reset, reinitialize\n", btv->c.nr);
spin_lock_irqsave(&btv->s_lock,flags);
btv->errors=0;
bttv_set_dma(btv,0);
@@ -1622,8 +1627,8 @@ void bttv_gpio_tracking(struct bttv *btv, char *comment)
unsigned int outbits, data;
outbits = btread(BT848_GPIO_OUT_EN);
data = btread(BT848_GPIO_DATA);
- printk(KERN_DEBUG "bttv%d: gpio: en=%08x, out=%08x in=%08x [%s]\n",
- btv->c.nr,outbits,data & outbits, data & ~outbits, comment);
+ pr_debug("%d: gpio: en=%08x, out=%08x in=%08x [%s]\n",
+ btv->c.nr, outbits, data & outbits, data & ~outbits, comment);
}
static void bttv_field_count(struct bttv *btv)
@@ -1668,7 +1673,7 @@ bttv_switch_overlay(struct bttv *btv, struct bttv_fh *fh,
unsigned long flags;
int retval = 0;
- dprintk("switch_overlay: enter [new=%p]\n",new);
+ dprintk("switch_overlay: enter [new=%p]\n", new);
if (new)
new->vb.state = VIDEOBUF_DONE;
spin_lock_irqsave(&btv->s_lock,flags);
@@ -1678,7 +1683,8 @@ bttv_switch_overlay(struct bttv *btv, struct bttv_fh *fh,
bttv_set_dma(btv, 0x03);
spin_unlock_irqrestore(&btv->s_lock,flags);
if (NULL != old) {
- dprintk("switch_overlay: old=%p state is %d\n",old,old->vb.state);
+ dprintk("switch_overlay: old=%p state is %d\n",
+ old, old->vb.state);
bttv_dma_free(&fh->cap,btv, old);
kfree(old);
}
@@ -2029,11 +2035,11 @@ static int bttv_log_status(struct file *file, void *f)
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
- printk(KERN_INFO "bttv%d: ======== START STATUS CARD #%d ========\n",
- btv->c.nr, btv->c.nr);
+ pr_info("%d: ======== START STATUS CARD #%d ========\n",
+ btv->c.nr, btv->c.nr);
bttv_call_all(btv, core, log_status);
- printk(KERN_INFO "bttv%d: ======== END STATUS CARD #%d ========\n",
- btv->c.nr, btv->c.nr);
+ pr_info("%d: ======== END STATUS CARD #%d ========\n",
+ btv->c.nr, btv->c.nr);
return 0;
}
@@ -2598,7 +2604,7 @@ static int bttv_s_fmt_vid_overlay(struct file *file, void *priv,
struct bttv *btv = fh->btv;
if (no_overlay > 0) {
- printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
+ pr_err("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
return -EINVAL;
}
@@ -2673,7 +2679,7 @@ static int bttv_enum_fmt_vid_overlay(struct file *file, void *priv,
int rc;
if (no_overlay > 0) {
- printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
+ pr_err("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
return -EINVAL;
}
@@ -2714,7 +2720,7 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on)
return -EINVAL;
}
if (unlikely(!fh->ov.setup_ok)) {
- dprintk("bttv%d: overlay: !setup_ok\n", btv->c.nr);
+ dprintk("%d: overlay: !setup_ok\n", btv->c.nr);
retval = -EINVAL;
}
if (retval)
@@ -3091,8 +3097,8 @@ static ssize_t bttv_read(struct file *file, char __user *data,
if (fh->btv->errors)
bttv_reinit_bt848(fh->btv);
- dprintk("bttv%d: read count=%d type=%s\n",
- fh->btv->c.nr,(int)count,v4l2_type_names[fh->type]);
+ dprintk("%d: read count=%d type=%s\n",
+ fh->btv->c.nr, (int)count, v4l2_type_names[fh->type]);
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
@@ -3174,7 +3180,7 @@ static int bttv_open(struct file *file)
struct bttv_fh *fh;
enum v4l2_buf_type type = 0;
- dprintk(KERN_DEBUG "bttv: open dev=%s\n", video_device_node_name(vdev));
+ dprintk("open dev=%s\n", video_device_node_name(vdev));
if (vdev->vfl_type == VFL_TYPE_GRABBER) {
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -3185,8 +3191,8 @@ static int bttv_open(struct file *file)
return -ENODEV;
}
- dprintk(KERN_DEBUG "bttv%d: open called (type=%s)\n",
- btv->c.nr,v4l2_type_names[type]);
+ dprintk("%d: open called (type=%s)\n",
+ btv->c.nr, v4l2_type_names[type]);
/* allocate per filehandle data */
fh = kmalloc(sizeof(*fh), GFP_KERNEL);
@@ -3288,7 +3294,7 @@ bttv_mmap(struct file *file, struct vm_area_struct *vma)
{
struct bttv_fh *fh = file->private_data;
- dprintk("bttv%d: mmap type=%s 0x%lx+%ld\n",
+ dprintk("%d: mmap type=%s 0x%lx+%ld\n",
fh->btv->c.nr, v4l2_type_names[fh->type],
vma->vm_start, vma->vm_end - vma->vm_start);
return videobuf_mmap_mapper(bttv_queue(fh),vma);
@@ -3370,9 +3376,9 @@ static int radio_open(struct file *file)
struct bttv *btv = video_drvdata(file);
struct bttv_fh *fh;
- dprintk("bttv: open dev=%s\n", video_device_node_name(vdev));
+ dprintk("open dev=%s\n", video_device_node_name(vdev));
- dprintk("bttv%d: open called (radio)\n",btv->c.nr);
+ dprintk("%d: open called (radio)\n", btv->c.nr);
/* allocate per filehandle data */
fh = kmalloc(sizeof(*fh), GFP_KERNEL);
@@ -3616,12 +3622,12 @@ static int bttv_risc_decode(u32 risc)
};
int i;
- printk("0x%08x [ %s", risc,
+ pr_cont("0x%08x [ %s", risc,
instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
for (i = ARRAY_SIZE(bits)-1; i >= 0; i--)
if (risc & (1 << (i + 12)))
- printk(" %s",bits[i]);
- printk(" count=%d ]\n", risc & 0xfff);
+ pr_cont(" %s", bits[i]);
+ pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
@@ -3630,16 +3636,18 @@ static void bttv_risc_disasm(struct bttv *btv,
{
unsigned int i,j,n;
- printk("%s: risc disasm: %p [dma=0x%08lx]\n",
- btv->c.v4l2_dev.name, risc->cpu, (unsigned long)risc->dma);
+ pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
+ btv->c.v4l2_dev.name, risc->cpu, (unsigned long)risc->dma);
for (i = 0; i < (risc->size >> 2); i += n) {
- printk("%s: 0x%lx: ", btv->c.v4l2_dev.name,
- (unsigned long)(risc->dma + (i<<2)));
+ pr_info("%s: 0x%lx: ",
+ btv->c.v4l2_dev.name,
+ (unsigned long)(risc->dma + (i<<2)));
n = bttv_risc_decode(le32_to_cpu(risc->cpu[i]));
for (j = 1; j < n; j++)
- printk("%s: 0x%lx: 0x%08x [ arg #%d ]\n",
- btv->c.v4l2_dev.name, (unsigned long)(risc->dma + ((i+j)<<2)),
- risc->cpu[i+j], j);
+ pr_info("%s: 0x%lx: 0x%08x [ arg #%d ]\n",
+ btv->c.v4l2_dev.name,
+ (unsigned long)(risc->dma + ((i+j)<<2)),
+ risc->cpu[i+j], j);
if (0 == risc->cpu[i])
break;
}
@@ -3647,17 +3655,18 @@ static void bttv_risc_disasm(struct bttv *btv,
static void bttv_print_riscaddr(struct bttv *btv)
{
- printk(" main: %08Lx\n",
- (unsigned long long)btv->main.dma);
- printk(" vbi : o=%08Lx e=%08Lx\n",
- btv->cvbi ? (unsigned long long)btv->cvbi->top.dma : 0,
- btv->cvbi ? (unsigned long long)btv->cvbi->bottom.dma : 0);
- printk(" cap : o=%08Lx e=%08Lx\n",
- btv->curr.top ? (unsigned long long)btv->curr.top->top.dma : 0,
- btv->curr.bottom ? (unsigned long long)btv->curr.bottom->bottom.dma : 0);
- printk(" scr : o=%08Lx e=%08Lx\n",
- btv->screen ? (unsigned long long)btv->screen->top.dma : 0,
- btv->screen ? (unsigned long long)btv->screen->bottom.dma : 0);
+ pr_info(" main: %08llx\n", (unsigned long long)btv->main.dma);
+ pr_info(" vbi : o=%08llx e=%08llx\n",
+ btv->cvbi ? (unsigned long long)btv->cvbi->top.dma : 0,
+ btv->cvbi ? (unsigned long long)btv->cvbi->bottom.dma : 0);
+ pr_info(" cap : o=%08llx e=%08llx\n",
+ btv->curr.top
+ ? (unsigned long long)btv->curr.top->top.dma : 0,
+ btv->curr.bottom
+ ? (unsigned long long)btv->curr.bottom->bottom.dma : 0);
+ pr_info(" scr : o=%08llx e=%08llx\n",
+ btv->screen ? (unsigned long long)btv->screen->top.dma : 0,
+ btv->screen ? (unsigned long long)btv->screen->bottom.dma : 0);
bttv_risc_disasm(btv, &btv->main);
}
@@ -3690,34 +3699,34 @@ static void bttv_print_irqbits(u32 print, u32 mark)
{
unsigned int i;
- printk("bits:");
+ pr_cont("bits:");
for (i = 0; i < ARRAY_SIZE(irq_name); i++) {
if (print & (1 << i))
- printk(" %s",irq_name[i]);
+ pr_cont(" %s", irq_name[i]);
if (mark & (1 << i))
- printk("*");
+ pr_cont("*");
}
}
static void bttv_irq_debug_low_latency(struct bttv *btv, u32 rc)
{
- printk("bttv%d: irq: skipped frame [main=%lx,o_vbi=%lx,o_field=%lx,rc=%lx]\n",
- btv->c.nr,
- (unsigned long)btv->main.dma,
- (unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_VBI+1]),
- (unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_FIELD+1]),
- (unsigned long)rc);
+ pr_warn("%d: irq: skipped frame [main=%lx,o_vbi=%lx,o_field=%lx,rc=%lx]\n",
+ btv->c.nr,
+ (unsigned long)btv->main.dma,
+ (unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_VBI+1]),
+ (unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_FIELD+1]),
+ (unsigned long)rc);
if (0 == (btread(BT848_DSTATUS) & BT848_DSTATUS_HLOC)) {
- printk("bttv%d: Oh, there (temporarely?) is no input signal. "
- "Ok, then this is harmless, don't worry ;)\n",
- btv->c.nr);
+ pr_notice("%d: Oh, there (temporarily?) is no input signal. "
+ "Ok, then this is harmless, don't worry ;)\n",
+ btv->c.nr);
return;
}
- printk("bttv%d: Uhm. Looks like we have unusual high IRQ latencies.\n",
- btv->c.nr);
- printk("bttv%d: Lets try to catch the culpit red-handed ...\n",
- btv->c.nr);
+ pr_notice("%d: Uhm. Looks like we have unusual high IRQ latencies\n",
+ btv->c.nr);
+ pr_notice("%d: Lets try to catch the culpit red-handed ...\n",
+ btv->c.nr);
dump_stack();
}
@@ -3798,9 +3807,9 @@ bttv_irq_next_video(struct bttv *btv, struct bttv_buffer_set *set)
}
}
- dprintk("bttv%d: next set: top=%p bottom=%p [screen=%p,irq=%d,%d]\n",
- btv->c.nr,set->top, set->bottom,
- btv->screen,set->frame_irq,set->top_irq);
+ dprintk("%d: next set: top=%p bottom=%p [screen=%p,irq=%d,%d]\n",
+ btv->c.nr, set->top, set->bottom,
+ btv->screen, set->frame_irq, set->top_irq);
return 0;
}
@@ -3815,7 +3824,8 @@ bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup,
if (wakeup->top == wakeup->bottom) {
if (NULL != wakeup->top && curr->top != wakeup->top) {
if (irq_debug > 1)
- printk("bttv%d: wakeup: both=%p\n",btv->c.nr,wakeup->top);
+ pr_debug("%d: wakeup: both=%p\n",
+ btv->c.nr, wakeup->top);
wakeup->top->vb.ts = ts;
wakeup->top->vb.field_count = btv->field_count;
wakeup->top->vb.state = state;
@@ -3824,7 +3834,8 @@ bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup,
} else {
if (NULL != wakeup->top && curr->top != wakeup->top) {
if (irq_debug > 1)
- printk("bttv%d: wakeup: top=%p\n",btv->c.nr,wakeup->top);
+ pr_debug("%d: wakeup: top=%p\n",
+ btv->c.nr, wakeup->top);
wakeup->top->vb.ts = ts;
wakeup->top->vb.field_count = btv->field_count;
wakeup->top->vb.state = state;
@@ -3832,7 +3843,8 @@ bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup,
}
if (NULL != wakeup->bottom && curr->bottom != wakeup->bottom) {
if (irq_debug > 1)
- printk("bttv%d: wakeup: bottom=%p\n",btv->c.nr,wakeup->bottom);
+ pr_debug("%d: wakeup: bottom=%p\n",
+ btv->c.nr, wakeup->bottom);
wakeup->bottom->vb.ts = ts;
wakeup->bottom->vb.field_count = btv->field_count;
wakeup->bottom->vb.state = state;
@@ -3866,11 +3878,11 @@ static void bttv_irq_timeout(unsigned long data)
unsigned long flags;
if (bttv_verbose) {
- printk(KERN_INFO "bttv%d: timeout: drop=%d irq=%d/%d, risc=%08x, ",
- btv->c.nr, btv->framedrop, btv->irq_me, btv->irq_total,
- btread(BT848_RISC_COUNT));
+ pr_info("%d: timeout: drop=%d irq=%d/%d, risc=%08x, ",
+ btv->c.nr, btv->framedrop, btv->irq_me, btv->irq_total,
+ btread(BT848_RISC_COUNT));
bttv_print_irqbits(btread(BT848_INT_STAT),0);
- printk("\n");
+ pr_cont("\n");
}
spin_lock_irqsave(&btv->s_lock,flags);
@@ -4033,21 +4045,23 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
dstat=btread(BT848_DSTATUS);
if (irq_debug) {
- printk(KERN_DEBUG "bttv%d: irq loop=%d fc=%d "
- "riscs=%x, riscc=%08x, ",
- btv->c.nr, count, btv->field_count,
- stat>>28, btread(BT848_RISC_COUNT));
+ pr_debug("%d: irq loop=%d fc=%d riscs=%x, riscc=%08x, ",
+ btv->c.nr, count, btv->field_count,
+ stat>>28, btread(BT848_RISC_COUNT));
bttv_print_irqbits(stat,astat);
if (stat & BT848_INT_HLOCK)
- printk(" HLOC => %s", (dstat & BT848_DSTATUS_HLOC)
- ? "yes" : "no");
+ pr_cont(" HLOC => %s",
+ dstat & BT848_DSTATUS_HLOC
+ ? "yes" : "no");
if (stat & BT848_INT_VPRES)
- printk(" PRES => %s", (dstat & BT848_DSTATUS_PRES)
- ? "yes" : "no");
+ pr_cont(" PRES => %s",
+ dstat & BT848_DSTATUS_PRES
+ ? "yes" : "no");
if (stat & BT848_INT_FMTCHG)
- printk(" NUML => %s", (dstat & BT848_DSTATUS_NUML)
- ? "625" : "525");
- printk("\n");
+ pr_cont(" NUML => %s",
+ dstat & BT848_DSTATUS_NUML
+ ? "625" : "525");
+ pr_cont("\n");
}
if (astat&BT848_INT_VSYNC)
@@ -4075,18 +4089,19 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
audio_mute(btv, btv->mute); /* trigger automute */
if (astat & (BT848_INT_SCERR|BT848_INT_OCERR)) {
- printk(KERN_INFO "bttv%d: %s%s @ %08x,",btv->c.nr,
- (astat & BT848_INT_SCERR) ? "SCERR" : "",
- (astat & BT848_INT_OCERR) ? "OCERR" : "",
- btread(BT848_RISC_COUNT));
+ pr_info("%d: %s%s @ %08x,",
+ btv->c.nr,
+ (astat & BT848_INT_SCERR) ? "SCERR" : "",
+ (astat & BT848_INT_OCERR) ? "OCERR" : "",
+ btread(BT848_RISC_COUNT));
bttv_print_irqbits(stat,astat);
- printk("\n");
+ pr_cont("\n");
if (bttv_debug)
bttv_print_riscaddr(btv);
}
if (fdsr && astat & BT848_INT_FDSR) {
- printk(KERN_INFO "bttv%d: FDSR @ %08x\n",
- btv->c.nr,btread(BT848_RISC_COUNT));
+ pr_info("%d: FDSR @ %08x\n",
+ btv->c.nr, btread(BT848_RISC_COUNT));
if (bttv_debug)
bttv_print_riscaddr(btv);
}
@@ -4097,11 +4112,11 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
if (count > 8 || !(astat & BT848_INT_GPINT)) {
btwrite(0, BT848_INT_MASK);
- printk(KERN_ERR
- "bttv%d: IRQ lockup, cleared int mask [", btv->c.nr);
+ pr_err("%d: IRQ lockup, cleared int mask [",
+ btv->c.nr);
} else {
- printk(KERN_ERR
- "bttv%d: IRQ lockup, clearing GPINT from int mask [", btv->c.nr);
+ pr_err("%d: IRQ lockup, clearing GPINT from int mask [",
+ btv->c.nr);
btwrite(btread(BT848_INT_MASK) & (-1 ^ BT848_INT_GPINT),
BT848_INT_MASK);
@@ -4109,7 +4124,7 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
bttv_print_irqbits(stat,astat);
- printk("]\n");
+ pr_cont("]\n");
}
}
btv->irq_total++;
@@ -4171,7 +4186,7 @@ static void bttv_unregister_video(struct bttv *btv)
static int __devinit bttv_register_video(struct bttv *btv)
{
if (no_overlay > 0)
- printk("bttv: Overlay support disabled.\n");
+ pr_notice("Overlay support disabled\n");
/* video */
btv->video_dev = vdev_init(btv, &bttv_video_template, "video");
@@ -4181,12 +4196,11 @@ static int __devinit bttv_register_video(struct bttv *btv)
if (video_register_device(btv->video_dev, VFL_TYPE_GRABBER,
video_nr[btv->c.nr]) < 0)
goto err;
- printk(KERN_INFO "bttv%d: registered device %s\n",
- btv->c.nr, video_device_node_name(btv->video_dev));
+ pr_info("%d: registered device %s\n",
+ btv->c.nr, video_device_node_name(btv->video_dev));
if (device_create_file(&btv->video_dev->dev,
&dev_attr_card)<0) {
- printk(KERN_ERR "bttv%d: device_create_file 'card' "
- "failed\n", btv->c.nr);
+ pr_err("%d: device_create_file 'card' failed\n", btv->c.nr);
goto err;
}
@@ -4198,8 +4212,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
if (video_register_device(btv->vbi_dev, VFL_TYPE_VBI,
vbi_nr[btv->c.nr]) < 0)
goto err;
- printk(KERN_INFO "bttv%d: registered device %s\n",
- btv->c.nr, video_device_node_name(btv->vbi_dev));
+ pr_info("%d: registered device %s\n",
+ btv->c.nr, video_device_node_name(btv->vbi_dev));
if (!btv->has_radio)
return 0;
@@ -4210,8 +4224,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
if (video_register_device(btv->radio_dev, VFL_TYPE_RADIO,
radio_nr[btv->c.nr]) < 0)
goto err;
- printk(KERN_INFO "bttv%d: registered device %s\n",
- btv->c.nr, video_device_node_name(btv->radio_dev));
+ pr_info("%d: registered device %s\n",
+ btv->c.nr, video_device_node_name(btv->radio_dev));
/* all done */
return 0;
@@ -4244,10 +4258,10 @@ static int __devinit bttv_probe(struct pci_dev *dev,
if (bttv_num == BTTV_MAX)
return -ENOMEM;
- printk(KERN_INFO "bttv: Bt8xx card found (%d).\n", bttv_num);
+ pr_info("Bt8xx card found (%d)\n", bttv_num);
bttvs[bttv_num] = btv = kzalloc(sizeof(*btv), GFP_KERNEL);
if (btv == NULL) {
- printk(KERN_ERR "bttv: out of memory.\n");
+ pr_err("out of memory\n");
return -ENOMEM;
}
btv->c.nr = bttv_num;
@@ -4277,21 +4291,19 @@ static int __devinit bttv_probe(struct pci_dev *dev,
btv->c.pci = dev;
btv->id = dev->device;
if (pci_enable_device(dev)) {
- printk(KERN_WARNING "bttv%d: Can't enable device.\n",
- btv->c.nr);
+ pr_warn("%d: Can't enable device\n", btv->c.nr);
return -EIO;
}
if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "bttv%d: No suitable DMA available.\n",
- btv->c.nr);
+ pr_warn("%d: No suitable DMA available\n", btv->c.nr);
return -EIO;
}
if (!request_mem_region(pci_resource_start(dev,0),
pci_resource_len(dev,0),
btv->c.v4l2_dev.name)) {
- printk(KERN_WARNING "bttv%d: can't request iomem (0x%llx).\n",
- btv->c.nr,
- (unsigned long long)pci_resource_start(dev,0));
+ pr_warn("%d: can't request iomem (0x%llx)\n",
+ btv->c.nr,
+ (unsigned long long)pci_resource_start(dev, 0));
return -EBUSY;
}
pci_set_master(dev);
@@ -4299,22 +4311,21 @@ static int __devinit bttv_probe(struct pci_dev *dev,
result = v4l2_device_register(&dev->dev, &btv->c.v4l2_dev);
if (result < 0) {
- printk(KERN_WARNING "bttv%d: v4l2_device_register() failed\n", btv->c.nr);
+ pr_warn("%d: v4l2_device_register() failed\n", btv->c.nr);
goto fail0;
}
btv->revision = dev->revision;
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
- printk(KERN_INFO "bttv%d: Bt%d (rev %d) at %s, ",
- bttv_num,btv->id, btv->revision, pci_name(dev));
- printk("irq: %d, latency: %d, mmio: 0x%llx\n",
- btv->c.pci->irq, lat,
- (unsigned long long)pci_resource_start(dev,0));
+ pr_info("%d: Bt%d (rev %d) at %s, irq: %d, latency: %d, mmio: 0x%llx\n",
+ bttv_num, btv->id, btv->revision, pci_name(dev),
+ btv->c.pci->irq, lat,
+ (unsigned long long)pci_resource_start(dev, 0));
schedule();
btv->bt848_mmio = ioremap(pci_resource_start(dev, 0), 0x1000);
if (NULL == btv->bt848_mmio) {
- printk("bttv%d: ioremap() failed\n", btv->c.nr);
+ pr_err("%d: ioremap() failed\n", btv->c.nr);
result = -EIO;
goto fail1;
}
@@ -4327,8 +4338,8 @@ static int __devinit bttv_probe(struct pci_dev *dev,
result = request_irq(btv->c.pci->irq, bttv_irq,
IRQF_SHARED | IRQF_DISABLED, btv->c.v4l2_dev.name, (void *)btv);
if (result < 0) {
- printk(KERN_ERR "bttv%d: can't get IRQ %d\n",
- bttv_num,btv->c.pci->irq);
+ pr_err("%d: can't get IRQ %d\n",
+ bttv_num, btv->c.pci->irq);
goto fail1;
}
@@ -4433,7 +4444,7 @@ static void __devexit bttv_remove(struct pci_dev *pci_dev)
struct bttv *btv = to_bttv(v4l2_dev);
if (bttv_verbose)
- printk("bttv%d: unloading\n",btv->c.nr);
+ pr_info("%d: unloading\n", btv->c.nr);
if (bttv_tvcards[btv->c.type].has_dvb)
flush_request_modules(btv);
@@ -4481,7 +4492,7 @@ static int bttv_suspend(struct pci_dev *pci_dev, pm_message_t state)
struct bttv_buffer_set idle;
unsigned long flags;
- dprintk("bttv%d: suspend %d\n", btv->c.nr, state.event);
+ dprintk("%d: suspend %d\n", btv->c.nr, state.event);
/* stop dma + irqs */
spin_lock_irqsave(&btv->s_lock,flags);
@@ -4517,14 +4528,13 @@ static int bttv_resume(struct pci_dev *pci_dev)
unsigned long flags;
int err;
- dprintk("bttv%d: resume\n", btv->c.nr);
+ dprintk("%d: resume\n", btv->c.nr);
/* restore pci state */
if (btv->state.disabled) {
err=pci_enable_device(pci_dev);
if (err) {
- printk(KERN_WARNING "bttv%d: Can't enable device.\n",
- btv->c.nr);
+ pr_warn("%d: Can't enable device\n", btv->c.nr);
return err;
}
btv->state.disabled = 0;
@@ -4532,8 +4542,7 @@ static int bttv_resume(struct pci_dev *pci_dev)
err=pci_set_power_state(pci_dev, PCI_D0);
if (err) {
pci_disable_device(pci_dev);
- printk(KERN_WARNING "bttv%d: Can't enable device.\n",
- btv->c.nr);
+ pr_warn("%d: Can't enable device\n", btv->c.nr);
btv->state.disabled = 1;
return err;
}
@@ -4585,22 +4594,21 @@ static int __init bttv_init_module(void)
bttv_num = 0;
- printk(KERN_INFO "bttv: driver version %s loaded\n",
- BTTV_VERSION);
+ pr_info("driver version %s loaded\n", BTTV_VERSION);
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
if (gbufsize > BTTV_MAX_FBUF)
gbufsize = BTTV_MAX_FBUF;
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
if (bttv_verbose)
- printk(KERN_INFO "bttv: using %d buffers with %dk (%d pages) each for capture\n",
- gbuffers, gbufsize >> 10, gbufsize >> PAGE_SHIFT);
+ pr_info("using %d buffers with %dk (%d pages) each for capture\n",
+ gbuffers, gbufsize >> 10, gbufsize >> PAGE_SHIFT);
bttv_check_chipset();
ret = bus_register(&bttv_sub_bus_type);
if (ret < 0) {
- printk(KERN_WARNING "bttv: bus_register error: %d\n", ret);
+ pr_warn("bus_register error: %d\n", ret);
return ret;
}
ret = pci_register_driver(&bttv_pci_driver);
diff --git a/drivers/media/video/bt8xx/bttv-gpio.c b/drivers/media/video/bt8xx/bttv-gpio.c
index 13ce72c04b3..922e8233fd0 100644
--- a/drivers/media/video/bt8xx/bttv-gpio.c
+++ b/drivers/media/video/bt8xx/bttv-gpio.c
@@ -26,6 +26,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -99,7 +101,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
kfree(sub);
return err;
}
- printk("bttv%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
+ pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
list_add_tail(&sub->list,&core->subs);
return 0;
}
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index d49b675045f..e3952af7e56 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -27,6 +27,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -154,9 +156,7 @@ bttv_i2c_sendbytes(struct bttv *btv, const struct i2c_msg *msg, int last)
if (retval == 0)
goto eio;
if (i2c_debug) {
- printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
- if (!(xmit & BT878_I2C_NOSTOP))
- printk(" >\n");
+ pr_cont(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
}
for (cnt = 1; cnt < msg->len; cnt++ ) {
@@ -170,19 +170,18 @@ bttv_i2c_sendbytes(struct bttv *btv, const struct i2c_msg *msg, int last)
goto err;
if (retval == 0)
goto eio;
- if (i2c_debug) {
- printk(" %02x", msg->buf[cnt]);
- if (!(xmit & BT878_I2C_NOSTOP))
- printk(" >\n");
- }
+ if (i2c_debug)
+ pr_cont(" %02x", msg->buf[cnt]);
}
+ if (!(xmit & BT878_I2C_NOSTOP))
+ pr_cont(">\n");
return msg->len;
eio:
retval = -EIO;
err:
if (i2c_debug)
- printk(" ERR: %d\n",retval);
+ pr_cont(" ERR: %d\n",retval);
return retval;
}
@@ -193,7 +192,7 @@ bttv_i2c_readbytes(struct bttv *btv, const struct i2c_msg *msg, int last)
u32 cnt;
int retval;
- for(cnt = 0; cnt < msg->len; cnt++) {
+ for (cnt = 0; cnt < msg->len; cnt++) {
xmit = (msg->addr << 25) | (1 << 24) | I2C_HW;
if (cnt < msg->len-1)
xmit |= BT848_I2C_W3B;
@@ -201,6 +200,12 @@ bttv_i2c_readbytes(struct bttv *btv, const struct i2c_msg *msg, int last)
xmit |= BT878_I2C_NOSTOP;
if (cnt)
xmit |= BT878_I2C_NOSTART;
+
+ if (i2c_debug) {
+ if (!(xmit & BT878_I2C_NOSTART))
+ pr_cont(" <R %02x", (msg->addr << 1) +1);
+ }
+
btwrite(xmit, BT848_I2C);
retval = bttv_i2c_wait_done(btv);
if (retval < 0)
@@ -209,20 +214,20 @@ bttv_i2c_readbytes(struct bttv *btv, const struct i2c_msg *msg, int last)
goto eio;
msg->buf[cnt] = ((u32)btread(BT848_I2C) >> 8) & 0xff;
if (i2c_debug) {
- if (!(xmit & BT878_I2C_NOSTART))
- printk(" <R %02x", (msg->addr << 1) +1);
- printk(" =%02x", msg->buf[cnt]);
- if (!(xmit & BT878_I2C_NOSTOP))
- printk(" >\n");
+ pr_cont(" =%02x", msg->buf[cnt]);
}
+ if (i2c_debug && !(xmit & BT878_I2C_NOSTOP))
+ pr_cont(" >\n");
}
+
+
return msg->len;
eio:
retval = -EIO;
err:
if (i2c_debug)
- printk(" ERR: %d\n",retval);
+ pr_cont(" ERR: %d\n",retval);
return retval;
}
@@ -234,7 +239,8 @@ static int bttv_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int
int i;
if (i2c_debug)
- printk("bt-i2c:");
+ pr_debug("bt-i2c:");
+
btwrite(BT848_INT_I2CDONE|BT848_INT_RACK, BT848_INT_STAT);
for (i = 0 ; i < num; i++) {
if (msgs[i].flags & I2C_M_RD) {
@@ -271,20 +277,20 @@ int bttv_I2CRead(struct bttv *btv, unsigned char addr, char *probe_for)
if (0 != btv->i2c_rc)
return -1;
if (bttv_verbose && NULL != probe_for)
- printk(KERN_INFO "bttv%d: i2c: checking for %s @ 0x%02x... ",
- btv->c.nr,probe_for,addr);
+ pr_info("%d: i2c: checking for %s @ 0x%02x... ",
+ btv->c.nr, probe_for, addr);
btv->i2c_client.addr = addr >> 1;
if (1 != i2c_master_recv(&btv->i2c_client, &buffer, 1)) {
if (NULL != probe_for) {
if (bttv_verbose)
- printk("not found\n");
+ pr_cont("not found\n");
} else
- printk(KERN_WARNING "bttv%d: i2c read 0x%x: error\n",
- btv->c.nr,addr);
+ pr_warn("%d: i2c read 0x%x: error\n",
+ btv->c.nr, addr);
return -1;
}
if (bttv_verbose && NULL != probe_for)
- printk("found\n");
+ pr_cont("found\n");
return buffer;
}
@@ -335,8 +341,8 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
rc = i2c_master_recv(c,&buf,0);
if (rc < 0)
continue;
- printk("%s: i2c scan: found device @ 0x%x [%s]\n",
- name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
+ pr_info("%s: i2c scan: found device @ 0x%x [%s]\n",
+ name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
}
}
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 677d70c0e1c..ef4c7cd4198 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -36,9 +38,10 @@ static int ir_rc5_remote_gap = 885;
module_param(ir_rc5_remote_gap, int, 0644);
#undef dprintk
-#define dprintk(arg...) do { \
- if (ir_debug >= 1) \
- printk(arg); \
+#define dprintk(fmt, ...) \
+do { \
+ if (ir_debug >= 1) \
+ pr_info(fmt, ##__VA_ARGS__); \
} while (0)
#define DEVNAME "bttv-input"
@@ -62,7 +65,7 @@ static void ir_handle_key(struct bttv *btv)
/* extract data */
data = ir_extract_bits(gpio, ir->mask_keycode);
- dprintk(KERN_INFO DEVNAME ": irq gpio=0x%x code=%d | %s%s%s\n",
+ dprintk("irq gpio=0x%x code=%d | %s%s%s\n",
gpio, data,
ir->polling ? "poll" : "irq",
(gpio & ir->mask_keydown) ? " down" : "",
@@ -96,7 +99,7 @@ static void ir_enltv_handle_key(struct bttv *btv)
keyup = (gpio & ir->mask_keyup) ? 1 << 31 : 0;
if ((ir->last_gpio & 0x7f) != data) {
- dprintk(KERN_INFO DEVNAME ": gpio=0x%x code=%d | %s\n",
+ dprintk("gpio=0x%x code=%d | %s\n",
gpio, data,
(gpio & ir->mask_keyup) ? " up" : "up/down");
@@ -107,7 +110,7 @@ static void ir_enltv_handle_key(struct bttv *btv)
if ((ir->last_gpio & 1 << 31) == keyup)
return;
- dprintk(KERN_INFO DEVNAME ":(cnt) gpio=0x%x code=%d | %s\n",
+ dprintk("(cnt) gpio=0x%x code=%d | %s\n",
gpio, data,
(gpio & ir->mask_keyup) ? " up" : "down");
@@ -177,13 +180,12 @@ static u32 bttv_rc5_decode(unsigned int code)
rc5 |= 1;
break;
case 3:
- dprintk(KERN_INFO DEVNAME ":rc5_decode(%x) bad code\n",
+ dprintk("rc5_decode(%x) bad code\n",
org_code);
return 0;
}
}
- dprintk(KERN_INFO DEVNAME ":"
- "code=%x, rc5=%x, start=%x, toggle=%x, address=%x, "
+ dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, "
"instr=%x\n", rc5, org_code, RC5_START(rc5),
RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5));
return rc5;
@@ -212,20 +214,20 @@ static void bttv_rc5_timer_end(unsigned long data)
/* Allow some timer jitter (RC5 is ~24ms anyway so this is ok) */
if (gap < 28000) {
- dprintk(KERN_INFO DEVNAME ": spurious timer_end\n");
+ dprintk("spurious timer_end\n");
return;
}
if (ir->last_bit < 20) {
/* ignore spurious codes (caused by light/other remotes) */
- dprintk(KERN_INFO DEVNAME ": short code: %x\n", ir->code);
+ dprintk("short code: %x\n", ir->code);
} else {
ir->code = (ir->code << ir->shift_by) | 1;
rc5 = bttv_rc5_decode(ir->code);
/* two start bits? */
if (RC5_START(rc5) != ir->start) {
- printk(KERN_INFO DEVNAME ":"
+ pr_info(DEVNAME ":"
" rc5 start bits invalid: %u\n", RC5_START(rc5));
/* right address? */
@@ -235,8 +237,7 @@ static void bttv_rc5_timer_end(unsigned long data)
/* Good code */
rc_keydown(ir->dev, instr, toggle);
- dprintk(KERN_INFO DEVNAME ":"
- " instruction %x, toggle %x\n",
+ dprintk("instruction %x, toggle %x\n",
instr, toggle);
}
}
@@ -265,7 +266,7 @@ static int bttv_rc5_irq(struct bttv *btv)
tv.tv_usec - ir->base_time.tv_usec;
}
- dprintk(KERN_INFO DEVNAME ": RC5 IRQ: gap %d us for %s\n",
+ dprintk("RC5 IRQ: gap %d us for %s\n",
gap, (gpio & 0x20) ? "mark" : "space");
/* remote IRQ? */
@@ -340,14 +341,14 @@ static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
/* poll IR chip */
if (1 != i2c_master_recv(ir->c, &b, 1)) {
- dprintk(KERN_INFO DEVNAME ": read error\n");
+ dprintk("read error\n");
return -EIO;
}
/* ignore 0xaa */
if (b==0xaa)
return 0;
- dprintk(KERN_INFO DEVNAME ": key %02x\n", b);
+ dprintk("key %02x\n", b);
/*
* NOTE:
@@ -517,7 +518,7 @@ int bttv_input_init(struct bttv *btv)
break;
}
if (NULL == ir_codes) {
- dprintk(KERN_INFO "Ooops: IR config error [card=%d]\n", btv->c.type);
+ dprintk("Ooops: IR config error [card=%d]\n", btv->c.type);
err = -ENODEV;
goto err_out_free;
}
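The dprintk() rework above uses the usual kernel idiom: wrap the conditional print in do { ... } while (0) so the macro acts as a single statement inside an unbraced if/else, and forward the arguments with ##__VA_ARGS__ so an empty argument list still compiles. A stand-alone sketch, assuming an illustrative debug flag and messages:

#include <linux/printk.h>
#include <linux/types.h>

static int ir_debug = 1;

#define dprintk(fmt, ...)				\
do {							\
	if (ir_debug >= 1)				\
		pr_info(fmt, ##__VA_ARGS__);		\
} while (0)

static void report(u32 gpio, u32 data, bool down)
{
	/* safe in an unbraced conditional, unlike the old "if (dbg) printk(arg...)" form */
	if (down)
		dprintk("irq gpio=0x%x code=%d\n", gpio, data);
	else
		dprintk("key released\n");
}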
diff --git a/drivers/media/video/bt8xx/bttv-risc.c b/drivers/media/video/bt8xx/bttv-risc.c
index 9b57d091da4..82cc47d2e3f 100644
--- a/drivers/media/video/bt8xx/bttv-risc.c
+++ b/drivers/media/video/bt8xx/bttv-risc.c
@@ -24,6 +24,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -473,8 +475,7 @@ bttv_set_dma(struct bttv *btv, int override)
capctl |= (btv->cap_ctl & 0x0c) ? 0x0c : 0x00; /* vbi data */
capctl |= override;
- d2printk(KERN_DEBUG
- "bttv%d: capctl=%x lirq=%d top=%08Lx/%08Lx even=%08Lx/%08Lx\n",
+ d2printk("%d: capctl=%x lirq=%d top=%08llx/%08llx even=%08llx/%08llx\n",
btv->c.nr,capctl,btv->loop_irq,
btv->cvbi ? (unsigned long long)btv->cvbi->top.dma : 0,
btv->curr.top ? (unsigned long long)btv->curr.top->top.dma : 0,
@@ -517,8 +518,8 @@ bttv_risc_init_main(struct bttv *btv)
if ((rc = btcx_riscmem_alloc(btv->c.pci,&btv->main,PAGE_SIZE)) < 0)
return rc;
- dprintk(KERN_DEBUG "bttv%d: risc main @ %08Lx\n",
- btv->c.nr,(unsigned long long)btv->main.dma);
+ dprintk("%d: risc main @ %08llx\n",
+ btv->c.nr, (unsigned long long)btv->main.dma);
btv->main.cpu[0] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC |
BT848_FIFO_STATUS_VRE);
@@ -557,12 +558,12 @@ bttv_risc_hook(struct bttv *btv, int slot, struct btcx_riscmem *risc,
unsigned long next = btv->main.dma + ((slot+2) << 2);
if (NULL == risc) {
- d2printk(KERN_DEBUG "bttv%d: risc=%p slot[%d]=NULL\n",
- btv->c.nr,risc,slot);
+ d2printk("%d: risc=%p slot[%d]=NULL\n", btv->c.nr, risc, slot);
btv->main.cpu[slot+1] = cpu_to_le32(next);
} else {
- d2printk(KERN_DEBUG "bttv%d: risc=%p slot[%d]=%08Lx irq=%d\n",
- btv->c.nr,risc,slot,(unsigned long long)risc->dma,irqflags);
+ d2printk("%d: risc=%p slot[%d]=%08llx irq=%d\n",
+ btv->c.nr, risc, slot,
+ (unsigned long long)risc->dma, irqflags);
cmd = BT848_RISC_JUMP;
if (irqflags) {
cmd |= BT848_RISC_IRQ;
@@ -708,8 +709,7 @@ bttv_buffer_risc(struct bttv *btv, struct bttv_buffer *buf)
const struct bttv_tvnorm *tvnorm = bttv_tvnorms + buf->tvnorm;
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
- dprintk(KERN_DEBUG
- "bttv%d: buffer field: %s format: %s size: %dx%d\n",
+ dprintk("%d: buffer field: %s format: %s size: %dx%d\n",
btv->c.nr, v4l2_field_names[buf->vb.field],
buf->fmt->name, buf->vb.width, buf->vb.height);
@@ -870,10 +870,9 @@ bttv_overlay_risc(struct bttv *btv,
struct bttv_buffer *buf)
{
/* check interleave, bottom+top fields */
- dprintk(KERN_DEBUG
- "bttv%d: overlay fields: %s format: %s size: %dx%d\n",
+ dprintk("%d: overlay fields: %s format: %s size: %dx%d\n",
btv->c.nr, v4l2_field_names[buf->vb.field],
- fmt->name,ov->w.width,ov->w.height);
+ fmt->name, ov->w.width, ov->w.height);
/* calculate geometry */
bttv_calc_geo(btv,&buf->geo,ov->w.width,ov->w.height,
diff --git a/drivers/media/video/bt8xx/bttv-vbi.c b/drivers/media/video/bt8xx/bttv-vbi.c
index e79a402fa6c..b433267d9aa 100644
--- a/drivers/media/video/bt8xx/bttv-vbi.c
+++ b/drivers/media/video/bt8xx/bttv-vbi.c
@@ -23,6 +23,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -65,8 +67,11 @@ MODULE_PARM_DESC(vbi_debug,"vbi code debug messages, default is 0 (no)");
#ifdef dprintk
# undef dprintk
#endif
-#define dprintk(fmt, arg...) if (vbi_debug) \
- printk(KERN_DEBUG "bttv%d/vbi: " fmt, btv->c.nr , ## arg)
+#define dprintk(fmt, ...) \
+do { \
+ if (vbi_debug) \
+ pr_debug("%d: " fmt, btv->c.nr, ##__VA_ARGS__); \
+} while (0)
#define IMAGE_SIZE(fmt) \
(((fmt)->count[0] + (fmt)->count[1]) * (fmt)->samples_per_line)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 318edf2830b..db943a8d580 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -310,9 +310,21 @@ extern unsigned int bttv_gpio;
extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
extern int init_bttv_i2c(struct bttv *btv);
-#define bttv_printk if (bttv_verbose) printk
-#define dprintk if (bttv_debug >= 1) printk
-#define d2printk if (bttv_debug >= 2) printk
+#define dprintk(fmt, ...) \
+do { \
+ if (bttv_debug >= 1) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+#define dprintk_cont(fmt, ...) \
+do { \
+ if (bttv_debug >= 1) \
+ pr_cont(fmt, ##__VA_ARGS__); \
+} while (0)
+#define d2printk(fmt, ...) \
+do { \
+ if (bttv_debug >= 2) \
+ printk(fmt, ##__VA_ARGS__); \
+} while (0)
#define BTTV_MAX_FBUF 0x208000
#define BTTV_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c
index dc5b07a20f6..59c797c1527 100644
--- a/drivers/media/video/cpia2/cpia2_usb.c
+++ b/drivers/media/video/cpia2/cpia2_usb.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/module.h>
#include "cpia2.h"
diff --git a/drivers/media/video/cx18/Makefile b/drivers/media/video/cx18/Makefile
index 2fadd9ded34..a86bab5893e 100644
--- a/drivers/media/video/cx18/Makefile
+++ b/drivers/media/video/cx18/Makefile
@@ -8,6 +8,6 @@ cx18-alsa-objs := cx18-alsa-main.o cx18-alsa-pcm.o
obj-$(CONFIG_VIDEO_CX18) += cx18.o
obj-$(CONFIG_VIDEO_CX18_ALSA) += cx18-alsa.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 9e2f870f425..c6ff32a6137 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -1085,6 +1085,8 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
setup.addr = ADDR_UNSET;
setup.type = cx->options.tuner;
setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
+ if (cx->options.radio > 0)
+ setup.mode_mask |= T_RADIO;
setup.tuner_callback = (setup.type == TUNER_XC2028) ?
cx18_reset_tuner_gpio : NULL;
cx18_call_all(cx, tuner, s_type_addr, &setup);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 18342072306..b9a94fc5146 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -409,6 +409,7 @@ struct cx18_stream {
/* Videobuf for YUV video */
u32 pixelformat;
+ u32 vb_bytes_per_frame;
struct list_head vb_capture; /* video capture queue */
spinlock_t vb_lock;
struct timer_list vb_timeout;
@@ -430,10 +431,6 @@ struct cx18_open_id {
u32 open_id;
int type;
struct cx18 *cx;
-
- struct videobuf_queue vbuf_q;
- spinlock_t s_lock; /* Protect vbuf_q */
- enum v4l2_buf_type vb_type;
};
static inline struct cx18_open_id *fh2id(struct v4l2_fh *fh)
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 07411f34885..14cb961c22b 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -784,8 +784,6 @@ int cx18_v4l2_close(struct file *filp)
cx18_release_stream(s);
} else {
cx18_stop_capture(id, 0);
- if (id->type == CX18_ENC_STREAM_TYPE_YUV)
- videobuf_mmap_free(&id->vbuf_q);
}
kfree(id);
mutex_unlock(&cx->serialize_lock);
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index afe0a29e720..66b1c15c354 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -160,12 +160,7 @@ static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
pixfmt->priv = 0;
if (id->type == CX18_ENC_STREAM_TYPE_YUV) {
pixfmt->pixelformat = s->pixelformat;
- /* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
- UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
- pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2;
- else
- pixfmt->sizeimage = pixfmt->height * 720 * 2;
+ pixfmt->sizeimage = s->vb_bytes_per_frame;
pixfmt->bytesperline = 720;
} else {
pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
@@ -296,6 +291,12 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
return -EBUSY;
s->pixelformat = fmt->fmt.pix.pixelformat;
+ /* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
+ UYVY YUV size is (Y=(h*720) + UV=(h*(720))) */
+ if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ s->vb_bytes_per_frame = h * 720 * 3 / 2;
+ else
+ s->vb_bytes_per_frame = h * 720 * 2;
mbus_fmt.width = cx->cxhdl.width = w;
mbus_fmt.height = cx->cxhdl.height = h;
@@ -463,13 +464,16 @@ static int cx18_s_register(struct file *file, void *fh,
static int cx18_querycap(struct file *file, void *fh,
struct v4l2_capability *vcap)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18_open_id *id = fh2id(fh);
+ struct cx18 *cx = id->cx;
strlcpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info),
"PCI:%s", pci_name(cx->pci_dev));
vcap->capabilities = cx->v4l2_cap; /* capabilities */
+ if (id->type == CX18_ENC_STREAM_TYPE_YUV)
+ vcap->capabilities |= V4L2_CAP_STREAMING;
return 0;
}
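The cached s->vb_bytes_per_frame replaces the inline size math: HM12 is a 4:2:0 macroblock format at 12 bits per pixel, so a 720-pixel-wide frame needs height * 720 * 3 / 2 bytes, while the packed 4:2:2 case needs height * 720 * 2. A sketch of the same rule (the helper name is illustrative, not part of cx18):

#include <linux/types.h>
#include <linux/videodev2.h>

static u32 yuv_frame_bytes(u32 pixelformat, u32 height)
{
	if (pixelformat == V4L2_PIX_FMT_HM12)	/* 4:2:0, 12 bpp */
		return height * 720 * 3 / 2;	/* 480 lines -> 518400 bytes */
	return height * 720 * 2;		/* packed 4:2:2, 16 bpp */
}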
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index c07191e09fc..0c7796e76ac 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -196,7 +196,7 @@ static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
}
/* If we've filled the buffer as per the callers res then dispatch it */
- if (vb_buf->bytes_used >= (vb_buf->vb.width * vb_buf->vb.height * 2)) {
+ if (vb_buf->bytes_used >= s->vb_bytes_per_frame) {
dispatch = 1;
vb_buf->bytes_used = 0;
}
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 852f420fd27..638cca156b5 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -138,6 +138,12 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
buf->tvnorm = cx->std;
s->pixelformat = pixelformat;
+ /* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
+ UYVY YUV size is (Y=(h*720) + UV=(h*(720))) */
+ if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ s->vb_bytes_per_frame = height * 720 * 3 / 2;
+ else
+ s->vb_bytes_per_frame = height * 720 * 2;
cx18_dma_free(q, s, buf);
}
@@ -154,6 +160,12 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
buf->tvnorm = cx->std;
s->pixelformat = pixelformat;
+ /* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
+ UYVY YUV size is (Y=(h*720) + UV=(h*(720))) */
+ if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ s->vb_bytes_per_frame = height * 720 * 3 / 2;
+ else
+ s->vb_bytes_per_frame = height * 720 * 2;
rc = videobuf_iolock(q, &buf->vb, NULL);
if (rc != 0)
goto fail;
@@ -287,6 +299,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
/* Assume the previous pixel default */
s->pixelformat = V4L2_PIX_FMT_HM12;
+ s->vb_bytes_per_frame = cx->cxhdl.height * 720 * 3 / 2;
}
}
diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile
index 2c248435544..b3348975c7c 100644
--- a/drivers/media/video/cx231xx/Makefile
+++ b/drivers/media/video/cx231xx/Makefile
@@ -8,9 +8,9 @@ obj-$(CONFIG_VIDEO_CX231XX) += cx231xx.o
obj-$(CONFIG_VIDEO_CX231XX_ALSA) += cx231xx-alsa.o
obj-$(CONFIG_VIDEO_CX231XX_DVB) += cx231xx-dvb.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/dvb/dvb-usb
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig
index caab1bfb79e..b391e9bda87 100644
--- a/drivers/media/video/cx23885/Kconfig
+++ b/drivers/media/video/cx23885/Kconfig
@@ -38,7 +38,7 @@ config VIDEO_CX23885
config MEDIA_ALTERA_CI
tristate "Altera FPGA based CI module"
depends on VIDEO_CX23885 && DVB_CORE
- select STAPL_ALTERA
+ select ALTERA_STAPL
---help---
An Altera FPGA CI module for NetUP Dual DVB-T/C RF CI card.
diff --git a/drivers/media/video/cx23885/Makefile b/drivers/media/video/cx23885/Makefile
index 23293c7b6ac..f81f2796a0f 100644
--- a/drivers/media/video/cx23885/Makefile
+++ b/drivers/media/video/cx23885/Makefile
@@ -2,14 +2,14 @@ cx23885-objs := cx23885-cards.o cx23885-video.o cx23885-vbi.o \
cx23885-core.o cx23885-i2c.o cx23885-dvb.o cx23885-417.o \
cx23885-ioctl.o cx23885-ir.o cx23885-av.o cx23885-input.o \
cx23888-ir.o netup-init.o cimax2.o netup-eeprom.o \
- cx23885-f300.o
+ cx23885-f300.o cx23885-alsa.o
obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
obj-$(CONFIG_MEDIA_ALTERA_CI) += altera-ci.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
-EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
+ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/video/cx23885/cx23885-alsa.c b/drivers/media/video/cx23885/cx23885-alsa.c
new file mode 100644
index 00000000000..795169237e7
--- /dev/null
+++ b/drivers/media/video/cx23885/cx23885-alsa.c
@@ -0,0 +1,535 @@
+/*
+ *
+ * Support for CX23885 analog audio capture
+ *
+ * (c) 2008 Mijhail Moreyra <mijhail.moreyra@gmail.com>
+ * Adapted from cx88-alsa.c
+ * (c) 2009 Steven Toth <stoth@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+
+#include <asm/delay.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
+#include <sound/tlv.h>
+
+
+#include "cx23885.h"
+#include "cx23885-reg.h"
+
+#define AUDIO_SRAM_CHANNEL SRAM_CH07
+
+#define dprintk(level, fmt, arg...) if (audio_debug >= level) \
+ printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg)
+
+#define dprintk_core(level, fmt, arg...) if (audio_debug >= level) \
+ printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)
+
+/****************************************************************************
+ Module global static vars
+ ****************************************************************************/
+
+static unsigned int disable_analog_audio;
+module_param(disable_analog_audio, int, 0644);
+MODULE_PARM_DESC(disable_analog_audio, "disable analog audio ALSA driver");
+
+static unsigned int audio_debug;
+module_param(audio_debug, int, 0644);
+MODULE_PARM_DESC(audio_debug, "enable debug messages [analog audio]");
+
+/****************************************************************************
+ Board specific functions
+ ****************************************************************************/
+
+/* Constants taken from cx88-reg.h */
+#define AUD_INT_DN_RISCI1 (1 << 0)
+#define AUD_INT_UP_RISCI1 (1 << 1)
+#define AUD_INT_RDS_DN_RISCI1 (1 << 2)
+#define AUD_INT_DN_RISCI2 (1 << 4) /* yes, 3 is skipped */
+#define AUD_INT_UP_RISCI2 (1 << 5)
+#define AUD_INT_RDS_DN_RISCI2 (1 << 6)
+#define AUD_INT_DN_SYNC (1 << 12)
+#define AUD_INT_UP_SYNC (1 << 13)
+#define AUD_INT_RDS_DN_SYNC (1 << 14)
+#define AUD_INT_OPC_ERR (1 << 16)
+#define AUD_INT_BER_IRQ (1 << 20)
+#define AUD_INT_MCHG_IRQ (1 << 21)
+#define GP_COUNT_CONTROL_RESET 0x3
+
+/*
+ * BOARD Specific: Sets audio DMA
+ */
+
+static int cx23885_start_audio_dma(struct cx23885_audio_dev *chip)
+{
+ struct cx23885_audio_buffer *buf = chip->buf;
+ struct cx23885_dev *dev = chip->dev;
+ struct sram_channel *audio_ch =
+ &dev->sram_channels[AUDIO_SRAM_CHANNEL];
+
+ dprintk(1, "%s()\n", __func__);
+
+ /* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
+ cx_clear(AUD_INT_DMA_CTL, 0x11);
+
+ /* setup fifo + format - out channel */
+ cx23885_sram_channel_setup(chip->dev, audio_ch, buf->bpl,
+ buf->risc.dma);
+
+ /* sets bpl size */
+ cx_write(AUD_INT_A_LNGTH, buf->bpl);
+
+ /* This is required to get good audio (1 seems to be ok) */
+ cx_write(AUD_INT_A_MODE, 1);
+
+ /* reset counter */
+ cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
+ atomic_set(&chip->count, 0);
+
+ dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
+ "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
+ chip->num_periods, buf->bpl * chip->num_periods);
+
+ /* Enables corresponding bits at AUD_INT_STAT */
+ cx_write(AUDIO_INT_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
+ AUD_INT_DN_RISCI1);
+
+ /* Clean any pending interrupt bits already set */
+ cx_write(AUDIO_INT_INT_STAT, ~0);
+
+ /* enable audio irqs */
+ cx_set(PCI_INT_MSK, chip->dev->pci_irqmask | PCI_MSK_AUD_INT);
+
+ /* start dma */
+ cx_set(DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */
+ cx_set(AUD_INT_DMA_CTL, 0x11); /* audio downstream FIFO and
+ RISC enable */
+ if (audio_debug)
+ cx23885_sram_channel_dump(chip->dev, audio_ch);
+
+ return 0;
+}
+
+/*
+ * BOARD Specific: Resets audio DMA
+ */
+static int cx23885_stop_audio_dma(struct cx23885_audio_dev *chip)
+{
+ struct cx23885_dev *dev = chip->dev;
+ dprintk(1, "Stopping audio DMA\n");
+
+ /* stop dma */
+ cx_clear(AUD_INT_DMA_CTL, 0x11);
+
+ /* disable irqs */
+ cx_clear(PCI_INT_MSK, PCI_MSK_AUD_INT);
+ cx_clear(AUDIO_INT_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
+ AUD_INT_DN_RISCI1);
+
+ if (audio_debug)
+ cx23885_sram_channel_dump(chip->dev,
+ &dev->sram_channels[AUDIO_SRAM_CHANNEL]);
+
+ return 0;
+}
+
+/*
+ * BOARD Specific: Handles audio IRQ
+ */
+int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask)
+{
+ struct cx23885_audio_dev *chip = dev->audio_dev;
+
+ if (0 == (status & mask))
+ return 0;
+
+ cx_write(AUDIO_INT_INT_STAT, status);
+
+ /* risc op code error */
+ if (status & AUD_INT_OPC_ERR) {
+ printk(KERN_WARNING "%s/1: Audio risc op code error\n",
+ dev->name);
+ cx_clear(AUD_INT_DMA_CTL, 0x11);
+ cx23885_sram_channel_dump(dev,
+ &dev->sram_channels[AUDIO_SRAM_CHANNEL]);
+ }
+ if (status & AUD_INT_DN_SYNC) {
+ dprintk(1, "Downstream sync error\n");
+ cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
+ return 1;
+ }
+ /* risc1 downstream */
+ if (status & AUD_INT_DN_RISCI1) {
+ atomic_set(&chip->count, cx_read(AUD_INT_A_GPCNT));
+ snd_pcm_period_elapsed(chip->substream);
+ }
+ /* FIXME: Does any other status deserve special handling? */
+
+ return 1;
+}
+
+static int dsp_buffer_free(struct cx23885_audio_dev *chip)
+{
+ BUG_ON(!chip->dma_size);
+
+ dprintk(2, "Freeing buffer\n");
+ videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc);
+ videobuf_dma_free(chip->dma_risc);
+ btcx_riscmem_free(chip->pci, &chip->buf->risc);
+ kfree(chip->buf);
+
+ chip->dma_risc = NULL;
+ chip->dma_size = 0;
+
+ return 0;
+}
+
+/****************************************************************************
+ ALSA PCM Interface
+ ****************************************************************************/
+
+/*
+ * Digital hardware definition
+ */
+#define DEFAULT_FIFO_SIZE 4096
+
+static struct snd_pcm_hardware snd_cx23885_digital_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ /* Analog audio output will be full of clicks and pops if there
+ are not exactly four lines in the SRAM FIFO buffer. */
+ .period_bytes_min = DEFAULT_FIFO_SIZE/4,
+ .period_bytes_max = DEFAULT_FIFO_SIZE/4,
+ .periods_min = 1,
+ .periods_max = 1024,
+ .buffer_bytes_max = (1024*1024),
+};
+
+/*
+ * audio pcm capture open callback
+ */
+static int snd_cx23885_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ if (!chip) {
+ printk(KERN_ERR "BUG: cx23885 can't find device struct."
+ " Can't proceed with open\n");
+ return -ENODEV;
+ }
+
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
+ goto _error;
+
+ chip->substream = substream;
+
+ runtime->hw = snd_cx23885_digital_hw;
+
+ if (chip->dev->sram_channels[AUDIO_SRAM_CHANNEL].fifo_size !=
+ DEFAULT_FIFO_SIZE) {
+ unsigned int bpl = chip->dev->
+ sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 4;
+ bpl &= ~7; /* must be multiple of 8 */
+ runtime->hw.period_bytes_min = bpl;
+ runtime->hw.period_bytes_max = bpl;
+ }
+
+ return 0;
+_error:
+ dprintk(1, "Error opening PCM!\n");
+ return err;
+}
+
+/*
+ * audio close callback
+ */
+static int snd_cx23885_close(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+/*
+ * hw_params callback
+ */
+static int snd_cx23885_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
+ struct videobuf_dmabuf *dma;
+
+ struct cx23885_audio_buffer *buf;
+ int ret;
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ chip->period_size = params_period_bytes(hw_params);
+ chip->num_periods = params_periods(hw_params);
+ chip->dma_size = chip->period_size * params_periods(hw_params);
+
+ BUG_ON(!chip->dma_size);
+ BUG_ON(chip->num_periods & (chip->num_periods-1));
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (NULL == buf)
+ return -ENOMEM;
+
+ buf->bpl = chip->period_size;
+
+ dma = &buf->dma;
+ videobuf_dma_init(dma);
+ ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE,
+ (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
+ if (ret < 0)
+ goto error;
+
+ ret = videobuf_dma_map(&chip->pci->dev, dma);
+ if (ret < 0)
+ goto error;
+
+ ret = cx23885_risc_databuffer(chip->pci, &buf->risc, dma->sglist,
+ chip->period_size, chip->num_periods, 1);
+ if (ret < 0)
+ goto error;
+
+ /* Loop back to start of program */
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
+ buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
+
+ chip->buf = buf;
+ chip->dma_risc = dma;
+
+ substream->runtime->dma_area = chip->dma_risc->vaddr;
+ substream->runtime->dma_bytes = chip->dma_size;
+ substream->runtime->dma_addr = 0;
+
+ return 0;
+
+error:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * hw free callback
+ */
+static int snd_cx23885_hw_free(struct snd_pcm_substream *substream)
+{
+
+ struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * prepare callback
+ */
+static int snd_cx23885_prepare(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+/*
+ * trigger callback
+ */
+static int snd_cx23885_card_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
+ int err;
+
+ /* Local interrupts are already disabled by ALSA */
+ spin_lock(&chip->lock);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ err = cx23885_start_audio_dma(chip);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ err = cx23885_stop_audio_dma(chip);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ spin_unlock(&chip->lock);
+
+ return err;
+}
+
+/*
+ * pointer callback
+ */
+static snd_pcm_uframes_t snd_cx23885_pointer(
+ struct snd_pcm_substream *substream)
+{
+ struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u16 count;
+
+ count = atomic_read(&chip->count);
+
+ return runtime->period_size * (count & (runtime->periods-1));
+}
+
+/*
+ * page callback (needed for mmap)
+ */
+static struct page *snd_cx23885_page(struct snd_pcm_substream *substream,
+ unsigned long offset)
+{
+ void *pageptr = substream->runtime->dma_area + offset;
+ return vmalloc_to_page(pageptr);
+}
+
+/*
+ * operators
+ */
+static struct snd_pcm_ops snd_cx23885_pcm_ops = {
+ .open = snd_cx23885_pcm_open,
+ .close = snd_cx23885_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_cx23885_hw_params,
+ .hw_free = snd_cx23885_hw_free,
+ .prepare = snd_cx23885_prepare,
+ .trigger = snd_cx23885_card_trigger,
+ .pointer = snd_cx23885_pointer,
+ .page = snd_cx23885_page,
+};
+
+/*
+ * create a PCM device
+ */
+static int snd_cx23885_pcm(struct cx23885_audio_dev *chip, int device,
+ char *name)
+{
+ int err;
+ struct snd_pcm *pcm;
+
+ err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = chip;
+ strcpy(pcm->name, name);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx23885_pcm_ops);
+
+ return 0;
+}
+
+/****************************************************************************
+ Basic Flow for Sound Devices
+ ****************************************************************************/
+
+/*
+ * Alsa Constructor - Component probe
+ */
+
+struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
+{
+ struct snd_card *card;
+ struct cx23885_audio_dev *chip;
+ int err;
+
+ if (disable_analog_audio)
+ return NULL;
+
+ if (dev->sram_channels[AUDIO_SRAM_CHANNEL].cmds_start == 0) {
+ printk(KERN_WARNING "%s(): Missing SRAM channel configuration "
+ "for analog TV Audio\n", __func__);
+ return NULL;
+ }
+
+ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, sizeof(struct cx23885_audio_dev), &card);
+ if (err < 0)
+ goto error;
+
+ chip = (struct cx23885_audio_dev *) card->private_data;
+ chip->dev = dev;
+ chip->pci = dev->pci;
+ chip->card = card;
+ spin_lock_init(&chip->lock);
+
+ snd_card_set_dev(card, &dev->pci->dev);
+
+ err = snd_cx23885_pcm(chip, 0, "CX23885 Digital");
+ if (err < 0)
+ goto error;
+
+ strcpy(card->driver, "CX23885");
+ sprintf(card->shortname, "Conexant CX23885");
+ sprintf(card->longname, "%s at %s", card->shortname, dev->name);
+
+ err = snd_card_register(card);
+ if (err < 0)
+ goto error;
+
+ dprintk(0, "registered ALSA audio device\n");
+
+ return chip;
+
+error:
+ snd_card_free(card);
+ printk(KERN_ERR "%s(): Failed to register analog "
+ "audio adapter\n", __func__);
+
+ return NULL;
+}
+
+/*
+ * ALSA destructor
+ */
+void cx23885_audio_unregister(struct cx23885_dev *dev)
+{
+ struct cx23885_audio_dev *chip = dev->audio_dev;
+
+ snd_card_free(chip->card);
+}
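One detail of the new ALSA glue worth noting: snd_cx23885_pointer() turns the hardware period counter into a ring-buffer position with a bitmask, which only works because the period count is constrained to a power of two (snd_pcm_hw_constraint_pow2() in the open callback, BUG_ON() in hw_params). A small sketch of that arithmetic, with example numbers:

#include <sound/pcm.h>

/* count & (periods - 1) == count % periods when periods is a power of two */
static snd_pcm_uframes_t example_pointer(unsigned int count,
					 unsigned int periods,
					 unsigned int period_size)
{
	return period_size * (count & (periods - 1));
}

/* e.g. example_pointer(11, 8, 1024) -> 3072 frames into the buffer */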
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 76b7563de39..c3cf08945e4 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -25,8 +25,8 @@
#include <linux/delay.h>
#include <media/cx25840.h>
#include <linux/firmware.h>
+#include <misc/altera.h>
-#include "../../../staging/altera-stapl/altera.h"
#include "cx23885.h"
#include "tuner-xc2028.h"
#include "netup-eeprom.h"
@@ -106,12 +106,14 @@ struct cx23885_board cx23885_boards[] = {
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN5_CH2 |
CX25840_VIN2_CH1,
+ .amux = CX25840_AUDIO8,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_SVIDEO,
@@ -119,6 +121,7 @@ struct cx23885_board cx23885_boards[] = {
CX25840_VIN4_CH2 |
CX25840_VIN8_CH1 |
CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0,
} },
},
@@ -153,7 +156,30 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1500] = {
.name = "Hauppauge WinTV-HVR1500",
+ .porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61, /* 0xc2 >> 1 */
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1,
+ .gpio0 = 0,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .gpio0 = 0,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .gpio0 = 0,
+ } },
},
[CX23885_BOARD_HAUPPAUGE_HVR1200] = {
.name = "Hauppauge WinTV-HVR1200",
@@ -387,6 +413,31 @@ struct cx23885_board cx23885_boards[] = {
.vmux = CX25840_COMPOSITE1,
} },
},
+ [CX23885_BOARD_MPX885] = {
+ .name = "MPX-885",
+ .porta = CX23885_ANALOG_VIDEO,
+ .input = {{
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_COMPOSITE1,
+ .amux = CX25840_AUDIO6,
+ .gpio0 = 0,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE2,
+ .vmux = CX25840_COMPOSITE2,
+ .amux = CX25840_AUDIO6,
+ .gpio0 = 0,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE3,
+ .vmux = CX25840_COMPOSITE3,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE4,
+ .vmux = CX25840_COMPOSITE4,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0,
+ } },
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -1415,6 +1466,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
+ case CX23885_BOARD_HAUPPAUGE_HVR1500:
+ case CX23885_BOARD_MPX885:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index ee41a8882f5..40e68b22015 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(card, "card type");
#define dprintk(level, fmt, arg...)\
do { if (debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
} while (0)
static unsigned int cx23885_devcount;
@@ -155,12 +155,12 @@ static struct sram_channel cx23885_sram_channels[] = {
.cnt2_reg = DMA5_CNT2,
},
[SRAM_CH07] = {
- .name = "ch7",
- .cmds_start = 0x0,
- .ctrl_start = 0x0,
- .cdt = 0x0,
- .fifo_start = 0x0,
- .fifo_size = 0x0,
+ .name = "TV Audio",
+ .cmds_start = 0x10190,
+ .ctrl_start = 0x10480,
+ .cdt = 0x10a00,
+ .fifo_start = 0x7000,
+ .fifo_size = 0x1000,
.ptr1_reg = DMA6_PTR1,
.ptr2_reg = DMA6_PTR2,
.cnt1_reg = DMA6_CNT1,
@@ -1082,10 +1082,10 @@ static void cx23885_dev_unregister(struct cx23885_dev *dev)
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
- unsigned int lines)
+ unsigned int lines, unsigned int lpi)
{
struct scatterlist *sg;
- unsigned int line, todo;
+ unsigned int line, todo, sol;
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
@@ -1098,16 +1098,22 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
offset -= sg_dma_len(sg);
sg++;
}
+
+ if (lpi && line > 0 && !(line % lpi))
+ sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
+ else
+ sol = RISC_SOL;
+
if (bpl <= sg_dma_len(sg)-offset) {
/* fits into current chunk */
- *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
+ *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
- *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
+ *(rp++) = cpu_to_le32(RISC_WRITE|sol|
(sg_dma_len(sg)-offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
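The new lpi ("lines per IRQ") argument only changes the start-of-line flags: every lpi lines the write instruction also raises RISC_IRQ1 and bumps the counter, which is how the audio DMA gets a period-completion interrupt without a separate sync program. A sketch of just that selection (the flag names are the driver's RISC opcodes; the helper itself is illustrative):

static u32 sol_flags(unsigned int line, unsigned int lpi)
{
	if (lpi && line > 0 && !(line % lpi))
		return RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;	/* interrupt + count */
	return RISC_SOL;
}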
@@ -1164,10 +1170,10 @@ int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
rp = risc->cpu;
if (UNSET != top_offset)
rp = cx23885_risc_field(rp, sglist, top_offset, 0,
- bpl, padding, lines);
+ bpl, padding, lines, 0);
if (UNSET != bottom_offset)
rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
- bpl, padding, lines);
+ bpl, padding, lines, 0);
/* save pointer to jmp instruction address */
risc->jmp = rp;
@@ -1175,11 +1181,11 @@ int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
return 0;
}
-static int cx23885_risc_databuffer(struct pci_dev *pci,
+int cx23885_risc_databuffer(struct pci_dev *pci,
struct btcx_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
- unsigned int lines)
+ unsigned int lines, unsigned int lpi)
{
u32 instructions;
__le32 *rp;
@@ -1199,7 +1205,55 @@ static int cx23885_risc_databuffer(struct pci_dev *pci,
/* write risc instructions */
rp = risc->cpu;
- rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);
+ rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
+ bpl, 0, lines, lpi);
+
+ /* save pointer to jmp instruction address */
+ risc->jmp = rp;
+ BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
+ return 0;
+}
+
+int cx23885_risc_vbibuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+ struct scatterlist *sglist, unsigned int top_offset,
+ unsigned int bottom_offset, unsigned int bpl,
+ unsigned int padding, unsigned int lines)
+{
+ u32 instructions, fields;
+ __le32 *rp;
+ int rc;
+
+ fields = 0;
+ if (UNSET != top_offset)
+ fields++;
+ if (UNSET != bottom_offset)
+ fields++;
+
+ /* estimate risc mem: worst case is one write per page border +
+ one write per scan line + syncs + jump (all 2 dwords). Padding
+ can cause next bpl to start close to a page border. First DMA
+ region may be smaller than PAGE_SIZE */
+ /* write and jump need an extra dword */
+ instructions = fields * (1 + ((bpl + padding) * lines)
+ / PAGE_SIZE + lines);
+ instructions += 2;
+ rc = btcx_riscmem_alloc(pci, risc, instructions*12);
+ if (rc < 0)
+ return rc;
+ /* write risc instructions */
+ rp = risc->cpu;
+
+ /* Sync to line 6, so US CC line 21 will appear in line '12'
+ * in the userland vbi payload */
+ if (UNSET != top_offset)
+ rp = cx23885_risc_field(rp, sglist, top_offset, 6,
+ bpl, padding, lines, 0);
+
+ if (UNSET != bottom_offset)
+ rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x207,
+ bpl, padding, lines, 0);
+
+
/* save pointer to jmp instruction address */
risc->jmp = rp;
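The over-allocation comment above boils down to a simple worst case: one sync per field, one write per scan line plus one extra write per possible page crossing, and a trailing jump, with each instruction taking up to 12 bytes (3 dwords). A worked example, assuming illustrative NTSC VBI numbers:

#include <linux/mm.h>
#include <linux/types.h>

/* bpl = 1440, no padding, 17 lines, both fields -- example values only */
static u32 example_risc_bytes(void)
{
	u32 fields = 2, bpl = 1440, padding = 0, lines = 17;
	u32 instructions;

	instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
	instructions += 2;			/* final jump */
	/* with PAGE_SIZE = 4096: 2 * (1 + 5 + 17) + 2 = 48 instructions */
	return instructions * 12;		/* 576 bytes for btcx_riscmem_alloc() */
}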
@@ -1207,6 +1261,7 @@ static int cx23885_risc_databuffer(struct pci_dev *pci,
return 0;
}
+
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
u32 reg, u32 mask, u32 value)
{
@@ -1517,7 +1572,7 @@ int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
goto fail;
cx23885_risc_databuffer(dev->pci, &buf->risc,
videobuf_to_dma(&buf->vb)->sglist,
- buf->vb.width, buf->vb.height);
+ buf->vb.width, buf->vb.height, 0);
}
buf->vb.state = VIDEOBUF_PREPARED;
return 0;
@@ -1741,15 +1796,19 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
struct cx23885_tsport *ts2 = &dev->ts2;
u32 pci_status, pci_mask;
u32 vida_status, vida_mask;
+ u32 audint_status, audint_mask;
u32 ts1_status, ts1_mask;
u32 ts2_status, ts2_mask;
int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
+ int audint_count = 0;
bool subdev_handled;
pci_status = cx_read(PCI_INT_STAT);
pci_mask = cx23885_irq_get_mask(dev);
vida_status = cx_read(VID_A_INT_STAT);
vida_mask = cx_read(VID_A_INT_MSK);
+ audint_status = cx_read(AUDIO_INT_INT_STAT);
+ audint_mask = cx_read(AUDIO_INT_INT_MSK);
ts1_status = cx_read(VID_B_INT_STAT);
ts1_mask = cx_read(VID_B_INT_MSK);
ts2_status = cx_read(VID_C_INT_STAT);
@@ -1759,12 +1818,15 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
goto out;
vida_count = cx_read(VID_A_GPCNT);
+ audint_count = cx_read(AUD_INT_A_GPCNT);
ts1_count = cx_read(ts1->reg_gpcnt);
ts2_count = cx_read(ts2->reg_gpcnt);
dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
pci_status, pci_mask);
dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
vida_status, vida_mask, vida_count);
+ dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
+ audint_status, audint_mask, audint_count);
dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
ts1_status, ts1_mask, ts1_count);
dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
@@ -1861,6 +1923,9 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
if (vida_status)
handled += cx23885_video_irq(dev, vida_status);
+ if (audint_status)
+ handled += cx23885_audio_irq(dev, audint_status, audint_mask);
+
if (pci_status & PCI_MSK_IR) {
subdev_handled = false;
v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index aa83f07b1b0..bcb45be44bb 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -844,7 +844,7 @@ static int dvb_register(struct cx23885_tsport *port)
static struct xc2028_ctrl ctl = {
.fname = XC3028L_DEFAULT_FIRMWARE,
.max_len = 64,
- .demod = 5000,
+ .demod = XC3028_FE_DIBCOM52,
/* This is true for all demods with
v36 firmware? */
.type = XC2028_D2633,
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index 307ff543c25..0ff7a9e98f3 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -287,6 +287,7 @@ static char *i2c_devs[128] = {
[0x32 >> 1] = "cx24227",
[0x88 >> 1] = "cx25837",
[0x84 >> 1] = "tda8295",
+ [0x98 >> 1] = "flatiron",
[0xa0 >> 1] = "eeprom",
[0xc0 >> 1] = "tuner/mt2131/tda8275",
[0xc2 >> 1] = "tuner/mt2131/tda8275/xc5000/xc3028",
diff --git a/drivers/media/video/cx23885/cx23885-reg.h b/drivers/media/video/cx23885/cx23885-reg.h
index c87ac682ebb..a99936e0cbc 100644
--- a/drivers/media/video/cx23885/cx23885-reg.h
+++ b/drivers/media/video/cx23885/cx23885-reg.h
@@ -203,6 +203,7 @@ Channel manager Data Structure entry = 20 DWORD
#define SD2_BIAS_CTRL 0x0000000A
#define AMP_BIAS_CTRL 0x0000000C
#define CH_PWR_CTRL1 0x0000000E
+#define FLD_CH_SEL (1 << 3)
#define CH_PWR_CTRL2 0x0000000F
#define DSM_STATUS1 0x00000010
#define DSM_STATUS2 0x00000011
@@ -271,7 +272,9 @@ Channel manager Data Structure entry = 20 DWORD
#define VID_BC_MSK_OPC_ERR (1 << 16)
#define VID_BC_MSK_SYNC (1 << 12)
#define VID_BC_MSK_OF (1 << 8)
+#define VID_BC_MSK_VBI_RISCI2 (1 << 5)
#define VID_BC_MSK_RISCI2 (1 << 4)
+#define VID_BC_MSK_VBI_RISCI1 (1 << 1)
#define VID_BC_MSK_RISCI1 1
#define VID_C_INT_MSK 0x00040040
diff --git a/drivers/media/video/cx23885/cx23885-vbi.c b/drivers/media/video/cx23885/cx23885-vbi.c
index c0b60382ad1..a1154f035bc 100644
--- a/drivers/media/video/cx23885/cx23885-vbi.c
+++ b/drivers/media/video/cx23885/cx23885-vbi.c
@@ -41,6 +41,12 @@ MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
/* ------------------------------------------------------------------ */
+#define VBI_LINE_LENGTH 1440
+#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
+#define NTSC_VBI_END_LINE 21
+#define NTSC_VBI_LINES (NTSC_VBI_END_LINE - NTSC_VBI_START_LINE + 1)
+
+
int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -49,43 +55,86 @@ int cx23885_vbi_fmt(struct file *file, void *priv,
if (dev->tvnorm & V4L2_STD_525_60) {
/* ntsc */
- f->fmt.vbi.sampling_rate = 28636363;
+ f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
+ f->fmt.vbi.sampling_rate = 27000000;
+ f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
+ f->fmt.vbi.offset = 0;
+ f->fmt.vbi.flags = 0;
f->fmt.vbi.start[0] = 10;
- f->fmt.vbi.start[1] = 273;
-
+ f->fmt.vbi.count[0] = 17;
+ f->fmt.vbi.start[1] = 263 + 10 + 1;
+ f->fmt.vbi.count[1] = 17;
} else if (dev->tvnorm & V4L2_STD_625_50) {
/* pal */
f->fmt.vbi.sampling_rate = 35468950;
f->fmt.vbi.start[0] = 7 - 1;
f->fmt.vbi.start[1] = 319 - 1;
}
+
return 0;
}
+/* We're given the Video Interrupt status register.
+ * The cx23885_video_irq() func has already validated
+ * the potential error bits, we just need to
+ * deal with the vbi payload and return an indication of
+ * whether we actually processed any payload.
+ */
+int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
+{
+ u32 count;
+ int handled = 0;
+
+ if (status & VID_BC_MSK_VBI_RISCI1) {
+ dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
+ spin_lock(&dev->slock);
+ count = cx_read(VID_A_GPCNT);
+ cx23885_video_wakeup(dev, &dev->vbiq, count);
+ spin_unlock(&dev->slock);
+ handled++;
+ }
+
+ if (status & VID_BC_MSK_VBI_RISCI2) {
+ dprintk(1, "%s() VID_BC_MSK_VBI_RISCI2\n", __func__);
+ dprintk(2, "stopper vbi\n");
+ spin_lock(&dev->slock);
+ cx23885_restart_vbi_queue(dev, &dev->vbiq);
+ spin_unlock(&dev->slock);
+ handled++;
+ }
+
+ return handled;
+}
+
static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q,
struct cx23885_buffer *buf)
{
+ dprintk(1, "%s()\n", __func__);
+
/* setup fifo + format */
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
buf->vb.width, buf->risc.dma);
/* reset counter */
+ cx_write(VID_A_GPCNT_CTL, 3);
+ cx_write(VID_A_VBI_CTRL, 3);
+ cx_write(VBI_A_GPCNT_CTL, 3);
q->count = 1;
- /* enable irqs */
+ /* enable irq */
cx23885_irq_add_enable(dev, 0x01);
cx_set(VID_A_INT_MSK, 0x000022);
/* start dma */
cx_set(DEV_CNTRL2, (1<<5));
- cx_set(VID_A_DMA_CTL, 0x00000022);
+ cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
return 0;
}
-static int cx23885_restart_vbi_queue(struct cx23885_dev *dev,
+int cx23885_restart_vbi_queue(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q)
{
struct cx23885_buffer *buf;
@@ -102,7 +151,7 @@ static int cx23885_restart_vbi_queue(struct cx23885_dev *dev,
buf = list_entry(item, struct cx23885_buffer, vb.queue);
buf->count = q->count++;
}
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&q->timeout, jiffies + (BUFFER_TIMEOUT / 30));
return 0;
}
@@ -113,8 +162,7 @@ void cx23885_vbi_timeout(unsigned long data)
struct cx23885_buffer *buf;
unsigned long flags;
- cx23885_sram_channel_dump(dev, &dev->sram_channels[SRAM_CH02]);
-
+ /* Stop the VBI engine */
cx_clear(VID_A_DMA_CTL, 0x22);
spin_lock_irqsave(&dev->slock, flags);
@@ -132,7 +180,7 @@ void cx23885_vbi_timeout(unsigned long data)
}
/* ------------------------------------------------------------------ */
-#define VBI_LINE_LENGTH 2048
+#define VBI_LINE_LENGTH 1440
#define VBI_LINE_COUNT 17
static int
@@ -173,7 +221,7 @@ vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
rc = videobuf_iolock(q, &buf->vb, NULL);
if (0 != rc)
goto fail;
- cx23885_risc_buffer(dev->pci, &buf->risc,
+ cx23885_risc_vbibuffer(dev->pci, &buf->risc,
dma->sglist,
0, buf->vb.width * buf->vb.height,
buf->vb.width, 0,
@@ -207,7 +255,7 @@ vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
cx23885_start_vbi_dma(dev, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&q->timeout, jiffies + (BUFFER_TIMEOUT / 30));
dprintk(2, "[%p/%d] vbi_queue - first active\n",
buf, buf->vb.i);
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 896bb32dbf0..e730b926301 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -37,6 +37,8 @@
#include "cx23885-ioctl.h"
#include "tuner-xc2028.h"
+#include <media/cx25840.h>
+
MODULE_DESCRIPTION("v4l2 driver module for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
@@ -69,14 +71,14 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
#define dprintk(level, fmt, arg...)\
do { if (video_debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
} while (0)
/* ------------------------------------------------------------------- */
/* static data */
#define FORMAT_FLAGS_PACKED 0x01
-
+#if 0
static struct cx23885_fmt formats[] = {
{
.name = "8 bpp, gray",
@@ -130,6 +132,23 @@ static struct cx23885_fmt formats[] = {
.flags = FORMAT_FLAGS_PACKED,
},
};
+#else
+static struct cx23885_fmt formats[] = {
+ {
+#if 0
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .flags = FORMAT_FLAGS_PACKED,
+ }, {
+#endif
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .flags = FORMAT_FLAGS_PACKED,
+ }
+};
+#endif
static struct cx23885_fmt *format_by_fourcc(unsigned int fourcc)
{
@@ -139,7 +158,12 @@ static struct cx23885_fmt *format_by_fourcc(unsigned int fourcc)
if (formats[i].fourcc == fourcc)
return formats+i;
- printk(KERN_ERR "%s(0x%08x) NOT FOUND\n", __func__, fourcc);
+ printk(KERN_ERR "%s(%c%c%c%c) NOT FOUND\n", __func__,
+ (fourcc & 0xff),
+ ((fourcc >> 8) & 0xff),
+ ((fourcc >> 16) & 0xff),
+ ((fourcc >> 24) & 0xff)
+ );
return NULL;
}
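The error message now prints the fourcc as its four ASCII characters instead of a raw hex value; a v4l2 fourcc packs the characters little-endian, so they are recovered one byte at a time. An illustrative helper (not part of the driver):

#include <linux/types.h>

static void fourcc_to_string(u32 fourcc, char buf[5])
{
	buf[0] = fourcc & 0xff;		/* 'Y' for V4L2_PIX_FMT_YUYV */
	buf[1] = (fourcc >> 8) & 0xff;	/* 'U' */
	buf[2] = (fourcc >> 16) & 0xff;	/* 'Y' */
	buf[3] = (fourcc >> 24) & 0xff;	/* 'V' */
	buf[4] = '\0';
}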
@@ -171,7 +195,7 @@ static struct cx23885_ctrl cx23885_ctls[] = {
.id = V4L2_CID_CONTRAST,
.name = "Contrast",
.minimum = 0,
- .maximum = 0xff,
+ .maximum = 0x7f,
.step = 1,
.default_value = 0x3f,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -184,10 +208,10 @@ static struct cx23885_ctrl cx23885_ctls[] = {
.v = {
.id = V4L2_CID_HUE,
.name = "Hue",
- .minimum = 0,
- .maximum = 0xff,
+ .minimum = -127,
+ .maximum = 128,
.step = 1,
- .default_value = 0x7f,
+ .default_value = 0x0,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 128,
@@ -202,9 +226,9 @@ static struct cx23885_ctrl cx23885_ctls[] = {
.id = V4L2_CID_SATURATION,
.name = "Saturation",
.minimum = 0,
- .maximum = 0xff,
+ .maximum = 0x7f,
.step = 1,
- .default_value = 0x7f,
+ .default_value = 0x3f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 0,
@@ -258,8 +282,8 @@ static const u32 *ctrl_classes[] = {
NULL
};
-static void cx23885_video_wakeup(struct cx23885_dev *dev,
- struct cx23885_dmaqueue *q, u32 count)
+void cx23885_video_wakeup(struct cx23885_dev *dev,
+ struct cx23885_dmaqueue *q, u32 count)
{
struct cx23885_buffer *buf;
int bc;
@@ -393,6 +417,71 @@ static void res_free(struct cx23885_dev *dev, struct cx23885_fh *fh,
mutex_unlock(&dev->lock);
}
+static int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
+{
+ /* 8 bit registers, 8 bit values */
+ u8 buf[] = { reg, data };
+
+ struct i2c_msg msg = { .addr = 0x98 >> 1,
+ .flags = 0, .buf = buf, .len = 2 };
+
+ return i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg, 1);
+}
+
+static u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
+{
+ /* 8 bit registers, 8 bit values */
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+
+ struct i2c_msg msg[] = {
+ { .addr = 0x98 >> 1, .flags = 0, .buf = b0, .len = 1 },
+ { .addr = 0x98 >> 1, .flags = I2C_M_RD, .buf = b1, .len = 1 }
+ };
+
+ ret = i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg[0], 2);
+ if (ret != 2)
+ printk(KERN_ERR "%s() error\n", __func__);
+
+ return b1[0];
+}
+
+static void cx23885_flatiron_dump(struct cx23885_dev *dev)
+{
+ int i;
+ dprintk(1, "Flatiron dump\n");
+ for (i = 0; i < 0x24; i++) {
+ dprintk(1, "FI[%02x] = %02x\n", i,
+ cx23885_flatiron_read(dev, i));
+ }
+}
+
+static int cx23885_flatiron_mux(struct cx23885_dev *dev, int input)
+{
+ u8 val;
+ dprintk(1, "%s(input = %d)\n", __func__, input);
+
+ if (input == 1)
+ val = cx23885_flatiron_read(dev, CH_PWR_CTRL1) & ~FLD_CH_SEL;
+ else if (input == 2)
+ val = cx23885_flatiron_read(dev, CH_PWR_CTRL1) | FLD_CH_SEL;
+ else
+ return -EINVAL;
+
+ val |= 0x20; /* Enable clock to delta-sigma and dec filter */
+
+ cx23885_flatiron_write(dev, CH_PWR_CTRL1, val);
+
+ /* Wake up */
+ cx23885_flatiron_write(dev, CH_PWR_CTRL2, 0);
+
+ if (video_debug)
+ cx23885_flatiron_dump(dev);
+
+ return 0;
+}
+
static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
{
dprintk(1, "%s() video_mux: %d [vmux=%d, gpio=0x%x,0x%x,0x%x,0x%x]\n",
@@ -413,27 +502,59 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
v4l2_subdev_call(dev->sd_cx25840, video, s_routing,
INPUT(input)->vmux, 0, 0);
+ if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
+ (dev->board == CX23885_BOARD_MPX885)) {
+ /* Configure audio routing */
+ v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
+ INPUT(input)->amux, 0, 0);
+
+ if (INPUT(input)->amux == CX25840_AUDIO7)
+ cx23885_flatiron_mux(dev, 1);
+ else if (INPUT(input)->amux == CX25840_AUDIO6)
+ cx23885_flatiron_mux(dev, 2);
+ }
+
return 0;
}
-/* ------------------------------------------------------------------ */
-static int cx23885_set_scale(struct cx23885_dev *dev, unsigned int width,
- unsigned int height, enum v4l2_field field)
+static int cx23885_audio_mux(struct cx23885_dev *dev, unsigned int input)
{
- dprintk(1, "%s()\n", __func__);
+ dprintk(1, "%s(input=%d)\n", __func__, input);
+
+ /* The baseband video core of the cx23885 has two audio inputs,
+ * LR1 and LR2. Almost every card supported so far has been an
+ * HVR1xxx, and we've only ever supported LR1. Time to support LR2,
+ * which is available via the optional white breakout header on
+ * the board.
+ * We'll use a couple of existing enums in the card struct to let
+ * boards specify which baseband input they need, or just default
+ * to what we've always used.
+ */
+ if (INPUT(input)->amux == CX25840_AUDIO7)
+ cx23885_flatiron_mux(dev, 1);
+ else if (INPUT(input)->amux == CX25840_AUDIO6)
+ cx23885_flatiron_mux(dev, 2);
+ else {
+ /* Not specifically defined, assume the default. */
+ cx23885_flatiron_mux(dev, 1);
+ }
+
return 0;
}
+/* ------------------------------------------------------------------ */
static int cx23885_start_video_dma(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q,
struct cx23885_buffer *buf)
{
dprintk(1, "%s()\n", __func__);
+ /* Stop the dma/fifo before we tamper with its RISC programs */
+ cx_clear(VID_A_DMA_CTL, 0x11);
+
/* setup fifo + format */
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
buf->bpl, buf->risc.dma);
- cx23885_set_scale(dev, buf->vb.width, buf->vb.height, buf->vb.field);
/* reset counter */
cx_write(VID_A_GPCNT_CTL, 3);
@@ -748,7 +869,7 @@ static int video_open(struct file *file)
fh->type = type;
fh->width = 320;
fh->height = 240;
- fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+ fh->fmt = format_by_fourcc(V4L2_PIX_FMT_YUYV);
videobuf_queue_sg_init(&fh->vidq, &cx23885_video_qops,
&dev->pci->dev, &dev->slock,
@@ -757,6 +878,14 @@ static int video_open(struct file *file)
sizeof(struct cx23885_buffer),
fh, NULL);
+ videobuf_queue_sg_init(&fh->vbiq, &cx23885_vbi_qops,
+ &dev->pci->dev, &dev->slock,
+ V4L2_BUF_TYPE_VBI_CAPTURE,
+ V4L2_FIELD_SEQ_TB,
+ sizeof(struct cx23885_buffer),
+ fh, NULL);
+
+
dprintk(1, "post videobuf_queue_init()\n");
return 0;
@@ -884,8 +1013,9 @@ static int cx23885_get_control(struct cx23885_dev *dev,
static int cx23885_set_control(struct cx23885_dev *dev,
struct v4l2_control *ctl)
{
- dprintk(1, "%s() calling cx25840(VIDIOC_S_CTRL)"
- " (disabled - no action)\n", __func__);
+ dprintk(1, "%s() calling cx25840(VIDIOC_S_CTRL)\n", __func__);
+ call_all(dev, core, s_ctrl, ctl);
+
return 0;
}
@@ -1059,13 +1189,22 @@ static int vidioc_streamon(struct file *file, void *priv,
struct cx23885_dev *dev = fh->dev;
dprintk(1, "%s()\n", __func__);
- if (unlikely(fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
return -EINVAL;
if (unlikely(i != fh->type))
return -EINVAL;
if (unlikely(!res_get(dev, fh, get_resource(fh))))
return -EBUSY;
+
+ /* Don't start VBI streaming unless vida streaming
+ * has already started.
+ */
+ if ((fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) &&
+ ((cx_read(VID_A_DMA_CTL) & 0x11) == 0))
+ return -EINVAL;
+
return videobuf_streamon(get_queue(fh));
}
@@ -1076,7 +1215,8 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
int err, res;
dprintk(1, "%s()\n", __func__);
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
return -EINVAL;
if (i != fh->type)
return -EINVAL;
@@ -1119,7 +1259,7 @@ static int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
dprintk(1, "%s()\n", __func__);
n = i->index;
- if (n >= 4)
+ if (n >= MAX_CX23885_INPUT)
return -EINVAL;
if (0 == INPUT(n)->type)
@@ -1133,6 +1273,11 @@ static int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = CX23885_NORMS;
}
+
+ /* Two selectable audio inputs for non-tv inputs */
+ if (INPUT(n)->type != CX23885_VMUX_TELEVISION)
+ i->audioset = 0x3;
+
return 0;
}
@@ -1159,13 +1304,20 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
dprintk(1, "%s(%d)\n", __func__, i);
- if (i >= 4) {
+ if (i >= MAX_CX23885_INPUT) {
dprintk(1, "%s() -EINVAL\n", __func__);
return -EINVAL;
}
+ if (INPUT(i)->type == 0)
+ return -EINVAL;
+
mutex_lock(&dev->lock);
cx23885_video_mux(dev, i);
+
+ /* Also establish the card's default audio input */
+ /* Caller is free to use VIDIOC_S_AUDIO to override afterwards */
+ cx23885_audio_mux(dev, i);
mutex_unlock(&dev->lock);
return 0;
}
@@ -1185,6 +1337,64 @@ static int vidioc_log_status(struct file *file, void *priv)
return 0;
}
+static int cx23885_query_audinput(struct file *file, void *priv,
+ struct v4l2_audio *i)
+{
+ struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ static const char *iname[] = {
+ [0] = "Baseband L/R 1",
+ [1] = "Baseband L/R 2",
+ };
+ unsigned int n;
+ dprintk(1, "%s()\n", __func__);
+
+ n = i->index;
+ if (n >= 2)
+ return -EINVAL;
+
+ memset(i, 0, sizeof(*i));
+ i->index = n;
+ strcpy(i->name, iname[n]);
+ i->capability = V4L2_AUDCAP_STEREO;
+ i->mode = V4L2_AUDMODE_AVL;
+ return 0;
+
+}
+
+static int vidioc_enum_audinput(struct file *file, void *priv,
+ struct v4l2_audio *i)
+{
+ return cx23885_query_audinput(file, priv, i);
+}
+
+static int vidioc_g_audinput(struct file *file, void *priv,
+ struct v4l2_audio *i)
+{
+ struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+
+ i->index = dev->audinput;
+ dprintk(1, "%s(input=%d)\n", __func__, i->index);
+
+ return cx23885_query_audinput(file, priv, i);
+}
+
+static int vidioc_s_audinput(struct file *file, void *priv,
+ struct v4l2_audio *i)
+{
+ struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ if (i->index >= 2)
+ return -EINVAL;
+
+ dprintk(1, "%s(%d)\n", __func__, i->index);
+
+ dev->audinput = i->index;
+
+ /* Skip the audio defaults from the cards struct, caller wants
+ * to directly touch the audio mux hardware. */
+ cx23885_flatiron_mux(dev, dev->audinput + 1);
+ return 0;
+}
+
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qctrl)
{
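The three handlers added above are wired to VIDIOC_ENUMAUDIO, VIDIOC_G_AUDIO and VIDIOC_S_AUDIO further down in the ioctl ops table. A user-space sketch of enumerating and selecting the two baseband inputs; the /dev/video0 node name is an assumption and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
	struct v4l2_audio aud;
	unsigned int i;

	if (fd < 0)
		return 1;

	for (i = 0; ; i++) {
		memset(&aud, 0, sizeof(aud));
		aud.index = i;
		if (ioctl(fd, VIDIOC_ENUMAUDIO, &aud) < 0)
			break;		/* -EINVAL past "Baseband L/R 2" */
		printf("audio input %u: %s\n", aud.index, aud.name);
	}

	memset(&aud, 0, sizeof(aud));
	aud.index = 1;				/* "Baseband L/R 2" */
	if (ioctl(fd, VIDIOC_S_AUDIO, &aud) == 0 &&
	    ioctl(fd, VIDIOC_G_AUDIO, &aud) == 0)
		printf("selected: %s\n", aud.name);
	return 0;
}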
@@ -1221,10 +1431,8 @@ static int vidioc_g_tuner(struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Television");
- t->type = V4L2_TUNER_ANALOG_TV;
- t->capability = V4L2_TUNER_CAP_NORM;
- t->rangehigh = 0xffffffffUL;
- t->signal = 0xffff ; /* LOCKED */
+
+ call_all(dev, tuner, g_tuner, t);
return 0;
}
@@ -1237,6 +1445,9 @@ static int vidioc_s_tuner(struct file *file, void *priv,
return -EINVAL;
if (0 != t->index)
return -EINVAL;
+ /* Update the A/V core */
+ call_all(dev, tuner, s_tuner, t);
+
return 0;
}
@@ -1302,10 +1513,6 @@ static void cx23885_vid_timeout(unsigned long data)
struct cx23885_buffer *buf;
unsigned long flags;
- cx23885_sram_channel_dump(dev, &dev->sram_channels[SRAM_CH01]);
-
- cx_clear(VID_A_DMA_CTL, 0x11);
-
spin_lock_irqsave(&dev->slock, flags);
while (!list_empty(&q->active)) {
buf = list_entry(q->active.next,
@@ -1313,7 +1520,7 @@ static void cx23885_vid_timeout(unsigned long data)
list_del(&buf->vb.queue);
buf->vb.state = VIDEOBUF_ERROR;
wake_up(&buf->vb.done);
- printk(KERN_ERR "%s/0: [%p/%d] timeout - dma=0x%08lx\n",
+ printk(KERN_ERR "%s: [%p/%d] timeout - dma=0x%08lx\n",
dev->name, buf, buf->vb.i,
(unsigned long)buf->risc.dma);
}
@@ -1329,27 +1536,43 @@ int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
mask = cx_read(VID_A_INT_MSK);
if (0 == (status & mask))
return handled;
+
cx_write(VID_A_INT_STAT, status);
- dprintk(2, "%s() status = 0x%08x\n", __func__, status);
- /* risc op code error */
- if (status & (1 << 16)) {
- printk(KERN_WARNING "%s/0: video risc op code error\n",
- dev->name);
- cx_clear(VID_A_DMA_CTL, 0x11);
- cx23885_sram_channel_dump(dev, &dev->sram_channels[SRAM_CH01]);
+ /* risc op code error, fifo overflow or line sync detection error */
+ if ((status & VID_BC_MSK_OPC_ERR) ||
+ (status & VID_BC_MSK_SYNC) ||
+ (status & VID_BC_MSK_OF)) {
+
+ if (status & VID_BC_MSK_OPC_ERR) {
+ dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
+ VID_BC_MSK_OPC_ERR);
+ printk(KERN_WARNING "%s: video risc op code error\n",
+ dev->name);
+ cx23885_sram_channel_dump(dev,
+ &dev->sram_channels[SRAM_CH01]);
+ }
+
+ if (status & VID_BC_MSK_SYNC)
+ dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) "
+ "video lines miss-match\n",
+ VID_BC_MSK_SYNC);
+
+ if (status & VID_BC_MSK_OF)
+ dprintk(7, " (VID_BC_MSK_OF 0x%08x) fifo overflow\n",
+ VID_BC_MSK_OF);
+
}
- /* risc1 y */
- if (status & 0x01) {
+ /* Video */
+ if (status & VID_BC_MSK_RISCI1) {
spin_lock(&dev->slock);
count = cx_read(VID_A_GPCNT);
cx23885_video_wakeup(dev, &dev->vidq, count);
spin_unlock(&dev->slock);
handled++;
}
- /* risc2 y */
- if (status & 0x10) {
+ if (status & VID_BC_MSK_RISCI2) {
dprintk(2, "stopper video\n");
spin_lock(&dev->slock);
cx23885_restart_video_queue(dev, &dev->vidq);
@@ -1357,6 +1580,9 @@ int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
handled++;
}
+ /* Allow the VBI framework to process its payload */
+ handled += cx23885_vbi_irq(dev, status);
+
return handled;
}
@@ -1405,6 +1631,9 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_register = cx23885_g_register,
.vidioc_s_register = cx23885_s_register,
#endif
+ .vidioc_enumaudio = vidioc_enum_audinput,
+ .vidioc_g_audio = vidioc_g_audinput,
+ .vidioc_s_audio = vidioc_s_audinput,
};
static struct video_device cx23885_vbi_template;
@@ -1429,6 +1658,14 @@ void cx23885_video_unregister(struct cx23885_dev *dev)
dprintk(1, "%s()\n", __func__);
cx23885_irq_remove(dev, 0x01);
+ if (dev->vbi_dev) {
+ if (video_is_registered(dev->vbi_dev))
+ video_unregister_device(dev->vbi_dev);
+ else
+ video_device_release(dev->vbi_dev);
+ dev->vbi_dev = NULL;
+ btcx_riscmem_free(dev->pci, &dev->vbiq.stopper);
+ }
if (dev->video_dev) {
if (video_is_registered(dev->video_dev))
video_unregister_device(dev->video_dev);
@@ -1438,6 +1675,9 @@ void cx23885_video_unregister(struct cx23885_dev *dev)
btcx_riscmem_free(dev->pci, &dev->vidq.stopper);
}
+
+ if (dev->audio_dev)
+ cx23885_audio_unregister(dev);
}
int cx23885_video_register(struct cx23885_dev *dev)
@@ -1463,7 +1703,14 @@ int cx23885_video_register(struct cx23885_dev *dev)
cx23885_risc_stopper(dev->pci, &dev->vidq.stopper,
VID_A_DMA_CTL, 0x11, 0x00);
- /* Don't enable VBI yet */
+ /* init vbi dma queues */
+ INIT_LIST_HEAD(&dev->vbiq.active);
+ INIT_LIST_HEAD(&dev->vbiq.queued);
+ dev->vbiq.timeout.function = cx23885_vbi_timeout;
+ dev->vbiq.timeout.data = (unsigned long)dev;
+ init_timer(&dev->vbiq.timeout);
+ cx23885_risc_stopper(dev->pci, &dev->vbiq.stopper,
+ VID_A_DMA_CTL, 0x22, 0x00);
cx23885_irq_add_enable(dev, 0x01);
@@ -1504,8 +1751,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
}
}
-
- /* register v4l devices */
+ /* register Video device */
dev->video_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_video_template, "video");
err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
@@ -1515,13 +1761,31 @@ int cx23885_video_register(struct cx23885_dev *dev)
dev->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
+ printk(KERN_INFO "%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
+
+ /* register VBI device */
+ dev->vbi_dev = cx23885_vdev_init(dev, dev->pci,
+ &cx23885_vbi_template, "vbi");
+ err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
+ vbi_nr[dev->nr]);
+ if (err < 0) {
+ printk(KERN_INFO "%s: can't register vbi device\n",
+ dev->name);
+ goto fail_unreg;
+ }
+ printk(KERN_INFO "%s: registered device %s\n",
+ dev->name, video_device_node_name(dev->vbi_dev));
+
+ /* Register ALSA audio device */
+ dev->audio_dev = cx23885_audio_register(dev);
+
/* initial device configuration */
mutex_lock(&dev->lock);
cx23885_set_tvnorm(dev, dev->tvnorm);
init_controls(dev);
cx23885_video_mux(dev, 0);
+ cx23885_audio_mux(dev, 0);
mutex_unlock(&dev->lock);
return 0;
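Once the VBI node registered above exists, the raw VBI layout can be inspected from user space. A small sketch of such a query; the /dev/vbi0 node name is an assumption and whether every field is filled in depends on the driver's VBI format handler:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/vbi0", O_RDWR);	/* assumed device node */
	struct v4l2_format fmt;

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VBI_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
		printf("raw VBI: %u samples/line, %u+%u lines, rate %u Hz\n",
		       fmt.fmt.vbi.samples_per_line,
		       fmt.fmt.vbi.count[0], fmt.fmt.vbi.count[1],
		       fmt.fmt.vbi.sampling_rate);
	return 0;
}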
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index d86bc0b1317..b49036fe3ff 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -86,6 +86,7 @@
#define CX23885_BOARD_GOTVIEW_X5_3D_HYBRID 29
#define CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF 30
#define CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000 31
+#define CX23885_BOARD_MPX885 32
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
@@ -192,6 +193,7 @@ struct cx23885_buffer {
struct cx23885_input {
enum cx23885_itype type;
unsigned int vmux;
+ unsigned int amux;
u32 gpio0, gpio1, gpio2, gpio3;
};
@@ -318,6 +320,34 @@ struct cx23885_kernel_ir {
struct rc_dev *rc;
};
+struct cx23885_audio_buffer {
+ unsigned int bpl;
+ struct btcx_riscmem risc;
+ struct videobuf_dmabuf dma;
+};
+
+struct cx23885_audio_dev {
+ struct cx23885_dev *dev;
+
+ struct pci_dev *pci;
+
+ struct snd_card *card;
+
+ spinlock_t lock;
+
+ atomic_t count;
+
+ unsigned int dma_size;
+ unsigned int period_size;
+ unsigned int num_periods;
+
+ struct videobuf_dmabuf *dma_risc;
+
+ struct cx23885_audio_buffer *buf;
+
+ struct snd_pcm_substream *substream;
+};
+
struct cx23885_dev {
atomic_t refcount;
struct v4l2_device v4l2_dev;
@@ -362,6 +392,7 @@ struct cx23885_dev {
/* Analog video */
u32 resources;
unsigned int input;
+ unsigned int audinput; /* Selectable audio input */
u32 tvaudio;
v4l2_std_id tvnorm;
unsigned int tuner_type;
@@ -400,6 +431,9 @@ struct cx23885_dev {
atomic_t v4l_reader_count;
struct cx23885_tvnorm encodernorm;
+ /* Analog raw audio */
+ struct cx23885_audio_dev *audio_dev;
+
};
static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
@@ -478,6 +512,11 @@ extern int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
unsigned int top_offset, unsigned int bottom_offset,
unsigned int bpl, unsigned int padding, unsigned int lines);
+extern int cx23885_risc_vbibuffer(struct pci_dev *pci,
+ struct btcx_riscmem *risc, struct scatterlist *sglist,
+ unsigned int top_offset, unsigned int bottom_offset,
+ unsigned int bpl, unsigned int padding, unsigned int lines);
+
void cx23885_cancel_buffers(struct cx23885_tsport *port);
extern int cx23885_restart_queue(struct cx23885_tsport *port,
@@ -533,6 +572,8 @@ extern void cx23885_free_buffer(struct videobuf_queue *q,
extern int cx23885_video_register(struct cx23885_dev *dev);
extern void cx23885_video_unregister(struct cx23885_dev *dev);
extern int cx23885_video_irq(struct cx23885_dev *dev, u32 status);
+extern void cx23885_video_wakeup(struct cx23885_dev *dev,
+ struct cx23885_dmaqueue *q, u32 count);
/* ----------------------------------------------------------- */
/* cx23885-vbi.c */
@@ -540,6 +581,9 @@ extern int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f);
extern void cx23885_vbi_timeout(unsigned long data);
extern struct videobuf_queue_ops cx23885_vbi_qops;
+extern int cx23885_restart_vbi_queue(struct cx23885_dev *dev,
+ struct cx23885_dmaqueue *q);
+extern int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status);
/* cx23885-i2c.c */
extern int cx23885_i2c_register(struct cx23885_i2c *bus);
@@ -563,6 +607,18 @@ extern void mc417_gpio_set(struct cx23885_dev *dev, u32 mask);
extern void mc417_gpio_clear(struct cx23885_dev *dev, u32 mask);
extern void mc417_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput);
+/* ----------------------------------------------------------- */
+/* cx23885-alsa.c */
+extern struct cx23885_audio_dev *cx23885_audio_register(
+ struct cx23885_dev *dev);
+extern void cx23885_audio_unregister(struct cx23885_dev *dev);
+extern int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask);
+extern int cx23885_risc_databuffer(struct pci_dev *pci,
+ struct btcx_riscmem *risc,
+ struct scatterlist *sglist,
+ unsigned int bpl,
+ unsigned int lines,
+ unsigned int lpi);
/* ----------------------------------------------------------- */
/* tv norms */
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index e37be6fcf67..bb1ce346425 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -673,7 +673,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
unsigned int i, n;
union cx23888_ir_fifo_rec *p;
- unsigned u, v;
+ unsigned u, v, w;
n = count / sizeof(union cx23888_ir_fifo_rec)
* sizeof(union cx23888_ir_fifo_rec);
@@ -692,11 +692,12 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
/* Assume RTO was because of no IR light input */
u = 0;
- v4l2_dbg(2, ir_888_debug, sd, "rx read: end of rx\n");
+ w = 1;
} else {
u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
if (invert)
u = u ? 0 : 1;
+ w = 0;
}
v = (unsigned) pulse_width_count_to_ns(
@@ -707,9 +708,12 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
+ p->ir_core_data.timeout = w;
- v4l2_dbg(2, ir_888_debug, sd, "rx read: %10u ns %s\n",
- v, u ? "mark" : "space");
+ v4l2_dbg(2, ir_888_debug, sd, "rx read: %10u ns %s %s\n",
+ v, u ? "mark" : "space", w ? "(timed out)" : "");
+ if (w)
+ v4l2_dbg(2, ir_888_debug, sd, "rx read: end of rx\n");
}
return 0;
}
diff --git a/drivers/staging/cx25821/Kconfig b/drivers/media/video/cx25821/Kconfig
index 5f6b5421371..5f6b5421371 100644
--- a/drivers/staging/cx25821/Kconfig
+++ b/drivers/media/video/cx25821/Kconfig
diff --git a/drivers/staging/cx25821/Makefile b/drivers/media/video/cx25821/Makefile
index aedde18c68f..aedde18c68f 100644
--- a/drivers/staging/cx25821/Makefile
+++ b/drivers/media/video/cx25821/Makefile
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/media/video/cx25821/cx25821-alsa.c
index 09e99de5fd2..09e99de5fd2 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/media/video/cx25821/cx25821-alsa.c
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.c b/drivers/media/video/cx25821/cx25821-audio-upstream.c
index c20d6dece15..c20d6dece15 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-audio-upstream.c
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.h b/drivers/media/video/cx25821/cx25821-audio-upstream.h
index af2ae7c5815..af2ae7c5815 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.h
+++ b/drivers/media/video/cx25821/cx25821-audio-upstream.h
diff --git a/drivers/staging/cx25821/cx25821-audio.h b/drivers/media/video/cx25821/cx25821-audio.h
index 8eb55b7b88c..8eb55b7b88c 100644
--- a/drivers/staging/cx25821/cx25821-audio.h
+++ b/drivers/media/video/cx25821/cx25821-audio.h
diff --git a/drivers/staging/cx25821/cx25821-biffuncs.h b/drivers/media/video/cx25821/cx25821-biffuncs.h
index 9326a7c729e..9326a7c729e 100644
--- a/drivers/staging/cx25821/cx25821-biffuncs.h
+++ b/drivers/media/video/cx25821/cx25821-biffuncs.h
diff --git a/drivers/staging/cx25821/cx25821-cards.c b/drivers/media/video/cx25821/cx25821-cards.c
index 6ace60313b4..6ace60313b4 100644
--- a/drivers/staging/cx25821/cx25821-cards.c
+++ b/drivers/media/video/cx25821/cx25821-cards.c
diff --git a/drivers/staging/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
index a7fa38f9594..a7fa38f9594 100644
--- a/drivers/staging/cx25821/cx25821-core.c
+++ b/drivers/media/video/cx25821/cx25821-core.c
diff --git a/drivers/staging/cx25821/cx25821-gpio.c b/drivers/media/video/cx25821/cx25821-gpio.c
index 29e43b03c85..29e43b03c85 100644
--- a/drivers/staging/cx25821/cx25821-gpio.c
+++ b/drivers/media/video/cx25821/cx25821-gpio.c
diff --git a/drivers/staging/cx25821/cx25821-i2c.c b/drivers/media/video/cx25821/cx25821-i2c.c
index 4d3d0ce4078..4d3d0ce4078 100644
--- a/drivers/staging/cx25821/cx25821-i2c.c
+++ b/drivers/media/video/cx25821/cx25821-i2c.c
diff --git a/drivers/staging/cx25821/cx25821-medusa-defines.h b/drivers/media/video/cx25821/cx25821-medusa-defines.h
index 60d197f5755..60d197f5755 100644
--- a/drivers/staging/cx25821/cx25821-medusa-defines.h
+++ b/drivers/media/video/cx25821/cx25821-medusa-defines.h
diff --git a/drivers/staging/cx25821/cx25821-medusa-reg.h b/drivers/media/video/cx25821/cx25821-medusa-reg.h
index 1c1c228352d..1c1c228352d 100644
--- a/drivers/staging/cx25821/cx25821-medusa-reg.h
+++ b/drivers/media/video/cx25821/cx25821-medusa-reg.h
diff --git a/drivers/staging/cx25821/cx25821-medusa-video.c b/drivers/media/video/cx25821/cx25821-medusa-video.c
index fc780d0908d..fc780d0908d 100644
--- a/drivers/staging/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/video/cx25821/cx25821-medusa-video.c
diff --git a/drivers/staging/cx25821/cx25821-medusa-video.h b/drivers/media/video/cx25821/cx25821-medusa-video.h
index 6175e096185..6175e096185 100644
--- a/drivers/staging/cx25821/cx25821-medusa-video.h
+++ b/drivers/media/video/cx25821/cx25821-medusa-video.h
diff --git a/drivers/staging/cx25821/cx25821-reg.h b/drivers/media/video/cx25821/cx25821-reg.h
index a3fc25a4dc0..a3fc25a4dc0 100644
--- a/drivers/staging/cx25821/cx25821-reg.h
+++ b/drivers/media/video/cx25821/cx25821-reg.h
diff --git a/drivers/staging/cx25821/cx25821-sram.h b/drivers/media/video/cx25821/cx25821-sram.h
index 5f05d153bc4..5f05d153bc4 100644
--- a/drivers/staging/cx25821/cx25821-sram.h
+++ b/drivers/media/video/cx25821/cx25821-sram.h
diff --git a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
index 2a724ddfa53..2a724ddfa53 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
diff --git a/drivers/staging/cx25821/cx25821-video-upstream-ch2.h b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.h
index d42dab59b66..d42dab59b66 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream-ch2.h
+++ b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.h
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.c b/drivers/media/video/cx25821/cx25821-video-upstream.c
index c0b80068f46..c0b80068f46 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream.c
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.h b/drivers/media/video/cx25821/cx25821-video-upstream.h
index 268ec8aa6a6..268ec8aa6a6 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.h
+++ b/drivers/media/video/cx25821/cx25821-video-upstream.h
diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/media/video/cx25821/cx25821-video.c
index 084fc0899e1..4d6907cda75 100644
--- a/drivers/staging/cx25821/cx25821-video.c
+++ b/drivers/media/video/cx25821/cx25821-video.c
@@ -1312,7 +1312,7 @@ int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i)
return err;
}
- if (i > 2) {
+ if (i >= CX25821_NR_INPUT) {
dprintk(1, "%s(): -EINVAL\n", __func__);
return -EINVAL;
}
diff --git a/drivers/staging/cx25821/cx25821-video.h b/drivers/media/video/cx25821/cx25821-video.h
index d0d9538ca5b..d0d9538ca5b 100644
--- a/drivers/staging/cx25821/cx25821-video.h
+++ b/drivers/media/video/cx25821/cx25821-video.h
diff --git a/drivers/staging/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
index db2615b2bac..2d2d0093282 100644
--- a/drivers/staging/cx25821/cx25821.h
+++ b/drivers/media/video/cx25821/cx25821.h
@@ -98,6 +98,7 @@
#define CX25821_BOARD_CONEXANT_ATHENA10 1
#define MAX_VID_CHANNEL_NUM 12
#define VID_CHANNEL_NUM 8
+#define CX25821_NR_INPUT 2
struct cx25821_fmt {
char *name;
@@ -196,7 +197,7 @@ struct cx25821_board {
unsigned char radio_addr;
u32 clk_freq;
- struct cx25821_input input[2];
+ struct cx25821_input input[CX25821_NR_INPUT];
};
struct cx25821_subid {
diff --git a/drivers/media/video/cx25840/Makefile b/drivers/media/video/cx25840/Makefile
index 2ee96d3973b..dc40dde2e0c 100644
--- a/drivers/media/video/cx25840/Makefile
+++ b/drivers/media/video/cx25840/Makefile
@@ -3,4 +3,4 @@ cx25840-objs := cx25840-core.o cx25840-audio.o cx25840-firmware.o \
obj-$(CONFIG_VIDEO_CX25840) += cx25840.o
-EXTRA_CFLAGS += -Idrivers/media/video
+ccflags-y += -Idrivers/media/video
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index 34b96c7cfd6..005f1109364 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -480,6 +480,7 @@ void cx25840_audio_set_path(struct i2c_client *client)
static void set_volume(struct i2c_client *client, int volume)
{
+ struct cx25840_state *state = to_state(i2c_get_clientdata(client));
int vol;
/* Convert the volume to msp3400 values (0-127) */
@@ -495,7 +496,14 @@ static void set_volume(struct i2c_client *client, int volume)
}
/* PATH1_VOLUME */
- cx25840_write(client, 0x8d4, 228 - (vol * 2));
+ if (is_cx2388x(state)) {
+ /* On the cx23885 the volume calculation doesn't work:
+ * it always results in 0xe4 regardless of the
+ * requested level.
+ */
+ cx25840_write(client, 0x8d4, volume);
+ } else
+ cx25840_write(client, 0x8d4, 228 - (vol * 2));
}
static void set_balance(struct i2c_client *client, int balance)
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index b7ee2ae7058..cd9976408ab 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -702,6 +702,13 @@ static void cx231xx_initialize(struct i2c_client *client)
/* start microcontroller */
cx25840_and_or(client, 0x803, ~0x10, 0x10);
+
+ /* CC raw enable */
+ cx25840_write(client, 0x404, 0x0b);
+
+ /* CC on */
+ cx25840_write(client, 0x42f, 0x66);
+ cx25840_write4(client, 0x474, 0x1e1e601a);
}
/* ----------------------------------------------------------------------- */
@@ -1067,6 +1074,18 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write(client, 0x919, 0x01);
}
+ if (is_cx2388x(state) && ((aud_input == CX25840_AUDIO7) ||
+ (aud_input == CX25840_AUDIO6))) {
+ /* Configure audio from LR1 or LR2 input */
+ cx25840_write4(client, 0x910, 0);
+ cx25840_write4(client, 0x8d0, 0x63073);
+ } else
+ if (is_cx2388x(state) && (aud_input == CX25840_AUDIO8)) {
+ /* Configure audio from tuner/sif input */
+ cx25840_write4(client, 0x910, 0x12b000c9);
+ cx25840_write4(client, 0x8d0, 0x1f063870);
+ }
+
return 0;
}
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index 7eb79af28aa..13c380ebb56 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/kfifo.h>
+#include <linux/module.h>
#include <media/cx25840.h>
#include <media/rc-core.h>
@@ -668,7 +669,7 @@ static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
u16 divider;
unsigned int i, n;
union cx25840_ir_fifo_rec *p;
- unsigned u, v;
+ unsigned u, v, w;
if (ir_state == NULL)
return -ENODEV;
@@ -694,11 +695,12 @@ static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
/* Assume RTO was because of no IR light input */
u = 0;
- v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n");
+ w = 1;
} else {
u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
if (invert)
u = u ? 0 : 1;
+ w = 0;
}
v = (unsigned) pulse_width_count_to_ns(
@@ -709,9 +711,12 @@ static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
+ p->ir_core_data.timeout = w;
- v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns %s\n",
- v, u ? "mark" : "space");
+ v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns %s %s\n",
+ v, u ? "mark" : "space", w ? "(timed out)" : "");
+ if (w)
+ v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n");
}
return 0;
}
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 5b7e26761f0..c1a2785ba24 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_VIDEO_CX88_BLACKBIRD) += cx88-blackbird.o
obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index fbcaa1c5b09..fbfdd806793 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -636,9 +636,6 @@ int cx88_reset(struct cx88_core *core)
cx_write(MO_PCI_INTSTAT, 0xFFFFFFFF); // Clear PCI int
cx_write(MO_INT1_STAT, 0xFFFFFFFF); // Clear RISC int
- /* set default notch filter */
- cx_andor(MO_HTOTAL, 0x1800, (HLNotchFilter4xFsc << 11));
-
/* Reset on-board parts */
cx_write(MO_SRST_IO, 0);
msleep(10);
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 60d28fdd779..921c56d115d 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -266,7 +266,7 @@ static const struct cx88_ctrl cx8800_ctls[] = {
.id = V4L2_CID_BAND_STOP_FILTER,
.name = "Notch filter",
.minimum = 0,
- .maximum = 3,
+ .maximum = 1,
.step = 1,
.default_value = 0x0,
.type = V4L2_CTRL_TYPE_INTEGER,
diff --git a/drivers/media/video/davinci/dm355_ccdc.c b/drivers/media/video/davinci/dm355_ccdc.c
index c29ac88ffd7..bd443ee76ff 100644
--- a/drivers/media/video/davinci/dm355_ccdc.c
+++ b/drivers/media/video/davinci/dm355_ccdc.c
@@ -39,6 +39,7 @@
#include <linux/videodev2.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <media/davinci/dm355_ccdc.h>
#include <media/davinci/vpss.h>
diff --git a/drivers/media/video/davinci/dm644x_ccdc.c b/drivers/media/video/davinci/dm644x_ccdc.c
index c8b32c1c738..8051c295647 100644
--- a/drivers/media/video/davinci/dm644x_ccdc.c
+++ b/drivers/media/video/davinci/dm644x_ccdc.c
@@ -40,6 +40,7 @@
#include <linux/gfp.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <media/davinci/dm644x_ccdc.h>
#include <media/davinci/vpss.h>
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
index 7f1d83a6d57..8588a86d9b4 100644
--- a/drivers/media/video/davinci/vpbe_display.c
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -43,7 +43,6 @@
static int debug;
-#define VPBE_DISPLAY_SD_BUF_SIZE (720*576*2)
#define VPBE_DEFAULT_NUM_BUFS 3
module_param(debug, int, 0644);
diff --git a/drivers/media/video/davinci/vpbe_osd.c b/drivers/media/video/davinci/vpbe_osd.c
index 5352884998f..ceccf430251 100644
--- a/drivers/media/video/davinci/vpbe_osd.c
+++ b/drivers/media/video/davinci/vpbe_osd.c
@@ -1162,7 +1162,7 @@ static int osd_probe(struct platform_device *pdev)
goto free_mem;
}
osd->osd_base_phys = res->start;
- osd->osd_size = res->end - res->start + 1;
+ osd->osd_size = resource_size(res);
if (!request_mem_region(osd->osd_base_phys, osd->osd_size,
MODULE_NAME)) {
dev_err(osd->dev, "Unable to reserve OSD MMIO region\n");
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 281ee427c2a..f6f622e123b 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -41,6 +41,8 @@ config VIDEO_EM28XX_DVB
select DVB_CXD2820R if !DVB_FE_CUSTOMISE
select DVB_DRXK if !DVB_FE_CUSTOMISE
select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
+ select DVB_TDA10071 if !DVB_FE_CUSTOMISE
+ select DVB_A8293 if !DVB_FE_CUSTOMISE
select VIDEOBUF_DVB
---help---
This adds support for DVB cards based on the
diff --git a/drivers/media/video/em28xx/Makefile b/drivers/media/video/em28xx/Makefile
index 38aaa004f57..2abdf76c520 100644
--- a/drivers/media/video/em28xx/Makefile
+++ b/drivers/media/video/em28xx/Makefile
@@ -9,8 +9,8 @@ obj-$(CONFIG_VIDEO_EM28XX) += em28xx.o
obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 3e3959fee41..9b747c266af 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
-/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */
+/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
static unsigned long em28xx_devused;
struct em28xx_hash_table {
@@ -317,6 +317,25 @@ static struct em28xx_reg_seq terratec_h5_digital[] = {
};
#endif
+/* 2013:024c PCTV DVB-S2 Stick 460e
+ * GPIO_0 - POWER_ON
+ * GPIO_1 - BOOST
+ * GPIO_2 - VUV_LNB (red LED)
+ * GPIO_3 - EXT_12V
+ * GPIO_4 - INT_DEM (DEMOD GPIO_0)
+ * GPIO_5 - INT_LNB
+ * GPIO_6 - RESET_DEM
+ * GPIO_7 - LED (green LED)
+ */
+static struct em28xx_reg_seq pctv_460e[] = {
+ {EM2874_R80_GPIO, 0x01, 0xff, 50},
+ {0x0d, 0xff, 0xff, 50},
+ {EM2874_R80_GPIO, 0x41, 0xff, 50}, /* GPIO_6=1 */
+ {0x0d, 0x42, 0xff, 50},
+ {EM2874_R80_GPIO, 0x61, 0xff, 50}, /* GPIO_5=1 */
+ { -1, -1, -1, -1},
+};
+
/*
* Board definitions
*/
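The pctv_460e table above follows the driver's usual {reg, val, mask, delay} sequence convention, terminated by an entry with reg == -1, and is later handed to em28xx_gpio_set(). A plain-C sketch of how such a table is typically walked; the struct and helper below are illustrative stand-ins, not the driver's real em28xx_reg_seq handling:

#include <stdio.h>
#include <unistd.h>

struct reg_seq {
	int reg;
	unsigned char val, mask;
	int sleep_ms;
};

static void write_reg_bits(int reg, unsigned char val, unsigned char mask)
{
	/* stand-in: a real driver would read, mask, merge and write back */
	printf("reg 0x%02x <= 0x%02x (mask 0x%02x)\n", reg, val, mask);
}

static void run_seq(const struct reg_seq *seq)
{
	for (; seq->reg != -1; seq++) {
		write_reg_bits(seq->reg, seq->val, seq->mask);
		if (seq->sleep_ms > 0)
			usleep(seq->sleep_ms * 1000);
	}
}

int main(void)
{
	static const struct reg_seq demo[] = {
		{ 0x80, 0x01, 0xff, 50 },	/* e.g. power on */
		{ 0x80, 0x41, 0xff, 50 },	/* e.g. release reset */
		{ -1, 0, 0, 0 },		/* terminator: reg == -1 */
	};

	run_seq(demo);
	return 0;
}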
@@ -1810,6 +1829,17 @@ struct em28xx_board em28xx_boards[] = {
.has_dvb = 1,
.ir_codes = RC_MAP_PINNACLE_PCTV_HD,
},
+ /* 2013:024c PCTV DVB-S2 Stick 460e
+ * Empia EM28174, NXP TDA10071, Conexant CX24118A and Allegro A8293 */
+ [EM28174_BOARD_PCTV_460E] = {
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ,
+ .name = "PCTV DVB-S2 Stick (460e)",
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = pctv_460e,
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1893,6 +1923,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
{ USB_DEVICE(0x0ccd, 0x0096),
.driver_info = EM2860_BOARD_TERRATEC_GRABBY },
+ { USB_DEVICE(0x0ccd, 0x10AF),
+ .driver_info = EM2860_BOARD_TERRATEC_GRABBY },
{ USB_DEVICE(0x0fd9, 0x0033),
.driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE},
{ USB_DEVICE(0x185b, 0x2870),
@@ -1941,6 +1973,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2870_BOARD_KWORLD_A340 },
{ USB_DEVICE(0x2013, 0x024f),
.driver_info = EM28174_BOARD_PCTV_290E },
+ { USB_DEVICE(0x2013, 0x024c),
+ .driver_info = EM28174_BOARD_PCTV_460E },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2768,9 +2802,9 @@ static void flush_request_modules(struct em28xx *dev)
#endif /* CONFIG_MODULES */
/*
- * em28xx_realease_resources()
+ * em28xx_release_resources()
* unregisters the v4l2,i2c and usb devices
- * called when the device gets disconected or at module unload
+ * called when the device gets disconnected or at module unload
*/
void em28xx_release_resources(struct em28xx *dev)
{
@@ -2784,8 +2818,6 @@ void em28xx_release_resources(struct em28xx *dev)
em28xx_release_analog_resources(dev);
- em28xx_remove_from_devlist(dev);
-
em28xx_i2c_unregister(dev);
v4l2_device_unregister(&dev->v4l2_dev);
@@ -2793,7 +2825,7 @@ void em28xx_release_resources(struct em28xx *dev)
usb_put_dev(dev->udev);
/* Mark device as unused */
- em28xx_devused &= ~(1 << dev->devno);
+ clear_bit(dev->devno, &em28xx_devused);
};
/*
@@ -2806,7 +2838,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
{
struct em28xx *dev = *devhandle;
int retval;
- int errCode;
dev->udev = udev;
mutex_init(&dev->ctrl_urb_lock);
@@ -2883,10 +2914,9 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
}
if (dev->is_audio_only) {
- errCode = em28xx_audio_setup(dev);
- if (errCode)
+ retval = em28xx_audio_setup(dev);
+ if (retval)
return -ENODEV;
- em28xx_add_into_devlist(dev);
em28xx_init_extension(dev);
return 0;
@@ -2903,7 +2933,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
/* Resets I2C speed */
em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
if (retval < 0) {
- em28xx_errdev("%s: em28xx_write_regs_req failed!"
+ em28xx_errdev("%s: em28xx_write_reg failed!"
" retval [%d]\n",
__func__, retval);
return retval;
@@ -2917,12 +2947,11 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
}
/* register i2c bus */
- errCode = em28xx_i2c_register(dev);
- if (errCode < 0) {
- v4l2_device_unregister(&dev->v4l2_dev);
- em28xx_errdev("%s: em28xx_i2c_register - errCode [%d]!\n",
- __func__, errCode);
- return errCode;
+ retval = em28xx_i2c_register(dev);
+ if (retval < 0) {
+ em28xx_errdev("%s: em28xx_i2c_register - error [%d]!\n",
+ __func__, retval);
+ goto unregister_dev;
}
/*
@@ -2936,11 +2965,11 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
em28xx_card_setup(dev);
/* Configure audio */
- errCode = em28xx_audio_setup(dev);
- if (errCode < 0) {
- v4l2_device_unregister(&dev->v4l2_dev);
- em28xx_errdev("%s: Error while setting audio - errCode [%d]!\n",
- __func__, errCode);
+ retval = em28xx_audio_setup(dev);
+ if (retval < 0) {
+ em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
+ __func__, retval);
+ goto fail;
}
/* wake i2c devices */
@@ -2954,41 +2983,41 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
if (dev->board.has_msp34xx) {
/* Send a reset to other chips via gpio */
- errCode = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf7);
- if (errCode < 0) {
- em28xx_errdev("%s: em28xx_write_regs_req - "
- "msp34xx(1) failed! errCode [%d]\n",
- __func__, errCode);
- return errCode;
+ retval = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf7);
+ if (retval < 0) {
+ em28xx_errdev("%s: em28xx_write_reg - "
+ "msp34xx(1) failed! error [%d]\n",
+ __func__, retval);
+ goto fail;
}
msleep(3);
- errCode = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xff);
- if (errCode < 0) {
- em28xx_errdev("%s: em28xx_write_regs_req - "
- "msp34xx(2) failed! errCode [%d]\n",
- __func__, errCode);
- return errCode;
+ retval = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xff);
+ if (retval < 0) {
+ em28xx_errdev("%s: em28xx_write_reg - "
+ "msp34xx(2) failed! error [%d]\n",
+ __func__, retval);
+ goto fail;
}
msleep(3);
}
- em28xx_add_into_devlist(dev);
-
retval = em28xx_register_analog_devices(dev);
if (retval < 0) {
- em28xx_release_resources(dev);
- goto fail_reg_devices;
+ goto fail;
}
- em28xx_init_extension(dev);
-
/* Save some power by putting tuner to sleep */
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
return 0;
-fail_reg_devices:
+fail:
+ em28xx_i2c_unregister(dev);
+
+unregister_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
return retval;
}
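The error-path rework above replaces scattered per-call cleanup with the usual goto-unwind pattern: each failure jumps to a label that undoes only the steps that already succeeded, in reverse order. A self-contained plain-C illustration of the same shape; all function names below are placeholders, not driver symbols:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's registration steps. */
static int register_v4l2(void)    { puts("v4l2 registered");   return 0; }
static int register_i2c(void)     { puts("i2c registered");    return 0; }
static int setup_audio(void)      { puts("audio configured");  return -EIO; }
static void unregister_i2c(void)  { puts("i2c unregistered");  }
static void unregister_v4l2(void) { puts("v4l2 unregistered"); }

static int example_init(void)
{
	int ret;

	ret = register_v4l2();
	if (ret)
		return ret;

	ret = register_i2c();
	if (ret)
		goto err_unregister_v4l2;

	ret = setup_audio();
	if (ret)
		goto err_unregister_i2c;

	return 0;

err_unregister_i2c:
	unregister_i2c();
err_unregister_v4l2:
	unregister_v4l2();
	return ret;
}

int main(void)
{
	return example_init() ? 1 : 0;
}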
@@ -3015,8 +3044,16 @@ static int em28xx_usb_probe(struct usb_interface *interface,
udev = usb_get_dev(interface_to_usbdev(interface));
/* Check to see next free device and mark as used */
- nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
- em28xx_devused |= 1<<nr;
+ do {
+ nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
+ if (nr >= EM28XX_MAXBOARDS) {
+ /* No free device slots */
+ printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
+ EM28XX_MAXBOARDS);
+ retval = -ENOMEM;
+ goto err_no_slot;
+ }
+ } while (test_and_set_bit(nr, &em28xx_devused));
/* Don't register audio interfaces */
if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
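The retry loop added above closes a race: two probes could previously read the same free bit from em28xx_devused before either had set it. A plain-C analogue of the pattern using GCC atomic builtins; the real code uses find_first_zero_bit() and test_and_set_bit(), and the names below are illustrative:

#include <errno.h>
#include <stdio.h>

#define MAX_BOARDS 8			/* stands in for EM28XX_MAXBOARDS */
static unsigned long devused;		/* stands in for em28xx_devused */

static int claim_slot(void)
{
	int nr;
	unsigned long old;

	do {
		/* find the first bit currently seen as free */
		for (nr = 0; nr < MAX_BOARDS; nr++)
			if (!(__atomic_load_n(&devused, __ATOMIC_RELAXED) &
			      (1ul << nr)))
				break;
		if (nr >= MAX_BOARDS)
			return -ENOMEM;	/* no free device slots */
		old = __atomic_fetch_or(&devused, 1ul << nr, __ATOMIC_SEQ_CST);
	} while (old & (1ul << nr));	/* lost the race, try again */

	return nr;			/* slot now owned by this probe */
}

int main(void)
{
	printf("claimed slot %d\n", claim_slot());
	printf("claimed slot %d\n", claim_slot());
	return 0;
}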
@@ -3027,7 +3064,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
ifnum,
interface->altsetting[0].desc.bInterfaceClass);
- em28xx_devused &= ~(1<<nr);
retval = -ENODEV;
goto err;
}
@@ -3076,7 +3112,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
em28xx_err(DRIVER_NAME " This is an anciliary "
"interface not used by the driver\n");
- em28xx_devused &= ~(1<<nr);
retval = -ENODEV;
goto err;
}
@@ -3132,29 +3167,19 @@ static int em28xx_usb_probe(struct usb_interface *interface,
printk(DRIVER_NAME ": Device initialization failed.\n");
printk(DRIVER_NAME ": Device must be connected to a high-speed"
" USB 2.0 port.\n");
- em28xx_devused &= ~(1<<nr);
retval = -ENODEV;
goto err;
}
- if (nr >= EM28XX_MAXBOARDS) {
- printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
- EM28XX_MAXBOARDS);
- em28xx_devused &= ~(1<<nr);
- retval = -ENOMEM;
- goto err;
- }
-
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
em28xx_err(DRIVER_NAME ": out of memory!\n");
- em28xx_devused &= ~(1<<nr);
retval = -ENOMEM;
goto err;
}
- snprintf(dev->name, 29, "em28xx #%d", nr);
+ snprintf(dev->name, sizeof(dev->name), "em28xx #%d", nr);
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
@@ -3177,7 +3202,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (dev->alt_max_pkt_size == NULL) {
em28xx_errdev("out of memory!\n");
- em28xx_devused &= ~(1<<nr);
kfree(dev);
retval = -ENOMEM;
goto err;
@@ -3204,8 +3228,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
mutex_lock(&dev->lock);
retval = em28xx_init_dev(&dev, udev, interface, nr);
if (retval) {
- em28xx_devused &= ~(1<<dev->devno);
mutex_unlock(&dev->lock);
+ kfree(dev->alt_max_pkt_size);
kfree(dev);
goto err;
}
@@ -3217,15 +3241,26 @@ static int em28xx_usb_probe(struct usb_interface *interface,
*/
mutex_unlock(&dev->lock);
+ /*
+ * These extensions can be modules. If the modules are already
+ * loaded then we can initialise the device now, otherwise we
+ * will initialise it when the modules load instead.
+ */
+ em28xx_init_extension(dev);
+
return 0;
err:
+ clear_bit(nr, &em28xx_devused);
+
+err_no_slot:
+ usb_put_dev(udev);
return retval;
}
/*
* em28xx_usb_disconnect()
- * called when the device gets diconencted
+ * called when the device gets disconnected
* video device will be unregistered on v4l2_close in case it is still open
*/
static void em28xx_usb_disconnect(struct usb_interface *interface)
@@ -3273,10 +3308,10 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
em28xx_release_resources(dev);
}
- em28xx_close_extension(dev);
-
mutex_unlock(&dev->lock);
+ em28xx_close_extension(dev);
+
if (!dev->users) {
kfree(dev->alt_max_pkt_size);
kfree(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 57b1b5c6d88..804a4ab47ac 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -1184,25 +1184,6 @@ static LIST_HEAD(em28xx_devlist);
static DEFINE_MUTEX(em28xx_devlist_mutex);
/*
- * em28xx_realease_resources()
- * unregisters the v4l2,i2c and usb devices
- * called when the device gets disconected or at module unload
-*/
-void em28xx_remove_from_devlist(struct em28xx *dev)
-{
- mutex_lock(&em28xx_devlist_mutex);
- list_del(&dev->devlist);
- mutex_unlock(&em28xx_devlist_mutex);
-};
-
-void em28xx_add_into_devlist(struct em28xx *dev)
-{
- mutex_lock(&em28xx_devlist_mutex);
- list_add_tail(&dev->devlist, &em28xx_devlist);
- mutex_unlock(&em28xx_devlist_mutex);
-};
-
-/*
* Extension interface
*/
@@ -1217,8 +1198,8 @@ int em28xx_register_extension(struct em28xx_ops *ops)
list_for_each_entry(dev, &em28xx_devlist, devlist) {
ops->init(dev);
}
- printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
mutex_unlock(&em28xx_devlist_mutex);
+ printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
return 0;
}
EXPORT_SYMBOL(em28xx_register_extension);
@@ -1231,36 +1212,34 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
list_for_each_entry(dev, &em28xx_devlist, devlist) {
ops->fini(dev);
}
- printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
list_del(&ops->next);
mutex_unlock(&em28xx_devlist_mutex);
+ printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
}
EXPORT_SYMBOL(em28xx_unregister_extension);
void em28xx_init_extension(struct em28xx *dev)
{
- struct em28xx_ops *ops = NULL;
+ const struct em28xx_ops *ops = NULL;
mutex_lock(&em28xx_devlist_mutex);
- if (!list_empty(&em28xx_extension_devlist)) {
- list_for_each_entry(ops, &em28xx_extension_devlist, next) {
- if (ops->init)
- ops->init(dev);
- }
+ list_add_tail(&dev->devlist, &em28xx_devlist);
+ list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ if (ops->init)
+ ops->init(dev);
}
mutex_unlock(&em28xx_devlist_mutex);
}
void em28xx_close_extension(struct em28xx *dev)
{
- struct em28xx_ops *ops = NULL;
+ const struct em28xx_ops *ops = NULL;
mutex_lock(&em28xx_devlist_mutex);
- if (!list_empty(&em28xx_extension_devlist)) {
- list_for_each_entry(ops, &em28xx_extension_devlist, next) {
- if (ops->fini)
- ops->fini(dev);
- }
+ list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ if (ops->fini)
+ ops->fini(dev);
}
+ list_del(&dev->devlist);
mutex_unlock(&em28xx_devlist_mutex);
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index e5916dee409..cef7a2d409c 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -42,6 +42,8 @@
#include "cxd2820r.h"
#include "tda18271c2dd.h"
#include "drxk.h"
+#include "tda10071.h"
+#include "a8293.h"
MODULE_DESCRIPTION("driver for em28xx based DVB cards");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
@@ -122,7 +124,7 @@ static inline void print_err_status(struct em28xx *dev,
}
}
-static inline int dvb_isoc_copy(struct em28xx *dev, struct urb *urb)
+static inline int em28xx_dvb_isoc_copy(struct em28xx *dev, struct urb *urb)
{
int i;
@@ -155,7 +157,7 @@ static inline int dvb_isoc_copy(struct em28xx *dev, struct urb *urb)
return 0;
}
-static int start_streaming(struct em28xx_dvb *dvb)
+static int em28xx_start_streaming(struct em28xx_dvb *dvb)
{
int rc;
struct em28xx *dev = dvb->adapter.priv;
@@ -175,10 +177,10 @@ static int start_streaming(struct em28xx_dvb *dvb)
return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS,
EM28XX_DVB_NUM_BUFS, max_dvb_packet_size,
- dvb_isoc_copy);
+ em28xx_dvb_isoc_copy);
}
-static int stop_streaming(struct em28xx_dvb *dvb)
+static int em28xx_stop_streaming(struct em28xx_dvb *dvb)
{
struct em28xx *dev = dvb->adapter.priv;
@@ -189,7 +191,7 @@ static int stop_streaming(struct em28xx_dvb *dvb)
return 0;
}
-static int start_feed(struct dvb_demux_feed *feed)
+static int em28xx_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct em28xx_dvb *dvb = demux->priv;
@@ -203,7 +205,7 @@ static int start_feed(struct dvb_demux_feed *feed)
rc = dvb->nfeeds;
if (dvb->nfeeds == 1) {
- ret = start_streaming(dvb);
+ ret = em28xx_start_streaming(dvb);
if (ret < 0)
rc = ret;
}
@@ -212,7 +214,7 @@ static int start_feed(struct dvb_demux_feed *feed)
return rc;
}
-static int stop_feed(struct dvb_demux_feed *feed)
+static int em28xx_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct em28xx_dvb *dvb = demux->priv;
@@ -222,7 +224,7 @@ static int stop_feed(struct dvb_demux_feed *feed)
dvb->nfeeds--;
if (0 == dvb->nfeeds)
- err = stop_streaming(dvb);
+ err = em28xx_stop_streaming(dvb);
mutex_unlock(&dvb->lock);
return err;
@@ -380,7 +382,7 @@ static void terratec_h5_init(struct em28xx *dev)
em28xx_gpio_set(dev, terratec_h5_end);
};
-static int mt352_terratec_xs_init(struct dvb_frontend *fe)
+static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
{
/* Values extracted from a USB trace of the Terratec Windows driver */
static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x2c };
@@ -412,7 +414,7 @@ static struct mt352_config terratec_xs_mt352_cfg = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.if2 = 45600,
- .demod_init = mt352_terratec_xs_init,
+ .demod_init = em28xx_mt352_terratec_xs_init,
};
static struct tda10023_config em28xx_tda10023_config = {
@@ -438,11 +440,25 @@ static struct cxd2820r_config em28xx_cxd2820r_config = {
static struct tda18271_config em28xx_cxd2820r_tda18271_config = {
.output_opt = TDA18271_OUTPUT_LT_OFF,
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
+static const struct tda10071_config em28xx_tda10071_config = {
+ .i2c_address = 0x55, /* (0xaa >> 1) */
+ .i2c_wr_max = 64,
+ .ts_mode = TDA10071_TS_SERIAL,
+ .spec_inv = 0,
+ .xtal = 40444000, /* 40.444 MHz */
+ .pll_multiplier = 20,
+};
+
+static const struct a8293_config em28xx_a8293_config = {
+ .i2c_addr = 0x08, /* (0x10 >> 1) */
};
/* ------------------------------------------------------------------ */
-static int attach_xc3028(u8 addr, struct em28xx *dev)
+static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
{
struct dvb_frontend *fe;
struct xc2028_config cfg;
@@ -472,10 +488,8 @@ static int attach_xc3028(u8 addr, struct em28xx *dev)
/* ------------------------------------------------------------------ */
-static int register_dvb(struct em28xx_dvb *dvb,
- struct module *module,
- struct em28xx *dev,
- struct device *device)
+static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
+ struct em28xx *dev, struct device *device)
{
int result;
@@ -522,8 +536,8 @@ static int register_dvb(struct em28xx_dvb *dvb,
dvb->demux.priv = dvb;
dvb->demux.filternum = 256;
dvb->demux.feednum = 256;
- dvb->demux.start_feed = start_feed;
- dvb->demux.stop_feed = stop_feed;
+ dvb->demux.start_feed = em28xx_start_feed;
+ dvb->demux.stop_feed = em28xx_stop_feed;
result = dvb_dmx_init(&dvb->demux);
if (result < 0) {
@@ -591,7 +605,7 @@ fail_adapter:
return result;
}
-static void unregister_dvb(struct em28xx_dvb *dvb)
+static void em28xx_unregister_dvb(struct em28xx_dvb *dvb)
{
dvb_net_release(&dvb->net);
dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
@@ -607,9 +621,9 @@ static void unregister_dvb(struct em28xx_dvb *dvb)
dvb_unregister_adapter(&dvb->adapter);
}
-static int dvb_init(struct em28xx *dev)
+static int em28xx_dvb_init(struct em28xx *dev)
{
- int result = 0;
+ int result = 0, mfe_shared = 0;
struct em28xx_dvb *dvb;
if (!dev->board.has_dvb) {
@@ -648,7 +662,7 @@ static int dvb_init(struct em28xx *dev)
dvb->fe[0] = dvb_attach(lgdt330x_attach,
&em2880_lgdt3303_dev,
&dev->i2c_adap);
- if (attach_xc3028(0x61, dev) < 0) {
+ if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
}
@@ -657,7 +671,7 @@ static int dvb_init(struct em28xx *dev)
dvb->fe[0] = dvb_attach(zl10353_attach,
&em28xx_zl10353_with_xc3028,
&dev->i2c_adap);
- if (attach_xc3028(0x61, dev) < 0) {
+ if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
}
@@ -668,7 +682,7 @@ static int dvb_init(struct em28xx *dev)
dvb->fe[0] = dvb_attach(zl10353_attach,
&em28xx_zl10353_xc3028_no_i2c_gate,
&dev->i2c_adap);
- if (attach_xc3028(0x61, dev) < 0) {
+ if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
}
@@ -689,7 +703,7 @@ static int dvb_init(struct em28xx *dev)
&dev->i2c_adap);
}
- if (attach_xc3028(0x61, dev) < 0) {
+ if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
}
@@ -699,7 +713,7 @@ static int dvb_init(struct em28xx *dev)
dvb->fe[0] = dvb_attach(s5h1409_attach,
&em28xx_s5h1409_with_xc3028,
&dev->i2c_adap);
- if (attach_xc3028(0x61, dev) < 0) {
+ if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
}
@@ -720,7 +734,7 @@ static int dvb_init(struct em28xx *dev)
case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
dvb->fe[0] = dvb_attach(drxd_attach, &em28xx_drxd, NULL,
&dev->i2c_adap, &dev->udev->dev);
- if (attach_xc3028(0x61, dev) < 0) {
+ if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
}
@@ -753,11 +767,9 @@ static int dvb_init(struct em28xx *dev)
dvb->fe[0] = dvb_attach(cxd2820r_attach,
&em28xx_cxd2820r_config, &dev->i2c_adap, NULL);
if (dvb->fe[0]) {
- struct i2c_adapter *i2c_tuner;
- i2c_tuner = cxd2820r_get_tuner_i2c_adapter(dvb->fe[0]);
/* FE 0 attach tuner */
if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
- i2c_tuner, &em28xx_cxd2820r_tda18271_config)) {
+ &dev->i2c_adap, &em28xx_cxd2820r_tda18271_config)) {
dvb_frontend_detach(dvb->fe[0]);
result = -EINVAL;
goto out_free;
@@ -768,10 +780,12 @@ static int dvb_init(struct em28xx *dev)
dvb->fe[1]->id = 1;
/* FE 1 attach tuner */
if (!dvb_attach(tda18271_attach, dvb->fe[1], 0x60,
- i2c_tuner, &em28xx_cxd2820r_tda18271_config)) {
+ &dev->i2c_adap, &em28xx_cxd2820r_tda18271_config)) {
dvb_frontend_detach(dvb->fe[1]);
/* leave FE 0 still active */
}
+
+ mfe_shared = 1;
}
break;
case EM2884_BOARD_TERRATEC_H5:
@@ -809,6 +823,16 @@ static int dvb_init(struct em28xx *dev)
sizeof(dvb->fe[0]->ops.tuner_ops));
break;
+ case EM28174_BOARD_PCTV_460E:
+ /* attach demod */
+ dvb->fe[0] = dvb_attach(tda10071_attach,
+ &em28xx_tda10071_config, &dev->i2c_adap);
+
+ /* attach SEC */
+ if (dvb->fe[0])
+ dvb_attach(a8293_attach, dvb->fe[0], &dev->i2c_adap,
+ &em28xx_a8293_config);
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
@@ -823,11 +847,14 @@ static int dvb_init(struct em28xx *dev)
dvb->fe[0]->callback = em28xx_tuner_callback;
/* register everything */
- result = register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
+ result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
if (result < 0)
goto out_free;
+ /* MFE lock */
+ dvb->adapter.mfe_shared = mfe_shared;
+
em28xx_info("Successfully loaded em28xx-dvb\n");
ret:
em28xx_set_mode(dev, EM28XX_SUSPEND);
@@ -840,7 +867,14 @@ out_free:
goto ret;
}
-static int dvb_fini(struct em28xx *dev)
+static inline void prevent_sleep(struct dvb_frontend_ops *ops)
+{
+ ops->set_voltage = NULL;
+ ops->sleep = NULL;
+ ops->tuner_ops.sleep = NULL;
+}
+
+static int em28xx_dvb_fini(struct em28xx *dev)
{
if (!dev->board.has_dvb) {
/* This device does not support the extension */
@@ -848,8 +882,19 @@ static int dvb_fini(struct em28xx *dev)
}
if (dev->dvb) {
- unregister_dvb(dev->dvb);
- kfree(dev->dvb);
+ struct em28xx_dvb *dvb = dev->dvb;
+
+ if (dev->state & DEV_DISCONNECTED) {
+ /* We cannot tell the device to sleep
+ * once it has been unplugged. */
+ if (dvb->fe[0])
+ prevent_sleep(&dvb->fe[0]->ops);
+ if (dvb->fe[1])
+ prevent_sleep(&dvb->fe[1]->ops);
+ }
+
+ em28xx_unregister_dvb(dvb);
+ kfree(dvb);
dev->dvb = NULL;
}
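prevent_sleep() above clears the frontend's sleep and voltage callbacks once the USB device is gone, so the DVB teardown path cannot try to talk to hardware that no longer exists. A small stand-alone illustration of the idea; the struct and function names below are simplified placeholders, not the real dvb_frontend_ops:

#include <stddef.h>
#include <stdio.h>

struct fe_ops {
	int (*sleep)(void);
	int (*set_voltage)(int v);
};

static int dummy_sleep(void)    { puts("fe sleep");        return 0; }
static int dummy_voltage(int v) { printf("voltage %d\n", v); return 0; }

static void prevent_sleep(struct fe_ops *ops)
{
	ops->sleep = NULL;
	ops->set_voltage = NULL;
}

static void detach(struct fe_ops *ops)
{
	if (ops->set_voltage)
		ops->set_voltage(0);	/* only if hardware is still there */
	if (ops->sleep)
		ops->sleep();
	puts("frontend detached");
}

int main(void)
{
	struct fe_ops ops = { dummy_sleep, dummy_voltage };

	prevent_sleep(&ops);	/* device was unplugged */
	detach(&ops);		/* no I/O is attempted */
	return 0;
}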
@@ -859,8 +904,8 @@ static int dvb_fini(struct em28xx *dev)
static struct em28xx_ops dvb_ops = {
.id = EM28XX_DVB,
.name = "Em28xx dvb Extension",
- .init = dvb_init,
- .fini = dvb_fini,
+ .init = em28xx_dvb_init,
+ .fini = em28xx_dvb_fini,
};
static int __init em28xx_dvb_register(void)
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 5d12b14282e..679da480428 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -463,11 +463,11 @@ int em28xx_ir_fini(struct em28xx *dev)
if (!ir)
return 0;
- em28xx_ir_stop(ir->rc);
- rc_unregister_device(ir->rc);
- kfree(ir);
+ if (ir->rc)
+ rc_unregister_device(ir->rc);
/* done */
+ kfree(ir);
dev->ir = NULL;
return 0;
}
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index d176dc0394e..9b4557a2f6d 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1156,6 +1156,21 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
return 0;
}
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *norm)
+{
+ struct em28xx_fh *fh = priv;
+ struct em28xx *dev = fh->dev;
+ int rc;
+
+ rc = check_dev(dev);
+ if (rc < 0)
+ return rc;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, querystd, norm);
+
+ return 0;
+}
+
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct em28xx_fh *fh = priv;
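The new VIDIOC_QUERYSTD handler above lets applications ask the decoder which TV standard it currently detects. A user-space sketch; the /dev/video0 node name is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
	v4l2_std_id std = 0;

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_QUERYSTD, &std) == 0)
		printf("detected standard mask: 0x%llx (%s)\n",
		       (unsigned long long)std,
		       (std & V4L2_STD_525_60) ?
				"60 Hz family" : "50 Hz family");
	return 0;
}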
@@ -1787,6 +1802,45 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct em28xx_fh *fh = priv;
+ struct em28xx *dev = fh->dev;
+ struct em28xx_fmt *fmt;
+ unsigned int maxw = norm_maxw(dev);
+ unsigned int maxh = norm_maxh(dev);
+
+ fmt = format_by_fourcc(fsize->pixel_format);
+ if (!fmt) {
+ em28xx_videodbg("Fourcc format (%08x) invalid.\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ if (dev->board.is_em2800) {
+ if (fsize->index > 1)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = maxw / (1 + fsize->index);
+ fsize->discrete.height = maxh / (1 + fsize->index);
+ return 0;
+ }
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ /* Report a continuous range */
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = 48;
+ fsize->stepwise.min_height = 32;
+ fsize->stepwise.max_width = maxw;
+ fsize->stepwise.max_height = maxh;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+ return 0;
+}
+
/* Sliced VBI ioctls */
static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
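A user-space sketch of walking the sizes reported by vidioc_enum_framesizes() above: em2800 devices return discrete entries, everything else a single stepwise range. The /dev/video0 node and the YUYV fourcc are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
	struct v4l2_frmsizeenum fsize;
	unsigned int i;

	if (fd < 0)
		return 1;

	for (i = 0; ; i++) {
		memset(&fsize, 0, sizeof(fsize));
		fsize.index = i;
		fsize.pixel_format = V4L2_PIX_FMT_YUYV;
		if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) < 0)
			break;
		if (fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE)
			printf("%ux%u\n", fsize.discrete.width,
			       fsize.discrete.height);
		else	/* stepwise: a continuous min..max range */
			printf("%ux%u .. %ux%u\n",
			       fsize.stepwise.min_width,
			       fsize.stepwise.min_height,
			       fsize.stepwise.max_width,
			       fsize.stepwise.max_height);
	}
	return 0;
}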
@@ -2200,6 +2254,7 @@ static int em28xx_v4l2_close(struct file *filp)
free the remaining resources */
if (dev->state & DEV_DISCONNECTED) {
em28xx_release_resources(dev);
+ kfree(dev->alt_max_pkt_size);
kfree(dev);
return 0;
}
@@ -2340,10 +2395,10 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_g_audio = vidioc_g_audio,
.vidioc_s_audio = vidioc_s_audio,
.vidioc_cropcap = vidioc_cropcap,
-
.vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap,
.vidioc_try_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap,
.vidioc_s_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap,
@@ -2353,6 +2408,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_g_std = vidioc_g_std,
+ .vidioc_querystd = vidioc_querystd,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_parm = vidioc_g_parm,
.vidioc_s_parm = vidioc_s_parm,
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index d80658bf3da..2a2cb7ed001 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -120,6 +120,7 @@
#define EM2874_BOARD_LEADERSHIP_ISDBT 77
#define EM28174_BOARD_PCTV_290E 78
#define EM2884_BOARD_TERRATEC_H5 79
+#define EM28174_BOARD_PCTV_460E 80
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -677,8 +678,6 @@ int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
void em28xx_wake_i2c(struct em28xx *dev);
-void em28xx_remove_from_devlist(struct em28xx *dev);
-void em28xx_add_into_devlist(struct em28xx *dev);
int em28xx_register_extension(struct em28xx_ops *dev);
void em28xx_unregister_extension(struct em28xx_ops *dev);
void em28xx_init_extension(struct em28xx *dev);
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index 14bb907d650..337ded4a638 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -165,45 +165,49 @@ et61x251_attach_sensor(struct et61x251_device* cam,
#undef DBG
#undef KDBG
#ifdef ET61X251_DEBUG
-# define DBG(level, fmt, args...) \
-do { \
- if (debug >= (level)) { \
- if ((level) == 1) \
- dev_err(&cam->usbdev->dev, fmt "\n", ## args); \
- else if ((level) == 2) \
- dev_info(&cam->usbdev->dev, fmt "\n", ## args); \
- else if ((level) >= 3) \
- dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
- __FILE__, __func__, __LINE__ , ## args); \
- } \
+#define DBG(level, fmt, ...) \
+do { \
+ if (debug >= (level)) { \
+ if ((level) == 1) \
+ dev_err(&cam->usbdev->dev, fmt "\n", \
+ ##__VA_ARGS__); \
+ else if ((level) == 2) \
+ dev_info(&cam->usbdev->dev, fmt "\n", \
+ ##__VA_ARGS__); \
+ else if ((level) >= 3) \
+ dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
+ __FILE__, __func__, __LINE__, \
+ ##__VA_ARGS__); \
+ } \
} while (0)
-# define KDBG(level, fmt, args...) \
-do { \
- if (debug >= (level)) { \
- if ((level) == 1 || (level) == 2) \
- pr_info("et61x251: " fmt "\n", ## args); \
- else if ((level) == 3) \
- pr_debug("sn9c102: [%s:%s:%d] " fmt "\n", __FILE__, \
- __func__, __LINE__ , ## args); \
- } \
+#define KDBG(level, fmt, ...) \
+do { \
+ if (debug >= (level)) { \
+ if ((level) == 1 || (level) == 2) \
+ pr_info(fmt "\n", ##__VA_ARGS__); \
+ else if ((level) == 3) \
+ pr_debug("[%s:%s:%d] " fmt "\n", \
+ __FILE__, __func__, __LINE__, \
+ ##__VA_ARGS__); \
+ } \
} while (0)
-# define V4LDBG(level, name, cmd) \
-do { \
- if (debug >= (level)) \
- v4l_print_ioctl(name, cmd); \
+#define V4LDBG(level, name, cmd) \
+do { \
+ if (debug >= (level)) \
+ v4l_print_ioctl(name, cmd); \
} while (0)
#else
-# define DBG(level, fmt, args...) do {;} while(0)
-# define KDBG(level, fmt, args...) do {;} while(0)
-# define V4LDBG(level, name, cmd) do {;} while(0)
+#define DBG(level, fmt, ...) do {;} while(0)
+#define KDBG(level, fmt, ...) do {;} while(0)
+#define V4LDBG(level, name, cmd) do {;} while(0)
#endif
#undef PDBG
-#define PDBG(fmt, args...) \
-dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", __FILE__, __func__, \
- __LINE__ , ## args)
+#define PDBG(fmt, ...) \
+ dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
+ __FILE__, __func__, __LINE__, ##__VA_ARGS__)
#undef PDBGG
-#define PDBGG(fmt, args...) do {;} while(0) /* placeholder */
+#define PDBGG(fmt, args...) do {;} while (0) /* placeholder */
#endif /* _ET61X251_H_ */
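The header above moves from the GNU "args..." macro form to C99 __VA_ARGS__ with the ## extension, which drops the trailing comma when no arguments are passed. A compilable sketch of the same construct outside the kernel; DEMO_DEBUG and the message text are made up for the example:

#include <stdio.h>

#ifndef DEMO_DEBUG
#define DEMO_DEBUG 2
#endif

#define DBG(level, fmt, ...)						\
do {									\
	if (DEMO_DEBUG >= (level))					\
		fprintf(stderr, "[%s:%d] " fmt "\n",			\
			__func__, __LINE__, ##__VA_ARGS__);		\
} while (0)

int main(void)
{
	DBG(1, "no extra arguments");		/* trailing comma removed */
	DBG(2, "value is %d", 42);
	return 0;
}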
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 9a1e80a1e14..d3777c86e1d 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/media/video/et61x251/et61x251_tas5130d1b.c b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
index 04b7fbb310a..ced2e167935 100644
--- a/drivers/media/video/et61x251/et61x251_tas5130d1b.c
+++ b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
@@ -19,6 +19,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "et61x251_sensor.h"
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 43d9a20caeb..103af3fe5aa 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -356,6 +356,16 @@ config USB_GSPCA_T613
To compile this driver as a module, choose M here: the
module will be called gspca_t613.
+config USB_GSPCA_TOPRO
+ tristate "TOPRO USB Camera Driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the
+ Topro TP6800 and TP6810 chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_topro.
+
config USB_GSPCA_TV8532
tristate "TV8532 USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index d6364a86333..f345f494d0f 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_USB_GSPCA_SUNPLUS) += gspca_sunplus.o
obj-$(CONFIG_USB_GSPCA_STK014) += gspca_stk014.o
obj-$(CONFIG_USB_GSPCA_STV0680) += gspca_stv0680.o
obj-$(CONFIG_USB_GSPCA_T613) += gspca_t613.o
+obj-$(CONFIG_USB_GSPCA_TOPRO) += gspca_topro.o
obj-$(CONFIG_USB_GSPCA_TV8532) += gspca_tv8532.o
obj-$(CONFIG_USB_GSPCA_VC032X) += gspca_vc032x.o
obj-$(CONFIG_USB_GSPCA_VICAM) += gspca_vicam.o
@@ -78,6 +79,7 @@ gspca_stk014-objs := stk014.o
gspca_stv0680-objs := stv0680.o
gspca_sunplus-objs := sunplus.o
gspca_t613-objs := t613.o
+gspca_topro-objs := topro.o
gspca_tv8532-objs := tv8532.o
gspca_vc032x-objs := vc032x.o
gspca_vicam-objs := vicam.o
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index a09c4709d61..6ae26160b81 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "benq"
#include "gspca.h"
@@ -62,7 +64,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
0,
500);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -84,20 +86,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
-static int sd_isoc_init(struct gspca_dev *gspca_dev)
-{
- int ret;
-
- ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface,
- gspca_dev->nbalt - 1);
- if (ret < 0) {
- err("usb_set_interface failed");
- return ret;
- }
-/* reg_w(gspca_dev, 0x0003, 0x0002); */
- return 0;
-}
-
/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
@@ -113,7 +101,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
for (n = 0; n < 4; n++) {
urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL);
if (!urb) {
- err("usb_alloc_urb failed");
+ pr_err("usb_alloc_urb failed\n");
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
@@ -123,7 +111,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_alloc_coherent failed");
+ pr_err("usb_alloc_coherent failed\n");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
@@ -181,7 +169,7 @@ static void sd_isoc_irq(struct urb *urb)
if (gspca_dev->frozen)
return;
#endif
- err("urb status: %d", urb->status);
+ pr_err("urb status: %d\n", urb->status);
return;
}
@@ -209,7 +197,7 @@ static void sd_isoc_irq(struct urb *urb)
if (st == 0)
st = urb->iso_frame_desc[i].status;
if (st) {
- err("ISOC data error: [%d] status=%d",
+ pr_err("ISOC data error: [%d] status=%d\n",
i, st);
gspca_dev->last_packet_type = DISCARD_PACKET;
continue;
@@ -256,10 +244,10 @@ static void sd_isoc_irq(struct urb *urb)
/* resubmit the URBs */
st = usb_submit_urb(urb0, GFP_ATOMIC);
if (st < 0)
- err("usb_submit_urb(0) ret %d", st);
+ pr_err("usb_submit_urb(0) ret %d\n", st);
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- err("usb_submit_urb() ret %d", st);
+ pr_err("usb_submit_urb() ret %d\n", st);
}
/* sub-driver description */
@@ -269,7 +257,6 @@ static const struct sd_desc sd_desc = {
.nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
- .isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 8b398493f96..4c56dbef6d9 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -19,6 +19,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "conex"
#include "gspca.h"
@@ -129,7 +131,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
- err("reg_r: buffer overflow");
+ pr_err("reg_r: buffer overflow\n");
return;
}
#endif
@@ -169,7 +171,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
- err("reg_w: buffer overflow");
+ pr_err("reg_w: buffer overflow\n");
return;
}
PDEBUG(D_USBO, "reg write [%02x] = %02x..", index, *buffer);
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index f2a9451eea1..f9b86b2484b 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "cpia1"
#include <linux/input.h>
@@ -550,8 +552,7 @@ retry:
gspca_dev->usb_buf, databytes, 1000);
if (ret < 0)
- err("usb_control_msg %02x, error %d", command[1],
- ret);
+ pr_err("usb_control_msg %02x, error %d\n", command[1], ret);
if (ret == -EPIPE && retries > 0) {
retries--;
@@ -1279,7 +1280,7 @@ static void monitor_exposure(struct gspca_dev *gspca_dev)
cmd[7] = 0;
ret = cpia_usb_transferCmd(gspca_dev, cmd);
if (ret) {
- err("ReadVPRegs(30,4,9,8) - failed: %d", ret);
+ pr_err("ReadVPRegs(30,4,9,8) - failed: %d\n", ret);
return;
}
exp_acc = gspca_dev->usb_buf[0];
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 4b2c483fce6..0357d6d461d 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "etoms"
#include "gspca.h"
@@ -236,7 +238,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
- err("reg_r: buffer overflow");
+ pr_err("reg_r: buffer overflow\n");
return;
}
#endif
@@ -274,7 +276,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
- err("reg_w: buffer overflow");
+ pr_err("reg_w: buffer overflow\n");
return;
}
PDEBUG(D_USBO, "reg write [%02x] = %02x..", index, *buffer);
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 987b4b69d7a..ea48200fd3a 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "finepix"
#include "gspca.h"
@@ -182,7 +184,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* Init the device */
ret = command(gspca_dev, 0);
if (ret < 0) {
- err("init failed %d", ret);
+ pr_err("init failed %d\n", ret);
return ret;
}
@@ -194,14 +196,14 @@ static int sd_start(struct gspca_dev *gspca_dev)
FPIX_MAX_TRANSFER, &len,
FPIX_TIMEOUT);
if (ret < 0) {
- err("usb_bulk_msg failed %d", ret);
+ pr_err("usb_bulk_msg failed %d\n", ret);
return ret;
}
/* Request a frame, but don't read it */
ret = command(gspca_dev, 1);
if (ret < 0) {
- err("frame request failed %d", ret);
+ pr_err("frame request failed %d\n", ret);
return ret;
}
diff --git a/drivers/media/video/gspca/gl860/Makefile b/drivers/media/video/gspca/gl860/Makefile
index 13c9403cc87..f511eccdfd9 100644
--- a/drivers/media/video/gspca/gl860/Makefile
+++ b/drivers/media/video/gspca/gl860/Makefile
@@ -6,5 +6,5 @@ gspca_gl860-objs := gl860.o \
gl860-ov9655.o \
gl860-mi2020.o
-EXTRA_CFLAGS += -Idrivers/media/video/gspca
+ccflags-y += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index e8e071aa212..2ced3b73994 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -18,6 +18,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "gspca.h"
#include "gl860.h"
@@ -572,9 +575,8 @@ int gl860_RTx(struct gspca_dev *gspca_dev,
}
if (r < 0)
- err("ctrl transfer failed %4d "
- "[p%02x r%d v%04x i%04x len%d]",
- r, pref, req, val, index, len);
+ pr_err("ctrl transfer failed %4d [p%02x r%d v%04x i%04x len%d]\n",
+ r, pref, req, val, index, len);
else if (len > 1 && r < len)
PDEBUG(D_ERR, "short ctrl transfer %d/%d", r, len);
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 5da4879f47f..881e04c7ffe 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -21,7 +21,9 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define MODULE_NAME "gspca"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define GSPCA_VERSION "2.14.0"
#include <linux/init.h>
#include <linux/fs.h>
@@ -50,12 +52,10 @@
#error "DEF_NURBS too big"
#endif
-#define DRIVER_VERSION_NUMBER "2.13.0"
-
MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION_NUMBER);
+MODULE_VERSION(GSPCA_VERSION);
#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE;
@@ -148,7 +148,7 @@ static void int_irq(struct urb *urb)
if (ret == 0) {
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0)
- err("Resubmit URB failed with error %i", ret);
+ pr_err("Resubmit URB failed with error %i\n", ret);
}
}
@@ -177,8 +177,8 @@ static int gspca_input_connect(struct gspca_dev *dev)
err = input_register_device(input_dev);
if (err) {
- err("Input device registration failed with error %i",
- err);
+ pr_err("Input device registration failed with error %i\n",
+ err);
input_dev->dev.parent = NULL;
input_free_device(input_dev);
} else {
@@ -323,8 +323,8 @@ static void fill_frame(struct gspca_dev *gspca_dev,
/* check the packet status and length */
st = urb->iso_frame_desc[i].status;
if (st) {
- err("ISOC data error: [%d] len=%d, status=%d",
- i, len, st);
+ pr_err("ISOC data error: [%d] len=%d, status=%d\n",
+ i, len, st);
gspca_dev->last_packet_type = DISCARD_PACKET;
continue;
}
@@ -346,7 +346,7 @@ resubmit:
/* resubmit the URB */
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- err("usb_submit_urb() ret %d", st);
+ pr_err("usb_submit_urb() ret %d\n", st);
}
/*
@@ -400,7 +400,7 @@ resubmit:
if (gspca_dev->cam.bulk_nurbs != 0) {
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- err("usb_submit_urb() ret %d", st);
+ pr_err("usb_submit_urb() ret %d\n", st);
}
}
@@ -464,7 +464,7 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
} else {
/* !! image is NULL only when last pkt is LAST or DISCARD
if (gspca_dev->image == NULL) {
- err("gspca_frame_add() image == NULL");
+ pr_err("gspca_frame_add() image == NULL\n");
return;
}
*/
@@ -497,19 +497,6 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
}
EXPORT_SYMBOL(gspca_frame_add);
-static int gspca_is_compressed(__u32 format)
-{
- switch (format) {
- case V4L2_PIX_FMT_MJPEG:
- case V4L2_PIX_FMT_JPEG:
- case V4L2_PIX_FMT_SPCA561:
- case V4L2_PIX_FMT_PAC207:
- case V4L2_PIX_FMT_MR97310A:
- return 1;
- }
- return 0;
-}
-
static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
enum v4l2_memory memory, unsigned int count)
{
@@ -525,7 +512,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
count = GSPCA_MAX_FRAMES - 1;
gspca_dev->frbuf = vmalloc_32(frsz * count);
if (!gspca_dev->frbuf) {
- err("frame alloc failed");
+ pr_err("frame alloc failed\n");
return -ENOMEM;
}
gspca_dev->capt_file = file;
@@ -597,7 +584,7 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
return 0;
ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
if (ret < 0)
- err("set alt 0 err %d", ret);
+ pr_err("set alt 0 err %d\n", ret);
return ret;
}
@@ -640,53 +627,104 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
return NULL;
}
+/* compute the minimum bandwidth for the current transfer */
+static u32 which_bandwidth(struct gspca_dev *gspca_dev)
+{
+ u32 bandwidth;
+ int i;
+
+ i = gspca_dev->curr_mode;
+ bandwidth = gspca_dev->cam.cam_mode[i].sizeimage;
+
+ /* if the image is compressed, estimate the mean image size */
+ if (bandwidth < gspca_dev->cam.cam_mode[i].width *
+ gspca_dev->cam.cam_mode[i].height)
+ bandwidth /= 3;
+
+ /* estimate the frame rate */
+ if (gspca_dev->sd_desc->get_streamparm) {
+ struct v4l2_streamparm parm;
+
+ parm.parm.capture.timeperframe.denominator = 15;
+ gspca_dev->sd_desc->get_streamparm(gspca_dev, &parm);
+ bandwidth *= parm.parm.capture.timeperframe.denominator;
+ } else {
+ bandwidth *= 15; /* 15 fps */
+ }
+
+ PDEBUG(D_STREAM, "min bandwidth: %d", bandwidth);
+ return bandwidth;
+}
+
+/* endpoint table */
+#define MAX_ALT 16
+struct ep_tb_s {
+ u32 alt;
+ u32 bandwidth;
+};
+
/*
- * look for an input (isoc or bulk) endpoint
- *
- * The endpoint is defined by the subdriver.
- * Use only the first isoc (some Zoran - 0x0572:0x0001 - have two such ep).
- * This routine may be called many times when the bandwidth is too small
- * (the bandwidth is checked on urb submit).
+ * build the table of the endpoints
+ * and compute the minimum bandwidth for the image transfer
*/
-static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
+static int build_ep_tb(struct gspca_dev *gspca_dev,
+ struct usb_interface *intf,
+ int xfer,
+ struct ep_tb_s *ep_tb)
{
- struct usb_interface *intf;
struct usb_host_endpoint *ep;
- int xfer, i, ret;
+ int i, j, nbalt, psize, found;
+ u32 bandwidth, last_bw;
- intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
- ep = NULL;
- xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
- : USB_ENDPOINT_XFER_ISOC;
- i = gspca_dev->alt; /* previous alt setting */
- if (gspca_dev->cam.reverse_alts) {
- while (++i < gspca_dev->nbalt) {
- ep = alt_xfer(&intf->altsetting[i], xfer);
- if (ep)
- break;
- }
- } else {
- while (--i >= 0) {
- ep = alt_xfer(&intf->altsetting[i], xfer);
- if (ep)
- break;
- }
- }
- if (ep == NULL) {
- err("no transfer endpoint found");
- return NULL;
- }
- PDEBUG(D_STREAM, "use alt %d ep 0x%02x",
- i, ep->desc.bEndpointAddress);
- gspca_dev->alt = i; /* memorize the current alt setting */
- if (gspca_dev->nbalt > 1) {
- ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, i);
- if (ret < 0) {
- err("set alt %d err %d", i, ret);
- ep = NULL;
+ nbalt = intf->num_altsetting;
+ if (nbalt > MAX_ALT)
+ nbalt = MAX_ALT; /* fixme: should warn */
+
+ /* build the endpoint table */
+ i = 0;
+ last_bw = 0;
+ for (;;) {
+ ep_tb->bandwidth = 2000 * 2000 * 120;
+ found = 0;
+ for (j = 0; j < nbalt; j++) {
+ ep = alt_xfer(&intf->altsetting[j], xfer);
+ if (ep == NULL)
+ continue;
+ psize = le16_to_cpu(ep->desc.wMaxPacketSize);
+ if (!gspca_dev->cam.bulk) /* isoc */
+ psize = (psize & 0x07ff) *
+ (1 + ((psize >> 11) & 3));
+ bandwidth = psize * ep->desc.bInterval * 1000;
+ if (gspca_dev->dev->speed == USB_SPEED_HIGH
+ || gspca_dev->dev->speed == USB_SPEED_SUPER)
+ bandwidth *= 8;
+ if (bandwidth <= last_bw)
+ continue;
+ if (bandwidth < ep_tb->bandwidth) {
+ ep_tb->bandwidth = bandwidth;
+ ep_tb->alt = j;
+ found = 1;
+ }
}
+ if (!found)
+ break;
+ PDEBUG(D_STREAM, "alt %d bandwidth %d",
+ ep_tb->alt, ep_tb->bandwidth);
+ last_bw = ep_tb->bandwidth;
+ i++;
+ ep_tb++;
+ }
+
+ /* get the requested bandwidth and start at the highest altsetting */
+ bandwidth = which_bandwidth(gspca_dev);
+ ep_tb--;
+ while (i > 1) {
+ ep_tb--;
+ if (ep_tb->bandwidth < bandwidth)
+ break;
+ i--;
}
- return ep;
+ return i;
}
/*
@@ -731,7 +769,7 @@ static int create_urbs(struct gspca_dev *gspca_dev,
for (n = 0; n < nurbs; n++) {
urb = usb_alloc_urb(npkt, GFP_KERNEL);
if (!urb) {
- err("usb_alloc_urb failed");
+ pr_err("usb_alloc_urb failed\n");
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
@@ -741,7 +779,7 @@ static int create_urbs(struct gspca_dev *gspca_dev,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_alloc_coherent failed");
+ pr_err("usb_alloc_coherent failed\n");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
@@ -752,7 +790,10 @@ static int create_urbs(struct gspca_dev *gspca_dev,
ep->desc.bEndpointAddress);
urb->transfer_flags = URB_ISO_ASAP
| URB_NO_TRANSFER_DMA_MAP;
- urb->interval = ep->desc.bInterval;
+ if (gspca_dev->dev->speed == USB_SPEED_LOW)
+ urb->interval = ep->desc.bInterval;
+ else
+ urb->interval = 1 << (ep->desc.bInterval - 1);
urb->complete = isoc_irq;
urb->number_of_packets = npkt;
for (i = 0; i < npkt; i++) {
@@ -774,9 +815,11 @@ static int create_urbs(struct gspca_dev *gspca_dev,
*/
static int gspca_init_transfer(struct gspca_dev *gspca_dev)
{
+ struct usb_interface *intf;
struct usb_host_endpoint *ep;
struct urb *urb;
- int n, ret;
+ struct ep_tb_s ep_tb[MAX_ALT];
+ int n, ret, xfer, alt, alt_idx;
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
@@ -794,30 +837,65 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
gspca_dev->usb_err = 0;
- /* set the higher alternate setting and
- * loop until urb submit succeeds */
- if (gspca_dev->cam.reverse_alts)
- gspca_dev->alt = 0;
- else
- gspca_dev->alt = gspca_dev->nbalt;
-
+ /* do the specific subdriver stuff before endpoint selection */
+ gspca_dev->alt = 0;
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
goto unlock;
}
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
+ : USB_ENDPOINT_XFER_ISOC;
- gspca_input_destroy_urb(gspca_dev);
- ep = get_ep(gspca_dev);
- if (ep == NULL) {
- ret = -EIO;
- goto out;
+ /* if the subdriver forced an altsetting, get the endpoint */
+ if (gspca_dev->alt != 0) {
+ gspca_dev->alt--; /* (previous version compatibility) */
+ ep = alt_xfer(&intf->altsetting[gspca_dev->alt], xfer);
+ if (ep == NULL) {
+ pr_err("bad altsetting %d\n", gspca_dev->alt);
+ ret = -EIO;
+ goto out;
+ }
+ ep_tb[0].alt = gspca_dev->alt;
+ alt_idx = 1;
+ } else {
+
+ /* else, compute the minimum bandwidth
+ * and build the endpoint table */
+ alt_idx = build_ep_tb(gspca_dev, intf, xfer, ep_tb);
+ if (alt_idx <= 0) {
+ pr_err("no transfer endpoint found\n");
+ ret = -EIO;
+ goto unlock;
+ }
}
+
+ /* set the highest alternate setting and
+ * loop until urb submit succeeds */
+ gspca_input_destroy_urb(gspca_dev);
+
+ gspca_dev->alt = ep_tb[--alt_idx].alt;
+ alt = -1;
for (;;) {
+ if (alt != gspca_dev->alt) {
+ alt = gspca_dev->alt;
+ if (gspca_dev->nbalt > 1) {
+ ret = usb_set_interface(gspca_dev->dev,
+ gspca_dev->iface,
+ alt);
+ if (ret < 0) {
+ if (ret == -ENOSPC)
+ goto retry; /*fixme: ugly*/
+ pr_err("set alt %d err %d\n", alt, ret);
+ goto out;
+ }
+ }
+ }
if (!gspca_dev->cam.no_urb_create) {
- PDEBUG(D_STREAM, "init transfer alt %d",
- gspca_dev->alt);
- ret = create_urbs(gspca_dev, ep);
+ PDEBUG(D_STREAM, "init transfer alt %d", alt);
+ ret = create_urbs(gspca_dev,
+ alt_xfer(&intf->altsetting[alt], xfer));
if (ret < 0) {
destroy_urbs(gspca_dev);
goto out;
@@ -851,29 +929,35 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
break;
}
if (ret >= 0)
- break;
+ break; /* transfer is started */
+
+ /* something went wrong,
+ * stop the webcam and free the transfer resources */
gspca_stream_off(gspca_dev);
if (ret != -ENOSPC) {
- err("usb_submit_urb alt %d err %d",
- gspca_dev->alt, ret);
+ pr_err("usb_submit_urb alt %d err %d\n",
+ gspca_dev->alt, ret);
goto out;
}
/* the bandwidth is not wide enough
* negotiate or try a lower alternate setting */
+retry:
PDEBUG(D_ERR|D_STREAM,
- "bandwidth not wide enough - trying again");
+ "alt %d - bandwidth not wide enough - trying again",
+ alt);
msleep(20); /* wait for kill complete */
if (gspca_dev->sd_desc->isoc_nego) {
ret = gspca_dev->sd_desc->isoc_nego(gspca_dev);
if (ret < 0)
goto out;
} else {
- ep = get_ep(gspca_dev);
- if (ep == NULL) {
+ if (alt_idx <= 0) {
+ pr_err("no transfer endpoint found\n");
ret = -EIO;
goto out;
}
+ alt = ep_tb[--alt_idx].alt;
}
}
out:
@@ -1044,7 +1128,9 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL; /* no more format */
fmtdesc->pixelformat = fmt_tb[index];
- if (gspca_is_compressed(fmt_tb[index]))
+ if (gspca_dev->cam.cam_mode[i].sizeimage <
+ gspca_dev->cam.cam_mode[i].width *
+ gspca_dev->cam.cam_mode[i].height)
fmtdesc->flags = V4L2_FMT_FLAG_COMPRESSED;
fmtdesc->description[0] = fmtdesc->pixelformat & 0xff;
fmtdesc->description[1] = (fmtdesc->pixelformat >> 8) & 0xff;
@@ -2195,19 +2281,20 @@ int gspca_dev_probe2(struct usb_interface *intf,
struct usb_device *dev = interface_to_usbdev(intf);
int ret;
- PDEBUG(D_PROBE, "probing %04x:%04x", id->idVendor, id->idProduct);
+ pr_info("%s-" GSPCA_VERSION " probing %04x:%04x\n",
+ sd_desc->name, id->idVendor, id->idProduct);
/* create the device */
if (dev_size < sizeof *gspca_dev)
dev_size = sizeof *gspca_dev;
gspca_dev = kzalloc(dev_size, GFP_KERNEL);
if (!gspca_dev) {
- err("couldn't kzalloc gspca struct");
+ pr_err("couldn't kzalloc gspca struct\n");
return -ENOMEM;
}
gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL);
if (!gspca_dev->usb_buf) {
- err("out of memory");
+ pr_err("out of memory\n");
ret = -ENOMEM;
goto out;
}
@@ -2264,7 +2351,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
VFL_TYPE_GRABBER,
-1);
if (ret < 0) {
- err("video_register_device err %d", ret);
+ pr_err("video_register_device err %d\n", ret);
goto out;
}
@@ -2296,8 +2383,8 @@ int gspca_dev_probe(struct usb_interface *intf,
/* we don't handle multi-config cameras */
if (dev->descriptor.bNumConfigurations != 1) {
- err("%04x:%04x too many config",
- id->idVendor, id->idProduct);
+ pr_err("%04x:%04x too many config\n",
+ id->idVendor, id->idProduct);
return -ENODEV;
}
@@ -2480,7 +2567,7 @@ EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
/* -- module insert / remove -- */
static int __init gspca_init(void)
{
- info("v" DRIVER_VERSION_NUMBER " registered");
+ pr_info("v" GSPCA_VERSION " registered\n");
return 0;
}
static void __exit gspca_exit(void)
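The new build_ep_tb() and the urb->interval change above both decode standard USB 2.0 descriptor fields: for a high-speed isochronous endpoint, bits 10:0 of wMaxPacketSize give the per-transaction payload and bits 12:11 the number of additional transactions per microframe, and for non-low-speed isochronous endpoints bInterval is an exponent rather than a direct count. A minimal standalone sketch of that arithmetic follows; the descriptor values in it are invented examples, not taken from any particular camera.

/* Standalone sketch of the descriptor arithmetic used by build_ep_tb() and
 * create_urbs() above; the descriptor values below are made-up examples. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t wMaxPacketSize = 0x1400;	/* example: 1024 bytes, 2 extra transactions */
	uint8_t bInterval = 4;			/* example: 2^(4-1) = 8 (micro)frames */

	/* high-speed isoc: bits 10:0 = payload per transaction, bits 12:11 =
	 * additional transactions per microframe (so up to 3 x 1024 bytes) */
	unsigned payload = (wMaxPacketSize & 0x07ff) *
			   (1 + ((wMaxPacketSize >> 11) & 3));

	/* non-low-speed isoc: bInterval is an exponent, not a frame count */
	unsigned interval = 1u << (bInterval - 1);

	printf("per-(micro)frame payload: %u bytes\n", payload);	/* 3072 */
	printf("polling interval: every %u (micro)frames\n", interval);	/* 8 */
	return 0;
}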
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 49e2fcbe81f..e444f16e149 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -14,11 +14,12 @@
#ifdef GSPCA_DEBUG
/* GSPCA our debug messages */
extern int gspca_debug;
-#define PDEBUG(level, fmt, args...) \
- do {\
- if (gspca_debug & (level)) \
- printk(KERN_INFO MODULE_NAME ": " fmt "\n", ## args); \
- } while (0)
+#define PDEBUG(level, fmt, ...) \
+do { \
+ if (gspca_debug & (level)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
#define D_ERR 0x01
#define D_PROBE 0x02
#define D_CONF 0x04
@@ -29,17 +30,8 @@ extern int gspca_debug;
#define D_USBO 0x00
#define D_V4L2 0x0100
#else
-#define PDEBUG(level, fmt, args...)
+#define PDEBUG(level, fmt, ...)
#endif
-#undef err
-#define err(fmt, args...) \
- printk(KERN_ERR MODULE_NAME ": " fmt "\n", ## args)
-#undef info
-#define info(fmt, args...) \
- printk(KERN_INFO MODULE_NAME ": " fmt "\n", ## args)
-#undef warn
-#define warn(fmt, args...) \
- printk(KERN_WARNING MODULE_NAME ": " fmt "\n", ## args)
#define GSPCA_MAX_FRAMES 16 /* maximum number of video frame buffers */
/* image transfers */
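Each file in this series gains "#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt" before its includes, which is what lets the local err()/info()/warn() wrappers removed above collapse into plain pr_err()/pr_info()/pr_warn(): the kernel's pr_* helpers pass their format through pr_fmt(), and defining pr_fmt first overrides the default identity definition, so the module-name prefix is added automatically. A userspace mock of that expansion follows; the fprintf stand-ins and the "gspca" literal are for illustration only, not the kernel macros.

/* Userspace mock of the pr_fmt pattern the patch relies on. */
#include <stdio.h>

#define KBUILD_MODNAME "gspca"			/* supplied by kbuild in the kernel */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* the line each driver file adds */

#define pr_err(fmt, ...)  fprintf(stderr, "ERR  " pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) fprintf(stderr, "INFO " pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("usb_submit_urb() ret %d\n", -28);	/* -> "ERR  gspca: usb_submit_urb() ret -28" */
	pr_info("v2.14.0 registered\n");		/* -> "INFO gspca: v2.14.0 registered" */
	return 0;
}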
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index 1bd9c4b542d..8e3dabe3007 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -24,6 +24,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "jeilinj"
#include <linux/slab.h>
@@ -113,8 +115,8 @@ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command)
usb_sndbulkpipe(gspca_dev->dev, 3),
gspca_dev->usb_buf, 2, NULL, 500);
if (retval < 0) {
- err("command write [%02x] error %d",
- gspca_dev->usb_buf[0], retval);
+ pr_err("command write [%02x] error %d\n",
+ gspca_dev->usb_buf[0], retval);
gspca_dev->usb_err = retval;
}
}
@@ -131,8 +133,8 @@ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
gspca_dev->usb_buf, 1, NULL, 500);
response = gspca_dev->usb_buf[0];
if (retval < 0) {
- err("read command [%02x] error %d",
- gspca_dev->usb_buf[0], retval);
+ pr_err("read command [%02x] error %d\n",
+ gspca_dev->usb_buf[0], retval);
gspca_dev->usb_err = retval;
}
}
@@ -403,13 +405,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
dev->type = id->driver_info;
gspca_dev->cam.ctrls = dev->ctrls;
dev->quality = QUALITY_DEF;
- dev->ctrls[LIGHTFREQ].def = V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
- dev->ctrls[RED].def = RED_BALANCE_DEF;
- dev->ctrls[GREEN].def = GREEN_BALANCE_DEF;
- dev->ctrls[BLUE].def = BLUE_BALANCE_DEF;
- PDEBUG(D_PROBE,
- "JEILINJ camera detected"
- " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
+
cam->cam_mode = jlj_mode;
cam->nmodes = ARRAY_SIZE(jlj_mode);
cam->bulk = 1;
@@ -422,7 +418,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
{
int i;
u8 *buf;
- u8 stop_commands[][2] = {
+ static u8 stop_commands[][2] = {
{0x71, 0x00},
{0x70, 0x09},
{0x71, 0x80},
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 26fc206f095..4fe51fda80f 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -24,6 +24,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "kinect"
#include "gspca.h"
@@ -34,11 +36,6 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver");
MODULE_LICENSE("GPL");
-#ifdef GSPCA_DEBUG
-int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK |
- D_USBI | D_USBO | D_V4L2;
-#endif
-
struct pkt_hdr {
uint8_t magic[2];
uint8_t pad;
@@ -141,7 +138,7 @@ static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf,
struct cam_hdr *rhdr = (void *)ibuf;
if (cmd_len & 1 || cmd_len > (0x400 - sizeof(*chdr))) {
- err("send_cmd: Invalid command length (0x%x)", cmd_len);
+ pr_err("send_cmd: Invalid command length (0x%x)\n", cmd_len);
return -1;
}
@@ -157,7 +154,7 @@ static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf,
PDEBUG(D_USBO, "Control cmd=%04x tag=%04x len=%04x: %d", cmd,
sd->cam_tag, cmd_len, res);
if (res < 0) {
- err("send_cmd: Output control transfer failed (%d)", res);
+ pr_err("send_cmd: Output control transfer failed (%d)\n", res);
return res;
}
@@ -166,33 +163,35 @@ static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf,
} while (actual_len == 0);
PDEBUG(D_USBO, "Control reply: %d", res);
if (actual_len < sizeof(*rhdr)) {
- err("send_cmd: Input control transfer failed (%d)", res);
+ pr_err("send_cmd: Input control transfer failed (%d)\n", res);
return res;
}
actual_len -= sizeof(*rhdr);
if (rhdr->magic[0] != 0x52 || rhdr->magic[1] != 0x42) {
- err("send_cmd: Bad magic %02x %02x", rhdr->magic[0],
- rhdr->magic[1]);
+ pr_err("send_cmd: Bad magic %02x %02x\n",
+ rhdr->magic[0], rhdr->magic[1]);
return -1;
}
if (rhdr->cmd != chdr->cmd) {
- err("send_cmd: Bad cmd %02x != %02x", rhdr->cmd, chdr->cmd);
+ pr_err("send_cmd: Bad cmd %02x != %02x\n",
+ rhdr->cmd, chdr->cmd);
return -1;
}
if (rhdr->tag != chdr->tag) {
- err("send_cmd: Bad tag %04x != %04x", rhdr->tag, chdr->tag);
+ pr_err("send_cmd: Bad tag %04x != %04x\n",
+ rhdr->tag, chdr->tag);
return -1;
}
if (cpu_to_le16(rhdr->len) != (actual_len/2)) {
- err("send_cmd: Bad len %04x != %04x",
- cpu_to_le16(rhdr->len), (int)(actual_len/2));
+ pr_err("send_cmd: Bad len %04x != %04x\n",
+ cpu_to_le16(rhdr->len), (int)(actual_len/2));
return -1;
}
if (actual_len > reply_len) {
- warn("send_cmd: Data buffer is %d bytes long, but got %d bytes",
- reply_len, actual_len);
+ pr_warn("send_cmd: Data buffer is %d bytes long, but got %d bytes\n",
+ reply_len, actual_len);
memcpy(replybuf, ibuf+sizeof(*rhdr), reply_len);
} else {
memcpy(replybuf, ibuf+sizeof(*rhdr), actual_len);
@@ -218,8 +217,8 @@ static int write_register(struct gspca_dev *gspca_dev, uint16_t reg,
if (res < 0)
return res;
if (res != 2) {
- warn("send_cmd returned %d [%04x %04x], 0000 expected",
- res, reply[0], reply[1]);
+ pr_warn("send_cmd returned %d [%04x %04x], 0000 expected\n",
+ res, reply[0], reply[1]);
}
return 0;
}
@@ -353,8 +352,8 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *__data, int len)
return;
if (hdr->magic[0] != 'R' || hdr->magic[1] != 'B') {
- warn("[Stream %02x] Invalid magic %02x%02x", sd->stream_flag,
- hdr->magic[0], hdr->magic[1]);
+ pr_warn("[Stream %02x] Invalid magic %02x%02x\n",
+ sd->stream_flag, hdr->magic[0], hdr->magic[1]);
return;
}
@@ -368,7 +367,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *__data, int len)
gspca_frame_add(gspca_dev, LAST_PACKET, data, datalen);
else
- warn("Packet type not recognized...");
+ pr_warn("Packet type not recognized...\n");
}
/* sub-driver description */
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index 5964691c0e9..f3f7fe0ec4b 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -28,6 +28,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "konica"
#include <linux/input.h>
@@ -200,7 +202,7 @@ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
0,
1000);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -221,7 +223,7 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
2,
1000);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -284,7 +286,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- err("Couldn't get altsetting");
+ pr_err("Couldn't get altsetting\n");
return -EIO;
}
@@ -315,7 +317,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
le16_to_cpu(alt->endpoint[i].desc.wMaxPacketSize);
urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL);
if (!urb) {
- err("usb_alloc_urb failed");
+ pr_err("usb_alloc_urb failed\n");
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
@@ -324,7 +326,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
GFP_KERNEL,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_buffer_alloc failed");
+ pr_err("usb_buffer_alloc failed\n");
return -ENOMEM;
}
@@ -386,7 +388,7 @@ static void sd_isoc_irq(struct urb *urb)
PDEBUG(D_ERR, "urb status: %d", urb->status);
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- err("resubmit urb error %d", st);
+ pr_err("resubmit urb error %d\n", st);
return;
}
@@ -477,7 +479,7 @@ resubmit:
}
st = usb_submit_urb(status_urb, GFP_ATOMIC);
if (st < 0)
- err("usb_submit_urb(status_urb) ret %d", st);
+ pr_err("usb_submit_urb(status_urb) ret %d\n", st);
}
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
diff --git a/drivers/media/video/gspca/m5602/Makefile b/drivers/media/video/gspca/m5602/Makefile
index bf7a19a1e6d..7f52961f439 100644
--- a/drivers/media/video/gspca/m5602/Makefile
+++ b/drivers/media/video/gspca/m5602/Makefile
@@ -8,4 +8,4 @@ gspca_m5602-objs := m5602_core.o \
m5602_s5k83a.o \
m5602_s5k4aa.o
-EXTRA_CFLAGS += -Idrivers/media/video/gspca
+ccflags-y += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index a7722b1aef9..67533e5582a 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "m5602_ov9650.h"
#include "m5602_ov7660.h"
#include "m5602_mt9m111.h"
@@ -192,10 +194,9 @@ static void m5602_dump_bridge(struct sd *sd)
for (i = 0; i < 0x80; i++) {
unsigned char val = 0;
m5602_read_bridge(sd, i, &val);
- info("ALi m5602 address 0x%x contains 0x%x", i, val);
+ pr_info("ALi m5602 address 0x%x contains 0x%x\n", i, val);
}
- info("Warning: The ALi m5602 webcam probably won't work "
- "until it's power cycled");
+ pr_info("Warning: The ALi m5602 webcam probably won't work until it's power cycled\n");
}
static int m5602_probe_sensor(struct sd *sd)
@@ -231,7 +232,7 @@ static int m5602_probe_sensor(struct sd *sd)
return 0;
/* More sensor probe function goes here */
- info("Failed to find a sensor");
+ pr_info("Failed to find a sensor\n");
sd->sensor = NULL;
return -ENODEV;
}
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
index 0d605a52b92..6268aa24ec5 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "m5602_mt9m111.h"
static int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
@@ -163,7 +165,7 @@ int mt9m111_probe(struct sd *sd)
if (force_sensor) {
if (force_sensor == MT9M111_SENSOR) {
- info("Forcing a %s sensor", mt9m111.name);
+ pr_info("Forcing a %s sensor\n", mt9m111.name);
goto sensor_found;
}
/* If we want to force another sensor, don't try to probe this
@@ -191,7 +193,7 @@ int mt9m111_probe(struct sd *sd)
return -ENODEV;
if ((data[0] == 0x14) && (data[1] == 0x3a)) {
- info("Detected a mt9m111 sensor");
+ pr_info("Detected a mt9m111 sensor\n");
goto sensor_found;
}
@@ -612,34 +614,34 @@ static void mt9m111_dump_registers(struct sd *sd)
{
u8 address, value[2] = {0x00, 0x00};
- info("Dumping the mt9m111 register state");
+ pr_info("Dumping the mt9m111 register state\n");
- info("Dumping the mt9m111 sensor core registers");
+ pr_info("Dumping the mt9m111 sensor core registers\n");
value[1] = MT9M111_SENSOR_CORE;
m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
for (address = 0; address < 0xff; address++) {
m5602_read_sensor(sd, address, value, 2);
- info("register 0x%x contains 0x%x%x",
- address, value[0], value[1]);
+ pr_info("register 0x%x contains 0x%x%x\n",
+ address, value[0], value[1]);
}
- info("Dumping the mt9m111 color pipeline registers");
+ pr_info("Dumping the mt9m111 color pipeline registers\n");
value[1] = MT9M111_COLORPIPE;
m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
for (address = 0; address < 0xff; address++) {
m5602_read_sensor(sd, address, value, 2);
- info("register 0x%x contains 0x%x%x",
- address, value[0], value[1]);
+ pr_info("register 0x%x contains 0x%x%x\n",
+ address, value[0], value[1]);
}
- info("Dumping the mt9m111 camera control registers");
+ pr_info("Dumping the mt9m111 camera control registers\n");
value[1] = MT9M111_CAMERA_CONTROL;
m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
for (address = 0; address < 0xff; address++) {
m5602_read_sensor(sd, address, value, 2);
- info("register 0x%x contains 0x%x%x",
- address, value[0], value[1]);
+ pr_info("register 0x%x contains 0x%x%x\n",
+ address, value[0], value[1]);
}
- info("mt9m111 register state dump complete");
+ pr_info("mt9m111 register state dump complete\n");
}
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.c b/drivers/media/video/gspca/m5602/m5602_ov7660.c
index b12f60464b3..9a14835c128 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov7660.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "m5602_ov7660.h"
static int ov7660_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
@@ -149,7 +151,7 @@ int ov7660_probe(struct sd *sd)
if (force_sensor) {
if (force_sensor == OV7660_SENSOR) {
- info("Forcing an %s sensor", ov7660.name);
+ pr_info("Forcing an %s sensor\n", ov7660.name);
goto sensor_found;
}
/* If we want to force another sensor,
@@ -180,10 +182,10 @@ int ov7660_probe(struct sd *sd)
if (m5602_read_sensor(sd, OV7660_VER, &ver_id, 1))
return -ENODEV;
- info("Sensor reported 0x%x%x", prod_id, ver_id);
+ pr_info("Sensor reported 0x%x%x\n", prod_id, ver_id);
if ((prod_id == 0x76) && (ver_id == 0x60)) {
- info("Detected a ov7660 sensor");
+ pr_info("Detected a ov7660 sensor\n");
goto sensor_found;
}
return -ENODEV;
@@ -457,17 +459,16 @@ static int ov7660_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
static void ov7660_dump_registers(struct sd *sd)
{
int address;
- info("Dumping the ov7660 register state");
+ pr_info("Dumping the ov7660 register state\n");
for (address = 0; address < 0xa9; address++) {
u8 value;
m5602_read_sensor(sd, address, &value, 1);
- info("register 0x%x contains 0x%x",
- address, value);
+ pr_info("register 0x%x contains 0x%x\n", address, value);
}
- info("ov7660 register state dump complete");
+ pr_info("ov7660 register state dump complete\n");
- info("Probing for which registers that are read/write");
+ pr_info("Probing for which registers that are read/write\n");
for (address = 0; address < 0xff; address++) {
u8 old_value, ctrl_value;
u8 test_value[2] = {0xff, 0xff};
@@ -477,9 +478,9 @@ static void ov7660_dump_registers(struct sd *sd)
m5602_read_sensor(sd, address, &ctrl_value, 1);
if (ctrl_value == test_value[0])
- info("register 0x%x is writeable", address);
+ pr_info("register 0x%x is writeable\n", address);
else
- info("register 0x%x is read only", address);
+ pr_info("register 0x%x is read only\n", address);
/* Restore original value */
m5602_write_sensor(sd, address, &old_value, 1);
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index 703d48670a2..2114a8b90ec 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "m5602_ov9650.h"
static int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
@@ -299,7 +301,7 @@ int ov9650_probe(struct sd *sd)
if (force_sensor) {
if (force_sensor == OV9650_SENSOR) {
- info("Forcing an %s sensor", ov9650.name);
+ pr_info("Forcing an %s sensor\n", ov9650.name);
goto sensor_found;
}
/* If we want to force another sensor,
@@ -330,7 +332,7 @@ int ov9650_probe(struct sd *sd)
return -ENODEV;
if ((prod_id == 0x96) && (ver_id == 0x52)) {
- info("Detected an ov9650 sensor");
+ pr_info("Detected an ov9650 sensor\n");
goto sensor_found;
}
return -ENODEV;
@@ -850,17 +852,16 @@ static int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
static void ov9650_dump_registers(struct sd *sd)
{
int address;
- info("Dumping the ov9650 register state");
+ pr_info("Dumping the ov9650 register state\n");
for (address = 0; address < 0xa9; address++) {
u8 value;
m5602_read_sensor(sd, address, &value, 1);
- info("register 0x%x contains 0x%x",
- address, value);
+ pr_info("register 0x%x contains 0x%x\n", address, value);
}
- info("ov9650 register state dump complete");
+ pr_info("ov9650 register state dump complete\n");
- info("Probing for which registers that are read/write");
+ pr_info("Probing for which registers that are read/write\n");
for (address = 0; address < 0xff; address++) {
u8 old_value, ctrl_value;
u8 test_value[2] = {0xff, 0xff};
@@ -870,9 +871,9 @@ static void ov9650_dump_registers(struct sd *sd)
m5602_read_sensor(sd, address, &ctrl_value, 1);
if (ctrl_value == test_value[0])
- info("register 0x%x is writeable", address);
+ pr_info("register 0x%x is writeable\n", address);
else
- info("register 0x%x is read only", address);
+ pr_info("register 0x%x is read only\n", address);
/* Restore original value */
m5602_write_sensor(sd, address, &old_value, 1);
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c
index 1febd34c2f0..b8771698cbc 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "m5602_po1030.h"
static int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
@@ -197,7 +199,7 @@ int po1030_probe(struct sd *sd)
if (force_sensor) {
if (force_sensor == PO1030_SENSOR) {
- info("Forcing a %s sensor", po1030.name);
+ pr_info("Forcing a %s sensor\n", po1030.name);
goto sensor_found;
}
/* If we want to force another sensor, don't try to probe this
@@ -221,7 +223,7 @@ int po1030_probe(struct sd *sd)
return -ENODEV;
if (dev_id_h == 0x30) {
- info("Detected a po1030 sensor");
+ pr_info("Detected a po1030 sensor\n");
goto sensor_found;
}
return -ENODEV;
@@ -267,7 +269,7 @@ int po1030_init(struct sd *sd)
break;
default:
- info("Invalid stream command, exiting init");
+ pr_info("Invalid stream command, exiting init\n");
return -EINVAL;
}
}
@@ -733,16 +735,15 @@ static void po1030_dump_registers(struct sd *sd)
int address;
u8 value = 0;
- info("Dumping the po1030 sensor core registers");
+ pr_info("Dumping the po1030 sensor core registers\n");
for (address = 0; address < 0x7f; address++) {
m5602_read_sensor(sd, address, &value, 1);
- info("register 0x%x contains 0x%x",
- address, value);
+ pr_info("register 0x%x contains 0x%x\n", address, value);
}
- info("po1030 register state dump complete");
+ pr_info("po1030 register state dump complete\n");
- info("Probing for which registers that are read/write");
+ pr_info("Probing for which registers that are read/write\n");
for (address = 0; address < 0xff; address++) {
u8 old_value, ctrl_value;
u8 test_value[2] = {0xff, 0xff};
@@ -752,9 +753,9 @@ static void po1030_dump_registers(struct sd *sd)
m5602_read_sensor(sd, address, &ctrl_value, 1);
if (ctrl_value == test_value[0])
- info("register 0x%x is writeable", address);
+ pr_info("register 0x%x is writeable\n", address);
else
- info("register 0x%x is read only", address);
+ pr_info("register 0x%x is read only\n", address);
/* Restore original value */
m5602_write_sensor(sd, address, &old_value, 1);
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index d27280be985..cc8ec3f7e8d 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "m5602_s5k4aa.h"
static int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
@@ -240,7 +242,7 @@ int s5k4aa_probe(struct sd *sd)
if (force_sensor) {
if (force_sensor == S5K4AA_SENSOR) {
- info("Forcing a %s sensor", s5k4aa.name);
+ pr_info("Forcing a %s sensor\n", s5k4aa.name);
goto sensor_found;
}
/* If we want to force another sensor, don't try to probe this
@@ -276,7 +278,7 @@ int s5k4aa_probe(struct sd *sd)
data, 2);
break;
default:
- info("Invalid stream command, exiting init");
+ pr_info("Invalid stream command, exiting init\n");
return -EINVAL;
}
}
@@ -292,7 +294,7 @@ int s5k4aa_probe(struct sd *sd)
if (memcmp(prod_id, expected_prod_id, sizeof(prod_id)))
return -ENODEV;
else
- info("Detected a s5k4aa sensor");
+ pr_info("Detected a s5k4aa sensor\n");
sensor_found:
sensor_settings = kmalloc(
@@ -347,7 +349,7 @@ int s5k4aa_start(struct sd *sd)
break;
default:
- err("Invalid stream command, exiting init");
+ pr_err("Invalid stream command, exiting init\n");
return -EINVAL;
}
}
@@ -383,7 +385,7 @@ int s5k4aa_start(struct sd *sd)
break;
default:
- err("Invalid stream command, exiting init");
+ pr_err("Invalid stream command, exiting init\n");
return -EINVAL;
}
}
@@ -447,7 +449,7 @@ int s5k4aa_init(struct sd *sd)
init_s5k4aa[i][1], data, 2);
break;
default:
- info("Invalid stream command, exiting init");
+ pr_info("Invalid stream command, exiting init\n");
return -EINVAL;
}
}
@@ -686,20 +688,21 @@ static void s5k4aa_dump_registers(struct sd *sd)
m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &old_page, 1);
for (page = 0; page < 16; page++) {
m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1);
- info("Dumping the s5k4aa register state for page 0x%x", page);
+ pr_info("Dumping the s5k4aa register state for page 0x%x\n",
+ page);
for (address = 0; address <= 0xff; address++) {
u8 value = 0;
m5602_read_sensor(sd, address, &value, 1);
- info("register 0x%x contains 0x%x",
- address, value);
+ pr_info("register 0x%x contains 0x%x\n",
+ address, value);
}
}
- info("s5k4aa register state dump complete");
+ pr_info("s5k4aa register state dump complete\n");
for (page = 0; page < 16; page++) {
m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1);
- info("Probing for which registers that are "
- "read/write for page 0x%x", page);
+ pr_info("Probing for which registers that are read/write for page 0x%x\n",
+ page);
for (address = 0; address <= 0xff; address++) {
u8 old_value, ctrl_value, test_value = 0xff;
@@ -708,14 +711,16 @@ static void s5k4aa_dump_registers(struct sd *sd)
m5602_read_sensor(sd, address, &ctrl_value, 1);
if (ctrl_value == test_value)
- info("register 0x%x is writeable", address);
+ pr_info("register 0x%x is writeable\n",
+ address);
else
- info("register 0x%x is read only", address);
+ pr_info("register 0x%x is read only\n",
+ address);
/* Restore original value */
m5602_write_sensor(sd, address, &old_value, 1);
}
}
- info("Read/write register probing complete");
+ pr_info("Read/write register probing complete\n");
m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &old_page, 1);
}
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.c b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
index fbd91545497..1de743a02b0 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kthread.h>
#include "m5602_s5k83a.h"
@@ -135,7 +137,7 @@ int s5k83a_probe(struct sd *sd)
if (force_sensor) {
if (force_sensor == S5K83A_SENSOR) {
- info("Forcing a %s sensor", s5k83a.name);
+ pr_info("Forcing a %s sensor\n", s5k83a.name);
goto sensor_found;
}
/* If we want to force another sensor, don't try to probe this
@@ -168,7 +170,7 @@ int s5k83a_probe(struct sd *sd)
if ((prod_id == 0xff) || (ver_id == 0xff))
return -ENODEV;
else
- info("Detected a s5k83a sensor");
+ pr_info("Detected a s5k83a sensor\n");
sensor_found:
sens_priv = kmalloc(
@@ -227,7 +229,7 @@ int s5k83a_init(struct sd *sd)
init_s5k83a[i][1], data, 2);
break;
default:
- info("Invalid stream command, exiting init");
+ pr_info("Invalid stream command, exiting init\n");
return -EINVAL;
}
}
@@ -273,7 +275,7 @@ static int rotation_thread_function(void *data)
s5k83a_get_rotation(sd, &reg);
if (previous_rotation != reg) {
previous_rotation = reg;
- info("Camera was flipped");
+ pr_info("Camera was flipped\n");
s5k83a_get_vflip((struct gspca_dev *) sd, &vflip);
s5k83a_get_hflip((struct gspca_dev *) sd, &hflip);
@@ -566,20 +568,20 @@ static void s5k83a_dump_registers(struct sd *sd)
for (page = 0; page < 16; page++) {
m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
- info("Dumping the s5k83a register state for page 0x%x", page);
+ pr_info("Dumping the s5k83a register state for page 0x%x\n",
+ page);
for (address = 0; address <= 0xff; address++) {
u8 val = 0;
m5602_read_sensor(sd, address, &val, 1);
- info("register 0x%x contains 0x%x",
- address, val);
+ pr_info("register 0x%x contains 0x%x\n", address, val);
}
}
- info("s5k83a register state dump complete");
+ pr_info("s5k83a register state dump complete\n");
for (page = 0; page < 16; page++) {
m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
- info("Probing for which registers that are read/write "
- "for page 0x%x", page);
+ pr_info("Probing for which registers that are read/write for page 0x%x\n",
+ page);
for (address = 0; address <= 0xff; address++) {
u8 old_val, ctrl_val, test_val = 0xff;
@@ -588,14 +590,16 @@ static void s5k83a_dump_registers(struct sd *sd)
m5602_read_sensor(sd, address, &ctrl_val, 1);
if (ctrl_val == test_val)
- info("register 0x%x is writeable", address);
+ pr_info("register 0x%x is writeable\n",
+ address);
else
- info("register 0x%x is read only", address);
+ pr_info("register 0x%x is read only\n",
+ address);
/* Restore original val */
m5602_write_sensor(sd, address, &old_val, 1);
}
}
- info("Read/write register probing complete");
+ pr_info("Read/write register probing complete\n");
m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
}
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index 0196209a948..ef45fa57575 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -19,6 +19,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "mars"
#include "gspca.h"
@@ -178,8 +180,8 @@ static void reg_w(struct gspca_dev *gspca_dev,
&alen,
500); /* timeout in milliseconds */
if (ret < 0) {
- err("reg write [%02x] error %d",
- gspca_dev->usb_buf[0], ret);
+ pr_err("reg write [%02x] error %d\n",
+ gspca_dev->usb_buf[0], ret);
gspca_dev->usb_err = ret;
}
}
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 97e50796743..473e813b680 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -40,6 +40,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "mr97310a"
#include "gspca.h"
@@ -267,7 +269,7 @@ static int mr_write(struct gspca_dev *gspca_dev, int len)
usb_sndbulkpipe(gspca_dev->dev, 4),
gspca_dev->usb_buf, len, NULL, 500);
if (rc < 0)
- err("reg write [%02x] error %d",
+ pr_err("reg write [%02x] error %d\n",
gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -281,7 +283,7 @@ static int mr_read(struct gspca_dev *gspca_dev, int len)
usb_rcvbulkpipe(gspca_dev->dev, 3),
gspca_dev->usb_buf, len, NULL, 500);
if (rc < 0)
- err("reg read [%02x] error %d",
+ pr_err("reg read [%02x] error %d\n",
gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -540,7 +542,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor_type = 1;
break;
default:
- err("Unknown CIF Sensor id : %02x",
+ pr_err("Unknown CIF Sensor id : %02x\n",
gspca_dev->usb_buf[1]);
return -ENODEV;
}
@@ -575,10 +577,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor_type = 2;
} else if ((gspca_dev->usb_buf[0] != 0x03) &&
(gspca_dev->usb_buf[0] != 0x04)) {
- err("Unknown VGA Sensor id Byte 0: %02x",
- gspca_dev->usb_buf[0]);
- err("Defaults assumed, may not work");
- err("Please report this");
+ pr_err("Unknown VGA Sensor id Byte 0: %02x\n",
+ gspca_dev->usb_buf[0]);
+ pr_err("Defaults assumed, may not work\n");
+ pr_err("Please report this\n");
}
/* Sakar Digital color needs to be adjusted. */
if ((gspca_dev->usb_buf[0] == 0x03) &&
@@ -595,10 +597,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* Nothing to do here. */
break;
default:
- err("Unknown VGA Sensor id Byte 1: %02x",
- gspca_dev->usb_buf[1]);
- err("Defaults assumed, may not work");
- err("Please report this");
+ pr_err("Unknown VGA Sensor id Byte 1: %02x\n",
+ gspca_dev->usb_buf[1]);
+ pr_err("Defaults assumed, may not work\n");
+ pr_err("Please report this\n");
}
}
PDEBUG(D_PROBE, "MR97310A VGA camera detected, sensor: %d",
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index 8e754fd4dc5..7681814e594 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "nw80x"
#include "gspca.h"
@@ -1571,7 +1573,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
len,
500);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -1592,7 +1594,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
0x00, index,
gspca_dev->usb_buf, len, 500);
if (ret < 0) {
- err("reg_r err %d", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
return;
}
@@ -1802,7 +1804,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
}
if (webcam_chip[sd->webcam] != sd->bridge) {
- err("Bad webcam type %d for NW80%d", sd->webcam, sd->bridge);
+ pr_err("Bad webcam type %d for NW80%d\n",
+ sd->webcam, sd->bridge);
gspca_dev->usb_err = -ENODEV;
return gspca_dev->usb_err;
}
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 18305c89083..6a01b35a947 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -36,6 +36,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "ov519"
#include <linux/input.h>
@@ -2171,7 +2174,7 @@ static void reg_w(struct sd *sd, u16 index, u16 value)
sd->gspca_dev.usb_buf, 1, 500);
leave:
if (ret < 0) {
- err("reg_w %02x failed %d", index, ret);
+ pr_err("reg_w %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
return;
}
@@ -2210,7 +2213,7 @@ static int reg_r(struct sd *sd, u16 index)
PDEBUG(D_USBI, "GET %02x 0000 %04x %02x",
req, index, ret);
} else {
- err("reg_r %02x failed %d", index, ret);
+ pr_err("reg_r %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
}
@@ -2235,7 +2238,7 @@ static int reg_r8(struct sd *sd,
if (ret >= 0) {
ret = sd->gspca_dev.usb_buf[0];
} else {
- err("reg_r8 %02x failed %d", index, ret);
+ pr_err("reg_r8 %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
}
@@ -2288,7 +2291,7 @@ static void ov518_reg_w32(struct sd *sd, u16 index, u32 value, int n)
0, index,
sd->gspca_dev.usb_buf, n, 500);
if (ret < 0) {
- err("reg_w32 %02x failed %d", index, ret);
+ pr_err("reg_w32 %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
}
}
@@ -2457,7 +2460,7 @@ static void ovfx2_i2c_w(struct sd *sd, u8 reg, u8 value)
(u16) value, (u16) reg, NULL, 0, 500);
if (ret < 0) {
- err("ovfx2_i2c_w %02x failed %d", reg, ret);
+ pr_err("ovfx2_i2c_w %02x failed %d\n", reg, ret);
sd->gspca_dev.usb_err = ret;
}
@@ -2481,7 +2484,7 @@ static int ovfx2_i2c_r(struct sd *sd, u8 reg)
ret = sd->gspca_dev.usb_buf[0];
PDEBUG(D_USBI, "ovfx2_i2c_r %02x %02x", reg, ret);
} else {
- err("ovfx2_i2c_r %02x failed %d", reg, ret);
+ pr_err("ovfx2_i2c_r %02x failed %d\n", reg, ret);
sd->gspca_dev.usb_err = ret;
}
@@ -2727,7 +2730,7 @@ static void ov_hires_configure(struct sd *sd)
int high, low;
if (sd->bridge != BRIDGE_OVFX2) {
- err("error hires sensors only supported with ovfx2");
+ pr_err("error hires sensors only supported with ovfx2\n");
return;
}
@@ -2762,7 +2765,7 @@ static void ov_hires_configure(struct sd *sd)
}
break;
}
- err("Error unknown sensor type: %02x%02x", high, low);
+ pr_err("Error unknown sensor type: %02x%02x\n", high, low);
}
/* This initializes the OV8110, OV8610 sensor. The OV8110 uses
@@ -2783,7 +2786,7 @@ static void ov8xx0_configure(struct sd *sd)
if ((rc & 3) == 1)
sd->sensor = SEN_OV8610;
else
- err("Unknown image sensor version: %d", rc & 3);
+ pr_err("Unknown image sensor version: %d\n", rc & 3);
}
/* This initializes the OV7610, OV7620, or OV76BE sensor. The OV76BE uses
@@ -2840,8 +2843,8 @@ static void ov7xx0_configure(struct sd *sd)
if (high == 0x76) {
switch (low) {
case 0x30:
- err("Sensor is an OV7630/OV7635");
- err("7630 is not supported by this driver");
+ pr_err("Sensor is an OV7630/OV7635\n");
+ pr_err("7630 is not supported by this driver\n");
return;
case 0x40:
PDEBUG(D_PROBE, "Sensor is an OV7645");
@@ -2868,7 +2871,7 @@ static void ov7xx0_configure(struct sd *sd)
sd->sensor = SEN_OV7620;
}
} else {
- err("Unknown image sensor version: %d", rc & 3);
+ pr_err("Unknown image sensor version: %d\n", rc & 3);
}
}
@@ -2891,8 +2894,7 @@ static void ov6xx0_configure(struct sd *sd)
switch (rc) {
case 0x00:
sd->sensor = SEN_OV6630;
- warn("WARNING: Sensor is an OV66308. Your camera may have");
- warn("been misdetected in previous driver versions.");
+ pr_warn("WARNING: Sensor is an OV66308. Your camera may have been misdetected in previous driver versions.\n");
break;
case 0x01:
sd->sensor = SEN_OV6620;
@@ -2908,11 +2910,10 @@ static void ov6xx0_configure(struct sd *sd)
break;
case 0x90:
sd->sensor = SEN_OV6630;
- warn("WARNING: Sensor is an OV66307. Your camera may have");
- warn("been misdetected in previous driver versions.");
+ pr_warn("WARNING: Sensor is an OV66307. Your camera may have been misdetected in previous driver versions.\n");
break;
default:
- err("FATAL: Unknown sensor version: 0x%02x", rc);
+ pr_err("FATAL: Unknown sensor version: 0x%02x\n", rc);
return;
}
@@ -3405,7 +3406,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
} else if (init_ov_sensor(sd, OV_HIRES_SID) >= 0) {
ov_hires_configure(sd);
} else {
- err("Can't determine sensor slave IDs");
+ pr_err("Can't determine sensor slave IDs\n");
goto error;
}
@@ -3590,7 +3591,7 @@ static void ov511_mode_init_regs(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- err("Couldn't get altsetting");
+ pr_err("Couldn't get altsetting\n");
sd->gspca_dev.usb_err = -EIO;
return;
}
@@ -3713,7 +3714,7 @@ static void ov518_mode_init_regs(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- err("Couldn't get altsetting");
+ pr_err("Couldn't get altsetting\n");
sd->gspca_dev.usb_err = -EIO;
return;
}
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 0c6369b7fe1..76907eced4a 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -28,6 +28,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "ov534"
#include "gspca.h"
@@ -775,7 +777,7 @@ static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
if (ret < 0) {
- err("write failed %d", ret);
+ pr_err("write failed %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -794,7 +796,7 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
PDEBUG(D_USBI, "GET 01 0000 %04x %02x", reg, gspca_dev->usb_buf[0]);
if (ret < 0) {
- err("read failed %d", ret);
+ pr_err("read failed %d\n", ret);
gspca_dev->usb_err = ret;
}
return gspca_dev->usb_buf[0];
@@ -858,7 +860,7 @@ static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
if (!sccb_check_status(gspca_dev)) {
- err("sccb_reg_write failed");
+ pr_err("sccb_reg_write failed\n");
gspca_dev->usb_err = -EIO;
}
}
@@ -868,11 +870,11 @@ static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2);
if (!sccb_check_status(gspca_dev))
- err("sccb_reg_read failed 1");
+ pr_err("sccb_reg_read failed 1\n");
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2);
if (!sccb_check_status(gspca_dev))
- err("sccb_reg_read failed 2");
+ pr_err("sccb_reg_read failed 2\n");
return ov534_reg_read(gspca_dev, OV534_REG_READ);
}
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index aaf5428c57f..b3b1ea60a84 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1,7 +1,7 @@
/*
- * ov534-ov965x gspca driver
+ * ov534-ov9xxx gspca driver
*
- * Copyright (C) 2009-2010 Jean-Francois Moine http://moinejf.free.fr
+ * Copyright (C) 2009-2011 Jean-Francois Moine http://moinejf.free.fr
* Copyright (C) 2008 Antonio Ospite <ospite@studenti.unina.it>
* Copyright (C) 2008 Jim Paris <jim@jtan.com>
*
@@ -24,6 +24,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "ov534_9"
#include "gspca.h"
@@ -45,39 +47,44 @@ MODULE_AUTHOR("Jean-Francois Moine <moinejf@free.fr>");
MODULE_DESCRIPTION("GSPCA/OV534_9 USB Camera Driver");
MODULE_LICENSE("GPL");
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ CONTRAST,
+ AUTOGAIN,
+ EXPOSURE,
+ SHARPNESS,
+ SATUR,
+ LIGHTFREQ,
+ NCTRLS /* number of controls */
+};
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct gspca_ctrl ctrls[NCTRLS];
__u32 last_pts;
u8 last_fid;
- u8 brightness;
- u8 contrast;
- u8 autogain;
- u8 exposure;
- s8 sharpness;
- u8 satur;
- u8 freq;
+ u8 sensor;
+};
+enum sensors {
+ SENSOR_OV965x, /* ov9657 */
+ SENSOR_OV971x, /* ov9712 */
+ NSENSORS
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsatur(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsatur(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- { /* 0 */
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcontrast(struct gspca_dev *gspca_dev);
+static void setautogain(struct gspca_dev *gspca_dev);
+static void setexposure(struct gspca_dev *gspca_dev);
+static void setsharpness(struct gspca_dev *gspca_dev);
+static void setsatur(struct gspca_dev *gspca_dev);
+static void setlightfreq(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[NCTRLS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -85,13 +92,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 15,
.step = 1,
-#define BRIGHTNESS_DEF 7
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 7
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness
},
- { /* 1 */
+[CONTRAST] = {
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -99,13 +104,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 15,
.step = 1,
-#define CONTRAST_DEF 3
- .default_value = CONTRAST_DEF,
+ .default_value = 3
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
+ .set_control = setcontrast
},
- { /* 2 */
+[AUTOGAIN] = {
{
.id = V4L2_CID_AUTOGAIN,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -116,11 +119,9 @@ static const struct ctrl sd_ctrls[] = {
#define AUTOGAIN_DEF 1
.default_value = AUTOGAIN_DEF,
},
- .set = sd_setautogain,
- .get = sd_getautogain,
+ .set_control = setautogain
},
-#define EXPO_IDX 3
- { /* 3 */
+[EXPOSURE] = {
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -128,13 +129,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 3,
.step = 1,
-#define EXPO_DEF 0
- .default_value = EXPO_DEF,
+ .default_value = 0
},
- .set = sd_setexposure,
- .get = sd_getexposure,
+ .set_control = setexposure
},
- { /* 4 */
+[SHARPNESS] = {
{
.id = V4L2_CID_SHARPNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -142,13 +141,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = -1, /* -1 = auto */
.maximum = 4,
.step = 1,
-#define SHARPNESS_DEF -1
- .default_value = SHARPNESS_DEF,
+ .default_value = -1
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
+ .set_control = setsharpness
},
- { /* 5 */
+[SATUR] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -156,13 +153,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 4,
.step = 1,
-#define SATUR_DEF 2
- .default_value = SATUR_DEF,
+ .default_value = 2
},
- .set = sd_setsatur,
- .get = sd_getsatur,
+ .set_control = setsatur
},
- {
+[LIGHTFREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.type = V4L2_CTRL_TYPE_MENU,
@@ -170,11 +165,9 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
.step = 1,
-#define FREQ_DEF 0
- .default_value = FREQ_DEF,
+ .default_value = 0
},
- .set = sd_setfreq,
- .get = sd_getfreq,
+ .set_control = setlightfreq
},
};
@@ -206,6 +199,14 @@ static const struct v4l2_pix_format ov965x_mode[] = {
.colorspace = V4L2_COLORSPACE_JPEG},
};
+static const struct v4l2_pix_format ov971x_mode[] = {
+ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB
+ }
+};
+
static const u8 bridge_init[][2] = {
{0x88, 0xf8},
{0x89, 0xff},
@@ -240,7 +241,7 @@ static const u8 bridge_init[][2] = {
{0x94, 0x11},
};
-static const u8 sensor_init[][2] = {
+static const u8 ov965x_init[][2] = {
{0x12, 0x80}, /* com7 - SSCB reset */
{0x00, 0x00}, /* gain */
{0x01, 0x80}, /* blue */
@@ -450,7 +451,7 @@ static const u8 bridge_init_2[][2] = {
{0x94, 0x11},
};
-static const u8 sensor_init_2[][2] = {
+static const u8 ov965x_init_2[][2] = {
{0x3b, 0xc4},
{0x1e, 0x04}, /* mvfp */
{0x13, 0xe0}, /* com8 */
@@ -492,7 +493,65 @@ static const u8 sensor_init_2[][2] = {
{0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
};
-static const u8 sensor_start_1_vga[][2] = { /* same for qvga */
+static const u8 ov971x_init[][2] = {
+ {0x12, 0x80},
+ {0x09, 0x10},
+ {0x1e, 0x07},
+ {0x5f, 0x18},
+ {0x69, 0x04},
+ {0x65, 0x2a},
+ {0x68, 0x0a},
+ {0x39, 0x28},
+ {0x4d, 0x90},
+ {0xc1, 0x80},
+ {0x0c, 0x30},
+ {0x6d, 0x02},
+ {0x96, 0xf1},
+ {0xbc, 0x68},
+ {0x12, 0x00},
+ {0x3b, 0x00},
+ {0x97, 0x80},
+ {0x17, 0x25},
+ {0x18, 0xa2},
+ {0x19, 0x01},
+ {0x1a, 0xca},
+ {0x03, 0x0a},
+ {0x32, 0x07},
+ {0x98, 0x40}, /*{0x98, 0x00},*/
+ {0x99, 0xA0}, /*{0x99, 0x00},*/
+ {0x9a, 0x01}, /*{0x9a, 0x00},*/
+ {0x57, 0x00},
+ {0x58, 0x78}, /*{0x58, 0xc8},*/
+ {0x59, 0x50}, /*{0x59, 0xa0},*/
+ {0x4c, 0x13},
+ {0x4b, 0x36},
+ {0x3d, 0x3c},
+ {0x3e, 0x03},
+ {0xbd, 0x50}, /*{0xbd, 0xa0},*/
+ {0xbe, 0x78}, /*{0xbe, 0xc8},*/
+ {0x4e, 0x55},
+ {0x4f, 0x55},
+ {0x50, 0x55},
+ {0x51, 0x55},
+ {0x24, 0x55},
+ {0x25, 0x40},
+ {0x26, 0xa1},
+ {0x5c, 0x59},
+ {0x5d, 0x00},
+ {0x11, 0x00},
+ {0x2a, 0x98},
+ {0x2b, 0x06},
+ {0x2d, 0x00},
+ {0x2e, 0x00},
+ {0x13, 0xa5},
+ {0x14, 0x40},
+ {0x4a, 0x00},
+ {0x49, 0xce},
+ {0x22, 0x03},
+ {0x09, 0x00}
+};
+
+static const u8 ov965x_start_1_vga[][2] = { /* same for qvga */
{0x12, 0x62}, /* com7 - 30fps VGA YUV */
{0x36, 0xfa}, /* aref3 */
{0x69, 0x0a}, /* hv */
@@ -515,7 +574,7 @@ static const u8 sensor_start_1_vga[][2] = { /* same for qvga */
{0xc0, 0xaa},
};
-static const u8 sensor_start_1_svga[][2] = {
+static const u8 ov965x_start_1_svga[][2] = {
{0x12, 0x02}, /* com7 - YUYV - VGA 15 full resolution */
{0x36, 0xf8}, /* aref3 */
{0x69, 0x02}, /* hv */
@@ -537,7 +596,7 @@ static const u8 sensor_start_1_svga[][2] = {
{0xc0, 0xe2},
};
-static const u8 sensor_start_1_xga[][2] = {
+static const u8 ov965x_start_1_xga[][2] = {
{0x12, 0x02}, /* com7 */
{0x36, 0xf8}, /* aref3 */
{0x69, 0x02}, /* hv */
@@ -560,7 +619,7 @@ static const u8 sensor_start_1_xga[][2] = {
{0xc0, 0xe2},
};
-static const u8 sensor_start_1_sxga[][2] = {
+static const u8 ov965x_start_1_sxga[][2] = {
{0x12, 0x02}, /* com7 */
{0x36, 0xf8}, /* aref3 */
{0x69, 0x02}, /* hv */
@@ -709,7 +768,7 @@ static const u8 bridge_start_sxga[][2] = {
{0x94, 0x11},
};
-static const u8 sensor_start_2_qvga[][2] = {
+static const u8 ov965x_start_2_qvga[][2] = {
{0x3b, 0xe4}, /* com11 - night mode 1/4 frame rate */
{0x1e, 0x04}, /* mvfp */
{0x13, 0xe0}, /* com8 */
@@ -727,7 +786,7 @@ static const u8 sensor_start_2_qvga[][2] = {
{0x3a, 0x80}, /* tslb - yuyv */
};
-static const u8 sensor_start_2_vga[][2] = {
+static const u8 ov965x_start_2_vga[][2] = {
{0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */
{0x1e, 0x04}, /* mvfp */
{0x13, 0xe0}, /* com8 */
@@ -743,7 +802,7 @@ static const u8 sensor_start_2_vga[][2] = {
{0x2d, 0x00}, /* advfl */
};
-static const u8 sensor_start_2_svga[][2] = { /* same for xga */
+static const u8 ov965x_start_2_svga[][2] = { /* same for xga */
{0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */
{0x1e, 0x04}, /* mvfp */
{0x13, 0xe0}, /* com8 */
@@ -757,7 +816,7 @@ static const u8 sensor_start_2_svga[][2] = { /* same for xga */
{0xa3, 0x41}, /* bd60 */
};
-static const u8 sensor_start_2_sxga[][2] = {
+static const u8 ov965x_start_2_sxga[][2] = {
{0x13, 0xe0}, /* com8 */
{0x00, 0x00},
{0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
@@ -785,7 +844,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev, u16 reg, u8 val)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
if (ret < 0) {
- err("reg_w failed %d", ret);
+ pr_err("reg_w failed %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -810,7 +869,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
PDEBUG(D_USBI, "reg_r [%04x] -> %02x", reg, gspca_dev->usb_buf[0]);
if (ret < 0) {
- err("reg_r err %d", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
}
return gspca_dev->usb_buf[0];
@@ -848,7 +907,7 @@ static void sccb_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
reg_w_i(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
if (!sccb_check_status(gspca_dev))
- err("sccb_write failed");
+ pr_err("sccb_write failed\n");
}
static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -856,11 +915,11 @@ static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg)
reg_w(gspca_dev, OV534_REG_SUBADDR, reg);
reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2);
if (!sccb_check_status(gspca_dev))
- err("sccb_read failed 1");
+ pr_err("sccb_read failed 1\n");
reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2);
if (!sccb_check_status(gspca_dev))
- err("sccb_read failed 2");
+ pr_err("sccb_read failed 2\n");
return reg_r(gspca_dev, OV534_REG_READ);
}
@@ -922,7 +981,9 @@ static void setbrightness(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
- val = sd->brightness;
+ if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS))
+ return;
+ val = sd->ctrls[BRIGHTNESS].val;
if (val < 8)
val = 15 - val; /* f .. 8 */
else
@@ -935,8 +996,10 @@ static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ if (gspca_dev->ctrl_dis & (1 << CONTRAST))
+ return;
sccb_write(gspca_dev, 0x56, /* cnst1 - contrast 1 ctrl coeff */
- sd->contrast << 4);
+ sd->ctrls[CONTRAST].val << 4);
}
static void setautogain(struct gspca_dev *gspca_dev)
@@ -944,10 +1007,12 @@ static void setautogain(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
+ return;
/*fixme: should adjust agc/awb/aec by different controls */
val = sccb_read(gspca_dev, 0x13); /* com8 */
sccb_write(gspca_dev, 0xff, 0x00);
- if (sd->autogain)
+ if (sd->ctrls[AUTOGAIN].val)
val |= 0x05; /* agc & aec */
else
val &= 0xfa;
@@ -960,8 +1025,10 @@ static void setexposure(struct gspca_dev *gspca_dev)
u8 val;
static const u8 expo[4] = {0x00, 0x25, 0x38, 0x5e};
+ if (gspca_dev->ctrl_dis & (1 << EXPOSURE))
+ return;
sccb_write(gspca_dev, 0x10, /* aec[9:2] */
- expo[sd->exposure]);
+ expo[sd->ctrls[EXPOSURE].val]);
val = sccb_read(gspca_dev, 0x13); /* com8 */
sccb_write(gspca_dev, 0xff, 0x00);
@@ -977,7 +1044,9 @@ static void setsharpness(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
s8 val;
- val = sd->sharpness;
+ if (gspca_dev->ctrl_dis & (1 << SHARPNESS))
+ return;
+ val = sd->ctrls[SHARPNESS].val;
if (val < 0) { /* auto */
val = sccb_read(gspca_dev, 0x42); /* com17 */
sccb_write(gspca_dev, 0xff, 0x00);
@@ -1006,8 +1075,10 @@ static void setsatur(struct gspca_dev *gspca_dev)
{0x48, 0x90}
};
- val1 = matrix[sd->satur][0];
- val2 = matrix[sd->satur][1];
+ if (gspca_dev->ctrl_dis & (1 << SATUR))
+ return;
+ val1 = matrix[sd->ctrls[SATUR].val][0];
+ val2 = matrix[sd->ctrls[SATUR].val][1];
val3 = val1 + val2;
sccb_write(gspca_dev, 0x4f, val3); /* matrix coeff */
sccb_write(gspca_dev, 0x50, val3);
@@ -1022,14 +1093,16 @@ static void setsatur(struct gspca_dev *gspca_dev)
sccb_write(gspca_dev, 0x41, val1);
}
-static void setfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (gspca_dev->ctrl_dis & (1 << LIGHTFREQ))
+ return;
val = sccb_read(gspca_dev, 0x13); /* com8 */
sccb_write(gspca_dev, 0xff, 0x00);
- if (sd->freq == 0) {
+ if (sd->ctrls[LIGHTFREQ].val == 0) {
sccb_write(gspca_dev, 0x13, val & 0xdf);
return;
}
@@ -1037,7 +1110,7 @@ static void setfreq(struct gspca_dev *gspca_dev)
val = sccb_read(gspca_dev, 0x42); /* com17 */
sccb_write(gspca_dev, 0xff, 0x00);
- if (sd->freq == 1)
+ if (sd->ctrls[LIGHTFREQ].val == 1)
val |= 0x01;
else
val &= 0xfe;
@@ -1049,34 +1122,19 @@ static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct cam *cam;
-
- cam = &gspca_dev->cam;
- cam->cam_mode = ov965x_mode;
- cam->nmodes = ARRAY_SIZE(ov965x_mode);
+ gspca_dev->cam.ctrls = sd->ctrls;
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
#if AUTOGAIN_DEF != 0
- sd->autogain = AUTOGAIN_DEF;
- gspca_dev->ctrl_inac |= (1 << EXPO_IDX);
-#endif
-#if EXPO_DEF != 0
- sd->exposure = EXPO_DEF;
-#endif
-#if SHARPNESS_DEF != 0
- sd->sharpness = SHARPNESS_DEF;
+ gspca_dev->ctrl_inac |= (1 << EXPOSURE);
#endif
- sd->satur = SATUR_DEF;
- sd->freq = FREQ_DEF;
-
return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
u16 sensor_id;
/* reset bridge */
@@ -1099,68 +1157,117 @@ static int sd_init(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sensor ID: %04x", sensor_id);
/* initialize */
- reg_w_array(gspca_dev, bridge_init,
- ARRAY_SIZE(bridge_init));
- sccb_w_array(gspca_dev, sensor_init,
- ARRAY_SIZE(sensor_init));
- reg_w_array(gspca_dev, bridge_init_2,
- ARRAY_SIZE(bridge_init_2));
- sccb_w_array(gspca_dev, sensor_init_2,
- ARRAY_SIZE(sensor_init_2));
- reg_w(gspca_dev, 0xe0, 0x00);
- reg_w(gspca_dev, 0xe0, 0x01);
- set_led(gspca_dev, 0);
- reg_w(gspca_dev, 0xe0, 0x00);
+ if ((sensor_id & 0xfff0) == 0x9650) {
+ sd->sensor = SENSOR_OV965x;
+
+ gspca_dev->cam.cam_mode = ov965x_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(ov965x_mode);
+
+ reg_w_array(gspca_dev, bridge_init,
+ ARRAY_SIZE(bridge_init));
+ sccb_w_array(gspca_dev, ov965x_init,
+ ARRAY_SIZE(ov965x_init));
+ reg_w_array(gspca_dev, bridge_init_2,
+ ARRAY_SIZE(bridge_init_2));
+ sccb_w_array(gspca_dev, ov965x_init_2,
+ ARRAY_SIZE(ov965x_init_2));
+ reg_w(gspca_dev, 0xe0, 0x00);
+ reg_w(gspca_dev, 0xe0, 0x01);
+ set_led(gspca_dev, 0);
+ reg_w(gspca_dev, 0xe0, 0x00);
+ } else if ((sensor_id & 0xfff0) == 0x9710) {
+ const char *p;
+ int l;
+
+ sd->sensor = SENSOR_OV971x;
+
+ gspca_dev->cam.cam_mode = ov971x_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(ov971x_mode);
+
+ /* no control yet */
+ gspca_dev->ctrl_dis = (1 << NCTRLS) - 1;
+
+ gspca_dev->cam.bulk = 1;
+ gspca_dev->cam.bulk_size = 16384;
+ gspca_dev->cam.bulk_nurbs = 2;
+
+ sccb_w_array(gspca_dev, ov971x_init,
+ ARRAY_SIZE(ov971x_init));
+
+ /* set video format on bridge processor */
+ /* access bridge processor's video format registers at: 0x00 */
+ reg_w(gspca_dev, 0x1c, 0x00);
+ /*set register: 0x00 is 'RAW8', 0x40 is 'YUV422' (YUYV?)*/
+ reg_w(gspca_dev, 0x1d, 0x00);
+
+ /* Will W. specific stuff
+ * set VSYNC to
+ * output (0x1f) if first webcam
+ * input (0x17) if 2nd or 3rd webcam */
+ p = video_device_node_name(&gspca_dev->vdev);
+ l = strlen(p) - 1;
+ if (p[l] == '0')
+ reg_w(gspca_dev, 0x56, 0x1f);
+ else
+ reg_w(gspca_dev, 0x56, 0x17);
+ } else {
+ err("Unknown sensor %04x", sensor_id);
+ return -EINVAL;
+ }
return gspca_dev->usb_err;
}
static int sd_start(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->sensor == SENSOR_OV971x)
+ return gspca_dev->usb_err;
switch (gspca_dev->curr_mode) {
case QVGA_MODE: /* 320x240 */
- sccb_w_array(gspca_dev, sensor_start_1_vga,
- ARRAY_SIZE(sensor_start_1_vga));
+ sccb_w_array(gspca_dev, ov965x_start_1_vga,
+ ARRAY_SIZE(ov965x_start_1_vga));
reg_w_array(gspca_dev, bridge_start_qvga,
ARRAY_SIZE(bridge_start_qvga));
- sccb_w_array(gspca_dev, sensor_start_2_qvga,
- ARRAY_SIZE(sensor_start_2_qvga));
+ sccb_w_array(gspca_dev, ov965x_start_2_qvga,
+ ARRAY_SIZE(ov965x_start_2_qvga));
break;
case VGA_MODE: /* 640x480 */
- sccb_w_array(gspca_dev, sensor_start_1_vga,
- ARRAY_SIZE(sensor_start_1_vga));
+ sccb_w_array(gspca_dev, ov965x_start_1_vga,
+ ARRAY_SIZE(ov965x_start_1_vga));
reg_w_array(gspca_dev, bridge_start_vga,
ARRAY_SIZE(bridge_start_vga));
- sccb_w_array(gspca_dev, sensor_start_2_vga,
- ARRAY_SIZE(sensor_start_2_vga));
+ sccb_w_array(gspca_dev, ov965x_start_2_vga,
+ ARRAY_SIZE(ov965x_start_2_vga));
break;
case SVGA_MODE: /* 800x600 */
- sccb_w_array(gspca_dev, sensor_start_1_svga,
- ARRAY_SIZE(sensor_start_1_svga));
+ sccb_w_array(gspca_dev, ov965x_start_1_svga,
+ ARRAY_SIZE(ov965x_start_1_svga));
reg_w_array(gspca_dev, bridge_start_svga,
ARRAY_SIZE(bridge_start_svga));
- sccb_w_array(gspca_dev, sensor_start_2_svga,
- ARRAY_SIZE(sensor_start_2_svga));
+ sccb_w_array(gspca_dev, ov965x_start_2_svga,
+ ARRAY_SIZE(ov965x_start_2_svga));
break;
case XGA_MODE: /* 1024x768 */
- sccb_w_array(gspca_dev, sensor_start_1_xga,
- ARRAY_SIZE(sensor_start_1_xga));
+ sccb_w_array(gspca_dev, ov965x_start_1_xga,
+ ARRAY_SIZE(ov965x_start_1_xga));
reg_w_array(gspca_dev, bridge_start_xga,
ARRAY_SIZE(bridge_start_xga));
- sccb_w_array(gspca_dev, sensor_start_2_svga,
- ARRAY_SIZE(sensor_start_2_svga));
+ sccb_w_array(gspca_dev, ov965x_start_2_svga,
+ ARRAY_SIZE(ov965x_start_2_svga));
break;
default:
/* case SXGA_MODE: * 1280x1024 */
- sccb_w_array(gspca_dev, sensor_start_1_sxga,
- ARRAY_SIZE(sensor_start_1_sxga));
+ sccb_w_array(gspca_dev, ov965x_start_1_sxga,
+ ARRAY_SIZE(ov965x_start_1_sxga));
reg_w_array(gspca_dev, bridge_start_sxga,
ARRAY_SIZE(bridge_start_sxga));
- sccb_w_array(gspca_dev, sensor_start_2_sxga,
- ARRAY_SIZE(sensor_start_2_sxga));
+ sccb_w_array(gspca_dev, ov965x_start_2_sxga,
+ ARRAY_SIZE(ov965x_start_2_sxga));
break;
}
- setfreq(gspca_dev);
+ setlightfreq(gspca_dev);
setautogain(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
@@ -1198,9 +1305,11 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
__u32 this_pts;
u8 this_fid;
int remaining_len = len;
+ int payload_len;
+ payload_len = gspca_dev->cam.bulk ? 2048 : 2040;
do {
- len = min(remaining_len, 2040);
+ len = min(remaining_len, payload_len);
/* Payloads are prefixed with a UVC-style header. We
consider a frame to start when the FID toggles, or the PTS
@@ -1262,138 +1371,6 @@ scan_next:
} while (remaining_len > 0);
}
-/* controls */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
-
- if (gspca_dev->streaming) {
- if (val)
- gspca_dev->ctrl_inac |= (1 << EXPO_IDX);
- else
- gspca_dev->ctrl_inac &= ~(1 << EXPO_IDX);
- setautogain(gspca_dev);
- }
- return gspca_dev->usb_err;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming)
- setsharpness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
- return 0;
-}
-
-static int sd_setsatur(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->satur = val;
- if (gspca_dev->streaming)
- setsatur(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getsatur(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->satur;
- return 0;
-}
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->freq = val;
- if (gspca_dev->streaming)
- setfreq(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->freq;
- return 0;
-}
-
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
@@ -1419,7 +1396,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -1430,6 +1407,7 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x05a9, 0x8065)},
{USB_DEVICE(0x06f8, 0x3003)},
{}
};
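The recurring change in these hunks is the switch from the old err()/warn()/info() macros to pr_err()/pr_warn()/pr_info(), with pr_fmt() prepending KBUILD_MODNAME to every message. A minimal, self-contained module sketch of that pattern (module and message names here are illustrative only, not taken from the patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int __init logdemo_init(void)
{
	/* with pr_fmt above, this prints "logdemo: probe failed: -19" */
	pr_err("probe failed: %d\n", -ENODEV);
	pr_info("loaded\n");
	return 0;
}

static void __exit logdemo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(logdemo_init);
module_exit(logdemo_exit);
MODULE_LICENSE("GPL");

Defining pr_fmt() before the includes matters: linux/printk.h only installs its default (empty) prefix when the macro is not already defined, which is why each driver above adds the define ahead of MODULE_NAME and its other includes.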
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 81739a2f205..1600df152fd 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "pac207"
#include <linux/input.h>
@@ -178,8 +180,8 @@ static int pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
0x00, index,
gspca_dev->usb_buf, length, PAC207_CTRL_TIMEOUT);
if (err < 0)
- err("Failed to write registers to index 0x%04X, error %d)",
- index, err);
+ pr_err("Failed to write registers to index 0x%04X, error %d\n",
+ index, err);
return err;
}
@@ -194,8 +196,8 @@ static int pac207_write_reg(struct gspca_dev *gspca_dev, u16 index, u16 value)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, index, NULL, 0, PAC207_CTRL_TIMEOUT);
if (err)
- err("Failed to write a register (index 0x%04X,"
- " value 0x%02X, error %d)", index, value, err);
+ pr_err("Failed to write a register (index 0x%04X, value 0x%02X, error %d)\n",
+ index, value, err);
return err;
}
@@ -210,8 +212,8 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
0x00, index,
gspca_dev->usb_buf, 1, PAC207_CTRL_TIMEOUT);
if (res < 0) {
- err("Failed to read a register (index 0x%04X, error %d)",
- index, res);
+ pr_err("Failed to read a register (index 0x%04X, error %d)\n",
+ index, res);
return res;
}
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 5615d7bd830..1c44f78ff9e 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -61,6 +61,8 @@
3 | 0x21 | sethvflip()
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "pac7302"
#include <linux/input.h>
@@ -408,8 +410,8 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- err("reg_w_buf failed index 0x%02x, error %d",
- index, ret);
+ pr_err("reg_w_buf failed index 0x%02x, error %d\n",
+ index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -431,8 +433,8 @@ static void reg_w(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_w() failed index 0x%02x, value 0x%02x, error %d",
- index, value, ret);
+ pr_err("reg_w() failed index 0x%02x, value 0x%02x, error %d\n",
+ index, value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -466,9 +468,8 @@ static void reg_w_page(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_w_page() failed index 0x%02x, "
- "value 0x%02x, error %d",
- index, page[index], ret);
+ pr_err("reg_w_page() failed index 0x%02x, value 0x%02x, error %d\n",
+ index, page[index], ret);
gspca_dev->usb_err = ret;
break;
}
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index f8801b50e64..7509d05dc06 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -49,6 +49,8 @@
for max gain, 0x14 for minimal gain.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "pac7311"
#include <linux/input.h>
@@ -276,8 +278,8 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- err("reg_w_buf() failed index 0x%02x, error %d",
- index, ret);
+ pr_err("reg_w_buf() failed index 0x%02x, error %d\n",
+ index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -299,8 +301,8 @@ static void reg_w(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_w() failed index 0x%02x, value 0x%02x, error %d",
- index, value, ret);
+ pr_err("reg_w() failed index 0x%02x, value 0x%02x, error %d\n",
+ index, value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -334,9 +336,8 @@ static void reg_w_page(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_w_page() failed index 0x%02x, "
- "value 0x%02x, error %d",
- index, page[index], ret);
+ pr_err("reg_w_page() failed index 0x%02x, value 0x%02x, error %d\n",
+ index, page[index], ret);
gspca_dev->usb_err = ret;
break;
}
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
index 4c283c24c75..3b71bbcd977 100644
--- a/drivers/media/video/gspca/se401.c
+++ b/drivers/media/video/gspca/se401.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "se401"
#define BULK_SIZE 4096
@@ -144,8 +146,8 @@ static void se401_write_req(struct gspca_dev *gspca_dev, u16 req, u16 value,
value, 0, NULL, 0, 1000);
if (err < 0) {
if (!silent)
- err("write req failed req %#04x val %#04x error %d",
- req, value, err);
+ pr_err("write req failed req %#04x val %#04x error %d\n",
+ req, value, err);
gspca_dev->usb_err = err;
}
}
@@ -158,7 +160,7 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
return;
if (USB_BUF_SZ < READ_REQ_SIZE) {
- err("USB_BUF_SZ too small!!");
+ pr_err("USB_BUF_SZ too small!!\n");
gspca_dev->usb_err = -ENOBUFS;
return;
}
@@ -169,7 +171,8 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
0, 0, gspca_dev->usb_buf, READ_REQ_SIZE, 1000);
if (err < 0) {
if (!silent)
- err("read req failed req %#04x error %d", req, err);
+ pr_err("read req failed req %#04x error %d\n",
+ req, err);
gspca_dev->usb_err = err;
}
}
@@ -188,8 +191,8 @@ static void se401_set_feature(struct gspca_dev *gspca_dev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
param, selector, NULL, 0, 1000);
if (err < 0) {
- err("set feature failed sel %#04x param %#04x error %d",
- selector, param, err);
+ pr_err("set feature failed sel %#04x param %#04x error %d\n",
+ selector, param, err);
gspca_dev->usb_err = err;
}
}
@@ -202,7 +205,7 @@ static int se401_get_feature(struct gspca_dev *gspca_dev, u16 selector)
return gspca_dev->usb_err;
if (USB_BUF_SZ < 2) {
- err("USB_BUF_SZ too small!!");
+ pr_err("USB_BUF_SZ too small!!\n");
gspca_dev->usb_err = -ENOBUFS;
return gspca_dev->usb_err;
}
@@ -213,7 +216,8 @@ static int se401_get_feature(struct gspca_dev *gspca_dev, u16 selector)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, selector, gspca_dev->usb_buf, 2, 1000);
if (err < 0) {
- err("get feature failed sel %#04x error %d", selector, err);
+ pr_err("get feature failed sel %#04x error %d\n",
+ selector, err);
gspca_dev->usb_err = err;
return err;
}
@@ -300,21 +304,21 @@ static int sd_config(struct gspca_dev *gspca_dev,
return gspca_dev->usb_err;
if (cd[1] != 0x41) {
- err("Wrong descriptor type");
+ pr_err("Wrong descriptor type\n");
return -ENODEV;
}
if (!(cd[2] & SE401_FORMAT_BAYER)) {
- err("Bayer format not supported!");
+ pr_err("Bayer format not supported!\n");
return -ENODEV;
}
if (cd[3])
- info("ExtraFeatures: %d", cd[3]);
+ pr_info("ExtraFeatures: %d\n", cd[3]);
n = cd[4] | (cd[5] << 8);
if (n > MAX_MODES) {
- err("Too many frame sizes");
+ pr_err("Too many frame sizes\n");
return -ENODEV;
}
@@ -353,15 +357,16 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->fmts[i].pixelformat = V4L2_PIX_FMT_SBGGR8;
sd->fmts[i].bytesperline = widths[i];
sd->fmts[i].sizeimage = widths[i] * heights[i];
- info("Frame size: %dx%d bayer", widths[i], heights[i]);
+ pr_info("Frame size: %dx%d bayer\n",
+ widths[i], heights[i]);
} else {
/* Found a match use janggu compression */
sd->fmts[i].pixelformat = V4L2_PIX_FMT_SE401;
sd->fmts[i].bytesperline = 0;
sd->fmts[i].sizeimage = widths[i] * heights[i] * 3;
- info("Frame size: %dx%d 1/%dth janggu",
- widths[i], heights[i],
- sd->fmts[i].priv * sd->fmts[i].priv);
+ pr_info("Frame size: %dx%d 1/%dth janggu\n",
+ widths[i], heights[i],
+ sd->fmts[i].priv * sd->fmts[i].priv);
}
}
@@ -571,11 +576,12 @@ static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
plen = ((bits + 47) >> 4) << 1;
/* Sanity checks */
if (plen > 1024) {
- err("invalid packet len %d restarting stream", plen);
+ pr_err("invalid packet len %d restarting stream\n",
+ plen);
goto error;
}
if (info == 3) {
- err("unknown frame info value restarting stream");
+ pr_err("unknown frame info value restarting stream\n");
goto error;
}
@@ -599,8 +605,8 @@ static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
break;
case 1: /* EOF */
if (sd->pixels_read != imagesize) {
- err("frame size %d expected %d",
- sd->pixels_read, imagesize);
+ pr_err("frame size %d expected %d\n",
+ sd->pixels_read, imagesize);
goto error;
}
sd_complete_frame(gspca_dev, sd->packet, plen);
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 4271f86dfe0..48aae3926a3 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "sn9c2028"
#include "gspca.h"
@@ -75,8 +77,8 @@ static int sn9c2028_command(struct gspca_dev *gspca_dev, u8 *command)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
2, 0, gspca_dev->usb_buf, 6, 500);
if (rc < 0) {
- err("command write [%02x] error %d",
- gspca_dev->usb_buf[0], rc);
+ pr_err("command write [%02x] error %d\n",
+ gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -93,7 +95,7 @@ static int sn9c2028_read1(struct gspca_dev *gspca_dev)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1, 0, gspca_dev->usb_buf, 1, 500);
if (rc != 1) {
- err("read1 error %d", rc);
+ pr_err("read1 error %d\n", rc);
return (rc < 0) ? rc : -EIO;
}
PDEBUG(D_USBI, "read1 response %02x", gspca_dev->usb_buf[0]);
@@ -109,7 +111,7 @@ static int sn9c2028_read4(struct gspca_dev *gspca_dev, u8 *reading)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
4, 0, gspca_dev->usb_buf, 4, 500);
if (rc != 4) {
- err("read4 error %d", rc);
+ pr_err("read4 error %d\n", rc);
return (rc < 0) ? rc : -EIO;
}
memcpy(reading, gspca_dev->usb_buf, 4);
@@ -131,7 +133,7 @@ static int sn9c2028_long_command(struct gspca_dev *gspca_dev, u8 *command)
for (i = 0; i < 256 && status < 2; i++)
status = sn9c2028_read1(gspca_dev);
if (status != 2) {
- err("long command status read error %d", status);
+ pr_err("long command status read error %d\n", status);
return (status < 0) ? status : -EIO;
}
@@ -638,7 +640,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
err_code = start_vivitar_cam(gspca_dev);
break;
default:
- err("Starting unknown camera, please report this");
+ pr_err("Starting unknown camera, please report this\n");
return -ENXIO;
}
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index c431900cd29..86e07a139a1 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/input.h>
#include "gspca.h"
@@ -1123,7 +1125,7 @@ static int reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
length,
500);
if (unlikely(result < 0 || result != length)) {
- err("Read register failed 0x%02X", reg);
+ pr_err("Read register failed 0x%02X\n", reg);
return -EIO;
}
return 0;
@@ -1144,7 +1146,7 @@ static int reg_w(struct gspca_dev *gspca_dev, u16 reg,
length,
500);
if (unlikely(result < 0 || result != length)) {
- err("Write register failed index 0x%02X", reg);
+ pr_err("Write register failed index 0x%02X\n", reg);
return -EIO;
}
return 0;
@@ -1275,14 +1277,14 @@ static int ov9650_init_sensor(struct gspca_dev *gspca_dev)
return -EINVAL;
if (id != 0x7fa2) {
- err("sensor id for ov9650 doesn't match (0x%04x)", id);
+ pr_err("sensor id for ov9650 doesn't match (0x%04x)\n", id);
return -ENODEV;
}
for (i = 0; i < ARRAY_SIZE(ov9650_init); i++) {
if (i2c_w1(gspca_dev, ov9650_init[i].reg,
ov9650_init[i].val) < 0) {
- err("OV9650 sensor initialization failed");
+ pr_err("OV9650 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1299,7 +1301,7 @@ static int ov9655_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(ov9655_init); i++) {
if (i2c_w1(gspca_dev, ov9655_init[i].reg,
ov9655_init[i].val) < 0) {
- err("OV9655 sensor initialization failed");
+ pr_err("OV9655 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1318,7 +1320,7 @@ static int soi968_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(soi968_init); i++) {
if (i2c_w1(gspca_dev, soi968_init[i].reg,
soi968_init[i].val) < 0) {
- err("SOI968 sensor initialization failed");
+ pr_err("SOI968 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1338,7 +1340,7 @@ static int ov7660_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(ov7660_init); i++) {
if (i2c_w1(gspca_dev, ov7660_init[i].reg,
ov7660_init[i].val) < 0) {
- err("OV7660 sensor initialization failed");
+ pr_err("OV7660 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1355,7 +1357,7 @@ static int ov7670_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(ov7670_init); i++) {
if (i2c_w1(gspca_dev, ov7670_init[i].reg,
ov7670_init[i].val) < 0) {
- err("OV7670 sensor initialization failed");
+ pr_err("OV7670 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1379,14 +1381,14 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(mt9v011_init); i++) {
if (i2c_w2(gspca_dev, mt9v011_init[i].reg,
mt9v011_init[i].val) < 0) {
- err("MT9V011 sensor initialization failed");
+ pr_err("MT9V011 sensor initialization failed\n");
return -ENODEV;
}
}
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V011;
- info("MT9V011 sensor detected");
+ pr_info("MT9V011 sensor detected\n");
return 0;
}
@@ -1397,7 +1399,7 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(mt9v111_init); i++) {
if (i2c_w2(gspca_dev, mt9v111_init[i].reg,
mt9v111_init[i].val) < 0) {
- err("MT9V111 sensor initialization failed");
+ pr_err("MT9V111 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1407,7 +1409,7 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V111;
- info("MT9V111 sensor detected");
+ pr_info("MT9V111 sensor detected\n");
return 0;
}
@@ -1422,14 +1424,14 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(mt9v112_init); i++) {
if (i2c_w2(gspca_dev, mt9v112_init[i].reg,
mt9v112_init[i].val) < 0) {
- err("MT9V112 sensor initialization failed");
+ pr_err("MT9V112 sensor initialization failed\n");
return -ENODEV;
}
}
sd->hstart = 6;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V112;
- info("MT9V112 sensor detected");
+ pr_info("MT9V112 sensor detected\n");
return 0;
}
@@ -1443,7 +1445,7 @@ static int mt9m112_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(mt9m112_init); i++) {
if (i2c_w2(gspca_dev, mt9m112_init[i].reg,
mt9m112_init[i].val) < 0) {
- err("MT9M112 sensor initialization failed");
+ pr_err("MT9M112 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1461,7 +1463,7 @@ static int mt9m111_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(mt9m111_init); i++) {
if (i2c_w2(gspca_dev, mt9m111_init[i].reg,
mt9m111_init[i].val) < 0) {
- err("MT9M111 sensor initialization failed");
+ pr_err("MT9M111 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1485,20 +1487,20 @@ static int mt9m001_init_sensor(struct gspca_dev *gspca_dev)
switch (id) {
case 0x8411:
case 0x8421:
- info("MT9M001 color sensor detected");
+ pr_info("MT9M001 color sensor detected\n");
break;
case 0x8431:
- info("MT9M001 mono sensor detected");
+ pr_info("MT9M001 mono sensor detected\n");
break;
default:
- err("No MT9M001 chip detected, ID = %x\n", id);
+ pr_err("No MT9M001 chip detected, ID = %x\n\n", id);
return -ENODEV;
}
for (i = 0; i < ARRAY_SIZE(mt9m001_init); i++) {
if (i2c_w2(gspca_dev, mt9m001_init[i].reg,
mt9m001_init[i].val) < 0) {
- err("MT9M001 sensor initialization failed");
+ pr_err("MT9M001 sensor initialization failed\n");
return -ENODEV;
}
}
@@ -1517,7 +1519,7 @@ static int hv7131r_init_sensor(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(hv7131r_init); i++) {
if (i2c_w1(gspca_dev, hv7131r_init[i].reg,
hv7131r_init[i].val) < 0) {
- err("HV7131R Sensor initialization failed");
+ pr_err("HV7131R Sensor initialization failed\n");
return -ENODEV;
}
}
@@ -2103,7 +2105,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
for (i = 0; i < ARRAY_SIZE(bridge_init); i++) {
value = bridge_init[i][1];
if (reg_w(gspca_dev, bridge_init[i][0], &value, 1) < 0) {
- err("Device initialization failed");
+ pr_err("Device initialization failed\n");
return -ENODEV;
}
}
@@ -2114,7 +2116,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x1006, 0x20);
if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) {
- err("Device initialization failed");
+ pr_err("Device initialization failed\n");
return -ENODEV;
}
@@ -2122,27 +2124,27 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SENSOR_OV9650:
if (ov9650_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("OV9650 sensor detected");
+ pr_info("OV9650 sensor detected\n");
break;
case SENSOR_OV9655:
if (ov9655_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("OV9655 sensor detected");
+ pr_info("OV9655 sensor detected\n");
break;
case SENSOR_SOI968:
if (soi968_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("SOI968 sensor detected");
+ pr_info("SOI968 sensor detected\n");
break;
case SENSOR_OV7660:
if (ov7660_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("OV7660 sensor detected");
+ pr_info("OV7660 sensor detected\n");
break;
case SENSOR_OV7670:
if (ov7670_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("OV7670 sensor detected");
+ pr_info("OV7670 sensor detected\n");
break;
case SENSOR_MT9VPRB:
if (mt9v_init_sensor(gspca_dev) < 0)
@@ -2151,12 +2153,12 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SENSOR_MT9M111:
if (mt9m111_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("MT9M111 sensor detected");
+ pr_info("MT9M111 sensor detected\n");
break;
case SENSOR_MT9M112:
if (mt9m112_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("MT9M112 sensor detected");
+ pr_info("MT9M112 sensor detected\n");
break;
case SENSOR_MT9M001:
if (mt9m001_init_sensor(gspca_dev) < 0)
@@ -2165,10 +2167,10 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SENSOR_HV7131R:
if (hv7131r_init_sensor(gspca_dev) < 0)
return -ENODEV;
- info("HV7131R sensor detected");
+ pr_info("HV7131R sensor detected\n");
break;
default:
- info("Unsupported Sensor");
+ pr_info("Unsupported Sensor\n");
return -ENODEV;
}
@@ -2263,19 +2265,19 @@ static int sd_start(struct gspca_dev *gspca_dev)
switch (mode & SCALE_MASK) {
case SCALE_1280x1024:
scale = 0xc0;
- info("Set 1280x1024");
+ pr_info("Set 1280x1024\n");
break;
case SCALE_640x480:
scale = 0x80;
- info("Set 640x480");
+ pr_info("Set 640x480\n");
break;
case SCALE_320x240:
scale = 0x90;
- info("Set 320x240");
+ pr_info("Set 320x240\n");
break;
case SCALE_160x120:
scale = 0xa0;
- info("Set 160x120");
+ pr_info("Set 160x120\n");
break;
}
@@ -2513,7 +2515,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)},
{USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)},
- {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, 0)},
+ {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, LED_REVERSE)},
{USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, LED_REVERSE)},
{USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index c477ad11f10..c746bf19ca1 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -1,7 +1,7 @@
/*
* Sonix sn9c102p sn9c105 sn9c120 (jpeg) subdriver
*
- * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009-2011 Jean-François Moine <http://moinejf.free.fr>
* Copyright (C) 2005 Michel Xhaard mxhaard@magic.fr
*
* This program is free software; you can redistribute it and/or modify
@@ -19,6 +19,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "sonixj"
#include <linux/input.h>
@@ -136,7 +138,7 @@ static void setillum(struct gspca_dev *gspca_dev);
static void setfreq(struct gspca_dev *gspca_dev);
static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -157,7 +159,7 @@ static const struct ctrl sd_ctrls[NCTRLS] = {
#define CONTRAST_MAX 127
.maximum = CONTRAST_MAX,
.step = 1,
- .default_value = 63,
+ .default_value = 20,
},
.set_control = setcontrast
},
@@ -737,7 +739,7 @@ static const u8 mi0360_sensor_init[][8] = {
{0xd1, 0x5d, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x5d, 0x24, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x5d, 0x26, 0x00, 0x00, 0x00, 0x24, 0x10},
- {0xd1, 0x5d, 0x2f, 0xf7, 0xB0, 0x00, 0x04, 0x10},
+ {0xd1, 0x5d, 0x2f, 0xf7, 0xb0, 0x00, 0x04, 0x10},
{0xd1, 0x5d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x5d, 0x33, 0x00, 0x00, 0x01, 0x00, 0x10},
{0xb1, 0x5d, 0x3d, 0x06, 0x8f, 0x00, 0x00, 0x10},
@@ -1277,7 +1279,7 @@ static const u8 soi768_sensor_param1[][8] = {
/* global gain ? : 07 - change with 0x15 at the end */
{0xa1, 0x21, 0x10, 0x3f, 0x00, 0x00, 0x00, 0x10}, /* ???? : 063f */
{0xa1, 0x21, 0x04, 0x06, 0x00, 0x00, 0x00, 0x10},
- {0xb1, 0x21, 0x2d, 0x00, 0x02, 0x00, 0x00, 0x10},
+ {0xb1, 0x21, 0x2d, 0x63, 0x03, 0x00, 0x00, 0x10},
/* exposure ? : 0200 - change with 0x1e at the end */
{}
};
@@ -1395,7 +1397,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
return;
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
- err("reg_r: buffer overflow");
+ pr_err("reg_r: buffer overflow\n");
return;
}
#endif
@@ -1408,7 +1410,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
500);
PDEBUG(D_USBI, "reg_r [%02x] -> %02x", value, gspca_dev->usb_buf[0]);
if (ret < 0) {
- err("reg_r err %d", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -1432,7 +1434,7 @@ static void reg_w1(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_w1 err %d", ret);
+ pr_err("reg_w1 err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -1449,7 +1451,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
value, buffer[0], buffer[1]);
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
- err("reg_w: buffer overflow");
+ pr_err("reg_w: buffer overflow\n");
return;
}
#endif
@@ -1462,7 +1464,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -1502,7 +1504,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
gspca_dev->usb_buf, 8,
500);
if (ret < 0) {
- err("i2c_w1 err %d", ret);
+ pr_err("i2c_w1 err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -1527,7 +1529,7 @@ static void i2c_w8(struct gspca_dev *gspca_dev,
500);
msleep(2);
if (ret < 0) {
- err("i2c_w8 err %d", ret);
+ pr_err("i2c_w8 err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -1591,7 +1593,7 @@ static void hv7131r_probe(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sensor HV7131R found");
return;
}
- warn("Erroneous HV7131R ID 0x%02x 0x%02x 0x%02x",
+ pr_warn("Erroneous HV7131R ID 0x%02x 0x%02x 0x%02x\n",
gspca_dev->usb_buf[0], gspca_dev->usb_buf[1],
gspca_dev->usb_buf[2]);
}
@@ -1710,7 +1712,7 @@ static void ov7648_probe(struct gspca_dev *gspca_dev)
sd->sensor = SENSOR_PO1030;
return;
}
- err("Unknown sensor %04x", val);
+ pr_err("Unknown sensor %04x\n", val);
}
/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */
@@ -1748,7 +1750,7 @@ static void po2030n_probe(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sensor po2030n");
/* sd->sensor = SENSOR_PO2030N; */
} else {
- err("Unknown sensor ID %04x", val);
+ pr_err("Unknown sensor ID %04x\n", val);
}
}
@@ -2006,8 +2008,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
case SENSOR_OM6802:
expo = brightness << 2;
sd->exposure = setexposure(gspca_dev, expo);
- k2 = brightness >> 3;
- break;
+ return; /* Y offset already set */
}
reg_w1(gspca_dev, 0x96, k2); /* color matrix Y offset */
@@ -2019,13 +2020,13 @@ static void setcontrast(struct gspca_dev *gspca_dev)
u8 k2;
u8 contrast[6];
- k2 = sd->ctrls[CONTRAST].val * 0x30 / (CONTRAST_MAX + 1)
- + 0x10; /* 10..40 */
+ k2 = sd->ctrls[CONTRAST].val * 37 / (CONTRAST_MAX + 1)
+ + 37; /* 37..73 */
contrast[0] = (k2 + 1) / 2; /* red */
contrast[1] = 0;
contrast[2] = k2; /* green */
contrast[3] = 0;
- contrast[4] = (k2 + 1) / 5; /* blue */
+ contrast[4] = k2 / 5; /* blue */
contrast[5] = 0;
reg_w(gspca_dev, 0x84, contrast, sizeof contrast);
}
@@ -2507,9 +2508,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
case SENSOR_HV7131R:
case SENSOR_MI0360:
- if (mode)
- reg01 |= SYS_SEL_48M; /* 320x240: clk 48Mhz */
- else
+ if (!mode)
reg01 &= ~SYS_SEL_48M; /* 640x480: clk 24Mhz */
reg17 &= ~MCK_SIZE_MASK;
reg17 |= 0x01; /* clock / 1 */
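The sonixj setcontrast() change above rescales the green coefficient from roughly 0x10..0x40 to 37..73 and drops the +1 rounding on the blue channel. A quick userspace check of the new mapping, using only the formula and CONTRAST_MAX shown in these hunks (purely illustrative):

#include <stdio.h>

#define CONTRAST_MAX 127	/* from the sonixj contrast control definition */

int main(void)
{
	static const int vals[] = { 0, 20, 63, 127 };	/* 20 is the new default */
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		int k2 = vals[i] * 37 / (CONTRAST_MAX + 1) + 37;	/* 37..73 */

		/* contrast[0]=red, contrast[2]=green, contrast[4]=blue */
		printf("val=%3d  k2=%2d  red=%2d  green=%2d  blue=%2d\n",
		       vals[i], k2, (k2 + 1) / 2, k2, k2 / 5);
	}
	return 0;
}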
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index 76c006b2bc8..695673106e7 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -1,7 +1,7 @@
/*
* spca1528 subdriver
*
- * Copyright (C) 2010 Jean-Francois Moine (http://moinejf.free.fr)
+ * Copyright (C) 2010-2011 Jean-Francois Moine (http://moinejf.free.fr)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "spca1528"
#include "gspca.h"
@@ -171,7 +173,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
PDEBUG(D_USBI, "GET %02x 0000 %04x %02x", req, index,
gspca_dev->usb_buf[0]);
if (ret < 0) {
- err("reg_r err %d", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -193,7 +195,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
value, index,
NULL, 0, 500);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -217,21 +219,23 @@ static void reg_wb(struct gspca_dev *gspca_dev,
value, index,
gspca_dev->usb_buf, 1, 500);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
static void wait_status_0(struct gspca_dev *gspca_dev)
{
- int i;
+ int i, w;
- i = 20;
+ i = 16;
+ w = 0;
do {
reg_r(gspca_dev, 0x21, 0x0000, 1);
if (gspca_dev->usb_buf[0] == 0)
return;
- msleep(30);
+ w += 15;
+ msleep(w);
} while (--i > 0);
PDEBUG(D_ERR, "wait_status_0 timeout");
gspca_dev->usb_err = -ETIME;
@@ -307,8 +311,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->color = COLOR_DEF;
sd->sharpness = SHARPNESS_DEF;
- gspca_dev->nbalt = 4; /* use alternate setting 3 */
-
return 0;
}
@@ -347,8 +349,12 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
reg_wb(gspca_dev, 0x25, 0x0000, 0x0004, mode);
reg_r(gspca_dev, 0x25, 0x0004, 1);
- reg_wb(gspca_dev, 0x27, 0x0000, 0x0000, 0x06);
+ reg_wb(gspca_dev, 0x27, 0x0000, 0x0000, 0x06); /* 420 */
reg_r(gspca_dev, 0x27, 0x0000, 1);
+
+/* not useful..
+ gspca_dev->alt = 4; * use alternate setting 3 */
+
return gspca_dev->usb_err;
}
@@ -361,8 +367,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x22); /* JPEG 411 */
- /* the JPEG quality seems to be 82% */
- jpeg_set_qual(sd->jpeg_hdr, 82);
+ /* the JPEG quality shall be 85% */
+ jpeg_set_qual(sd->jpeg_hdr, 85);
/* set the controls */
setbrightness(gspca_dev);
@@ -377,7 +383,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* start the capture */
wait_status_0(gspca_dev);
- reg_w(gspca_dev, 0x31, 0x0000, 0x0004);
+ reg_w(gspca_dev, 0x31, 0x0000, 0x0004); /* start request */
wait_status_1(gspca_dev);
wait_status_0(gspca_dev);
msleep(200);
@@ -390,7 +396,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
{
/* stop the capture */
wait_status_0(gspca_dev);
- reg_w(gspca_dev, 0x31, 0x0000, 0x0000);
+ reg_w(gspca_dev, 0x31, 0x0000, 0x0000); /* stop request */
wait_status_1(gspca_dev);
wait_status_0(gspca_dev);
}
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 3e76951e3c1..bb82c94ece1 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "spca500"
#include "gspca.h"
@@ -396,7 +398,7 @@ static int reg_w(struct gspca_dev *gspca_dev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
if (ret < 0)
- err("reg write: error %d", ret);
+ pr_err("reg write: error %d\n", ret);
return ret;
}
@@ -418,7 +420,7 @@ static int reg_r_12(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, length,
500); /* timeout */
if (ret < 0) {
- err("reg_r_12 err %d", ret);
+ pr_err("reg_r_12 err %d\n", ret);
return ret;
}
return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index f7ef282cc60..7aaac72aee9 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "spca501"
#include "gspca.h"
@@ -1852,7 +1854,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write: 0x%02x 0x%02x 0x%02x",
req, index, value);
if (ret < 0)
- err("reg write: error %d", ret);
+ pr_err("reg write: error %d\n", ret);
return ret;
}
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index e5bf865147d..16722dc6039 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "spca505"
#include "gspca.h"
@@ -578,7 +580,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write: 0x%02x,0x%02x:0x%02x, %d",
req, index, value, ret);
if (ret < 0)
- err("reg write: error %d", ret);
+ pr_err("reg write: error %d\n", ret);
return ret;
}
@@ -685,8 +687,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
return ret;
}
if (ret != 0x0101) {
- err("After vector read returns 0x%04x should be 0x0101",
- ret);
+ pr_err("After vector read returns 0x%04x should be 0x0101\n",
+ ret);
}
ret = reg_write(gspca_dev->dev, 0x06, 0x16, 0x0a);
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 9d0b46027b9..a44fe3d2596 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "spca508"
#include "gspca.h"
@@ -1275,7 +1277,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write i:0x%04x = 0x%02x",
index, value);
if (ret < 0)
- err("reg write: error %d", ret);
+ pr_err("reg write: error %d\n", ret);
return ret;
}
@@ -1297,7 +1299,7 @@ static int reg_read(struct gspca_dev *gspca_dev,
PDEBUG(D_USBI, "reg read i:%04x --> %02x",
index, gspca_dev->usb_buf[0]);
if (ret < 0) {
- err("reg_read err %d", ret);
+ pr_err("reg_read err %d\n", ret);
return ret;
}
return gspca_dev->usb_buf[0];
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index e836e778dfb..c82fd53cef9 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "spca561"
#include <linux/input.h>
@@ -315,7 +317,7 @@ static void reg_w_val(struct usb_device *dev, __u16 index, __u8 value)
value, index, NULL, 0, 500);
PDEBUG(D_USBO, "reg write: 0x%02x:0x%02x", index, value);
if (ret < 0)
- err("reg write: error %d", ret);
+ pr_err("reg write: error %d\n", ret);
}
static void write_vector(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 5ba96aff225..df805f79828 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -33,6 +33,8 @@
* drivers.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "sq905"
#include <linux/workqueue.h>
@@ -123,8 +125,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
SQ905_COMMAND, index, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- err("%s: usb_control_msg failed (%d)",
- __func__, ret);
+ pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
return ret;
}
@@ -135,8 +136,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
SQ905_PING, 0, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- err("%s: usb_control_msg failed 2 (%d)",
- __func__, ret);
+ pr_err("%s: usb_control_msg failed 2 (%d)\n", __func__, ret);
return ret;
}
@@ -158,7 +158,7 @@ static int sq905_ack_frame(struct gspca_dev *gspca_dev)
SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- err("%s: usb_control_msg failed (%d)", __func__, ret);
+ pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
return ret;
}
@@ -186,7 +186,7 @@ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
if (need_lock)
mutex_unlock(&gspca_dev->usb_lock);
if (ret < 0) {
- err("%s: usb_control_msg failed (%d)", __func__, ret);
+ pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
return ret;
}
ret = usb_bulk_msg(gspca_dev->dev,
@@ -195,8 +195,7 @@ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
/* successful, it returns 0, otherwise negative */
if (ret < 0 || act_len != size) {
- err("bulk read fail (%d) len %d/%d",
- ret, act_len, size);
+ pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size);
return -EIO;
}
return 0;
@@ -226,7 +225,7 @@ static void sq905_dostream(struct work_struct *work)
buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- err("Couldn't allocate USB buffer");
+ pr_err("Couldn't allocate USB buffer\n");
goto quit_stream;
}
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 457563b7a71..c2c056056e0 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -27,6 +27,8 @@
* and may contain code fragments from it.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "sq905c"
#include <linux/workqueue.h>
@@ -95,8 +97,7 @@ static int sq905c_command(struct gspca_dev *gspca_dev, u16 command, u16 index)
command, index, NULL, 0,
SQ905C_CMD_TIMEOUT);
if (ret < 0) {
- err("%s: usb_control_msg failed (%d)",
- __func__, ret);
+ pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
return ret;
}
@@ -115,8 +116,7 @@ static int sq905c_read(struct gspca_dev *gspca_dev, u16 command, u16 index,
command, index, gspca_dev->usb_buf, size,
SQ905C_CMD_TIMEOUT);
if (ret < 0) {
- err("%s: usb_control_msg failed (%d)",
- __func__, ret);
+ pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
return ret;
}
@@ -146,7 +146,7 @@ static void sq905c_dostream(struct work_struct *work)
buffer = kmalloc(SQ905C_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- err("Couldn't allocate USB buffer");
+ pr_err("Couldn't allocate USB buffer\n");
goto quit_stream;
}
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index 8215d5dcd45..e4255b4905e 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "sq930x"
#include "gspca.h"
@@ -468,7 +470,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
value, 0, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- err("reg_r %04x failed %d", value, ret);
+ pr_err("reg_r %04x failed %d\n", value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -488,7 +490,7 @@ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
500);
msleep(30);
if (ret < 0) {
- err("reg_w %04x %04x failed %d", value, index, ret);
+ pr_err("reg_w %04x %04x failed %d\n", value, index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -511,7 +513,7 @@ static void reg_wb(struct gspca_dev *gspca_dev, u16 value, u16 index,
1000);
msleep(30);
if (ret < 0) {
- err("reg_wb %04x %04x failed %d", value, index, ret);
+ pr_err("reg_wb %04x %04x failed %d\n", value, index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -556,7 +558,7 @@ static void i2c_write(struct sd *sd,
gspca_dev->usb_buf, buf - gspca_dev->usb_buf,
500);
if (ret < 0) {
- err("i2c_write failed %d", ret);
+ pr_err("i2c_write failed %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -575,7 +577,7 @@ static void ucbus_write(struct gspca_dev *gspca_dev,
#ifdef GSPCA_DEBUG
if ((batchsize - 1) * 3 > USB_BUF_SZ) {
- err("Bug: usb_buf overflow");
+ pr_err("Bug: usb_buf overflow\n");
gspca_dev->usb_err = -ENOMEM;
return;
}
@@ -612,7 +614,7 @@ static void ucbus_write(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, buf - gspca_dev->usb_buf,
500);
if (ret < 0) {
- err("ucbus_write failed %d", ret);
+ pr_err("ucbus_write failed %d\n", ret);
gspca_dev->usb_err = ret;
return;
}
@@ -688,7 +690,7 @@ static void cmos_probe(struct gspca_dev *gspca_dev)
break;
}
if (i >= ARRAY_SIZE(probe_order)) {
- err("Unknown sensor");
+ pr_err("Unknown sensor\n");
gspca_dev->usb_err = -EINVAL;
return;
}
@@ -696,7 +698,8 @@ static void cmos_probe(struct gspca_dev *gspca_dev)
switch (sd->sensor) {
case SENSOR_OV7660:
case SENSOR_OV9630:
- err("Sensor %s not yet treated", sensor_tb[sd->sensor].name);
+ pr_err("Sensor %s not yet treated\n",
+ sensor_tb[sd->sensor].name);
gspca_dev->usb_err = -EINVAL;
break;
}
@@ -1091,7 +1094,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
gspca_dev->cam.bulk_nurbs = 1;
ret = usb_submit_urb(gspca_dev->urb[0], GFP_ATOMIC);
if (ret < 0)
- err("sd_dq_callback() err %d", ret);
+ pr_err("sd_dq_callback() err %d\n", ret);
/* wait a little time, otherwise the webcam crashes */
msleep(100);
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 763747700f1..42a7a28a6c8 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "stk014"
#include "gspca.h"
@@ -137,7 +139,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_r err %d", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -162,7 +164,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
0,
500);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -192,7 +194,7 @@ static void rcv_val(struct gspca_dev *gspca_dev,
&alen,
500); /* timeout in milliseconds */
if (ret < 0) {
- err("rcv_val err %d", ret);
+ pr_err("rcv_val err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -235,7 +237,7 @@ static void snd_val(struct gspca_dev *gspca_dev,
&alen,
500); /* timeout in milliseconds */
if (ret < 0) {
- err("snd_val err %d", ret);
+ pr_err("snd_val err %d\n", ret);
gspca_dev->usb_err = ret;
} else {
if (ads == 0x003f08) {
@@ -315,7 +317,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
ret = reg_r(gspca_dev, 0x0740);
if (gspca_dev->usb_err >= 0) {
if (ret != 0xff) {
- err("init reg: 0x%02x", ret);
+ pr_err("init reg: 0x%02x\n", ret);
gspca_dev->usb_err = -EIO;
}
}
@@ -349,8 +351,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
gspca_dev->iface,
gspca_dev->alt);
if (ret < 0) {
- err("set intf %d %d failed",
- gspca_dev->iface, gspca_dev->alt);
+ pr_err("set intf %d %d failed\n",
+ gspca_dev->iface, gspca_dev->alt);
gspca_dev->usb_err = ret;
goto out;
}
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index e2ef41cf72d..4dcc7e37f9f 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -27,6 +27,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "stv0680"
#include "gspca.h"
@@ -79,7 +81,7 @@ static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
val, 0, gspca_dev->usb_buf, size, 500);
if ((ret < 0) && (req != 0x0a))
- err("usb_control_msg error %i, request = 0x%x, error = %i",
+ pr_err("usb_control_msg error %i, request = 0x%x, error = %i\n",
set, req, ret);
return ret;
@@ -236,7 +238,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0100, 0x12) != 0x12 ||
gspca_dev->usb_buf[8] != 0x53 || gspca_dev->usb_buf[9] != 0x05) {
- err("Could not get descriptor 0100.");
+ pr_err("Could not get descriptor 0100\n");
return stv0680_handle_error(gspca_dev, -EIO);
}
diff --git a/drivers/media/video/gspca/stv06xx/Makefile b/drivers/media/video/gspca/stv06xx/Makefile
index 2f3c3a606ce..5b318faf9aa 100644
--- a/drivers/media/video/gspca/stv06xx/Makefile
+++ b/drivers/media/video/gspca/stv06xx/Makefile
@@ -6,5 +6,5 @@ gspca_stv06xx-objs := stv06xx.o \
stv06xx_pb0100.o \
stv06xx_st6422.o
-EXTRA_CFLAGS += -Idrivers/media/video/gspca
+ccflags-y += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index abf1658fa33..b1fca7db101 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -27,6 +27,8 @@
* P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/input.h>
#include "stv06xx_sensor.h"
@@ -189,7 +191,7 @@ int stv06xx_read_sensor(struct sd *sd, const u8 address, u16 *value)
0x04, 0x40, 0x1400, 0, buf, I2C_BUFFER_LENGTH,
STV06XX_URB_MSG_TIMEOUT);
if (err < 0) {
- err("I2C: Read error writing address: %d", err);
+ pr_err("I2C: Read error writing address: %d\n", err);
return err;
}
@@ -213,14 +215,14 @@ static void stv06xx_dump_bridge(struct sd *sd)
int i;
u8 data, buf;
- info("Dumping all stv06xx bridge registers");
+ pr_info("Dumping all stv06xx bridge registers\n");
for (i = 0x1400; i < 0x160f; i++) {
stv06xx_read_bridge(sd, i, &data);
- info("Read 0x%x from address 0x%x", data, i);
+ pr_info("Read 0x%x from address 0x%x\n", data, i);
}
- info("Testing stv06xx bridge registers for writability");
+ pr_info("Testing stv06xx bridge registers for writability\n");
for (i = 0x1400; i < 0x160f; i++) {
stv06xx_read_bridge(sd, i, &data);
buf = data;
@@ -228,12 +230,12 @@ static void stv06xx_dump_bridge(struct sd *sd)
stv06xx_write_bridge(sd, i, 0xff);
stv06xx_read_bridge(sd, i, &data);
if (data == 0xff)
- info("Register 0x%x is read/write", i);
+ pr_info("Register 0x%x is read/write\n", i);
else if (data != buf)
- info("Register 0x%x is read/write,"
- " but only partially", i);
+ pr_info("Register 0x%x is read/write, but only partially\n",
+ i);
else
- info("Register 0x%x is read-only", i);
+ pr_info("Register 0x%x is read-only\n", i);
stv06xx_write_bridge(sd, i, buf);
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index e0f63c51f40..d270a5981af 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -37,6 +37,8 @@
#define STV_ISOC_ENDPOINT_ADDR 0x81
+#define STV_R 0x0509
+
#define STV_REG23 0x0423
/* Control registers of the STV0600 ASIC */
@@ -61,7 +63,9 @@
/* Refers to the CIF 352x288 and QCIF 176x144 */
/* 1: 288 lines, 2: 144 lines */
-#define STV_Y_CTRL 0x15c3
+#define STV_Y_CTRL 0x15c3
+
+#define STV_RESET 0x1620
/* 0xa: 352 columns, 0x6: 176 columns */
#define STV_X_CTRL 0x1680
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index b8156855f2b..a8698b7a756 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -28,6 +28,8 @@
* P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "stv06xx_hdcs.h"
static const struct ctrl hdcs1x00_ctrl[] = {
@@ -428,7 +430,7 @@ static int hdcs_probe_1x00(struct sd *sd)
if (ret < 0 || sensor != 0x08)
return -ENODEV;
- info("HDCS-1000/1100 sensor detected");
+ pr_info("HDCS-1000/1100 sensor detected\n");
sd->gspca_dev.cam.cam_mode = hdcs1x00_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(hdcs1x00_mode);
@@ -487,7 +489,7 @@ static int hdcs_probe_1020(struct sd *sd)
if (ret < 0 || sensor != 0x10)
return -ENODEV;
- info("HDCS-1020 sensor detected");
+ pr_info("HDCS-1020 sensor detected\n");
sd->gspca_dev.cam.cam_mode = hdcs1020_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(hdcs1020_mode);
@@ -601,11 +603,11 @@ static int hdcs_dump(struct sd *sd)
{
u16 reg, val;
- info("Dumping sensor registers:");
+ pr_info("Dumping sensor registers:\n");
for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH; reg++) {
stv06xx_read_sensor(sd, reg, &val);
- info("reg 0x%02x = 0x%02x", reg, val);
+ pr_info("reg 0x%02x = 0x%02x\n", reg, val);
}
return 0;
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
index 75a5b9c2f15..26f14fc4a13 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
@@ -44,6 +44,8 @@
* PB_CFILLIN = R5 = 0x0E (14 dec) : Sets the frame rate
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "stv06xx_pb0100.h"
static const struct ctrl pb0100_ctrl[] = {
@@ -190,7 +192,7 @@ static int pb0100_probe(struct sd *sd)
if (!sensor_settings)
return -ENOMEM;
- info("Photobit pb0100 sensor detected");
+ pr_info("Photobit pb0100 sensor detected\n");
sd->gspca_dev.cam.cam_mode = pb0100_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
index 8a456de4970..9940e035b3a 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "stv06xx_st6422.h"
/* controls */
@@ -136,7 +138,7 @@ static int st6422_probe(struct sd *sd)
if (sd->bridge != BRIDGE_ST6422)
return -ENODEV;
- info("st6422 sensor detected");
+ pr_info("st6422 sensor detected\n");
sensor_settings = kmalloc(sizeof *sensor_settings, GFP_KERNEL);
if (!sensor_settings)
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
index f8398434c32..a5c69d9ebdd 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -27,6 +27,8 @@
* P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "stv06xx_vv6410.h"
static struct v4l2_pix_format vv6410_mode[] = {
@@ -112,7 +114,7 @@ static int vv6410_probe(struct sd *sd)
return -ENODEV;
if (data == 0x19) {
- info("vv6410 sensor detected");
+ pr_info("vv6410 sensor detected\n");
sensor_settings = kmalloc(ARRAY_SIZE(vv6410_ctrl) * sizeof(s32),
GFP_KERNEL);
@@ -138,18 +140,7 @@ static int vv6410_init(struct sd *sd)
s32 *sensor_settings = sd->sensor_priv;
for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) {
- /* if NULL then len contains single value */
- if (stv_bridge_init[i].data == NULL) {
- err = stv06xx_write_bridge(sd,
- stv_bridge_init[i].start,
- stv_bridge_init[i].len);
- } else {
- int j;
- for (j = 0; j < stv_bridge_init[i].len; j++)
- err = stv06xx_write_bridge(sd,
- stv_bridge_init[i].start + j,
- stv_bridge_init[i].data[j]);
- }
+ stv06xx_write_bridge(sd, stv_bridge_init[i].addr, stv_bridge_init[i].data);
}
if (err < 0)
@@ -183,15 +174,6 @@ static int vv6410_start(struct sd *sd)
struct cam *cam = &sd->gspca_dev.cam;
u32 priv = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
- if (priv & VV6410_CROP_TO_QVGA) {
- PDEBUG(D_CONF, "Cropping to QVGA");
- stv06xx_write_sensor(sd, VV6410_XENDH, 320 - 1);
- stv06xx_write_sensor(sd, VV6410_YENDH, 240 - 1);
- } else {
- stv06xx_write_sensor(sd, VV6410_XENDH, 360 - 1);
- stv06xx_write_sensor(sd, VV6410_YENDH, 294 - 1);
- }
-
if (priv & VV6410_SUBSAMPLE) {
PDEBUG(D_CONF, "Enabling subsampling");
stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02);
@@ -201,8 +183,8 @@ static int vv6410_start(struct sd *sd)
} else {
stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01);
stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a);
- stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
+ stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x00);
}
/* Turn on LED */
@@ -242,11 +224,11 @@ static int vv6410_dump(struct sd *sd)
u8 i;
int err = 0;
- info("Dumping all vv6410 sensor registers");
+ pr_info("Dumping all vv6410 sensor registers\n");
for (i = 0; i < 0xff && !err; i++) {
u16 data;
err = stv06xx_read_sensor(sd, i, &data);
- info("Register 0x%x contained 0x%x", i, data);
+ pr_info("Register 0x%x contained 0x%x\n", i, data);
}
return (err < 0) ? err : 0;
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index 7fe3587f5f7..a25b8873f2e 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -211,49 +211,49 @@ const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
/* If NULL, only single value to write, stored in len */
struct stv_init {
- const u8 *data;
- u16 start;
- u8 len;
-};
-
-static const u8 x1500[] = { /* 0x1500 - 0x150f */
- 0x0b, 0xa7, 0xb7, 0x00, 0x00
-};
-
-static const u8 x1536[] = { /* 0x1536 - 0x153b */
- 0x02, 0x00, 0x60, 0x01, 0x20, 0x01
+ u16 addr;
+ u8 data;
};
static const struct stv_init stv_bridge_init[] = {
/* This reg is written twice. Some kind of reset? */
- {NULL, 0x1620, 0x80},
- {NULL, 0x1620, 0x00},
- {NULL, 0x1443, 0x00},
- {NULL, 0x1423, 0x04},
- {x1500, 0x1500, ARRAY_SIZE(x1500)},
- {x1536, 0x1536, ARRAY_SIZE(x1536)},
+ {STV_RESET, 0x80},
+ {STV_RESET, 0x00},
+ {STV_SCAN_RATE, 0x00},
+ {STV_I2C_FLUSH, 0x04},
+ {STV_REG00, 0x0b},
+ {STV_REG01, 0xa7},
+ {STV_REG02, 0xb7},
+ {STV_REG03, 0x00},
+ {STV_REG04, 0x00},
+ {0x1536, 0x02},
+ {0x1537, 0x00},
+ {0x1538, 0x60},
+ {0x1539, 0x01},
+ {0x153a, 0x20},
+ {0x153b, 0x01},
};
static const u8 vv6410_sensor_init[][2] = {
/* Setup registers */
- {VV6410_SETUP0, VV6410_SOFT_RESET},
- {VV6410_SETUP0, VV6410_LOW_POWER_MODE},
+ {VV6410_SETUP0, VV6410_SOFT_RESET},
+ {VV6410_SETUP0, VV6410_LOW_POWER_MODE},
/* Use shuffled read-out mode */
- {VV6410_SETUP1, BIT(6)},
- /* All modes to 1 */
- {VV6410_FGMODES, BIT(6) | BIT(4) | BIT(2) | BIT(0)},
- {VV6410_PINMAPPING, 0x00},
+ {VV6410_SETUP1, BIT(6)},
+ /* All modes to 1, FST, Fast QCK, Free running QCK, Free running LST, FST will qualify visible pixels */
+ {VV6410_FGMODES, BIT(6) | BIT(4) | BIT(2) | BIT(0)},
+ {VV6410_PINMAPPING, 0x00},
/* Pre-clock generator divide off */
- {VV6410_DATAFORMAT, BIT(7) | BIT(0)},
+ {VV6410_DATAFORMAT, BIT(7) | BIT(0)},
- {VV6410_CLKDIV, VV6410_CLK_DIV_2},
+ {VV6410_CLKDIV, VV6410_CLK_DIV_2},
/* System registers */
/* Enable voltage doubler */
- {VV6410_AS0, BIT(6) | BIT(4) | BIT(3) | BIT(2) | BIT(1)},
- {VV6410_AT0, 0x00},
+ {VV6410_AS0, BIT(6) | BIT(4) | BIT(3) | BIT(2) | BIT(1)},
+ {VV6410_AT0, 0x00},
/* Power up audio, differential */
- {VV6410_AT1, BIT(4)|BIT(0)},
+ {VV6410_AT1, BIT(4) | BIT(0)},
};
#endif
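
Editor's note, not part of the patch: taken together, the two vv6410 hunks above replace the old struct stv_init, which packed either a pointer to a block of values or a single value stored in len, with flat {address, value} pairs, so the bridge init loop in stv06xx_vv6410.c shrinks to one write per table entry. A minimal sketch of the resulting data-driven init, with the table shortened for illustration:

	/* sketch only: flattened register/value table plus the one-line loop
	 * that replaces the old pointer/length handling */
	struct stv_init {
		u16 addr;
		u8 data;
	};

	static const struct stv_init stv_bridge_init[] = {
		{STV_RESET,	0x80},	/* written twice: reset pulse */
		{STV_RESET,	0x00},
		{STV_SCAN_RATE,	0x00},
		/* ... remaining entries as in the hunk above */
	};

	static void bridge_init(struct sd *sd)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++)
			stv06xx_write_bridge(sd, stv_bridge_init[i].addr,
					     stv_bridge_init[i].data);
	}
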
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 6ec23290218..c8909772435 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -19,6 +19,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "sunplus"
#include "gspca.h"
@@ -325,7 +327,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
- err("reg_r: buffer overflow");
+ pr_err("reg_r: buffer overflow\n");
return;
}
#endif
@@ -340,7 +342,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
len ? gspca_dev->usb_buf : NULL, len,
500);
if (ret < 0) {
- err("reg_r err %d", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -365,7 +367,7 @@ static void reg_w_1(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_w_1 err %d", ret);
+ pr_err("reg_w_1 err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -385,7 +387,7 @@ static void reg_w_riv(struct gspca_dev *gspca_dev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
if (ret < 0) {
- err("reg_w_riv err %d", ret);
+ pr_err("reg_w_riv err %d\n", ret);
gspca_dev->usb_err = ret;
return;
}
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index d1d733b9359..90f0877eb59 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -26,6 +26,8 @@
* Costantino Leandro
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "t613"
#include <linux/slab.h>
@@ -572,7 +574,7 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
tmpbuf = kmemdup(buffer, len, GFP_KERNEL);
if (!tmpbuf) {
- err("Out of memory");
+ pr_err("Out of memory\n");
return;
}
usb_control_msg(gspca_dev->dev,
@@ -598,7 +600,7 @@ static void reg_w_ixbuf(struct gspca_dev *gspca_dev,
} else {
p = tmpbuf = kmalloc(len * 2, GFP_KERNEL);
if (!tmpbuf) {
- err("Out of memory");
+ pr_err("Out of memory\n");
return;
}
}
@@ -652,7 +654,7 @@ static void om6802_sensor_init(struct gspca_dev *gspca_dev)
}
byte = reg_r(gspca_dev, 0x0063);
if (byte != 0x17) {
- err("Bad sensor reset %02x", byte);
+ pr_err("Bad sensor reset %02x\n", byte);
/* continue? */
}
@@ -890,7 +892,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->sensor = SENSOR_OM6802;
break;
default:
- err("unknown sensor %04x", sensor_id);
+ pr_err("unknown sensor %04x\n", sensor_id);
return -EINVAL;
}
@@ -905,7 +907,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
break; /* OK */
}
if (i < 0) {
- err("Bad sensor reset %02x", test_byte);
+ pr_err("Bad sensor reset %02x\n", test_byte);
return -EIO;
}
reg_w_buf(gspca_dev, n2, sizeof n2);
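
Editor's note, not part of the patch: the new topro.c added below ships a canned JPEG header (jpeg_head[]) and rescales its two quantization tables at runtime in jpeg_set_qual(), using the usual IJG/libjpeg quality mapping: a scale factor of 5000/quality below 50% and 200 - 2*quality at or above, applied to each table byte with rounding. A small worked sketch of what the visible code computes:

	/* sketch only: the quality-to-scale mapping used by jpeg_set_qual()
	 * in the new topro.c below */
	static int jpeg_scale(int quality)	/* quality in 1..100 */
	{
		return quality < 50 ? 5000 / quality : 200 - quality * 2;
	}

	/* e.g. quality 85 -> scale 30, so a table byte of 0x10 (16)
	 * becomes (16 * 30 + 50) / 100 = 5 */
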
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
new file mode 100644
index 00000000000..29596c59837
--- /dev/null
+++ b/drivers/media/video/gspca/topro.c
@@ -0,0 +1,4989 @@
+/*
+ * Topro TP6800/6810 webcam driver.
+ *
+ * Copyright (C) 2011 Jean-François Moine (http://moinejf.free.fr)
+ * Copyright (C) 2009 Anders Blomdell (anders.blomdell@control.lth.se)
+ * Copyright (C) 2008 Thomas Champagne (lafeuil@gmail.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "gspca.h"
+
+MODULE_DESCRIPTION("Topro TP6800/6810 gspca webcam driver");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
+ "Anders Blomdell <anders.blomdell@control.lth.se>");
+MODULE_LICENSE("GPL");
+
+static int force_sensor = -1;
+
+/* JPEG header */
+static const u8 jpeg_head[] = {
+ 0xff, 0xd8, /* jpeg */
+
+/* quantization table quality 50% */
+ 0xff, 0xdb, 0x00, 0x84, /* DQT */
+0,
+#define JPEG_QT0_OFFSET 7
+ 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
+ 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
+ 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
+ 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
+ 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
+ 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
+ 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
+ 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63,
+1,
+#define JPEG_QT1_OFFSET 72
+ 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
+ 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+
+ /* Define Huffman table (thanks to Thomas Kaiser) */
+ 0xff, 0xc4, 0x01, 0x5e,
+ 0x00, 0x00, 0x02, 0x03,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02,
+ 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10,
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x06, 0x01, 0x00, 0x00, 0x57,
+ 0x01, 0x02, 0x03, 0x00, 0x11, 0x04, 0x12, 0x21,
+ 0x31, 0x13, 0x41, 0x51, 0x61, 0x05, 0x22, 0x32,
+ 0x14, 0x71, 0x81, 0x91, 0x15, 0x23, 0x42, 0x52,
+ 0x62, 0xa1, 0xb1, 0x06, 0x33, 0x72, 0xc1, 0xd1,
+ 0x24, 0x43, 0x53, 0x82, 0x16, 0x34, 0x92, 0xa2,
+ 0xe1, 0xf1, 0xf0, 0x07, 0x08, 0x17, 0x18, 0x25,
+ 0x26, 0x27, 0x28, 0x35, 0x36, 0x37, 0x38, 0x44,
+ 0x45, 0x46, 0x47, 0x48, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x73,
+ 0x74, 0x75, 0x76, 0x77, 0x78, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xb2,
+ 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xe2, 0xe3, 0xe4, 0xe5,
+ 0xe6, 0xe7, 0xe8, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6,
+ 0xf7, 0xf8, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07, 0x08, 0x09, 0x11, 0x00, 0x02,
+ 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05,
+ 0x04, 0x06, 0x01, 0x00, 0x00, 0x57, 0x00, 0x01,
+ 0x11, 0x02, 0x21, 0x03, 0x12, 0x31, 0x41, 0x13,
+ 0x22, 0x51, 0x61, 0x04, 0x32, 0x71, 0x05, 0x14,
+ 0x23, 0x42, 0x33, 0x52, 0x81, 0x91, 0xa1, 0xb1,
+ 0xf0, 0x06, 0x15, 0xc1, 0xd1, 0xe1, 0x24, 0x43,
+ 0x62, 0xf1, 0x16, 0x25, 0x34, 0x53, 0x72, 0x82,
+ 0x92, 0x07, 0x08, 0x17, 0x18, 0x26, 0x27, 0x28,
+ 0x35, 0x36, 0x37, 0x38, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x54, 0x55, 0x56, 0x57, 0x58, 0x63, 0x64,
+ 0x65, 0x66, 0x67, 0x68, 0x73, 0x74, 0x75, 0x76,
+ 0x77, 0x78, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
+ 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0xa2, 0xa3,
+ 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6,
+ 0xd7, 0xd8, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xff, 0xc0, 0x00, 0x11, /* SOF0 (start of frame 0) */
+ 0x08, /* data precision */
+#define JPEG_HEIGHT_OFFSET 493
+ 0x01, 0xe0, /* height */
+ 0x02, 0x80, /* width */
+ 0x03, /* component number */
+ 0x01,
+ 0x21, /* samples Y = jpeg 422 */
+ 0x00, /* quant Y */
+ 0x02, 0x11, 0x01, /* samples CbCr - quant CbCr */
+ 0x03, 0x11, 0x01,
+
+ 0xff, 0xda, 0x00, 0x0c, /* SOS (start of scan) */
+ 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00
+#define JPEG_HDR_SZ 521
+};
+
+enum e_ctrl {
+ EXPOSURE,
+ QUALITY,
+ SHARPNESS,
+ RGAIN,
+ GAIN,
+ BGAIN,
+ GAMMA,
+ AUTOGAIN,
+ NCTRLS /* number of controls */
+};
+
+#define AUTOGAIN_DEF 1
+
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+
+ struct gspca_ctrl ctrls[NCTRLS];
+
+ u8 framerate;
+ u8 quality; /* webcam current JPEG quality (0..16) */
+ s8 ag_cnt; /* autogain / start counter for tp6810 */
+#define AG_CNT_START 13 /* check gain every N frames */
+
+ u8 bridge;
+ u8 sensor;
+
+ u8 jpeg_hdr[JPEG_HDR_SZ];
+};
+
+enum bridges {
+ BRIDGE_TP6800,
+ BRIDGE_TP6810,
+};
+
+enum sensors {
+ SENSOR_CX0342,
+ SENSOR_SOI763A, /* ~= ov7630 / ov7648 */
+ NSENSORS
+};
+
+static const struct v4l2_pix_format vga_mode[] = {
+ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 4 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG},
+ {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG}
+};
+
+/*
+ * JPEG quality
+ * index: webcam compression
+ * value: JPEG quality in %
+ */
+static const u8 jpeg_q[17] = {
+ 88, 77, 67, 57, 55, 55, 45, 45, 36, 36, 30, 30, 26, 26, 22, 22, 94
+};
+
+#define BULK_OUT_SIZE 0x20
+#if BULK_OUT_SIZE > USB_BUF_SZ
+#error "USB buffer too small"
+#endif
+
+static const u8 rates[] = {30, 20, 15, 10, 7, 5};
+static const struct framerates framerates[] = {
+ {
+ .rates = rates,
+ .nrates = ARRAY_SIZE(rates)
+ },
+ {
+ .rates = rates,
+ .nrates = ARRAY_SIZE(rates)
+ }
+};
+static const u8 rates_6810[] = {30, 15, 10, 7, 5};
+static const struct framerates framerates_6810[] = {
+ {
+ .rates = rates_6810,
+ .nrates = ARRAY_SIZE(rates_6810)
+ },
+ {
+ .rates = rates_6810,
+ .nrates = ARRAY_SIZE(rates_6810)
+ }
+};
+
+/*
+ * webcam quality in %
+ * the last value is the ultra fine quality
+ */
+
+/* TP6800 register offsets */
+#define TP6800_R10_SIF_TYPE 0x10
+#define TP6800_R11_SIF_CONTROL 0x11
+#define TP6800_R12_SIF_ADDR_S 0x12
+#define TP6800_R13_SIF_TX_DATA 0x13
+#define TP6800_R14_SIF_RX_DATA 0x14
+#define TP6800_R15_GPIO_PU 0x15
+#define TP6800_R16_GPIO_PD 0x16
+#define TP6800_R17_GPIO_IO 0x17
+#define TP6800_R18_GPIO_DATA 0x18
+#define TP6800_R19_SIF_ADDR_S2 0x19
+#define TP6800_R1A_SIF_TX_DATA2 0x1a
+#define TP6800_R1B_SIF_RX_DATA2 0x1b
+#define TP6800_R21_ENDP_1_CTL 0x21
+#define TP6800_R2F_TIMING_CFG 0x2f
+#define TP6800_R30_SENSOR_CFG 0x30
+#define TP6800_R31_PIXEL_START 0x31
+#define TP6800_R32_PIXEL_END_L 0x32
+#define TP6800_R33_PIXEL_END_H 0x33
+#define TP6800_R34_LINE_START 0x34
+#define TP6800_R35_LINE_END_L 0x35
+#define TP6800_R36_LINE_END_H 0x36
+#define TP6800_R37_FRONT_DARK_ST 0x37
+#define TP6800_R38_FRONT_DARK_END 0x38
+#define TP6800_R39_REAR_DARK_ST_L 0x39
+#define TP6800_R3A_REAR_DARK_ST_H 0x3a
+#define TP6800_R3B_REAR_DARK_END_L 0x3b
+#define TP6800_R3C_REAR_DARK_END_H 0x3c
+#define TP6800_R3D_HORIZ_DARK_LINE_L 0x3d
+#define TP6800_R3E_HORIZ_DARK_LINE_H 0x3e
+#define TP6800_R3F_FRAME_RATE 0x3f
+#define TP6800_R50 0x50
+#define TP6800_R51 0x51
+#define TP6800_R52 0x52
+#define TP6800_R53 0x53
+#define TP6800_R54_DARK_CFG 0x54
+#define TP6800_R55_GAMMA_R 0x55
+#define TP6800_R56_GAMMA_G 0x56
+#define TP6800_R57_GAMMA_B 0x57
+#define TP6800_R5C_EDGE_THRLD 0x5c
+#define TP6800_R5D_DEMOSAIC_CFG 0x5d
+#define TP6800_R78_FORMAT 0x78
+#define TP6800_R79_QUALITY 0x79
+#define TP6800_R7A_BLK_THRLD 0x7a
+
+/* CX0342 register offsets */
+
+#define CX0342_SENSOR_ID 0x00
+#define CX0342_VERSION_NO 0x01
+#define CX0342_ORG_X_L 0x02
+#define CX0342_ORG_X_H 0x03
+#define CX0342_ORG_Y_L 0x04
+#define CX0342_ORG_Y_H 0x05
+#define CX0342_STOP_X_L 0x06
+#define CX0342_STOP_X_H 0x07
+#define CX0342_STOP_Y_L 0x08
+#define CX0342_STOP_Y_H 0x09
+#define CX0342_FRAME_WIDTH_L 0x0a
+#define CX0342_FRAME_WIDTH_H 0x0b
+#define CX0342_FRAME_HEIGH_L 0x0c
+#define CX0342_FRAME_HEIGH_H 0x0d
+#define CX0342_EXPO_LINE_L 0x10
+#define CX0342_EXPO_LINE_H 0x11
+#define CX0342_EXPO_CLK_L 0x12
+#define CX0342_EXPO_CLK_H 0x13
+#define CX0342_RAW_GRGAIN_L 0x14
+#define CX0342_RAW_GRGAIN_H 0x15
+#define CX0342_RAW_GBGAIN_L 0x16
+#define CX0342_RAW_GBGAIN_H 0x17
+#define CX0342_RAW_RGAIN_L 0x18
+#define CX0342_RAW_RGAIN_H 0x19
+#define CX0342_RAW_BGAIN_L 0x1a
+#define CX0342_RAW_BGAIN_H 0x1b
+#define CX0342_GLOBAL_GAIN 0x1c
+#define CX0342_SYS_CTRL_0 0x20
+#define CX0342_SYS_CTRL_1 0x21
+#define CX0342_SYS_CTRL_2 0x22
+#define CX0342_BYPASS_MODE 0x23
+#define CX0342_SYS_CTRL_3 0x24
+#define CX0342_TIMING_EN 0x25
+#define CX0342_OUTPUT_CTRL 0x26
+#define CX0342_AUTO_ADC_CALIB 0x27
+#define CX0342_SYS_CTRL_4 0x28
+#define CX0342_ADCGN 0x30
+#define CX0342_SLPCR 0x31
+#define CX0342_SLPFN_LO 0x32
+#define CX0342_ADC_CTL 0x33
+#define CX0342_LVRST_BLBIAS 0x34
+#define CX0342_VTHSEL 0x35
+#define CX0342_RAMP_RIV 0x36
+#define CX0342_LDOSEL 0x37
+#define CX0342_CLOCK_GEN 0x40
+#define CX0342_SOFT_RESET 0x41
+#define CX0342_PLL 0x42
+#define CX0342_DR_ENH_PULSE_OFFSET_L 0x43
+#define CX0342_DR_ENH_PULSE_OFFSET_H 0x44
+#define CX0342_DR_ENH_PULSE_POS_L 0x45
+#define CX0342_DR_ENH_PULSE_POS_H 0x46
+#define CX0342_DR_ENH_PULSE_WIDTH 0x47
+#define CX0342_AS_CURRENT_CNT_L 0x48
+#define CX0342_AS_CURRENT_CNT_H 0x49
+#define CX0342_AS_PREVIOUS_CNT_L 0x4a
+#define CX0342_AS_PREVIOUS_CNT_H 0x4b
+#define CX0342_SPV_VALUE_L 0x4c
+#define CX0342_SPV_VALUE_H 0x4d
+#define CX0342_GPXLTHD_L 0x50
+#define CX0342_GPXLTHD_H 0x51
+#define CX0342_RBPXLTHD_L 0x52
+#define CX0342_RBPXLTHD_H 0x53
+#define CX0342_PLANETHD_L 0x54
+#define CX0342_PLANETHD_H 0x55
+#define CX0342_ROWDARK_TH 0x56
+#define CX0342_ROWDARK_TOL 0x57
+#define CX0342_RB_GAP_L 0x58
+#define CX0342_RB_GAP_H 0x59
+#define CX0342_G_GAP_L 0x5a
+#define CX0342_G_GAP_H 0x5b
+#define CX0342_AUTO_ROW_DARK 0x60
+#define CX0342_MANUAL_DARK_VALUE 0x61
+#define CX0342_GB_DARK_OFFSET 0x62
+#define CX0342_GR_DARK_OFFSET 0x63
+#define CX0342_RED_DARK_OFFSET 0x64
+#define CX0342_BLUE_DARK_OFFSET 0x65
+#define CX0342_DATA_SCALING_MULTI 0x66
+#define CX0342_AUTOD_Q_FRAME 0x67
+#define CX0342_AUTOD_ALLOW_VARI 0x68
+#define CX0342_AUTO_DARK_VALUE_L 0x69
+#define CX0342_AUTO_DARK_VALUE_H 0x6a
+#define CX0342_IO_CTRL_0 0x70
+#define CX0342_IO_CTRL_1 0x71
+#define CX0342_IO_CTRL_2 0x72
+#define CX0342_IDLE_CTRL 0x73
+#define CX0342_TEST_MODE 0x74
+#define CX0342_FRAME_FIX_DATA_TEST 0x75
+#define CX0342_FRAME_CNT_TEST 0x76
+#define CX0342_RST_OVERFLOW_L 0x80
+#define CX0342_RST_OVERFLOW_H 0x81
+#define CX0342_RST_UNDERFLOW_L 0x82
+#define CX0342_RST_UNDERFLOW_H 0x83
+#define CX0342_DATA_OVERFLOW_L 0x84
+#define CX0342_DATA_OVERFLOW_H 0x85
+#define CX0342_DATA_UNDERFLOW_L 0x86
+#define CX0342_DATA_UNDERFLOW_H 0x87
+#define CX0342_CHANNEL_0_0_L_irst 0x90
+#define CX0342_CHANNEL_0_0_H_irst 0x91
+#define CX0342_CHANNEL_0_1_L_irst 0x92
+#define CX0342_CHANNEL_0_1_H_irst 0x93
+#define CX0342_CHANNEL_0_2_L_irst 0x94
+#define CX0342_CHANNEL_0_2_H_irst 0x95
+#define CX0342_CHANNEL_0_3_L_irst 0x96
+#define CX0342_CHANNEL_0_3_H_irst 0x97
+#define CX0342_CHANNEL_0_4_L_irst 0x98
+#define CX0342_CHANNEL_0_4_H_irst 0x99
+#define CX0342_CHANNEL_0_5_L_irst 0x9a
+#define CX0342_CHANNEL_0_5_H_irst 0x9b
+#define CX0342_CHANNEL_0_6_L_irst 0x9c
+#define CX0342_CHANNEL_0_6_H_irst 0x9d
+#define CX0342_CHANNEL_0_7_L_irst 0x9e
+#define CX0342_CHANNEL_0_7_H_irst 0x9f
+#define CX0342_CHANNEL_1_0_L_itx 0xa0
+#define CX0342_CHANNEL_1_0_H_itx 0xa1
+#define CX0342_CHANNEL_1_1_L_itx 0xa2
+#define CX0342_CHANNEL_1_1_H_itx 0xa3
+#define CX0342_CHANNEL_1_2_L_itx 0xa4
+#define CX0342_CHANNEL_1_2_H_itx 0xa5
+#define CX0342_CHANNEL_1_3_L_itx 0xa6
+#define CX0342_CHANNEL_1_3_H_itx 0xa7
+#define CX0342_CHANNEL_1_4_L_itx 0xa8
+#define CX0342_CHANNEL_1_4_H_itx 0xa9
+#define CX0342_CHANNEL_1_5_L_itx 0xaa
+#define CX0342_CHANNEL_1_5_H_itx 0xab
+#define CX0342_CHANNEL_1_6_L_itx 0xac
+#define CX0342_CHANNEL_1_6_H_itx 0xad
+#define CX0342_CHANNEL_1_7_L_itx 0xae
+#define CX0342_CHANNEL_1_7_H_itx 0xaf
+#define CX0342_CHANNEL_2_0_L_iwl 0xb0
+#define CX0342_CHANNEL_2_0_H_iwl 0xb1
+#define CX0342_CHANNEL_2_1_L_iwl 0xb2
+#define CX0342_CHANNEL_2_1_H_iwl 0xb3
+#define CX0342_CHANNEL_2_2_L_iwl 0xb4
+#define CX0342_CHANNEL_2_2_H_iwl 0xb5
+#define CX0342_CHANNEL_2_3_L_iwl 0xb6
+#define CX0342_CHANNEL_2_3_H_iwl 0xb7
+#define CX0342_CHANNEL_2_4_L_iwl 0xb8
+#define CX0342_CHANNEL_2_4_H_iwl 0xb9
+#define CX0342_CHANNEL_2_5_L_iwl 0xba
+#define CX0342_CHANNEL_2_5_H_iwl 0xbb
+#define CX0342_CHANNEL_2_6_L_iwl 0xbc
+#define CX0342_CHANNEL_2_6_H_iwl 0xbd
+#define CX0342_CHANNEL_2_7_L_iwl 0xbe
+#define CX0342_CHANNEL_2_7_H_iwl 0xbf
+#define CX0342_CHANNEL_3_0_L_ensp 0xc0
+#define CX0342_CHANNEL_3_0_H_ensp 0xc1
+#define CX0342_CHANNEL_3_1_L_ensp 0xc2
+#define CX0342_CHANNEL_3_1_H_ensp 0xc3
+#define CX0342_CHANNEL_3_2_L_ensp 0xc4
+#define CX0342_CHANNEL_3_2_H_ensp 0xc5
+#define CX0342_CHANNEL_3_3_L_ensp 0xc6
+#define CX0342_CHANNEL_3_3_H_ensp 0xc7
+#define CX0342_CHANNEL_3_4_L_ensp 0xc8
+#define CX0342_CHANNEL_3_4_H_ensp 0xc9
+#define CX0342_CHANNEL_3_5_L_ensp 0xca
+#define CX0342_CHANNEL_3_5_H_ensp 0xcb
+#define CX0342_CHANNEL_3_6_L_ensp 0xcc
+#define CX0342_CHANNEL_3_6_H_ensp 0xcd
+#define CX0342_CHANNEL_3_7_L_ensp 0xce
+#define CX0342_CHANNEL_3_7_H_ensp 0xcf
+#define CX0342_CHANNEL_4_0_L_sela 0xd0
+#define CX0342_CHANNEL_4_0_H_sela 0xd1
+#define CX0342_CHANNEL_4_1_L_sela 0xd2
+#define CX0342_CHANNEL_4_1_H_sela 0xd3
+#define CX0342_CHANNEL_5_0_L_intla 0xe0
+#define CX0342_CHANNEL_5_0_H_intla 0xe1
+#define CX0342_CHANNEL_5_1_L_intla 0xe2
+#define CX0342_CHANNEL_5_1_H_intla 0xe3
+#define CX0342_CHANNEL_5_2_L_intla 0xe4
+#define CX0342_CHANNEL_5_2_H_intla 0xe5
+#define CX0342_CHANNEL_5_3_L_intla 0xe6
+#define CX0342_CHANNEL_5_3_H_intla 0xe7
+#define CX0342_CHANNEL_6_0_L_xa_sel_pos 0xf0
+#define CX0342_CHANNEL_6_0_H_xa_sel_pos 0xf1
+#define CX0342_CHANNEL_7_1_L_cds_pos 0xf2
+#define CX0342_CHANNEL_7_1_H_cds_pos 0xf3
+#define CX0342_SENSOR_HEIGHT_L 0xfb
+#define CX0342_SENSOR_HEIGHT_H 0xfc
+#define CX0342_SENSOR_WIDTH_L 0xfd
+#define CX0342_SENSOR_WIDTH_H 0xfe
+#define CX0342_VSYNC_HSYNC_READ 0xff
+
+struct cmd {
+ u8 reg;
+ u8 val;
+};
+
+static const u8 DQT[17][130] = {
+ /* Define quantization table (thanks to Thomas Kaiser) */
+ { /* Quality 0 */
+ 0x00,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x01,
+ 0x04, 0x04, 0x04, 0x06, 0x05, 0x06, 0x0b, 0x06,
+ 0x06, 0x0b, 0x18, 0x10, 0x0e, 0x10, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ },
+ { /* Quality 1 */
+ 0x00,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x09, 0x09, 0x09, 0x09, 0x09,
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+ 0x01,
+ 0x08, 0x09, 0x09, 0x0c, 0x0a, 0x0c, 0x17, 0x0d,
+ 0x0d, 0x17, 0x31, 0x21, 0x1c, 0x21, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ },
+ { /* Quality 2 */
+ 0x00,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x06, 0x06, 0x06, 0x04, 0x04, 0x04,
+ 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x01,
+ 0x0c, 0x0d, 0x0d, 0x12, 0x0f, 0x12, 0x23, 0x13,
+ 0x13, 0x23, 0x4a, 0x31, 0x2a, 0x31, 0x4a, 0x4a,
+ 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
+ 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
+ 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
+ 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
+ 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
+ 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
+ },
+ { /* Quality 3 */
+ 0x00,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04,
+ 0x04, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x01,
+ 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
+ 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ },
+ { /* Quality 4 */
+ 0x00,
+ 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x0a, 0x0a, 0x0a, 0x05, 0x05, 0x05,
+ 0x05, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x0a, 0x0a, 0x0a, 0x17, 0x17, 0x17, 0x17, 0x17,
+ 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17,
+ 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17,
+ 0x01,
+ 0x11, 0x16, 0x16, 0x1e, 0x1a, 0x1e, 0x3a, 0x20,
+ 0x20, 0x3a, 0x7b, 0x52, 0x46, 0x52, 0x7b, 0x7b,
+ 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
+ 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
+ 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
+ 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
+ 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
+ 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
+ },
+ { /* Quality 5 */
+ 0x00,
+ 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x0c, 0x0c, 0x0c, 0x06, 0x06, 0x06,
+ 0x06, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x01,
+ 0x11, 0x1b, 0x1b, 0x24, 0x1f, 0x24, 0x46, 0x27,
+ 0x27, 0x46, 0x94, 0x63, 0x54, 0x63, 0x94, 0x94,
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ },
+ { /* Quality 6 */
+ 0x00,
+ 0x05, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x0e, 0x0e, 0x0e, 0x07, 0x07, 0x07,
+ 0x07, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x01,
+ 0x15, 0x1f, 0x1f, 0x2a, 0x24, 0x2a, 0x52, 0x2d,
+ 0x2d, 0x52, 0xad, 0x73, 0x62, 0x73, 0xad, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
+ },
+ { /* Quality 7 */
+ 0x00,
+ 0x05, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08,
+ 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x26, 0x26, 0x26, 0x26, 0x26,
+ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,
+ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,
+ 0x01,
+ 0x15, 0x24, 0x24, 0x30, 0x2a, 0x30, 0x5e, 0x34,
+ 0x34, 0x5e, 0xc6, 0x84, 0x70, 0x84, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ },
+ { /* Quality 8 */
+ 0x00,
+ 0x06, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x0a, 0x0a, 0x14, 0x14, 0x14, 0x0a, 0x0a, 0x0a,
+ 0x0a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x14, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f,
+ 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f,
+ 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f,
+ 0x01,
+ 0x19, 0x2d, 0x2d, 0x3c, 0x34, 0x3c, 0x75, 0x41,
+ 0x41, 0x75, 0xf7, 0xa5, 0x8c, 0xa5, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ },
+ { /* Quality 9 */
+ 0x00,
+ 0x06, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x18, 0x18, 0x18, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x01,
+ 0x19, 0x36, 0x36, 0x48, 0x3f, 0x48, 0x8d, 0x4e,
+ 0x4e, 0x8d, 0xff, 0xc6, 0xa8, 0xc6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ { /* Quality 10 */
+ 0x00,
+ 0x07, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0e, 0x1c, 0x1c, 0x1c, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x1c, 0x1c, 0x1c, 0x42, 0x42, 0x42, 0x42, 0x42,
+ 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
+ 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
+ 0x01,
+ 0x1d, 0x3f, 0x3f, 0x54, 0x49, 0x54, 0xa4, 0x5b,
+ 0x5b, 0xa4, 0xff, 0xe7, 0xc4, 0xe7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ { /* Quality 11 */
+ 0x00,
+ 0x07, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x20, 0x20, 0x20, 0x10, 0x10, 0x10,
+ 0x10, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c,
+ 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c,
+ 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c,
+ 0x01,
+ 0x1d, 0x48, 0x48, 0x60, 0x54, 0x60, 0xbc, 0x68,
+ 0x68, 0xbc, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ { /* Quality 12 */
+ 0x00,
+ 0x08, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x28, 0x28, 0x28, 0x14, 0x14, 0x14,
+ 0x14, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
+ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
+ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
+ 0x28, 0x28, 0x28, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
+ 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
+ 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
+ 0x01,
+ 0x22, 0x5a, 0x5a, 0x78, 0x69, 0x78, 0xeb, 0x82,
+ 0x82, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ { /* Quality 13 */
+ 0x00,
+ 0x08, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x30, 0x30, 0x30, 0x18, 0x18, 0x18,
+ 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x01,
+ 0x22, 0x6c, 0x6c, 0x90, 0x7e, 0x90, 0xff, 0x9c,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ { /* Quality 14 */
+ 0x00,
+ 0x0a, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x1c, 0x1c, 0x38, 0x38, 0x38, 0x1c, 0x1c, 0x1c,
+ 0x1c, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x85, 0x85, 0x85, 0x85, 0x85,
+ 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85,
+ 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85,
+ 0x01,
+ 0x2a, 0x7e, 0x7e, 0xa8, 0x93, 0xa8, 0xff, 0xb6,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ { /* Quality 15 */
+ 0x00,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x40, 0x40, 0x40, 0x20, 0x20, 0x20,
+ 0x20, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x98, 0x98, 0x98, 0x98, 0x98,
+ 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98,
+ 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98,
+ 0x01,
+ 0x2a, 0x90, 0x90, 0xc0, 0xa8, 0xc0, 0xff, 0xd0,
+ 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ { /* Quality 16-31 */
+ 0x00,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x01,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ }
+};
+
+static const struct cmd tp6810_cx_init_common[] = {
+ {0x1c, 0x00},
+ {TP6800_R10_SIF_TYPE, 0x00},
+ {0x4e, 0x00},
+ {0x4f, 0x00},
+ {TP6800_R50, 0xff},
+ {TP6800_R51, 0x03},
+ {0x00, 0x07},
+ {TP6800_R79_QUALITY, 0x03},
+ {TP6800_R2F_TIMING_CFG, 0x37},
+ {TP6800_R30_SENSOR_CFG, 0x10},
+ {TP6800_R21_ENDP_1_CTL, 0x00},
+ {TP6800_R52, 0x40},
+ {TP6800_R53, 0x40},
+ {TP6800_R54_DARK_CFG, 0x40},
+ {TP6800_R30_SENSOR_CFG, 0x18},
+ {0x4b, 0x00},
+ {TP6800_R3F_FRAME_RATE, 0x83},
+ {TP6800_R79_QUALITY, 0x05},
+ {TP6800_R21_ENDP_1_CTL, 0x00},
+ {0x7c, 0x04},
+ {0x25, 0x14},
+ {0x26, 0x0f},
+ {0x7b, 0x10},
+};
+
+static const struct cmd tp6810_ov_init_common[] = {
+ {0x1c, 0x00},
+ {TP6800_R10_SIF_TYPE, 0x00},
+ {0x4e, 0x00},
+ {0x4f, 0x00},
+ {TP6800_R50, 0xff},
+ {TP6800_R51, 0x03},
+ {0x00, 0x07},
+ {TP6800_R52, 0x40},
+ {TP6800_R53, 0x40},
+ {TP6800_R54_DARK_CFG, 0x40},
+ {TP6800_R79_QUALITY, 0x03},
+ {TP6800_R2F_TIMING_CFG, 0x17},
+ {TP6800_R30_SENSOR_CFG, 0x18},
+ {TP6800_R21_ENDP_1_CTL, 0x00},
+ {TP6800_R3F_FRAME_RATE, 0x86},
+ {0x25, 0x18},
+ {0x26, 0x0f},
+ {0x7b, 0x90},
+};
+
+static const struct cmd tp6810_bridge_start[] = {
+ {0x59, 0x88},
+ {0x5a, 0x0f},
+ {0x5b, 0x4e},
+ {TP6800_R5C_EDGE_THRLD, 0x63},
+ {TP6800_R5D_DEMOSAIC_CFG, 0x00},
+ {0x03, 0x7f},
+ {0x04, 0x80},
+ {0x06, 0x00},
+ {0x00, 0x00},
+};
+
+static const struct cmd tp6810_late_start[] = {
+ {0x7d, 0x01},
+ {0xb0, 0x04},
+ {0xb1, 0x04},
+ {0xb2, 0x04},
+ {0xb3, 0x04},
+ {0xb4, 0x04},
+ {0xb5, 0x04},
+ {0xb6, 0x08},
+ {0xb7, 0x08},
+ {0xb8, 0x04},
+ {0xb9, 0x04},
+ {0xba, 0x04},
+ {0xbb, 0x04},
+ {0xbc, 0x04},
+ {0xbd, 0x08},
+ {0xbe, 0x08},
+ {0xbf, 0x08},
+ {0xc0, 0x04},
+ {0xc1, 0x04},
+ {0xc2, 0x08},
+ {0xc3, 0x08},
+ {0xc4, 0x08},
+ {0xc5, 0x08},
+ {0xc6, 0x08},
+ {0xc7, 0x13},
+ {0xc8, 0x04},
+ {0xc9, 0x08},
+ {0xca, 0x08},
+ {0xcb, 0x08},
+ {0xcc, 0x08},
+ {0xcd, 0x08},
+ {0xce, 0x13},
+ {0xcf, 0x13},
+ {0xd0, 0x08},
+ {0xd1, 0x08},
+ {0xd2, 0x08},
+ {0xd3, 0x08},
+ {0xd4, 0x08},
+ {0xd5, 0x13},
+ {0xd6, 0x13},
+ {0xd7, 0x13},
+ {0xd8, 0x08},
+ {0xd9, 0x08},
+ {0xda, 0x08},
+ {0xdb, 0x08},
+ {0xdc, 0x13},
+ {0xdd, 0x13},
+ {0xde, 0x13},
+ {0xdf, 0x13},
+ {0xe0, 0x08},
+ {0xe1, 0x08},
+ {0xe2, 0x08},
+ {0xe3, 0x13},
+ {0xe4, 0x13},
+ {0xe5, 0x13},
+ {0xe6, 0x13},
+ {0xe7, 0x13},
+ {0xe8, 0x08},
+ {0xe9, 0x08},
+ {0xea, 0x13},
+ {0xeb, 0x13},
+ {0xec, 0x13},
+ {0xed, 0x13},
+ {0xee, 0x13},
+ {0xef, 0x13},
+ {0x7d, 0x02},
+
+ /* later after isoc start */
+ {0x7d, 0x08},
+ {0x7d, 0x00},
+};
+
+static const struct cmd cx0342_timing_seq[] = {
+ {CX0342_CHANNEL_0_1_L_irst, 0x20},
+ {CX0342_CHANNEL_0_2_L_irst, 0x24},
+ {CX0342_CHANNEL_0_2_H_irst, 0x00},
+ {CX0342_CHANNEL_0_3_L_irst, 0x2f},
+ {CX0342_CHANNEL_0_3_H_irst, 0x00},
+ {CX0342_CHANNEL_1_0_L_itx, 0x02},
+ {CX0342_CHANNEL_1_0_H_itx, 0x00},
+ {CX0342_CHANNEL_1_1_L_itx, 0x20},
+ {CX0342_CHANNEL_1_1_H_itx, 0x00},
+ {CX0342_CHANNEL_1_2_L_itx, 0xe4},
+ {CX0342_CHANNEL_1_2_H_itx, 0x00},
+ {CX0342_CHANNEL_1_3_L_itx, 0xee},
+ {CX0342_CHANNEL_1_3_H_itx, 0x00},
+ {CX0342_CHANNEL_2_0_L_iwl, 0x30},
+ {CX0342_CHANNEL_2_0_H_iwl, 0x00},
+ {CX0342_CHANNEL_3_0_L_ensp, 0x34},
+ {CX0342_CHANNEL_3_1_L_ensp, 0xe2},
+ {CX0342_CHANNEL_3_1_H_ensp, 0x00},
+ {CX0342_CHANNEL_3_2_L_ensp, 0xf6},
+ {CX0342_CHANNEL_3_2_H_ensp, 0x00},
+ {CX0342_CHANNEL_3_3_L_ensp, 0xf4},
+ {CX0342_CHANNEL_3_3_H_ensp, 0x02},
+ {CX0342_CHANNEL_4_0_L_sela, 0x26},
+ {CX0342_CHANNEL_4_0_H_sela, 0x00},
+ {CX0342_CHANNEL_4_1_L_sela, 0xe2},
+ {CX0342_CHANNEL_4_1_H_sela, 0x00},
+ {CX0342_CHANNEL_5_0_L_intla, 0x26},
+ {CX0342_CHANNEL_5_1_L_intla, 0x29},
+ {CX0342_CHANNEL_5_2_L_intla, 0xf0},
+ {CX0342_CHANNEL_5_2_H_intla, 0x00},
+ {CX0342_CHANNEL_5_3_L_intla, 0xf3},
+ {CX0342_CHANNEL_5_3_H_intla, 0x00},
+ {CX0342_CHANNEL_6_0_L_xa_sel_pos, 0x24},
+ {CX0342_CHANNEL_7_1_L_cds_pos, 0x02},
+ {CX0342_TIMING_EN, 0x01},
+};
+
+/* define the JPEG header */
+static void jpeg_define(u8 *jpeg_hdr,
+ int height,
+ int width)
+{
+ memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
+}
+
+/* set the JPEG quality for sensor soi763a */
+static void jpeg_set_qual(u8 *jpeg_hdr,
+ int quality)
+{
+ int i, sc;
+
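+ /* convert the quality (1..100) to a scale factor, libjpeg style */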
+ if (quality < 50)
+ sc = 5000 / quality;
+ else
+ sc = 200 - quality * 2;
+ for (i = 0; i < 64; i++) {
+ jpeg_hdr[JPEG_QT0_OFFSET + i] =
+ (jpeg_head[JPEG_QT0_OFFSET + i] * sc + 50) / 100;
+ jpeg_hdr[JPEG_QT1_OFFSET + i] =
+ (jpeg_head[JPEG_QT1_OFFSET + i] * sc + 50) / 100;
+ }
+}
+
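+/* write a value to a bridge register */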
+static void reg_w(struct gspca_dev *gspca_dev, u8 index, u8 value)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x0e,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, NULL, 0, 500);
+ if (ret < 0) {
+ pr_err("reg_w err %d\n", ret);
+ gspca_dev->usb_err = ret;
+ }
+}
+
+/* the returned value is in gspca_dev->usb_buf */
+static void reg_r(struct gspca_dev *gspca_dev, u8 index)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x0d,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, gspca_dev->usb_buf, 1, 500);
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
+ }
+}
+
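+/* write a list of bridge registers */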
+static void reg_w_buf(struct gspca_dev *gspca_dev,
+ const struct cmd *p, int l)
+{
+ do {
+ reg_w(gspca_dev, p->reg, p->val);
+ p++;
+ } while (--l > 0);
+}
+
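+/* write a sensor register through the bridge serial interface;
+ * return 0 on success or -1 on error (no status check on the tp6800) */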
+static int i2c_w(struct gspca_dev *gspca_dev, u8 index, u8 value)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
+ reg_w(gspca_dev, TP6800_R19_SIF_ADDR_S2, index);
+ reg_w(gspca_dev, TP6800_R13_SIF_TX_DATA, value);
+ reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x01);
+ if (sd->bridge == BRIDGE_TP6800)
+ return 0;
+ msleep(5);
+ reg_r(gspca_dev, TP6800_R11_SIF_CONTROL);
+ if (gspca_dev->usb_buf[0] == 0)
+ return 0;
+ reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
+ return -1; /* error */
+}
+
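+/* write a list of sensor registers */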
+static void i2c_w_buf(struct gspca_dev *gspca_dev,
+ const struct cmd *p, int l)
+{
+ do {
+ i2c_w(gspca_dev, p->reg, p->val);
+ p++;
+ } while (--l > 0);
+}
+
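+/* read a 1 or 2 byte sensor register value;
+ * return the value or -1 on error */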
+static int i2c_r(struct gspca_dev *gspca_dev, u8 index, int len)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int v;
+
+ reg_w(gspca_dev, TP6800_R19_SIF_ADDR_S2, index);
+ reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x02);
+ msleep(5);
+ reg_r(gspca_dev, TP6800_R14_SIF_RX_DATA);
+ v = gspca_dev->usb_buf[0];
+ if (sd->bridge == BRIDGE_TP6800)
+ return v;
+ if (len > 1) {
+ reg_r(gspca_dev, TP6800_R1B_SIF_RX_DATA2);
+ v |= (gspca_dev->usb_buf[0] << 8);
+ }
+ reg_r(gspca_dev, TP6800_R11_SIF_CONTROL);
+ if (gspca_dev->usb_buf[0] == 0)
+ return v;
+ reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
+ return -1;
+}
+
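+/* send a tagged data block to bulk endpoint 3,
+ * splitting it into chunks of at most BULK_OUT_SIZE bytes (tag included) */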
+static void bulk_w(struct gspca_dev *gspca_dev,
+ u8 tag,
+ const u8 *data,
+ int length)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int count, actual_count, ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ for (;;) {
+ count = length > BULK_OUT_SIZE - 1
+ ? BULK_OUT_SIZE - 1 : length;
+ gspca_dev->usb_buf[0] = tag;
+ memcpy(&gspca_dev->usb_buf[1], data, count);
+ ret = usb_bulk_msg(dev,
+ usb_sndbulkpipe(dev, 3),
+ gspca_dev->usb_buf, count + 1,
+ &actual_count, 500);
+ if (ret < 0) {
+ pr_err("bulk write error %d tag=%02x\n",
+ ret, tag);
+ gspca_dev->usb_err = ret;
+ return;
+ }
+ length -= count;
+ if (length <= 0)
+ break;
+ data += count;
+ }
+}
+
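+/* identify the sensor connected to the tp6810 by probing known i2c
+ * addresses; return a SENSOR_xxx value or a negative code if unknown */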
+static int probe_6810(struct gspca_dev *gspca_dev)
+{
+ u8 gpio;
+ int ret;
+
+ reg_r(gspca_dev, TP6800_R18_GPIO_DATA);
+ gpio = gspca_dev->usb_buf[0];
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x04); /* i2c 16 bits */
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x21); /* ov??? */
+ reg_w(gspca_dev, TP6800_R1A_SIF_TX_DATA2, 0x00);
+ if (i2c_w(gspca_dev, 0x00, 0x00) >= 0)
+ return SENSOR_SOI763A;
+
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00); /* i2c 8 bits */
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x7f); /* (unknown i2c) */
+ if (i2c_w(gspca_dev, 0x00, 0x00) >= 0)
+ return -2;
+
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00); /* i2c 8 bits */
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x11); /* tas??? / hv??? */
+ ret = i2c_r(gspca_dev, 0x00, 1);
+ if (ret > 0)
+ return -3;
+
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x6e); /* po??? */
+ ret = i2c_r(gspca_dev, 0x00, 1);
+ if (ret > 0)
+ return -4;
+
+ ret = i2c_r(gspca_dev, 0x01, 1);
+ if (ret > 0)
+ return -5;
+
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x04); /* i2c 16 bits */
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x5d); /* mi/mt??? */
+ ret = i2c_r(gspca_dev, 0x00, 2);
+ if (ret > 0)
+ return -6;
+
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x5c); /* mi/mt??? */
+ ret = i2c_r(gspca_dev, 0x36, 2);
+ if (ret > 0)
+ return -7;
+
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x61); /* (unknown i2c) */
+ reg_w(gspca_dev, TP6800_R1A_SIF_TX_DATA2, 0x10);
+ if (i2c_w(gspca_dev, 0xff, 0x00) >= 0)
+ return -8;
+
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
+ reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00); /* i2c 8 bits */
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20); /* cx0342 */
+ ret = i2c_r(gspca_dev, 0x00, 1);
+ if (ret > 0)
+ return SENSOR_CX0342;
+ return -9;
+}
+
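+/* initialize the tp6810 bridge and the cx0342 sensor */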
+static void cx0342_6810_init(struct gspca_dev *gspca_dev)
+{
+ static const struct cmd reg_init_1[] = {
+ {TP6800_R2F_TIMING_CFG, 0x2f},
+ {0x25, 0x02},
+ {TP6800_R21_ENDP_1_CTL, 0x00},
+ {TP6800_R3F_FRAME_RATE, 0x80},
+ {TP6800_R2F_TIMING_CFG, 0x2f},
+ {TP6800_R18_GPIO_DATA, 0xe1},
+ {TP6800_R18_GPIO_DATA, 0xc1},
+ {TP6800_R18_GPIO_DATA, 0xe1},
+ {TP6800_R11_SIF_CONTROL, 0x00},
+ };
+ static const struct cmd reg_init_2[] = {
+ {TP6800_R78_FORMAT, 0x48},
+ {TP6800_R11_SIF_CONTROL, 0x00},
+ };
+ static const struct cmd sensor_init[] = {
+ {CX0342_OUTPUT_CTRL, 0x07},
+ {CX0342_BYPASS_MODE, 0x58},
+ {CX0342_GPXLTHD_L, 0x28},
+ {CX0342_RBPXLTHD_L, 0x28},
+ {CX0342_PLANETHD_L, 0x50},
+ {CX0342_PLANETHD_H, 0x03},
+ {CX0342_RB_GAP_L, 0xff},
+ {CX0342_RB_GAP_H, 0x07},
+ {CX0342_G_GAP_L, 0xff},
+ {CX0342_G_GAP_H, 0x07},
+ {CX0342_RST_OVERFLOW_L, 0x5c},
+ {CX0342_RST_OVERFLOW_H, 0x01},
+ {CX0342_DATA_OVERFLOW_L, 0xfc},
+ {CX0342_DATA_OVERFLOW_H, 0x03},
+ {CX0342_DATA_UNDERFLOW_L, 0x00},
+ {CX0342_DATA_UNDERFLOW_H, 0x00},
+ {CX0342_SYS_CTRL_0, 0x40},
+ {CX0342_GLOBAL_GAIN, 0x01},
+ {CX0342_CLOCK_GEN, 0x00},
+ {CX0342_SYS_CTRL_0, 0x02},
+ {CX0342_IDLE_CTRL, 0x05},
+ {CX0342_ADCGN, 0x00},
+ {CX0342_ADC_CTL, 0x00},
+ {CX0342_LVRST_BLBIAS, 0x01},
+ {CX0342_VTHSEL, 0x0b},
+ {CX0342_RAMP_RIV, 0x0b},
+ {CX0342_LDOSEL, 0x07},
+ {CX0342_SPV_VALUE_L, 0x40},
+ {CX0342_SPV_VALUE_H, 0x02},
+
+ {CX0342_AUTO_ADC_CALIB, 0x81},
+ {CX0342_TIMING_EN, 0x01},
+ };
+
+ reg_w_buf(gspca_dev, reg_init_1, ARRAY_SIZE(reg_init_1));
+ reg_w_buf(gspca_dev, tp6810_cx_init_common,
+ ARRAY_SIZE(tp6810_cx_init_common));
+ reg_w_buf(gspca_dev, reg_init_2, ARRAY_SIZE(reg_init_2));
+
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20); /* cx0342 I2C addr */
+ i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
+ i2c_w_buf(gspca_dev, cx0342_timing_seq, ARRAY_SIZE(cx0342_timing_seq));
+}
+
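+/* initialize the tp6810 bridge and the soi763a sensor */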
+static void soi763a_6810_init(struct gspca_dev *gspca_dev)
+{
+ static const struct cmd reg_init_1[] = {
+ {TP6800_R2F_TIMING_CFG, 0x2f},
+ {TP6800_R18_GPIO_DATA, 0xe1},
+ {0x25, 0x02},
+ {TP6800_R21_ENDP_1_CTL, 0x00},
+ {TP6800_R3F_FRAME_RATE, 0x80},
+ {TP6800_R2F_TIMING_CFG, 0x2f},
+ {TP6800_R18_GPIO_DATA, 0xc1},
+ };
+ static const struct cmd reg_init_2[] = {
+ {TP6800_R78_FORMAT, 0x54},
+ };
+ static const struct cmd sensor_init[] = {
+ {0x00, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x03, 0x90},
+ {0x04, 0x20},
+ {0x05, 0x20},
+ {0x06, 0x80},
+ {0x07, 0x00},
+ {0x08, 0xff},
+ {0x09, 0xff},
+ {0x0a, 0x76}, /* 7630 = soi763a */
+ {0x0b, 0x30},
+ {0x0c, 0x20},
+ {0x0d, 0x20},
+ {0x0e, 0xff},
+ {0x0f, 0xff},
+ {0x10, 0x41},
+ {0x15, 0x14},
+ {0x11, 0x40},
+ {0x12, 0x48},
+ {0x13, 0x80},
+ {0x14, 0x80},
+ {0x16, 0x03},
+ {0x28, 0xb0},
+ {0x71, 0x20},
+ {0x75, 0x8e},
+ {0x17, 0x1b},
+ {0x18, 0xbd},
+ {0x19, 0x05},
+ {0x1a, 0xf6},
+ {0x1b, 0x04},
+ {0x1c, 0x7f}, /* omnivision */
+ {0x1d, 0xa2},
+ {0x1e, 0x00},
+ {0x1f, 0x00},
+ {0x20, 0x45},
+ {0x21, 0x80},
+ {0x22, 0x80},
+ {0x23, 0xee},
+ {0x24, 0x50},
+ {0x25, 0x7a},
+ {0x26, 0xa0},
+ {0x27, 0x9a},
+ {0x29, 0x30},
+ {0x2a, 0x80},
+ {0x2b, 0x00},
+ {0x2c, 0xac},
+ {0x2d, 0x05},
+ {0x2e, 0x80},
+ {0x2f, 0x3c},
+ {0x30, 0x22},
+ {0x31, 0x00},
+ {0x32, 0x86},
+ {0x33, 0x08},
+ {0x34, 0xff},
+ {0x35, 0xff},
+ {0x36, 0xff},
+ {0x37, 0xff},
+ {0x38, 0xff},
+ {0x39, 0xff},
+ {0x3a, 0xfe},
+ {0x3b, 0xfe},
+ {0x3c, 0xfe},
+ {0x3d, 0xfe},
+ {0x3e, 0xfe},
+ {0x3f, 0x71},
+ {0x40, 0xff},
+ {0x41, 0xff},
+ {0x42, 0xff},
+ {0x43, 0xff},
+ {0x44, 0xff},
+ {0x45, 0xff},
+ {0x46, 0xff},
+ {0x47, 0xff},
+ {0x48, 0xff},
+ {0x49, 0xff},
+ {0x4a, 0xfe},
+ {0x4b, 0xff},
+ {0x4c, 0x00},
+ {0x4d, 0x00},
+ {0x4e, 0xff},
+ {0x4f, 0xff},
+ {0x50, 0xff},
+ {0x51, 0xff},
+ {0x52, 0xff},
+ {0x53, 0xff},
+ {0x54, 0xff},
+ {0x55, 0xff},
+ {0x56, 0xff},
+ {0x57, 0xff},
+ {0x58, 0xff},
+ {0x59, 0xff},
+ {0x5a, 0xff},
+ {0x5b, 0xfe},
+ {0x5c, 0xff},
+ {0x5d, 0x8f},
+ {0x5e, 0xff},
+ {0x5f, 0x8f},
+ {0x60, 0xa2},
+ {0x61, 0x4a},
+ {0x62, 0xf3},
+ {0x63, 0x75},
+ {0x64, 0xf0},
+ {0x65, 0x00},
+ {0x66, 0x55},
+ {0x67, 0x92},
+ {0x68, 0xa0},
+ {0x69, 0x4a},
+ {0x6a, 0x22},
+ {0x6b, 0x00},
+ {0x6c, 0x33},
+ {0x6d, 0x44},
+ {0x6e, 0x22},
+ {0x6f, 0x84},
+ {0x70, 0x0b},
+ {0x72, 0x10},
+ {0x73, 0x50},
+ {0x74, 0x21},
+ {0x76, 0x00},
+ {0x77, 0xa5},
+ {0x78, 0x80},
+ {0x79, 0x80},
+ {0x7a, 0x80},
+ {0x7b, 0xe2},
+ {0x7c, 0x00},
+ {0x7d, 0xf7},
+ {0x7e, 0x00},
+ {0x7f, 0x00},
+ };
+
+ reg_w_buf(gspca_dev, reg_init_1, ARRAY_SIZE(reg_init_1));
+ reg_w_buf(gspca_dev, tp6810_ov_init_common,
+ ARRAY_SIZE(tp6810_ov_init_common));
+ reg_w_buf(gspca_dev, reg_init_2, ARRAY_SIZE(reg_init_2));
+
+ i2c_w(gspca_dev, 0x12, 0x80); /* sensor reset */
+ msleep(10);
+ i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
+}
+
+/* set the gain and exposure */
+static void setexposure(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->sensor == SENSOR_CX0342) {
+ int expo;
+
+ expo = (sd->ctrls[EXPOSURE].val << 2) - 1;
+ i2c_w(gspca_dev, CX0342_EXPO_LINE_L, expo);
+ i2c_w(gspca_dev, CX0342_EXPO_LINE_H, expo >> 8);
+ if (sd->bridge == BRIDGE_TP6800)
+ i2c_w(gspca_dev, CX0342_RAW_GBGAIN_H,
+ sd->ctrls[GAIN].val >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_GBGAIN_L, sd->ctrls[GAIN].val);
+ if (sd->bridge == BRIDGE_TP6800)
+ i2c_w(gspca_dev, CX0342_RAW_GRGAIN_H,
+ sd->ctrls[GAIN].val >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_GRGAIN_L, sd->ctrls[GAIN].val);
+ if (sd->bridge == BRIDGE_TP6800)
+ i2c_w(gspca_dev, CX0342_RAW_BGAIN_H,
+ sd->ctrls[BGAIN].val >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, sd->ctrls[BGAIN].val);
+ if (sd->bridge == BRIDGE_TP6800)
+ i2c_w(gspca_dev, CX0342_RAW_RGAIN_H,
+ sd->ctrls[RGAIN].val >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, sd->ctrls[RGAIN].val);
+ i2c_w(gspca_dev, CX0342_SYS_CTRL_0,
+ sd->bridge == BRIDGE_TP6800 ? 0x80 : 0x81);
+ return;
+ }
+
+ /* soi763a */
+ i2c_w(gspca_dev, 0x10, /* AEC_H (exposure time) */
+ sd->ctrls[EXPOSURE].val);
+/* i2c_w(gspca_dev, 0x76, 0x02); * AEC_L [1:0] */
+ i2c_w(gspca_dev, 0x00, /* gain */
+ sd->ctrls[GAIN].val);
+}
+
+/* set the JPEG quantization tables */
+static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ /* update the jpeg quantization tables */
+ PDEBUG(D_STREAM, "q %d -> %d", sd->quality, q);
+ sd->quality = q;
+ if (q > 16)
+ q = 16;
+ if (sd->sensor == SENSOR_SOI763A)
+ jpeg_set_qual(sd->jpeg_hdr, jpeg_q[q]);
+ else
+ memcpy(&sd->jpeg_hdr[JPEG_QT0_OFFSET - 1],
+ DQT[q], sizeof DQT[0]);
+}
+
+/* set the JPEG compression quality factor */
+static void setquality(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 q;
+
+ q = sd->ctrls[QUALITY].val;
+ if (q != 16)
+ q = 15 - q;
+
+ reg_w(gspca_dev, TP6800_R7A_BLK_THRLD, 0x00);
+ reg_w(gspca_dev, TP6800_R79_QUALITY, 0x04);
+ reg_w(gspca_dev, TP6800_R79_QUALITY, q);
+
+ /* auto quality */
+ if (q == 15 && sd->bridge == BRIDGE_TP6810) {
+ msleep(4);
+ reg_w(gspca_dev, TP6800_R7A_BLK_THRLD, 0x19);
+ }
+}
+
+static const u8 color_null[18] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 color_gain[NSENSORS][18] = {
+[SENSOR_CX0342] =
+ {0x4c, 0x00, 0xa9, 0x00, 0x31, 0x00, /* Y R/G/B (LE values) */
+ 0xb6, 0x03, 0x6c, 0x03, 0xe0, 0x00, /* U R/G/B */
+ 0xdf, 0x00, 0x46, 0x03, 0xdc, 0x03}, /* V R/G/B */
+[SENSOR_SOI763A] =
+ {0x4c, 0x00, 0x95, 0x00, 0x1d, 0x00, /* Y R/G/B (LE values) */
+ 0xb6, 0x03, 0x6c, 0x03, 0xd7, 0x00, /* U R/G/B */
+ 0xd5, 0x00, 0x46, 0x03, 0xdc, 0x03}, /* V R/G/B */
+};
+
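+/* set the gamma - each table contains one 1024-entry curve per color channel */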
+static void setgamma(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int gamma;
+#define NGAMMA 6
+ static const u8 gamma_tb[NGAMMA][3][1024] = {
+ { /* gamma 0 - from tp6800 + soi763a */
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02,
+ 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09,
+ 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11,
+ 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17,
+ 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e,
+ 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23,
+ 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28,
+ 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f,
+ 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34,
+ 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38,
+ 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c,
+ 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40,
+ 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45,
+ 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49,
+ 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c,
+ 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50,
+ 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54,
+ 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58,
+ 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b,
+ 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f,
+ 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61,
+ 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65,
+ 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68,
+ 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a,
+ 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e,
+ 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70,
+ 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73,
+ 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77,
+ 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79,
+ 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b,
+ 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80,
+ 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82,
+ 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86,
+ 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89,
+ 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e,
+ 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90,
+ 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91,
+ 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93,
+ 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96,
+ 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98,
+ 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c,
+ 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e,
+ 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0,
+ 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2,
+ 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6,
+ 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8,
+ 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab,
+ 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad,
+ 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0,
+ 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2,
+ 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6,
+ 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8,
+ 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9,
+ 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc,
+ 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd,
+ 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf,
+ 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9,
+ 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca,
+ 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce,
+ 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1,
+ 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3,
+ 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6,
+ 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0,
+ 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3,
+ 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5,
+ 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7,
+ 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8,
+ 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb,
+ 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed,
+ 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02,
+ 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09,
+ 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11,
+ 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17,
+ 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e,
+ 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23,
+ 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28,
+ 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f,
+ 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34,
+ 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38,
+ 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c,
+ 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40,
+ 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45,
+ 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49,
+ 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c,
+ 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50,
+ 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54,
+ 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58,
+ 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b,
+ 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f,
+ 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61,
+ 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65,
+ 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68,
+ 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a,
+ 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e,
+ 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70,
+ 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73,
+ 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77,
+ 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79,
+ 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b,
+ 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80,
+ 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82,
+ 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86,
+ 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89,
+ 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e,
+ 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90,
+ 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91,
+ 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93,
+ 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96,
+ 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98,
+ 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c,
+ 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e,
+ 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0,
+ 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2,
+ 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6,
+ 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8,
+ 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab,
+ 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad,
+ 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0,
+ 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2,
+ 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6,
+ 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8,
+ 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9,
+ 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc,
+ 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd,
+ 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf,
+ 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9,
+ 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca,
+ 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce,
+ 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1,
+ 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3,
+ 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6,
+ 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0,
+ 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3,
+ 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5,
+ 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7,
+ 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8,
+ 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb,
+ 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed,
+ 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02,
+ 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09,
+ 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11,
+ 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17,
+ 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e,
+ 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23,
+ 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28,
+ 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f,
+ 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34,
+ 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38,
+ 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c,
+ 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40,
+ 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45,
+ 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49,
+ 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c,
+ 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50,
+ 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54,
+ 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58,
+ 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b,
+ 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f,
+ 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61,
+ 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65,
+ 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68,
+ 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a,
+ 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e,
+ 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70,
+ 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73,
+ 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x76,
+ 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79,
+ 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b,
+ 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80,
+ 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82,
+ 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86,
+ 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89,
+ 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e,
+ 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90,
+ 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91,
+ 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93,
+ 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96,
+ 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98,
+ 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c,
+ 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e,
+ 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0,
+ 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2,
+ 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6,
+ 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8,
+ 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab,
+ 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad,
+ 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0,
+ 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2,
+ 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6,
+ 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8,
+ 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9,
+ 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc,
+ 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd,
+ 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf,
+ 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9,
+ 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca,
+ 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce,
+ 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1,
+ 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3,
+ 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6,
+ 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0,
+ 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3,
+ 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5,
+ 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7,
+ 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8,
+ 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb,
+ 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed,
+ 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}
+ },
+ { /* gamma 1 - from tp6810 + soi763a */
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x02, 0x03, 0x05, 0x07, 0x08, 0x09, 0x0a,
+ 0x0c, 0x0d, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1c, 0x1e,
+ 0x1f, 0x20, 0x22, 0x22, 0x23, 0x25, 0x26, 0x27,
+ 0x27, 0x28, 0x29, 0x2b, 0x2b, 0x2c, 0x2d, 0x2f,
+ 0x2f, 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x35,
+ 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c,
+ 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43,
+ 0x43, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x49,
+ 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4d,
+ 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53,
+ 0x54, 0x54, 0x55, 0x56, 0x56, 0x58, 0x58, 0x59,
+ 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5e,
+ 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61,
+ 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66,
+ 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x69,
+ 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e,
+ 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71,
+ 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75,
+ 0x77, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79,
+ 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c,
+ 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x80,
+ 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84,
+ 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88,
+ 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a,
+ 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e,
+ 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91,
+ 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93,
+ 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97,
+ 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99,
+ 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b,
+ 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e,
+ 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1,
+ 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3,
+ 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5,
+ 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8,
+ 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab,
+ 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae,
+ 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0,
+ 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2,
+ 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4,
+ 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7,
+ 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8,
+ 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba,
+ 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd,
+ 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf,
+ 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2,
+ 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4,
+ 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7,
+ 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca,
+ 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce,
+ 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1,
+ 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4,
+ 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6,
+ 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8,
+ 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda,
+ 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde,
+ 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0,
+ 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1,
+ 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3,
+ 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4,
+ 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6,
+ 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8,
+ 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9,
+ 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec,
+ 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed,
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef,
+ 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3,
+ 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5,
+ 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6,
+ 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9,
+ 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
+ 0x05, 0x07, 0x07, 0x08, 0x09, 0x0a, 0x0c, 0x0d,
+ 0x0e, 0x10, 0x10, 0x11, 0x12, 0x14, 0x15, 0x15,
+ 0x16, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1c, 0x1e,
+ 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x25,
+ 0x26, 0x27, 0x27, 0x28, 0x29, 0x29, 0x2b, 0x2c,
+ 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x31,
+ 0x33, 0x34, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38,
+ 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d,
+ 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x42, 0x43,
+ 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48,
+ 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c,
+ 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50,
+ 0x52, 0x52, 0x53, 0x53, 0x53, 0x54, 0x54, 0x55,
+ 0x55, 0x56, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59,
+ 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c,
+ 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60,
+ 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x65,
+ 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67,
+ 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a,
+ 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e,
+ 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71,
+ 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74,
+ 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78,
+ 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a,
+ 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d,
+ 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80,
+ 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82,
+ 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86,
+ 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89,
+ 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b,
+ 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e,
+ 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90,
+ 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92,
+ 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94,
+ 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97,
+ 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99,
+ 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b,
+ 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d,
+ 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0,
+ 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2,
+ 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8,
+ 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab,
+ 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0,
+ 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2,
+ 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6,
+ 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8,
+ 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9,
+ 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc,
+ 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd,
+ 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf,
+ 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9,
+ 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca,
+ 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf,
+ 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0,
+ 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1,
+ 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4,
+ 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6,
+ 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8,
+ 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0,
+ 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2,
+ 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3,
+ 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4,
+ 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6,
+ 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7,
+ 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9,
+ 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb,
+ 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed,
+ 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4,
+ 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5,
+ 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe,
+ 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x05, 0x07,
+ 0x08, 0x09, 0x0a, 0x0a, 0x0c, 0x0d, 0x0e, 0x0e,
+ 0x10, 0x11, 0x12, 0x12, 0x14, 0x15, 0x16, 0x16,
+ 0x17, 0x18, 0x18, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e,
+ 0x1e, 0x1f, 0x1f, 0x20, 0x22, 0x22, 0x23, 0x23,
+ 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x29, 0x29,
+ 0x2b, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x30,
+ 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x35,
+ 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a,
+ 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d,
+ 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x42, 0x42, 0x43,
+ 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47,
+ 0x48, 0x48, 0x49, 0x49, 0x49, 0x4a, 0x4a, 0x4b,
+ 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f,
+ 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53,
+ 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56,
+ 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a,
+ 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c,
+ 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60,
+ 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63,
+ 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66,
+ 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69,
+ 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c,
+ 0x6c, 0x6d, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e,
+ 0x6f, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71,
+ 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74,
+ 0x74, 0x74, 0x75, 0x75, 0x75, 0x75, 0x77, 0x77,
+ 0x77, 0x78, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79,
+ 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b,
+ 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7f, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80,
+ 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82,
+ 0x82, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85,
+ 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a,
+ 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d,
+ 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e,
+ 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90,
+ 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92,
+ 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94,
+ 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96,
+ 0x97, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98,
+ 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b,
+ 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d,
+ 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0,
+ 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1,
+ 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3,
+ 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
+ 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6,
+ 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8,
+ 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab,
+ 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae,
+ 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf,
+ 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1,
+ 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2,
+ 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6,
+ 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7,
+ 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9,
+ 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba,
+ 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd,
+ 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe,
+ 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf,
+ 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2,
+ 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7,
+ 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca,
+ 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb,
+ 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce,
+ 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0,
+ 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3,
+ 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4,
+ 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7,
+ 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8,
+ 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0,
+ 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1,
+ 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3,
+ 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4,
+ 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5,
+ 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7,
+ 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8,
+ 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb,
+ 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec,
+ 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee,
+ 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef,
+ 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1,
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3,
+ 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5,
+ 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd,
+ 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ },
+ { /* gamma 2 */
+ {0x00, 0x01, 0x02, 0x05, 0x07, 0x08, 0x0a, 0x0c,
+ 0x0d, 0x0e, 0x10, 0x12, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22,
+ 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2c,
+ 0x2d, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x34,
+ 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3b, 0x3b,
+ 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43,
+ 0x44, 0x44, 0x45, 0x47, 0x47, 0x48, 0x49, 0x49,
+ 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4f, 0x4f,
+ 0x50, 0x50, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55,
+ 0x55, 0x56, 0x56, 0x58, 0x58, 0x59, 0x5a, 0x5a,
+ 0x5b, 0x5b, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f,
+ 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63,
+ 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68,
+ 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c,
+ 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x70,
+ 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74,
+ 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78,
+ 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b,
+ 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f,
+ 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82,
+ 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85,
+ 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89,
+ 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d,
+ 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f,
+ 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91,
+ 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94,
+ 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97,
+ 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99,
+ 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b,
+ 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e,
+ 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1,
+ 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3,
+ 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5,
+ 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8,
+ 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab,
+ 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0,
+ 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2,
+ 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4,
+ 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7,
+ 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8,
+ 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba,
+ 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd,
+ 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe,
+ 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3,
+ 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4,
+ 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7,
+ 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca,
+ 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb,
+ 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce,
+ 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0,
+ 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1,
+ 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4,
+ 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6,
+ 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda,
+ 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde,
+ 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0,
+ 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1,
+ 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2,
+ 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4,
+ 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5,
+ 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6,
+ 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8,
+ 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9,
+ 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb,
+ 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed,
+ 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1,
+ 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3,
+ 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5,
+ 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9,
+ 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb},
+ {0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05,
+ 0x07, 0x08, 0x09, 0x0a, 0x0d, 0x0e, 0x10, 0x11,
+ 0x12, 0x14, 0x15, 0x16, 0x16, 0x17, 0x18, 0x1a,
+ 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23,
+ 0x25, 0x26, 0x26, 0x27, 0x28, 0x29, 0x29, 0x2b,
+ 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x33,
+ 0x33, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39,
+ 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3d, 0x3f,
+ 0x3f, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44,
+ 0x45, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49, 0x4a,
+ 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d,
+ 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53,
+ 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x56, 0x58,
+ 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b,
+ 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f,
+ 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x63,
+ 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67,
+ 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69,
+ 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d,
+ 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70,
+ 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x73,
+ 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77,
+ 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79,
+ 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c,
+ 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f,
+ 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x84,
+ 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88,
+ 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a,
+ 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d,
+ 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f,
+ 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x91,
+ 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92,
+ 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97,
+ 0x97, 0x98, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b,
+ 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c,
+ 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e,
+ 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1,
+ 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2,
+ 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8,
+ 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab,
+ 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac,
+ 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae,
+ 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf,
+ 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1,
+ 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2,
+ 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6,
+ 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7,
+ 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9,
+ 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba,
+ 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc,
+ 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe,
+ 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf,
+ 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4,
+ 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7,
+ 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9,
+ 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca,
+ 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce,
+ 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1,
+ 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3,
+ 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4,
+ 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6,
+ 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda,
+ 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb,
+ 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0,
+ 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1,
+ 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2,
+ 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4,
+ 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5,
+ 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6,
+ 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7,
+ 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9,
+ 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb,
+ 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec,
+ 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed,
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef,
+ 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4,
+ 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5,
+ 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9,
+ 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb},
+ {0x00, 0x00, 0x00, 0x01, 0x02, 0x05, 0x07, 0x08,
+ 0x09, 0x0a, 0x0c, 0x0e, 0x10, 0x11, 0x12, 0x14,
+ 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e,
+ 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x26, 0x27,
+ 0x28, 0x28, 0x29, 0x2b, 0x2c, 0x2d, 0x2d, 0x2f,
+ 0x30, 0x31, 0x31, 0x33, 0x34, 0x35, 0x35, 0x37,
+ 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c, 0x3c,
+ 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x43, 0x43,
+ 0x44, 0x44, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49,
+ 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d,
+ 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53,
+ 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58,
+ 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c,
+ 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61,
+ 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65,
+ 0x65, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68,
+ 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c,
+ 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f,
+ 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73,
+ 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77,
+ 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a,
+ 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c,
+ 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80,
+ 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82,
+ 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85,
+ 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89,
+ 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b,
+ 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e,
+ 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90,
+ 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92,
+ 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94,
+ 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97,
+ 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99,
+ 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b,
+ 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d,
+ 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0,
+ 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2,
+ 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8,
+ 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab,
+ 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac,
+ 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae,
+ 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0,
+ 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1,
+ 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3,
+ 0xb3, 0xb3, 0xb3, 0xb4, 0xb3, 0xb4, 0xb4, 0xb4,
+ 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7,
+ 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8,
+ 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba,
+ 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc,
+ 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe,
+ 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf,
+ 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7,
+ 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca,
+ 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb,
+ 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce,
+ 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0,
+ 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3,
+ 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4,
+ 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6,
+ 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda,
+ 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde,
+ 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf,
+ 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0,
+ 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3,
+ 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4,
+ 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6,
+ 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7,
+ 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8,
+ 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9,
+ 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec,
+ 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed,
+ 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1,
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3,
+ 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4,
+ 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
+ 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}
+ },
+ { /* gamma 3 - from tp6810 + cx0342 */
+ {0x08, 0x09, 0x0c, 0x0d, 0x10, 0x11, 0x14, 0x15,
+ 0x17, 0x18, 0x1a, 0x1c, 0x1e, 0x1f, 0x20, 0x23,
+ 0x25, 0x26, 0x27, 0x28, 0x2b, 0x2c, 0x2d, 0x2f,
+ 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39,
+ 0x3a, 0x3b, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x43,
+ 0x44, 0x45, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4b,
+ 0x4c, 0x4d, 0x4d, 0x4f, 0x50, 0x52, 0x53, 0x53,
+ 0x54, 0x55, 0x56, 0x56, 0x58, 0x59, 0x5a, 0x5a,
+ 0x5b, 0x5c, 0x5c, 0x5e, 0x5f, 0x5f, 0x60, 0x61,
+ 0x61, 0x62, 0x63, 0x63, 0x65, 0x66, 0x66, 0x67,
+ 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6c, 0x6c, 0x6d,
+ 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x73,
+ 0x73, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x78,
+ 0x78, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c,
+ 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81,
+ 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86,
+ 0x86, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b,
+ 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f,
+ 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x93,
+ 0x93, 0x93, 0x94, 0x94, 0x96, 0x96, 0x97, 0x97,
+ 0x97, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d,
+ 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1,
+ 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4,
+ 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8,
+ 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac,
+ 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1,
+ 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7,
+ 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9,
+ 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd,
+ 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf,
+ 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2,
+ 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7,
+ 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca,
+ 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc,
+ 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce,
+ 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0,
+ 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3,
+ 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6,
+ 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8,
+ 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda,
+ 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf,
+ 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1,
+ 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2,
+ 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4,
+ 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6,
+ 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8,
+ 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb,
+ 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec,
+ 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee,
+ 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9,
+ 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x03, 0x05, 0x07, 0x09, 0x0a, 0x0c, 0x0d, 0x10,
+ 0x11, 0x12, 0x14, 0x15, 0x17, 0x18, 0x1a, 0x1b,
+ 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x23, 0x25, 0x26,
+ 0x27, 0x28, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2f,
+ 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x37, 0x38,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f,
+ 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x45, 0x45,
+ 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b,
+ 0x4c, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x52, 0x52,
+ 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x58,
+ 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c,
+ 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61,
+ 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x66,
+ 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69,
+ 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e,
+ 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71,
+ 0x71, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75,
+ 0x77, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79,
+ 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c,
+ 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80,
+ 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84,
+ 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86,
+ 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a,
+ 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e,
+ 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90,
+ 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92,
+ 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96,
+ 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98,
+ 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a,
+ 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c,
+ 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e,
+ 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1,
+ 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3,
+ 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8,
+ 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab,
+ 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad,
+ 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf,
+ 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1,
+ 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2,
+ 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4,
+ 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7,
+ 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9,
+ 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba,
+ 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd,
+ 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
+ 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3,
+ 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4,
+ 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7,
+ 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca,
+ 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb,
+ 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce,
+ 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0,
+ 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1,
+ 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4,
+ 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6,
+ 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda,
+ 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde,
+ 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf,
+ 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1,
+ 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3,
+ 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5,
+ 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6,
+ 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7,
+ 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9,
+ 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb,
+ 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec,
+ 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee,
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef,
+ 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5,
+ 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9,
+ 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x07, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14,
+ 0x16, 0x17, 0x18, 0x1b, 0x1c, 0x1e, 0x1f, 0x20,
+ 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2d,
+ 0x2f, 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38,
+ 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f, 0x40,
+ 0x42, 0x43, 0x44, 0x44, 0x45, 0x47, 0x48, 0x49,
+ 0x4a, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x50,
+ 0x52, 0x52, 0x53, 0x54, 0x55, 0x55, 0x56, 0x58,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5e,
+ 0x5f, 0x5f, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63,
+ 0x65, 0x65, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69,
+ 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e,
+ 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x74,
+ 0x74, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x79,
+ 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d,
+ 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81,
+ 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86,
+ 0x88, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b,
+ 0x8b, 0x8b, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f,
+ 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92,
+ 0x92, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96,
+ 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99,
+ 0x99, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c,
+ 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0,
+ 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3,
+ 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5,
+ 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9,
+ 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac,
+ 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf,
+ 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1,
+ 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4,
+ 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7,
+ 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9,
+ 0xb9, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc,
+ 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbf,
+ 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2,
+ 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4,
+ 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9,
+ 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb,
+ 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf,
+ 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1,
+ 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3,
+ 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6,
+ 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8,
+ 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda,
+ 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0,
+ 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4,
+ 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5,
+ 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7,
+ 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9,
+ 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb,
+ 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed,
+ 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee,
+ 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
+ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9,
+ 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd,
+ 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ },
+ { /* gamma 4 - from tp6800 + soi763a */
+ {0x11, 0x14, 0x15, 0x17, 0x1a, 0x1b, 0x1e, 0x1f,
+ 0x22, 0x23, 0x25, 0x27, 0x28, 0x2b, 0x2c, 0x2d,
+ 0x2f, 0x31, 0x33, 0x34, 0x35, 0x38, 0x39, 0x3a,
+ 0x3b, 0x3c, 0x3d, 0x40, 0x42, 0x43, 0x44, 0x45,
+ 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4f,
+ 0x50, 0x52, 0x53, 0x53, 0x54, 0x55, 0x56, 0x58,
+ 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5f, 0x60,
+ 0x61, 0x61, 0x62, 0x63, 0x65, 0x65, 0x66, 0x67,
+ 0x68, 0x68, 0x69, 0x6a, 0x6c, 0x6c, 0x6d, 0x6e,
+ 0x6f, 0x6f, 0x70, 0x71, 0x71, 0x73, 0x74, 0x74,
+ 0x75, 0x77, 0x77, 0x78, 0x79, 0x79, 0x7a, 0x7a,
+ 0x7b, 0x7c, 0x7c, 0x7d, 0x7f, 0x7f, 0x80, 0x80,
+ 0x81, 0x81, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86,
+ 0x86, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b,
+ 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x90, 0x90, 0x91,
+ 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x96,
+ 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99,
+ 0x9a, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9d, 0x9d,
+ 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa2,
+ 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa5, 0xa5,
+ 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa9, 0xa9, 0xab,
+ 0xab, 0xab, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae,
+ 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb1, 0xb1,
+ 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb4, 0xb4,
+ 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb8, 0xb8,
+ 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xbc,
+ 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbf,
+ 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2,
+ 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc9,
+ 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb,
+ 0xcb, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xce,
+ 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0,
+ 0xd0, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3,
+ 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7,
+ 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb,
+ 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf,
+ 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1,
+ 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3,
+ 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5,
+ 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7,
+ 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9,
+ 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec,
+ 0xec, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3,
+ 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5,
+ 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9,
+ 0xf9, 0xf9, 0xfa, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb},
+ {0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x11, 0x14, 0x15,
+ 0x16, 0x17, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20,
+ 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2c,
+ 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x34, 0x35,
+ 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d,
+ 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x45, 0x45,
+ 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c,
+ 0x4d, 0x4f, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x54,
+ 0x54, 0x55, 0x55, 0x56, 0x58, 0x58, 0x59, 0x5a,
+ 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f,
+ 0x60, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63, 0x65,
+ 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69,
+ 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e,
+ 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73,
+ 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77,
+ 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7b,
+ 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f,
+ 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82,
+ 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x86,
+ 0x88, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8a,
+ 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e,
+ 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91,
+ 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94,
+ 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97,
+ 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a,
+ 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c,
+ 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0,
+ 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2,
+ 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4,
+ 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6,
+ 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab,
+ 0xaa, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad,
+ 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf,
+ 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1,
+ 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3,
+ 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6,
+ 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8,
+ 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba,
+ 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd,
+ 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
+ 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7,
+ 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9,
+ 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb,
+ 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce,
+ 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0,
+ 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3,
+ 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4,
+ 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7,
+ 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8,
+ 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda,
+ 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde,
+ 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf,
+ 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1,
+ 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2,
+ 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4,
+ 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5,
+ 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7,
+ 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8,
+ 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb,
+ 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec,
+ 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed,
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef,
+ 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5,
+ 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
+ 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb},
+ {0x0d, 0x10, 0x11, 0x14, 0x15, 0x17, 0x18, 0x1b,
+ 0x1c, 0x1e, 0x20, 0x22, 0x23, 0x26, 0x27, 0x28,
+ 0x29, 0x2b, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34,
+ 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d,
+ 0x3f, 0x40, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48,
+ 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4f, 0x50,
+ 0x52, 0x52, 0x53, 0x54, 0x55, 0x56, 0x56, 0x58,
+ 0x59, 0x5a, 0x5a, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f,
+ 0x60, 0x60, 0x61, 0x62, 0x62, 0x63, 0x65, 0x65,
+ 0x66, 0x67, 0x67, 0x68, 0x69, 0x69, 0x6a, 0x6c,
+ 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70,
+ 0x71, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x77,
+ 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b,
+ 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80,
+ 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x85, 0x85,
+ 0x86, 0x86, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a,
+ 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f,
+ 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92,
+ 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x97,
+ 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d,
+ 0x9d, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1,
+ 0xa1, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4,
+ 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8,
+ 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac,
+ 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1,
+ 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4,
+ 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7,
+ 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba,
+ 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd,
+ 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf,
+ 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc3,
+ 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7,
+ 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca,
+ 0xca, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce,
+ 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0,
+ 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3,
+ 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6,
+ 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8,
+ 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda,
+ 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf,
+ 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1,
+ 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3,
+ 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5,
+ 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6,
+ 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8,
+ 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb,
+ 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed,
+ 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee,
+ 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3,
+ 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4,
+ 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6,
+ 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8,
+ 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9,
+ 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}
+ },
+ { /* gamma 5 */
+ {0x16, 0x18, 0x19, 0x1b, 0x1d, 0x1e, 0x20, 0x21,
+ 0x23, 0x24, 0x25, 0x27, 0x28, 0x2a, 0x2b, 0x2c,
+ 0x2d, 0x2f, 0x30, 0x31, 0x32, 0x34, 0x35, 0x36,
+ 0x37, 0x38, 0x39, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e,
+ 0x4f, 0x50, 0x51, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x56, 0x57, 0x58, 0x59, 0x59, 0x5a, 0x5b,
+ 0x5c, 0x5c, 0x5d, 0x5e, 0x5f, 0x5f, 0x60, 0x61,
+ 0x62, 0x62, 0x63, 0x64, 0x64, 0x65, 0x66, 0x66,
+ 0x67, 0x68, 0x68, 0x69, 0x6a, 0x6a, 0x6b, 0x6b,
+ 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70,
+ 0x71, 0x71, 0x72, 0x73, 0x73, 0x74, 0x74, 0x75,
+ 0x75, 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79,
+ 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7d, 0x7d, 0x7e,
+ 0x7e, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82,
+ 0x82, 0x83, 0x83, 0x84, 0x84, 0x84, 0x85, 0x85,
+ 0x86, 0x86, 0x87, 0x87, 0x88, 0x88, 0x89, 0x89,
+ 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8d,
+ 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90,
+ 0x91, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94,
+ 0x94, 0x94, 0x95, 0x95, 0x96, 0x96, 0x96, 0x97,
+ 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9d, 0x9d,
+ 0x9d, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f, 0xa0, 0xa0,
+ 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3,
+ 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6,
+ 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 0xa8,
+ 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xab, 0xab,
+ 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae,
+ 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0,
+ 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3,
+ 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5,
+ 0xb5, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7,
+ 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba,
+ 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc,
+ 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe,
+ 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1,
+ 0xc1, 0xc1, 0xc1, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7,
+ 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9,
+ 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb,
+ 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf,
+ 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1,
+ 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3, 0xd3, 0xd3,
+ 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, 0xd5,
+ 0xd5, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7,
+ 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9,
+ 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde,
+ 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0,
+ 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4,
+ 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5,
+ 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7,
+ 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9,
+ 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea, 0xeb,
+ 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec,
+ 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee,
+ 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3,
+ 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5,
+ 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd,
+ 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x0f, 0x11, 0x12, 0x14, 0x15, 0x16, 0x18, 0x19,
+ 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x31, 0x32,
+ 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x38, 0x39,
+ 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3e, 0x3f, 0x3f,
+ 0x40, 0x41, 0x42, 0x42, 0x43, 0x44, 0x44, 0x45,
+ 0x46, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b,
+ 0x4b, 0x4c, 0x4c, 0x4d, 0x4e, 0x4e, 0x4f, 0x50,
+ 0x50, 0x51, 0x51, 0x52, 0x53, 0x53, 0x54, 0x54,
+ 0x55, 0x55, 0x56, 0x56, 0x57, 0x58, 0x58, 0x59,
+ 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5d,
+ 0x5d, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61,
+ 0x61, 0x62, 0x62, 0x63, 0x63, 0x64, 0x64, 0x65,
+ 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68,
+ 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c,
+ 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f,
+ 0x6f, 0x70, 0x70, 0x71, 0x71, 0x71, 0x72, 0x72,
+ 0x73, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x75,
+ 0x76, 0x76, 0x76, 0x77, 0x77, 0x78, 0x78, 0x78,
+ 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b,
+ 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7e, 0x7e,
+ 0x7e, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81,
+ 0x81, 0x81, 0x82, 0x82, 0x82, 0x83, 0x83, 0x83,
+ 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86,
+ 0x86, 0x86, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
+ 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d,
+ 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f,
+ 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91,
+ 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94,
+ 0x94, 0x94, 0x94, 0x95, 0x95, 0x95, 0x96, 0x96,
+ 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98,
+ 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c,
+ 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e,
+ 0x9e, 0x9e, 0x9f, 0x9f, 0x9f, 0x9f, 0xa0, 0xa0,
+ 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2,
+ 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa6, 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7,
+ 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9,
+ 0xa9, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xab, 0xab,
+ 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad,
+ 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae,
+ 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0,
+ 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2,
+ 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5,
+ 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7,
+ 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8,
+ 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba,
+ 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb,
+ 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd,
+ 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
+ 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1, 0xc1, 0xc1,
+ 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4,
+ 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7,
+ 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9,
+ 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca,
+ 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce,
+ 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0,
+ 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1,
+ 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3,
+ 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4,
+ 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5,
+ 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7,
+ 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8,
+ 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda,
+ 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde,
+ 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf,
+ 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0,
+ 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3,
+ 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5,
+ 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6,
+ 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7,
+ 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9,
+ 0xe9, 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea,
+ 0xea, 0xea, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb,
+ 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed,
+ 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2,
+ 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4,
+ 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5,
+ 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7,
+ 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
+ 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd,
+ 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x13, 0x15, 0x16, 0x18, 0x19, 0x1b, 0x1c, 0x1e,
+ 0x1f, 0x20, 0x22, 0x23, 0x24, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
+ 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41,
+ 0x42, 0x43, 0x44, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4e,
+ 0x4f, 0x50, 0x50, 0x51, 0x52, 0x53, 0x53, 0x54,
+ 0x55, 0x55, 0x56, 0x57, 0x57, 0x58, 0x59, 0x59,
+ 0x5a, 0x5b, 0x5b, 0x5c, 0x5d, 0x5d, 0x5e, 0x5f,
+ 0x5f, 0x60, 0x60, 0x61, 0x62, 0x62, 0x63, 0x63,
+ 0x64, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68,
+ 0x69, 0x69, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6c,
+ 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70,
+ 0x71, 0x71, 0x72, 0x72, 0x73, 0x73, 0x74, 0x74,
+ 0x75, 0x75, 0x76, 0x76, 0x77, 0x77, 0x78, 0x78,
+ 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c,
+ 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f,
+ 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x83,
+ 0x83, 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86,
+ 0x86, 0x87, 0x87, 0x88, 0x88, 0x88, 0x89, 0x89,
+ 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c,
+ 0x8c, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f,
+ 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92,
+ 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x95,
+ 0x95, 0x95, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97,
+ 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a,
+ 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d,
+ 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f,
+ 0x9f, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2,
+ 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4,
+ 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6,
+ 0xa7, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 0xa8, 0xa9,
+ 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xab, 0xab,
+ 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad,
+ 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf,
+ 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1,
+ 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3,
+ 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb5,
+ 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7,
+ 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9,
+ 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbb,
+ 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd,
+ 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf,
+ 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1,
+ 0xc1, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5,
+ 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7,
+ 0xc7, 0xc7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8,
+ 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca,
+ 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce,
+ 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf,
+ 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1,
+ 0xd1, 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3,
+ 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4,
+ 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd6, 0xd6,
+ 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8,
+ 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9,
+ 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb,
+ 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde,
+ 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0,
+ 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1,
+ 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3,
+ 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4,
+ 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6,
+ 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7,
+ 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9,
+ 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea,
+ 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec,
+ 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed,
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef,
+ 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2,
+ 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3,
+ 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5,
+ 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6,
+ 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8,
+ 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9,
+ 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb,
+ 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe,
+ 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ },
+ };
+
+ reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
+ if (sd->bridge == BRIDGE_TP6810)
+ reg_w(gspca_dev, 0x02, 0x28);
+/* msleep(50); */
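+	/* upload the selected 1024-byte R, G and B gamma curves to the bridge */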
+ gamma = sd->ctrls[GAMMA].val;
+ bulk_w(gspca_dev, 0x00, gamma_tb[gamma][0], 1024);
+ bulk_w(gspca_dev, 0x01, gamma_tb[gamma][1], 1024);
+ bulk_w(gspca_dev, 0x02, gamma_tb[gamma][2], 1024);
+ if (sd->bridge == BRIDGE_TP6810) {
+ int i;
+
+ reg_w(gspca_dev, 0x02, 0x2b);
+ reg_w(gspca_dev, 0x02, 0x28);
+ for (i = 0; i < 6; i++)
+ reg_w(gspca_dev, TP6800_R55_GAMMA_R,
+ gamma_tb[gamma][0][i]);
+ reg_w(gspca_dev, 0x02, 0x2b);
+ reg_w(gspca_dev, 0x02, 0x28);
+ for (i = 0; i < 6; i++)
+ reg_w(gspca_dev, TP6800_R56_GAMMA_G,
+ gamma_tb[gamma][1][i]);
+ reg_w(gspca_dev, 0x02, 0x2b);
+ reg_w(gspca_dev, 0x02, 0x28);
+ for (i = 0; i < 6; i++)
+ reg_w(gspca_dev, TP6800_R57_GAMMA_B,
+ gamma_tb[gamma][2][i]);
+ reg_w(gspca_dev, 0x02, 0x28);
+ }
+ reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03);
+/* msleep(50); */
+}
+
+static void setsharpness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u8 val;
+
+ if (sd->bridge == BRIDGE_TP6800) {
+ val = sd->ctrls[SHARPNESS].val
+ | 0x08; /* grid compensation enable */
+ if (gspca_dev->width == 640)
+ reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
+ else
+ val |= 0x04; /* scaling down enable */
+ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, val);
+ } else {
+ val = (sd->ctrls[SHARPNESS].val << 5) | 0x08;
+ reg_w(gspca_dev, 0x59, val);
+ }
+}
+
+static void setautogain(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
+ return;
+ if (sd->ctrls[AUTOGAIN].val) {
+ sd->ag_cnt = AG_CNT_START;
+ gspca_dev->ctrl_inac |= (1 << EXPOSURE) | (1 << GAIN);
+ } else {
+ sd->ag_cnt = -1;
+ gspca_dev->ctrl_inac &= ~((1 << EXPOSURE) | (1 << GAIN));
+ }
+}
+
+/* set the resolution for sensor cx0342 */
+static void set_resolution(struct gspca_dev *gspca_dev)
+{
+ reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
+ if (gspca_dev->width == 320) {
+ reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06);
+ msleep(100);
+ i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
+ msleep(100);
+ reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03);
+ reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */
+ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, 0x0d);
+ i2c_w(gspca_dev, CX0342_EXPO_LINE_L, 0x37);
+ i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x01);
+ } else {
+ reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x05);
+ msleep(100);
+ i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
+ msleep(100);
+ reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03);
+ reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
+ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, 0x09);
+ i2c_w(gspca_dev, CX0342_EXPO_LINE_L, 0xcf);
+ i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x00);
+ }
+ i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01);
+ bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342],
+ ARRAY_SIZE(color_gain[0]));
+ setgamma(gspca_dev);
+ setquality(gspca_dev);
+}
+
+/* convert the frame rate to a tp68x0 value */
+static int get_fr_idx(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i;
+
+ if (sd->bridge == BRIDGE_TP6800) {
+ for (i = 0; i < ARRAY_SIZE(rates) - 1; i++) {
+ if (sd->framerate >= rates[i])
+ break;
+ }
+ i = 6 - i; /* 1 = 5fps .. 6 = 30fps */
+
+ /* 640x480 * 30 fps does not work */
+ if (i == 6 /* if 30 fps */
+ && gspca_dev->width == 640)
+ i = 0x86; /* 15 fps */
+ } else {
+ for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) {
+ if (sd->framerate >= rates_6810[i])
+ break;
+ }
+ i = 7 - i; /* 3 = 5fps .. 7 = 30fps */
+
+ /* 640x480 * 30 fps does not work */
+ if (i == 7 /* if 30 fps */
+ && gspca_dev->width == 640)
+ i = 6; /* 15 fps */
+ i |= 0x80; /* clock * 1 */
+ }
+ return i;
+}
+
+static void setframerate(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u8 fr_idx;
+
+ fr_idx = get_fr_idx(gspca_dev);
+
+ if (sd->bridge == BRIDGE_TP6810) {
+ reg_r(gspca_dev, 0x7b);
+ reg_w(gspca_dev, 0x7b,
+ sd->sensor == SENSOR_CX0342 ? 0x10 : 0x90);
+ if (sd->ctrls[EXPOSURE].val >= 128)
+ fr_idx = 0xf0; /* lower frame rate */
+ }
+
+ reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, fr_idx);
+
+ if (sd->sensor == SENSOR_CX0342)
+ i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
+}
+
+static void setrgain(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int rgain;
+
+ rgain = sd->ctrls[RGAIN].val;
+ i2c_w(gspca_dev, CX0342_RAW_RGAIN_H, rgain >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, rgain);
+ i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80);
+}
+
+static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
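+	/* on the cx0342, the red and blue gains track the global gain:
+	 * rescale them with the new value and clamp to the 12-bit maximum */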
+ if (sd->sensor == SENSOR_CX0342) {
+ sd->ctrls[BGAIN].val = sd->ctrls[BGAIN].val
+ * val / sd->ctrls[GAIN].val;
+ if (sd->ctrls[BGAIN].val > 4095)
+ sd->ctrls[BGAIN].val = 4095;
+ sd->ctrls[RGAIN].val = sd->ctrls[RGAIN].val
+ * val / sd->ctrls[GAIN].val;
+ if (sd->ctrls[RGAIN].val > 4095)
+ sd->ctrls[RGAIN].val = 4095;
+ }
+ sd->ctrls[GAIN].val = val;
+ if (gspca_dev->streaming)
+ setexposure(gspca_dev);
+ return gspca_dev->usb_err;
+}
+
+static void setbgain(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int bgain;
+
+ bgain = sd->ctrls[BGAIN].val;
+ i2c_w(gspca_dev, CX0342_RAW_BGAIN_H, bgain >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, bgain);
+ i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80);
+}
+
+/* this function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->bridge = id->driver_info;
+
+ gspca_dev->cam.cam_mode = vga_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
+ gspca_dev->cam.mode_framerates = sd->bridge == BRIDGE_TP6800 ?
+ framerates : framerates_6810;
+
+ sd->framerate = 30; /* default: 30 fps */
+ gspca_dev->cam.ctrls = sd->ctrls;
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ static const struct cmd tp6800_preinit[] = {
+ {TP6800_R10_SIF_TYPE, 0x01}, /* sif */
+ {TP6800_R11_SIF_CONTROL, 0x01},
+ {TP6800_R15_GPIO_PU, 0x9f},
+ {TP6800_R16_GPIO_PD, 0x9f},
+ {TP6800_R17_GPIO_IO, 0x80},
+ {TP6800_R18_GPIO_DATA, 0x40}, /* LED off */
+ };
+ static const struct cmd tp6810_preinit[] = {
+ {TP6800_R2F_TIMING_CFG, 0x2f},
+ {TP6800_R15_GPIO_PU, 0x6f},
+ {TP6800_R16_GPIO_PD, 0x40},
+ {TP6800_R17_GPIO_IO, 0x9f},
+ {TP6800_R18_GPIO_DATA, 0xc1}, /* LED off */
+ };
+
+ if (sd->bridge == BRIDGE_TP6800)
+ reg_w_buf(gspca_dev, tp6800_preinit,
+ ARRAY_SIZE(tp6800_preinit));
+ else
+ reg_w_buf(gspca_dev, tp6810_preinit,
+ ARRAY_SIZE(tp6810_preinit));
+ msleep(15);
+ reg_r(gspca_dev, TP6800_R18_GPIO_DATA);
+ PDEBUG(D_PROBE, "gpio: %02x", gspca_dev->usb_buf[0]);
+/* values:
+ * 0x80: snapshot button
+ * 0x40: LED
+ * 0x20: (bridge / sensor) reset for tp6810?
+ * 0x07: sensor type?
+ */
+
+ /* guess the sensor type */
+ if (force_sensor >= 0) {
+ sd->sensor = force_sensor;
+ } else {
+ if (sd->bridge == BRIDGE_TP6800) {
+/*fixme: not sure this is working*/
+ switch (gspca_dev->usb_buf[0] & 0x07) {
+ case 0:
+ sd->sensor = SENSOR_SOI763A;
+ break;
+ case 1:
+ sd->sensor = SENSOR_CX0342;
+ break;
+ }
+ } else {
+ int sensor;
+
+ sensor = probe_6810(gspca_dev);
+ if (sensor < 0) {
+ pr_warn("Unknown sensor %d - forced to soi763a\n",
+ -sensor);
+ sensor = SENSOR_SOI763A;
+ }
+ sd->sensor = sensor;
+ }
+ }
+ if (sd->sensor == SENSOR_SOI763A) {
+ pr_info("Sensor soi763a\n");
+ sd->ctrls[GAMMA].def = sd->bridge == BRIDGE_TP6800 ? 0 : 1;
+ sd->ctrls[GAIN].max = 15;
+ sd->ctrls[GAIN].def = 3;
+ gspca_dev->ctrl_dis = (1 << RGAIN) | (1 << BGAIN);
+ if (sd->bridge == BRIDGE_TP6810) {
+ soi763a_6810_init(gspca_dev);
+#if AUTOGAIN_DEF
+ gspca_dev->ctrl_inac |= (1 << EXPOSURE) | (1 << GAIN);
+#endif
+ } else {
+ gspca_dev->ctrl_dis |= (1 << AUTOGAIN);
+ }
+ } else {
+ pr_info("Sensor cx0342\n");
+ if (sd->bridge == BRIDGE_TP6810) {
+ cx0342_6810_init(gspca_dev);
+#if AUTOGAIN_DEF
+ gspca_dev->ctrl_inac |= (1 << EXPOSURE) | (1 << GAIN);
+#endif
+ } else {
+ gspca_dev->ctrl_dis |= (1 << AUTOGAIN);
+ }
+ }
+
+ if (sd->bridge == BRIDGE_TP6810)
+ sd->ctrls[QUALITY].def = 0; /* auto quality */
+ set_dqt(gspca_dev, 0);
+ return 0;
+}
+
+/* This function is called before choosing the alt setting */
+static int sd_isoc_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ static const struct cmd cx_sensor_init[] = {
+ {CX0342_AUTO_ADC_CALIB, 0x81},
+ {CX0342_EXPO_LINE_L, 0x37},
+ {CX0342_EXPO_LINE_H, 0x01},
+ {CX0342_RAW_GRGAIN_L, 0x00},
+ {CX0342_RAW_GBGAIN_L, 0x00},
+ {CX0342_RAW_RGAIN_L, 0x00},
+ {CX0342_RAW_BGAIN_L, 0x00},
+ {CX0342_SYS_CTRL_0, 0x81},
+ };
+ static const struct cmd cx_bridge_init[] = {
+ {0x4d, 0x00},
+ {0x4c, 0xff},
+ {0x4e, 0xff},
+ {0x4f, 0x00},
+ };
+ static const struct cmd ov_sensor_init[] = {
+ {0x10, 0x75}, /* exposure */
+ {0x76, 0x03},
+ {0x00, 0x00}, /* gain */
+ };
+ static const struct cmd ov_bridge_init[] = {
+ {0x7b, 0x90},
+ {TP6800_R3F_FRAME_RATE, 0x87},
+ };
+
+ if (sd->bridge == BRIDGE_TP6800)
+ return 0;
+ if (sd->sensor == SENSOR_CX0342) {
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20);
+ reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x87);
+ i2c_w_buf(gspca_dev, cx_sensor_init,
+ ARRAY_SIZE(cx_sensor_init));
+ reg_w_buf(gspca_dev, cx_bridge_init,
+ ARRAY_SIZE(cx_bridge_init));
+ bulk_w(gspca_dev, 0x03, color_null, sizeof color_null);
+ reg_w(gspca_dev, 0x59, 0x40);
+ } else {
+ reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x21);
+ i2c_w_buf(gspca_dev, ov_sensor_init,
+ ARRAY_SIZE(ov_sensor_init));
+ reg_r(gspca_dev, 0x7b);
+ reg_w_buf(gspca_dev, ov_bridge_init,
+ ARRAY_SIZE(ov_bridge_init));
+ }
+ reg_w(gspca_dev, TP6800_R78_FORMAT,
+ gspca_dev->curr_mode ? 0x00 : 0x01);
+ return gspca_dev->usb_err;
+}
+
+static void set_led(struct gspca_dev *gspca_dev, int on)
+{
+ u8 data;
+
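+	/* GPIO bit 0x40 drives the LED (cleared = on) */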
+ reg_r(gspca_dev, TP6800_R18_GPIO_DATA);
+ data = gspca_dev->usb_buf[0];
+ if (on)
+ data &= ~0x40;
+ else
+ data |= 0x40;
+ reg_w(gspca_dev, TP6800_R18_GPIO_DATA, data);
+}
+
+static void cx0342_6800_start(struct gspca_dev *gspca_dev)
+{
+ static const struct cmd reg_init[] = {
+/*fixme: is this useful?*/
+ {TP6800_R17_GPIO_IO, 0x9f},
+ {TP6800_R16_GPIO_PD, 0x40},
+ {TP6800_R10_SIF_TYPE, 0x00}, /* i2c 8 bits */
+ {TP6800_R50, 0x00},
+ {TP6800_R51, 0x00},
+ {TP6800_R52, 0xff},
+ {TP6800_R53, 0x03},
+ {TP6800_R54_DARK_CFG, 0x07},
+ {TP6800_R5C_EDGE_THRLD, 0x40},
+ {TP6800_R7A_BLK_THRLD, 0x40},
+ {TP6800_R2F_TIMING_CFG, 0x17},
+ {TP6800_R30_SENSOR_CFG, 0x18}, /* G1B..RG0 */
+ {TP6800_R37_FRONT_DARK_ST, 0x00},
+ {TP6800_R38_FRONT_DARK_END, 0x00},
+ {TP6800_R39_REAR_DARK_ST_L, 0x00},
+ {TP6800_R3A_REAR_DARK_ST_H, 0x00},
+ {TP6800_R3B_REAR_DARK_END_L, 0x00},
+ {TP6800_R3C_REAR_DARK_END_H, 0x00},
+ {TP6800_R3D_HORIZ_DARK_LINE_L, 0x00},
+ {TP6800_R3E_HORIZ_DARK_LINE_H, 0x00},
+ {TP6800_R21_ENDP_1_CTL, 0x03},
+
+ {TP6800_R31_PIXEL_START, 0x0b},
+ {TP6800_R32_PIXEL_END_L, 0x8a},
+ {TP6800_R33_PIXEL_END_H, 0x02},
+ {TP6800_R34_LINE_START, 0x0e},
+ {TP6800_R35_LINE_END_L, 0xf4},
+ {TP6800_R36_LINE_END_H, 0x01},
+ {TP6800_R78_FORMAT, 0x00},
+ {TP6800_R12_SIF_ADDR_S, 0x20}, /* cx0342 i2c addr */
+ };
+ static const struct cmd sensor_init[] = {
+ {CX0342_OUTPUT_CTRL, 0x07},
+ {CX0342_BYPASS_MODE, 0x58},
+ {CX0342_GPXLTHD_L, 0x16},
+ {CX0342_RBPXLTHD_L, 0x16},
+ {CX0342_PLANETHD_L, 0xc0},
+ {CX0342_PLANETHD_H, 0x03},
+ {CX0342_RB_GAP_L, 0xff},
+ {CX0342_RB_GAP_H, 0x07},
+ {CX0342_G_GAP_L, 0xff},
+ {CX0342_G_GAP_H, 0x07},
+ {CX0342_RST_OVERFLOW_L, 0x5c},
+ {CX0342_RST_OVERFLOW_H, 0x01},
+ {CX0342_DATA_OVERFLOW_L, 0xfc},
+ {CX0342_DATA_OVERFLOW_H, 0x03},
+ {CX0342_DATA_UNDERFLOW_L, 0x00},
+ {CX0342_DATA_UNDERFLOW_H, 0x00},
+ {CX0342_SYS_CTRL_0, 0x40},
+ {CX0342_GLOBAL_GAIN, 0x01},
+ {CX0342_CLOCK_GEN, 0x00},
+ {CX0342_SYS_CTRL_0, 0x02},
+ {CX0342_IDLE_CTRL, 0x05},
+ {CX0342_ADCGN, 0x00},
+ {CX0342_ADC_CTL, 0x00},
+ {CX0342_LVRST_BLBIAS, 0x01},
+ {CX0342_VTHSEL, 0x0b},
+ {CX0342_RAMP_RIV, 0x0b},
+ {CX0342_LDOSEL, 0x07},
+ {CX0342_SPV_VALUE_L, 0x40},
+ {CX0342_SPV_VALUE_H, 0x02},
+ };
+
+ reg_w_buf(gspca_dev, reg_init, ARRAY_SIZE(reg_init));
+ i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
+ i2c_w_buf(gspca_dev, cx0342_timing_seq, ARRAY_SIZE(cx0342_timing_seq));
+ reg_w(gspca_dev, TP6800_R5C_EDGE_THRLD, 0x10);
+ reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00);
+ i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x00);
+ i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01);
+ setexposure(gspca_dev);
+ set_led(gspca_dev, 1);
+ set_resolution(gspca_dev);
+}
+
+static void cx0342_6810_start(struct gspca_dev *gspca_dev)
+{
+ static const struct cmd sensor_init_2[] = {
+ {CX0342_EXPO_LINE_L, 0x6f},
+ {CX0342_EXPO_LINE_H, 0x02},
+ {CX0342_RAW_GRGAIN_L, 0x00},
+ {CX0342_RAW_GBGAIN_L, 0x00},
+ {CX0342_RAW_RGAIN_L, 0x00},
+ {CX0342_RAW_BGAIN_L, 0x00},
+ {CX0342_SYS_CTRL_0, 0x81},
+ };
+ static const struct cmd bridge_init_2[] = {
+ {0x4d, 0x00},
+ {0x4c, 0xff},
+ {0x4e, 0xff},
+ {0x4f, 0x00},
+ {TP6800_R7A_BLK_THRLD, 0x00},
+ {TP6800_R79_QUALITY, 0x04},
+ {TP6800_R79_QUALITY, 0x01},
+ };
+ static const struct cmd bridge_init_3[] = {
+ {TP6800_R31_PIXEL_START, 0x08},
+ {TP6800_R32_PIXEL_END_L, 0x87},
+ {TP6800_R33_PIXEL_END_H, 0x02},
+ {TP6800_R34_LINE_START, 0x0e},
+ {TP6800_R35_LINE_END_L, 0xf4},
+ {TP6800_R36_LINE_END_H, 0x01},
+ };
+ static const struct cmd sensor_init_3[] = {
+ {CX0342_AUTO_ADC_CALIB, 0x81},
+ {CX0342_EXPO_LINE_L, 0x6f},
+ {CX0342_EXPO_LINE_H, 0x02},
+ {CX0342_RAW_GRGAIN_L, 0x00},
+ {CX0342_RAW_GBGAIN_L, 0x00},
+ {CX0342_RAW_RGAIN_L, 0x00},
+ {CX0342_RAW_BGAIN_L, 0x00},
+ {CX0342_SYS_CTRL_0, 0x81},
+ };
+ static const struct cmd bridge_init_5[] = {
+ {0x4d, 0x00},
+ {0x4c, 0xff},
+ {0x4e, 0xff},
+ {0x4f, 0x00},
+ };
+ static const struct cmd sensor_init_4[] = {
+ {CX0342_EXPO_LINE_L, 0xd3},
+ {CX0342_EXPO_LINE_H, 0x01},
+/*fixme: gains, but 00..80 only*/
+ {CX0342_RAW_GRGAIN_L, 0x40},
+ {CX0342_RAW_GBGAIN_L, 0x40},
+ {CX0342_RAW_RGAIN_L, 0x40},
+ {CX0342_RAW_BGAIN_L, 0x40},
+ {CX0342_SYS_CTRL_0, 0x81},
+ };
+ static const struct cmd sensor_init_5[] = {
+ {CX0342_IDLE_CTRL, 0x05},
+ {CX0342_ADCGN, 0x00},
+ {CX0342_ADC_CTL, 0x00},
+ {CX0342_LVRST_BLBIAS, 0x01},
+ {CX0342_VTHSEL, 0x0b},
+ {CX0342_RAMP_RIV, 0x0b},
+ {CX0342_LDOSEL, 0x07},
+ {CX0342_SPV_VALUE_L, 0x40},
+ {CX0342_SPV_VALUE_H, 0x02},
+ {CX0342_AUTO_ADC_CALIB, 0x81},
+ };
+
+ reg_w(gspca_dev, 0x22, gspca_dev->alt);
+ i2c_w_buf(gspca_dev, sensor_init_2, ARRAY_SIZE(sensor_init_2));
+ reg_w_buf(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2));
+ reg_w_buf(gspca_dev, tp6810_cx_init_common,
+ ARRAY_SIZE(tp6810_cx_init_common));
+ reg_w_buf(gspca_dev, bridge_init_3, ARRAY_SIZE(bridge_init_3));
+ if (gspca_dev->curr_mode) {
+ reg_w(gspca_dev, 0x4a, 0x7f);
+ reg_w(gspca_dev, 0x07, 0x05);
+ reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
+ } else {
+ reg_w(gspca_dev, 0x4a, 0xff);
+ reg_w(gspca_dev, 0x07, 0x85);
+ reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */
+ }
+ setgamma(gspca_dev);
+ reg_w_buf(gspca_dev, tp6810_bridge_start,
+ ARRAY_SIZE(tp6810_bridge_start));
+ setsharpness(gspca_dev);
+ bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342],
+ ARRAY_SIZE(color_gain[0]));
+ reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x87);
+ i2c_w_buf(gspca_dev, sensor_init_3, ARRAY_SIZE(sensor_init_3));
+ reg_w_buf(gspca_dev, bridge_init_5, ARRAY_SIZE(bridge_init_5));
+ i2c_w_buf(gspca_dev, sensor_init_4, ARRAY_SIZE(sensor_init_4));
+ reg_w_buf(gspca_dev, bridge_init_5, ARRAY_SIZE(bridge_init_5));
+ i2c_w_buf(gspca_dev, sensor_init_5, ARRAY_SIZE(sensor_init_5));
+
+ set_led(gspca_dev, 1);
+/* setquality(gspca_dev); */
+}
+
+static void soi763a_6800_start(struct gspca_dev *gspca_dev)
+{
+ static const struct cmd reg_init[] = {
+ {TP6800_R79_QUALITY, 0x04},
+ {TP6800_R79_QUALITY, 0x01},
+ {TP6800_R10_SIF_TYPE, 0x00}, /* i2c 8 bits */
+
+ {TP6800_R50, 0x00},
+ {TP6800_R51, 0x00},
+ {TP6800_R52, 0xff},
+ {TP6800_R53, 0x03},
+ {TP6800_R54_DARK_CFG, 0x07},
+ {TP6800_R5C_EDGE_THRLD, 0x40},
+
+ {TP6800_R79_QUALITY, 0x03},
+ {TP6800_R7A_BLK_THRLD, 0x40},
+
+ {TP6800_R2F_TIMING_CFG, 0x46},
+ {TP6800_R30_SENSOR_CFG, 0x10}, /* BG1..G0R */
+ {TP6800_R37_FRONT_DARK_ST, 0x00},
+ {TP6800_R38_FRONT_DARK_END, 0x00},
+ {TP6800_R39_REAR_DARK_ST_L, 0x00},
+ {TP6800_R3A_REAR_DARK_ST_H, 0x00},
+ {TP6800_R3B_REAR_DARK_END_L, 0x00},
+ {TP6800_R3C_REAR_DARK_END_H, 0x00},
+ {TP6800_R3D_HORIZ_DARK_LINE_L, 0x00},
+ {TP6800_R3E_HORIZ_DARK_LINE_H, 0x00},
+ {TP6800_R21_ENDP_1_CTL, 0x03},
+
+ {TP6800_R3F_FRAME_RATE, 0x04}, /* 15 fps */
+ {TP6800_R5D_DEMOSAIC_CFG, 0x0e}, /* scale down - medium edge */
+
+ {TP6800_R31_PIXEL_START, 0x1b},
+ {TP6800_R32_PIXEL_END_L, 0x9a},
+ {TP6800_R33_PIXEL_END_H, 0x02},
+ {TP6800_R34_LINE_START, 0x0f},
+ {TP6800_R35_LINE_END_L, 0xf4},
+ {TP6800_R36_LINE_END_H, 0x01},
+ {TP6800_R78_FORMAT, 0x01}, /* qvga */
+ {TP6800_R12_SIF_ADDR_S, 0x21}, /* soi763a i2c addr */
+ {TP6800_R1A_SIF_TX_DATA2, 0x00},
+ };
+ static const struct cmd sensor_init[] = {
+ {0x12, 0x48}, /* mirror - RGB */
+ {0x13, 0xa0}, /* clock - no AGC nor AEC */
+ {0x03, 0xa4}, /* saturation */
+ {0x04, 0x30}, /* hue */
+ {0x05, 0x88}, /* contrast */
+ {0x06, 0x60}, /* brightness */
+ {0x10, 0x41}, /* AEC */
+ {0x11, 0x40}, /* clock rate */
+ {0x13, 0xa0},
+ {0x14, 0x00}, /* 640x480 */
+ {0x15, 0x14},
+ {0x1f, 0x41},
+ {0x20, 0x80},
+ {0x23, 0xee},
+ {0x24, 0x50},
+ {0x25, 0x7a},
+ {0x26, 0x00},
+ {0x27, 0xe2},
+ {0x28, 0xb0},
+ {0x2a, 0x00},
+ {0x2b, 0x00},
+ {0x2d, 0x81},
+ {0x2f, 0x9d},
+ {0x60, 0x80},
+ {0x61, 0x00},
+ {0x62, 0x88},
+ {0x63, 0x11},
+ {0x64, 0x89},
+ {0x65, 0x00},
+ {0x67, 0x94},
+ {0x68, 0x7a},
+ {0x69, 0x0f},
+ {0x6c, 0x80},
+ {0x6d, 0x80},
+ {0x6e, 0x80},
+ {0x6f, 0xff},
+ {0x71, 0x20},
+ {0x74, 0x20},
+ {0x75, 0x86},
+ {0x77, 0xb5},
+ {0x17, 0x18}, /* H href start */
+ {0x18, 0xbf}, /* H href end */
+ {0x19, 0x03}, /* V start */
+ {0x1a, 0xf8}, /* V end */
+ {0x01, 0x80}, /* blue gain */
+ {0x02, 0x80}, /* red gain */
+ };
+
+ reg_w_buf(gspca_dev, reg_init, ARRAY_SIZE(reg_init));
+
+ i2c_w(gspca_dev, 0x12, 0x80); /* sensor reset */
+ msleep(10);
+
+ i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
+
+ reg_w(gspca_dev, TP6800_R5C_EDGE_THRLD, 0x10);
+ reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00);
+
+ setsharpness(gspca_dev);
+
+ bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A],
+ ARRAY_SIZE(color_gain[0]));
+
+ set_led(gspca_dev, 1);
+ setexposure(gspca_dev);
+ setquality(gspca_dev);
+ setgamma(gspca_dev);
+}
+
+static void soi763a_6810_start(struct gspca_dev *gspca_dev)
+{
+ static const struct cmd bridge_init_2[] = {
+ {TP6800_R7A_BLK_THRLD, 0x00},
+ {TP6800_R79_QUALITY, 0x04},
+ {TP6800_R79_QUALITY, 0x01},
+ };
+ static const struct cmd bridge_init_3[] = {
+ {TP6800_R31_PIXEL_START, 0x20},
+ {TP6800_R32_PIXEL_END_L, 0x9f},
+ {TP6800_R33_PIXEL_END_H, 0x02},
+ {TP6800_R34_LINE_START, 0x13},
+ {TP6800_R35_LINE_END_L, 0xf8},
+ {TP6800_R36_LINE_END_H, 0x01},
+ };
+ static const struct cmd bridge_init_6[] = {
+ {0x08, 0xff},
+ {0x09, 0xff},
+ {0x0a, 0x5f},
+ {0x0b, 0x80},
+ };
+
+ reg_w(gspca_dev, 0x22, gspca_dev->alt);
+ bulk_w(gspca_dev, 0x03, color_null, sizeof color_null);
+ reg_w(gspca_dev, 0x59, 0x40);
+ setexposure(gspca_dev);
+ reg_w_buf(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2));
+ reg_w_buf(gspca_dev, tp6810_ov_init_common,
+ ARRAY_SIZE(tp6810_ov_init_common));
+ reg_w_buf(gspca_dev, bridge_init_3, ARRAY_SIZE(bridge_init_3));
+ if (gspca_dev->curr_mode) {
+ reg_w(gspca_dev, 0x4a, 0x7f);
+ reg_w(gspca_dev, 0x07, 0x05);
+ reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
+ } else {
+ reg_w(gspca_dev, 0x4a, 0xff);
+ reg_w(gspca_dev, 0x07, 0x85);
+ reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */
+ }
+ setgamma(gspca_dev);
+ reg_w_buf(gspca_dev, tp6810_bridge_start,
+ ARRAY_SIZE(tp6810_bridge_start));
+
+ if (gspca_dev->curr_mode) {
+ reg_w(gspca_dev, 0x4f, 0x00);
+ reg_w(gspca_dev, 0x4e, 0x7c);
+ }
+
+ reg_w(gspca_dev, 0x00, 0x00);
+
+ setsharpness(gspca_dev);
+ bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A],
+ ARRAY_SIZE(color_gain[0]));
+ set_led(gspca_dev, 1);
+ reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0xf0);
+ setexposure(gspca_dev);
+ reg_w_buf(gspca_dev, bridge_init_6, ARRAY_SIZE(bridge_init_6));
+}
+
+/* -- start the camera -- */
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width);
+ set_dqt(gspca_dev, sd->quality);
+ if (sd->bridge == BRIDGE_TP6800) {
+ if (sd->sensor == SENSOR_CX0342)
+ cx0342_6800_start(gspca_dev);
+ else
+ soi763a_6800_start(gspca_dev);
+ } else {
+ if (sd->sensor == SENSOR_CX0342)
+ cx0342_6810_start(gspca_dev);
+ else
+ soi763a_6810_start(gspca_dev);
+ reg_w_buf(gspca_dev, tp6810_late_start,
+ ARRAY_SIZE(tp6810_late_start));
+ reg_w(gspca_dev, 0x80, 0x03);
+ reg_w(gspca_dev, 0x82, gspca_dev->curr_mode ? 0x0a : 0x0e);
+
+ setexposure(gspca_dev);
+ setquality(gspca_dev);
+ setautogain(gspca_dev);
+ }
+
+ setframerate(gspca_dev);
+
+ return gspca_dev->usb_err;
+}
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->bridge == BRIDGE_TP6800)
+ reg_w(gspca_dev, TP6800_R2F_TIMING_CFG, 0x03);
+ set_led(gspca_dev, 0);
+ reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
+}
+
+static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data,
+ int len)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ /* the start of frame contains:
+ * ff d8
+ * ff fe
+ * width / 16
+ * height / 8
+ * quality
+ */
+ if (sd->bridge == BRIDGE_TP6810) {
+ if (*data != 0x5a) {
+/*fixme: don't discard the whole frame..*/
+ if (*data == 0xaa || *data == 0x00)
+ return;
+ if (*data > 0xc0) {
+ PDEBUG(D_FRAM, "bad frame");
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+ }
+ data++;
+ len--;
+ if (*data == 0xff && data[1] == 0xd8) {
+/*fixme: there may be information in the 4 high bits*/
+ if ((data[6] & 0x0f) != sd->quality)
+ set_dqt(gspca_dev, data[6] & 0x0f);
+ gspca_frame_add(gspca_dev, FIRST_PACKET,
+ sd->jpeg_hdr, JPEG_HDR_SZ);
+ gspca_frame_add(gspca_dev, INTER_PACKET,
+ data + 7, len - 7);
+ } else if (data[len - 2] == 0xff && data[len - 1] == 0xd9) {
+ gspca_frame_add(gspca_dev, LAST_PACKET,
+ data, len);
+ } else {
+ gspca_frame_add(gspca_dev, INTER_PACKET,
+ data, len);
+ }
+ return;
+ }
+
+ switch (*data) {
+ case 0x55:
+ gspca_frame_add(gspca_dev, LAST_PACKET, data, 0);
+
+ if (len < 8
+ || data[1] != 0xff || data[2] != 0xd8
+ || data[3] != 0xff || data[4] != 0xfe) {
+
+ /* Have only seen this with corrupt frames */
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+ if (data[7] != sd->quality)
+ set_dqt(gspca_dev, data[7]);
+ gspca_frame_add(gspca_dev, FIRST_PACKET,
+ sd->jpeg_hdr, JPEG_HDR_SZ);
+ gspca_frame_add(gspca_dev, INTER_PACKET,
+ data + 8, len - 8);
+ break;
+ case 0xaa:
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ break;
+ case 0xcc:
+ if (data[1] != 0xff || data[2] != 0xd8)
+ gspca_frame_add(gspca_dev, INTER_PACKET,
+ data + 1, len - 1);
+ else
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ break;
+ }
+}
+
+/* -- do autogain -- */
+/* gain setting is done in setexposure() for tp6810 */
+static void setgain(struct gspca_dev *gspca_dev) {}
+/* !! coarse_grained_expo_autogain is not used !! */
+#define exp_too_low_cnt bridge
+#define exp_too_high_cnt sensor
+
+#include "autogain_functions.h"
+static void sd_dq_callback(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int ret, alen;
+ int luma, expo;
+
+ if (sd->ag_cnt < 0)
+ return;
+ if (--sd->ag_cnt > 5)
+ return;
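+	/* during the last frames of the countdown, run one step per frame
+	 * of the sequence which fetches the luminance statistics
+	 * from the bridge over bulk endpoint 2 */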
+ switch (sd->ag_cnt) {
+/* case 5: */
+ default:
+ reg_w(gspca_dev, 0x7d, 0x00);
+ break;
+ case 4:
+ reg_w(gspca_dev, 0x27, 0xb0);
+ break;
+ case 3:
+ reg_w(gspca_dev, 0x0c, 0x01);
+ break;
+ case 2:
+ ret = usb_bulk_msg(gspca_dev->dev,
+ usb_rcvbulkpipe(gspca_dev->dev, 0x02),
+ gspca_dev->usb_buf,
+ 32,
+ &alen,
+ 500);
+ if (ret < 0) {
+ pr_err("bulk err %d\n", ret);
+ break;
+ }
+ /* values not used (unknown) */
+ break;
+ case 1:
+ reg_w(gspca_dev, 0x27, 0xd0);
+ break;
+ case 0:
+ ret = usb_bulk_msg(gspca_dev->dev,
+ usb_rcvbulkpipe(gspca_dev->dev, 0x02),
+ gspca_dev->usb_buf,
+ 32,
+ &alen,
+ 500);
+ if (ret < 0) {
+ pr_err("bulk err %d\n", ret);
+ break;
+ }
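+		/* average the 8 16-bit little-endian luminance values
+		 * returned in the bulk buffer */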
+ luma = ((gspca_dev->usb_buf[8] << 8) + gspca_dev->usb_buf[7] +
+ (gspca_dev->usb_buf[11] << 8) + gspca_dev->usb_buf[10] +
+ (gspca_dev->usb_buf[14] << 8) + gspca_dev->usb_buf[13] +
+ (gspca_dev->usb_buf[17] << 8) + gspca_dev->usb_buf[16] +
+ (gspca_dev->usb_buf[20] << 8) + gspca_dev->usb_buf[19] +
+ (gspca_dev->usb_buf[23] << 8) + gspca_dev->usb_buf[22] +
+ (gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] +
+ (gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28])
+ / 8;
+ if (gspca_dev->width == 640)
+ luma /= 4;
+ reg_w(gspca_dev, 0x7d, 0x00);
+
+ expo = sd->ctrls[EXPOSURE].val;
+ ret = auto_gain_n_exposure(gspca_dev, luma,
+ 60, /* desired luma */
+ 6, /* dead zone */
+ 2, /* gain knee */
+ 70); /* expo knee */
+ sd->ag_cnt = AG_CNT_START;
+ if (sd->bridge == BRIDGE_TP6810) {
+ if ((expo >= 128 && sd->ctrls[EXPOSURE].val < 128)
+ || (expo < 128 && sd->ctrls[EXPOSURE].val >= 128))
+ setframerate(gspca_dev);
+ }
+ break;
+ }
+}
+
+/* get stream parameters (framerate) */
+static void sd_get_streamparm(struct gspca_dev *gspca_dev,
+ struct v4l2_streamparm *parm)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_captureparm *cp = &parm->parm.capture;
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ int fr, i;
+
+ cp->capability |= V4L2_CAP_TIMEPERFRAME;
+ tpf->numerator = 1;
+ i = get_fr_idx(gspca_dev);
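+	/* bit 7 flags the tp6810 encoding or the tp6800 forced 15 fps case */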
+ if (i & 0x80) {
+ if (sd->bridge == BRIDGE_TP6800)
+ fr = rates[6 - (i & 0x07)];
+ else
+ fr = rates_6810[7 - (i & 0x07)];
+ } else {
+ fr = rates[6 - i];
+ }
+ tpf->denominator = fr;
+}
+
+/* set stream parameters (framerate) */
+static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ struct v4l2_streamparm *parm)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_captureparm *cp = &parm->parm.capture;
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ int fr, i;
+
+ sd->framerate = tpf->denominator / tpf->numerator;
+ if (gspca_dev->streaming)
+ setframerate(gspca_dev);
+
+ /* Return the actual framerate */
+ i = get_fr_idx(gspca_dev);
+ if (i & 0x80)
+ fr = rates_6810[7 - (i & 0x07)];
+ else
+ fr = rates[6 - i];
+ tpf->numerator = 1;
+ tpf->denominator = fr;
+}
+
+static int sd_set_jcomp(struct gspca_dev *gspca_dev,
+ struct v4l2_jpegcompression *jcomp)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->sensor == SENSOR_SOI763A)
+ jpeg_set_qual(sd->jpeg_hdr, jcomp->quality);
+/* else
+ fixme: TODO
+*/
+ return gspca_dev->usb_err;
+}
+
+static int sd_get_jcomp(struct gspca_dev *gspca_dev,
+ struct v4l2_jpegcompression *jcomp)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ memset(jcomp, 0, sizeof *jcomp);
+ jcomp->quality = jpeg_q[sd->quality];
+ jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
+ | V4L2_JPEG_MARKER_DQT;
+ return 0;
+}
+
+static struct ctrl sd_ctrls[NCTRLS] = {
+[EXPOSURE] = {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0x01,
+ .maximum = 0xdc,
+ .step = 1,
+ .default_value = 0x4e,
+ },
+ .set_control = setexposure
+ },
+[QUALITY] = {
+ {
+ .id = V4L2_CID_PRIVATE_BASE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Compression quality",
+ .minimum = 0,
+ .maximum = 15,
+ .step = 1,
+ .default_value = 13,
+ },
+ .set_control = setquality
+ },
+[RGAIN] = {
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red balance",
+ .minimum = 0,
+ .maximum = 4095,
+ .step = 1,
+ .default_value = 256,
+ },
+ .set_control = setrgain
+ },
+[GAIN] = {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 4095,
+ .step = 1,
+ .default_value = 256,
+ },
+ .set = sd_setgain
+ },
+[BGAIN] = {
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue balance",
+ .minimum = 0,
+ .maximum = 4095,
+ .step = 1,
+ .default_value = 256,
+ },
+ .set_control = setbgain
+ },
+[SHARPNESS] = {
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 2,
+ },
+ .set_control = setsharpness
+ },
+[GAMMA] = {
+ {
+ .id = V4L2_CID_GAMMA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gamma",
+ .minimum = 0,
+ .maximum = NGAMMA - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ .set_control = setgamma
+ },
+[AUTOGAIN] = {
+ {
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Gain",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = AUTOGAIN_DEF
+ },
+ .set_control = setautogain
+ },
+};
+
+static const struct sd_desc sd_desc = {
+ .name = KBUILD_MODNAME,
+ .ctrls = sd_ctrls,
+ .nctrls = NCTRLS,
+ .config = sd_config,
+ .init = sd_init,
+ .isoc_init = sd_isoc_init,
+ .start = sd_start,
+ .stopN = sd_stopN,
+ .pkt_scan = sd_pkt_scan,
+ .dq_callback = sd_dq_callback,
+ .get_streamparm = sd_get_streamparm,
+ .set_streamparm = sd_set_streamparm,
+ .get_jcomp = sd_get_jcomp,
+ .set_jcomp = sd_set_jcomp,
+};
+
+static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x06a2, 0x0003), .driver_info = BRIDGE_TP6800},
+ {USB_DEVICE(0x06a2, 0x6810), .driver_info = BRIDGE_TP6810},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, device_table);
+
+static int sd_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(interface, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+ .reset_resume = gspca_resume,
+#endif
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
+
+module_param(force_sensor, int, 0644);
+MODULE_PARM_DESC(force_sensor,
+ "Force sensor. 0: cx0342, 1: soi763a");
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 6caed734a06..7ee2c8271dc 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "vc032x"
#include "gspca.h"
@@ -3169,7 +3171,7 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- err("reg_r err %d", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -3210,7 +3212,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev,
value, index, NULL, 0,
500);
if (ret < 0) {
- err("reg_w err %d", ret);
+ pr_err("reg_w err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -3235,8 +3237,7 @@ static u16 read_sensor_register(struct gspca_dev *gspca_dev,
reg_r(gspca_dev, 0xa1, 0xb33f, 1);
if (!(gspca_dev->usb_buf[0] & 0x02)) {
- err("I2c Bus Busy Wait %02x",
- gspca_dev->usb_buf[0]);
+ pr_err("I2c Bus Busy Wait %02x\n", gspca_dev->usb_buf[0]);
return 0;
}
reg_w(gspca_dev, 0xa0, address, 0xb33a);
@@ -3349,7 +3350,7 @@ static void i2c_write(struct gspca_dev *gspca_dev,
msleep(20);
} while (--retry > 0);
if (retry <= 0)
- err("i2c_write timeout");
+ pr_err("i2c_write timeout\n");
}
static void put_tab_to_reg(struct gspca_dev *gspca_dev,
@@ -3446,7 +3447,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
switch (sensor) {
case -1:
- err("Unknown sensor...");
+ pr_err("Unknown sensor...\n");
return -EINVAL;
case SENSOR_HV7131R:
PDEBUG(D_PROBE, "Find Sensor HV7131R");
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index 84dfbab923b..81dd4c99d02 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "vicam"
#define HEADER_SIZE 64
@@ -117,7 +119,7 @@ static int vicam_control_msg(struct gspca_dev *gspca_dev, u8 request,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, data, len, 1000);
if (ret < 0)
- err("control msg req %02X error %d", request, ret);
+ pr_err("control msg req %02X error %d\n", request, ret);
return ret;
}
@@ -189,8 +191,8 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
data, size, &act_len, 10000);
/* successful, it returns 0, otherwise negative */
if (ret < 0 || act_len != size) {
- err("bulk read fail (%d) len %d/%d",
- ret, act_len, size);
+ pr_err("bulk read fail (%d) len %d/%d\n",
+ ret, act_len, size);
return -EIO;
}
return 0;
@@ -216,7 +218,7 @@ static void vicam_dostream(struct work_struct *work)
HEADER_SIZE;
buffer = kmalloc(frame_sz, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- err("Couldn't allocate USB buffer");
+ pr_err("Couldn't allocate USB buffer\n");
goto exit;
}
@@ -269,7 +271,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
ret = request_ihex_firmware(&fw, "vicam/firmware.fw",
&gspca_dev->dev->dev);
if (ret) {
- err("Failed to load \"vicam/firmware.fw\": %d\n", ret);
+ pr_err("Failed to load \"vicam/firmware.fw\": %d\n", ret);
return ret;
}
diff --git a/drivers/media/video/gspca/w996Xcf.c b/drivers/media/video/gspca/w996Xcf.c
index 4a9e622e5e1..27d2cef0692 100644
--- a/drivers/media/video/gspca/w996Xcf.c
+++ b/drivers/media/video/gspca/w996Xcf.c
@@ -31,6 +31,8 @@
the sensor drivers to v4l2 sub drivers, and properly split of this
driver from ov519.c */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define W9968CF_I2C_BUS_DELAY 4 /* delay in us for I2C bit r/w operations */
#define Y_QUANTABLE (&sd->jpeg_hdr[JPEG_QT0_OFFSET])
@@ -81,7 +83,7 @@ static void w9968cf_write_fsb(struct sd *sd, u16* data)
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
value, 0x06, sd->gspca_dev.usb_buf, 6, 500);
if (ret < 0) {
- err("Write FSB registers failed (%d)", ret);
+ pr_err("Write FSB registers failed (%d)\n", ret);
sd->gspca_dev.usb_err = ret;
}
}
@@ -108,7 +110,7 @@ static void w9968cf_write_sb(struct sd *sd, u16 value)
udelay(W9968CF_I2C_BUS_DELAY);
if (ret < 0) {
- err("Write SB reg [01] %04x failed", value);
+ pr_err("Write SB reg [01] %04x failed\n", value);
sd->gspca_dev.usb_err = ret;
}
}
@@ -135,7 +137,7 @@ static int w9968cf_read_sb(struct sd *sd)
ret = sd->gspca_dev.usb_buf[0] |
(sd->gspca_dev.usb_buf[1] << 8);
} else {
- err("Read SB reg [01] failed");
+ pr_err("Read SB reg [01] failed\n");
sd->gspca_dev.usb_err = ret;
}
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index c089a0f6f1d..3aed42acdb5 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -27,6 +27,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "xirlink-cit"
#include <linux/input.h>
@@ -800,8 +802,8 @@ static int cit_write_reg(struct gspca_dev *gspca_dev, u16 value, u16 index)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
value, index, NULL, 0, 1000);
if (err < 0)
- err("Failed to write a register (index 0x%04X,"
- " value 0x%02X, error %d)", index, value, err);
+ pr_err("Failed to write a register (index 0x%04X, value 0x%02X, error %d)\n",
+ index, value, err);
return 0;
}
@@ -816,8 +818,8 @@ static int cit_read_reg(struct gspca_dev *gspca_dev, u16 index, int verbose)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
0x00, index, buf, 8, 1000);
if (res < 0) {
- err("Failed to read a register (index 0x%04X, error %d)",
- index, res);
+ pr_err("Failed to read a register (index 0x%04X, error %d)\n",
+ index, res);
return res;
}
@@ -1587,7 +1589,7 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev)
intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
alt = usb_altnum_to_altsetting(intf, gspca_dev->alt);
if (!alt) {
- err("Couldn't get altsetting");
+ pr_err("Couldn't get altsetting\n");
return -EIO;
}
@@ -2824,7 +2826,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
if (ret < 0)
- err("set alt 1 err %d", ret);
+ pr_err("set alt 1 err %d\n", ret);
return ret;
}
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 61cdd56a74a..30ea1e47949 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -19,6 +19,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define MODULE_NAME "zc3xx"
#include <linux/input.h>
@@ -5666,7 +5668,7 @@ static u8 reg_r_i(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- err("reg_r_i err %d", ret);
+ pr_err("reg_r_i err %d\n", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -5698,7 +5700,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev,
value, index, NULL, 0,
500);
if (ret < 0) {
- err("reg_w_i err %d", ret);
+ pr_err("reg_w_i err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -5724,7 +5726,7 @@ static u16 i2c_read(struct gspca_dev *gspca_dev,
msleep(20);
retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */
if (retbyte != 0x00)
- err("i2c_r status error %02x", retbyte);
+ pr_err("i2c_r status error %02x\n", retbyte);
retval = reg_r_i(gspca_dev, 0x0095); /* read Lowbyte */
retval |= reg_r_i(gspca_dev, 0x0096) << 8; /* read Hightbyte */
PDEBUG(D_USBI, "i2c r [%02x] -> %04x (%02x)",
@@ -5748,7 +5750,7 @@ static u8 i2c_write(struct gspca_dev *gspca_dev,
msleep(1);
retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */
if (retbyte != 0x00)
- err("i2c_w status error %02x", retbyte);
+ pr_err("i2c_w status error %02x\n", retbyte);
PDEBUG(D_USBO, "i2c w [%02x] = %02x%02x (%02x)",
reg, valH, valL, retbyte);
return retbyte;
@@ -6497,7 +6499,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sensor GC0303");
break;
default:
- warn("Unknown sensor - set to TAS5130C");
+ pr_warn("Unknown sensor - set to TAS5130C\n");
sd->sensor = SENSOR_TAS5130C;
}
break;
@@ -6603,7 +6605,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->sensor = SENSOR_OV7620; /* same sensor (?) */
break;
default:
- err("Unknown sensor %04x", sensor);
+ pr_err("Unknown sensor %04x\n", sensor);
return -EINVAL;
}
}
@@ -6970,6 +6972,7 @@ static const struct sd_desc sd_desc = {
};
static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x03f0, 0x1b07)},
{USB_DEVICE(0x041e, 0x041e)},
{USB_DEVICE(0x041e, 0x4017)},
{USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106},
diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile
index 3baa9f613ca..52f057f24e3 100644
--- a/drivers/media/video/hdpvr/Makefile
+++ b/drivers/media/video/hdpvr/Makefile
@@ -2,6 +2,6 @@ hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o hdpvr-i2c.o
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o
-EXTRA_CFLAGS += -Idrivers/media/video
+ccflags-y += -Idrivers/media/video
-EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
+ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 2a1ac287591..82e819fa91c 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "hdpvr.h"
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index cbc505a2fc2..a62322d5c0d 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -21,9 +21,12 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DEBUG_VARIABLE debug
#include <media/saa7146_vv.h>
+#include <linux/module.h>
static int debug;
module_param(debug, int, 0);
@@ -175,13 +178,14 @@ static int hexium_init_done(struct saa7146_dev *dev)
union i2c_smbus_data data;
int i = 0;
- DEB_D(("hexium_init_done called.\n"));
+ DEB_D("hexium_init_done called\n");
/* initialize the helper ics to useful values */
for (i = 0; i < sizeof(hexium_ks0127b); i++) {
data.byte = hexium_ks0127b[i];
if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) {
- printk("hexium_gemini: hexium_init_done() failed for address 0x%02x\n", i);
+ pr_err("hexium_init_done() failed for address 0x%02x\n",
+ i);
}
}
@@ -192,7 +196,7 @@ static int hexium_set_input(struct hexium *hexium, int input)
{
union i2c_smbus_data data;
- DEB_D((".\n"));
+ DEB_D("\n");
data.byte = hexium_input_select[input].byte;
if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, hexium_input_select[input].adr, I2C_SMBUS_BYTE_DATA, &data)) {
@@ -207,12 +211,13 @@ static int hexium_set_standard(struct hexium *hexium, struct hexium_data *vdec)
union i2c_smbus_data data;
int i = 0;
- DEB_D((".\n"));
+ DEB_D("\n");
while (vdec[i].adr != -1) {
data.byte = vdec[i].byte;
if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, vdec[i].adr, I2C_SMBUS_BYTE_DATA, &data)) {
- printk("hexium_init_done: hexium_set_standard() failed for address 0x%02x\n", i);
+ pr_err("hexium_init_done: hexium_set_standard() failed for address 0x%02x\n",
+ i);
return -1;
}
i++;
@@ -222,14 +227,14 @@ static int hexium_set_standard(struct hexium *hexium, struct hexium_data *vdec)
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
- DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
+ DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
if (i->index >= HEXIUM_INPUTS)
return -EINVAL;
memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
- DEB_D(("v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n", i->index));
+ DEB_D("v4l2_ioctl: VIDIOC_ENUMINPUT %d\n", i->index);
return 0;
}
@@ -240,7 +245,7 @@ static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
*input = hexium->cur_input;
- DEB_D(("VIDIOC_G_INPUT: %d\n", *input));
+ DEB_D("VIDIOC_G_INPUT: %d\n", *input);
return 0;
}
@@ -249,7 +254,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct hexium *hexium = (struct hexium *) dev->ext_priv;
- DEB_EE(("VIDIOC_S_INPUT %d.\n", input));
+ DEB_EE("VIDIOC_S_INPUT %d\n", input);
if (input >= HEXIUM_INPUTS)
return -EINVAL;
@@ -270,7 +275,7 @@ static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *
for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
if (hexium_controls[i].id == qc->id) {
*qc = hexium_controls[i];
- DEB_D(("VIDIOC_QUERYCTRL %d.\n", qc->id));
+ DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
return 0;
}
}
@@ -293,7 +298,7 @@ static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
if (vc->id == V4L2_CID_PRIVATE_BASE) {
vc->value = hexium->cur_bw;
- DEB_D(("VIDIOC_G_CTRL BW:%d.\n", vc->value));
+ DEB_D("VIDIOC_G_CTRL BW:%d\n", vc->value);
return 0;
}
return -EINVAL;
@@ -316,7 +321,7 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
if (vc->id == V4L2_CID_PRIVATE_BASE)
hexium->cur_bw = vc->value;
- DEB_D(("VIDIOC_S_CTRL BW:%d.\n", hexium->cur_bw));
+ DEB_D("VIDIOC_S_CTRL BW:%d\n", hexium->cur_bw);
if (0 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
hexium_set_standard(hexium, hexium_pal);
@@ -351,14 +356,14 @@ static struct saa7146_ext_vv vv_data;
/* this function only gets called when the probing was successful */
static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
- struct hexium *hexium = (struct hexium *) dev->ext_priv;
+ struct hexium *hexium;
int ret;
- DEB_EE((".\n"));
+ DEB_EE("\n");
hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
if (NULL == hexium) {
- printk("hexium_gemini: not enough kernel memory in hexium_attach().\n");
+ pr_err("not enough kernel memory in hexium_attach()\n");
return -ENOMEM;
}
dev->ext_priv = hexium;
@@ -371,7 +376,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
};
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
- DEB_S(("cannot register i2c-device. skipping.\n"));
+ DEB_S("cannot register i2c-device. skipping.\n");
kfree(hexium);
return -EFAULT;
}
@@ -402,11 +407,11 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
vv_data.ops.vidioc_s_input = vidioc_s_input;
ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
if (ret < 0) {
- printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
+ pr_err("cannot register capture v4l2 device. skipping.\n");
return ret;
}
- printk("hexium_gemini: found 'hexium gemini' frame grabber-%d.\n", hexium_num);
+ pr_info("found 'hexium gemini' frame grabber-%d\n", hexium_num);
hexium_num++;
return 0;
@@ -416,7 +421,7 @@ static int hexium_detach(struct saa7146_dev *dev)
{
struct hexium *hexium = (struct hexium *) dev->ext_priv;
- DEB_EE(("dev:%p\n", dev));
+ DEB_EE("dev:%p\n", dev);
saa7146_unregister_device(&hexium->video_dev, dev);
saa7146_vv_release(dev);
@@ -508,7 +513,7 @@ static struct saa7146_extension hexium_extension = {
static int __init hexium_init_module(void)
{
if (0 != saa7146_register_extension(&hexium_extension)) {
- DEB_S(("failed to register extension.\n"));
+ DEB_S("failed to register extension\n");
return -ENODEV;
}
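
Besides the printk conversions, the hexium_gemini changes switch the saa7146 DEB_D()/DEB_EE()/DEB_S() calls from the old double-parenthesis form to ordinary variadic macro calls. A sketch of what such a variadic debug macro can look like, assuming a module-local debug parameter as in these drivers (DEMO_DEB and its body are illustrative, not the actual saa7146 definition):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int debug;
module_param(debug, int, 0);

/* Callers can now write DEMO_DEB("value %d\n", v) -- no extra parentheses. */
#define DEMO_DEB(fmt, ...)				\
	do {						\
		if (debug)				\
			pr_info(fmt, ##__VA_ARGS__);	\
	} while (0)
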
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 6ad7e1c8b92..23debc967d9 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -21,9 +21,12 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DEBUG_VARIABLE debug
#include <media/saa7146_vv.h>
+#include <linux/module.h>
static int debug;
module_param(debug, int, 0);
@@ -209,7 +212,7 @@ static int hexium_probe(struct saa7146_dev *dev)
union i2c_smbus_data data;
int err = 0;
- DEB_EE((".\n"));
+ DEB_EE("\n");
/* there are no hexium orion cards with revision 0 saa7146s */
if (0 == dev->revision) {
@@ -218,7 +221,7 @@ static int hexium_probe(struct saa7146_dev *dev)
hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
if (NULL == hexium) {
- printk("hexium_orion: hexium_probe: not enough kernel memory.\n");
+ pr_err("hexium_probe: not enough kernel memory\n");
return -ENOMEM;
}
@@ -234,7 +237,7 @@ static int hexium_probe(struct saa7146_dev *dev)
};
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
- DEB_S(("cannot register i2c-device. skipping.\n"));
+ DEB_S("cannot register i2c-device. skipping.\n");
kfree(hexium);
return -EFAULT;
}
@@ -248,7 +251,7 @@ static int hexium_probe(struct saa7146_dev *dev)
/* detect newer Hexium Orion cards by subsystem ids */
if (0x17c8 == dev->pci->subsystem_vendor && 0x0101 == dev->pci->subsystem_device) {
- printk("hexium_orion: device is a Hexium Orion w/ 1 SVHS + 3 BNC inputs.\n");
+ pr_info("device is a Hexium Orion w/ 1 SVHS + 3 BNC inputs\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
hexium->type = HEXIUM_ORION_1SVHS_3BNC;
@@ -256,7 +259,7 @@ static int hexium_probe(struct saa7146_dev *dev)
}
if (0x17c8 == dev->pci->subsystem_vendor && 0x2101 == dev->pci->subsystem_device) {
- printk("hexium_orion: device is a Hexium Orion w/ 4 BNC inputs.\n");
+ pr_info("device is a Hexium Orion w/ 4 BNC inputs\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
hexium->type = HEXIUM_ORION_4BNC;
@@ -266,7 +269,7 @@ static int hexium_probe(struct saa7146_dev *dev)
/* check if this is an old hexium Orion card by looking at
a saa7110 at address 0x4e */
if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
- printk("hexium_orion: device is a Hexium HV-PCI6/Orion (old).\n");
+ pr_info("device is a Hexium HV-PCI6/Orion (old)\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
hexium->type = HEXIUM_HV_PCI6_ORION;
@@ -288,13 +291,13 @@ static int hexium_init_done(struct saa7146_dev *dev)
union i2c_smbus_data data;
int i = 0;
- DEB_D(("hexium_init_done called.\n"));
+ DEB_D("hexium_init_done called\n");
/* initialize the helper ics to useful values */
for (i = 0; i < sizeof(hexium_saa7110); i++) {
data.byte = hexium_saa7110[i];
if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) {
- printk("hexium_orion: failed for address 0x%02x\n", i);
+ pr_err("failed for address 0x%02x\n", i);
}
}
@@ -306,7 +309,7 @@ static int hexium_set_input(struct hexium *hexium, int input)
union i2c_smbus_data data;
int i = 0;
- DEB_D((".\n"));
+ DEB_D("\n");
for (i = 0; i < 8; i++) {
int adr = hexium_input_select[input].data[i].adr;
@@ -314,7 +317,7 @@ static int hexium_set_input(struct hexium *hexium, int input)
if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, adr, I2C_SMBUS_BYTE_DATA, &data)) {
return -1;
}
- printk("%d: 0x%02x => 0x%02x\n",input, adr,data.byte);
+ pr_debug("%d: 0x%02x => 0x%02x\n", input, adr, data.byte);
}
return 0;
@@ -322,14 +325,14 @@ static int hexium_set_input(struct hexium *hexium, int input)
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
- DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
+ DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
if (i->index >= HEXIUM_INPUTS)
return -EINVAL;
memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
- DEB_D(("v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n", i->index));
+ DEB_D("v4l2_ioctl: VIDIOC_ENUMINPUT %d\n", i->index);
return 0;
}
@@ -340,7 +343,7 @@ static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
*input = hexium->cur_input;
- DEB_D(("VIDIOC_G_INPUT: %d\n", *input));
+ DEB_D("VIDIOC_G_INPUT: %d\n", *input);
return 0;
}
@@ -365,18 +368,18 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
{
struct hexium *hexium = (struct hexium *) dev->ext_priv;
- DEB_EE((".\n"));
+ DEB_EE("\n");
saa7146_vv_init(dev, &vv_data);
vv_data.ops.vidioc_enum_input = vidioc_enum_input;
vv_data.ops.vidioc_g_input = vidioc_g_input;
vv_data.ops.vidioc_s_input = vidioc_s_input;
if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
- printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
+ pr_err("cannot register capture v4l2 device. skipping.\n");
return -1;
}
- printk("hexium_orion: found 'hexium orion' frame grabber-%d.\n", hexium_num);
+ pr_err("found 'hexium orion' frame grabber-%d\n", hexium_num);
hexium_num++;
/* the rest */
@@ -390,7 +393,7 @@ static int hexium_detach(struct saa7146_dev *dev)
{
struct hexium *hexium = (struct hexium *) dev->ext_priv;
- DEB_EE(("dev:%p\n", dev));
+ DEB_EE("dev:%p\n", dev);
saa7146_unregister_device(&hexium->video_dev, dev);
saa7146_vv_release(dev);
@@ -479,7 +482,7 @@ static struct saa7146_extension extension = {
static int __init hexium_init_module(void)
{
if (0 != saa7146_register_extension(&extension)) {
- DEB_S(("failed to register extension.\n"));
+ DEB_S("failed to register extension\n");
return -ENODEV;
}
diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c
index 0382ea752e6..eec75bb5720 100644
--- a/drivers/media/video/imx074.c
+++ b/drivers/media/video/imx074.c
@@ -12,11 +12,12 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/soc_camera.h>
-#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-chip-ident.h>
@@ -267,6 +268,17 @@ static int imx074_g_chip_ident(struct v4l2_subdev *sd,
return 0;
}
+static int imx074_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ cfg->type = V4L2_MBUS_CSI2;
+ cfg->flags = V4L2_MBUS_CSI2_2_LANE |
+ V4L2_MBUS_CSI2_CHANNEL_0 |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+
+ return 0;
+}
+
static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
.s_stream = imx074_s_stream,
.s_mbus_fmt = imx074_s_fmt,
@@ -275,6 +287,7 @@ static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
.enum_mbus_fmt = imx074_enum_fmt,
.g_crop = imx074_g_crop,
.cropcap = imx074_cropcap,
+ .g_mbus_config = imx074_g_mbus_config,
};
static struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
@@ -286,28 +299,7 @@ static struct v4l2_subdev_ops imx074_subdev_ops = {
.video = &imx074_subdev_video_ops,
};
-/*
- * We have to provide soc-camera operations, but we don't have anything to say
- * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param
- */
-static unsigned long imx074_query_bus_param(struct soc_camera_device *icd)
-{
- return 0;
-}
-
-static int imx074_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return -EINVAL;
-}
-
-static struct soc_camera_ops imx074_ops = {
- .query_bus_param = imx074_query_bus_param,
- .set_bus_param = imx074_set_bus_param,
-};
-
-static int imx074_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int imx074_video_probe(struct i2c_client *client)
{
int ret;
u16 id;
@@ -417,17 +409,10 @@ static int imx074_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct imx074 *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
int ret;
- if (!icd) {
- dev_err(&client->dev, "IMX074: missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "IMX074: missing platform data!\n");
return -EINVAL;
@@ -445,12 +430,10 @@ static int imx074_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &imx074_subdev_ops);
- icd->ops = &imx074_ops;
priv->fmt = &imx074_colour_fmts[0];
- ret = imx074_video_probe(icd, client);
+ ret = imx074_video_probe(client);
if (ret < 0) {
- icd->ops = NULL;
kfree(priv);
return ret;
}
@@ -461,10 +444,8 @@ static int imx074_probe(struct i2c_client *client,
static int imx074_remove(struct i2c_client *client)
{
struct imx074 *priv = to_imx074(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- icd->ops = NULL;
if (icl->free_bus)
icl->free_bus(icl);
kfree(priv);
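
The imx074 hunks replace the empty soc-camera query_bus_param/set_bus_param stubs with a subdev .g_mbus_config operation that describes the CSI-2 link (lane count, virtual channel, clock mode). The host side can then query that through the normal subdev call mechanism instead of the soc-camera ops table; a sketch, using a hypothetical demo_get_sensor_bus() helper on the bridge driver side:

#include <linux/kernel.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>

static int demo_get_sensor_bus(struct v4l2_subdev *sd)
{
	struct v4l2_mbus_config cfg;
	int ret;

	/* Replaces the old soc_camera icd->ops->query_bus_param() path. */
	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
	if (ret)
		return ret;

	if (cfg.type == V4L2_MBUS_CSI2 && (cfg.flags & V4L2_MBUS_CSI2_2_LANE))
		pr_info("sensor reports a 2-lane CSI-2 bus\n");

	return 0;
}
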
diff --git a/drivers/media/video/ivtv/Makefile b/drivers/media/video/ivtv/Makefile
index 26ce0d6eaee..71ab76a5ab2 100644
--- a/drivers/media/video/ivtv/Makefile
+++ b/drivers/media/video/ivtv/Makefile
@@ -7,8 +7,8 @@ ivtv-objs := ivtv-routing.o ivtv-cards.o ivtv-controls.o \
obj-$(CONFIG_VIDEO_IVTV) += ivtv.o
obj-$(CONFIG_VIDEO_FB_IVTV) += ivtvfb.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 0fb75524484..41108a9a195 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1180,6 +1180,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
setup.addr = ADDR_UNSET;
setup.type = itv->options.tuner;
setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
+ if (itv->options.radio > 0)
+ setup.mode_mask |= T_RADIO;
setup.tuner_callback = (setup.type == TUNER_XC2028) ?
ivtv_reset_tuner_gpio : NULL;
ivtv_call_all(itv, tuner, s_type_addr, &setup);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 3e5c090af11..ecafa697326 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1203,9 +1203,7 @@ static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced
cap->service_lines[f][l] = set;
}
}
- return 0;
- }
- if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ } else if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
return -EINVAL;
if (itv->is_60hz) {
@@ -1215,9 +1213,16 @@ static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced
cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
cap->service_lines[0][16] = V4L2_SLICED_VPS;
}
- return 0;
+ } else {
+ return -EINVAL;
}
- return -EINVAL;
+
+ set = 0;
+ for (f = 0; f < 2; f++)
+ for (l = 0; l < 24; l++)
+ set |= cap->service_lines[f][l];
+ cap->service_set = set;
+ return 0;
}
static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx)
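
The ivtv_g_sliced_vbi_cap() rework above folds the capture and output branches into one exit path and derives cap->service_set from the per-line table instead of leaving it untouched. The accumulation itself is just an OR over the table; a standalone sketch (demo_fill_service_set() is an illustrative name):

#include <linux/videodev2.h>

static void demo_fill_service_set(struct v4l2_sliced_vbi_cap *cap)
{
	u16 set = 0;
	int f, l;

	/* service_set is the union of everything listed per field/line. */
	for (f = 0; f < 2; f++)
		for (l = 0; l < 24; l++)
			set |= cap->service_lines[f][l];
	cap->service_set = set;
}
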
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index fb8e4a7a9dd..05ab3700647 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -21,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -936,7 +937,7 @@ static int __devinit m5mols_probe(struct i2c_client *client,
return -EINVAL;
}
- if (!pdata->irq) {
+ if (!client->irq) {
dev_err(&client->dev, "Interrupt not assigned\n");
return -EINVAL;
}
@@ -973,7 +974,7 @@ static int __devinit m5mols_probe(struct i2c_client *client,
init_waitqueue_head(&info->irq_waitq);
INIT_WORK(&info->work_irq, m5mols_irq_work);
- ret = request_irq(pdata->irq, m5mols_irq_handler,
+ ret = request_irq(client->irq, m5mols_irq_handler,
IRQF_TRIGGER_RISING, MODULE_NAME, sd);
if (ret) {
dev_err(&client->dev, "Interrupt request failed: %d\n", ret);
@@ -998,7 +999,7 @@ static int __devexit m5mols_remove(struct i2c_client *client)
struct m5mols_info *info = to_m5mols(sd);
v4l2_device_unregister_subdev(sd);
- free_irq(info->pdata->irq, sd);
+ free_irq(client->irq, sd);
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
gpio_free(info->pdata->gpio_reset);
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
index 83c14514cd5..80ec64d2d6d 100644
--- a/drivers/media/video/marvell-ccic/mcam-core.c
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -450,7 +450,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
buf = cam->vb_bufs[frame ^ 0x1];
cam->vb_bufs[frame] = buf;
mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
- vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
set_bit(CF_SINGLE_BUFFER, &cam->flags);
singles++;
return;
@@ -461,7 +461,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
list_del_init(&buf->queue);
mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
- vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
cam->vb_bufs[frame] = buf;
clear_bit(CF_SINGLE_BUFFER, &cam->flags);
}
@@ -883,8 +883,9 @@ static int mcam_read_setup(struct mcam_camera *cam)
* Videobuf2 interface code.
*/
-static int mcam_vb_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
- unsigned int *num_planes, unsigned long sizes[],
+static int mcam_vb_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt, unsigned int *nbufs,
+ unsigned int *num_planes, unsigned int sizes[],
void *alloc_ctxs[])
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
@@ -940,12 +941,14 @@ static void mcam_vb_wait_finish(struct vb2_queue *vq)
/*
* These need to be called with the mutex held from vb2
*/
-static int mcam_vb_start_streaming(struct vb2_queue *vq)
+static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
- if (cam->state != S_IDLE)
+ if (cam->state != S_IDLE) {
+ INIT_LIST_HEAD(&cam->buffers);
return -EINVAL;
+ }
cam->sequence = 0;
/*
* Videobuf2 sneakily hoards all the buffers and won't
diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
index d6b76454137..fb0b124b35f 100644
--- a/drivers/media/video/marvell-ccic/mmp-driver.c
+++ b/drivers/media/video/marvell-ccic/mmp-driver.c
@@ -29,6 +29,7 @@
#include "mcam-core.h"
+MODULE_ALIAS("platform:mmp-camera");
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 166bf9349c1..12897e8a331 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -738,9 +738,10 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
* Queue operations
*/
-static int m2mtest_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned long sizes[],
- void *alloc_ctxs[])
+static int m2mtest_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq);
struct m2mtest_q_data *q_data;
@@ -793,10 +794,24 @@ static void m2mtest_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
+static void m2mtest_wait_prepare(struct vb2_queue *q)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+ m2mtest_unlock(ctx);
+}
+
+static void m2mtest_wait_finish(struct vb2_queue *q)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+ m2mtest_lock(ctx);
+}
+
static struct vb2_ops m2mtest_qops = {
.queue_setup = m2mtest_queue_setup,
.buf_prepare = m2mtest_buf_prepare,
.buf_queue = m2mtest_buf_queue,
+ .wait_prepare = m2mtest_wait_prepare,
+ .wait_finish = m2mtest_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
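
The m2mtest hunk wires up .wait_prepare/.wait_finish so videobuf2 can drop the driver's lock while it sleeps for a buffer, which keeps other ioctls (a QBUF from a second thread, for instance) from deadlocking against the wait. A minimal sketch of the same pattern with an explicit mutex (struct demo_ctx and the demo_* names are illustrative, not the m2mtest helpers):

#include <linux/mutex.h>
#include <media/videobuf2-core.h>

struct demo_ctx {
	struct mutex lock;	/* serializes the driver's ioctls */
};

/* Called by vb2 before it blocks: release the lock so others can progress. */
static void demo_wait_prepare(struct vb2_queue *q)
{
	struct demo_ctx *ctx = vb2_get_drv_priv(q);
	mutex_unlock(&ctx->lock);
}

/* Called once the wait completes: re-take the lock before returning to vb2. */
static void demo_wait_finish(struct vb2_queue *q)
{
	struct demo_ctx *ctx = vb2_get_drv_priv(q);
	mutex_lock(&ctx->lock);
}

static const struct vb2_ops demo_qops = {
	.wait_prepare	= demo_wait_prepare,
	.wait_finish	= demo_wait_finish,
};
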
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index c43c81f5f97..d0f53885728 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -426,6 +426,20 @@ static int msp_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
return 0;
}
+static int msp_querystd(struct v4l2_subdev *sd, v4l2_std_id *id)
+{
+ struct msp_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ *id &= state->detected_std;
+
+ v4l_dbg(2, msp_debug, client,
+ "detected standard: %s(0x%08Lx)\n",
+ msp_standard_std_name(state->std), state->detected_std);
+
+ return 0;
+}
+
static int msp_s_std(struct v4l2_subdev *sd, v4l2_std_id id)
{
struct msp_state *state = to_state(sd);
@@ -616,6 +630,10 @@ static const struct v4l2_subdev_core_ops msp_core_ops = {
.s_std = msp_s_std,
};
+static const struct v4l2_subdev_video_ops msp_video_ops = {
+ .querystd = msp_querystd,
+};
+
static const struct v4l2_subdev_tuner_ops msp_tuner_ops = {
.s_frequency = msp_s_frequency,
.g_tuner = msp_g_tuner,
@@ -630,6 +648,7 @@ static const struct v4l2_subdev_audio_ops msp_audio_ops = {
static const struct v4l2_subdev_ops msp_ops = {
.core = &msp_core_ops,
+ .video = &msp_video_ops,
.tuner = &msp_tuner_ops,
.audio = &msp_audio_ops,
};
@@ -664,6 +683,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
v4l2_i2c_subdev_init(sd, client, &msp_ops);
state->v4l2_std = V4L2_STD_NTSC;
+ state->detected_std = V4L2_STD_ALL;
state->audmode = V4L2_TUNER_MODE_STEREO;
state->input = -1;
state->i2s_mode = 0;
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index 32a478e532f..831e8db4368 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -75,7 +75,7 @@ struct msp_state {
int opmode;
int std;
int mode;
- v4l2_std_id v4l2_std;
+ v4l2_std_id v4l2_std, detected_std;
int nicam_on;
int acb;
int in_scart;
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index 80387e2c3ec..f8b51714f2f 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -37,29 +37,49 @@ static struct {
int retval;
int main, second;
char *name;
+ v4l2_std_id std;
} msp_stdlist[] = {
- { 0x0000, 0, 0, "could not detect sound standard" },
- { 0x0001, 0, 0, "autodetect start" },
- { 0x0002, MSP_CARRIER(4.5), MSP_CARRIER(4.72), "4.5/4.72 M Dual FM-Stereo" },
- { 0x0003, MSP_CARRIER(5.5), MSP_CARRIER(5.7421875), "5.5/5.74 B/G Dual FM-Stereo" },
- { 0x0004, MSP_CARRIER(6.5), MSP_CARRIER(6.2578125), "6.5/6.25 D/K1 Dual FM-Stereo" },
- { 0x0005, MSP_CARRIER(6.5), MSP_CARRIER(6.7421875), "6.5/6.74 D/K2 Dual FM-Stereo" },
- { 0x0006, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 D/K FM-Mono (HDEV3)" },
- { 0x0007, MSP_CARRIER(6.5), MSP_CARRIER(5.7421875), "6.5/5.74 D/K3 Dual FM-Stereo" },
- { 0x0008, MSP_CARRIER(5.5), MSP_CARRIER(5.85), "5.5/5.85 B/G NICAM FM" },
- { 0x0009, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 L NICAM AM" },
- { 0x000a, MSP_CARRIER(6.0), MSP_CARRIER(6.55), "6.0/6.55 I NICAM FM" },
- { 0x000b, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM" },
- { 0x000c, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM (HDEV2)" },
- { 0x000d, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM (HDEV3)" },
- { 0x0020, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Stereo" },
- { 0x0021, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Mono + SAP" },
- { 0x0030, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M EIA-J Japan Stereo" },
- { 0x0040, MSP_CARRIER(10.7), MSP_CARRIER(10.7), "10.7 FM-Stereo Radio" },
- { 0x0050, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 SAT-Mono" },
- { 0x0051, MSP_CARRIER(7.02), MSP_CARRIER(7.20), "7.02/7.20 SAT-Stereo" },
- { 0x0060, MSP_CARRIER(7.2), MSP_CARRIER(7.2), "7.2 SAT ADR" },
- { -1, 0, 0, NULL }, /* EOF */
+ { 0x0000, 0, 0, "could not detect sound standard", V4L2_STD_ALL },
+ { 0x0001, 0, 0, "autodetect start", V4L2_STD_ALL },
+ { 0x0002, MSP_CARRIER(4.5), MSP_CARRIER(4.72),
+ "4.5/4.72 M Dual FM-Stereo", V4L2_STD_MN },
+ { 0x0003, MSP_CARRIER(5.5), MSP_CARRIER(5.7421875),
+ "5.5/5.74 B/G Dual FM-Stereo", V4L2_STD_BG },
+ { 0x0004, MSP_CARRIER(6.5), MSP_CARRIER(6.2578125),
+ "6.5/6.25 D/K1 Dual FM-Stereo", V4L2_STD_DK },
+ { 0x0005, MSP_CARRIER(6.5), MSP_CARRIER(6.7421875),
+ "6.5/6.74 D/K2 Dual FM-Stereo", V4L2_STD_DK },
+ { 0x0006, MSP_CARRIER(6.5), MSP_CARRIER(6.5),
+ "6.5 D/K FM-Mono (HDEV3)", V4L2_STD_DK },
+ { 0x0007, MSP_CARRIER(6.5), MSP_CARRIER(5.7421875),
+ "6.5/5.74 D/K3 Dual FM-Stereo", V4L2_STD_DK },
+ { 0x0008, MSP_CARRIER(5.5), MSP_CARRIER(5.85),
+ "5.5/5.85 B/G NICAM FM", V4L2_STD_BG },
+ { 0x0009, MSP_CARRIER(6.5), MSP_CARRIER(5.85),
+ "6.5/5.85 L NICAM AM", V4L2_STD_L },
+ { 0x000a, MSP_CARRIER(6.0), MSP_CARRIER(6.55),
+ "6.0/6.55 I NICAM FM", V4L2_STD_PAL_I },
+ { 0x000b, MSP_CARRIER(6.5), MSP_CARRIER(5.85),
+ "6.5/5.85 D/K NICAM FM", V4L2_STD_DK },
+ { 0x000c, MSP_CARRIER(6.5), MSP_CARRIER(5.85),
+ "6.5/5.85 D/K NICAM FM (HDEV2)", V4L2_STD_DK },
+ { 0x000d, MSP_CARRIER(6.5), MSP_CARRIER(5.85),
+ "6.5/5.85 D/K NICAM FM (HDEV3)", V4L2_STD_DK },
+ { 0x0020, MSP_CARRIER(4.5), MSP_CARRIER(4.5),
+ "4.5 M BTSC-Stereo", V4L2_STD_MTS },
+ { 0x0021, MSP_CARRIER(4.5), MSP_CARRIER(4.5),
+ "4.5 M BTSC-Mono + SAP", V4L2_STD_MTS },
+ { 0x0030, MSP_CARRIER(4.5), MSP_CARRIER(4.5),
+ "4.5 M EIA-J Japan Stereo", V4L2_STD_NTSC_M_JP },
+ { 0x0040, MSP_CARRIER(10.7), MSP_CARRIER(10.7),
+ "10.7 FM-Stereo Radio", V4L2_STD_ALL },
+ { 0x0050, MSP_CARRIER(6.5), MSP_CARRIER(6.5),
+ "6.5 SAT-Mono", V4L2_STD_ALL },
+ { 0x0051, MSP_CARRIER(7.02), MSP_CARRIER(7.20),
+ "7.02/7.20 SAT-Stereo", V4L2_STD_ALL },
+ { 0x0060, MSP_CARRIER(7.2), MSP_CARRIER(7.2),
+ "7.2 SAT ADR", V4L2_STD_ALL },
+ { -1, 0, 0, NULL, 0 }, /* EOF */
};
static struct msp3400c_init_data_dem {
@@ -156,6 +176,16 @@ const char *msp_standard_std_name(int std)
return "unknown";
}
+static v4l2_std_id msp_standard_std(int std)
+{
+ int i;
+
+ for (i = 0; msp_stdlist[i].name != NULL; i++)
+ if (msp_stdlist[i].retval == std)
+ return msp_stdlist[i].std;
+ return V4L2_STD_ALL;
+}
+
static void msp_set_source(struct i2c_client *client, u16 src)
{
struct msp_state *state = to_state(i2c_get_clientdata(client));
@@ -479,6 +509,7 @@ int msp3400c_thread(void *data)
int count, max1, max2, val1, val2, val, i;
v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+ state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
@@ -579,6 +610,7 @@ restart:
state->main = msp3400c_carrier_detect_main[max1].cdo;
switch (max1) {
case 1: /* 5.5 */
+ state->detected_std = V4L2_STD_BG | V4L2_STD_PAL_H;
if (max2 == 0) {
/* B/G FM-stereo */
state->second = msp3400c_carrier_detect_55[max2].cdo;
@@ -596,6 +628,7 @@ restart:
break;
case 2: /* 6.0 */
/* PAL I NICAM */
+ state->detected_std = V4L2_STD_PAL_I;
state->second = MSP_CARRIER(6.552);
msp3400c_set_mode(client, MSP_MODE_FM_NICAM2);
state->nicam_on = 1;
@@ -607,22 +640,26 @@ restart:
state->second = msp3400c_carrier_detect_65[max2].cdo;
msp3400c_set_mode(client, MSP_MODE_FM_TERRA);
state->watch_stereo = 1;
+ state->detected_std = V4L2_STD_DK;
} else if (max2 == 0 && (state->v4l2_std & V4L2_STD_SECAM)) {
/* L NICAM or AM-mono */
state->second = msp3400c_carrier_detect_65[max2].cdo;
msp3400c_set_mode(client, MSP_MODE_AM_NICAM);
state->watch_stereo = 1;
+ state->detected_std = V4L2_STD_L;
} else if (max2 == 0 && state->has_nicam) {
/* D/K NICAM */
state->second = msp3400c_carrier_detect_65[max2].cdo;
msp3400c_set_mode(client, MSP_MODE_FM_NICAM1);
state->nicam_on = 1;
state->watch_stereo = 1;
+ state->detected_std = V4L2_STD_DK;
} else {
goto no_second;
}
break;
case 0: /* 4.5 */
+ state->detected_std = V4L2_STD_MN;
default:
no_second:
state->second = msp3400c_carrier_detect_main[max1].cdo;
@@ -662,6 +699,7 @@ int msp3410d_thread(void *data)
int val, i, std, count;
v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
+ state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
@@ -743,6 +781,8 @@ restart:
msp_stdlist[8].name : "unknown", val);
state->std = val = 0x0009;
msp_write_dem(client, 0x20, val);
+ } else {
+ state->detected_std = msp_standard_std(state->std);
}
/* set stereo */
@@ -957,6 +997,7 @@ int msp34xxg_thread(void *data)
int val, i;
v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
+ state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
@@ -1013,6 +1054,7 @@ unmute:
v4l_dbg(1, msp_debug, client,
"detected standard: %s (0x%04x)\n",
msp_standard_std_name(state->std), state->std);
+ state->detected_std = msp_standard_std(state->std);
if (state->std == 9) {
/* AM NICAM mode */
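
The msp3400 changes track the sound standard identified by the carrier scan in state->detected_std and export it through a new .querystd video op that ANDs it into the caller's mask. On the bridge side the usual idiom is to start from V4L2_STD_ALL and let each subdev narrow it down; a sketch (demo_query_std() is an illustrative helper, not part of the msp3400 driver):

#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>

static v4l2_std_id demo_query_std(struct v4l2_subdev *sd)
{
	v4l2_std_id std = V4L2_STD_ALL;

	/* msp_querystd() clears the bits its carrier scan has ruled out. */
	if (v4l2_subdev_call(sd, video, querystd, &std))
		return V4L2_STD_UNKNOWN;

	return std;
}
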
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 4da9cca939c..e2b1029b16c 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -12,10 +12,13 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/module.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-chip-ident.h>
-#include <media/soc_camera.h>
+#include <media/v4l2-ctrls.h>
/*
* mt9m001 i2c address 0x5d
@@ -84,15 +87,19 @@ static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = {
struct mt9m001 {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* exposure/auto-exposure cluster */
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+ };
struct v4l2_rect rect; /* Sensor window */
const struct mt9m001_datafmt *fmt;
const struct mt9m001_datafmt *fmts;
int num_fmts;
int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */
- unsigned int gain;
- unsigned int exposure;
+ unsigned int total_h;
unsigned short y_skip_top; /* Lines to skip at the top */
- unsigned char autoexposure;
};
static struct mt9m001 *to_mt9m001(const struct i2c_client *client)
@@ -165,54 +172,13 @@ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int mt9m001_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned long width_flag = flags & SOCAM_DATAWIDTH_MASK;
-
- /* Only one width bit may be set */
- if (!is_power_of_2(width_flag))
- return -EINVAL;
-
- if (icl->set_bus_param)
- return icl->set_bus_param(icl, width_flag);
-
- /*
- * Without board specific bus width settings we only support the
- * sensors native bus width
- */
- if (width_flag == SOCAM_DATAWIDTH_10)
- return 0;
-
- return -EINVAL;
-}
-
-static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- /* MT9M001 has all capture_format parameters fixed */
- unsigned long flags = SOCAM_PCLK_SAMPLE_FALLING |
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_HIGH | SOCAM_MASTER;
-
- if (icl->query_bus_param)
- flags |= icl->query_bus_param(icl) & SOCAM_DATAWIDTH_MASK;
- else
- flags |= SOCAM_DATAWIDTH_10;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_rect rect = a->c;
- struct soc_camera_device *icd = client->dev.platform_data;
int ret;
const u16 hblank = 9, vblank = 25;
- unsigned int total_h;
if (mt9m001->fmts == mt9m001_colour_fmts)
/*
@@ -231,7 +197,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
soc_camera_limit_side(&rect.top, &rect.height,
MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT);
- total_h = rect.height + mt9m001->y_skip_top + vblank;
+ mt9m001->total_h = rect.height + mt9m001->y_skip_top + vblank;
/* Blanking and start values - default... */
ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank);
@@ -240,7 +206,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
/*
* The caller provides a supported format, as verified per
- * call to icd->try_fmt()
+ * call to .try_mbus_fmt()
*/
if (!ret)
ret = reg_write(client, MT9M001_COLUMN_START, rect.left);
@@ -251,17 +217,8 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (!ret)
ret = reg_write(client, MT9M001_WINDOW_HEIGHT,
rect.height + mt9m001->y_skip_top - 1);
- if (!ret && mt9m001->autoexposure) {
- ret = reg_write(client, MT9M001_SHUTTER_WIDTH, total_h);
- if (!ret) {
- const struct v4l2_queryctrl *qctrl =
- soc_camera_find_qctrl(icd->ops,
- V4L2_CID_EXPOSURE);
- mt9m001->exposure = (524 + (total_h - 1) *
- (qctrl->maximum - qctrl->minimum)) /
- 1048 + qctrl->minimum;
- }
- }
+ if (!ret && v4l2_ctrl_g_ctrl(mt9m001->autoexposure) == V4L2_EXPOSURE_AUTO)
+ ret = reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h);
if (!ret)
mt9m001->rect = rect;
@@ -421,107 +378,48 @@ static int mt9m001_s_register(struct v4l2_subdev *sd,
}
#endif
-static const struct v4l2_queryctrl mt9m001_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 1,
- .maximum = 255,
- .step = 1,
- .default_value = 255,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- }
-};
-
-static struct soc_camera_ops mt9m001_ops = {
- .set_bus_param = mt9m001_set_bus_param,
- .query_bus_param = mt9m001_query_bus_param,
- .controls = mt9m001_controls,
- .num_controls = ARRAY_SIZE(mt9m001_controls),
-};
-
-static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int mt9m001_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m001 *mt9m001 = to_mt9m001(client);
- int data;
+ struct mt9m001 *mt9m001 = container_of(ctrl->handler,
+ struct mt9m001, hdl);
+ s32 min, max;
switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- data = reg_read(client, MT9M001_READ_OPTIONS2);
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & 0x8000);
- break;
case V4L2_CID_EXPOSURE_AUTO:
- ctrl->value = mt9m001->autoexposure;
- break;
- case V4L2_CID_GAIN:
- ctrl->value = mt9m001->gain;
- break;
- case V4L2_CID_EXPOSURE:
- ctrl->value = mt9m001->exposure;
+ min = mt9m001->exposure->minimum;
+ max = mt9m001->exposure->maximum;
+ mt9m001->exposure->val =
+ (524 + (mt9m001->total_h - 1) * (max - min)) / 1048 + min;
break;
}
return 0;
}
-static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct mt9m001 *mt9m001 = container_of(ctrl->handler,
+ struct mt9m001, hdl);
+ struct v4l2_subdev *sd = &mt9m001->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- const struct v4l2_queryctrl *qctrl;
+ struct v4l2_ctrl *exp = mt9m001->exposure;
int data;
- qctrl = soc_camera_find_qctrl(&mt9m001_ops, ctrl->id);
-
- if (!qctrl)
- return -EINVAL;
-
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- if (ctrl->value)
+ if (ctrl->val)
data = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000);
else
data = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000);
if (data < 0)
return -EIO;
- break;
+ return 0;
+
case V4L2_CID_GAIN:
- if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
- return -EINVAL;
/* See Datasheet Table 7, Gain settings. */
- if (ctrl->value <= qctrl->default_value) {
+ if (ctrl->val <= ctrl->default_value) {
/* Pack it into 0..1 step 0.125, register values 0..8 */
- unsigned long range = qctrl->default_value - qctrl->minimum;
- data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
+ unsigned long range = ctrl->default_value - ctrl->minimum;
+ data = ((ctrl->val - ctrl->minimum) * 8 + range / 2) / range;
dev_dbg(&client->dev, "Setting gain %d\n", data);
data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
@@ -530,8 +428,8 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
} else {
/* Pack it into 1.125..15 variable step, register values 9..67 */
/* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */
- unsigned long range = qctrl->maximum - qctrl->default_value - 1;
- unsigned long gain = ((ctrl->value - qctrl->default_value - 1) *
+ unsigned long range = ctrl->maximum - ctrl->default_value - 1;
+ unsigned long gain = ((ctrl->val - ctrl->default_value - 1) *
111 + range / 2) / range + 9;
if (gain <= 32)
@@ -547,66 +445,44 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
if (data < 0)
return -EIO;
}
+ return 0;
- /* Success */
- mt9m001->gain = ctrl->value;
- break;
- case V4L2_CID_EXPOSURE:
- /* mt9m001 has maximum == default */
- if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
- return -EINVAL;
- else {
- unsigned long range = qctrl->maximum - qctrl->minimum;
- unsigned long shutter = ((ctrl->value - qctrl->minimum) * 1048 +
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL) {
+ unsigned long range = exp->maximum - exp->minimum;
+ unsigned long shutter = ((exp->val - exp->minimum) * 1048 +
range / 2) / range + 1;
dev_dbg(&client->dev,
"Setting shutter width from %d to %lu\n",
- reg_read(client, MT9M001_SHUTTER_WIDTH),
- shutter);
+ reg_read(client, MT9M001_SHUTTER_WIDTH), shutter);
if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0)
return -EIO;
- mt9m001->exposure = ctrl->value;
- mt9m001->autoexposure = 0;
- }
- break;
- case V4L2_CID_EXPOSURE_AUTO:
- if (ctrl->value) {
+ } else {
const u16 vblank = 25;
- unsigned int total_h = mt9m001->rect.height +
+
+ mt9m001->total_h = mt9m001->rect.height +
mt9m001->y_skip_top + vblank;
- if (reg_write(client, MT9M001_SHUTTER_WIDTH,
- total_h) < 0)
+ if (reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h) < 0)
return -EIO;
- qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
- mt9m001->exposure = (524 + (total_h - 1) *
- (qctrl->maximum - qctrl->minimum)) /
- 1048 + qctrl->minimum;
- mt9m001->autoexposure = 1;
- } else
- mt9m001->autoexposure = 0;
- break;
+ }
+ return 0;
}
- return 0;
+ return -EINVAL;
}
/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
-static int mt9m001_video_probe(struct soc_camera_device *icd,
+static int mt9m001_video_probe(struct soc_camera_link *icl,
struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
s32 data;
unsigned long flags;
int ret;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/* Enable the chip */
data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
dev_dbg(&client->dev, "write: %d\n", data);
@@ -661,18 +537,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
dev_err(&client->dev, "Failed to initialise the camera\n");
/* mt9m001_init() has reset the chip, returning registers to defaults */
- mt9m001->gain = 64;
- mt9m001->exposure = 255;
-
- return ret;
+ return v4l2_ctrl_handler_setup(&mt9m001->hdl);
}
-static void mt9m001_video_remove(struct soc_camera_device *icd)
+static void mt9m001_video_remove(struct soc_camera_link *icl)
{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
-
- dev_dbg(icd->pdev, "Video removed: %p, %p\n",
- icd->parent, icd->vdev);
if (icl->free_bus)
icl->free_bus(icl);
}
@@ -687,9 +556,12 @@ static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
return 0;
}
+static const struct v4l2_ctrl_ops mt9m001_ctrl_ops = {
+ .g_volatile_ctrl = mt9m001_g_volatile_ctrl,
+ .s_ctrl = mt9m001_s_ctrl,
+};
+
static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
- .g_ctrl = mt9m001_g_ctrl,
- .s_ctrl = mt9m001_s_ctrl,
.g_chip_ident = mt9m001_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9m001_g_register,
@@ -710,6 +582,40 @@ static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
return 0;
}
+static int mt9m001_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ /* MT9M001 has all capture_format parameters fixed */
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
+static int mt9m001_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ const struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ unsigned int bps = soc_mbus_get_fmtdesc(mt9m001->fmt->code)->bits_per_sample;
+
+ if (icl->set_bus_param)
+ return icl->set_bus_param(icl, 1 << (bps - 1));
+
+ /*
+ * Without board specific bus width settings we only support the
+ * sensors native bus width
+ */
+ return bps == 10 ? 0 : -EINVAL;
+}
+
static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.s_stream = mt9m001_s_stream,
.s_mbus_fmt = mt9m001_s_fmt,
@@ -719,6 +625,8 @@ static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.g_crop = mt9m001_g_crop,
.cropcap = mt9m001_cropcap,
.enum_mbus_fmt = mt9m001_enum_fmt,
+ .g_mbus_config = mt9m001_g_mbus_config,
+ .s_mbus_config = mt9m001_s_mbus_config,
};
static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
@@ -735,17 +643,10 @@ static int mt9m001_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9m001 *mt9m001;
- struct soc_camera_device *icd = client->dev.platform_data;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
int ret;
- if (!icd) {
- dev_err(&client->dev, "MT9M001: missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "MT9M001 driver needs platform data\n");
return -EINVAL;
@@ -762,25 +663,40 @@ static int mt9m001_probe(struct i2c_client *client,
return -ENOMEM;
v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops);
+ v4l2_ctrl_handler_init(&mt9m001->hdl, 4);
+ v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 64);
+ mt9m001->exposure = v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1, 255, 1, 255);
+ /*
+ * Simulated autoexposure. If enabled, we calculate shutter width
+ * ourselves in the driver based on vertical blanking and frame width
+ */
+ mt9m001->autoexposure = v4l2_ctrl_new_std_menu(&mt9m001->hdl,
+ &mt9m001_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
+ V4L2_EXPOSURE_AUTO);
+ mt9m001->subdev.ctrl_handler = &mt9m001->hdl;
+ if (mt9m001->hdl.error) {
+ int err = mt9m001->hdl.error;
- /* Second stage probe - when a capture adapter is there */
- icd->ops = &mt9m001_ops;
+ kfree(mt9m001);
+ return err;
+ }
+ v4l2_ctrl_auto_cluster(2, &mt9m001->autoexposure,
+ V4L2_EXPOSURE_MANUAL, true);
+ /* Second stage probe - when a capture adapter is there */
mt9m001->y_skip_top = 0;
mt9m001->rect.left = MT9M001_COLUMN_SKIP;
mt9m001->rect.top = MT9M001_ROW_SKIP;
mt9m001->rect.width = MT9M001_MAX_WIDTH;
mt9m001->rect.height = MT9M001_MAX_HEIGHT;
- /*
- * Simulated autoexposure. If enabled, we calculate shutter width
- * ourselves in the driver based on vertical blanking and frame width
- */
- mt9m001->autoexposure = 1;
-
- ret = mt9m001_video_probe(icd, client);
+ ret = mt9m001_video_probe(icl, client);
if (ret) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&mt9m001->hdl);
kfree(mt9m001);
}
@@ -790,10 +706,11 @@ static int mt9m001_probe(struct i2c_client *client,
static int mt9m001_remove(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- icd->ops = NULL;
- mt9m001_video_remove(icd);
+ v4l2_device_unregister_subdev(&mt9m001->subdev);
+ v4l2_ctrl_handler_free(&mt9m001->hdl);
+ mt9m001_video_remove(icl);
kfree(mt9m001);
return 0;
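
The mt9m001 conversion drops the soc_camera_ops control table and the g_ctrl/s_ctrl handlers in favour of the v4l2_ctrl framework: controls are declared with v4l2_ctrl_new_std()/v4l2_ctrl_new_std_menu(), exposure and auto-exposure are clustered so the core handles the manual/auto switch, and v4l2_ctrl_handler_setup() applies the defaults at probe time. A condensed sketch of that registration sequence, using hypothetical demo_* names for the driver state and ops (mt9m001 additionally passes true to v4l2_ctrl_auto_cluster() and supplies .g_volatile_ctrl so the exposure value is recomputed in auto mode):

#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

struct demo_sensor {
	struct v4l2_subdev subdev;
	struct v4l2_ctrl_handler hdl;
	/* auto-exposure and exposure must be adjacent to form a cluster */
	struct v4l2_ctrl *autoexposure;
	struct v4l2_ctrl *exposure;
};

static int demo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* Apply ctrl->val to the hardware here. */
	return 0;
}

static const struct v4l2_ctrl_ops demo_ctrl_ops = {
	.s_ctrl = demo_s_ctrl,
};

/* Register the controls that used to live in a v4l2_queryctrl table. */
static int demo_init_controls(struct demo_sensor *s)
{
	v4l2_ctrl_handler_init(&s->hdl, 3);
	v4l2_ctrl_new_std(&s->hdl, &demo_ctrl_ops, V4L2_CID_GAIN, 0, 127, 1, 64);
	s->exposure = v4l2_ctrl_new_std(&s->hdl, &demo_ctrl_ops,
					V4L2_CID_EXPOSURE, 1, 255, 1, 255);
	s->autoexposure = v4l2_ctrl_new_std_menu(&s->hdl, &demo_ctrl_ops,
					V4L2_CID_EXPOSURE_AUTO, 1, 0,
					V4L2_EXPOSURE_AUTO);
	if (s->hdl.error)
		return s->hdl.error;

	/* Let the control core toggle exposure between manual and auto. */
	v4l2_ctrl_auto_cluster(2, &s->autoexposure, V4L2_EXPOSURE_MANUAL, false);
	s->subdev.ctrl_handler = &s->hdl;
	return 0;
}

With the handler attached to subdev.ctrl_handler, QUERYCTRL/G_CTRL/S_CTRL are served by the control framework, which is why the per-driver control tables removed above are no longer needed.
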
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index a357aa889fc..cf2c0fb95f2 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -13,10 +13,13 @@
#include <linux/log2.h>
#include <linux/gpio.h>
#include <linux/delay.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/module.h>
+#include <media/soc_camera.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
-#include <media/soc_camera.h>
/*
* MT9M111, MT9M112 and MT9M131:
@@ -177,6 +180,8 @@ enum mt9m111_context {
struct mt9m111 {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *gain;
int model; /* V4L2_IDENT_MT9M111 or V4L2_IDENT_MT9M112 code
* from v4l2-chip-ident.h */
enum mt9m111_context context;
@@ -184,13 +189,9 @@ struct mt9m111 {
struct mutex power_lock; /* lock to protect power_count */
int power_count;
const struct mt9m111_datafmt *fmt;
- unsigned int gain;
- unsigned char autoexposure;
+ int lastpage; /* PageMap cache value */
unsigned char datawidth;
unsigned int powered:1;
- unsigned int hflip:1;
- unsigned int vflip:1;
- unsigned int autowhitebalance:1;
};
static struct mt9m111 *to_mt9m111(const struct i2c_client *client)
@@ -202,17 +203,17 @@ static int reg_page_map_set(struct i2c_client *client, const u16 reg)
{
int ret;
u16 page;
- static int lastpage = -1; /* PageMap cache value */
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
page = (reg >> 8);
- if (page == lastpage)
+ if (page == mt9m111->lastpage)
return 0;
if (page > 2)
return -EINVAL;
ret = i2c_smbus_write_word_data(client, MT9M111_PAGE_MAP, swab16(page));
if (!ret)
- lastpage = page;
+ mt9m111->lastpage = page;
return ret;
}
@@ -362,21 +363,6 @@ static int mt9m111_reset(struct mt9m111 *mt9m111)
return ret;
}
-static unsigned long mt9m111_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned long flags = SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING |
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
-static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f)
-{
- return 0;
-}
-
static int mt9m111_make_rect(struct mt9m111 *mt9m111,
struct v4l2_rect *rect)
{
@@ -659,50 +645,6 @@ static int mt9m111_s_register(struct v4l2_subdev *sd,
}
#endif
-static const struct v4l2_queryctrl mt9m111_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Verticaly",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontaly",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, { /* gain = 1/32*val (=>gain=1 if val==32) */
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 63 * 2 * 2,
- .step = 1,
- .default_value = 32,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- }
-};
-
-static struct soc_camera_ops mt9m111_ops = {
- .query_bus_param = mt9m111_query_bus_param,
- .set_bus_param = mt9m111_set_bus_param,
- .controls = mt9m111_controls,
- .num_controls = ARRAY_SIZE(mt9m111_controls),
-};
-
static int mt9m111_set_flip(struct mt9m111 *mt9m111, int flip, int mask)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
@@ -743,7 +685,6 @@ static int mt9m111_set_global_gain(struct mt9m111 *mt9m111, int gain)
if (gain > 63 * 2 * 2)
return -EINVAL;
- mt9m111->gain = gain;
if ((gain >= 64 * 2) && (gain < 63 * 2 * 2))
val = (1 << 10) | (1 << 9) | (gain / 4);
else if ((gain >= 64) && (gain < 64 * 2))
@@ -757,118 +698,47 @@ static int mt9m111_set_global_gain(struct mt9m111 *mt9m111, int gain)
static int mt9m111_set_autoexposure(struct mt9m111 *mt9m111, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- int ret;
if (on)
- ret = reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
- else
- ret = reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
-
- if (!ret)
- mt9m111->autoexposure = on;
-
- return ret;
+ return reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
+ return reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
}
static int mt9m111_set_autowhitebalance(struct mt9m111 *mt9m111, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- int ret;
if (on)
- ret = reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
- else
- ret = reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
-
- if (!ret)
- mt9m111->autowhitebalance = on;
-
- return ret;
-}
-
-static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
- int data;
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- if (mt9m111->context == HIGHPOWER)
- data = reg_read(READ_MODE_B);
- else
- data = reg_read(READ_MODE_A);
-
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & MT9M111_RMB_MIRROR_ROWS);
- break;
- case V4L2_CID_HFLIP:
- if (mt9m111->context == HIGHPOWER)
- data = reg_read(READ_MODE_B);
- else
- data = reg_read(READ_MODE_A);
-
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS);
- break;
- case V4L2_CID_GAIN:
- data = mt9m111_get_global_gain(mt9m111);
- if (data < 0)
- return data;
- ctrl->value = data;
- break;
- case V4L2_CID_EXPOSURE_AUTO:
- ctrl->value = mt9m111->autoexposure;
- break;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ctrl->value = mt9m111->autowhitebalance;
- break;
- }
- return 0;
+ return reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
+ return reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
}
-static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int mt9m111_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
- const struct v4l2_queryctrl *qctrl;
- int ret;
-
- qctrl = soc_camera_find_qctrl(&mt9m111_ops, ctrl->id);
- if (!qctrl)
- return -EINVAL;
+ struct mt9m111 *mt9m111 = container_of(ctrl->handler,
+ struct mt9m111, hdl);
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- mt9m111->vflip = ctrl->value;
- ret = mt9m111_set_flip(mt9m111, ctrl->value,
+ return mt9m111_set_flip(mt9m111, ctrl->val,
MT9M111_RMB_MIRROR_ROWS);
- break;
case V4L2_CID_HFLIP:
- mt9m111->hflip = ctrl->value;
- ret = mt9m111_set_flip(mt9m111, ctrl->value,
+ return mt9m111_set_flip(mt9m111, ctrl->val,
MT9M111_RMB_MIRROR_COLS);
- break;
case V4L2_CID_GAIN:
- ret = mt9m111_set_global_gain(mt9m111, ctrl->value);
- break;
+ return mt9m111_set_global_gain(mt9m111, ctrl->val);
case V4L2_CID_EXPOSURE_AUTO:
- ret = mt9m111_set_autoexposure(mt9m111, ctrl->value);
- break;
+ return mt9m111_set_autoexposure(mt9m111, ctrl->val);
case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = mt9m111_set_autowhitebalance(mt9m111, ctrl->value);
- break;
- default:
- ret = -EINVAL;
+ return mt9m111_set_autowhitebalance(mt9m111, ctrl->val);
}
- return ret;
+ return -EINVAL;
}
static int mt9m111_suspend(struct mt9m111 *mt9m111)
{
- mt9m111->gain = mt9m111_get_global_gain(mt9m111);
+ v4l2_ctrl_s_ctrl(mt9m111->gain, mt9m111_get_global_gain(mt9m111));
return 0;
}
@@ -878,11 +748,7 @@ static void mt9m111_restore_state(struct mt9m111 *mt9m111)
mt9m111_set_context(mt9m111, mt9m111->context);
mt9m111_set_pixfmt(mt9m111, mt9m111->fmt->code);
mt9m111_setup_rect(mt9m111, &mt9m111->rect);
- mt9m111_set_flip(mt9m111, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
- mt9m111_set_flip(mt9m111, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
- mt9m111_set_global_gain(mt9m111, mt9m111->gain);
- mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
- mt9m111_set_autowhitebalance(mt9m111, mt9m111->autowhitebalance);
+ v4l2_ctrl_handler_setup(&mt9m111->hdl);
}
static int mt9m111_resume(struct mt9m111 *mt9m111)
@@ -910,8 +776,6 @@ static int mt9m111_init(struct mt9m111 *mt9m111)
ret = mt9m111_reset(mt9m111);
if (!ret)
ret = mt9m111_set_context(mt9m111, mt9m111->context);
- if (!ret)
- ret = mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
if (ret)
dev_err(&client->dev, "mt9m111 init failed: %d\n", ret);
return ret;
@@ -921,20 +785,12 @@ static int mt9m111_init(struct mt9m111 *mt9m111)
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
-static int mt9m111_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int mt9m111_video_probe(struct i2c_client *client)
{
struct mt9m111 *mt9m111 = to_mt9m111(client);
s32 data;
int ret;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
- mt9m111->autoexposure = 1;
- mt9m111->autowhitebalance = 1;
-
data = reg_read(CHIP_VERSION);
switch (data) {
@@ -948,17 +804,16 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
dev_info(&client->dev, "Detected a MT9M112 chip ID %x\n", data);
break;
default:
- ret = -ENODEV;
dev_err(&client->dev,
"No MT9M111/MT9M112/MT9M131 chip detected register read %x\n",
data);
- goto ei2c;
+ return -ENODEV;
}
ret = mt9m111_init(mt9m111);
-
-ei2c:
- return ret;
+ if (ret)
+ return ret;
+ return v4l2_ctrl_handler_setup(&mt9m111->hdl);
}
static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
@@ -995,9 +850,11 @@ out:
return ret;
}
+static const struct v4l2_ctrl_ops mt9m111_ctrl_ops = {
+ .s_ctrl = mt9m111_s_ctrl,
+};
+
static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
- .g_ctrl = mt9m111_g_ctrl,
- .s_ctrl = mt9m111_s_ctrl,
.g_chip_ident = mt9m111_g_chip_ident,
.s_power = mt9m111_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1016,6 +873,21 @@ static int mt9m111_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
return 0;
}
+static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
.s_mbus_fmt = mt9m111_s_fmt,
.g_mbus_fmt = mt9m111_g_fmt,
@@ -1024,6 +896,7 @@ static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
.g_crop = mt9m111_g_crop,
.cropcap = mt9m111_cropcap,
.enum_mbus_fmt = mt9m111_enum_fmt,
+ .g_mbus_config = mt9m111_g_mbus_config,
};
static struct v4l2_subdev_ops mt9m111_subdev_ops = {
@@ -1035,17 +908,10 @@ static int mt9m111_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9m111 *mt9m111;
- struct soc_camera_device *icd = client->dev.platform_data;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
int ret;
- if (!icd) {
- dev_err(&client->dev, "mt9m111: soc-camera data missing!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "mt9m111: driver needs platform data\n");
return -EINVAL;
@@ -1062,19 +928,37 @@ static int mt9m111_probe(struct i2c_client *client,
return -ENOMEM;
v4l2_i2c_subdev_init(&mt9m111->subdev, client, &mt9m111_subdev_ops);
+ v4l2_ctrl_handler_init(&mt9m111->hdl, 5);
+ v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ mt9m111->gain = v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
+ V4L2_CID_GAIN, 0, 63 * 2 * 2, 1, 32);
+ v4l2_ctrl_new_std_menu(&mt9m111->hdl,
+ &mt9m111_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
+ V4L2_EXPOSURE_AUTO);
+ mt9m111->subdev.ctrl_handler = &mt9m111->hdl;
+ if (mt9m111->hdl.error) {
+ int err = mt9m111->hdl.error;
- /* Second stage probe - when a capture adapter is there */
- icd->ops = &mt9m111_ops;
+ kfree(mt9m111);
+ return err;
+ }
+ /* Second stage probe - when a capture adapter is there */
mt9m111->rect.left = MT9M111_MIN_DARK_COLS;
mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
mt9m111->rect.width = MT9M111_MAX_WIDTH;
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
mt9m111->fmt = &mt9m111_colour_fmts[0];
+ mt9m111->lastpage = -1;
- ret = mt9m111_video_probe(icd, client);
+ ret = mt9m111_video_probe(client);
if (ret) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&mt9m111->hdl);
kfree(mt9m111);
}
@@ -1084,9 +968,9 @@ static int mt9m111_probe(struct i2c_client *client,
static int mt9m111_remove(struct i2c_client *client)
{
struct mt9m111 *mt9m111 = to_mt9m111(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- icd->ops = NULL;
+ v4l2_device_unregister_subdev(&mt9m111->subdev);
+ v4l2_ctrl_handler_free(&mt9m111->hdl);
kfree(mt9m111);
return 0;
diff --git a/drivers/media/video/mt9p031.c b/drivers/media/video/mt9p031.c
new file mode 100644
index 00000000000..73c068993f0
--- /dev/null
+++ b/drivers/media/video/mt9p031.c
@@ -0,0 +1,964 @@
+/*
+ * Driver for MT9P031 CMOS Image Sensor from Aptina
+ *
+ * Copyright (C) 2011, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Copyright (C) 2011, Javier Martin <javier.martin@vista-silicon.com>
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * Based on the MT9V032 driver and Bastian Hecht's code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/log2.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <media/v4l2-subdev.h>
+#include <linux/videodev2.h>
+
+#include <media/mt9p031.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#define MT9P031_PIXEL_ARRAY_WIDTH 2752
+#define MT9P031_PIXEL_ARRAY_HEIGHT 2004
+
+#define MT9P031_CHIP_VERSION 0x00
+#define MT9P031_CHIP_VERSION_VALUE 0x1801
+#define MT9P031_ROW_START 0x01
+#define MT9P031_ROW_START_MIN 0
+#define MT9P031_ROW_START_MAX 2004
+#define MT9P031_ROW_START_DEF 54
+#define MT9P031_COLUMN_START 0x02
+#define MT9P031_COLUMN_START_MIN 0
+#define MT9P031_COLUMN_START_MAX 2750
+#define MT9P031_COLUMN_START_DEF 16
+#define MT9P031_WINDOW_HEIGHT 0x03
+#define MT9P031_WINDOW_HEIGHT_MIN 2
+#define MT9P031_WINDOW_HEIGHT_MAX 2006
+#define MT9P031_WINDOW_HEIGHT_DEF 1944
+#define MT9P031_WINDOW_WIDTH 0x04
+#define MT9P031_WINDOW_WIDTH_MIN 2
+#define MT9P031_WINDOW_WIDTH_MAX 2752
+#define MT9P031_WINDOW_WIDTH_DEF 2592
+#define MT9P031_HORIZONTAL_BLANK 0x05
+#define MT9P031_HORIZONTAL_BLANK_MIN 0
+#define MT9P031_HORIZONTAL_BLANK_MAX 4095
+#define MT9P031_VERTICAL_BLANK 0x06
+#define MT9P031_VERTICAL_BLANK_MIN 0
+#define MT9P031_VERTICAL_BLANK_MAX 4095
+#define MT9P031_VERTICAL_BLANK_DEF 25
+#define MT9P031_OUTPUT_CONTROL 0x07
+#define MT9P031_OUTPUT_CONTROL_CEN 2
+#define MT9P031_OUTPUT_CONTROL_SYN 1
+#define MT9P031_OUTPUT_CONTROL_DEF 0x1f82
+#define MT9P031_SHUTTER_WIDTH_UPPER 0x08
+#define MT9P031_SHUTTER_WIDTH_LOWER 0x09
+#define MT9P031_SHUTTER_WIDTH_MIN 1
+#define MT9P031_SHUTTER_WIDTH_MAX 1048575
+#define MT9P031_SHUTTER_WIDTH_DEF 1943
+#define MT9P031_PLL_CONTROL 0x10
+#define MT9P031_PLL_CONTROL_PWROFF 0x0050
+#define MT9P031_PLL_CONTROL_PWRON 0x0051
+#define MT9P031_PLL_CONTROL_USEPLL 0x0052
+#define MT9P031_PLL_CONFIG_1 0x11
+#define MT9P031_PLL_CONFIG_2 0x12
+#define MT9P031_PIXEL_CLOCK_CONTROL 0x0a
+#define MT9P031_FRAME_RESTART 0x0b
+#define MT9P031_SHUTTER_DELAY 0x0c
+#define MT9P031_RST 0x0d
+#define MT9P031_RST_ENABLE 1
+#define MT9P031_RST_DISABLE 0
+#define MT9P031_READ_MODE_1 0x1e
+#define MT9P031_READ_MODE_2 0x20
+#define MT9P031_READ_MODE_2_ROW_MIR (1 << 15)
+#define MT9P031_READ_MODE_2_COL_MIR (1 << 14)
+#define MT9P031_READ_MODE_2_ROW_BLC (1 << 6)
+#define MT9P031_ROW_ADDRESS_MODE 0x22
+#define MT9P031_COLUMN_ADDRESS_MODE 0x23
+#define MT9P031_GLOBAL_GAIN 0x35
+#define MT9P031_GLOBAL_GAIN_MIN 8
+#define MT9P031_GLOBAL_GAIN_MAX 1024
+#define MT9P031_GLOBAL_GAIN_DEF 8
+#define MT9P031_GLOBAL_GAIN_MULT (1 << 6)
+#define MT9P031_ROW_BLACK_DEF_OFFSET 0x4b
+#define MT9P031_TEST_PATTERN 0xa0
+#define MT9P031_TEST_PATTERN_SHIFT 3
+#define MT9P031_TEST_PATTERN_ENABLE (1 << 0)
+#define MT9P031_TEST_PATTERN_DISABLE (0 << 0)
+#define MT9P031_TEST_PATTERN_GREEN 0xa1
+#define MT9P031_TEST_PATTERN_RED 0xa2
+#define MT9P031_TEST_PATTERN_BLUE 0xa3
+
+struct mt9p031_pll_divs {
+ u32 ext_freq;
+ u32 target_freq;
+ u8 m;
+ u8 n;
+ u8 p1;
+};
+
+struct mt9p031 {
+ struct v4l2_subdev subdev;
+ struct media_pad pad;
+ struct v4l2_rect crop; /* Sensor window */
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_ctrl_handler ctrls;
+ struct mt9p031_platform_data *pdata;
+ struct mutex power_lock; /* lock to protect power_count */
+ int power_count;
+ u16 xskip;
+ u16 yskip;
+
+ const struct mt9p031_pll_divs *pll;
+
+ /* Registers cache */
+ u16 output_control;
+ u16 mode2;
+};
+
+static struct mt9p031 *to_mt9p031(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mt9p031, subdev);
+}
+
+static int mt9p031_read(struct i2c_client *client, u8 reg)
+{
+ s32 data = i2c_smbus_read_word_data(client, reg);
+ return data < 0 ? data : be16_to_cpu(data);
+}
+
+static int mt9p031_write(struct i2c_client *client, u8 reg, u16 data)
+{
+ return i2c_smbus_write_word_data(client, reg, cpu_to_be16(data));
+}
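These Aptina sensors transfer their 16-bit registers MSB first, while the SMBus word helpers assume the LSB-first SMBus byte order, hence the be16_to_cpu()/cpu_to_be16() swaps. A short usage sketch, the same identity check mt9p031_registered() performs further down:

	s32 version = mt9p031_read(client, MT9P031_CHIP_VERSION);

	if (version != MT9P031_CHIP_VERSION_VALUE)	/* expected 0x1801 */
		dev_err(&client->dev, "unexpected chip version 0x%04x\n",
			version);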
+
+static int mt9p031_set_output_control(struct mt9p031 *mt9p031, u16 clear,
+ u16 set)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ u16 value = (mt9p031->output_control & ~clear) | set;
+ int ret;
+
+ ret = mt9p031_write(client, MT9P031_OUTPUT_CONTROL, value);
+ if (ret < 0)
+ return ret;
+
+ mt9p031->output_control = value;
+ return 0;
+}
+
+static int mt9p031_set_mode2(struct mt9p031 *mt9p031, u16 clear, u16 set)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ u16 value = (mt9p031->mode2 & ~clear) | set;
+ int ret;
+
+ ret = mt9p031_write(client, MT9P031_READ_MODE_2, value);
+ if (ret < 0)
+ return ret;
+
+ mt9p031->mode2 = value;
+ return 0;
+}
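mt9p031_set_output_control() and mt9p031_set_mode2() are cached read-modify-write helpers: the last value written is kept in the driver state so individual bits can be toggled without an extra I2C read. A usage sketch with the flag macros defined above, mirroring what the HFLIP control handler does later in the file:

	/* Enable column mirroring without disturbing the other READ_MODE_2 bits. */
	ret = mt9p031_set_mode2(mt9p031, 0, MT9P031_READ_MODE_2_COL_MIR);
	if (ret < 0)
		return ret;

	/* ...and clear it again later. */
	ret = mt9p031_set_mode2(mt9p031, MT9P031_READ_MODE_2_COL_MIR, 0);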
+
+static int mt9p031_reset(struct mt9p031 *mt9p031)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ int ret;
+
+ /* Disable chip output, synchronous option update */
+ ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_ENABLE);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_set_output_control(mt9p031, MT9P031_OUTPUT_CONTROL_CEN,
+ 0);
+}
+
+/*
+ * This static table uses ext_freq and target_freq values to select suitable
+ * PLL dividers m, n and p1, which have been calculated as specified on page
+ * 36 of Aptina's mt9p031 datasheet. New values should be added here.
+ */
+static const struct mt9p031_pll_divs mt9p031_divs[] = {
+ /* ext_freq target_freq m n p1 */
+ {21000000, 48000000, 26, 2, 6}
+};
+
+static int mt9p031_pll_get_divs(struct mt9p031 *mt9p031)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt9p031_divs); i++) {
+ if (mt9p031_divs[i].ext_freq == mt9p031->pdata->ext_freq &&
+ mt9p031_divs[i].target_freq == mt9p031->pdata->target_freq) {
+ mt9p031->pll = &mt9p031_divs[i];
+ return 0;
+ }
+ }
+
+ dev_err(&client->dev, "Couldn't find PLL dividers for ext_freq = %d, "
+ "target_freq = %d\n", mt9p031->pdata->ext_freq,
+ mt9p031->pdata->target_freq);
+ return -EINVAL;
+}
+
+static int mt9p031_pll_enable(struct mt9p031 *mt9p031)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ int ret;
+
+ ret = mt9p031_write(client, MT9P031_PLL_CONTROL,
+ MT9P031_PLL_CONTROL_PWRON);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9p031_write(client, MT9P031_PLL_CONFIG_1,
+ (mt9p031->pll->m << 8) | (mt9p031->pll->n - 1));
+ if (ret < 0)
+ return ret;
+
+ ret = mt9p031_write(client, MT9P031_PLL_CONFIG_2, mt9p031->pll->p1 - 1);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+ ret = mt9p031_write(client, MT9P031_PLL_CONTROL,
+ MT9P031_PLL_CONTROL_PWRON |
+ MT9P031_PLL_CONTROL_USEPLL);
+ return ret;
+}
+
+static inline int mt9p031_pll_disable(struct mt9p031 *mt9p031)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+
+ return mt9p031_write(client, MT9P031_PLL_CONTROL,
+ MT9P031_PLL_CONTROL_PWROFF);
+}
+
+static int mt9p031_power_on(struct mt9p031 *mt9p031)
+{
+ /* Ensure RESET_BAR is low */
+ if (mt9p031->pdata->reset) {
+ mt9p031->pdata->reset(&mt9p031->subdev, 1);
+ usleep_range(1000, 2000);
+ }
+
+ /* Enable clock */
+ if (mt9p031->pdata->set_xclk)
+ mt9p031->pdata->set_xclk(&mt9p031->subdev,
+ mt9p031->pdata->ext_freq);
+
+ /* Now RESET_BAR must be high */
+ if (mt9p031->pdata->reset) {
+ mt9p031->pdata->reset(&mt9p031->subdev, 0);
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+static void mt9p031_power_off(struct mt9p031 *mt9p031)
+{
+ if (mt9p031->pdata->reset) {
+ mt9p031->pdata->reset(&mt9p031->subdev, 1);
+ usleep_range(1000, 2000);
+ }
+
+ if (mt9p031->pdata->set_xclk)
+ mt9p031->pdata->set_xclk(&mt9p031->subdev, 0);
+}
+
+static int __mt9p031_set_power(struct mt9p031 *mt9p031, bool on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ int ret;
+
+ if (!on) {
+ mt9p031_power_off(mt9p031);
+ return 0;
+ }
+
+ ret = mt9p031_power_on(mt9p031);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9p031_reset(mt9p031);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to reset the camera\n");
+ return ret;
+ }
+
+ return v4l2_ctrl_handler_setup(&mt9p031->ctrls);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev video operations
+ */
+
+static int mt9p031_set_params(struct mt9p031 *mt9p031)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ struct v4l2_mbus_framefmt *format = &mt9p031->format;
+ const struct v4l2_rect *crop = &mt9p031->crop;
+ unsigned int hblank;
+ unsigned int vblank;
+ unsigned int xskip;
+ unsigned int yskip;
+ unsigned int xbin;
+ unsigned int ybin;
+ int ret;
+
+ /* Window position and size.
+ *
+ * TODO: Make sure the start coordinates and window size match the
+ * skipping, binning and mirroring (see description of registers 2 and 4
+ * in table 13, and Binning section on page 41).
+ */
+ ret = mt9p031_write(client, MT9P031_COLUMN_START, crop->left);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_ROW_START, crop->top);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_WINDOW_WIDTH, crop->width - 1);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_WINDOW_HEIGHT, crop->height - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Row and column binning and skipping. Use the maximum binning value
+ * compatible with the skipping settings.
+ */
+ xskip = DIV_ROUND_CLOSEST(crop->width, format->width);
+ yskip = DIV_ROUND_CLOSEST(crop->height, format->height);
+ xbin = 1 << (ffs(xskip) - 1);
+ ybin = 1 << (ffs(yskip) - 1);
+
+ ret = mt9p031_write(client, MT9P031_COLUMN_ADDRESS_MODE,
+ ((xbin - 1) << 4) | (xskip - 1));
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_ROW_ADDRESS_MODE,
+ ((ybin - 1) << 4) | (yskip - 1));
+ if (ret < 0)
+ return ret;
+
+ /* Blanking - use minimum value for horizontal blanking and default
+ * value for vertical blanking.
+ */
+ hblank = 346 * ybin + 64 + (80 >> max_t(unsigned int, xbin, 3));
+ vblank = MT9P031_VERTICAL_BLANK_DEF;
+
+ ret = mt9p031_write(client, MT9P031_HORIZONTAL_BLANK, hblank);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_VERTICAL_BLANK, vblank);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
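A worked example of the skip/bin arithmetic above, assuming the default 2592x1944 crop scaled down to 1296x972 (values picked for illustration, not taken from the patch):

	xskip = DIV_ROUND_CLOSEST(2592, 1296);	/* = 2 */
	yskip = DIV_ROUND_CLOSEST(1944, 972);	/* = 2 */
	xbin = 1 << (ffs(xskip) - 1);	/* = 2, the lowest set bit of the skip factor */
	ybin = 1 << (ffs(yskip) - 1);	/* = 2 */
	/* COLUMN_ADDRESS_MODE = ((2 - 1) << 4) | (2 - 1) = 0x11, likewise for rows. */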
+
+static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ int ret;
+
+ if (!enable) {
+ /* Stop sensor readout */
+ ret = mt9p031_set_output_control(mt9p031,
+ MT9P031_OUTPUT_CONTROL_CEN, 0);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_pll_disable(mt9p031);
+ }
+
+ ret = mt9p031_set_params(mt9p031);
+ if (ret < 0)
+ return ret;
+
+ /* Switch to master "normal" mode */
+ ret = mt9p031_set_output_control(mt9p031, 0,
+ MT9P031_OUTPUT_CONTROL_CEN);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_pll_enable(mt9p031);
+}
+
+static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+
+ if (code->pad || code->index)
+ return -EINVAL;
+
+ code->code = mt9p031->format.code;
+ return 0;
+}
+
+static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+
+ if (fse->index >= 8 || fse->code != mt9p031->format.code)
+ return -EINVAL;
+
+ fse->min_width = MT9P031_WINDOW_WIDTH_DEF
+ / min_t(unsigned int, 7, fse->index + 1);
+ fse->max_width = fse->min_width;
+ fse->min_height = MT9P031_WINDOW_HEIGHT_DEF / (fse->index + 1);
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(fh, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &mt9p031->format;
+ default:
+ return NULL;
+ }
+}
+
+static struct v4l2_rect *
+__mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(fh, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &mt9p031->crop;
+ default:
+ return NULL;
+ }
+}
+
+static int mt9p031_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+
+ fmt->format = *__mt9p031_get_pad_format(mt9p031, fh, fmt->pad,
+ fmt->which);
+ return 0;
+}
+
+static int mt9p031_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_rect *__crop;
+ unsigned int width;
+ unsigned int height;
+ unsigned int hratio;
+ unsigned int vratio;
+
+ __crop = __mt9p031_get_pad_crop(mt9p031, fh, format->pad,
+ format->which);
+
+ /* Clamp the width and height to avoid dividing by zero. */
+ width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
+ max(__crop->width / 7, MT9P031_WINDOW_WIDTH_MIN),
+ __crop->width);
+ height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
+ max(__crop->height / 8, MT9P031_WINDOW_HEIGHT_MIN),
+ __crop->height);
+
+ hratio = DIV_ROUND_CLOSEST(__crop->width, width);
+ vratio = DIV_ROUND_CLOSEST(__crop->height, height);
+
+ __format = __mt9p031_get_pad_format(mt9p031, fh, format->pad,
+ format->which);
+ __format->width = __crop->width / hratio;
+ __format->height = __crop->height / vratio;
+
+ format->format = *__format;
+
+ return 0;
+}
+
+static int mt9p031_get_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+
+ crop->rect = *__mt9p031_get_pad_crop(mt9p031, fh, crop->pad,
+ crop->which);
+ return 0;
+}
+
+static int mt9p031_set_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_rect *__crop;
+ struct v4l2_rect rect;
+
+ /* Clamp the crop rectangle boundaries and align them to a multiple of 2
+ * pixels to ensure a GRBG Bayer pattern.
+ */
+ rect.left = clamp(ALIGN(crop->rect.left, 2), MT9P031_COLUMN_START_MIN,
+ MT9P031_COLUMN_START_MAX);
+ rect.top = clamp(ALIGN(crop->rect.top, 2), MT9P031_ROW_START_MIN,
+ MT9P031_ROW_START_MAX);
+ rect.width = clamp(ALIGN(crop->rect.width, 2),
+ MT9P031_WINDOW_WIDTH_MIN,
+ MT9P031_WINDOW_WIDTH_MAX);
+ rect.height = clamp(ALIGN(crop->rect.height, 2),
+ MT9P031_WINDOW_HEIGHT_MIN,
+ MT9P031_WINDOW_HEIGHT_MAX);
+
+ rect.width = min(rect.width, MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min(rect.height, MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
+
+ __crop = __mt9p031_get_pad_crop(mt9p031, fh, crop->pad, crop->which);
+
+ if (rect.width != __crop->width || rect.height != __crop->height) {
+ /* Reset the output image size if the crop rectangle size has
+ * been modified.
+ */
+ __format = __mt9p031_get_pad_format(mt9p031, fh, crop->pad,
+ crop->which);
+ __format->width = rect.width;
+ __format->height = rect.height;
+ }
+
+ *__crop = rect;
+ crop->rect = rect;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev control operations
+ */
+
+#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001)
+
+static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9p031 *mt9p031 =
+ container_of(ctrl->handler, struct mt9p031, ctrls);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
+ u16 data;
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ret = mt9p031_write(client, MT9P031_SHUTTER_WIDTH_UPPER,
+ (ctrl->val >> 16) & 0xffff);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_write(client, MT9P031_SHUTTER_WIDTH_LOWER,
+ ctrl->val & 0xffff);
+
+ case V4L2_CID_GAIN:
+ /* Gain is controlled by 2 analog stages and a digital stage.
+ * Valid values for the 3 stages are
+ *
+ * Stage Min Max Step
+ * ------------------------------------------
+ * First analog stage x1 x2 1
+ * Second analog stage x1 x4 0.125
+ * Digital stage x1 x16 0.125
+ *
+ * To minimize noise, the gain stages should be used in the
+ * second analog stage, first analog stage, digital stage order.
+ * Gain from a previous stage should be pushed to its maximum
+ * value before the next stage is used.
+ */
+ if (ctrl->val <= 32) {
+ data = ctrl->val;
+ } else if (ctrl->val <= 64) {
+ ctrl->val &= ~1;
+ data = (1 << 6) | (ctrl->val >> 1);
+ } else {
+ ctrl->val &= ~7;
+ data = ((ctrl->val - 64) << 5) | (1 << 6) | 32;
+ }
+
+ return mt9p031_write(client, MT9P031_GLOBAL_GAIN, data);
+
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ return mt9p031_set_mode2(mt9p031,
+ 0, MT9P031_READ_MODE_2_COL_MIR);
+ else
+ return mt9p031_set_mode2(mt9p031,
+ MT9P031_READ_MODE_2_COL_MIR, 0);
+
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ return mt9p031_set_mode2(mt9p031,
+ 0, MT9P031_READ_MODE_2_ROW_MIR);
+ else
+ return mt9p031_set_mode2(mt9p031,
+ MT9P031_READ_MODE_2_ROW_MIR, 0);
+
+ case V4L2_CID_TEST_PATTERN:
+ if (!ctrl->val) {
+ ret = mt9p031_set_mode2(mt9p031,
+ 0, MT9P031_READ_MODE_2_ROW_BLC);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_write(client, MT9P031_TEST_PATTERN,
+ MT9P031_TEST_PATTERN_DISABLE);
+ }
+
+ ret = mt9p031_write(client, MT9P031_TEST_PATTERN_GREEN, 0x05a0);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_TEST_PATTERN_RED, 0x0a50);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_TEST_PATTERN_BLUE, 0x0aa0);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9p031_set_mode2(mt9p031, MT9P031_READ_MODE_2_ROW_BLC,
+ 0);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_ROW_BLACK_DEF_OFFSET, 0);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_write(client, MT9P031_TEST_PATTERN,
+ ((ctrl->val - 1) << MT9P031_TEST_PATTERN_SHIFT)
+ | MT9P031_TEST_PATTERN_ENABLE);
+ }
+ return 0;
+}
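A worked example of the gain encoding in the V4L2_CID_GAIN case, assuming the control value is expressed in 1/8 gain steps (so MT9P031_GLOBAL_GAIN_MIN = 8 corresponds to x1):

	/*
	 * ctrl->val = 48 (x6 under the 1/8-step assumption):
	 * 48 <= 64, so 48 &= ~1 stays 48 and data = (1 << 6) | (48 >> 1) = 0x58,
	 * i.e. bit 6 doubles the analog gain and bits [5:0] = 24 contribute
	 * x3 (24/8), giving x6 overall.
	 */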
+
+static struct v4l2_ctrl_ops mt9p031_ctrl_ops = {
+ .s_ctrl = mt9p031_s_ctrl,
+};
+
+static const char * const mt9p031_test_pattern_menu[] = {
+ "Disabled",
+ "Color Field",
+ "Horizontal Gradient",
+ "Vertical Gradient",
+ "Diagonal Gradient",
+ "Classic Test Pattern",
+ "Walking 1s",
+ "Monochrome Horizontal Bars",
+ "Monochrome Vertical Bars",
+ "Vertical Color Bars",
+};
+
+static const struct v4l2_ctrl_config mt9p031_ctrls[] = {
+ {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Test Pattern",
+ .min = 0,
+ .max = ARRAY_SIZE(mt9p031_test_pattern_menu) - 1,
+ .step = 0,
+ .def = 0,
+ .flags = 0,
+ .menu_skip_mask = 0,
+ .qmenu = mt9p031_test_pattern_menu,
+ }
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+static int mt9p031_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ int ret = 0;
+
+ mutex_lock(&mt9p031->power_lock);
+
+ /* If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (mt9p031->power_count == !on) {
+ ret = __mt9p031_set_power(mt9p031, !!on);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Update the power count. */
+ mt9p031->power_count += on ? 1 : -1;
+ WARN_ON(mt9p031->power_count < 0);
+
+out:
+ mutex_unlock(&mt9p031->power_lock);
+ return ret;
+}
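The power use count means the sensor is physically powered up only for the first user and powered down again when the last one goes away; every caller has to keep its s_power(1)/s_power(0) calls balanced. A minimal usage sketch, with subdev pointing at the registered mt9p031 subdevice:

	ret = v4l2_subdev_call(subdev, core, s_power, 1);
	if (ret < 0)
		return ret;

	/* ... talk to the sensor ... */

	v4l2_subdev_call(subdev, core, s_power, 0);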
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev internal operations
+ */
+
+static int mt9p031_registered(struct v4l2_subdev *subdev)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ s32 data;
+ int ret;
+
+ ret = mt9p031_power_on(mt9p031);
+ if (ret < 0) {
+ dev_err(&client->dev, "MT9P031 power up failed\n");
+ return ret;
+ }
+
+ /* Read out the chip version register */
+ data = mt9p031_read(client, MT9P031_CHIP_VERSION);
+ if (data != MT9P031_CHIP_VERSION_VALUE) {
+ dev_err(&client->dev, "MT9P031 not detected, wrong version "
+ "0x%04x\n", data);
+ return -ENODEV;
+ }
+
+ mt9p031_power_off(mt9p031);
+
+ dev_info(&client->dev, "MT9P031 detected at address 0x%02x\n",
+ client->addr);
+
+ return ret;
+}
+
+static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ crop = v4l2_subdev_get_try_crop(fh, 0);
+ crop->left = MT9P031_COLUMN_START_DEF;
+ crop->top = MT9P031_ROW_START_DEF;
+ crop->width = MT9P031_WINDOW_WIDTH_DEF;
+ crop->height = MT9P031_WINDOW_HEIGHT_DEF;
+
+ format = v4l2_subdev_get_try_format(fh, 0);
+
+ if (mt9p031->pdata->version == MT9P031_MONOCHROME_VERSION)
+ format->code = V4L2_MBUS_FMT_Y12_1X12;
+ else
+ format->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+
+ format->width = MT9P031_WINDOW_WIDTH_DEF;
+ format->height = MT9P031_WINDOW_HEIGHT_DEF;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ mt9p031->xskip = 1;
+ mt9p031->yskip = 1;
+ return mt9p031_set_power(subdev, 1);
+}
+
+static int mt9p031_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return mt9p031_set_power(subdev, 0);
+}
+
+static struct v4l2_subdev_core_ops mt9p031_subdev_core_ops = {
+ .s_power = mt9p031_set_power,
+};
+
+static struct v4l2_subdev_video_ops mt9p031_subdev_video_ops = {
+ .s_stream = mt9p031_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
+ .enum_mbus_code = mt9p031_enum_mbus_code,
+ .enum_frame_size = mt9p031_enum_frame_size,
+ .get_fmt = mt9p031_get_format,
+ .set_fmt = mt9p031_set_format,
+ .get_crop = mt9p031_get_crop,
+ .set_crop = mt9p031_set_crop,
+};
+
+static struct v4l2_subdev_ops mt9p031_subdev_ops = {
+ .core = &mt9p031_subdev_core_ops,
+ .video = &mt9p031_subdev_video_ops,
+ .pad = &mt9p031_subdev_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops mt9p031_subdev_internal_ops = {
+ .registered = mt9p031_registered,
+ .open = mt9p031_open,
+ .close = mt9p031_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Driver initialization and probing
+ */
+
+static int mt9p031_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct mt9p031_platform_data *pdata = client->dev.platform_data;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct mt9p031 *mt9p031;
+ unsigned int i;
+ int ret;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_warn(&client->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
+ return -EIO;
+ }
+
+ mt9p031 = kzalloc(sizeof(*mt9p031), GFP_KERNEL);
+ if (mt9p031 == NULL)
+ return -ENOMEM;
+
+ mt9p031->pdata = pdata;
+ mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
+ mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
+
+ v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 4);
+
+ v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
+ V4L2_CID_EXPOSURE, MT9P031_SHUTTER_WIDTH_MIN,
+ MT9P031_SHUTTER_WIDTH_MAX, 1,
+ MT9P031_SHUTTER_WIDTH_DEF);
+ v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
+ V4L2_CID_GAIN, MT9P031_GLOBAL_GAIN_MIN,
+ MT9P031_GLOBAL_GAIN_MAX, 1, MT9P031_GLOBAL_GAIN_DEF);
+ v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mt9p031_ctrls); ++i)
+ v4l2_ctrl_new_custom(&mt9p031->ctrls, &mt9p031_ctrls[i], NULL);
+
+ mt9p031->subdev.ctrl_handler = &mt9p031->ctrls;
+
+ if (mt9p031->ctrls.error)
+ printk(KERN_INFO "%s: control initialization error %d\n",
+ __func__, mt9p031->ctrls.error);
+
+ mutex_init(&mt9p031->power_lock);
+ v4l2_i2c_subdev_init(&mt9p031->subdev, client, &mt9p031_subdev_ops);
+ mt9p031->subdev.internal_ops = &mt9p031_subdev_internal_ops;
+
+ mt9p031->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&mt9p031->subdev.entity, 1, &mt9p031->pad, 0);
+ if (ret < 0)
+ goto done;
+
+ mt9p031->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ mt9p031->crop.width = MT9P031_WINDOW_WIDTH_DEF;
+ mt9p031->crop.height = MT9P031_WINDOW_HEIGHT_DEF;
+ mt9p031->crop.left = MT9P031_COLUMN_START_DEF;
+ mt9p031->crop.top = MT9P031_ROW_START_DEF;
+
+ if (mt9p031->pdata->version == MT9P031_MONOCHROME_VERSION)
+ mt9p031->format.code = V4L2_MBUS_FMT_Y12_1X12;
+ else
+ mt9p031->format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+
+ mt9p031->format.width = MT9P031_WINDOW_WIDTH_DEF;
+ mt9p031->format.height = MT9P031_WINDOW_HEIGHT_DEF;
+ mt9p031->format.field = V4L2_FIELD_NONE;
+ mt9p031->format.colorspace = V4L2_COLORSPACE_SRGB;
+
+ ret = mt9p031_pll_get_divs(mt9p031);
+
+done:
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(&mt9p031->ctrls);
+ media_entity_cleanup(&mt9p031->subdev.entity);
+ kfree(mt9p031);
+ }
+
+ return ret;
+}
+
+static int mt9p031_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+
+ v4l2_ctrl_handler_free(&mt9p031->ctrls);
+ v4l2_device_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ kfree(mt9p031);
+
+ return 0;
+}
+
+static const struct i2c_device_id mt9p031_id[] = {
+ { "mt9p031", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mt9p031_id);
+
+static struct i2c_driver mt9p031_i2c_driver = {
+ .driver = {
+ .name = "mt9p031",
+ },
+ .probe = mt9p031_probe,
+ .remove = mt9p031_remove,
+ .id_table = mt9p031_id,
+};
+
+static int __init mt9p031_mod_init(void)
+{
+ return i2c_add_driver(&mt9p031_i2c_driver);
+}
+
+static void __exit mt9p031_mod_exit(void)
+{
+ i2c_del_driver(&mt9p031_i2c_driver);
+}
+
+module_init(mt9p031_mod_init);
+module_exit(mt9p031_mod_exit);
+
+MODULE_DESCRIPTION("Aptina MT9P031 Camera driver");
+MODULE_AUTHOR("Bastian Hecht <hechtb@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/mt9t001.c b/drivers/media/video/mt9t001.c
new file mode 100644
index 00000000000..08074b8a273
--- /dev/null
+++ b/drivers/media/video/mt9t001.c
@@ -0,0 +1,836 @@
+/*
+ * Driver for MT9T001 CMOS Image Sensor from Aptina (Micron)
+ *
+ * Copyright (C) 2010-2011, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * Based on the MT9M001 driver,
+ *
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-mediabus.h>
+
+#include <media/mt9t001.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#define MT9T001_PIXEL_ARRAY_HEIGHT 1568
+#define MT9T001_PIXEL_ARRAY_WIDTH 2112
+
+#define MT9T001_CHIP_VERSION 0x00
+#define MT9T001_CHIP_ID 0x1621
+#define MT9T001_ROW_START 0x01
+#define MT9T001_ROW_START_MIN 0
+#define MT9T001_ROW_START_DEF 20
+#define MT9T001_ROW_START_MAX 1534
+#define MT9T001_COLUMN_START 0x02
+#define MT9T001_COLUMN_START_MIN 0
+#define MT9T001_COLUMN_START_DEF 32
+#define MT9T001_COLUMN_START_MAX 2046
+#define MT9T001_WINDOW_HEIGHT 0x03
+#define MT9T001_WINDOW_HEIGHT_MIN 1
+#define MT9T001_WINDOW_HEIGHT_DEF 1535
+#define MT9T001_WINDOW_HEIGHT_MAX 1567
+#define MT9T001_WINDOW_WIDTH 0x04
+#define MT9T001_WINDOW_WIDTH_MIN 1
+#define MT9T001_WINDOW_WIDTH_DEF 2047
+#define MT9T001_WINDOW_WIDTH_MAX 2111
+#define MT9T001_HORIZONTAL_BLANKING 0x05
+#define MT9T001_HORIZONTAL_BLANKING_MIN 21
+#define MT9T001_HORIZONTAL_BLANKING_MAX 1023
+#define MT9T001_VERTICAL_BLANKING 0x06
+#define MT9T001_VERTICAL_BLANKING_MIN 3
+#define MT9T001_VERTICAL_BLANKING_MAX 1023
+#define MT9T001_OUTPUT_CONTROL 0x07
+#define MT9T001_OUTPUT_CONTROL_SYNC (1 << 0)
+#define MT9T001_OUTPUT_CONTROL_CHIP_ENABLE (1 << 1)
+#define MT9T001_OUTPUT_CONTROL_TEST_DATA (1 << 6)
+#define MT9T001_SHUTTER_WIDTH_HIGH 0x08
+#define MT9T001_SHUTTER_WIDTH_LOW 0x09
+#define MT9T001_SHUTTER_WIDTH_MIN 1
+#define MT9T001_SHUTTER_WIDTH_DEF 1561
+#define MT9T001_SHUTTER_WIDTH_MAX (1024 * 1024)
+#define MT9T001_PIXEL_CLOCK 0x0a
+#define MT9T001_PIXEL_CLOCK_INVERT (1 << 15)
+#define MT9T001_PIXEL_CLOCK_SHIFT_MASK (7 << 8)
+#define MT9T001_PIXEL_CLOCK_SHIFT_SHIFT 8
+#define MT9T001_PIXEL_CLOCK_DIVIDE_MASK (0x7f << 0)
+#define MT9T001_FRAME_RESTART 0x0b
+#define MT9T001_SHUTTER_DELAY 0x0c
+#define MT9T001_SHUTTER_DELAY_MAX 2047
+#define MT9T001_RESET 0x0d
+#define MT9T001_READ_MODE1 0x1e
+#define MT9T001_READ_MODE_SNAPSHOT (1 << 8)
+#define MT9T001_READ_MODE_STROBE_ENABLE (1 << 9)
+#define MT9T001_READ_MODE_STROBE_WIDTH (1 << 10)
+#define MT9T001_READ_MODE_STROBE_OVERRIDE (1 << 11)
+#define MT9T001_READ_MODE2 0x20
+#define MT9T001_READ_MODE_BAD_FRAMES (1 << 0)
+#define MT9T001_READ_MODE_LINE_VALID_CONTINUOUS (1 << 9)
+#define MT9T001_READ_MODE_LINE_VALID_FRAME (1 << 10)
+#define MT9T001_READ_MODE3 0x21
+#define MT9T001_READ_MODE_GLOBAL_RESET (1 << 0)
+#define MT9T001_READ_MODE_GHST_CTL (1 << 1)
+#define MT9T001_ROW_ADDRESS_MODE 0x22
+#define MT9T001_ROW_SKIP_MASK (7 << 0)
+#define MT9T001_ROW_BIN_MASK (3 << 3)
+#define MT9T001_ROW_BIN_SHIFT 3
+#define MT9T001_COLUMN_ADDRESS_MODE 0x23
+#define MT9T001_COLUMN_SKIP_MASK (7 << 0)
+#define MT9T001_COLUMN_BIN_MASK (3 << 3)
+#define MT9T001_COLUMN_BIN_SHIFT 3
+#define MT9T001_GREEN1_GAIN 0x2b
+#define MT9T001_BLUE_GAIN 0x2c
+#define MT9T001_RED_GAIN 0x2d
+#define MT9T001_GREEN2_GAIN 0x2e
+#define MT9T001_TEST_DATA 0x32
+#define MT9T001_GLOBAL_GAIN 0x35
+#define MT9T001_GLOBAL_GAIN_MIN 8
+#define MT9T001_GLOBAL_GAIN_MAX 1024
+#define MT9T001_BLACK_LEVEL 0x49
+#define MT9T001_ROW_BLACK_DEFAULT_OFFSET 0x4b
+#define MT9T001_BLC_DELTA_THRESHOLDS 0x5d
+#define MT9T001_CAL_THRESHOLDS 0x5f
+#define MT9T001_GREEN1_OFFSET 0x60
+#define MT9T001_GREEN2_OFFSET 0x61
+#define MT9T001_BLACK_LEVEL_CALIBRATION 0x62
+#define MT9T001_BLACK_LEVEL_OVERRIDE (1 << 0)
+#define MT9T001_BLACK_LEVEL_DISABLE_OFFSET (1 << 1)
+#define MT9T001_BLACK_LEVEL_RECALCULATE (1 << 12)
+#define MT9T001_BLACK_LEVEL_LOCK_RED_BLUE (1 << 13)
+#define MT9T001_BLACK_LEVEL_LOCK_GREEN (1 << 14)
+#define MT9T001_RED_OFFSET 0x63
+#define MT9T001_BLUE_OFFSET 0x64
+
+struct mt9t001 {
+ struct v4l2_subdev subdev;
+ struct media_pad pad;
+
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_rect crop;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *gains[4];
+
+ u16 output_control;
+ u16 black_level;
+};
+
+static inline struct mt9t001 *to_mt9t001(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mt9t001, subdev);
+}
+
+static int mt9t001_read(struct i2c_client *client, u8 reg)
+{
+ s32 data = i2c_smbus_read_word_data(client, reg);
+ return data < 0 ? data : be16_to_cpu(data);
+}
+
+static int mt9t001_write(struct i2c_client *client, u8 reg, u16 data)
+{
+ return i2c_smbus_write_word_data(client, reg, cpu_to_be16(data));
+}
+
+static int mt9t001_set_output_control(struct mt9t001 *mt9t001, u16 clear,
+ u16 set)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9t001->subdev);
+ u16 value = (mt9t001->output_control & ~clear) | set;
+ int ret;
+
+ if (value == mt9t001->output_control)
+ return 0;
+
+ ret = mt9t001_write(client, MT9T001_OUTPUT_CONTROL, value);
+ if (ret < 0)
+ return ret;
+
+ mt9t001->output_control = value;
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev video operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(fh, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &mt9t001->format;
+ default:
+ return NULL;
+ }
+}
+
+static struct v4l2_rect *
+__mt9t001_get_pad_crop(struct mt9t001 *mt9t001, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(fh, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &mt9t001->crop;
+ default:
+ return NULL;
+ }
+}
+
+static int mt9t001_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ const u16 mode = MT9T001_OUTPUT_CONTROL_CHIP_ENABLE;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+ struct v4l2_mbus_framefmt *format = &mt9t001->format;
+ struct v4l2_rect *crop = &mt9t001->crop;
+ unsigned int hratio;
+ unsigned int vratio;
+ int ret;
+
+ if (!enable)
+ return mt9t001_set_output_control(mt9t001, mode, 0);
+
+ /* Configure the window size and row/column bin */
+ hratio = DIV_ROUND_CLOSEST(crop->width, format->width);
+ vratio = DIV_ROUND_CLOSEST(crop->height, format->height);
+
+ ret = mt9t001_write(client, MT9T001_ROW_ADDRESS_MODE, vratio - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_COLUMN_ADDRESS_MODE, hratio - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_COLUMN_START, crop->left);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_ROW_START, crop->top);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_WINDOW_WIDTH, crop->width - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_WINDOW_HEIGHT, crop->height - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Switch to master "normal" mode */
+ return mt9t001_set_output_control(mt9t001, 0, mode);
+}
+
+static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ return 0;
+}
+
+static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= 8 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = (MT9T001_WINDOW_WIDTH_DEF + 1) / (fse->index + 1);
+ fse->max_width = fse->min_width;
+ fse->min_height = (MT9T001_WINDOW_HEIGHT_DEF + 1) / (fse->index + 1);
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int mt9t001_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+
+ format->format = *__mt9t001_get_pad_format(mt9t001, fh, format->pad,
+ format->which);
+ return 0;
+}
+
+static int mt9t001_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_rect *__crop;
+ unsigned int width;
+ unsigned int height;
+ unsigned int hratio;
+ unsigned int vratio;
+
+ __crop = __mt9t001_get_pad_crop(mt9t001, fh, format->pad,
+ format->which);
+
+ /* Clamp the width and height to avoid dividing by zero. */
+ width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
+ max(__crop->width / 8, MT9T001_WINDOW_WIDTH_MIN + 1),
+ __crop->width);
+ height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
+ max(__crop->height / 8, MT9T001_WINDOW_HEIGHT_MIN + 1),
+ __crop->height);
+
+ hratio = DIV_ROUND_CLOSEST(__crop->width, width);
+ vratio = DIV_ROUND_CLOSEST(__crop->height, height);
+
+ __format = __mt9t001_get_pad_format(mt9t001, fh, format->pad,
+ format->which);
+ __format->width = __crop->width / hratio;
+ __format->height = __crop->height / vratio;
+
+ format->format = *__format;
+
+ return 0;
+}
+
+static int mt9t001_get_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+
+ crop->rect = *__mt9t001_get_pad_crop(mt9t001, fh, crop->pad,
+ crop->which);
+ return 0;
+}
+
+static int mt9t001_set_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_rect *__crop;
+ struct v4l2_rect rect;
+
+ /* Clamp the crop rectangle boundaries and align them to a multiple of 2
+ * pixels.
+ */
+ rect.left = clamp(ALIGN(crop->rect.left, 2),
+ MT9T001_COLUMN_START_MIN,
+ MT9T001_COLUMN_START_MAX);
+ rect.top = clamp(ALIGN(crop->rect.top, 2),
+ MT9T001_ROW_START_MIN,
+ MT9T001_ROW_START_MAX);
+ rect.width = clamp(ALIGN(crop->rect.width, 2),
+ MT9T001_WINDOW_WIDTH_MIN + 1,
+ MT9T001_WINDOW_WIDTH_MAX + 1);
+ rect.height = clamp(ALIGN(crop->rect.height, 2),
+ MT9T001_WINDOW_HEIGHT_MIN + 1,
+ MT9T001_WINDOW_HEIGHT_MAX + 1);
+
+ rect.width = min(rect.width, MT9T001_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min(rect.height, MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
+
+ __crop = __mt9t001_get_pad_crop(mt9t001, fh, crop->pad, crop->which);
+
+ if (rect.width != __crop->width || rect.height != __crop->height) {
+ /* Reset the output image size if the crop rectangle size has
+ * been modified.
+ */
+ __format = __mt9t001_get_pad_format(mt9t001, fh, crop->pad,
+ crop->which);
+ __format->width = rect.width;
+ __format->height = rect.height;
+ }
+
+ *__crop = rect;
+ crop->rect = rect;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev control operations
+ */
+
+#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_BLACK_LEVEL_AUTO (V4L2_CID_USER_BASE | 0x1002)
+#define V4L2_CID_BLACK_LEVEL_OFFSET (V4L2_CID_USER_BASE | 0x1003)
+#define V4L2_CID_BLACK_LEVEL_CALIBRATE (V4L2_CID_USER_BASE | 0x1004)
+
+#define V4L2_CID_GAIN_RED (V4L2_CTRL_CLASS_CAMERA | 0x1001)
+#define V4L2_CID_GAIN_GREEN_RED (V4L2_CTRL_CLASS_CAMERA | 0x1002)
+#define V4L2_CID_GAIN_GREEN_BLUE (V4L2_CTRL_CLASS_CAMERA | 0x1003)
+#define V4L2_CID_GAIN_BLUE (V4L2_CTRL_CLASS_CAMERA | 0x1004)
+
+static u16 mt9t001_gain_value(s32 *gain)
+{
+ /* Gain is controlled by 2 analog stages and a digital stage. Valid
+ * values for the 3 stages are
+ *
+ * Stage Min Max Step
+ * ------------------------------------------
+ * First analog stage x1 x2 1
+ * Second analog stage x1 x4 0.125
+ * Digital stage x1 x16 0.125
+ *
+ * To minimize noise, the gain stages should be used in the second
+ * analog stage, first analog stage, digital stage order. Gain from a
+ * previous stage should be pushed to its maximum value before the next
+ * stage is used.
+ */
+ if (*gain <= 32)
+ return *gain;
+
+ if (*gain <= 64) {
+ *gain &= ~1;
+ return (1 << 6) | (*gain >> 1);
+ }
+
+ *gain &= ~7;
+ return ((*gain - 64) << 5) | (1 << 6) | 32;
+}
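mt9t001_gain_value() also rounds *gain in place, so the value reported back through the control matches what was actually programmed. For example, *gain = 100 is first rounded down to 96, and the register value becomes ((96 - 64) << 5) | (1 << 6) | 32 = 0x460.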
+
+static int mt9t001_ctrl_freeze(struct mt9t001 *mt9t001, bool freeze)
+{
+ return mt9t001_set_output_control(mt9t001,
+ freeze ? 0 : MT9T001_OUTPUT_CONTROL_SYNC,
+ freeze ? MT9T001_OUTPUT_CONTROL_SYNC : 0);
+}
+
+static int mt9t001_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ static const u8 gains[4] = {
+ MT9T001_RED_GAIN, MT9T001_GREEN1_GAIN,
+ MT9T001_GREEN2_GAIN, MT9T001_BLUE_GAIN
+ };
+
+ struct mt9t001 *mt9t001 =
+ container_of(ctrl->handler, struct mt9t001, ctrls);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9t001->subdev);
+ unsigned int count;
+ unsigned int i;
+ u16 value;
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN_RED:
+ case V4L2_CID_GAIN_GREEN_RED:
+ case V4L2_CID_GAIN_GREEN_BLUE:
+ case V4L2_CID_GAIN_BLUE:
+
+ /* Disable control updates if more than one control has changed
+ * in the cluster.
+ */
+ for (i = 0, count = 0; i < 4; ++i) {
+ struct v4l2_ctrl *gain = mt9t001->gains[i];
+
+ if (gain->val != gain->cur.val)
+ count++;
+ }
+
+ if (count > 1) {
+ ret = mt9t001_ctrl_freeze(mt9t001, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Update the gain controls. */
+ for (i = 0; i < 4; ++i) {
+ struct v4l2_ctrl *gain = mt9t001->gains[i];
+
+ if (gain->val == gain->cur.val)
+ continue;
+
+ value = mt9t001_gain_value(&gain->val);
+ ret = mt9t001_write(client, gains[i], value);
+ if (ret < 0) {
+ mt9t001_ctrl_freeze(mt9t001, false);
+ return ret;
+ }
+ }
+
+ /* Enable control updates. */
+ if (count > 1) {
+ ret = mt9t001_ctrl_freeze(mt9t001, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = mt9t001_write(client, MT9T001_SHUTTER_WIDTH_LOW,
+ ctrl->val & 0xffff);
+ if (ret < 0)
+ return ret;
+
+ return mt9t001_write(client, MT9T001_SHUTTER_WIDTH_HIGH,
+ ctrl->val >> 16);
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = mt9t001_set_output_control(mt9t001,
+ ctrl->val ? 0 : MT9T001_OUTPUT_CONTROL_TEST_DATA,
+ ctrl->val ? MT9T001_OUTPUT_CONTROL_TEST_DATA : 0);
+ if (ret < 0)
+ return ret;
+
+ return mt9t001_write(client, MT9T001_TEST_DATA, ctrl->val << 2);
+
+ case V4L2_CID_BLACK_LEVEL_AUTO:
+ value = ctrl->val ? 0 : MT9T001_BLACK_LEVEL_OVERRIDE;
+ ret = mt9t001_write(client, MT9T001_BLACK_LEVEL_CALIBRATION,
+ value);
+ if (ret < 0)
+ return ret;
+
+ mt9t001->black_level = value;
+ break;
+
+ case V4L2_CID_BLACK_LEVEL_OFFSET:
+ ret = mt9t001_write(client, MT9T001_GREEN1_OFFSET, ctrl->val);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_GREEN2_OFFSET, ctrl->val);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_RED_OFFSET, ctrl->val);
+ if (ret < 0)
+ return ret;
+
+ return mt9t001_write(client, MT9T001_BLUE_OFFSET, ctrl->val);
+
+ case V4L2_CID_BLACK_LEVEL_CALIBRATE:
+ return mt9t001_write(client, MT9T001_BLACK_LEVEL_CALIBRATION,
+ MT9T001_BLACK_LEVEL_RECALCULATE |
+ mt9t001->black_level);
+ }
+
+ return 0;
+}
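The four per-channel gains are grouped into a single cluster (see the v4l2_ctrl_cluster() call in the probe function below), so one s_ctrl invocation sees the new values of all of them; when more than one has changed, OUTPUT_CONTROL_SYNC is held via mt9t001_ctrl_freeze() so the new gains take effect on the same frame. A hedged user-space sketch of a request that exercises that path, assuming the application uses these driver-private IDs verbatim and fd is an open video device node:

	struct v4l2_ext_control gains[2] = {
		{ .id = V4L2_CID_GAIN_RED,  .value = 16 },
		{ .id = V4L2_CID_GAIN_BLUE, .value = 16 },
	};
	struct v4l2_ext_controls req = {
		.ctrl_class = V4L2_CTRL_CLASS_CAMERA,
		.count = 2,
		.controls = gains,
	};

	/* Both gains land in mt9t001_s_ctrl() as one clustered update. */
	ioctl(fd, VIDIOC_S_EXT_CTRLS, &req);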
+
+static struct v4l2_ctrl_ops mt9t001_ctrl_ops = {
+ .s_ctrl = mt9t001_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config mt9t001_ctrls[] = {
+ {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test pattern",
+ .min = 0,
+ .max = 1023,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ }, {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_BLACK_LEVEL_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Black Level, Auto",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+ .flags = 0,
+ }, {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_BLACK_LEVEL_OFFSET,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Black Level, Offset",
+ .min = -256,
+ .max = 255,
+ .step = 1,
+ .def = 32,
+ .flags = 0,
+ }, {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_BLACK_LEVEL_CALIBRATE,
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .name = "Black Level, Calibrate",
+ .min = 0,
+ .max = 0,
+ .step = 0,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_WRITE_ONLY,
+ },
+};
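These IDs live at driver-private offsets above V4L2_CID_USER_BASE, so applications discover them by enumeration rather than from a shared header. The usual discovery loop, for reference (fd is an open video device node, illustrative only):

	struct v4l2_queryctrl qc = { .id = V4L2_CTRL_FLAG_NEXT_CTRL };

	while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
		printf("0x%08x: %s\n", qc.id, qc.name);
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
	}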
+
+static const struct v4l2_ctrl_config mt9t001_gains[] = {
+ {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_GAIN_RED,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain, Red",
+ .min = MT9T001_GLOBAL_GAIN_MIN,
+ .max = MT9T001_GLOBAL_GAIN_MAX,
+ .step = 1,
+ .def = MT9T001_GLOBAL_GAIN_MIN,
+ .flags = 0,
+ }, {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_GAIN_GREEN_RED,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain, Green (R)",
+ .min = MT9T001_GLOBAL_GAIN_MIN,
+ .max = MT9T001_GLOBAL_GAIN_MAX,
+ .step = 1,
+ .def = MT9T001_GLOBAL_GAIN_MIN,
+ .flags = 0,
+ }, {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_GAIN_GREEN_BLUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain, Green (B)",
+ .min = MT9T001_GLOBAL_GAIN_MIN,
+ .max = MT9T001_GLOBAL_GAIN_MAX,
+ .step = 1,
+ .def = MT9T001_GLOBAL_GAIN_MIN,
+ .flags = 0,
+ }, {
+ .ops = &mt9t001_ctrl_ops,
+ .id = V4L2_CID_GAIN_BLUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain, Blue",
+ .min = MT9T001_GLOBAL_GAIN_MIN,
+ .max = MT9T001_GLOBAL_GAIN_MAX,
+ .step = 1,
+ .def = MT9T001_GLOBAL_GAIN_MIN,
+ .flags = 0,
+ },
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev internal operations
+ */
+
+static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ crop = v4l2_subdev_get_try_crop(fh, 0);
+ crop->left = MT9T001_COLUMN_START_DEF;
+ crop->top = MT9T001_ROW_START_DEF;
+ crop->width = MT9T001_WINDOW_WIDTH_DEF + 1;
+ crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
+
+ format = v4l2_subdev_get_try_format(fh, 0);
+ format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format->width = MT9T001_WINDOW_WIDTH_DEF + 1;
+ format->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops mt9t001_subdev_video_ops = {
+ .s_stream = mt9t001_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = {
+ .enum_mbus_code = mt9t001_enum_mbus_code,
+ .enum_frame_size = mt9t001_enum_frame_size,
+ .get_fmt = mt9t001_get_format,
+ .set_fmt = mt9t001_set_format,
+ .get_crop = mt9t001_get_crop,
+ .set_crop = mt9t001_set_crop,
+};
+
+static struct v4l2_subdev_ops mt9t001_subdev_ops = {
+ .video = &mt9t001_subdev_video_ops,
+ .pad = &mt9t001_subdev_pad_ops,
+};
+
+static struct v4l2_subdev_internal_ops mt9t001_subdev_internal_ops = {
+ .open = mt9t001_open,
+};
+
+static int mt9t001_video_probe(struct i2c_client *client)
+{
+ struct mt9t001_platform_data *pdata = client->dev.platform_data;
+ s32 data;
+ int ret;
+
+ dev_info(&client->dev, "Probing MT9T001 at address 0x%02x\n",
+ client->addr);
+
+ /* Reset the chip and stop data read out */
+ ret = mt9t001_write(client, MT9T001_RESET, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_RESET, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9t001_write(client, MT9T001_OUTPUT_CONTROL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the pixel clock polarity */
+ if (pdata && pdata->clk_pol) {
+ ret = mt9t001_write(client, MT9T001_PIXEL_CLOCK,
+ MT9T001_PIXEL_CLOCK_INVERT);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Read and check the sensor version */
+ data = mt9t001_read(client, MT9T001_CHIP_VERSION);
+ if (data != MT9T001_CHIP_ID) {
+ dev_err(&client->dev, "MT9T001 not detected, wrong version "
+ "0x%04x\n", data);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "MT9T001 detected at address 0x%02x\n",
+ client->addr);
+
+ return ret;
+}
+
+static int mt9t001_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct mt9t001 *mt9t001;
+ unsigned int i;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_warn(&client->adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
+ return -EIO;
+ }
+
+ ret = mt9t001_video_probe(client);
+ if (ret < 0)
+ return ret;
+
+ mt9t001 = kzalloc(sizeof(*mt9t001), GFP_KERNEL);
+ if (!mt9t001)
+ return -ENOMEM;
+
+ v4l2_ctrl_handler_init(&mt9t001->ctrls, ARRAY_SIZE(mt9t001_ctrls) +
+ ARRAY_SIZE(mt9t001_gains) + 2);
+
+ v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops,
+ V4L2_CID_EXPOSURE, MT9T001_SHUTTER_WIDTH_MIN,
+ MT9T001_SHUTTER_WIDTH_MAX, 1,
+ MT9T001_SHUTTER_WIDTH_DEF);
+ v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops,
+ V4L2_CID_BLACK_LEVEL, 1, 1, 1, 1);
+
+ for (i = 0; i < ARRAY_SIZE(mt9t001_ctrls); ++i)
+ v4l2_ctrl_new_custom(&mt9t001->ctrls, &mt9t001_ctrls[i], NULL);
+
+ for (i = 0; i < ARRAY_SIZE(mt9t001_gains); ++i)
+ mt9t001->gains[i] = v4l2_ctrl_new_custom(&mt9t001->ctrls,
+ &mt9t001_gains[i], NULL);
+
+ v4l2_ctrl_cluster(ARRAY_SIZE(mt9t001_gains), mt9t001->gains);
+
+ mt9t001->subdev.ctrl_handler = &mt9t001->ctrls;
+
+ if (mt9t001->ctrls.error) {
+ printk(KERN_INFO "%s: control initialization error %d\n",
+ __func__, mt9t001->ctrls.error);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mt9t001->crop.left = MT9T001_COLUMN_START_DEF;
+ mt9t001->crop.top = MT9T001_ROW_START_DEF;
+ mt9t001->crop.width = MT9T001_WINDOW_WIDTH_DEF + 1;
+ mt9t001->crop.height = MT9T001_WINDOW_HEIGHT_DEF + 1;
+
+ mt9t001->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ mt9t001->format.width = MT9T001_WINDOW_WIDTH_DEF + 1;
+ mt9t001->format.height = MT9T001_WINDOW_HEIGHT_DEF + 1;
+ mt9t001->format.field = V4L2_FIELD_NONE;
+ mt9t001->format.colorspace = V4L2_COLORSPACE_SRGB;
+
+ v4l2_i2c_subdev_init(&mt9t001->subdev, client, &mt9t001_subdev_ops);
+ mt9t001->subdev.internal_ops = &mt9t001_subdev_internal_ops;
+ mt9t001->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ mt9t001->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&mt9t001->subdev.entity, 1, &mt9t001->pad, 0);
+
+done:
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(&mt9t001->ctrls);
+ media_entity_cleanup(&mt9t001->subdev.entity);
+ kfree(mt9t001);
+ }
+
+ return ret;
+}
+
+static int mt9t001_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct mt9t001 *mt9t001 = to_mt9t001(subdev);
+
+ v4l2_ctrl_handler_free(&mt9t001->ctrls);
+ v4l2_device_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ kfree(mt9t001);
+ return 0;
+}
+
+static const struct i2c_device_id mt9t001_id[] = {
+ { "mt9t001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mt9t001_id);
+
+static struct i2c_driver mt9t001_driver = {
+ .driver = {
+ .name = "mt9t001",
+ },
+ .probe = mt9t001_probe,
+ .remove = mt9t001_remove,
+ .id_table = mt9t001_id,
+};
+
+static int __init mt9t001_init(void)
+{
+ return i2c_add_driver(&mt9t001_driver);
+}
+
+static void __exit mt9t001_exit(void)
+{
+ i2c_del_driver(&mt9t001_driver);
+}
+
+module_init(mt9t001_init);
+module_exit(mt9t001_exit);
+
+MODULE_DESCRIPTION("Aptina (Micron) MT9T001 Camera driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 30547cc3f89..0e78477452f 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -13,11 +13,21 @@
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/soc_camera.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+
+/*
+ * ATTENTION: this driver still cannot be used outside of the soc-camera
+ * framework because its PM implementation relies on the video_device node.
+ * If hardware becomes available for testing, alternative PM approaches shall
+ * be considered and tested.
+ */
/*
* mt9t031 i2c address 0x5d
@@ -57,21 +67,20 @@
#define MT9T031_COLUMN_SKIP 32
#define MT9T031_ROW_SKIP 20
-#define MT9T031_BUS_PARAM (SOCAM_PCLK_SAMPLE_RISING | \
- SOCAM_PCLK_SAMPLE_FALLING | SOCAM_HSYNC_ACTIVE_HIGH | \
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | \
- SOCAM_MASTER | SOCAM_DATAWIDTH_10)
-
struct mt9t031 {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* exposure/auto-exposure cluster */
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+ };
struct v4l2_rect rect; /* Sensor window */
int model; /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */
u16 xskip;
u16 yskip;
- unsigned int gain;
+ unsigned int total_h;
unsigned short y_skip_top; /* Lines to skip at the top */
- unsigned int exposure;
- unsigned char autoexposure;
};
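Wrapping the two control pointers in an anonymous struct is the v4l2-ctrls convention for a control cluster: the members stay adjacent so they can be handed to v4l2_ctrl_auto_cluster() as an array. A hedged sketch of how such a cluster is typically registered; mt9t031_ctrl_ops and the exact registration site are assumptions (the probe changes are outside this hunk), while the 1..255 exposure range mirrors the queryctrl table removed later in this patch:

	mt9t031->autoexposure = v4l2_ctrl_new_std_menu(&mt9t031->hdl,
			&mt9t031_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
			V4L2_EXPOSURE_AUTO);
	mt9t031->exposure = v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
			V4L2_CID_EXPOSURE, 1, 255, 1, 255);
	/* First member of the array must be the "auto" control. */
	v4l2_ctrl_auto_cluster(2, &mt9t031->autoexposure,
			V4L2_EXPOSURE_MANUAL, true);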
static struct mt9t031 *to_mt9t031(const struct i2c_client *client)
@@ -179,95 +188,6 @@ static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int mt9t031_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
-
- /* The caller should have queried our parameters, check anyway */
- if (flags & ~MT9T031_BUS_PARAM)
- return -EINVAL;
-
- if (flags & SOCAM_PCLK_SAMPLE_FALLING)
- reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
- else
- reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
-
- return 0;
-}
-
-static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
-
- return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM);
-}
-
-enum {
- MT9T031_CTRL_VFLIP,
- MT9T031_CTRL_HFLIP,
- MT9T031_CTRL_GAIN,
- MT9T031_CTRL_EXPOSURE,
- MT9T031_CTRL_EXPOSURE_AUTO,
-};
-
-static const struct v4l2_queryctrl mt9t031_controls[] = {
- [MT9T031_CTRL_VFLIP] = {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- [MT9T031_CTRL_HFLIP] = {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- [MT9T031_CTRL_GAIN] = {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },
- [MT9T031_CTRL_EXPOSURE] = {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 1,
- .maximum = 255,
- .step = 1,
- .default_value = 255,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },
- [MT9T031_CTRL_EXPOSURE_AUTO] = {
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- }
-};
-
-static struct soc_camera_ops mt9t031_ops = {
- .set_bus_param = mt9t031_set_bus_param,
- .query_bus_param = mt9t031_query_bus_param,
- .controls = mt9t031_controls,
- .num_controls = ARRAY_SIZE(mt9t031_controls),
-};
-
/* target must be _even_ */
static u16 mt9t031_skip(s32 *source, s32 target, s32 max)
{
@@ -353,7 +273,7 @@ static int mt9t031_set_params(struct i2c_client *client,
/*
* The caller provides a supported format, as guaranteed by
- * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap()
+ * .try_mbus_fmt(), soc_camera_s_crop() and soc_camera_cropcap()
*/
if (ret >= 0)
ret = reg_write(client, MT9T031_COLUMN_START, rect->left);
@@ -364,17 +284,10 @@ static int mt9t031_set_params(struct i2c_client *client,
if (ret >= 0)
ret = reg_write(client, MT9T031_WINDOW_HEIGHT,
rect->height + mt9t031->y_skip_top - 1);
- if (ret >= 0 && mt9t031->autoexposure) {
- unsigned int total_h = rect->height + mt9t031->y_skip_top + vblank;
- ret = set_shutter(client, total_h);
- if (ret >= 0) {
- const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
- const struct v4l2_queryctrl *qctrl =
- &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
- mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
- (qctrl->maximum - qctrl->minimum)) /
- shutter_max + qctrl->minimum;
- }
+ if (ret >= 0 && v4l2_ctrl_g_ctrl(mt9t031->autoexposure) == V4L2_EXPOSURE_AUTO) {
+ mt9t031->total_h = rect->height + mt9t031->y_skip_top + vblank;
+
+ ret = set_shutter(client, mt9t031->total_h);
}
/* Re-enable register update, commit all changes */
@@ -543,71 +456,57 @@ static int mt9t031_s_register(struct v4l2_subdev *sd,
}
#endif
-static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int mt9t031_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t031 *mt9t031 = to_mt9t031(client);
- int data;
+ struct mt9t031 *mt9t031 = container_of(ctrl->handler,
+ struct mt9t031, hdl);
+ const u32 shutter_max = MT9T031_MAX_HEIGHT + MT9T031_VERTICAL_BLANK;
+ s32 min, max;
switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- data = reg_read(client, MT9T031_READ_MODE_2);
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & 0x8000);
- break;
- case V4L2_CID_HFLIP:
- data = reg_read(client, MT9T031_READ_MODE_2);
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & 0x4000);
- break;
case V4L2_CID_EXPOSURE_AUTO:
- ctrl->value = mt9t031->autoexposure;
- break;
- case V4L2_CID_GAIN:
- ctrl->value = mt9t031->gain;
- break;
- case V4L2_CID_EXPOSURE:
- ctrl->value = mt9t031->exposure;
+ min = mt9t031->exposure->minimum;
+ max = mt9t031->exposure->maximum;
+ mt9t031->exposure->val =
+ (shutter_max / 2 + (mt9t031->total_h - 1) * (max - min))
+ / shutter_max + min;
break;
}
return 0;
}
-static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int mt9t031_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct mt9t031 *mt9t031 = container_of(ctrl->handler,
+ struct mt9t031, hdl);
+ struct v4l2_subdev *sd = &mt9t031->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t031 *mt9t031 = to_mt9t031(client);
- const struct v4l2_queryctrl *qctrl;
+ struct v4l2_ctrl *exp = mt9t031->exposure;
int data;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- if (ctrl->value)
+ if (ctrl->val)
data = reg_set(client, MT9T031_READ_MODE_2, 0x8000);
else
data = reg_clear(client, MT9T031_READ_MODE_2, 0x8000);
if (data < 0)
return -EIO;
- break;
+ return 0;
case V4L2_CID_HFLIP:
- if (ctrl->value)
+ if (ctrl->val)
data = reg_set(client, MT9T031_READ_MODE_2, 0x4000);
else
data = reg_clear(client, MT9T031_READ_MODE_2, 0x4000);
if (data < 0)
return -EIO;
- break;
+ return 0;
case V4L2_CID_GAIN:
- qctrl = &mt9t031_controls[MT9T031_CTRL_GAIN];
- if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
- return -EINVAL;
/* See Datasheet Table 7, Gain settings. */
- if (ctrl->value <= qctrl->default_value) {
+ if (ctrl->val <= ctrl->default_value) {
/* Pack it into 0..1 step 0.125, register values 0..8 */
- unsigned long range = qctrl->default_value - qctrl->minimum;
- data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
+ unsigned long range = ctrl->default_value - ctrl->minimum;
+ data = ((ctrl->val - ctrl->minimum) * 8 + range / 2) / range;
dev_dbg(&client->dev, "Setting gain %d\n", data);
data = reg_write(client, MT9T031_GLOBAL_GAIN, data);
@@ -616,9 +515,9 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
} else {
/* Pack it into 1.125..128 variable step, register values 9..0x7860 */
- /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */
+ /* We assume ctrl->maximum - ctrl->default_value - 1 > 0 */
- unsigned long range = qctrl->maximum - qctrl->default_value - 1;
+ unsigned long range = ctrl->maximum - ctrl->default_value - 1;
/* calculated gain: map 65..127 to 9..1024 step 0.125 */
- unsigned long gain = ((ctrl->value - qctrl->default_value - 1) *
+ unsigned long gain = ((ctrl->val - ctrl->default_value - 1) *
1015 + range / 2) / range + 9;
if (gain <= 32) /* calculated gain 9..32 -> 9..32 */
@@ -635,19 +534,13 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
if (data < 0)
return -EIO;
}
+ return 0;
- /* Success */
- mt9t031->gain = ctrl->value;
- break;
- case V4L2_CID_EXPOSURE:
- qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
- /* mt9t031 has maximum == default */
- if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
- return -EINVAL;
- else {
- const unsigned long range = qctrl->maximum - qctrl->minimum;
- const u32 shutter = ((ctrl->value - qctrl->minimum) * 1048 +
- range / 2) / range + 1;
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL) {
+ unsigned int range = exp->maximum - exp->minimum;
+ unsigned int shutter = ((exp->val - exp->minimum) * 1048 +
+ range / 2) / range + 1;
u32 old;
get_shutter(client, &old);
@@ -655,27 +548,15 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
old, shutter);
if (set_shutter(client, shutter) < 0)
return -EIO;
- mt9t031->exposure = ctrl->value;
- mt9t031->autoexposure = 0;
- }
- break;
- case V4L2_CID_EXPOSURE_AUTO:
- if (ctrl->value) {
+ } else {
const u16 vblank = MT9T031_VERTICAL_BLANK;
- const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
- unsigned int total_h = mt9t031->rect.height +
+ mt9t031->total_h = mt9t031->rect.height +
mt9t031->y_skip_top + vblank;
- if (set_shutter(client, total_h) < 0)
+ if (set_shutter(client, mt9t031->total_h) < 0)
return -EIO;
- qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
- mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
- (qctrl->maximum - qctrl->minimum)) /
- shutter_max + qctrl->minimum;
- mt9t031->autoexposure = 1;
- } else
- mt9t031->autoexposure = 0;
- break;
+ }
+ return 0;
default:
return -EINVAL;
}
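
The two gain branches above implement the Datasheet Table 7 mapping from the 0..127 V4L2 control range onto the sensor's register scale. Below is a minimal user-space sketch of that arithmetic, assuming the control limits set in mt9t031_probe() further down (minimum 0, default 64, maximum 127); it is illustrative only and omits the register write and the >32 re-encoding step.

#include <stdio.h>

#define GAIN_MIN 0
#define GAIN_DEF 64
#define GAIN_MAX 127

/* Map a V4L2 gain control value to the intermediate register scale. */
static unsigned long mt9t031_gain_to_reg(int val)
{
	if (val <= GAIN_DEF) {
		/* 0..64 -> register 0..8, i.e. gain up to 1.0 in 0.125 steps */
		unsigned long range = GAIN_DEF - GAIN_MIN;

		return ((val - GAIN_MIN) * 8 + range / 2) / range;
	} else {
		/* 65..127 -> "calculated gain" 9..1024 in 0.125 steps */
		unsigned long range = GAIN_MAX - GAIN_DEF - 1;

		return ((val - GAIN_DEF - 1) * 1015 + range / 2) / range + 9;
	}
}

int main(void)
{
	/* prints: 0 8 9 1024 */
	printf("%lu %lu %lu %lu\n", mt9t031_gain_to_reg(0),
	       mt9t031_gain_to_reg(64), mt9t031_gain_to_reg(65),
	       mt9t031_gain_to_reg(127));
	return 0;
}
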
@@ -700,8 +581,7 @@ static int mt9t031_runtime_suspend(struct device *dev)
static int mt9t031_runtime_resume(struct device *dev)
{
struct video_device *vdev = to_video_device(dev);
- struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_subdev *sd = soc_camera_vdev_to_subdev(vdev);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
@@ -734,6 +614,19 @@ static struct device_type mt9t031_dev_type = {
.pm = &mt9t031_dev_pm_ops,
};
+static int mt9t031_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct video_device *vdev = soc_camera_i2c_to_vdev(client);
+
+ if (on)
+ vdev->dev.type = &mt9t031_dev_type;
+ else
+ vdev->dev.type = NULL;
+
+ return 0;
+}
+
/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
@@ -741,7 +634,6 @@ static struct device_type mt9t031_dev_type = {
static int mt9t031_video_probe(struct i2c_client *client)
{
struct mt9t031 *mt9t031 = to_mt9t031(client);
- struct video_device *vdev = soc_camera_i2c_to_vdev(client);
s32 data;
int ret;
@@ -768,11 +660,7 @@ static int mt9t031_video_probe(struct i2c_client *client)
if (ret < 0)
dev_err(&client->dev, "Failed to initialise the camera\n");
else
- vdev->dev.type = &mt9t031_dev_type;
-
- /* mt9t031_idle() has reset the chip to default. */
- mt9t031->exposure = 255;
- mt9t031->gain = 64;
+ v4l2_ctrl_handler_setup(&mt9t031->hdl);
return ret;
}
@@ -787,10 +675,14 @@ static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
return 0;
}
+static const struct v4l2_ctrl_ops mt9t031_ctrl_ops = {
+ .g_volatile_ctrl = mt9t031_g_volatile_ctrl,
+ .s_ctrl = mt9t031_s_ctrl,
+};
+
static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
- .g_ctrl = mt9t031_g_ctrl,
- .s_ctrl = mt9t031_s_ctrl,
.g_chip_ident = mt9t031_g_chip_ident,
+ .s_power = mt9t031_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9t031_g_register,
.s_register = mt9t031_s_register,
@@ -807,6 +699,34 @@ static int mt9t031_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
return 0;
}
+static int mt9t031_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
+static int mt9t031_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ if (soc_camera_apply_board_flags(icl, cfg) &
+ V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ return reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
+ else
+ return reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
+}
+
static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
.s_stream = mt9t031_s_stream,
.s_mbus_fmt = mt9t031_s_fmt,
@@ -816,6 +736,8 @@ static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
.g_crop = mt9t031_g_crop,
.cropcap = mt9t031_cropcap,
.enum_mbus_fmt = mt9t031_enum_fmt,
+ .g_mbus_config = mt9t031_g_mbus_config,
+ .s_mbus_config = mt9t031_s_mbus_config,
};
static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
@@ -832,18 +754,13 @@ static int mt9t031_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9t031 *mt9t031;
- struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (icd) {
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- if (!icl) {
- dev_err(&client->dev, "MT9T031 driver needs platform data\n");
- return -EINVAL;
- }
-
- icd->ops = &mt9t031_ops;
+ if (!icl) {
+ dev_err(&client->dev, "MT9T031 driver needs platform data\n");
+ return -EINVAL;
}
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -857,6 +774,33 @@ static int mt9t031_probe(struct i2c_client *client,
return -ENOMEM;
v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops);
+ v4l2_ctrl_handler_init(&mt9t031->hdl, 5);
+ v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 64);
+
+ /*
+ * Simulated autoexposure. If enabled, we calculate shutter width
+ * ourselves in the driver based on vertical blanking and frame width
+ */
+ mt9t031->autoexposure = v4l2_ctrl_new_std_menu(&mt9t031->hdl,
+ &mt9t031_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
+ V4L2_EXPOSURE_AUTO);
+ mt9t031->exposure = v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1, 255, 1, 255);
+
+ mt9t031->subdev.ctrl_handler = &mt9t031->hdl;
+ if (mt9t031->hdl.error) {
+ int err = mt9t031->hdl.error;
+
+ kfree(mt9t031);
+ return err;
+ }
+ v4l2_ctrl_auto_cluster(2, &mt9t031->autoexposure,
+ V4L2_EXPOSURE_MANUAL, true);
mt9t031->y_skip_top = 0;
mt9t031->rect.left = MT9T031_COLUMN_SKIP;
@@ -864,12 +808,6 @@ static int mt9t031_probe(struct i2c_client *client,
mt9t031->rect.width = MT9T031_MAX_WIDTH;
mt9t031->rect.height = MT9T031_MAX_HEIGHT;
- /*
- * Simulated autoexposure. If enabled, we calculate shutter width
- * ourselves in the driver based on vertical blanking and frame width
- */
- mt9t031->autoexposure = 1;
-
mt9t031->xskip = 1;
mt9t031->yskip = 1;
@@ -880,8 +818,7 @@ static int mt9t031_probe(struct i2c_client *client,
mt9t031_disable(client);
if (ret) {
- if (icd)
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&mt9t031->hdl);
kfree(mt9t031);
}
@@ -891,10 +828,9 @@ static int mt9t031_probe(struct i2c_client *client,
static int mt9t031_remove(struct i2c_client *client)
{
struct mt9t031 *mt9t031 = to_mt9t031(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- if (icd)
- icd->ops = NULL;
+ v4l2_device_unregister_subdev(&mt9t031->subdev);
+ v4l2_ctrl_handler_free(&mt9t031->hdl);
kfree(mt9t031);
return 0;
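
With the conversion to the control framework, exposure and auto-exposure form a cluster: in manual mode the control value is scaled to a shutter width, while in auto mode the volatile read-back derives a control value from the programmed window height. The sketch below shows both directions, using the 1..255 limits from the v4l2_ctrl_new_std() call above; shutter_max stands for MT9T031_MAX_HEIGHT + MT9T031_VERTICAL_BLANK, whose values are defined elsewhere in the driver, and the 1600 used in main() is a made-up number for demonstration.

#include <stdio.h>

#define EXP_MIN 1
#define EXP_MAX 255

/* V4L2_EXPOSURE_MANUAL: control value -> shutter width in lines */
static unsigned int exposure_to_shutter(int val)
{
	unsigned int range = EXP_MAX - EXP_MIN;

	return ((val - EXP_MIN) * 1048 + range / 2) / range + 1;
}

/* V4L2_EXPOSURE_AUTO: programmed height plus blanking -> reported value */
static int shutter_to_exposure(unsigned int total_h, unsigned int shutter_max)
{
	return (shutter_max / 2 + (total_h - 1) * (EXP_MAX - EXP_MIN))
		/ shutter_max + EXP_MIN;
}

int main(void)
{
	/* prints: 1 1049 255 -- a full-height window reports maximum exposure */
	printf("%u %u %d\n", exposure_to_shutter(EXP_MIN),
	       exposure_to_shutter(EXP_MAX),
	       shutter_to_exposure(1600, 1600));
	return 0;
}
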
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index d2e0a50063a..32114a3c0ca 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -22,11 +22,11 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <media/mt9t112.h>
#include <media/soc_camera.h>
-#include <media/soc_mediabus.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-common.h>
@@ -34,11 +34,7 @@
/* #define EXT_CLOCK 24000000 */
/************************************************************************
-
-
macro
-
-
************************************************************************/
/*
* frame size
@@ -80,17 +76,8 @@
#define VAR8(id, offset) _VAR(id, offset, 0x8000)
/************************************************************************
-
-
struct
-
-
************************************************************************/
-struct mt9t112_frame_size {
- u16 width;
- u16 height;
-};
-
struct mt9t112_format {
enum v4l2_mbus_pixelcode code;
enum v4l2_colorspace colorspace;
@@ -102,21 +89,17 @@ struct mt9t112_priv {
struct v4l2_subdev subdev;
struct mt9t112_camera_info *info;
struct i2c_client *client;
- struct soc_camera_device icd;
- struct mt9t112_frame_size frame;
+ struct v4l2_rect frame;
const struct mt9t112_format *format;
int model;
u32 flags;
/* for flags */
-#define INIT_DONE (1<<0)
+#define INIT_DONE (1 << 0)
+#define PCLK_RISING (1 << 1)
};
/************************************************************************
-
-
supported format
-
-
************************************************************************/
static const struct mt9t112_format mt9t112_cfmts[] = {
@@ -154,11 +137,7 @@ static const struct mt9t112_format mt9t112_cfmts[] = {
};
/************************************************************************
-
-
general function
-
-
************************************************************************/
static struct mt9t112_priv *to_mt9t112(const struct i2c_client *client)
{
@@ -326,50 +305,47 @@ static int mt9t112_clock_info(const struct i2c_client *client, u32 ext)
n = (n >> 8) & 0x003f;
enable = ((6000 > ext) || (54000 < ext)) ? "X" : "";
- dev_info(&client->dev, "EXTCLK : %10u K %s\n", ext, enable);
+ dev_dbg(&client->dev, "EXTCLK : %10u K %s\n", ext, enable);
vco = 2 * m * ext / (n+1);
enable = ((384000 > vco) || (768000 < vco)) ? "X" : "";
- dev_info(&client->dev, "VCO : %10u K %s\n", vco, enable);
+ dev_dbg(&client->dev, "VCO : %10u K %s\n", vco, enable);
clk = vco / (p1+1) / (p2+1);
enable = (96000 < clk) ? "X" : "";
- dev_info(&client->dev, "PIXCLK : %10u K %s\n", clk, enable);
+ dev_dbg(&client->dev, "PIXCLK : %10u K %s\n", clk, enable);
clk = vco / (p3+1);
enable = (768000 < clk) ? "X" : "";
- dev_info(&client->dev, "MIPICLK : %10u K %s\n", clk, enable);
+ dev_dbg(&client->dev, "MIPICLK : %10u K %s\n", clk, enable);
clk = vco / (p6+1);
enable = (96000 < clk) ? "X" : "";
- dev_info(&client->dev, "MCU CLK : %10u K %s\n", clk, enable);
+ dev_dbg(&client->dev, "MCU CLK : %10u K %s\n", clk, enable);
clk = vco / (p5+1);
enable = (54000 < clk) ? "X" : "";
- dev_info(&client->dev, "SOC CLK : %10u K %s\n", clk, enable);
+ dev_dbg(&client->dev, "SOC CLK : %10u K %s\n", clk, enable);
clk = vco / (p4+1);
enable = (70000 < clk) ? "X" : "";
- dev_info(&client->dev, "Sensor CLK : %10u K %s\n", clk, enable);
+ dev_dbg(&client->dev, "Sensor CLK : %10u K %s\n", clk, enable);
clk = vco / (p7+1);
- dev_info(&client->dev, "External sensor : %10u K\n", clk);
+ dev_dbg(&client->dev, "External sensor : %10u K\n", clk);
clk = ext / (n+1);
enable = ((2000 > clk) || (24000 < clk)) ? "X" : "";
- dev_info(&client->dev, "PFD : %10u K %s\n", clk, enable);
+ dev_dbg(&client->dev, "PFD : %10u K %s\n", clk, enable);
return 0;
}
#endif
-static void mt9t112_frame_check(u32 *width, u32 *height)
+static void mt9t112_frame_check(u32 *width, u32 *height, u32 *left, u32 *top)
{
- if (*width > MAX_WIDTH)
- *width = MAX_WIDTH;
-
- if (*height > MAX_HEIGHT)
- *height = MAX_HEIGHT;
+ soc_camera_limit_side(left, width, 0, 0, MAX_WIDTH);
+ soc_camera_limit_side(top, height, 0, 0, MAX_HEIGHT);
}
static int mt9t112_set_a_frame_size(const struct i2c_client *client,
@@ -758,48 +734,7 @@ static int mt9t112_init_camera(const struct i2c_client *client)
}
/************************************************************************
-
-
- soc_camera_ops
-
-
-************************************************************************/
-static int mt9t112_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return 0;
-}
-
-static unsigned long mt9t112_query_bus_param(struct soc_camera_device *icd)
-{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- struct mt9t112_priv *priv = to_mt9t112(client);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned long flags = SOCAM_MASTER | SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH;
-
- flags |= (priv->info->flags & MT9T112_FLAG_PCLK_RISING_EDGE) ?
- SOCAM_PCLK_SAMPLE_RISING : SOCAM_PCLK_SAMPLE_FALLING;
-
- if (priv->info->flags & MT9T112_FLAG_DATAWIDTH_8)
- flags |= SOCAM_DATAWIDTH_8;
- else
- flags |= SOCAM_DATAWIDTH_10;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
-static struct soc_camera_ops mt9t112_ops = {
- .set_bus_param = mt9t112_set_bus_param,
- .query_bus_param = mt9t112_query_bus_param,
-};
-
-/************************************************************************
-
-
v4l2_subdev_core_ops
-
-
************************************************************************/
static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
@@ -850,11 +785,7 @@ static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
/************************************************************************
-
-
v4l2_subdev_video_ops
-
-
************************************************************************/
static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
{
@@ -877,8 +808,7 @@ static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
}
if (!(priv->flags & INIT_DONE)) {
- u16 param = (MT9T112_FLAG_PCLK_RISING_EDGE &
- priv->info->flags) ? 0x0001 : 0x0000;
+ u16 param = (priv->flags & PCLK_RISING) ? 0x0001 : 0x0000;
ECHECKER(ret, mt9t112_init_camera(client));
@@ -910,19 +840,12 @@ static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
-static int mt9t112_set_params(struct i2c_client *client, u32 width, u32 height,
+static int mt9t112_set_params(struct mt9t112_priv *priv,
+ const struct v4l2_rect *rect,
enum v4l2_mbus_pixelcode code)
{
- struct mt9t112_priv *priv = to_mt9t112(client);
int i;
- priv->format = NULL;
-
- /*
- * frame size check
- */
- mt9t112_frame_check(&width, &height);
-
/*
* get color format
*/
@@ -933,8 +856,13 @@ static int mt9t112_set_params(struct i2c_client *client, u32 width, u32 height,
if (i == ARRAY_SIZE(mt9t112_cfmts))
return -EINVAL;
- priv->frame.width = (u16)width;
- priv->frame.height = (u16)height;
+ priv->frame = *rect;
+
+ /*
+ * frame size check
+ */
+ mt9t112_frame_check(&priv->frame.width, &priv->frame.height,
+ &priv->frame.left, &priv->frame.top);
priv->format = mt9t112_cfmts + i;
@@ -945,9 +873,12 @@ static int mt9t112_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
a->bounds.left = 0;
a->bounds.top = 0;
- a->bounds.width = VGA_WIDTH;
- a->bounds.height = VGA_HEIGHT;
- a->defrect = a->bounds;
+ a->bounds.width = MAX_WIDTH;
+ a->bounds.height = MAX_HEIGHT;
+ a->defrect.left = 0;
+ a->defrect.top = 0;
+ a->defrect.width = VGA_WIDTH;
+ a->defrect.height = VGA_HEIGHT;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
a->pixelaspect.denominator = 1;
@@ -957,11 +888,11 @@ static int mt9t112_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- a->c.left = 0;
- a->c.top = 0;
- a->c.width = VGA_WIDTH;
- a->c.height = VGA_HEIGHT;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ a->c = priv->frame;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
return 0;
}
@@ -969,10 +900,10 @@ static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
struct v4l2_rect *rect = &a->c;
- return mt9t112_set_params(client, rect->width, rect->height,
- V4L2_MBUS_FMT_UYVY8_2X8);
+ return mt9t112_set_params(priv, rect, priv->format->code);
}
static int mt9t112_g_fmt(struct v4l2_subdev *sd,
@@ -981,16 +912,9 @@ static int mt9t112_g_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
- if (!priv->format) {
- int ret = mt9t112_set_params(client, VGA_WIDTH, VGA_HEIGHT,
- V4L2_MBUS_FMT_UYVY8_2X8);
- if (ret < 0)
- return ret;
- }
-
mf->width = priv->frame.width;
mf->height = priv->frame.height;
- /* TODO: set colorspace */
+ mf->colorspace = priv->format->colorspace;
mf->code = priv->format->code;
mf->field = V4L2_FIELD_NONE;
@@ -1001,17 +925,42 @@ static int mt9t112_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ struct v4l2_rect rect = {
+ .width = mf->width,
+ .height = mf->height,
+ .left = priv->frame.left,
+ .top = priv->frame.top,
+ };
+ int ret;
+
+ ret = mt9t112_set_params(priv, &rect, mf->code);
+
+ if (!ret)
+ mf->colorspace = priv->format->colorspace;
- /* TODO: set colorspace */
- return mt9t112_set_params(client, mf->width, mf->height, mf->code);
+ return ret;
}
static int mt9t112_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- mt9t112_frame_check(&mf->width, &mf->height);
+ unsigned int top, left;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt9t112_cfmts); i++)
+ if (mt9t112_cfmts[i].code == mf->code)
+ break;
+
+ if (i == ARRAY_SIZE(mt9t112_cfmts)) {
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ } else {
+ mf->colorspace = mt9t112_cfmts[i].colorspace;
+ }
+
+ mt9t112_frame_check(&mf->width, &mf->height, &left, &top);
- /* TODO: set colorspace */
mf->field = V4L2_FIELD_NONE;
return 0;
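
The reworked .try_mbus_fmt() above first matches the requested media-bus code against mt9t112_cfmts[] and falls back to UYVY8_2X8 with the JPEG colorspace when nothing matches, then clamps the window through soc_camera_limit_side(). The fragment below is a rough approximation of that clamping idea (start clamped first, then length shrunk to fit), not the kernel helper itself; MAX_WIDTH/MAX_HEIGHT are the sensor limits referenced above.

/* Clamp one side of a capture window to [0, max]. */
static void clamp_side(unsigned int *start, unsigned int *length,
		       unsigned int max)
{
	if (*start > max)
		*start = max;
	if (*start + *length > max)
		*length = max - *start;
}

/*
 * Usage, mirroring mt9t112_frame_check():
 *	clamp_side(&left, &width, MAX_WIDTH);
 *	clamp_side(&top, &height, MAX_HEIGHT);
 * e.g. with max = 2048, a width of 2048 requested at left = 100
 * is reduced to 1948.
 */
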
@@ -1024,6 +973,35 @@ static int mt9t112_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
return -EINVAL;
*code = mt9t112_cfmts[index].code;
+
+ return 0;
+}
+
+static int mt9t112_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
+static int mt9t112_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ if (soc_camera_apply_board_flags(icl, cfg) & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ priv->flags |= PCLK_RISING;
+
return 0;
}
@@ -1036,31 +1014,24 @@ static struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
.g_crop = mt9t112_g_crop,
.s_crop = mt9t112_s_crop,
.enum_mbus_fmt = mt9t112_enum_fmt,
+ .g_mbus_config = mt9t112_g_mbus_config,
+ .s_mbus_config = mt9t112_s_mbus_config,
};
/************************************************************************
-
-
i2c driver
-
-
************************************************************************/
static struct v4l2_subdev_ops mt9t112_subdev_ops = {
.core = &mt9t112_subdev_core_ops,
.video = &mt9t112_subdev_video_ops,
};
-static int mt9t112_camera_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int mt9t112_camera_probe(struct i2c_client *client)
{
struct mt9t112_priv *priv = to_mt9t112(client);
const char *devname;
int chipid;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/*
* check and show chip ID
*/
@@ -1088,20 +1059,21 @@ static int mt9t112_camera_probe(struct soc_camera_device *icd,
static int mt9t112_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
- struct mt9t112_priv *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl;
- int ret;
+ struct mt9t112_priv *priv;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct v4l2_rect rect = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .left = (MAX_WIDTH - VGA_WIDTH) / 2,
+ .top = (MAX_HEIGHT - VGA_HEIGHT) / 2,
+ };
+ int ret;
- if (!icd) {
- dev_err(&client->dev, "mt9t112: missing soc-camera data!\n");
+ if (!icl || !icl->priv) {
+ dev_err(&client->dev, "mt9t112: missing platform data!\n");
return -EINVAL;
}
- icl = to_soc_camera_link(icd);
- if (!icl || !icl->priv)
- return -EINVAL;
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1110,13 +1082,12 @@ static int mt9t112_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
- icd->ops = &mt9t112_ops;
-
- ret = mt9t112_camera_probe(icd, client);
- if (ret) {
- icd->ops = NULL;
+ ret = mt9t112_camera_probe(client);
+ if (ret) {
kfree(priv);
+ return ret;
}
+
+ /* Cannot fail: using the default supported pixel code */
+ mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
return ret;
}
@@ -1124,9 +1095,7 @@ static int mt9t112_probe(struct i2c_client *client,
static int mt9t112_remove(struct i2c_client *client)
{
struct mt9t112_priv *priv = to_mt9t112(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- icd->ops = NULL;
kfree(priv);
return 0;
}
@@ -1147,11 +1116,7 @@ static struct i2c_driver mt9t112_i2c_driver = {
};
/************************************************************************
-
-
module function
-
-
************************************************************************/
static int __init mt9t112_module_init(void)
{
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 893a8b8f514..db74dd27c72 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 51b0fccbfe7..690ee0d42ee 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -13,10 +13,13 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/log2.h>
+#include <linux/module.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-chip-ident.h>
-#include <media/soc_camera.h>
+#include <media/v4l2-ctrls.h>
/*
* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
@@ -100,6 +103,17 @@ static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = {
struct mt9v022 {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* exposure/auto-exposure cluster */
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ /* gain/auto-gain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ };
struct v4l2_rect rect; /* Sensor window */
const struct mt9v022_datafmt *fmt;
const struct mt9v022_datafmt *fmts;
@@ -178,6 +192,8 @@ static int mt9v022_init(struct i2c_client *client)
ret = reg_clear(client, MT9V022_BLACK_LEVEL_CALIB_CTRL, 1);
if (!ret)
ret = reg_write(client, MT9V022_DIGITAL_TEST_PATTERN, 0);
+ if (!ret)
+ return v4l2_ctrl_handler_setup(&mt9v022->hdl);
return ret;
}
@@ -199,78 +215,6 @@ static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int mt9v022_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK;
- int ret;
- u16 pixclk = 0;
-
- /* Only one width bit may be set */
- if (!is_power_of_2(width_flag))
- return -EINVAL;
-
- if (icl->set_bus_param) {
- ret = icl->set_bus_param(icl, width_flag);
- if (ret)
- return ret;
- } else {
- /*
- * Without board specific bus width settings we only support the
- * sensors native bus width
- */
- if (width_flag != SOCAM_DATAWIDTH_10)
- return -EINVAL;
- }
-
- flags = soc_camera_apply_sensor_flags(icl, flags);
-
- if (flags & SOCAM_PCLK_SAMPLE_FALLING)
- pixclk |= 0x10;
-
- if (!(flags & SOCAM_HSYNC_ACTIVE_HIGH))
- pixclk |= 0x1;
-
- if (!(flags & SOCAM_VSYNC_ACTIVE_HIGH))
- pixclk |= 0x2;
-
- ret = reg_write(client, MT9V022_PIXCLK_FV_LV, pixclk);
- if (ret < 0)
- return ret;
-
- if (!(flags & SOCAM_MASTER))
- mt9v022->chip_control &= ~0x8;
-
- ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
- if (ret < 0)
- return ret;
-
- dev_dbg(&client->dev, "Calculated pixclk 0x%x, chip control 0x%x\n",
- pixclk, mt9v022->chip_control);
-
- return 0;
-}
-
-static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned int flags = SOCAM_MASTER | SOCAM_SLAVE |
- SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW |
- SOCAM_DATA_ACTIVE_HIGH;
-
- if (icl->query_bus_param)
- flags |= icl->query_bus_param(icl) & SOCAM_DATAWIDTH_MASK;
- else
- flags |= SOCAM_DATAWIDTH_10;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -389,7 +333,7 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
/*
* The caller provides a supported format, as verified per call to
- * icd->try_fmt(), datawidth is from our supported format list
+ * .try_mbus_fmt(), datawidth is from our supported format list
*/
switch (mf->code) {
case V4L2_MBUS_FMT_Y8_1X8:
@@ -502,236 +446,131 @@ static int mt9v022_s_register(struct v4l2_subdev *sd,
}
#endif
-static const struct v4l2_queryctrl mt9v022_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Analog Gain",
- .minimum = 64,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 1,
- .maximum = 255,
- .step = 1,
- .default_value = 255,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- }, {
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- }
-};
-
-static struct soc_camera_ops mt9v022_ops = {
- .set_bus_param = mt9v022_set_bus_param,
- .query_bus_param = mt9v022_query_bus_param,
- .controls = mt9v022_controls,
- .num_controls = ARRAY_SIZE(mt9v022_controls),
-};
-
-static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct mt9v022 *mt9v022 = container_of(ctrl->handler,
+ struct mt9v022, hdl);
+ struct v4l2_subdev *sd = &mt9v022->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct v4l2_queryctrl *qctrl;
+ struct v4l2_ctrl *gain = mt9v022->gain;
+ struct v4l2_ctrl *exp = mt9v022->exposure;
unsigned long range;
int data;
- qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
-
switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- data = reg_read(client, MT9V022_READ_MODE);
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & 0x10);
- break;
- case V4L2_CID_HFLIP:
- data = reg_read(client, MT9V022_READ_MODE);
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & 0x20);
- break;
- case V4L2_CID_EXPOSURE_AUTO:
- data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & 0x1);
- break;
case V4L2_CID_AUTOGAIN:
- data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
- if (data < 0)
- return -EIO;
- ctrl->value = !!(data & 0x2);
- break;
- case V4L2_CID_GAIN:
data = reg_read(client, MT9V022_ANALOG_GAIN);
if (data < 0)
return -EIO;
- range = qctrl->maximum - qctrl->minimum;
- ctrl->value = ((data - 16) * range + 24) / 48 + qctrl->minimum;
-
- break;
- case V4L2_CID_EXPOSURE:
+ range = gain->maximum - gain->minimum;
+ gain->val = ((data - 16) * range + 24) / 48 + gain->minimum;
+ return 0;
+ case V4L2_CID_EXPOSURE_AUTO:
data = reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH);
if (data < 0)
return -EIO;
- range = qctrl->maximum - qctrl->minimum;
- ctrl->value = ((data - 1) * range + 239) / 479 + qctrl->minimum;
-
- break;
+ range = exp->maximum - exp->minimum;
+ exp->val = ((data - 1) * range + 239) / 479 + exp->minimum;
+ return 0;
}
- return 0;
+ return -EINVAL;
}
-static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl)
{
- int data;
+ struct mt9v022 *mt9v022 = container_of(ctrl->handler,
+ struct mt9v022, hdl);
+ struct v4l2_subdev *sd = &mt9v022->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct v4l2_queryctrl *qctrl;
-
- qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
- if (!qctrl)
- return -EINVAL;
+ int data;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- if (ctrl->value)
+ if (ctrl->val)
data = reg_set(client, MT9V022_READ_MODE, 0x10);
else
data = reg_clear(client, MT9V022_READ_MODE, 0x10);
if (data < 0)
return -EIO;
- break;
+ return 0;
case V4L2_CID_HFLIP:
- if (ctrl->value)
+ if (ctrl->val)
data = reg_set(client, MT9V022_READ_MODE, 0x20);
else
data = reg_clear(client, MT9V022_READ_MODE, 0x20);
if (data < 0)
return -EIO;
- break;
- case V4L2_CID_GAIN:
- /* mt9v022 has minimum == default */
- if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
- return -EINVAL;
- else {
- unsigned long range = qctrl->maximum - qctrl->minimum;
+ return 0;
+ case V4L2_CID_AUTOGAIN:
+ if (ctrl->val) {
+ if (reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
+ return -EIO;
+ } else {
+ struct v4l2_ctrl *gain = mt9v022->gain;
+ /* mt9v022 has minimum == default */
+ unsigned long range = gain->maximum - gain->minimum;
/* Valid values 16 to 64, 32 to 64 must be even. */
- unsigned long gain = ((ctrl->value - qctrl->minimum) *
+ unsigned long gain_val = ((gain->val - gain->minimum) *
48 + range / 2) / range + 16;
- if (gain >= 32)
- gain &= ~1;
+
+ if (gain_val >= 32)
+ gain_val &= ~1;
+
/*
* The user wants to set gain manually, hope, she
* knows, what she's doing... Switch AGC off.
*/
-
if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
return -EIO;
dev_dbg(&client->dev, "Setting gain from %d to %lu\n",
- reg_read(client, MT9V022_ANALOG_GAIN), gain);
- if (reg_write(client, MT9V022_ANALOG_GAIN, gain) < 0)
+ reg_read(client, MT9V022_ANALOG_GAIN), gain_val);
+ if (reg_write(client, MT9V022_ANALOG_GAIN, gain_val) < 0)
return -EIO;
}
- break;
- case V4L2_CID_EXPOSURE:
- /* mt9v022 has maximum == default */
- if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
- return -EINVAL;
- else {
- unsigned long range = qctrl->maximum - qctrl->minimum;
- unsigned long shutter = ((ctrl->value - qctrl->minimum) *
- 479 + range / 2) / range + 1;
+ return 0;
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (ctrl->val == V4L2_EXPOSURE_AUTO) {
+ data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x1);
+ } else {
+ struct v4l2_ctrl *exp = mt9v022->exposure;
+ unsigned long range = exp->maximum - exp->minimum;
+ unsigned long shutter = ((exp->val - exp->minimum) *
+ 479 + range / 2) / range + 1;
+
/*
* The user wants to set shutter width manually, hope,
* she knows, what she's doing... Switch AEC off.
*/
-
- if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0)
+ data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1);
+ if (data < 0)
return -EIO;
-
dev_dbg(&client->dev, "Shutter width from %d to %lu\n",
- reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH),
- shutter);
+ reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH),
+ shutter);
if (reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
- shutter) < 0)
+ shutter) < 0)
return -EIO;
}
- break;
- case V4L2_CID_AUTOGAIN:
- if (ctrl->value)
- data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x2);
- else
- data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2);
- if (data < 0)
- return -EIO;
- break;
- case V4L2_CID_EXPOSURE_AUTO:
- if (ctrl->value)
- data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x1);
- else
- data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1);
- if (data < 0)
- return -EIO;
- break;
+ return 0;
}
- return 0;
+ return -EINVAL;
}
/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
-static int mt9v022_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int mt9v022_video_probe(struct i2c_client *client)
{
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
s32 data;
int ret;
unsigned long flags;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/* Read out the chip version register */
data = reg_read(client, MT9V022_CHIP_VERSION);
@@ -805,16 +644,6 @@ ei2c:
return ret;
}
-static void mt9v022_video_remove(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
-
- dev_dbg(icd->pdev, "Video removed: %p, %p\n",
- icd->parent, icd->vdev);
- if (icl->free_bus)
- icl->free_bus(icl);
-}
-
static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -825,9 +654,12 @@ static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
return 0;
}
+static const struct v4l2_ctrl_ops mt9v022_ctrl_ops = {
+ .g_volatile_ctrl = mt9v022_g_volatile_ctrl,
+ .s_ctrl = mt9v022_s_ctrl,
+};
+
static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
- .g_ctrl = mt9v022_g_ctrl,
- .s_ctrl = mt9v022_s_ctrl,
.g_chip_ident = mt9v022_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9v022_g_register,
@@ -848,6 +680,72 @@ static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
return 0;
}
+static int mt9v022_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE |
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
+static int mt9v022_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+ unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ unsigned int bps = soc_mbus_get_fmtdesc(mt9v022->fmt->code)->bits_per_sample;
+ int ret;
+ u16 pixclk = 0;
+
+ if (icl->set_bus_param) {
+ ret = icl->set_bus_param(icl, 1 << (bps - 1));
+ if (ret)
+ return ret;
+ } else if (bps != 10) {
+ /*
+ * Without board-specific bus width settings we only support the
+ * sensor's native bus width
+ */
+ return -EINVAL;
+ }
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ pixclk |= 0x10;
+
+ if (!(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH))
+ pixclk |= 0x1;
+
+ if (!(flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH))
+ pixclk |= 0x2;
+
+ ret = reg_write(client, MT9V022_PIXCLK_FV_LV, pixclk);
+ if (ret < 0)
+ return ret;
+
+ if (!(flags & V4L2_MBUS_MASTER))
+ mt9v022->chip_control &= ~0x8;
+
+ ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&client->dev, "Calculated pixclk 0x%x, chip control 0x%x\n",
+ pixclk, mt9v022->chip_control);
+
+ return 0;
+}
+
static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
.s_stream = mt9v022_s_stream,
.s_mbus_fmt = mt9v022_s_fmt,
@@ -857,6 +755,8 @@ static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
.g_crop = mt9v022_g_crop,
.cropcap = mt9v022_cropcap,
.enum_mbus_fmt = mt9v022_enum_fmt,
+ .g_mbus_config = mt9v022_g_mbus_config,
+ .s_mbus_config = mt9v022_s_mbus_config,
};
static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
@@ -873,17 +773,10 @@ static int mt9v022_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9v022 *mt9v022;
- struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
int ret;
- if (!icd) {
- dev_err(&client->dev, "MT9V022: missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "MT9V022 driver needs platform data\n");
return -EINVAL;
@@ -900,10 +793,39 @@ static int mt9v022_probe(struct i2c_client *client,
return -ENOMEM;
v4l2_i2c_subdev_init(&mt9v022->subdev, client, &mt9v022_subdev_ops);
+ v4l2_ctrl_handler_init(&mt9v022->hdl, 6);
+ v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ mt9v022->autogain = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ mt9v022->gain = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 64);
+
+ /*
+ * Simulated autoexposure. If enabled, we calculate shutter width
+ * ourselves in the driver based on vertical blanking and frame width
+ */
+ mt9v022->autoexposure = v4l2_ctrl_new_std_menu(&mt9v022->hdl,
+ &mt9v022_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
+ V4L2_EXPOSURE_AUTO);
+ mt9v022->exposure = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1, 255, 1, 255);
+
+ mt9v022->subdev.ctrl_handler = &mt9v022->hdl;
+ if (mt9v022->hdl.error) {
+ int err = mt9v022->hdl.error;
+
+ kfree(mt9v022);
+ return err;
+ }
+ v4l2_ctrl_auto_cluster(2, &mt9v022->autoexposure,
+ V4L2_EXPOSURE_MANUAL, true);
+ v4l2_ctrl_auto_cluster(2, &mt9v022->autogain, 0, true);
mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT;
- icd->ops = &mt9v022_ops;
/*
* MT9V022 _really_ corrupts the first read out line.
* TODO: verify on i.MX31
@@ -914,9 +836,9 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->rect.width = MT9V022_MAX_WIDTH;
mt9v022->rect.height = MT9V022_MAX_HEIGHT;
- ret = mt9v022_video_probe(icd, client);
+ ret = mt9v022_video_probe(client);
if (ret) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&mt9v022->hdl);
kfree(mt9v022);
}
@@ -926,10 +848,12 @@ static int mt9v022_probe(struct i2c_client *client,
static int mt9v022_remove(struct i2c_client *client)
{
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- icd->ops = NULL;
- mt9v022_video_remove(icd);
+ v4l2_device_unregister_subdev(&mt9v022->subdev);
+ if (icl->free_bus)
+ icl->free_bus(icl);
+ v4l2_ctrl_handler_free(&mt9v022->hdl);
kfree(mt9v022);
return 0;
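
mt9v022_s_mbus_config() above folds the negotiated bus flags into the MT9V022_PIXCLK_FV_LV register, following the writes in the patch: 0x10 when the pixel clock samples on the falling edge, 0x1/0x2 when LINE_VALID/FRAME_VALID are not active-high. A self-contained sketch of that packing; the BUS_* macros are stand-ins for the real V4L2_MBUS_* definitions in the V4L2 media-bus headers, with arbitrary values.

/* Stand-ins for the V4L2_MBUS_* flags used above; values are arbitrary. */
#define BUS_PCLK_SAMPLE_FALLING	(1 << 0)
#define BUS_HSYNC_ACTIVE_HIGH	(1 << 1)
#define BUS_VSYNC_ACTIVE_HIGH	(1 << 2)

static unsigned int mt9v022_pack_pixclk(unsigned long flags)
{
	unsigned int pixclk = 0;

	if (flags & BUS_PCLK_SAMPLE_FALLING)
		pixclk |= 0x10;		/* sample on the falling PIXCLK edge */
	if (!(flags & BUS_HSYNC_ACTIVE_HIGH))
		pixclk |= 0x1;		/* LINE_VALID active low */
	if (!(flags & BUS_VSYNC_ACTIVE_HIGH))
		pixclk |= 0x2;		/* FRAME_VALID active low */

	return pixclk;			/* written to MT9V022_PIXCLK_FV_LV */
}
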
diff --git a/drivers/media/video/mt9v032.c b/drivers/media/video/mt9v032.c
index c64e1dc4cb4..f080c162123 100644
--- a/drivers/media/video/mt9v032.c
+++ b/drivers/media/video/mt9v032.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/v4l2-mediabus.h>
+#include <linux/module.h>
#include <media/mt9v032.h>
#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 087db12a3a6..18e94c7d2be 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -78,11 +78,10 @@
#define CSI_IRQ_MASK (CSISR_SFF_OR_INT | CSISR_RFF_OR_INT | \
CSISR_STATFF_INT | CSISR_RXFF_INT | CSISR_SOF_INT)
-#define CSI_BUS_FLAGS (SOCAM_MASTER | SOCAM_HSYNC_ACTIVE_HIGH | \
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW | \
- SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING | \
- SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_LOW | \
- SOCAM_DATAWIDTH_8)
+#define CSI_BUS_FLAGS (V4L2_MBUS_MASTER | V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_LOW)
#define MAX_VIDEO_MEM 16 /* Video memory limit in megabytes */
@@ -490,59 +489,73 @@ static int mx1_camera_set_crop(struct soc_camera_device *icd,
static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
- unsigned long camera_flags, common_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
unsigned int csicr1;
int ret;
- camera_flags = icd->ops->query_bus_param(icd);
-
/* MX1 supports only 8bit buswidth */
- common_flags = soc_camera_bus_param_compatible(camera_flags,
- CSI_BUS_FLAGS);
- if (!common_flags)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg, CSI_BUS_FLAGS);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, CSI_BUS_FLAGS);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = CSI_BUS_FLAGS;
+ }
/* Make choices, based on platform choice */
- if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
if (!pcdev->pdata ||
pcdev->pdata->flags & MX1_CAMERA_VSYNC_HIGH)
- common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
else
- common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
}
- if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
- (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
if (!pcdev->pdata ||
pcdev->pdata->flags & MX1_CAMERA_PCLK_RISING)
- common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
else
- common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
}
- if ((common_flags & SOCAM_DATA_ACTIVE_HIGH) &&
- (common_flags & SOCAM_DATA_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_DATA_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)) {
if (!pcdev->pdata ||
pcdev->pdata->flags & MX1_CAMERA_DATA_HIGH)
- common_flags &= ~SOCAM_DATA_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_LOW;
else
- common_flags &= ~SOCAM_DATA_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_HIGH;
}
- ret = icd->ops->set_bus_param(icd, common_flags);
- if (ret < 0)
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
return ret;
+ }
csicr1 = __raw_readl(pcdev->base + CSICR1);
- if (common_flags & SOCAM_PCLK_SAMPLE_RISING)
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
- if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH)
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
- if (common_flags & SOCAM_DATA_ACTIVE_LOW)
+ if (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)
csicr1 |= CSICR1_DATA_POL;
__raw_writel(csicr1, pcdev->base + CSICR1);
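
The rewritten mx1_camera_set_bus_param() above (and the equivalent mx2 code in the next file) follows the new negotiation pattern: query the sensor's capabilities with g_mbus_config, intersect them with the host's CSI_BUS_FLAGS, resolve any still-ambiguous polarity from the platform data, and program the result back with s_mbus_config. Below is a schematic sketch of one such resolution step; the flag values are stand-ins, and in the kernel the intersection is done by soc_mbus_config_compatible().

/* Stand-in flags; in the driver these are V4L2_MBUS_* bits. */
#define BUS_VSYNC_HIGH	(1 << 0)
#define BUS_VSYNC_LOW	(1 << 1)
#define HOST_CAPS	(BUS_VSYNC_HIGH | BUS_VSYNC_LOW)

/* Returns the flags to program into the sensor, or 0 if incompatible. */
static unsigned long negotiate_vsync(unsigned long sensor_caps,
				     int board_wants_high)
{
	unsigned long common = sensor_caps & HOST_CAPS;

	if (!common)
		return 0;

	/* Both polarities possible: let the platform data decide, as the
	 * pcdev->pdata->flags checks above do. */
	if ((common & BUS_VSYNC_HIGH) && (common & BUS_VSYNC_LOW))
		common &= board_wants_high ? ~BUS_VSYNC_LOW : ~BUS_VSYNC_HIGH;

	return common;	/* then handed to the subdev via s_mbus_config() */
}
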
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index ec2410c0c80..a803d9ea8fd 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -686,16 +686,15 @@ static void mx2_camera_init_videobuf(struct videobuf_queue *q,
icd, &icd->video_lock);
}
-#define MX2_BUS_FLAGS (SOCAM_DATAWIDTH_8 | \
- SOCAM_MASTER | \
- SOCAM_VSYNC_ACTIVE_HIGH | \
- SOCAM_VSYNC_ACTIVE_LOW | \
- SOCAM_HSYNC_ACTIVE_HIGH | \
- SOCAM_HSYNC_ACTIVE_LOW | \
- SOCAM_PCLK_SAMPLE_RISING | \
- SOCAM_PCLK_SAMPLE_FALLING | \
- SOCAM_DATA_ACTIVE_HIGH | \
- SOCAM_DATA_ACTIVE_LOW)
+#define MX2_BUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH | \
+ V4L2_MBUS_DATA_ACTIVE_LOW)
static int mx27_camera_emma_prp_reset(struct mx2_camera_dev *pcdev)
{
@@ -770,46 +769,59 @@ static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
- struct soc_camera_host *ici =
- to_soc_camera_host(icd->parent);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
- unsigned long camera_flags, common_flags;
- int ret = 0;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
+ int ret;
int bytesperline;
u32 csicr1 = pcdev->csicr1;
- camera_flags = icd->ops->query_bus_param(icd);
-
- common_flags = soc_camera_bus_param_compatible(camera_flags,
- MX2_BUS_FLAGS);
- if (!common_flags)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg, MX2_BUS_FLAGS);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, MX2_BUS_FLAGS);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = MX2_BUS_FLAGS;
+ }
- if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
if (pcdev->platform_flags & MX2_CAMERA_HSYNC_HIGH)
- common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
else
- common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
}
- if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
- (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
if (pcdev->platform_flags & MX2_CAMERA_PCLK_SAMPLE_RISING)
- common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
else
- common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
}
- ret = icd->ops->set_bus_param(icd, common_flags);
- if (ret < 0)
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
return ret;
+ }
- if (common_flags & SOCAM_PCLK_SAMPLE_RISING)
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
- if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH)
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
- if (common_flags & SOCAM_HSYNC_ACTIVE_HIGH)
+ if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_HSYNC_POL;
if (pcdev->platform_flags & MX2_CAMERA_SWAP16)
csicr1 |= CSICR1_SWAP16_EN;
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index c045b47803a..f96f92f00f9 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -109,10 +109,12 @@ struct mx3_camera_dev {
unsigned long platform_flags;
unsigned long mclk;
+ u16 width_flags; /* max 15 bits */
struct list_head capture;
spinlock_t lock; /* Protects video buffer lists */
struct mx3_camera_buffer *active;
+ size_t buf_total;
struct vb2_alloc_ctx *alloc_ctx;
enum v4l2_field field;
int sequence;
@@ -190,79 +192,53 @@ static void mx3_cam_dma_done(void *arg)
* Calculate the __buffer__ (not data) size and number of buffers.
*/
static int mx3_videobuf_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
unsigned int *count, unsigned int *num_planes,
- unsigned long sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
+ int bytes_per_line;
+ unsigned int height;
if (!mx3_cam->idmac_channel[0])
return -EINVAL;
- *num_planes = 1;
-
- mx3_cam->sequence = 0;
- sizes[0] = bytes_per_line * icd->user_height;
- alloc_ctxs[0] = mx3_cam->alloc_ctx;
-
- if (!*count)
- *count = 32;
-
- if (sizes[0] * *count > MAX_VIDEO_MEM * 1024 * 1024)
- *count = MAX_VIDEO_MEM * 1024 * 1024 / sizes[0];
-
- return 0;
-}
-
-static int mx3_videobuf_prepare(struct vb2_buffer *vb)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct mx3_camera_dev *mx3_cam = ici->priv;
- struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
- struct scatterlist *sg;
- struct mx3_camera_buffer *buf;
- size_t new_size;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ if (fmt) {
+ const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
+ fmt->fmt.pix.pixelformat);
+ if (!xlate)
+ return -EINVAL;
+ bytes_per_line = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ height = fmt->fmt.pix.height;
+ } else {
+ /* Called from VIDIOC_REQBUFS or in compatibility mode */
+ bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
-
+ height = icd->user_height;
+ }
if (bytes_per_line < 0)
return bytes_per_line;
- buf = to_mx3_vb(vb);
- sg = &buf->sg;
+ sizes[0] = bytes_per_line * height;
- new_size = bytes_per_line * icd->user_height;
-
- if (vb2_plane_size(vb, 0) < new_size) {
- dev_err(icd->parent, "Buffer too small (%lu < %zu)\n",
- vb2_plane_size(vb, 0), new_size);
- return -ENOBUFS;
- }
-
- if (buf->state == CSI_BUF_NEEDS_INIT) {
- sg_dma_address(sg) = vb2_dma_contig_plane_paddr(vb, 0);
- sg_dma_len(sg) = new_size;
+ alloc_ctxs[0] = mx3_cam->alloc_ctx;
- buf->txd = ichan->dma_chan.device->device_prep_slave_sg(
- &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT);
- if (!buf->txd)
- return -EIO;
+ if (!vq->num_buffers)
+ mx3_cam->sequence = 0;
- buf->txd->callback_param = buf->txd;
- buf->txd->callback = mx3_cam_dma_done;
+ if (!*count)
+ *count = 2;
- buf->state = CSI_BUF_PREPARED;
- }
+ /* If *num_planes != 0, we have already verified *count. */
+ if (!*num_planes &&
+ sizes[0] * *count + mx3_cam->buf_total > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024 - mx3_cam->buf_total) /
+ sizes[0];
- vb2_set_plane_payload(vb, 0, new_size);
+ *num_planes = 1;
return 0;
}
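
The rewritten queue_setup above distinguishes the CREATE_BUFS path (fmt supplied) from plain REQBUFS (fmt == NULL) and clamps the requested buffer count against a global memory budget that also accounts for buffers already allocated (buf_total). A minimal sketch of just that clamping arithmetic, with the helper name and parameters chosen purely for illustration:

#include <linux/types.h>

/* Illustration only; mirrors the count clamping in the hunk above. */
static unsigned int clamp_buffer_count(unsigned int requested, size_t buf_size,
				       size_t already_allocated, size_t budget)
{
	if (!requested)
		requested = 2;		/* same default as the driver */

	/* Assumes budget > already_allocated, as in the driver's usage */
	if (buf_size * requested + already_allocated > budget)
		requested = (budget - already_allocated) / buf_size;

	return requested;
}

/* e.g. clamp_buffer_count(0, 640 * 480 * 2, 0, 16 << 20) == 2 */
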
@@ -286,28 +262,58 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
- struct dma_async_tx_descriptor *txd = buf->txd;
- struct idmac_channel *ichan = to_idmac_chan(txd->chan);
+ struct scatterlist *sg = &buf->sg;
+ struct dma_async_tx_descriptor *txd;
+ struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct idmac_video_param *video = &ichan->params.video;
- dma_cookie_t cookie;
- u32 fourcc = icd->current_fmt->host_fmt->fourcc;
+ const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, host_fmt);
unsigned long flags;
+ dma_cookie_t cookie;
+ size_t new_size;
+
+ BUG_ON(bytes_per_line <= 0);
+
+ new_size = bytes_per_line * icd->user_height;
+
+ if (vb2_plane_size(vb, 0) < new_size) {
+ dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
+ vb->v4l2_buf.index, vb2_plane_size(vb, 0), new_size);
+ goto error;
+ }
+
+ if (buf->state == CSI_BUF_NEEDS_INIT) {
+ sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0);
+ sg_dma_len(sg) = new_size;
+
+ txd = ichan->dma_chan.device->device_prep_slave_sg(
+ &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT);
+ if (!txd)
+ goto error;
+
+ txd->callback_param = txd;
+ txd->callback = mx3_cam_dma_done;
+
+ buf->state = CSI_BUF_PREPARED;
+ buf->txd = txd;
+ } else {
+ txd = buf->txd;
+ }
+
+ vb2_set_plane_payload(vb, 0, new_size);
/* This is the configuration of one sg-element */
- video->out_pixel_fmt = fourcc_to_ipu_pix(fourcc);
+ video->out_pixel_fmt = fourcc_to_ipu_pix(host_fmt->fourcc);
if (video->out_pixel_fmt == IPU_PIX_FMT_GENERIC) {
/*
- * If the IPU DMA channel is configured to transport
- * generic 8-bit data, we have to set up correctly the
- * geometry parameters upon the current pixel format.
- * So, since the DMA horizontal parameters are expressed
- * in bytes not pixels, convert these in the right unit.
+ * If the IPU DMA channel is configured to transfer generic
+ * 8-bit data, we have to set up the geometry parameters
+ * correctly, according to the current pixel format. The DMA
+ * horizontal parameters in this case are expressed in bytes,
+ * not in pixels.
*/
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- BUG_ON(bytes_per_line <= 0);
-
video->out_width = bytes_per_line;
video->out_height = icd->user_height;
video->out_stride = bytes_per_line;
@@ -351,6 +357,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
mx3_cam->active = NULL;
spin_unlock_irqrestore(&mx3_cam->lock, flags);
+error:
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
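
Because buf_queue() has no return value in vb2, the rewritten mx3 handler reports failures by completing the buffer in the error state, which is what the new error: label above does. The same pattern, reduced to a skeleton (prepare_hw() is a hypothetical stand-in for the real slave-sg descriptor setup):

#include <media/videobuf2-core.h>

static int prepare_hw(struct vb2_buffer *vb)
{
	/* Placeholder for the DMA descriptor preparation */
	return 0;
}

static void sketch_buf_queue(struct vb2_buffer *vb)
{
	if (prepare_hw(vb) < 0) {
		/* buf_queue cannot fail, so hand the buffer back as errored */
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		return;
	}
	/* ...add the buffer to the capture list and kick the DMA... */
}
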
@@ -384,17 +391,24 @@ static void mx3_videobuf_release(struct vb2_buffer *vb)
}
spin_unlock_irqrestore(&mx3_cam->lock, flags);
+
+ mx3_cam->buf_total -= vb2_plane_size(vb, 0);
}
static int mx3_videobuf_init(struct vb2_buffer *vb)
{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+
/* This is for locking debugging only */
INIT_LIST_HEAD(&buf->queue);
sg_init_table(&buf->sg, 1);
buf->state = CSI_BUF_NEEDS_INIT;
- buf->txd = NULL;
+
+ mx3_cam->buf_total += vb2_plane_size(vb, 0);
return 0;
}
@@ -405,13 +419,12 @@ static int mx3_stop_streaming(struct vb2_queue *q)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
- struct dma_chan *chan;
struct mx3_camera_buffer *buf, *tmp;
unsigned long flags;
if (ichan) {
- chan = &ichan->dma_chan;
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ struct dma_chan *chan = &ichan->dma_chan;
+ chan->device->device_control(chan, DMA_PAUSE, 0);
}
spin_lock_irqsave(&mx3_cam->lock, flags);
@@ -419,8 +432,8 @@ static int mx3_stop_streaming(struct vb2_queue *q)
mx3_cam->active = NULL;
list_for_each_entry_safe(buf, tmp, &mx3_cam->capture, queue) {
- buf->state = CSI_BUF_NEEDS_INIT;
list_del_init(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&mx3_cam->lock, flags);
@@ -430,7 +443,6 @@ static int mx3_stop_streaming(struct vb2_queue *q)
static struct vb2_ops mx3_videobuf_ops = {
.queue_setup = mx3_videobuf_setup,
- .buf_prepare = mx3_videobuf_prepare,
.buf_queue = mx3_videobuf_queue,
.buf_cleanup = mx3_videobuf_release,
.buf_init = mx3_videobuf_init,
@@ -514,6 +526,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
mx3_camera_activate(mx3_cam, icd);
+ mx3_cam->buf_total = 0;
mx3_cam->icd = icd;
dev_info(icd->parent, "MX3 Camera driver attached to camera %d\n",
@@ -548,58 +561,27 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
unsigned char buswidth, unsigned long *flags)
{
/*
+ * If requested data width is supported by the platform, use it or any
+ * possible lower value - i.MX31 is smart enough to shift bits
+ */
+ if (buswidth > fls(mx3_cam->width_flags))
+ return -EINVAL;
+
+ /*
* Platform specified synchronization and pixel clock polarities are
* only a recommendation and are only used during probing. MX3x
* camera interface only works in master mode, i.e., uses HSYNC and
* VSYNC signals from the sensor
*/
- *flags = SOCAM_MASTER |
- SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_HSYNC_ACTIVE_LOW |
- SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_VSYNC_ACTIVE_LOW |
- SOCAM_PCLK_SAMPLE_RISING |
- SOCAM_PCLK_SAMPLE_FALLING |
- SOCAM_DATA_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_LOW;
-
- /*
- * If requested data width is supported by the platform, use it or any
- * possible lower value - i.MX31 is smart enough to schift bits
- */
- if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
- *flags |= SOCAM_DATAWIDTH_15 | SOCAM_DATAWIDTH_10 |
- SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
- else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
- *flags |= SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8 |
- SOCAM_DATAWIDTH_4;
- else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
- *flags |= SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
- else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)
- *flags |= SOCAM_DATAWIDTH_4;
-
- switch (buswidth) {
- case 15:
- if (!(*flags & SOCAM_DATAWIDTH_15))
- return -EINVAL;
- break;
- case 10:
- if (!(*flags & SOCAM_DATAWIDTH_10))
- return -EINVAL;
- break;
- case 8:
- if (!(*flags & SOCAM_DATAWIDTH_8))
- return -EINVAL;
- break;
- case 4:
- if (!(*flags & SOCAM_DATAWIDTH_4))
- return -EINVAL;
- break;
- default:
- dev_warn(mx3_cam->soc_host.v4l2_dev.dev,
- "Unsupported bus width %d\n", buswidth);
- return -EINVAL;
- }
+ *flags = V4L2_MBUS_MASTER |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW;
return 0;
}
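
The new width check relies on an encoding where bit N-1 of width_flags is set when an N-bit bus is supported, so fls() of the mask yields the widest supported bus and any narrower request passes. A small sketch of that encoding, assuming the same convention as the probe code later in this file:

#include <linux/types.h>
#include <linux/bitops.h>

/* Bit N-1 set <=> an N-bit data bus is supported, so fls(mask) is the
 * widest supported bus width.
 */
static u16 encode_width_flags(bool w4, bool w8, bool w10, bool w15)
{
	u16 mask = 0;

	if (w4)
		mask |= 1 << 3;
	if (w8)
		mask |= 1 << 7;
	if (w10)
		mask |= 1 << 9;
	if (w15)
		mask |= 1 << 14;
	return mask;
}

static bool bus_width_ok(u16 width_flags, unsigned char buswidth)
{
	/* e.g. width_flags = 1 << 7: fls() == 8, so 4..8 bit requests pass */
	return buswidth <= fls(width_flags);
}
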
@@ -607,9 +589,11 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
const unsigned int depth)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- unsigned long bus_flags, camera_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long bus_flags, common_flags;
int ret = test_platform_param(mx3_cam, depth, &bus_flags);
dev_dbg(icd->parent, "request bus width %d bit: %d\n", depth, ret);
@@ -617,15 +601,21 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
if (ret < 0)
return ret;
- camera_flags = icd->ops->query_bus_param(icd);
-
- ret = soc_camera_bus_param_compatible(camera_flags, bus_flags);
- if (ret < 0)
- dev_warn(icd->parent,
- "Flags incompatible: camera %lx, host %lx\n",
- camera_flags, bus_flags);
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ }
- return ret;
+ return 0;
}
static bool chan_filter(struct dma_chan *chan, void *arg)
@@ -994,9 +984,11 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- unsigned long bus_flags, camera_flags, common_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long bus_flags, common_flags;
u32 dw, sens_conf;
const struct soc_mbus_pixelfmt *fmt;
int buswidth;
@@ -1008,83 +1000,76 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
if (!fmt)
return -EINVAL;
- buswidth = fmt->bits_per_sample;
- ret = test_platform_param(mx3_cam, buswidth, &bus_flags);
-
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
dev_warn(dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
+ buswidth = fmt->bits_per_sample;
+ ret = test_platform_param(mx3_cam, buswidth, &bus_flags);
+
dev_dbg(dev, "requested bus width %d bit: %d\n", buswidth, ret);
if (ret < 0)
return ret;
- camera_flags = icd->ops->query_bus_param(icd);
-
- common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
- dev_dbg(dev, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n",
- camera_flags, bus_flags, common_flags);
- if (!common_flags) {
- dev_dbg(dev, "no common flags");
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = bus_flags;
}
+ dev_dbg(dev, "Flags cam: 0x%x host: 0x%lx common: 0x%lx\n",
+ cfg.flags, bus_flags, common_flags);
+
/* Make choices, based on platform preferences */
- if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
if (mx3_cam->platform_flags & MX3_CAMERA_HSP)
- common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
if (mx3_cam->platform_flags & MX3_CAMERA_VSP)
- common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_DATA_ACTIVE_HIGH) &&
- (common_flags & SOCAM_DATA_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_DATA_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)) {
if (mx3_cam->platform_flags & MX3_CAMERA_DP)
- common_flags &= ~SOCAM_DATA_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_DATA_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
- (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
if (mx3_cam->platform_flags & MX3_CAMERA_PCP)
- common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
else
- common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
}
- /*
- * Make the camera work in widest common mode, we'll take care of
- * the rest
- */
- if (common_flags & SOCAM_DATAWIDTH_15)
- common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) |
- SOCAM_DATAWIDTH_15;
- else if (common_flags & SOCAM_DATAWIDTH_10)
- common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) |
- SOCAM_DATAWIDTH_10;
- else if (common_flags & SOCAM_DATAWIDTH_8)
- common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) |
- SOCAM_DATAWIDTH_8;
- else
- common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) |
- SOCAM_DATAWIDTH_4;
-
- ret = icd->ops->set_bus_param(icd, common_flags);
- if (ret < 0) {
- dev_dbg(dev, "camera set_bus_param(%lx) returned %d\n",
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(dev, "camera s_mbus_config(0x%lx) returned %d\n",
common_flags, ret);
return ret;
}
@@ -1108,13 +1093,13 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
/* This has been set in mx3_camera_activate(), but we clear it above */
sens_conf |= CSI_SENS_CONF_DATA_FMT_BAYER;
- if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
sens_conf |= 1 << CSI_SENS_CONF_PIX_CLK_POL_SHIFT;
- if (common_flags & SOCAM_HSYNC_ACTIVE_LOW)
+ if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
sens_conf |= 1 << CSI_SENS_CONF_HSYNC_POL_SHIFT;
- if (common_flags & SOCAM_VSYNC_ACTIVE_LOW)
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
sens_conf |= 1 << CSI_SENS_CONF_VSYNC_POL_SHIFT;
- if (common_flags & SOCAM_DATA_ACTIVE_LOW)
+ if (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)
sens_conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT;
/* Just do what we're asked to do */
@@ -1199,6 +1184,14 @@ static int __devinit mx3_camera_probe(struct platform_device *pdev)
"data widths, using default 8 bit\n");
mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8;
}
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)
+ mx3_cam->width_flags = 1 << 3;
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
+ mx3_cam->width_flags |= 1 << 7;
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
+ mx3_cam->width_flags |= 1 << 9;
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
+ mx3_cam->width_flags |= 1 << 14;
mx3_cam->mclk = mx3_cam->pdata->mclk_10khz * 10000;
if (!mx3_cam->mclk) {
@@ -1281,8 +1274,6 @@ static int __devexit mx3_camera_remove(struct platform_device *pdev)
dmaengine_put();
- dev_info(&pdev->dev, "i.MX3x Camera driver unloaded\n");
-
return 0;
}
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 0b385002350..2e413174843 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -21,12 +21,15 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DEBUG_VARIABLE debug
#include <media/saa7146_vv.h>
#include <media/tuner.h>
#include <media/v4l2-common.h>
#include <media/saa7115.h>
+#include <linux/module.h>
#include "mxb.h"
#include "tea6415c.h"
@@ -171,7 +174,7 @@ static int mxb_probe(struct saa7146_dev *dev)
mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
if (mxb == NULL) {
- DEB_D(("not enough kernel memory.\n"));
+ DEB_D("not enough kernel memory\n");
return -ENOMEM;
}
@@ -179,7 +182,7 @@ static int mxb_probe(struct saa7146_dev *dev)
saa7146_i2c_adapter_prepare(dev, &mxb->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&mxb->i2c_adapter) < 0) {
- DEB_S(("cannot register i2c-device. skipping.\n"));
+ DEB_S("cannot register i2c-device. skipping.\n");
kfree(mxb);
return -EFAULT;
}
@@ -200,7 +203,7 @@ static int mxb_probe(struct saa7146_dev *dev)
/* check if all devices are present */
if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c ||
!mxb->tda9840 || !mxb->saa7111a || !mxb->tuner) {
- printk("mxb: did not find all i2c devices. aborting\n");
+ pr_err("did not find all i2c devices. aborting\n");
i2c_del_adapter(&mxb->i2c_adapter);
kfree(mxb);
return -ENODEV;
@@ -346,11 +349,11 @@ static int mxb_init_done(struct saa7146_dev* dev)
msg.buf = &mxb_saa7740_init[i].data[0];
err = i2c_transfer(&mxb->i2c_adapter, &msg, 1);
if (err != 1) {
- DEB_D(("failed to initialize 'sound arena module'.\n"));
+ DEB_D("failed to initialize 'sound arena module'\n");
goto err;
}
}
- INFO(("'sound arena module' detected.\n"));
+ pr_info("'sound arena module' detected\n");
}
err:
/* the rest for saa7146: you should definitely set some basic values
@@ -390,7 +393,7 @@ static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *
for (i = MAXCONTROLS - 1; i >= 0; i--) {
if (mxb_controls[i].id == qc->id) {
*qc = mxb_controls[i];
- DEB_D(("VIDIOC_QUERYCTRL %d.\n", qc->id));
+ DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
return 0;
}
}
@@ -413,11 +416,11 @@ static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
if (vc->id == V4L2_CID_AUDIO_MUTE) {
vc->value = mxb->cur_mute;
- DEB_D(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n", vc->value));
+ DEB_D("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
return 0;
}
- DEB_EE(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n", vc->value));
+ DEB_EE("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
return 0;
}
@@ -440,14 +443,14 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
/* switch the audio-source */
tea6420_route_line(mxb, vc->value ? 6 :
video_audio_connect[mxb->cur_input]);
- DEB_EE(("VIDIOC_S_CTRL, V4L2_CID_AUDIO_MUTE: %d.\n", vc->value));
+ DEB_EE("VIDIOC_S_CTRL, V4L2_CID_AUDIO_MUTE: %d\n", vc->value);
}
return 0;
}
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
- DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
+ DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
if (i->index >= MXB_INPUTS)
return -EINVAL;
memcpy(i, &mxb_inputs[i->index], sizeof(struct v4l2_input));
@@ -460,7 +463,7 @@ static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
struct mxb *mxb = (struct mxb *)dev->ext_priv;
*i = mxb->cur_input;
- DEB_EE(("VIDIOC_G_INPUT %d.\n", *i));
+ DEB_EE("VIDIOC_G_INPUT %d\n", *i);
return 0;
}
@@ -471,7 +474,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
int err = 0;
int i = 0;
- DEB_EE(("VIDIOC_S_INPUT %d.\n", input));
+ DEB_EE("VIDIOC_S_INPUT %d\n", input);
if (input >= MXB_INPUTS)
return -EINVAL;
@@ -514,7 +517,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
/* switch video in saa7111a */
if (saa7111a_call(mxb, video, s_routing, i, SAA7111_FMT_CCIR, 0))
- printk(KERN_ERR "VIDIOC_S_INPUT: could not address saa7111a.\n");
+ pr_err("VIDIOC_S_INPUT: could not address saa7111a\n");
/* switch the audio-source only if necessary */
if (0 == mxb->cur_mute)
@@ -529,11 +532,12 @@ static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
struct mxb *mxb = (struct mxb *)dev->ext_priv;
if (t->index) {
- DEB_D(("VIDIOC_G_TUNER: channel %d does not have a tuner attached.\n", t->index));
+ DEB_D("VIDIOC_G_TUNER: channel %d does not have a tuner attached\n",
+ t->index);
return -EINVAL;
}
- DEB_EE(("VIDIOC_G_TUNER: %d\n", t->index));
+ DEB_EE("VIDIOC_G_TUNER: %d\n", t->index);
memset(t, 0, sizeof(*t));
strlcpy(t->name, "TV Tuner", sizeof(t->name));
@@ -550,7 +554,8 @@ static int vidioc_s_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
struct mxb *mxb = (struct mxb *)dev->ext_priv;
if (t->index) {
- DEB_D(("VIDIOC_S_TUNER: channel %d does not have a tuner attached.\n", t->index));
+ DEB_D("VIDIOC_S_TUNER: channel %d does not have a tuner attached\n",
+ t->index);
return -EINVAL;
}
@@ -564,14 +569,14 @@ static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency
struct mxb *mxb = (struct mxb *)dev->ext_priv;
if (mxb->cur_input) {
- DEB_D(("VIDIOC_G_FREQ: channel %d does not have a tuner!\n",
- mxb->cur_input));
+ DEB_D("VIDIOC_G_FREQ: channel %d does not have a tuner!\n",
+ mxb->cur_input);
return -EINVAL;
}
*f = mxb->cur_freq;
- DEB_EE(("VIDIOC_G_FREQ: freq:0x%08x.\n", mxb->cur_freq.frequency));
+ DEB_EE("VIDIOC_G_FREQ: freq:0x%08x\n", mxb->cur_freq.frequency);
return 0;
}
@@ -588,12 +593,13 @@ static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency
return -EINVAL;
if (mxb->cur_input) {
- DEB_D(("VIDIOC_S_FREQ: channel %d does not have a tuner!\n", mxb->cur_input));
+ DEB_D("VIDIOC_S_FREQ: channel %d does not have a tuner!\n",
+ mxb->cur_input);
return -EINVAL;
}
mxb->cur_freq = *f;
- DEB_EE(("VIDIOC_S_FREQUENCY: freq:0x%08x.\n", mxb->cur_freq.frequency));
+ DEB_EE("VIDIOC_S_FREQUENCY: freq:0x%08x\n", mxb->cur_freq.frequency);
/* tune in desired frequency */
tuner_call(mxb, tuner, s_frequency, &mxb->cur_freq);
@@ -612,18 +618,18 @@ static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
struct mxb *mxb = (struct mxb *)dev->ext_priv;
if (a->index > MXB_INPUTS) {
- DEB_D(("VIDIOC_G_AUDIO %d out of range.\n", a->index));
+ DEB_D("VIDIOC_G_AUDIO %d out of range\n", a->index);
return -EINVAL;
}
- DEB_EE(("VIDIOC_G_AUDIO %d.\n", a->index));
+ DEB_EE("VIDIOC_G_AUDIO %d\n", a->index);
memcpy(a, &mxb_audios[video_audio_connect[mxb->cur_input]], sizeof(struct v4l2_audio));
return 0;
}
static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
- DEB_D(("VIDIOC_S_AUDIO %d.\n", a->index));
+ DEB_D("VIDIOC_S_AUDIO %d\n", a->index);
return 0;
}
@@ -655,11 +661,11 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
int i = *(int *)arg;
if (i < 0 || i >= MXB_AUDIOS) {
- DEB_D(("illegal argument to MXB_S_AUDIO_CD: i:%d.\n", i));
+ DEB_D("invalid argument to MXB_S_AUDIO_CD: i:%d\n", i);
return -EINVAL;
}
- DEB_EE(("MXB_S_AUDIO_CD: i:%d.\n", i));
+ DEB_EE("MXB_S_AUDIO_CD: i:%d\n", i);
tea6420_route_cd(mxb, i);
return 0;
@@ -669,17 +675,18 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
int i = *(int *)arg;
if (i < 0 || i >= MXB_AUDIOS) {
- DEB_D(("illegal argument to MXB_S_AUDIO_LINE: i:%d.\n", i));
+ DEB_D("invalid argument to MXB_S_AUDIO_LINE: i:%d\n",
+ i);
return -EINVAL;
}
- DEB_EE(("MXB_S_AUDIO_LINE: i:%d.\n", i));
+ DEB_EE("MXB_S_AUDIO_LINE: i:%d\n", i);
tea6420_route_line(mxb, i);
return 0;
}
default:
/*
- DEB2(printk("does not handle this ioctl.\n"));
+ DEB2(pr_err("does not handle this ioctl\n"));
*/
return -ENOIOCTLCMD;
}
@@ -693,7 +700,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
{
struct mxb *mxb;
- DEB_EE(("dev:%p\n", dev));
+ DEB_EE("dev:%p\n", dev);
saa7146_vv_init(dev, &vv_data);
if (mxb_probe(dev)) {
@@ -720,7 +727,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
#endif
vv_data.ops.vidioc_default = vidioc_default;
if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
- ERR(("cannot register capture v4l2 device. skipping.\n"));
+ ERR("cannot register capture v4l2 device. skipping.\n");
saa7146_vv_release(dev);
return -1;
}
@@ -728,11 +735,11 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
/* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/
if (MXB_BOARD_CAN_DO_VBI(dev)) {
if (saa7146_register_device(&mxb->vbi_dev, dev, "mxb", VFL_TYPE_VBI)) {
- ERR(("cannot register vbi v4l2 device. skipping.\n"));
+ ERR("cannot register vbi v4l2 device. skipping.\n");
}
}
- printk("mxb: found Multimedia eXtension Board #%d.\n", mxb_num);
+ pr_info("found Multimedia eXtension Board #%d\n", mxb_num);
mxb_num++;
mxb_init_done(dev);
@@ -743,7 +750,7 @@ static int mxb_detach(struct saa7146_dev *dev)
{
struct mxb *mxb = (struct mxb *)dev->ext_priv;
- DEB_EE(("dev:%p\n", dev));
+ DEB_EE("dev:%p\n", dev);
saa7146_unregister_device(&mxb->video_dev,dev);
if (MXB_BOARD_CAN_DO_VBI(dev))
@@ -765,7 +772,7 @@ static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *standa
if (V4L2_STD_PAL_I == standard->id) {
v4l2_std_id std = V4L2_STD_PAL_I;
- DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n"));
+ DEB_D("VIDIOC_S_STD: setting mxb for PAL_I\n");
/* set the 7146 gpio register -- I don't know what this does exactly */
saa7146_write(dev, GPIO_CTRL, 0x00404050);
/* unset the 7111 gpio register -- I don't know what this does exactly */
@@ -774,7 +781,7 @@ static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *standa
} else {
v4l2_std_id std = V4L2_STD_PAL_BG;
- DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n"));
+ DEB_D("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM\n");
/* set the 7146 gpio register -- I don't know what this does exactly */
saa7146_write(dev, GPIO_CTRL, 0x00404050);
/* set the 7111 gpio register -- I don't know what this does exactly */
@@ -852,7 +859,7 @@ static struct saa7146_extension extension = {
static int __init mxb_init_module(void)
{
if (saa7146_register_extension(&extension)) {
- DEB_S(("failed to register extension.\n"));
+ DEB_S("failed to register extension\n");
return -ENODEV;
}
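
The mxb conversion above replaces raw printk() calls with pr_err()/pr_info() and defines pr_fmt before any include, so the module name is prepended to every message automatically. A minimal sketch of the mechanism, using a hypothetical module for illustration:

/* pr_fmt must be defined before the first include that pulls in printk.h,
 * otherwise the default (empty) prefix is already fixed.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");	/* prints "demo: loaded" for a module named demo */
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");
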
diff --git a/drivers/media/video/noon010pc30.c b/drivers/media/video/noon010pc30.c
index 35f722a88f7..50838bf8420 100644
--- a/drivers/media/video/noon010pc30.c
+++ b/drivers/media/video/noon010pc30.c
@@ -1,7 +1,7 @@
/*
* Driver for SiliconFile NOON010PC30 CIF (1/11") Image Sensor with ISP
*
- * Copyright (C) 2010 Samsung Electronics
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
* Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* Initial register configuration based on a driver authored by
@@ -10,7 +10,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later vergsion.
+ * (at your option) any later version.
*/
#include <linux/delay.h>
@@ -21,6 +21,7 @@
#include <media/noon010pc30.h>
#include <media/v4l2-chip-ident.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
@@ -131,17 +132,23 @@ static const char * const noon010_supply_name[] = {
struct noon010_info {
struct v4l2_subdev sd;
+ struct media_pad pad;
struct v4l2_ctrl_handler hdl;
- const struct noon010pc30_platform_data *pdata;
+ struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
+ u32 gpio_nreset;
+ u32 gpio_nstby;
+
+ /* Protects the struct members below */
+ struct mutex lock;
+
const struct noon010_format *curr_fmt;
const struct noon010_frmsize *curr_win;
+ unsigned int apply_new_cfg:1;
+ unsigned int streaming:1;
unsigned int hflip:1;
unsigned int vflip:1;
unsigned int power:1;
u8 i2c_reg_page;
- struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
- u32 gpio_nreset;
- u32 gpio_nstby;
};
struct i2c_regval {
@@ -292,8 +299,10 @@ static int noon010_power_ctrl(struct v4l2_subdev *sd, bool reset, bool sleep)
u8 reg = sleep ? 0xF1 : 0xF0;
int ret = 0;
- if (reset)
+ if (reset) {
ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
+ udelay(20);
+ }
if (!ret) {
ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
if (reset && !ret)
@@ -313,6 +322,7 @@ static int noon010_enable_autowhitebalance(struct v4l2_subdev *sd, int on)
return ret;
}
+/* Called with struct noon010_info.lock mutex held */
static int noon010_set_flip(struct v4l2_subdev *sd, int hflip, int vflip)
{
struct noon010_info *info = to_noon010(sd);
@@ -340,21 +350,18 @@ static int noon010_set_flip(struct v4l2_subdev *sd, int hflip, int vflip)
static int noon010_set_params(struct v4l2_subdev *sd)
{
struct noon010_info *info = to_noon010(sd);
- int ret;
- if (!info->curr_win)
- return -EINVAL;
-
- ret = cam_i2c_write(sd, VDO_CTL_REG(0), info->curr_win->vid_ctl1);
-
- if (!ret && info->curr_fmt)
- ret = cam_i2c_write(sd, ISP_CTL_REG(0),
- info->curr_fmt->ispctl1_reg);
- return ret;
+ int ret = cam_i2c_write(sd, VDO_CTL_REG(0),
+ info->curr_win->vid_ctl1);
+ if (ret)
+ return ret;
+ return cam_i2c_write(sd, ISP_CTL_REG(0),
+ info->curr_fmt->ispctl1_reg);
}
/* Find nearest matching image pixel size. */
-static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf)
+static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf,
+ const struct noon010_frmsize **size)
{
unsigned int min_err = ~0;
int i = ARRAY_SIZE(noon010_sizes);
@@ -374,11 +381,14 @@ static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf)
if (match) {
mf->width = match->width;
mf->height = match->height;
+ if (size)
+ *size = match;
return 0;
}
return -EINVAL;
}
+/* Called with info.lock mutex held */
static int power_enable(struct noon010_info *info)
{
int ret;
@@ -419,6 +429,7 @@ static int power_enable(struct noon010_info *info)
return 0;
}
+/* Called with info.lock mutex held */
static int power_disable(struct noon010_info *info)
{
int ret;
@@ -448,147 +459,175 @@ static int power_disable(struct noon010_info *info)
static int noon010_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
+ struct noon010_info *info = to_noon010(sd);
+ int ret = 0;
v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
__func__, ctrl->id, ctrl->val);
+ mutex_lock(&info->lock);
+ /*
+ * If the device is not powered up by the host driver, do
+ * not apply any controls to H/W at this time. Instead
+ * the controls will be restored right after power-up.
+ */
+ if (!info->power)
+ goto unlock;
+
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
- return noon010_enable_autowhitebalance(sd, ctrl->val);
+ ret = noon010_enable_autowhitebalance(sd, ctrl->val);
+ break;
case V4L2_CID_BLUE_BALANCE:
- return cam_i2c_write(sd, MWB_BGAIN_REG, ctrl->val);
+ ret = cam_i2c_write(sd, MWB_BGAIN_REG, ctrl->val);
+ break;
case V4L2_CID_RED_BALANCE:
- return cam_i2c_write(sd, MWB_RGAIN_REG, ctrl->val);
+ ret = cam_i2c_write(sd, MWB_RGAIN_REG, ctrl->val);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+unlock:
+ mutex_unlock(&info->lock);
+ return ret;
}
-static int noon010_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
{
- if (!code || index >= ARRAY_SIZE(noon010_formats))
+ if (code->index >= ARRAY_SIZE(noon010_formats))
return -EINVAL;
- *code = noon010_formats[index].code;
+ code->code = noon010_formats[code->index].code;
return 0;
}
-static int noon010_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+static int noon010_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
- int ret;
-
- if (!mf)
- return -EINVAL;
+ struct v4l2_mbus_framefmt *mf;
- if (!info->curr_win || !info->curr_fmt) {
- ret = noon010_set_params(sd);
- if (ret)
- return ret;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ if (fh) {
+ mf = v4l2_subdev_get_try_format(fh, 0);
+ fmt->format = *mf;
+ }
+ return 0;
}
+ mf = &fmt->format;
- mf->width = info->curr_win->width;
- mf->height = info->curr_win->height;
- mf->code = info->curr_fmt->code;
- mf->colorspace = info->curr_fmt->colorspace;
- mf->field = V4L2_FIELD_NONE;
+ mutex_lock(&info->lock);
+ mf->width = info->curr_win->width;
+ mf->height = info->curr_win->height;
+ mf->code = info->curr_fmt->code;
+ mf->colorspace = info->curr_fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+ mutex_unlock(&info->lock);
return 0;
}
/* Return nearest media bus frame format. */
-static const struct noon010_format *try_fmt(struct v4l2_subdev *sd,
+static const struct noon010_format *noon010_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
int i = ARRAY_SIZE(noon010_formats);
- noon010_try_frame_size(mf);
-
- while (i--)
+ while (--i)
if (mf->code == noon010_formats[i].code)
break;
-
mf->code = noon010_formats[i].code;
return &noon010_formats[i];
}
-static int noon010_try_fmt(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *mf)
-{
- if (!sd || !mf)
- return -EINVAL;
-
- try_fmt(sd, mf);
- return 0;
-}
-
-static int noon010_s_fmt(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *mf)
+static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
+ const struct noon010_frmsize *size = NULL;
+ const struct noon010_format *nf;
+ struct v4l2_mbus_framefmt *mf;
+ int ret = 0;
- if (!sd || !mf)
- return -EINVAL;
-
- info->curr_fmt = try_fmt(sd, mf);
+ nf = noon010_try_fmt(sd, &fmt->format);
+ noon010_try_frame_size(&fmt->format, &size);
+ fmt->format.colorspace = V4L2_COLORSPACE_JPEG;
- return noon010_set_params(sd);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ if (fh) {
+ mf = v4l2_subdev_get_try_format(fh, 0);
+ *mf = fmt->format;
+ }
+ return 0;
+ }
+ mutex_lock(&info->lock);
+ if (!info->streaming) {
+ info->apply_new_cfg = 1;
+ info->curr_fmt = nf;
+ info->curr_win = size;
+ } else {
+ ret = -EBUSY;
+ }
+ mutex_unlock(&info->lock);
+ return ret;
}
+/* Called with struct noon010_info.lock mutex held */
static int noon010_base_config(struct v4l2_subdev *sd)
{
- struct noon010_info *info = to_noon010(sd);
- int ret;
-
- ret = noon010_bulk_write_reg(sd, noon010_base_regs);
- if (!ret) {
- info->curr_fmt = &noon010_formats[0];
- info->curr_win = &noon010_sizes[0];
+ int ret = noon010_bulk_write_reg(sd, noon010_base_regs);
+ if (!ret)
ret = noon010_set_params(sd);
- }
if (!ret)
ret = noon010_set_flip(sd, 1, 0);
- if (!ret)
- ret = noon010_power_ctrl(sd, false, false);
- /* sync the handler and the registers state */
- v4l2_ctrl_handler_setup(&to_noon010(sd)->hdl);
return ret;
}
static int noon010_s_power(struct v4l2_subdev *sd, int on)
{
struct noon010_info *info = to_noon010(sd);
- const struct noon010pc30_platform_data *pdata = info->pdata;
- int ret = 0;
-
- if (WARN(pdata == NULL, "No platform data!\n"))
- return -ENOMEM;
+ int ret;
+ mutex_lock(&info->lock);
if (on) {
ret = power_enable(info);
- if (ret)
- return ret;
- ret = noon010_base_config(sd);
+ if (!ret)
+ ret = noon010_base_config(sd);
} else {
noon010_power_ctrl(sd, false, true);
ret = power_disable(info);
- info->curr_win = NULL;
- info->curr_fmt = NULL;
}
+ mutex_unlock(&info->lock);
+
+ /* Restore the controls state */
+ if (!ret && on)
+ ret = v4l2_ctrl_handler_setup(&info->hdl);
return ret;
}
-static int noon010_g_chip_ident(struct v4l2_subdev *sd,
- struct v4l2_dbg_chip_ident *chip)
+static int noon010_s_stream(struct v4l2_subdev *sd, int on)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct noon010_info *info = to_noon010(sd);
+ int ret = 0;
- return v4l2_chip_ident_i2c_client(client, chip,
- V4L2_IDENT_NOON010PC30, 0);
+ mutex_lock(&info->lock);
+ if (!info->streaming != !on) {
+ ret = noon010_power_ctrl(sd, false, !on);
+ if (!ret)
+ info->streaming = on;
+ }
+ if (!ret && on && info->apply_new_cfg) {
+ ret = noon010_set_params(sd);
+ if (!ret)
+ info->apply_new_cfg = 0;
+ }
+ mutex_unlock(&info->lock);
+ return ret;
}
static int noon010_log_status(struct v4l2_subdev *sd)
@@ -599,12 +638,27 @@ static int noon010_log_status(struct v4l2_subdev *sd)
return 0;
}
+static int noon010_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(fh, 0);
+
+ mf->width = noon010_sizes[0].width;
+ mf->height = noon010_sizes[0].height;
+ mf->code = noon010_formats[0].code;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops noon010_subdev_internal_ops = {
+ .open = noon010_open,
+};
+
static const struct v4l2_ctrl_ops noon010_ctrl_ops = {
.s_ctrl = noon010_s_ctrl,
};
static const struct v4l2_subdev_core_ops noon010_core_ops = {
- .g_chip_ident = noon010_g_chip_ident,
.s_power = noon010_s_power,
.g_ctrl = v4l2_subdev_g_ctrl,
.s_ctrl = v4l2_subdev_s_ctrl,
@@ -616,15 +670,19 @@ static const struct v4l2_subdev_core_ops noon010_core_ops = {
.log_status = noon010_log_status,
};
-static const struct v4l2_subdev_video_ops noon010_video_ops = {
- .g_mbus_fmt = noon010_g_fmt,
- .s_mbus_fmt = noon010_s_fmt,
- .try_mbus_fmt = noon010_try_fmt,
- .enum_mbus_fmt = noon010_enum_fmt,
+static struct v4l2_subdev_pad_ops noon010_pad_ops = {
+ .enum_mbus_code = noon010_enum_mbus_code,
+ .get_fmt = noon010_get_fmt,
+ .set_fmt = noon010_set_fmt,
+};
+
+static struct v4l2_subdev_video_ops noon010_video_ops = {
+ .s_stream = noon010_s_stream,
};
static const struct v4l2_subdev_ops noon010_ops = {
.core = &noon010_core_ops,
+ .pad = &noon010_pad_ops,
.video = &noon010_video_ops,
};
@@ -665,10 +723,14 @@ static int noon010_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
+ mutex_init(&info->lock);
sd = &info->sd;
strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
v4l2_i2c_subdev_init(sd, client, &noon010_ops);
+ sd->internal_ops = &noon010_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
v4l2_ctrl_handler_init(&info->hdl, 3);
v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
@@ -684,10 +746,11 @@ static int noon010_probe(struct i2c_client *client,
if (ret)
goto np_err;
- info->pdata = client->dev.platform_data;
info->i2c_reg_page = -1;
info->gpio_nreset = -EINVAL;
info->gpio_nstby = -EINVAL;
+ info->curr_fmt = &noon010_formats[0];
+ info->curr_win = &noon010_sizes[0];
if (gpio_is_valid(pdata->gpio_nreset)) {
ret = gpio_request(pdata->gpio_nreset, "NOON010PC30 NRST");
@@ -719,11 +782,17 @@ static int noon010_probe(struct i2c_client *client,
if (ret)
goto np_reg_err;
+ info->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
+ if (ret < 0)
+ goto np_me_err;
+
ret = noon010_detect(client, info);
if (!ret)
return 0;
- /* the sensor detection failed */
+np_me_err:
regulator_bulk_free(NOON010_NUM_SUPPLIES, info->supply);
np_reg_err:
if (gpio_is_valid(info->gpio_nstby))
@@ -754,6 +823,7 @@ static int noon010_remove(struct i2c_client *client)
if (gpio_is_valid(info->gpio_nstby))
gpio_free(info->gpio_nstby);
+ media_entity_cleanup(&sd->entity);
kfree(info);
return 0;
}
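
The noon010 conversion moves format handling from video ops to pad ops, so get_fmt/set_fmt now distinguish TRY formats (stored per file handle) from the ACTIVE hardware state. A reduced sketch of the get_fmt side under that split; the driver state is abbreviated to a single cached format passed in by the caller:

#include <media/v4l2-subdev.h>

/* Sketch of the TRY/ACTIVE split in a pad-level get_fmt handler. */
static int sketch_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
			  struct v4l2_subdev_format *fmt,
			  const struct v4l2_mbus_framefmt *cached)
{
	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		/* TRY formats live in the file handle, not in the device */
		fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
		return 0;
	}

	/* ACTIVE: report what the hardware is (or will be) programmed with */
	fmt->format = *cached;
	return 0;
}
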
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index b3a5ecdb33a..9c5c19f142d 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -400,7 +400,6 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
ovl->get_overlay_info(ovl, &info);
info.paddr = addr;
- info.vaddr = NULL;
info.width = cropwidth;
info.height = cropheight;
info.color_mode = vout->dss_mode;
@@ -834,6 +833,15 @@ static void omap_vout_buffer_release(struct videobuf_queue *q,
/*
* File operations
*/
+static unsigned int omap_vout_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ return videobuf_poll_stream(file, q, wait);
+}
+
static void omap_vout_vm_open(struct vm_area_struct *vma)
{
struct omap_vout_device *vout = vma->vm_private_data;
@@ -1165,12 +1173,17 @@ static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
{
int ret = 0;
struct omap_vout_device *vout = fh;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
struct v4l2_window *win = &f->fmt.win;
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
ret = omap_vout_try_window(&vout->fbuf, win);
if (!ret) {
- if (vout->vid == OMAP_VIDEO1)
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
win->global_alpha = 255;
else
win->global_alpha = f->fmt.win.global_alpha;
@@ -1194,8 +1207,8 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
if (!ret) {
- /* Video1 plane does not support global alpha */
- if (ovl->id == OMAP_DSS_VIDEO1)
+ /* Video1 plane does not support global alpha on OMAP3 */
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
vout->win.global_alpha = 255;
else
vout->win.global_alpha = f->fmt.win.global_alpha;
@@ -1788,7 +1801,9 @@ static int vidioc_s_fbuf(struct file *file, void *fh,
if (ovl->manager && ovl->manager->get_manager_info &&
ovl->manager->set_manager_info) {
ovl->manager->get_manager_info(ovl->manager, &info);
- info.alpha_enabled = enable;
+ /* enable this only if there is no zorder cap */
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
+ info.partial_alpha_enabled = enable;
if (ovl->manager->set_manager_info(ovl->manager, &info))
return -EINVAL;
}
@@ -1820,7 +1835,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
}
if (ovl->manager && ovl->manager->get_manager_info) {
ovl->manager->get_manager_info(ovl->manager, &info);
- if (info.alpha_enabled)
+ if (info.partial_alpha_enabled)
a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
}
@@ -1855,6 +1870,7 @@ static const struct v4l2_ioctl_ops vout_ioctl_ops = {
static const struct v4l2_file_operations omap_vout_fops = {
.owner = THIS_MODULE,
+ .poll = omap_vout_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = omap_vout_mmap,
.open = omap_vout_open,
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index 8a947e603ac..e87ae2f634b 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -102,10 +102,10 @@
/* end of OMAP1 Camera Interface registers */
-#define SOCAM_BUS_FLAGS (SOCAM_MASTER | \
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | \
- SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING | \
- SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8)
+#define SOCAM_BUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
#define FIFO_SIZE ((THRESHOLD_MASK >> THRESHOLD_SHIFT) + 1)
@@ -1438,41 +1438,55 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->parent;
struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
const struct soc_camera_format_xlate *xlate;
const struct soc_mbus_pixelfmt *fmt;
- unsigned long camera_flags, common_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
u32 ctrlclock, mode;
int ret;
- camera_flags = icd->ops->query_bus_param(icd);
-
- common_flags = soc_camera_bus_param_compatible(camera_flags,
- SOCAM_BUS_FLAGS);
- if (!common_flags)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg, SOCAM_BUS_FLAGS);
+ if (!common_flags) {
+ dev_warn(dev,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, SOCAM_BUS_FLAGS);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = SOCAM_BUS_FLAGS;
+ }
/* Make choices, possibly based on platform configuration */
- if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
- (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
if (!pcdev->pdata ||
pcdev->pdata->flags & OMAP1_CAMERA_LCLK_RISING)
- common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
else
- common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
}
- ret = icd->ops->set_bus_param(icd, common_flags);
- if (ret < 0)
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(dev, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
return ret;
+ }
ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
if (ctrlclock & LCLK_EN)
CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
- if (common_flags & SOCAM_PCLK_SAMPLE_RISING) {
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) {
dev_dbg(dev, "CTRLCLOCK_REG |= POLCLK\n");
ctrlclock |= POLCLK;
} else {
@@ -1565,10 +1579,10 @@ static int __init omap1_cam_probe(struct platform_device *pdev)
pcdev->clk = clk;
pcdev->pdata = pdev->dev.platform_data;
- pcdev->pflags = pcdev->pdata->flags;
-
- if (pcdev->pdata)
+ if (pcdev->pdata) {
+ pcdev->pflags = pcdev->pdata->flags;
pcdev->camexclk = pcdev->pdata->camexclk_khz * 1000;
+ }
switch (pcdev->camexclk) {
case 6000000:
@@ -1578,6 +1592,7 @@ static int __init omap1_cam_probe(struct platform_device *pdev)
case 24000000:
break;
default:
+ /* pcdev->camexclk != 0 => pcdev->pdata != NULL */
dev_warn(&pdev->dev,
"Incorrect sensor clock frequency %ld kHz, "
"should be one of 0, 6, 8, 9.6, 12 or 24 MHz, "
@@ -1585,8 +1600,7 @@ static int __init omap1_cam_probe(struct platform_device *pdev)
pcdev->pdata->camexclk_khz);
pcdev->camexclk = 0;
case 0:
- dev_info(&pdev->dev,
- "Not providing sensor clock\n");
+ dev_info(&pdev->dev, "Not providing sensor clock\n");
}
INIT_LIST_HEAD(&pcdev->capture);
@@ -1716,5 +1730,5 @@ MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
MODULE_LICENSE("GPL v2");
-MODULE_LICENSE(DRIVER_VERSION);
+MODULE_VERSION(DRIVER_VERSION);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index eb97bff7116..45522e60318 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/video/omap3isp/Makefile b/drivers/media/video/omap3isp/Makefile
index b1b344774ae..e8847e79e31 100644
--- a/drivers/media/video/omap3isp/Makefile
+++ b/drivers/media/video/omap3isp/Makefile
@@ -1,8 +1,6 @@
# Makefile for OMAP3 ISP driver
-ifdef CONFIG_VIDEO_OMAP3_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
omap3-isp-objs += \
isp.o ispqueue.o ispvideo.o \
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 5cea2bbd701..b818cacf420 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -80,6 +80,13 @@
#include "isph3a.h"
#include "isphist.h"
+/*
+ * this is provided as an interim solution until omap3isp no longer needs
+ * any omap-specific iommu API
+ */
+#define to_iommu(dev) \
+ (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
+
static unsigned int autoidle;
module_param(autoidle, int, 0444);
MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
@@ -732,7 +739,7 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
struct media_pad *pad;
struct v4l2_subdev *subdev;
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&pipe->lock, flags);
pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
@@ -756,7 +763,7 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
ret = v4l2_subdev_call(subdev, video, s_stream, mode);
if (ret < 0 && ret != -ENOIOCTLCMD)
- break;
+ return ret;
if (subdev == &isp->isp_ccdc.subdev) {
v4l2_subdev_call(&isp->isp_aewb.subdev, video,
@@ -777,7 +784,7 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
if (pipe->do_propagation && mode == ISP_PIPELINE_STREAM_SINGLESHOT)
atomic_inc(&pipe->frame_number);
- return ret;
+ return 0;
}
static int isp_pipeline_wait_resizer(struct isp_device *isp)
@@ -1108,7 +1115,7 @@ static void isp_save_ctx(struct isp_device *isp)
{
isp_save_context(isp, isp_reg_list);
if (isp->iommu)
- iommu_save_ctx(isp->iommu);
+ omap_iommu_save_ctx(isp->iommu);
}
/*
@@ -1122,7 +1129,7 @@ static void isp_restore_ctx(struct isp_device *isp)
{
isp_restore_context(isp, isp_reg_list);
if (isp->iommu)
- iommu_restore_ctx(isp->iommu);
+ omap_iommu_restore_ctx(isp->iommu);
omap3isp_ccdc_restore_context(isp);
omap3isp_preview_restore_context(isp);
}
@@ -1697,6 +1704,7 @@ static int isp_register_entities(struct isp_device *isp)
isp->media_dev.dev = isp->dev;
strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
sizeof(isp->media_dev.model));
+ isp->media_dev.hw_revision = isp->revision;
isp->media_dev.link_notify = isp_pipeline_link_notify;
ret = media_device_register(&isp->media_dev);
if (ret < 0) {
@@ -1975,7 +1983,8 @@ static int isp_remove(struct platform_device *pdev)
isp_cleanup_modules(isp);
omap3isp_get(isp);
- iommu_put(isp->iommu);
+ iommu_detach_device(isp->domain, isp->iommu_dev);
+ iommu_domain_free(isp->domain);
omap3isp_put(isp);
free_irq(isp->irq_num, isp);
@@ -2123,25 +2132,41 @@ static int isp_probe(struct platform_device *pdev)
}
/* IOMMU */
- isp->iommu = iommu_get("isp");
- if (IS_ERR_OR_NULL(isp->iommu)) {
- isp->iommu = NULL;
+ isp->iommu_dev = omap_find_iommu_device("isp");
+ if (!isp->iommu_dev) {
+ dev_err(isp->dev, "omap_find_iommu_device failed\n");
ret = -ENODEV;
goto error_isp;
}
+ /* to be removed once iommu migration is complete */
+ isp->iommu = to_iommu(isp->iommu_dev);
+
+ isp->domain = iommu_domain_alloc(pdev->dev.bus);
+ if (!isp->domain) {
+ dev_err(isp->dev, "can't alloc iommu domain\n");
+ ret = -ENOMEM;
+ goto error_isp;
+ }
+
+ ret = iommu_attach_device(isp->domain, isp->iommu_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
+ goto free_domain;
+ }
+
/* Interrupt */
isp->irq_num = platform_get_irq(pdev, 0);
if (isp->irq_num <= 0) {
dev_err(isp->dev, "No IRQ resource\n");
ret = -ENODEV;
- goto error_isp;
+ goto detach_dev;
}
if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) {
dev_err(isp->dev, "Unable to request IRQ\n");
ret = -EINVAL;
- goto error_isp;
+ goto detach_dev;
}
/* Entities */
@@ -2162,8 +2187,11 @@ error_modules:
isp_cleanup_modules(isp);
error_irq:
free_irq(isp->irq_num, isp);
+detach_dev:
+ iommu_detach_device(isp->domain, isp->iommu_dev);
+free_domain:
+ iommu_domain_free(isp->domain);
error_isp:
- iommu_put(isp->iommu);
omap3isp_put(isp);
error:
isp_put_clocks(isp);
@@ -2183,6 +2211,8 @@ error:
regulator_put(isp->isp_csiphy2.vdd);
regulator_put(isp->isp_csiphy1.vdd);
platform_set_drvdata(pdev, NULL);
+
+ mutex_destroy(&isp->isp_mutex);
kfree(isp);
return ret;
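
The omap3isp probe path now uses the generic IOMMU API instead of the OMAP-specific iommu_get(): allocate a domain, attach the device, and unwind in reverse order on failure or removal. A condensed sketch of that sequence, with bus and dev standing in for the ISP's IOMMU device and its bus:

#include <linux/device.h>
#include <linux/iommu.h>

/* Domain setup with unwinding, as in the probe/remove paths above. */
static struct iommu_domain *sketch_attach(struct bus_type *bus,
					  struct device *dev)
{
	struct iommu_domain *domain = iommu_domain_alloc(bus);

	if (!domain)
		return NULL;

	if (iommu_attach_device(domain, dev)) {
		iommu_domain_free(domain);
		return NULL;
	}
	return domain;
}

static void sketch_detach(struct iommu_domain *domain, struct device *dev)
{
	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
}
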
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 529e582ef94..705946ef4d6 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -27,11 +27,13 @@
#ifndef OMAP3_ISP_CORE_H
#define OMAP3_ISP_CORE_H
+#include <media/omap3isp.h>
#include <media/v4l2-device.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
+#include <linux/iommu.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
@@ -94,14 +96,6 @@ enum isp_subclk_resource {
OMAP3_ISP_SUBCLK_RESIZER = (1 << 4),
};
-enum isp_interface_type {
- ISP_INTERFACE_PARALLEL,
- ISP_INTERFACE_CSI2A_PHY2,
- ISP_INTERFACE_CCP2B_PHY1,
- ISP_INTERFACE_CCP2B_PHY2,
- ISP_INTERFACE_CSI2C_PHY1,
-};
-
/* ISP: OMAP 34xx ES 1.0 */
#define ISP_REVISION_1_0 0x10
/* ISP2: OMAP 34xx ES 2.0, 2.1 and 3.0 */
@@ -130,82 +124,6 @@ struct isp_reg {
u32 val;
};
-/**
- * struct isp_parallel_platform_data - Parallel interface platform data
- * @data_lane_shift: Data lane shifter
- * 0 - CAMEXT[13:0] -> CAM[13:0]
- * 1 - CAMEXT[13:2] -> CAM[11:0]
- * 2 - CAMEXT[13:4] -> CAM[9:0]
- * 3 - CAMEXT[13:6] -> CAM[7:0]
- * @clk_pol: Pixel clock polarity
- * 0 - Non Inverted, 1 - Inverted
- * @hs_pol: Horizontal synchronization polarity
- * 0 - Active high, 1 - Active low
- * @vs_pol: Vertical synchronization polarity
- * 0 - Active high, 1 - Active low
- * @bridge: CCDC Bridge input control
- * ISPCTRL_PAR_BRIDGE_DISABLE - Disable
- * ISPCTRL_PAR_BRIDGE_LENDIAN - Little endian
- * ISPCTRL_PAR_BRIDGE_BENDIAN - Big endian
- */
-struct isp_parallel_platform_data {
- unsigned int data_lane_shift:2;
- unsigned int clk_pol:1;
- unsigned int hs_pol:1;
- unsigned int vs_pol:1;
- unsigned int bridge:4;
-};
-
-/**
- * struct isp_ccp2_platform_data - CCP2 interface platform data
- * @strobe_clk_pol: Strobe/clock polarity
- * 0 - Non Inverted, 1 - Inverted
- * @crc: Enable the cyclic redundancy check
- * @ccp2_mode: Enable CCP2 compatibility mode
- * 0 - MIPI-CSI1 mode, 1 - CCP2 mode
- * @phy_layer: Physical layer selection
- * ISPCCP2_CTRL_PHY_SEL_CLOCK - Data/clock physical layer
- * ISPCCP2_CTRL_PHY_SEL_STROBE - Data/strobe physical layer
- * @vpclk_div: Video port output clock control
- */
-struct isp_ccp2_platform_data {
- unsigned int strobe_clk_pol:1;
- unsigned int crc:1;
- unsigned int ccp2_mode:1;
- unsigned int phy_layer:1;
- unsigned int vpclk_div:2;
-};
-
-/**
- * struct isp_csi2_platform_data - CSI2 interface platform data
- * @crc: Enable the cyclic redundancy check
- * @vpclk_div: Video port output clock control
- */
-struct isp_csi2_platform_data {
- unsigned crc:1;
- unsigned vpclk_div:2;
-};
-
-struct isp_subdev_i2c_board_info {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
-};
-
-struct isp_v4l2_subdevs_group {
- struct isp_subdev_i2c_board_info *subdevs;
- enum isp_interface_type interface;
- union {
- struct isp_parallel_platform_data parallel;
- struct isp_ccp2_platform_data ccp2;
- struct isp_csi2_platform_data csi2;
- } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
-};
-
-struct isp_platform_data {
- struct isp_v4l2_subdevs_group *subdevs;
- void (*set_constraints)(struct isp_device *isp, bool enable);
-};
-
struct isp_platform_callback {
u32 (*set_xclk)(struct isp_device *isp, u32 xclk, u8 xclksel);
int (*csiphy_config)(struct isp_csiphy *phy,
@@ -294,7 +212,9 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
- struct iommu *iommu;
+ struct omap_iommu *iommu;
+ struct iommu_domain *domain;
+ struct device *iommu_dev;
struct isp_platform_callback platform_cb;
};
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 80796eb0c53..b0b0fa5a357 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
if (req->table)
- iommu_vfree(isp->iommu, req->table);
+ omap_iommu_vfree(isp->domain, isp->iommu, req->table);
kfree(req);
}
@@ -438,15 +438,15 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
req->enable = 1;
- req->table = iommu_vmalloc(isp->iommu, 0, req->config.size,
- IOMMU_FLAG);
+ req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ req->config.size, IOMMU_FLAG);
if (IS_ERR_VALUE(req->table)) {
req->table = 0;
ret = -ENOMEM;
goto done;
}
- req->iovm = find_iovm_area(isp->iommu, req->table);
+ req->iovm = omap_find_iovm_area(isp->iommu, req->table);
if (req->iovm == NULL) {
ret = -ENOMEM;
goto done;
@@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
- table = da_to_va(isp->iommu, req->table);
+ table = omap_da_to_va(isp->iommu, req->table);
if (copy_from_user(table, config->lsc, req->config.size)) {
ret = -EFAULT;
goto done;
@@ -731,18 +731,19 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
/*
* table_new must be 64-byte aligned, but it's
- * already done by iommu_vmalloc().
+ * already done by omap_iommu_vmalloc().
*/
size = ccdc->fpc.fpnum * 4;
- table_new = iommu_vmalloc(isp->iommu, 0, size,
- IOMMU_FLAG);
+ table_new = omap_iommu_vmalloc(isp->domain, isp->iommu,
+ 0, size, IOMMU_FLAG);
if (IS_ERR_VALUE(table_new))
return -ENOMEM;
- if (copy_from_user(da_to_va(isp->iommu, table_new),
+ if (copy_from_user(omap_da_to_va(isp->iommu, table_new),
(__force void __user *)
ccdc->fpc.fpcaddr, size)) {
- iommu_vfree(isp->iommu, table_new);
+ omap_iommu_vfree(isp->domain, isp->iommu,
+ table_new);
return -EFAULT;
}
@@ -752,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc_configure_fpc(ccdc);
if (table_old != 0)
- iommu_vfree(isp->iommu, table_old);
+ omap_iommu_vfree(isp->domain, isp->iommu, table_old);
}
return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -1405,11 +1406,14 @@ static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&ccdc->video_out.video.entity);
struct video_device *vdev = &ccdc->subdev.devnode;
struct v4l2_event event;
memset(&event, 0, sizeof(event));
- event.type = V4L2_EVENT_OMAP3ISP_HS_VS;
+ event.type = V4L2_EVENT_FRAME_SYNC;
+ event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);
v4l2_event_queue(vdev, &event);
}
@@ -1691,7 +1695,11 @@ static long ccdc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
- if (sub->type != V4L2_EVENT_OMAP3ISP_HS_VS)
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* line number is zero at frame start */
+ if (sub->id != 0)
return -EINVAL;
return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS);
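
The event rework above replaces the driver-private V4L2_EVENT_OMAP3ISP_HS_VS with the standard V4L2_EVENT_FRAME_SYNC and reports the frame number with it. The following userspace sketch is editorial (not part of the patch) and assumes a hypothetical subdev node path; it only shows how an application could subscribe to the new event and read the frame sequence number:

/* Editorial sketch: subscribe to frame sync events on the CCDC subdev node
 * and print the frame sequence number of the next event. The device path
 * is board-specific and purely illustrative. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_event_subscription sub = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.id = 0,			/* id 0 = start of frame */
	};
	struct v4l2_event ev;
	struct pollfd pfd;
	int fd = open("/dev/v4l-subdev2", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLPRI;		/* V4L2 events are signalled as POLLPRI */
	if (poll(&pfd, 1, -1) > 0 && ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
		printf("frame %u\n", ev.u.frame_sync.frame_sequence);
	return 0;
}
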
@@ -1828,7 +1836,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
* callers to request an output size bigger than the input size
* up to the nearest multiple of 16.
*/
- fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
+ fmt->width = clamp_t(u32, width, 32, fmt->width + 15);
fmt->width &= ~15;
fmt->height = clamp_t(u32, height, 32, fmt->height);
break;
@@ -2144,6 +2152,37 @@ static const struct media_entity_operations ccdc_media_ops = {
.link_setup = ccdc_link_setup,
};
+void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
+{
+ v4l2_device_unregister_subdev(&ccdc->subdev);
+ omap3isp_video_unregister(&ccdc->video_out);
+}
+
+int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&ccdc->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_ccdc_unregister_entities(ccdc);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CCDC initialisation and cleanup
+ */
+
/*
* ccdc_init_entities - Initialize V4L2 subdev and media entity
* @ccdc: ISP CCDC module
@@ -2185,50 +2224,23 @@ static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
ret = omap3isp_video_init(&ccdc->video_out, "CCDC");
if (ret < 0)
- return ret;
+ goto error_video;
/* Connect the CCDC subdev to the video node. */
ret = media_entity_create_link(&ccdc->subdev.entity, CCDC_PAD_SOURCE_OF,
&ccdc->video_out.video.entity, 0, 0);
if (ret < 0)
- return ret;
-
- return 0;
-}
-
-void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
-{
- media_entity_cleanup(&ccdc->subdev.entity);
-
- v4l2_device_unregister_subdev(&ccdc->subdev);
- omap3isp_video_unregister(&ccdc->video_out);
-}
-
-int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video node. */
- ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap3isp_video_register(&ccdc->video_out, vdev);
- if (ret < 0)
- goto error;
+ goto error_link;
return 0;
-error:
- omap3isp_ccdc_unregister_entities(ccdc);
+error_link:
+ omap3isp_video_cleanup(&ccdc->video_out);
+error_video:
+ media_entity_cleanup(me);
return ret;
}
-/* -----------------------------------------------------------------------------
- * ISP CCDC initialisation and cleanup
- */
-
/*
* omap3isp_ccdc_init - CCDC module initialization.
* @dev: Device pointer specific to the OMAP3 ISP.
@@ -2240,6 +2252,7 @@ error:
int omap3isp_ccdc_init(struct isp_device *isp)
{
struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+ int ret;
spin_lock_init(&ccdc->lock);
init_waitqueue_head(&ccdc->wait);
@@ -2268,7 +2281,13 @@ int omap3isp_ccdc_init(struct isp_device *isp)
ccdc->update = OMAP3ISP_CCDC_BLCLAMP;
ccdc_apply_controls(ccdc);
- return ccdc_init_entities(ccdc);
+ ret = ccdc_init_entities(ccdc);
+ if (ret < 0) {
+ mutex_destroy(&ccdc->ioctl_lock);
+ return ret;
+ }
+
+ return 0;
}
/*
@@ -2279,6 +2298,9 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
{
struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+ omap3isp_video_cleanup(&ccdc->video_out);
+ media_entity_cleanup(&ccdc->subdev.entity);
+
/* Free LSC requests. As the CCDC is stopped there's no active request,
* so only the pending request and the free queue need to be handled.
*/
@@ -2287,5 +2309,7 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
if (ccdc->fpc.fpcaddr != 0)
- iommu_vfree(isp->iommu, ccdc->fpc.fpcaddr);
+ omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
+
+ mutex_destroy(&ccdc->ioctl_lock);
}
diff --git a/drivers/media/video/omap3isp/ispccp2.c b/drivers/media/video/omap3isp/ispccp2.c
index ec9e395f333..904ca8c8b17 100644
--- a/drivers/media/video/omap3isp/ispccp2.c
+++ b/drivers/media/video/omap3isp/ispccp2.c
@@ -243,9 +243,9 @@ static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
if (!(val & ISPCCP2_CTRL_MODE)) {
- if (pdata->ccp2_mode)
+ if (pdata->ccp2_mode == ISP_CCP2_MODE_CCP2)
dev_warn(isp->dev, "OMAP3 CCP2 bus not available\n");
- if (pdata->phy_layer == ISPCCP2_CTRL_PHY_SEL_STROBE)
+ if (pdata->phy_layer == ISP_CCP2_PHY_DATA_STROBE)
/* Strobe mode requires CCP2 */
return -EIO;
}
@@ -1032,6 +1032,48 @@ static const struct media_entity_operations ccp2_media_ops = {
};
/*
+ * omap3isp_ccp2_unregister_entities - Unregister media entities: subdev
+ * @ccp2: Pointer to ISP CCP2 device
+ */
+void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2)
+{
+ v4l2_device_unregister_subdev(&ccp2->subdev);
+ omap3isp_video_unregister(&ccp2->video_in);
+}
+
+/*
+ * omap3isp_ccp2_register_entities - Register the subdev media entity
+ * @ccp2: Pointer to ISP CCP2 device
+ * @vdev: Pointer to v4l device
+ * return negative error code or zero on success
+ */
+
+int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&ccp2->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_ccp2_unregister_entities(ccp2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP ccp2 initialisation and cleanup
+ */
+
+/*
* ccp2_init_entities - Initialize ccp2 subdev and media entity.
* @ccp2: Pointer to ISP CCP2 device
* return negative error code or zero on success
@@ -1083,72 +1125,23 @@ static int ccp2_init_entities(struct isp_ccp2_device *ccp2)
ret = omap3isp_video_init(&ccp2->video_in, "CCP2");
if (ret < 0)
- return ret;
+ goto error_video;
/* Connect the video node to the ccp2 subdev. */
ret = media_entity_create_link(&ccp2->video_in.video.entity, 0,
&ccp2->subdev.entity, CCP2_PAD_SINK, 0);
if (ret < 0)
- return ret;
+ goto error_link;
return 0;
-}
-/*
- * omap3isp_ccp2_unregister_entities - Unregister media entities: subdev
- * @ccp2: Pointer to ISP CCP2 device
- */
-void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2)
-{
+error_link:
+ omap3isp_video_cleanup(&ccp2->video_in);
+error_video:
media_entity_cleanup(&ccp2->subdev.entity);
-
- v4l2_device_unregister_subdev(&ccp2->subdev);
- omap3isp_video_unregister(&ccp2->video_in);
-}
-
-/*
- * omap3isp_ccp2_register_entities - Register the subdev media entity
- * @ccp2: Pointer to ISP CCP2 device
- * @vdev: Pointer to v4l device
- * return negative error code or zero on success
- */
-
-int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video nodes. */
- ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap3isp_video_register(&ccp2->video_in, vdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap3isp_ccp2_unregister_entities(ccp2);
return ret;
}
-/* -----------------------------------------------------------------------------
- * ISP ccp2 initialisation and cleanup
- */
-
-/*
- * omap3isp_ccp2_cleanup - CCP2 un-initialization
- * @isp : Pointer to ISP device
- */
-void omap3isp_ccp2_cleanup(struct isp_device *isp)
-{
- struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
-
- regulator_put(ccp2->vdds_csib);
-}
-
/*
* omap3isp_ccp2_init - CCP2 initialization.
* @isp : Pointer to ISP device
@@ -1184,13 +1177,25 @@ int omap3isp_ccp2_init(struct isp_device *isp)
}
ret = ccp2_init_entities(ccp2);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ regulator_put(ccp2->vdds_csib);
+ return ret;
+ }
ccp2_reset(ccp2);
-out:
- if (ret)
- omap3isp_ccp2_cleanup(isp);
+ return 0;
+}
- return ret;
+/*
+ * omap3isp_ccp2_cleanup - CCP2 un-initialization
+ * @isp : Pointer to ISP device
+ */
+void omap3isp_ccp2_cleanup(struct isp_device *isp)
+{
+ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+
+ omap3isp_video_cleanup(&ccp2->video_in);
+ media_entity_cleanup(&ccp2->subdev.entity);
+
+ regulator_put(ccp2->vdds_csib);
}
diff --git a/drivers/media/video/omap3isp/ispcsi2.c b/drivers/media/video/omap3isp/ispcsi2.c
index 69161a682b3..0c5f1cb9d99 100644
--- a/drivers/media/video/omap3isp/ispcsi2.c
+++ b/drivers/media/video/omap3isp/ispcsi2.c
@@ -1187,6 +1187,37 @@ static const struct media_entity_operations csi2_media_ops = {
.link_setup = csi2_link_setup,
};
+void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
+{
+ v4l2_device_unregister_subdev(&csi2->subdev);
+ omap3isp_video_unregister(&csi2->video_out);
+}
+
+int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&csi2->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CSI2 initialisation and cleanup
+ */
+
/*
* csi2_init_entities - Initialize subdev and media entity.
* @csi2: Pointer to csi2 structure.
@@ -1228,57 +1259,23 @@ static int csi2_init_entities(struct isp_csi2_device *csi2)
ret = omap3isp_video_init(&csi2->video_out, "CSI2a");
if (ret < 0)
- return ret;
+ goto error_video;
/* Connect the CSI2 subdev to the video node. */
ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
&csi2->video_out.video.entity, 0, 0);
if (ret < 0)
- return ret;
+ goto error_link;
return 0;
-}
-void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
-{
+error_link:
+ omap3isp_video_cleanup(&csi2->video_out);
+error_video:
media_entity_cleanup(&csi2->subdev.entity);
-
- v4l2_device_unregister_subdev(&csi2->subdev);
- omap3isp_video_unregister(&csi2->video_out);
-}
-
-int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video nodes. */
- ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap3isp_video_register(&csi2->video_out, vdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap3isp_csi2_unregister_entities(csi2);
return ret;
}
-/* -----------------------------------------------------------------------------
- * ISP CSI2 initialisation and cleanup
- */
-
-/*
- * omap3isp_csi2_cleanup - Routine for module driver cleanup
- */
-void omap3isp_csi2_cleanup(struct isp_device *isp)
-{
-}
-
/*
* omap3isp_csi2_init - Routine for module driver init
*/
@@ -1298,7 +1295,7 @@ int omap3isp_csi2_init(struct isp_device *isp)
ret = csi2_init_entities(csi2a);
if (ret < 0)
- goto fail;
+ return ret;
if (isp->revision == ISP_REVISION_15_0) {
csi2c->isp = isp;
@@ -1311,7 +1308,15 @@ int omap3isp_csi2_init(struct isp_device *isp)
}
return 0;
-fail:
- omap3isp_csi2_cleanup(isp);
- return ret;
+}
+
+/*
+ * omap3isp_csi2_cleanup - Routine for module driver cleanup
+ */
+void omap3isp_csi2_cleanup(struct isp_device *isp)
+{
+ struct isp_csi2_device *csi2a = &isp->isp_csi2a;
+
+ omap3isp_video_cleanup(&csi2a->video_out);
+ media_entity_cleanup(&csi2a->subdev.entity);
}
diff --git a/drivers/media/video/omap3isp/isph3a_aewb.c b/drivers/media/video/omap3isp/isph3a_aewb.c
index 8068cefd8d8..a3c76bf1817 100644
--- a/drivers/media/video/omap3isp/isph3a_aewb.c
+++ b/drivers/media/video/omap3isp/isph3a_aewb.c
@@ -370,5 +370,5 @@ void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
{
kfree(isp->isp_aewb.priv);
kfree(isp->isp_aewb.recover_priv);
- omap3isp_stat_free(&isp->isp_aewb);
+ omap3isp_stat_cleanup(&isp->isp_aewb);
}
diff --git a/drivers/media/video/omap3isp/isph3a_af.c b/drivers/media/video/omap3isp/isph3a_af.c
index ba54d0acdec..58e0bc41489 100644
--- a/drivers/media/video/omap3isp/isph3a_af.c
+++ b/drivers/media/video/omap3isp/isph3a_af.c
@@ -425,5 +425,5 @@ void omap3isp_h3a_af_cleanup(struct isp_device *isp)
{
kfree(isp->isp_af.priv);
kfree(isp->isp_af.recover_priv);
- omap3isp_stat_free(&isp->isp_af);
+ omap3isp_stat_cleanup(&isp->isp_af);
}
diff --git a/drivers/media/video/omap3isp/isphist.c b/drivers/media/video/omap3isp/isphist.c
index 1743856b30d..1163907bcdd 100644
--- a/drivers/media/video/omap3isp/isphist.c
+++ b/drivers/media/video/omap3isp/isphist.c
@@ -516,5 +516,5 @@ void omap3isp_hist_cleanup(struct isp_device *isp)
if (HIST_USING_DMA(&isp->isp_hist))
omap_free_dma(isp->isp_hist.dma_ch);
kfree(isp->isp_hist.priv);
- omap3isp_stat_free(&isp->isp_hist);
+ omap3isp_stat_cleanup(&isp->isp_hist);
}
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index aba537af87e..ccb876fe023 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -76,9 +76,51 @@ static struct omap3isp_prev_csc flr_prev_csc = {
#define DEF_DETECT_CORRECT_VAL 0xe
-#define PREV_MIN_WIDTH 64
-#define PREV_MIN_HEIGHT 8
-#define PREV_MAX_HEIGHT 16384
+/*
+ * Margins and image size limits.
+ *
+ * The preview engine crops several rows and columns internally depending on
+ * which filters are enabled. To avoid format changes when the filters are
+ * enabled or disabled (which would prevent them from being turned on or off
+ * during streaming), the driver assumes all the filters are enabled when
+ * computing sink crop and source format limits.
+ *
+ * If a filter is disabled, additional cropping is automatically added at the
+ * preview engine input by the driver to avoid overflow at line and frame end.
+ * This is completely transparent for applications.
+ *
+ * Median filter 4 pixels
+ * Noise filter,
+ * Faulty pixels correction 4 pixels, 4 lines
+ * CFA filter 4 pixels, 4 lines in Bayer mode
+ * 2 lines in other modes
+ * Color suppression 2 pixels
+ * or luma enhancement
+ * -------------------------------------------------------------
+ * Maximum total 14 pixels, 8 lines
+ *
+ * The color suppression and luma enhancement filters are applied after Bayer to
+ * YUV conversion. They thus can crop one pixel on the left and one pixel on the
+ * right side of the image without changing the color pattern. When both those
+ * filters are disabled, the driver must crop the two pixels on the same side of
+ * the image to avoid changing the Bayer pattern. The left margin is thus set to
+ * 8 pixels and the right margin to 6 pixels.
+ */
+
+#define PREV_MARGIN_LEFT 8
+#define PREV_MARGIN_RIGHT 6
+#define PREV_MARGIN_TOP 4
+#define PREV_MARGIN_BOTTOM 4
+
+#define PREV_MIN_IN_WIDTH 64
+#define PREV_MIN_IN_HEIGHT 8
+#define PREV_MAX_IN_HEIGHT 16384
+
+#define PREV_MIN_OUT_WIDTH 0
+#define PREV_MIN_OUT_HEIGHT 0
+#define PREV_MAX_OUT_WIDTH 1280
+#define PREV_MAX_OUT_WIDTH_ES2 3300
+#define PREV_MAX_OUT_WIDTH_3630 4096
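
As a quick illustration of the margin arithmetic described in the comment above, the worst case with every filter enabled reserves 14 pixels (8 left, 6 right) and 8 lines (4 top, 4 bottom). The helper below is an editorial sketch, not part of the patch, and its name is made up; it only shows how the new PREV_MARGIN_* constants bound the largest possible source rectangle for a given sink format:

/* Editorial sketch, not part of the patch: largest source rectangle allowed
 * by the worst-case margins for a given sink format. */
static void prev_max_source_size(const struct v4l2_mbus_framefmt *sink,
				 unsigned int *width, unsigned int *height)
{
	/* 8 + 6 = 14 pixels and 4 + 4 = 8 lines are reserved for the filters. */
	*width = sink->width - PREV_MARGIN_LEFT - PREV_MARGIN_RIGHT;
	*height = sink->height - PREV_MARGIN_TOP - PREV_MARGIN_BOTTOM;
}
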
/*
* Coefficient Tables for the submodules in Preview.
@@ -979,52 +1021,36 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
* enabled when reporting source pad formats to userspace. If this assumption is
* not true, rows and columns must be manually cropped at the preview engine
* input to avoid overflows at the end of lines and frames.
+ *
+ * See the explanation at the PREV_MARGIN_* definitions for more details.
*/
static void preview_config_input_size(struct isp_prev_device *prev)
{
struct isp_device *isp = to_isp_device(prev);
struct prev_params *params = &prev->params;
- struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
- unsigned int sph = 0;
- unsigned int eph = format->width - 1;
- unsigned int slv = 0;
- unsigned int elv = format->height - 1;
-
- if (prev->input == PREVIEW_INPUT_CCDC) {
- sph += 2;
- eph -= 2;
+ unsigned int sph = prev->crop.left;
+ unsigned int eph = prev->crop.left + prev->crop.width - 1;
+ unsigned int slv = prev->crop.top;
+ unsigned int elv = prev->crop.top + prev->crop.height - 1;
+
+ if (params->features & PREV_CFA) {
+ sph -= 2;
+ eph += 2;
+ slv -= 2;
+ elv += 2;
}
-
- /*
- * Median filter 4 pixels
- * Noise filter 4 pixels, 4 lines
- * or faulty pixels correction
- * CFA filter 4 pixels, 4 lines in Bayer mode
- * 2 lines in other modes
- * Color suppression 2 pixels
- * or luma enhancement
- * -------------------------------------------------------------
- * Maximum total 14 pixels, 8 lines
- */
-
- if (!(params->features & PREV_CFA)) {
- sph += 2;
- eph -= 2;
- slv += 2;
- elv -= 2;
+ if (params->features & (PREV_DEFECT_COR | PREV_NOISE_FILTER)) {
+ sph -= 2;
+ eph += 2;
+ slv -= 2;
+ elv += 2;
}
- if (!(params->features & (PREV_DEFECT_COR | PREV_NOISE_FILTER))) {
- sph += 2;
- eph -= 2;
- slv += 2;
- elv -= 2;
+ if (params->features & PREV_HORZ_MEDIAN_FILTER) {
+ sph -= 2;
+ eph += 2;
}
- if (!(params->features & PREV_HORZ_MEDIAN_FILTER)) {
- sph += 2;
- eph -= 2;
- }
- if (!(params->features & (PREV_CHROMA_SUPPRESS | PREV_LUMA_ENHANCE)))
- sph += 2;
+ if (params->features & (PREV_CHROMA_SUPPRESS | PREV_LUMA_ENHANCE))
+ sph -= 2;
isp_reg_writel(isp, (sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | eph,
OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO);
@@ -1228,7 +1254,6 @@ static void preview_init_params(struct isp_prev_device *prev)
/* Init values */
params->contrast = ISPPRV_CONTRAST_DEF * ISPPRV_CONTRAST_UNITS;
params->brightness = ISPPRV_BRIGHT_DEF * ISPPRV_BRIGHT_UNITS;
- params->average = NO_AVE;
params->cfa.format = OMAP3ISP_CFAFMT_BAYER;
memcpy(params->cfa.table, cfa_coef_table,
sizeof(params->cfa.table));
@@ -1281,14 +1306,14 @@ static unsigned int preview_max_out_width(struct isp_prev_device *prev)
switch (isp->revision) {
case ISP_REVISION_1_0:
- return ISPPRV_MAXOUTPUT_WIDTH;
+ return PREV_MAX_OUT_WIDTH;
case ISP_REVISION_2_0:
default:
- return ISPPRV_MAXOUTPUT_WIDTH_ES2;
+ return PREV_MAX_OUT_WIDTH_ES2;
case ISP_REVISION_15_0:
- return ISPPRV_MAXOUTPUT_WIDTH_3630;
+ return PREV_MAX_OUT_WIDTH_3630;
}
}
@@ -1296,8 +1321,6 @@ static void preview_configure(struct isp_prev_device *prev)
{
struct isp_device *isp = to_isp_device(prev);
struct v4l2_mbus_framefmt *format;
- unsigned int max_out_width;
- unsigned int format_avg;
preview_setup_hw(prev);
@@ -1335,10 +1358,7 @@ static void preview_configure(struct isp_prev_device *prev)
preview_config_outlineoffset(prev,
ALIGN(format->width, 0x10) * 2);
- max_out_width = preview_max_out_width(prev);
-
- format_avg = fls(DIV_ROUND_UP(format->width, max_out_width) - 1);
- preview_config_averager(prev, format_avg);
+ preview_config_averager(prev, 0);
preview_config_ycpos(prev, format->code);
}
@@ -1597,6 +1617,16 @@ __preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
return &prev->formats[pad];
}
+static struct v4l2_rect *
+__preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(fh, PREV_PAD_SINK);
+ else
+ return &prev->crop;
+}
+
/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
V4L2_MBUS_FMT_SGRBG10_1X10,
@@ -1611,24 +1641,25 @@ static const unsigned int preview_output_fmts[] = {
};
/*
- * preview_try_format - Handle try format by pad subdev method
- * @prev: ISP preview device
- * @fh : V4L2 subdev file handle
- * @pad: pad num
- * @fmt: pointer to v4l2 format structure
+ * preview_try_format - Validate a format
+ * @prev: ISP preview engine
+ * @fh: V4L2 subdev file handle
+ * @pad: pad number
+ * @fmt: format to be validated
+ * @which: try/active format selector
+ *
+ * Validate and adjust the given format for the given pad based on the preview
+ * engine limits and the format and crop rectangles on other pads.
*/
static void preview_try_format(struct isp_prev_device *prev,
struct v4l2_subdev_fh *fh, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
- struct v4l2_mbus_framefmt *format;
- unsigned int max_out_width;
enum v4l2_mbus_pixelcode pixelcode;
+ struct v4l2_rect *crop;
unsigned int i;
- max_out_width = preview_max_out_width(prev);
-
switch (pad) {
case PREV_PAD_SINK:
/* When reading data from the CCDC, the input size has already
@@ -1641,10 +1672,11 @@ static void preview_try_format(struct isp_prev_device *prev,
* filter array interpolation.
*/
if (prev->input == PREVIEW_INPUT_MEMORY) {
- fmt->width = clamp_t(u32, fmt->width, PREV_MIN_WIDTH,
- max_out_width * 8);
- fmt->height = clamp_t(u32, fmt->height, PREV_MIN_HEIGHT,
- PREV_MAX_HEIGHT);
+ fmt->width = clamp_t(u32, fmt->width, PREV_MIN_IN_WIDTH,
+ preview_max_out_width(prev));
+ fmt->height = clamp_t(u32, fmt->height,
+ PREV_MIN_IN_HEIGHT,
+ PREV_MAX_IN_HEIGHT);
}
fmt->colorspace = V4L2_COLORSPACE_SRGB;
@@ -1661,15 +1693,8 @@ static void preview_try_format(struct isp_prev_device *prev,
case PREV_PAD_SOURCE:
pixelcode = fmt->code;
- format = __preview_get_format(prev, fh, PREV_PAD_SINK, which);
- memcpy(fmt, format, sizeof(*fmt));
+ *fmt = *__preview_get_format(prev, fh, PREV_PAD_SINK, which);
- /* The preview module output size is configurable through the
- * input interface (horizontal and vertical cropping) and the
- * averager (horizontal scaling by 1/1, 1/2, 1/4 or 1/8). In
- * spite of this, hardcode the output size to the biggest
- * possible value for simplicity reasons.
- */
switch (pixelcode) {
case V4L2_MBUS_FMT_YUYV8_1X16:
case V4L2_MBUS_FMT_UYVY8_1X16:
@@ -1681,31 +1706,14 @@ static void preview_try_format(struct isp_prev_device *prev,
break;
}
- /* The TRM states (12.1.4.7.1.2) that 2 pixels must be cropped
- * from the left and right sides when the input source is the
- * CCDC. This seems not to be needed in practice, investigation
- * is required.
- */
- if (prev->input == PREVIEW_INPUT_CCDC)
- fmt->width -= 4;
-
- /* The preview module can output a maximum of 3312 pixels
- * horizontally due to fixed memory-line sizes. Compute the
- * horizontal averaging factor accordingly. Note that the limit
- * applies to the noise filter and CFA interpolation blocks, so
- * it doesn't take cropping by further blocks into account.
- *
- * ES 1.0 hardware revision is limited to 1280 pixels
- * horizontally.
- */
- fmt->width >>= fls(DIV_ROUND_UP(fmt->width, max_out_width) - 1);
-
- /* Assume that all blocks are enabled and crop pixels and lines
- * accordingly. See preview_config_input_size() for more
- * information.
+ /* The preview module output size is configurable through the
+ * averager (horizontal scaling by 1/1, 1/2, 1/4 or 1/8). This
+ * is not supported yet, hardcode the output size to the crop
+ * rectangle size.
*/
- fmt->width -= 14;
- fmt->height -= 8;
+ crop = __preview_get_crop(prev, fh, which);
+ fmt->width = crop->width;
+ fmt->height = crop->height;
fmt->colorspace = V4L2_COLORSPACE_JPEG;
break;
@@ -1715,6 +1723,49 @@ static void preview_try_format(struct isp_prev_device *prev,
}
/*
+ * preview_try_crop - Validate a crop rectangle
+ * @prev: ISP preview engine
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ *
+ * The preview engine crops lines and columns for its internal operation,
+ * depending on which filters are enabled. Enforce minimum crop margins to
+ * handle that transparently for userspace.
+ *
+ * See the explanation at the PREV_MARGIN_* definitions for more details.
+ */
+static void preview_try_crop(struct isp_prev_device *prev,
+ const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ unsigned int left = PREV_MARGIN_LEFT;
+ unsigned int right = sink->width - PREV_MARGIN_RIGHT;
+ unsigned int top = PREV_MARGIN_TOP;
+ unsigned int bottom = sink->height - PREV_MARGIN_BOTTOM;
+
+ /* When processing data on-the-fly from the CCDC, at least 2 pixels must
+ * be cropped from the left and right sides of the image. As we don't
+ * know which filters will be enabled, increase the left and right
+ * margins by two.
+ */
+ if (prev->input == PREVIEW_INPUT_CCDC) {
+ left += 2;
+ right -= 2;
+ }
+
+ /* Restrict left/top to even values to keep the Bayer pattern. */
+ crop->left &= ~1;
+ crop->top &= ~1;
+
+ crop->left = clamp_t(u32, crop->left, left, right - PREV_MIN_OUT_WIDTH);
+ crop->top = clamp_t(u32, crop->top, top, bottom - PREV_MIN_OUT_HEIGHT);
+ crop->width = clamp_t(u32, crop->width, PREV_MIN_OUT_WIDTH,
+ right - crop->left);
+ crop->height = clamp_t(u32, crop->height, PREV_MIN_OUT_HEIGHT,
+ bottom - crop->top);
+}
+
+/*
* preview_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
* @fh : V4L2 subdev file handle
@@ -1776,6 +1827,60 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
}
/*
+ * preview_get_crop - Retrieve the crop rectangle on a pad
+ * @sd: ISP preview V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @crop: crop rectangle
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int preview_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+
+ /* Cropping is only supported on the sink pad. */
+ if (crop->pad != PREV_PAD_SINK)
+ return -EINVAL;
+
+ crop->rect = *__preview_get_crop(prev, fh, crop->which);
+ return 0;
+}
+
+/*
+ * preview_set_crop - Configure the crop rectangle on a pad
+ * @sd: ISP preview V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @crop: crop rectangle
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int preview_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Cropping is only supported on the sink pad. */
+ if (crop->pad != PREV_PAD_SINK)
+ return -EINVAL;
+
+ /* The crop rectangle can't be changed while streaming. */
+ if (prev->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ format = __preview_get_format(prev, fh, PREV_PAD_SINK, crop->which);
+ preview_try_crop(prev, format, &crop->rect);
+ *__preview_get_crop(prev, fh, crop->which) = crop->rect;
+
+ /* Update the source format. */
+ format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, crop->which);
+ preview_try_format(prev, fh, PREV_PAD_SOURCE, format, crop->which);
+
+ return 0;
+}
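
With get_crop/set_crop now wired into the pad operations, applications select the preview input window through the standard subdev crop ioctls and the driver clamps the rectangle with preview_try_crop(). The sketch below is editorial (not part of the patch); the device node path and rectangle values are illustrative only:

/* Hypothetical userspace sketch: set a crop rectangle on the preview sink
 * pad (pad 0); the driver clamps it to the margins described above. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>

static int set_preview_crop(const char *devnode)
{
	struct v4l2_subdev_crop crop;
	int fd, ret;

	fd = open(devnode, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&crop, 0, sizeof(crop));
	crop.pad = 0;				/* PREV_PAD_SINK */
	crop.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	crop.rect.left = 8;			/* >= PREV_MARGIN_LEFT */
	crop.rect.top = 4;			/* >= PREV_MARGIN_TOP */
	crop.rect.width = 1024;
	crop.rect.height = 768;

	ret = ioctl(fd, VIDIOC_SUBDEV_S_CROP, &crop);
	close(fd);
	return ret;
}
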
+
+/*
* preview_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
* @fh : V4L2 subdev file handle
@@ -1808,6 +1913,7 @@ static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
format = __preview_get_format(prev, fh, fmt->pad, fmt->which);
if (format == NULL)
@@ -1818,9 +1924,18 @@ static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/* Propagate the format from sink to source */
if (fmt->pad == PREV_PAD_SINK) {
+ /* Reset the crop rectangle. */
+ crop = __preview_get_crop(prev, fh, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
+ preview_try_crop(prev, &fmt->format, crop);
+
+ /* Update the source format. */
format = __preview_get_format(prev, fh, PREV_PAD_SOURCE,
fmt->which);
- *format = fmt->format;
preview_try_format(prev, fh, PREV_PAD_SOURCE, format,
fmt->which);
}
@@ -1869,6 +1984,8 @@ static const struct v4l2_subdev_pad_ops preview_v4l2_pad_ops = {
.enum_frame_size = preview_enum_frame_size,
.get_fmt = preview_get_format,
.set_fmt = preview_set_format,
+ .get_crop = preview_get_crop,
+ .set_crop = preview_set_crop,
};
/* subdev operations */
@@ -1966,8 +2083,44 @@ static const struct media_entity_operations preview_media_ops = {
.link_setup = preview_link_setup,
};
+void omap3isp_preview_unregister_entities(struct isp_prev_device *prev)
+{
+ v4l2_device_unregister_subdev(&prev->subdev);
+ omap3isp_video_unregister(&prev->video_in);
+ omap3isp_video_unregister(&prev->video_out);
+}
+
+int omap3isp_preview_register_entities(struct isp_prev_device *prev,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &prev->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&prev->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&prev->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_preview_unregister_entities(prev);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP previewer initialisation and cleanup
+ */
+
/*
- * review_init_entities - Initialize subdev and media entity.
+ * preview_init_entities - Initialize subdev and media entity.
* @prev : Pointer to preview structure
* return -ENOMEM or zero on success
*/
@@ -2024,69 +2177,34 @@ static int preview_init_entities(struct isp_prev_device *prev)
ret = omap3isp_video_init(&prev->video_in, "preview");
if (ret < 0)
- return ret;
+ goto error_video_in;
ret = omap3isp_video_init(&prev->video_out, "preview");
if (ret < 0)
- return ret;
+ goto error_video_out;
/* Connect the video nodes to the previewer subdev. */
ret = media_entity_create_link(&prev->video_in.video.entity, 0,
&prev->subdev.entity, PREV_PAD_SINK, 0);
if (ret < 0)
- return ret;
+ goto error_link;
ret = media_entity_create_link(&prev->subdev.entity, PREV_PAD_SOURCE,
&prev->video_out.video.entity, 0, 0);
if (ret < 0)
- return ret;
+ goto error_link;
return 0;
-}
-void omap3isp_preview_unregister_entities(struct isp_prev_device *prev)
-{
+error_link:
+ omap3isp_video_cleanup(&prev->video_out);
+error_video_out:
+ omap3isp_video_cleanup(&prev->video_in);
+error_video_in:
media_entity_cleanup(&prev->subdev.entity);
-
- v4l2_device_unregister_subdev(&prev->subdev);
- v4l2_ctrl_handler_free(&prev->ctrls);
- omap3isp_video_unregister(&prev->video_in);
- omap3isp_video_unregister(&prev->video_out);
-}
-
-int omap3isp_preview_register_entities(struct isp_prev_device *prev,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video nodes. */
- ret = v4l2_device_register_subdev(vdev, &prev->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap3isp_video_register(&prev->video_in, vdev);
- if (ret < 0)
- goto error;
-
- ret = omap3isp_video_register(&prev->video_out, vdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap3isp_preview_unregister_entities(prev);
return ret;
}
-/* -----------------------------------------------------------------------------
- * ISP previewer initialisation and cleanup
- */
-
-void omap3isp_preview_cleanup(struct isp_device *isp)
-{
-}
-
/*
* isp_preview_init - Previewer initialization.
* @dev : Pointer to ISP device
@@ -2095,19 +2213,20 @@ void omap3isp_preview_cleanup(struct isp_device *isp)
int omap3isp_preview_init(struct isp_device *isp)
{
struct isp_prev_device *prev = &isp->isp_prev;
- int ret;
spin_lock_init(&prev->lock);
init_waitqueue_head(&prev->wait);
preview_init_params(prev);
- ret = preview_init_entities(prev);
- if (ret < 0)
- goto out;
+ return preview_init_entities(prev);
+}
-out:
- if (ret)
- omap3isp_preview_cleanup(isp);
+void omap3isp_preview_cleanup(struct isp_device *isp)
+{
+ struct isp_prev_device *prev = &isp->isp_prev;
- return ret;
+ v4l2_ctrl_handler_free(&prev->ctrls);
+ omap3isp_video_cleanup(&prev->video_in);
+ omap3isp_video_cleanup(&prev->video_out);
+ media_entity_cleanup(&prev->subdev.entity);
}
diff --git a/drivers/media/video/omap3isp/isppreview.h b/drivers/media/video/omap3isp/isppreview.h
index fa943bd05c7..f54e775c2df 100644
--- a/drivers/media/video/omap3isp/isppreview.h
+++ b/drivers/media/video/omap3isp/isppreview.h
@@ -45,11 +45,6 @@
#define ISPPRV_CONTRAST_HIGH 0xFF
#define ISPPRV_CONTRAST_UNITS 0x1
-#define NO_AVE 0x0
-#define AVE_2_PIX 0x1
-#define AVE_4_PIX 0x2
-#define AVE_8_PIX 0x3
-
/* Features list */
#define PREV_LUMA_ENHANCE OMAP3ISP_PREV_LUMAENH
#define PREV_INVERSE_ALAW OMAP3ISP_PREV_INVALAW
@@ -106,7 +101,6 @@ enum preview_ycpos_mode {
* @rgb2ycbcr: RGB to ycbcr parameters.
* @hmed: Horizontal median filter.
* @yclimit: YC limits parameters.
- * @average: Downsampling rate for averager.
* @contrast: Contrast.
* @brightness: Brightness.
*/
@@ -124,7 +118,6 @@ struct prev_params {
struct omap3isp_prev_csc rgb2ycbcr;
struct omap3isp_prev_hmed hmed;
struct omap3isp_prev_yclimit yclimit;
- u8 average;
u8 contrast;
u8 brightness;
};
@@ -159,6 +152,7 @@ struct isptables_update {
* @subdev: V4L2 subdevice
* @pads: Media entity pads
* @formats: Active formats at the subdev pad
+ * @crop: Active crop rectangle
* @input: Module currently connected to the input pad
* @output: Bitmask of the active output
* @video_in: Input video entity
@@ -177,6 +171,7 @@ struct isp_prev_device {
struct v4l2_subdev subdev;
struct media_pad pads[PREV_PADS_NUM];
struct v4l2_mbus_framefmt formats[PREV_PADS_NUM];
+ struct v4l2_rect crop;
struct v4l2_ctrl_handler ctrls;
diff --git a/drivers/media/video/omap3isp/ispqueue.c b/drivers/media/video/omap3isp/ispqueue.c
index 9c317148205..9bebb1e57aa 100644
--- a/drivers/media/video/omap3isp/ispqueue.c
+++ b/drivers/media/video/omap3isp/ispqueue.c
@@ -868,6 +868,10 @@ int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
goto done;
if (vbuf->memory == V4L2_MEMORY_USERPTR &&
+ vbuf->length < buf->vbuf.length)
+ goto done;
+
+ if (vbuf->memory == V4L2_MEMORY_USERPTR &&
vbuf->m.userptr != buf->vbuf.m.userptr) {
isp_video_buffer_cleanup(buf);
buf->vbuf.m.userptr = vbuf->m.userptr;
diff --git a/drivers/media/video/omap3isp/ispreg.h b/drivers/media/video/omap3isp/ispreg.h
index 69f6af6f6b9..084ea77d65a 100644
--- a/drivers/media/video/omap3isp/ispreg.h
+++ b/drivers/media/video/omap3isp/ispreg.h
@@ -402,9 +402,6 @@
#define ISPPRV_YENH_TABLE_ADDR 0x1000
#define ISPPRV_CFA_TABLE_ADDR 0x1400
-#define ISPPRV_MAXOUTPUT_WIDTH 1280
-#define ISPPRV_MAXOUTPUT_WIDTH_ES2 3300
-#define ISPPRV_MAXOUTPUT_WIDTH_3630 4096
#define ISPRSZ_MIN_OUTPUT 64
#define ISPRSZ_MAX_OUTPUT 3312
diff --git a/drivers/media/video/omap3isp/ispresizer.c b/drivers/media/video/omap3isp/ispresizer.c
index 0bb0f8cd36f..50e593bfcfa 100644
--- a/drivers/media/video/omap3isp/ispresizer.c
+++ b/drivers/media/video/omap3isp/ispresizer.c
@@ -1608,6 +1608,42 @@ static const struct media_entity_operations resizer_media_ops = {
.link_setup = resizer_link_setup,
};
+void omap3isp_resizer_unregister_entities(struct isp_res_device *res)
+{
+ v4l2_device_unregister_subdev(&res->subdev);
+ omap3isp_video_unregister(&res->video_in);
+ omap3isp_video_unregister(&res->video_out);
+}
+
+int omap3isp_resizer_register_entities(struct isp_res_device *res,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &res->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&res->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&res->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_resizer_unregister_entities(res);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP resizer initialization and cleanup
+ */
+
/*
* resizer_init_entities - Initialize resizer subdev and media entity.
* @res : Pointer to resizer device structure
@@ -1652,68 +1688,34 @@ static int resizer_init_entities(struct isp_res_device *res)
ret = omap3isp_video_init(&res->video_in, "resizer");
if (ret < 0)
- return ret;
+ goto error_video_in;
ret = omap3isp_video_init(&res->video_out, "resizer");
if (ret < 0)
- return ret;
+ goto error_video_out;
/* Connect the video nodes to the resizer subdev. */
ret = media_entity_create_link(&res->video_in.video.entity, 0,
&res->subdev.entity, RESZ_PAD_SINK, 0);
if (ret < 0)
- return ret;
+ goto error_link;
ret = media_entity_create_link(&res->subdev.entity, RESZ_PAD_SOURCE,
&res->video_out.video.entity, 0, 0);
if (ret < 0)
- return ret;
+ goto error_link;
return 0;
-}
-void omap3isp_resizer_unregister_entities(struct isp_res_device *res)
-{
+error_link:
+ omap3isp_video_cleanup(&res->video_out);
+error_video_out:
+ omap3isp_video_cleanup(&res->video_in);
+error_video_in:
media_entity_cleanup(&res->subdev.entity);
-
- v4l2_device_unregister_subdev(&res->subdev);
- omap3isp_video_unregister(&res->video_in);
- omap3isp_video_unregister(&res->video_out);
-}
-
-int omap3isp_resizer_register_entities(struct isp_res_device *res,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video nodes. */
- ret = v4l2_device_register_subdev(vdev, &res->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap3isp_video_register(&res->video_in, vdev);
- if (ret < 0)
- goto error;
-
- ret = omap3isp_video_register(&res->video_out, vdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap3isp_resizer_unregister_entities(res);
return ret;
}
-/* -----------------------------------------------------------------------------
- * ISP resizer initialization and cleanup
- */
-
-void omap3isp_resizer_cleanup(struct isp_device *isp)
-{
-}
-
/*
* isp_resizer_init - Resizer initialization.
* @isp : Pointer to ISP device
@@ -1722,17 +1724,17 @@ void omap3isp_resizer_cleanup(struct isp_device *isp)
int omap3isp_resizer_init(struct isp_device *isp)
{
struct isp_res_device *res = &isp->isp_res;
- int ret;
init_waitqueue_head(&res->wait);
atomic_set(&res->stopping, 0);
- ret = resizer_init_entities(res);
- if (ret < 0)
- goto out;
+ return resizer_init_entities(res);
+}
-out:
- if (ret)
- omap3isp_resizer_cleanup(isp);
+void omap3isp_resizer_cleanup(struct isp_device *isp)
+{
+ struct isp_res_device *res = &isp->isp_res;
- return ret;
+ omap3isp_video_cleanup(&res->video_in);
+ omap3isp_video_cleanup(&res->video_out);
+ media_entity_cleanup(&res->subdev.entity);
}
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 808065948ac..68d539456c5 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -366,7 +366,8 @@ static void isp_stat_bufs_free(struct ispstat *stat)
dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
buf->iovm->sgt->nents,
DMA_FROM_DEVICE);
- iommu_vfree(isp->iommu, buf->iommu_addr);
+ omap_iommu_vfree(isp->domain, isp->iommu,
+ buf->iommu_addr);
} else {
if (!buf->virt_addr)
continue;
@@ -399,8 +400,8 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
struct iovm_struct *iovm;
WARN_ON(buf->dma_addr);
- buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size,
- IOMMU_FLAG);
+ buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ size, IOMMU_FLAG);
if (IS_ERR((void *)buf->iommu_addr)) {
dev_err(stat->isp->dev,
"%s: Can't acquire memory for "
@@ -409,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
return -ENOMEM;
}
- iovm = find_iovm_area(isp->iommu, buf->iommu_addr);
+ iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr);
if (!iovm ||
!dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
DMA_FROM_DEVICE)) {
@@ -418,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
}
buf->iovm = iovm;
- buf->virt_addr = da_to_va(stat->isp->iommu,
+ buf->virt_addr = omap_da_to_va(stat->isp->iommu,
(u32)buf->iommu_addr);
buf->empty = 1;
dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
@@ -1022,24 +1023,6 @@ void omap3isp_stat_dma_isr(struct ispstat *stat)
__stat_isr(stat, 1);
}
-static int isp_stat_init_entities(struct ispstat *stat, const char *name,
- const struct v4l2_subdev_ops *sd_ops)
-{
- struct v4l2_subdev *subdev = &stat->subdev;
- struct media_entity *me = &subdev->entity;
-
- v4l2_subdev_init(subdev, sd_ops);
- snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
- subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
- subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- v4l2_set_subdevdata(subdev, stat);
-
- stat->pad.flags = MEDIA_PAD_FL_SINK;
- me->ops = NULL;
-
- return media_entity_init(me, 1, &stat->pad, 0);
-}
-
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
@@ -1061,7 +1044,6 @@ int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
void omap3isp_stat_unregister_entities(struct ispstat *stat)
{
- media_entity_cleanup(&stat->subdev.entity);
v4l2_device_unregister_subdev(&stat->subdev);
}
@@ -1071,21 +1053,50 @@ int omap3isp_stat_register_entities(struct ispstat *stat,
return v4l2_device_register_subdev(vdev, &stat->subdev);
}
+static int isp_stat_init_entities(struct ispstat *stat, const char *name,
+ const struct v4l2_subdev_ops *sd_ops)
+{
+ struct v4l2_subdev *subdev = &stat->subdev;
+ struct media_entity *me = &subdev->entity;
+
+ v4l2_subdev_init(subdev, sd_ops);
+ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
+ subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ v4l2_set_subdevdata(subdev, stat);
+
+ stat->pad.flags = MEDIA_PAD_FL_SINK;
+ me->ops = NULL;
+
+ return media_entity_init(me, 1, &stat->pad, 0);
+}
+
int omap3isp_stat_init(struct ispstat *stat, const char *name,
const struct v4l2_subdev_ops *sd_ops)
{
+ int ret;
+
stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
if (!stat->buf)
return -ENOMEM;
+
isp_stat_buf_clear(stat);
mutex_init(&stat->ioctl_lock);
atomic_set(&stat->buf_err, 0);
- return isp_stat_init_entities(stat, name, sd_ops);
+ ret = isp_stat_init_entities(stat, name, sd_ops);
+ if (ret < 0) {
+ mutex_destroy(&stat->ioctl_lock);
+ kfree(stat->buf);
+ }
+
+ return ret;
}
-void omap3isp_stat_free(struct ispstat *stat)
+void omap3isp_stat_cleanup(struct ispstat *stat)
{
+ media_entity_cleanup(&stat->subdev.entity);
+ mutex_destroy(&stat->ioctl_lock);
isp_stat_bufs_free(stat);
kfree(stat->buf);
}
diff --git a/drivers/media/video/omap3isp/ispstat.h b/drivers/media/video/omap3isp/ispstat.h
index d86da94fa50..9b7c8654dc8 100644
--- a/drivers/media/video/omap3isp/ispstat.h
+++ b/drivers/media/video/omap3isp/ispstat.h
@@ -144,7 +144,7 @@ int omap3isp_stat_request_statistics(struct ispstat *stat,
struct omap3isp_stat_data *data);
int omap3isp_stat_init(struct ispstat *stat, const char *name,
const struct v4l2_subdev_ops *sd_ops);
-void omap3isp_stat_free(struct ispstat *stat);
+void omap3isp_stat_cleanup(struct ispstat *stat);
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index fd965adfd59..d1000723c5a 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -278,7 +278,8 @@ isp_video_far_end(struct isp_video *video)
* limits reported by every block in the pipeline.
*
* Return 0 if all formats match, or -EPIPE if at least one link is found with
- * different formats on its two ends.
+ * different formats on its two ends or if the pipeline doesn't start with a
+ * video source (either a subdev with no input pad, or a non-subdev entity).
*/
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
@@ -329,10 +330,15 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
* in the middle of it. */
shifter_link = subdev == &isp->isp_ccdc.subdev;
- /* Retrieve the source format */
+ /* Retrieve the source format. Return an error if no source
+ * entity can be found, and stop checking the pipeline if the
+ * source entity isn't a subdev.
+ */
pad = media_entity_remote_source(pad);
- if (pad == NULL ||
- media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ if (pad == NULL)
+ return -EPIPE;
+
+ if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
break;
subdev = media_entity_to_v4l2_subdev(pad->entity);
@@ -446,7 +452,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
sgt->nents = sglen;
sgt->orig_nents = sglen;
- da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG);
+ da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
if (IS_ERR_VALUE(da))
kfree(sgt);
@@ -462,7 +468,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
struct sg_table *sgt;
- sgt = iommu_vunmap(isp->iommu, (u32)da);
+ sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
kfree(sgt);
}
@@ -1050,6 +1056,14 @@ error:
if (video->isp->pdata->set_constraints)
video->isp->pdata->set_constraints(video->isp, false);
media_entity_pipeline_stop(&video->video.entity);
+ /* The DMA queue must be emptied here, otherwise CCDC interrupts
+ * that will get triggered the next time the CCDC is powered up
+ * will try to access buffers that might have been freed but
+ * still present in the DMA queue. This can easily get triggered
+ * if the above omap3isp_pipeline_set_stream() call fails on a
+ * system with a free-running sensor.
+ */
+ INIT_LIST_HEAD(&video->dmaqueue);
video->queue = NULL;
}
@@ -1311,6 +1325,13 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
return 0;
}
+void omap3isp_video_cleanup(struct isp_video *video)
+{
+ media_entity_cleanup(&video->video.entity);
+ mutex_destroy(&video->stream_lock);
+ mutex_destroy(&video->mutex);
+}
+
int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
int ret;
@@ -1327,8 +1348,6 @@ int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
void omap3isp_video_unregister(struct isp_video *video)
{
- if (video_is_registered(&video->video)) {
- media_entity_cleanup(&video->video.entity);
+ if (video_is_registered(&video->video))
video_unregister_device(&video->video);
- }
}
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index 53160aa24e6..08cbfa144e6 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -190,6 +190,7 @@ struct isp_video_fh {
container_of(q, struct isp_video_fh, queue)
int omap3isp_video_init(struct isp_video *video, const char *name);
+void omap3isp_video_cleanup(struct isp_video *video);
int omap3isp_video_register(struct isp_video *video,
struct v4l2_device *vdev);
void omap3isp_video_unregister(struct isp_video *video);
diff --git a/drivers/media/video/ov2640.c b/drivers/media/video/ov2640.c
index 9ce2fa037b9..b5247cb64fd 100644
--- a/drivers/media/video/ov2640.c
+++ b/drivers/media/video/ov2640.c
@@ -18,11 +18,13 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
+
+#include <media/soc_camera.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-subdev.h>
-#include <media/soc_camera.h>
-#include <media/soc_mediabus.h>
+#include <media/v4l2-ctrls.h>
#define VAL_SET(x, mask, rshift, lshift) \
((((x) >> rshift) & mask) << lshift)
@@ -299,12 +301,10 @@ struct ov2640_win_size {
struct ov2640_priv {
struct v4l2_subdev subdev;
- struct ov2640_camera_info *info;
+ struct v4l2_ctrl_handler hdl;
enum v4l2_mbus_pixelcode cfmt_code;
const struct ov2640_win_size *win;
int model;
- u16 flag_vflip:1;
- u16 flag_hflip:1;
};
/*
@@ -610,29 +610,6 @@ static enum v4l2_mbus_pixelcode ov2640_codes[] = {
};
/*
- * Supported controls
- */
-static const struct v4l2_queryctrl ov2640_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
-};
-
-/*
* General functions
*/
static struct ov2640_priv *to_ov2640(const struct i2c_client *client)
@@ -701,81 +678,23 @@ static int ov2640_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int ov2640_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned long width_flag = flags & SOCAM_DATAWIDTH_MASK;
-
- /* Only one width bit may be set */
- if (!is_power_of_2(width_flag))
- return -EINVAL;
-
- if (icl->set_bus_param)
- return icl->set_bus_param(icl, width_flag);
-
- /*
- * Without board specific bus width settings we support only the
- * sensors native bus width witch are tested working
- */
- if (width_flag & (SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8))
- return 0;
-
- return 0;
-}
-
-static unsigned long ov2640_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_HIGH;
-
- if (icl->query_bus_param)
- flags |= icl->query_bus_param(icl) & SOCAM_DATAWIDTH_MASK;
- else
- flags |= SOCAM_DATAWIDTH_10;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
-static int ov2640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov2640_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd =
+ &container_of(ctrl->handler, struct ov2640_priv, hdl)->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov2640_priv *priv = to_ov2640(client);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ctrl->value = priv->flag_vflip;
- break;
- case V4L2_CID_HFLIP:
- ctrl->value = priv->flag_hflip;
- break;
- }
- return 0;
-}
-
-static int ov2640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov2640_priv *priv = to_ov2640(client);
- int ret = 0;
u8 val;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- val = ctrl->value ? REG04_VFLIP_IMG : 0x00;
- priv->flag_vflip = ctrl->value ? 1 : 0;
- ret = ov2640_mask_set(client, REG04, REG04_VFLIP_IMG, val);
- break;
+ val = ctrl->val ? REG04_VFLIP_IMG : 0x00;
+ return ov2640_mask_set(client, REG04, REG04_VFLIP_IMG, val);
case V4L2_CID_HFLIP:
- val = ctrl->value ? REG04_HFLIP_IMG : 0x00;
- priv->flag_hflip = ctrl->value ? 1 : 0;
- ret = ov2640_mask_set(client, REG04, REG04_HFLIP_IMG, val);
- break;
+ val = ctrl->val ? REG04_HFLIP_IMG : 0x00;
+ return ov2640_mask_set(client, REG04, REG04_HFLIP_IMG, val);
}
- return ret;
+ return -EINVAL;
}
static int ov2640_g_chip_ident(struct v4l2_subdev *sd,
@@ -1023,18 +942,13 @@ static int ov2640_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int ov2640_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int ov2640_video_probe(struct i2c_client *client)
{
struct ov2640_priv *priv = to_ov2640(client);
u8 pid, ver, midh, midl;
const char *devname;
int ret;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/*
* check and show product ID and manufacturer ID
*/
@@ -1060,22 +974,17 @@ static int ov2640_video_probe(struct soc_camera_device *icd,
"%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
devname, pid, ver, midh, midl);
- return 0;
+ return v4l2_ctrl_handler_setup(&priv->hdl);
err:
return ret;
}
-static struct soc_camera_ops ov2640_ops = {
- .set_bus_param = ov2640_set_bus_param,
- .query_bus_param = ov2640_query_bus_param,
- .controls = ov2640_controls,
- .num_controls = ARRAY_SIZE(ov2640_controls),
+static const struct v4l2_ctrl_ops ov2640_ctrl_ops = {
+ .s_ctrl = ov2640_s_ctrl,
};
static struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
- .g_ctrl = ov2640_g_ctrl,
- .s_ctrl = ov2640_s_ctrl,
.g_chip_ident = ov2640_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov2640_g_register,
@@ -1083,6 +992,21 @@ static struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
#endif
};
+static int ov2640_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
static struct v4l2_subdev_video_ops ov2640_subdev_video_ops = {
.s_stream = ov2640_s_stream,
.g_mbus_fmt = ov2640_g_fmt,
@@ -1091,6 +1015,7 @@ static struct v4l2_subdev_video_ops ov2640_subdev_video_ops = {
.cropcap = ov2640_cropcap,
.g_crop = ov2640_g_crop,
.enum_mbus_fmt = ov2640_enum_fmt,
+ .g_mbus_config = ov2640_g_mbus_config,
};
static struct v4l2_subdev_ops ov2640_subdev_ops = {
@@ -1104,18 +1029,11 @@ static struct v4l2_subdev_ops ov2640_subdev_ops = {
static int ov2640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
- struct ov2640_priv *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
- int ret;
-
- if (!icd) {
- dev_err(&adapter->dev, "OV2640: missing soc-camera data!\n");
- return -EINVAL;
- }
+ struct ov2640_priv *priv;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ int ret;
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&adapter->dev,
"OV2640: Missing platform_data for driver\n");
@@ -1135,15 +1053,23 @@ static int ov2640_probe(struct i2c_client *client,
return -ENOMEM;
}
- priv->info = icl->priv;
-
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
+ v4l2_ctrl_handler_init(&priv->hdl, 2);
+ v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ priv->subdev.ctrl_handler = &priv->hdl;
+ if (priv->hdl.error) {
+ int err = priv->hdl.error;
- icd->ops = &ov2640_ops;
+ kfree(priv);
+ return err;
+ }
- ret = ov2640_video_probe(icd, client);
+ ret = ov2640_video_probe(client);
if (ret) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
} else {
dev_info(&adapter->dev, "OV2640 Probed\n");
@@ -1155,9 +1081,9 @@ static int ov2640_probe(struct i2c_client *client,
static int ov2640_remove(struct i2c_client *client)
{
struct ov2640_priv *priv = to_ov2640(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- icd->ops = NULL;
+ v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
return 0;
}
diff --git a/drivers/media/video/ov5642.c b/drivers/media/video/ov5642.c
index 349a4ad3ccc..bb37ec80f27 100644
--- a/drivers/media/video/ov5642.c
+++ b/drivers/media/video/ov5642.c
@@ -14,14 +14,16 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/module.h>
+#include <linux/v4l2-mediabus.h>
#include <media/soc_camera.h>
-#include <media/soc_mediabus.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-subdev.h>
@@ -35,7 +37,7 @@
#define REG_WINDOW_START_Y_LOW 0x3803
#define REG_WINDOW_WIDTH_HIGH 0x3804
#define REG_WINDOW_WIDTH_LOW 0x3805
-#define REG_WINDOW_HEIGHT_HIGH 0x3806
+#define REG_WINDOW_HEIGHT_HIGH 0x3806
#define REG_WINDOW_HEIGHT_LOW 0x3807
#define REG_OUT_WIDTH_HIGH 0x3808
#define REG_OUT_WIDTH_LOW 0x3809
@@ -45,19 +47,44 @@
#define REG_OUT_TOTAL_WIDTH_LOW 0x380d
#define REG_OUT_TOTAL_HEIGHT_HIGH 0x380e
#define REG_OUT_TOTAL_HEIGHT_LOW 0x380f
+#define REG_OUTPUT_FORMAT 0x4300
+#define REG_ISP_CTRL_01 0x5001
+#define REG_AVG_WINDOW_END_X_HIGH 0x5682
+#define REG_AVG_WINDOW_END_X_LOW 0x5683
+#define REG_AVG_WINDOW_END_Y_HIGH 0x5686
+#define REG_AVG_WINDOW_END_Y_LOW 0x5687
+
+/* active pixel array size */
+#define OV5642_SENSOR_SIZE_X 2592
+#define OV5642_SENSOR_SIZE_Y 1944
/*
- * define standard resolution.
- * Works currently only for up to 720 lines
- * eg. 320x240, 640x480, 800x600, 1280x720, 2048x720
+ * About OV5642 resolution, cropping and binning:
+ * This sensor supports it all, at least in the feature description.
+ * Unfortunately, no combination of appropriate register settings could make
+ * the chip work as intended. Since it is driven from predefined register
+ * lists, some undocumented registers are presumably changed there to achieve
+ * the intended result.
+ * This driver currently only works for resolutions up to 720 lines with a
+ * 1:1 scale. Hopefully these restrictions will be removed in the future.
*/
+#define OV5642_MAX_WIDTH OV5642_SENSOR_SIZE_X
+#define OV5642_MAX_HEIGHT 720
-#define OV5642_WIDTH 1280
-#define OV5642_HEIGHT 720
-#define OV5642_TOTAL_WIDTH 3200
-#define OV5642_TOTAL_HEIGHT 2000
-#define OV5642_SENSOR_SIZE_X 2592
-#define OV5642_SENSOR_SIZE_Y 1944
+/* default sizes */
+#define OV5642_DEFAULT_WIDTH 1280
+#define OV5642_DEFAULT_HEIGHT OV5642_MAX_HEIGHT
+
+/* minimum extra blanking */
+#define BLANKING_EXTRA_WIDTH 500
+#define BLANKING_EXTRA_HEIGHT 20
+
+/*
+ * The sensor's autoexposure is buggy when total_height is set too low:
+ * it tries to expose for longer than one frame period without accounting
+ * for that, which leads to weird output. So we use 1000 lines as the minimum.
+ */
+#define BLANKING_MIN_HEIGHT 1000
struct regval_list {
u16 reg_num;
@@ -582,6 +609,11 @@ struct ov5642_datafmt {
struct ov5642 {
struct v4l2_subdev subdev;
const struct ov5642_datafmt *fmt;
+ struct v4l2_rect crop_rect;
+
+ /* blanking information */
+ int total_width;
+ int total_height;
};
static const struct ov5642_datafmt ov5642_colour_fmts[] = {
@@ -642,6 +674,21 @@ static int reg_write(struct i2c_client *client, u16 reg, u8 val)
return 0;
}
+
+/*
+ * Convenience function to write a 16-bit register value that is split
+ * across two consecutive high-byte and low-byte registers
+ */
+static int reg_write16(struct i2c_client *client, u16 reg, u16 val16)
+{
+ int ret;
+
+ ret = reg_write(client, reg, val16 >> 8);
+ if (ret)
+ return ret;
+ return reg_write(client, reg + 1, val16 & 0x00ff);
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
@@ -685,58 +732,55 @@ static int ov5642_write_array(struct i2c_client *client,
return 0;
}
-static int ov5642_set_resolution(struct i2c_client *client)
+static int ov5642_set_resolution(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
+ int width = priv->crop_rect.width;
+ int height = priv->crop_rect.height;
+ int total_width = priv->total_width;
+ int total_height = priv->total_height;
+ int start_x = (OV5642_SENSOR_SIZE_X - width) / 2;
+ int start_y = (OV5642_SENSOR_SIZE_Y - height) / 2;
int ret;
- u8 start_x_high = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) >> 8;
- u8 start_x_low = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) & 0xff;
- u8 start_y_high = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) >> 8;
- u8 start_y_low = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) & 0xff;
-
- u8 width_high = OV5642_WIDTH >> 8;
- u8 width_low = OV5642_WIDTH & 0xff;
- u8 height_high = OV5642_HEIGHT >> 8;
- u8 height_low = OV5642_HEIGHT & 0xff;
-
- u8 total_width_high = OV5642_TOTAL_WIDTH >> 8;
- u8 total_width_low = OV5642_TOTAL_WIDTH & 0xff;
- u8 total_height_high = OV5642_TOTAL_HEIGHT >> 8;
- u8 total_height_low = OV5642_TOTAL_HEIGHT & 0xff;
-
- ret = reg_write(client, REG_WINDOW_START_X_HIGH, start_x_high);
- if (!ret)
- ret = reg_write(client, REG_WINDOW_START_X_LOW, start_x_low);
- if (!ret)
- ret = reg_write(client, REG_WINDOW_START_Y_HIGH, start_y_high);
- if (!ret)
- ret = reg_write(client, REG_WINDOW_START_Y_LOW, start_y_low);
+ /*
+ * This should set the starting point for cropping.
+ * Doesn't work so far.
+ */
+ ret = reg_write16(client, REG_WINDOW_START_X_HIGH, start_x);
if (!ret)
- ret = reg_write(client, REG_WINDOW_WIDTH_HIGH, width_high);
- if (!ret)
- ret = reg_write(client, REG_WINDOW_WIDTH_LOW , width_low);
- if (!ret)
- ret = reg_write(client, REG_WINDOW_HEIGHT_HIGH, height_high);
- if (!ret)
- ret = reg_write(client, REG_WINDOW_HEIGHT_LOW, height_low);
+ ret = reg_write16(client, REG_WINDOW_START_Y_HIGH, start_y);
+ if (!ret) {
+ priv->crop_rect.left = start_x;
+ priv->crop_rect.top = start_y;
+ }
if (!ret)
- ret = reg_write(client, REG_OUT_WIDTH_HIGH, width_high);
+ ret = reg_write16(client, REG_WINDOW_WIDTH_HIGH, width);
if (!ret)
- ret = reg_write(client, REG_OUT_WIDTH_LOW , width_low);
- if (!ret)
- ret = reg_write(client, REG_OUT_HEIGHT_HIGH, height_high);
+ ret = reg_write16(client, REG_WINDOW_HEIGHT_HIGH, height);
+ if (ret)
+ return ret;
+ priv->crop_rect.width = width;
+ priv->crop_rect.height = height;
+
+ /* Set the output window size. Only 1:1 scale is supported so far. */
+ ret = reg_write16(client, REG_OUT_WIDTH_HIGH, width);
if (!ret)
- ret = reg_write(client, REG_OUT_HEIGHT_LOW, height_low);
+ ret = reg_write16(client, REG_OUT_HEIGHT_HIGH, height);
+ /* Total width = output size + blanking */
if (!ret)
- ret = reg_write(client, REG_OUT_TOTAL_WIDTH_HIGH, total_width_high);
+ ret = reg_write16(client, REG_OUT_TOTAL_WIDTH_HIGH, total_width);
if (!ret)
- ret = reg_write(client, REG_OUT_TOTAL_WIDTH_LOW, total_width_low);
+ ret = reg_write16(client, REG_OUT_TOTAL_HEIGHT_HIGH, total_height);
+
+ /* Sets the window for AWB calculations */
if (!ret)
- ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_HIGH, total_height_high);
+ ret = reg_write16(client, REG_AVG_WINDOW_END_X_HIGH, width);
if (!ret)
- ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_LOW, total_height_low);
+ ret = reg_write16(client, REG_AVG_WINDOW_END_Y_HIGH, height);
return ret;
}
@@ -744,18 +788,18 @@ static int ov5642_set_resolution(struct i2c_client *client)
static int ov5642_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
const struct ov5642_datafmt *fmt = ov5642_find_datafmt(mf->code);
- dev_dbg(sd->v4l2_dev->dev, "%s(%u) width: %u heigth: %u\n",
- __func__, mf->code, mf->width, mf->height);
+ mf->width = priv->crop_rect.width;
+ mf->height = priv->crop_rect.height;
if (!fmt) {
mf->code = ov5642_colour_fmts[0].code;
mf->colorspace = ov5642_colour_fmts[0].colorspace;
}
- mf->width = OV5642_WIDTH;
- mf->height = OV5642_HEIGHT;
mf->field = V4L2_FIELD_NONE;
return 0;
@@ -767,20 +811,13 @@ static int ov5642_s_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov5642 *priv = to_ov5642(client);
- dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
-
/* MIPI CSI could have changed the format, double-check */
if (!ov5642_find_datafmt(mf->code))
return -EINVAL;
ov5642_try_fmt(sd, mf);
-
priv->fmt = ov5642_find_datafmt(mf->code);
- ov5642_write_array(client, ov5642_default_regs_init);
- ov5642_set_resolution(client);
- ov5642_write_array(client, ov5642_default_regs_finalise);
-
return 0;
}
@@ -794,8 +831,8 @@ static int ov5642_g_fmt(struct v4l2_subdev *sd,
mf->code = fmt->code;
mf->colorspace = fmt->colorspace;
- mf->width = OV5642_WIDTH;
- mf->height = OV5642_HEIGHT;
+ mf->width = priv->crop_rect.width;
+ mf->height = priv->crop_rect.height;
mf->field = V4L2_FIELD_NONE;
return 0;
@@ -828,15 +865,44 @@ static int ov5642_g_chip_ident(struct v4l2_subdev *sd,
return 0;
}
+static int ov5642_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
+ struct v4l2_rect *rect = &a->c;
+ int ret;
+
+ v4l_bound_align_image(&rect->width, 48, OV5642_MAX_WIDTH, 1,
+ &rect->height, 32, OV5642_MAX_HEIGHT, 1, 0);
+
+ priv->crop_rect.width = rect->width;
+ priv->crop_rect.height = rect->height;
+ priv->total_width = rect->width + BLANKING_EXTRA_WIDTH;
+ priv->total_height = max_t(int, rect->height +
+ BLANKING_EXTRA_HEIGHT,
+ BLANKING_MIN_HEIGHT);
+
+ ret = ov5642_write_array(client, ov5642_default_regs_init);
+ if (!ret)
+ ret = ov5642_set_resolution(sd);
+ if (!ret)
+ ret = ov5642_write_array(client, ov5642_default_regs_finalise);
+
+ return ret;
+}
+
static int ov5642_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
struct v4l2_rect *rect = &a->c;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- rect->top = 0;
- rect->left = 0;
- rect->width = OV5642_WIDTH;
- rect->height = OV5642_HEIGHT;
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ *rect = priv->crop_rect;
return 0;
}
@@ -845,8 +911,8 @@ static int ov5642_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
a->bounds.left = 0;
a->bounds.top = 0;
- a->bounds.width = OV5642_WIDTH;
- a->bounds.height = OV5642_HEIGHT;
+ a->bounds.width = OV5642_MAX_WIDTH;
+ a->bounds.height = OV5642_MAX_HEIGHT;
a->defrect = a->bounds;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
@@ -855,16 +921,47 @@ static int ov5642_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
+static int ov5642_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ cfg->type = V4L2_MBUS_CSI2;
+ cfg->flags = V4L2_MBUS_CSI2_2_LANE | V4L2_MBUS_CSI2_CHANNEL_0 |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+
+ return 0;
+}
+
+static int ov5642_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client;
+ int ret;
+
+ if (!on)
+ return 0;
+
+ client = v4l2_get_subdevdata(sd);
+ ret = ov5642_write_array(client, ov5642_default_regs_init);
+ if (!ret)
+ ret = ov5642_set_resolution(sd);
+ if (!ret)
+ ret = ov5642_write_array(client, ov5642_default_regs_finalise);
+
+ return ret;
+}
+
static struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
.s_mbus_fmt = ov5642_s_fmt,
.g_mbus_fmt = ov5642_g_fmt,
.try_mbus_fmt = ov5642_try_fmt,
.enum_mbus_fmt = ov5642_enum_fmt,
+ .s_crop = ov5642_s_crop,
.g_crop = ov5642_g_crop,
.cropcap = ov5642_cropcap,
+ .g_mbus_config = ov5642_g_mbus_config,
};
static struct v4l2_subdev_core_ops ov5642_subdev_core_ops = {
+ .s_power = ov5642_s_power,
.g_chip_ident = ov5642_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov5642_get_register,
@@ -877,28 +974,7 @@ static struct v4l2_subdev_ops ov5642_subdev_ops = {
.video = &ov5642_subdev_video_ops,
};
-/*
- * We have to provide soc-camera operations, but we don't have anything to say
- * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param
- */
-static unsigned long soc_ov5642_query_bus_param(struct soc_camera_device *icd)
-{
- return 0;
-}
-
-static int soc_ov5642_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return -EINVAL;
-}
-
-static struct soc_camera_ops soc_ov5642_ops = {
- .query_bus_param = soc_ov5642_query_bus_param,
- .set_bus_param = soc_ov5642_set_bus_param,
-};
-
-static int ov5642_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int ov5642_video_probe(struct i2c_client *client)
{
int ret;
u8 id_high, id_low;
@@ -929,16 +1005,9 @@ static int ov5642_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov5642 *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
int ret;
- if (!icd) {
- dev_err(&client->dev, "OV5642: missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "OV5642: missing platform data!\n");
return -EINVAL;
@@ -950,17 +1019,24 @@ static int ov5642_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &ov5642_subdev_ops);
- icd->ops = &soc_ov5642_ops;
- priv->fmt = &ov5642_colour_fmts[0];
+ priv->fmt = &ov5642_colour_fmts[0];
+
+ priv->crop_rect.width = OV5642_DEFAULT_WIDTH;
+ priv->crop_rect.height = OV5642_DEFAULT_HEIGHT;
+ priv->crop_rect.left = (OV5642_MAX_WIDTH - OV5642_DEFAULT_WIDTH) / 2;
+ priv->crop_rect.top = (OV5642_MAX_HEIGHT - OV5642_DEFAULT_HEIGHT) / 2;
+ priv->total_width = OV5642_DEFAULT_WIDTH + BLANKING_EXTRA_WIDTH;
+ priv->total_height = BLANKING_MIN_HEIGHT;
- ret = ov5642_video_probe(icd, client);
+ ret = ov5642_video_probe(client);
if (ret < 0)
goto error;
return 0;
error:
- icd->ops = NULL;
kfree(priv);
return ret;
}
@@ -968,10 +1044,8 @@ error:
static int ov5642_remove(struct i2c_client *client)
{
struct ov5642 *priv = to_ov5642(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- icd->ops = NULL;
if (icl->free_bus)
icl->free_bus(icl);
kfree(priv);
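
To tie the new ov5642 geometry code together, here is a rough sketch (not additional driver code) of how reg_write16() and the blanking constants introduced above combine; it reuses the patch's own names, only example_apply_geometry() itself is made up:

/* Sketch: program output size plus blanking with the helpers above.
 * Horizontal blanking is a fixed 500-pixel margin, vertical blanking is
 * clamped to at least 1000 lines to keep the buggy autoexposure inside
 * one frame period. */
static int example_apply_geometry(struct i2c_client *client,
				  int width, int height)
{
	int total_width = width + BLANKING_EXTRA_WIDTH;
	int total_height = max_t(int, height + BLANKING_EXTRA_HEIGHT,
				 BLANKING_MIN_HEIGHT);
	int ret;

	ret = reg_write16(client, REG_OUT_WIDTH_HIGH, width);
	if (!ret)
		ret = reg_write16(client, REG_OUT_HEIGHT_HIGH, height);
	if (!ret)
		ret = reg_write16(client, REG_OUT_TOTAL_WIDTH_HIGH,
				  total_width);
	if (!ret)
		ret = reg_write16(client, REG_OUT_TOTAL_HEIGHT_HIGH,
				  total_height);
	return ret;
}

With the 1280x720 defaults this yields a 1780-pixel-wide, 1000-line total frame, matching the values ov5642_probe() preloads into priv->total_width and priv->total_height.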
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index 456d9ad9ae5..9f2d26b1d4c 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -28,10 +28,12 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/module.h>
#include <media/soc_camera.h>
#include <media/v4l2-chip-ident.h>
-
+#include <media/v4l2-ctrls.h>
/* Register definitions */
#define REG_GAIN 0x00 /* range 00 - 3F */
@@ -177,20 +179,23 @@ struct ov6650_reg {
struct ov6650 {
struct v4l2_subdev subdev;
-
- int gain;
- int blue;
- int red;
- int saturation;
- int hue;
- int brightness;
- int exposure;
- int gamma;
- int aec;
- bool vflip;
- bool hflip;
- bool awb;
- bool agc;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* exposure/autoexposure cluster */
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ /* gain/autogain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ };
+ struct {
+ /* blue/red/autowhitebalance cluster */
+ struct v4l2_ctrl *autowb;
+ struct v4l2_ctrl *blue;
+ struct v4l2_ctrl *red;
+ };
bool half_scale; /* scale down output by 2 */
struct v4l2_rect rect; /* sensor cropping window */
unsigned long pclk_limit; /* from host */
@@ -210,126 +215,6 @@ static enum v4l2_mbus_pixelcode ov6650_codes[] = {
V4L2_MBUS_FMT_Y8_1X8,
};
-static const struct v4l2_queryctrl ov6650_controls[] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "AGC",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 0x3f,
- .step = 1,
- .default_value = DEF_GAIN,
- },
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "AWB",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = DEF_BLUE,
- },
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = DEF_RED,
- },
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 0xf,
- .step = 1,
- .default_value = 0x8,
- },
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = HUE_MASK,
- .step = 1,
- .default_value = DEF_HUE,
- },
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x80,
- },
- {
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "AEC",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = DEF_AECH,
- },
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x12,
- },
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
-};
-
/* read a register */
static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
{
@@ -419,213 +304,90 @@ static int ov6650_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-/* Alter bus settings on camera side */
-static int ov6650_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- int ret;
-
- flags = soc_camera_apply_sensor_flags(icl, flags);
-
- if (flags & SOCAM_PCLK_SAMPLE_RISING)
- ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
- else
- ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
- if (ret)
- return ret;
-
- if (flags & SOCAM_HSYNC_ACTIVE_LOW)
- ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
- else
- ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
- if (ret)
- return ret;
-
- if (flags & SOCAM_VSYNC_ACTIVE_HIGH)
- ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
- else
- ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
-
- return ret;
-}
-
-/* Request bus settings on camera side */
-static unsigned long ov6650_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
-
- unsigned long flags = SOCAM_MASTER |
- SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW |
- SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
/* Get status of additional camera capabilities */
-static int ov6650_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov6550_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
+ struct v4l2_subdev *sd = &priv->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- uint8_t reg;
- int ret = 0;
+ uint8_t reg, reg2;
+ int ret;
switch (ctrl->id) {
case V4L2_CID_AUTOGAIN:
- ctrl->value = priv->agc;
- break;
- case V4L2_CID_GAIN:
- if (priv->agc) {
- ret = ov6650_reg_read(client, REG_GAIN, &reg);
- ctrl->value = reg;
- } else {
- ctrl->value = priv->gain;
- }
- break;
+ ret = ov6650_reg_read(client, REG_GAIN, &reg);
+ if (!ret)
+ priv->gain->val = reg;
+ return ret;
case V4L2_CID_AUTO_WHITE_BALANCE:
- ctrl->value = priv->awb;
- break;
- case V4L2_CID_BLUE_BALANCE:
- if (priv->awb) {
- ret = ov6650_reg_read(client, REG_BLUE, &reg);
- ctrl->value = reg;
- } else {
- ctrl->value = priv->blue;
- }
- break;
- case V4L2_CID_RED_BALANCE:
- if (priv->awb) {
- ret = ov6650_reg_read(client, REG_RED, &reg);
- ctrl->value = reg;
- } else {
- ctrl->value = priv->red;
+ ret = ov6650_reg_read(client, REG_BLUE, &reg);
+ if (!ret)
+ ret = ov6650_reg_read(client, REG_RED, &reg2);
+ if (!ret) {
+ priv->blue->val = reg;
+ priv->red->val = reg2;
}
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = priv->saturation;
- break;
- case V4L2_CID_HUE:
- ctrl->value = priv->hue;
- break;
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = priv->brightness;
- break;
+ return ret;
case V4L2_CID_EXPOSURE_AUTO:
- ctrl->value = priv->aec;
- break;
- case V4L2_CID_EXPOSURE:
- if (priv->aec) {
- ret = ov6650_reg_read(client, REG_AECH, &reg);
- ctrl->value = reg;
- } else {
- ctrl->value = priv->exposure;
- }
- break;
- case V4L2_CID_GAMMA:
- ctrl->value = priv->gamma;
- break;
- case V4L2_CID_VFLIP:
- ctrl->value = priv->vflip;
- break;
- case V4L2_CID_HFLIP:
- ctrl->value = priv->hflip;
- break;
+ ret = ov6650_reg_read(client, REG_AECH, &reg);
+ if (!ret)
+ priv->exposure->val = reg;
+ return ret;
}
- return ret;
+ return -EINVAL;
}
/* Set status of additional camera capabilities */
-static int ov6650_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov6550_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
+ struct v4l2_subdev *sd = &priv->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- int ret = 0;
+ int ret;
switch (ctrl->id) {
case V4L2_CID_AUTOGAIN:
ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->value ? COMB_AGC : 0, COMB_AGC);
- if (!ret)
- priv->agc = ctrl->value;
- break;
- case V4L2_CID_GAIN:
- ret = ov6650_reg_write(client, REG_GAIN, ctrl->value);
- if (!ret)
- priv->gain = ctrl->value;
- break;
+ ctrl->val ? COMB_AGC : 0, COMB_AGC);
+ if (!ret && !ctrl->val)
+ ret = ov6650_reg_write(client, REG_GAIN, priv->gain->val);
+ return ret;
case V4L2_CID_AUTO_WHITE_BALANCE:
ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->value ? COMB_AWB : 0, COMB_AWB);
- if (!ret)
- priv->awb = ctrl->value;
- break;
- case V4L2_CID_BLUE_BALANCE:
- ret = ov6650_reg_write(client, REG_BLUE, ctrl->value);
- if (!ret)
- priv->blue = ctrl->value;
- break;
- case V4L2_CID_RED_BALANCE:
- ret = ov6650_reg_write(client, REG_RED, ctrl->value);
- if (!ret)
- priv->red = ctrl->value;
- break;
+ ctrl->val ? COMB_AWB : 0, COMB_AWB);
+ if (!ret && !ctrl->val) {
+ ret = ov6650_reg_write(client, REG_BLUE, priv->blue->val);
+ if (!ret)
+ ret = ov6650_reg_write(client, REG_RED,
+ priv->red->val);
+ }
+ return ret;
case V4L2_CID_SATURATION:
- ret = ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->value),
+ return ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->val),
SAT_MASK);
- if (!ret)
- priv->saturation = ctrl->value;
- break;
case V4L2_CID_HUE:
- ret = ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->value),
+ return ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->val),
HUE_MASK);
- if (!ret)
- priv->hue = ctrl->value;
- break;
case V4L2_CID_BRIGHTNESS:
- ret = ov6650_reg_write(client, REG_BRT, ctrl->value);
- if (!ret)
- priv->brightness = ctrl->value;
- break;
+ return ov6650_reg_write(client, REG_BRT, ctrl->val);
case V4L2_CID_EXPOSURE_AUTO:
- switch (ctrl->value) {
- case V4L2_EXPOSURE_AUTO:
- ret = ov6650_reg_rmw(client, REG_COMB, COMB_AEC, 0);
- break;
- default:
- ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_AEC);
- break;
- }
- if (!ret)
- priv->aec = ctrl->value;
- break;
- case V4L2_CID_EXPOSURE:
- ret = ov6650_reg_write(client, REG_AECH, ctrl->value);
- if (!ret)
- priv->exposure = ctrl->value;
- break;
+ ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val ==
+ V4L2_EXPOSURE_AUTO ? COMB_AEC : 0, COMB_AEC);
+ if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL)
+ ret = ov6650_reg_write(client, REG_AECH,
+ priv->exposure->val);
+ return ret;
case V4L2_CID_GAMMA:
- ret = ov6650_reg_write(client, REG_GAM1, ctrl->value);
- if (!ret)
- priv->gamma = ctrl->value;
- break;
+ return ov6650_reg_write(client, REG_GAM1, ctrl->val);
case V4L2_CID_VFLIP:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->value ? COMB_FLIP_V : 0, COMB_FLIP_V);
- if (!ret)
- priv->vflip = ctrl->value;
- break;
+ return ov6650_reg_rmw(client, REG_COMB,
+ ctrl->val ? COMB_FLIP_V : 0, COMB_FLIP_V);
case V4L2_CID_HFLIP:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->value ? COMB_FLIP_H : 0, COMB_FLIP_H);
- if (!ret)
- priv->hflip = ctrl->value;
- break;
+ return ov6650_reg_rmw(client, REG_COMB,
+ ctrl->val ? COMB_FLIP_H : 0, COMB_FLIP_H);
}
- return ret;
+ return -EINVAL;
}
/* Get chip identification */
@@ -778,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
@@ -1057,8 +819,7 @@ static int ov6650_prog_dflt(struct i2c_client *client)
return ret;
}
-static int ov6650_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int ov6650_video_probe(struct i2c_client *client)
{
u8 pidh, pidl, midh, midl;
int ret = 0;
@@ -1094,16 +855,12 @@ static int ov6650_video_probe(struct soc_camera_device *icd,
return ret;
}
-static struct soc_camera_ops ov6650_ops = {
- .set_bus_param = ov6650_set_bus_param,
- .query_bus_param = ov6650_query_bus_param,
- .controls = ov6650_controls,
- .num_controls = ARRAY_SIZE(ov6650_controls),
+static const struct v4l2_ctrl_ops ov6550_ctrl_ops = {
+ .g_volatile_ctrl = ov6550_g_volatile_ctrl,
+ .s_ctrl = ov6550_s_ctrl,
};
static struct v4l2_subdev_core_ops ov6650_core_ops = {
- .g_ctrl = ov6650_g_ctrl,
- .s_ctrl = ov6650_s_ctrl,
.g_chip_ident = ov6650_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov6650_get_register,
@@ -1111,6 +868,55 @@ static struct v4l2_subdev_core_ops ov6650_core_ops = {
#endif
};
+/* Request bus settings on camera side */
+static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_MASTER |
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
+/* Alter bus settings on camera side */
+static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ int ret;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
+ if (ret)
+ return ret;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
+ if (ret)
+ return ret;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
+
+ return ret;
+}
+
static struct v4l2_subdev_video_ops ov6650_video_ops = {
.s_stream = ov6650_s_stream,
.g_mbus_fmt = ov6650_g_fmt,
@@ -1122,6 +928,8 @@ static struct v4l2_subdev_video_ops ov6650_video_ops = {
.s_crop = ov6650_s_crop,
.g_parm = ov6650_g_parm,
.s_parm = ov6650_s_parm,
+ .g_mbus_config = ov6650_g_mbus_config,
+ .s_mbus_config = ov6650_s_mbus_config,
};
static struct v4l2_subdev_ops ov6650_subdev_ops = {
@@ -1136,16 +944,9 @@ static int ov6650_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov6650 *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
int ret;
- if (!icd) {
- dev_err(&client->dev, "Missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
@@ -1159,8 +960,46 @@ static int ov6650_probe(struct i2c_client *client,
}
v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
+ v4l2_ctrl_handler_init(&priv->hdl, 13);
+ v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ priv->autogain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ priv->gain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_GAIN, 0, 0x3f, 1, DEF_GAIN);
+ priv->autowb = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ priv->blue = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 0xff, 1, DEF_BLUE);
+ priv->red = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 0xff, 1, DEF_RED);
+ v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 0xf, 1, 0x8);
+ v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_HUE, 0, HUE_MASK, 1, DEF_HUE);
+ v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 0xff, 1, 0x80);
+ priv->autoexposure = v4l2_ctrl_new_std_menu(&priv->hdl,
+ &ov6550_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+ priv->exposure = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0xff, 1, DEF_AECH);
+ v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
+
+ priv->subdev.ctrl_handler = &priv->hdl;
+ if (priv->hdl.error) {
+ int err = priv->hdl.error;
- icd->ops = &ov6650_ops;
+ v4l2_ctrl_handler_free(&priv->hdl);
+ kfree(priv);
+ return err;
+ }
+ v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
+ v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
+ v4l2_ctrl_auto_cluster(2, &priv->autoexposure,
+ V4L2_EXPOSURE_MANUAL, true);
priv->rect.left = DEF_HSTRT << 1;
priv->rect.top = DEF_VSTRT << 1;
@@ -1170,10 +1009,12 @@ static int ov6650_probe(struct i2c_client *client,
priv->code = V4L2_MBUS_FMT_YUYV8_2X8;
priv->colorspace = V4L2_COLORSPACE_JPEG;
- ret = ov6650_video_probe(icd, client);
+ ret = ov6650_video_probe(client);
+ if (!ret)
+ ret = v4l2_ctrl_handler_setup(&priv->hdl);
if (ret) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
}
@@ -1184,6 +1025,8 @@ static int ov6650_remove(struct i2c_client *client)
{
struct ov6650 *priv = to_ov6650(client);
+ v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
return 0;
}
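
The ov6650 conversion above also replaces the old "if (priv->agc) read the register, else return the cached value" bookkeeping with control clusters. A compressed sketch of that idiom, using the same calls as the probe hunk (my_ctrl_ops stands in for the driver's ctrl ops, and the two control pointers must sit next to each other in memory, which the anonymous structs added to struct ov6650 guarantee):

/* Auto/manual cluster sketch: while autogain is on, the framework marks
 * the manual gain control inactive and, because set_volatile is true,
 * g_volatile_ctrl() reads the live value back from the sensor. */
priv->autogain = v4l2_ctrl_new_std(&priv->hdl, &my_ctrl_ops,
				   V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
priv->gain = v4l2_ctrl_new_std(&priv->hdl, &my_ctrl_ops,
			       V4L2_CID_GAIN, 0, 0x3f, 1, DEF_GAIN);

/* 2 controls, starting at &priv->autogain; 0 is the autogain value that
 * means "manual"; true requests volatile manual controls */
v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);

The exposure cluster works the same way, except that V4L2_EXPOSURE_MANUAL is the "manual" value of the menu control.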
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 397870f076c..9f6ce3d8a29 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -20,12 +20,14 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
+
+#include <media/ov772x.h>
+#include <media/soc_camera.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-subdev.h>
-#include <media/soc_camera.h>
-#include <media/soc_mediabus.h>
-#include <media/ov772x.h>
/*
* register offset
@@ -400,6 +402,7 @@ struct ov772x_win_size {
struct ov772x_priv {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
struct ov772x_camera_info *info;
const struct ov772x_color_format *cfmt;
const struct ov772x_win_size *win;
@@ -517,36 +520,6 @@ static const struct ov772x_win_size ov772x_win_qvga = {
.regs = ov772x_qvga_regs,
};
-static const struct v4l2_queryctrl ov772x_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_BAND_STOP_FILTER,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Band-stop filter",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 0,
- },
-};
-
/*
* general function
*/
@@ -620,75 +593,30 @@ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int ov772x_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return 0;
-}
-
-static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd)
-{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- struct ov772x_priv *priv = i2c_get_clientdata(client);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_HIGH;
-
- if (priv->info->flags & OV772X_FLAG_8BIT)
- flags |= SOCAM_DATAWIDTH_8;
- else
- flags |= SOCAM_DATAWIDTH_10;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
-static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ctrl->value = priv->flag_vflip;
- break;
- case V4L2_CID_HFLIP:
- ctrl->value = priv->flag_hflip;
- break;
- case V4L2_CID_BAND_STOP_FILTER:
- ctrl->value = priv->band_filter;
- break;
- }
- return 0;
-}
-
-static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct ov772x_priv *priv = container_of(ctrl->handler,
+ struct ov772x_priv, hdl);
+ struct v4l2_subdev *sd = &priv->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev);
int ret = 0;
u8 val;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- val = ctrl->value ? VFLIP_IMG : 0x00;
- priv->flag_vflip = ctrl->value;
+ val = ctrl->val ? VFLIP_IMG : 0x00;
+ priv->flag_vflip = ctrl->val;
if (priv->info->flags & OV772X_FLAG_VFLIP)
val ^= VFLIP_IMG;
- ret = ov772x_mask_set(client, COM3, VFLIP_IMG, val);
- break;
+ return ov772x_mask_set(client, COM3, VFLIP_IMG, val);
case V4L2_CID_HFLIP:
- val = ctrl->value ? HFLIP_IMG : 0x00;
- priv->flag_hflip = ctrl->value;
+ val = ctrl->val ? HFLIP_IMG : 0x00;
+ priv->flag_hflip = ctrl->val;
if (priv->info->flags & OV772X_FLAG_HFLIP)
val ^= HFLIP_IMG;
- ret = ov772x_mask_set(client, COM3, HFLIP_IMG, val);
- break;
+ return ov772x_mask_set(client, COM3, HFLIP_IMG, val);
case V4L2_CID_BAND_STOP_FILTER:
- if ((unsigned)ctrl->value > 256)
- ctrl->value = 256;
- if (ctrl->value == priv->band_filter)
- break;
- if (!ctrl->value) {
+ if (!ctrl->val) {
/* Switch the filter off, it is on now */
ret = ov772x_mask_set(client, BDBASE, 0xff, 0xff);
if (!ret)
@@ -696,7 +624,7 @@ static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
BNDF_ON_OFF, 0);
} else {
/* Switch the filter on, set AEC low limit */
- val = 256 - ctrl->value;
+ val = 256 - ctrl->val;
ret = ov772x_mask_set(client, COM8,
BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
@@ -704,11 +632,11 @@ static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
0xff, val);
}
if (!ret)
- priv->band_filter = ctrl->value;
- break;
+ priv->band_filter = ctrl->val;
+ return ret;
}
- return ret;
+ return -EINVAL;
}
static int ov772x_g_chip_ident(struct v4l2_subdev *sd,
@@ -822,13 +750,13 @@ static int ov772x_set_params(struct i2c_client *client, u32 *width, u32 *height,
goto ov772x_set_fmt_error;
ret = ov772x_mask_set(client,
- EDGE_TRSHLD, EDGE_THRESHOLD_MASK,
+ EDGE_TRSHLD, OV772X_EDGE_THRESHOLD_MASK,
priv->info->edgectrl.threshold);
if (ret < 0)
goto ov772x_set_fmt_error;
ret = ov772x_mask_set(client,
- EDGE_STRNGT, EDGE_STRENGTH_MASK,
+ EDGE_STRNGT, OV772X_EDGE_STRENGTH_MASK,
priv->info->edgectrl.strength);
if (ret < 0)
goto ov772x_set_fmt_error;
@@ -840,13 +768,13 @@ static int ov772x_set_params(struct i2c_client *client, u32 *width, u32 *height,
* set upper and lower limit
*/
ret = ov772x_mask_set(client,
- EDGE_UPPER, EDGE_UPPER_MASK,
+ EDGE_UPPER, OV772X_EDGE_UPPER_MASK,
priv->info->edgectrl.upper);
if (ret < 0)
goto ov772x_set_fmt_error;
ret = ov772x_mask_set(client,
- EDGE_LOWER, EDGE_LOWER_MASK,
+ EDGE_LOWER, OV772X_EDGE_LOWER_MASK,
priv->info->edgectrl.lower);
if (ret < 0)
goto ov772x_set_fmt_error;
@@ -1025,17 +953,12 @@ static int ov772x_try_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ov772x_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int ov772x_video_probe(struct i2c_client *client)
{
struct ov772x_priv *priv = to_ov772x(client);
u8 pid, ver;
const char *devname;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/*
* check and show product ID and manufacturer ID
*/
@@ -1064,20 +987,14 @@ static int ov772x_video_probe(struct soc_camera_device *icd,
ver,
i2c_smbus_read_byte_data(client, MIDH),
i2c_smbus_read_byte_data(client, MIDL));
-
- return 0;
+ return v4l2_ctrl_handler_setup(&priv->hdl);
}
-static struct soc_camera_ops ov772x_ops = {
- .set_bus_param = ov772x_set_bus_param,
- .query_bus_param = ov772x_query_bus_param,
- .controls = ov772x_controls,
- .num_controls = ARRAY_SIZE(ov772x_controls),
+static const struct v4l2_ctrl_ops ov772x_ctrl_ops = {
+ .s_ctrl = ov772x_s_ctrl,
};
static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
- .g_ctrl = ov772x_g_ctrl,
- .s_ctrl = ov772x_s_ctrl,
.g_chip_ident = ov772x_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov772x_g_register,
@@ -1095,6 +1012,21 @@ static int ov772x_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
return 0;
}
+static int ov772x_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
.s_stream = ov772x_s_stream,
.g_mbus_fmt = ov772x_g_fmt,
@@ -1103,6 +1035,7 @@ static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
.cropcap = ov772x_cropcap,
.g_crop = ov772x_g_crop,
.enum_mbus_fmt = ov772x_enum_fmt,
+ .g_mbus_config = ov772x_g_mbus_config,
};
static struct v4l2_subdev_ops ov772x_subdev_ops = {
@@ -1117,20 +1050,15 @@ static struct v4l2_subdev_ops ov772x_subdev_ops = {
static int ov772x_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
- struct ov772x_priv *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
- int ret;
-
- if (!icd) {
- dev_err(&client->dev, "OV772X: missing soc-camera data!\n");
- return -EINVAL;
- }
+ struct ov772x_priv *priv;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ int ret;
- icl = to_soc_camera_link(icd);
- if (!icl || !icl->priv)
+ if (!icl || !icl->priv) {
+ dev_err(&client->dev, "OV772X: missing platform data!\n");
return -EINVAL;
+ }
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
@@ -1146,12 +1074,24 @@ static int ov772x_probe(struct i2c_client *client,
priv->info = icl->priv;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);
+ v4l2_ctrl_handler_init(&priv->hdl, 3);
+ v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0);
+ priv->subdev.ctrl_handler = &priv->hdl;
+ if (priv->hdl.error) {
+ int err = priv->hdl.error;
- icd->ops = &ov772x_ops;
+ v4l2_ctrl_handler_free(&priv->hdl);
+ kfree(priv);
+ return err;
+ }
- ret = ov772x_video_probe(icd, client);
+ ret = ov772x_video_probe(client);
if (ret) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
}
@@ -1161,9 +1101,9 @@ static int ov772x_probe(struct i2c_client *client,
static int ov772x_remove(struct i2c_client *client)
{
struct ov772x_priv *priv = to_ov772x(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- icd->ops = NULL;
+ v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
return 0;
}
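
The one non-trivial control in the ov772x hunk is the band-stop filter, where the control value is inverted before it reaches the AEC base register. A trimmed sketch of that arithmetic (the filter-off BDBASE reset and some error handling from the real s_ctrl are omitted, and example_set_band_filter() is not part of the patch):

/* A band-stop filter value v in 1..256 programs BDBASE with 256 - v and
 * enables the filter via BNDF_ON_OFF in COM8; 0 disables it. */
static int example_set_band_filter(struct i2c_client *client, int val)
{
	int ret;

	if (!val)
		return ov772x_mask_set(client, COM8, BNDF_ON_OFF, 0);

	ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
	if (!ret)
		ret = ov772x_mask_set(client, BDBASE, 0xff, 256 - val);
	return ret;
}

The explicit clamp of values above 256 in the old ov772x_s_ctrl() is gone because the control framework now enforces the 0..256 range declared in v4l2_ctrl_new_std().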
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 3681a6ff081..a4f99797eb5 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -24,10 +24,13 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
+
+#include <media/soc_camera.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-common.h>
-#include <media/soc_camera.h>
+#include <media/v4l2-ctrls.h>
#include "ov9640.h"
@@ -162,27 +165,6 @@ static enum v4l2_mbus_pixelcode ov9640_codes[] = {
V4L2_MBUS_FMT_RGB565_2X8_LE,
};
-static const struct v4l2_queryctrl ov9640_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
-};
-
/* read a register */
static int ov9640_reg_read(struct i2c_client *client, u8 reg, u8 *val)
{
@@ -284,75 +266,25 @@ static int ov9640_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-/* Alter bus settings on camera side */
-static int ov9640_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return 0;
-}
-
-/* Request bus settings on camera side */
-static unsigned long ov9640_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
-
- /*
- * REVISIT: the camera probably can do 10 bit transfers, but I don't
- * have those pins connected on my hardware.
- */
- unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
-/* Get status of additional camera capabilities */
-static int ov9640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ov9640_priv *priv = to_ov9640_sensor(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ctrl->value = priv->flag_vflip;
- break;
- case V4L2_CID_HFLIP:
- ctrl->value = priv->flag_hflip;
- break;
- }
- return 0;
-}
-
/* Set status of additional camera capabilities */
-static int ov9640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov9640_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov9640_priv *priv = to_ov9640_sensor(sd);
-
- int ret = 0;
+ struct ov9640_priv *priv = container_of(ctrl->handler, struct ov9640_priv, hdl);
+ struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- priv->flag_vflip = ctrl->value;
- if (ctrl->value)
- ret = ov9640_reg_rmw(client, OV9640_MVFP,
+ if (ctrl->val)
+ return ov9640_reg_rmw(client, OV9640_MVFP,
OV9640_MVFP_V, 0);
- else
- ret = ov9640_reg_rmw(client, OV9640_MVFP,
- 0, OV9640_MVFP_V);
- break;
+ return ov9640_reg_rmw(client, OV9640_MVFP, 0, OV9640_MVFP_V);
case V4L2_CID_HFLIP:
- priv->flag_hflip = ctrl->value;
- if (ctrl->value)
- ret = ov9640_reg_rmw(client, OV9640_MVFP,
+ if (ctrl->val)
+ return ov9640_reg_rmw(client, OV9640_MVFP,
OV9640_MVFP_H, 0);
- else
- ret = ov9640_reg_rmw(client, OV9640_MVFP,
- 0, OV9640_MVFP_H);
- break;
+ return ov9640_reg_rmw(client, OV9640_MVFP, 0, OV9640_MVFP_H);
}
-
- return ret;
+ return -EINVAL;
}
/* Get chip identification */
@@ -646,10 +578,7 @@ static int ov9640_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-
-
-static int ov9640_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int ov9640_video_probe(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9640_priv *priv = to_ov9640_sensor(sd);
@@ -657,29 +586,19 @@ static int ov9640_video_probe(struct soc_camera_device *icd,
const char *devname;
int ret = 0;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/*
* check and show product ID and manufacturer ID
*/
ret = ov9640_reg_read(client, OV9640_PID, &pid);
+ if (!ret)
+ ret = ov9640_reg_read(client, OV9640_VER, &ver);
+ if (!ret)
+ ret = ov9640_reg_read(client, OV9640_MIDH, &midh);
+ if (!ret)
+ ret = ov9640_reg_read(client, OV9640_MIDL, &midl);
if (ret)
- goto err;
-
- ret = ov9640_reg_read(client, OV9640_VER, &ver);
- if (ret)
- goto err;
-
- ret = ov9640_reg_read(client, OV9640_MIDH, &midh);
- if (ret)
- goto err;
-
- ret = ov9640_reg_read(client, OV9640_MIDL, &midl);
- if (ret)
- goto err;
+ return ret;
switch (VERSION(pid, ver)) {
case OV9640_V2:
@@ -693,27 +612,20 @@ static int ov9640_video_probe(struct soc_camera_device *icd,
break;
default:
dev_err(&client->dev, "Product ID error %x:%x\n", pid, ver);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
dev_info(&client->dev, "%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
devname, pid, ver, midh, midl);
-err:
- return ret;
+ return v4l2_ctrl_handler_setup(&priv->hdl);
}
-static struct soc_camera_ops ov9640_ops = {
- .set_bus_param = ov9640_set_bus_param,
- .query_bus_param = ov9640_query_bus_param,
- .controls = ov9640_controls,
- .num_controls = ARRAY_SIZE(ov9640_controls),
+static const struct v4l2_ctrl_ops ov9640_ctrl_ops = {
+ .s_ctrl = ov9640_s_ctrl,
};
static struct v4l2_subdev_core_ops ov9640_core_ops = {
- .g_ctrl = ov9640_g_ctrl,
- .s_ctrl = ov9640_s_ctrl,
.g_chip_ident = ov9640_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov9640_get_register,
@@ -722,6 +634,22 @@ static struct v4l2_subdev_core_ops ov9640_core_ops = {
};
+/* Request bus settings on camera side */
+static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
static struct v4l2_subdev_video_ops ov9640_video_ops = {
.s_stream = ov9640_s_stream,
.s_mbus_fmt = ov9640_s_fmt,
@@ -729,7 +657,7 @@ static struct v4l2_subdev_video_ops ov9640_video_ops = {
.enum_mbus_fmt = ov9640_enum_fmt,
.cropcap = ov9640_cropcap,
.g_crop = ov9640_g_crop,
-
+ .g_mbus_config = ov9640_g_mbus_config,
};
static struct v4l2_subdev_ops ov9640_subdev_ops = {
@@ -744,16 +672,9 @@ static int ov9640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov9640_priv *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
int ret;
- if (!icd) {
- dev_err(&client->dev, "Missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
@@ -768,12 +689,23 @@ static int ov9640_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9640_subdev_ops);
- icd->ops = &ov9640_ops;
+ v4l2_ctrl_handler_init(&priv->hdl, 2);
+ v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ priv->subdev.ctrl_handler = &priv->hdl;
+ if (priv->hdl.error) {
+ int err = priv->hdl.error;
+
+ v4l2_ctrl_handler_free(&priv->hdl);
+ kfree(priv);
+ return err;
+ }
- ret = ov9640_video_probe(icd, client);
+ ret = ov9640_video_probe(client);
if (ret) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
}
@@ -785,6 +717,8 @@ static int ov9640_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9640_priv *priv = to_ov9640_sensor(sd);
+ v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
return 0;
}
diff --git a/drivers/media/video/ov9640.h b/drivers/media/video/ov9640.h
index f8a51b70792..6b33a972c83 100644
--- a/drivers/media/video/ov9640.h
+++ b/drivers/media/video/ov9640.h
@@ -198,12 +198,10 @@ struct ov9640_reg {
struct ov9640_priv {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
int model;
int revision;
-
- bool flag_vflip;
- bool flag_hflip;
};
#endif /* __DRIVERS_MEDIA_VIDEO_OV9640_H__ */
diff --git a/drivers/media/video/ov9740.c b/drivers/media/video/ov9740.c
index edd1ffcca30..d9a9f7174f7 100644
--- a/drivers/media/video/ov9740.c
+++ b/drivers/media/video/ov9740.c
@@ -14,8 +14,11 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <media/v4l2-chip-ident.h>
+#include <linux/v4l2-mediabus.h>
+
#include <media/soc_camera.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#define to_ov9740(sd) container_of(sd, struct ov9740_priv, subdev)
@@ -192,6 +195,7 @@ struct ov9740_reg {
struct ov9740_priv {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
int ident;
u16 model;
@@ -392,27 +396,6 @@ static enum v4l2_mbus_pixelcode ov9740_codes[] = {
V4L2_MBUS_FMT_YUYV8_2X8,
};
-static const struct v4l2_queryctrl ov9740_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
-};
-
/* read a register */
static int ov9740_reg_read(struct i2c_client *client, u16 reg, u8 *val)
{
@@ -560,25 +543,6 @@ static int ov9740_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
-/* Alter bus settings on camera side */
-static int ov9740_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return 0;
-}
-
-/* Request bus settings on camera side */
-static unsigned long ov9740_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
-
- unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
/* select nearest higher resolution for capture */
static void ov9740_res_roundup(u32 *width, u32 *height)
{
@@ -788,36 +752,18 @@ static int ov9740_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
return 0;
}
-/* Get status of additional camera capabilities */
-static int ov9740_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ov9740_priv *priv = to_ov9740(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ctrl->value = priv->flag_vflip;
- break;
- case V4L2_CID_HFLIP:
- ctrl->value = priv->flag_hflip;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
/* Set status of additional camera capabilities */
-static int ov9740_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov9740_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct ov9740_priv *priv = to_ov9740(sd);
+ struct ov9740_priv *priv =
+ container_of(ctrl->handler, struct ov9740_priv, hdl);
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- priv->flag_vflip = ctrl->value;
+ priv->flag_vflip = ctrl->val;
break;
case V4L2_CID_HFLIP:
- priv->flag_hflip = ctrl->value;
+ priv->flag_hflip = ctrl->val;
break;
default:
return -EINVAL;
@@ -890,18 +836,13 @@ static int ov9740_set_register(struct v4l2_subdev *sd,
}
#endif
-static int ov9740_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int ov9740_video_probe(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9740_priv *priv = to_ov9740(sd);
u8 modelhi, modello;
int ret;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/*
* check and show product ID and manufacturer ID
*/
@@ -942,25 +883,33 @@ err:
return ret;
}
-static struct soc_camera_ops ov9740_ops = {
- .set_bus_param = ov9740_set_bus_param,
- .query_bus_param = ov9740_query_bus_param,
- .controls = ov9740_controls,
- .num_controls = ARRAY_SIZE(ov9740_controls),
-};
+/* Request bus settings on camera side */
+static int ov9740_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
static struct v4l2_subdev_video_ops ov9740_video_ops = {
- .s_stream = ov9740_s_stream,
- .s_mbus_fmt = ov9740_s_fmt,
- .try_mbus_fmt = ov9740_try_fmt,
- .enum_mbus_fmt = ov9740_enum_fmt,
- .cropcap = ov9740_cropcap,
- .g_crop = ov9740_g_crop,
+ .s_stream = ov9740_s_stream,
+ .s_mbus_fmt = ov9740_s_fmt,
+ .try_mbus_fmt = ov9740_try_fmt,
+ .enum_mbus_fmt = ov9740_enum_fmt,
+ .cropcap = ov9740_cropcap,
+ .g_crop = ov9740_g_crop,
+ .g_mbus_config = ov9740_g_mbus_config,
};
static struct v4l2_subdev_core_ops ov9740_core_ops = {
- .g_ctrl = ov9740_g_ctrl,
- .s_ctrl = ov9740_s_ctrl,
.g_chip_ident = ov9740_g_chip_ident,
.s_power = ov9740_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -974,6 +923,10 @@ static struct v4l2_subdev_ops ov9740_subdev_ops = {
.video = &ov9740_video_ops,
};
+static const struct v4l2_ctrl_ops ov9740_ctrl_ops = {
+ .s_ctrl = ov9740_s_ctrl,
+};
+
/*
* i2c_driver function
*/
@@ -981,16 +934,9 @@ static int ov9740_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov9740_priv *priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
int ret;
- if (!icd) {
- dev_err(&client->dev, "Missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
@@ -1003,12 +949,24 @@ static int ov9740_probe(struct i2c_client *client,
}
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9740_subdev_ops);
+ v4l2_ctrl_handler_init(&priv->hdl, 2);
+ v4l2_ctrl_new_std(&priv->hdl, &ov9740_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov9740_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ priv->subdev.ctrl_handler = &priv->hdl;
+ if (priv->hdl.error) {
+ int err = priv->hdl.error;
- icd->ops = &ov9740_ops;
+ v4l2_ctrl_handler_free(&priv->hdl);
+ kfree(priv);
+ return err;
+ }
- ret = ov9740_video_probe(icd, client);
+ ret = ov9740_video_probe(client);
+ if (!ret)
+ ret = v4l2_ctrl_handler_setup(&priv->hdl);
if (ret < 0) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
}
@@ -1019,8 +977,9 @@ static int ov9740_remove(struct i2c_client *client)
{
struct ov9740_priv *priv = i2c_get_clientdata(client);
+ v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_ctrl_handler_free(&priv->hdl);
kfree(priv);
-
return 0;
}
diff --git a/drivers/media/video/pvrusb2/Makefile b/drivers/media/video/pvrusb2/Makefile
index de2fc14f043..c17f37d964a 100644
--- a/drivers/media/video/pvrusb2/Makefile
+++ b/drivers/media/video/pvrusb2/Makefile
@@ -16,7 +16,7 @@ pvrusb2-objs := pvrusb2-i2c-core.o \
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index e799331389b..c6da8f77e1a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -28,6 +28,7 @@ pvr2_device_desc structures.
#include "pvrusb2-devattr.h"
#include <linux/usb.h>
+#include <linux/module.h>
/* This is needed in order to pull in tuner type ids... */
#include <linux/i2c.h>
#include <media/tuner.h>
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index e98d3821279..122b45760f0 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -2993,6 +2994,13 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id,
pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \
}
+int pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw, v4l2_std_id *std)
+{
+ v4l2_device_call_all(&hdw->v4l2_dev, 0,
+ video, querystd, std);
+ return 0;
+}
+
/* Execute whatever commands are required to update the state of all the
sub-devices so that they match our current control values. */
static void pvr2_subdev_update(struct pvr2_hdw *hdw)
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
index d7753ae9ff4..66546580b17 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -214,6 +214,9 @@ struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *);
int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,struct v4l2_standard *std,
unsigned int idx);
+/* Get the detected video standard */
+int pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw, v4l2_std_id *std);
+
/* Enable / disable retrieval of CPU firmware or prom contents. This must
be enabled before pvr2_hdw_cpufw_get() will function. Note that doing
this may prevent the device from running (and leaving this mode may
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index e72d5103e77..885ce11f222 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -19,6 +19,7 @@
*/
#include <linux/i2c.h>
+#include <linux/module.h>
#include <media/ir-kbd-i2c.h>
#include "pvrusb2-i2c-core.h"
#include "pvrusb2-hdw-internal.h"
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index e27f8ab7696..6d666174dbb 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -29,6 +29,7 @@
#include "pvrusb2-v4l2.h"
#include "pvrusb2-ioread.h"
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -227,6 +228,14 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
+ case VIDIOC_QUERYSTD:
+ {
+ v4l2_std_id *std = arg;
+ *std = V4L2_STD_ALL;
+ ret = pvr2_hdw_get_detected_std(hdw, std);
+ break;
+ }
+
case VIDIOC_G_STD:
{
int val = 0;
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 51ca3589b1b..01ff643e682 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -744,9 +744,9 @@ static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
/***************************************************************************/
/* Videobuf2 operations */
-static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned long sizes[],
- void *alloc_ctxs[])
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
@@ -816,7 +816,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
-static int start_streaming(struct vb2_queue *vq)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 8c70e64444e..a10ff6b64ac 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -83,6 +83,7 @@ static const struct v4l2_ctrl_config pwc_contour_cfg = {
.id = PWC_CID_CUSTOM(contour),
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Contour",
+ .flags = V4L2_CTRL_FLAG_SLIDER,
.min = 0,
.max = 63,
.step = 1,
@@ -206,8 +207,7 @@ int pwc_init_controls(struct pwc_device *pdev)
pdev->blue_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
V4L2_CID_BLUE_BALANCE, 0, 255, 1, def);
- v4l2_ctrl_auto_cluster(3, &pdev->auto_white_balance, awb_manual,
- pdev->auto_white_balance->cur.val == awb_auto);
+ v4l2_ctrl_auto_cluster(3, &pdev->auto_white_balance, awb_manual, true);
/* autogain, gain */
r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AGC_MODE_FORMATTER, &def);
@@ -331,12 +331,12 @@ int pwc_init_controls(struct pwc_device *pdev)
pdev->restore_user = v4l2_ctrl_new_custom(hdl, &pwc_restore_user_cfg,
NULL);
if (pdev->restore_user)
- pdev->restore_user->flags = V4L2_CTRL_FLAG_UPDATE;
+ pdev->restore_user->flags |= V4L2_CTRL_FLAG_UPDATE;
pdev->restore_factory = v4l2_ctrl_new_custom(hdl,
&pwc_restore_factory_cfg,
NULL);
if (pdev->restore_factory)
- pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
+ pdev->restore_factory->flags |= V4L2_CTRL_FLAG_UPDATE;
if (!(pdev->features & FEATURE_MOTOR_PANTILT))
return hdl->error;
@@ -563,8 +563,10 @@ static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
- if (pdev->color_bal_valid && time_before(jiffies,
- pdev->last_color_bal_update + HZ / 4)) {
+ if (pdev->color_bal_valid &&
+ (pdev->auto_white_balance->val != awb_auto ||
+ time_before(jiffies,
+ pdev->last_color_bal_update + HZ / 4))) {
pdev->red_balance->val = pdev->last_red_balance;
pdev->blue_balance->val = pdev->last_blue_balance;
break;
@@ -630,7 +632,7 @@ leave:
static int pwc_set_awb(struct pwc_device *pdev)
{
- int ret = 0;
+ int ret;
if (pdev->auto_white_balance->is_new) {
ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
@@ -639,52 +641,34 @@ static int pwc_set_awb(struct pwc_device *pdev)
if (ret)
return ret;
- /* Update val when coming from auto or going to a preset */
- if (pdev->red_balance->is_volatile ||
- pdev->auto_white_balance->val == awb_indoor ||
- pdev->auto_white_balance->val == awb_outdoor ||
- pdev->auto_white_balance->val == awb_fl) {
- if (!pdev->red_balance->is_new)
- pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
- READ_RED_GAIN_FORMATTER,
- &pdev->red_balance->val);
- if (!pdev->blue_balance->is_new)
- pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
- READ_BLUE_GAIN_FORMATTER,
- &pdev->blue_balance->val);
- }
- if (pdev->auto_white_balance->val == awb_auto) {
- pdev->red_balance->is_volatile = true;
- pdev->blue_balance->is_volatile = true;
+ if (pdev->auto_white_balance->val != awb_manual)
pdev->color_bal_valid = false; /* Force cache update */
- } else {
- pdev->red_balance->is_volatile = false;
- pdev->blue_balance->is_volatile = false;
- }
}
+ if (pdev->auto_white_balance->val != awb_manual)
+ return 0;
- if (ret == 0 && pdev->red_balance->is_new) {
- if (pdev->auto_white_balance->val != awb_manual)
- return -EBUSY;
+ if (pdev->red_balance->is_new) {
ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
PRESET_MANUAL_RED_GAIN_FORMATTER,
pdev->red_balance->val);
+ if (ret)
+ return ret;
}
- if (ret == 0 && pdev->blue_balance->is_new) {
- if (pdev->auto_white_balance->val != awb_manual)
- return -EBUSY;
+ if (pdev->blue_balance->is_new) {
ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
PRESET_MANUAL_BLUE_GAIN_FORMATTER,
pdev->blue_balance->val);
+ if (ret)
+ return ret;
}
- return ret;
+ return 0;
}
/* For CODEC2 models which have separate autogain and auto exposure */
static int pwc_set_autogain(struct pwc_device *pdev)
{
- int ret = 0;
+ int ret;
if (pdev->autogain->is_new) {
ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
@@ -692,27 +676,28 @@ static int pwc_set_autogain(struct pwc_device *pdev)
pdev->autogain->val ? 0 : 0xff);
if (ret)
return ret;
+
if (pdev->autogain->val)
pdev->gain_valid = false; /* Force cache update */
- else if (!pdev->gain->is_new)
- pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
- READ_AGC_FORMATTER,
- &pdev->gain->val);
}
- if (ret == 0 && pdev->gain->is_new) {
- if (pdev->autogain->val)
- return -EBUSY;
+
+ if (pdev->autogain->val)
+ return 0;
+
+ if (pdev->gain->is_new) {
ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
PRESET_AGC_FORMATTER,
pdev->gain->val);
+ if (ret)
+ return ret;
}
- return ret;
+ return 0;
}
/* For CODEC2 models which have separate autogain and auto exposure */
static int pwc_set_exposure_auto(struct pwc_device *pdev)
{
- int ret = 0;
+ int ret;
int is_auto = pdev->exposure_auto->val == V4L2_EXPOSURE_AUTO;
if (pdev->exposure_auto->is_new) {
@@ -721,27 +706,28 @@ static int pwc_set_exposure_auto(struct pwc_device *pdev)
is_auto ? 0 : 0xff);
if (ret)
return ret;
+
if (is_auto)
pdev->exposure_valid = false; /* Force cache update */
- else if (!pdev->exposure->is_new)
- pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
- READ_SHUTTER_FORMATTER,
- &pdev->exposure->val);
}
- if (ret == 0 && pdev->exposure->is_new) {
- if (is_auto)
- return -EBUSY;
+
+ if (is_auto)
+ return 0;
+
+ if (pdev->exposure->is_new) {
ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
PRESET_SHUTTER_FORMATTER,
pdev->exposure->val);
+ if (ret)
+ return ret;
}
- return ret;
+ return 0;
}
/* For CODEC3 models which have autogain controlling both gain and exposure */
static int pwc_set_autogain_expo(struct pwc_device *pdev)
{
- int ret = 0;
+ int ret;
if (pdev->autogain->is_new) {
ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
@@ -749,35 +735,32 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
pdev->autogain->val ? 0 : 0xff);
if (ret)
return ret;
+
if (pdev->autogain->val) {
pdev->gain_valid = false; /* Force cache update */
pdev->exposure_valid = false; /* Force cache update */
- } else {
- if (!pdev->gain->is_new)
- pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
- READ_AGC_FORMATTER,
- &pdev->gain->val);
- if (!pdev->exposure->is_new)
- pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
- READ_SHUTTER_FORMATTER,
- &pdev->exposure->val);
}
}
- if (ret == 0 && pdev->gain->is_new) {
- if (pdev->autogain->val)
- return -EBUSY;
+
+ if (pdev->autogain->val)
+ return 0;
+
+ if (pdev->gain->is_new) {
ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
PRESET_AGC_FORMATTER,
pdev->gain->val);
+ if (ret)
+ return ret;
}
- if (ret == 0 && pdev->exposure->is_new) {
- if (pdev->autogain->val)
- return -EBUSY;
+
+ if (pdev->exposure->is_new) {
ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
PRESET_SHUTTER_FORMATTER,
pdev->exposure->val);
+ if (ret)
+ return ret;
}
- return ret;
+ return 0;
}
static int pwc_set_motor(struct pwc_device *pdev)
@@ -878,10 +861,6 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
pdev->autocontour->val ? 0 : 0xff);
}
if (ret == 0 && pdev->contour->is_new) {
- if (pdev->autocontour->val) {
- ret = -EBUSY;
- break;
- }
ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
PRESET_CONTOUR_FORMATTER,
pdev->contour->val);
@@ -1099,6 +1078,14 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
return 0;
}
+static int pwc_log_status(struct file *file, void *priv)
+{
+ struct pwc_device *pdev = video_drvdata(file);
+
+ v4l2_ctrl_handler_log_status(&pdev->ctrl_handler, PWC_NAME);
+ return 0;
+}
+
static long pwc_default(struct file *file, void *fh, bool valid_prio,
int cmd, void *arg)
{
@@ -1122,6 +1109,7 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_dqbuf = pwc_dqbuf,
.vidioc_streamon = pwc_streamon,
.vidioc_streamoff = pwc_streamoff,
+ .vidioc_log_status = pwc_log_status,
.vidioc_enum_framesizes = pwc_enum_framesizes,
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
.vidioc_default = pwc_default,
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index d07df22a5ec..79fb22c89ae 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -214,6 +214,7 @@ struct pxa_camera_dev {
unsigned long ciclk;
unsigned long mclk;
u32 mclk_divisor;
+ u16 width_flags; /* max 10 bits */
struct list_head capture;
@@ -1020,37 +1021,20 @@ static int test_platform_param(struct pxa_camera_dev *pcdev,
* quick capture interface supports both.
*/
*flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ?
- SOCAM_MASTER : SOCAM_SLAVE) |
- SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_HSYNC_ACTIVE_LOW |
- SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_VSYNC_ACTIVE_LOW |
- SOCAM_DATA_ACTIVE_HIGH |
- SOCAM_PCLK_SAMPLE_RISING |
- SOCAM_PCLK_SAMPLE_FALLING;
+ V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
/* If requested data width is supported by the platform, use it */
- switch (buswidth) {
- case 10:
- if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10))
- return -EINVAL;
- *flags |= SOCAM_DATAWIDTH_10;
- break;
- case 9:
- if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9))
- return -EINVAL;
- *flags |= SOCAM_DATAWIDTH_9;
- break;
- case 8:
- if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8))
- return -EINVAL;
- *flags |= SOCAM_DATAWIDTH_8;
- break;
- default:
- return -EINVAL;
- }
+ if ((1 << (buswidth - 1)) & pcdev->width_flags)
+ return 0;
- return 0;
+ return -EINVAL;
}
static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
@@ -1070,12 +1054,12 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
* Datawidth is now guaranteed to be equal to one of the three values.
* We fix bit-per-pixel equal to data-width...
*/
- switch (flags & SOCAM_DATAWIDTH_MASK) {
- case SOCAM_DATAWIDTH_10:
+ switch (icd->current_fmt->host_fmt->bits_per_sample) {
+ case 10:
dw = 4;
bpp = 0x40;
break;
- case SOCAM_DATAWIDTH_9:
+ case 9:
dw = 3;
bpp = 0x20;
break;
@@ -1084,7 +1068,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
* Actually it can only be 8 now,
* default is just to silence compiler warnings
*/
- case SOCAM_DATAWIDTH_8:
+ case 8:
dw = 2;
bpp = 0;
}
@@ -1093,11 +1077,11 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
cicr4 |= CICR4_PCLK_EN;
if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
cicr4 |= CICR4_MCLK_EN;
- if (flags & SOCAM_PCLK_SAMPLE_FALLING)
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
cicr4 |= CICR4_PCP;
- if (flags & SOCAM_HSYNC_ACTIVE_LOW)
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
cicr4 |= CICR4_HSP;
- if (flags & SOCAM_VSYNC_ACTIVE_LOW)
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
cicr4 |= CICR4_VSP;
cicr0 = __raw_readl(pcdev->base + CICR0);
@@ -1151,9 +1135,11 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
- unsigned long bus_flags, camera_flags, common_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long bus_flags, common_flags;
int ret;
struct pxa_cam *cam = icd->host_priv;
@@ -1162,44 +1148,58 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
if (ret < 0)
return ret;
- camera_flags = icd->ops->query_bus_param(icd);
-
- common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
- if (!common_flags)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = bus_flags;
+ }
pcdev->channels = 1;
/* Make choices based on platform preferences */
- if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
if (pcdev->platform_flags & PXA_CAMERA_HSP)
- common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
if (pcdev->platform_flags & PXA_CAMERA_VSP)
- common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
- (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
if (pcdev->platform_flags & PXA_CAMERA_PCP)
- common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
else
- common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
}
- cam->flags = common_flags;
-
- ret = icd->ops->set_bus_param(icd, common_flags);
- if (ret < 0)
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
return ret;
+ }
+
+ cam->flags = common_flags;
pxa_camera_setup_cicr(icd, common_flags, pixfmt);
@@ -1209,17 +1209,31 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
- unsigned long bus_flags, camera_flags;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long bus_flags, common_flags;
int ret = test_platform_param(pcdev, buswidth, &bus_flags);
if (ret < 0)
return ret;
- camera_flags = icd->ops->query_bus_param(icd);
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret == -ENOIOCTLCMD) {
+ ret = 0;
+ }
- return soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL;
+ return ret;
}
static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
@@ -1687,6 +1701,12 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev)
"data widths, using default 10 bit\n");
pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
}
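+ /* Bit (n - 1) set in width_flags means an n-bit data bus width is supported */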
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
+ pcdev->width_flags = 1 << 7;
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9)
+ pcdev->width_flags |= 1 << 8;
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10)
+ pcdev->width_flags |= 1 << 9;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
if (!pcdev->mclk) {
dev_warn(&pdev->dev,
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 847ccc067e8..9937386a3ba 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -11,13 +11,15 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/rj54n1cb0c.h>
#include <media/soc_camera.h>
-#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#define RJ54N1_DEV_CODE 0x0400
#define RJ54N1_DEV_CODE2 0x0401
@@ -148,6 +150,7 @@ struct rj54n1_clock_div {
struct rj54n1 {
struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
struct rj54n1_clock_div clk_div;
const struct rj54n1_datafmt *fmt;
struct v4l2_rect rect; /* Sensor window */
@@ -499,31 +502,6 @@ static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
}
-static int rj54n1_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
-
- if (flags & SOCAM_PCLK_SAMPLE_RISING)
- return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
- else
- return reg_write(client, RJ54N1_OUT_SIGPO, 0);
-}
-
-static unsigned long rj54n1_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- const unsigned long flags =
- SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
- SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_DATA_ACTIVE_HIGH;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
static int rj54n1_set_rect(struct i2c_client *client,
u16 reg_x, u16 reg_y, u16 reg_xy,
u32 width, u32 height)
@@ -1202,134 +1180,51 @@ static int rj54n1_s_register(struct v4l2_subdev *sd,
}
#endif
-static const struct v4l2_queryctrl rj54n1_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 66,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto white balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
-};
-
-static struct soc_camera_ops rj54n1_ops = {
- .set_bus_param = rj54n1_set_bus_param,
- .query_bus_param = rj54n1_query_bus_param,
- .controls = rj54n1_controls,
- .num_controls = ARRAY_SIZE(rj54n1_controls),
-};
-
-static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct rj54n1 *rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl);
+ struct v4l2_subdev *sd = &rj54n1->subdev;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
int data;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- data = reg_read(client, RJ54N1_MIRROR_STILL_MODE);
- if (data < 0)
- return -EIO;
- ctrl->value = !(data & 1);
- break;
- case V4L2_CID_HFLIP:
- data = reg_read(client, RJ54N1_MIRROR_STILL_MODE);
- if (data < 0)
- return -EIO;
- ctrl->value = !(data & 2);
- break;
- case V4L2_CID_GAIN:
- data = reg_read(client, RJ54N1_Y_GAIN);
- if (data < 0)
- return -EIO;
-
- ctrl->value = data / 2;
- break;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ctrl->value = rj54n1->auto_wb;
- break;
- }
-
- return 0;
-}
-
-static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- int data;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- const struct v4l2_queryctrl *qctrl;
-
- qctrl = soc_camera_find_qctrl(&rj54n1_ops, ctrl->id);
- if (!qctrl)
- return -EINVAL;
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- if (ctrl->value)
+ if (ctrl->val)
data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1);
else
data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1);
if (data < 0)
return -EIO;
- break;
+ return 0;
case V4L2_CID_HFLIP:
- if (ctrl->value)
+ if (ctrl->val)
data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2);
else
data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2);
if (data < 0)
return -EIO;
- break;
+ return 0;
case V4L2_CID_GAIN:
- if (ctrl->value > qctrl->maximum ||
- ctrl->value < qctrl->minimum)
- return -EINVAL;
- else if (reg_write(client, RJ54N1_Y_GAIN, ctrl->value * 2) < 0)
+ if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0)
return -EIO;
- break;
+ return 0;
case V4L2_CID_AUTO_WHITE_BALANCE:
/* Auto WB area - whole image */
- if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->value << 7,
+ if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7,
0x80) < 0)
return -EIO;
- rj54n1->auto_wb = ctrl->value;
- break;
+ rj54n1->auto_wb = ctrl->val;
+ return 0;
}
- return 0;
+ return -EINVAL;
}
+static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = {
+ .s_ctrl = rj54n1_s_ctrl,
+};
+
static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
- .g_ctrl = rj54n1_g_ctrl,
- .s_ctrl = rj54n1_s_ctrl,
.g_chip_ident = rj54n1_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = rj54n1_g_register,
@@ -1337,6 +1232,36 @@ static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
#endif
};
+static int rj54n1_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags =
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
+static int rj54n1_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
+ if (soc_camera_apply_board_flags(icl, cfg) &
+ V4L2_MBUS_PCLK_SAMPLE_RISING)
+ return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
+ else
+ return reg_write(client, RJ54N1_OUT_SIGPO, 0);
+}
+
static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
.s_stream = rj54n1_s_stream,
.s_mbus_fmt = rj54n1_s_fmt,
@@ -1346,6 +1271,8 @@ static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
.g_crop = rj54n1_g_crop,
.s_crop = rj54n1_s_crop,
.cropcap = rj54n1_cropcap,
+ .g_mbus_config = rj54n1_g_mbus_config,
+ .s_mbus_config = rj54n1_s_mbus_config,
};
static struct v4l2_subdev_ops rj54n1_subdev_ops = {
@@ -1357,17 +1284,12 @@ static struct v4l2_subdev_ops rj54n1_subdev_ops = {
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
-static int rj54n1_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client,
+static int rj54n1_video_probe(struct i2c_client *client,
struct rj54n1_pdata *priv)
{
int data1, data2;
int ret;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/* Read out the chip version register */
data1 = reg_read(client, RJ54N1_DEV_CODE);
data2 = reg_read(client, RJ54N1_DEV_CODE2);
@@ -1395,18 +1317,11 @@ static int rj54n1_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct rj54n1 *rj54n1;
- struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
struct rj54n1_pdata *rj54n1_priv;
int ret;
- if (!icd) {
- dev_err(&client->dev, "RJ54N1CB0C: missing soc-camera data!\n");
- return -EINVAL;
- }
-
- icl = to_soc_camera_link(icd);
if (!icl || !icl->priv) {
dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
return -EINVAL;
@@ -1425,8 +1340,22 @@ static int rj54n1_probe(struct i2c_client *client,
return -ENOMEM;
v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops);
+ v4l2_ctrl_handler_init(&rj54n1->hdl, 4);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 66);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
+ if (rj54n1->hdl.error) {
+ int err = rj54n1->hdl.error;
- icd->ops = &rj54n1_ops;
+ kfree(rj54n1);
+ return err;
+ }
rj54n1->clk_div = clk_div;
rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
@@ -1440,25 +1369,24 @@ static int rj54n1_probe(struct i2c_client *client,
rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
(clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
- ret = rj54n1_video_probe(icd, client, rj54n1_priv);
+ ret = rj54n1_video_probe(client, rj54n1_priv);
if (ret < 0) {
- icd->ops = NULL;
+ v4l2_ctrl_handler_free(&rj54n1->hdl);
kfree(rj54n1);
return ret;
}
-
- return ret;
+ return v4l2_ctrl_handler_setup(&rj54n1->hdl);
}
static int rj54n1_remove(struct i2c_client *client)
{
struct rj54n1 *rj54n1 = to_rj54n1(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- icd->ops = NULL;
+ v4l2_device_unregister_subdev(&rj54n1->subdev);
if (icl->free_bus)
icl->free_bus(icl);
+ v4l2_ctrl_handler_free(&rj54n1->hdl);
kfree(rj54n1);
return 0;
diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c
new file mode 100644
index 00000000000..0df7f2a4181
--- /dev/null
+++ b/drivers/media/video/s5k6aa.c
@@ -0,0 +1,1681 @@
+/*
+ * Driver for Samsung S5K6AAFX SXGA 1/6" 1.3M CMOS Image Sensor
+ * with embedded SoC ISP.
+ *
+ * Copyright (C) 2011, Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Based on a driver authored by Dongsoo Nathaniel Kim.
+ * Copyright (C) 2009, Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5k6aa.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define DRIVER_NAME "S5K6AA"
+
+/* The token to indicate array termination */
+#define S5K6AA_TERM 0xffff
+#define S5K6AA_OUT_WIDTH_DEF 640
+#define S5K6AA_OUT_HEIGHT_DEF 480
+#define S5K6AA_WIN_WIDTH_MAX 1280
+#define S5K6AA_WIN_HEIGHT_MAX 1024
+#define S5K6AA_WIN_WIDTH_MIN 8
+#define S5K6AA_WIN_HEIGHT_MIN 8
+
+/*
+ * H/W register Interface (0xD0000000 - 0xD0000FFF)
+ */
+#define AHB_MSB_ADDR_PTR 0xfcfc
+#define GEN_REG_OFFSH 0xd000
+#define REG_CMDWR_ADDRH 0x0028
+#define REG_CMDWR_ADDRL 0x002a
+#define REG_CMDRD_ADDRH 0x002c
+#define REG_CMDRD_ADDRL 0x002e
+#define REG_CMDBUF0_ADDR 0x0f12
+#define REG_CMDBUF1_ADDR 0x0f10
+
+/*
+ * Host S/W Register interface (0x70000000 - 0x70002000)
+ * The value of the two most significant address bytes is 0x7000
+ * (HOST_SWIF_OFFSH). The register addresses below specify the 2 LSBs.
+ */
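+/* E.g. REG_I_INCLK_FREQ_L (0x01b8) corresponds to AHB address 0x700001b8. */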
+#define HOST_SWIF_OFFSH 0x7000
+
+/* Initialization parameters */
+/* Master clock frequency in KHz */
+#define REG_I_INCLK_FREQ_L 0x01b8
+#define REG_I_INCLK_FREQ_H 0x01ba
+#define MIN_MCLK_FREQ_KHZ 6000U
+#define MAX_MCLK_FREQ_KHZ 27000U
+#define REG_I_USE_NPVI_CLOCKS 0x01c6
+#define REG_I_USE_NMIPI_CLOCKS 0x01c8
+
+/* Clock configurations, n = 0..2. REG_I_* frequency unit is 4 kHz. */
+#define REG_I_OPCLK_4KHZ(n) ((n) * 6 + 0x01cc)
+#define REG_I_MIN_OUTRATE_4KHZ(n) ((n) * 6 + 0x01ce)
+#define REG_I_MAX_OUTRATE_4KHZ(n) ((n) * 6 + 0x01d0)
+#define SYS_PLL_OUT_FREQ (48000000 / 4000)
+#define PCLK_FREQ_MIN (24000000 / 4000)
+#define PCLK_FREQ_MAX (48000000 / 4000)
+#define REG_I_INIT_PARAMS_UPDATED 0x01e0
+#define REG_I_ERROR_INFO 0x01e2
+
+/* General purpose parameters */
+#define REG_USER_BRIGHTNESS 0x01e4
+#define REG_USER_CONTRAST 0x01e6
+#define REG_USER_SATURATION 0x01e8
+#define REG_USER_SHARPBLUR 0x01ea
+
+#define REG_G_SPEC_EFFECTS 0x01ee
+#define REG_G_ENABLE_PREV 0x01f0
+#define REG_G_ENABLE_PREV_CHG 0x01f2
+#define REG_G_NEW_CFG_SYNC 0x01f8
+#define REG_G_PREVZOOM_IN_WIDTH 0x020a
+#define REG_G_PREVZOOM_IN_HEIGHT 0x020c
+#define REG_G_PREVZOOM_IN_XOFFS 0x020e
+#define REG_G_PREVZOOM_IN_YOFFS 0x0210
+#define REG_G_INPUTS_CHANGE_REQ 0x021a
+#define REG_G_ACTIVE_PREV_CFG 0x021c
+#define REG_G_PREV_CFG_CHG 0x021e
+#define REG_G_PREV_OPEN_AFTER_CH 0x0220
+#define REG_G_PREV_CFG_ERROR 0x0222
+
+/* Preview control section. n = 0...4. */
+#define PREG(n, x) ((n) * 0x26 + x)
+#define REG_P_OUT_WIDTH(n) PREG(n, 0x0242)
+#define REG_P_OUT_HEIGHT(n) PREG(n, 0x0244)
+#define REG_P_FMT(n) PREG(n, 0x0246)
+#define REG_P_MAX_OUT_RATE(n) PREG(n, 0x0248)
+#define REG_P_MIN_OUT_RATE(n) PREG(n, 0x024a)
+#define REG_P_PVI_MASK(n) PREG(n, 0x024c)
+#define REG_P_CLK_INDEX(n) PREG(n, 0x024e)
+#define REG_P_FR_RATE_TYPE(n) PREG(n, 0x0250)
+#define FR_RATE_DYNAMIC 0
+#define FR_RATE_FIXED 1
+#define FR_RATE_FIXED_ACCURATE 2
+#define REG_P_FR_RATE_Q_TYPE(n) PREG(n, 0x0252)
+#define FR_RATE_Q_BEST_FRRATE 1 /* Binning enabled */
+#define FR_RATE_Q_BEST_QUALITY 2 /* Binning disabled */
+/* Frame period in 0.1 ms units */
+#define REG_P_MAX_FR_TIME(n) PREG(n, 0x0254)
+#define REG_P_MIN_FR_TIME(n) PREG(n, 0x0256)
+/* Conversion to REG_P_[MAX/MIN]_FR_TIME value; __t: time in us */
+#define US_TO_FR_TIME(__t) ((__t) / 100)
+#define S5K6AA_MIN_FR_TIME 33300 /* us */
+#define S5K6AA_MAX_FR_TIME 650000 /* us */
+#define S5K6AA_MAX_HIGHRES_FR_TIME 666 /* x100 us */
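+/* E.g. US_TO_FR_TIME(33300) == 333, i.e. a 33.3 ms frame period (~30 fps) */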
+/* The below 5 registers are for "device correction" values */
+#define REG_P_COLORTEMP(n) PREG(n, 0x025e)
+#define REG_P_PREV_MIRROR(n) PREG(n, 0x0262)
+
+/* Extended image property controls */
+/* Exposure time in 10 us units */
+#define REG_SF_USR_EXPOSURE_L 0x03c6
+#define REG_SF_USR_EXPOSURE_H 0x03c8
+#define REG_SF_USR_EXPOSURE_CHG 0x03ca
+#define REG_SF_USR_TOT_GAIN 0x03cc
+#define REG_SF_USR_TOT_GAIN_CHG 0x03ce
+#define REG_SF_RGAIN 0x03d0
+#define REG_SF_RGAIN_CHG 0x03d2
+#define REG_SF_GGAIN 0x03d4
+#define REG_SF_GGAIN_CHG 0x03d6
+#define REG_SF_BGAIN 0x03d8
+#define REG_SF_BGAIN_CHG 0x03da
+#define REG_SF_FLICKER_QUANT 0x03dc
+#define REG_SF_FLICKER_QUANT_CHG 0x03de
+
+/* Output interface (parallel/MIPI) setup */
+#define REG_OIF_EN_MIPI_LANES 0x03fa
+#define REG_OIF_EN_PACKETS 0x03fc
+#define REG_OIF_CFG_CHG 0x03fe
+
+/* Auto-algorithms enable mask */
+#define REG_DBG_AUTOALG_EN 0x0400
+#define AALG_ALL_EN_MASK (1 << 0)
+#define AALG_AE_EN_MASK (1 << 1)
+#define AALG_DIVLEI_EN_MASK (1 << 2)
+#define AALG_WB_EN_MASK (1 << 3)
+#define AALG_FLICKER_EN_MASK (1 << 5)
+#define AALG_FIT_EN_MASK (1 << 6)
+#define AALG_WRHW_EN_MASK (1 << 7)
+
+/* Firmware revision information */
+#define REG_FW_APIVER 0x012e
+#define S5K6AAFX_FW_APIVER 0x0001
+#define REG_FW_REVISION 0x0130
+
+/* For now we use only one user configuration register set */
+#define S5K6AA_MAX_PRESETS 1
+
+static const char * const s5k6aa_supply_names[] = {
+ "vdd_core", /* Digital core supply 1.5V (1.4V to 1.6V) */
+ "vdda", /* Analog power supply 2.8V (2.6V to 3.0V) */
+ "vdd_reg", /* Regulator input power 1.8V (1.7V to 1.9V)
+ or 2.8V (2.6V to 3.0V) */
+ "vddio", /* I/O supply 1.8V (1.65V to 1.95V)
+ or 2.8V (2.5V to 3.1V) */
+};
+#define S5K6AA_NUM_SUPPLIES ARRAY_SIZE(s5k6aa_supply_names)
+
+enum s5k6aa_gpio_id {
+ STBY,
+ RST,
+ GPIO_NUM,
+};
+
+struct s5k6aa_regval {
+ u16 addr;
+ u16 val;
+};
+
+struct s5k6aa_pixfmt {
+ enum v4l2_mbus_pixelcode code;
+ u32 colorspace;
+ /* REG_P_FMT(x) register value */
+ u16 reg_p_fmt;
+};
+
+struct s5k6aa_preset {
+ /* output pixel format and resolution */
+ struct v4l2_mbus_framefmt mbus_fmt;
+ u8 clk_id;
+ u8 index;
+};
+
+struct s5k6aa_ctrls {
+ struct v4l2_ctrl_handler handler;
+ /* Auto / manual white balance cluster */
+ struct v4l2_ctrl *awb;
+ struct v4l2_ctrl *gain_red;
+ struct v4l2_ctrl *gain_blue;
+ struct v4l2_ctrl *gain_green;
+ /* Mirror cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ /* Auto exposure / manual exposure and gain cluster */
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+};
+
+struct s5k6aa_interval {
+ u16 reg_fr_time;
+ struct v4l2_fract interval;
+ /* Maximum rectangle for the interval */
+ struct v4l2_frmsize_discrete size;
+};
+
+struct s5k6aa {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ enum v4l2_mbus_type bus_type;
+ u8 mipi_lanes;
+
+ int (*s_power)(int enable);
+ struct regulator_bulk_data supplies[S5K6AA_NUM_SUPPLIES];
+ struct s5k6aa_gpio gpio[GPIO_NUM];
+
+ /* external master clock frequency */
+ unsigned long mclk_frequency;
+ /* ISP internal master clock frequency */
+ u16 clk_fop;
+ /* output pixel clock frequency range */
+ u16 pclk_fmin;
+ u16 pclk_fmax;
+
+ unsigned int inv_hflip:1;
+ unsigned int inv_vflip:1;
+
+ /* protects the struct members below */
+ struct mutex lock;
+
+ /* sensor matrix scan window */
+ struct v4l2_rect ccd_rect;
+
+ struct s5k6aa_ctrls ctrls;
+ struct s5k6aa_preset presets[S5K6AA_MAX_PRESETS];
+ struct s5k6aa_preset *preset;
+ const struct s5k6aa_interval *fiv;
+
+ unsigned int streaming:1;
+ unsigned int apply_cfg:1;
+ unsigned int apply_crop:1;
+ unsigned int power;
+};
+
+static struct s5k6aa_regval s5k6aa_analog_config[] = {
+ /* Analog settings */
+ { 0x112a, 0x0000 }, { 0x1132, 0x0000 },
+ { 0x113e, 0x0000 }, { 0x115c, 0x0000 },
+ { 0x1164, 0x0000 }, { 0x1174, 0x0000 },
+ { 0x1178, 0x0000 }, { 0x077a, 0x0000 },
+ { 0x077c, 0x0000 }, { 0x077e, 0x0000 },
+ { 0x0780, 0x0000 }, { 0x0782, 0x0000 },
+ { 0x0784, 0x0000 }, { 0x0786, 0x0000 },
+ { 0x0788, 0x0000 }, { 0x07a2, 0x0000 },
+ { 0x07a4, 0x0000 }, { 0x07a6, 0x0000 },
+ { 0x07a8, 0x0000 }, { 0x07b6, 0x0000 },
+ { 0x07b8, 0x0002 }, { 0x07ba, 0x0004 },
+ { 0x07bc, 0x0004 }, { 0x07be, 0x0005 },
+ { 0x07c0, 0x0005 }, { S5K6AA_TERM, 0 },
+};
+
+/* TODO: Add RGB888 and Bayer format */
+static const struct s5k6aa_pixfmt s5k6aa_formats[] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 },
+ /* range 16-240 */
+ { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_REC709, 6 },
+ { V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
+};
+
+static const struct s5k6aa_interval s5k6aa_intervals[] = {
+ { 1000, {10000, 1000000}, {1280, 1024} }, /* 10 fps */
+ { 666, {15000, 1000000}, {1280, 1024} }, /* 15 fps */
+ { 500, {20000, 1000000}, {1280, 720} }, /* 20 fps */
+ { 400, {25000, 1000000}, {640, 480} }, /* 25 fps */
+ { 333, {33300, 1000000}, {640, 480} }, /* 30 fps */
+};
+
+#define S5K6AA_INTERVAL_DEF_INDEX 1 /* 15 fps */
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct s5k6aa, ctrls.handler)->sd;
+}
+
+static inline struct s5k6aa *to_s5k6aa(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct s5k6aa, sd);
+}
+
+/* Set initial values for all preview presets */
+static void s5k6aa_presets_data_init(struct s5k6aa *s5k6aa)
+{
+ struct s5k6aa_preset *preset = &s5k6aa->presets[0];
+ int i;
+
+ for (i = 0; i < S5K6AA_MAX_PRESETS; i++) {
+ preset->mbus_fmt.width = S5K6AA_OUT_WIDTH_DEF;
+ preset->mbus_fmt.height = S5K6AA_OUT_HEIGHT_DEF;
+ preset->mbus_fmt.code = s5k6aa_formats[0].code;
+ preset->index = i;
+ preset->clk_id = 0;
+ preset++;
+ }
+
+ s5k6aa->fiv = &s5k6aa_intervals[S5K6AA_INTERVAL_DEF_INDEX];
+ s5k6aa->preset = &s5k6aa->presets[0];
+}
+
+static int s5k6aa_i2c_read(struct i2c_client *client, u16 addr, u16 *val)
+{
+ u8 wbuf[2] = {addr >> 8, addr & 0xFF};
+ struct i2c_msg msg[2];
+ u8 rbuf[2];
+ int ret;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = wbuf;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = rbuf;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ *val = be16_to_cpu(*((u16 *)rbuf));
+
+ v4l2_dbg(3, debug, client, "i2c_read: 0x%04X : 0x%04x\n", addr, *val);
+
+ return ret == 2 ? 0 : ret;
+}
+
+static int s5k6aa_i2c_write(struct i2c_client *client, u16 addr, u16 val)
+{
+ u8 buf[4] = {addr >> 8, addr & 0xFF, val >> 8, val & 0xFF};
+
+ int ret = i2c_master_send(client, buf, 4);
+ v4l2_dbg(3, debug, client, "i2c_write: 0x%04X : 0x%04x\n", addr, val);
+
+ return ret == 4 ? 0 : ret;
+}
+
+/* The command register write, assumes Command_Wr_addH = 0x7000. */
+static int s5k6aa_write(struct i2c_client *c, u16 addr, u16 val)
+{
+ int ret = s5k6aa_i2c_write(c, REG_CMDWR_ADDRL, addr);
+ if (ret)
+ return ret;
+ return s5k6aa_i2c_write(c, REG_CMDBUF0_ADDR, val);
+}
+
+/* The command register read, assumes Command_Rd_addH = 0x7000. */
+static int s5k6aa_read(struct i2c_client *client, u16 addr, u16 *val)
+{
+ int ret = s5k6aa_i2c_write(client, REG_CMDRD_ADDRL, addr);
+ if (ret)
+ return ret;
+ return s5k6aa_i2c_read(client, REG_CMDBUF0_ADDR, val);
+}
+
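+/*
+ * Write an S5K6AA_TERM-terminated list of register/value pairs. REG_CMDWR_ADDRL
+ * is rewritten only when the next entry does not follow at the adjacent (+2)
+ * register address.
+ */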
+static int s5k6aa_write_array(struct v4l2_subdev *sd,
+ const struct s5k6aa_regval *msg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 addr_incr = 0;
+ int ret = 0;
+
+ while (msg->addr != S5K6AA_TERM) {
+ if (addr_incr != 2)
+ ret = s5k6aa_i2c_write(client, REG_CMDWR_ADDRL,
+ msg->addr);
+ if (ret)
+ break;
+ ret = s5k6aa_i2c_write(client, REG_CMDBUF0_ADDR, msg->val);
+ if (ret)
+ break;
+ /* Assume that msg->addr is always less than 0xfffc */
+ addr_incr = (msg + 1)->addr - msg->addr;
+ msg++;
+ }
+
+ return ret;
+}
+
+/* Configure the AHB high address bytes for GTG registers access */
+static int s5k6aa_set_ahb_address(struct i2c_client *client)
+{
+ int ret = s5k6aa_i2c_write(client, AHB_MSB_ADDR_PTR, GEN_REG_OFFSH);
+ if (ret)
+ return ret;
+ ret = s5k6aa_i2c_write(client, REG_CMDRD_ADDRH, HOST_SWIF_OFFSH);
+ if (ret)
+ return ret;
+ return s5k6aa_i2c_write(client, REG_CMDWR_ADDRH, HOST_SWIF_OFFSH);
+}
+
+/**
+ * s5k6aa_configure_pixel_clocks - apply ISP main clock/PLL configuration
+ *
+ * Configure the internal ISP PLL for the required output frequency.
+ * Locking: called with s5k6aa.lock mutex held.
+ */
+static int s5k6aa_configure_pixel_clocks(struct s5k6aa *s5k6aa)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd);
+ unsigned long fmclk = s5k6aa->mclk_frequency / 1000;
+ u16 status;
+ int ret;
+
+ if (WARN(fmclk < MIN_MCLK_FREQ_KHZ || fmclk > MAX_MCLK_FREQ_KHZ,
+ "Invalid clock frequency: %ld\n", fmclk))
+ return -EINVAL;
+
+ s5k6aa->pclk_fmin = PCLK_FREQ_MIN;
+ s5k6aa->pclk_fmax = PCLK_FREQ_MAX;
+ s5k6aa->clk_fop = SYS_PLL_OUT_FREQ;
+
+ /* External input clock frequency in kHz */
+ ret = s5k6aa_write(c, REG_I_INCLK_FREQ_H, fmclk >> 16);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_I_INCLK_FREQ_L, fmclk & 0xFFFF);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_I_USE_NPVI_CLOCKS, 1);
+ /* Internal PLL frequency */
+ if (!ret)
+ ret = s5k6aa_write(c, REG_I_OPCLK_4KHZ(0), s5k6aa->clk_fop);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_I_MIN_OUTRATE_4KHZ(0),
+ s5k6aa->pclk_fmin);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_I_MAX_OUTRATE_4KHZ(0),
+ s5k6aa->pclk_fmax);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_I_INIT_PARAMS_UPDATED, 1);
+ if (!ret)
+ ret = s5k6aa_read(c, REG_I_ERROR_INFO, &status);
+
+ return ret ? ret : (status ? -EINVAL : 0);
+}
+
+/* Set horizontal and vertical image flipping */
+static int s5k6aa_set_mirror(struct s5k6aa *s5k6aa, int horiz_flip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ int index = s5k6aa->preset->index;
+
+ unsigned int vflip = s5k6aa->ctrls.vflip->val ^ s5k6aa->inv_vflip;
+ unsigned int flip = (horiz_flip ^ s5k6aa->inv_hflip) | (vflip << 1);
+
+ return s5k6aa_write(client, REG_P_PREV_MIRROR(index), flip);
+}
+
+/* Configure auto/manual white balance and R/G/B gains */
+static int s5k6aa_set_awb(struct s5k6aa *s5k6aa, int awb)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd);
+ struct s5k6aa_ctrls *ctrls = &s5k6aa->ctrls;
+ u16 reg;
+
+ int ret = s5k6aa_read(c, REG_DBG_AUTOALG_EN, &reg);
+
+ if (!ret && !awb) {
+ ret = s5k6aa_write(c, REG_SF_RGAIN, ctrls->gain_red->val);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_SF_RGAIN_CHG, 1);
+ if (ret)
+ return ret;
+
+ ret = s5k6aa_write(c, REG_SF_GGAIN, ctrls->gain_green->val);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_SF_GGAIN_CHG, 1);
+ if (ret)
+ return ret;
+
+ ret = s5k6aa_write(c, REG_SF_BGAIN, ctrls->gain_blue->val);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_SF_BGAIN_CHG, 1);
+ }
+ if (!ret) {
+ reg = awb ? reg | AALG_WB_EN_MASK : reg & ~AALG_WB_EN_MASK;
+ ret = s5k6aa_write(c, REG_DBG_AUTOALG_EN, reg);
+ }
+
+ return ret;
+}
+
+/* Program FW with exposure time, 'exposure' in us units */
+static int s5k6aa_set_user_exposure(struct i2c_client *client, int exposure)
+{
+ unsigned int time = exposure / 10;
+
+ int ret = s5k6aa_write(client, REG_SF_USR_EXPOSURE_L, time & 0xffff);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_SF_USR_EXPOSURE_H, time >> 16);
+ if (ret)
+ return ret;
+ return s5k6aa_write(client, REG_SF_USR_EXPOSURE_CHG, 1);
+}
+
+static int s5k6aa_set_user_gain(struct i2c_client *client, int gain)
+{
+ int ret = s5k6aa_write(client, REG_SF_USR_TOT_GAIN, gain);
+ if (ret)
+ return ret;
+ return s5k6aa_write(client, REG_SF_USR_TOT_GAIN_CHG, 1);
+}
+
+/* Set auto/manual exposure and total gain */
+static int s5k6aa_set_auto_exposure(struct s5k6aa *s5k6aa, int value)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd);
+ unsigned int exp_time = s5k6aa->ctrls.exposure->val;
+ u16 auto_alg;
+
+ int ret = s5k6aa_read(c, REG_DBG_AUTOALG_EN, &auto_alg);
+ if (ret)
+ return ret;
+
+ v4l2_dbg(1, debug, c, "man_exp: %d, auto_exp: %d, a_alg: 0x%x\n",
+ exp_time, value, auto_alg);
+
+ if (value == V4L2_EXPOSURE_AUTO) {
+ auto_alg |= AALG_AE_EN_MASK | AALG_DIVLEI_EN_MASK;
+ } else {
+ ret = s5k6aa_set_user_exposure(c, exp_time);
+ if (ret)
+ return ret;
+ ret = s5k6aa_set_user_gain(c, s5k6aa->ctrls.gain->val);
+ if (ret)
+ return ret;
+ auto_alg &= ~(AALG_AE_EN_MASK | AALG_DIVLEI_EN_MASK);
+ }
+
+ return s5k6aa_write(c, REG_DBG_AUTOALG_EN, auto_alg);
+}
+
+static int s5k6aa_set_anti_flicker(struct s5k6aa *s5k6aa, int value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ u16 auto_alg;
+ int ret;
+
+ ret = s5k6aa_read(client, REG_DBG_AUTOALG_EN, &auto_alg);
+ if (ret)
+ return ret;
+
+ if (value == V4L2_CID_POWER_LINE_FREQUENCY_AUTO) {
+ auto_alg |= AALG_FLICKER_EN_MASK;
+ } else {
+ auto_alg &= ~AALG_FLICKER_EN_MASK;
+ /* The V4L2_CID_POWER_LINE_FREQUENCY control values match
+ * the register values */
+ ret = s5k6aa_write(client, REG_SF_FLICKER_QUANT, value);
+ if (ret)
+ return ret;
+ ret = s5k6aa_write(client, REG_SF_FLICKER_QUANT_CHG, 1);
+ if (ret)
+ return ret;
+ }
+
+ return s5k6aa_write(client, REG_DBG_AUTOALG_EN, auto_alg);
+}
+
+static int s5k6aa_set_colorfx(struct s5k6aa *s5k6aa, int val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ static const struct v4l2_control colorfx[] = {
+ { V4L2_COLORFX_NONE, 0 },
+ { V4L2_COLORFX_BW, 1 },
+ { V4L2_COLORFX_NEGATIVE, 2 },
+ { V4L2_COLORFX_SEPIA, 3 },
+ { V4L2_COLORFX_SKY_BLUE, 4 },
+ { V4L2_COLORFX_SKETCH, 5 },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++) {
+ if (colorfx[i].id == val)
+ return s5k6aa_write(client, REG_G_SPEC_EFFECTS,
+ colorfx[i].value);
+ }
+ return -EINVAL;
+}
+
+static int s5k6aa_preview_config_status(struct i2c_client *client)
+{
+ u16 error = 0;
+ int ret = s5k6aa_read(client, REG_G_PREV_CFG_ERROR, &error);
+
+ v4l2_dbg(1, debug, client, "error: 0x%x (%d)\n", error, ret);
+ return ret ? ret : (error ? -EINVAL : 0);
+}
+
+static int s5k6aa_get_pixfmt_index(struct s5k6aa *s5k6aa,
+ struct v4l2_mbus_framefmt *mf)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5k6aa_formats); i++)
+ if (mf->colorspace == s5k6aa_formats[i].colorspace &&
+ mf->code == s5k6aa_formats[i].code)
+ return i;
+ return 0;
+}
+
+static int s5k6aa_set_output_framefmt(struct s5k6aa *s5k6aa,
+ struct s5k6aa_preset *preset)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ int fmt_index = s5k6aa_get_pixfmt_index(s5k6aa, &preset->mbus_fmt);
+ int ret;
+
+ ret = s5k6aa_write(client, REG_P_OUT_WIDTH(preset->index),
+ preset->mbus_fmt.width);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_OUT_HEIGHT(preset->index),
+ preset->mbus_fmt.height);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_FMT(preset->index),
+ s5k6aa_formats[fmt_index].reg_p_fmt);
+ return ret;
+}
+
+static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd);
+ struct v4l2_rect *r = &s5k6aa->ccd_rect;
+ int ret;
+
+ ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_WIDTH, r->width);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_HEIGHT, r->height);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_XOFFS, r->left);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_YOFFS, r->top);
+ if (!ret)
+ ret = s5k6aa_write(c, REG_G_INPUTS_CHANGE_REQ, 1);
+ if (!ret)
+ s5k6aa->apply_crop = 0;
+
+ return ret;
+}
+
+/**
+ * s5k6aa_configure_video_bus - configure the video output interface
+ * @bus_type: video bus type: parallel or MIPI-CSI
+ * @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
+ *
+ * Note: Only parallel bus operation has been tested.
+ */
+static int s5k6aa_configure_video_bus(struct s5k6aa *s5k6aa,
+ enum v4l2_mbus_type bus_type, int nlanes)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ u16 cfg = 0;
+ int ret;
+
+ /*
+ * TODO: The sensor is supposed to support BT.601 and BT.656
+ * but there is nothing in the datasheet indicating how to switch
+ * between the two. For now the default BT.601 interface is assumed.
+ */
+ if (bus_type == V4L2_MBUS_CSI2)
+ cfg = nlanes;
+ else if (bus_type != V4L2_MBUS_PARALLEL)
+ return -EINVAL;
+
+ ret = s5k6aa_write(client, REG_OIF_EN_MIPI_LANES, cfg);
+ if (ret)
+ return ret;
+ return s5k6aa_write(client, REG_OIF_CFG_CHG, 1);
+}
+
+/* This function should be called when switching to a new user configuration set */
+static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
+ int cid)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(timeout);
+ u16 reg = 1;
+ int ret;
+
+ ret = s5k6aa_write(client, REG_G_ACTIVE_PREV_CFG, cid);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_G_PREV_CFG_CHG, 1);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_G_NEW_CFG_SYNC, 1);
+ if (timeout == 0)
+ return ret;
+
+ while (ret >= 0 && time_is_after_jiffies(end)) {
+ ret = s5k6aa_read(client, REG_G_NEW_CFG_SYNC, &reg);
+ if (!reg)
+ return 0;
+ usleep_range(1000, 5000);
+ }
+ return ret ? ret : -ETIMEDOUT;
+}
+
+/**
+ * s5k6aa_set_prev_config - write user preview register set
+ *
+ * Configure output resolution and color format, pixel clock
+ * frequency range, device frame rate type and frame period range.
+ */
+static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
+ struct s5k6aa_preset *preset)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ int idx = preset->index;
+ u16 frame_rate_q;
+ int ret;
+
+ if (s5k6aa->fiv->reg_fr_time >= S5K6AA_MAX_HIGHRES_FR_TIME)
+ frame_rate_q = FR_RATE_Q_BEST_FRRATE;
+ else
+ frame_rate_q = FR_RATE_Q_BEST_QUALITY;
+
+ ret = s5k6aa_set_output_framefmt(s5k6aa, preset);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_MAX_OUT_RATE(idx),
+ s5k6aa->pclk_fmax);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_MIN_OUT_RATE(idx),
+ s5k6aa->pclk_fmin);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_CLK_INDEX(idx),
+ preset->clk_id);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_FR_RATE_TYPE(idx),
+ FR_RATE_DYNAMIC);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_FR_RATE_Q_TYPE(idx),
+ frame_rate_q);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_MAX_FR_TIME(idx),
+ s5k6aa->fiv->reg_fr_time + 33);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_P_MIN_FR_TIME(idx),
+ s5k6aa->fiv->reg_fr_time - 33);
+ if (!ret)
+ ret = s5k6aa_new_config_sync(client, 250, idx);
+ if (!ret)
+ ret = s5k6aa_preview_config_status(client);
+ if (!ret)
+ s5k6aa->apply_cfg = 0;
+
+ v4l2_dbg(1, debug, client, "Frame interval: %d +/- 3.3ms. (%d)\n",
+ s5k6aa->fiv->reg_fr_time, ret);
+ return ret;
+}
+
+/**
+ * s5k6aa_initialize_isp - basic ISP MCU initialization
+ *
+ * Configure AHB addresses for registers read/write; configure PLLs for
+ * required output pixel clock. The ISP power supply needs to be already
+ * enabled, with an optional H/W reset.
+ * Locking: called with s5k6aa.lock mutex held.
+ */
+static int s5k6aa_initialize_isp(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ int ret;
+
+ s5k6aa->apply_crop = 1;
+ s5k6aa->apply_cfg = 1;
+ msleep(100);
+
+ ret = s5k6aa_set_ahb_address(client);
+ if (ret)
+ return ret;
+ ret = s5k6aa_configure_video_bus(s5k6aa, s5k6aa->bus_type,
+ s5k6aa->mipi_lanes);
+ if (ret)
+ return ret;
+ ret = s5k6aa_write_array(sd, s5k6aa_analog_config);
+ if (ret)
+ return ret;
+ msleep(20);
+
+ return s5k6aa_configure_pixel_clocks(s5k6aa);
+}
+
+static int s5k6aa_gpio_set_value(struct s5k6aa *priv, int id, u32 val)
+{
+ if (!gpio_is_valid(priv->gpio[id].gpio))
+ return 0;
+ gpio_set_value(priv->gpio[id].gpio, !!val);
+ return 1;
+}
+
+static int s5k6aa_gpio_assert(struct s5k6aa *priv, int id)
+{
+ return s5k6aa_gpio_set_value(priv, id, priv->gpio[id].level);
+}
+
+static int s5k6aa_gpio_deassert(struct s5k6aa *priv, int id)
+{
+ return s5k6aa_gpio_set_value(priv, id, !priv->gpio[id].level);
+}
+
+static int __s5k6aa_power_on(struct s5k6aa *s5k6aa)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
+ if (ret)
+ return ret;
+ if (s5k6aa_gpio_deassert(s5k6aa, STBY))
+ usleep_range(150, 200);
+
+ if (s5k6aa->s_power)
+ ret = s5k6aa->s_power(1);
+ usleep_range(4000, 4000);
+
+ if (s5k6aa_gpio_deassert(s5k6aa, RST))
+ msleep(20);
+
+ return ret;
+}
+
+static int __s5k6aa_power_off(struct s5k6aa *s5k6aa)
+{
+ int ret;
+
+ if (s5k6aa_gpio_assert(s5k6aa, RST))
+ usleep_range(100, 150);
+
+ if (s5k6aa->s_power) {
+ ret = s5k6aa->s_power(0);
+ if (ret)
+ return ret;
+ }
+ if (s5k6aa_gpio_assert(s5k6aa, STBY))
+ usleep_range(50, 100);
+ s5k6aa->streaming = 0;
+
+ return regulator_bulk_disable(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
+}
+
+/*
+ * V4L2 subdev core and video operations
+ */
+static int s5k6aa_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ int ret = 0;
+
+ mutex_lock(&s5k6aa->lock);
+
+ if (!on == s5k6aa->power) {
+ if (on) {
+ ret = __s5k6aa_power_on(s5k6aa);
+ if (!ret)
+ ret = s5k6aa_initialize_isp(sd);
+ } else {
+ ret = __s5k6aa_power_off(s5k6aa);
+ }
+
+ if (!ret)
+ s5k6aa->power += on ? 1 : -1;
+ }
+
+ mutex_unlock(&s5k6aa->lock);
+
+ if (!on || ret || s5k6aa->power != 1)
+ return ret;
+
+ return v4l2_ctrl_handler_setup(sd->ctrl_handler);
+}
+
+static int __s5k6aa_stream(struct s5k6aa *s5k6aa, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ int ret = 0;
+
+ ret = s5k6aa_write(client, REG_G_ENABLE_PREV, enable);
+ if (!ret)
+ ret = s5k6aa_write(client, REG_G_ENABLE_PREV_CHG, 1);
+ if (!ret)
+ s5k6aa->streaming = enable;
+
+ return ret;
+}
+
+static int s5k6aa_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ int ret = 0;
+
+ mutex_lock(&s5k6aa->lock);
+
+ if (s5k6aa->streaming == !on) {
+ if (s5k6aa->apply_cfg)
+ ret = s5k6aa_set_prev_config(s5k6aa, s5k6aa->preset);
+ if (!ret && s5k6aa->apply_crop)
+ ret = s5k6aa_set_input_params(s5k6aa);
+ if (!ret)
+ ret = __s5k6aa_stream(s5k6aa, !!on);
+ }
+ mutex_unlock(&s5k6aa->lock);
+
+ return ret;
+}
+
+static int s5k6aa_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+
+ mutex_lock(&s5k6aa->lock);
+ fi->interval = s5k6aa->fiv->interval;
+ mutex_unlock(&s5k6aa->lock);
+
+ return 0;
+}
+
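+/*
+ * Select the supported frame interval closest to the requested one,
+ * considering only the intervals applicable to the current frame size.
+ * reg_fr_time is expressed in units of 100 us.
+ */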
+static int __s5k6aa_set_frame_interval(struct s5k6aa *s5k6aa,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct v4l2_mbus_framefmt *mbus_fmt = &s5k6aa->preset->mbus_fmt;
+ const struct s5k6aa_interval *fiv = &s5k6aa_intervals[0];
+ unsigned int err, min_err = UINT_MAX;
+ unsigned int i, fr_time;
+
+ if (fi->interval.denominator == 0)
+ return -EINVAL;
+
+ fr_time = fi->interval.numerator * 10000 / fi->interval.denominator;
+
+ for (i = 0; i < ARRAY_SIZE(s5k6aa_intervals); i++) {
+ const struct s5k6aa_interval *iv = &s5k6aa_intervals[i];
+
+ if (mbus_fmt->width > iv->size.width ||
+ mbus_fmt->height > iv->size.height)
+ continue;
+
+ err = abs(iv->reg_fr_time - fr_time);
+ if (err < min_err) {
+ fiv = iv;
+ min_err = err;
+ }
+ }
+ s5k6aa->fiv = fiv;
+
+ v4l2_dbg(1, debug, &s5k6aa->sd, "Changed frame interval to %d us\n",
+ fiv->reg_fr_time * 100);
+ return 0;
+}
+
+static int s5k6aa_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ int ret;
+
+ v4l2_dbg(1, debug, sd, "Setting %d/%d frame interval\n",
+ fi->interval.numerator, fi->interval.denominator);
+
+ mutex_lock(&s5k6aa->lock);
+ ret = __s5k6aa_set_frame_interval(s5k6aa, fi);
+ s5k6aa->apply_cfg = 1;
+
+ mutex_unlock(&s5k6aa->lock);
+ return ret;
+}
+
+/*
+ * V4L2 subdev pad level and video operations
+ */
+static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ const struct s5k6aa_interval *fi;
+ int ret = 0;
+
+ if (fie->index >= ARRAY_SIZE(s5k6aa_intervals))
+ return -EINVAL;
+
+ v4l_bound_align_image(&fie->width, S5K6AA_WIN_WIDTH_MIN,
+ S5K6AA_WIN_WIDTH_MAX, 1,
+ &fie->height, S5K6AA_WIN_HEIGHT_MIN,
+ S5K6AA_WIN_HEIGHT_MAX, 1, 0);
+
+ mutex_lock(&s5k6aa->lock);
+ fi = &s5k6aa_intervals[fie->index];
+ if (fie->width > fi->size.width || fie->height > fi->size.height)
+ ret = -EINVAL;
+ else
+ fie->interval = fi->interval;
+ mutex_unlock(&s5k6aa->lock);
+
+ return ret;
+}
+
+static int s5k6aa_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(s5k6aa_formats))
+ return -EINVAL;
+
+ code->code = s5k6aa_formats[code->index].code;
+ return 0;
+}
+
+static int s5k6aa_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int i = ARRAY_SIZE(s5k6aa_formats);
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ while (--i)
+ if (fse->code == s5k6aa_formats[i].code)
+ break;
+
+ fse->code = s5k6aa_formats[i].code;
+ fse->min_width = S5K6AA_WIN_WIDTH_MIN;
+ fse->max_width = S5K6AA_WIN_WIDTH_MAX;
+ fse->min_height = S5K6AA_WIN_HEIGHT_MIN;
+ fse->max_height = S5K6AA_WIN_HEIGHT_MAX;
+
+ return 0;
+}
+
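+/* Return the active or the try crop rectangle, depending on @which. */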
+static struct v4l2_rect *
+__s5k6aa_get_crop_rect(struct s5k6aa *s5k6aa, struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return &s5k6aa->ccd_rect;
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(fh, 0);
+
+ return NULL;
+}
+
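+/*
+ * Adjust the requested format to the sensor window limits and to one of
+ * the supported media bus codes and colorspaces.
+ */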
+static void s5k6aa_try_format(struct s5k6aa *s5k6aa,
+ struct v4l2_mbus_framefmt *mf)
+{
+ unsigned int index;
+
+ v4l_bound_align_image(&mf->width, S5K6AA_WIN_WIDTH_MIN,
+ S5K6AA_WIN_WIDTH_MAX, 1,
+ &mf->height, S5K6AA_WIN_HEIGHT_MIN,
+ S5K6AA_WIN_HEIGHT_MAX, 1, 0);
+
+ if (mf->colorspace != V4L2_COLORSPACE_JPEG &&
+ mf->colorspace != V4L2_COLORSPACE_REC709)
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ index = s5k6aa_get_pixfmt_index(s5k6aa, mf);
+
+ mf->colorspace = s5k6aa_formats[index].colorspace;
+ mf->code = s5k6aa_formats[index].code;
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ memset(fmt->reserved, 0, sizeof(fmt->reserved));
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, 0);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&s5k6aa->lock);
+ fmt->format = s5k6aa->preset->mbus_fmt;
+ mutex_unlock(&s5k6aa->lock);
+
+ return 0;
+}
+
+static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ struct s5k6aa_preset *preset = s5k6aa->preset;
+ struct v4l2_mbus_framefmt *mf;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ mutex_lock(&s5k6aa->lock);
+ s5k6aa_try_format(s5k6aa, &fmt->format);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ crop = v4l2_subdev_get_try_crop(fh, 0);
+ } else {
+ if (s5k6aa->streaming) {
+ ret = -EBUSY;
+ } else {
+ mf = &preset->mbus_fmt;
+ crop = &s5k6aa->ccd_rect;
+ s5k6aa->apply_cfg = 1;
+ }
+ }
+
+ if (ret == 0) {
+ struct v4l2_subdev_frame_interval fiv = {
+ .interval = {0, 1}
+ };
+
+ *mf = fmt->format;
+ /*
+ * Make sure the crop window is valid, i.e. it is not
+ * smaller than the output window, since the ISP supports
+ * only down-scaling.
+ */
+ crop->width = clamp_t(unsigned int, crop->width, mf->width,
+ S5K6AA_WIN_WIDTH_MAX);
+ crop->height = clamp_t(unsigned int, crop->height, mf->height,
+ S5K6AA_WIN_HEIGHT_MAX);
+ crop->left = clamp_t(unsigned int, crop->left, 0,
+ S5K6AA_WIN_WIDTH_MAX - crop->width);
+ crop->top = clamp_t(unsigned int, crop->top, 0,
+ S5K6AA_WIN_HEIGHT_MAX - crop->height);
+
+ /* Reset to minimum possible frame interval */
+ ret = __s5k6aa_set_frame_interval(s5k6aa, &fiv);
+ }
+ mutex_unlock(&s5k6aa->lock);
+
+ return ret;
+}
+
+static int s5k6aa_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ struct v4l2_rect *rect;
+
+ memset(crop->reserved, 0, sizeof(crop->reserved));
+ mutex_lock(&s5k6aa->lock);
+
+ rect = __s5k6aa_get_crop_rect(s5k6aa, fh, crop->which);
+ if (rect)
+ crop->rect = *rect;
+
+ mutex_unlock(&s5k6aa->lock);
+
+ v4l2_dbg(1, debug, sd, "Current crop rectangle: (%d,%d)/%dx%d\n",
+ rect->left, rect->top, rect->width, rect->height);
+
+ return 0;
+}
+
+static int s5k6aa_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ struct v4l2_mbus_framefmt *mf;
+ unsigned int max_x, max_y;
+ struct v4l2_rect *crop_r;
+
+ mutex_lock(&s5k6aa->lock);
+ crop_r = __s5k6aa_get_crop_rect(s5k6aa, fh, crop->which);
+
+ if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ mf = &s5k6aa->preset->mbus_fmt;
+ s5k6aa->apply_crop = 1;
+ } else {
+ mf = v4l2_subdev_get_try_format(fh, 0);
+ }
+ v4l_bound_align_image(&crop->rect.width, mf->width,
+ S5K6AA_WIN_WIDTH_MAX, 1,
+ &crop->rect.height, mf->height,
+ S5K6AA_WIN_HEIGHT_MAX, 1, 0);
+
+ max_x = (S5K6AA_WIN_WIDTH_MAX - crop->rect.width) & ~1;
+ max_y = (S5K6AA_WIN_HEIGHT_MAX - crop->rect.height) & ~1;
+
+ crop->rect.left = clamp_t(unsigned int, crop->rect.left, 0, max_x);
+ crop->rect.top = clamp_t(unsigned int, crop->rect.top, 0, max_y);
+
+ *crop_r = crop->rect;
+
+ mutex_unlock(&s5k6aa->lock);
+
+ v4l2_dbg(1, debug, sd, "Set crop rectangle: (%d,%d)/%dx%d\n",
+ crop_r->left, crop_r->top, crop_r->width, crop_r->height);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops s5k6aa_pad_ops = {
+ .enum_mbus_code = s5k6aa_enum_mbus_code,
+ .enum_frame_size = s5k6aa_enum_frame_size,
+ .enum_frame_interval = s5k6aa_enum_frame_interval,
+ .get_fmt = s5k6aa_get_fmt,
+ .set_fmt = s5k6aa_set_fmt,
+ .get_crop = s5k6aa_get_crop,
+ .set_crop = s5k6aa_set_crop,
+};
+
+static const struct v4l2_subdev_video_ops s5k6aa_video_ops = {
+ .g_frame_interval = s5k6aa_g_frame_interval,
+ .s_frame_interval = s5k6aa_s_frame_interval,
+ .s_stream = s5k6aa_s_stream,
+};
+
+/*
+ * V4L2 subdev controls
+ */
+
+static int s5k6aa_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ int idx, err = 0;
+
+ v4l2_dbg(1, debug, sd, "ctrl: 0x%x, value: %d\n", ctrl->id, ctrl->val);
+
+ mutex_lock(&s5k6aa->lock);
+ /*
+ * If the device is not powered up by the host driver, do
+ * not apply any controls to the hardware at this time. The
+ * controls will instead be restored right after power-up.
+ */
+ if (s5k6aa->power == 0)
+ goto unlock;
+ idx = s5k6aa->preset->index;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ err = s5k6aa_set_awb(s5k6aa, ctrl->val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ err = s5k6aa_write(client, REG_USER_BRIGHTNESS, ctrl->val);
+ break;
+
+ case V4L2_CID_COLORFX:
+ err = s5k6aa_set_colorfx(s5k6aa, ctrl->val);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ err = s5k6aa_write(client, REG_USER_CONTRAST, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ err = s5k6aa_set_auto_exposure(s5k6aa, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ err = s5k6aa_set_mirror(s5k6aa, ctrl->val);
+ if (err)
+ break;
+ err = s5k6aa_write(client, REG_G_PREV_CFG_CHG, 1);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ err = s5k6aa_set_anti_flicker(s5k6aa, ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ err = s5k6aa_write(client, REG_USER_SATURATION, ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ err = s5k6aa_write(client, REG_USER_SHARPBLUR, ctrl->val);
+ break;
+
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE:
+ err = s5k6aa_write(client, REG_P_COLORTEMP(idx), ctrl->val);
+ if (err)
+ break;
+ err = s5k6aa_write(client, REG_G_PREV_CFG_CHG, 1);
+ break;
+ }
+unlock:
+ mutex_unlock(&s5k6aa->lock);
+ return err;
+}
+
+static const struct v4l2_ctrl_ops s5k6aa_ctrl_ops = {
+ .s_ctrl = s5k6aa_s_ctrl,
+};
+
+static int s5k6aa_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+
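+/* Driver-private controls, allocated in the camera control class. */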
+#define V4L2_CID_RED_GAIN (V4L2_CTRL_CLASS_CAMERA | 0x1001)
+#define V4L2_CID_GREEN_GAIN (V4L2_CTRL_CLASS_CAMERA | 0x1002)
+#define V4L2_CID_BLUE_GAIN (V4L2_CTRL_CLASS_CAMERA | 0x1003)
+
+static const struct v4l2_ctrl_config s5k6aa_ctrls[] = {
+ {
+ .ops = &s5k6aa_ctrl_ops,
+ .id = V4L2_CID_RED_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain, Red",
+ .min = 0,
+ .max = 256,
+ .def = 127,
+ .step = 1,
+ }, {
+ .ops = &s5k6aa_ctrl_ops,
+ .id = V4L2_CID_GREEN_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain, Green",
+ .min = 0,
+ .max = 256,
+ .def = 127,
+ .step = 1,
+ }, {
+ .ops = &s5k6aa_ctrl_ops,
+ .id = V4L2_CID_BLUE_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain, Blue",
+ .min = 0,
+ .max = 256,
+ .def = 127,
+ .step = 1,
+ },
+};
+
+static int s5k6aa_initialize_ctrls(struct s5k6aa *s5k6aa)
+{
+ const struct v4l2_ctrl_ops *ops = &s5k6aa_ctrl_ops;
+ struct s5k6aa_ctrls *ctrls = &s5k6aa->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+
+ int ret = v4l2_ctrl_handler_init(hdl, 16);
+ if (ret)
+ return ret;
+ /* Auto white balance cluster */
+ ctrls->awb = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ ctrls->gain_red = v4l2_ctrl_new_custom(hdl, &s5k6aa_ctrls[0], NULL);
+ ctrls->gain_green = v4l2_ctrl_new_custom(hdl, &s5k6aa_ctrls[1], NULL);
+ ctrls->gain_blue = v4l2_ctrl_new_custom(hdl, &s5k6aa_ctrls[2], NULL);
+ v4l2_ctrl_auto_cluster(4, &ctrls->awb, 0, false);
+
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &ctrls->hflip);
+
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+ /* Exposure time: x 1 us */
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 0, 6000000U, 1, 100000U);
+ /* Total gain: 256 <=> 1x */
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ 0, 256, 1, 256);
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 0, false);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_SKY_BLUE, ~0x6f, V4L2_COLORFX_NONE);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ 0, 256, 1, 0);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS, -127, 127, 1, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ s5k6aa->sd.ctrl_handler = hdl;
+ return 0;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+static int s5k6aa_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(fh, 0);
+
+ format->colorspace = s5k6aa_formats[0].colorspace;
+ format->code = s5k6aa_formats[0].code;
+ format->width = S5K6AA_OUT_WIDTH_DEF;
+ format->height = S5K6AA_OUT_HEIGHT_DEF;
+ format->field = V4L2_FIELD_NONE;
+
+ crop->width = S5K6AA_WIN_WIDTH_MAX;
+ crop->height = S5K6AA_WIN_HEIGHT_MAX;
+ crop->left = 0;
+ crop->top = 0;
+
+ return 0;
+}
+
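+/*
+ * Read and log the firmware API version and revision. The sensor must be
+ * powered on when this function is called.
+ */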
+static int s5k6aa_check_fw_revision(struct s5k6aa *s5k6aa)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&s5k6aa->sd);
+ u16 api_ver = 0, fw_rev = 0;
+
+ int ret = s5k6aa_set_ahb_address(client);
+
+ if (!ret)
+ ret = s5k6aa_read(client, REG_FW_APIVER, &api_ver);
+ if (!ret)
+ ret = s5k6aa_read(client, REG_FW_REVISION, &fw_rev);
+ if (ret) {
+ v4l2_err(&s5k6aa->sd, "FW revision check failed!\n");
+ return ret;
+ }
+
+ v4l2_info(&s5k6aa->sd, "FW API ver.: 0x%X, FW rev.: 0x%X\n",
+ api_ver, fw_rev);
+
+ return api_ver == S5K6AAFX_FW_APIVER ? 0 : -ENODEV;
+}
+
+static int s5k6aa_registered(struct v4l2_subdev *sd)
+{
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+ int ret;
+
+ mutex_lock(&s5k6aa->lock);
+ ret = __s5k6aa_power_on(s5k6aa);
+ if (!ret) {
+ msleep(100);
+ ret = s5k6aa_check_fw_revision(s5k6aa);
+ __s5k6aa_power_off(s5k6aa);
+ }
+ mutex_unlock(&s5k6aa->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_internal_ops s5k6aa_subdev_internal_ops = {
+ .registered = s5k6aa_registered,
+ .open = s5k6aa_open,
+};
+
+static const struct v4l2_subdev_core_ops s5k6aa_core_ops = {
+ .s_power = s5k6aa_set_power,
+ .log_status = s5k6aa_log_status,
+};
+
+static const struct v4l2_subdev_ops s5k6aa_subdev_ops = {
+ .core = &s5k6aa_core_ops,
+ .pad = &s5k6aa_pad_ops,
+ .video = &s5k6aa_video_ops,
+};
+
+/*
+ * GPIO setup
+ */
+static int s5k6aa_configure_gpio(int nr, int val, const char *name)
+{
+ unsigned long flags = val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ int ret;
+
+ if (!gpio_is_valid(nr))
+ return 0;
+ ret = gpio_request_one(nr, flags, name);
+ if (!ret)
+ gpio_export(nr, 0);
+ return ret;
+}
+
+static void s5k6aa_free_gpios(struct s5k6aa *s5k6aa)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5k6aa->gpio); i++) {
+ if (!gpio_is_valid(s5k6aa->gpio[i].gpio))
+ continue;
+ gpio_free(s5k6aa->gpio[i].gpio);
+ s5k6aa->gpio[i].gpio = -EINVAL;
+ }
+}
+
+static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
+ const struct s5k6aa_platform_data *pdata)
+{
+ const struct s5k6aa_gpio *gpio = &pdata->gpio_stby;
+ int ret;
+
+ s5k6aa->gpio[STBY].gpio = -EINVAL;
+ s5k6aa->gpio[RST].gpio = -EINVAL;
+
+ ret = s5k6aa_configure_gpio(gpio->gpio, gpio->level, "S5K6AA_STBY");
+ if (ret) {
+ s5k6aa_free_gpios(s5k6aa);
+ return ret;
+ }
+ s5k6aa->gpio[STBY] = *gpio;
+ if (gpio_is_valid(gpio->gpio))
+ gpio_set_value(gpio->gpio, 0);
+
+ gpio = &pdata->gpio_reset;
+ ret = s5k6aa_configure_gpio(gpio->gpio, gpio->level, "S5K6AA_RST");
+ if (ret) {
+ s5k6aa_free_gpios(s5k6aa);
+ return ret;
+ }
+ s5k6aa->gpio[RST] = *gpio;
+ if (gpio_is_valid(gpio->gpio))
+ gpio_set_value(gpio->gpio, 0);
+
+ return 0;
+}
+
+static int s5k6aa_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct s5k6aa_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_subdev *sd;
+ struct s5k6aa *s5k6aa;
+ int i, ret;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "Platform data not specified\n");
+ return -EINVAL;
+ }
+
+ if (pdata->mclk_frequency == 0) {
+ dev_err(&client->dev, "MCLK frequency not specified\n");
+ return -EINVAL;
+ }
+
+ s5k6aa = kzalloc(sizeof(*s5k6aa), GFP_KERNEL);
+ if (!s5k6aa)
+ return -ENOMEM;
+
+ mutex_init(&s5k6aa->lock);
+
+ s5k6aa->mclk_frequency = pdata->mclk_frequency;
+ s5k6aa->bus_type = pdata->bus_type;
+ s5k6aa->mipi_lanes = pdata->nlanes;
+ s5k6aa->s_power = pdata->set_power;
+ s5k6aa->inv_hflip = pdata->horiz_flip;
+ s5k6aa->inv_vflip = pdata->vert_flip;
+
+ sd = &s5k6aa->sd;
+ strlcpy(sd->name, DRIVER_NAME, sizeof(sd->name));
+ v4l2_i2c_subdev_init(sd, client, &s5k6aa_subdev_ops);
+
+ sd->internal_ops = &s5k6aa_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ s5k6aa->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, 1, &s5k6aa->pad, 0);
+ if (ret)
+ goto out_err1;
+
+ ret = s5k6aa_configure_gpios(s5k6aa, pdata);
+ if (ret)
+ goto out_err2;
+
+ for (i = 0; i < S5K6AA_NUM_SUPPLIES; i++)
+ s5k6aa->supplies[i].supply = s5k6aa_supply_names[i];
+
+ ret = regulator_bulk_get(&client->dev, S5K6AA_NUM_SUPPLIES,
+ s5k6aa->supplies);
+ if (ret) {
+ dev_err(&client->dev, "Failed to get regulators\n");
+ goto out_err3;
+ }
+
+ ret = s5k6aa_initialize_ctrls(s5k6aa);
+ if (ret)
+ goto out_err4;
+
+ s5k6aa_presets_data_init(s5k6aa);
+
+ s5k6aa->ccd_rect.width = S5K6AA_WIN_WIDTH_MAX;
+ s5k6aa->ccd_rect.height = S5K6AA_WIN_HEIGHT_MAX;
+ s5k6aa->ccd_rect.left = 0;
+ s5k6aa->ccd_rect.top = 0;
+
+ return 0;
+
+out_err4:
+ regulator_bulk_free(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
+out_err3:
+ s5k6aa_free_gpios(s5k6aa);
+out_err2:
+ media_entity_cleanup(&s5k6aa->sd.entity);
+out_err1:
+ kfree(s5k6aa);
+ return ret;
+}
+
+static int s5k6aa_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct s5k6aa *s5k6aa = to_s5k6aa(sd);
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+ regulator_bulk_free(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
+ s5k6aa_free_gpios(s5k6aa);
+ kfree(s5k6aa);
+
+ return 0;
+}
+
+static const struct i2c_device_id s5k6aa_id[] = {
+ { DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, s5k6aa_id);
+
+
+static struct i2c_driver s5k6aa_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME
+ },
+ .probe = s5k6aa_probe,
+ .remove = s5k6aa_remove,
+ .id_table = s5k6aa_id,
+};
+
+static int __init s5k6aa_init(void)
+{
+ return i2c_add_driver(&s5k6aa_i2c_driver);
+}
+
+static void __exit s5k6aa_exit(void)
+{
+ i2c_del_driver(&s5k6aa_i2c_driver);
+}
+
+module_init(s5k6aa_init);
+module_exit(s5k6aa_exit);
+
+MODULE_DESCRIPTION("Samsung S5K6AA(FX) SXGA camera driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/s5p-fimc/Makefile b/drivers/media/video/s5p-fimc/Makefile
index df6954ab1d9..33dec7f890e 100644
--- a/drivers/media/video/s5p-fimc/Makefile
+++ b/drivers/media/video/s5p-fimc/Makefile
@@ -1,4 +1,4 @@
-s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-capture.o
+s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-capture.o fimc-mdevice.o
s5p-csis-objs := mipi-csis.o
obj-$(CONFIG_VIDEO_S5P_MIPI_CSIS) += s5p-csis.o
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 0d730e55605..c8d91b0cd9b 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -16,11 +16,9 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
@@ -29,135 +27,88 @@
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
+#include "fimc-mdevice.h"
#include "fimc-core.h"
-static struct v4l2_subdev *fimc_subdev_register(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *isp_info)
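+/*
+ * Program the camera input interface, the scaler and the output DMA for
+ * the current capture context. Requires an attached sensor subdev and a
+ * valid source frame format.
+ */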
+static int fimc_init_capture(struct fimc_dev *fimc)
{
- struct i2c_adapter *i2c_adap;
- struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
- struct v4l2_subdev *sd = NULL;
-
- i2c_adap = i2c_get_adapter(isp_info->i2c_bus_num);
- if (!i2c_adap)
- return ERR_PTR(-ENOMEM);
-
- sd = v4l2_i2c_new_subdev_board(&vid_cap->v4l2_dev, i2c_adap,
- isp_info->board_info, NULL);
- if (!sd) {
- v4l2_err(&vid_cap->v4l2_dev, "failed to acquire subdev\n");
- return NULL;
- }
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_sensor_info *sensor;
+ unsigned long flags;
+ int ret = 0;
- v4l2_info(&vid_cap->v4l2_dev, "subdevice %s registered successfuly\n",
- isp_info->board_info->type);
+ if (fimc->pipeline.sensor == NULL || ctx == NULL)
+ return -ENXIO;
+ if (ctx->s_frame.fmt == NULL)
+ return -EINVAL;
- return sd;
-}
+ sensor = v4l2_get_subdev_hostdata(fimc->pipeline.sensor);
-static void fimc_subdev_unregister(struct fimc_dev *fimc)
-{
- struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
- struct i2c_client *client;
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_set_yuv_order(ctx);
- if (vid_cap->input_index < 0)
- return; /* Subdevice already released or not registered. */
+ fimc_hw_set_camera_polarity(fimc, sensor->pdata);
+ fimc_hw_set_camera_type(fimc, sensor->pdata);
+ fimc_hw_set_camera_source(fimc, sensor->pdata);
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
- if (vid_cap->sd) {
- v4l2_device_unregister_subdev(vid_cap->sd);
- client = v4l2_get_subdevdata(vid_cap->sd);
- i2c_unregister_device(client);
- i2c_put_adapter(client->adapter);
- vid_cap->sd = NULL;
+ ret = fimc_set_scaler_info(ctx);
+ if (!ret) {
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx, false);
+ fimc_hw_set_output_path(ctx);
+ fimc_hw_set_out_dma(ctx);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
-
- vid_cap->input_index = -1;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
}
-/**
- * fimc_subdev_attach - attach v4l2_subdev to camera host interface
- *
- * @fimc: FIMC device information
- * @index: index to the array of available subdevices,
- * -1 for full array search or non negative value
- * to select specific subdevice
- */
-static int fimc_subdev_attach(struct fimc_dev *fimc, int index)
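+/*
+ * Clear the capture state bits and return the queued buffers to videobuf2,
+ * or move the active buffers back to the pending queue when suspending.
+ * Stops the ISP stream if it was running.
+ */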
+static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
{
- struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
- struct s5p_platform_fimc *pdata = fimc->pdata;
- struct s5p_fimc_isp_info *isp_info;
- struct v4l2_subdev *sd;
- int i;
-
- for (i = 0; i < pdata->num_clients; ++i) {
- isp_info = &pdata->isp_info[i];
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *buf;
+ unsigned long flags;
+ bool streaming;
- if (index >= 0 && i != index)
- continue;
+ spin_lock_irqsave(&fimc->slock, flags);
+ streaming = fimc->state & (1 << ST_CAPT_ISP_STREAM);
- sd = fimc_subdev_register(fimc, isp_info);
- if (!IS_ERR_OR_NULL(sd)) {
- vid_cap->sd = sd;
- vid_cap->input_index = i;
+ fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT |
+ 1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM);
+ if (!suspend)
+ fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED);
- return 0;
- }
+ /* Release unused buffers */
+ while (!suspend && !list_empty(&cap->pending_buf_q)) {
+ buf = fimc_pending_queue_pop(cap);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
+ /* If suspending, move the remaining active buffers to the pending queue */
+ while (!list_empty(&cap->active_buf_q)) {
+ buf = fimc_active_queue_pop(cap);
+ if (suspend)
+ fimc_pending_queue_add(cap, buf);
+ else
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
- vid_cap->input_index = -1;
- vid_cap->sd = NULL;
- v4l2_err(&vid_cap->v4l2_dev, "fimc%d: sensor attach failed\n",
- fimc->id);
- return -ENODEV;
-}
-
-static int fimc_isp_subdev_init(struct fimc_dev *fimc, unsigned int index)
-{
- struct s5p_fimc_isp_info *isp_info;
- struct s5p_platform_fimc *pdata = fimc->pdata;
- int ret;
-
- if (index >= pdata->num_clients)
- return -EINVAL;
-
- isp_info = &pdata->isp_info[index];
-
- if (isp_info->clk_frequency)
- clk_set_rate(fimc->clock[CLK_CAM], isp_info->clk_frequency);
-
- ret = clk_enable(fimc->clock[CLK_CAM]);
- if (ret)
- return ret;
-
- ret = fimc_subdev_attach(fimc, index);
- if (ret)
- return ret;
-
- ret = fimc_hw_set_camera_polarity(fimc, isp_info);
- if (ret)
- return ret;
-
- ret = v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 1);
- if (!ret)
- return ret;
-
- /* enabling power failed so unregister subdev */
- fimc_subdev_unregister(fimc);
-
- v4l2_err(&fimc->vid_cap.v4l2_dev, "ISP initialization failed: %d\n",
- ret);
-
- return ret;
+ if (streaming)
+ return fimc_pipeline_s_stream(fimc, 0);
+ else
+ return 0;
}
-static int fimc_stop_capture(struct fimc_dev *fimc)
+static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
{
unsigned long flags;
- struct fimc_vid_cap *cap;
- struct fimc_vid_buffer *buf;
-
- cap = &fimc->vid_cap;
if (!fimc_capture_active(fimc))
return 0;
@@ -169,81 +120,73 @@ static int fimc_stop_capture(struct fimc_dev *fimc)
wait_event_timeout(fimc->irq_queue,
!test_bit(ST_CAPT_SHUT, &fimc->state),
- FIMC_SHUTDOWN_TIMEOUT);
-
- v4l2_subdev_call(cap->sd, video, s_stream, 0);
+ (2*HZ/10)); /* 200 ms */
- spin_lock_irqsave(&fimc->slock, flags);
- fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_PEND |
- 1 << ST_CAPT_SHUT | 1 << ST_CAPT_STREAM);
+ return fimc_capture_state_cleanup(fimc, suspend);
+}
- fimc->vid_cap.active_buf_cnt = 0;
+/**
+ * fimc_capture_config_update - apply the camera interface configuration
+ *
+ * To be called from within the interrupt handler with fimc.slock
+ * spinlock held. It updates the camera pixel crop, rotation and
+ * image flip in H/W.
+ */
+int fimc_capture_config_update(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
- /* Release buffers that were enqueued in the driver by videobuf2. */
- while (!list_empty(&cap->pending_buf_q)) {
- buf = pending_queue_pop(cap);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
- }
+ if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ return 0;
- while (!list_empty(&cap->active_buf_q)) {
- buf = active_queue_pop(cap);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ spin_lock(&ctx->slock);
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+ ret = fimc_set_scaler_info(ctx);
+ if (ret == 0) {
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_hw_set_out_dma(ctx);
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
-
- spin_unlock_irqrestore(&fimc->slock, flags);
-
- dbg("state: 0x%lx", fimc->state);
- return 0;
+ spin_unlock(&ctx->slock);
+ return ret;
}
-static int start_streaming(struct vb2_queue *q)
+static int start_streaming(struct vb2_queue *q, unsigned int count)
{
struct fimc_ctx *ctx = q->drv_priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct s5p_fimc_isp_info *isp_info;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ int min_bufs;
int ret;
fimc_hw_reset(fimc);
+ vid_cap->frame_count = 0;
- ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_stream, 1);
- if (ret && ret != -ENOIOCTLCMD)
- return ret;
-
- ret = fimc_prepare_config(ctx, ctx->state);
+ ret = fimc_init_capture(fimc);
if (ret)
- return ret;
-
- isp_info = &fimc->pdata->isp_info[fimc->vid_cap.input_index];
- fimc_hw_set_camera_type(fimc, isp_info);
- fimc_hw_set_camera_source(fimc, isp_info);
- fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+ goto error;
- if (ctx->state & FIMC_PARAMS) {
- ret = fimc_set_scaler_info(ctx);
- if (ret) {
- err("Scaler setup error");
- return ret;
- }
- fimc_hw_set_input_path(ctx);
- fimc_hw_set_prescaler(ctx);
- fimc_hw_set_mainscaler(ctx);
- fimc_hw_set_target_format(ctx);
- fimc_hw_set_rotation(ctx);
- fimc_hw_set_effect(ctx);
- }
+ set_bit(ST_CAPT_PEND, &fimc->state);
- fimc_hw_set_output_path(ctx);
- fimc_hw_set_out_dma(ctx);
+ min_bufs = fimc->vid_cap.reqbufs_count > 1 ? 2 : 1;
- INIT_LIST_HEAD(&fimc->vid_cap.pending_buf_q);
- INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
- fimc->vid_cap.active_buf_cnt = 0;
- fimc->vid_cap.frame_count = 0;
- fimc->vid_cap.buf_index = 0;
+ if (vid_cap->active_buf_cnt >= min_bufs &&
+ !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+ fimc_activate_capture(ctx);
- set_bit(ST_CAPT_PEND, &fimc->state);
+ if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+ fimc_pipeline_s_stream(fimc, 1);
+ }
return 0;
+error:
+ fimc_capture_state_cleanup(fimc, false);
+ return ret;
}
static int stop_streaming(struct vb2_queue *q)
@@ -254,7 +197,46 @@ static int stop_streaming(struct vb2_queue *q)
if (!fimc_capture_active(fimc))
return -EINVAL;
- return fimc_stop_capture(fimc);
+ return fimc_stop_capture(fimc, false);
+}
+
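+/*
+ * Stop capture, preserving the buffer queues when the hardware is busy,
+ * and power down the whole pipeline.
+ */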
+int fimc_capture_suspend(struct fimc_dev *fimc)
+{
+ bool suspend = fimc_capture_busy(fimc);
+
+ int ret = fimc_stop_capture(fimc, suspend);
+ if (ret)
+ return ret;
+ return fimc_pipeline_shutdown(fimc);
+}
+
+static void buffer_queue(struct vb2_buffer *vb);
+
+int fimc_capture_resume(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *buf;
+ int i;
+
+ if (!test_and_clear_bit(ST_CAPT_SUSPENDED, &fimc->state))
+ return 0;
+
+ INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
+ vid_cap->buf_index = 0;
+ fimc_pipeline_initialize(fimc, &fimc->vid_cap.vfd->entity,
+ false);
+ fimc_init_capture(fimc);
+
+ clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ for (i = 0; i < vid_cap->reqbufs_count; i++) {
+ if (list_empty(&vid_cap->pending_buf_q))
+ break;
+ buf = fimc_pending_queue_pop(vid_cap);
+ buffer_queue(&buf->vb);
+ }
+ return 0;
+}
static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
@@ -264,9 +246,9 @@ static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
}
-static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
- unsigned int *num_planes, unsigned long sizes[],
- void *allocators[])
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
{
struct fimc_ctx *ctx = vq->drv_priv;
struct fimc_fmt *fmt = ctx->d_frame.fmt;
@@ -289,21 +271,20 @@ static int buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct fimc_ctx *ctx = vq->drv_priv;
- struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
int i;
- if (!ctx->d_frame.fmt || vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (ctx->d_frame.fmt == NULL)
return -EINVAL;
for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) {
- unsigned long size = get_plane_size(&ctx->d_frame, i);
+ unsigned long size = ctx->d_frame.payload[i];
if (vb2_plane_size(vb, i) < size) {
- v4l2_err(v4l2_dev, "User buffer too small (%ld < %ld)\n",
+ v4l2_err(ctx->fimc_dev->vid_cap.vfd,
+ "User buffer too small (%ld < %ld)\n",
vb2_plane_size(vb, i), size);
return -EINVAL;
}
-
vb2_set_plane_payload(vb, i, size);
}
@@ -312,10 +293,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_vid_buffer *buf
= container_of(vb, struct fimc_vid_buffer, vb);
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
unsigned long flags;
int min_bufs;
@@ -323,15 +304,16 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&fimc->slock, flags);
fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr);
- if (!test_bit(ST_CAPT_STREAM, &fimc->state)
- && vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) {
+ if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
+ !test_bit(ST_CAPT_STREAM, &fimc->state) &&
+ vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) {
/* Setup the buffer directly for processing. */
int buf_id = (vid_cap->reqbufs_count == 1) ? -1 :
vid_cap->buf_index;
fimc_hw_set_output_addr(fimc, &buf->paddr, buf_id);
buf->index = vid_cap->buf_index;
- active_queue_add(vid_cap, buf);
+ fimc_active_queue_add(vid_cap, buf);
if (++vid_cap->buf_index >= FIMC_MAX_OUT_BUFS)
vid_cap->buf_index = 0;
@@ -341,10 +323,17 @@ static void buffer_queue(struct vb2_buffer *vb)
min_bufs = vid_cap->reqbufs_count > 1 ? 2 : 1;
- if (vid_cap->active_buf_cnt >= min_bufs &&
- !test_and_set_bit(ST_CAPT_STREAM, &fimc->state))
+
+ if (vb2_is_streaming(&vid_cap->vbq) &&
+ vid_cap->active_buf_cnt >= min_bufs &&
+ !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
fimc_activate_capture(ctx);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+ fimc_pipeline_s_stream(fimc, 1);
+ return;
+ }
spin_unlock_irqrestore(&fimc->slock, flags);
}
@@ -370,10 +359,40 @@ static struct vb2_ops fimc_capture_qops = {
.stop_streaming = stop_streaming,
};
+/**
+ * fimc_capture_ctrls_create - initialize the control handler
+ * Initialize the capture video node control handler and fill it
+ * with the FIMC controls. Inherit any sensor's controls if the
+ * 'user_subdev_api' flag is false (default behaviour).
+ * This function needs to be called with the graph mutex held.
+ */
+int fimc_capture_ctrls_create(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ int ret;
+
+ if (WARN_ON(vid_cap->ctx == NULL))
+ return -ENXIO;
+ if (vid_cap->ctx->ctrls_rdy)
+ return 0;
+
+ ret = fimc_ctrls_create(vid_cap->ctx);
+ if (ret || vid_cap->user_subdev_api)
+ return ret;
+
+ return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrl_handler,
+ fimc->pipeline.sensor->ctrl_handler);
+}
+
+static int fimc_capture_set_default_format(struct fimc_dev *fimc);
+
static int fimc_capture_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
- int ret = 0;
+ int ret = v4l2_fh_open(file);
+
+ if (ret)
+ return ret;
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
@@ -381,17 +400,27 @@ static int fimc_capture_open(struct file *file)
if (fimc_m2m_active(fimc))
return -EBUSY;
+ set_bit(ST_CAPT_BUSY, &fimc->state);
+ pm_runtime_get_sync(&fimc->pdev->dev);
+
if (++fimc->vid_cap.refcnt == 1) {
- ret = fimc_isp_subdev_init(fimc, 0);
- if (ret) {
+ ret = fimc_pipeline_initialize(fimc,
+ &fimc->vid_cap.vfd->entity, true);
+ if (ret < 0) {
+ dev_err(&fimc->pdev->dev,
+ "Video pipeline initialization failed\n");
+ pm_runtime_put_sync(&fimc->pdev->dev);
fimc->vid_cap.refcnt--;
- return -EIO;
+ v4l2_fh_release(file);
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ return ret;
}
- }
-
- file->private_data = fimc->vid_cap.ctx;
+ ret = fimc_capture_ctrls_create(fimc);
- return 0;
+ if (!ret && !fimc->vid_cap.user_subdev_api)
+ ret = fimc_capture_set_default_format(fimc);
+ }
+ return ret;
}
static int fimc_capture_close(struct file *file)
@@ -401,37 +430,36 @@ static int fimc_capture_close(struct file *file)
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
if (--fimc->vid_cap.refcnt == 0) {
- fimc_stop_capture(fimc);
- vb2_queue_release(&fimc->vid_cap.vbq);
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ fimc_stop_capture(fimc, false);
+ fimc_pipeline_shutdown(fimc);
+ clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+ }
- v4l2_err(&fimc->vid_cap.v4l2_dev, "releasing ISP\n");
+ pm_runtime_put(&fimc->pdev->dev);
- v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
- clk_disable(fimc->clock[CLK_CAM]);
- fimc_subdev_unregister(fimc);
+ if (fimc->vid_cap.refcnt == 0) {
+ vb2_queue_release(&fimc->vid_cap.vbq);
+ fimc_ctrls_delete(fimc->vid_cap.ctx);
}
-
- return 0;
+ return v4l2_fh_release(file);
}
static unsigned int fimc_capture_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct fimc_ctx *ctx = file->private_data;
- struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_dev *fimc = video_drvdata(file);
return vb2_poll(&fimc->vid_cap.vbq, file, wait);
}
static int fimc_capture_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct fimc_ctx *ctx = file->private_data;
- struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_dev *fimc = video_drvdata(file);
return vb2_mmap(&fimc->vid_cap.vbq, vma);
}
-/* video device file operations */
static const struct v4l2_file_operations fimc_capture_fops = {
.owner = THIS_MODULE,
.open = fimc_capture_open,
@@ -441,263 +469,553 @@ static const struct v4l2_file_operations fimc_capture_fops = {
.mmap = fimc_capture_mmap,
};
+/*
+ * Format and crop negotiation helpers
+ */
+
+static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
+ u32 *width, u32 *height,
+ u32 *code, u32 *fourcc, int pad)
+{
+ bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct samsung_fimc_variant *var = fimc->variant;
+ struct fimc_pix_limit *pl = var->pix_limit;
+ struct fimc_frame *dst = &ctx->d_frame;
+ u32 depth, min_w, max_w, min_h, align_h = 3;
+ u32 mask = FMT_FLAGS_CAM;
+ struct fimc_fmt *ffmt;
+
+ /* Color conversion from/to JPEG is not supported */
+ if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE &&
+ fimc_fmt_is_jpeg(ctx->s_frame.fmt->color))
+ *code = V4L2_MBUS_FMT_JPEG_1X8;
+
+ if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad != FIMC_SD_PAD_SINK)
+ mask |= FMT_FLAGS_M2M;
+
+ ffmt = fimc_find_format(fourcc, code, mask, 0);
+ if (WARN_ON(!ffmt))
+ return NULL;
+ if (code)
+ *code = ffmt->mbus_code;
+ if (fourcc)
+ *fourcc = ffmt->fourcc;
+
+ if (pad == FIMC_SD_PAD_SINK) {
+ max_w = fimc_fmt_is_jpeg(ffmt->color) ?
+ pl->scaler_dis_w : pl->scaler_en_w;
+ /* Apply the camera input interface pixel constraints */
+ v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4,
+ height, max_t(u32, *height, 32),
+ FIMC_CAMIF_MAX_HEIGHT,
+ fimc_fmt_is_jpeg(ffmt->color) ? 3 : 1,
+ 0);
+ return ffmt;
+ }
+ /* Can't scale or crop in transparent (JPEG) transfer mode */
+ if (fimc_fmt_is_jpeg(ffmt->color)) {
+ *width = ctx->s_frame.f_width;
+ *height = ctx->s_frame.f_height;
+ return ffmt;
+ }
+ /* Apply the scaler and the output DMA constraints */
+ max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
+ min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
+ min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
+ if (fimc->id == 1 && var->pix_hoff)
+ align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
+
+ depth = fimc_get_format_depth(ffmt);
+ v4l_bound_align_image(width, min_w, max_w,
+ ffs(var->min_out_pixsize) - 1,
+ height, min_h, FIMC_CAMIF_MAX_HEIGHT,
+ align_h,
+ 64/(ALIGN(depth, 8)));
+
+ dbg("pad%d: code: 0x%x, %dx%d. dst fmt: %dx%d",
+ pad, code ? *code : 0, *width, *height,
+ dst->f_width, dst->f_height);
+
+ return ffmt;
+}
+
+static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
+ int pad)
+{
+ bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct samsung_fimc_variant *var = fimc->variant;
+ struct fimc_pix_limit *pl = var->pix_limit;
+ struct fimc_frame *sink = &ctx->s_frame;
+ u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
+ u32 align_sz = 0, align_h = 4;
+ u32 max_sc_h, max_sc_v;
+
+ /* In JPEG transparent transfer mode cropping is not supported */
+ if (fimc_fmt_is_jpeg(ctx->d_frame.fmt->color)) {
+ r->width = sink->f_width;
+ r->height = sink->f_height;
+ r->left = r->top = 0;
+ return;
+ }
+ if (pad == FIMC_SD_PAD_SOURCE) {
+ if (ctx->rotation != 90 && ctx->rotation != 270)
+ align_h = 1;
+ max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3));
+ max_sc_v = min(SCALER_MAX_VRATIO, 1 << (ffs(sink->height) - 1));
+ min_sz = var->min_out_pixsize;
+ } else {
+ u32 depth = fimc_get_format_depth(sink->fmt);
+ align_sz = 64/ALIGN(depth, 8);
+ min_sz = var->min_inp_pixsize;
+ min_w = min_h = min_sz;
+ max_sc_h = max_sc_v = 1;
+ }
+ /*
+ * For the crop rectangle at source pad the following constraints
+ * must be met:
+ * - it must fit in the sink pad format rectangle (f_width/f_height);
+ * - maximum downscaling ratio is 64;
+ * - maximum crop size depends if the rotator is used or not;
+ * - the sink pad format width/height must be a multiple of 4 times the
+ * prescaler ratio determined by the sink pad size and the source pad
+ * crop; the prescaler ratio is returned by fimc_get_scaler_factor().
+ */
+ max_w = min_t(u32,
+ rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
+ rotate ? sink->f_height : sink->f_width);
+ max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
+ if (pad == FIMC_SD_PAD_SOURCE) {
+ min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
+ min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
+ if (rotate) {
+ swap(max_sc_h, max_sc_v);
+ swap(min_w, min_h);
+ }
+ }
+ v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1,
+ &r->height, min_h, max_h, align_h,
+ align_sz);
+ /* Adjust left/top if cropping rectangle is out of bounds */
+ r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width);
+ r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height);
+ r->left = round_down(r->left, var->hor_offs_align);
+
+ dbg("pad%d: (%d,%d)/%dx%d, sink fmt: %dx%d",
+ pad, r->left, r->top, r->width, r->height,
+ sink->f_width, sink->f_height);
+}
+
+/*
+ * The video node ioctl operations
+ */
static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct fimc_ctx *ctx = file->private_data;
- struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_dev *fimc = video_drvdata(file);
strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
return 0;
}
-/* Synchronize formats of the camera interface input and attached sensor. */
-static int sync_capture_fmt(struct fimc_ctx *ctx)
+static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M,
+ f->index);
+ if (!fmt)
+ return -EINVAL;
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ if (fmt->fourcc == V4L2_PIX_FMT_JPEG)
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+ return 0;
+}
+
+/**
+ * fimc_pipeline_try_format - negotiate and/or set formats at pipeline
+ * elements
+ * @ctx: FIMC capture context
+ * @tfmt: media bus format to try/set on subdevs
+ * @fmt_id: fimc pixel format id corresponding to returned @tfmt (output)
+ * @set: true to set format on subdevs, false to try only
+ */
+static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
+ struct v4l2_mbus_framefmt *tfmt,
+ struct fimc_fmt **fmt_id,
+ bool set)
{
- struct fimc_frame *frame = &ctx->s_frame;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_mbus_framefmt *fmt = &fimc->vid_cap.fmt;
- int ret;
+ struct v4l2_subdev *sd = fimc->pipeline.sensor;
+ struct v4l2_subdev *csis = fimc->pipeline.csis;
+ struct v4l2_subdev_format sfmt;
+ struct v4l2_mbus_framefmt *mf = &sfmt.format;
+ struct fimc_fmt *ffmt = NULL;
+ int ret, i = 0;
+
+ if (WARN_ON(!sd || !tfmt))
+ return -EINVAL;
- fmt->width = ctx->d_frame.o_width;
- fmt->height = ctx->d_frame.o_height;
+ memset(&sfmt, 0, sizeof(sfmt));
+ sfmt.format = *tfmt;
+
+ sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY;
+ while (1) {
+ ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL,
+ FMT_FLAGS_CAM, i++);
+ if (ffmt == NULL) {
+ /*
+ * There is no common media bus pixel code for the
+ * host and the sensor, return an error to user space.
+ */
+ return -EINVAL;
+ }
+ mf->code = tfmt->code = ffmt->mbus_code;
- ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_mbus_fmt, fmt);
- if (ret == -ENOIOCTLCMD) {
- err("s_mbus_fmt failed");
- return ret;
- }
- dbg("w: %d, h: %d, code= %d", fmt->width, fmt->height, fmt->code);
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
+ if (ret)
+ return ret;
+ if (mf->code != tfmt->code) {
+ mf->code = 0;
+ continue;
+ }
+ if (mf->width != tfmt->width || mf->height != tfmt->height) {
+ u32 fcc = ffmt->fourcc;
+ tfmt->width = mf->width;
+ tfmt->height = mf->height;
+ ffmt = fimc_capture_try_format(ctx,
+ &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SOURCE);
+ if (ffmt && ffmt->mbus_code)
+ mf->code = ffmt->mbus_code;
+ if (mf->width != tfmt->width || mf->height != tfmt->height)
+ continue;
+ tfmt->code = mf->code;
+ }
+ if (csis)
+ ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
- frame->fmt = find_mbus_format(fmt, FMT_FLAGS_CAM);
- if (!frame->fmt) {
- err("fimc source format not found\n");
- return -EINVAL;
+ if (mf->code == tfmt->code &&
+ mf->width == tfmt->width && mf->height == tfmt->height)
+ break;
}
- frame->f_width = fmt->width;
- frame->f_height = fmt->height;
- frame->width = fmt->width;
- frame->height = fmt->height;
- frame->o_width = fmt->width;
- frame->o_height = fmt->height;
- frame->offs_h = 0;
- frame->offs_v = 0;
+ if (fmt_id && ffmt)
+ *fmt_id = ffmt;
+ *tfmt = *mf;
+ dbg("code: 0x%x, %dx%d, %p", mf->code, mf->width, mf->height, ffmt);
return 0;
}
-static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
+static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_frame *frame;
- struct v4l2_pix_format_mplane *pix;
- int ret;
- int i;
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- ret = fimc_vidioc_try_fmt_mplane(file, priv, f);
- if (ret)
- return ret;
-
- if (vb2_is_busy(&fimc->vid_cap.vbq) || fimc_capture_active(fimc))
- return -EBUSY;
+ return fimc_fill_format(&ctx->d_frame, f);
+}
- frame = &ctx->d_frame;
+static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct v4l2_mbus_framefmt mf;
+ struct fimc_fmt *ffmt = NULL;
- pix = &f->fmt.pix_mp;
- frame->fmt = find_format(f, FMT_FLAGS_M2M | FMT_FLAGS_CAM);
- if (!frame->fmt) {
- err("fimc target format not found\n");
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
+
+ if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
+ fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SINK);
+ ctx->s_frame.f_width = pix->width;
+ ctx->s_frame.f_height = pix->height;
}
+ ffmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SOURCE);
+ if (!ffmt)
+ return -EINVAL;
- for (i = 0; i < frame->fmt->colplanes; i++) {
- frame->payload[i] =
- (pix->width * pix->height * frame->fmt->depth[i]) >> 3;
+ if (!fimc->vid_cap.user_subdev_api) {
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.code = ffmt->mbus_code;
+ fimc_md_graph_lock(fimc);
+ fimc_pipeline_try_format(ctx, &mf, &ffmt, false);
+ fimc_md_graph_unlock(fimc);
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ if (ffmt)
+ pix->pixelformat = ffmt->fourcc;
}
- /* Output DMA frame pixel size and offsets. */
- frame->f_width = pix->plane_fmt[0].bytesperline * 8
- / frame->fmt->depth[0];
- frame->f_height = pix->height;
- frame->width = pix->width;
- frame->height = pix->height;
- frame->o_width = pix->width;
- frame->o_height = pix->height;
- frame->offs_h = 0;
- frame->offs_v = 0;
+ fimc_adjust_mplane_format(ffmt, pix->width, pix->height, pix);
+ return 0;
+}
- ctx->state |= (FIMC_PARAMS | FIMC_DST_FMT);
+static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx, bool jpeg)
+{
+ ctx->scaler.enabled = !jpeg;
+ fimc_ctrls_activate(ctx, !jpeg);
- ret = sync_capture_fmt(ctx);
- return ret;
+ if (jpeg)
+ set_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
+ else
+ clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
}
-static int fimc_cap_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
+static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
{
- struct fimc_ctx *ctx = priv;
- struct s5p_platform_fimc *pldata = ctx->fimc_dev->pdata;
- struct s5p_fimc_isp_info *isp_info;
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_mbus_framefmt *mf = &fimc->vid_cap.mf;
+ struct fimc_frame *ff = &ctx->d_frame;
+ struct fimc_fmt *s_fmt = NULL;
+ int ret, i;
- if (i->index >= pldata->num_clients)
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
+ if (vb2_is_busy(&fimc->vid_cap.vbq))
+ return -EBUSY;
- isp_info = &pldata->isp_info[i->index];
+ /* Pre-configure format at camera interface input, for JPEG only */
+ if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
+ fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SINK);
+ ctx->s_frame.f_width = pix->width;
+ ctx->s_frame.f_height = pix->height;
+ }
+ /* Try the format at the scaler and the DMA output */
+ ff->fmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SOURCE);
+ if (!ff->fmt)
+ return -EINVAL;
+ /* Try to match format at the host and the sensor */
+ if (!fimc->vid_cap.user_subdev_api) {
+ mf->code = ff->fmt->mbus_code;
+ mf->width = pix->width;
+ mf->height = pix->height;
+
+ fimc_md_graph_lock(fimc);
+ ret = fimc_pipeline_try_format(ctx, mf, &s_fmt, true);
+ fimc_md_graph_unlock(fimc);
+ if (ret)
+ return ret;
+ pix->width = mf->width;
+ pix->height = mf->height;
+ }
+ fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
+ for (i = 0; i < ff->fmt->colplanes; i++)
+ ff->payload[i] =
+ (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+
+ set_frame_bounds(ff, pix->width, pix->height);
+ /* Reset the composition rectangle if not yet configured */
+ if (!(ctx->state & FIMC_DST_CROP))
+ set_frame_crop(ff, 0, 0, pix->width, pix->height);
+
+ fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ff->fmt->color));
+
+ /* Reset cropping and set format at the camera interface input */
+ if (!fimc->vid_cap.user_subdev_api) {
+ ctx->s_frame.fmt = s_fmt;
+ set_frame_bounds(&ctx->s_frame, pix->width, pix->height);
+ set_frame_crop(&ctx->s_frame, 0, 0, pix->width, pix->height);
+ }
- i->type = V4L2_INPUT_TYPE_CAMERA;
- strncpy(i->name, isp_info->board_info->type, 32);
- return 0;
+ return 0;
}
-static int fimc_cap_s_input(struct file *file, void *priv,
- unsigned int i)
+static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct s5p_platform_fimc *pdata = fimc->pdata;
-
- if (fimc_capture_active(ctx->fimc_dev))
- return -EBUSY;
+ struct fimc_dev *fimc = video_drvdata(file);
- if (i >= pdata->num_clients)
- return -EINVAL;
+ return fimc_capture_set_format(fimc, f);
+}
+static int fimc_cap_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct v4l2_subdev *sd = fimc->pipeline.sensor;
- if (fimc->vid_cap.sd) {
- int ret = v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
- if (ret)
- err("s_power failed: %d", ret);
+ if (i->index != 0)
+ return -EINVAL;
- clk_disable(fimc->clock[CLK_CAM]);
- }
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ if (sd)
+ strlcpy(i->name, sd->name, sizeof(i->name));
+ return 0;
+}
- /* Release the attached sensor subdevice. */
- fimc_subdev_unregister(fimc);
+static int fimc_cap_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return i == 0 ? i : -EINVAL;
+}
- return fimc_isp_subdev_init(fimc, i);
+static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
}
-static int fimc_cap_g_input(struct file *file, void *priv,
- unsigned int *i)
+/**
+ * fimc_pipeline_validate - check for format inconsistencies
+ * between the source and sink pads of each link
+ *
+ * Return 0 if all formats match or -EPIPE otherwise.
+ */
+static int fimc_pipeline_validate(struct fimc_dev *fimc)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ int ret;
- *i = cap->input_index;
+ /* Start with the video capture node pad */
+ pad = media_entity_remote_source(&vid_cap->vd_pad);
+ if (pad == NULL)
+ return -EPIPE;
+ /* FIMC.{N} subdevice */
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ /* Don't call FIMC subdev operation to avoid nested locking */
+ if (sd == fimc->vid_cap.subdev) {
+ struct fimc_frame *ff = &vid_cap->ctx->s_frame;
+ sink_fmt.format.width = ff->f_width;
+ sink_fmt.format.height = ff->f_height;
+ sink_fmt.format.code = ff->fmt ? ff->fmt->mbus_code : 0;
+ } else {
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+ }
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
return 0;
}
static int fimc_cap_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_pipeline *p = &fimc->pipeline;
+ int ret;
- if (fimc_capture_active(fimc) || !fimc->vid_cap.sd)
+ if (fimc_capture_active(fimc))
return -EBUSY;
- if (!(ctx->state & FIMC_DST_FMT)) {
- v4l2_err(&fimc->vid_cap.v4l2_dev, "Format is not set\n");
- return -EINVAL;
- }
+ media_entity_pipeline_start(&p->sensor->entity, p->pipe);
+ if (fimc->vid_cap.user_subdev_api) {
+ ret = fimc_pipeline_validate(fimc);
+ if (ret) {
+ media_entity_pipeline_stop(&p->sensor->entity);
+ return ret;
+ }
+ }
return vb2_streamon(&fimc->vid_cap.vbq, type);
}
static int fimc_cap_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct v4l2_subdev *sd = fimc->pipeline.sensor;
+ int ret;
- return vb2_streamoff(&fimc->vid_cap.vbq, type);
+ ret = vb2_streamoff(&fimc->vid_cap.vbq, type);
+ if (ret == 0)
+ media_entity_pipeline_stop(&sd->entity);
+ return ret;
}
static int fimc_cap_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
- int ret;
-
+ struct fimc_dev *fimc = video_drvdata(file);
+ int ret = vb2_reqbufs(&fimc->vid_cap.vbq, reqbufs);
- ret = vb2_reqbufs(&cap->vbq, reqbufs);
if (!ret)
- cap->reqbufs_count = reqbufs->count;
-
+ fimc->vid_cap.reqbufs_count = reqbufs->count;
return ret;
}
static int fimc_cap_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
+ struct fimc_dev *fimc = video_drvdata(file);
- return vb2_querybuf(&cap->vbq, buf);
+ return vb2_querybuf(&fimc->vid_cap.vbq, buf);
}
static int fimc_cap_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
- return vb2_qbuf(&cap->vbq, buf);
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ return vb2_qbuf(&fimc->vid_cap.vbq, buf);
}
static int fimc_cap_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct fimc_ctx *ctx = priv;
- return vb2_dqbuf(&ctx->fimc_dev->vid_cap.vbq, buf,
- file->f_flags & O_NONBLOCK);
-}
-
-static int fimc_cap_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct fimc_ctx *ctx = priv;
- int ret = -EINVAL;
+ struct fimc_dev *fimc = video_drvdata(file);
- /* Allow any controls but 90/270 rotation while streaming */
- if (!fimc_capture_active(ctx->fimc_dev) ||
- ctrl->id != V4L2_CID_ROTATE ||
- (ctrl->value != 90 && ctrl->value != 270)) {
- ret = check_ctrl_val(ctx, ctrl);
- if (!ret) {
- ret = fimc_s_ctrl(ctx, ctrl);
- if (!ret)
- ctx->state |= FIMC_PARAMS;
- }
- }
- if (ret == -EINVAL)
- ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
- core, s_ctrl, ctrl);
- return ret;
+ return vb2_dqbuf(&fimc->vid_cap.vbq, buf, file->f_flags & O_NONBLOCK);
}
static int fimc_cap_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cr)
{
- struct fimc_frame *f;
- struct fimc_ctx *ctx = fh;
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
if (cr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- f = &ctx->s_frame;
-
cr->bounds.left = 0;
cr->bounds.top = 0;
cr->bounds.width = f->o_width;
@@ -709,10 +1027,8 @@ static int fimc_cap_cropcap(struct file *file, void *fh,
static int fimc_cap_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
- struct fimc_frame *f;
- struct fimc_ctx *ctx = file->private_data;
-
- f = &ctx->s_frame;
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
cr->c.left = f->offs_h;
cr->c.top = f->offs_v;
@@ -722,53 +1038,31 @@ static int fimc_cap_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
return 0;
}
-static int fimc_cap_s_crop(struct file *file, void *fh,
- struct v4l2_crop *cr)
+static int fimc_cap_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
- struct fimc_frame *f;
- struct fimc_ctx *ctx = file->private_data;
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret = -EINVAL;
-
- if (fimc_capture_active(fimc))
- return -EBUSY;
-
- ret = fimc_try_crop(ctx, cr);
- if (ret)
- return ret;
-
- if (!(ctx->state & FIMC_DST_FMT)) {
- v4l2_err(&fimc->vid_cap.v4l2_dev,
- "Capture color format not set\n");
- return -EINVAL; /* TODO: make sure this is the right value */
- }
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *ff;
+ unsigned long flags;
- f = &ctx->s_frame;
- /* Check for the pixel scaling ratio when cropping input image. */
- ret = fimc_check_scaler_ratio(cr->c.width, cr->c.height,
- ctx->d_frame.width, ctx->d_frame.height,
- ctx->rotation);
- if (ret) {
- v4l2_err(&fimc->vid_cap.v4l2_dev, "Out of the scaler range\n");
- return ret;
- }
+ fimc_capture_try_crop(ctx, &cr->c, FIMC_SD_PAD_SINK);
+ ff = &ctx->s_frame;
- f->offs_h = cr->c.left;
- f->offs_v = cr->c.top;
- f->width = cr->c.width;
- f->height = cr->c.height;
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(ff, cr->c.left, cr->c.top, cr->c.width, cr->c.height);
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
return 0;
}
-
static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_querycap = fimc_vidioc_querycap_capture,
- .vidioc_enum_fmt_vid_cap_mplane = fimc_vidioc_enum_fmt_mplane,
- .vidioc_try_fmt_vid_cap_mplane = fimc_vidioc_try_fmt_mplane,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_cap_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_cap_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = fimc_cap_s_fmt_mplane,
- .vidioc_g_fmt_vid_cap_mplane = fimc_vidioc_g_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_cap_g_fmt_mplane,
.vidioc_reqbufs = fimc_cap_reqbufs,
.vidioc_querybuf = fimc_cap_querybuf,
@@ -779,10 +1073,6 @@ static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_streamon = fimc_cap_streamon,
.vidioc_streamoff = fimc_cap_streamoff,
- .vidioc_queryctrl = fimc_vidioc_queryctrl,
- .vidioc_g_ctrl = fimc_vidioc_g_ctrl,
- .vidioc_s_ctrl = fimc_cap_s_ctrl,
-
.vidioc_g_crop = fimc_cap_g_crop,
.vidioc_s_crop = fimc_cap_s_crop,
.vidioc_cropcap = fimc_cap_cropcap,
@@ -792,17 +1082,328 @@ static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_g_input = fimc_cap_g_input,
};
+/* Capture subdev media entity operations */
+static int fimc_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+
+ if (media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EINVAL;
+
+ if (WARN_ON(fimc == NULL))
+ return 0;
+
+ dbg("%s --> %s, flags: 0x%x. input: 0x%x",
+ local->entity->name, remote->entity->name, flags,
+ fimc->vid_cap.input);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (fimc->vid_cap.input != 0)
+ return -EBUSY;
+ fimc->vid_cap.input = sd->grp_id;
+ return 0;
+ }
+
+ fimc->vid_cap.input = 0;
+ return 0;
+}
+
+static const struct media_entity_operations fimc_sd_media_ops = {
+ .link_setup = fimc_link_setup,
+};
+
+/**
+ * fimc_sensor_notify - v4l2_device notification from a sensor subdev
+ * @sd: pointer to a subdev generating the notification
+ * @notification: the notification type, must be S5P_FIMC_TX_END_NOTIFY
+ * @arg: pointer to a u32 holding the frame payload (length) value
+ *
+ * The End Of Frame notification is sent by the sensor subdev in its still
+ * capture mode. If the sensor generates only a single VSYNC at the beginning
+ * of a frame transmission, FIMC does not issue the LastIrq (end of frame)
+ * interrupt, and this notification is used instead to complete the frame
+ * capture and return a buffer to user space. Sensor subdev drivers should
+ * send this notification from their last 'end of frame capture' interrupt.
+ */
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct fimc_sensor_info *sensor;
+ struct fimc_vid_buffer *buf;
+ struct fimc_md *fmd;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+
+ if (sd == NULL)
+ return;
+
+ sensor = v4l2_get_subdev_hostdata(sd);
+ fmd = entity_to_fimc_mdev(&sd->entity);
+
+ spin_lock_irqsave(&fmd->slock, flags);
+ fimc = sensor ? sensor->host : NULL;
+
+ if (fimc && arg && notification == S5P_FIMC_TX_END_NOTIFY &&
+ test_bit(ST_CAPT_PEND, &fimc->state)) {
+ unsigned long irq_flags;
+ spin_lock_irqsave(&fimc->slock, irq_flags);
+ if (!list_empty(&fimc->vid_cap.active_buf_q)) {
+ buf = list_entry(fimc->vid_cap.active_buf_q.next,
+ struct fimc_vid_buffer, list);
+ vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg));
+ }
+ fimc_capture_irq_handler(fimc, true);
+ fimc_deactivate_capture(fimc);
+ spin_unlock_irqrestore(&fimc->slock, irq_flags);
+ }
+ spin_unlock_irqrestore(&fmd->slock, flags);
+}
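On the sensor side this boils down to a single v4l2_subdev_notify() call, which the media device driver routes to the handler above. A minimal sketch of such a sensor interrupt handler, not part of this patch; the handler name and the way the payload is obtained are placeholders:

	/* Hypothetical sensor driver: signal the end of a still capture frame.
	 * The value passed through arg is the captured frame length in bytes.
	 */
	static irqreturn_t sensor_eof_isr(int irq, void *priv)
	{
		struct v4l2_subdev *sd = priv;
		u32 payload = 0;	/* to be read from the sensor hardware */

		v4l2_subdev_notify(sd, S5P_FIMC_TX_END_NOTIFY, &payload);
		return IRQ_HANDLED;
	}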
+
+static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct v4l2_mbus_framefmt *mf;
+ struct fimc_frame *ff;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+ mf = &fmt->format;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ ff = fmt->pad == FIMC_SD_PAD_SINK ? &ctx->s_frame : &ctx->d_frame;
+
+ mutex_lock(&fimc->lock);
+ /* The pixel code is the same on both the sink and the source pad */
+ if (!WARN_ON(ctx->s_frame.fmt == NULL))
+ mf->code = ctx->s_frame.fmt->mbus_code;
+ mf->width = ff->f_width;
+ mf->height = ff->f_height;
+ mutex_unlock(&fimc->lock);
+
+ return 0;
+}
+
+static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *ff;
+ struct fimc_fmt *ffmt;
+
+ dbg("pad%d: code: 0x%x, %dx%d",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ if (fmt->pad == FIMC_SD_PAD_SOURCE &&
+ vb2_is_busy(&fimc->vid_cap.vbq))
+ return -EBUSY;
+
+ mutex_lock(&fimc->lock);
+ ffmt = fimc_capture_try_format(ctx, &mf->width, &mf->height,
+ &mf->code, NULL, fmt->pad);
+ mutex_unlock(&fimc->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ return 0;
+ }
+ fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ffmt->color));
+
+ ff = fmt->pad == FIMC_SD_PAD_SINK ?
+ &ctx->s_frame : &ctx->d_frame;
+
+ mutex_lock(&fimc->lock);
+ set_frame_bounds(ff, mf->width, mf->height);
+ ff->fmt = ffmt;
+
+ /* Reset the crop rectangle if required. */
+ if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_DST_CROP)))
+ set_frame_crop(ff, 0, 0, mf->width, mf->height);
+
+ if (fmt->pad == FIMC_SD_PAD_SINK)
+ ctx->state &= ~FIMC_DST_CROP;
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_subdev_get_crop(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct v4l2_rect *r = &crop->rect;
+ struct fimc_frame *ff;
+
+ if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+ crop->rect = *v4l2_subdev_get_try_crop(fh, crop->pad);
+ return 0;
+ }
+ ff = crop->pad == FIMC_SD_PAD_SINK ?
+ &ctx->s_frame : &ctx->d_frame;
+
+ mutex_lock(&fimc->lock);
+ r->left = ff->offs_h;
+ r->top = ff->offs_v;
+ r->width = ff->width;
+ r->height = ff->height;
+ mutex_unlock(&fimc->lock);
+
+ dbg("ff:%p, pad%d: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ ff, crop->pad, r->left, r->top, r->width, r->height,
+ ff->f_width, ff->f_height);
+
+ return 0;
+}
+
+static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct v4l2_rect *r = &crop->rect;
+ struct fimc_frame *ff;
+ unsigned long flags;
+
+ dbg("(%d,%d)/%dx%d", r->left, r->top, r->width, r->height);
+
+ ff = crop->pad == FIMC_SD_PAD_SOURCE ?
+ &ctx->d_frame : &ctx->s_frame;
+
+ mutex_lock(&fimc->lock);
+ fimc_capture_try_crop(ctx, r, crop->pad);
+
+ if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(fh, crop->pad) = *r;
+ mutex_unlock(&fimc->lock);
+ return 0;
+ }
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(ff, r->left, r->top, r->width, r->height);
+ if (crop->pad == FIMC_SD_PAD_SOURCE)
+ ctx->state |= FIMC_DST_CROP;
+
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ dbg("pad%d: (%d,%d)/%dx%d", crop->pad, r->left, r->top,
+ r->width, r->height);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
+ .enum_mbus_code = fimc_subdev_enum_mbus_code,
+ .get_fmt = fimc_subdev_get_fmt,
+ .set_fmt = fimc_subdev_set_fmt,
+ .get_crop = fimc_subdev_get_crop,
+ .set_crop = fimc_subdev_set_crop,
+};
+
+static struct v4l2_subdev_ops fimc_subdev_ops = {
+ .pad = &fimc_subdev_pad_ops,
+};
+
+static int fimc_create_capture_subdev(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+ v4l2_subdev_init(sd, &fimc_subdev_ops);
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->pdev->id);
+
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->vid_cap.sd_pads, 0);
+ if (ret)
+ goto me_err;
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret)
+ goto sd_err;
+
+ fimc->vid_cap.subdev = sd;
+ v4l2_set_subdevdata(sd, fimc);
+ sd->entity.ops = &fimc_sd_media_ops;
+ return 0;
+sd_err:
+ media_entity_cleanup(&sd->entity);
+me_err:
+ kfree(sd);
+ return ret;
+}
+
+static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = fimc->vid_cap.subdev;
+
+ if (!sd)
+ return;
+ media_entity_cleanup(&sd->entity);
+ v4l2_device_unregister_subdev(sd);
+ kfree(sd);
+ fimc->vid_cap.subdev = NULL;
+}
+
+/* Set default format at the sensor and host interface */
+static int fimc_capture_set_default_format(struct fimc_dev *fimc)
+{
+ struct v4l2_format fmt = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ .fmt.pix_mp = {
+ .width = 640,
+ .height = 480,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
+ };
+
+ return fimc_capture_set_format(fimc, &fmt);
+}
+
/* fimc->lock must be already initialized */
-int fimc_register_capture_device(struct fimc_dev *fimc)
+int fimc_register_capture_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
{
- struct v4l2_device *v4l2_dev = &fimc->vid_cap.v4l2_dev;
struct video_device *vfd;
struct fimc_vid_cap *vid_cap;
struct fimc_ctx *ctx;
- struct v4l2_format f;
- struct fimc_frame *fr;
struct vb2_queue *q;
- int ret;
+ int ret = -ENOMEM;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
@@ -812,33 +1413,21 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
ctx->in_path = FIMC_CAMERA;
ctx->out_path = FIMC_DMA;
ctx->state = FIMC_CTX_CAP;
-
- /* Default format of the output frames */
- f.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB32;
- fr = &ctx->d_frame;
- fr->fmt = find_format(&f, FMT_FLAGS_M2M);
- fr->width = fr->f_width = fr->o_width = 640;
- fr->height = fr->f_height = fr->o_height = 480;
-
- if (!v4l2_dev->name[0])
- snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
- "%s.capture", dev_name(&fimc->pdev->dev));
-
- ret = v4l2_device_register(NULL, v4l2_dev);
- if (ret)
- goto err_info;
+ ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+ ctx->d_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
vfd = video_device_alloc();
if (!vfd) {
v4l2_err(v4l2_dev, "Failed to allocate video device\n");
- goto err_v4l2_reg;
+ goto err_vd_alloc;
}
- snprintf(vfd->name, sizeof(vfd->name), "%s:cap",
+ snprintf(vfd->name, sizeof(vfd->name), "%s.capture",
dev_name(&fimc->pdev->dev));
vfd->fops = &fimc_capture_fops;
vfd->ioctl_ops = &fimc_capture_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
vfd->minor = -1;
vfd->release = video_device_release;
vfd->lock = &fimc->lock;
@@ -849,8 +1438,6 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
vid_cap->active_buf_cnt = 0;
vid_cap->reqbufs_count = 0;
vid_cap->refcnt = 0;
- /* Default color format for image sensor */
- vid_cap->fmt.code = V4L2_MBUS_FMT_YUYV8_2X8;
INIT_LIST_HEAD(&vid_cap->pending_buf_q);
INIT_LIST_HEAD(&vid_cap->active_buf_q);
@@ -868,34 +1455,37 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
vb2_queue_init(q);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
- if (ret) {
- v4l2_err(v4l2_dev, "Failed to register video device\n");
- goto err_vd_reg;
- }
-
- v4l2_info(v4l2_dev,
- "FIMC capture driver registered as /dev/video%d\n",
- vfd->num);
+ fimc->vid_cap.vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &fimc->vid_cap.vd_pad, 0);
+ if (ret)
+ goto err_ent;
+ ret = fimc_create_capture_subdev(fimc, v4l2_dev);
+ if (ret)
+ goto err_sd_reg;
+ vfd->ctrl_handler = &ctx->ctrl_handler;
return 0;
-err_vd_reg:
+err_sd_reg:
+ media_entity_cleanup(&vfd->entity);
+err_ent:
video_device_release(vfd);
-err_v4l2_reg:
- v4l2_device_unregister(v4l2_dev);
-err_info:
+err_vd_alloc:
kfree(ctx);
- dev_err(&fimc->pdev->dev, "failed to install\n");
return ret;
}
void fimc_unregister_capture_device(struct fimc_dev *fimc)
{
- struct fimc_vid_cap *capture = &fimc->vid_cap;
-
- if (capture->vfd)
- video_unregister_device(capture->vfd);
+ struct video_device *vfd = fimc->vid_cap.vfd;
- kfree(capture->ctx);
+ if (vfd) {
+ media_entity_cleanup(&vfd->entity);
+ /* Can also be called even if the video device was not registered */
+ video_unregister_device(vfd);
+ }
+ fimc_destroy_capture_subdev(fimc);
+ kfree(fimc->vid_cap.ctx);
+ fimc->vid_cap.ctx = NULL;
}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index aa550666cc0..19ca6db38b2 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -27,9 +28,10 @@
#include <media/videobuf2-dma-contig.h>
#include "fimc-core.h"
+#include "fimc-mdevice.h"
static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
- "sclk_fimc", "fimc", "sclk_cam"
+ "sclk_fimc", "fimc"
};
static struct fimc_fmt fimc_formats[] = {
@@ -157,59 +159,28 @@ static struct fimc_fmt fimc_formats[] = {
.memplanes = 2,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
- },
-};
-
-static struct v4l2_queryctrl fimc_ctrls[] = {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .default_value = 0,
}, {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vertical flip",
- .minimum = 0,
- .maximum = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_ROTATE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Rotation (CCW)",
- .minimum = 0,
- .maximum = 270,
- .step = 90,
- .default_value = 0,
+ .name = "JPEG encoded data",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .color = S5P_FIMC_JPEG,
+ .depth = { 8 },
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
+ .flags = FMT_FLAGS_CAM,
},
};
-
-static struct v4l2_queryctrl *get_ctrl(int id)
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+ int dw, int dh, int rotation)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fimc_ctrls); ++i)
- if (id == fimc_ctrls[i].id)
- return &fimc_ctrls[i];
- return NULL;
-}
+ if (rotation == 90 || rotation == 270)
+ swap(dw, dh);
-int fimc_check_scaler_ratio(int sw, int sh, int dw, int dh, int rot)
-{
- int tx, ty;
-
- if (rot == 90 || rot == 270) {
- ty = dw;
- tx = dh;
- } else {
- tx = dw;
- ty = dh;
- }
+ if (!ctx->scaler.enabled)
+ return (sw == dw && sh == dh) ? 0 : -EINVAL;
- if ((sw >= SCALER_MAX_HRATIO * tx) || (sh >= SCALER_MAX_VRATIO * ty))
+ if ((sw >= SCALER_MAX_HRATIO * dw) || (sh >= SCALER_MAX_VRATIO * dh))
return -EINVAL;
return 0;
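The rewritten helper folds the 90/270 degree swap and the ratio limit into a few lines. Assuming the usual SCALER_MAX_HRATIO/SCALER_MAX_VRATIO value of 64, a few illustrative cases (values are arithmetic only, not taken from the patch):

	/* source      destination  rotation   result
	 * 1280x720 -> 320x180      0          0        (4:1 downscale is allowed)
	 * 1280x720 -> 16x16        0          -EINVAL  (80:1 exceeds the 64:1 limit)
	 * 1280x720 -> 180x320      90         0        (dw/dh are swapped first)
	 * with the scaler disabled only an exact 1:1 copy passes.
	 */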
@@ -235,10 +206,11 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
int fimc_set_scaler_info(struct fimc_ctx *ctx)
{
+ struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct device *dev = &ctx->fimc_dev->pdev->dev;
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
struct fimc_frame *d_frame = &ctx->d_frame;
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
int tx, ty, sx, sy;
int ret;
@@ -250,15 +222,14 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
ty = d_frame->height;
}
if (tx <= 0 || ty <= 0) {
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
- "invalid target size: %d x %d", tx, ty);
+ dev_err(dev, "Invalid target size: %dx%d", tx, ty);
return -EINVAL;
}
sx = s_frame->width;
sy = s_frame->height;
if (sx <= 0 || sy <= 0) {
- err("invalid source size: %d x %d", sx, sy);
+ dev_err(dev, "Invalid source size: %dx%d", sx, sy);
return -EINVAL;
}
sc->real_width = sx;
@@ -301,7 +272,6 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
static void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
struct vb2_buffer *src_vb, *dst_vb;
- struct fimc_dev *fimc = ctx->fimc_dev;
if (!ctx || !ctx->m2m_ctx)
return;
@@ -312,54 +282,68 @@ static void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
if (src_vb && dst_vb) {
v4l2_m2m_buf_done(src_vb, vb_state);
v4l2_m2m_buf_done(dst_vb, vb_state);
- v4l2_m2m_job_finish(fimc->m2m.m2m_dev, ctx->m2m_ctx);
+ v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
+ ctx->m2m_ctx);
}
}
/* Complete the transaction which has been scheduled for execution. */
-static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
+static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
if (!fimc_m2m_pending(fimc))
- return;
+ return 0;
fimc_ctx_state_lock_set(FIMC_CTX_SHUT, ctx);
ret = wait_event_timeout(fimc->irq_queue,
!fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
FIMC_SHUTDOWN_TIMEOUT);
- /*
- * In case of a timeout the buffers are not released in the interrupt
- * handler so return them here with the error flag set, if there are
- * any on the queue.
- */
- if (ret == 0)
- fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ return ret == 0 ? -ETIMEDOUT : ret;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
+ return ret > 0 ? 0 : ret;
}
static int stop_streaming(struct vb2_queue *q)
{
struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
- fimc_m2m_shutdown(ctx);
+ ret = fimc_m2m_shutdown(ctx);
+ if (ret == -ETIMEDOUT)
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ pm_runtime_put(&ctx->fimc_dev->pdev->dev);
return 0;
}
-static void fimc_capture_irq_handler(struct fimc_dev *fimc)
+void fimc_capture_irq_handler(struct fimc_dev *fimc, bool final)
{
struct fimc_vid_cap *cap = &fimc->vid_cap;
struct fimc_vid_buffer *v_buf;
struct timeval *tv;
struct timespec ts;
+ if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ return;
+ }
+
if (!list_empty(&cap->active_buf_q) &&
- test_bit(ST_CAPT_RUN, &fimc->state)) {
+ test_bit(ST_CAPT_RUN, &fimc->state) && final) {
ktime_get_real_ts(&ts);
- v_buf = active_queue_pop(cap);
+ v_buf = fimc_active_queue_pop(cap);
tv = &v_buf->vb.v4l2_buf.timestamp;
tv->tv_sec = ts.tv_sec;
@@ -369,19 +353,14 @@ static void fimc_capture_irq_handler(struct fimc_dev *fimc)
vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
}
- if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
- wake_up(&fimc->irq_queue);
- return;
- }
-
if (!list_empty(&cap->pending_buf_q)) {
- v_buf = pending_queue_pop(cap);
+ v_buf = fimc_pending_queue_pop(cap);
fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
v_buf->index = cap->buf_index;
/* Move the buffer to the capture active queue */
- active_queue_add(cap, v_buf);
+ fimc_active_queue_add(cap, v_buf);
dbg("next frame: %d, done frame: %d",
fimc_hw_get_frame_index(fimc), v_buf->index);
@@ -391,7 +370,8 @@ static void fimc_capture_irq_handler(struct fimc_dev *fimc)
}
if (cap->active_buf_cnt == 0) {
- clear_bit(ST_CAPT_RUN, &fimc->state);
+ if (final)
+ clear_bit(ST_CAPT_RUN, &fimc->state);
if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
cap->buf_index = 0;
@@ -399,11 +379,13 @@ static void fimc_capture_irq_handler(struct fimc_dev *fimc)
set_bit(ST_CAPT_RUN, &fimc->state);
}
+ fimc_capture_config_update(cap->ctx);
+
dbg("frame: %d, active_buf_cnt: %d",
fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
}
-static irqreturn_t fimc_isr(int irq, void *priv)
+static irqreturn_t fimc_irq_handler(int irq, void *priv)
{
struct fimc_dev *fimc = priv;
struct fimc_vid_cap *cap = &fimc->vid_cap;
@@ -411,9 +393,17 @@ static irqreturn_t fimc_isr(int irq, void *priv)
fimc_hw_clear_irq(fimc);
+ spin_lock(&fimc->slock);
+
if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
+ if (test_and_clear_bit(ST_M2M_SUSPENDING, &fimc->state)) {
+ set_bit(ST_M2M_SUSPENDED, &fimc->state);
+ wake_up(&fimc->irq_queue);
+ goto out;
+ }
ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
if (ctx != NULL) {
+ spin_unlock(&fimc->slock);
fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
spin_lock(&ctx->slock);
@@ -423,21 +413,16 @@ static irqreturn_t fimc_isr(int irq, void *priv)
}
spin_unlock(&ctx->slock);
}
-
return IRQ_HANDLED;
- }
-
- spin_lock(&fimc->slock);
-
- if (test_bit(ST_CAPT_PEND, &fimc->state)) {
- fimc_capture_irq_handler(fimc);
-
+ } else if (test_bit(ST_CAPT_PEND, &fimc->state)) {
+ fimc_capture_irq_handler(fimc,
+ !test_bit(ST_CAPT_JPEG, &fimc->state));
if (cap->active_buf_cnt == 1) {
fimc_deactivate_capture(fimc);
clear_bit(ST_CAPT_STREAM, &fimc->state);
}
}
-
+out:
spin_unlock(&fimc->slock);
return IRQ_HANDLED;
}
@@ -457,7 +442,7 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
dbg("memplanes= %d, colplanes= %d, pix_size= %d",
frame->fmt->memplanes, frame->fmt->colplanes, pix_size);
- paddr->y = vb2_dma_contig_plane_paddr(vb, 0);
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
if (frame->fmt->memplanes == 1) {
switch (frame->fmt->colplanes) {
@@ -485,10 +470,10 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
}
} else {
if (frame->fmt->memplanes >= 2)
- paddr->cb = vb2_dma_contig_plane_paddr(vb, 1);
+ paddr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
if (frame->fmt->memplanes == 3)
- paddr->cr = vb2_dma_contig_plane_paddr(vb, 2);
+ paddr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
}
dbg("PHYS_ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
@@ -498,7 +483,7 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
}
/* Set order for 1 and 2 plane YCBCR 4:2:2 formats. */
-static void fimc_set_yuv_order(struct fimc_ctx *ctx)
+void fimc_set_yuv_order(struct fimc_ctx *ctx)
{
/* The one only mode supported in SoC. */
ctx->in_order_2p = S5P_FIMC_LSB_CRCB;
@@ -540,7 +525,7 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx)
dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
}
-static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
{
struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
u32 i, depth = 0;
@@ -606,9 +591,6 @@ int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
fimc_set_yuv_order(ctx);
}
- /* Input DMA mode is not allowed when the scaler is disabled. */
- ctx->scaler.enabled = 1;
-
if (flags & FIMC_SRC_ADDR) {
vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
ret = fimc_prepare_addr(ctx, vb, s_frame, &s_frame->paddr);
@@ -635,10 +617,10 @@ static void fimc_dma_run(void *priv)
return;
fimc = ctx->fimc_dev;
-
- spin_lock_irqsave(&ctx->slock, flags);
+ spin_lock_irqsave(&fimc->slock, flags);
set_bit(ST_M2M_PEND, &fimc->state);
+ spin_lock(&ctx->slock);
ctx->state |= (FIMC_SRC_ADDR | FIMC_DST_ADDR);
ret = fimc_prepare_config(ctx, ctx->state);
if (ret)
@@ -649,8 +631,6 @@ static void fimc_dma_run(void *priv)
ctx->state |= FIMC_PARAMS;
fimc->m2m.ctx = ctx;
}
-
- spin_lock(&fimc->slock);
fimc_hw_set_input_addr(fimc, &ctx->s_frame.paddr);
if (ctx->state & FIMC_PARAMS) {
@@ -665,7 +645,7 @@ static void fimc_dma_run(void *priv)
fimc_hw_set_mainscaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
- fimc_hw_set_effect(ctx);
+ fimc_hw_set_effect(ctx, false);
}
fimc_hw_set_output_path(ctx);
@@ -680,10 +660,9 @@ static void fimc_dma_run(void *priv)
ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
FIMC_SRC_FMT | FIMC_DST_FMT);
fimc_hw_activate_input_dma(fimc, true);
- spin_unlock(&fimc->slock);
-
dma_unlock:
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock(&ctx->slock);
+ spin_unlock_irqrestore(&fimc->slock, flags);
}
static void fimc_job_abort(void *priv)
@@ -691,9 +670,9 @@ static void fimc_job_abort(void *priv)
fimc_m2m_shutdown(priv);
}
-static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
- unsigned int *num_planes, unsigned long sizes[],
- void *allocators[])
+static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
struct fimc_frame *f;
@@ -762,146 +741,296 @@ static struct vb2_ops fimc_qops = {
.wait_prepare = fimc_unlock,
.wait_finish = fimc_lock,
.stop_streaming = stop_streaming,
+ .start_streaming = start_streaming,
};
-static int fimc_m2m_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+/*
+ * V4L2 controls handling
+ */
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct fimc_ctx, ctrl_handler)
+
+static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct fimc_ctx *ctx = file->private_data;
+ struct fimc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct samsung_fimc_variant *variant = fimc->variant;
+ unsigned long flags;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ spin_lock_irqsave(&ctx->slock, flags);
+ ctx->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ spin_lock_irqsave(&ctx->slock, flags);
+ ctx->vflip = ctrl->val;
+ break;
+
+ case V4L2_CID_ROTATE:
+ if (fimc_capture_pending(fimc) ||
+ fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctrl->val);
+ }
+ if (ret) {
+ v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
+ return -EINVAL;
+ }
+ if ((ctrl->val == 90 || ctrl->val == 270) &&
+ !variant->has_out_rot)
+ return -EINVAL;
+ spin_lock_irqsave(&ctx->slock, flags);
+ ctx->rotation = ctrl->val;
+ break;
+
+ default:
+ v4l2_err(fimc->v4l2_dev, "Invalid control: 0x%X\n", ctrl->id);
+ return -EINVAL;
+ }
+ ctx->state |= FIMC_PARAMS;
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
+ .s_ctrl = fimc_s_ctrl,
+};
+
+int fimc_ctrls_create(struct fimc_ctx *ctx)
+{
+ if (ctx->ctrls_rdy)
+ return 0;
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
+
+ ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+
+ return ctx->ctrl_handler.error;
+}
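With the control framework each file handle owns its control handler, so the only glue needed at open() time is to create the controls and attach the handler to the file handle; a brief sketch of that wiring, mirroring what fimc_m2m_open() does further down in this patch (the error label is a placeholder):

	v4l2_fh_init(&ctx->fh, vfd);
	ret = fimc_ctrls_create(ctx);
	if (ret)
		goto err_fh;				/* placeholder label */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;	/* controls follow the fh */
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);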
+
+void fimc_ctrls_delete(struct fimc_ctx *ctx)
+{
+ if (ctx->ctrls_rdy) {
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ ctx->ctrls_rdy = false;
+ }
+}
+
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
+{
+ if (!ctx->ctrls_rdy)
+ return;
+
+ mutex_lock(&ctx->ctrl_handler.lock);
+ v4l2_ctrl_activate(ctx->ctrl_rotate, active);
+ v4l2_ctrl_activate(ctx->ctrl_hflip, active);
+ v4l2_ctrl_activate(ctx->ctrl_vflip, active);
+
+ if (active) {
+ ctx->rotation = ctx->ctrl_rotate->val;
+ ctx->hflip = ctx->ctrl_hflip->val;
+ ctx->vflip = ctx->ctrl_vflip->val;
+ } else {
+ ctx->rotation = 0;
+ ctx->hflip = 0;
+ ctx->vflip = 0;
+ }
+ mutex_unlock(&ctx->ctrl_handler.lock);
+}
+
+/*
+ * V4L2 ioctl handlers
+ */
+static int fimc_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->capabilities = V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
}
-int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
struct fimc_fmt *fmt;
- if (f->index >= ARRAY_SIZE(fimc_formats))
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_M2M, f->index);
+ if (!fmt)
return -EINVAL;
- fmt = &fimc_formats[f->index];
strncpy(f->description, fmt->name, sizeof(f->description) - 1);
f->pixelformat = fmt->fourcc;
-
return 0;
}
-int fimc_vidioc_g_fmt_mplane(struct file *file, void *priv,
- struct v4l2_format *f)
+int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
{
- struct fimc_ctx *ctx = priv;
- struct fimc_frame *frame;
- struct v4l2_pix_format_mplane *pixm;
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
int i;
- frame = ctx_get_frame(ctx, f->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- pixm = &f->fmt.pix_mp;
-
- pixm->width = frame->width;
- pixm->height = frame->height;
- pixm->field = V4L2_FIELD_NONE;
- pixm->pixelformat = frame->fmt->fourcc;
- pixm->colorspace = V4L2_COLORSPACE_JPEG;
- pixm->num_planes = frame->fmt->memplanes;
+ pixm->width = frame->o_width;
+ pixm->height = frame->o_height;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->pixelformat = frame->fmt->fourcc;
+ pixm->colorspace = V4L2_COLORSPACE_JPEG;
+ pixm->num_planes = frame->fmt->memplanes;
for (i = 0; i < pixm->num_planes; ++i) {
- int bpl = frame->o_width;
-
+ int bpl = frame->f_width;
if (frame->fmt->colplanes == 1) /* packed formats */
bpl = (bpl * frame->fmt->depth[0]) / 8;
-
pixm->plane_fmt[i].bytesperline = bpl;
-
pixm->plane_fmt[i].sizeimage = (frame->o_width *
frame->o_height * frame->fmt->depth[i]) / 8;
}
-
return 0;
}
-struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
+void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f)
{
- struct fimc_fmt *fmt;
- unsigned int i;
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+
+ frame->f_width = pixm->plane_fmt[0].bytesperline;
+ if (frame->fmt->colplanes == 1)
+ frame->f_width = (frame->f_width * 8) / frame->fmt->depth[0];
+ frame->f_height = pixm->height;
+ frame->width = pixm->width;
+ frame->height = pixm->height;
+ frame->o_width = pixm->width;
+ frame->o_height = pixm->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+}
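fimc_fill_frame() recovers the full line width from the already adjusted bytesperline, so packed and planar variants of the same resolution end up with the same f_width. A worked example, assuming the frame format has been set to packed YUYV (one colour plane, 16 bits per pixel):

	/* pix_mp: 640x480, plane_fmt[0].bytesperline = 1280
	 *   f_width  = 1280 * 8 / 16 = 640, f_height = 480
	 *   width/height = o_width/o_height = 640x480, offs_h = offs_v = 0
	 * For a planar format (colplanes > 1) bytesperline already equals the
	 * width in pixels, so f_width is taken over unchanged.
	 */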
- for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
- fmt = &fimc_formats[i];
- if (fmt->fourcc == f->fmt.pix_mp.pixelformat &&
- (fmt->flags & mask))
- break;
+/**
+ * fimc_adjust_mplane_format - adjust bytesperline/sizeimage for each plane
+ * @fmt: fimc pixel format description (input)
+ * @width: requested pixel width
+ * @height: requested pixel height
+ * @pix: multi-plane format to adjust
+ */
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+ struct v4l2_pix_format_mplane *pix)
+{
+ u32 bytesperline = 0;
+ int i;
+
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->field = V4L2_FIELD_NONE;
+ pix->num_planes = fmt->memplanes;
+ pix->height = height;
+ pix->width = width;
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ u32 bpl = pix->plane_fmt[i].bytesperline;
+ u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+
+ if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
+ bpl = pix->width; /* Planar */
+
+ if (fmt->colplanes == 1 && /* Packed */
+ (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
+ bpl = (pix->width * fmt->depth[0]) / 8;
+
+ if (i == 0) /* Same bytesperline for each plane. */
+ bytesperline = bpl;
+
+ pix->plane_fmt[i].bytesperline = bytesperline;
+ *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
}
+}
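The adjustment only grows bytesperline up to what the format requires and then recomputes sizeimage per plane. For packed YUYV (one colour plane, 16 bits per pixel) at 640x480 with bytesperline left at 0 by the caller, the arithmetic works out as follows (illustrative values only):

	/* plane_fmt[0].bytesperline = 640 * 16 / 8       = 1280
	 * plane_fmt[0].sizeimage    = 640 * 480 * 16 / 8 = 614400
	 * num_planes = 1, field = V4L2_FIELD_NONE, colorspace = V4L2_COLORSPACE_JPEG;
	 * any further planes would inherit plane 0's bytesperline.
	 */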
- return (i == ARRAY_SIZE(fimc_formats)) ? NULL : fmt;
+static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ return fimc_fill_format(frame, f);
}
-struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
- unsigned int mask)
+/**
+ * fimc_find_format - lookup fimc color format by fourcc or media bus format
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @mask: the color flags to match
+ * @index: offset in the fimc_formats array, ignored if negative
+ */
+struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code,
+ unsigned int mask, int index)
{
- struct fimc_fmt *fmt;
+ struct fimc_fmt *fmt, *def_fmt = NULL;
unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_formats))
+ return NULL;
for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
fmt = &fimc_formats[i];
- if (fmt->mbus_code == f->code && (fmt->flags & mask))
- break;
+ if (!(fmt->flags & mask))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
}
-
- return (i == ARRAY_SIZE(fimc_formats)) ? NULL : fmt;
+ return def_fmt;
}
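The single lookup helper now covers the three ways the driver resolves a format; typical call patterns are sketched below (the variable names are only placeholders):

	/* by fourcc, e.g. from a TRY_FMT/S_FMT handler: */
	fmt = fimc_find_format(&pix->pixelformat, NULL, FMT_FLAGS_M2M, 0);

	/* by media bus code reported by a sensor: */
	fmt = fimc_find_format(NULL, &mf->code, FMT_FLAGS_CAM, 0);

	/* by index, for format enumeration: */
	fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, f->index);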
-
-int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv,
- struct v4l2_format *f)
+static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
{
- struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
struct samsung_fimc_variant *variant = fimc->variant;
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct fimc_fmt *fmt;
- u32 max_width, mod_x, mod_y, mask;
- int i, is_output = 0;
-
+ u32 max_w, mod_x, mod_y;
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- if (fimc_ctx_state_is_set(FIMC_CTX_CAP, ctx))
- return -EINVAL;
- is_output = 1;
- } else if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (!IS_M2M(f->type))
return -EINVAL;
- }
dbg("w: %d, h: %d", pix->width, pix->height);
- mask = is_output ? FMT_FLAGS_M2M : FMT_FLAGS_M2M | FMT_FLAGS_CAM;
- fmt = find_format(f, mask);
- if (!fmt) {
- v4l2_err(&fimc->m2m.v4l2_dev, "Fourcc format (0x%X) invalid.\n",
- pix->pixelformat);
+ fmt = fimc_find_format(&pix->pixelformat, NULL, FMT_FLAGS_M2M, 0);
+ if (WARN(fmt == NULL, "Pixel format lookup failed"))
return -EINVAL;
- }
if (pix->field == V4L2_FIELD_ANY)
pix->field = V4L2_FIELD_NONE;
- else if (V4L2_FIELD_NONE != pix->field)
+ else if (pix->field != V4L2_FIELD_NONE)
return -EINVAL;
- if (is_output) {
- max_width = variant->pix_limit->scaler_dis_w;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ max_w = variant->pix_limit->scaler_dis_w;
mod_x = ffs(variant->min_inp_pixsize) - 1;
} else {
- max_width = variant->pix_limit->out_rot_dis_w;
+ max_w = variant->pix_limit->out_rot_dis_w;
mod_x = ffs(variant->min_out_pixsize) - 1;
}
@@ -914,70 +1043,52 @@ int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv,
else
mod_y = mod_x;
}
+ dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
- dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_width);
-
- v4l_bound_align_image(&pix->width, 16, max_width, mod_x,
+ v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
- pix->num_planes = fmt->memplanes;
- pix->colorspace = V4L2_COLORSPACE_JPEG;
-
-
- for (i = 0; i < pix->num_planes; ++i) {
- u32 bpl = pix->plane_fmt[i].bytesperline;
- u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
-
- if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
- bpl = pix->width; /* Planar */
-
- if (fmt->colplanes == 1 && /* Packed */
- (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
- bpl = (pix->width * fmt->depth[0]) / 8;
-
- if (i == 0) /* Same bytesperline for each plane. */
- mod_x = bpl;
+ fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
+ return 0;
+}
- pix->plane_fmt[i].bytesperline = mod_x;
- *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
- }
+static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
- return 0;
+ return fimc_try_fmt_mplane(ctx, f);
}
-static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv,
+static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = priv;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
struct vb2_queue *vq;
struct fimc_frame *frame;
struct v4l2_pix_format_mplane *pix;
int i, ret = 0;
- ret = fimc_vidioc_try_fmt_mplane(file, priv, f);
+ ret = fimc_try_fmt_mplane(ctx, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
- v4l2_err(&fimc->m2m.v4l2_dev, "queue (%d) busy\n", f->type);
+ v4l2_err(fimc->m2m.vfd, "queue (%d) busy\n", f->type);
return -EBUSY;
}
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
frame = &ctx->s_frame;
- } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ else
frame = &ctx->d_frame;
- } else {
- v4l2_err(&fimc->m2m.v4l2_dev,
- "Wrong buffer/video queue type (%d)\n", f->type);
- return -EINVAL;
- }
pix = &f->fmt.pix_mp;
- frame->fmt = find_format(f, FMT_FLAGS_M2M);
+ frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
+ FMT_FLAGS_M2M, 0);
if (!frame->fmt)
return -EINVAL;
@@ -986,15 +1097,9 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv,
(pix->width * pix->height * frame->fmt->depth[i]) / 8;
}
- frame->f_width = pix->plane_fmt[0].bytesperline * 8 /
- frame->fmt->depth[0];
- frame->f_height = pix->height;
- frame->width = pix->width;
- frame->height = pix->height;
- frame->o_width = pix->width;
- frame->o_height = pix->height;
- frame->offs_h = 0;
- frame->offs_v = 0;
+ fimc_fill_frame(frame, f);
+
+ ctx->scaler.enabled = 1;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fimc_ctx_state_lock_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
@@ -1006,39 +1111,42 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv,
return 0;
}
-static int fimc_m2m_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
+static int fimc_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
{
- struct fimc_ctx *ctx = priv;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
-static int fimc_m2m_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
+static int fimc_m2m_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
{
- struct fimc_ctx *ctx = priv;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
-static int fimc_m2m_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
+static int fimc_m2m_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
{
- struct fimc_ctx *ctx = priv;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
-static int fimc_m2m_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
+static int fimc_m2m_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
{
- struct fimc_ctx *ctx = priv;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
-static int fimc_m2m_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
+static int fimc_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
{
- struct fimc_ctx *ctx = priv;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
/* The source and target color format need to be set */
if (V4L2_TYPE_IS_OUTPUT(type)) {
@@ -1051,149 +1159,19 @@ static int fimc_m2m_streamon(struct file *file, void *priv,
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
-static int fimc_m2m_streamoff(struct file *file, void *priv,
+static int fimc_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
- struct fimc_ctx *ctx = priv;
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
-int fimc_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct fimc_ctx *ctx = priv;
- struct v4l2_queryctrl *c;
- int ret = -EINVAL;
-
- c = get_ctrl(qc->id);
- if (c) {
- *qc = *c;
- return 0;
- }
-
- if (fimc_ctx_state_is_set(FIMC_CTX_CAP, ctx)) {
- return v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
- core, queryctrl, qc);
- }
- return ret;
-}
-
-int fimc_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
- switch (ctrl->id) {
- case V4L2_CID_HFLIP:
- ctrl->value = (FLIP_X_AXIS & ctx->flip) ? 1 : 0;
- break;
- case V4L2_CID_VFLIP:
- ctrl->value = (FLIP_Y_AXIS & ctx->flip) ? 1 : 0;
- break;
- case V4L2_CID_ROTATE:
- ctrl->value = ctx->rotation;
- break;
- default:
- if (fimc_ctx_state_is_set(FIMC_CTX_CAP, ctx)) {
- return v4l2_subdev_call(fimc->vid_cap.sd, core,
- g_ctrl, ctrl);
- } else {
- v4l2_err(&fimc->m2m.v4l2_dev, "Invalid control\n");
- return -EINVAL;
- }
- }
- dbg("ctrl->value= %d", ctrl->value);
-
- return 0;
-}
-
-int check_ctrl_val(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
-{
- struct v4l2_queryctrl *c;
- c = get_ctrl(ctrl->id);
- if (!c)
- return -EINVAL;
-
- if (ctrl->value < c->minimum || ctrl->value > c->maximum
- || (c->step != 0 && ctrl->value % c->step != 0)) {
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
- "Invalid control value\n");
- return -ERANGE;
- }
-
- return 0;
-}
-
-int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
-{
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_HFLIP:
- if (ctrl->value)
- ctx->flip |= FLIP_X_AXIS;
- else
- ctx->flip &= ~FLIP_X_AXIS;
- break;
-
- case V4L2_CID_VFLIP:
- if (ctrl->value)
- ctx->flip |= FLIP_Y_AXIS;
- else
- ctx->flip &= ~FLIP_Y_AXIS;
- break;
-
- case V4L2_CID_ROTATE:
- if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
- ret = fimc_check_scaler_ratio(ctx->s_frame.width,
- ctx->s_frame.height, ctx->d_frame.width,
- ctx->d_frame.height, ctrl->value);
- }
-
- if (ret) {
- v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range\n");
- return -EINVAL;
- }
-
- /* Check for the output rotator availability */
- if ((ctrl->value == 90 || ctrl->value == 270) &&
- (ctx->in_path == FIMC_DMA && !variant->has_out_rot))
- return -EINVAL;
- ctx->rotation = ctrl->value;
- break;
-
- default:
- v4l2_err(&fimc->m2m.v4l2_dev, "Invalid control\n");
- return -EINVAL;
- }
-
- fimc_ctx_state_lock_set(FIMC_PARAMS, ctx);
-
- return 0;
-}
-
-static int fimc_m2m_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct fimc_ctx *ctx = priv;
- int ret = 0;
-
- ret = check_ctrl_val(ctx, ctrl);
- if (ret)
- return ret;
-
- ret = fimc_s_ctrl(ctx, ctrl);
- return 0;
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
static int fimc_m2m_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cr)
+ struct v4l2_cropcap *cr)
{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_frame *frame;
- struct fimc_ctx *ctx = fh;
frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
@@ -1201,8 +1179,8 @@ static int fimc_m2m_cropcap(struct file *file, void *fh,
cr->bounds.left = 0;
cr->bounds.top = 0;
- cr->bounds.width = frame->f_width;
- cr->bounds.height = frame->f_height;
+ cr->bounds.width = frame->o_width;
+ cr->bounds.height = frame->o_height;
cr->defrect = cr->bounds;
return 0;
@@ -1210,8 +1188,8 @@ static int fimc_m2m_cropcap(struct file *file, void *fh,
static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_frame *frame;
- struct fimc_ctx *ctx = file->private_data;
frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
@@ -1225,26 +1203,21 @@ static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
return 0;
}
-int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
+static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
{
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *f;
u32 min_size, halign, depth = 0;
- bool is_capture_ctx;
int i;
if (cr->c.top < 0 || cr->c.left < 0) {
- v4l2_err(&fimc->m2m.v4l2_dev,
+ v4l2_err(fimc->m2m.vfd,
"doesn't support negative values for top & left\n");
return -EINVAL;
}
-
- is_capture_ctx = fimc_ctx_state_is_set(FIMC_CTX_CAP, ctx);
-
if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- f = is_capture_ctx ? &ctx->s_frame : &ctx->d_frame;
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
- !is_capture_ctx)
+ f = &ctx->d_frame;
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
f = &ctx->s_frame;
else
return -EINVAL;
@@ -1253,15 +1226,10 @@ int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
/* Get pixel alignment constraints. */
- if (is_capture_ctx) {
- min_size = 16;
- halign = 4;
- } else {
- if (fimc->id == 1 && fimc->variant->pix_hoff)
- halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
- else
- halign = ffs(min_size) - 1;
- }
+ if (fimc->id == 1 && fimc->variant->pix_hoff)
+ halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+ else
+ halign = ffs(min_size) - 1;
for (i = 0; i < f->fmt->colplanes; i++)
depth += f->fmt->depth[i];
@@ -1278,7 +1246,7 @@ int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
cr->c.top = f->o_height - cr->c.height;
cr->c.left = round_down(cr->c.left, min_size);
- cr->c.top = round_down(cr->c.top, is_capture_ctx ? 16 : 8);
+ cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
cr->c.left, cr->c.top, cr->c.width, cr->c.height,
@@ -1289,12 +1257,12 @@ int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
- struct fimc_ctx *ctx = file->private_data;
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *f;
int ret;
- ret = fimc_try_crop(ctx, cr);
+ ret = fimc_m2m_try_crop(ctx, cr);
if (ret)
return ret;
@@ -1304,18 +1272,16 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
/* Check to see if scaling ratio is within supported range */
if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- ret = fimc_check_scaler_ratio(cr->c.width, cr->c.height,
- ctx->d_frame.width,
- ctx->d_frame.height,
- ctx->rotation);
+ ret = fimc_check_scaler_ratio(ctx, cr->c.width,
+ cr->c.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctx->rotation);
} else {
- ret = fimc_check_scaler_ratio(ctx->s_frame.width,
- ctx->s_frame.height,
- cr->c.width, cr->c.height,
- ctx->rotation);
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, cr->c.width,
+ cr->c.height, ctx->rotation);
}
if (ret) {
- v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range\n");
+ v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
return -EINVAL;
}
}
@@ -1333,14 +1299,14 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querycap = fimc_m2m_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = fimc_vidioc_enum_fmt_mplane,
- .vidioc_enum_fmt_vid_out_mplane = fimc_vidioc_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
- .vidioc_g_fmt_vid_cap_mplane = fimc_vidioc_g_fmt_mplane,
- .vidioc_g_fmt_vid_out_mplane = fimc_vidioc_g_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
- .vidioc_try_fmt_vid_cap_mplane = fimc_vidioc_try_fmt_mplane,
- .vidioc_try_fmt_vid_out_mplane = fimc_vidioc_try_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
.vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
@@ -1354,10 +1320,6 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_streamon = fimc_m2m_streamon,
.vidioc_streamoff = fimc_m2m_streamoff,
- .vidioc_queryctrl = fimc_vidioc_queryctrl,
- .vidioc_g_ctrl = fimc_vidioc_g_ctrl,
- .vidioc_s_ctrl = fimc_m2m_s_ctrl,
-
.vidioc_g_crop = fimc_m2m_g_crop,
.vidioc_s_crop = fimc_m2m_s_crop,
.vidioc_cropcap = fimc_m2m_cropcap
@@ -1396,7 +1358,8 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
static int fimc_m2m_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct fimc_ctx *ctx = NULL;
+ struct fimc_ctx *ctx;
+ int ret;
dbg("pid: %d, state: 0x%lx, refcnt: %d",
task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
@@ -1408,19 +1371,24 @@ static int fimc_m2m_open(struct file *file)
if (fimc->vid_cap.refcnt > 0)
return -EBUSY;
- fimc->m2m.refcnt++;
- set_bit(ST_OUTDMA_RUN, &fimc->state);
-
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
+ ret = fimc_ctrls_create(ctx);
+ if (ret)
+ goto error_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
- file->private_data = ctx;
ctx->fimc_dev = fimc;
/* Default color format */
ctx->s_frame.fmt = &fimc_formats[0];
ctx->d_frame.fmt = &fimc_formats[0];
- /* Setup the device context for mem2mem mode. */
+ /* Setup the device context for memory-to-memory mode */
ctx->state = FIMC_CTX_M2M;
ctx->flags = 0;
ctx->in_path = FIMC_DMA;
@@ -1429,34 +1397,46 @@ static int fimc_m2m_open(struct file *file)
ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
- int err = PTR_ERR(ctx->m2m_ctx);
- kfree(ctx);
- return err;
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_c;
}
+ if (fimc->m2m.refcnt++ == 0)
+ set_bit(ST_M2M_RUN, &fimc->state);
return 0;
+
+error_c:
+ fimc_ctrls_delete(ctx);
+error_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
}
static int fimc_m2m_release(struct file *file)
{
- struct fimc_ctx *ctx = file->private_data;
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
struct fimc_dev *fimc = ctx->fimc_dev;
dbg("pid: %d, state: 0x%lx, refcnt= %d",
task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
- kfree(ctx);
- if (--fimc->m2m.refcnt <= 0)
- clear_bit(ST_OUTDMA_RUN, &fimc->state);
+ fimc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ if (--fimc->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_RUN, &fimc->state);
+ kfree(ctx);
return 0;
}
static unsigned int fimc_m2m_poll(struct file *file,
- struct poll_table_struct *wait)
+ struct poll_table_struct *wait)
{
- struct fimc_ctx *ctx = file->private_data;
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}
@@ -1464,7 +1444,7 @@ static unsigned int fimc_m2m_poll(struct file *file,
static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct fimc_ctx *ctx = file->private_data;
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}
@@ -1483,92 +1463,73 @@ static struct v4l2_m2m_ops m2m_ops = {
.job_abort = fimc_job_abort,
};
-static int fimc_register_m2m_device(struct fimc_dev *fimc)
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
{
struct video_device *vfd;
struct platform_device *pdev;
- struct v4l2_device *v4l2_dev;
int ret = 0;
if (!fimc)
return -ENODEV;
pdev = fimc->pdev;
- v4l2_dev = &fimc->m2m.v4l2_dev;
-
- /* set name if it is empty */
- if (!v4l2_dev->name[0])
- snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
- "%s.m2m", dev_name(&pdev->dev));
-
- ret = v4l2_device_register(&pdev->dev, v4l2_dev);
- if (ret)
- goto err_m2m_r1;
+ fimc->v4l2_dev = v4l2_dev;
vfd = video_device_alloc();
if (!vfd) {
v4l2_err(v4l2_dev, "Failed to allocate video device\n");
- goto err_m2m_r1;
+ return -ENOMEM;
}
vfd->fops = &fimc_m2m_fops;
vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
vfd->minor = -1;
vfd->release = video_device_release;
vfd->lock = &fimc->lock;
- snprintf(vfd->name, sizeof(vfd->name), "%s:m2m", dev_name(&pdev->dev));
-
+ snprintf(vfd->name, sizeof(vfd->name), "%s.m2m", dev_name(&pdev->dev));
video_set_drvdata(vfd, fimc);
- platform_set_drvdata(pdev, fimc);
fimc->m2m.vfd = vfd;
fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
if (IS_ERR(fimc->m2m.m2m_dev)) {
v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
ret = PTR_ERR(fimc->m2m.m2m_dev);
- goto err_m2m_r2;
- }
-
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
- if (ret) {
- v4l2_err(v4l2_dev,
- "%s(): failed to register video device\n", __func__);
- goto err_m2m_r3;
+ goto err_init;
}
- v4l2_info(v4l2_dev,
- "FIMC m2m driver registered as /dev/video%d\n", vfd->num);
- return 0;
+ ret = media_entity_init(&vfd->entity, 0, NULL, 0);
+ if (!ret)
+ return 0;
-err_m2m_r3:
v4l2_m2m_release(fimc->m2m.m2m_dev);
-err_m2m_r2:
+err_init:
video_device_release(fimc->m2m.vfd);
-err_m2m_r1:
- v4l2_device_unregister(v4l2_dev);
-
return ret;
}
-static void fimc_unregister_m2m_device(struct fimc_dev *fimc)
+void fimc_unregister_m2m_device(struct fimc_dev *fimc)
{
- if (fimc) {
+ if (!fimc)
+ return;
+
+ if (fimc->m2m.m2m_dev)
v4l2_m2m_release(fimc->m2m.m2m_dev);
+ if (fimc->m2m.vfd) {
+ media_entity_cleanup(&fimc->m2m.vfd->entity);
+ /* Can also be called if video device wasn't registered */
video_unregister_device(fimc->m2m.vfd);
-
- v4l2_device_unregister(&fimc->m2m.v4l2_dev);
}
}
-static void fimc_clk_release(struct fimc_dev *fimc)
+static void fimc_clk_put(struct fimc_dev *fimc)
{
int i;
for (i = 0; i < fimc->num_clocks; i++) {
- if (fimc->clock[i]) {
- clk_disable(fimc->clock[i]);
+ if (fimc->clock[i])
clk_put(fimc->clock[i]);
- }
}
}
@@ -1577,15 +1538,50 @@ static int fimc_clk_get(struct fimc_dev *fimc)
int i;
for (i = 0; i < fimc->num_clocks; i++) {
fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
-
- if (!IS_ERR_OR_NULL(fimc->clock[i])) {
- clk_enable(fimc->clock[i]);
+ if (!IS_ERR_OR_NULL(fimc->clock[i]))
continue;
- }
dev_err(&fimc->pdev->dev, "failed to get fimc clock: %s\n",
fimc_clocks[i]);
return -ENXIO;
}
+
+ return 0;
+}
+
+static int fimc_m2m_suspend(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+ int timeout;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!fimc_m2m_pending(fimc)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &fimc->state);
+ set_bit(ST_M2M_SUSPENDING, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ timeout = wait_event_timeout(fimc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ clear_bit(ST_M2M_SUSPENDING, &fimc->state);
+ return timeout == 0 ? -EAGAIN : 0;
+}
+
+static int fimc_m2m_resume(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+	/* Clear the context to force a full H/W setup on the first run after resume */
+ fimc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
+ fimc_m2m_job_finish(fimc->m2m.ctx,
+ VB2_BUF_STATE_ERROR);
return 0;
}
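The suspend helper above only performs a handshake: it sets ST_M2M_SUSPENDING and waits on irq_queue until the interrupt handler reports the device idle. A minimal sketch of the expected interrupt-side acknowledgement, assuming it runs in the ISR with fimc->slock held (the real logic belongs to fimc_irq_handler, which is not part of this hunk; the helper name is illustrative):

static void example_m2m_suspend_ack(struct fimc_dev *fimc)
{
	/* Called once the currently running m2m job has finished. */
	if (test_bit(ST_M2M_SUSPENDING, &fimc->state)) {
		set_bit(ST_M2M_SUSPENDED, &fimc->state);
		wake_up(&fimc->irq_queue);
	}
}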
@@ -1596,7 +1592,6 @@ static int fimc_probe(struct platform_device *pdev)
struct samsung_fimc_driverdata *drv_data;
struct s5p_platform_fimc *pdata;
int ret = 0;
- int cap_input_index = -1;
dev_dbg(&pdev->dev, "%s():\n", __func__);
@@ -1614,15 +1609,16 @@ static int fimc_probe(struct platform_device *pdev)
return -ENOMEM;
fimc->id = pdev->id;
+
fimc->variant = drv_data->variant[fimc->id];
fimc->pdev = pdev;
pdata = pdev->dev.platform_data;
fimc->pdata = pdata;
- fimc->state = ST_IDLE;
+
+ set_bit(ST_LPM, &fimc->state);
init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
-
mutex_init(&fimc->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1647,71 +1643,51 @@ static int fimc_probe(struct platform_device *pdev)
goto err_req_region;
}
- fimc->num_clocks = MAX_FIMC_CLOCKS - 1;
-
- /* Check if a video capture node needs to be registered. */
- if (pdata && pdata->num_clients > 0) {
- cap_input_index = 0;
- fimc->num_clocks++;
- }
-
- ret = fimc_clk_get(fimc);
- if (ret)
- goto err_regs_unmap;
- clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
-
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get IRQ resource\n");
ret = -ENXIO;
- goto err_clk;
+ goto err_regs_unmap;
}
fimc->irq = res->start;
- fimc_hw_reset(fimc);
+ fimc->num_clocks = MAX_FIMC_CLOCKS;
+ ret = fimc_clk_get(fimc);
+ if (ret)
+ goto err_regs_unmap;
+ clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
+ clk_enable(fimc->clock[CLK_BUS]);
- ret = request_irq(fimc->irq, fimc_isr, 0, pdev->name, fimc);
+ platform_set_drvdata(pdev, fimc);
+
+ ret = request_irq(fimc->irq, fimc_irq_handler, 0, pdev->name, fimc);
if (ret) {
dev_err(&pdev->dev, "failed to install irq (%d)\n", ret);
goto err_clk;
}
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_irq;
/* Initialize contiguous memory allocator */
- fimc->alloc_ctx = vb2_dma_contig_init_ctx(&fimc->pdev->dev);
+ fimc->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(fimc->alloc_ctx)) {
ret = PTR_ERR(fimc->alloc_ctx);
- goto err_irq;
+ goto err_pm;
}
- ret = fimc_register_m2m_device(fimc);
- if (ret)
- goto err_irq;
-
- /* At least one camera sensor is required to register capture node */
- if (cap_input_index >= 0) {
- ret = fimc_register_capture_device(fimc);
- if (ret)
- goto err_m2m;
- clk_disable(fimc->clock[CLK_CAM]);
- }
- /*
- * Exclude the additional output DMA address registers by masking
- * them out on HW revisions that provide extended capabilites.
- */
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
-
- dev_dbg(&pdev->dev, "%s(): fimc-%d registered successfully\n",
- __func__, fimc->id);
+ dev_dbg(&pdev->dev, "FIMC.%d registered successfully\n", fimc->id);
+ pm_runtime_put(&pdev->dev);
return 0;
-err_m2m:
- fimc_unregister_m2m_device(fimc);
+err_pm:
+ pm_runtime_put(&pdev->dev);
err_irq:
free_irq(fimc->irq, fimc);
err_clk:
- fimc_clk_release(fimc);
+ fimc_clk_put(fimc);
err_regs_unmap:
iounmap(fimc->regs);
err_req_region:
@@ -1719,31 +1695,105 @@ err_req_region:
kfree(fimc->regs_res);
err_info:
kfree(fimc);
+ return ret;
+}
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+	/* Enable clocks and perform basic initialization */
+ clk_enable(fimc->clock[CLK_GATE]);
+ fimc_hw_reset(fimc);
+ if (fimc->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(fimc, 0xF);
+
+ /* Resume the capture or mem-to-mem device */
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_resume(fimc);
+ else if (fimc_m2m_pending(fimc))
+ return fimc_m2m_resume(fimc);
+ return 0;
+}
+
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fimc_capture_busy(fimc))
+ ret = fimc_capture_suspend(fimc);
+ else
+ ret = fimc_m2m_suspend(fimc);
+ if (!ret)
+ clk_disable(fimc->clock[CLK_GATE]);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
return ret;
}
-static int __devexit fimc_remove(struct platform_device *pdev)
+#ifdef CONFIG_PM_SLEEP
+static int fimc_resume(struct device *dev)
{
- struct fimc_dev *fimc =
- (struct fimc_dev *)platform_get_drvdata(pdev);
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+ unsigned long flags;
- free_irq(fimc->irq, fimc);
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+ /* Do not resume if the device was idle before system suspend */
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+ (!fimc_m2m_active(fimc) && !fimc_capture_busy(fimc))) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
fimc_hw_reset(fimc);
+ if (fimc->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(fimc, 0xF);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_resume(fimc);
- fimc_unregister_m2m_device(fimc);
- fimc_unregister_capture_device(fimc);
+ return fimc_m2m_resume(fimc);
+}
+
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
- fimc_clk_release(fimc);
+ if (test_and_set_bit(ST_LPM, &fimc->state))
+ return 0;
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_suspend(fimc);
+
+ return fimc_m2m_suspend(fimc);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+ struct fimc_dev *fimc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ fimc_runtime_suspend(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+ clk_disable(fimc->clock[CLK_BUS]);
+ fimc_clk_put(fimc);
+ free_irq(fimc->irq, fimc);
iounmap(fimc->regs);
release_resource(fimc->regs_res);
kfree(fimc->regs_res);
kfree(fimc);
- dev_info(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ dev_info(&pdev->dev, "driver unloaded\n");
return 0;
}
@@ -1786,6 +1836,7 @@ static struct fimc_pix_limit s5p_pix_limit[4] = {
static struct samsung_fimc_variant fimc0_variant_s5p = {
.has_inp_rot = 1,
.has_out_rot = 1,
+ .has_cam_if = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
@@ -1794,6 +1845,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
};
static struct samsung_fimc_variant fimc2_variant_s5p = {
+ .has_cam_if = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
@@ -1805,6 +1857,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
+ .has_cam_if = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
@@ -1816,6 +1869,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
+ .has_cam_if = 1,
.has_mainscaler_ext = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
@@ -1825,6 +1879,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
};
static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
+ .has_cam_if = 1,
.pix_hoff = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
@@ -1837,22 +1892,24 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
+ .has_cam_if = 1,
.has_cistatus2 = 1,
.has_mainscaler_ext = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
- .hor_offs_align = 1,
+ .hor_offs_align = 2,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc2_variant_exynos4 = {
+static struct samsung_fimc_variant fimc3_variant_exynos4 = {
.pix_hoff = 1,
+ .has_cam_if = 1,
.has_cistatus2 = 1,
.has_mainscaler_ext = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
- .hor_offs_align = 1,
+ .hor_offs_align = 2,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[3],
};
@@ -1885,7 +1942,7 @@ static struct samsung_fimc_driverdata fimc_drvdata_exynos4 = {
[0] = &fimc0_variant_exynos4,
[1] = &fimc0_variant_exynos4,
[2] = &fimc0_variant_exynos4,
- [3] = &fimc2_variant_exynos4,
+ [3] = &fimc3_variant_exynos4,
},
.num_entities = 4,
.lclk_frequency = 166000000UL,
@@ -1906,33 +1963,28 @@ static struct platform_device_id fimc_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
static struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove = __devexit_p(fimc_remove),
+ .remove = __devexit_p(fimc_remove),
.id_table = fimc_driver_ids,
.driver = {
- .name = MODULE_NAME,
+ .name = FIMC_MODULE_NAME,
.owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
}
};
-static int __init fimc_init(void)
+int __init fimc_register_driver(void)
{
- int ret = platform_driver_register(&fimc_driver);
- if (ret)
- err("platform_driver_register failed: %d\n", ret);
- return ret;
+ return platform_driver_probe(&fimc_driver, fimc_probe);
}
-static void __exit fimc_exit(void)
+void __exit fimc_unregister_driver(void)
{
platform_driver_unregister(&fimc_driver);
}
-
-module_init(fimc_init);
-module_exit(fimc_exit);
-
-MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
-MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.1");
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 1f70772daaf..a6936dad5b1 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -11,12 +11,16 @@
/*#define DEBUG*/
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <linux/io.h>
+
+#include <media/media-entity.h>
#include <media/videobuf2-core.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-mediabus.h>
@@ -32,38 +36,46 @@
/* Time to wait for next frame VSYNC interrupt while stopping operation. */
#define FIMC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
-#define MAX_FIMC_CLOCKS 3
-#define MODULE_NAME "s5p-fimc"
+#define MAX_FIMC_CLOCKS 2
+#define FIMC_MODULE_NAME "s5p-fimc"
#define FIMC_MAX_DEVS 4
#define FIMC_MAX_OUT_BUFS 4
#define SCALER_MAX_HRATIO 64
#define SCALER_MAX_VRATIO 64
#define DMA_MIN_SIZE 8
+#define FIMC_CAMIF_MAX_HEIGHT 0x2000
/* indices to the clocks array */
enum {
CLK_BUS,
CLK_GATE,
- CLK_CAM,
};
enum fimc_dev_flags {
- /* for m2m node */
- ST_IDLE,
- ST_OUTDMA_RUN,
+ ST_LPM,
+ /* m2m node */
+ ST_M2M_RUN,
ST_M2M_PEND,
- /* for capture node */
+ ST_M2M_SUSPENDING,
+ ST_M2M_SUSPENDED,
+ /* capture node */
ST_CAPT_PEND,
ST_CAPT_RUN,
ST_CAPT_STREAM,
+ ST_CAPT_ISP_STREAM,
+ ST_CAPT_SUSPENDED,
ST_CAPT_SHUT,
+ ST_CAPT_BUSY,
+ ST_CAPT_APPLY_CFG,
+ ST_CAPT_JPEG,
};
-#define fimc_m2m_active(dev) test_bit(ST_OUTDMA_RUN, &(dev)->state)
+#define fimc_m2m_active(dev) test_bit(ST_M2M_RUN, &(dev)->state)
#define fimc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
#define fimc_capture_running(dev) test_bit(ST_CAPT_RUN, &(dev)->state)
#define fimc_capture_pending(dev) test_bit(ST_CAPT_PEND, &(dev)->state)
+#define fimc_capture_busy(dev) test_bit(ST_CAPT_BUSY, &(dev)->state)
enum fimc_datapath {
FIMC_CAMERA,
@@ -83,9 +95,14 @@ enum fimc_color_fmt {
S5P_FIMC_CBYCRY422,
S5P_FIMC_CRYCBY422,
S5P_FIMC_YCBCR444_LOCAL,
+ S5P_FIMC_JPEG = 0x40,
};
-#define fimc_fmt_is_rgb(x) ((x) & 0x10)
+#define fimc_fmt_is_rgb(x) (!!((x) & 0x10))
+#define fimc_fmt_is_jpeg(x) (!!((x) & 0x40))
+
+#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \
+ __strt == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
/* Cb/Cr chrominance components order for 2 plane Y/CbCr 4:2:2 formats. */
#define S5P_FIMC_LSB_CRCB S5P_CIOCTRL_ORDER422_2P_LSB_CRCB
@@ -104,9 +121,10 @@ enum fimc_color_fmt {
#define FIMC_DST_ADDR (1 << 2)
#define FIMC_SRC_FMT (1 << 3)
#define FIMC_DST_FMT (1 << 4)
-#define FIMC_CTX_M2M (1 << 5)
-#define FIMC_CTX_CAP (1 << 6)
-#define FIMC_CTX_SHUT (1 << 7)
+#define FIMC_DST_CROP (1 << 5)
+#define FIMC_CTX_M2M (1 << 16)
+#define FIMC_CTX_CAP (1 << 17)
+#define FIMC_CTX_SHUT (1 << 18)
/* Image conversion flags */
#define FIMC_IN_DMA_ACCESS_TILED (1 << 0)
@@ -122,11 +140,6 @@ enum fimc_color_fmt {
/* Y (16 ~ 235), Cb/Cr (16 ~ 240) */
#define FIMC_COLOR_RANGE_NARROW (1 << 3)
-#define FLIP_NONE 0
-#define FLIP_X_AXIS 1
-#define FLIP_Y_AXIS 2
-#define FLIP_XY_AXIS (FLIP_X_AXIS | FLIP_Y_AXIS)
-
/**
* struct fimc_fmt - the driver's internal color format data
* @mbus_code: Media Bus pixel code, -1 if not applicable
@@ -275,26 +288,29 @@ struct fimc_frame {
/**
* struct fimc_m2m_device - v4l2 memory-to-memory device data
* @vfd: the video device node for v4l2 m2m mode
- * @v4l2_dev: v4l2 device for m2m mode
* @m2m_dev: v4l2 memory-to-memory device data
* @ctx: hardware context data
* @refcnt: the reference counter
*/
struct fimc_m2m_device {
struct video_device *vfd;
- struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
struct fimc_ctx *ctx;
int refcnt;
};
+#define FIMC_SD_PAD_SINK 0
+#define FIMC_SD_PAD_SOURCE 1
+#define FIMC_SD_PADS_NUM 2
+
/**
* struct fimc_vid_cap - camera capture device information
* @ctx: hardware context data
* @vfd: video device node for camera capture mode
- * @v4l2_dev: v4l2_device struct to manage subdevs
- * @sd: pointer to camera sensor subdevice currently in use
- * @fmt: Media Bus format configured at selected image sensor
+ * @subdev: subdev exposing the FIMC processing block
+ * @vd_pad: fimc video capture node pad
+ * @sd_pads: fimc video processing block pads
+ * @mf: media bus format at the FIMC camera input (and the scaler output) pad
* @pending_buf_q: the pending buffer queue head
* @active_buf_q: the queue head of buffers scheduled in hardware
 * @vbq: the capture video buffer queue
@@ -304,14 +320,17 @@ struct fimc_m2m_device {
* @reqbufs_count: the number of buffers requested in REQBUFS ioctl
* @input_index: input (camera sensor) index
* @refcnt: driver's private reference counter
+ * @input: capture input type, grp_id of the attached subdev
+ * @user_subdev_api: true if subdevs are not configured by the host driver
*/
struct fimc_vid_cap {
struct fimc_ctx *ctx;
struct vb2_alloc_ctx *alloc_ctx;
struct video_device *vfd;
- struct v4l2_device v4l2_dev;
- struct v4l2_subdev *sd;;
- struct v4l2_mbus_framefmt fmt;
+ struct v4l2_subdev *subdev;
+ struct media_pad vd_pad;
+ struct v4l2_mbus_framefmt mf;
+ struct media_pad sd_pads[FIMC_SD_PADS_NUM];
struct list_head pending_buf_q;
struct list_head active_buf_q;
struct vb2_queue vbq;
@@ -321,6 +340,8 @@ struct fimc_vid_cap {
unsigned int reqbufs_count;
int input_index;
int refcnt;
+ u32 input;
+ bool user_subdev_api;
};
/**
@@ -351,6 +372,7 @@ struct fimc_pix_limit {
* @has_cistatus2: 1 if CISTATUS2 register is present in this IP revision
* @has_mainscaler_ext: 1 if extended mainscaler ratios in CIEXTEN register
* are present in this IP revision
+ * @has_cam_if: set if this instance has a camera input interface
* @pix_limit: pixel size constraints for the scaler
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
@@ -363,6 +385,7 @@ struct samsung_fimc_variant {
unsigned int has_out_rot:1;
unsigned int has_cistatus2:1;
unsigned int has_mainscaler_ext:1;
+ unsigned int has_cam_if:1;
struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
@@ -383,6 +406,12 @@ struct samsung_fimc_driverdata {
int num_entities;
};
+struct fimc_pipeline {
+ struct media_pipeline *pipe;
+ struct v4l2_subdev *sensor;
+ struct v4l2_subdev *csis;
+};
+
struct fimc_ctx;
/**
@@ -399,10 +428,12 @@ struct fimc_ctx;
* @regs_res: the resource claimed for IO registers
* @irq: FIMC interrupt number
* @irq_queue: interrupt handler waitqueue
+ * @v4l2_dev: root v4l2_device
* @m2m: memory-to-memory V4L2 device information
* @vid_cap: camera capture device information
* @state: flags used to synchronize m2m and capture mode operation
* @alloc_ctx: videobuf2 memory allocator context
+ * @pipeline: fimc video capture pipeline data structure
*/
struct fimc_dev {
spinlock_t slock;
@@ -417,10 +448,12 @@ struct fimc_dev {
struct resource *regs_res;
int irq;
wait_queue_head_t irq_queue;
+ struct v4l2_device *v4l2_dev;
struct fimc_m2m_device m2m;
struct fimc_vid_cap vid_cap;
unsigned long state;
struct vb2_alloc_ctx *alloc_ctx;
+ struct fimc_pipeline pipeline;
};
/**
@@ -437,11 +470,18 @@ struct fimc_dev {
* @scaler: image scaler properties
* @effect: image effect
* @rotation: image clockwise rotation in degrees
- * @flip: image flip mode
+ * @hflip: indicates image horizontal flip if set
+ * @vflip: indicates image vertical flip if set
* @flags: additional flags for image conversion
* @state: flags to keep track of user configuration
* @fimc_dev: the FIMC device this context applies to
* @m2m_ctx: memory-to-memory device context
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @ctrl_rotate: image rotation control
+ * @ctrl_hflip: horizontal flip control
+ * @ctrl_vflip: vertical flip control
+ * @ctrls_rdy: true if the control handler is initialized
*/
struct fimc_ctx {
spinlock_t slock;
@@ -456,13 +496,49 @@ struct fimc_ctx {
struct fimc_scaler scaler;
struct fimc_effect effect;
int rotation;
- u32 flip;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
u32 flags;
u32 state;
struct fimc_dev *fimc_dev;
struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_rotate;
+ struct v4l2_ctrl *ctrl_hflip;
+ struct v4l2_ctrl *ctrl_vflip;
+ bool ctrls_rdy;
};
+#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
+
+static inline void set_frame_bounds(struct fimc_frame *f, u32 width, u32 height)
+{
+ f->o_width = width;
+ f->o_height = height;
+ f->f_width = width;
+ f->f_height = height;
+}
+
+static inline void set_frame_crop(struct fimc_frame *f,
+ u32 left, u32 top, u32 width, u32 height)
+{
+ f->offs_h = left;
+ f->offs_v = top;
+ f->width = width;
+ f->height = height;
+}
+
+static inline u32 fimc_get_format_depth(struct fimc_fmt *ff)
+{
+ u32 i, depth = 0;
+
+ if (ff != NULL)
+ for (i = 0; i < ff->colplanes; i++)
+ depth += ff->depth[i];
+ return depth;
+}
+
static inline bool fimc_capture_active(struct fimc_dev *fimc)
{
unsigned long flags;
@@ -561,7 +637,7 @@ static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
} else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
frame = &ctx->d_frame;
} else {
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
+ v4l2_err(ctx->fimc_dev->v4l2_dev,
"Wrong buffer/video queue type (%d)\n", type);
return ERR_PTR(-EINVAL);
}
@@ -595,7 +671,7 @@ void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
void fimc_hw_en_capture(struct fimc_ctx *ctx);
-void fimc_hw_set_effect(struct fimc_ctx *ctx);
+void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
void fimc_hw_set_input_path(struct fimc_ctx *ctx);
void fimc_hw_set_output_path(struct fimc_ctx *ctx);
@@ -614,36 +690,45 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
/* fimc-core.c */
int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
-int fimc_vidioc_g_fmt_mplane(struct file *file, void *priv,
- struct v4l2_format *f);
-int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv,
- struct v4l2_format *f);
-int fimc_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc);
-int fimc_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl);
-
-int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr);
-int check_ctrl_val(struct fimc_ctx *ctx, struct v4l2_control *ctrl);
-int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl);
-
-struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask);
-struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
- unsigned int mask);
-
-int fimc_check_scaler_ratio(int sw, int sh, int dw, int dh, int rot);
+int fimc_ctrls_create(struct fimc_ctx *ctx);
+void fimc_ctrls_delete(struct fimc_ctx *ctx);
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
+int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+ struct v4l2_pix_format_mplane *pix);
+struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code,
+ unsigned int mask, int index);
+
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+ int dw, int dh, int rotation);
int fimc_set_scaler_info(struct fimc_ctx *ctx);
int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
struct fimc_frame *frame, struct fimc_addr *paddr);
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
+void fimc_set_yuv_order(struct fimc_ctx *ctx);
+void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f);
+void fimc_capture_irq_handler(struct fimc_dev *fimc, bool done);
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev);
+void fimc_unregister_m2m_device(struct fimc_dev *fimc);
+int fimc_register_driver(void);
+void fimc_unregister_driver(void);
/* -----------------------------------------------------*/
/* fimc-capture.c */
-int fimc_register_capture_device(struct fimc_dev *fimc);
+int fimc_register_capture_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev);
void fimc_unregister_capture_device(struct fimc_dev *fimc);
-int fimc_sensor_sd_init(struct fimc_dev *fimc, int index);
+int fimc_capture_ctrls_create(struct fimc_dev *fimc);
int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
struct fimc_vid_buffer *fimc_vb);
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg);
+int fimc_capture_suspend(struct fimc_dev *fimc);
+int fimc_capture_resume(struct fimc_dev *fimc);
+int fimc_capture_config_update(struct fimc_ctx *ctx);
/* Locking: the caller holds fimc->slock */
static inline void fimc_activate_capture(struct fimc_ctx *ctx)
@@ -661,22 +746,27 @@ static inline void fimc_deactivate_capture(struct fimc_dev *fimc)
}
/*
- * Add buf to the capture active buffers queue.
- * Locking: Need to be called with fimc_dev::slock held.
+ * Buffer list manipulation functions. Must be called with fimc.slock held.
*/
-static inline void active_queue_add(struct fimc_vid_cap *vid_cap,
- struct fimc_vid_buffer *buf)
+
+/**
+ * fimc_active_queue_add - add buffer to the capture active buffers queue
+ * @buf: buffer to add to the active buffers list
+ */
+static inline void fimc_active_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
{
list_add_tail(&buf->list, &vid_cap->active_buf_q);
vid_cap->active_buf_cnt++;
}
-/*
- * Pop a video buffer from the capture active buffers queue
- * Locking: Need to be called with fimc_dev::slock held.
+/**
+ * fimc_active_queue_pop - pop buffer from the capture active buffers queue
+ *
+ * The caller must ensure the active_buf_q list is not empty.
*/
-static inline struct fimc_vid_buffer *
-active_queue_pop(struct fimc_vid_cap *vid_cap)
+static inline struct fimc_vid_buffer *fimc_active_queue_pop(
+ struct fimc_vid_cap *vid_cap)
{
struct fimc_vid_buffer *buf;
buf = list_entry(vid_cap->active_buf_q.next,
@@ -686,16 +776,23 @@ active_queue_pop(struct fimc_vid_cap *vid_cap)
return buf;
}
-/* Add video buffer to the capture pending buffers queue */
+/**
+ * fimc_pending_queue_add - add buffer to the capture pending buffers queue
+ * @buf: buffer to add to the pending buffers list
+ */
static inline void fimc_pending_queue_add(struct fimc_vid_cap *vid_cap,
struct fimc_vid_buffer *buf)
{
list_add_tail(&buf->list, &vid_cap->pending_buf_q);
}
-/* Add video buffer to the capture pending buffers queue */
-static inline struct fimc_vid_buffer *
-pending_queue_pop(struct fimc_vid_cap *vid_cap)
+/**
+ * fimc_pending_queue_pop - pop buffer from the capture pending buffers queue
+ *
+ * The caller must ensure the pending_buf_q list is not empty.
+ */
+static inline struct fimc_vid_buffer *fimc_pending_queue_pop(
+ struct fimc_vid_cap *vid_cap)
{
struct fimc_vid_buffer *buf;
buf = list_entry(vid_cap->pending_buf_q.next,
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
new file mode 100644
index 00000000000..cc337b1de91
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -0,0 +1,858 @@
+/*
+ * S5P/EXYNOS4 SoC series camera host interface media device driver
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/media-device.h>
+
+#include "fimc-core.h"
+#include "fimc-mdevice.h"
+#include "mipi-csis.h"
+
+static int __fimc_md_set_camclk(struct fimc_md *fmd,
+ struct fimc_sensor_info *s_info,
+ bool on);
+/**
+ * fimc_pipeline_prepare - update pipeline information with subdevice pointers
+ * @fimc: fimc device terminating the pipeline
+ *
+ * Caller holds the graph mutex.
+ */
+void fimc_pipeline_prepare(struct fimc_dev *fimc, struct media_entity *me)
+{
+ struct media_entity_graph graph;
+ struct v4l2_subdev *sd;
+
+ media_entity_graph_walk_start(&graph, me);
+
+ while ((me = media_entity_graph_walk_next(&graph))) {
+ if (media_entity_type(me) != MEDIA_ENT_T_V4L2_SUBDEV)
+ continue;
+ sd = media_entity_to_v4l2_subdev(me);
+
+ if (sd->grp_id == SENSOR_GROUP_ID)
+ fimc->pipeline.sensor = sd;
+ else if (sd->grp_id == CSIS_GROUP_ID)
+ fimc->pipeline.csis = sd;
+ }
+}
+
+/**
+ * __subdev_set_power - change power state of a single subdev
+ * @sd: subdevice to change power state for
+ * @on: 1 to enable power or 0 to disable
+ *
+ * Return the result of the s_power subdev operation, or -ENXIO if the sd
+ * argument is NULL. Return 0 if the subdevice does not implement s_power.
+ */
+static int __subdev_set_power(struct v4l2_subdev *sd, int on)
+{
+ int *use_count;
+ int ret;
+
+ if (sd == NULL)
+ return -ENXIO;
+
+ use_count = &sd->entity.use_count;
+ if (on && (*use_count)++ > 0)
+ return 0;
+ else if (!on && (*use_count == 0 || --(*use_count) > 0))
+ return 0;
+ ret = v4l2_subdev_call(sd, core, s_power, on);
+
+ return ret != -ENOIOCTLCMD ? ret : 0;
+}
+
+/**
+ * fimc_pipeline_s_power - change power state of all pipeline subdevs
+ * @fimc: fimc device terminating the pipeline
+ * @state: 1 to enable power or 0 for power down
+ *
+ * Must be called with the graph mutex held.
+ */
+int fimc_pipeline_s_power(struct fimc_dev *fimc, int state)
+{
+ int ret = 0;
+
+ if (fimc->pipeline.sensor == NULL)
+ return -ENXIO;
+
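+	/* Power up: the CSIS receiver (when present) first, then the sensor;
+	 * power down in the reverse order. */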
+ if (state) {
+ ret = __subdev_set_power(fimc->pipeline.csis, 1);
+ if (ret && ret != -ENXIO)
+ return ret;
+ return __subdev_set_power(fimc->pipeline.sensor, 1);
+ }
+
+ ret = __subdev_set_power(fimc->pipeline.sensor, 0);
+ if (ret)
+ return ret;
+ ret = __subdev_set_power(fimc->pipeline.csis, 0);
+
+ return ret == -ENXIO ? 0 : ret;
+}
+
+/**
+ * __fimc_pipeline_initialize - update the pipeline information, enable power
+ * of all pipeline subdevs and the sensor clock
+ * @me: media entity to start graph walk with
+ * @prep: true to acquire sensor (and csis) subdevs
+ *
+ * This function must be called with the graph mutex held.
+ */
+static int __fimc_pipeline_initialize(struct fimc_dev *fimc,
+ struct media_entity *me, bool prep)
+{
+ int ret;
+
+ if (prep)
+ fimc_pipeline_prepare(fimc, me);
+ if (fimc->pipeline.sensor == NULL)
+ return -EINVAL;
+ ret = fimc_md_set_camclk(fimc->pipeline.sensor, true);
+ if (ret)
+ return ret;
+ return fimc_pipeline_s_power(fimc, 1);
+}
+
+int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
+ bool prep)
+{
+ int ret;
+
+ mutex_lock(&me->parent->graph_mutex);
+ ret = __fimc_pipeline_initialize(fimc, me, prep);
+ mutex_unlock(&me->parent->graph_mutex);
+
+ return ret;
+}
+
+/**
+ * __fimc_pipeline_shutdown - disable the sensor clock and pipeline power
+ * @fimc: fimc device terminating the pipeline
+ *
+ * Disable power of all subdevs in the pipeline and turn off the external
+ * sensor clock.
+ * Called with the graph mutex held.
+ */
+int __fimc_pipeline_shutdown(struct fimc_dev *fimc)
+{
+ int ret = 0;
+
+ if (fimc->pipeline.sensor) {
+ ret = fimc_pipeline_s_power(fimc, 0);
+ fimc_md_set_camclk(fimc->pipeline.sensor, false);
+ }
+ return ret == -ENXIO ? 0 : ret;
+}
+
+int fimc_pipeline_shutdown(struct fimc_dev *fimc)
+{
+ struct media_entity *me = &fimc->vid_cap.vfd->entity;
+ int ret;
+
+ mutex_lock(&me->parent->graph_mutex);
+ ret = __fimc_pipeline_shutdown(fimc);
+ mutex_unlock(&me->parent->graph_mutex);
+
+ return ret;
+}
+
+/**
+ * fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
+ * @fimc: fimc device terminating the pipeline
+ * @on: passed as the s_stream call argument
+ */
+int fimc_pipeline_s_stream(struct fimc_dev *fimc, int on)
+{
+ struct fimc_pipeline *p = &fimc->pipeline;
+ int ret = 0;
+
+ if (p->sensor == NULL)
+ return -ENODEV;
+
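+	/* When enabling, start the CSIS receiver before the sensor;
+	 * when disabling, stop the sensor before the CSIS receiver. */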
+ if ((on && p->csis) || !on)
+ ret = v4l2_subdev_call(on ? p->csis : p->sensor,
+ video, s_stream, on);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ if ((!on && p->csis) || on)
+ ret = v4l2_subdev_call(on ? p->sensor : p->csis,
+ video, s_stream, on);
+ return ret == -ENOIOCTLCMD ? 0 : ret;
+}
+
+/*
+ * Sensor subdevice helper functions
+ */
+static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
+ struct fimc_sensor_info *s_info)
+{
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev *sd = NULL;
+
+ if (!s_info || !fmd)
+ return NULL;
+
+ adapter = i2c_get_adapter(s_info->pdata->i2c_bus_num);
+ if (!adapter)
+ return NULL;
+ sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
+ s_info->pdata->board_info, NULL);
+ if (IS_ERR_OR_NULL(sd)) {
+ v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
+ return NULL;
+ }
+ v4l2_set_subdev_hostdata(sd, s_info);
+ sd->grp_id = SENSOR_GROUP_ID;
+
+ v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice %s\n",
+ s_info->pdata->board_info->type);
+ return sd;
+}
+
+static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!client)
+ return;
+ v4l2_device_unregister_subdev(sd);
+ i2c_unregister_device(client);
+ i2c_put_adapter(client->adapter);
+}
+
+static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+{
+ struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data;
+ struct fimc_dev *fd = NULL;
+ int num_clients, ret, i;
+
+ /*
+ * Runtime resume one of the FIMC entities to make sure
+ * the sclk_cam clocks are not globally disabled.
+ */
+ for (i = 0; !fd && i < ARRAY_SIZE(fmd->fimc); i++)
+ if (fmd->fimc[i])
+ fd = fmd->fimc[i];
+ if (!fd)
+ return -ENXIO;
+ ret = pm_runtime_get_sync(&fd->pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ WARN_ON(pdata->num_clients > ARRAY_SIZE(fmd->sensor));
+ num_clients = min_t(u32, pdata->num_clients, ARRAY_SIZE(fmd->sensor));
+
+ fmd->num_sensors = num_clients;
+ for (i = 0; i < num_clients; i++) {
+ fmd->sensor[i].pdata = &pdata->isp_info[i];
+ ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], true);
+ if (ret)
+ break;
+ fmd->sensor[i].subdev =
+ fimc_md_register_sensor(fmd, &fmd->sensor[i]);
+ ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], false);
+ if (ret)
+ break;
+ }
+ pm_runtime_put(&fd->pdev->dev);
+ return ret;
+}
+
+/*
+ * MIPI CSIS and FIMC platform devices registration.
+ */
+static int fimc_register_callback(struct device *dev, void *p)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+ struct fimc_md *fmd = p;
+ int ret;
+
+ if (!fimc || !fimc->pdev)
+ return 0;
+ if (fimc->pdev->id < 0 || fimc->pdev->id >= FIMC_MAX_DEVS)
+ return 0;
+
+ fmd->fimc[fimc->pdev->id] = fimc;
+ ret = fimc_register_m2m_device(fimc, &fmd->v4l2_dev);
+ if (ret)
+ return ret;
+ ret = fimc_register_capture_device(fimc, &fmd->v4l2_dev);
+ if (!ret)
+ fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
+ return ret;
+}
+
+static int csis_register_callback(struct device *dev, void *p)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct platform_device *pdev;
+ struct fimc_md *fmd = p;
+ int id, ret;
+
+ if (!sd)
+ return 0;
+ pdev = v4l2_get_subdevdata(sd);
+ if (!pdev || pdev->id < 0 || pdev->id >= CSIS_MAX_ENTITIES)
+ return 0;
+ v4l2_info(sd, "csis%d sd: %s\n", pdev->id, sd->name);
+
+ id = pdev->id < 0 ? 0 : pdev->id;
+ fmd->csis[id].sd = sd;
+ sd->grp_id = CSIS_GROUP_ID;
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (ret)
+ v4l2_err(&fmd->v4l2_dev,
+ "Failed to register CSIS subdevice: %d\n", ret);
+ return ret;
+}
+
+/**
+ * fimc_md_register_platform_entities - register FIMC and CSIS media entities
+ */
+static int fimc_md_register_platform_entities(struct fimc_md *fmd)
+{
+ struct device_driver *driver;
+ int ret;
+
+ driver = driver_find(FIMC_MODULE_NAME, &platform_bus_type);
+ if (!driver)
+ return -ENODEV;
+ ret = driver_for_each_device(driver, NULL, fmd,
+ fimc_register_callback);
+ put_driver(driver);
+ if (ret)
+ return ret;
+
+ driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
+ if (driver) {
+ ret = driver_for_each_device(driver, NULL, fmd,
+ csis_register_callback);
+ put_driver(driver);
+ }
+ return ret;
+}
+
+static void fimc_md_unregister_entities(struct fimc_md *fmd)
+{
+ int i;
+
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ if (fmd->fimc[i] == NULL)
+ continue;
+ fimc_unregister_m2m_device(fmd->fimc[i]);
+ fimc_unregister_capture_device(fmd->fimc[i]);
+ fmd->fimc[i] = NULL;
+ }
+ for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
+ if (fmd->csis[i].sd == NULL)
+ continue;
+ v4l2_device_unregister_subdev(fmd->csis[i].sd);
+ fmd->csis[i].sd = NULL;
+ }
+ for (i = 0; i < fmd->num_sensors; i++) {
+ if (fmd->sensor[i].subdev == NULL)
+ continue;
+ fimc_md_unregister_sensor(fmd->sensor[i].subdev);
+ fmd->sensor[i].subdev = NULL;
+ }
+}
+
+static int fimc_md_register_video_nodes(struct fimc_md *fmd)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
+ if (!fmd->fimc[i])
+ continue;
+
+ if (fmd->fimc[i]->m2m.vfd)
+ ret = video_register_device(fmd->fimc[i]->m2m.vfd,
+ VFL_TYPE_GRABBER, -1);
+ if (ret)
+ break;
+ if (fmd->fimc[i]->vid_cap.vfd)
+ ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
+ VFL_TYPE_GRABBER, -1);
+ }
+
+ return ret;
+}
+
+/**
+ * __fimc_md_create_fimc_links - create links to all FIMC entities
+ * @fmd: fimc media device
+ * @source: the source entity to create links to all fimc entities from
+ * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
+ * @pad: the source entity pad index
+ * @fimc_id: index of the fimc device for which link should be enabled
+ */
+static int __fimc_md_create_fimc_links(struct fimc_md *fmd,
+ struct media_entity *source,
+ struct v4l2_subdev *sensor,
+ int pad, int fimc_id)
+{
+ struct fimc_sensor_info *s_info;
+ struct media_entity *sink;
+ unsigned int flags;
+ int ret, i;
+
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ if (!fmd->fimc[i])
+ break;
+ /*
+		 * Some FIMC variants are not fitted with a camera capture
+		 * interface. Skip creating a link from the sensor for those.
+ */
+ if (sensor->grp_id == SENSOR_GROUP_ID &&
+ !fmd->fimc[i]->variant->has_cam_if)
+ continue;
+
+ flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+ sink = &fmd->fimc[i]->vid_cap.subdev->entity;
+ ret = media_entity_create_link(source, pad, sink,
+ FIMC_SD_PAD_SINK, flags);
+ if (ret)
+ return ret;
+
+ /* Notify FIMC capture subdev entity */
+ ret = media_entity_call(sink, link_setup, &sink->pads[0],
+ &source->pads[pad], flags);
+ if (ret)
+ break;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ source->name, flags ? '=' : '-', sink->name);
+
+ if (flags == 0)
+ continue;
+ s_info = v4l2_get_subdev_hostdata(sensor);
+ if (!WARN_ON(s_info == NULL)) {
+ unsigned long irq_flags;
+ spin_lock_irqsave(&fmd->slock, irq_flags);
+ s_info->host = fmd->fimc[i];
+ spin_unlock_irqrestore(&fmd->slock, irq_flags);
+ }
+ }
+ return 0;
+}
+
+/**
+ * fimc_md_create_links - create default links between registered entities
+ *
+ * Parallel interface sensor entities are connected directly to FIMC capture
+ * entities. Sensors on the MIPI CSIS bus are connected through an immutable
+ * link with the CSI receiver entity specified by mux_id. Every registered CSIS
+ * entity has a link to each registered FIMC capture entity. Enabled links
+ * are created by default between each subsequently registered sensor and the
+ * corresponding FIMC capture entity. The number of default active links is
+ * determined by the number of available sensors or FIMC entities,
+ * whichever is less.
+ */
+static int fimc_md_create_links(struct fimc_md *fmd)
+{
+ struct v4l2_subdev *sensor, *csis;
+ struct s5p_fimc_isp_info *pdata;
+ struct fimc_sensor_info *s_info;
+ struct media_entity *source, *sink;
+ int i, pad, fimc_id = 0;
+ int ret = 0;
+ u32 flags;
+
+ for (i = 0; i < fmd->num_sensors; i++) {
+ if (fmd->sensor[i].subdev == NULL)
+ continue;
+
+ sensor = fmd->sensor[i].subdev;
+ s_info = v4l2_get_subdev_hostdata(sensor);
+ if (!s_info || !s_info->pdata)
+ continue;
+
+ source = NULL;
+ pdata = s_info->pdata;
+
+ switch (pdata->bus_type) {
+ case FIMC_MIPI_CSI2:
+ if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES,
+ "Wrong CSI channel id: %d\n", pdata->mux_id))
+ return -EINVAL;
+
+ csis = fmd->csis[pdata->mux_id].sd;
+ if (WARN(csis == NULL,
+ "MIPI-CSI interface specified "
+ "but s5p-csis module is not loaded!\n"))
+ continue;
+
+ ret = media_entity_create_link(&sensor->entity, 0,
+ &csis->entity, CSIS_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]",
+ sensor->entity.name, csis->entity.name);
+
+ source = &csis->entity;
+ pad = CSIS_PAD_SOURCE;
+ break;
+
+ case FIMC_ITU_601...FIMC_ITU_656:
+ source = &sensor->entity;
+ pad = 0;
+ break;
+
+ default:
+ v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n",
+ pdata->bus_type);
+ return -EINVAL;
+ }
+ if (source == NULL)
+ continue;
+
+ ret = __fimc_md_create_fimc_links(fmd, source, sensor, pad,
+ fimc_id++);
+ }
+ /* Create immutable links between each FIMC's subdev and video node */
+ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ if (!fmd->fimc[i])
+ continue;
+ source = &fmd->fimc[i]->vid_cap.subdev->entity;
+ sink = &fmd->fimc[i]->vid_cap.vfd->entity;
+ ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+ sink, 0, flags);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * The peripheral sensor clock management.
+ */
+static int fimc_md_get_clocks(struct fimc_md *fmd)
+{
+ char clk_name[32];
+ struct clk *clock;
+ int i;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
+ snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i);
+ clock = clk_get(NULL, clk_name);
+ if (IS_ERR_OR_NULL(clock)) {
+ v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s",
+ clk_name);
+ return -ENXIO;
+ }
+ fmd->camclk[i].clock = clock;
+ }
+ return 0;
+}
+
+static void fimc_md_put_clocks(struct fimc_md *fmd)
+{
+ int i = FIMC_MAX_CAMCLKS;
+
+ while (--i >= 0) {
+ if (IS_ERR_OR_NULL(fmd->camclk[i].clock))
+ continue;
+ clk_put(fmd->camclk[i].clock);
+ fmd->camclk[i].clock = NULL;
+ }
+}
+
+static int __fimc_md_set_camclk(struct fimc_md *fmd,
+ struct fimc_sensor_info *s_info,
+ bool on)
+{
+ struct s5p_fimc_isp_info *pdata = s_info->pdata;
+ struct fimc_camclk_info *camclk;
+ int ret = 0;
+
+ if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
+ return -EINVAL;
+
+ if (s_info->clk_on == on)
+ return 0;
+ camclk = &fmd->camclk[pdata->clk_id];
+
+ dbg("camclk %d, f: %lu, clk: %p, on: %d",
+ pdata->clk_id, pdata->clk_frequency, camclk, on);
+
+ if (on) {
+ if (camclk->use_count > 0 &&
+ camclk->frequency != pdata->clk_frequency)
+ return -EINVAL;
+
+ if (camclk->use_count++ == 0) {
+ clk_set_rate(camclk->clock, pdata->clk_frequency);
+ camclk->frequency = pdata->clk_frequency;
+ ret = clk_enable(camclk->clock);
+ }
+ s_info->clk_on = 1;
+ dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+ clk_get_rate(camclk->clock));
+
+ return ret;
+ }
+
+ if (WARN_ON(camclk->use_count == 0))
+ return 0;
+
+ if (--camclk->use_count == 0) {
+ clk_disable(camclk->clock);
+ s_info->clk_on = 0;
+ dbg("Disabled camclk %d", pdata->clk_id);
+ }
+ return ret;
+}
+
+/**
+ * fimc_md_set_camclk - peripheral sensor clock setup
+ * @sd: sensor subdev to configure sclk_cam clock for
+ * @on: 1 to enable or 0 to disable the clock
+ *
+ * There are 2 separate clock outputs available in the SoC for external
+ * image processors. These clocks are shared between all registered FIMC
+ * devices to which sensors can be attached, either directly or through
+ * the MIPI CSI receiver. The clock may be used by multiple sensors
+ * concurrently, provided they request the same frequency.
+ * The per-sensor clk_on attribute helps to synchronize accesses to the
+ * sclk_cam clocks from the video and media device nodes.
+ * This function should only be called when the graph mutex is held.
+ */
+int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
+{
+ struct fimc_sensor_info *s_info = v4l2_get_subdev_hostdata(sd);
+ struct fimc_md *fmd = entity_to_fimc_mdev(&sd->entity);
+
+ return __fimc_md_set_camclk(fmd, s_info, on);
+}
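A minimal usage sketch (hypothetical helper, not part of this patch), assuming the pipeline sensor subdev has already been resolved; it only illustrates the graph-mutex requirement documented above:

static int example_sensor_clk_enable(struct fimc_dev *fimc)
{
	struct v4l2_subdev *sensor = fimc->pipeline.sensor;
	int ret;

	if (sensor == NULL)
		return -ENXIO;

	/* fimc_md_set_camclk() must be called with the graph mutex held. */
	mutex_lock(&sensor->entity.parent->graph_mutex);
	ret = fimc_md_set_camclk(sensor, true);
	mutex_unlock(&sensor->entity.parent->graph_mutex);
	return ret;
}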
+
+static int fimc_md_link_notify(struct media_pad *source,
+ struct media_pad *sink, u32 flags)
+{
+ struct v4l2_subdev *sd;
+ struct fimc_dev *fimc;
+ int ret = 0;
+
+ if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(sink->entity);
+ fimc = v4l2_get_subdevdata(sd);
+
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ret = __fimc_pipeline_shutdown(fimc);
+ fimc->pipeline.sensor = NULL;
+ fimc->pipeline.csis = NULL;
+
+ mutex_lock(&fimc->lock);
+ fimc_ctrls_delete(fimc->vid_cap.ctx);
+ mutex_unlock(&fimc->lock);
+ return ret;
+ }
+ /*
+ * Link activation. Enable power of pipeline elements only if the
+ * pipeline is already in use, i.e. its video node is opened.
+ * Recreate the controls destroyed during the link deactivation.
+ */
+ mutex_lock(&fimc->lock);
+ if (fimc->vid_cap.refcnt > 0) {
+ ret = __fimc_pipeline_initialize(fimc, source->entity, true);
+ if (!ret)
+ ret = fimc_capture_ctrls_create(fimc);
+ }
+ mutex_unlock(&fimc->lock);
+
+ return ret ? -EPIPE : ret;
+}
+
+static ssize_t fimc_md_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fimc_md *fmd = platform_get_drvdata(pdev);
+
+ if (fmd->user_subdev_api)
+ return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
+
+ return strlcpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE);
+}
+
+static ssize_t fimc_md_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fimc_md *fmd = platform_get_drvdata(pdev);
+ bool subdev_api;
+ int i;
+
+ if (!strcmp(buf, "vid-dev\n"))
+ subdev_api = false;
+ else if (!strcmp(buf, "sub-dev\n"))
+ subdev_api = true;
+ else
+ return count;
+
+ fmd->user_subdev_api = subdev_api;
+ for (i = 0; i < FIMC_MAX_DEVS; i++)
+ if (fmd->fimc[i])
+ fmd->fimc[i]->vid_cap.user_subdev_api = subdev_api;
+ return count;
+}
+/*
+ * This device attribute selects the video pipeline configuration method.
+ * The following values are valid:
+ * vid-dev - V4L2 video node API only; the subdevices are configured
+ * by the host driver.
+ * sub-dev - media controller API; the subdevs must be configured in user
+ * space before streaming is started.
+ */
+static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
+ fimc_md_sysfs_show, fimc_md_sysfs_store);
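For example, assuming the media device is instantiated as platform device "s5p-fimc-md", the configuration method could be switched from user space with: echo sub-dev > /sys/devices/platform/s5p-fimc-md/subdev_conf_mode (the exact sysfs path depends on how the platform device is registered).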
+
+static int __devinit fimc_md_probe(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev;
+ struct fimc_md *fmd;
+ int ret;
+
+ if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
+ return -EINVAL;
+
+ fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
+ if (!fmd)
+ return -ENOMEM;
+
+ spin_lock_init(&fmd->slock);
+ fmd->pdev = pdev;
+
+ strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC",
+ sizeof(fmd->media_dev.model));
+ fmd->media_dev.link_notify = fimc_md_link_notify;
+ fmd->media_dev.dev = &pdev->dev;
+
+ v4l2_dev = &fmd->v4l2_dev;
+ v4l2_dev->mdev = &fmd->media_dev;
+ v4l2_dev->notify = fimc_sensor_notify;
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s",
+ dev_name(&pdev->dev));
+
+ ret = v4l2_device_register(&pdev->dev, &fmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
+ goto err1;
+ }
+ ret = media_device_register(&fmd->media_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret);
+ goto err2;
+ }
+ ret = fimc_md_get_clocks(fmd);
+ if (ret)
+ goto err3;
+
+ fmd->user_subdev_api = false;
+ ret = fimc_md_register_platform_entities(fmd);
+ if (ret)
+ goto err3;
+
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret)
+ goto err3;
+ ret = fimc_md_create_links(fmd);
+ if (ret)
+ goto err3;
+ ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
+ if (ret)
+ goto err3;
+ ret = fimc_md_register_video_nodes(fmd);
+ if (ret)
+ goto err3;
+
+ ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+ if (!ret) {
+ platform_set_drvdata(pdev, fmd);
+ return 0;
+ }
+err3:
+ media_device_unregister(&fmd->media_dev);
+ fimc_md_put_clocks(fmd);
+ fimc_md_unregister_entities(fmd);
+err2:
+ v4l2_device_unregister(&fmd->v4l2_dev);
+err1:
+ kfree(fmd);
+ return ret;
+}
+
+static int __devexit fimc_md_remove(struct platform_device *pdev)
+{
+ struct fimc_md *fmd = platform_get_drvdata(pdev);
+
+ if (!fmd)
+ return 0;
+ device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+ fimc_md_unregister_entities(fmd);
+ media_device_unregister(&fmd->media_dev);
+ fimc_md_put_clocks(fmd);
+ kfree(fmd);
+ return 0;
+}
+
+static struct platform_driver fimc_md_driver = {
+ .probe = fimc_md_probe,
+ .remove = __devexit_p(fimc_md_remove),
+ .driver = {
+ .name = "s5p-fimc-md",
+ .owner = THIS_MODULE,
+ }
+};
+
+int __init fimc_md_init(void)
+{
+ int ret;
+ request_module("s5p-csis");
+ ret = fimc_register_driver();
+ if (ret)
+ return ret;
+ return platform_driver_register(&fimc_md_driver);
+}
+void __exit fimc_md_exit(void)
+{
+ platform_driver_unregister(&fimc_md_driver);
+ fimc_unregister_driver();
+}
+
+module_init(fimc_md_init);
+module_exit(fimc_md_exit);
+
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0.1");
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.h b/drivers/media/video/s5p-fimc/fimc-mdevice.h
new file mode 100644
index 00000000000..da3780823e7
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_MDEVICE_H_
+#define FIMC_MDEVICE_H_
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "fimc-core.h"
+#include "mipi-csis.h"
+
+/* Group IDs of sensor, MIPI CSIS and the writeback subdevs. */
+#define SENSOR_GROUP_ID (1 << 8)
+#define CSIS_GROUP_ID (1 << 9)
+#define WRITEBACK_GROUP_ID (1 << 10)
+
+#define FIMC_MAX_SENSORS 8
+#define FIMC_MAX_CAMCLKS 2
+
+struct fimc_csis_info {
+ struct v4l2_subdev *sd;
+ int id;
+};
+
+struct fimc_camclk_info {
+ struct clk *clock;
+ int use_count;
+ unsigned long frequency;
+};
+
+/**
+ * struct fimc_sensor_info - image data source subdev information
+ * @pdata: sensor's attributes passed as media device's platform data
+ * @subdev: image sensor v4l2 subdev
+ * @host: fimc device the sensor is currently linked to
+ * @clk_on: sclk_cam clock's state associated with this subdev
+ *
+ * This data structure applies to image sensor and the writeback subdevs.
+ */
+struct fimc_sensor_info {
+ struct s5p_fimc_isp_info *pdata;
+ struct v4l2_subdev *subdev;
+ struct fimc_dev *host;
+ bool clk_on;
+};
+
+/**
+ * struct fimc_md - fimc media device information
+ * @csis: MIPI CSIS subdevs data
+ * @sensor: array of registered sensor subdevs
+ * @num_sensors: actual number of registered sensors
+ * @camclk: external sensor clock information
+ * @fimc: array of registered fimc devices
+ * @media_dev: top level media device
+ * @v4l2_dev: top level v4l2_device holding up the subdevs
+ * @pdev: platform device this media device is hooked up into
+ * @user_subdev_api: true if subdevs are not configured by the host driver
+ * @slock: spinlock protecting @sensor array
+ */
+struct fimc_md {
+ struct fimc_csis_info csis[CSIS_MAX_ENTITIES];
+ struct fimc_sensor_info sensor[FIMC_MAX_SENSORS];
+ int num_sensors;
+ struct fimc_camclk_info camclk[FIMC_MAX_CAMCLKS];
+ struct fimc_dev *fimc[FIMC_MAX_DEVS];
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct platform_device *pdev;
+ bool user_subdev_api;
+ spinlock_t slock;
+};
+
+#define is_subdev_pad(pad) (pad == NULL || \
+ media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV)
+
+#define me_subtype(me) \
+ ((me->type) & (MEDIA_ENT_TYPE_MASK | MEDIA_ENT_SUBTYPE_MASK))
+
+#define subdev_has_devnode(__sd) (__sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE)
+
+static inline struct fimc_md *entity_to_fimc_mdev(struct media_entity *me)
+{
+ return me->parent == NULL ? NULL :
+ container_of(me->parent, struct fimc_md, media_dev);
+}
+
+static inline void fimc_md_graph_lock(struct fimc_dev *fimc)
+{
+ BUG_ON(fimc->vid_cap.vfd == NULL);
+ mutex_lock(&fimc->vid_cap.vfd->entity.parent->graph_mutex);
+}
+
+static inline void fimc_md_graph_unlock(struct fimc_dev *fimc)
+{
+ BUG_ON(fimc->vid_cap.vfd == NULL);
+ mutex_unlock(&fimc->vid_cap.vfd->entity.parent->graph_mutex);
+}
+
+int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
+void fimc_pipeline_prepare(struct fimc_dev *fimc, struct media_entity *me);
+int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
+ bool resume);
+int fimc_pipeline_shutdown(struct fimc_dev *fimc);
+int fimc_pipeline_s_power(struct fimc_dev *fimc, int state);
+int fimc_pipeline_s_stream(struct fimc_dev *fimc, int state);
+
+#endif
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 4893b2d91d8..20e664e3416 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -30,7 +30,7 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg |= (S5P_CIGCTRL_SWRST | S5P_CIGCTRL_IRQ_LEVEL);
writel(cfg, dev->regs + S5P_CIGCTRL);
- udelay(1000);
+ udelay(10);
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
@@ -41,19 +41,11 @@ static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
{
u32 flip = S5P_MSCTRL_FLIP_NORMAL;
- switch (ctx->flip) {
- case FLIP_X_AXIS:
+ if (ctx->hflip)
flip = S5P_MSCTRL_FLIP_X_MIRROR;
- break;
- case FLIP_Y_AXIS:
+ if (ctx->vflip)
flip = S5P_MSCTRL_FLIP_Y_MIRROR;
- break;
- case FLIP_XY_AXIS:
- flip = S5P_MSCTRL_FLIP_180;
- break;
- default:
- break;
- }
+
if (ctx->rotation <= 90)
return flip;
@@ -64,19 +56,11 @@ static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
{
u32 flip = S5P_CITRGFMT_FLIP_NORMAL;
- switch (ctx->flip) {
- case FLIP_X_AXIS:
- flip = S5P_CITRGFMT_FLIP_X_MIRROR;
- break;
- case FLIP_Y_AXIS:
- flip = S5P_CITRGFMT_FLIP_Y_MIRROR;
- break;
- case FLIP_XY_AXIS:
- flip = S5P_CITRGFMT_FLIP_180;
- break;
- default:
- break;
- }
+ if (ctx->hflip)
+ flip |= S5P_CITRGFMT_FLIP_X_MIRROR;
+ if (ctx->vflip)
+ flip |= S5P_CITRGFMT_FLIP_Y_MIRROR;
+
if (ctx->rotation <= 90)
return flip;
@@ -368,17 +352,19 @@ void fimc_hw_en_capture(struct fimc_ctx *ctx)
writel(cfg | S5P_CIIMGCPT_IMGCPTEN, dev->regs + S5P_CIIMGCPT);
}
-void fimc_hw_set_effect(struct fimc_ctx *ctx)
+void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_effect *effect = &ctx->effect;
- u32 cfg = (S5P_CIIMGEFF_IE_ENABLE | S5P_CIIMGEFF_IE_SC_AFTER);
-
- cfg |= effect->type;
+ u32 cfg = 0;
- if (effect->type == S5P_FIMC_EFFECT_ARBITRARY) {
- cfg |= S5P_CIIMGEFF_PAT_CB(effect->pat_cb);
- cfg |= S5P_CIIMGEFF_PAT_CR(effect->pat_cr);
+ if (active) {
+ cfg |= S5P_CIIMGEFF_IE_SC_AFTER | S5P_CIIMGEFF_IE_ENABLE;
+ cfg |= effect->type;
+ if (effect->type == S5P_FIMC_EFFECT_ARBITRARY) {
+ cfg |= S5P_CIIMGEFF_PAT_CB(effect->pat_cb);
+ cfg |= S5P_CIIMGEFF_PAT_CR(effect->pat_cr);
+ }
}
writel(cfg, dev->regs + S5P_CIIMGEFF);
@@ -547,20 +533,24 @@ int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
u32 cfg = readl(fimc->regs + S5P_CIGCTRL);
cfg &= ~(S5P_CIGCTRL_INVPOLPCLK | S5P_CIGCTRL_INVPOLVSYNC |
- S5P_CIGCTRL_INVPOLHREF | S5P_CIGCTRL_INVPOLHSYNC);
+ S5P_CIGCTRL_INVPOLHREF | S5P_CIGCTRL_INVPOLHSYNC |
+ S5P_CIGCTRL_INVPOLFIELD);
- if (cam->flags & FIMC_CLK_INV_PCLK)
+ if (cam->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
cfg |= S5P_CIGCTRL_INVPOLPCLK;
- if (cam->flags & FIMC_CLK_INV_VSYNC)
+ if (cam->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
cfg |= S5P_CIGCTRL_INVPOLVSYNC;
- if (cam->flags & FIMC_CLK_INV_HREF)
+ if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
cfg |= S5P_CIGCTRL_INVPOLHREF;
- if (cam->flags & FIMC_CLK_INV_HSYNC)
+ if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
cfg |= S5P_CIGCTRL_INVPOLHSYNC;
+ if (cam->flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= S5P_CIGCTRL_INVPOLFIELD;
+
writel(cfg, fimc->regs + S5P_CIGCTRL);
return 0;
@@ -588,7 +578,7 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
- if (fimc->vid_cap.fmt.code == pix_desc[i].pixelcode) {
+ if (fimc->vid_cap.mf.code == pix_desc[i].pixelcode) {
cfg = pix_desc[i].cisrcfmt;
bus_width = pix_desc[i].bus_width;
break;
@@ -596,9 +586,9 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
}
if (i == ARRAY_SIZE(pix_desc)) {
- v4l2_err(&fimc->vid_cap.v4l2_dev,
+ v4l2_err(fimc->vid_cap.vfd,
"Camera color format not supported: %d\n",
- fimc->vid_cap.fmt.code);
+ fimc->vid_cap.mf.code);
return -EINVAL;
}
@@ -608,6 +598,9 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
else if (bus_width == 16)
cfg |= S5P_CISRCFMT_ITU601_16BIT;
} /* else defaults to ITU-R BT.656 8-bit */
+ } else if (cam->bus_type == FIMC_MIPI_CSI2) {
+ if (fimc_fmt_is_jpeg(f->fmt->color))
+ cfg |= S5P_CISRCFMT_ITU601_8BIT;
}
cfg |= S5P_CISRCFMT_HSIZE(f->o_width) | S5P_CISRCFMT_VSIZE(f->o_height);
@@ -649,7 +642,7 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
/* Select ITU B interface, disable Writeback path and test pattern. */
cfg &= ~(S5P_CIGCTRL_TESTPAT_MASK | S5P_CIGCTRL_SELCAM_ITU_A |
S5P_CIGCTRL_SELCAM_MIPI | S5P_CIGCTRL_CAMIF_SELWB |
- S5P_CIGCTRL_SELCAM_MIPI_A);
+ S5P_CIGCTRL_SELCAM_MIPI_A | S5P_CIGCTRL_CAM_JPEG);
if (cam->bus_type == FIMC_MIPI_CSI2) {
cfg |= S5P_CIGCTRL_SELCAM_MIPI;
@@ -658,11 +651,18 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
cfg |= S5P_CIGCTRL_SELCAM_MIPI_A;
/* TODO: add remaining supported formats. */
- if (vid_cap->fmt.code == V4L2_MBUS_FMT_VYUY8_2X8) {
+ switch (vid_cap->mf.code) {
+ case V4L2_MBUS_FMT_VYUY8_2X8:
tmp = S5P_CSIIMGFMT_YCBCR422_8BIT;
- } else {
- err("camera image format not supported: %d",
- vid_cap->fmt.code);
+ break;
+ case V4L2_MBUS_FMT_JPEG_1X8:
+ tmp = S5P_CSIIMGFMT_USER(1);
+ cfg |= S5P_CIGCTRL_CAM_JPEG;
+ break;
+ default:
+ v4l2_err(fimc->vid_cap.vfd,
+ "Not supported camera pixel format: %d",
+ vid_cap->mf.code);
return -EINVAL;
}
tmp |= (cam->csi_data_align == 32) << 8;
diff --git a/drivers/media/video/s5p-fimc/mipi-csis.c b/drivers/media/video/s5p-fimc/mipi-csis.c
index ef056d6605c..59d79bc2f58 100644
--- a/drivers/media/video/s5p-fimc/mipi-csis.c
+++ b/drivers/media/video/s5p-fimc/mipi-csis.c
@@ -81,6 +81,12 @@ static char *csi_clock_name[] = {
};
#define NUM_CSIS_CLOCKS ARRAY_SIZE(csi_clock_name)
+static const char * const csis_supply_name[] = {
+ "vdd11", /* 1.1V or 1.2V (s5pc100) MIPI CSI suppply */
+ "vdd18", /* VDD 1.8V and MIPI CSI PLL supply */
+};
+#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name)
+
enum {
ST_POWERED = 1,
ST_STREAMING = 2,
@@ -109,9 +115,9 @@ struct csis_state {
struct platform_device *pdev;
struct resource *regs_res;
void __iomem *regs;
+ struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES];
struct clk *clock[NUM_CSIS_CLOCKS];
int irq;
- struct regulator *supply;
u32 flags;
const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt format;
@@ -460,6 +466,7 @@ static int __devinit s5pcsis_probe(struct platform_device *pdev)
struct resource *regs_res;
struct csis_state *state;
int ret = -ENOMEM;
+ int i;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
@@ -519,14 +526,13 @@ static int __devinit s5pcsis_probe(struct platform_device *pdev)
goto e_clkput;
}
- if (!pdata->fixed_phy_vdd) {
- state->supply = regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(state->supply)) {
- ret = PTR_ERR(state->supply);
- state->supply = NULL;
- goto e_clkput;
- }
- }
+ for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
+ state->supplies[i].supply = csis_supply_name[i];
+
+ ret = regulator_bulk_get(&pdev->dev, CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ goto e_clkput;
ret = request_irq(state->irq, s5pcsis_irq_handler, 0,
dev_name(&pdev->dev), state);
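The probe path above switches from a single optional "vdd" regulator to the regulator_bulk_*() helpers. A minimal sketch of that pattern; the demo_* identifiers and the two supply names are placeholders, not taken from this driver:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static const char * const demo_supply_name[] = { "vdda", "vddio" };	/* assumed names */
#define DEMO_NUM_SUPPLIES ARRAY_SIZE(demo_supply_name)

struct demo_state {
	struct regulator_bulk_data supplies[DEMO_NUM_SUPPLIES];
};

static int demo_get_supplies(struct device *dev, struct demo_state *st)
{
	int i, ret;

	/* Fill in the consumer names, then request every supply in one call. */
	for (i = 0; i < DEMO_NUM_SUPPLIES; i++)
		st->supplies[i].supply = demo_supply_name[i];

	ret = regulator_bulk_get(dev, DEMO_NUM_SUPPLIES, st->supplies);
	if (ret)
		return ret;

	/* Power up; regulator_bulk_enable() rolls back internally on failure. */
	ret = regulator_bulk_enable(DEMO_NUM_SUPPLIES, st->supplies);
	if (ret)
		regulator_bulk_free(DEMO_NUM_SUPPLIES, st->supplies);
	return ret;
}

static void demo_put_supplies(struct demo_state *st)
{
	regulator_bulk_disable(DEMO_NUM_SUPPLIES, st->supplies);
	regulator_bulk_free(DEMO_NUM_SUPPLIES, st->supplies);
}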
@@ -553,7 +559,6 @@ static int __devinit s5pcsis_probe(struct platform_device *pdev)
/* .. and a pointer to the subdev. */
platform_set_drvdata(pdev, &state->sd);
- state->flags = ST_SUSPENDED;
pm_runtime_enable(&pdev->dev);
return 0;
@@ -561,8 +566,7 @@ static int __devinit s5pcsis_probe(struct platform_device *pdev)
e_irqfree:
free_irq(state->irq, state);
e_regput:
- if (state->supply)
- regulator_put(state->supply);
+ regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
e_clkput:
clk_disable(state->clock[CSIS_CLK_MUX]);
s5pcsis_clk_put(state);
@@ -575,7 +579,7 @@ e_free:
return ret;
}
-static int s5pcsis_suspend(struct device *dev)
+static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
{
struct s5p_platform_mipi_csis *pdata = dev->platform_data;
struct platform_device *pdev = to_platform_device(dev);
@@ -592,21 +596,21 @@ static int s5pcsis_suspend(struct device *dev)
ret = pdata->phy_enable(state->pdev, false);
if (ret)
goto unlock;
- if (state->supply) {
- ret = regulator_disable(state->supply);
- if (ret)
- goto unlock;
- }
+ ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ goto unlock;
clk_disable(state->clock[CSIS_CLK_GATE]);
state->flags &= ~ST_POWERED;
+ if (!runtime)
+ state->flags |= ST_SUSPENDED;
}
- state->flags |= ST_SUSPENDED;
unlock:
mutex_unlock(&state->lock);
return ret ? -EAGAIN : 0;
}
-static int s5pcsis_resume(struct device *dev)
+static int s5pcsis_pm_resume(struct device *dev, bool runtime)
{
struct s5p_platform_mipi_csis *pdata = dev->platform_data;
struct platform_device *pdev = to_platform_device(dev);
@@ -618,20 +622,20 @@ static int s5pcsis_resume(struct device *dev)
__func__, state->flags);
mutex_lock(&state->lock);
- if (!(state->flags & ST_SUSPENDED))
+ if (!runtime && !(state->flags & ST_SUSPENDED))
goto unlock;
if (!(state->flags & ST_POWERED)) {
- if (state->supply)
- ret = regulator_enable(state->supply);
+ ret = regulator_bulk_enable(CSIS_NUM_SUPPLIES,
+ state->supplies);
if (ret)
goto unlock;
-
ret = pdata->phy_enable(state->pdev, true);
if (!ret) {
state->flags |= ST_POWERED;
- } else if (state->supply) {
- regulator_disable(state->supply);
+ } else {
+ regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
goto unlock;
}
clk_enable(state->clock[CSIS_CLK_GATE]);
@@ -646,24 +650,26 @@ static int s5pcsis_resume(struct device *dev)
}
#ifdef CONFIG_PM_SLEEP
-static int s5pcsis_pm_suspend(struct device *dev)
+static int s5pcsis_suspend(struct device *dev)
{
- return s5pcsis_suspend(dev);
+ return s5pcsis_pm_suspend(dev, false);
}
-static int s5pcsis_pm_resume(struct device *dev)
+static int s5pcsis_resume(struct device *dev)
{
- int ret;
-
- ret = s5pcsis_resume(dev);
+ return s5pcsis_pm_resume(dev, false);
+}
+#endif
- if (!ret) {
- pm_runtime_disable(dev);
- ret = pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
+#ifdef CONFIG_PM_RUNTIME
+static int s5pcsis_runtime_suspend(struct device *dev)
+{
+ return s5pcsis_pm_suspend(dev, true);
+}
- return ret;
+static int s5pcsis_runtime_resume(struct device *dev)
+{
+ return s5pcsis_pm_resume(dev, true);
}
#endif
@@ -679,8 +685,7 @@ static int __devexit s5pcsis_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
s5pcsis_clk_put(state);
- if (state->supply)
- regulator_put(state->supply);
+ regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
media_entity_cleanup(&state->sd.entity);
free_irq(state->irq, state);
@@ -692,8 +697,9 @@ static int __devexit s5pcsis_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops s5pcsis_pm_ops = {
- SET_RUNTIME_PM_OPS(s5pcsis_suspend, s5pcsis_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(s5pcsis_pm_suspend, s5pcsis_pm_resume)
+ SET_RUNTIME_PM_OPS(s5pcsis_runtime_suspend, s5pcsis_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(s5pcsis_suspend, s5pcsis_resume)
};
static struct platform_driver s5pcsis_driver = {
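Condensed, the reworked PM wiring pairs one suspend/resume helper with both the system-sleep and runtime callbacks via a bool flag. A sketch under placeholder demo_* names, with the shared helpers reduced to stubs; a real driver would gate clocks and regulators there and only set its "suspended" flag on the system-sleep path:

#include <linux/device.h>
#include <linux/pm.h>

static int demo_pm_suspend(struct device *dev, bool runtime) { return 0; }	/* stub */
static int demo_pm_resume(struct device *dev, bool runtime) { return 0; }	/* stub */

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev) { return demo_pm_suspend(dev, false); }
static int demo_resume(struct device *dev)  { return demo_pm_resume(dev, false); }
#endif

#ifdef CONFIG_PM_RUNTIME
static int demo_runtime_suspend(struct device *dev) { return demo_pm_suspend(dev, true); }
static int demo_runtime_resume(struct device *dev)  { return demo_pm_resume(dev, true); }
#endif

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};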
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
index 0fea3e635d7..c8e3b94bd91 100644
--- a/drivers/media/video/s5p-fimc/regs-fimc.h
+++ b/drivers/media/video/s5p-fimc/regs-fimc.h
@@ -54,12 +54,14 @@
#define S5P_CIGCTRL_IRQ_CLR (1 << 19)
#define S5P_CIGCTRL_IRQ_ENABLE (1 << 16)
#define S5P_CIGCTRL_SHDW_DISABLE (1 << 12)
+#define S5P_CIGCTRL_CAM_JPEG (1 << 8)
#define S5P_CIGCTRL_SELCAM_MIPI_A (1 << 7)
#define S5P_CIGCTRL_CAMIF_SELWB (1 << 6)
/* 0 - ITU601; 1 - ITU709 */
#define S5P_CIGCTRL_CSC_ITU601_709 (1 << 5)
#define S5P_CIGCTRL_INVPOLHSYNC (1 << 4)
#define S5P_CIGCTRL_SELCAM_MIPI (1 << 3)
+#define S5P_CIGCTRL_INVPOLFIELD (1 << 1)
#define S5P_CIGCTRL_INTERLACE (1 << 0)
/* Window offset 2 */
@@ -184,7 +186,6 @@
/* Image effect */
#define S5P_CIIMGEFF 0xd0
-#define S5P_CIIMGEFF_IE_DISABLE (0 << 30)
#define S5P_CIIMGEFF_IE_ENABLE (1 << 30)
#define S5P_CIIMGEFF_IE_SC_BEFORE (0 << 29)
#define S5P_CIIMGEFF_IE_SC_AFTER (1 << 29)
@@ -286,10 +287,8 @@
#define S5P_CSIIMGFMT_RAW8 0x2a
#define S5P_CSIIMGFMT_RAW10 0x2b
#define S5P_CSIIMGFMT_RAW12 0x2c
-#define S5P_CSIIMGFMT_USER1 0x30
-#define S5P_CSIIMGFMT_USER2 0x31
-#define S5P_CSIIMGFMT_USER3 0x32
-#define S5P_CSIIMGFMT_USER4 0x33
+/* User defined formats. x = 1...16. */
+#define S5P_CSIIMGFMT_USER(x) (0x30 + x - 1)
/* Output frame buffer sequence mask */
#define S5P_CIFCNTSEQ 0x1FC
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index 7dc7eab58b3..8be8b54eb74 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -202,7 +202,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
appropriate flags */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
- if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dec_y_addr) {
+ if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
memcpy(&dst_buf->b->v4l2_buf.timecode,
&src_buf->b->v4l2_buf.timecode,
sizeof(struct v4l2_timecode));
@@ -248,7 +248,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
* check which videobuf does it correspond to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
/* Check if this is the buffer we're looking for */
- if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dspl_y_addr) {
+ if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
dst_buf->b->v4l2_buf.sequence = ctx->sequence;
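vb2_dma_contig_plane_paddr() is renamed to vb2_dma_contig_plane_dma_addr() and returns a dma_addr_t rather than a raw physical address. A small sketch of the lookup the decode paths perform; the demo_buf wrapper is a placeholder standing in for s5p_mfc_buf:

#include <linux/list.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>

struct demo_buf {
	struct list_head list;
	struct vb2_buffer *b;
};

/* Find the queued buffer whose plane 0 starts at the decoded luma address. */
static struct demo_buf *demo_find_buf(struct list_head *queue, dma_addr_t y_addr)
{
	struct demo_buf *buf;

	list_for_each_entry(buf, queue, list)
		if (vb2_dma_contig_plane_dma_addr(buf->b, 0) == y_addr)
			return buf;

	return NULL;
}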
@@ -940,9 +940,8 @@ static int match_child(struct device *dev, void *data)
return !strcmp(dev_name(dev), (char *)data);
}
-
/* MFC probe function */
-static int __devinit s5p_mfc_probe(struct platform_device *pdev)
+static int s5p_mfc_probe(struct platform_device *pdev)
{
struct s5p_mfc_dev *dev;
struct video_device *vfd;
@@ -1236,7 +1235,7 @@ static const struct dev_pm_ops s5p_mfc_pm_ops = {
NULL)
};
-static struct platform_driver s5p_mfc_pdrv = {
+static struct platform_driver s5p_mfc_driver = {
.probe = s5p_mfc_probe,
.remove = __devexit_p(s5p_mfc_remove),
.driver = {
@@ -1254,15 +1253,15 @@ static int __init s5p_mfc_init(void)
int ret;
pr_info("%s", banner);
- ret = platform_driver_register(&s5p_mfc_pdrv);
+ ret = platform_driver_register(&s5p_mfc_driver);
if (ret)
pr_err("Platform device registration failed.\n");
return ret;
}
-static void __devexit s5p_mfc_exit(void)
+static void __exit s5p_mfc_exit(void)
{
- platform_driver_unregister(&s5p_mfc_pdrv);
+ platform_driver_unregister(&s5p_mfc_driver);
}
module_init(s5p_mfc_init);
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
index 5f4da80051b..f2481a85e0a 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
@@ -38,7 +38,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
* into kernel. */
mfc_debug_enter();
err = request_firmware((const struct firmware **)&fw_blob,
- "s5pc110-mfc.fw", dev->v4l2_dev.dev);
+ "s5p-mfc.fw", dev->v4l2_dev.dev);
if (err != 0) {
mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
return -EINVAL;
@@ -116,7 +116,7 @@ int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
* into kernel. */
mfc_debug_enter();
err = request_firmware((const struct firmware **)&fw_blob,
- "s5pc110-mfc.fw", dev->v4l2_dev.dev);
+ "s5p-mfc.fw", dev->v4l2_dev.dev);
if (err != 0) {
mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
return -EINVAL;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index b2c5052a9c4..844a4d7797b 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
.maximum = 32,
.step = 1,
.default_value = 1,
- .is_volatile = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
},
};
@@ -220,8 +220,8 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->version = KERNEL_VERSION(1, 0, 0);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING;
return 0;
}
@@ -744,9 +744,10 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
.vidioc_g_crop = vidioc_g_crop,
};
-static int s5p_mfc_queue_setup(struct vb2_queue *vq, unsigned int *buf_count,
- unsigned int *plane_count, unsigned long psize[],
- void *allocators[])
+static int s5p_mfc_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt, unsigned int *buf_count,
+ unsigned int *plane_count, unsigned int psize[],
+ void *allocators[])
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
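The vb2 .queue_setup() callback gains a const struct v4l2_format * argument (non-NULL when invoked for VIDIOC_CREATE_BUFS, NULL for VIDIOC_REQBUFS) and the plane sizes become unsigned int. A minimal single-plane sketch; DEMO_BUF_SIZE and the NULL allocator are placeholders, not this driver's real sizing logic:

#include <linux/videodev2.h>
#include <media/videobuf2-core.h>

#define DEMO_BUF_SIZE	(1024 * 1024)	/* assumed fallback plane size */

static int demo_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
			    unsigned int *buf_count, unsigned int *plane_count,
			    unsigned int psize[], void *allocators[])
{
	/* fmt describes the requested format for CREATE_BUFS, NULL for REQBUFS. */
	*plane_count = 1;
	psize[0] = fmt ? fmt->fmt.pix_mp.plane_fmt[0].sizeimage : DEMO_BUF_SIZE;
	allocators[0] = NULL;	/* a real driver passes its dma-contig alloc context */
	return 0;
}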
@@ -824,7 +825,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
return 0;
for (i = 0; i <= ctx->src_fmt->num_planes ; i++) {
if (IS_ERR_OR_NULL(ERR_PTR(
- vb2_dma_contig_plane_paddr(vb, i)))) {
+ vb2_dma_contig_plane_dma_addr(vb, i)))) {
mfc_err("Plane mem not allocated\n");
return -EINVAL;
}
@@ -837,13 +838,13 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
i = vb->v4l2_buf.index;
ctx->dst_bufs[i].b = vb;
ctx->dst_bufs[i].cookie.raw.luma =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->dst_bufs[i].cookie.raw.chroma =
- vb2_dma_contig_plane_paddr(vb, 1);
+ vb2_dma_contig_plane_dma_addr(vb, 1);
ctx->dst_bufs_cnt++;
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (IS_ERR_OR_NULL(ERR_PTR(
- vb2_dma_contig_plane_paddr(vb, 0)))) {
+ vb2_dma_contig_plane_dma_addr(vb, 0)))) {
mfc_err("Plane memory not allocated\n");
return -EINVAL;
}
@@ -855,7 +856,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
i = vb->v4l2_buf.index;
ctx->src_bufs[i].b = vb;
ctx->src_bufs[i].cookie.stream =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->src_bufs_cnt++;
} else {
mfc_err("s5p_mfc_buf_init: unknown queue type\n");
@@ -864,7 +865,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
return 0;
}
-static int s5p_mfc_start_streaming(struct vb2_queue *q)
+static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
@@ -1020,7 +1021,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
return ctx->ctrl_handler.error;
}
if (controls[i].is_volatile && ctx->ctrls[i])
- ctx->ctrls[i]->is_volatile = 1;
+ ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
}
return 0;
}
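The per-control is_volatile field gives way to the V4L2_CTRL_FLAG_VOLATILE flag bit. A minimal sketch of marking a standard control volatile after creation; the handler, the ops pointer and the chroma-gain control chosen here are only examples:

#include <media/v4l2-ctrls.h>

static void demo_add_volatile_gain(struct v4l2_ctrl_handler *hdl,
				   const struct v4l2_ctrl_ops *ops)
{
	struct v4l2_ctrl *ctrl;

	/* Example control: chroma gain 0..127, default 40. */
	ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CHROMA_GAIN, 0, 127, 1, 40);
	if (ctrl)
		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;	/* value is read back from hardware */
}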
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index fee094a14f4..1e8cdb77d4b 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -599,8 +599,8 @@ static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx)
while (!list_empty(&ctx->ref_queue)) {
mb_entry = list_entry((&ctx->ref_queue)->next,
struct s5p_mfc_buf, list);
- mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
list_del(&mb_entry->list);
ctx->ref_queue_cnt--;
list_add_tail(&mb_entry->list, &ctx->src_queue);
@@ -622,7 +622,7 @@ static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -668,14 +668,14 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
spin_lock_irqsave(&dev->irqlock, flags);
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
- src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
- src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
spin_unlock_irqrestore(&dev->irqlock, flags);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -703,8 +703,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
if (slice_type >= 0) {
s5p_mfc_get_enc_frame_buffer(ctx, &enc_y_addr, &enc_c_addr);
list_for_each_entry(mb_entry, &ctx->src_queue, list) {
- mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
if ((enc_y_addr == mb_y_addr) &&
(enc_c_addr == mb_c_addr)) {
list_del(&mb_entry->list);
@@ -715,8 +715,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
}
}
list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
- mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
if ((enc_y_addr == mb_y_addr) &&
(enc_c_addr == mb_c_addr)) {
list_del(&mb_entry->list);
@@ -785,8 +785,8 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->version = KERNEL_VERSION(1, 0, 0);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
- | V4L2_CAP_VIDEO_OUTPUT
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_VIDEO_OUTPUT_MPLANE
| V4L2_CAP_STREAMING;
return 0;
}
@@ -1501,20 +1501,21 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
return -EINVAL;
}
for (i = 0; i < fmt->num_planes; i++) {
- if (!vb2_dma_contig_plane_paddr(vb, i)) {
+ if (!vb2_dma_contig_plane_dma_addr(vb, i)) {
mfc_err("failed to get plane cookie\n");
return -EINVAL;
}
mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
vb->v4l2_buf.index, i,
- vb2_dma_contig_plane_paddr(vb, i));
+ vb2_dma_contig_plane_dma_addr(vb, i));
}
return 0;
}
static int s5p_mfc_queue_setup(struct vb2_queue *vq,
- unsigned int *buf_count, unsigned int *plane_count,
- unsigned long psize[], void *allocators[])
+ const struct v4l2_format *fmt,
+ unsigned int *buf_count, unsigned int *plane_count,
+ unsigned int psize[], void *allocators[])
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
@@ -1584,7 +1585,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
i = vb->v4l2_buf.index;
ctx->dst_bufs[i].b = vb;
ctx->dst_bufs[i].cookie.stream =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->dst_bufs_cnt++;
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
ret = check_vb_with_fmt(ctx->src_fmt, vb);
@@ -1593,9 +1594,9 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
i = vb->v4l2_buf.index;
ctx->src_bufs[i].b = vb;
ctx->src_bufs[i].cookie.raw.luma =
- vb2_dma_contig_plane_paddr(vb, 0);
+ vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->src_bufs[i].cookie.raw.chroma =
- vb2_dma_contig_plane_paddr(vb, 1);
+ vb2_dma_contig_plane_dma_addr(vb, 1);
ctx->src_bufs_cnt++;
} else {
mfc_err("inavlid queue type: %d\n", vq->type);
@@ -1640,7 +1641,7 @@ static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static int s5p_mfc_start_streaming(struct vb2_queue *q)
+static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
@@ -1814,7 +1815,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
return ctx->ctrl_handler.error;
}
if (controls[i].is_volatile && ctx->ctrls[i])
- ctx->ctrls[i]->is_volatile = 1;
+ ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
}
return 0;
}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
index 7b239168c19..e08b21c50eb 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
@@ -1135,7 +1135,7 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
temp_vb->used = 1;
s5p_mfc_set_dec_stream_buffer(ctx,
- vb2_dma_contig_plane_paddr(temp_vb->b, 0), ctx->consumed_stream,
+ vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), ctx->consumed_stream,
temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
index = temp_vb->b->v4l2_buf.index;
@@ -1172,12 +1172,12 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
}
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
src_mb->used = 1;
- src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
- src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
dst_mb->used = 1;
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -1200,7 +1200,7 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_dec_desc_buffer(ctx);
mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
s5p_mfc_set_dec_stream_buffer(ctx,
- vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
0, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
@@ -1219,7 +1219,7 @@ static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_ref_buffer(ctx);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -1255,7 +1255,7 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
s5p_mfc_set_dec_stream_buffer(ctx,
- vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
0, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
diff --git a/drivers/media/video/s5p-tv/Kconfig b/drivers/media/video/s5p-tv/Kconfig
index 9c37dee7bc5..f2a09779ec8 100644
--- a/drivers/media/video/s5p-tv/Kconfig
+++ b/drivers/media/video/s5p-tv/Kconfig
@@ -8,7 +8,7 @@
config VIDEO_SAMSUNG_S5P_TV
bool "Samsung TV driver for S5P platform (experimental)"
- depends on PLAT_S5P
+ depends on PLAT_S5P && PM_RUNTIME
depends on EXPERIMENTAL
default n
---help---
diff --git a/drivers/media/video/s5p-tv/hdmi_drv.c b/drivers/media/video/s5p-tv/hdmi_drv.c
index 06d6663f459..0279e6e89fe 100644
--- a/drivers/media/video/s5p-tv/hdmi_drv.c
+++ b/drivers/media/video/s5p-tv/hdmi_drv.c
@@ -210,20 +210,17 @@ static void hdmi_reg_init(struct hdmi_device *hdev)
/* enable HPD interrupts */
hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
- /* choose HDMI mode */
+ /* choose DVI mode */
hdmi_write_mask(hdev, HDMI_MODE_SEL,
- HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+ HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
+ hdmi_write_mask(hdev, HDMI_CON_2, ~0,
+ HDMI_DVI_PERAMBLE_EN | HDMI_DVI_BAND_EN);
/* disable bluescreen */
hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
/* choose bluescreen (fecal) color */
hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
- /* enable AVI packet every vsync, fixes purple line problem */
- hdmi_writeb(hdev, HDMI_AVI_CON, 0x02);
- /* force YUV444, look to CEA-861-D, table 7 for more detail */
- hdmi_writeb(hdev, HDMI_AVI_BYTE(0), 2 << 5);
- hdmi_write_mask(hdev, HDMI_CON_1, 2, 3 << 5);
}
static void hdmi_timing_apply(struct hdmi_device *hdev,
@@ -443,6 +440,7 @@ static const struct hdmi_preset_conf hdmi_conf_480p = {
.height = 480,
.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
.field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
},
};
@@ -475,6 +473,7 @@ static const struct hdmi_preset_conf hdmi_conf_720p60 = {
.height = 720,
.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
.field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
},
};
@@ -507,6 +506,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
.height = 1080,
.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
.field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
},
};
@@ -539,6 +539,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
.height = 1080,
.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
.field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
},
};
diff --git a/drivers/media/video/s5p-tv/mixer.h b/drivers/media/video/s5p-tv/mixer.h
index e2242243f63..51ad59b3035 100644
--- a/drivers/media/video/s5p-tv/mixer.h
+++ b/drivers/media/video/s5p-tv/mixer.h
@@ -111,8 +111,6 @@ struct mxr_buffer {
enum mxr_layer_state {
/** layer is not shown */
MXR_LAYER_IDLE = 0,
- /** state between STREAMON and hardware start */
- MXR_LAYER_STREAMING_START,
/** layer is shown */
MXR_LAYER_STREAMING,
/** state before STREAMOFF is finished */
diff --git a/drivers/media/video/s5p-tv/mixer_grp_layer.c b/drivers/media/video/s5p-tv/mixer_grp_layer.c
index 58f0ba49580..de8270c2b6e 100644
--- a/drivers/media/video/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/video/s5p-tv/mixer_grp_layer.c
@@ -86,7 +86,7 @@ static void mxr_graph_buffer_set(struct mxr_layer *layer,
dma_addr_t addr = 0;
if (buf)
- addr = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
}
diff --git a/drivers/media/video/s5p-tv/mixer_reg.c b/drivers/media/video/s5p-tv/mixer_reg.c
index 38dac672aa1..4800a3cbb29 100644
--- a/drivers/media/video/s5p-tv/mixer_reg.c
+++ b/drivers/media/video/s5p-tv/mixer_reg.c
@@ -90,7 +90,7 @@ void mxr_reg_reset(struct mxr_device *mdev)
mxr_vsync_set_update(mdev, MXR_DISABLE);
/* set output in RGB888 mode */
- mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_YUV444);
+ mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);
/* 16 beat burst in DMA */
mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
@@ -376,6 +376,12 @@ void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
spin_lock_irqsave(&mdev->reg_slock, flags);
mxr_vsync_set_update(mdev, MXR_DISABLE);
+ /* selecting colorspace accepted by output */
+ if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
+ val |= MXR_CFG_OUT_YUV444;
+ else
+ val |= MXR_CFG_OUT_RGB888;
+
/* choosing between interlace and progressive mode */
if (fmt->field == V4L2_FIELD_INTERLACED)
val |= MXR_CFG_SCAN_INTERLACE;
@@ -394,7 +400,8 @@ void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
else
WARN(1, "unrecognized mbus height %u!\n", fmt->height);
- mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
+ MXR_CFG_OUT_MASK);
val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
vp_write_mask(mdev, VP_MODE, val,
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index 43ac22f35bc..e16d3a4bc1d 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -727,8 +727,8 @@ static const struct v4l2_file_operations mxr_fops = {
.unlocked_ioctl = video_ioctl2,
};
-static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned long sizes[],
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
void *alloc_ctxs[])
{
struct mxr_layer *layer = vb2_get_drv_priv(vq);
@@ -764,19 +764,10 @@ static void buf_queue(struct vb2_buffer *vb)
struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
struct mxr_device *mdev = layer->mdev;
unsigned long flags;
- int must_start = 0;
spin_lock_irqsave(&layer->enq_slock, flags);
- if (layer->state == MXR_LAYER_STREAMING_START) {
- layer->state = MXR_LAYER_STREAMING;
- must_start = 1;
- }
list_add_tail(&buffer->list, &layer->enq_list);
spin_unlock_irqrestore(&layer->enq_slock, flags);
- if (must_start) {
- layer->ops.stream_set(layer, MXR_ENABLE);
- mxr_streamer_get(mdev);
- }
mxr_dbg(mdev, "queuing buffer\n");
}
@@ -797,13 +788,19 @@ static void wait_unlock(struct vb2_queue *vq)
mutex_unlock(&layer->mutex);
}
-static int start_streaming(struct vb2_queue *vq)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct mxr_layer *layer = vb2_get_drv_priv(vq);
struct mxr_device *mdev = layer->mdev;
unsigned long flags;
mxr_dbg(mdev, "%s\n", __func__);
+
+ if (count == 0) {
+ mxr_dbg(mdev, "no output buffers queued\n");
+ return -EINVAL;
+ }
+
/* block any changes in output configuration */
mxr_output_get(mdev);
@@ -814,9 +811,12 @@ static int start_streaming(struct vb2_queue *vq)
layer->ops.format_set(layer);
/* enabling layer in hardware */
spin_lock_irqsave(&layer->enq_slock, flags);
- layer->state = MXR_LAYER_STREAMING_START;
+ layer->state = MXR_LAYER_STREAMING;
spin_unlock_irqrestore(&layer->enq_slock, flags);
+ layer->ops.stream_set(layer, MXR_ENABLE);
+ mxr_streamer_get(mdev);
+
return 0;
}
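vb2's .start_streaming() now receives the number of buffers already queued, which is what lets the mixer refuse to start on an empty queue and enable the hardware right away. A bare-bones sketch; demo_hw_start() is a stand-in for the stream_set()/mxr_streamer_get() calls above:

#include <media/videobuf2-core.h>

static void demo_hw_start(struct vb2_queue *q)
{
	/* stand-in for the layer and streamer enable done by the driver */
}

static int demo_start_streaming(struct vb2_queue *q, unsigned int count)
{
	if (count == 0)		/* no output buffers queued before STREAMON */
		return -EINVAL;

	demo_hw_start(q);	/* safe to enable immediately, data is available */
	return 0;
}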
diff --git a/drivers/media/video/s5p-tv/mixer_vp_layer.c b/drivers/media/video/s5p-tv/mixer_vp_layer.c
index 6950ed8ac1a..f3bb2e34cb5 100644
--- a/drivers/media/video/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/video/s5p-tv/mixer_vp_layer.c
@@ -97,9 +97,9 @@ static void mxr_vp_buffer_set(struct mxr_layer *layer,
mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
return;
}
- luma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
if (layer->fmt->num_subframes == 2) {
- chroma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 1);
+ chroma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 1);
} else {
/* FIXME: mxr_get_plane_size computes an integer division,
* which is slow and should not be performed in interrupt context */
diff --git a/drivers/media/video/s5p-tv/regs-hdmi.h b/drivers/media/video/s5p-tv/regs-hdmi.h
index ac93ad6f2bc..33247d13e4c 100644
--- a/drivers/media/video/s5p-tv/regs-hdmi.h
+++ b/drivers/media/video/s5p-tv/regs-hdmi.h
@@ -127,6 +127,10 @@
#define HDMI_BLUE_SCR_EN (1 << 5)
#define HDMI_EN (1 << 0)
+/* HDMI_CON_2 */
+#define HDMI_DVI_PERAMBLE_EN (1 << 5)
+#define HDMI_DVI_BAND_EN (1 << 1)
+
/* HDMI_PHY_STATUS */
#define HDMI_PHY_STATUS_READY (1 << 0)
diff --git a/drivers/media/video/s5p-tv/regs-mixer.h b/drivers/media/video/s5p-tv/regs-mixer.h
index 3c8442609c1..158abb43d0a 100644
--- a/drivers/media/video/s5p-tv/regs-mixer.h
+++ b/drivers/media/video/s5p-tv/regs-mixer.h
@@ -67,6 +67,7 @@
/* bits for MXR_CFG */
#define MXR_CFG_OUT_YUV444 (0 << 8)
#define MXR_CFG_OUT_RGB888 (1 << 8)
+#define MXR_CFG_OUT_MASK (1 << 8)
#define MXR_CFG_DST_SDO (0 << 7)
#define MXR_CFG_DST_HDMI (1 << 7)
#define MXR_CFG_DST_MASK (1 << 7)
diff --git a/drivers/media/video/s5p-tv/sdo_drv.c b/drivers/media/video/s5p-tv/sdo_drv.c
index 4dddd6bd635..8cec67ef48c 100644
--- a/drivers/media/video/s5p-tv/sdo_drv.c
+++ b/drivers/media/video/s5p-tv/sdo_drv.c
@@ -170,6 +170,7 @@ static int sdo_g_mbus_fmt(struct v4l2_subdev *sd,
fmt->height = sdev->fmt->height;
fmt->code = V4L2_MBUS_FMT_FIXED;
fmt->field = V4L2_FIELD_INTERLACED;
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
return 0;
}
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f2ae405c74a..5cfdbc78b91 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -793,7 +793,6 @@ static int saa711x_s_ctrl(struct v4l2_ctrl *ctrl)
saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val);
else
saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val | 0x80);
- v4l2_ctrl_activate(state->gain, !state->agc->val);
break;
default:
@@ -1345,35 +1344,57 @@ static int saa711x_g_vbi_data(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_dat
static int saa711x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct saa711x_state *state = to_state(sd);
- int reg1e;
+ int reg1f, reg1e;
- *std = V4L2_STD_ALL;
- if (state->ident != V4L2_IDENT_SAA7115) {
- int reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
+ /*
+ * The V4L2 core already initializes std with all supported
+	 * standards. All the driver needs to do is mask it, removing the
+	 * standards that don't apply.
+ */
- if (reg1f & 0x20)
- *std = V4L2_STD_525_60;
- else
- *std = V4L2_STD_625_50;
+ reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
+ v4l2_dbg(1, debug, sd, "Status byte 2 (0x1f)=0x%02x\n", reg1f);
- return 0;
- }
+ /* horizontal/vertical not locked */
+ if (reg1f & 0x40)
+ goto ret;
+
+ if (reg1f & 0x20)
+ *std &= V4L2_STD_525_60;
+ else
+ *std &= V4L2_STD_625_50;
+
+ if (state->ident != V4L2_IDENT_SAA7115)
+ goto ret;
reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);
switch (reg1e & 0x03) {
case 1:
- *std = V4L2_STD_NTSC;
+ *std &= V4L2_STD_NTSC;
break;
case 2:
- *std = V4L2_STD_PAL;
+ /*
+	 * V4L2_STD_PAL covers just the European PAL standards.
+	 * That is too narrow here, as the device could also be
+	 * using another PAL standard.
+ */
+ *std &= V4L2_STD_PAL | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
+ V4L2_STD_PAL_M | V4L2_STD_PAL_60;
break;
case 3:
- *std = V4L2_STD_SECAM;
+ *std &= V4L2_STD_SECAM;
break;
default:
+ /* Can't detect anything */
break;
}
+
+ v4l2_dbg(1, debug, sd, "Status byte 1 (0x1e)=0x%02x\n", reg1e);
+
+ret:
+ v4l2_dbg(1, debug, sd, "detected std mask = %08Lx\n", *std);
+
return 0;
}
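The querystd rework narrows the standards mask handed in by the V4L2 core instead of overwriting it. The masking itself is plain bit arithmetic; a self-contained user-space sketch, where the 0x40/0x20 status bits mirror the lock and 50/60 Hz detection bits read from register 0x1f above:

#include <stdio.h>
#include <linux/videodev2.h>

int main(void)
{
	v4l2_std_id std = V4L2_STD_ALL;		/* the core starts with every supported standard */
	unsigned char reg1f = 0x20;		/* assumed status: locked, 60 Hz field rate */

	if (reg1f & 0x40) {			/* no horizontal/vertical lock: leave the mask alone */
		printf("unlocked, std mask = %llx\n", (unsigned long long)std);
		return 0;
	}

	/* Narrow the mask to the detected field rate family. */
	std &= (reg1f & 0x20) ? V4L2_STD_525_60 : V4L2_STD_625_50;
	printf("detected std mask = %llx\n", (unsigned long long)std);
	return 0;
}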
@@ -1601,7 +1622,6 @@ static int saa711x_probe(struct i2c_client *client,
V4L2_CID_CHROMA_AGC, 0, 1, 1, 1);
state->gain = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
V4L2_CID_CHROMA_GAIN, 0, 127, 1, 40);
- state->gain->is_volatile = 1;
sd->ctrl_handler = hdl;
if (hdl->error) {
int err = hdl->error;
@@ -1610,8 +1630,7 @@ static int saa711x_probe(struct i2c_client *client,
kfree(state);
return err;
}
- state->agc->flags |= V4L2_CTRL_FLAG_UPDATE;
- v4l2_ctrl_cluster(2, &state->agc);
+ v4l2_ctrl_auto_cluster(2, &state->agc, 0, true);
state->input = -1;
state->output = SAA7115_IPORT_ON;
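v4l2_ctrl_auto_cluster() replaces the hand-rolled UPDATE flag plus volatile marking for the AGC/gain pair. A compact sketch; the handler and ops pointer are assumed to come from the caller, and the ranges simply mirror the chroma controls created above:

#include <media/v4l2-ctrls.h>

/* Cluster an auto/manual pair: chroma AGC (boolean) plus manual chroma gain. */
static void demo_setup_agc_cluster(struct v4l2_ctrl_handler *hdl,
				   const struct v4l2_ctrl_ops *ops,
				   struct v4l2_ctrl *cluster[2])
{
	cluster[0] = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CHROMA_AGC, 0, 1, 1, 1);
	cluster[1] = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CHROMA_GAIN, 0, 127, 1, 40);

	/*
	 * manual_val = 0: AGC value 0 means "manual"; set_volatile = true makes
	 * the gain control volatile while AGC is enabled.
	 */
	v4l2_ctrl_auto_cluster(2, &cluster[0], 0, true);
}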
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 8a5ff4d3cf1..a646ccf5169 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
obj-$(CONFIG_VIDEO_SAA7134_DVB) += saa7134-dvb.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index bc8d6bba8ee..9b550687213 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -843,10 +843,10 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
int saa7134_ir_start(struct saa7134_dev *dev);
void saa7134_ir_stop(struct saa7134_dev *dev);
#else
-#define saa7134_input_init1(dev) (0)
-#define saa7134_input_fini(dev) (0)
-#define saa7134_input_irq(dev) (0)
-#define saa7134_probe_i2c_ir(dev) (0)
-#define saa7134_ir_start(dev) (0)
-#define saa7134_ir_stop(dev) (0)
+#define saa7134_input_init1(dev) ((void)0)
+#define saa7134_input_fini(dev) ((void)0)
+#define saa7134_input_irq(dev) ((void)0)
+#define saa7134_probe_i2c_ir(dev) ((void)0)
+#define saa7134_ir_start(dev) ((void)0)
+#define saa7134_ir_stop(dev) ((void)0)
#endif
diff --git a/drivers/media/video/saa7164/Makefile b/drivers/media/video/saa7164/Makefile
index 6303a8e60ea..ecd5811dc48 100644
--- a/drivers/media/video/saa7164/Makefile
+++ b/drivers/media/video/saa7164/Makefile
@@ -4,9 +4,9 @@ saa7164-objs := saa7164-cards.o saa7164-core.o saa7164-i2c.o saa7164-dvb.o \
obj-$(CONFIG_VIDEO_SAA7164) += saa7164.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
-EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
+ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index 69822a4e727..971591d6450 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -203,6 +203,66 @@ struct saa7164_board saa7164_boards[] = {
.i2c_reg_len = REGLEN_8bit,
} },
},
+ [SAA7164_BOARD_HAUPPAUGE_HVR2200_4] = {
+ .name = "Hauppauge WinTV-HVR2200",
+ .porta = SAA7164_MPEG_DVB,
+ .portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
+ .chiprev = SAA7164_CHIP_REV3,
+ .unit = {{
+ .id = 0x1d,
+ .type = SAA7164_UNIT_EEPROM,
+ .name = "4K EEPROM",
+ .i2c_bus_nr = SAA7164_I2C_BUS_0,
+ .i2c_bus_addr = 0xa0 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x04,
+ .type = SAA7164_UNIT_TUNER,
+ .name = "TDA18271-1",
+ .i2c_bus_nr = SAA7164_I2C_BUS_1,
+ .i2c_bus_addr = 0xc0 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x05,
+ .type = SAA7164_UNIT_ANALOG_DEMODULATOR,
+ .name = "TDA8290-1",
+ .i2c_bus_nr = SAA7164_I2C_BUS_1,
+ .i2c_bus_addr = 0x84 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x1b,
+ .type = SAA7164_UNIT_TUNER,
+ .name = "TDA18271-2",
+ .i2c_bus_nr = SAA7164_I2C_BUS_2,
+ .i2c_bus_addr = 0xc0 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x1c,
+ .type = SAA7164_UNIT_ANALOG_DEMODULATOR,
+ .name = "TDA8290-2",
+ .i2c_bus_nr = SAA7164_I2C_BUS_2,
+ .i2c_bus_addr = 0x84 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x1e,
+ .type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
+ .name = "TDA10048-1",
+ .i2c_bus_nr = SAA7164_I2C_BUS_1,
+ .i2c_bus_addr = 0x10 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x1f,
+ .type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
+ .name = "TDA10048-2",
+ .i2c_bus_nr = SAA7164_I2C_BUS_2,
+ .i2c_bus_addr = 0x12 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ } },
+ },
[SAA7164_BOARD_HAUPPAUGE_HVR2250] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
@@ -387,6 +447,62 @@ struct saa7164_board saa7164_boards[] = {
.i2c_reg_len = REGLEN_8bit,
} },
},
+ [SAA7164_BOARD_HAUPPAUGE_HVR2200_5] = {
+ .name = "Hauppauge WinTV-HVR2200",
+ .porta = SAA7164_MPEG_DVB,
+ .portb = SAA7164_MPEG_DVB,
+ .chiprev = SAA7164_CHIP_REV3,
+ .unit = {{
+ .id = 0x23,
+ .type = SAA7164_UNIT_EEPROM,
+ .name = "4K EEPROM",
+ .i2c_bus_nr = SAA7164_I2C_BUS_0,
+ .i2c_bus_addr = 0xa0 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x04,
+ .type = SAA7164_UNIT_TUNER,
+ .name = "TDA18271-1",
+ .i2c_bus_nr = SAA7164_I2C_BUS_1,
+ .i2c_bus_addr = 0xc0 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x05,
+ .type = SAA7164_UNIT_ANALOG_DEMODULATOR,
+ .name = "TDA8290-1",
+ .i2c_bus_nr = SAA7164_I2C_BUS_1,
+ .i2c_bus_addr = 0x84 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x21,
+ .type = SAA7164_UNIT_TUNER,
+ .name = "TDA18271-2",
+ .i2c_bus_nr = SAA7164_I2C_BUS_2,
+ .i2c_bus_addr = 0xc0 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x22,
+ .type = SAA7164_UNIT_ANALOG_DEMODULATOR,
+ .name = "TDA8290-2",
+ .i2c_bus_nr = SAA7164_I2C_BUS_2,
+ .i2c_bus_addr = 0x84 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x24,
+ .type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
+ .name = "TDA10048-1",
+ .i2c_bus_nr = SAA7164_I2C_BUS_1,
+ .i2c_bus_addr = 0x10 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ }, {
+ .id = 0x25,
+ .type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
+ .name = "TDA10048-2",
+ .i2c_bus_nr = SAA7164_I2C_BUS_2,
+ .i2c_bus_addr = 0x12 >> 1,
+ .i2c_reg_len = REGLEN_8bit,
+ } },
+ },
};
const unsigned int saa7164_bcount = ARRAY_SIZE(saa7164_boards);
@@ -426,6 +542,14 @@ struct saa7164_subid saa7164_subids[] = {
.subvendor = 0x0070,
.subdevice = 0x8851,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2250_2,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x8940,
+ .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_4,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x8953,
+ .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_5,
},
};
const unsigned int saa7164_idcount = ARRAY_SIZE(saa7164_subids);
@@ -469,6 +593,8 @@ void saa7164_gpio_setup(struct saa7164_dev *dev)
case SAA7164_BOARD_HAUPPAUGE_HVR2200:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
+ case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
+ case SAA7164_BOARD_HAUPPAUGE_HVR2200_5:
case SAA7164_BOARD_HAUPPAUGE_HVR2250:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_3:
@@ -549,6 +675,8 @@ void saa7164_card_setup(struct saa7164_dev *dev)
case SAA7164_BOARD_HAUPPAUGE_HVR2200:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
+ case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
+ case SAA7164_BOARD_HAUPPAUGE_HVR2200_5:
case SAA7164_BOARD_HAUPPAUGE_HVR2250:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_3:
diff --git a/drivers/media/video/saa7164/saa7164-dvb.c b/drivers/media/video/saa7164/saa7164-dvb.c
index f65eab63ca8..5c5cc3ebf9b 100644
--- a/drivers/media/video/saa7164/saa7164-dvb.c
+++ b/drivers/media/video/saa7164/saa7164-dvb.c
@@ -475,6 +475,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
case SAA7164_BOARD_HAUPPAUGE_HVR2200:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
+ case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
+ case SAA7164_BOARD_HAUPPAUGE_HVR2200_5:
i2c_bus = &dev->i2c_bus[port->nr + 1];
switch (port->nr) {
case 0:
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 6678bf1e781..742b34103b5 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -82,6 +82,8 @@
#define SAA7164_BOARD_HAUPPAUGE_HVR2200_3 6
#define SAA7164_BOARD_HAUPPAUGE_HVR2250_2 7
#define SAA7164_BOARD_HAUPPAUGE_HVR2250_3 8
+#define SAA7164_BOARD_HAUPPAUGE_HVR2200_4 9
+#define SAA7164_BOARD_HAUPPAUGE_HVR2200_5 10
#define SAA7164_MAX_UNITS 8
#define SAA7164_TS_NUMBER_OF_LINES 312
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index e54089802b6..f390682629c 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -90,7 +90,6 @@
struct sh_mobile_ceu_buffer {
struct vb2_buffer vb; /* v4l buffer must be first */
struct list_head queue;
- enum v4l2_mbus_pixelcode code;
};
struct sh_mobile_ceu_dev {
@@ -100,7 +99,8 @@ struct sh_mobile_ceu_dev {
unsigned int irq;
void __iomem *base;
- unsigned long video_limit;
+ size_t video_limit;
+ size_t buf_total;
spinlock_t lock; /* Protects video buffer lists */
struct list_head capture;
@@ -121,7 +121,7 @@ struct sh_mobile_ceu_dev {
};
struct sh_mobile_ceu_cam {
- /* CEU offsets within scaled by the CEU camera output */
+ /* CEU offsets within the camera output, before the CEU scaler */
unsigned int ceu_left;
unsigned int ceu_top;
/* Client output, as seen by the CEU */
@@ -144,30 +144,6 @@ static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
return container_of(vb, struct sh_mobile_ceu_buffer, vb);
}
-static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev)
-{
- unsigned long flags;
-
- flags = SOCAM_MASTER |
- SOCAM_PCLK_SAMPLE_RISING |
- SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_HSYNC_ACTIVE_LOW |
- SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_VSYNC_ACTIVE_LOW |
- SOCAM_DATA_ACTIVE_HIGH;
-
- if (pcdev->pdata->flags & SH_CEU_FLAG_USE_8BIT_BUS)
- flags |= SOCAM_DATAWIDTH_8;
-
- if (pcdev->pdata->flags & SH_CEU_FLAG_USE_16BIT_BUS)
- flags |= SOCAM_DATAWIDTH_16;
-
- if (flags & SOCAM_DATAWIDTH_MASK)
- return flags;
-
- return 0;
-}
-
static void ceu_write(struct sh_mobile_ceu_dev *priv,
unsigned long reg_offs, u32 data)
{
@@ -216,34 +192,62 @@ static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
/*
* Videobuf operations
*/
+
+/*
+ * .queue_setup() is called to check whether the driver can accept the
+ * requested number of buffers and to fill in the plane sizes
+ * for the current frame format, if required.
+ */
static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
unsigned int *count, unsigned int *num_planes,
- unsigned long sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
+ int bytes_per_line;
+ unsigned int height;
+ if (fmt) {
+ const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
+ fmt->fmt.pix.pixelformat);
+ if (!xlate)
+ return -EINVAL;
+ bytes_per_line = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ height = fmt->fmt.pix.height;
+ } else {
+ /* Called from VIDIOC_REQBUFS or in compatibility mode */
+ bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ height = icd->user_height;
+ }
if (bytes_per_line < 0)
return bytes_per_line;
- *num_planes = 1;
+ sizes[0] = bytes_per_line * height;
- pcdev->sequence = 0;
- sizes[0] = bytes_per_line * icd->user_height;
alloc_ctxs[0] = pcdev->alloc_ctx;
+ if (!vq->num_buffers)
+ pcdev->sequence = 0;
+
if (!*count)
*count = 2;
- if (pcdev->video_limit) {
- if (PAGE_ALIGN(sizes[0]) * *count > pcdev->video_limit)
- *count = pcdev->video_limit / PAGE_ALIGN(sizes[0]);
+ /* If *num_planes != 0, we have already verified *count. */
+ if (pcdev->video_limit && !*num_planes) {
+ size_t size = PAGE_ALIGN(sizes[0]) * *count;
+
+ if (size + pcdev->buf_total > pcdev->video_limit)
+ *count = (pcdev->video_limit - pcdev->buf_total) /
+ PAGE_ALIGN(sizes[0]);
}
- dev_dbg(icd->parent, "count=%d, size=%lu\n", *count, sizes[0]);
+ *num_planes = 1;
+
+ dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
return 0;
}
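The new accounting caps the requested buffer count so that the page-aligned allocations never exceed the contiguous pool. The clamp is simple arithmetic; a self-contained sketch with made-up numbers (4 MiB pool, roughly 600 KiB frames):

#include <stdio.h>

#define PAGE_ALIGN(x)	(((x) + 4095UL) & ~4095UL)	/* assumes 4 KiB pages */

int main(void)
{
	unsigned long video_limit = 4UL << 20;	/* assumed 4 MiB contiguous pool */
	unsigned long buf_total   = 0;		/* bytes already handed out */
	unsigned long size        = 600 * 1024;	/* one frame, before page alignment */
	unsigned int count        = 8;		/* what userspace asked for */

	/* Same clamp as above: shrink count until the pool can hold everything. */
	if (PAGE_ALIGN(size) * count + buf_total > video_limit)
		count = (video_limit - buf_total) / PAGE_ALIGN(size);

	printf("granting %u buffers of %lu bytes\n", count, PAGE_ALIGN(size));
	return 0;
}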
@@ -267,6 +271,7 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
unsigned long top1, top2;
unsigned long bottom1, bottom2;
u32 status;
+ bool planar;
int ret = 0;
/*
@@ -312,19 +317,31 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
bottom2 = CDBCR;
}
- phys_addr_top = vb2_dma_contig_plane_paddr(pcdev->active, 0);
-
- ceu_write(pcdev, top1, phys_addr_top);
- if (V4L2_FIELD_NONE != pcdev->field) {
- phys_addr_bottom = phys_addr_top + icd->user_width;
- ceu_write(pcdev, bottom1, phys_addr_bottom);
- }
+ phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);
switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
+ planar = true;
+ break;
+ default:
+ planar = false;
+ }
+
+ ceu_write(pcdev, top1, phys_addr_top);
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ if (planar)
+ phys_addr_bottom = phys_addr_top + icd->user_width;
+ else
+ phys_addr_bottom = phys_addr_top +
+ soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ ceu_write(pcdev, bottom1, phys_addr_bottom);
+ }
+
+ if (planar) {
phys_addr_top += icd->user_width *
icd->user_height;
ceu_write(pcdev, top2, phys_addr_top);
@@ -341,23 +358,40 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
{
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+
+ /* Added list head initialization on alloc */
+ WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
+
+ return 0;
+}
+
+static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
+{
struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
- struct sh_mobile_ceu_buffer *buf;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+ unsigned long size;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- unsigned long size;
if (bytes_per_line < 0)
- return bytes_per_line;
+ goto error;
- buf = to_ceu_vb(vb);
+ size = icd->user_height * bytes_per_line;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
+ vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+ goto error;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
- /* Added list head initialization on alloc */
- WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
-
#ifdef DEBUG
/*
* This can be useful if you want to see if we actually fill
@@ -367,31 +401,6 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
#endif
- BUG_ON(NULL == icd->current_fmt);
-
- size = icd->user_height * bytes_per_line;
-
- if (vb2_plane_size(vb, 0) < size) {
- dev_err(icd->parent, "Buffer too small (%lu < %lu)\n",
- vb2_plane_size(vb, 0), size);
- return -ENOBUFS;
- }
-
- vb2_set_plane_payload(vb, 0, size);
-
- return 0;
-}
-
-static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
-{
- struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
-
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
spin_lock_irq(&pcdev->lock);
list_add_tail(&buf->queue, &pcdev->capture);
@@ -405,6 +414,11 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
sh_mobile_ceu_capture(pcdev);
}
spin_unlock_irq(&pcdev->lock);
+
+ return;
+
+error:
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
@@ -429,11 +443,23 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
if (buf->queue.next)
list_del_init(&buf->queue);
+ pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0));
+ dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+ pcdev->buf_total);
+
spin_unlock_irq(&pcdev->lock);
}
static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
{
+ struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0));
+ dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+ pcdev->buf_total);
+
/* This is for locking debugging only */
INIT_LIST_HEAD(&to_ceu_vb(vb)->queue);
return 0;
@@ -535,19 +561,29 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
pm_runtime_get_sync(ici->v4l2_dev.dev);
+ pcdev->buf_total = 0;
+
ret = sh_mobile_ceu_soft_reset(pcdev);
csi2_sd = find_csi2(pcdev);
+ if (csi2_sd)
+ csi2_sd->grp_id = (long)icd;
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
- if (ret != -ENODEV && ret != -ENOIOCTLCMD && ret < 0) {
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
pm_runtime_put_sync(ici->v4l2_dev.dev);
- } else {
- pcdev->icd = icd;
- ret = 0;
+ return ret;
}
- return ret;
+ /*
+ * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver
+ * has not found this soc-camera device among its clients
+ */
+ if (ret == -ENODEV && csi2_sd)
+ csi2_sd->grp_id = 0;
+ pcdev->icd = icd;
+
+ return 0;
}
/* Called with .video_lock held */
@@ -560,6 +596,8 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
BUG_ON(icd != pcdev->icd);
v4l2_subdev_call(csi2_sd, core, s_power, 0);
+ if (csi2_sd)
+ csi2_sd->grp_id = 0;
/* disable capture, disable interrupts */
ceu_write(pcdev, CEIER, 0);
sh_mobile_ceu_soft_reset(pcdev);
@@ -628,22 +666,22 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
left_offset = cam->ceu_left;
top_offset = cam->ceu_top;
- /* CEU cropping (CFSZR) is applied _after_ the scaling filter (CFLCR) */
+ WARN_ON(icd->user_width & 3 || icd->user_height & 3);
+
+ width = icd->user_width;
+
if (pcdev->image_mode) {
in_width = cam->width;
if (!pcdev->is_16bit) {
in_width *= 2;
left_offset *= 2;
}
- width = icd->user_width;
- cdwdr_width = icd->user_width;
+ cdwdr_width = width;
} else {
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ int bytes_per_line = soc_mbus_bytes_per_line(width,
icd->current_fmt->host_fmt);
unsigned int w_factor;
- width = icd->user_width;
-
switch (icd->current_fmt->host_fmt->packing) {
case SOC_MBUS_PACKING_2X8_PADHI:
w_factor = 2;
@@ -653,10 +691,10 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
}
in_width = cam->width * w_factor;
- left_offset = left_offset * w_factor;
+ left_offset *= w_factor;
if (bytes_per_line < 0)
- cdwdr_width = icd->user_width;
+ cdwdr_width = width;
else
cdwdr_width = bytes_per_line;
}
@@ -664,7 +702,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
height = icd->user_height;
in_height = cam->height;
if (V4L2_FIELD_NONE != pcdev->field) {
- height /= 2;
+ height = (height / 2) & ~3;
in_height /= 2;
top_offset /= 2;
cdwdr_width *= 2;
@@ -686,6 +724,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
ceu_write(pcdev, CAMOR, camor);
ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
+ /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
ceu_write(pcdev, CFSZR, (height << 16) | width);
ceu_write(pcdev, CDWDR, cdwdr_width);
}
@@ -723,66 +762,93 @@ static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
ceu_write(pcdev, CAPSR, capsr);
}
+/* Find the bus subdevice driver, e.g., CSI2 */
+static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
+ struct soc_camera_device *icd)
+{
+ if (pcdev->csi2_pdev) {
+ struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
+ if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+ return csi2_sd;
+ }
+
+ return soc_camera_to_subdev(icd);
+}
+
+#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
+
/* Capture is not running, no interrupts, no locking needed */
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int ret;
- unsigned long camera_flags, common_flags, value;
- int yuv_lineskip;
+ struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long value, common_flags = CEU_BUS_FLAGS;
u32 capsr = capture_save_reset(pcdev);
+ unsigned int yuv_lineskip;
+ int ret;
- camera_flags = icd->ops->query_bus_param(icd);
- common_flags = soc_camera_bus_param_compatible(camera_flags,
- make_bus_param(pcdev));
- if (!common_flags)
- return -EINVAL;
+ /*
+ * If the client doesn't implement g_mbus_config, we just use our
+ * platform data
+ */
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags);
+ if (!common_flags)
+ return -EINVAL;
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ }
/* Make choices based on platform preferences */
- if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
- common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
}
- if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
- (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
- common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
else
- common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
}
- ret = icd->ops->set_bus_param(icd, common_flags);
- if (ret < 0)
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
- switch (common_flags & SOCAM_DATAWIDTH_MASK) {
- case SOCAM_DATAWIDTH_8:
- pcdev->is_16bit = 0;
- break;
- case SOCAM_DATAWIDTH_16:
+ if (icd->current_fmt->host_fmt->bits_per_sample > 8)
pcdev->is_16bit = 1;
- break;
- default:
- return -EINVAL;
- }
+ else
+ pcdev->is_16bit = 0;
ceu_write(pcdev, CRCNTR, 0);
ceu_write(pcdev, CRCMPR, 0);
value = 0x00000010; /* data fetch by default */
- yuv_lineskip = 0;
+ yuv_lineskip = 0x10;
switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
- yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */
+ /* convert 4:2:2 -> 4:2:0 */
+ yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
/* fall-through */
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
@@ -808,8 +874,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
- value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
- value |= common_flags & SOCAM_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
+ value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
+ value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
value |= pcdev->is_16bit ? 1 << 12 : 0;
/* CSI2 mode */
@@ -852,9 +918,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
* using 7 we swap the data bytes to match the incoming order:
* D0, D1, D2, D3, D4, D5, D6, D7
*/
- value = 0x00000017;
- if (yuv_lineskip)
- value &= ~0x00000010; /* convert 4:2:2 -> 4:2:0 */
+ value = 0x00000007 | yuv_lineskip;
ceu_write(pcdev, CDOCR, value);
ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
@@ -875,13 +939,19 @@ static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- unsigned long camera_flags, common_flags;
+ struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+ unsigned long common_flags = CEU_BUS_FLAGS;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ int ret;
- camera_flags = icd->ops->query_bus_param(icd);
- common_flags = soc_camera_bus_param_compatible(camera_flags,
- make_bus_param(pcdev));
- if (!common_flags || buswidth > 16 ||
- (buswidth > 8 && !(common_flags & SOCAM_DATAWIDTH_16)))
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret)
+ common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags);
+ else if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (!common_flags || buswidth > 16)
return -EINVAL;
return 0;
@@ -891,26 +961,26 @@ static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.name = "NV12",
- .bits_per_sample = 12,
- .packing = SOC_MBUS_PACKING_NONE,
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
}, {
.fourcc = V4L2_PIX_FMT_NV21,
.name = "NV21",
- .bits_per_sample = 12,
- .packing = SOC_MBUS_PACKING_NONE,
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.name = "NV16",
- .bits_per_sample = 16,
- .packing = SOC_MBUS_PACKING_NONE,
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.name = "NV61",
- .bits_per_sample = 16,
- .packing = SOC_MBUS_PACKING_NONE,
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
},
};
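
The format table above now describes the NV variants as 8 bits per sample on the bus with an explicit packing factor, instead of 12- or 16-bit samples with no packing: NV12/NV21 occupy 1.5 bytes per pixel in memory (a full-resolution Y plane plus a half-height interleaved CbCr plane), NV16/NV61 occupy 2 (full-height chroma). A hedged sketch of the resulting frame sizes; nv_image_size() is an invented helper, not part of soc_mediabus:

#include <stddef.h>
#include <stdio.h>

static size_t nv_image_size(size_t width, size_t height, int chroma_full_height)
{
	size_t luma   = width * height;			/* 8-bit Y plane      */
	size_t chroma = chroma_full_height ? luma : luma / 2; /* CbCr plane   */

	return luma + chroma;
}

int main(void)
{
	printf("NV12 640x480: %zu bytes\n", nv_image_size(640, 480, 0)); /* 460800 */
	printf("NV16 640x480: %zu bytes\n", nv_image_size(640, 480, 1)); /* 614400 */
	return 0;
}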
@@ -920,6 +990,8 @@ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
{
return fmt->packing == SOC_MBUS_PACKING_NONE ||
(fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_1_5X8) ||
+ (fmt->bits_per_sample == 8 &&
fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
(fmt->bits_per_sample > 8 &&
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
@@ -927,6 +999,38 @@ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
+static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct soc_camera_device,
+ ctrl_handler);
+}
+
+static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct soc_camera_device *icd = ctrl_to_icd(ctrl);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_SHARPNESS:
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ ceu_write(pcdev, CLFCR, !ctrl->val);
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = {
+ .s_ctrl = sh_mobile_ceu_s_ctrl,
+};
+
static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx,
struct soc_camera_format_xlate *xlate)
{
@@ -952,6 +1056,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
}
if (!pcdev->pdata->csi2) {
+ /* Are there any restrictions in the CSI-2 case? */
ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
if (ret < 0)
return 0;
@@ -962,6 +1067,12 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
struct v4l2_rect rect;
int shift = 0;
+ /* Add our control */
+ v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 1, 1, 0);
+ if (icd->ctrl_handler.error)
+ return icd->ctrl_handler.error;
+
/* FIXME: subwindow is lost between close / open */
/* Cache current client geometry */
@@ -1004,9 +1115,6 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
cam->width = mf.width;
cam->height = mf.height;
- cam->width = mf.width;
- cam->height = mf.height;
-
icd->host_priv = cam;
} else {
cam = icd->host_priv;
@@ -1278,6 +1386,7 @@ static int client_s_fmt(struct soc_camera_device *icd,
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
unsigned int max_width, max_height;
struct v4l2_cropcap cap;
+ bool ceu_1to1;
int ret;
ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
@@ -1287,7 +1396,14 @@ static int client_s_fmt(struct soc_camera_device *icd,
dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
- if ((width == mf->width && height == mf->height) || !ceu_can_scale)
+ if (width == mf->width && height == mf->height) {
+ /* Perfect! The client has done it all. */
+ ceu_1to1 = true;
+ goto update_cache;
+ }
+
+ ceu_1to1 = false;
+ if (!ceu_can_scale)
goto update_cache;
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1327,7 +1443,10 @@ update_cache:
if (ret < 0)
return ret;
- update_subrect(cam);
+ if (ceu_1to1)
+ cam->subrect = cam->rect;
+ else
+ update_subrect(cam);
return 0;
}
@@ -1414,7 +1533,10 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
capsr = capture_save_reset(pcdev);
dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
- /* 1. - 2. Apply iterative camera S_CROP for new input window. */
+ /*
+ * 1. - 2. Apply iterative camera S_CROP for new input window, read back
+ * actual camera rectangle.
+ */
ret = client_s_crop(icd, a, &cam_crop);
if (ret < 0)
return ret;
@@ -1498,8 +1620,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
ceu_write(pcdev, CFLCR, cflcr);
}
- icd->user_width = out_width;
- icd->user_height = out_height;
+ icd->user_width = out_width & ~3;
+ icd->user_height = out_height & ~3;
+ /* Offsets are applied at the CEU scaling filter input */
cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;
@@ -1538,7 +1661,7 @@ static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
* CEU crop, mapped backed onto the client input (subrect).
*/
static void calculate_client_output(struct soc_camera_device *icd,
- struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
+ const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct device *dev = icd->parent;
@@ -1574,8 +1697,8 @@ static void calculate_client_output(struct soc_camera_device *icd,
dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
/*
- * 4. Calculate client output window by applying combined scales to real
- * input window.
+ * 4. Calculate desired client output window by applying combined scales
+ * to client (real) input window.
*/
mf->width = scale_down(cam->rect.width, scale_h);
mf->height = scale_down(cam->rect.height, scale_v);
@@ -1600,8 +1723,6 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
bool image_mode;
enum v4l2_field field;
- dev_geo(dev, "S_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height);
-
switch (pix->field) {
default:
pix->field = V4L2_FIELD_NONE;
@@ -1622,8 +1743,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* 1.-4. Calculate client output geometry */
- calculate_client_output(icd, &f->fmt.pix, &mf);
+ /* 1.-4. Calculate desired client output geometry */
+ calculate_client_output(icd, pix, &mf);
mf.field = pix->field;
mf.colorspace = pix->colorspace;
mf.code = xlate->code;
@@ -1639,6 +1760,9 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
image_mode = false;
}
+ dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
+ pix->width, pix->height);
+
dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
/* 5. - 9. */
@@ -1700,6 +1824,10 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
pcdev->field = field;
pcdev->image_mode = image_mode;
+ /* CFSZR requirement */
+ pix->width &= ~3;
+ pix->height &= ~3;
+
return 0;
}
@@ -1725,7 +1853,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
/* FIXME: calculate using depth and bus width */
- v4l_bound_align_image(&pix->width, 2, 2560, 1,
+ /* CFSZR requires height and width to be 4-pixel aligned */
+ v4l_bound_align_image(&pix->width, 2, 2560, 2,
&pix->height, 4, 1920, 2, 0);
width = pix->width;
@@ -1778,6 +1907,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
pix->height = height;
}
+ pix->width &= ~3;
+ pix->height &= ~3;
+
dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
__func__, ret, pix->pixelformat, pix->width, pix->height);
@@ -1824,8 +1956,8 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
out_height != f.fmt.pix.height))
ret = -EINVAL;
if (!ret) {
- icd->user_width = out_width;
- icd->user_height = out_height;
+ icd->user_width = out_width & ~3;
+ icd->user_height = out_height & ~3;
ret = sh_mobile_ceu_set_bus_param(icd,
icd->current_fmt->host_fmt->fourcc);
}
@@ -1869,55 +2001,6 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
return vb2_queue_init(q);
}
-static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
- struct v4l2_control *ctrl)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- u32 val;
-
- switch (ctrl->id) {
- case V4L2_CID_SHARPNESS:
- val = ceu_read(pcdev, CLFCR);
- ctrl->value = val ^ 1;
- return 0;
- }
- return -ENOIOCTLCMD;
-}
-
-static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
- struct v4l2_control *ctrl)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- switch (ctrl->id) {
- case V4L2_CID_SHARPNESS:
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- ceu_write(pcdev, CLFCR, !ctrl->value);
- return 0;
- }
- return -EINVAL;
- }
- return -ENOIOCTLCMD;
-}
-
-static const struct v4l2_queryctrl sh_mobile_ceu_controls[] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Low-pass filter",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
-};
-
static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.owner = THIS_MODULE,
.add = sh_mobile_ceu_add_device,
@@ -1929,14 +2012,10 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.set_livecrop = sh_mobile_ceu_set_livecrop,
.set_fmt = sh_mobile_ceu_set_fmt,
.try_fmt = sh_mobile_ceu_try_fmt,
- .set_ctrl = sh_mobile_ceu_set_ctrl,
- .get_ctrl = sh_mobile_ceu_get_ctrl,
.poll = sh_mobile_ceu_poll,
.querycap = sh_mobile_ceu_querycap,
.set_bus_param = sh_mobile_ceu_set_bus_param,
.init_videobuf2 = sh_mobile_ceu_init_videobuf,
- .controls = sh_mobile_ceu_controls,
- .num_controls = ARRAY_SIZE(sh_mobile_ceu_controls),
};
struct bus_wait {
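
This file's last hunks drop the host-level .get_ctrl/.set_ctrl hooks and the static control table: the single "sharpness" boolean is registered through the v4l2_ctrl framework instead (v4l2_ctrl_new_std() in sh_mobile_ceu_get_formats() above), and its s_ctrl handler writes the inverted control value into CLFCR for the NV formats, matching the removed get_ctrl which read the value back as CLFCR ^ 1. A stand-alone sketch of just that inversion, with dummy register access and no claim about CLFCR's actual bit semantics:

#include <stdio.h>

static unsigned int clfcr;	/* stand-in for the CEU register */

static void ceu_write(unsigned int val) { clfcr = val; }
static unsigned int ceu_read(void)      { return clfcr; }

/* The driver stores the logical inverse of the control value. */
static void set_sharpness(int sharpness)
{
	ceu_write(!sharpness);
}

static int get_sharpness(void)
{
	return ceu_read() ^ 1;	/* what the removed get_ctrl() returned */
}

int main(void)
{
	set_sharpness(1);
	printf("sharpness readback: %d\n", get_sharpness());
	return 0;
}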
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index 2893a0134c7..ea4f0473ed3 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -15,10 +15,12 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/sh_mobile_ceu.h>
#include <media/sh_mobile_csi2.h>
#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
@@ -35,11 +37,10 @@ struct sh_csi2 {
struct v4l2_subdev subdev;
struct list_head list;
unsigned int irq;
+ unsigned long mipi_flags;
void __iomem *base;
struct platform_device *pdev;
struct sh_csi2_client_config *client;
- unsigned long (*query_bus_param)(struct soc_camera_device *);
- int (*set_bus_param)(struct soc_camera_device *, unsigned long);
};
static int sh_csi2_try_fmt(struct v4l2_subdev *sd,
@@ -127,9 +128,34 @@ static int sh_csi2_s_fmt(struct v4l2_subdev *sd,
return 0;
}
+static int sh_csi2_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+
+ return 0;
+}
+
+static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
+ struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
+ .flags = priv->mipi_flags};
+
+ return v4l2_subdev_call(client_sd, video, s_mbus_config, &client_cfg);
+}
+
static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
.s_mbus_fmt = sh_csi2_s_fmt,
.try_mbus_fmt = sh_csi2_try_fmt,
+ .g_mbus_config = sh_csi2_g_mbus_config,
+ .s_mbus_config = sh_csi2_s_mbus_config,
};
static void sh_csi2_hwinit(struct sh_csi2 *priv)
@@ -144,11 +170,21 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
udelay(5);
iowrite32(0x00000000, priv->base + SH_CSI2_SRST);
- if (priv->client->lanes & 3)
- tmp |= priv->client->lanes & 3;
- else
- /* Default - both lanes */
- tmp |= 3;
+ switch (pdata->type) {
+ case SH_CSI2C:
+ if (priv->client->lanes == 1)
+ tmp |= 1;
+ else
+ /* Default - both lanes */
+ tmp |= 3;
+ break;
+ case SH_CSI2I:
+ if (!priv->client->lanes || priv->client->lanes > 4)
+ /* Default - all 4 lanes */
+ tmp |= 0xf;
+ else
+ tmp |= (1 << priv->client->lanes) - 1;
+ }
if (priv->client->phy == SH_CSI2_PHY_MAIN)
tmp |= 0x8000;
@@ -163,38 +199,18 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
iowrite32(tmp, priv->base + SH_CSI2_CHKSUM);
}
-static int sh_csi2_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return 0;
-}
-
-static unsigned long sh_csi2_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- const unsigned long flags = SOCAM_PCLK_SAMPLE_RISING |
- SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
- SOCAM_MASTER | SOCAM_DATAWIDTH_8 | SOCAM_DATA_ACTIVE_HIGH;
-
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- struct v4l2_subdev *sd, *csi2_sd = &priv->subdev;
- struct soc_camera_device *icd = NULL;
+ struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+ struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct device *dev = v4l2_get_subdevdata(&priv->subdev);
- int i;
+ struct v4l2_mbus_config cfg;
+ unsigned long common_flags, csi2_flags;
+ int i, ret;
- v4l2_device_for_each_subdev(sd, csi2_sd->v4l2_dev)
- if (sd->grp_id) {
- icd = (struct soc_camera_device *)sd->grp_id;
- break;
- }
-
- if (!icd)
- return -EINVAL;
+ if (priv->client)
+ return -EBUSY;
for (i = 0; i < pdata->num_clients; i++)
if (&pdata->clients[i].pdev->dev == icd->pdev)
@@ -205,14 +221,41 @@ static int sh_csi2_client_connect(struct sh_csi2 *priv)
if (i == pdata->num_clients)
return -ENODEV;
- priv->client = pdata->clients + i;
+ /* Check if we can support this camera */
+ csi2_flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK | V4L2_MBUS_CSI2_1_LANE;
+
+ switch (pdata->type) {
+ case SH_CSI2C:
+ if (pdata->clients[i].lanes != 1)
+ csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
+ break;
+ case SH_CSI2I:
+ switch (pdata->clients[i].lanes) {
+ default:
+ csi2_flags |= V4L2_MBUS_CSI2_4_LANE;
+ case 3:
+ csi2_flags |= V4L2_MBUS_CSI2_3_LANE;
+ case 2:
+ csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
+ }
+ }
- priv->set_bus_param = icd->ops->set_bus_param;
- priv->query_bus_param = icd->ops->query_bus_param;
- icd->ops->set_bus_param = sh_csi2_set_bus_param;
- icd->ops->query_bus_param = sh_csi2_query_bus_param;
+ cfg.type = V4L2_MBUS_CSI2;
+ ret = v4l2_subdev_call(client_sd, video, g_mbus_config, &cfg);
+ if (ret == -ENOIOCTLCMD)
+ common_flags = csi2_flags;
+ else if (!ret)
+ common_flags = soc_mbus_config_compatible(&cfg,
+ csi2_flags);
+ else
+ common_flags = 0;
- csi2_sd->grp_id = (long)icd;
+ if (!common_flags)
+ return -EINVAL;
+
+ /* All good: camera MIPI configuration supported */
+ priv->mipi_flags = common_flags;
+ priv->client = pdata->clients + i;
pm_runtime_get_sync(dev);
@@ -223,16 +266,10 @@ static int sh_csi2_client_connect(struct sh_csi2 *priv)
static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
{
- struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+ if (!priv->client)
+ return;
priv->client = NULL;
- priv->subdev.grp_id = 0;
-
- /* Driver is about to be unbound */
- icd->ops->set_bus_param = priv->set_bus_param;
- icd->ops->query_bus_param = priv->query_bus_param;
- priv->set_bus_param = NULL;
- priv->query_bus_param = NULL;
pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
}
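
Two lane-related details in this file are easy to miss: sh_csi2_hwinit() enables the CSI2I data lanes with a low bitmask ((1 << lanes) - 1, defaulting to all four), and the switch over pdata->clients[i].lanes in sh_csi2_client_connect() falls through on purpose, so a client wired for N lanes advertises every option from one to N lanes to soc_mbus_config_compatible(). A small plain-C sketch of both mappings; the bit values are invented, the driver uses the V4L2_MBUS_CSI2_*_LANE flags:

#include <assert.h>

/* Lane-enable bits as written to the CSI2I bridge: one bit per lane. */
static unsigned int lane_enable_bits(unsigned int lanes)
{
	if (!lanes || lanes > 4)
		lanes = 4;		/* default: all four lanes        */
	return (1u << lanes) - 1;	/* 1 -> 0x1, 2 -> 0x3, 4 -> 0xf   */
}

/* Advertised capability mask: supporting N lanes implies 1..N lanes. */
static unsigned int lane_caps(unsigned int lanes)
{
	unsigned int caps = 0, n;

	for (n = 1; n <= lanes && n <= 4; n++)
		caps |= 1u << n;
	return caps;
}

int main(void)
{
	assert(lane_enable_bits(3) == 0x7);
	assert(lane_caps(3) == (2u | 4u | 8u));
	return 0;
}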
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index 6a729879d89..9644bd861ab 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/sh_vou.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 5bdfe7e16bc..b72580c3895 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -50,49 +50,65 @@ static LIST_HEAD(hosts);
static LIST_HEAD(devices);
static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
-static int soc_camera_power_set(struct soc_camera_device *icd,
- struct soc_camera_link *icl,
- int power_on)
+static int soc_camera_power_on(struct soc_camera_device *icd,
+ struct soc_camera_link *icl)
{
- int ret;
-
- if (power_on) {
- ret = regulator_bulk_enable(icl->num_regulators,
- icl->regulators);
- if (ret < 0) {
- dev_err(icd->pdev, "Cannot enable regulators\n");
- return ret;
- }
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret = regulator_bulk_enable(icl->num_regulators,
+ icl->regulators);
+ if (ret < 0) {
+ dev_err(icd->pdev, "Cannot enable regulators\n");
+ return ret;
+ }
- if (icl->power)
- ret = icl->power(icd->pdev, power_on);
+ if (icl->power) {
+ ret = icl->power(icd->pdev, 1);
if (ret < 0) {
dev_err(icd->pdev,
"Platform failed to power-on the camera.\n");
-
- regulator_bulk_disable(icl->num_regulators,
- icl->regulators);
- return ret;
+ goto elinkpwr;
}
- } else {
- ret = 0;
- if (icl->power)
- ret = icl->power(icd->pdev, 0);
+ }
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ goto esdpwr;
+
+ return 0;
+
+esdpwr:
+ if (icl->power)
+ icl->power(icd->pdev, 0);
+elinkpwr:
+ regulator_bulk_disable(icl->num_regulators,
+ icl->regulators);
+ return ret;
+}
+
+static int soc_camera_power_off(struct soc_camera_device *icd,
+ struct soc_camera_link *icl)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret = v4l2_subdev_call(sd, core, s_power, 0);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ if (icl->power) {
+ ret = icl->power(icd->pdev, 0);
if (ret < 0) {
dev_err(icd->pdev,
"Platform failed to power-off the camera.\n");
return ret;
}
-
- ret = regulator_bulk_disable(icl->num_regulators,
- icl->regulators);
- if (ret < 0) {
- dev_err(icd->pdev, "Cannot disable regulators\n");
- return ret;
- }
}
- return 0;
+ ret = regulator_bulk_disable(icl->num_regulators,
+ icl->regulators);
+ if (ret < 0)
+ dev_err(icd->pdev, "Cannot disable regulators\n");
+
+ return ret;
}
const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
@@ -108,38 +124,38 @@ const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
EXPORT_SYMBOL(soc_camera_xlate_by_fourcc);
/**
- * soc_camera_apply_sensor_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
+ * soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
* @icl: camera platform parameters
- * @flags: flags to be inverted according to platform configuration
+ * @cfg: media bus configuration
* @return: resulting flags
*/
-unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
- unsigned long flags)
+unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
+ const struct v4l2_mbus_config *cfg)
{
- unsigned long f;
+ unsigned long f, flags = cfg->flags;
/* If only one of the two polarities is supported, switch to the opposite */
if (icl->flags & SOCAM_SENSOR_INVERT_HSYNC) {
- f = flags & (SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW);
- if (f == SOCAM_HSYNC_ACTIVE_HIGH || f == SOCAM_HSYNC_ACTIVE_LOW)
- flags ^= SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW;
+ f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW;
}
if (icl->flags & SOCAM_SENSOR_INVERT_VSYNC) {
- f = flags & (SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW);
- if (f == SOCAM_VSYNC_ACTIVE_HIGH || f == SOCAM_VSYNC_ACTIVE_LOW)
- flags ^= SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW;
+ f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW;
}
if (icl->flags & SOCAM_SENSOR_INVERT_PCLK) {
- f = flags & (SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING);
- if (f == SOCAM_PCLK_SAMPLE_RISING || f == SOCAM_PCLK_SAMPLE_FALLING)
- flags ^= SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING;
+ f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
}
return flags;
}
-EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
+EXPORT_SYMBOL(soc_camera_apply_board_flags);
#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
((x) >> 24) & 0xff
@@ -233,6 +249,14 @@ static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a)
return v4l2_subdev_call(sd, core, s_std, *a);
}
+static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, core, g_std, a);
+}
+
static int soc_camera_enum_fsizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
@@ -318,6 +342,32 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK);
}
+static int soc_camera_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ /* videobuf2 only */
+ if (ici->ops->init_videobuf)
+ return -EINVAL;
+ else
+ return vb2_create_bufs(&icd->vb2_vidq, create);
+}
+
+static int soc_camera_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ /* videobuf2 only */
+ if (ici->ops->init_videobuf)
+ return -EINVAL;
+ else
+ return vb2_prepare_buf(&icd->vb2_vidq, b);
+}
+
/* Always entered with .video_lock held */
static int soc_camera_init_user_formats(struct soc_camera_device *icd)
{
@@ -448,7 +498,7 @@ static int soc_camera_open(struct file *file)
struct soc_camera_host *ici;
int ret;
- if (!icd->ops)
+ if (!to_soc_camera_control(icd))
/* No device driver attached */
return -ENODEV;
@@ -476,7 +526,7 @@ static int soc_camera_open(struct file *file)
},
};
- ret = soc_camera_power_set(icd, icl, 1);
+ ret = soc_camera_power_on(icd, icl);
if (ret < 0)
goto epower;
@@ -512,6 +562,7 @@ static int soc_camera_open(struct file *file)
if (ret < 0)
goto einitvb;
}
+ v4l2_ctrl_handler_setup(&icd->ctrl_handler);
}
file->private_data = icd;
@@ -529,7 +580,7 @@ esfmt:
eresume:
ici->ops->remove(icd);
eiciadd:
- soc_camera_power_set(icd, icl, 0);
+ soc_camera_power_off(icd, icl);
epower:
icd->use_count--;
module_put(ici->ops->owner);
@@ -553,7 +604,7 @@ static int soc_camera_close(struct file *file)
if (ici->ops->init_videobuf2)
vb2_queue_release(&icd->vb2_vidq);
- soc_camera_power_set(icd, icl, 0);
+ soc_camera_power_off(icd, icl);
}
if (icd->streamer == file)
@@ -781,75 +832,6 @@ static int soc_camera_streamoff(struct file *file, void *priv,
return 0;
}
-static int soc_camera_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- int i;
-
- WARN_ON(priv != file->private_data);
-
- if (!qc->id)
- return -EINVAL;
-
- /* First check host controls */
- for (i = 0; i < ici->ops->num_controls; i++)
- if (qc->id == ici->ops->controls[i].id) {
- memcpy(qc, &(ici->ops->controls[i]),
- sizeof(*qc));
- return 0;
- }
-
- /* Then device controls */
- for (i = 0; i < icd->ops->num_controls; i++)
- if (qc->id == icd->ops->controls[i].id) {
- memcpy(qc, &(icd->ops->controls[i]),
- sizeof(*qc));
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int soc_camera_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- int ret;
-
- WARN_ON(priv != file->private_data);
-
- if (ici->ops->get_ctrl) {
- ret = ici->ops->get_ctrl(icd, ctrl);
- if (ret != -ENOIOCTLCMD)
- return ret;
- }
-
- return v4l2_subdev_call(sd, core, g_ctrl, ctrl);
-}
-
-static int soc_camera_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- int ret;
-
- WARN_ON(priv != file->private_data);
-
- if (ici->ops->set_ctrl) {
- ret = ici->ops->set_ctrl(icd, ctrl);
- if (ret != -ENOIOCTLCMD)
- return ret;
- }
-
- return v4l2_subdev_call(sd, core, s_ctrl, ctrl);
-}
-
static int soc_camera_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *a)
{
@@ -1003,7 +985,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
goto ei2cga;
}
- icl->board_info->platform_data = icd;
+ icl->board_info->platform_data = icl;
subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
icl->board_info, NULL);
@@ -1052,12 +1034,29 @@ static int soc_camera_probe(struct soc_camera_device *icd)
dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev));
+ /*
+ * Currently the subdev with the largest number of controls (13) is
+ * ov6550. So let's pick 16 as a hint for the control handler. Note
+ * that this is a hint only: too large and you waste some memory, too
+ * small and there is a (very) small performance hit when looking up
+ * controls in the internal hash.
+ */
+ ret = v4l2_ctrl_handler_init(&icd->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
icl->regulators);
if (ret < 0)
goto ereg;
- ret = soc_camera_power_set(icd, icl, 1);
+ /*
+ * This will not yet call v4l2_subdev_core_ops::s_power(1), because the
+ * subdevice has not been initialised yet. We'll have to call it once
+ * again after initialisation, even though it shouldn't be needed,
+ * since we don't do any IO here.
+ */
+ ret = soc_camera_power_on(icd, icl);
if (ret < 0)
goto epower;
@@ -1098,6 +1097,7 @@ static int soc_camera_probe(struct soc_camera_device *icd)
if (!control || !control->driver || !dev_get_drvdata(control) ||
!try_module_get(control->driver->owner)) {
icl->del_device(icd);
+ ret = -ENODEV;
goto enodrv;
}
}
@@ -1105,6 +1105,9 @@ static int soc_camera_probe(struct soc_camera_device *icd)
sd = soc_camera_to_subdev(icd);
sd->grp_id = (long)icd;
+ if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
+ goto ectrl;
+
/* At this point client .probe() should have run already */
ret = soc_camera_init_user_formats(icd);
if (ret < 0)
@@ -1123,6 +1126,10 @@ static int soc_camera_probe(struct soc_camera_device *icd)
if (ret < 0)
goto evidstart;
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto esdpwr;
+
/* Try to improve our guess of a reasonable window format */
if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) {
icd->user_width = mf.width;
@@ -1133,16 +1140,19 @@ static int soc_camera_probe(struct soc_camera_device *icd)
ici->ops->remove(icd);
- soc_camera_power_set(icd, icl, 0);
+ soc_camera_power_off(icd, icl);
mutex_unlock(&icd->video_lock);
return 0;
+esdpwr:
+ video_unregister_device(icd->vdev);
evidstart:
mutex_unlock(&icd->video_lock);
soc_camera_free_user_formats(icd);
eiufmt:
+ectrl:
if (icl->board_info) {
soc_camera_free_i2c(icd);
} else {
@@ -1152,13 +1162,15 @@ eiufmt:
enodrv:
eadddev:
video_device_release(icd->vdev);
+ icd->vdev = NULL;
evdc:
ici->ops->remove(icd);
eadd:
- soc_camera_power_set(icd, icl, 0);
+ soc_camera_power_off(icd, icl);
epower:
regulator_bulk_free(icl->num_regulators, icl->regulators);
ereg:
+ v4l2_ctrl_handler_free(&icd->ctrl_handler);
return ret;
}
@@ -1173,6 +1185,7 @@ static int soc_camera_remove(struct soc_camera_device *icd)
BUG_ON(!icd->parent);
+ v4l2_ctrl_handler_free(&icd->ctrl_handler);
if (vdev) {
video_unregister_device(vdev);
icd->vdev = NULL;
@@ -1363,24 +1376,24 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
.vidioc_querycap = soc_camera_querycap,
+ .vidioc_try_fmt_vid_cap = soc_camera_try_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap,
- .vidioc_enum_fmt_vid_cap = soc_camera_enum_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = soc_camera_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = soc_camera_enum_fmt_vid_cap,
.vidioc_enum_input = soc_camera_enum_input,
.vidioc_g_input = soc_camera_g_input,
.vidioc_s_input = soc_camera_s_input,
.vidioc_s_std = soc_camera_s_std,
+ .vidioc_g_std = soc_camera_g_std,
.vidioc_enum_framesizes = soc_camera_enum_fsizes,
.vidioc_reqbufs = soc_camera_reqbufs,
- .vidioc_try_fmt_vid_cap = soc_camera_try_fmt_vid_cap,
.vidioc_querybuf = soc_camera_querybuf,
.vidioc_qbuf = soc_camera_qbuf,
.vidioc_dqbuf = soc_camera_dqbuf,
+ .vidioc_create_bufs = soc_camera_create_bufs,
+ .vidioc_prepare_buf = soc_camera_prepare_buf,
.vidioc_streamon = soc_camera_streamon,
.vidioc_streamoff = soc_camera_streamoff,
- .vidioc_queryctrl = soc_camera_queryctrl,
- .vidioc_g_ctrl = soc_camera_g_ctrl,
- .vidioc_s_ctrl = soc_camera_s_ctrl,
.vidioc_cropcap = soc_camera_cropcap,
.vidioc_g_crop = soc_camera_g_crop,
.vidioc_s_crop = soc_camera_s_crop,
@@ -1409,6 +1422,7 @@ static int video_dev_create(struct soc_camera_device *icd)
vdev->ioctl_ops = &soc_camera_ioctl_ops;
vdev->release = video_device_release;
vdev->tvnorms = V4L2_STD_UNKNOWN;
+ vdev->ctrl_handler = &icd->ctrl_handler;
vdev->lock = &icd->video_lock;
icd->vdev = vdev;
@@ -1427,11 +1441,6 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
if (!icd->parent)
return -ENODEV;
- if (!icd->ops ||
- !icd->ops->query_bus_param ||
- !icd->ops->set_bus_param)
- return -EINVAL;
-
ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
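
soc_camera_power_set() is split above into soc_camera_power_on()/_off(), and the ordering matters: power-on acquires resources outside in (board regulators, then the optional platform power hook, then the subdevice's s_power core op) and unwinds in reverse on failure, while power-off mirrors the sequence in the opposite direction. A compact stand-alone illustration of the same goto-unwind pattern; the three step functions are stand-ins, not the soc_camera API:

#include <stdio.h>

static int enable_regulators(void)   { return 0; }
static void disable_regulators(void) { }
static int platform_power(int on)    { (void)on; return 0; }
static int subdev_s_power(int on)    { return on ? -1 : 0; } /* fails to show unwind */

static int power_on(void)
{
	int ret = enable_regulators();
	if (ret < 0)
		return ret;

	ret = platform_power(1);
	if (ret < 0)
		goto eplat;

	ret = subdev_s_power(1);
	if (ret < 0)
		goto esd;

	return 0;

esd:
	platform_power(0);
eplat:
	disable_regulators();
	return ret;
}

int main(void)
{
	printf("power_on() = %d\n", power_on());
	return 0;
}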
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index 8069cd6bc5e..4402a8a74f7 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -30,32 +30,12 @@ static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
return container_of(subdev, struct soc_camera_platform_priv, subdev);
}
-static struct soc_camera_platform_info *get_info(struct soc_camera_device *icd)
-{
- struct platform_device *pdev =
- to_platform_device(to_soc_camera_control(icd));
- return pdev->dev.platform_data;
-}
-
static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable)
{
struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
return p->set_capture(p, enable);
}
-static int soc_camera_platform_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
-{
- return 0;
-}
-
-static unsigned long
-soc_camera_platform_query_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_platform_info *p = get_info(icd);
- return p->bus_param;
-}
-
static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
@@ -115,6 +95,17 @@ static int soc_camera_platform_cropcap(struct v4l2_subdev *sd,
return 0;
}
+static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ cfg->flags = p->mbus_param;
+ cfg->type = p->mbus_type;
+
+ return 0;
+}
+
static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
.s_stream = soc_camera_platform_s_stream,
.enum_mbus_fmt = soc_camera_platform_enum_fmt,
@@ -123,6 +114,7 @@ static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
.try_mbus_fmt = soc_camera_platform_fill_fmt,
.g_mbus_fmt = soc_camera_platform_fill_fmt,
.s_mbus_fmt = soc_camera_platform_fill_fmt,
+ .g_mbus_config = soc_camera_platform_g_mbus_config,
};
static struct v4l2_subdev_ops platform_subdev_ops = {
@@ -130,11 +122,6 @@ static struct v4l2_subdev_ops platform_subdev_ops = {
.video = &platform_subdev_video_ops,
};
-static struct soc_camera_ops soc_camera_platform_ops = {
- .set_bus_param = soc_camera_platform_set_bus_param,
- .query_bus_param = soc_camera_platform_query_bus_param,
-};
-
static int soc_camera_platform_probe(struct platform_device *pdev)
{
struct soc_camera_host *ici;
@@ -163,8 +150,6 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
/* Set the control device reference */
icd->control = &pdev->dev;
- icd->ops = &soc_camera_platform_ops;
-
ici = to_soc_camera_host(icd->parent);
v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
@@ -178,7 +163,6 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
return ret;
evdrs:
- icd->ops = NULL;
platform_set_drvdata(pdev, NULL);
kfree(priv);
return ret;
@@ -187,11 +171,10 @@ evdrs:
static int soc_camera_platform_remove(struct platform_device *pdev)
{
struct soc_camera_platform_priv *priv = get_priv(pdev);
- struct soc_camera_platform_info *p = pdev->dev.platform_data;
- struct soc_camera_device *icd = p->icd;
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(&priv->subdev);
+ p->icd->control = NULL;
v4l2_device_unregister_subdev(&priv->subdev);
- icd->ops = NULL;
platform_set_drvdata(pdev, NULL);
kfree(priv);
return 0;
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
index bea7c9cf4f8..cf7f2194ded 100644
--- a/drivers/media/video/soc_mediabus.c
+++ b/drivers/media/video/soc_mediabus.c
@@ -383,6 +383,39 @@ const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
}
EXPORT_SYMBOL(soc_mbus_get_fmtdesc);
+unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
+ unsigned int flags)
+{
+ unsigned long common_flags;
+ bool hsync = true, vsync = true, pclk, data, mode;
+ bool mipi_lanes, mipi_clock;
+
+ common_flags = cfg->flags & flags;
+
+ switch (cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ case V4L2_MBUS_BT656:
+ pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
+ mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
+ return (!hsync || !vsync || !pclk || !data || !mode) ?
+ 0 : common_flags;
+ case V4L2_MBUS_CSI2:
+ mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
+ mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
+ return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(soc_mbus_config_compatible);
+
static int __init soc_mbus_init(void)
{
return 0;
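
The new soc_mbus_config_compatible() helper above is the heart of the SOCAM_* to V4L2_MBUS_* conversion: it intersects the flags offered by the subdevice with those the host accepts, then requires that at least one choice survives in every mandatory group (sync polarities, pixel-clock edge, data polarity and master/slave for parallel buses; lane count and clock mode for CSI-2). The fall-through from V4L2_MBUS_PARALLEL into V4L2_MBUS_BT656 is deliberate, since BT.656 embeds its syncs in the data stream. A stand-alone sketch of the same idea with invented flag values (the real ones live in v4l2-mediabus.h):

enum {
	HSYNC_HIGH = 1 << 0, HSYNC_LOW = 1 << 1,
	VSYNC_HIGH = 1 << 2, VSYNC_LOW = 1 << 3,
	PCLK_RISE  = 1 << 4, PCLK_FALL = 1 << 5,
	MASTER     = 1 << 6, SLAVE     = 1 << 7,
	DATA_HIGH  = 1 << 8, DATA_LOW  = 1 << 9,
};

/* Return the usable subset of flags, or 0 if any group ends up empty. */
static unsigned int parallel_compatible(unsigned int cam, unsigned int host)
{
	unsigned int common = cam & host;
	static const unsigned int groups[] = {
		HSYNC_HIGH | HSYNC_LOW,
		VSYNC_HIGH | VSYNC_LOW,
		PCLK_RISE  | PCLK_FALL,
		MASTER     | SLAVE,
		DATA_HIGH  | DATA_LOW,
	};
	unsigned int i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
		if (!(common & groups[i]))
			return 0;
	return common;
}

int main(void)
{
	unsigned int cam  = HSYNC_HIGH | VSYNC_HIGH | PCLK_RISE | MASTER | DATA_HIGH;
	unsigned int host = HSYNC_HIGH | HSYNC_LOW | VSYNC_HIGH | VSYNC_LOW |
			    PCLK_RISE | MASTER | DATA_HIGH;

	return parallel_compatible(cam, host) ? 0 : 1;
}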
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index 8afb0e8a2e0..d1b07aceaf9 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>
@@ -714,11 +715,6 @@ static int sr030pc30_base_config(struct v4l2_subdev *sd)
return ret;
}
-static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
-{
- return 0;
-}
-
static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -764,7 +760,6 @@ static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
};
static const struct v4l2_subdev_video_ops sr030pc30_video_ops = {
- .s_stream = sr030pc30_s_stream,
.g_mbus_fmt = sr030pc30_g_fmt,
.s_mbus_fmt = sr030pc30_s_fmt,
.try_mbus_fmt = sr030pc30_try_fmt,
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index d1a2cefbf55..cbc105f975d 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -55,6 +55,8 @@ MODULE_AUTHOR("Jaime Velasco Juan <jsagarribay@gmail.com> and Nicolas VIVIEN");
MODULE_DESCRIPTION("Syntek DC1125 webcam driver");
+/* bool for webcam LED management */
+int first_init = 1;
/* Some cameras have audio interfaces, we aren't interested in those */
static struct usb_device_id stkwebcam_table[] = {
@@ -518,7 +520,7 @@ static int stk_prepare_sio_buffers(struct stk_camera *dev, unsigned n_sbufs)
return -ENOMEM;
for (i = 0; i < n_sbufs; i++) {
if (stk_setup_siobuf(dev, i))
- return (dev->n_sbufs > 1)? 0 : -ENOMEM;
+ return (dev->n_sbufs > 1 ? 0 : -ENOMEM);
dev->n_sbufs = i+1;
}
}
@@ -558,9 +560,14 @@ static int v4l_stk_open(struct file *fp)
vdev = video_devdata(fp);
dev = vdev_to_camera(vdev);
- if (dev == NULL || !is_present(dev)) {
+ if (dev == NULL || !is_present(dev))
return -ENXIO;
- }
+
+ if (!first_init)
+ stk_camera_write_reg(dev, 0x0, 0x24);
+ else
+ first_init = 0;
+
fp->private_data = dev;
usb_autopm_get_interface(dev->interface);
@@ -574,10 +581,12 @@ static int v4l_stk_release(struct file *fp)
if (dev->owner == fp) {
stk_stop_stream(dev);
stk_free_buffers(dev);
+ stk_camera_write_reg(dev, 0x0, 0x49); /* turn off the LED */
+ unset_initialised(dev);
dev->owner = NULL;
}
- if(is_present(dev))
+ if (is_present(dev))
usb_autopm_put_interface(dev->interface);
return 0;
@@ -654,7 +663,7 @@ static unsigned int v4l_stk_poll(struct file *fp, poll_table *wait)
return POLLERR;
if (!list_empty(&dev->sio_full))
- return (POLLIN | POLLRDNORM);
+ return POLLIN | POLLRDNORM;
return 0;
}
@@ -891,9 +900,9 @@ static int stk_vidioc_g_fmt_vid_cap(struct file *filp,
struct stk_camera *dev = priv;
int i;
- for (i = 0; i < ARRAY_SIZE(stk_sizes)
- && stk_sizes[i].m != dev->vsettings.mode;
- i++);
+ for (i = 0; i < ARRAY_SIZE(stk_sizes) &&
+ stk_sizes[i].m != dev->vsettings.mode; i++)
+ ;
if (i == ARRAY_SIZE(stk_sizes)) {
STK_ERROR("ERROR: mode invalid\n");
return -EINVAL;
@@ -1305,9 +1314,8 @@ static int stk_camera_probe(struct usb_interface *interface,
usb_set_intfdata(interface, dev);
err = stk_register_video_device(dev);
- if (err) {
+ if (err)
goto error;
- }
return 0;
@@ -1350,6 +1358,7 @@ static int stk_camera_resume(struct usb_interface *intf)
return 0;
unset_initialised(dev);
stk_initialise(dev);
+ stk_camera_write_reg(dev, 0x0, 0x49);
stk_setup_format(dev);
if (is_streaming(dev))
stk_start_stream(dev);
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index b6ee1bd342d..462caa44ae0 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -27,6 +27,7 @@
*/
#include <linux/i2c.h>
+#include <linux/module.h>
#include <media/v4l2-int-device.h>
#include "tcm825x.h"
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index 84cd1b65b76..a0895bf0748 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/videobuf-dma-contig.h>
diff --git a/drivers/media/video/tlg2300/Makefile b/drivers/media/video/tlg2300/Makefile
index 81bb7fdd1e3..ea09b9af2d3 100644
--- a/drivers/media/video/tlg2300/Makefile
+++ b/drivers/media/video/tlg2300/Makefile
@@ -2,8 +2,8 @@ poseidon-objs := pd-video.o pd-alsa.o pd-dvb.o pd-radio.o pd-main.o
obj-$(CONFIG_VIDEO_TLG2300) += poseidon.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/dvb/dvb-core
+ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/media/video/tm6000/Kconfig
index 114eec8a630..114eec8a630 100644
--- a/drivers/staging/tm6000/Kconfig
+++ b/drivers/media/video/tm6000/Kconfig
diff --git a/drivers/staging/tm6000/Makefile b/drivers/media/video/tm6000/Makefile
index 395515b4a88..395515b4a88 100644
--- a/drivers/staging/tm6000/Makefile
+++ b/drivers/media/video/tm6000/Makefile
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/media/video/tm6000/tm6000-alsa.c
index bd5fa89af07..7d675c72fd4 100644
--- a/drivers/staging/tm6000/tm6000-alsa.c
+++ b/drivers/media/video/tm6000/tm6000-alsa.c
@@ -80,7 +80,7 @@ static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip)
dprintk(1, "Starting audio DMA\n");
/* Enables audio */
- tm6000_set_reg_mask(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x40, 0x40);
+ tm6000_set_reg_mask(core, TM6010_REQ07_RCC_ACTIVE_IF, 0x40, 0x40);
tm6000_set_audio_bitrate(core, 48000);
@@ -97,7 +97,7 @@ static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip)
dprintk(1, "Stopping audio DMA\n");
/* Disables audio */
- tm6000_set_reg_mask(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x00, 0x40);
+ tm6000_set_reg_mask(core, TM6010_REQ07_RCC_ACTIVE_IF, 0x00, 0x40);
return 0;
}
@@ -304,6 +304,7 @@ static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
schedule_work(&core->wq_trigger);
}
+ dsp_buffer_free(substream);
return 0;
}
@@ -397,7 +398,7 @@ static struct snd_pcm_ops snd_tm6000_pcm_ops = {
/*
* Alsa Constructor - Component probe
*/
-int tm6000_audio_init(struct tm6000_core *dev)
+static int tm6000_audio_init(struct tm6000_core *dev)
{
struct snd_card *card;
struct snd_tm6000_card *chip;
@@ -490,7 +491,7 @@ static int tm6000_audio_fini(struct tm6000_core *dev)
return 0;
}
-struct tm6000_ops audio_ops = {
+static struct tm6000_ops audio_ops = {
.type = TM6000_AUDIO,
.name = "TM6000 Audio Extension",
.init = tm6000_audio_init,
diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/media/video/tm6000/tm6000-cards.c
index 9227db5d895..ec2578a0fdf 100644
--- a/drivers/staging/tm6000/tm6000-cards.c
+++ b/drivers/media/video/tm6000/tm6000-cards.c
@@ -87,7 +87,7 @@ struct tm6000_board {
char *ir_codes;
};
-struct tm6000_board tm6000_boards[] = {
+static struct tm6000_board tm6000_boards[] = {
[TM6000_BOARD_UNKNOWN] = {
.name = "Unknown tm6000 video grabber",
.caps = {
@@ -394,7 +394,7 @@ struct tm6000_board tm6000_boards[] = {
.has_zl10353 = 1,
.has_eeprom = 1,
.has_remote = 1,
- .has_radio = 1.
+ .has_radio = 1,
},
.gpio = {
.tuner_reset = TM6010_GPIO_0,
@@ -468,6 +468,7 @@ struct tm6000_board tm6000_boards[] = {
.has_zl10353 = 1,
.has_eeprom = 1,
.has_remote = 1,
+ .has_radio = 1,
},
.gpio = {
.tuner_reset = TM6010_GPIO_2,
@@ -493,6 +494,10 @@ struct tm6000_board tm6000_boards[] = {
.amux = TM6000_AMUX_ADC2,
},
},
+ .rinput = {
+ .type = TM6000_INPUT_RADIO,
+ .amux = TM6000_AMUX_SIF1,
+ },
},
[TM5600_BOARD_TERRATEC_GRABSTER] = {
.name = "Terratec Grabster AV 150/250 MX",
@@ -611,7 +616,7 @@ struct tm6000_board tm6000_boards[] = {
};
/* table of devices that work with this driver */
-struct usb_device_id tm6000_id_table[] = {
+static struct usb_device_id tm6000_id_table[] = {
{ USB_DEVICE(0x6000, 0x0001), .driver_info = TM5600_BOARD_GENERIC },
{ USB_DEVICE(0x6000, 0x0002), .driver_info = TM6010_BOARD_GENERIC },
{ USB_DEVICE(0x06e1, 0xf332), .driver_info = TM6000_BOARD_ADSTECH_DUAL_TV },
@@ -632,7 +637,7 @@ struct usb_device_id tm6000_id_table[] = {
{ USB_DEVICE(0x13d3, 0x3264), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
{ USB_DEVICE(0x6000, 0xdec2), .driver_info = TM6010_BOARD_BEHOLD_WANDER_LITE },
{ USB_DEVICE(0x6000, 0xdec3), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER_LITE },
- { },
+ { }
};
/* Control power led for show some activity */
@@ -780,6 +785,11 @@ int tm6000_tuner_callback(void *ptr, int component, int command, int arg)
rc = tm6000_i2c_reset(dev, 100);
break;
}
+ break;
+ case XC2028_I2C_FLUSH:
+ tm6000_set_reg(dev, REQ_50_SET_START, 0, 0);
+ tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0);
+ break;
}
return rc;
}
@@ -787,8 +797,6 @@ EXPORT_SYMBOL_GPL(tm6000_tuner_callback);
int tm6000_cards_setup(struct tm6000_core *dev)
{
- int i, rc;
-
/*
* Board-specific initialization sequence. Handles all GPIO
* initialization sequences that are board-specific.
@@ -860,6 +868,9 @@ int tm6000_cards_setup(struct tm6000_core *dev)
*/
if (dev->gpio.tuner_reset) {
+ int rc;
+ int i;
+
for (i = 0; i < 2; i++) {
rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
dev->gpio.tuner_reset, 0x00);
@@ -988,6 +999,16 @@ static int fill_board_specific_data(struct tm6000_core *dev)
dev->vinput[2] = tm6000_boards[dev->model].vinput[2];
dev->rinput = tm6000_boards[dev->model].rinput;
+ /* setup per-model quirks */
+ switch (dev->model) {
+ case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ dev->quirks |= TM6000_QUIRK_NO_USB_DELAY;
+ break;
+
+ default:
+ break;
+ }
+
/* initialize hardware */
rc = tm6000_init(dev);
if (rc < 0)
@@ -1162,13 +1183,14 @@ static int tm6000_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
spin_lock_init(&dev->slock);
+ mutex_init(&dev->usb_lock);
/* Increment usage count */
- tm6000_devused |= 1<<nr;
+ set_bit(nr, &tm6000_devused);
snprintf(dev->name, 29, "tm6000 #%d", nr);
dev->model = id->driver_info;
- if ((card[nr] >= 0) && (card[nr] < ARRAY_SIZE(tm6000_boards)))
+ if (card[nr] < ARRAY_SIZE(tm6000_boards))
dev->model = card[nr];
dev->udev = usbdev;
@@ -1189,8 +1211,6 @@ static int tm6000_usb_probe(struct usb_interface *interface,
speed = "unknown";
}
-
-
/* Get endpoints */
for (i = 0; i < interface->num_altsetting; i++) {
int ep;
@@ -1274,7 +1294,6 @@ static int tm6000_usb_probe(struct usb_interface *interface,
printk(KERN_INFO "tm6000: Found %s\n", tm6000_boards[dev->model].name);
rc = tm6000_init_dev(dev);
-
if (rc < 0)
goto err;
@@ -1283,7 +1302,7 @@ static int tm6000_usb_probe(struct usb_interface *interface,
err:
printk(KERN_ERR "tm6000: Error %d while registering\n", rc);
- tm6000_devused &= ~(1<<nr);
+ clear_bit(nr, &tm6000_devused);
usb_put_dev(usbdev);
kfree(dev);
@@ -1341,6 +1360,7 @@ static void tm6000_usb_disconnect(struct usb_interface *interface)
tm6000_close_extension(dev);
tm6000_remove_from_devlist(dev);
+ clear_bit(dev->devno, &tm6000_devused);
kfree(dev);
}
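
Besides the board-table fixes, tm6000-cards.c moves the device-number bookkeeping from open-coded bit fiddling (tm6000_devused |= 1 << nr and the matching mask-out) to set_bit()/clear_bit(), which operate atomically on the bitmap, and the disconnect path now releases the slot as well. A user-space analogue of the same idea using C11 atomics; set_bit()/clear_bit() themselves are kernel primitives and are only mimicked here:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong devused;	/* one bit per probed device slot */

static void slot_set(int nr)
{
	atomic_fetch_or(&devused, 1UL << nr);
}

static void slot_clear(int nr)
{
	atomic_fetch_and(&devused, ~(1UL << nr));
}

int main(void)
{
	slot_set(0);
	slot_set(3);
	slot_clear(0);
	printf("devused = 0x%lx\n", atomic_load(&devused));	/* 0x8 */
	return 0;
}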
diff --git a/drivers/staging/tm6000/tm6000-core.c b/drivers/media/video/tm6000/tm6000-core.c
index d7eb2e23cdb..9783616a0da 100644
--- a/drivers/staging/tm6000/tm6000-core.c
+++ b/drivers/media/video/tm6000/tm6000-core.c
@@ -39,10 +39,11 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
unsigned int pipe;
u8 *data = NULL;
+ mutex_lock(&dev->usb_lock);
+
if (len)
data = kzalloc(len, GFP_KERNEL);
-
if (req_type & USB_DIR_IN)
pipe = usb_rcvctrlpipe(dev->udev, 0);
else {
@@ -51,18 +52,18 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
}
if (tm6000_debug & V4L2_DEBUG_I2C) {
- printk("(dev %p, pipe %08x): ", dev->udev, pipe);
+ printk(KERN_DEBUG "(dev %p, pipe %08x): ", dev->udev, pipe);
- printk("%s: %02x %02x %02x %02x %02x %02x %02x %02x ",
+ printk(KERN_CONT "%s: %02x %02x %02x %02x %02x %02x %02x %02x ",
(req_type & USB_DIR_IN) ? " IN" : "OUT",
req_type, req, value&0xff, value>>8, index&0xff,
index>>8, len&0xff, len>>8);
if (!(req_type & USB_DIR_IN)) {
- printk(">>> ");
+ printk(KERN_CONT ">>> ");
for (i = 0; i < len; i++)
- printk(" %02x", buf[i]);
- printk("\n");
+ printk(KERN_CONT " %02x", buf[i]);
+ printk(KERN_CONT "\n");
}
}
@@ -75,21 +76,21 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
if (tm6000_debug & V4L2_DEBUG_I2C) {
if (ret < 0) {
if (req_type & USB_DIR_IN)
- printk("<<< (len=%d)\n", len);
+ printk(KERN_DEBUG "<<< (len=%d)\n", len);
- printk("%s: Error #%d\n", __FUNCTION__, ret);
+ printk(KERN_CONT "%s: Error #%d\n", __func__, ret);
} else if (req_type & USB_DIR_IN) {
- printk("<<< ");
+ printk(KERN_CONT "<<< ");
for (i = 0; i < len; i++)
- printk(" %02x", buf[i]);
- printk("\n");
+ printk(KERN_CONT " %02x", buf[i]);
+ printk(KERN_CONT "\n");
}
}
kfree(data);
-
msleep(5);
+ mutex_unlock(&dev->usb_lock);
return ret;
}
@@ -188,11 +189,11 @@ void tm6000_set_fourcc_format(struct tm6000_core *dev)
if (dev->dev_type == TM6010) {
int val;
- val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0) & 0xfc;
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_IF, 0) & 0xfc;
if (dev->fourcc == V4L2_PIX_FMT_UYVY)
- tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_IF, val);
else
- tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val | 1);
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_IF, val | 1);
} else {
if (dev->fourcc == V4L2_PIX_FMT_UYVY)
tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0xd0);
@@ -268,9 +269,14 @@ int tm6000_init_analog_mode(struct tm6000_core *dev)
struct v4l2_frequency f;
if (dev->dev_type == TM6010) {
+ u8 active = TM6010_REQ07_RCC_ACTIVE_IF_AUDIO_ENABLE;
+
+ if (!dev->radio)
+ active |= TM6010_REQ07_RCC_ACTIVE_IF_VIDEO_ENABLE;
+
/* Enable video and audio */
- tm6000_set_reg_mask(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF,
- 0x60, 0x60);
+ tm6000_set_reg_mask(dev, TM6010_REQ07_RCC_ACTIVE_IF,
+ active, 0x60);
/* Disable TS input */
tm6000_set_reg_mask(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE,
0x00, 0x40);
@@ -308,7 +314,7 @@ int tm6000_init_analog_mode(struct tm6000_core *dev)
* FIXME: This is a hack! xc3028 "sleeps" when no channel is detected
* for more than a few seconds. Not sure why, as this behavior does
* not happen on other devices with xc3028. So, I suspect that it
- * is yet another bug at tm6000. After start sleeping, decoding
+ * is yet another bug at tm6000. After start sleeping, decoding
* doesn't start automatically. Instead, it requires some
* I2C commands to wake it up. As we want to have image at the
* beginning, we needed to add this hack. The better would be to
@@ -335,7 +341,7 @@ int tm6000_init_digital_mode(struct tm6000_core *dev)
{
if (dev->dev_type == TM6010) {
/* Disable video and audio */
- tm6000_set_reg_mask(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF,
+ tm6000_set_reg_mask(dev, TM6010_REQ07_RCC_ACTIVE_IF,
0x00, 0x60);
/* Enable TS input */
tm6000_set_reg_mask(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE,
@@ -390,7 +396,7 @@ struct reg_init {
};
/* The meaning of these initializations is unknown */
-struct reg_init tm6000_init_tab[] = {
+static struct reg_init tm6000_init_tab[] = {
/* REG VALUE */
{ TM6000_REQ07_RDF_PWDOWN_ACLK, 0x1f },
{ TM6010_REQ07_RFF_SOFT_RESET, 0x08 },
@@ -458,12 +464,12 @@ struct reg_init tm6000_init_tab[] = {
{ TM6010_REQ05_R18_IMASK7, 0x00 },
};
-struct reg_init tm6010_init_tab[] = {
+static struct reg_init tm6010_init_tab[] = {
{ TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x00 },
{ TM6010_REQ07_RC4_HSTART0, 0xa0 },
{ TM6010_REQ07_RC6_HEND0, 0x40 },
{ TM6010_REQ07_RCA_VEND0, 0x31 },
- { TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0xe1 },
+ { TM6010_REQ07_RCC_ACTIVE_IF, 0xe1 },
{ TM6010_REQ07_RE0_DVIDEO_SOURCE, 0x03 },
{ TM6010_REQ07_RFE_POWER_DOWN, 0x7f },
@@ -593,6 +599,56 @@ int tm6000_init(struct tm6000_core *dev)
return rc;
}
+int tm6000_reset(struct tm6000_core *dev)
+{
+ int pipe;
+ int err;
+
+ msleep(500);
+
+ err = usb_set_interface(dev->udev, dev->isoc_in.bInterfaceNumber, 0);
+ if (err < 0) {
+ tm6000_err("failed to select interface %d, alt. setting 0\n",
+ dev->isoc_in.bInterfaceNumber);
+ return err;
+ }
+
+ err = usb_reset_configuration(dev->udev);
+ if (err < 0) {
+ tm6000_err("failed to reset configuration\n");
+ return err;
+ }
+
+ if ((dev->quirks & TM6000_QUIRK_NO_USB_DELAY) == 0)
+ msleep(5);
+
+ /*
+ * Not all devices have int_in defined
+ */
+ if (!dev->int_in.endp)
+ return 0;
+
+ err = usb_set_interface(dev->udev, dev->isoc_in.bInterfaceNumber, 2);
+ if (err < 0) {
+ tm6000_err("failed to select interface %d, alt. setting 2\n",
+ dev->isoc_in.bInterfaceNumber);
+ return err;
+ }
+
+ msleep(5);
+
+ pipe = usb_rcvintpipe(dev->udev,
+ dev->int_in.endp->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+
+ err = usb_clear_halt(dev->udev, pipe);
+ if (err < 0) {
+ tm6000_err("usb_clear_halt failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
int tm6000_set_audio_bitrate(struct tm6000_core *dev, int bitrate)
{
int val = 0;
@@ -687,7 +743,7 @@ int tm6000_set_audio_rinput(struct tm6000_core *dev)
return 0;
}
-void tm6010_set_mute_sif(struct tm6000_core *dev, u8 mute)
+static void tm6010_set_mute_sif(struct tm6000_core *dev, u8 mute)
{
u8 mute_reg = 0;
@@ -697,7 +753,7 @@ void tm6010_set_mute_sif(struct tm6000_core *dev, u8 mute)
tm6000_set_reg_mask(dev, TM6010_REQ08_R0A_A_I2S_MOD, mute_reg, 0x08);
}
-void tm6010_set_mute_adc(struct tm6000_core *dev, u8 mute)
+static void tm6010_set_mute_adc(struct tm6000_core *dev, u8 mute)
{
u8 mute_reg = 0;
@@ -749,7 +805,7 @@ int tm6000_tvaudio_set_mute(struct tm6000_core *dev, u8 mute)
return 0;
}
-void tm6010_set_volume_sif(struct tm6000_core *dev, int vol)
+static void tm6010_set_volume_sif(struct tm6000_core *dev, int vol)
{
u8 vol_reg;
@@ -762,7 +818,7 @@ void tm6010_set_volume_sif(struct tm6000_core *dev, int vol)
tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, vol_reg);
}
-void tm6010_set_volume_adc(struct tm6000_core *dev, int vol)
+static void tm6010_set_volume_adc(struct tm6000_core *dev, int vol)
{
u8 vol_reg;
diff --git a/drivers/staging/tm6000/tm6000-dvb.c b/drivers/media/video/tm6000/tm6000-dvb.c
index 0e0dfce0582..5e6c129a4be 100644
--- a/drivers/staging/tm6000/tm6000-dvb.c
+++ b/drivers/media/video/tm6000/tm6000-dvb.c
@@ -105,7 +105,7 @@ static void tm6000_urb_received(struct urb *urb)
}
}
-int tm6000_start_stream(struct tm6000_core *dev)
+static int tm6000_start_stream(struct tm6000_core *dev)
{
int ret;
unsigned int pipe, size;
@@ -166,7 +166,7 @@ int tm6000_start_stream(struct tm6000_core *dev)
return 0;
}
-void tm6000_stop_stream(struct tm6000_core *dev)
+static void tm6000_stop_stream(struct tm6000_core *dev)
{
struct tm6000_dvb *dvb = dev->dvb;
@@ -180,7 +180,7 @@ void tm6000_stop_stream(struct tm6000_core *dev)
}
}
-int tm6000_start_feed(struct dvb_demux_feed *feed)
+static int tm6000_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct tm6000_core *dev = demux->priv;
@@ -199,7 +199,7 @@ int tm6000_start_feed(struct dvb_demux_feed *feed)
return 0;
}
-int tm6000_stop_feed(struct dvb_demux_feed *feed)
+static int tm6000_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct tm6000_core *dev = demux->priv;
@@ -222,7 +222,7 @@ int tm6000_stop_feed(struct dvb_demux_feed *feed)
return 0;
}
-int tm6000_dvb_attach_frontend(struct tm6000_core *dev)
+static int tm6000_dvb_attach_frontend(struct tm6000_core *dev)
{
struct tm6000_dvb *dvb = dev->dvb;
@@ -247,7 +247,7 @@ int tm6000_dvb_attach_frontend(struct tm6000_core *dev)
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-int register_dvb(struct tm6000_core *dev)
+static int register_dvb(struct tm6000_core *dev)
{
int ret = -1;
struct tm6000_dvb *dvb = dev->dvb;
@@ -330,7 +330,7 @@ int register_dvb(struct tm6000_core *dev)
dvb->demux.write_to_decoder = NULL;
ret = dvb_dmx_init(&dvb->demux);
if (ret < 0) {
- printk("tm6000: dvb_dmx_init failed (errno = %d)\n", ret);
+ printk(KERN_ERR "tm6000: dvb_dmx_init failed (errno = %d)\n", ret);
goto frontend_err;
}
@@ -340,7 +340,7 @@ int register_dvb(struct tm6000_core *dev)
ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
if (ret < 0) {
- printk("tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret);
+ printk(KERN_ERR "tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret);
goto dvb_dmx_err;
}
@@ -359,7 +359,7 @@ err:
return ret;
}
-void unregister_dvb(struct tm6000_core *dev)
+static void unregister_dvb(struct tm6000_core *dev)
{
struct tm6000_dvb *dvb = dev->dvb;
diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/media/video/tm6000/tm6000-i2c.c
index 5a651ea5f60..0290bbf00c3 100644
--- a/drivers/staging/tm6000/tm6000-i2c.c
+++ b/drivers/media/video/tm6000/tm6000-i2c.c
@@ -189,7 +189,7 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
/* 1 or 2 byte write followed by a read */
if (i2c_debug >= 2)
for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
i2c_dprintk(2, "; joined to read %s len=%d:",
i == num - 2 ? "stop" : "nonstop",
msgs[i + 1].len);
@@ -211,22 +211,17 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
}
if (i2c_debug >= 2)
for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
} else {
/* write bytes */
if (i2c_debug >= 2)
for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
rc = tm6000_i2c_send_regs(dev, addr, msgs[i].buf[0],
msgs[i].buf + 1, msgs[i].len - 1);
-
- if (addr == dev->tuner_addr << 1) {
- tm6000_set_reg(dev, REQ_50_SET_START, 0, 0);
- tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0);
- }
}
if (i2c_debug >= 2)
- printk("\n");
+ printk(KERN_CONT "\n");
if (rc < 0)
goto err;
}
@@ -264,7 +259,7 @@ static int tm6000_i2c_eeprom(struct tm6000_core *dev)
p++;
if (0 == (i % 16))
printk(KERN_INFO "%s: i2c eeprom %02x:", dev->name, i);
- printk(" %02x", dev->eedata[i]);
+ printk(KERN_CONT " %02x", dev->eedata[i]);
if ((dev->eedata[i] >= ' ') && (dev->eedata[i] <= 'z'))
bytes[i%16] = dev->eedata[i];
else
@@ -274,14 +269,14 @@ static int tm6000_i2c_eeprom(struct tm6000_core *dev)
if (0 == (i % 16)) {
bytes[16] = '\0';
- printk(" %s\n", bytes);
+ printk(KERN_CONT " %s\n", bytes);
}
}
if (0 != (i%16)) {
bytes[i%16] = '\0';
for (i %= 16; i < 16; i++)
- printk(" ");
- printk(" %s\n", bytes);
+ printk(KERN_CONT " ");
+ printk(KERN_CONT " %s\n", bytes);
}
return 0;
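
All of the printk() conversions in this file add KERN_CONT, which marks a fragment as a continuation of the previous record rather than the start of a new one; without it each " %02x" would land on its own log line. A stand-alone sketch of the hex-dump idiom used above (buffer, name and length are illustrative, not taken from the driver):

static void example_dump_row(const char *name, const u8 *buf, int len)
{
	int i;

	printk(KERN_INFO "%s: i2c eeprom 00:", name);	/* opens the record */
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", buf[i]);	/* appends to it    */
	printk(KERN_CONT "\n");				/* closes the line  */
}
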
diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/media/video/tm6000/tm6000-input.c
index 70a2c5f557c..405d12729d0 100644
--- a/drivers/staging/tm6000/tm6000-input.c
+++ b/drivers/media/video/tm6000/tm6000-input.c
@@ -284,7 +284,7 @@ static void tm6000_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
+static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
{
struct tm6000_IR *ir = rc->priv;
diff --git a/drivers/staging/tm6000/tm6000-regs.h b/drivers/media/video/tm6000/tm6000-regs.h
index 5375a834737..7f491b6de93 100644
--- a/drivers/staging/tm6000/tm6000-regs.h
+++ b/drivers/media/video/tm6000/tm6000-regs.h
@@ -90,7 +90,7 @@
*/
enum {
- TM6000_URB_MSG_VIDEO=1,
+ TM6000_URB_MSG_VIDEO = 1,
TM6000_URB_MSG_AUDIO,
TM6000_URB_MSG_VBI,
TM6000_URB_MSG_PTS,
@@ -270,7 +270,9 @@ enum {
#define TM6010_REQ07_RCA_VEND0 0x07, 0xca
#define TM6010_REQ07_RCB_DELAY 0x07, 0xcb
/* ONLY for TM6010 */
-#define TM6010_REQ07_RCC_ACTIVE_VIDEO_IF 0x07, 0xcc
+#define TM6010_REQ07_RCC_ACTIVE_IF 0x07, 0xcc
+#define TM6010_REQ07_RCC_ACTIVE_IF_VIDEO_ENABLE (1 << 5)
+#define TM6010_REQ07_RCC_ACTIVE_IF_AUDIO_ENABLE (1 << 6)
#define TM6010_REQ07_RD0_USB_PERIPHERY_CONTROL 0x07, 0xd0
#define TM6010_REQ07_RD1_ADDR_FOR_REQ1 0x07, 0xd1
#define TM6010_REQ07_RD2_ADDR_FOR_REQ2 0x07, 0xd2
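
The register rename comes with two new bit definitions that document what the 0x60 mask used in tm6000_init_digital_mode() covers: bit 5 for the video IF and bit 6 for the audio IF, going by the macro names. A hypothetical rewrite of that mask call using the new names (the patch itself keeps the numeric mask):

static void example_disable_analog_if(struct tm6000_core *dev)
{
	/* clear bits 5 and 6 (0x60): video IF and audio IF off */
	tm6000_set_reg_mask(dev, TM6010_REQ07_RCC_ACTIVE_IF, 0x00,
			    TM6010_REQ07_RCC_ACTIVE_IF_VIDEO_ENABLE |
			    TM6010_REQ07_RCC_ACTIVE_IF_AUDIO_ENABLE);
}
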
diff --git a/drivers/media/video/tm6000/tm6000-stds.c b/drivers/media/video/tm6000/tm6000-stds.c
new file mode 100644
index 00000000000..9a4145dc3d8
--- /dev/null
+++ b/drivers/media/video/tm6000/tm6000-stds.c
@@ -0,0 +1,659 @@
+/*
+ * tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+static unsigned int tm6010_a_mode;
+module_param(tm6010_a_mode, int, 0644);
+MODULE_PARM_DESC(tm6010_a_mode, "set tm6010 sif audio mode");
+
+struct tm6000_reg_settings {
+ unsigned char req;
+ unsigned char reg;
+ unsigned char value;
+};
+
+
+struct tm6000_std_settings {
+ v4l2_std_id id;
+ struct tm6000_reg_settings *common;
+};
+
+static struct tm6000_reg_settings composite_pal_m[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x04 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x20 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings composite_pal_nc[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x36 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings composite_pal[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x32 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25 },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings composite_secam[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24 },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18 },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xff },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings composite_ntsc[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_std_settings composite_stds[] = {
+ { .id = V4L2_STD_PAL_M, .common = composite_pal_m, },
+ { .id = V4L2_STD_PAL_Nc, .common = composite_pal_nc, },
+ { .id = V4L2_STD_PAL, .common = composite_pal, },
+ { .id = V4L2_STD_SECAM, .common = composite_secam, },
+ { .id = V4L2_STD_NTSC, .common = composite_ntsc, },
+};
+
+static struct tm6000_reg_settings svideo_pal_m[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x05 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings svideo_pal_nc[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x37 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings svideo_pal[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x33 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25 },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings svideo_secam[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x39 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24 },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92 },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18 },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xff },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_reg_settings svideo_ntsc[] = {
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x01 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30 },
+ { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0x8b },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f },
+ { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd },
+ { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+ { 0, 0, 0 }
+};
+
+static struct tm6000_std_settings svideo_stds[] = {
+ { .id = V4L2_STD_PAL_M, .common = svideo_pal_m, },
+ { .id = V4L2_STD_PAL_Nc, .common = svideo_pal_nc, },
+ { .id = V4L2_STD_PAL, .common = svideo_pal, },
+ { .id = V4L2_STD_SECAM, .common = svideo_secam, },
+ { .id = V4L2_STD_NTSC, .common = svideo_ntsc, },
+};
+
+static int tm6000_set_audio_std(struct tm6000_core *dev)
+{
+ uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */
+ uint8_t areg_05 = 0x01; /* Auto 4.5 = M Japan, Auto 6.5 = DK */
+ uint8_t areg_06 = 0x02; /* Auto de-emphasis, manual channel mode */
+ uint8_t nicam_flag = 0; /* No NICAM */
+
+ if (dev->radio) {
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, 0x04);
+ tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, 0x0c);
+ /* set mono or stereo */
+ if (dev->amode == V4L2_TUNER_MODE_MONO)
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00);
+ else if (dev->amode == V4L2_TUNER_MODE_STEREO)
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x02);
+ tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x18);
+ tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x0a);
+ tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x40);
+ tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, 0xff);
+ return 0;
+ }
+
+ switch (tm6010_a_mode) {
+ /* auto */
+ case 0:
+ switch (dev->norm) {
+ case V4L2_STD_NTSC_M_KR:
+ areg_05 |= 0x00;
+ break;
+ case V4L2_STD_NTSC_M_JP:
+ areg_05 |= 0x40;
+ break;
+ case V4L2_STD_NTSC_M:
+ case V4L2_STD_PAL_M:
+ case V4L2_STD_PAL_N:
+ areg_05 |= 0x20;
+ break;
+ case V4L2_STD_PAL_Nc:
+ areg_05 |= 0x60;
+ break;
+ case V4L2_STD_SECAM_L:
+ areg_05 |= 0x00;
+ break;
+ case V4L2_STD_DK:
+ areg_05 |= 0x10;
+ break;
+ }
+ break;
+ /* A2 */
+ case 1:
+ switch (dev->norm) {
+ case V4L2_STD_B:
+ case V4L2_STD_GH:
+ areg_05 = 0x05;
+ break;
+ case V4L2_STD_DK:
+ areg_05 = 0x09;
+ break;
+ }
+ break;
+ /* NICAM */
+ case 2:
+ switch (dev->norm) {
+ case V4L2_STD_B:
+ case V4L2_STD_GH:
+ areg_05 = 0x07;
+ break;
+ case V4L2_STD_DK:
+ areg_05 = 0x06;
+ break;
+ case V4L2_STD_PAL_I:
+ areg_05 = 0x08;
+ break;
+ case V4L2_STD_SECAM_L:
+ areg_05 = 0x0a;
+ areg_02 = 0x02;
+ break;
+ }
+ nicam_flag = 1;
+ break;
+ /* other */
+ case 3:
+ switch (dev->norm) {
+ /* DK3_A2 */
+ case V4L2_STD_DK:
+ areg_05 = 0x0b;
+ break;
+ /* Korea */
+ case V4L2_STD_NTSC_M_KR:
+ areg_05 = 0x04;
+ break;
+ /* EIAJ */
+ case V4L2_STD_NTSC_M_JP:
+ areg_05 = 0x03;
+ break;
+ default:
+ areg_05 = 0x02;
+ break;
+ }
+ break;
+ }
+
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, areg_02);
+ tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, areg_05);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, areg_06);
+ tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91);
+ tm6000_set_reg(dev, TM6010_REQ08_R0B_A_ASD_THRES1, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0E_A_MONO_THRES1, 0xf0);
+ tm6000_set_reg(dev, TM6010_REQ08_R0F_A_MONO_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R10_A_MUTE_THRES1, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ08_R11_A_MUTE_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R12_A_AGC_U, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R13_A_AGC_ERR_T, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R14_A_AGC_GAIN_INIT, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R15_A_AGC_STEP_THR, 0x14);
+ tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ08_R18_A_TR_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R19_A_FH_2FH_GAIN, 0x32);
+ tm6000_set_reg(dev, TM6010_REQ08_R1A_A_NICAM_SER_MAX, 0x64);
+ tm6000_set_reg(dev, TM6010_REQ08_R1B_A_NICAM_SER_MIN, 0x20);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1c, 0x00);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1d, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
+ tm6000_set_reg(dev, TM6010_REQ08_R1F_A_TEST_INTF_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R20_A_TEST_PIN_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
+
+ return 0;
+}
+
+void tm6000_get_std_res(struct tm6000_core *dev)
+{
+ /* Currently, these are the only supported resolutions */
+ if (dev->norm & V4L2_STD_525_60)
+ dev->height = 480;
+ else
+ dev->height = 576;
+
+ dev->width = 720;
+}
+
+static int tm6000_load_std(struct tm6000_core *dev, struct tm6000_reg_settings *set)
+{
+ int i, rc;
+
+ /* Load board's initialization table */
+ for (i = 0; set[i].req; i++) {
+ rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i while setting "
+ "req %d, reg %d to value %d\n",
+ rc, set[i].req, set[i].reg, set[i].value);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
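
tm6000_load_std() above relies on the { 0, 0, 0 } sentinel that terminates every settings table in this file: the loop stops at the first entry whose request byte is zero, so the arrays carry no explicit length. A trimmed-down sketch of the same idiom with a made-up table:

static const struct tm6000_reg_settings example_tab[] = {
	{ TM6010_REQ07_R3F_RESET, 0x01 },	/* assert soft reset  */
	{ TM6010_REQ07_R3F_RESET, 0x00 },	/* release soft reset */
	{ 0, 0, 0 }				/* sentinel entry     */
};

static int example_load(struct tm6000_core *dev)
{
	int i, rc;

	for (i = 0; example_tab[i].req; i++) {
		rc = tm6000_set_reg(dev, example_tab[i].req,
				    example_tab[i].reg, example_tab[i].value);
		if (rc < 0)
			return rc;
	}
	return 0;
}
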
+int tm6000_set_standard(struct tm6000_core *dev)
+{
+ struct tm6000_input *input;
+ int i, rc = 0;
+ u8 reg_07_fe = 0x8a;
+ u8 reg_08_f1 = 0xfc;
+ u8 reg_08_e2 = 0xf0;
+ u8 reg_08_e6 = 0x0f;
+
+ tm6000_get_std_res(dev);
+
+ if (!dev->radio)
+ input = &dev->vinput[dev->input];
+ else
+ input = &dev->rinput;
+
+ if (dev->dev_type == TM6010) {
+ switch (input->vmux) {
+ case TM6000_VMUX_VIDEO_A:
+ tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4);
+ tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1);
+ tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0);
+ tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2);
+ tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe8);
+ reg_07_fe |= 0x01;
+ break;
+ case TM6000_VMUX_VIDEO_B:
+ tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8);
+ tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1);
+ tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0);
+ tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2);
+ tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe8);
+ reg_07_fe |= 0x01;
+ break;
+ case TM6000_VMUX_VIDEO_AB:
+ tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc);
+ tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8);
+ reg_08_e6 = 0x00;
+ tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2);
+ tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0);
+ tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2);
+ tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe0);
+ break;
+ default:
+ break;
+ }
+ switch (input->amux) {
+ case TM6000_AMUX_ADC1:
+ tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
+ 0x00, 0x0f);
+ break;
+ case TM6000_AMUX_ADC2:
+ tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
+ 0x08, 0x0f);
+ break;
+ case TM6000_AMUX_SIF1:
+ reg_08_e2 |= 0x02;
+ reg_08_e6 = 0x08;
+ reg_07_fe |= 0x40;
+ reg_08_f1 |= 0x02;
+ tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3);
+ tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
+ 0x02, 0x0f);
+ break;
+ case TM6000_AMUX_SIF2:
+ reg_08_e2 |= 0x02;
+ reg_08_e6 = 0x08;
+ reg_07_fe |= 0x40;
+ reg_08_f1 |= 0x02;
+ tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf7);
+ tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
+ 0x02, 0x0f);
+ break;
+ default:
+ break;
+ }
+ tm6000_set_reg(dev, TM6010_REQ08_RE2_POWER_DOWN_CTRL1, reg_08_e2);
+ tm6000_set_reg(dev, TM6010_REQ08_RE6_POWER_DOWN_CTRL2, reg_08_e6);
+ tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, reg_08_f1);
+ tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, reg_07_fe);
+ } else {
+ switch (input->vmux) {
+ case TM6000_VMUX_VIDEO_A:
+ tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x10);
+ tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x00);
+ tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x0f);
+ tm6000_set_reg(dev,
+ REQ_03_SET_GET_MCU_PIN, input->v_gpio, 0);
+ break;
+ case TM6000_VMUX_VIDEO_B:
+ tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x00);
+ tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x00);
+ tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x0f);
+ tm6000_set_reg(dev,
+ REQ_03_SET_GET_MCU_PIN, input->v_gpio, 0);
+ break;
+ case TM6000_VMUX_VIDEO_AB:
+ tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x10);
+ tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x10);
+ tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x00);
+ tm6000_set_reg(dev,
+ REQ_03_SET_GET_MCU_PIN, input->v_gpio, 1);
+ break;
+ default:
+ break;
+ }
+ switch (input->amux) {
+ case TM6000_AMUX_ADC1:
+ tm6000_set_reg_mask(dev,
+ TM6000_REQ07_REB_VADC_AADC_MODE, 0x00, 0x0f);
+ break;
+ case TM6000_AMUX_ADC2:
+ tm6000_set_reg_mask(dev,
+ TM6000_REQ07_REB_VADC_AADC_MODE, 0x04, 0x0f);
+ break;
+ default:
+ break;
+ }
+ }
+ if (input->type == TM6000_INPUT_SVIDEO) {
+ for (i = 0; i < ARRAY_SIZE(svideo_stds); i++) {
+ if (dev->norm & svideo_stds[i].id) {
+ rc = tm6000_load_std(dev, svideo_stds[i].common);
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ } else {
+ for (i = 0; i < ARRAY_SIZE(composite_stds); i++) {
+ if (dev->norm & composite_stds[i].id) {
+ rc = tm6000_load_std(dev, composite_stds[i].common);
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ }
+
+ret:
+ if (rc < 0)
+ return rc;
+
+ if ((dev->dev_type == TM6010) &&
+ ((input->amux == TM6000_AMUX_SIF1) ||
+ (input->amux == TM6000_AMUX_SIF2)))
+ tm6000_set_audio_std(dev);
+
+ msleep(40);
+
+ return 0;
+}
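
tm6000_set_standard() is driven by dev->norm, which the V4L2 ioctl handler fills in before calling it (the vidioc_s_std() hunk in tm6000-video.c further down does exactly this). A hedged sketch of that call path, with an illustrative wrapper name:

static int example_apply_norm(struct tm6000_core *dev, v4l2_std_id norm)
{
	dev->norm = norm;		 /* remember the requested standard */
	return tm6000_set_standard(dev); /* program video and SIF audio     */
}
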
diff --git a/drivers/staging/tm6000/tm6000-usb-isoc.h b/drivers/media/video/tm6000/tm6000-usb-isoc.h
index 084c2a8904a..99d15a55aa0 100644
--- a/drivers/staging/tm6000/tm6000-usb-isoc.h
+++ b/drivers/media/video/tm6000/tm6000-usb-isoc.h
@@ -46,5 +46,5 @@ struct usb_isoc_ctl {
int tmp_buf_len;
/* Stores already requested buffers */
- struct tm6000_buffer *buf;
+ struct tm6000_buffer *buf;
};
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/media/video/tm6000/tm6000-video.c
index 8d8b939915d..1e5ace0b5d1 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/media/video/tm6000/tm6000-video.c
@@ -19,6 +19,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -202,17 +203,6 @@ static inline void buffer_filled(struct tm6000_core *dev,
wake_up(&buf->vb.done);
}
-const char *tm6000_msg_type[] = {
- "unknown(0)", /* 0 */
- "video", /* 1 */
- "audio", /* 2 */
- "vbi", /* 3 */
- "pts", /* 4 */
- "err", /* 5 */
- "unknown(6)", /* 6 */
- "unknown(7)", /* 7 */
-};
-
/*
* Identify the tm5600/6000 buffer header type and properly handles
*/
@@ -286,17 +276,18 @@ static int copy_streams(u8 *data, unsigned long len,
if (size > TM6000_URB_MSG_LEN)
size = TM6000_URB_MSG_LEN;
pktsize = TM6000_URB_MSG_LEN;
- /* calculate position in buffer
- * and change the buffer
+ /*
+ * calculate position in buffer and change the buffer
*/
switch (cmd) {
case TM6000_URB_MSG_VIDEO:
if (!dev->radio) {
if ((dev->isoc_ctl.vfield != field) &&
(field == 1)) {
- /* Announces that a new buffer
- * were filled
- */
+ /*
+ * Announces that a new buffer
+ * was filled
+ */
buffer_filled(dev, dma_q, vbuf);
dprintk(dev, V4L2_DEBUG_ISOC,
"new buffer filled\n");
@@ -321,7 +312,7 @@ static int copy_streams(u8 *data, unsigned long len,
break;
case TM6000_URB_MSG_AUDIO:
case TM6000_URB_MSG_PTS:
- size = pktsize; /* Size is always 180 bytes */
+ size = pktsize; /* Size is always 180 bytes */
break;
}
} else {
@@ -363,7 +354,8 @@ static int copy_streams(u8 *data, unsigned long len,
}
}
if (ptr + pktsize > endp) {
- /* End of URB packet, but cmd processing is not
+ /*
+ * End of URB packet, but cmd processing is not
* complete. Preserve the state for a next packet
*/
dev->isoc_ctl.pos = pos + cpysize;
@@ -521,9 +513,21 @@ static void tm6000_irq_callback(struct urb *urb)
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
int i;
- if (!dev)
+ switch (urb->status) {
+ case 0:
+ case -ETIMEDOUT:
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
return;
+ default:
+ tm6000_err("urb completion error %d.\n", urb->status);
+ break;
+ }
+
spin_lock(&dev->slock);
tm6000_isoc_copy(urb);
spin_unlock(&dev->slock);
@@ -750,7 +754,7 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
struct tm6000_fh *fh = vq->priv_data;
struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb);
struct tm6000_core *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ int rc = 0;
BUG_ON(NULL == fh->fmt);
@@ -776,13 +780,9 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc != 0)
goto fail;
- urb_init = 1;
}
- if (!dev->isoc_ctl.num_bufs)
- urb_init = 1;
-
- if (urb_init) {
+ if (!dev->isoc_ctl.num_bufs) {
rc = tm6000_prepare_isoc(dev);
if (rc < 0)
goto fail;
@@ -1035,8 +1035,8 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
+ struct tm6000_fh *fh = priv;
+ struct tm6000_core *dev = fh->dev;
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1050,11 +1050,12 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
+ struct tm6000_fh *fh = priv;
+ struct tm6000_core *dev = fh->dev;
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+
if (i != fh->type)
return -EINVAL;
@@ -1067,7 +1068,7 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
{
int rc = 0;
- struct tm6000_fh *fh = priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
dev->norm = *norm;
@@ -1464,9 +1465,6 @@ static int tm6000_open(struct file *file)
int i, rc;
int radio = 0;
- printk(KERN_INFO "tm6000: open called (dev=%s)\n",
- video_device_node_name(vdev));
-
dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n",
video_device_node_name(vdev));
@@ -1507,12 +1505,13 @@ static int tm6000_open(struct file *file)
tm6000_get_std_res(dev);
- fh->width = dev->width;
- fh->height = dev->height;
+ fh->width = dev->width;
+ fh->height = dev->height;
dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
"dev->vidq=0x%08lx\n",
- (unsigned long)fh, (unsigned long)dev, (unsigned long)&dev->vidq);
+ (unsigned long)fh, (unsigned long)dev,
+ (unsigned long)&dev->vidq);
dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
"queued=%d\n", list_empty(&dev->vidq.queued));
dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
@@ -1531,13 +1530,13 @@ static int tm6000_open(struct file *file)
dev->mode = TM6000_MODE_ANALOG;
}
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
- NULL, &dev->slock,
- fh->type,
- V4L2_FIELD_INTERLACED,
- sizeof(struct tm6000_buffer), fh, &dev->lock);
-
- if (fh->radio) {
+ if (!fh->radio) {
+ videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
+ NULL, &dev->slock,
+ fh->type,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct tm6000_buffer), fh, &dev->lock);
+ } else {
dprintk(dev, V4L2_DEBUG_OPEN, "video_open: setting radio device\n");
dev->input = 5;
tm6000_set_audio_rinput(dev);
@@ -1583,8 +1582,7 @@ tm6000_poll(struct file *file, struct poll_table_struct *wait)
buf = list_entry(fh->vb_vidq.stream.next, struct tm6000_buffer, vb.stream);
} else {
/* read() capture */
- return videobuf_poll_stream(file, &fh->vb_vidq,
- wait);
+ return videobuf_poll_stream(file, &fh->vb_vidq, wait);
}
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
@@ -1605,9 +1603,18 @@ static int tm6000_release(struct file *file)
dev->users--;
res_free(dev, fh);
+
if (!dev->users) {
+ int err;
+
tm6000_uninit_isoc(dev);
- videobuf_mmap_free(&fh->vb_vidq);
+
+ if (!fh->radio)
+ videobuf_mmap_free(&fh->vb_vidq);
+
+ err = tm6000_reset(dev);
+ if (err < 0)
+ dev_err(&vdev->dev, "reset failed: %d\n", err);
}
kfree(fh);
@@ -1617,22 +1624,19 @@ static int tm6000_release(struct file *file)
static int tm6000_mmap(struct file *file, struct vm_area_struct * vma)
{
- struct tm6000_fh *fh = file->private_data;
- int ret;
-
- ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
+ struct tm6000_fh *fh = file->private_data;
- return ret;
+ return videobuf_mmap_mapper(&fh->vb_vidq, vma);
}
static struct v4l2_file_operations tm6000_fops = {
- .owner = THIS_MODULE,
- .open = tm6000_open,
- .release = tm6000_release,
- .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
- .read = tm6000_read,
- .poll = tm6000_poll,
- .mmap = tm6000_mmap,
+ .owner = THIS_MODULE,
+ .open = tm6000_open,
+ .release = tm6000_release,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .read = tm6000_read,
+ .poll = tm6000_poll,
+ .mmap = tm6000_mmap,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
@@ -1693,7 +1697,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
.vidioc_s_frequency = vidioc_s_frequency,
};
-struct video_device tm6000_radio_template = {
+static struct video_device tm6000_radio_template = {
.name = "tm6000",
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
diff --git a/drivers/staging/tm6000/tm6000.h b/drivers/media/video/tm6000/tm6000.h
index c56da628dbe..2777e514eff 100644
--- a/drivers/staging/tm6000/tm6000.h
+++ b/drivers/media/video/tm6000/tm6000.h
@@ -20,9 +20,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* Use the tm6000-hack, instead of the proper initialization code i*/
-/* #define HACK 1 */
-
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/videobuf-vmalloc.h>
@@ -30,7 +27,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <media/v4l2-device.h>
-#include <linux/version.h>
+
#include <linux/dvb/frontend.h>
#include "dvb_demux.h"
#include "dvb_frontend.h"
@@ -172,6 +169,8 @@ struct tm6000_endpoint {
unsigned maxsize;
};
+#define TM6000_QUIRK_NO_USB_DELAY (1 << 0)
+
struct tm6000_core {
/* generic device properties */
char name[30]; /* name (including minor) of the device */
@@ -248,6 +247,7 @@ struct tm6000_core {
/* locks */
struct mutex lock;
+ struct mutex usb_lock;
/* usb transfer */
struct usb_device *udev; /* the usb device */
@@ -262,6 +262,8 @@ struct tm6000_core {
struct usb_isoc_ctl isoc_ctl;
spinlock_t slock;
+
+ unsigned long quirks;
};
enum tm6000_ops_type {
@@ -313,6 +315,7 @@ int tm6000_set_reg_mask(struct tm6000_core *dev, u8 req, u16 value,
u16 index, u16 mask);
int tm6000_i2c_reset(struct tm6000_core *dev, u16 tsleep);
int tm6000_init(struct tm6000_core *dev);
+int tm6000_reset(struct tm6000_core *dev);
int tm6000_init_analog_mode(struct tm6000_core *dev);
int tm6000_init_digital_mode(struct tm6000_core *dev);
@@ -381,7 +384,7 @@ extern int tm6000_debug;
#define dprintk(dev, level, fmt, arg...) do {\
if (tm6000_debug & level) \
printk(KERN_INFO "(%lu) %s %s :"fmt, jiffies, \
- dev->name, __FUNCTION__ , ##arg); } while (0)
+ dev->name, __func__ , ##arg); } while (0)
#define V4L2_DEBUG_REG 0x0004
#define V4L2_DEBUG_I2C 0x0008
@@ -392,4 +395,4 @@ extern int tm6000_debug;
#define tm6000_err(fmt, arg...) do {\
printk(KERN_ERR "tm6000 %s :"fmt, \
- __FUNCTION__ , ##arg); } while (0)
+ __func__ , ##arg); } while (0)
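
__FUNCTION__ is a GCC extension; __func__ is the C99 spelling, which is why both debug macros are switched over. An illustrative call site for the dprintk() macro above (the surrounding function is made up; V4L2_DEBUG_OPEN is one of the debug classes defined in this header):

static void example_trace(struct tm6000_core *dev)
{
	/* prints only when tm6000_debug has V4L2_DEBUG_OPEN set */
	dprintk(dev, V4L2_DEBUG_OPEN, "users=%d\n", dev->users);
}
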
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c46a3bb9585..f22dbef9b95 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -1695,14 +1695,17 @@ static int tvaudio_s_ctrl(struct v4l2_subdev *sd,
case V4L2_CID_AUDIO_BALANCE:
{
int volume, balance;
+
if (!(desc->flags & CHIP_HAS_VOLUME))
break;
- volume = max(chip->left,chip->right);
+ volume = max(chip->left, chip->right);
balance = ctrl->value;
+ chip->left = (min(65536 - balance, 32768) * volume) / 32768;
+ chip->right = (min(balance, 32768) * volume) / 32768;
- chip_write(chip,desc->leftreg,desc->volfunc(chip->left));
- chip_write(chip,desc->rightreg,desc->volfunc(chip->right));
+ chip_write(chip, desc->leftreg, desc->volfunc(chip->left));
+ chip_write(chip, desc->rightreg, desc->volfunc(chip->right));
return 0;
}
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 9b3e828b077..926f0393115 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index e927d25e0d3..6abaa16ae13 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <media/v4l2-device.h>
#include <media/tvp5150.h>
#include <media/v4l2-chip-ident.h>
diff --git a/drivers/media/video/tvp5150_reg.h b/drivers/media/video/tvp5150_reg.h
index 4240043c0b2..25a99494491 100644
--- a/drivers/media/video/tvp5150_reg.h
+++ b/drivers/media/video/tvp5150_reg.h
@@ -45,7 +45,22 @@
/* Reserved 1Fh-27h */
-#define TVP5150_VIDEO_STD 0x28 /* Video standard */
+#define VIDEO_STD_MASK (0x07 >> 1)
+#define TVP5150_VIDEO_STD 0x28 /* Video standard */
+#define VIDEO_STD_AUTO_SWITCH_BIT 0x00
+#define VIDEO_STD_NTSC_MJ_BIT 0x02
+#define VIDEO_STD_PAL_BDGHIN_BIT 0x04
+#define VIDEO_STD_PAL_M_BIT 0x06
+#define VIDEO_STD_PAL_COMBINATION_N_BIT 0x08
+#define VIDEO_STD_NTSC_4_43_BIT 0x0a
+#define VIDEO_STD_SECAM_BIT 0x0c
+
+#define VIDEO_STD_NTSC_MJ_BIT_AS 0x01
+#define VIDEO_STD_PAL_BDGHIN_BIT_AS 0x03
+#define VIDEO_STD_PAL_M_BIT_AS 0x05
+#define VIDEO_STD_PAL_COMBINATION_N_BIT_AS 0x07
+#define VIDEO_STD_NTSC_4_43_BIT_AS 0x09
+#define VIDEO_STD_SECAM_BIT_AS 0x0b
/* Reserved 29h-2bh */
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index b799851bf3d..7875e80cb2f 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/module.h>
#include <media/tvp7002.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
@@ -128,7 +129,7 @@ static const struct i2c_reg_value tvp7002_init_default[] = {
{ TVP7002_ADC_SETUP, 0x50, TVP7002_WRITE },
{ TVP7002_COARSE_CLAMP_CTL, 0x00, TVP7002_WRITE },
{ TVP7002_SOG_CLAMP, 0x80, TVP7002_WRITE },
- { TVP7002_RGB_COARSE_CLAMP_CTL, 0x00, TVP7002_WRITE },
+ { TVP7002_RGB_COARSE_CLAMP_CTL, 0x8c, TVP7002_WRITE },
{ TVP7002_SOG_COARSE_CLAMP_CTL, 0x04, TVP7002_WRITE },
{ TVP7002_ALC_PLACEMENT, 0x5a, TVP7002_WRITE },
{ 0x32, 0x18, TVP7002_RESERVED },
@@ -182,7 +183,6 @@ static const struct i2c_reg_value tvp7002_parms_480P[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x35, TVP7002_WRITE },
{ TVP7002_HPLL_FDBK_DIV_LSBS, 0xa0, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x02, TVP7002_WRITE },
- { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x91, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_MSBS, 0x00, TVP7002_WRITE },
{ TVP7002_AVID_STOP_PIXEL_LSBS, 0x0B, TVP7002_WRITE },
@@ -204,7 +204,6 @@ static const struct i2c_reg_value tvp7002_parms_576P[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x36, TVP7002_WRITE },
{ TVP7002_HPLL_FDBK_DIV_LSBS, 0x00, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x18, TVP7002_WRITE },
- { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x9B, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_MSBS, 0x00, TVP7002_WRITE },
{ TVP7002_AVID_STOP_PIXEL_LSBS, 0x0F, TVP7002_WRITE },
@@ -226,7 +225,6 @@ static const struct i2c_reg_value tvp7002_parms_1080I60[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x89, TVP7002_WRITE },
{ TVP7002_HPLL_FDBK_DIV_LSBS, 0x80, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
- { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
{ TVP7002_AVID_STOP_PIXEL_LSBS, 0x8a, TVP7002_WRITE },
@@ -248,7 +246,6 @@ static const struct i2c_reg_value tvp7002_parms_1080P60[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x89, TVP7002_WRITE },
{ TVP7002_HPLL_FDBK_DIV_LSBS, 0x80, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0xE0, TVP7002_WRITE },
- { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
{ TVP7002_AVID_STOP_PIXEL_LSBS, 0x8a, TVP7002_WRITE },
@@ -270,7 +267,6 @@ static const struct i2c_reg_value tvp7002_parms_1080I50[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0xa5, TVP7002_WRITE },
{ TVP7002_HPLL_FDBK_DIV_LSBS, 0x00, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
- { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
{ TVP7002_AVID_STOP_PIXEL_LSBS, 0x8a, TVP7002_WRITE },
@@ -292,7 +288,6 @@ static const struct i2c_reg_value tvp7002_parms_720P60[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x67, TVP7002_WRITE },
{ TVP7002_HPLL_FDBK_DIV_LSBS, 0x20, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0xa0, TVP7002_WRITE },
- { TVP7002_HPLL_PHASE_SEL, 0x16, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x47, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
{ TVP7002_AVID_STOP_PIXEL_LSBS, 0x4B, TVP7002_WRITE },
@@ -314,7 +309,6 @@ static const struct i2c_reg_value tvp7002_parms_720P50[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x7b, TVP7002_WRITE },
{ TVP7002_HPLL_FDBK_DIV_LSBS, 0xc0, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
- { TVP7002_HPLL_PHASE_SEL, 0x16, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x47, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
{ TVP7002_AVID_STOP_PIXEL_LSBS, 0x4B, TVP7002_WRITE },
@@ -687,6 +681,9 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
u8 cpl_msb;
int index;
+ /* Return invalid preset if no active input is detected */
+ qpreset->preset = V4L2_DV_INVALID;
+
device = to_tvp7002(sd);
/* Read standards from device registers */
@@ -720,8 +717,6 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
if (index == NUM_PRESETS) {
v4l2_dbg(1, debug, sd, "detection failed: lpf = %x, cpl = %x\n",
lpfr, cpln);
- /* Could not detect a signal, so return the 'invalid' preset */
- qpreset->preset = V4L2_DV_INVALID;
return 0;
}
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 742482e3001..a514fa61116 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -22,11 +22,13 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
-#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-subdev.h>
+
#include <media/soc_camera.h>
#include <media/tw9910.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-subdev.h>
#define GET_ID(val) ((val & 0xF8) >> 3)
#define GET_REV(val) (val & 0x07)
@@ -203,6 +205,10 @@
#define RTSEL_FIELD 0x06 /* 0110 = FIELD */
#define RTSEL_RTCO 0x07 /* 0111 = RTCO ( Real Time Control ) */
+/* HSYNC start and end are constant for now */
+#define HSYNC_START 0x0260
+#define HSYNC_END 0x0300
+
/*
* structure
*/
@@ -220,22 +226,11 @@ struct tw9910_scale_ctrl {
u16 vscale;
};
-struct tw9910_cropping_ctrl {
- u16 vdelay;
- u16 vactive;
- u16 hdelay;
- u16 hactive;
-};
-
-struct tw9910_hsync_ctrl {
- u16 start;
- u16 end;
-};
-
struct tw9910_priv {
struct v4l2_subdev subdev;
struct tw9910_video_info *info;
const struct tw9910_scale_ctrl *scale;
+ v4l2_std_id norm;
u32 revision;
};
@@ -329,11 +324,6 @@ static const struct tw9910_scale_ctrl tw9910_pal_scales[] = {
},
};
-static const struct tw9910_hsync_ctrl tw9910_hsync_ctrl = {
- .start = 0x0260,
- .end = 0x0300,
-};
-
/*
* general function
*/
@@ -378,21 +368,20 @@ static int tw9910_set_scale(struct i2c_client *client,
return ret;
}
-static int tw9910_set_hsync(struct i2c_client *client,
- const struct tw9910_hsync_ctrl *hsync)
+static int tw9910_set_hsync(struct i2c_client *client)
{
struct tw9910_priv *priv = to_tw9910(client);
int ret;
/* bit 10 - 3 */
ret = i2c_smbus_write_byte_data(client, HSBEGIN,
- (hsync->start & 0x07F8) >> 3);
+ (HSYNC_START & 0x07F8) >> 3);
if (ret < 0)
return ret;
/* bit 10 - 3 */
ret = i2c_smbus_write_byte_data(client, HSEND,
- (hsync->end & 0x07F8) >> 3);
+ (HSYNC_END & 0x07F8) >> 3);
if (ret < 0)
return ret;
@@ -400,8 +389,8 @@ static int tw9910_set_hsync(struct i2c_client *client,
/* bit 2 - 0 */
if (1 == priv->revision)
ret = tw9910_mask_set(client, HSLOWCTL, 0x77,
- (hsync->start & 0x0007) << 4 |
- (hsync->end & 0x0007));
+ (HSYNC_START & 0x0007) << 4 |
+ (HSYNC_END & 0x0007));
return ret;
}
@@ -433,12 +422,11 @@ static int tw9910_power(struct i2c_client *client, int enable)
return tw9910_mask_set(client, ACNTL2, ACNTL2_PDN_MASK, acntl2);
}
-static const struct tw9910_scale_ctrl*
-tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height)
+static const struct tw9910_scale_ctrl *tw9910_select_norm(v4l2_std_id norm,
+ u32 width, u32 height)
{
const struct tw9910_scale_ctrl *scale;
const struct tw9910_scale_ctrl *ret = NULL;
- v4l2_std_id norm = icd->vdev->current_norm;
__u32 diff = 0xffffffff, tmp;
int size, i;
@@ -465,7 +453,7 @@ tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height)
}
/*
- * soc_camera_ops function
+ * subdevice operations
*/
static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
{
@@ -507,49 +495,27 @@ static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
return tw9910_power(client, enable);
}
-static int tw9910_set_bus_param(struct soc_camera_device *icd,
- unsigned long flags)
+static int tw9910_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 val = VSSL_VVALID | HSSL_DVALID;
+ struct tw9910_priv *priv = to_tw9910(client);
- /*
- * set OUTCTR1
- *
- * We use VVALID and DVALID signals to control VSYNC and HSYNC
- * outputs, in this mode their polarity is inverted.
- */
- if (flags & SOCAM_HSYNC_ACTIVE_LOW)
- val |= HSP_HI;
+ *norm = priv->norm;
- if (flags & SOCAM_VSYNC_ACTIVE_LOW)
- val |= VSP_HI;
-
- return i2c_smbus_write_byte_data(client, OUTCTR1, val);
+ return 0;
}
-static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd)
+static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
- unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_VSYNC_ACTIVE_LOW | SOCAM_HSYNC_ACTIVE_LOW |
- SOCAM_DATA_ACTIVE_HIGH | priv->info->buswidth;
- return soc_camera_apply_sensor_flags(icl, flags);
-}
-
-static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
-{
- int ret = -EINVAL;
+ if (!(norm & (V4L2_STD_NTSC | V4L2_STD_PAL)))
+ return -EINVAL;
- if (norm & (V4L2_STD_NTSC | V4L2_STD_PAL))
- ret = 0;
+ priv->norm = norm;
- return ret;
+ return 0;
}
static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
@@ -600,19 +566,17 @@ static int tw9910_s_register(struct v4l2_subdev *sd,
}
#endif
-static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int tw9910_set_frame(struct v4l2_subdev *sd, u32 *width, u32 *height)
{
- struct v4l2_rect *rect = &a->c;
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- int ret = -EINVAL;
- u8 val;
+ int ret = -EINVAL;
+ u8 val;
/*
* select suitable norm
*/
- priv->scale = tw9910_select_norm(icd, rect->width, rect->height);
+ priv->scale = tw9910_select_norm(priv->norm, *width, *height);
if (!priv->scale)
goto tw9910_set_fmt_error;
@@ -670,14 +634,12 @@ static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
/*
* set hsync
*/
- ret = tw9910_set_hsync(client, &tw9910_hsync_ctrl);
+ ret = tw9910_set_hsync(client);
if (ret < 0)
goto tw9910_set_fmt_error;
- rect->width = priv->scale->width;
- rect->height = priv->scale->height;
- rect->left = 0;
- rect->top = 0;
+ *width = priv->scale->width;
+ *height = priv->scale->height;
return ret;
@@ -694,25 +656,15 @@ static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
- if (!priv->scale) {
- int ret;
- struct v4l2_crop crop = {
- .c = {
- .left = 0,
- .top = 0,
- .width = 640,
- .height = 480,
- },
- };
- ret = tw9910_s_crop(sd, &crop);
- if (ret < 0)
- return ret;
- }
-
a->c.left = 0;
a->c.top = 0;
- a->c.width = priv->scale->width;
- a->c.height = priv->scale->height;
+ if (priv->norm & V4L2_STD_NTSC) {
+ a->c.width = 640;
+ a->c.height = 480;
+ } else {
+ a->c.width = 768;
+ a->c.height = 576;
+ }
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
return 0;
@@ -720,14 +672,19 @@ static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct tw9910_priv *priv = to_tw9910(client);
+
a->bounds.left = 0;
a->bounds.top = 0;
- a->bounds.width = 768;
- a->bounds.height = 576;
- a->defrect.left = 0;
- a->defrect.top = 0;
- a->defrect.width = 640;
- a->defrect.height = 480;
+ if (priv->norm & V4L2_STD_NTSC) {
+ a->bounds.width = 640;
+ a->bounds.height = 480;
+ } else {
+ a->bounds.width = 768;
+ a->bounds.height = 576;
+ }
+ a->defrect = a->bounds;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
a->pixelaspect.denominator = 1;
@@ -743,15 +700,8 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
if (!priv->scale) {
int ret;
- struct v4l2_crop crop = {
- .c = {
- .left = 0,
- .top = 0,
- .width = 640,
- .height = 480,
- },
- };
- ret = tw9910_s_crop(sd, &crop);
+ u32 width = 640, height = 480;
+ ret = tw9910_set_frame(sd, &width, &height);
if (ret < 0)
return ret;
}
@@ -768,17 +718,7 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
static int tw9910_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
- /* See tw9910_s_crop() - no proper cropping support */
- struct v4l2_crop a = {
- .c = {
- .left = 0,
- .top = 0,
- .width = mf->width,
- .height = mf->height,
- },
- };
+ u32 width = mf->width, height = mf->height;
int ret;
WARN_ON(mf->field != V4L2_FIELD_ANY &&
@@ -792,10 +732,10 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
mf->colorspace = V4L2_COLORSPACE_JPEG;
- ret = tw9910_s_crop(sd, &a);
+ ret = tw9910_set_frame(sd, &width, &height);
if (!ret) {
- mf->width = priv->scale->width;
- mf->height = priv->scale->height;
+ mf->width = width;
+ mf->height = height;
}
return ret;
}
@@ -804,7 +744,7 @@ static int tw9910_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = client->dev.platform_data;
+ struct tw9910_priv *priv = to_tw9910(client);
const struct tw9910_scale_ctrl *scale;
if (V4L2_FIELD_ANY == mf->field) {
@@ -820,7 +760,7 @@ static int tw9910_try_fmt(struct v4l2_subdev *sd,
/*
* select suitable norm
*/
- scale = tw9910_select_norm(icd, mf->width, mf->height);
+ scale = tw9910_select_norm(priv->norm, mf->width, mf->height);
if (!scale)
return -EINVAL;
@@ -830,16 +770,11 @@ static int tw9910_try_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int tw9910_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+static int tw9910_video_probe(struct i2c_client *client)
{
struct tw9910_priv *priv = to_tw9910(client);
s32 id;
- /* We must have a parent by now. And it cannot be a wrong one. */
- BUG_ON(!icd->parent ||
- to_soc_camera_host(icd->parent)->nr != icd->iface);
-
/*
* tw9910 only use 8 or 16 bit bus width
*/
@@ -868,20 +803,15 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
dev_info(&client->dev,
"tw9910 Product ID %0x:%0x\n", id, priv->revision);
- icd->vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL;
- icd->vdev->current_norm = V4L2_STD_NTSC;
+ priv->norm = V4L2_STD_NTSC;
return 0;
}
-static struct soc_camera_ops tw9910_ops = {
- .set_bus_param = tw9910_set_bus_param,
- .query_bus_param = tw9910_query_bus_param,
-};
-
static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
.g_chip_ident = tw9910_g_chip_ident,
.s_std = tw9910_s_std,
+ .g_std = tw9910_g_std,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = tw9910_g_register,
.s_register = tw9910_s_register,
@@ -898,6 +828,45 @@ static int tw9910_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
return 0;
}
+static int tw9910_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+ cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+
+ return 0;
+}
+
+static int tw9910_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ u8 val = VSSL_VVALID | HSSL_DVALID;
+ unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+
+ /*
+ * set OUTCTR1
+ *
+ * We use VVALID and DVALID signals to control VSYNC and HSYNC
+ * outputs; in this mode their polarity is inverted.
+ */
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ val |= HSP_HI;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ val |= VSP_HI;
+
+ return i2c_smbus_write_byte_data(client, OUTCTR1, val);
+}
+
static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
.s_stream = tw9910_s_stream,
.g_mbus_fmt = tw9910_g_fmt,
@@ -905,8 +874,9 @@ static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
.try_mbus_fmt = tw9910_try_fmt,
.cropcap = tw9910_cropcap,
.g_crop = tw9910_g_crop,
- .s_crop = tw9910_s_crop,
.enum_mbus_fmt = tw9910_enum_fmt,
+ .g_mbus_config = tw9910_g_mbus_config,
+ .s_mbus_config = tw9910_s_mbus_config,
};
static struct v4l2_subdev_ops tw9910_subdev_ops = {
@@ -922,23 +892,18 @@ static int tw9910_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
- struct tw9910_priv *priv;
- struct tw9910_video_info *info;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct i2c_adapter *adapter =
+ struct tw9910_priv *priv;
+ struct tw9910_video_info *info;
+ struct i2c_adapter *adapter =
to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
- int ret;
+ struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ int ret;
- if (!icd) {
- dev_err(&client->dev, "TW9910: missing soc-camera data!\n");
+ if (!icl || !icl->priv) {
+ dev_err(&client->dev, "TW9910: missing platform data!\n");
return -EINVAL;
}
- icl = to_soc_camera_link(icd);
- if (!icl || !icl->priv)
- return -EINVAL;
-
info = icl->priv;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -956,14 +921,9 @@ static int tw9910_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops);
- icd->ops = &tw9910_ops;
- icd->iface = icl->bus_id;
-
- ret = tw9910_video_probe(icd, client);
- if (ret) {
- icd->ops = NULL;
+ ret = tw9910_video_probe(client);
+ if (ret)
kfree(priv);
- }
return ret;
}
@@ -971,9 +931,7 @@ static int tw9910_probe(struct i2c_client *client,
static int tw9910_remove(struct i2c_client *client)
{
struct tw9910_priv *priv = to_tw9910(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- icd->ops = NULL;
kfree(priv);
return 0;
}
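
The tw9910 hunks above make the reported capture window depend on the standard stored in priv->norm instead of on soc-camera state. A minimal user-space sketch of how that behaviour appears through the V4L2 API (assumes an already-open capture device on fd; error handling omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void show_norm_dependent_bounds(int fd)
{
	v4l2_std_id std = V4L2_STD_PAL;
	struct v4l2_cropcap cap;

	/* Switch the decoder to PAL, then ask for the capture bounds. */
	ioctl(fd, VIDIOC_S_STD, &std);

	memset(&cap, 0, sizeof(cap));
	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ioctl(fd, VIDIOC_CROPCAP, &cap);
	/*
	 * With the change above the bounds are 768x576 for PAL and
	 * 640x480 for NTSC, and cap.defrect equals cap.bounds.
	 */
}
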
diff --git a/drivers/media/video/usbvision/Makefile b/drivers/media/video/usbvision/Makefile
index 33871875094..aea1e3b5f06 100644
--- a/drivers/media/video/usbvision/Makefile
+++ b/drivers/media/video/usbvision/Makefile
@@ -2,5 +2,5 @@ usbvision-objs := usbvision-core.o usbvision-video.o usbvision-i2c.o usbvision-
obj-$(CONFIG_VIDEO_USBVISION) += usbvision.o
-EXTRA_CFLAGS += -Idrivers/media/video
-EXTRA_CFLAGS += -Idrivers/media/common/tuners
+ccflags-y += -Idrivers/media/video
+ccflags-y += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/usbvision/usbvision-cards.c b/drivers/media/video/usbvision/usbvision-cards.c
index 8f5266157f1..3103d0d020e 100644
--- a/drivers/media/video/usbvision/usbvision-cards.c
+++ b/drivers/media/video/usbvision/usbvision-cards.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
+#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/tuner.h>
#include "usbvision.h"
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 10c2364f3e8..254d3268884 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1016,7 +1016,8 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
menu_info = &mapping->menu_info[query_menu->index];
- if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) {
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
+ (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
s32 bitmap;
if (!ctrl->cached) {
@@ -1225,7 +1226,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
/* Valid menu indices are reported by the GET_RES request for
* UVC controls that support it.
*/
- if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) {
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
+ (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index e4100b1f68d..656d4c9e3b9 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -114,6 +114,11 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_RGBP,
.fcc = V4L2_PIX_FMT_RGB565,
},
+ {
+ .name = "H.264",
+ .guid = UVC_GUID_FORMAT_H264,
+ .fcc = V4L2_PIX_FMT_H264,
+ },
};
/* ------------------------------------------------------------------------
@@ -2331,6 +2336,14 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_DEF },
+ /* The Imaging Source USB CCD cameras */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x199e,
+ .idProduct = 0x8102,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0 },
/* Bodelin ProScopeHR */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_DEV_HI
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index ea71d5f1f6d..dadf11f704d 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -32,7 +32,7 @@
* UVC ioctls
*/
static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
- struct uvc_xu_control_mapping *xmap, int old)
+ struct uvc_xu_control_mapping *xmap)
{
struct uvc_control_mapping *map;
unsigned int size;
@@ -58,13 +58,6 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
break;
case V4L2_CTRL_TYPE_MENU:
- if (old) {
- uvc_trace(UVC_TRACE_CONTROL, "V4L2_CTRL_TYPE_MENU not "
- "supported for UVCIOC_CTRL_MAP_OLD.\n");
- ret = -EINVAL;
- goto done;
- }
-
size = xmap->menu_count * sizeof(*map->menu_info);
map->menu_info = kmalloc(size, GFP_KERNEL);
if (map->menu_info == NULL) {
@@ -538,20 +531,6 @@ static int uvc_v4l2_release(struct file *file)
return 0;
}
-static void uvc_v4l2_ioctl_warn(void)
-{
- static int warned;
-
- if (warned)
- return;
-
- uvc_printk(KERN_INFO, "Deprecated UVCIOC_CTRL_{ADD,MAP_OLD,GET,SET} "
- "ioctls will be removed in 2.6.42.\n");
- uvc_printk(KERN_INFO, "See http://www.ideasonboard.org/uvc/upgrade/ "
- "for upgrade instructions.\n");
- warned = 1;
-}
-
static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *vdev = video_devdata(file);
@@ -1032,37 +1011,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
uvc_trace(UVC_TRACE_IOCTL, "Unsupported ioctl 0x%08x\n", cmd);
return -EINVAL;
- /* Dynamic controls. UVCIOC_CTRL_ADD, UVCIOC_CTRL_MAP_OLD,
- * UVCIOC_CTRL_GET and UVCIOC_CTRL_SET are deprecated and scheduled for
- * removal in 2.6.42.
- */
- case __UVCIOC_CTRL_ADD:
- uvc_v4l2_ioctl_warn();
- return -EEXIST;
-
- case __UVCIOC_CTRL_MAP_OLD:
- uvc_v4l2_ioctl_warn();
- case __UVCIOC_CTRL_MAP:
case UVCIOC_CTRL_MAP:
- return uvc_ioctl_ctrl_map(chain, arg,
- cmd == __UVCIOC_CTRL_MAP_OLD);
-
- case __UVCIOC_CTRL_GET:
- case __UVCIOC_CTRL_SET:
- {
- struct uvc_xu_control *xctrl = arg;
- struct uvc_xu_control_query xqry = {
- .unit = xctrl->unit,
- .selector = xctrl->selector,
- .query = cmd == __UVCIOC_CTRL_GET
- ? UVC_GET_CUR : UVC_SET_CUR,
- .size = xctrl->size,
- .data = xctrl->data,
- };
-
- uvc_v4l2_ioctl_warn();
- return uvc_xu_ctrl_query(chain, &xqry);
- }
+ return uvc_ioctl_ctrl_map(chain, arg);
case UVCIOC_CTRL_QUERY:
return uvc_xu_ctrl_query(chain, arg);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index ffd1158628b..b015e8e5e8b 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -790,8 +790,12 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
for (i = 0; i < UVC_URBS; ++i) {
if (stream->urb_buffer[i]) {
+#ifndef CONFIG_DMA_NONCOHERENT
usb_free_coherent(stream->dev->udev, stream->urb_size,
stream->urb_buffer[i], stream->urb_dma[i]);
+#else
+ kfree(stream->urb_buffer[i]);
+#endif
stream->urb_buffer[i] = NULL;
}
}
@@ -831,9 +835,14 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
for (; npackets > 1; npackets /= 2) {
for (i = 0; i < UVC_URBS; ++i) {
stream->urb_size = psize * npackets;
+#ifndef CONFIG_DMA_NONCOHERENT
stream->urb_buffer[i] = usb_alloc_coherent(
stream->dev->udev, stream->urb_size,
gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
+#else
+ stream->urb_buffer[i] =
+ kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
+#endif
if (!stream->urb_buffer[i]) {
uvc_free_urb_buffers(stream);
break;
@@ -908,10 +917,14 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
urb->context = stream;
urb->pipe = usb_rcvisocpipe(stream->dev->udev,
ep->desc.bEndpointAddress);
+#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_dma = stream->urb_dma[i];
+#else
+ urb->transfer_flags = URB_ISO_ASAP;
+#endif
urb->interval = ep->desc.bInterval;
urb->transfer_buffer = stream->urb_buffer[i];
- urb->transfer_dma = stream->urb_dma[i];
urb->complete = uvc_video_complete;
urb->number_of_packets = npackets;
urb->transfer_buffer_length = size;
@@ -969,8 +982,10 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
usb_fill_bulk_urb(urb, stream->dev->udev, pipe,
stream->urb_buffer[i], size, uvc_video_complete,
stream);
+#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = stream->urb_dma[i];
+#endif
stream->urb[i] = urb;
}
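
The uvc_video.c hunks above select between coherent USB buffers and plain kernel memory at build time. A condensed sketch of that allocation pattern, using a hypothetical example_alloc_urb_buffer() helper (the single-buffer shape is illustrative, not part of the driver):

#include <linux/slab.h>
#include <linux/usb.h>

/* Allocate one transfer buffer, preferring a coherent DMA mapping. */
static void *example_alloc_urb_buffer(struct usb_device *udev, size_t size,
				      struct urb *urb, dma_addr_t *dma)
{
	void *buf;

#ifndef CONFIG_DMA_NONCOHERENT
	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, dma);
	if (buf) {
		/* Tell the USB core the buffer is already DMA-mapped. */
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_dma = *dma;
	}
#else
	/* Non-coherent platforms: plain memory, mapped per transfer. */
	buf = kmalloc(size, GFP_KERNEL);
#endif
	if (buf)
		urb->transfer_buffer = buf;
	return buf;
}

The matching free path mirrors the same #ifdef, calling usb_free_coherent() or kfree() as uvc_free_urb_buffers() does above.
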
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index cbdd49bf8b6..4c1392ebcd4 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -1,106 +1,16 @@
#ifndef _USB_VIDEO_H_
#define _USB_VIDEO_H_
-#include <linux/kernel.h>
-#include <linux/videodev2.h>
-
-#ifndef __KERNEL__
-/*
- * This header provides binary compatibility with applications using the private
- * uvcvideo API. This API is deprecated and will be removed in 2.6.42.
- * Applications should be recompiled against the public linux/uvcvideo.h header.
- */
-#warn "The uvcvideo.h header is deprecated, use linux/uvcvideo.h instead."
-
-/*
- * Dynamic controls
- */
-
-/* Data types for UVC control data */
-#define UVC_CTRL_DATA_TYPE_RAW 0
-#define UVC_CTRL_DATA_TYPE_SIGNED 1
-#define UVC_CTRL_DATA_TYPE_UNSIGNED 2
-#define UVC_CTRL_DATA_TYPE_BOOLEAN 3
-#define UVC_CTRL_DATA_TYPE_ENUM 4
-#define UVC_CTRL_DATA_TYPE_BITMASK 5
-
-/* Control flags */
-#define UVC_CONTROL_SET_CUR (1 << 0)
-#define UVC_CONTROL_GET_CUR (1 << 1)
-#define UVC_CONTROL_GET_MIN (1 << 2)
-#define UVC_CONTROL_GET_MAX (1 << 3)
-#define UVC_CONTROL_GET_RES (1 << 4)
-#define UVC_CONTROL_GET_DEF (1 << 5)
-#define UVC_CONTROL_RESTORE (1 << 6)
-#define UVC_CONTROL_AUTO_UPDATE (1 << 7)
-
-#define UVC_CONTROL_GET_RANGE (UVC_CONTROL_GET_CUR | UVC_CONTROL_GET_MIN | \
- UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES | \
- UVC_CONTROL_GET_DEF)
-
-struct uvc_menu_info {
- __u32 value;
- __u8 name[32];
-};
-
-struct uvc_xu_control_mapping {
- __u32 id;
- __u8 name[32];
- __u8 entity[16];
- __u8 selector;
-
- __u8 size;
- __u8 offset;
- __u32 v4l2_type;
- __u32 data_type;
-
- struct uvc_menu_info __user *menu_info;
- __u32 menu_count;
-
- __u32 reserved[4];
-};
-
-#endif
-
-struct uvc_xu_control_info {
- __u8 entity[16];
- __u8 index;
- __u8 selector;
- __u16 size;
- __u32 flags;
-};
-
-struct uvc_xu_control_mapping_old {
- __u8 reserved[64];
-};
-
-struct uvc_xu_control {
- __u8 unit;
- __u8 selector;
- __u16 size;
- __u8 __user *data;
-};
-
#ifndef __KERNEL__
-#define UVCIOC_CTRL_ADD _IOW('U', 1, struct uvc_xu_control_info)
-#define UVCIOC_CTRL_MAP_OLD _IOWR('U', 2, struct uvc_xu_control_mapping_old)
-#define UVCIOC_CTRL_MAP _IOWR('U', 2, struct uvc_xu_control_mapping)
-#define UVCIOC_CTRL_GET _IOWR('U', 3, struct uvc_xu_control)
-#define UVCIOC_CTRL_SET _IOW('U', 4, struct uvc_xu_control)
-#else
-#define __UVCIOC_CTRL_ADD _IOW('U', 1, struct uvc_xu_control_info)
-#define __UVCIOC_CTRL_MAP_OLD _IOWR('U', 2, struct uvc_xu_control_mapping_old)
-#define __UVCIOC_CTRL_MAP _IOWR('U', 2, struct uvc_xu_control_mapping)
-#define __UVCIOC_CTRL_GET _IOWR('U', 3, struct uvc_xu_control)
-#define __UVCIOC_CTRL_SET _IOW('U', 4, struct uvc_xu_control)
-#endif
-
-#ifdef __KERNEL__
+#error "The uvcvideo.h header is deprecated, use linux/uvcvideo.h instead."
+#endif /* __KERNEL__ */
+#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/usb.h>
#include <linux/usb/video.h>
#include <linux/uvcvideo.h>
+#include <linux/videodev2.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
@@ -179,6 +89,10 @@ struct uvc_xu_control {
{ 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_H264 \
+ { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
@@ -698,6 +612,4 @@ extern struct usb_host_endpoint *uvc_find_endpoint(
void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
struct uvc_buffer *buf);
-#endif /* __KERNEL__ */
-
#endif
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 61979b70f38..c68531b8827 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -159,11 +159,25 @@ struct v4l2_format32 {
} fmt;
};
-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+/**
+ * struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument
+ * @index: on return, index of the first created buffer
+ * @count: entry: number of requested buffers,
+ * return: number of created buffers
+ * @memory: buffer memory type
+ * @format: frame format, for which buffers are requested
+ * @reserved: future extensions
+ */
+struct v4l2_create_buffers32 {
+ __u32 index;
+ __u32 count;
+ enum v4l2_memory memory;
+ struct v4l2_format32 format;
+ __u32 reserved[8];
+};
+
+static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
- get_user(kp->type, &up->type))
- return -EFAULT;
switch (kp->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -192,11 +206,24 @@ static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
}
}
-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
+ get_user(kp->type, &up->type))
+ return -EFAULT;
+ return __get_v4l2_format32(kp, up);
+}
+
+static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
+ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
+ return -EFAULT;
+ return __get_v4l2_format32(&kp->format, &up->format);
+}
+
+static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
- put_user(kp->type, &up->type))
- return -EFAULT;
switch (kp->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -225,6 +252,22 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
}
}
+static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
+ put_user(kp->type, &up->type))
+ return -EFAULT;
+ return __put_v4l2_format32(kp, up);
+}
+
+static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
+ copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format.fmt)))
+ return -EFAULT;
+ return __put_v4l2_format32(&kp->format, &up->format);
+}
+
struct v4l2_standard32 {
__u32 index;
__u32 id[2]; /* __u64 would get the alignment wrong */
@@ -702,6 +745,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
+#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32)
+#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32)
#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
@@ -721,6 +766,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
struct v4l2_standard v2s;
struct v4l2_ext_controls v2ecs;
struct v4l2_event v2ev;
+ struct v4l2_create_buffers v2crt;
unsigned long vx;
int vi;
} karg;
@@ -751,6 +797,8 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
+ case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
+ case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
}
switch (cmd) {
@@ -775,6 +823,12 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
+ case VIDIOC_CREATE_BUFS:
+ err = get_v4l2_create32(&karg.v2crt, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_PREPARE_BUF:
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF:
@@ -860,6 +914,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
err = put_v4l2_format32(&karg.v2f, up);
break;
+ case VIDIOC_CREATE_BUFS:
+ err = put_v4l2_create32(&karg.v2crt, up);
+ break;
+
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF:
@@ -959,6 +1017,8 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_DQEVENT32:
case VIDIOC_SUBSCRIBE_EVENT:
case VIDIOC_UNSUBSCRIBE_EVENT:
+ case VIDIOC_CREATE_BUFS32:
+ case VIDIOC_PREPARE_BUF32:
ret = do_video_ioctl(file, cmd, arg);
break;
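
The compat layer above makes VIDIOC_CREATE_BUFS and VIDIOC_PREPARE_BUF reachable from 32-bit userspace. A hedged user-space sketch of the new ioctl (assumes a capture device with an already negotiated format on fd):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/*
 * Ask the driver for 'count' extra MMAP buffers; returns the index of
 * the first newly created buffer, or -1 on error.
 */
static int create_extra_buffers(int fd, unsigned int count)
{
	struct v4l2_create_buffers create;

	memset(&create, 0, sizeof(create));
	create.count = count;
	create.memory = V4L2_MEMORY_MMAP;
	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* Buffers are sized for the driver's current capture format. */
	if (ioctl(fd, VIDIOC_G_FMT, &create.format) < 0)
		return -1;
	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create) < 0)
		return -1;
	return (int)create.index;
}
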
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 06b6014d4fb..0f415dade05 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -20,6 +20,7 @@
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -43,7 +44,7 @@ struct v4l2_ctrl_helper {
};
/* Small helper function to determine if the autocluster is set to manual
- mode. In that case the is_volatile flag should be ignored. */
+ mode. */
static bool is_cur_manual(const struct v4l2_ctrl *master)
{
return master->is_auto && master->cur.val == master->manual_mode_value;
@@ -210,6 +211,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Disabled",
"50 Hz",
"60 Hz",
+ "Auto",
NULL
};
static const char * const camera_exposure_auto[] = {
@@ -819,8 +821,8 @@ static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
fill_event(&ev, ctrl, changes);
list_for_each_entry(sev, &ctrl->ev_subs, node)
- if (sev->fh && (sev->fh != fh ||
- (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)))
+ if (sev->fh != fh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
v4l2_event_queue_fh(sev->fh, &ev);
}
@@ -937,9 +939,15 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
break;
}
if (update_inactive) {
- ctrl->flags &= ~V4L2_CTRL_FLAG_INACTIVE;
- if (!is_cur_manual(ctrl->cluster[0]))
+ /* Note: update_inactive can only be true for auto clusters. */
+ ctrl->flags &=
+ ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
+ if (!is_cur_manual(ctrl->cluster[0])) {
ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ if (ctrl->cluster[0]->has_volatiles)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+ fh = NULL;
}
if (changed || update_inactive) {
/* If a control was changed that was not one of the controls
@@ -1394,10 +1402,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
type, min, max,
is_menu ? cfg->menu_skip_mask : step,
def, flags, qmenu, priv);
- if (ctrl) {
+ if (ctrl)
ctrl->is_private = cfg->is_private;
- ctrl->is_volatile = cfg->is_volatile;
- }
return ctrl;
}
EXPORT_SYMBOL(v4l2_ctrl_new_custom);
@@ -1491,6 +1497,7 @@ EXPORT_SYMBOL(v4l2_ctrl_add_handler);
/* Cluster controls */
void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
{
+ bool has_volatiles = false;
int i;
/* The first control is the master control and it must not be NULL */
@@ -1500,8 +1507,11 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
if (controls[i]) {
controls[i]->cluster = controls;
controls[i]->ncontrols = ncontrols;
+ if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
+ has_volatiles = true;
}
}
+ controls[0]->has_volatiles = has_volatiles;
}
EXPORT_SYMBOL(v4l2_ctrl_cluster);
@@ -1509,22 +1519,25 @@ void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
u8 manual_val, bool set_volatile)
{
struct v4l2_ctrl *master = controls[0];
- u32 flag;
+ u32 flag = 0;
int i;
v4l2_ctrl_cluster(ncontrols, controls);
WARN_ON(ncontrols <= 1);
WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
+ WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
master->is_auto = true;
+ master->has_volatiles = set_volatile;
master->manual_mode_value = manual_val;
master->flags |= V4L2_CTRL_FLAG_UPDATE;
- flag = is_cur_manual(master) ? 0 : V4L2_CTRL_FLAG_INACTIVE;
+
+ if (!is_cur_manual(master))
+ flag = V4L2_CTRL_FLAG_INACTIVE |
+ (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
for (i = 1; i < ncontrols; i++)
- if (controls[i]) {
- controls[i]->is_volatile = set_volatile;
+ if (controls[i])
controls[i]->flags |= flag;
- }
}
EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
@@ -1579,9 +1592,6 @@ EXPORT_SYMBOL(v4l2_ctrl_grab);
static void log_ctrl(const struct v4l2_ctrl *ctrl,
const char *prefix, const char *colon)
{
- int fl_inact = ctrl->flags & V4L2_CTRL_FLAG_INACTIVE;
- int fl_grabbed = ctrl->flags & V4L2_CTRL_FLAG_GRABBED;
-
if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
return;
if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
@@ -1612,14 +1622,17 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
printk(KERN_CONT "unknown type %d", ctrl->type);
break;
}
- if (fl_inact && fl_grabbed)
- printk(KERN_CONT " (inactive, grabbed)\n");
- else if (fl_inact)
- printk(KERN_CONT " (inactive)\n");
- else if (fl_grabbed)
- printk(KERN_CONT " (grabbed)\n");
- else
- printk(KERN_CONT "\n");
+ if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
+ V4L2_CTRL_FLAG_GRABBED |
+ V4L2_CTRL_FLAG_VOLATILE)) {
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ printk(KERN_CONT " inactive");
+ if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
+ printk(KERN_CONT " grabbed");
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
+ printk(KERN_CONT " volatile");
+ }
+ printk(KERN_CONT "\n");
}
/* Log all controls owned by the handler */
@@ -1959,7 +1972,8 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the new control values */
- if (has_op(master, g_volatile_ctrl) && !is_cur_manual(master)) {
+ if ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
+ (master->has_volatiles && !is_cur_manual(master))) {
for (j = 0; j < master->ncontrols; j++)
cur_to_new(master->cluster[j]);
ret = call_op(master, g_volatile_ctrl);
@@ -2004,7 +2018,7 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the current control values */
- if (ctrl->is_volatile && !is_cur_manual(master)) {
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
for (i = 0; i < master->ncontrols; i++)
cur_to_new(master->cluster[i]);
ret = call_op(master, g_volatile_ctrl);
@@ -2120,6 +2134,20 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
return 0;
}
+/* Obtain the current volatile values of an autocluster and mark them
+ as new. */
+static void update_from_auto_cluster(struct v4l2_ctrl *master)
+{
+ int i;
+
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ if (!call_op(master, g_volatile_ctrl))
+ for (i = 1; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 1;
+}
+
/* Try or try-and-set controls */
static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
@@ -2165,6 +2193,31 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
if (master->cluster[j])
master->cluster[j]->is_new = 0;
+ /* For a volatile autocluster that is currently in auto mode
+ we need to discover whether it will be switched to manual mode.
+ If so, then we have to copy the current volatile values
+ first since those will become the new manual values (which
+ may be overwritten by explicit new values from this set
+ of controls). */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ /* Pick an initial non-manual value */
+ s32 new_auto_val = master->manual_mode_value + 1;
+ u32 tmp_idx = idx;
+
+ do {
+ /* Check if the auto control is part of the
+ list, and remember the new value. */
+ if (helpers[tmp_idx].ctrl == master)
+ new_auto_val = cs->controls[tmp_idx].value;
+ tmp_idx = helpers[tmp_idx].next;
+ } while (tmp_idx);
+ /* If the new value == the manual value, then copy
+ the current volatile values. */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
/* Copy the new caller-supplied control values.
user_to_new() sets 'is_new' to 1. */
do {
@@ -2235,6 +2288,12 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, s32 *val)
if (master->cluster[i])
master->cluster[i]->is_new = 0;
+ /* For autoclusters with volatiles that are switched from auto to
+ manual mode we have to update the current volatile values since
+ those will become the initial manual values after such a switch. */
+ if (master->is_auto && master->has_volatiles && ctrl == master &&
+ !is_cur_manual(master) && *val == master->manual_mode_value)
+ update_from_auto_cluster(master);
ctrl->val = *val;
ctrl->is_new = 1;
ret = try_or_set_cluster(fh, master, true);
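
The v4l2-ctrls.c changes above replace the per-control is_volatile flag with V4L2_CTRL_FLAG_VOLATILE and track it per cluster. A minimal driver-side sketch of how such an auto cluster with volatile slaves is set up; the foo_* names and struct layout are hypothetical, and a real driver would also implement .s_ctrl:

#include <media/v4l2-ctrls.h>

static int foo_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	/* Read the current gain back from the (assumed) hardware here. */
	return 0;
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.g_volatile_ctrl = foo_g_volatile_ctrl,
	/* .s_ctrl omitted for brevity */
};

struct foo_dev {
	struct v4l2_ctrl_handler hdl;
	/* Cluster members: these two pointers must be consecutive. */
	struct v4l2_ctrl *autogain;
	struct v4l2_ctrl *gain;
};

static int foo_init_controls(struct foo_dev *dev)
{
	struct v4l2_ctrl_handler *hdl = &dev->hdl;

	v4l2_ctrl_handler_init(hdl, 2);
	dev->autogain = v4l2_ctrl_new_std(hdl, &foo_ctrl_ops,
					  V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	dev->gain = v4l2_ctrl_new_std(hdl, &foo_ctrl_ops,
				      V4L2_CID_GAIN, 0, 255, 1, 128);
	if (hdl->error)
		return hdl->error;

	/*
	 * Manual mode is autogain == 0; the final 'true' marks the slave
	 * controls volatile while in auto mode, so the framework sets
	 * V4L2_CTRL_FLAG_VOLATILE on them and reads them back through
	 * g_volatile_ctrl(), as handled by the hunks above.
	 */
	v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
	return 0;
}
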
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index e6a2c3b302d..0edd618b9dd 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -20,7 +20,9 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/slab.h>
#if defined(CONFIG_SPI)
#include <linux/spi/spi.h>
#endif
@@ -193,6 +195,13 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+static void v4l2_device_release_subdev_node(struct video_device *vdev)
+{
+ struct v4l2_subdev *sd = video_get_drvdata(vdev);
+ sd->devnode = NULL;
+ kfree(vdev);
+}
+
int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
{
struct video_device *vdev;
@@ -206,22 +215,40 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
continue;
- vdev = &sd->devnode;
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ err = -ENOMEM;
+ goto clean_up;
+ }
+
+ video_set_drvdata(vdev, sd);
strlcpy(vdev->name, sd->name, sizeof(vdev->name));
vdev->v4l2_dev = v4l2_dev;
vdev->fops = &v4l2_subdev_fops;
- vdev->release = video_device_release_empty;
+ vdev->release = v4l2_device_release_subdev_node;
vdev->ctrl_handler = sd->ctrl_handler;
err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
sd->owner);
- if (err < 0)
- return err;
+ if (err < 0) {
+ kfree(vdev);
+ goto clean_up;
+ }
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.v4l.major = VIDEO_MAJOR;
sd->entity.v4l.minor = vdev->minor;
#endif
+ sd->devnode = vdev;
}
return 0;
+
+clean_up:
+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
+ if (!sd->devnode)
+ break;
+ video_unregister_device(sd->devnode);
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev_nodes);
@@ -247,7 +274,7 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
if (v4l2_dev->mdev)
media_device_unregister_entity(&sd->entity);
#endif
- video_unregister_device(&sd->devnode);
+ video_unregister_device(sd->devnode);
module_put(sd->owner);
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 53b190cf225..c26ad963714 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -29,6 +29,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/export.h>
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
@@ -215,6 +216,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
unsigned long flags;
unsigned i;
+ if (sub->type == V4L2_EVENT_ALL)
+ return -EINVAL;
+
if (elems < 1)
elems = 1;
if (sub->type == V4L2_EVENT_CTRL) {
@@ -282,6 +286,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
{
struct v4l2_subscribed_event *sev;
unsigned long flags;
+ int i;
if (sub->type == V4L2_EVENT_ALL) {
v4l2_event_unsubscribe_all(fh);
@@ -292,8 +297,12 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (sev != NULL) {
+ /* Remove any pending events for this subscription */
+ for (i = 0; i < sev->in_use; i++) {
+ list_del(&sev->events[sev_pos(sev, i)].list);
+ fh->navailable--;
+ }
list_del(&sev->list);
- sev->fh = NULL;
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
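
The v4l2-event.c hunks above reject V4L2_EVENT_ALL at subscribe time and drop any pending events when a subscription is removed. A brief user-space sketch of the intended usage (assumes fd is an open V4L2 device that supports control events):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void watch_brightness(int fd)
{
	struct v4l2_event_subscription sub;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_CTRL;		/* concrete type: accepted */
	sub.id = V4L2_CID_BRIGHTNESS;
	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	/* ... poll() the fd and fetch events with VIDIOC_DQEVENT ... */

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_ALL;		/* only valid for unsubscribe */
	ioctl(fd, VIDIOC_UNSUBSCRIBE_EVENT, &sub);
}
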
diff --git a/drivers/media/video/v4l2-fh.c b/drivers/media/video/v4l2-fh.c
index 122822d2b8b..9e3fc040ea2 100644
--- a/drivers/media/video/v4l2-fh.c
+++ b/drivers/media/video/v4l2-fh.c
@@ -24,6 +24,7 @@
#include <linux/bitops.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
diff --git a/drivers/media/video/v4l2-int-device.c b/drivers/media/video/v4l2-int-device.c
index a935bae538e..f4473494af7 100644
--- a/drivers/media/video/v4l2-int-device.c
+++ b/drivers/media/video/v4l2-int-device.c
@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/sort.h>
#include <linux/string.h>
+#include <linux/module.h>
#include <media/v4l2-int-device.h>
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 002ce136344..e1da8fc9dd2 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -55,6 +55,19 @@
memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
+#define have_fmt_ops(foo) ( \
+ ops->vidioc_##foo##_fmt_vid_cap || \
+ ops->vidioc_##foo##_fmt_vid_out || \
+ ops->vidioc_##foo##_fmt_vid_cap_mplane || \
+ ops->vidioc_##foo##_fmt_vid_out_mplane || \
+ ops->vidioc_##foo##_fmt_vid_overlay || \
+ ops->vidioc_##foo##_fmt_vbi_cap || \
+ ops->vidioc_##foo##_fmt_vid_out_overlay || \
+ ops->vidioc_##foo##_fmt_vbi_out || \
+ ops->vidioc_##foo##_fmt_sliced_vbi_cap || \
+ ops->vidioc_##foo##_fmt_sliced_vbi_out || \
+ ops->vidioc_##foo##_fmt_type_private)
+
struct std_descr {
v4l2_std_id std;
const char *descr;
@@ -260,6 +273,8 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_DQEVENT)] = "VIDIOC_DQEVENT",
[_IOC_NR(VIDIOC_SUBSCRIBE_EVENT)] = "VIDIOC_SUBSCRIBE_EVENT",
[_IOC_NR(VIDIOC_UNSUBSCRIBE_EVENT)] = "VIDIOC_UNSUBSCRIBE_EVENT",
+ [_IOC_NR(VIDIOC_CREATE_BUFS)] = "VIDIOC_CREATE_BUFS",
+ [_IOC_NR(VIDIOC_PREPARE_BUF)] = "VIDIOC_PREPARE_BUF",
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -477,63 +492,6 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
return -EINVAL;
}
-/**
- * fmt_sp_to_mp() - Convert a single-plane format to its multi-planar 1-plane
- * equivalent
- */
-static int fmt_sp_to_mp(const struct v4l2_format *f_sp,
- struct v4l2_format *f_mp)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f_mp->fmt.pix_mp;
- const struct v4l2_pix_format *pix = &f_sp->fmt.pix;
-
- if (f_sp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- f_mp->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- else if (f_sp->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- f_mp->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- else
- return -EINVAL;
-
- pix_mp->width = pix->width;
- pix_mp->height = pix->height;
- pix_mp->pixelformat = pix->pixelformat;
- pix_mp->field = pix->field;
- pix_mp->colorspace = pix->colorspace;
- pix_mp->num_planes = 1;
- pix_mp->plane_fmt[0].sizeimage = pix->sizeimage;
- pix_mp->plane_fmt[0].bytesperline = pix->bytesperline;
-
- return 0;
-}
-
-/**
- * fmt_mp_to_sp() - Convert a multi-planar 1-plane format to its single-planar
- * equivalent
- */
-static int fmt_mp_to_sp(const struct v4l2_format *f_mp,
- struct v4l2_format *f_sp)
-{
- const struct v4l2_pix_format_mplane *pix_mp = &f_mp->fmt.pix_mp;
- struct v4l2_pix_format *pix = &f_sp->fmt.pix;
-
- if (f_mp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- f_sp->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- else if (f_mp->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- f_sp->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- else
- return -EINVAL;
-
- pix->width = pix_mp->width;
- pix->height = pix_mp->height;
- pix->pixelformat = pix_mp->pixelformat;
- pix->field = pix_mp->field;
- pix->colorspace = pix_mp->colorspace;
- pix->sizeimage = pix_mp->plane_fmt[0].sizeimage;
- pix->bytesperline = pix_mp->plane_fmt[0].bytesperline;
-
- return 0;
-}
-
static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
@@ -541,8 +499,8 @@ static long __video_do_ioctl(struct file *file,
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
void *fh = file->private_data;
struct v4l2_fh *vfh = NULL;
- struct v4l2_format f_copy;
int use_fh_prio = 0;
+ long ret_prio = 0;
long ret = -ENOTTY;
if (ops == NULL) {
@@ -562,39 +520,8 @@ static long __video_do_ioctl(struct file *file,
use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
}
- if (use_fh_prio) {
- switch (cmd) {
- case VIDIOC_S_CTRL:
- case VIDIOC_S_STD:
- case VIDIOC_S_INPUT:
- case VIDIOC_S_OUTPUT:
- case VIDIOC_S_TUNER:
- case VIDIOC_S_FREQUENCY:
- case VIDIOC_S_FMT:
- case VIDIOC_S_CROP:
- case VIDIOC_S_AUDIO:
- case VIDIOC_S_AUDOUT:
- case VIDIOC_S_EXT_CTRLS:
- case VIDIOC_S_FBUF:
- case VIDIOC_S_PRIORITY:
- case VIDIOC_S_DV_PRESET:
- case VIDIOC_S_DV_TIMINGS:
- case VIDIOC_S_JPEGCOMP:
- case VIDIOC_S_MODULATOR:
- case VIDIOC_S_PARM:
- case VIDIOC_S_HW_FREQ_SEEK:
- case VIDIOC_ENCODER_CMD:
- case VIDIOC_OVERLAY:
- case VIDIOC_REQBUFS:
- case VIDIOC_STREAMON:
- case VIDIOC_STREAMOFF:
- ret = v4l2_prio_check(vfd->prio, vfh->prio);
- if (ret)
- goto exit_prio;
- ret = -EINVAL;
- break;
- }
- }
+ if (use_fh_prio)
+ ret_prio = v4l2_prio_check(vfd->prio, vfh->prio);
switch (cmd) {
@@ -638,12 +565,14 @@ static long __video_do_ioctl(struct file *file,
enum v4l2_priority *p = arg;
if (!ops->vidioc_s_priority && !use_fh_prio)
- break;
+ break;
dbgarg(cmd, "setting priority to %d\n", *p);
if (ops->vidioc_s_priority)
ret = ops->vidioc_s_priority(file, fh, *p);
else
- ret = v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p);
+ ret = ret_prio ? ret_prio :
+ v4l2_prio_change(&vfd->v4l2_dev->prio,
+ &vfh->prio, *p);
break;
}
@@ -654,37 +583,37 @@ static long __video_do_ioctl(struct file *file,
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (ops->vidioc_enum_fmt_vid_cap)
+ if (likely(ops->vidioc_enum_fmt_vid_cap))
ret = ops->vidioc_enum_fmt_vid_cap(file, fh, f);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (ops->vidioc_enum_fmt_vid_cap_mplane)
+ if (likely(ops->vidioc_enum_fmt_vid_cap_mplane))
ret = ops->vidioc_enum_fmt_vid_cap_mplane(file,
fh, f);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (ops->vidioc_enum_fmt_vid_overlay)
+ if (likely(ops->vidioc_enum_fmt_vid_overlay))
ret = ops->vidioc_enum_fmt_vid_overlay(file,
fh, f);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (ops->vidioc_enum_fmt_vid_out)
+ if (likely(ops->vidioc_enum_fmt_vid_out))
ret = ops->vidioc_enum_fmt_vid_out(file, fh, f);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (ops->vidioc_enum_fmt_vid_out_mplane)
+ if (likely(ops->vidioc_enum_fmt_vid_out_mplane))
ret = ops->vidioc_enum_fmt_vid_out_mplane(file,
fh, f);
break;
case V4L2_BUF_TYPE_PRIVATE:
- if (ops->vidioc_enum_fmt_type_private)
+ if (likely(ops->vidioc_enum_fmt_type_private))
ret = ops->vidioc_enum_fmt_type_private(file,
fh, f);
break;
default:
break;
}
- if (!ret)
+ if (likely(!ret))
dbgarg(cmd, "index=%d, type=%d, flags=%d, "
"pixelformat=%c%c%c%c, description='%s'\n",
f->index, f->type, f->flags,
@@ -693,6 +622,14 @@ static long __video_do_ioctl(struct file *file,
(f->pixelformat >> 16) & 0xff,
(f->pixelformat >> 24) & 0xff,
f->description);
+ else if (ret == -ENOTTY &&
+ (ops->vidioc_enum_fmt_vid_cap ||
+ ops->vidioc_enum_fmt_vid_out ||
+ ops->vidioc_enum_fmt_vid_cap_mplane ||
+ ops->vidioc_enum_fmt_vid_out_mplane ||
+ ops->vidioc_enum_fmt_vid_overlay ||
+ ops->vidioc_enum_fmt_type_private))
+ ret = -EINVAL;
break;
}
case VIDIOC_G_FMT:
@@ -704,119 +641,67 @@ static long __video_do_ioctl(struct file *file,
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (ops->vidioc_g_fmt_vid_cap) {
+ if (ops->vidioc_g_fmt_vid_cap)
ret = ops->vidioc_g_fmt_vid_cap(file, fh, f);
- } else if (ops->vidioc_g_fmt_vid_cap_mplane) {
- if (fmt_sp_to_mp(f, &f_copy))
- break;
- ret = ops->vidioc_g_fmt_vid_cap_mplane(file, fh,
- &f_copy);
- if (ret)
- break;
-
- /* Driver is currently in multi-planar format,
- * we can't return it in single-planar API*/
- if (f_copy.fmt.pix_mp.num_planes > 1) {
- ret = -EBUSY;
- break;
- }
-
- ret = fmt_mp_to_sp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (ops->vidioc_g_fmt_vid_cap_mplane) {
+ if (ops->vidioc_g_fmt_vid_cap_mplane)
ret = ops->vidioc_g_fmt_vid_cap_mplane(file,
fh, f);
- } else if (ops->vidioc_g_fmt_vid_cap) {
- if (fmt_mp_to_sp(f, &f_copy))
- break;
- ret = ops->vidioc_g_fmt_vid_cap(file,
- fh, &f_copy);
- if (ret)
- break;
-
- ret = fmt_sp_to_mp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (ops->vidioc_g_fmt_vid_overlay)
+ if (likely(ops->vidioc_g_fmt_vid_overlay))
ret = ops->vidioc_g_fmt_vid_overlay(file,
fh, f);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (ops->vidioc_g_fmt_vid_out) {
+ if (ops->vidioc_g_fmt_vid_out)
ret = ops->vidioc_g_fmt_vid_out(file, fh, f);
- } else if (ops->vidioc_g_fmt_vid_out_mplane) {
- if (fmt_sp_to_mp(f, &f_copy))
- break;
- ret = ops->vidioc_g_fmt_vid_out_mplane(file, fh,
- &f_copy);
- if (ret)
- break;
-
- /* Driver is currently in multi-planar format,
- * we can't return it in single-planar API*/
- if (f_copy.fmt.pix_mp.num_planes > 1) {
- ret = -EBUSY;
- break;
- }
-
- ret = fmt_mp_to_sp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (ops->vidioc_g_fmt_vid_out_mplane) {
+ if (ops->vidioc_g_fmt_vid_out_mplane)
ret = ops->vidioc_g_fmt_vid_out_mplane(file,
fh, f);
- } else if (ops->vidioc_g_fmt_vid_out) {
- if (fmt_mp_to_sp(f, &f_copy))
- break;
- ret = ops->vidioc_g_fmt_vid_out(file,
- fh, &f_copy);
- if (ret)
- break;
-
- ret = fmt_sp_to_mp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- if (ops->vidioc_g_fmt_vid_out_overlay)
+ if (likely(ops->vidioc_g_fmt_vid_out_overlay))
ret = ops->vidioc_g_fmt_vid_out_overlay(file,
fh, f);
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (ops->vidioc_g_fmt_vbi_cap)
+ if (likely(ops->vidioc_g_fmt_vbi_cap))
ret = ops->vidioc_g_fmt_vbi_cap(file, fh, f);
break;
case V4L2_BUF_TYPE_VBI_OUTPUT:
- if (ops->vidioc_g_fmt_vbi_out)
+ if (likely(ops->vidioc_g_fmt_vbi_out))
ret = ops->vidioc_g_fmt_vbi_out(file, fh, f);
break;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- if (ops->vidioc_g_fmt_sliced_vbi_cap)
+ if (likely(ops->vidioc_g_fmt_sliced_vbi_cap))
ret = ops->vidioc_g_fmt_sliced_vbi_cap(file,
fh, f);
break;
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- if (ops->vidioc_g_fmt_sliced_vbi_out)
+ if (likely(ops->vidioc_g_fmt_sliced_vbi_out))
ret = ops->vidioc_g_fmt_sliced_vbi_out(file,
fh, f);
break;
case V4L2_BUF_TYPE_PRIVATE:
- if (ops->vidioc_g_fmt_type_private)
+ if (likely(ops->vidioc_g_fmt_type_private))
ret = ops->vidioc_g_fmt_type_private(file,
fh, f);
break;
}
+ if (unlikely(ret == -ENOTTY && have_fmt_ops(g)))
+ ret = -EINVAL;
break;
}
@@ -824,6 +709,14 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_format *f = (struct v4l2_format *)arg;
+ if (!have_fmt_ops(s))
+ break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
+ ret = -EINVAL;
+
/* FIXME: Should be one dump per type */
dbgarg(cmd, "type=%s\n", prt_names(f->type, v4l2_type_names));
@@ -831,44 +724,15 @@ static long __video_do_ioctl(struct file *file,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.pix);
v4l_print_pix_fmt(vfd, &f->fmt.pix);
- if (ops->vidioc_s_fmt_vid_cap) {
+ if (ops->vidioc_s_fmt_vid_cap)
ret = ops->vidioc_s_fmt_vid_cap(file, fh, f);
- } else if (ops->vidioc_s_fmt_vid_cap_mplane) {
- if (fmt_sp_to_mp(f, &f_copy))
- break;
- ret = ops->vidioc_s_fmt_vid_cap_mplane(file, fh,
- &f_copy);
- if (ret)
- break;
-
- if (f_copy.fmt.pix_mp.num_planes > 1) {
- /* Drivers shouldn't adjust from 1-plane
- * to more than 1-plane formats */
- ret = -EBUSY;
- WARN_ON(1);
- break;
- }
-
- ret = fmt_mp_to_sp(&f_copy, f);
- }
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
CLEAR_AFTER_FIELD(f, fmt.pix_mp);
v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
- if (ops->vidioc_s_fmt_vid_cap_mplane) {
+ if (ops->vidioc_s_fmt_vid_cap_mplane)
ret = ops->vidioc_s_fmt_vid_cap_mplane(file,
fh, f);
- } else if (ops->vidioc_s_fmt_vid_cap &&
- f->fmt.pix_mp.num_planes == 1) {
- if (fmt_mp_to_sp(f, &f_copy))
- break;
- ret = ops->vidioc_s_fmt_vid_cap(file,
- fh, &f_copy);
- if (ret)
- break;
-
- ret = fmt_sp_to_mp(&f_copy, f);
- }
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
@@ -879,44 +743,15 @@ static long __video_do_ioctl(struct file *file,
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.pix);
v4l_print_pix_fmt(vfd, &f->fmt.pix);
- if (ops->vidioc_s_fmt_vid_out) {
+ if (ops->vidioc_s_fmt_vid_out)
ret = ops->vidioc_s_fmt_vid_out(file, fh, f);
- } else if (ops->vidioc_s_fmt_vid_out_mplane) {
- if (fmt_sp_to_mp(f, &f_copy))
- break;
- ret = ops->vidioc_s_fmt_vid_out_mplane(file, fh,
- &f_copy);
- if (ret)
- break;
-
- if (f_copy.fmt.pix_mp.num_planes > 1) {
- /* Drivers shouldn't adjust from 1-plane
- * to more than 1-plane formats */
- ret = -EBUSY;
- WARN_ON(1);
- break;
- }
-
- ret = fmt_mp_to_sp(&f_copy, f);
- }
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
CLEAR_AFTER_FIELD(f, fmt.pix_mp);
v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
- if (ops->vidioc_s_fmt_vid_out_mplane) {
+ if (ops->vidioc_s_fmt_vid_out_mplane)
ret = ops->vidioc_s_fmt_vid_out_mplane(file,
fh, f);
- } else if (ops->vidioc_s_fmt_vid_out &&
- f->fmt.pix_mp.num_planes == 1) {
- if (fmt_mp_to_sp(f, &f_copy))
- break;
- ret = ops->vidioc_s_fmt_vid_out(file,
- fh, &f_copy);
- if (ret)
- break;
-
- ret = fmt_mp_to_sp(&f_copy, f);
- }
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
@@ -926,29 +761,30 @@ static long __video_do_ioctl(struct file *file,
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (ops->vidioc_s_fmt_vbi_cap)
+ if (likely(ops->vidioc_s_fmt_vbi_cap))
ret = ops->vidioc_s_fmt_vbi_cap(file, fh, f);
break;
case V4L2_BUF_TYPE_VBI_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (ops->vidioc_s_fmt_vbi_out)
+ if (likely(ops->vidioc_s_fmt_vbi_out))
ret = ops->vidioc_s_fmt_vbi_out(file, fh, f);
break;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (ops->vidioc_s_fmt_sliced_vbi_cap)
+ if (likely(ops->vidioc_s_fmt_sliced_vbi_cap))
ret = ops->vidioc_s_fmt_sliced_vbi_cap(file,
fh, f);
break;
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (ops->vidioc_s_fmt_sliced_vbi_out)
+ if (likely(ops->vidioc_s_fmt_sliced_vbi_out))
ret = ops->vidioc_s_fmt_sliced_vbi_out(file,
fh, f);
+
break;
case V4L2_BUF_TYPE_PRIVATE:
/* CLEAR_AFTER_FIELD(f, fmt.raw_data); <- does nothing */
- if (ops->vidioc_s_fmt_type_private)
+ if (likely(ops->vidioc_s_fmt_type_private))
ret = ops->vidioc_s_fmt_type_private(file,
fh, f);
break;
@@ -965,132 +801,77 @@ static long __video_do_ioctl(struct file *file,
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.pix);
- if (ops->vidioc_try_fmt_vid_cap) {
+ if (ops->vidioc_try_fmt_vid_cap)
ret = ops->vidioc_try_fmt_vid_cap(file, fh, f);
- } else if (ops->vidioc_try_fmt_vid_cap_mplane) {
- if (fmt_sp_to_mp(f, &f_copy))
- break;
- ret = ops->vidioc_try_fmt_vid_cap_mplane(file,
- fh, &f_copy);
- if (ret)
- break;
-
- if (f_copy.fmt.pix_mp.num_planes > 1) {
- /* Drivers shouldn't adjust from 1-plane
- * to more than 1-plane formats */
- ret = -EBUSY;
- WARN_ON(1);
- break;
- }
- ret = fmt_mp_to_sp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
CLEAR_AFTER_FIELD(f, fmt.pix_mp);
- if (ops->vidioc_try_fmt_vid_cap_mplane) {
+ if (ops->vidioc_try_fmt_vid_cap_mplane)
ret = ops->vidioc_try_fmt_vid_cap_mplane(file,
fh, f);
- } else if (ops->vidioc_try_fmt_vid_cap &&
- f->fmt.pix_mp.num_planes == 1) {
- if (fmt_mp_to_sp(f, &f_copy))
- break;
- ret = ops->vidioc_try_fmt_vid_cap(file,
- fh, &f_copy);
- if (ret)
- break;
-
- ret = fmt_sp_to_mp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
- if (ops->vidioc_try_fmt_vid_overlay)
+ if (likely(ops->vidioc_try_fmt_vid_overlay))
ret = ops->vidioc_try_fmt_vid_overlay(file,
fh, f);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.pix);
- if (ops->vidioc_try_fmt_vid_out) {
+ if (ops->vidioc_try_fmt_vid_out)
ret = ops->vidioc_try_fmt_vid_out(file, fh, f);
- } else if (ops->vidioc_try_fmt_vid_out_mplane) {
- if (fmt_sp_to_mp(f, &f_copy))
- break;
- ret = ops->vidioc_try_fmt_vid_out_mplane(file,
- fh, &f_copy);
- if (ret)
- break;
-
- if (f_copy.fmt.pix_mp.num_planes > 1) {
- /* Drivers shouldn't adjust from 1-plane
- * to more than 1-plane formats */
- ret = -EBUSY;
- WARN_ON(1);
- break;
- }
- ret = fmt_mp_to_sp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt(vfd, &f->fmt.pix);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
CLEAR_AFTER_FIELD(f, fmt.pix_mp);
- if (ops->vidioc_try_fmt_vid_out_mplane) {
+ if (ops->vidioc_try_fmt_vid_out_mplane)
ret = ops->vidioc_try_fmt_vid_out_mplane(file,
fh, f);
- } else if (ops->vidioc_try_fmt_vid_out &&
- f->fmt.pix_mp.num_planes == 1) {
- if (fmt_mp_to_sp(f, &f_copy))
- break;
- ret = ops->vidioc_try_fmt_vid_out(file,
- fh, &f_copy);
- if (ret)
- break;
-
- ret = fmt_sp_to_mp(&f_copy, f);
- }
if (!ret)
v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
CLEAR_AFTER_FIELD(f, fmt.win);
- if (ops->vidioc_try_fmt_vid_out_overlay)
+ if (likely(ops->vidioc_try_fmt_vid_out_overlay))
ret = ops->vidioc_try_fmt_vid_out_overlay(file,
fh, f);
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (ops->vidioc_try_fmt_vbi_cap)
+ if (likely(ops->vidioc_try_fmt_vbi_cap))
ret = ops->vidioc_try_fmt_vbi_cap(file, fh, f);
break;
case V4L2_BUF_TYPE_VBI_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (ops->vidioc_try_fmt_vbi_out)
+ if (likely(ops->vidioc_try_fmt_vbi_out))
ret = ops->vidioc_try_fmt_vbi_out(file, fh, f);
break;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (ops->vidioc_try_fmt_sliced_vbi_cap)
+ if (likely(ops->vidioc_try_fmt_sliced_vbi_cap))
ret = ops->vidioc_try_fmt_sliced_vbi_cap(file,
fh, f);
break;
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (ops->vidioc_try_fmt_sliced_vbi_out)
+ if (likely(ops->vidioc_try_fmt_sliced_vbi_out))
ret = ops->vidioc_try_fmt_sliced_vbi_out(file,
fh, f);
break;
case V4L2_BUF_TYPE_PRIVATE:
/* CLEAR_AFTER_FIELD(f, fmt.raw_data); <- does nothing */
- if (ops->vidioc_try_fmt_type_private)
+ if (likely(ops->vidioc_try_fmt_type_private))
ret = ops->vidioc_try_fmt_type_private(file,
fh, f);
break;
}
-
+ if (unlikely(ret == -ENOTTY && have_fmt_ops(try)))
+ ret = -EINVAL;
break;
}
/* FIXME: Those buf reqs could be handled here,
@@ -1103,6 +884,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_reqbufs)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -1168,6 +953,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_overlay)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_overlay(file, fh, *i);
break;
@@ -1193,6 +982,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_fbuf)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "capability=0x%x, flags=%d, base=0x%08lx\n",
p->capability, p->flags, (unsigned long)p->base);
v4l_print_pix_fmt(vfd, &p->fmt);
@@ -1205,6 +998,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_streamon)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
ret = ops->vidioc_streamon(file, fh, i);
break;
@@ -1215,6 +1012,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_streamoff)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
ret = ops->vidioc_streamoff(file, fh, i);
break;
@@ -1227,6 +1028,10 @@ static long __video_do_ioctl(struct file *file,
unsigned int index = p->index, i, j = 0;
const char *descr = "";
+ if (id == 0)
+ break;
+ ret = -EINVAL;
+
/* Return norm array in a canonical way */
for (i = 0; i <= index && id; i++) {
/* last std value in the standards array is 0, so this
@@ -1262,16 +1067,15 @@ static long __video_do_ioctl(struct file *file,
{
v4l2_std_id *id = arg;
- ret = 0;
/* Calls the specific handler */
if (ops->vidioc_g_std)
ret = ops->vidioc_g_std(file, fh, id);
- else if (vfd->current_norm)
+ else if (vfd->current_norm) {
+ ret = 0;
*id = vfd->current_norm;
- else
- ret = -EINVAL;
+ }
- if (!ret)
+ if (likely(!ret))
dbgarg(cmd, "std=0x%08Lx\n", (long long unsigned)*id);
break;
}
@@ -1281,15 +1085,20 @@ static long __video_do_ioctl(struct file *file,
dbgarg(cmd, "std=%08Lx\n", (long long unsigned)*id);
+ if (!ops->vidioc_s_std)
+ break;
+
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
+ ret = -EINVAL;
norm = (*id) & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
break;
/* Calls the specific handler */
- if (ops->vidioc_s_std)
- ret = ops->vidioc_s_std(file, fh, &norm);
- else
- ret = -EINVAL;
+ ret = ops->vidioc_s_std(file, fh, &norm);
/* Updates standard information */
if (ret >= 0)
@@ -1302,6 +1111,14 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_querystd)
break;
+ /*
+ * If nothing is detected, it should return all supported standards.
+ * Drivers just need to mask the std argument, in order
+ * to remove the standards that don't apply from the mask.
+ * This means that tuners, audio and video decoders can join
+ * their efforts to improve the standards detection.
+ */
+ *p = vfd->tvnorms;
ret = ops->vidioc_querystd(file, fh, arg);
if (!ret)
dbgarg(cmd, "detected std=%08Lx\n",
@@ -1358,6 +1175,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_input)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_s_input(file, fh, *i);
break;
@@ -1410,6 +1231,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_output)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_s_output(file, fh, *i);
break;
@@ -1479,6 +1304,10 @@ static long __video_do_ioctl(struct file *file,
if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
@@ -1504,6 +1333,8 @@ static long __video_do_ioctl(struct file *file,
ctrl.value = p->value;
if (check_ext_ctrls(&ctrls, 1))
ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ else
+ ret = -EINVAL;
break;
}
case VIDIOC_G_EXT_CTRLS:
@@ -1515,8 +1346,10 @@ static long __video_do_ioctl(struct file *file,
ret = v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
else if (vfd->ctrl_handler)
ret = v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
- else if (ops->vidioc_g_ext_ctrls && check_ext_ctrls(p, 0))
- ret = ops->vidioc_g_ext_ctrls(file, fh, p);
+ else if (ops->vidioc_g_ext_ctrls)
+ ret = check_ext_ctrls(p, 0) ?
+ ops->vidioc_g_ext_ctrls(file, fh, p) :
+ -EINVAL;
else
break;
v4l_print_ext_ctrls(cmd, vfd, p, !ret);
@@ -1530,6 +1363,10 @@ static long __video_do_ioctl(struct file *file,
if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ext_ctrls)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
v4l_print_ext_ctrls(cmd, vfd, p, 1);
if (vfh && vfh->ctrl_handler)
ret = v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
@@ -1537,6 +1374,8 @@ static long __video_do_ioctl(struct file *file,
ret = v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_s_ext_ctrls(file, fh, p);
+ else
+ ret = -EINVAL;
break;
}
case VIDIOC_TRY_EXT_CTRLS:
@@ -1554,6 +1393,8 @@ static long __video_do_ioctl(struct file *file,
ret = v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_try_ext_ctrls(file, fh, p);
+ else
+ ret = -EINVAL;
break;
}
case VIDIOC_QUERYMENU:
@@ -1614,6 +1455,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_audio)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
"mode=0x%x\n", p->index, p->name,
p->capability, p->mode);
@@ -1654,6 +1499,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_audout)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "index=%d, name=%s, capability=%d, "
"mode=%d\n", p->index, p->name,
p->capability, p->mode);
@@ -1683,6 +1532,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_modulator)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "index=%d, name=%s, capability=%d, "
"rangelow=%d, rangehigh=%d, txsubchans=%d\n",
p->index, p->name, p->capability, p->rangelow,
@@ -1709,6 +1562,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_crop)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
dbgrect(vfd, "", &p->c);
ret = ops->vidioc_s_crop(file, fh, p);
@@ -1752,11 +1609,15 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_g_jpegcomp)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "quality=%d, APPn=%d, APP_len=%d, "
"COM_len=%d, jpeg_markers=%d\n",
p->quality, p->APPn, p->APP_len,
p->COM_len, p->jpeg_markers);
- ret = ops->vidioc_s_jpegcomp(file, fh, p);
+ ret = ops->vidioc_s_jpegcomp(file, fh, p);
break;
}
case VIDIOC_G_ENC_INDEX:
@@ -1777,6 +1638,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_encoder_cmd)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
ret = ops->vidioc_encoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1797,6 +1662,8 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_streamparm *p = arg;
+ if (!ops->vidioc_g_parm && !vfd->current_norm)
+ break;
if (ops->vidioc_g_parm) {
ret = check_fmt(ops, p->type);
if (ret)
@@ -1805,14 +1672,13 @@ static long __video_do_ioctl(struct file *file,
} else {
v4l2_std_id std = vfd->current_norm;
+ ret = -EINVAL;
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
break;
ret = 0;
if (ops->vidioc_g_std)
ret = ops->vidioc_g_std(file, fh, &std);
- else if (std == 0)
- ret = -EINVAL;
if (ret == 0)
v4l2_video_std_frame_period(std,
&p->parm.capture.timeperframe);
@@ -1827,6 +1693,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_parm)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -1862,6 +1732,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_tuner)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1896,6 +1770,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_frequency)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
p->tuner, p->type, p->frequency);
ret = ops->vidioc_s_frequency(file, fh, p);
@@ -1970,6 +1848,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_hw_freq_seek)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd,
@@ -2074,6 +1956,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_dv_preset)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
dbgarg(cmd, "preset=%d\n", p->preset);
ret = ops->vidioc_s_dv_preset(file, fh, p);
@@ -2109,6 +1995,10 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_dv_timings)
break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
switch (p->type) {
case V4L2_DV_BT_656_1120:
@@ -2216,20 +2106,47 @@ static long __video_do_ioctl(struct file *file,
dbgarg(cmd, "type=0x%8.8x", sub->type);
break;
}
- default:
+ case VIDIOC_CREATE_BUFS:
{
- bool valid_prio = true;
+ struct v4l2_create_buffers *create = arg;
- if (!ops->vidioc_default)
+ if (!ops->vidioc_create_bufs)
+ break;
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
+ ret = check_fmt(ops, create->format.type);
+ if (ret)
break;
- if (use_fh_prio)
- valid_prio = v4l2_prio_check(vfd->prio, vfh->prio) >= 0;
- ret = ops->vidioc_default(file, fh, valid_prio, cmd, arg);
+
+ ret = ops->vidioc_create_bufs(file, fh, create);
+
+ dbgarg(cmd, "count=%d @ %d\n", create->count, create->index);
break;
}
+ case VIDIOC_PREPARE_BUF:
+ {
+ struct v4l2_buffer *b = arg;
+
+ if (!ops->vidioc_prepare_buf)
+ break;
+ ret = check_fmt(ops, b->type);
+ if (ret)
+ break;
+
+ ret = ops->vidioc_prepare_buf(file, fh, b);
+
+ dbgarg(cmd, "index=%d", b->index);
+ break;
+ }
+ default:
+ if (!ops->vidioc_default)
+ break;
+ ret = ops->vidioc_default(file, fh, ret_prio >= 0, cmd, arg);
+ break;
} /* switch */
-exit_prio:
if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) {
if (ret < 0) {
v4l_print_ioctl(vfd->name, cmd);
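The two new cases above only dispatch to ops->vidioc_create_bufs and ops->vidioc_prepare_buf after a check_fmt() on the buffer type; a vb2-based driver is expected to forward them to the helpers introduced later in this series. A minimal, hedged sketch follows — the mydrv_dev structure, its queue member and the video_drvdata() plumbing are illustrative assumptions; only the callback names and the vb2_create_bufs()/vb2_prepare_buf() entry points come from this patch set.

/*
 * Sketch only: wiring VIDIOC_CREATE_BUFS/VIDIOC_PREPARE_BUF into a
 * vb2-based driver. "struct mydrv_dev" and its "queue" member are
 * hypothetical; the callback names and helper entry points are the
 * ones added by this series.
 */
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>

struct mydrv_dev {
	struct vb2_queue queue;		/* assumed per-device vb2 queue */
};

static int mydrv_create_bufs(struct file *file, void *fh,
			     struct v4l2_create_buffers *create)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return vb2_create_bufs(&dev->queue, create);
}

static int mydrv_prepare_buf(struct file *file, void *fh,
			     struct v4l2_buffer *b)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return vb2_prepare_buf(&dev->queue, b);
}

static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	/* ... existing reqbufs/querybuf/qbuf/dqbuf handlers ... */
	.vidioc_create_bufs	= mydrv_create_bufs,
	.vidioc_prepare_buf	= mydrv_prepare_buf,
};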
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
index 3b15bf5892a..975d0fa938c 100644
--- a/drivers/media/video/v4l2-mem2mem.c
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -97,11 +97,12 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
- if (list_empty(&q_ctx->rdy_queue))
- goto end;
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
-end:
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return &b->vb;
}
@@ -117,12 +118,13 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
- if (!list_empty(&q_ctx->rdy_queue)) {
- b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer,
- list);
- list_del(&b->list);
- q_ctx->num_rdy--;
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
}
+ b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
+ list_del(&b->list);
+ q_ctx->num_rdy--;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return &b->vb;
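With this change both helpers return NULL when the ready queue is empty instead of leaving the caller to race with the list, so mem2mem drivers must check the result. A short hedged sketch (the surrounding helper is an assumption; only the NULL-on-empty behaviour comes from the hunk above):

/* Sketch only: callers must now handle a NULL return. */
#include <media/v4l2-mem2mem.h>

static struct vb2_buffer *fetch_ready_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct vb2_buffer *vb = v4l2_m2m_next_buf(q_ctx);

	if (!vb)
		return NULL;	/* ready queue was empty */

	/* ... program the hardware with vb ... */
	return vb;
}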
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index b7967c9dc4a..65ade5f03c2 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
+#include <linux/export.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -173,6 +174,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_UNSUBSCRIBE_EVENT:
return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ case VIDIOC_DBG_G_REGISTER:
+ {
+ struct v4l2_dbg_register *p = arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return v4l2_subdev_call(sd, core, g_register, p);
+ }
+ case VIDIOC_DBG_S_REGISTER:
+ {
+ struct v4l2_dbg_register *p = arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return v4l2_subdev_call(sd, core, s_register, p);
+ }
+#endif
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
case VIDIOC_SUBDEV_G_FMT: {
struct v4l2_subdev_format *format = arg;
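Sub-devices that want to honour the forwarded VIDIOC_DBG_G_REGISTER/VIDIOC_DBG_S_REGISTER calls need g_register/s_register core ops; the CAP_SYS_ADMIN check is already done above, so the sub-device only performs the register access. A hedged sketch — the mydrv_read_reg()/mydrv_write_reg() helpers and the 8-bit register width are assumptions; the op names follow the v4l2_subdev_call() uses in this hunk.

/*
 * Sketch only: exposing register access from a sub-device driver.
 * mydrv_read_reg()/mydrv_write_reg() are hypothetical bus helpers.
 */
#include <media/v4l2-subdev.h>

#ifdef CONFIG_VIDEO_ADV_DEBUG
static int mydrv_g_register(struct v4l2_subdev *sd,
			    struct v4l2_dbg_register *reg)
{
	reg->size = 1;				/* assumed 8-bit registers */
	reg->val = mydrv_read_reg(sd, reg->reg);	/* hypothetical helper */
	return 0;
}

static int mydrv_s_register(struct v4l2_subdev *sd,
			    struct v4l2_dbg_register *reg)
{
	mydrv_write_reg(sd, reg->reg, reg->val);	/* hypothetical helper */
	return 0;
}
#endif

static const struct v4l2_subdev_core_ops mydrv_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = mydrv_g_register,
	.s_register = mydrv_s_register,
#endif
};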
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 3015e600094..95a3f5e82ae 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -38,13 +38,13 @@ module_param(debug, int, 0644);
(((q)->ops->op) ? ((q)->ops->op(args)) : 0)
#define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
- V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR)
+ V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
+ V4L2_BUF_FLAG_PREPARED)
/**
* __vb2_buf_mem_alloc() - allocate video memory for the given buffer
*/
-static int __vb2_buf_mem_alloc(struct vb2_buffer *vb,
- unsigned long *plane_sizes)
+static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
@@ -53,13 +53,13 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb,
/* Allocate memory for all planes in this buffer */
for (plane = 0; plane < vb->num_planes; ++plane) {
mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane],
- plane_sizes[plane]);
+ q->plane_sizes[plane]);
if (IS_ERR_OR_NULL(mem_priv))
goto free;
/* Associate allocator private data with this plane */
vb->planes[plane].mem_priv = mem_priv;
- vb->v4l2_planes[plane].length = plane_sizes[plane];
+ vb->v4l2_planes[plane].length = q->plane_sizes[plane];
}
return 0;
@@ -110,18 +110,28 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
-static void __setup_offsets(struct vb2_queue *q)
+static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
unsigned int buffer, plane;
struct vb2_buffer *vb;
- unsigned long off = 0;
+ unsigned long off;
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ if (q->num_buffers) {
+ struct v4l2_plane *p;
+ vb = q->bufs[q->num_buffers - 1];
+ p = &vb->v4l2_planes[vb->num_planes - 1];
+ off = PAGE_ALIGN(p->m.mem_offset + p->length);
+ } else {
+ off = 0;
+ }
+
+ for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
vb = q->bufs[buffer];
if (!vb)
continue;
for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->v4l2_planes[plane].length = q->plane_sizes[plane];
vb->v4l2_planes[plane].m.mem_offset = off;
dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -141,8 +151,7 @@ static void __setup_offsets(struct vb2_queue *q)
* Returns the number of buffers successfully allocated.
*/
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
- unsigned int num_buffers, unsigned int num_planes,
- unsigned long plane_sizes[])
+ unsigned int num_buffers, unsigned int num_planes)
{
unsigned int buffer;
struct vb2_buffer *vb;
@@ -163,13 +172,13 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
vb->state = VB2_BUF_STATE_DEQUEUED;
vb->vb2_queue = q;
vb->num_planes = num_planes;
- vb->v4l2_buf.index = buffer;
+ vb->v4l2_buf.index = q->num_buffers + buffer;
vb->v4l2_buf.type = q->type;
vb->v4l2_buf.memory = memory;
/* Allocate video buffer memory for the MMAP type */
if (memory == V4L2_MEMORY_MMAP) {
- ret = __vb2_buf_mem_alloc(vb, plane_sizes);
+ ret = __vb2_buf_mem_alloc(vb);
if (ret) {
dprintk(1, "Failed allocating memory for "
"buffer %d\n", buffer);
@@ -191,15 +200,13 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
}
}
- q->bufs[buffer] = vb;
+ q->bufs[q->num_buffers + buffer] = vb;
}
- q->num_buffers = buffer;
-
- __setup_offsets(q);
+ __setup_offsets(q, buffer);
dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
- q->num_buffers, num_planes);
+ buffer, num_planes);
return buffer;
}
@@ -207,12 +214,13 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
/**
* __vb2_free_mem() - release all video buffer memory for a given queue
*/
-static void __vb2_free_mem(struct vb2_queue *q)
+static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
unsigned int buffer;
struct vb2_buffer *vb;
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
vb = q->bufs[buffer];
if (!vb)
continue;
@@ -226,17 +234,18 @@ static void __vb2_free_mem(struct vb2_queue *q)
}
/**
- * __vb2_queue_free() - free the queue - video memory and related information
- * and return the queue to an uninitialized state. Might be called even if the
- * queue has already been freed.
+ * __vb2_queue_free() - free buffers at the end of the queue - video memory and
+ * related information; if no buffers are left, return the queue to an
+ * uninitialized state. Might be called even if the queue has already been freed.
*/
-static void __vb2_queue_free(struct vb2_queue *q)
+static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
unsigned int buffer;
/* Call driver-provided cleanup function for each buffer, if provided */
if (q->ops->buf_cleanup) {
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
if (NULL == q->bufs[buffer])
continue;
q->ops->buf_cleanup(q->bufs[buffer]);
@@ -244,23 +253,26 @@ static void __vb2_queue_free(struct vb2_queue *q)
}
/* Release video buffer memory */
- __vb2_free_mem(q);
+ __vb2_free_mem(q, buffers);
/* Free videobuf buffers */
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
kfree(q->bufs[buffer]);
q->bufs[buffer] = NULL;
}
- q->num_buffers = 0;
- q->memory = 0;
+ q->num_buffers -= buffers;
+ if (!q->num_buffers)
+ q->memory = 0;
+ INIT_LIST_HEAD(&q->queued_list);
}
/**
* __verify_planes_array() - verify that the planes array passed in struct
* v4l2_buffer from userspace can be safely used
*/
-static int __verify_planes_array(struct vb2_buffer *vb, struct v4l2_buffer *b)
+static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
/* Is memory for copying plane information present? */
if (NULL == b->m.planes) {
@@ -279,13 +291,48 @@ static int __verify_planes_array(struct vb2_buffer *vb, struct v4l2_buffer *b)
}
/**
+ * __buffer_in_use() - return true if the buffer is in use and
+ * the queue cannot be freed (by the means of REQBUFS(0)) call
+ */
+static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ unsigned int plane;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ void *mem_priv = vb->planes[plane].mem_priv;
+ /*
+ * If num_users() has not been provided, call_memop
+ * will return 0, apparently nobody cares about this
+ * case anyway. If num_users() returns more than 1,
+ * we are not the only user of the plane's memory.
+ */
+ if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * __buffers_in_use() - return true if any buffers on the queue are in use and
+ * the queue cannot be freed (by the means of REQBUFS(0)) call
+ */
+static bool __buffers_in_use(struct vb2_queue *q)
+{
+ unsigned int buffer;
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ if (__buffer_in_use(q, q->bufs[buffer]))
+ return true;
+ }
+ return false;
+}
+
+/**
* __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
* returned to userspace
*/
static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
struct vb2_queue *q = vb->vb2_queue;
- int ret = 0;
+ int ret;
/* Copy back data such as timestamp, flags, input, etc. */
memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
@@ -332,15 +379,18 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
case VB2_BUF_STATE_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
+ case VB2_BUF_STATE_PREPARED:
+ b->flags |= V4L2_BUF_FLAG_PREPARED;
+ break;
case VB2_BUF_STATE_DEQUEUED:
/* nothing */
break;
}
- if (vb->num_planes_mapped == vb->num_planes)
+ if (__buffer_in_use(q, vb))
b->flags |= V4L2_BUF_FLAG_MAPPED;
- return ret;
+ return 0;
}
/**
@@ -402,33 +452,6 @@ static int __verify_mmap_ops(struct vb2_queue *q)
}
/**
- * __buffers_in_use() - return true if any buffers on the queue are in use and
- * the queue cannot be freed (by the means of REQBUFS(0)) call
- */
-static bool __buffers_in_use(struct vb2_queue *q)
-{
- unsigned int buffer, plane;
- struct vb2_buffer *vb;
-
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
- vb = q->bufs[buffer];
- for (plane = 0; plane < vb->num_planes; ++plane) {
- /*
- * If num_users() has not been provided, call_memop
- * will return 0, apparently nobody cares about this
- * case anyway. If num_users() returns more than 1,
- * we are not the only user of the plane's memory.
- */
- if (call_memop(q, plane, num_users,
- vb->planes[plane].mem_priv) > 1)
- return true;
- }
- }
-
- return false;
-}
-
-/**
* vb2_reqbufs() - Initiate streaming
* @q: videobuf2 queue
* @req: struct passed from userspace to vidioc_reqbufs handler in driver
@@ -453,8 +476,7 @@ static bool __buffers_in_use(struct vb2_queue *q)
*/
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
- unsigned int num_buffers, num_planes;
- unsigned long plane_sizes[VIDEO_MAX_PLANES];
+ unsigned int num_buffers, allocated_buffers, num_planes = 0;
int ret = 0;
if (q->fileio) {
@@ -502,7 +524,7 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EBUSY;
}
- __vb2_queue_free(q);
+ __vb2_queue_free(q, q->num_buffers);
/*
* In case of REQBUFS(0) return immediately without calling
@@ -516,7 +538,7 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* Make sure the requested values and current defaults are sane.
*/
num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
- memset(plane_sizes, 0, sizeof(plane_sizes));
+ memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
q->memory = req->memory;
@@ -524,57 +546,180 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* Ask the driver how many buffers and planes per buffer it requires.
* Driver also sets the size and allocator context for each plane.
*/
- ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
- plane_sizes, q->alloc_ctx);
+ ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
+ q->plane_sizes, q->alloc_ctx);
if (ret)
return ret;
/* Finally, allocate buffers and video memory */
- ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes,
- plane_sizes);
+ ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
if (ret == 0) {
dprintk(1, "Memory allocation failed\n");
return -ENOMEM;
}
+ allocated_buffers = ret;
+
/*
* Check if driver can handle the allocated number of buffers.
*/
- if (ret < num_buffers) {
- unsigned int orig_num_buffers;
+ if (allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
- orig_num_buffers = num_buffers = ret;
- ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
- plane_sizes, q->alloc_ctx);
- if (ret)
- goto free_mem;
+ ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
+ &num_planes, q->plane_sizes, q->alloc_ctx);
- if (orig_num_buffers < num_buffers) {
+ if (!ret && allocated_buffers < num_buffers)
ret = -ENOMEM;
- goto free_mem;
- }
/*
- * Ok, driver accepted smaller number of buffers.
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
*/
- ret = num_buffers;
+ }
+
+ q->num_buffers = allocated_buffers;
+
+ if (ret < 0) {
+ __vb2_queue_free(q, allocated_buffers);
+ return ret;
}
/*
* Return the number of successfully allocated buffers
* to the userspace.
*/
- req->count = ret;
+ req->count = allocated_buffers;
return 0;
-
-free_mem:
- __vb2_queue_free(q);
- return ret;
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
/**
+ * vb2_create_bufs() - Allocate buffers and any required auxiliary structs
+ * @q: videobuf2 queue
+ * @create: creation parameters, passed from userspace to vidioc_create_bufs
+ * handler in driver
+ *
+ * Should be called from vidioc_create_bufs ioctl handler of a driver.
+ * This function:
+ * 1) verifies parameter sanity
+ * 2) calls the .queue_setup() queue operation
+ * 3) performs any necessary memory allocations
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_create_bufs handler in driver.
+ */
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+ unsigned int num_planes = 0, num_buffers, allocated_buffers;
+ int ret = 0;
+
+ if (q->fileio) {
+ dprintk(1, "%s(): file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ if (create->memory != V4L2_MEMORY_MMAP
+ && create->memory != V4L2_MEMORY_USERPTR) {
+ dprintk(1, "%s(): unsupported memory type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (create->format.type != q->type) {
+ dprintk(1, "%s(): requested type is incorrect\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure all the required memory ops for given memory type
+ * are available.
+ */
+ if (create->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+ dprintk(1, "%s(): MMAP for current setup unsupported\n", __func__);
+ return -EINVAL;
+ }
+
+ if (create->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+ dprintk(1, "%s(): USERPTR for current setup unsupported\n", __func__);
+ return -EINVAL;
+ }
+
+ if (q->num_buffers == VIDEO_MAX_FRAME) {
+ dprintk(1, "%s(): maximum number of buffers already allocated\n",
+ __func__);
+ return -ENOBUFS;
+ }
+
+ create->index = q->num_buffers;
+
+ if (!q->num_buffers) {
+ memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
+ memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+ q->memory = create->memory;
+ }
+
+ num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
+
+ /*
+ * Ask the driver whether the requested number of buffers, planes per
+ * buffer and their sizes are acceptable
+ */
+ ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+ &num_planes, q->plane_sizes, q->alloc_ctx);
+ if (ret)
+ return ret;
+
+ /* Finally, allocate buffers and video memory */
+ ret = __vb2_queue_alloc(q, create->memory, num_buffers,
+ num_planes);
+ if (ret < 0) {
+ dprintk(1, "Memory allocation failed with error: %d\n", ret);
+ return ret;
+ }
+
+ allocated_buffers = ret;
+
+ /*
+ * Check if driver can handle the so far allocated number of buffers.
+ */
+ if (ret < num_buffers) {
+ num_buffers = ret;
+
+ /*
+ * q->num_buffers contains the total number of buffers that the
+ * queue driver has set up
+ */
+ ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+ &num_planes, q->plane_sizes, q->alloc_ctx);
+
+ if (!ret && allocated_buffers < num_buffers)
+ ret = -ENOMEM;
+
+ /*
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
+ */
+ }
+
+ q->num_buffers += allocated_buffers;
+
+ if (ret < 0) {
+ __vb2_queue_free(q, allocated_buffers);
+ return ret;
+ }
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ create->count = allocated_buffers;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_create_bufs);
+
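Because vb2_reqbufs() passes a NULL format to queue_setup() while vb2_create_bufs() passes &create->format, drivers converted to the new queue_setup() prototype can tell the two paths apart. A hedged sketch under assumed mydrv structures (the prototype itself matches the vivi conversion later in this patch):

/*
 * Sketch only: a queue_setup() honouring the new "fmt" argument.
 * "struct mydrv_dev" and its sizeimage bookkeeping are assumptions.
 */
struct mydrv_dev {
	unsigned int sizeimage;		/* size of the currently set format */
};

static int mydrv_queue_setup(struct vb2_queue *vq,
			     const struct v4l2_format *fmt,
			     unsigned int *nbuffers, unsigned int *nplanes,
			     unsigned int sizes[], void *alloc_ctxs[])
{
	struct mydrv_dev *dev = vb2_get_drv_priv(vq);
	unsigned int size = dev->sizeimage;

	if (fmt) {
		/* CREATE_BUFS path: buffers must fit the requested format */
		if (fmt->fmt.pix.sizeimage < size)
			return -EINVAL;
		size = fmt->fmt.pix.sizeimage;
	}

	/* REQBUFS path: fmt == NULL, so the currently set format is used */
	*nplanes = 1;
	sizes[0] = size;
	return 0;
}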
+/**
* vb2_plane_vaddr() - Return a kernel virtual address of a given plane
* @vb: vb2_buffer to which the plane in question belongs to
* @plane_no: plane number for which the address is to be returned
@@ -658,7 +803,7 @@ EXPORT_SYMBOL_GPL(vb2_buffer_done);
* __fill_vb2_buffer() - fill a vb2_buffer with information provided in
* a v4l2_buffer by the userspace
*/
-static int __fill_vb2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b,
+static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
struct v4l2_plane *v4l2_planes)
{
unsigned int plane;
@@ -722,7 +867,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b,
/**
* __qbuf_userptr() - handle qbuf of a USERPTR buffer
*/
-static int __qbuf_userptr(struct vb2_buffer *vb, struct v4l2_buffer *b)
+static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
struct v4l2_plane planes[VIDEO_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
@@ -745,12 +890,20 @@ static int __qbuf_userptr(struct vb2_buffer *vb, struct v4l2_buffer *b)
dprintk(3, "qbuf: userspace address for plane %d changed, "
"reacquiring memory\n", plane);
+ /* Check if the provided plane buffer is large enough */
+ if (planes[plane].length < q->plane_sizes[plane]) {
+ ret = -EINVAL;
+ goto err;
+ }
+
/* Release previously acquired memory if present */
if (vb->planes[plane].mem_priv)
call_memop(q, plane, put_userptr,
vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
+ vb->v4l2_planes[plane].m.userptr = 0;
+ vb->v4l2_planes[plane].length = 0;
/* Acquire each plane's memory */
if (q->mem_ops->get_userptr) {
@@ -788,10 +941,13 @@ static int __qbuf_userptr(struct vb2_buffer *vb, struct v4l2_buffer *b)
return 0;
err:
/* In case of errors, release planes that were already acquired */
- for (; plane > 0; --plane) {
- call_memop(q, plane, put_userptr,
- vb->planes[plane - 1].mem_priv);
- vb->planes[plane - 1].mem_priv = NULL;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].mem_priv)
+ call_memop(q, plane, put_userptr,
+ vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ vb->v4l2_planes[plane].m.userptr = 0;
+ vb->v4l2_planes[plane].length = 0;
}
return ret;
@@ -800,7 +956,7 @@ err:
/**
* __qbuf_mmap() - handle qbuf of an MMAP buffer
*/
-static int __qbuf_mmap(struct vb2_buffer *vb, struct v4l2_buffer *b)
+static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
return __fill_vb2_buffer(vb, b, vb->v4l2_planes);
}
@@ -817,6 +973,95 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
q->ops->buf_queue(vb);
}
+static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ int ret;
+
+ switch (q->memory) {
+ case V4L2_MEMORY_MMAP:
+ ret = __qbuf_mmap(vb, b);
+ break;
+ case V4L2_MEMORY_USERPTR:
+ ret = __qbuf_userptr(vb, b);
+ break;
+ default:
+ WARN(1, "Invalid queue type\n");
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ ret = call_qop(q, buf_prepare, vb);
+ if (ret)
+ dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
+ else
+ vb->state = VB2_BUF_STATE_PREPARED;
+
+ return ret;
+}
+
+/**
+ * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_prepare_buf
+ * handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_prepare callback in the driver (if provided), in which
+ * driver-specific buffer initialization can be performed,
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
+int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (q->fileio) {
+ dprintk(1, "%s(): file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ if (b->type != q->type) {
+ dprintk(1, "%s(): invalid buffer type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "%s(): buffer index out of range\n", __func__);
+ return -EINVAL;
+ }
+
+ vb = q->bufs[b->index];
+ if (NULL == vb) {
+ /* Should never happen */
+ dprintk(1, "%s(): buffer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (b->memory != q->memory) {
+ dprintk(1, "%s(): invalid memory type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
+ return -EINVAL;
+ }
+
+ ret = __buf_prepare(vb, b);
+ if (ret < 0)
+ return ret;
+
+ __fill_v4l2_buffer(vb, b);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_prepare_buf);
+
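From the application side, the new ioctl lets the expensive part of queueing (userptr acquisition, the driver's buf_prepare) be done ahead of time; a later VIDIOC_QBUF on an already prepared buffer then takes the fast path in vb2_qbuf() below. A hedged userspace sketch with error handling trimmed; the capture buffer type and MMAP memory are assumptions:

/* Sketch only: pre-validate a buffer, then queue it later. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int prepare_and_queue(int fd, unsigned int index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;

	/* Do the potentially expensive preparation ahead of time... */
	if (ioctl(fd, VIDIOC_PREPARE_BUF, &buf) < 0)
		return -1;

	/* ...so the later QBUF only has to link the buffer in. */
	return ioctl(fd, VIDIOC_QBUF, &buf);
}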
/**
* vb2_qbuf() - Queue a buffer from userspace
* @q: videobuf2 queue
@@ -826,8 +1071,8 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
* Should be called from vidioc_qbuf ioctl handler of a driver.
* This function:
* 1) verifies the passed buffer,
- * 2) calls buf_prepare callback in the driver (if provided), in which
- * driver-specific buffer initialization can be performed,
+ * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
* 3) if streaming is on, queues the buffer in driver by the means of buf_queue
* callback for processing.
*
@@ -837,7 +1082,7 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
struct vb2_buffer *vb;
- int ret = 0;
+ int ret;
if (q->fileio) {
dprintk(1, "qbuf: file io in progress\n");
@@ -866,29 +1111,18 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
return -EINVAL;
}
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ switch (vb->state) {
+ case VB2_BUF_STATE_DEQUEUED:
+ ret = __buf_prepare(vb, b);
+ if (ret)
+ return ret;
+ case VB2_BUF_STATE_PREPARED:
+ break;
+ default:
dprintk(1, "qbuf: buffer already in use\n");
return -EINVAL;
}
- if (q->memory == V4L2_MEMORY_MMAP)
- ret = __qbuf_mmap(vb, b);
- else if (q->memory == V4L2_MEMORY_USERPTR)
- ret = __qbuf_userptr(vb, b);
- else {
- WARN(1, "Invalid queue type\n");
- return -EINVAL;
- }
-
- if (ret)
- return ret;
-
- ret = call_qop(q, buf_prepare, vb);
- if (ret) {
- dprintk(1, "qbuf: buffer preparation failed\n");
- return ret;
- }
-
/*
* Add to the queued buffers list, a buffer will stay on it until
* dequeued in dqbuf.
@@ -903,6 +1137,9 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
if (q->streaming)
__enqueue_in_driver(vb);
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
return 0;
}
@@ -1096,6 +1333,43 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
EXPORT_SYMBOL_GPL(vb2_dqbuf);
/**
+ * __vb2_queue_cancel() - cancel and stop (pause) streaming
+ *
+ * Removes all queued buffers from driver's queue and all buffers queued by
+ * userspace from videobuf's queue. Returns to state after reqbufs.
+ */
+static void __vb2_queue_cancel(struct vb2_queue *q)
+{
+ unsigned int i;
+
+ /*
+ * Tell driver to stop all transactions and release all queued
+ * buffers.
+ */
+ if (q->streaming)
+ call_qop(q, stop_streaming, q);
+ q->streaming = 0;
+
+ /*
+ * Remove all buffers from videobuf's list...
+ */
+ INIT_LIST_HEAD(&q->queued_list);
+ /*
+ * ...and done list; userspace will not receive any buffers it
+ * has not already dequeued before initiating cancel.
+ */
+ INIT_LIST_HEAD(&q->done_list);
+ atomic_set(&q->queued_count, 0);
+ wake_up_all(&q->done_wq);
+
+ /*
+ * Reinitialize all buffers for next use.
+ */
+ for (i = 0; i < q->num_buffers; ++i)
+ q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+}
+
+/**
* vb2_streamon - start streaming
* @q: videobuf2 queue
* @type: type argument passed from userspace to vidioc_streamon handler
@@ -1103,7 +1377,7 @@ EXPORT_SYMBOL_GPL(vb2_dqbuf);
* Should be called from vidioc_streamon handler of a driver.
* This function:
* 1) verifies current state
- * 2) starts streaming and passes any previously queued buffers to the driver
+ * 2) passes any previously queued buffers to the driver and starts streaming
*
* The return values from this function are intended to be directly returned
* from vidioc_streamon handler in the driver.
@@ -1129,75 +1403,29 @@ int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
}
/*
- * Cannot start streaming on an OUTPUT device if no buffers have
- * been queued yet.
+ * If any buffers were queued before streamon,
+ * we can now pass them to driver for processing.
*/
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
- if (list_empty(&q->queued_list)) {
- dprintk(1, "streamon: no output buffers queued\n");
- return -EINVAL;
- }
- }
+ list_for_each_entry(vb, &q->queued_list, queued_entry)
+ __enqueue_in_driver(vb);
/*
* Let driver notice that streaming state has been enabled.
*/
- ret = call_qop(q, start_streaming, q);
+ ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
if (ret) {
dprintk(1, "streamon: driver refused to start streaming\n");
+ __vb2_queue_cancel(q);
return ret;
}
q->streaming = 1;
- /*
- * If any buffers were queued before streamon,
- * we can now pass them to driver for processing.
- */
- list_for_each_entry(vb, &q->queued_list, queued_entry)
- __enqueue_in_driver(vb);
-
dprintk(3, "Streamon successful\n");
return 0;
}
EXPORT_SYMBOL_GPL(vb2_streamon);
-/**
- * __vb2_queue_cancel() - cancel and stop (pause) streaming
- *
- * Removes all queued buffers from driver's queue and all buffers queued by
- * userspace from videobuf's queue. Returns to state after reqbufs.
- */
-static void __vb2_queue_cancel(struct vb2_queue *q)
-{
- unsigned int i;
-
- /*
- * Tell driver to stop all transactions and release all queued
- * buffers.
- */
- if (q->streaming)
- call_qop(q, stop_streaming, q);
- q->streaming = 0;
-
- /*
- * Remove all buffers from videobuf's list...
- */
- INIT_LIST_HEAD(&q->queued_list);
- /*
- * ...and done list; userspace will not receive any buffers it
- * has not already dequeued before initiating cancel.
- */
- INIT_LIST_HEAD(&q->done_list);
- atomic_set(&q->queued_count, 0);
- wake_up_all(&q->done_wq);
-
- /*
- * Reinitialize all buffers for next use.
- */
- for (i = 0; i < q->num_buffers; ++i)
- q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
-}
/**
* vb2_streamoff - stop streaming
@@ -1336,14 +1564,42 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
if (ret)
return ret;
- vb_plane->mapped = 1;
- vb->num_planes_mapped++;
-
dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
+#ifndef CONFIG_MMU
+unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long off = pgoff << PAGE_SHIFT;
+ struct vb2_buffer *vb;
+ unsigned int buffer, plane;
+ int ret;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "Queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the plane corresponding to the offset passed by userspace.
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+ return ret;
+
+ vb = q->bufs[buffer];
+
+ return (unsigned long)vb2_plane_vaddr(vb, plane);
+}
+EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
+#endif
+
static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);
@@ -1461,7 +1717,7 @@ void vb2_queue_release(struct vb2_queue *q)
{
__vb2_cleanup_fileio(q);
__vb2_queue_cancel(q);
- __vb2_queue_free(q);
+ __vb2_queue_free(q, q->num_buffers);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
index a790a5f8c06..f17ad98fcc5 100644
--- a/drivers/media/video/videobuf2-dma-contig.c
+++ b/drivers/media/video/videobuf2-dma-contig.c
@@ -24,7 +24,7 @@ struct vb2_dc_conf {
struct vb2_dc_buf {
struct vb2_dc_conf *conf;
void *vaddr;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
unsigned long size;
struct vm_area_struct *vma;
atomic_t refcount;
@@ -42,7 +42,7 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->paddr,
+ buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
GFP_KERNEL);
if (!buf->vaddr) {
dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
@@ -69,7 +69,7 @@ static void vb2_dma_contig_put(void *buf_priv)
if (atomic_dec_and_test(&buf->refcount)) {
dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
- buf->paddr);
+ buf->dma_addr);
kfree(buf);
}
}
@@ -78,7 +78,7 @@ static void *vb2_dma_contig_cookie(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
- return &buf->paddr;
+ return &buf->dma_addr;
}
static void *vb2_dma_contig_vaddr(void *buf_priv)
@@ -106,7 +106,7 @@ static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
- return vb2_mmap_pfn_range(vma, buf->paddr, buf->size,
+ return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
&vb2_common_vm_ops, &buf->handler);
}
@@ -115,14 +115,14 @@ static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
{
struct vb2_dc_buf *buf;
struct vm_area_struct *vma;
- dma_addr_t paddr = 0;
+ dma_addr_t dma_addr = 0;
int ret;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- ret = vb2_get_contig_userptr(vaddr, size, &vma, &paddr);
+ ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
if (ret) {
printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
vaddr);
@@ -131,7 +131,7 @@ static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
}
buf->size = size;
- buf->paddr = paddr;
+ buf->dma_addr = dma_addr;
buf->vma = vma;
return buf;
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index 065f468faf8..3bad8b105fe 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -75,12 +75,6 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
__func__, buf->sg_desc.num_pages);
-
- if (!buf->vaddr)
- buf->vaddr = vm_map_ram(buf->pages,
- buf->sg_desc.num_pages,
- -1,
- PAGE_KERNEL);
return buf;
fail_pages_alloc:
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index 569eeb3dfd5..71a7a78c3fc 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -68,12 +68,12 @@ void vb2_put_vma(struct vm_area_struct *vma)
if (!vma)
return;
- if (vma->vm_file)
- fput(vma->vm_file);
-
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
kfree(vma);
}
EXPORT_SYMBOL_GPL(vb2_put_vma);
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index a848bd2af97..7d754fbcccb 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -650,9 +650,9 @@ static void vivi_stop_generating(struct vivi_dev *dev)
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned long sizes[],
- void *alloc_ctxs[])
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct vivi_dev *dev = vb2_get_drv_priv(vq);
unsigned long size;
@@ -766,7 +766,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static int start_streaming(struct vb2_queue *vq)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vivi_dev *dev = vb2_get_drv_priv(vq);
dprintk(dev, 1, "%s\n", __func__);
@@ -852,6 +852,11 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
(f->fmt.pix.width * dev->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ if (dev->fmt->fourcc == V4L2_PIX_FMT_YUYV ||
+ dev->fmt->fourcc == V4L2_PIX_FMT_UYVY)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
return 0;
}
@@ -885,6 +890,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ if (fmt->fourcc == V4L2_PIX_FMT_YUYV ||
+ fmt->fourcc == V4L2_PIX_FMT_UYVY)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
return 0;
}
@@ -948,6 +958,14 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
return vb2_streamoff(&dev->vb_vidq, i);
}
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ struct vivi_dev *dev = video_drvdata(file);
+
+ v4l2_ctrl_handler_log_status(&dev->ctrl_handler, dev->v4l2_dev.name);
+ return 0;
+}
+
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
{
return 0;
@@ -1191,6 +1209,7 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_s_input = vidioc_s_input,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
+ .vidioc_log_status = vidioc_log_status,
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
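With start_streaming() now receiving the number of already-queued buffers and vb2_streamon() cancelling the queue when it fails, a driver that needs a minimum number of buffers before the hardware can run might use the argument as below. MIN_BUFFERS_FOR_HW and mydrv_hw_start() are assumptions; vivi above simply ignores the count.

/* Sketch only: using the new "count" argument to start_streaming(). */
#define MIN_BUFFERS_FOR_HW 3	/* assumed hardware requirement */

static int mydrv_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	/*
	 * "count" is the number of buffers already queued by userspace.
	 * Returning an error here makes vb2_streamon() cancel the queue
	 * and reset the buffer states, as added in the hunk above.
	 */
	if (count < MIN_BUFFERS_FOR_HW)
		return -ENOBUFS;

	return mydrv_hw_start(vb2_get_drv_priv(vq));	/* hypothetical */
}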
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index ca372eb911d..e5cad6ff64a 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -331,7 +331,7 @@ static int vpx3220_status(struct v4l2_subdev *sd, u32 *pstatus, v4l2_std_id *pst
if (pstd)
*pstd = std;
if (pstatus)
- *pstatus = status;
+ *pstatus = res;
return 0;
}
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index c492846c1c5..e78cf94f491 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -1638,6 +1638,9 @@ static int zr364xx_probe(struct usb_interface *intf,
if (!cam->read_endpoint) {
dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
+ video_device_release(cam->vdev);
+ kfree(cam);
+ cam = NULL;
return -ENOMEM;
}
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 8c1d85e27be..56ff19cdc2a 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRIVER_NAME "memstick"
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 4a1909a32b6..9729b92fbfd 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memstick.h>
+#include <linux/module.h>
#define DRIVER_NAME "mspro_block"
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index d89d925caec..6ce70e9615d 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -17,6 +17,7 @@
#include <linux/highmem.h>
#include <linux/memstick.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRIVER_NAME "jmb38x_ms"
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 03f71a431c8..b7aacf47703 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -17,6 +17,7 @@
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
+#include <linux/module.h>
#include <asm/io.h>
#define DRIVER_NAME "tifm_ms"
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7956a10f948..e9c6a6047a0 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -63,6 +63,8 @@
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
+#include <linux/kthread.h>
+#include <scsi/scsi_host.h>
#include "mptbase.h"
#include "lsi/mpi_log_fc.h"
@@ -323,6 +325,32 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
return rc;
}
+
+/**
+ * mpt_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt_remove_dead_ioc_func(void *arg)
+{
+ MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if ((ioc == NULL))
+ return -1;
+
+ pdev = ioc->pcidev;
+ if ((pdev == NULL))
+ return -1;
+
+ pci_remove_bus_device(pdev);
+ return 0;
+}
+
+
+
/**
* mpt_fault_reset_work - work performed on workq after ioc fault
* @work: input argument, used to derive ioc
@@ -336,12 +364,45 @@ mpt_fault_reset_work(struct work_struct *work)
u32 ioc_raw_state;
int rc;
unsigned long flags;
+ MPT_SCSI_HOST *hd;
+ struct task_struct *p;
if (ioc->ioc_reset_in_progress || !ioc->active)
goto out;
+
ioc_raw_state = mpt_GetIocState(ioc, 0);
- if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+ if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
+ printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
+ ioc->name, __func__);
+
+ /*
+ * Call mptscsih_flush_pending_cmds callback so that we
+ * flush all pending commands back to OS.
+ * This call is required to avoid a deadlock at the block layer.
+ * A dead IOC will fail to do a diag reset, and this call is safe
+ * since dead ioc will never return any command back from HW.
+ */
+ hd = shost_priv(ioc->sh);
+ ioc->schedule_dead_ioc_flush_running_cmds(hd);
+
+ /* Remove the dead host */
+ p = kthread_run(mpt_remove_dead_ioc_func, ioc,
+ "mpt_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p)) {
+ printk(MYIOC_s_ERR_FMT
+ "%s: Running mpt_dead_ioc thread failed !\n",
+ ioc->name, __func__);
+ } else {
+ printk(MYIOC_s_WARN_FMT
+ "%s: Running mpt_dead_ioc thread success !\n",
+ ioc->name, __func__);
+ }
+ return; /* don't rearm timer */
+ }
+
+ if ((ioc_raw_state & MPI_IOC_STATE_MASK)
+ == MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
@@ -6413,8 +6474,19 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
pReq->Action, ioc->mptbase_cmds.status, timeleft));
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
- if (!timeleft)
+ if (!timeleft) {
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock,
+ flags);
+ printk(MYIOC_s_INFO_FMT "%s: host reset in"
+ " progress, mpt_config timed out!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
issue_hard_reset = 1;
+ }
goto out;
}
@@ -7128,7 +7200,18 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- return 0;
+ ioc->wait_on_reset_completion = 1;
+ do {
+ ssleep(1);
+ } while (ioc->ioc_reset_in_progress == 1);
+ ioc->wait_on_reset_completion = 0;
+ return ioc->reset_status;
+ }
+ if (ioc->wait_on_reset_completion) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ rc = 0;
+ time_count = jiffies;
+ goto exit;
}
ioc->ioc_reset_in_progress = 1;
if (ioc->alt_ioc)
@@ -7165,6 +7248,7 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
ioc->ioc_reset_in_progress = 0;
ioc->taskmgmt_quiesce_io = 0;
ioc->taskmgmt_in_progress = 0;
+ ioc->reset_status = rc;
if (ioc->alt_ioc) {
ioc->alt_ioc->ioc_reset_in_progress = 0;
ioc->alt_ioc->taskmgmt_quiesce_io = 0;
@@ -7180,7 +7264,7 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
ioc->alt_ioc, MPT_IOC_POST_RESET);
}
}
-
+exit:
dtmprintk(ioc,
printk(MYIOC_s_DEBUG_FMT
"HardResetHandler: completed (%d seconds): %s\n", ioc->name,
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index fe902338539..b4d24dc081a 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.19"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.19"
+#define MPT_LINUX_VERSION_COMMON "3.04.20"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.20"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -554,10 +554,47 @@ struct mptfc_rport_info
u8 flags;
};
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+/*
+ * MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers
+ * Private to the driver.
+ */
+
+#define MPT_HOST_BUS_UNKNOWN (0xFF)
+#define MPT_HOST_TOO_MANY_TM (0x05)
+#define MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
+#define MPT_HOST_NO_CHAIN (0xFFFFFFFF)
+#define MPT_NVRAM_MASK_TIMEOUT (0x000000FF)
+#define MPT_NVRAM_SYNC_MASK (0x0000FF00)
+#define MPT_NVRAM_SYNC_SHIFT (8)
+#define MPT_NVRAM_DISCONNECT_ENABLE (0x00010000)
+#define MPT_NVRAM_ID_SCAN_ENABLE (0x00020000)
+#define MPT_NVRAM_LUN_SCAN_ENABLE (0x00040000)
+#define MPT_NVRAM_TAG_QUEUE_ENABLE (0x00080000)
+#define MPT_NVRAM_WIDE_DISABLE (0x00100000)
+#define MPT_NVRAM_BOOT_CHOICE (0x00200000)
+
+typedef enum {
+ FC,
+ SPI,
+ SAS
+} BUS_TYPE;
+
+typedef struct _MPT_SCSI_HOST {
+ struct _MPT_ADAPTER *ioc;
+ ushort sel_timeout[MPT_MAX_FC_DEVICES];
+ char *info_kbuf;
+ long last_queue_full;
+ u16 spi_pending;
+ struct list_head target_reset_list;
+} MPT_SCSI_HOST;
+
typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
dma_addr_t dma_addr);
typedef void (*MPT_SCHEDULE_TARGET_RESET)(void *ioc);
+typedef void (*MPT_FLUSH_RUNNING_CMDS)(MPT_SCSI_HOST *hd);
/*
* Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -716,7 +753,10 @@ typedef struct _MPT_ADAPTER
int taskmgmt_in_progress;
u8 taskmgmt_quiesce_io;
u8 ioc_reset_in_progress;
+ u8 reset_status;
+ u8 wait_on_reset_completion;
MPT_SCHEDULE_TARGET_RESET schedule_target_reset;
+ MPT_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
struct work_struct sas_persist_task;
struct work_struct fc_setup_reset_work;
@@ -830,19 +870,6 @@ typedef struct _MPT_LOCAL_REPLY {
u32 pad;
} MPT_LOCAL_REPLY;
-#define MPT_HOST_BUS_UNKNOWN (0xFF)
-#define MPT_HOST_TOO_MANY_TM (0x05)
-#define MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
-#define MPT_HOST_NO_CHAIN (0xFFFFFFFF)
-#define MPT_NVRAM_MASK_TIMEOUT (0x000000FF)
-#define MPT_NVRAM_SYNC_MASK (0x0000FF00)
-#define MPT_NVRAM_SYNC_SHIFT (8)
-#define MPT_NVRAM_DISCONNECT_ENABLE (0x00010000)
-#define MPT_NVRAM_ID_SCAN_ENABLE (0x00020000)
-#define MPT_NVRAM_LUN_SCAN_ENABLE (0x00040000)
-#define MPT_NVRAM_TAG_QUEUE_ENABLE (0x00080000)
-#define MPT_NVRAM_WIDE_DISABLE (0x00100000)
-#define MPT_NVRAM_BOOT_CHOICE (0x00200000)
/* The TM_STATE variable is used to provide strict single threading of TM
* requests as well as communicate TM error conditions.
@@ -851,21 +878,6 @@ typedef struct _MPT_LOCAL_REPLY {
#define TM_STATE_IN_PROGRESS (1)
#define TM_STATE_ERROR (2)
-typedef enum {
- FC,
- SPI,
- SAS
-} BUS_TYPE;
-
-typedef struct _MPT_SCSI_HOST {
- MPT_ADAPTER *ioc;
- ushort sel_timeout[MPT_MAX_FC_DEVICES];
- char *info_kbuf;
- long last_queue_full;
- u16 spi_pending;
- struct list_head target_reset_list;
-} MPT_SCSI_HOST;
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* More Dynamic Multi-Pathing stuff...
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7596aecd507..9d950429854 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -92,6 +92,11 @@ static int max_lun = MPTSAS_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+static int mpt_loadtime_max_sectors = 8192;
+module_param(mpt_loadtime_max_sectors, int, 0);
+MODULE_PARM_DESC(mpt_loadtime_max_sectors,
+ " Maximum sector define for Host Bus Adaptor.Range 64 to 8192 default=8192");
+
static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
@@ -285,10 +290,11 @@ mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
- devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
- ioc->name, __func__, fw_event));
- queue_delayed_work(ioc->fw_event_q, &fw_event->work,
- delay);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p) "
+ "on cpuid %d\n", ioc->name, __func__,
+ fw_event, smp_processor_id()));
+ queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
+ &fw_event->work, delay);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -300,10 +306,11 @@ mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
- "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
+ "(fw_event=0x%p)on cpuid %d\n", ioc->name, __func__,
+ fw_event, smp_processor_id()));
fw_event->retries++;
- queue_delayed_work(ioc->fw_event_q, &fw_event->work,
- msecs_to_jiffies(delay));
+ queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
+ &fw_event->work, msecs_to_jiffies(delay));
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -1943,6 +1950,15 @@ static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
goto done;
}
+ /* If the IOC is in reset from an internal context, do not
+ * execute EH for the same IOC; the SML should reset the timer.
+ */
+ if (ioc->ioc_reset_in_progress) {
+ dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: ioc is in reset, "
+ "SML needs to reset the timer (sc=%p)\n",
+ ioc->name, __func__, sc));
+ rc = BLK_EH_RESET_TIMER;
+ }
vdevice = sc->device->hostdata;
if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
|| vdevice->vtarget->deleted)) {
@@ -5142,6 +5158,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->TaskCtx = mptsasTaskCtx;
ioc->InternalCtx = mptsasInternalCtx;
ioc->schedule_target_reset = &mptsas_schedule_target_reset;
+ ioc->schedule_dead_ioc_flush_running_cmds =
+ &mptscsih_flush_running_cmds;
/* Added sanity check on readiness of the MPT adapter.
*/
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
@@ -5239,6 +5257,21 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sh->sg_tablesize = numSGE;
}
+ if (mpt_loadtime_max_sectors) {
+ if (mpt_loadtime_max_sectors < 64 ||
+ mpt_loadtime_max_sectors > 8192) {
+ printk(MYIOC_s_INFO_FMT "Invalid value passed for "
+ "mpt_loadtime_max_sectors %d. "
+ "Range from 64 to 8192\n", ioc->name,
+ mpt_loadtime_max_sectors);
+ }
+ mpt_loadtime_max_sectors &= 0xFFFFFFFE;
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Resetting max sector to %d from %d\n",
+ ioc->name, mpt_loadtime_max_sectors, sh->max_sectors));
+ sh->max_sectors = mpt_loadtime_max_sectors;
+ }
+
hd = shost_priv(sh);
hd->ioc = ioc;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index ce61a576976..0c3ced70707 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -830,7 +830,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
pScsiReq->CDB[0] == READ_10 ||
pScsiReq->CDB[0] == READ_12 ||
- pScsiReq->CDB[0] == READ_16 ||
+ (pScsiReq->CDB[0] == READ_16 &&
+ ((pScsiReq->CDB[1] & 0x02) == 0)) ||
pScsiReq->CDB[0] == VERIFY ||
pScsiReq->CDB[0] == VERIFY_16) {
if (scsi_bufflen(sc) !=
@@ -1024,7 +1025,7 @@ out:
*
* Must be called while new I/Os are being queued.
*/
-static void
+void
mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
{
MPT_ADAPTER *ioc = hd->ioc;
@@ -1055,6 +1056,7 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
sc->scsi_done(sc);
}
}
+EXPORT_SYMBOL(mptscsih_flush_running_cmds);
/*
* mptscsih_search_running_cmds - Delete any commands associated
@@ -1629,7 +1631,13 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
return 0;
}
- if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
+ /* DOORBELL ACTIVE check is not required if
+ * MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q is supported.
+ */
+
+ if (!((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q)
+ && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) &&
+ (ioc_raw_state & MPI_DOORBELL_ACTIVE)) {
printk(MYIOC_s_WARN_FMT
"TaskMgmt type=%x: ioc_state: "
"DOORBELL_ACTIVE (0x%x)!\n",
@@ -1728,7 +1736,9 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!! doorbell=0x%08x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0));
- retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ retval = (ioc->bus_type == SAS) ?
+ mpt_HardResetHandler(ioc, CAN_SLEEP) :
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 45a5ff3eff6..43e75ff3992 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -135,3 +135,4 @@ extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern struct device_attribute *mptscsih_host_attrs[];
extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
+extern void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 73e4658af53..7190d5239b4 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/i2o.h>
+#include <linux/module.h>
#include "core.h"
#define OSM_DESCRIPTION "I2O-subsystem"
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a67adcbd0fa..f1391c21ef2 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2,23 +2,8 @@
# Multifunction miscellaneous devices
#
-menuconfig MFD_SUPPORT
- bool "Multifunction device drivers"
- depends on HAS_IOMEM
- default y
- help
- Multifunction devices embed several functions (e.g. GPIOs,
- touchscreens, keyboards, current regulators, power management chips,
- etc...) in one single integrated circuit. They usually talk to the
- main CPU through one or more IRQ lines and low speed data busses (SPI,
- I2C, etc..). They appear as one single device to the main system
- through the data bus and the MFD framework allows for sub devices
- (a.k.a. functions) to appear as discrete platform devices.
- MFDs are typically found on embedded platforms.
-
- This option alone does not add any kernel code.
-
-if MFD_SUPPORT
+if HAS_IOMEM
+menu "Multifunction device drivers"
config MFD_CORE
tristate
@@ -390,6 +375,7 @@ config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
depends on I2C
+ select REGMAP_I2C
help
Support for the Wolfson Microelectronics WM8400 PMIC and audio
CODEC. This driver provides common support for accessing
@@ -503,6 +489,7 @@ config MFD_WM8994
config MFD_PCF50633
tristate "Support for NXP PCF50633"
depends on I2C
+ select REGMAP_I2C
help
Say yes here if you have NXP PCF50633 chip on your board.
This core driver provides register access and IRQ handling
@@ -579,6 +566,23 @@ config EZX_PCAP
This enables the PCAP ASIC present on EZX Phones. This is
needed for MMC, TouchScreen, Sound, USB, etc..
+config AB5500_CORE
+ bool "ST-Ericsson AB5500 Mixed Signal Power Management chip"
+ depends on ABX500_CORE && MFD_DB5500_PRCMU
+ select MFD_CORE
+ help
+ Select this option to enable access to AB5500 power management
+ chip. This connects to the db5500 chip over I2C via the PRCMU.
+ This chip embeds various other multimedia functionalities as well.
+
+config AB5500_DEBUG
+ bool "Enable debug info via debugfs"
+ depends on AB5500_CORE && DEBUG_FS
+ default y if DEBUG_FS
+ help
+ Select this option if you want debug information from the AB5500
+ using the debug filesystem, debugfs.
+
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
depends on GENERIC_HARDIRQS && ABX500_CORE
@@ -615,20 +619,6 @@ config AB8500_GPADC
help
AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
-config AB3550_CORE
- bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
- select MFD_CORE
- depends on I2C=y && GENERIC_HARDIRQS && ABX500_CORE
- help
- Select this to enable the AB3550 Mixed Signal IC core
- functionality. This connects to a AB3550 on the I2C bus
- and expose a number of symbols needed for dependent devices
- to read and write registers and subscribe to events from
- this multi-functional IC. This is needed to use other features
- of the AB3550 such as battery-backed RTC, charging control,
- LEDs, vibrator, system power and temperature, power management
- and ALSA sound.
-
config MFD_DB8500_PRCMU
bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
depends on UX500_SOC_DB8500
@@ -773,7 +763,17 @@ config MFD_AAT2870_CORE
additional drivers must be enabled in order to use the
functionality of the device.
-endif # MFD_SUPPORT
+config MFD_INTEL_MSIC
+ bool "Support for Intel MSIC"
+ depends on INTEL_SCU_IPC
+ select MFD_CORE
+ help
+ Select this option to enable access to Intel MSIC (Avatele
+ Passage) chip. This chip embeds audio, battery, GPIO, etc.
+ devices used in Intel Medfield platforms.
+
+endmenu
+endif
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c58020303d1..b2292eb7524 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -79,7 +79,8 @@ obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
-obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
+obj-$(CONFIG_AB5500_CORE) += ab5500-core.o
+obj-$(CONFIG_AB5500_DEBUG) += ab5500-debugfs.o
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
@@ -102,3 +103,4 @@ obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
+obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 345dc658ef0..02c42015ba5 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -295,7 +295,7 @@ static ssize_t aat2870_reg_write_file(struct file *file,
{
struct aat2870_data *aat2870 = file->private_data;
char buf[32];
- int buf_size;
+ ssize_t buf_size;
char *start = buf;
unsigned long addr, val;
int ret;
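
The aat2870 hunk above widens buf_size from int to ssize_t so that it matches the signed, pointer-sized type returned by kernel copy helpers such as simple_write_to_buffer(). The snippet below is only a userspace analogue of that pattern; copy_into() is a made-up stand-in for the kernel helper, and the point is simply to keep the signed return value in ssize_t before testing it for errors.

/* Illustration only: a stand-in for a kernel helper that returns ssize_t
 * (negative on failure, number of bytes copied on success). */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t copy_into(char *dst, size_t dst_size, const char *src, size_t count)
{
	if (count >= dst_size)
		return -1;		/* would be a negative errno in the kernel */
	memcpy(dst, src, count);
	dst[count] = '\0';
	return (ssize_t)count;
}

int main(void)
{
	char buf[32];
	ssize_t buf_size = copy_into(buf, sizeof(buf), "0x1A 0x55", 9);

	if (buf_size < 0)	/* signed type keeps the error check meaningful */
		return 1;
	printf("copied %zd bytes: %s\n", buf_size, buf);
	return 0;
}
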
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index a20e1c41bed..60107ee166f 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -12,6 +12,7 @@
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -809,7 +810,7 @@ struct ab_family_id {
char *name;
};
-static const struct ab_family_id ids[] __devinitdata = {
+static const struct ab_family_id ids[] __devinitconst = {
/* AB3100 */
{
.id = 0xc0,
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
deleted file mode 100644
index 56ba1943c91..00000000000
--- a/drivers/mfd/ab3550-core.c
+++ /dev/null
@@ -1,1380 +0,0 @@
-/*
- * Copyright (C) 2007-2010 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Low-level core for exclusive access to the AB3550 IC on the I2C bus
- * and some basic chip-configuration.
- * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
- * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
- * Author: Mattias Wallin <mattias.wallin@stericsson.com>
- * Author: Rickard Andersson <rickard.andersson@stericsson.com>
- */
-
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/random.h>
-#include <linux/workqueue.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/mfd/abx500.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/spinlock.h>
-#include <linux/mfd/core.h>
-
-#define AB3550_NAME_STRING "ab3550"
-#define AB3550_ID_FORMAT_STRING "AB3550 %s"
-#define AB3550_NUM_BANKS 2
-#define AB3550_NUM_EVENT_REG 5
-
-/* These are the only registers inside AB3550 used in this main file */
-
-/* Chip ID register */
-#define AB3550_CID_REG 0x20
-
-/* Interrupt event registers */
-#define AB3550_EVENT_BANK 0
-#define AB3550_EVENT_REG 0x22
-
-/* Read/write operation values. */
-#define AB3550_PERM_RD (0x01)
-#define AB3550_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB3550_PERM_RO (AB3550_PERM_RD)
-#define AB3550_PERM_RW (AB3550_PERM_RD | AB3550_PERM_WR)
-
-/**
- * struct ab3550
- * @access_mutex: lock out concurrent accesses to the AB registers
- * @i2c_client: I2C client for this chip
- * @chip_name: name of this chip variant
- * @chip_id: 8 bit chip ID for this chip variant
- * @mask_work: a worker for writing to mask registers
- * @event_lock: a lock to protect the event_mask
- * @event_mask: a local copy of the mask event registers
- * @startup_events: a copy of the first reading of the event registers
- * @startup_events_read: whether the first events have been read
- */
-struct ab3550 {
- struct mutex access_mutex;
- struct i2c_client *i2c_client[AB3550_NUM_BANKS];
- char chip_name[32];
- u8 chip_id;
- struct work_struct mask_work;
- spinlock_t event_lock;
- u8 event_mask[AB3550_NUM_EVENT_REG];
- u8 startup_events[AB3550_NUM_EVENT_REG];
- bool startup_events_read;
-#ifdef CONFIG_DEBUG_FS
- unsigned int debug_bank;
- unsigned int debug_address;
-#endif
-};
-
-/**
- * struct ab3550_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab3550_reg_range {
- u8 first;
- u8 last;
- u8 perm;
-};
-
-/**
- * struct ab3550_reg_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab3550_reg_ranges {
- u8 count;
- const struct ab3550_reg_range *range;
-};
-
-/*
- * Permissible register ranges for reading and writing per device and bank.
- *
- * The ranges must be listed in increasing address order, and no overlaps are
- * allowed. It is assumed that write permission implies read permission
- * (i.e. only RO and RW permissions should be used). Ranges with write
- * permission must not be split up.
- */
-
-#define NO_RANGE {.count = 0, .range = NULL,}
-
-static struct
-ab3550_reg_ranges ab3550_reg_ranges[AB3550_NUM_DEVICES][AB3550_NUM_BANKS] = {
- [AB3550_DEVID_DAC] = {
- NO_RANGE,
- {
- .count = 2,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0xb0,
- .last = 0xba,
- .perm = AB3550_PERM_RW,
- },
- {
- .first = 0xbc,
- .last = 0xc3,
- .perm = AB3550_PERM_RW,
- },
- },
- },
- },
- [AB3550_DEVID_LEDS] = {
- NO_RANGE,
- {
- .count = 2,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x5a,
- .last = 0x88,
- .perm = AB3550_PERM_RW,
- },
- {
- .first = 0x8a,
- .last = 0xad,
- .perm = AB3550_PERM_RW,
- },
- }
- },
- },
- [AB3550_DEVID_POWER] = {
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x21,
- .last = 0x21,
- .perm = AB3550_PERM_RO,
- },
- }
- },
- NO_RANGE,
- },
- [AB3550_DEVID_REGULATORS] = {
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x69,
- .last = 0xa3,
- .perm = AB3550_PERM_RW,
- },
- }
- },
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x14,
- .last = 0x16,
- .perm = AB3550_PERM_RW,
- },
- }
- },
- },
- [AB3550_DEVID_SIM] = {
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x21,
- .last = 0x21,
- .perm = AB3550_PERM_RO,
- },
- }
- },
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x14,
- .last = 0x17,
- .perm = AB3550_PERM_RW,
- },
- }
-
- },
- },
- [AB3550_DEVID_UART] = {
- NO_RANGE,
- NO_RANGE,
- },
- [AB3550_DEVID_RTC] = {
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x0c,
- .perm = AB3550_PERM_RW,
- },
- }
- },
- NO_RANGE,
- },
- [AB3550_DEVID_CHARGER] = {
- {
- .count = 2,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x10,
- .last = 0x1a,
- .perm = AB3550_PERM_RW,
- },
- {
- .first = 0x21,
- .last = 0x21,
- .perm = AB3550_PERM_RO,
- },
- }
- },
- NO_RANGE,
- },
- [AB3550_DEVID_ADC] = {
- NO_RANGE,
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x20,
- .last = 0x56,
- .perm = AB3550_PERM_RW,
- },
-
- }
- },
- },
- [AB3550_DEVID_FUELGAUGE] = {
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x21,
- .last = 0x21,
- .perm = AB3550_PERM_RO,
- },
- }
- },
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x0e,
- .perm = AB3550_PERM_RW,
- },
- }
- },
- },
- [AB3550_DEVID_VIBRATOR] = {
- NO_RANGE,
- {
- .count = 1,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x10,
- .last = 0x13,
- .perm = AB3550_PERM_RW,
- },
-
- }
- },
- },
- [AB3550_DEVID_CODEC] = {
- {
- .count = 2,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x31,
- .last = 0x63,
- .perm = AB3550_PERM_RW,
- },
- {
- .first = 0x65,
- .last = 0x68,
- .perm = AB3550_PERM_RW,
- },
- }
- },
- NO_RANGE,
- },
-};
-
-static struct mfd_cell ab3550_devs[AB3550_NUM_DEVICES] = {
- [AB3550_DEVID_DAC] = {
- .name = "ab3550-dac",
- .id = AB3550_DEVID_DAC,
- .num_resources = 0,
- },
- [AB3550_DEVID_LEDS] = {
- .name = "ab3550-leds",
- .id = AB3550_DEVID_LEDS,
- },
- [AB3550_DEVID_POWER] = {
- .name = "ab3550-power",
- .id = AB3550_DEVID_POWER,
- },
- [AB3550_DEVID_REGULATORS] = {
- .name = "ab3550-regulators",
- .id = AB3550_DEVID_REGULATORS,
- },
- [AB3550_DEVID_SIM] = {
- .name = "ab3550-sim",
- .id = AB3550_DEVID_SIM,
- },
- [AB3550_DEVID_UART] = {
- .name = "ab3550-uart",
- .id = AB3550_DEVID_UART,
- },
- [AB3550_DEVID_RTC] = {
- .name = "ab3550-rtc",
- .id = AB3550_DEVID_RTC,
- },
- [AB3550_DEVID_CHARGER] = {
- .name = "ab3550-charger",
- .id = AB3550_DEVID_CHARGER,
- },
- [AB3550_DEVID_ADC] = {
- .name = "ab3550-adc",
- .id = AB3550_DEVID_ADC,
- .num_resources = 10,
- .resources = (struct resource[]) {
- {
- .name = "TRIGGER-0",
- .flags = IORESOURCE_IRQ,
- .start = 16,
- .end = 16,
- },
- {
- .name = "TRIGGER-1",
- .flags = IORESOURCE_IRQ,
- .start = 17,
- .end = 17,
- },
- {
- .name = "TRIGGER-2",
- .flags = IORESOURCE_IRQ,
- .start = 18,
- .end = 18,
- },
- {
- .name = "TRIGGER-3",
- .flags = IORESOURCE_IRQ,
- .start = 19,
- .end = 19,
- },
- {
- .name = "TRIGGER-4",
- .flags = IORESOURCE_IRQ,
- .start = 20,
- .end = 20,
- },
- {
- .name = "TRIGGER-5",
- .flags = IORESOURCE_IRQ,
- .start = 21,
- .end = 21,
- },
- {
- .name = "TRIGGER-6",
- .flags = IORESOURCE_IRQ,
- .start = 22,
- .end = 22,
- },
- {
- .name = "TRIGGER-7",
- .flags = IORESOURCE_IRQ,
- .start = 23,
- .end = 23,
- },
- {
- .name = "TRIGGER-VBAT-TXON",
- .flags = IORESOURCE_IRQ,
- .start = 13,
- .end = 13,
- },
- {
- .name = "TRIGGER-VBAT",
- .flags = IORESOURCE_IRQ,
- .start = 12,
- .end = 12,
- },
- },
- },
- [AB3550_DEVID_FUELGAUGE] = {
- .name = "ab3550-fuelgauge",
- .id = AB3550_DEVID_FUELGAUGE,
- },
- [AB3550_DEVID_VIBRATOR] = {
- .name = "ab3550-vibrator",
- .id = AB3550_DEVID_VIBRATOR,
- },
- [AB3550_DEVID_CODEC] = {
- .name = "ab3550-codec",
- .id = AB3550_DEVID_CODEC,
- },
-};
-
-/*
- * I2C transactions with error messages.
- */
-static int ab3550_i2c_master_send(struct ab3550 *ab, u8 bank, u8 *data,
- u8 count)
-{
- int err;
-
- err = i2c_master_send(ab->i2c_client[bank], data, count);
- if (err < 0) {
- dev_err(&ab->i2c_client[0]->dev, "send error: %d\n", err);
- return err;
- }
- return 0;
-}
-
-static int ab3550_i2c_master_recv(struct ab3550 *ab, u8 bank, u8 *data,
- u8 count)
-{
- int err;
-
- err = i2c_master_recv(ab->i2c_client[bank], data, count);
- if (err < 0) {
- dev_err(&ab->i2c_client[0]->dev, "receive error: %d\n", err);
- return err;
- }
- return 0;
-}
-
-/*
- * Functionality for getting/setting register values.
- */
-static int get_register_interruptible(struct ab3550 *ab, u8 bank, u8 reg,
- u8 *value)
-{
- int err;
-
- err = mutex_lock_interruptible(&ab->access_mutex);
- if (err)
- return err;
-
- err = ab3550_i2c_master_send(ab, bank, &reg, 1);
- if (!err)
- err = ab3550_i2c_master_recv(ab, bank, value, 1);
-
- mutex_unlock(&ab->access_mutex);
- return err;
-}
-
-static int get_register_page_interruptible(struct ab3550 *ab, u8 bank,
- u8 first_reg, u8 *regvals, u8 numregs)
-{
- int err;
-
- err = mutex_lock_interruptible(&ab->access_mutex);
- if (err)
- return err;
-
- err = ab3550_i2c_master_send(ab, bank, &first_reg, 1);
- if (!err)
- err = ab3550_i2c_master_recv(ab, bank, regvals, numregs);
-
- mutex_unlock(&ab->access_mutex);
- return err;
-}
-
-static int mask_and_set_register_interruptible(struct ab3550 *ab, u8 bank,
- u8 reg, u8 bitmask, u8 bitvalues)
-{
- int err = 0;
-
- if (likely(bitmask)) {
- u8 reg_bits[2] = {reg, 0};
-
- err = mutex_lock_interruptible(&ab->access_mutex);
- if (err)
- return err;
-
- if (bitmask == 0xFF) /* No need to read in this case. */
- reg_bits[1] = bitvalues;
- else { /* Read and modify the register value. */
- u8 bits;
-
- err = ab3550_i2c_master_send(ab, bank, &reg, 1);
- if (err)
- goto unlock_and_return;
- err = ab3550_i2c_master_recv(ab, bank, &bits, 1);
- if (err)
- goto unlock_and_return;
- reg_bits[1] = ((~bitmask & bits) |
- (bitmask & bitvalues));
- }
- /* Write the new value. */
- err = ab3550_i2c_master_send(ab, bank, reg_bits, 2);
-unlock_and_return:
- mutex_unlock(&ab->access_mutex);
- }
- return err;
-}
-
-/*
- * Read/write permission checking functions.
- */
-static bool page_write_allowed(const struct ab3550_reg_ranges *ranges,
- u8 first_reg, u8 last_reg)
-{
- u8 i;
-
- if (last_reg < first_reg)
- return false;
-
- for (i = 0; i < ranges->count; i++) {
- if (first_reg < ranges->range[i].first)
- break;
- if ((last_reg <= ranges->range[i].last) &&
- (ranges->range[i].perm & AB3550_PERM_WR))
- return true;
- }
- return false;
-}
-
-static bool reg_write_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
-{
- return page_write_allowed(ranges, reg, reg);
-}
-
-static bool page_read_allowed(const struct ab3550_reg_ranges *ranges,
- u8 first_reg, u8 last_reg)
-{
- u8 i;
-
- if (last_reg < first_reg)
- return false;
- /* Find the range (if it exists in the list) that includes first_reg. */
- for (i = 0; i < ranges->count; i++) {
- if (first_reg < ranges->range[i].first)
- return false;
- if (first_reg <= ranges->range[i].last)
- break;
- }
- /* Make sure that the entire range up to and including last_reg is
- * readable. This may span several of the ranges in the list.
- */
- while ((i < ranges->count) &&
- (ranges->range[i].perm & AB3550_PERM_RD)) {
- if (last_reg <= ranges->range[i].last)
- return true;
- if ((++i >= ranges->count) ||
- (ranges->range[i].first !=
- (ranges->range[i - 1].last + 1))) {
- break;
- }
- }
- return false;
-}
-
-static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
-{
- return page_read_allowed(ranges, reg, reg);
-}
-
-/*
- * The register access functionality.
- */
-static int ab3550_get_chip_id(struct device *dev)
-{
- struct ab3550 *ab = dev_get_drvdata(dev->parent);
- return (int)ab->chip_id;
-}
-
-static int ab3550_mask_and_set_register_interruptible(struct device *dev,
- u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
-{
- struct ab3550 *ab;
- struct platform_device *pdev = to_platform_device(dev);
-
- if ((AB3550_NUM_BANKS <= bank) ||
- !reg_write_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
- return -EINVAL;
-
- ab = dev_get_drvdata(dev->parent);
- return mask_and_set_register_interruptible(ab, bank, reg,
- bitmask, bitvalues);
-}
-
-static int ab3550_set_register_interruptible(struct device *dev, u8 bank,
- u8 reg, u8 value)
-{
- return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF,
- value);
-}
-
-static int ab3550_get_register_interruptible(struct device *dev, u8 bank,
- u8 reg, u8 *value)
-{
- struct ab3550 *ab;
- struct platform_device *pdev = to_platform_device(dev);
-
- if ((AB3550_NUM_BANKS <= bank) ||
- !reg_read_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
- return -EINVAL;
-
- ab = dev_get_drvdata(dev->parent);
- return get_register_interruptible(ab, bank, reg, value);
-}
-
-static int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
- u8 first_reg, u8 *regvals, u8 numregs)
-{
- struct ab3550 *ab;
- struct platform_device *pdev = to_platform_device(dev);
-
- if ((AB3550_NUM_BANKS <= bank) ||
- !page_read_allowed(&ab3550_reg_ranges[pdev->id][bank],
- first_reg, (first_reg + numregs - 1)))
- return -EINVAL;
-
- ab = dev_get_drvdata(dev->parent);
- return get_register_page_interruptible(ab, bank, first_reg, regvals,
- numregs);
-}
-
-static int ab3550_event_registers_startup_state_get(struct device *dev,
- u8 *event)
-{
- struct ab3550 *ab;
-
- ab = dev_get_drvdata(dev->parent);
- if (!ab->startup_events_read)
- return -EAGAIN; /* Try again later */
-
- memcpy(event, ab->startup_events, AB3550_NUM_EVENT_REG);
- return 0;
-}
-
-static int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
-{
- struct ab3550 *ab;
- struct ab3550_platform_data *plf_data;
- bool val;
-
- ab = irq_get_chip_data(irq);
- plf_data = ab->i2c_client[0]->dev.platform_data;
- irq -= plf_data->irq.base;
- val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0);
-
- return val;
-}
-
-static struct abx500_ops ab3550_ops = {
- .get_chip_id = ab3550_get_chip_id,
- .get_register = ab3550_get_register_interruptible,
- .set_register = ab3550_set_register_interruptible,
- .get_register_page = ab3550_get_register_page_interruptible,
- .set_register_page = NULL,
- .mask_and_set_register = ab3550_mask_and_set_register_interruptible,
- .event_registers_startup_state_get =
- ab3550_event_registers_startup_state_get,
- .startup_irq_enabled = ab3550_startup_irq_enabled,
-};
-
-static irqreturn_t ab3550_irq_handler(int irq, void *data)
-{
- struct ab3550 *ab = data;
- int err;
- unsigned int i;
- u8 e[AB3550_NUM_EVENT_REG];
- u8 *events;
- unsigned long flags;
-
- events = (ab->startup_events_read ? e : ab->startup_events);
-
- err = get_register_page_interruptible(ab, AB3550_EVENT_BANK,
- AB3550_EVENT_REG, events, AB3550_NUM_EVENT_REG);
- if (err)
- goto err_event_rd;
-
- if (!ab->startup_events_read) {
- dev_info(&ab->i2c_client[0]->dev,
- "startup events 0x%x,0x%x,0x%x,0x%x,0x%x\n",
- ab->startup_events[0], ab->startup_events[1],
- ab->startup_events[2], ab->startup_events[3],
- ab->startup_events[4]);
- ab->startup_events_read = true;
- goto out;
- }
-
- /* The two highest bits in event[4] are not used. */
- events[4] &= 0x3f;
-
- spin_lock_irqsave(&ab->event_lock, flags);
- for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
- events[i] &= ~ab->event_mask[i];
- spin_unlock_irqrestore(&ab->event_lock, flags);
-
- for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
- u8 bit;
- u8 event_reg;
-
- dev_dbg(&ab->i2c_client[0]->dev, "IRQ Event[%d]: 0x%2x\n",
- i, events[i]);
-
- event_reg = events[i];
- for (bit = 0; event_reg; bit++, event_reg /= 2) {
- if (event_reg % 2) {
- unsigned int irq;
- struct ab3550_platform_data *plf_data;
-
- plf_data = ab->i2c_client[0]->dev.platform_data;
- irq = plf_data->irq.base + (i * 8) + bit;
- handle_nested_irq(irq);
- }
- }
- }
-out:
- return IRQ_HANDLED;
-
-err_event_rd:
- dev_dbg(&ab->i2c_client[0]->dev, "error reading event registers\n");
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct ab3550_reg_ranges debug_ranges[AB3550_NUM_BANKS] = {
- {
- .count = 6,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x0e,
- },
- {
- .first = 0x10,
- .last = 0x1a,
- },
- {
- .first = 0x1e,
- .last = 0x4f,
- },
- {
- .first = 0x51,
- .last = 0x63,
- },
- {
- .first = 0x65,
- .last = 0xa3,
- },
- {
- .first = 0xa5,
- .last = 0xa8,
- },
- }
- },
- {
- .count = 8,
- .range = (struct ab3550_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x0e,
- },
- {
- .first = 0x10,
- .last = 0x17,
- },
- {
- .first = 0x1a,
- .last = 0x1c,
- },
- {
- .first = 0x20,
- .last = 0x56,
- },
- {
- .first = 0x5a,
- .last = 0x88,
- },
- {
- .first = 0x8a,
- .last = 0xad,
- },
- {
- .first = 0xb0,
- .last = 0xba,
- },
- {
- .first = 0xbc,
- .last = 0xc3,
- },
- }
- },
-};
-
-static int ab3550_registers_print(struct seq_file *s, void *p)
-{
- struct ab3550 *ab = s->private;
- int bank;
-
- seq_printf(s, AB3550_NAME_STRING " register values:\n");
-
- for (bank = 0; bank < AB3550_NUM_BANKS; bank++) {
- unsigned int i;
-
- seq_printf(s, " bank %d:\n", bank);
- for (i = 0; i < debug_ranges[bank].count; i++) {
- u8 reg;
-
- for (reg = debug_ranges[bank].range[i].first;
- reg <= debug_ranges[bank].range[i].last;
- reg++) {
- u8 value;
-
- get_register_interruptible(ab, bank, reg,
- &value);
- seq_printf(s, " [%d/0x%02X]: 0x%02X\n", bank,
- reg, value);
- }
- }
- }
- return 0;
-}
-
-static int ab3550_registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab3550_registers_print, inode->i_private);
-}
-
-static const struct file_operations ab3550_registers_fops = {
- .open = ab3550_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab3550_bank_print(struct seq_file *s, void *p)
-{
- struct ab3550 *ab = s->private;
-
- seq_printf(s, "%d\n", ab->debug_bank);
- return 0;
-}
-
-static int ab3550_bank_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab3550_bank_print, inode->i_private);
-}
-
-static ssize_t ab3550_bank_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_bank;
- int err;
-
- /* Get userspace string and assure termination */
- err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
- if (err)
- return err;
-
- if (user_bank >= AB3550_NUM_BANKS) {
- dev_err(&ab->i2c_client[0]->dev,
- "debugfs error input > number of banks\n");
- return -EINVAL;
- }
-
- ab->debug_bank = user_bank;
-
- return count;
-}
-
-static int ab3550_address_print(struct seq_file *s, void *p)
-{
- struct ab3550 *ab = s->private;
-
- seq_printf(s, "0x%02X\n", ab->debug_address);
- return 0;
-}
-
-static int ab3550_address_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab3550_address_print, inode->i_private);
-}
-
-static ssize_t ab3550_address_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_address;
- int err;
-
- /* Get userspace string and assure termination */
- err = kstrtoul_from_user(user_buf, count, 0, &user_address);
- if (err)
- return err;
-
- if (user_address > 0xff) {
- dev_err(&ab->i2c_client[0]->dev,
- "debugfs error input > 0xff\n");
- return -EINVAL;
- }
- ab->debug_address = user_address;
- return count;
-}
-
-static int ab3550_val_print(struct seq_file *s, void *p)
-{
- struct ab3550 *ab = s->private;
- int err;
- u8 regvalue;
-
- err = get_register_interruptible(ab, (u8)ab->debug_bank,
- (u8)ab->debug_address, &regvalue);
- if (err)
- return -EINVAL;
- seq_printf(s, "0x%02X\n", regvalue);
-
- return 0;
-}
-
-static int ab3550_val_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab3550_val_print, inode->i_private);
-}
-
-static ssize_t ab3550_val_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_val;
- int err;
- u8 regvalue;
-
- /* Get userspace string and assure termination */
- err = kstrtoul_from_user(user_buf, count, 0, &user_val);
- if (err)
- return err;
-
- if (user_val > 0xff) {
- dev_err(&ab->i2c_client[0]->dev,
- "debugfs error input > 0xff\n");
- return -EINVAL;
- }
- err = mask_and_set_register_interruptible(
- ab, (u8)ab->debug_bank,
- (u8)ab->debug_address, 0xFF, (u8)user_val);
- if (err)
- return -EINVAL;
-
- get_register_interruptible(ab, (u8)ab->debug_bank,
- (u8)ab->debug_address, &regvalue);
- if (err)
- return -EINVAL;
-
- return count;
-}
-
-static const struct file_operations ab3550_bank_fops = {
- .open = ab3550_bank_open,
- .write = ab3550_bank_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations ab3550_address_fops = {
- .open = ab3550_address_open,
- .write = ab3550_address_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations ab3550_val_fops = {
- .open = ab3550_val_open,
- .write = ab3550_val_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static struct dentry *ab3550_dir;
-static struct dentry *ab3550_reg_file;
-static struct dentry *ab3550_bank_file;
-static struct dentry *ab3550_address_file;
-static struct dentry *ab3550_val_file;
-
-static inline void ab3550_setup_debugfs(struct ab3550 *ab)
-{
- ab->debug_bank = 0;
- ab->debug_address = 0x00;
-
- ab3550_dir = debugfs_create_dir(AB3550_NAME_STRING, NULL);
- if (!ab3550_dir)
- goto exit_no_debugfs;
-
- ab3550_reg_file = debugfs_create_file("all-registers",
- S_IRUGO, ab3550_dir, ab, &ab3550_registers_fops);
- if (!ab3550_reg_file)
- goto exit_destroy_dir;
-
- ab3550_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_bank_fops);
- if (!ab3550_bank_file)
- goto exit_destroy_reg;
-
- ab3550_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_address_fops);
- if (!ab3550_address_file)
- goto exit_destroy_bank;
-
- ab3550_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_val_fops);
- if (!ab3550_val_file)
- goto exit_destroy_address;
-
- return;
-
-exit_destroy_address:
- debugfs_remove(ab3550_address_file);
-exit_destroy_bank:
- debugfs_remove(ab3550_bank_file);
-exit_destroy_reg:
- debugfs_remove(ab3550_reg_file);
-exit_destroy_dir:
- debugfs_remove(ab3550_dir);
-exit_no_debugfs:
- dev_err(&ab->i2c_client[0]->dev, "failed to create debugfs entries.\n");
- return;
-}
-
-static inline void ab3550_remove_debugfs(void)
-{
- debugfs_remove(ab3550_val_file);
- debugfs_remove(ab3550_address_file);
- debugfs_remove(ab3550_bank_file);
- debugfs_remove(ab3550_reg_file);
- debugfs_remove(ab3550_dir);
-}
-
-#else /* !CONFIG_DEBUG_FS */
-static inline void ab3550_setup_debugfs(struct ab3550 *ab)
-{
-}
-static inline void ab3550_remove_debugfs(void)
-{
-}
-#endif
-
-/*
- * Basic set-up, datastructure creation/destruction and I2C interface.
- * This sets up a default config in the AB3550 chip so that it
- * will work as expected.
- */
-static int __init ab3550_setup(struct ab3550 *ab)
-{
- int err = 0;
- int i;
- struct ab3550_platform_data *plf_data;
- struct abx500_init_settings *settings;
-
- plf_data = ab->i2c_client[0]->dev.platform_data;
- settings = plf_data->init_settings;
-
- for (i = 0; i < plf_data->init_settings_sz; i++) {
- err = mask_and_set_register_interruptible(ab,
- settings[i].bank,
- settings[i].reg,
- 0xFF, settings[i].setting);
- if (err)
- goto exit_no_setup;
-
- /* If event mask register update the event mask in ab3550 */
- if ((settings[i].bank == 0) &&
- (AB3550_IMR1 <= settings[i].reg) &&
- (settings[i].reg <= AB3550_IMR5)) {
- ab->event_mask[settings[i].reg - AB3550_IMR1] =
- settings[i].setting;
- }
- }
-exit_no_setup:
- return err;
-}
-
-static void ab3550_mask_work(struct work_struct *work)
-{
- struct ab3550 *ab = container_of(work, struct ab3550, mask_work);
- int i;
- unsigned long flags;
- u8 mask[AB3550_NUM_EVENT_REG];
-
- spin_lock_irqsave(&ab->event_lock, flags);
- for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
- mask[i] = ab->event_mask[i];
- spin_unlock_irqrestore(&ab->event_lock, flags);
-
- for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
- int err;
-
- err = mask_and_set_register_interruptible(ab, 0,
- (AB3550_IMR1 + i), ~0, mask[i]);
- if (err)
- dev_err(&ab->i2c_client[0]->dev,
- "ab3550_mask_work failed 0x%x,0x%x\n",
- (AB3550_IMR1 + i), mask[i]);
- }
-}
-
-static void ab3550_mask(struct irq_data *data)
-{
- unsigned long flags;
- struct ab3550 *ab;
- struct ab3550_platform_data *plf_data;
- int irq;
-
- ab = irq_data_get_irq_chip_data(data);
- plf_data = ab->i2c_client[0]->dev.platform_data;
- irq = data->irq - plf_data->irq.base;
-
- spin_lock_irqsave(&ab->event_lock, flags);
- ab->event_mask[irq / 8] |= BIT(irq % 8);
- spin_unlock_irqrestore(&ab->event_lock, flags);
-
- schedule_work(&ab->mask_work);
-}
-
-static void ab3550_unmask(struct irq_data *data)
-{
- unsigned long flags;
- struct ab3550 *ab;
- struct ab3550_platform_data *plf_data;
- int irq;
-
- ab = irq_data_get_irq_chip_data(data);
- plf_data = ab->i2c_client[0]->dev.platform_data;
- irq = data->irq - plf_data->irq.base;
-
- spin_lock_irqsave(&ab->event_lock, flags);
- ab->event_mask[irq / 8] &= ~BIT(irq % 8);
- spin_unlock_irqrestore(&ab->event_lock, flags);
-
- schedule_work(&ab->mask_work);
-}
-
-static void noop(struct irq_data *data)
-{
-}
-
-static struct irq_chip ab3550_irq_chip = {
- .name = "ab3550-core", /* Keep the same name as the request */
- .irq_disable = ab3550_mask, /* No default to mask in chip.c */
- .irq_ack = noop,
- .irq_mask = ab3550_mask,
- .irq_unmask = ab3550_unmask,
-};
-
-struct ab_family_id {
- u8 id;
- char *name;
-};
-
-static const struct ab_family_id ids[] __initdata = {
- /* AB3550 */
- {
- .id = AB3550_P1A,
- .name = "P1A"
- },
- /* Terminator */
- {
- .id = 0x00,
- }
-};
-
-static int __init ab3550_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ab3550 *ab;
- struct ab3550_platform_data *ab3550_plf_data =
- client->dev.platform_data;
- int err;
- int i;
- int num_i2c_clients = 0;
-
- ab = kzalloc(sizeof(struct ab3550), GFP_KERNEL);
- if (!ab) {
- dev_err(&client->dev,
- "could not allocate " AB3550_NAME_STRING " device\n");
- return -ENOMEM;
- }
-
- /* Initialize data structure */
- mutex_init(&ab->access_mutex);
- spin_lock_init(&ab->event_lock);
- ab->i2c_client[0] = client;
-
- i2c_set_clientdata(client, ab);
-
- /* Read chip ID register */
- err = get_register_interruptible(ab, 0, AB3550_CID_REG, &ab->chip_id);
- if (err) {
- dev_err(&client->dev, "could not communicate with the analog "
- "baseband chip\n");
- goto exit_no_detect;
- }
-
- for (i = 0; ids[i].id != 0x0; i++) {
- if (ids[i].id == ab->chip_id) {
- snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1,
- AB3550_ID_FORMAT_STRING, ids[i].name);
- break;
- }
- }
-
- if (ids[i].id == 0x0) {
- dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
- ab->chip_id);
- dev_err(&client->dev, "driver not started!\n");
- goto exit_no_detect;
- }
-
- dev_info(&client->dev, "detected AB chip: %s\n", &ab->chip_name[0]);
-
- /* Attach other dummy I2C clients. */
- while (++num_i2c_clients < AB3550_NUM_BANKS) {
- ab->i2c_client[num_i2c_clients] =
- i2c_new_dummy(client->adapter,
- (client->addr + num_i2c_clients));
- if (!ab->i2c_client[num_i2c_clients]) {
- err = -ENOMEM;
- goto exit_no_dummy_client;
- }
- strlcpy(ab->i2c_client[num_i2c_clients]->name, id->name,
- sizeof(ab->i2c_client[num_i2c_clients]->name));
- }
-
- err = ab3550_setup(ab);
- if (err)
- goto exit_no_setup;
-
- INIT_WORK(&ab->mask_work, ab3550_mask_work);
-
- for (i = 0; i < ab3550_plf_data->irq.count; i++) {
- unsigned int irq;
-
- irq = ab3550_plf_data->irq.base + i;
- irq_set_chip_data(irq, ab);
- irq_set_chip_and_handler(irq, &ab3550_irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
- }
-
- err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
- IRQF_ONESHOT, "ab3550-core", ab);
- /* This real unpredictable IRQ is of course sampled for entropy */
- rand_initialize_irq(client->irq);
-
- if (err)
- goto exit_no_irq;
-
- err = abx500_register_ops(&client->dev, &ab3550_ops);
- if (err)
- goto exit_no_ops;
-
- /* Set up and register the platform devices. */
- for (i = 0; i < AB3550_NUM_DEVICES; i++) {
- ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
- ab3550_devs[i].pdata_size = ab3550_plf_data->dev_data_sz[i];
- }
-
- err = mfd_add_devices(&client->dev, 0, ab3550_devs,
- ARRAY_SIZE(ab3550_devs), NULL,
- ab3550_plf_data->irq.base);
-
- ab3550_setup_debugfs(ab);
-
- return 0;
-
-exit_no_ops:
-exit_no_irq:
-exit_no_setup:
-exit_no_dummy_client:
- /* Unregister the dummy i2c clients. */
- while (--num_i2c_clients)
- i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
-exit_no_detect:
- kfree(ab);
- return err;
-}
-
-static int __exit ab3550_remove(struct i2c_client *client)
-{
- struct ab3550 *ab = i2c_get_clientdata(client);
- int num_i2c_clients = AB3550_NUM_BANKS;
-
- mfd_remove_devices(&client->dev);
- ab3550_remove_debugfs();
-
- while (--num_i2c_clients)
- i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
-
- /*
- * At this point, all subscribers should have unregistered
- * their notifiers so deactivate IRQ
- */
- free_irq(client->irq, ab);
- kfree(ab);
- return 0;
-}
-
-static const struct i2c_device_id ab3550_id[] = {
- {AB3550_NAME_STRING, 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, ab3550_id);
-
-static struct i2c_driver ab3550_driver = {
- .driver = {
- .name = AB3550_NAME_STRING,
- .owner = THIS_MODULE,
- },
- .id_table = ab3550_id,
- .probe = ab3550_probe,
- .remove = __exit_p(ab3550_remove),
-};
-
-static int __init ab3550_i2c_init(void)
-{
- return i2c_add_driver(&ab3550_driver);
-}
-
-static void __exit ab3550_i2c_exit(void)
-{
- i2c_del_driver(&ab3550_driver);
-}
-
-subsys_initcall(ab3550_i2c_init);
-module_exit(ab3550_i2c_exit);
-
-MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
-MODULE_DESCRIPTION("AB3550 core driver");
-MODULE_LICENSE("GPL");
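
Both the ab3550 core removed above and the ab5500 core added below rely on the same contiguous-range walk in page_read_allowed(): a multi-register read is permitted only when every register from first_reg to last_reg falls in readable ranges that abut one another. The following is a simplified, self-contained model of that walk; struct reg_range and the sample ranges are invented for illustration and are not the kernel data structures.

/* Simplified model of the page_read_allowed() contiguous-range walk. */
#include <stdbool.h>
#include <stdio.h>

struct reg_range { unsigned char first, last; bool readable; };

static bool page_read_allowed(const struct reg_range *r, unsigned int n,
			      unsigned char first, unsigned char last)
{
	unsigned int i;

	if (last < first)
		return false;
	/* Find the range (if any) that contains first. */
	for (i = 0; i < n; i++) {
		if (first < r[i].first)
			return false;
		if (first <= r[i].last)
			break;
	}
	/* Walk contiguous readable ranges until last is covered. */
	while (i < n && r[i].readable) {
		if (last <= r[i].last)
			return true;
		if (++i >= n || r[i].first != (unsigned char)(r[i - 1].last + 1))
			break;
	}
	return false;
}

int main(void)
{
	const struct reg_range ranges[] = {
		{ 0x00, 0x0e, true },
		{ 0x0f, 0x17, true },
		{ 0x20, 0x56, true },	/* gap before this range */
	};

	printf("%d\n", page_read_allowed(ranges, 3, 0x02, 0x10)); /* 1: spans two contiguous ranges */
	printf("%d\n", page_read_allowed(ranges, 3, 0x10, 0x22)); /* 0: crosses the gap */
	return 0;
}
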
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
new file mode 100644
index 00000000000..ec10629a0b0
--- /dev/null
+++ b/drivers/mfd/ab5500-core.c
@@ -0,0 +1,1440 @@
+/*
+ * Copyright (C) 2007-2011 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Low-level core for exclusive access to the AB5500 IC on the I2C bus
+ * and some basic chip-configuration.
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ * Author: Bibek Basu <bibek.basu@stericsson.com>
+ *
+ * TODO: Event handling with irq_chip. Waiting for PRCMU fw support.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/random.h>
+#include <linux/mfd/ab5500/ab5500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/core.h>
+#include <linux/version.h>
+#include <linux/mfd/db5500-prcmu.h>
+
+#include "ab5500-core.h"
+#include "ab5500-debugfs.h"
+
+#define AB5500_NUM_EVENT_REG 23
+#define AB5500_IT_LATCH0_REG 0x40
+#define AB5500_IT_MASK0_REG 0x60
+
+/*
+ * Permissible register ranges for reading and writing per device and bank.
+ *
+ * The ranges must be listed in increasing address order, and no overlaps are
+ * allowed. It is assumed that write permission implies read permission
+ * (i.e. only RO and RW permissions should be used). Ranges with write
+ * permission must not be split up.
+ */
+
+#define NO_RANGE {.count = 0, .range = NULL,}
+static struct ab5500_i2c_banks ab5500_bank_ranges[AB5500_NUM_DEVICES] = {
+ [AB5500_DEVID_USB] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_USB,
+ .nranges = 12,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x01,
+ .last = 0x01,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x80,
+ .last = 0x83,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x87,
+ .last = 0x8A,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x8B,
+ .last = 0x8B,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x91,
+ .last = 0x92,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x93,
+ .last = 0x93,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x94,
+ .last = 0x94,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xA8,
+ .last = 0xB0,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xB2,
+ .last = 0xB2,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xB4,
+ .last = 0xBC,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xBF,
+ .last = 0xBF,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xC1,
+ .last = 0xC5,
+ .perm = AB5500_PERM_RO,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_ADC] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_ADC,
+ .nranges = 6,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x1F,
+ .last = 0x22,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x23,
+ .last = 0x24,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x26,
+ .last = 0x2D,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x2F,
+ .last = 0x34,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x37,
+ .last = 0x57,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x58,
+ .last = 0x58,
+ .perm = AB5500_PERM_RO,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_LEDS] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_LED,
+ .nranges = 1,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0C,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_VIDEO] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_VDENC,
+ .nranges = 12,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x08,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x09,
+ .last = 0x09,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x0A,
+ .last = 0x12,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x15,
+ .last = 0x19,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x1B,
+ .last = 0x21,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x27,
+ .last = 0x2C,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x41,
+ .last = 0x41,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x45,
+ .last = 0x5B,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x5D,
+ .last = 0x5D,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x69,
+ .last = 0x69,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x6C,
+ .last = 0x6D,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x80,
+ .last = 0x81,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_REGULATORS] = {
+ .nbanks = 2,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_STARTUP,
+ .nranges = 12,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x01,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x1F,
+ .last = 0x1F,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x2E,
+ .last = 0x2E,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x2F,
+ .last = 0x30,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x50,
+ .last = 0x51,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x60,
+ .last = 0x61,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x66,
+ .last = 0x8A,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x8C,
+ .last = 0x96,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xAA,
+ .last = 0xB4,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xB7,
+ .last = 0xBF,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xC1,
+ .last = 0xCA,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xD3,
+ .last = 0xE0,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ {
+ .bankid = AB5500_BANK_SIM_USBSIM,
+ .nranges = 1,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x13,
+ .last = 0x19,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_SIM] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_SIM_USBSIM,
+ .nranges = 1,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x13,
+ .last = 0x19,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_RTC] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_RTC,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x04,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x06,
+ .last = 0x0C,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_CHARGER] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_CHG,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x11,
+ .last = 0x11,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x12,
+ .last = 0x1B,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_FUELGAUGE] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_FG_BATTCOM_ACC,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0B,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x0C,
+ .last = 0x10,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_VIBRATOR] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_VIBRA,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x13,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xFE,
+ .last = 0xFE,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_CODEC] = {
+ .nbanks = 1,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_AUDIO_HEADSETUSB,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x48,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xEB,
+ .last = 0xFB,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+ [AB5500_DEVID_POWER] = {
+ .nbanks = 2,
+ .bank = (struct ab5500_i2c_ranges []) {
+ {
+ .bankid = AB5500_BANK_STARTUP,
+ .nranges = 1,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x30,
+ .last = 0x30,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ {
+ .bankid = AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP,
+ .nranges = 1,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x01,
+ .last = 0x01,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ },
+ },
+};
+
+#define AB5500_IRQ(bank, bit) ((bank) * 8 + (bit))
+
+/* I apologize for the resource names being a mix of upper case
+ * and lower case, but I want them to match the documentation exactly. */
+static struct mfd_cell ab5500_devs[AB5500_NUM_DEVICES] = {
+ [AB5500_DEVID_LEDS] = {
+ .name = "ab5500-leds",
+ .id = AB5500_DEVID_LEDS,
+ },
+ [AB5500_DEVID_POWER] = {
+ .name = "ab5500-power",
+ .id = AB5500_DEVID_POWER,
+ },
+ [AB5500_DEVID_REGULATORS] = {
+ .name = "ab5500-regulator",
+ .id = AB5500_DEVID_REGULATORS,
+ },
+ [AB5500_DEVID_SIM] = {
+ .name = "ab5500-sim",
+ .id = AB5500_DEVID_SIM,
+ .num_resources = 1,
+ .resources = (struct resource[]) {
+ {
+ .name = "SIMOFF",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 0), /*rising*/
+ .end = AB5500_IRQ(2, 1), /*falling*/
+ },
+ },
+ },
+ [AB5500_DEVID_RTC] = {
+ .name = "ab5500-rtc",
+ .id = AB5500_DEVID_RTC,
+ .num_resources = 1,
+ .resources = (struct resource[]) {
+ {
+ .name = "RTC_Alarm",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(1, 7),
+ .end = AB5500_IRQ(1, 7),
+ }
+ },
+ },
+ [AB5500_DEVID_CHARGER] = {
+ .name = "ab5500-charger",
+ .id = AB5500_DEVID_CHARGER,
+ },
+ [AB5500_DEVID_ADC] = {
+ .name = "ab5500-adc",
+ .id = AB5500_DEVID_ADC,
+ .num_resources = 10,
+ .resources = (struct resource[]) {
+ {
+ .name = "TRIGGER-0",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 0),
+ .end = AB5500_IRQ(0, 0),
+ },
+ {
+ .name = "TRIGGER-1",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 1),
+ .end = AB5500_IRQ(0, 1),
+ },
+ {
+ .name = "TRIGGER-2",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 2),
+ .end = AB5500_IRQ(0, 2),
+ },
+ {
+ .name = "TRIGGER-3",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 3),
+ .end = AB5500_IRQ(0, 3),
+ },
+ {
+ .name = "TRIGGER-4",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 4),
+ .end = AB5500_IRQ(0, 4),
+ },
+ {
+ .name = "TRIGGER-5",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 5),
+ .end = AB5500_IRQ(0, 5),
+ },
+ {
+ .name = "TRIGGER-6",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 6),
+ .end = AB5500_IRQ(0, 6),
+ },
+ {
+ .name = "TRIGGER-7",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 7),
+ .end = AB5500_IRQ(0, 7),
+ },
+ {
+ .name = "TRIGGER-VBAT",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 8),
+ .end = AB5500_IRQ(0, 8),
+ },
+ {
+ .name = "TRIGGER-VBAT-TXON",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(0, 9),
+ .end = AB5500_IRQ(0, 9),
+ },
+ },
+ },
+ [AB5500_DEVID_FUELGAUGE] = {
+ .name = "ab5500-fuelgauge",
+ .id = AB5500_DEVID_FUELGAUGE,
+ .num_resources = 6,
+ .resources = (struct resource[]) {
+ {
+ .name = "Batt_attach",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(7, 5),
+ .end = AB5500_IRQ(7, 5),
+ },
+ {
+ .name = "Batt_removal",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(7, 6),
+ .end = AB5500_IRQ(7, 6),
+ },
+ {
+ .name = "UART_framing",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(7, 7),
+ .end = AB5500_IRQ(7, 7),
+ },
+ {
+ .name = "UART_overrun",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(8, 0),
+ .end = AB5500_IRQ(8, 0),
+ },
+ {
+ .name = "UART_Rdy_RX",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(8, 1),
+ .end = AB5500_IRQ(8, 1),
+ },
+ {
+ .name = "UART_Rdy_TX",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(8, 2),
+ .end = AB5500_IRQ(8, 2),
+ },
+ },
+ },
+ [AB5500_DEVID_VIBRATOR] = {
+ .name = "ab5500-vibrator",
+ .id = AB5500_DEVID_VIBRATOR,
+ },
+ [AB5500_DEVID_CODEC] = {
+ .name = "ab5500-codec",
+ .id = AB5500_DEVID_CODEC,
+ .num_resources = 3,
+ .resources = (struct resource[]) {
+ {
+ .name = "audio_spkr1_ovc",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 5),
+ .end = AB5500_IRQ(9, 5),
+ },
+ {
+ .name = "audio_plllocked",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 6),
+ .end = AB5500_IRQ(9, 6),
+ },
+ {
+ .name = "audio_spkr2_ovc",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(17, 4),
+ .end = AB5500_IRQ(17, 4),
+ },
+ },
+ },
+ [AB5500_DEVID_USB] = {
+ .name = "ab5500-usb",
+ .id = AB5500_DEVID_USB,
+ .num_resources = 36,
+ .resources = (struct resource[]) {
+ {
+ .name = "Link_Update",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(22, 1),
+ .end = AB5500_IRQ(22, 1),
+ },
+ {
+ .name = "DCIO",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(8, 3),
+ .end = AB5500_IRQ(8, 4),
+ },
+ {
+ .name = "VBUS_R",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(8, 5),
+ .end = AB5500_IRQ(8, 5),
+ },
+ {
+ .name = "VBUS_F",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(8, 6),
+ .end = AB5500_IRQ(8, 6),
+ },
+ {
+ .name = "CHGstate_10_PCVBUSchg",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(8, 7),
+ .end = AB5500_IRQ(8, 7),
+ },
+ {
+ .name = "DCIOreverse_ovc",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 0),
+ .end = AB5500_IRQ(9, 0),
+ },
+ {
+ .name = "USBCharDetDone",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 1),
+ .end = AB5500_IRQ(9, 1),
+ },
+ {
+ .name = "DCIO_no_limit",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 2),
+ .end = AB5500_IRQ(9, 2),
+ },
+ {
+ .name = "USB_suspend",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 3),
+ .end = AB5500_IRQ(9, 3),
+ },
+ {
+ .name = "DCIOreverse_fwdcurrent",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 4),
+ .end = AB5500_IRQ(9, 4),
+ },
+ {
+ .name = "Vbus_Imeasmax_change",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(9, 5),
+ .end = AB5500_IRQ(9, 6),
+ },
+ {
+ .name = "OVV",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 5),
+ .end = AB5500_IRQ(14, 5),
+ },
+ {
+ .name = "USBcharging_NOTok",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(15, 3),
+ .end = AB5500_IRQ(15, 3),
+ },
+ {
+ .name = "usb_adp_sensoroff",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(15, 6),
+ .end = AB5500_IRQ(15, 6),
+ },
+ {
+ .name = "usb_adp_probeplug",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(15, 7),
+ .end = AB5500_IRQ(15, 7),
+ },
+ {
+ .name = "usb_adp_sinkerror",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(16, 0),
+ .end = AB5500_IRQ(16, 6),
+ },
+ {
+ .name = "usb_adp_sourceerror",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(16, 1),
+ .end = AB5500_IRQ(16, 1),
+ },
+ {
+ .name = "usb_idgnd_r",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(16, 2),
+ .end = AB5500_IRQ(16, 2),
+ },
+ {
+ .name = "usb_idgnd_f",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(16, 3),
+ .end = AB5500_IRQ(16, 3),
+ },
+ {
+ .name = "usb_iddetR1",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(16, 4),
+ .end = AB5500_IRQ(16, 5),
+ },
+ {
+ .name = "usb_iddetR2",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(16, 6),
+ .end = AB5500_IRQ(16, 7),
+ },
+ {
+ .name = "usb_iddetR3",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(17, 0),
+ .end = AB5500_IRQ(17, 1),
+ },
+ {
+ .name = "usb_iddetR4",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(17, 2),
+ .end = AB5500_IRQ(17, 3),
+ },
+ {
+ .name = "CharTempWindowOk",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(17, 7),
+ .end = AB5500_IRQ(18, 0),
+ },
+ {
+ .name = "USB_SprDetect",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(18, 1),
+ .end = AB5500_IRQ(18, 1),
+ },
+ {
+ .name = "usb_adp_probe_unplug",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(18, 2),
+ .end = AB5500_IRQ(18, 2),
+ },
+ {
+ .name = "VBUSChDrop",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(18, 3),
+ .end = AB5500_IRQ(18, 4),
+ },
+ {
+ .name = "dcio_char_rec_done",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(18, 5),
+ .end = AB5500_IRQ(18, 5),
+ },
+ {
+ .name = "Charging_stopped_by_temp",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(18, 6),
+ .end = AB5500_IRQ(18, 6),
+ },
+ {
+ .name = "CHGstate_11_SafeModeVBUS",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(21, 1),
+ .end = AB5500_IRQ(21, 2),
+ },
+ {
+ .name = "CHGstate_12_comletedVBUS",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(21, 2),
+ .end = AB5500_IRQ(21, 2),
+ },
+ {
+ .name = "CHGstate_13_completedVBUS",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(21, 3),
+ .end = AB5500_IRQ(21, 3),
+ },
+ {
+ .name = "CHGstate_14_FullChgDCIO",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(21, 4),
+ .end = AB5500_IRQ(21, 4),
+ },
+ {
+ .name = "CHGstate_15_SafeModeDCIO",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(21, 5),
+ .end = AB5500_IRQ(21, 5),
+ },
+ {
+ .name = "CHGstate_16_OFFsuspendDCIO",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(21, 6),
+ .end = AB5500_IRQ(21, 6),
+ },
+ {
+ .name = "CHGstate_17_completedDCIO",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(21, 7),
+ .end = AB5500_IRQ(21, 7),
+ },
+ },
+ },
+ [AB5500_DEVID_OTP] = {
+ .name = "ab5500-otp",
+ .id = AB5500_DEVID_OTP,
+ },
+ [AB5500_DEVID_VIDEO] = {
+ .name = "ab5500-video",
+ .id = AB5500_DEVID_VIDEO,
+ .num_resources = 1,
+ .resources = (struct resource[]) {
+ {
+ .name = "plugTVdet",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(22, 2),
+ .end = AB5500_IRQ(22, 2),
+ },
+ },
+ },
+ [AB5500_DEVID_DBIECI] = {
+ .name = "ab5500-dbieci",
+ .id = AB5500_DEVID_DBIECI,
+ .num_resources = 10,
+ .resources = (struct resource[]) {
+ {
+ .name = "COLL",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 0),
+ .end = AB5500_IRQ(14, 0),
+ },
+ {
+ .name = "RESERR",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 1),
+ .end = AB5500_IRQ(14, 1),
+ },
+ {
+ .name = "FRAERR",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 2),
+ .end = AB5500_IRQ(14, 2),
+ },
+ {
+ .name = "COMERR",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 3),
+ .end = AB5500_IRQ(14, 3),
+ },
+ {
+ .name = "BSI_indicator",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 4),
+ .end = AB5500_IRQ(14, 4),
+ },
+ {
+ .name = "SPDSET",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 6),
+ .end = AB5500_IRQ(14, 6),
+ },
+ {
+ .name = "DSENT",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(14, 7),
+ .end = AB5500_IRQ(14, 7),
+ },
+ {
+ .name = "DREC",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(15, 0),
+ .end = AB5500_IRQ(15, 0),
+ },
+ {
+ .name = "ACCINT",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(15, 1),
+ .end = AB5500_IRQ(15, 1),
+ },
+ {
+ .name = "NOPINT",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(15, 2),
+ .end = AB5500_IRQ(15, 2),
+ },
+ },
+ },
+ [AB5500_DEVID_ONSWA] = {
+ .name = "ab5500-onswa",
+ .id = AB5500_DEVID_ONSWA,
+ .num_resources = 2,
+ .resources = (struct resource[]) {
+ {
+ .name = "ONSWAn_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(1, 3),
+ .end = AB5500_IRQ(1, 3),
+ },
+ {
+ .name = "ONSWAn_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(1, 4),
+ .end = AB5500_IRQ(1, 4),
+ },
+ },
+ },
+};
+
+/*
+ * Functionality for getting/setting register values.
+ */
+int ab5500_get_register_interruptible_raw(struct ab5500 *ab,
+ u8 bank, u8 reg,
+ u8 *value)
+{
+ int err;
+
+ if (bank >= AB5500_NUM_BANKS)
+ return -EINVAL;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+ err = db5500_prcmu_abb_read(bankinfo[bank].slave_addr, reg, value, 1);
+
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+static int get_register_page_interruptible(struct ab5500 *ab, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ int err;
+
+ if (bank >= AB5500_NUM_BANKS)
+ return -EINVAL;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ while (numregs) {
+ /* The hardware limit for a single page read is 4 registers. */
+ u8 curnum = min_t(u8, numregs, 4u);
+
+ err = db5500_prcmu_abb_read(bankinfo[bank].slave_addr,
+ first_reg, regvals, curnum);
+ if (err)
+ goto out;
+
+ numregs -= curnum;
+ first_reg += curnum;
+ regvals += curnum;
+ }
+
+out:
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ int err = 0;
+
+ if (bank >= AB5500_NUM_BANKS)
+ return -EINVAL;
+
+ if (bitmask) {
+ u8 buf;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ if (bitmask == 0xFF) /* No need to read in this case. */
+ buf = bitvalues;
+ else { /* Read and modify the register value. */
+ err = db5500_prcmu_abb_read(bankinfo[bank].slave_addr,
+ reg, &buf, 1);
+ if (err)
+ return err;
+
+ buf = ((~bitmask & buf) | (bitmask & bitvalues));
+ }
+ /* Write the new value. */
+ err = db5500_prcmu_abb_write(bankinfo[bank].slave_addr, reg,
+ &buf, 1);
+
+ mutex_unlock(&ab->access_mutex);
+ }
+ return err;
+}
+
+static int
+set_register_interruptible(struct ab5500 *ab, u8 bank, u8 reg, u8 value)
+{
+ return ab5500_mask_and_set_register_interruptible_raw(ab, bank, reg,
+ 0xff, value);
+}
+
+/*
+ * Read/write permission checking functions.
+ */
+static const struct ab5500_i2c_ranges *get_bankref(u8 devid, u8 bank)
+{
+ u8 i;
+
+ if (devid < AB5500_NUM_DEVICES) {
+ for (i = 0; i < ab5500_bank_ranges[devid].nbanks; i++) {
+ if (ab5500_bank_ranges[devid].bank[i].bankid == bank)
+ return &ab5500_bank_ranges[devid].bank[i];
+ }
+ }
+ return NULL;
+}
+
+static bool page_write_allowed(u8 devid, u8 bank, u8 first_reg, u8 last_reg)
+{
+ u8 i; /* range loop index */
+ const struct ab5500_i2c_ranges *bankref;
+
+ bankref = get_bankref(devid, bank);
+ if (bankref == NULL || last_reg < first_reg)
+ return false;
+
+ for (i = 0; i < bankref->nranges; i++) {
+ if (first_reg < bankref->range[i].first)
+ break;
+ if ((last_reg <= bankref->range[i].last) &&
+ (bankref->range[i].perm & AB5500_PERM_WR))
+ return true;
+ }
+ return false;
+}
+
+static bool reg_write_allowed(u8 devid, u8 bank, u8 reg)
+{
+ return page_write_allowed(devid, bank, reg, reg);
+}
+
+static bool page_read_allowed(u8 devid, u8 bank, u8 first_reg, u8 last_reg)
+{
+ u8 i;
+ const struct ab5500_i2c_ranges *bankref;
+
+ bankref = get_bankref(devid, bank);
+ if (bankref == NULL || last_reg < first_reg)
+ return false;
+
+ /* Find the range (if it exists in the list) that includes first_reg. */
+ for (i = 0; i < bankref->nranges; i++) {
+ if (first_reg < bankref->range[i].first)
+ return false;
+ if (first_reg <= bankref->range[i].last)
+ break;
+ }
+ /* Make sure that the entire range up to and including last_reg is
+ * readable. This may span several of the ranges in the list.
+ */
+ while ((i < bankref->nranges) &&
+ (bankref->range[i].perm & AB5500_PERM_RD)) {
+ if (last_reg <= bankref->range[i].last)
+ return true;
+ if ((++i >= bankref->nranges) ||
+ (bankref->range[i].first !=
+ (bankref->range[i - 1].last + 1))) {
+ break;
+ }
+ }
+ return false;
+}
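+
+/*
+ * Illustrative example (the register numbers are made up, not taken from
+ * the AB5500 data sheet): with readable ranges 0x00-0x04 and 0x05-0x0C in
+ * a bank, page_read_allowed(devid, bank, 0x02, 0x0A) returns true, since
+ * the two ranges are contiguous and both carry AB5500_PERM_RD, while
+ * page_read_allowed(devid, bank, 0x02, 0x0F) returns false because the
+ * registers from 0x0D upwards are not covered by any range.
+ */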
+
+static bool reg_read_allowed(u8 devid, u8 bank, u8 reg)
+{
+ return page_read_allowed(devid, bank, reg, reg);
+}
+
+/*
+ * The exported register access functionality.
+ */
+static int ab5500_get_chip_id(struct device *dev)
+{
+ struct ab5500 *ab = dev_get_drvdata(dev->parent);
+
+ return (int)ab->chip_id;
+}
+
+static int ab5500_mask_and_set_register_interruptible(struct device *dev,
+ u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab5500 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB5500_NUM_BANKS <= bank) ||
+ !reg_write_allowed(pdev->id, bank, reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return ab5500_mask_and_set_register_interruptible_raw(ab, bank, reg,
+ bitmask, bitvalues);
+}
+
+static int ab5500_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 value)
+{
+ return ab5500_mask_and_set_register_interruptible(dev, bank, reg, 0xFF,
+ value);
+}
+
+static int ab5500_get_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 *value)
+{
+ struct ab5500 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB5500_NUM_BANKS <= bank) ||
+ !reg_read_allowed(pdev->id, bank, reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return ab5500_get_register_interruptible_raw(ab, bank, reg, value);
+}
+
+static int ab5500_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct ab5500 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB5500_NUM_BANKS <= bank) ||
+ !page_read_allowed(pdev->id, bank,
+ first_reg, (first_reg + numregs - 1)))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return get_register_page_interruptible(ab, bank, first_reg, regvals,
+ numregs);
+}
+
+static int
+ab5500_event_registers_startup_state_get(struct device *dev, u8 *event)
+{
+ struct ab5500 *ab;
+
+ ab = dev_get_drvdata(dev->parent);
+ if (!ab->startup_events_read)
+ return -EAGAIN; /* Try again later */
+
+ memcpy(event, ab->startup_events, AB5500_NUM_EVENT_REG);
+ return 0;
+}
+
+static struct abx500_ops ab5500_ops = {
+ .get_chip_id = ab5500_get_chip_id,
+ .get_register = ab5500_get_register_interruptible,
+ .set_register = ab5500_set_register_interruptible,
+ .get_register_page = ab5500_get_register_page_interruptible,
+ .set_register_page = NULL,
+ .mask_and_set_register = ab5500_mask_and_set_register_interruptible,
+ .event_registers_startup_state_get =
+ ab5500_event_registers_startup_state_get,
+ .startup_irq_enabled = NULL,
+};
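+
+/*
+ * Subdrivers are not expected to call the functions above directly; they
+ * go through the generic abx500 interface, which looks these ops up from
+ * the child device. A minimal sketch, assuming "dev" is one of the child
+ * platform devices spawned by this core driver (the bank and register
+ * numbers are only examples):
+ *
+ *	u8 val;
+ *	int ret;
+ *
+ *	ret = abx500_get_register_interruptible(dev, AB5500_BANK_ADC,
+ *						0x20, &val);
+ *	if (!ret)
+ *		ret = abx500_mask_and_set_register_interruptible(dev,
+ *				AB5500_BANK_ADC, 0x23, 0x0F, 0x01);
+ */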
+
+/*
+ * ab5500_setup : Basic set-up, data structure creation/destruction
+ *		  and I2C interface. This sets up a default config
+ *		  in the AB5500 chip so that it will work as expected.
+ * @ab : Pointer to ab5500 structure
+ * @settings : Pointer to struct abx500_init_settings
+ * @size : Size of init data
+ */
+static int __init ab5500_setup(struct ab5500 *ab,
+ struct abx500_init_settings *settings, unsigned int size)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ err = ab5500_mask_and_set_register_interruptible_raw(ab,
+ settings[i].bank,
+ settings[i].reg,
+ 0xFF, settings[i].setting);
+ if (err)
+ goto exit_no_setup;
+
+		/* If this is an event mask register, update the cached mask. */
+ if ((settings[i].bank == AB5500_BANK_IT) &&
+ (AB5500_MASK_BASE <= settings[i].reg) &&
+ (settings[i].reg <= AB5500_MASK_END)) {
+ ab->mask[settings[i].reg - AB5500_MASK_BASE] =
+ settings[i].setting;
+ }
+ }
+exit_no_setup:
+ return err;
+}
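+
+/*
+ * The settings passed to ab5500_setup() come from board code through
+ * struct ab5500_platform_data. A minimal, hypothetical example of such a
+ * table (the register address is a placeholder, not taken from the data
+ * sheet; only the IT bank mask registers are cached, as shown above):
+ *
+ *	static struct abx500_init_settings ab5500_init[] = {
+ *		{ .bank = AB5500_BANK_IT, .reg = 0x60, .setting = 0xFF },
+ *	};
+ *
+ *	.init_settings = ab5500_init,
+ *	.init_settings_sz = ARRAY_SIZE(ab5500_init),
+ */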
+
+struct ab_family_id {
+ u8 id;
+ char *name;
+};
+
+static const struct ab_family_id ids[] __initdata = {
+ /* AB5500 */
+ {
+ .id = AB5500_1_0,
+ .name = "1.0"
+ },
+ {
+ .id = AB5500_1_1,
+ .name = "1.1"
+ },
+ /* Terminator */
+ {
+ .id = 0x00,
+ }
+};
+
+static int __init ab5500_probe(struct platform_device *pdev)
+{
+ struct ab5500 *ab;
+ struct ab5500_platform_data *ab5500_plf_data =
+ pdev->dev.platform_data;
+ int err;
+ int i;
+
+ ab = kzalloc(sizeof(struct ab5500), GFP_KERNEL);
+ if (!ab) {
+ dev_err(&pdev->dev,
+ "could not allocate ab5500 device\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize data structure */
+ mutex_init(&ab->access_mutex);
+ mutex_init(&ab->irq_lock);
+ ab->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, ab);
+
+ /* Read chip ID register */
+ err = ab5500_get_register_interruptible_raw(ab,
+ AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP,
+ AB5500_CHIP_ID, &ab->chip_id);
+ if (err) {
+ dev_err(&pdev->dev, "could not communicate with the analog "
+ "baseband chip\n");
+ goto exit_no_detect;
+ }
+
+ for (i = 0; ids[i].id != 0x0; i++) {
+ if (ids[i].id == ab->chip_id) {
+ snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1,
+ "AB5500 %s", ids[i].name);
+ break;
+ }
+ }
+ if (ids[i].id == 0x0) {
+ dev_err(&pdev->dev, "unknown analog baseband chip id: 0x%x\n",
+ ab->chip_id);
+ dev_err(&pdev->dev, "driver not started!\n");
+ goto exit_no_detect;
+ }
+
+ /* Clear and mask all interrupts */
+ for (i = 0; i < AB5500_NUM_IRQ_REGS; i++) {
+ u8 latchreg = AB5500_IT_LATCH0_REG + i;
+ u8 maskreg = AB5500_IT_MASK0_REG + i;
+ u8 val;
+
+ ab5500_get_register_interruptible_raw(ab, AB5500_BANK_IT,
+ latchreg, &val);
+ set_register_interruptible(ab, AB5500_BANK_IT, maskreg, 0xff);
+ ab->mask[i] = ab->oldmask[i] = 0xff;
+ }
+
+ err = abx500_register_ops(&pdev->dev, &ab5500_ops);
+ if (err) {
+ dev_err(&pdev->dev, "ab5500_register ops error\n");
+ goto exit_no_detect;
+ }
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < AB5500_NUM_DEVICES; i++) {
+ ab5500_devs[i].platform_data = ab5500_plf_data->dev_data[i];
+ ab5500_devs[i].pdata_size =
+ sizeof(ab5500_plf_data->dev_data[i]);
+ }
+
+ err = mfd_add_devices(&pdev->dev, 0, ab5500_devs,
+ ARRAY_SIZE(ab5500_devs), NULL,
+ ab5500_plf_data->irq.base);
+ if (err) {
+ dev_err(&pdev->dev, "ab5500_mfd_add_device error\n");
+ goto exit_no_detect;
+ }
+
+ err = ab5500_setup(ab, ab5500_plf_data->init_settings,
+ ab5500_plf_data->init_settings_sz);
+ if (err) {
+ dev_err(&pdev->dev, "ab5500_setup error\n");
+ goto exit_no_detect;
+ }
+
+ ab5500_setup_debugfs(ab);
+
+ dev_info(&pdev->dev, "detected AB chip: %s\n", &ab->chip_name[0]);
+ return 0;
+
+exit_no_detect:
+ kfree(ab);
+ return err;
+}
+
+static int __exit ab5500_remove(struct platform_device *pdev)
+{
+ struct ab5500 *ab = platform_get_drvdata(pdev);
+
+ ab5500_remove_debugfs();
+ mfd_remove_devices(&pdev->dev);
+ kfree(ab);
+ return 0;
+}
+
+static struct platform_driver ab5500_driver = {
+ .driver = {
+ .name = "ab5500-core",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(ab5500_remove),
+};
+
+static int __init ab5500_core_init(void)
+{
+ return platform_driver_probe(&ab5500_driver, ab5500_probe);
+}
+
+static void __exit ab5500_core_exit(void)
+{
+ platform_driver_unregister(&ab5500_driver);
+}
+
+subsys_initcall(ab5500_core_init);
+module_exit(ab5500_core_exit);
+
+MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
+MODULE_DESCRIPTION("AB5500 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
new file mode 100644
index 00000000000..63b30b17e4f
--- /dev/null
+++ b/drivers/mfd/ab5500-core.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Shared definitions and data structures for the AB5500 MFD driver
+ */
+
+/* Register access permission bits. */
+#define AB5500_PERM_RD (0x01)
+#define AB5500_PERM_WR (0x02)
+
+/* Combined read/write permission values. */
+#define AB5500_PERM_RO (AB5500_PERM_RD)
+#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
+
+#define AB5500_MASK_BASE (0x60)
+#define AB5500_MASK_END (0x79)
+#define AB5500_CHIP_ID (0x20)
+
+/**
+ * struct ab5500_reg_range
+ * @first: the first address of the range
+ * @last: the last address of the range
+ * @perm: access permissions for the range
+ */
+struct ab5500_reg_range {
+ u8 first;
+ u8 last;
+ u8 perm;
+};
+
+/**
+ * struct ab5500_i2c_ranges
+ * @nranges: the number of ranges in the list
+ * @bankid: the bank to which these register ranges belong
+ * @range: the list of register ranges
+ */
+struct ab5500_i2c_ranges {
+ u8 nranges;
+ u8 bankid;
+ const struct ab5500_reg_range *range;
+};
+
+/**
+ * struct ab5500_i2c_banks
+ * @nbanks: the number of banks in the list
+ * @bank: the list of banks, each with its own register ranges
+ */
+struct ab5500_i2c_banks {
+ u8 nbanks;
+ const struct ab5500_i2c_ranges *bank;
+};
+
+/**
+ * struct ab5500_bank
+ * @slave_addr: I2C slave address as given in the AB5500 specification
+ * @name: name of the bank as used in the documentation, for reference
+ */
+struct ab5500_bank {
+ u8 slave_addr;
+ const char *name;
+};
+
+static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
+ [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
+ AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
+ [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
+ AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
+ [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
+ [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
+ [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
+ [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
+ [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
+ [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
+ [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
+ [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
+ [AB5500_BANK_FG_BATTCOM_ACC] = {
+ AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
+ [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
+ [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
+ [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
+ [AB5500_BANK_AUDIO_HEADSETUSB] = {
+ AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
+};
+
+int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
+ u8 *value);
+int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues);
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
new file mode 100644
index 00000000000..43c0ebb8195
--- /dev/null
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -0,0 +1,807 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Debugfs support for the AB5500 MFD driver
+ */
+
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mfd/ab5500/ab5500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/uaccess.h>
+
+#include "ab5500-core.h"
+#include "ab5500-debugfs.h"
+
+static struct ab5500_i2c_ranges ab5500_reg_ranges[AB5500_NUM_BANKS] = {
+ [AB5500_BANK_LED] = {
+ .bankid = AB5500_BANK_LED,
+ .nranges = 1,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0C,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_ADC] = {
+ .bankid = AB5500_BANK_ADC,
+ .nranges = 6,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x1F,
+ .last = 0x22,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x23,
+ .last = 0x24,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x26,
+ .last = 0x2D,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x2F,
+ .last = 0x34,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x37,
+ .last = 0x57,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x58,
+ .last = 0x58,
+ .perm = AB5500_PERM_RO,
+ },
+ },
+ },
+ [AB5500_BANK_RTC] = {
+ .bankid = AB5500_BANK_RTC,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x04,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x06,
+ .last = 0x0C,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_STARTUP] = {
+ .bankid = AB5500_BANK_STARTUP,
+ .nranges = 12,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x01,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x1F,
+ .last = 0x1F,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x2E,
+ .last = 0x2E,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x2F,
+ .last = 0x30,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x50,
+ .last = 0x51,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x60,
+ .last = 0x61,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x66,
+ .last = 0x8A,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x8C,
+ .last = 0x96,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xAA,
+ .last = 0xB4,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xB7,
+ .last = 0xBF,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xC1,
+ .last = 0xCA,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xD3,
+ .last = 0xE0,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_DBI_ECI] = {
+ .bankid = AB5500_BANK_DBI_ECI,
+ .nranges = 3,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x07,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x10,
+ .last = 0x10,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x13,
+ .last = 0x13,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_CHG] = {
+ .bankid = AB5500_BANK_CHG,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x11,
+ .last = 0x11,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x12,
+ .last = 0x1B,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_FG_BATTCOM_ACC] = {
+ .bankid = AB5500_BANK_FG_BATTCOM_ACC,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0B,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x0C,
+ .last = 0x10,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_USB] = {
+ .bankid = AB5500_BANK_USB,
+ .nranges = 12,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x01,
+ .last = 0x01,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x80,
+ .last = 0x83,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x87,
+ .last = 0x8A,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x8B,
+ .last = 0x8B,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x91,
+ .last = 0x92,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x93,
+ .last = 0x93,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x94,
+ .last = 0x94,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xA8,
+ .last = 0xB0,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xB2,
+ .last = 0xB2,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xB4,
+ .last = 0xBC,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xBF,
+ .last = 0xBF,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0xC1,
+ .last = 0xC5,
+ .perm = AB5500_PERM_RO,
+ },
+ },
+ },
+ [AB5500_BANK_IT] = {
+ .bankid = AB5500_BANK_IT,
+ .nranges = 4,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x02,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x20,
+ .last = 0x36,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x40,
+ .last = 0x56,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x60,
+ .last = 0x76,
+ .perm = AB5500_PERM_RO,
+ },
+ },
+ },
+ [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
+ .bankid = AB5500_BANK_VDDDIG_IO_I2C_CLK_TST,
+ .nranges = 7,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x02,
+ .last = 0x02,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x12,
+ .last = 0x12,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x30,
+ .last = 0x34,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x40,
+ .last = 0x44,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x50,
+ .last = 0x54,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x60,
+ .last = 0x64,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x70,
+ .last = 0x74,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
+ .bankid = AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP,
+ .nranges = 13,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x01,
+ .last = 0x01,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x02,
+ .last = 0x02,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x0D,
+ .last = 0x0F,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x1C,
+ .last = 0x1C,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x1E,
+ .last = 0x1E,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x20,
+ .last = 0x21,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x25,
+ .last = 0x25,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x28,
+ .last = 0x2A,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x30,
+ .last = 0x33,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x40,
+ .last = 0x43,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x50,
+ .last = 0x53,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x60,
+ .last = 0x63,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x70,
+ .last = 0x73,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_VIBRA] = {
+ .bankid = AB5500_BANK_VIBRA,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x13,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xFE,
+ .last = 0xFE,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_AUDIO_HEADSETUSB] = {
+ .bankid = AB5500_BANK_AUDIO_HEADSETUSB,
+ .nranges = 2,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x48,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0xEB,
+ .last = 0xFB,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_SIM_USBSIM] = {
+ .bankid = AB5500_BANK_SIM_USBSIM,
+ .nranges = 1,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x13,
+ .last = 0x19,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+ [AB5500_BANK_VDENC] = {
+ .bankid = AB5500_BANK_VDENC,
+ .nranges = 12,
+ .range = (struct ab5500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x08,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x09,
+ .last = 0x09,
+ .perm = AB5500_PERM_RO,
+ },
+ {
+ .first = 0x0A,
+ .last = 0x12,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x15,
+ .last = 0x19,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x1B,
+ .last = 0x21,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x27,
+ .last = 0x2C,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x41,
+ .last = 0x41,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x45,
+ .last = 0x5B,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x5D,
+ .last = 0x5D,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x69,
+ .last = 0x69,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x6C,
+ .last = 0x6D,
+ .perm = AB5500_PERM_RW,
+ },
+ {
+ .first = 0x80,
+ .last = 0x81,
+ .perm = AB5500_PERM_RW,
+ },
+ },
+ },
+};
+
+static int ab5500_registers_print(struct seq_file *s, void *p)
+{
+ struct ab5500 *ab = s->private;
+ unsigned int i;
+	u8 bank;
+
+ seq_printf(s, "ab5500 register values:\n");
+ for (bank = 0; bank < AB5500_NUM_BANKS; bank++) {
+ seq_printf(s, " bank %u, %s (0x%x):\n", bank,
+ bankinfo[bank].name,
+ bankinfo[bank].slave_addr);
+ for (i = 0; i < ab5500_reg_ranges[bank].nranges; i++) {
+ u8 reg;
+ int err;
+
+ for (reg = ab5500_reg_ranges[bank].range[i].first;
+ reg <= ab5500_reg_ranges[bank].range[i].last;
+ reg++) {
+ u8 value;
+
+ err = ab5500_get_register_interruptible_raw(ab,
+ bank, reg,
+ &value);
+ if (err < 0) {
+					dev_err(ab->dev, "get_reg failed %d, "
+						"bank 0x%x, reg 0x%x\n",
+						err, bank, reg);
+ return err;
+ }
+
+ err = seq_printf(s, "[%d/0x%02X]: 0x%02X\n",
+ bank, reg, value);
+ if (err < 0) {
+ dev_err(ab->dev,
+ "seq_printf overflow\n");
+ /*
+ * Error is not returned here since
+ * the output is wanted in any case
+ */
+ return 0;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int ab5500_registers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab5500_registers_print, inode->i_private);
+}
+
+static const struct file_operations ab5500_registers_fops = {
+ .open = ab5500_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab5500_bank_print(struct seq_file *s, void *p)
+{
+ struct ab5500 *ab = s->private;
+
+ seq_printf(s, "%d\n", ab->debug_bank);
+ return 0;
+}
+
+static int ab5500_bank_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab5500_bank_print, inode->i_private);
+}
+
+static ssize_t ab5500_bank_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab5500 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_bank;
+ int err;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_bank);
+ if (err)
+ return -EINVAL;
+
+ if (user_bank >= AB5500_NUM_BANKS) {
+ dev_err(ab->dev,
+ "debugfs error input > number of banks\n");
+ return -EINVAL;
+ }
+
+ ab->debug_bank = user_bank;
+
+ return buf_size;
+}
+
+static int ab5500_address_print(struct seq_file *s, void *p)
+{
+ struct ab5500 *ab = s->private;
+
+ seq_printf(s, "0x%02X\n", ab->debug_address);
+ return 0;
+}
+
+static int ab5500_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab5500_address_print, inode->i_private);
+}
+
+static ssize_t ab5500_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab5500 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_address;
+ int err;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_address);
+ if (err)
+ return -EINVAL;
+ if (user_address > 0xff) {
+ dev_err(ab->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ ab->debug_address = user_address;
+ return buf_size;
+}
+
+static int ab5500_val_print(struct seq_file *s, void *p)
+{
+ struct ab5500 *ab = s->private;
+ int err;
+ u8 regvalue;
+
+ err = ab5500_get_register_interruptible_raw(ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, &regvalue);
+ if (err) {
+ dev_err(ab->dev, "get_reg failed %d, bank 0x%x"
+ ", reg 0x%x\n", err, ab->debug_bank,
+ ab->debug_address);
+ return -EINVAL;
+ }
+ seq_printf(s, "0x%02X\n", regvalue);
+
+ return 0;
+}
+
+static int ab5500_val_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab5500_val_print, inode->i_private);
+}
+
+static ssize_t ab5500_val_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab5500 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ u8 regvalue;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val > 0xff) {
+ dev_err(ab->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ err = ab5500_mask_and_set_register_interruptible_raw(
+ ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, 0xFF, (u8)user_val);
+ if (err)
+ return -EINVAL;
+
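+	/* Read the value back so a failed register access is reported. */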
+	err = ab5500_get_register_interruptible_raw(ab, (u8)ab->debug_bank,
+		(u8)ab->debug_address, &regvalue);
+ if (err)
+ return -EINVAL;
+
+ return buf_size;
+}
+
+static const struct file_operations ab5500_bank_fops = {
+ .open = ab5500_bank_open,
+ .write = ab5500_bank_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab5500_address_fops = {
+ .open = ab5500_address_open,
+ .write = ab5500_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab5500_val_fops = {
+ .open = ab5500_val_open,
+ .write = ab5500_val_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab5500_dir;
+static struct dentry *ab5500_reg_file;
+static struct dentry *ab5500_bank_file;
+static struct dentry *ab5500_address_file;
+static struct dentry *ab5500_val_file;
+
+void __init ab5500_setup_debugfs(struct ab5500 *ab)
+{
+ ab->debug_bank = AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP;
+ ab->debug_address = AB5500_CHIP_ID;
+
+ ab5500_dir = debugfs_create_dir("ab5500", NULL);
+ if (!ab5500_dir)
+ goto exit_no_debugfs;
+
+ ab5500_reg_file = debugfs_create_file("all-bank-registers",
+ S_IRUGO, ab5500_dir, ab, &ab5500_registers_fops);
+ if (!ab5500_reg_file)
+ goto exit_destroy_dir;
+
+ ab5500_bank_file = debugfs_create_file("register-bank",
+ (S_IRUGO | S_IWUGO), ab5500_dir, ab, &ab5500_bank_fops);
+ if (!ab5500_bank_file)
+ goto exit_destroy_reg;
+
+ ab5500_address_file = debugfs_create_file("register-address",
+ (S_IRUGO | S_IWUGO), ab5500_dir, ab, &ab5500_address_fops);
+ if (!ab5500_address_file)
+ goto exit_destroy_bank;
+
+ ab5500_val_file = debugfs_create_file("register-value",
+ (S_IRUGO | S_IWUGO), ab5500_dir, ab, &ab5500_val_fops);
+ if (!ab5500_val_file)
+ goto exit_destroy_address;
+
+ return;
+
+exit_destroy_address:
+ debugfs_remove(ab5500_address_file);
+exit_destroy_bank:
+ debugfs_remove(ab5500_bank_file);
+exit_destroy_reg:
+ debugfs_remove(ab5500_reg_file);
+exit_destroy_dir:
+ debugfs_remove(ab5500_dir);
+exit_no_debugfs:
+ dev_err(ab->dev, "failed to create debugfs entries.\n");
+ return;
+}
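+
+/*
+ * Intended usage of the files created above, assuming debugfs is mounted
+ * at /sys/kernel/debug (the bank and address values are only examples):
+ *
+ *	echo 5 > /sys/kernel/debug/ab5500/register-bank
+ *	echo 0x20 > /sys/kernel/debug/ab5500/register-address
+ *	cat /sys/kernel/debug/ab5500/register-value
+ *
+ * i.e. select a bank and a register address first, then read or write the
+ * value file; "all-bank-registers" dumps every readable register at once.
+ */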
+
+void __exit ab5500_remove_debugfs(void)
+{
+ debugfs_remove(ab5500_val_file);
+ debugfs_remove(ab5500_address_file);
+ debugfs_remove(ab5500_bank_file);
+ debugfs_remove(ab5500_reg_file);
+ debugfs_remove(ab5500_dir);
+}
diff --git a/drivers/mfd/ab5500-debugfs.h b/drivers/mfd/ab5500-debugfs.h
new file mode 100644
index 00000000000..7330a9b6afa
--- /dev/null
+++ b/drivers/mfd/ab5500-debugfs.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Debugfs interface to the AB5500 core driver
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+void ab5500_setup_debugfs(struct ab5500 *ab);
+void ab5500_remove_debugfs(void);
+
+#else /* !CONFIG_DEBUG_FS */
+
+static inline void ab5500_setup_debugfs(struct ab5500 *ab)
+{
+}
+
+static inline void ab5500_remove_debugfs(void)
+{
+}
+
+#endif
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 387705e494b..1e9173804ed 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -92,6 +92,8 @@
#define AB8500_REV_REG 0x80
#define AB8500_SWITCH_OFF_STATUS 0x00
+#define AB8500_TURN_ON_STATUS 0x00
+
/*
* Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
* numbers are indexed into this array with (num / 8).
@@ -293,6 +295,7 @@ static struct irq_chip ab8500_irq_chip = {
.irq_bus_lock = ab8500_irq_lock,
.irq_bus_sync_unlock = ab8500_irq_sync_unlock,
.irq_mask = ab8500_irq_mask,
+ .irq_disable = ab8500_irq_mask,
.irq_unmask = ab8500_irq_unmask,
};
@@ -811,12 +814,40 @@ static ssize_t show_switch_off_status(struct device *dev,
return sprintf(buf, "%#x\n", value);
}
+/*
+ * ab8500 has turned on due to (TURN_ON_STATUS):
+ * 0x01 PORnVbat
+ * 0x02 PonKey1dbF
+ * 0x04 PonKey2dbF
+ * 0x08 RTCAlarm
+ * 0x10 MainChDet
+ * 0x20 VbusDet
+ * 0x40 UsbIDDetect
+ * 0x80 Reserved
+ */
+static ssize_t show_turn_on_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u8 value;
+ struct ab8500 *ab8500;
+
+ ab8500 = dev_get_drvdata(dev);
+ ret = get_register_interruptible(ab8500, AB8500_SYS_CTRL1_BLOCK,
+ AB8500_TURN_ON_STATUS, &value);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%#x\n", value);
+}
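+
+/*
+ * Example (illustrative): a turn_on_status value of 0x0A means the AB8500
+ * was turned on by PonKey1dbF (0x02) together with RTCAlarm (0x08).
+ */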
+
static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
static DEVICE_ATTR(switch_off_status, S_IRUGO, show_switch_off_status, NULL);
+static DEVICE_ATTR(turn_on_status, S_IRUGO, show_turn_on_status, NULL);
static struct attribute *ab8500_sysfs_entries[] = {
&dev_attr_chip_id.attr,
&dev_attr_switch_off_status.attr,
+ &dev_attr_turn_on_status.attr,
NULL,
};
@@ -843,11 +874,11 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
return ret;
switch (value) {
- case AB8500_CUTEARLY:
case AB8500_CUT1P0:
case AB8500_CUT1P1:
case AB8500_CUT2P0:
case AB8500_CUT3P0:
+ case AB8500_CUT3P3:
dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
break;
default:
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 64bdeeb1c11..dedb7f65cea 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -8,6 +8,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
+#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index f16afb234ff..e985d1701a8 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -143,12 +143,15 @@ struct ab8500_gpadc *ab8500_gpadc_get(char *name)
}
EXPORT_SYMBOL(ab8500_gpadc_get);
-static int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 input,
+/**
+ * ab8500_gpadc_ad_to_voltage() - Convert a raw ADC value to a voltage
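+ * @gpadc: pointer to the struct ab8500_gpadc obtained from ab8500_gpadc_get()
+ * @channel: analog channel the raw value was sampled from
+ * @ad_value: the raw ADC value to be converted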
+ */
+int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 channel,
int ad_value)
{
int res;
- switch (input) {
+ switch (channel) {
case MAIN_CHARGER_V:
/* For some reason we don't have calibrated data */
if (!gpadc->cal_data[ADC_INPUT_VMAIN].gain) {
@@ -232,18 +235,46 @@ static int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 input,
}
return res;
}
+EXPORT_SYMBOL(ab8500_gpadc_ad_to_voltage);
/**
* ab8500_gpadc_convert() - gpadc conversion
- * @input: analog input to be converted to digital data
+ * @channel: analog channel to be converted to digital data
*
* This function converts the selected analog i/p to digital
* data.
*/
-int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
+int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
+{
+ int ad_value;
+ int voltage;
+
+ ad_value = ab8500_gpadc_read_raw(gpadc, channel);
+ if (ad_value < 0) {
+ dev_err(gpadc->dev, "GPADC raw value failed ch: %d\n", channel);
+ return ad_value;
+ }
+
+ voltage = ab8500_gpadc_ad_to_voltage(gpadc, channel, ad_value);
+
+ if (voltage < 0)
+ dev_err(gpadc->dev, "GPADC to voltage conversion failed ch:"
+ " %d AD: 0x%x\n", channel, ad_value);
+
+ return voltage;
+}
+EXPORT_SYMBOL(ab8500_gpadc_convert);
+
+/**
+ * ab8500_gpadc_read_raw() - gpadc read
+ * @channel: analog channel to be read
+ *
+ * This function obtains the raw ADC value; the result then needs
+ * to be converted by calling ab8500_gpadc_ad_to_voltage().
+ */
+int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
{
int ret;
- u16 data = 0;
int looplimit = 0;
u8 val, low_data, high_data;
@@ -278,9 +309,9 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
goto out;
}
- /* Select the input source and set average samples to 16 */
+ /* Select the channel source and set average samples to 16 */
ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
- AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16));
+ AB8500_GPADC_CTRL2_REG, (channel | SW_AVG_16));
if (ret < 0) {
dev_err(gpadc->dev,
"gpadc_conversion: set avg samples failed\n");
@@ -292,7 +323,7 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
* charging current sense if it needed, ABB 3.0 needs some special
* treatment too.
*/
- switch (input) {
+ switch (channel) {
case MAIN_CHARGER_C:
case USB_CHARGER_C:
ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
@@ -359,7 +390,6 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
goto out;
}
- data = (high_data << 8) | low_data;
/* Disable GPADC */
ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
AB8500_GPADC_CTRL1_REG, DIS_GPADC);
@@ -370,8 +400,8 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
/* Disable VTVout LDO this is required for GPADC */
regulator_disable(gpadc->regu);
mutex_unlock(&gpadc->ab8500_gpadc_lock);
- ret = ab8500_gpadc_ad_to_voltage(gpadc, input, data);
- return ret;
+
+ return (high_data << 8) | low_data;
out:
/*
@@ -385,10 +415,10 @@ out:
regulator_disable(gpadc->regu);
mutex_unlock(&gpadc->ab8500_gpadc_lock);
dev_err(gpadc->dev,
- "gpadc_conversion: Failed to AD convert channel %d\n", input);
+ "gpadc_conversion: Failed to AD convert channel %d\n", channel);
return ret;
}
-EXPORT_SYMBOL(ab8500_gpadc_convert);
+EXPORT_SYMBOL(ab8500_gpadc_read_raw);
/**
* ab8500_bm_gpswadcconvend_handler() - isr for s/w gpadc conversion completion
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 392185965b3..f20feefac19 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -5,6 +5,7 @@
*/
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index f12720dbe12..7ce65f49480 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/mfd/abx500.h>
static LIST_HEAD(abx500_list);
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index c71ae09430c..b85bbd7f0d1 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -584,7 +585,7 @@ static int asic3_gpio_remove(struct platform_device *pdev)
return gpiochip_remove(&asic->gpio);
}
-static int asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
+static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
{
unsigned long flags;
u32 cdex;
@@ -596,8 +597,6 @@ static int asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
}
spin_unlock_irqrestore(&asic->lock, flags);
-
- return 0;
}
static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
@@ -779,6 +778,8 @@ static struct mfd_cell asic3_cell_mmc = {
.name = "tmio-mmc",
.enable = asic3_mmc_enable,
.disable = asic3_mmc_disable,
+ .suspend = asic3_mmc_disable,
+ .resume = asic3_mmc_enable,
.platform_data = &asic3_mmc_data,
.pdata_size = sizeof(asic3_mmc_data),
.num_resources = ARRAY_SIZE(asic3_mmc_resources),
@@ -811,24 +812,43 @@ static int asic3_leds_disable(struct platform_device *pdev)
return 0;
}
+static int asic3_leds_suspend(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ while (asic3_gpio_get(&asic->gpio, ASIC3_GPIO(C, cell->id)) != 0)
+ msleep(1);
+
+ asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
+
+ return 0;
+}
+
static struct mfd_cell asic3_cell_leds[ASIC3_NUM_LEDS] = {
[0] = {
.name = "leds-asic3",
.id = 0,
.enable = asic3_leds_enable,
.disable = asic3_leds_disable,
+ .suspend = asic3_leds_suspend,
+ .resume = asic3_leds_enable,
},
[1] = {
.name = "leds-asic3",
.id = 1,
.enable = asic3_leds_enable,
.disable = asic3_leds_disable,
+ .suspend = asic3_leds_suspend,
+ .resume = asic3_leds_enable,
},
[2] = {
.name = "leds-asic3",
.id = 2,
.enable = asic3_leds_enable,
.disable = asic3_leds_disable,
+ .suspend = asic3_leds_suspend,
+ .resume = asic3_leds_enable,
},
};
@@ -949,6 +969,7 @@ static int __init asic3_probe(struct platform_device *pdev)
goto out_unmap;
}
+ asic->gpio.label = "asic3";
asic->gpio.base = pdata->gpio_base;
asic->gpio.ngpio = ASIC3_NUM_GPIOS;
asic->gpio.get = asic3_gpio_get;
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 2fadbaeb1cb..1b79c37fd59 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -523,7 +523,7 @@ static int __devinit da903x_probe(struct i2c_client *client,
chip->ops->read_events(chip, &tmp);
ret = request_irq(client->irq, da903x_irq_handler,
- IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING,
"da903x", chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c
index 9dbb3cab4a6..bb115b2f04e 100644
--- a/drivers/mfd/db5500-prcmu.c
+++ b/drivers/mfd/db5500-prcmu.c
@@ -20,11 +20,11 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
-#include <linux/mfd/db5500-prcmu.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db5500-regs.h>
-#include "db5500-prcmu-regs.h"
+#include "dbx500-prcmu-regs.h"
#define _PRCM_MB_HEADER (tcdm_base + 0xFE8)
#define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0)
@@ -109,15 +109,18 @@ enum mb5_header {
#define PRCMU_DSI_CLOCK_SETTING 0x00000128
/* TVCLK_MGT PLLSW=001 (PLLSOC0) PLLDIV=0x13, = 19.05 MHZ */
#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000135
-#define PRCMU_PLLDSI_FREQ_SETTING 0x0004013C
+#define PRCMU_PLLDSI_FREQ_SETTING 0x00020121
#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000002
-#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000101
+#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000201
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00000101
#define PRCMU_ENABLE_PLLDSI 0x00000001
#define PRCMU_DISABLE_PLLDSI 0x00000000
#define PRCMU_DSI_RESET_SW 0x00000003
+#define PRCMU_RESOUTN0_PIN 0x00000001
+#define PRCMU_RESOUTN1_PIN 0x00000002
+#define PRCMU_RESOUTN2_PIN 0x00000004
#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
@@ -315,31 +318,31 @@ static bool read_mailbox_0(void)
r = false;
break;
}
- writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
return r;
}
static bool read_mailbox_1(void)
{
- writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_2(void)
{
- writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_3(void)
{
- writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_4(void)
{
- writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
return false;
}
@@ -360,19 +363,19 @@ static bool read_mailbox_5(void)
print_unknown_header_warning(5, header);
break;
}
- writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_6(void)
{
- writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_7(void)
{
- writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR);
+ writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
return false;
}
@@ -434,7 +437,7 @@ int __init db5500_prcmu_init(void)
return -ENODEV;
/* Clean up the mailbox interrupts after pre-kernel code. */
- writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLEAR);
+ writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
prcmu_irq_thread_fn, 0, "prcmu", NULL);
diff --git a/drivers/mfd/db8500-prcmu-regs.h b/drivers/mfd/db8500-prcmu-regs.h
deleted file mode 100644
index 3bbf04d5804..00000000000
--- a/drivers/mfd/db8500-prcmu-regs.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
- *
- * License Terms: GNU General Public License v2
- *
- * PRCM Unit registers
- */
-#ifndef __DB8500_PRCMU_REGS_H
-#define __DB8500_PRCMU_REGS_H
-
-#include <linux/bitops.h>
-#include <mach/hardware.h>
-
-#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
-
-#define PRCM_ARM_PLLDIVPS 0x118
-#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE BITS(0, 5)
-#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xF
-
-#define PRCM_PLLARM_LOCKP 0x0A8
-#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 BIT(1)
-
-#define PRCM_ARM_CHGCLKREQ 0x114
-#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ BIT(0)
-
-#define PRCM_PLLARM_ENABLE 0x98
-#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE BIT(0)
-#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON BIT(8)
-
-#define PRCM_ARMCLKFIX_MGT 0x0
-#define PRCM_A9_RESETN_CLR 0x1f4
-#define PRCM_A9_RESETN_SET 0x1f0
-#define PRCM_ARM_LS_CLAMP 0x30C
-#define PRCM_SRAM_A9 0x308
-
-/* ARM WFI Standby signal register */
-#define PRCM_ARM_WFI_STANDBY 0x130
-#define PRCM_IOCR 0x310
-#define PRCM_IOCR_IOFORCE BIT(0)
-
-/* CPU mailbox registers */
-#define PRCM_MBOX_CPU_VAL 0x0FC
-#define PRCM_MBOX_CPU_SET 0x100
-
-/* Dual A9 core interrupt management unit registers */
-#define PRCM_A9_MASK_REQ 0x328
-#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ BIT(0)
-
-#define PRCM_A9_MASK_ACK 0x32C
-#define PRCM_ARMITMSK31TO0 0x11C
-#define PRCM_ARMITMSK63TO32 0x120
-#define PRCM_ARMITMSK95TO64 0x124
-#define PRCM_ARMITMSK127TO96 0x128
-#define PRCM_POWER_STATE_VAL 0x25C
-#define PRCM_ARMITVAL31TO0 0x260
-#define PRCM_ARMITVAL63TO32 0x264
-#define PRCM_ARMITVAL95TO64 0x268
-#define PRCM_ARMITVAL127TO96 0x26C
-
-#define PRCM_HOSTACCESS_REQ 0x334
-#define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ BIT(0)
-
-#define PRCM_ARM_IT1_CLR 0x48C
-#define PRCM_ARM_IT1_VAL 0x494
-
-#define PRCM_ITSTATUS0 0x148
-#define PRCM_ITSTATUS1 0x150
-#define PRCM_ITSTATUS2 0x158
-#define PRCM_ITSTATUS3 0x160
-#define PRCM_ITSTATUS4 0x168
-#define PRCM_ITSTATUS5 0x484
-#define PRCM_ITCLEAR5 0x488
-#define PRCM_ARMIT_MASKXP70_IT 0x1018
-
-/* System reset register */
-#define PRCM_APE_SOFTRST 0x228
-
-/* Level shifter and clamp control registers */
-#define PRCM_MMIP_LS_CLAMP_SET 0x420
-#define PRCM_MMIP_LS_CLAMP_CLR 0x424
-
-/* PRCMU HW semaphore */
-#define PRCM_SEM 0x400
-#define PRCM_SEM_PRCM_SEM BIT(0)
-
-/* PRCMU clock/PLL/reset registers */
-#define PRCM_PLLDSI_FREQ 0x500
-#define PRCM_PLLDSI_ENABLE 0x504
-#define PRCM_PLLDSI_LOCKP 0x508
-#define PRCM_DSI_PLLOUT_SEL 0x530
-#define PRCM_DSITVCLK_DIV 0x52C
-#define PRCM_APE_RESETN_SET 0x1E4
-#define PRCM_APE_RESETN_CLR 0x1E8
-
-#define PRCM_TCR 0x1C8
-#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
-#define PRCM_TCR_STOP_TIMERS BIT(16)
-#define PRCM_TCR_DOZE_MODE BIT(17)
-
-#define PRCM_CLKOCR 0x1CC
-#define PRCM_CLKOCR_CLKODIV0_SHIFT 0
-#define PRCM_CLKOCR_CLKODIV0_MASK BITS(0, 5)
-#define PRCM_CLKOCR_CLKOSEL0_SHIFT 6
-#define PRCM_CLKOCR_CLKOSEL0_MASK BITS(6, 8)
-#define PRCM_CLKOCR_CLKODIV1_SHIFT 16
-#define PRCM_CLKOCR_CLKODIV1_MASK BITS(16, 21)
-#define PRCM_CLKOCR_CLKOSEL1_SHIFT 22
-#define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24)
-#define PRCM_CLKOCR_CLK1TYPE BIT(28)
-
-#define PRCM_SGACLK_MGT 0x014
-#define PRCM_UARTCLK_MGT 0x018
-#define PRCM_MSP02CLK_MGT 0x01C
-#define PRCM_MSP1CLK_MGT 0x288
-#define PRCM_I2CCLK_MGT 0x020
-#define PRCM_SDMMCCLK_MGT 0x024
-#define PRCM_SLIMCLK_MGT 0x028
-#define PRCM_PER1CLK_MGT 0x02C
-#define PRCM_PER2CLK_MGT 0x030
-#define PRCM_PER3CLK_MGT 0x034
-#define PRCM_PER5CLK_MGT 0x038
-#define PRCM_PER6CLK_MGT 0x03C
-#define PRCM_PER7CLK_MGT 0x040
-#define PRCM_LCDCLK_MGT 0x044
-#define PRCM_BMLCLK_MGT 0x04C
-#define PRCM_HSITXCLK_MGT 0x050
-#define PRCM_HSIRXCLK_MGT 0x054
-#define PRCM_HDMICLK_MGT 0x058
-#define PRCM_APEATCLK_MGT 0x05C
-#define PRCM_APETRACECLK_MGT 0x060
-#define PRCM_MCDECLK_MGT 0x064
-#define PRCM_IPI2CCLK_MGT 0x068
-#define PRCM_DSIALTCLK_MGT 0x06C
-#define PRCM_DMACLK_MGT 0x074
-#define PRCM_B2R2CLK_MGT 0x078
-#define PRCM_TVCLK_MGT 0x07C
-#define PRCM_UNIPROCLK_MGT 0x278
-#define PRCM_SSPCLK_MGT 0x280
-#define PRCM_RNGCLK_MGT 0x284
-#define PRCM_UICCCLK_MGT 0x27C
-
-#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
-#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
-#define PRCM_CLK_MGT_CLKEN BIT(8)
-
-/* ePOD and memory power signal control registers */
-#define PRCM_EPOD_C_SET 0x410
-#define PRCM_SRAM_LS_SLEEP 0x304
-
-/* Debug power control unit registers */
-#define PRCM_POWER_STATE_SET 0x254
-
-/* Miscellaneous unit registers */
-#define PRCM_DSI_SW_RESET 0x324
-#define PRCM_GPIOCR 0x138
-
-/* GPIOCR register */
-#define PRCM_GPIOCR_SPI2_SELECT BIT(23)
-
-#define PRCM_DDR_SUBSYS_APE_MINBW 0x438
-
-#endif /* __DB8500_PRCMU_REGS_H */
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 02a15d7cb3b..a25ab9c6b5a 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -27,14 +27,14 @@
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/db8500-prcmu.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
#include <mach/id.h>
-#include "db8500-prcmu-regs.h"
+#include "dbx500-prcmu-regs.h"
/* Offset for the firmware version within the TCPM */
#define PRCMU_FW_VERSION_OFFSET 0xA4
@@ -131,12 +131,14 @@
#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
#define MB1H_RELEASE_USB_WAKEUP 0x5
+#define MB1H_PLL_ON_OFF 0x6
/* Mailbox 1 Requests */
#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
-#define PRCM_REQ_MB1_APE_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x4)
-#define PRCM_REQ_MB1_ARM_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x8)
+#define PRCM_REQ_MB1_PLL_ON_OFF (PRCM_REQ_MB1 + 0x4)
+#define PLL_SOC1_OFF 0x4
+#define PLL_SOC1_ON 0x8
/* Mailbox 1 ACKs */
#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
@@ -184,6 +186,11 @@
#define MB4H_HOTDOG 0x12
#define MB4H_HOTMON 0x13
#define MB4H_HOT_PERIOD 0x14
+#define MB4H_A9WDOG_CONF 0x16
+#define MB4H_A9WDOG_EN 0x17
+#define MB4H_A9WDOG_DIS 0x18
+#define MB4H_A9WDOG_LOAD 0x19
+#define MB4H_A9WDOG_KICK 0x20
/* Mailbox 4 Requests */
#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0)
@@ -196,6 +203,13 @@
#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0)
#define HOTMON_CONFIG_LOW BIT(0)
#define HOTMON_CONFIG_HIGH BIT(1)
+#define PRCM_REQ_MB4_A9WDOG_0 (PRCM_REQ_MB4 + 0x0)
+#define PRCM_REQ_MB4_A9WDOG_1 (PRCM_REQ_MB4 + 0x1)
+#define PRCM_REQ_MB4_A9WDOG_2 (PRCM_REQ_MB4 + 0x2)
+#define PRCM_REQ_MB4_A9WDOG_3 (PRCM_REQ_MB4 + 0x3)
+#define A9WDOG_AUTO_OFF_EN BIT(7)
+#define A9WDOG_AUTO_OFF_DIS 0
+#define A9WDOG_ID_MASK 0xf
/* Mailbox 5 Requests */
#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
@@ -412,7 +426,7 @@ struct clk_mgt {
static DEFINE_SPINLOCK(clk_mgt_lock);
-#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT), 0 }
+#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT_OFF), 0 }
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
CLK_MGT_ENTRY(SGACLK),
CLK_MGT_ENTRY(UARTCLK),
@@ -445,6 +459,35 @@ struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
CLK_MGT_ENTRY(UICCCLK),
};
+static struct regulator *hwacc_regulator[NUM_HW_ACC];
+static struct regulator *hwacc_ret_regulator[NUM_HW_ACC];
+
+static bool hwacc_enabled[NUM_HW_ACC];
+static bool hwacc_ret_enabled[NUM_HW_ACC];
+
+static const char *hwacc_regulator_name[NUM_HW_ACC] = {
+ [HW_ACC_SVAMMDSP] = "hwacc-sva-mmdsp",
+ [HW_ACC_SVAPIPE] = "hwacc-sva-pipe",
+ [HW_ACC_SIAMMDSP] = "hwacc-sia-mmdsp",
+ [HW_ACC_SIAPIPE] = "hwacc-sia-pipe",
+ [HW_ACC_SGA] = "hwacc-sga",
+ [HW_ACC_B2R2] = "hwacc-b2r2",
+ [HW_ACC_MCDE] = "hwacc-mcde",
+ [HW_ACC_ESRAM1] = "hwacc-esram1",
+ [HW_ACC_ESRAM2] = "hwacc-esram2",
+ [HW_ACC_ESRAM3] = "hwacc-esram3",
+ [HW_ACC_ESRAM4] = "hwacc-esram4",
+};
+
+static const char *hwacc_ret_regulator_name[NUM_HW_ACC] = {
+ [HW_ACC_SVAMMDSP] = "hwacc-sva-mmdsp-ret",
+ [HW_ACC_SIAMMDSP] = "hwacc-sia-mmdsp-ret",
+ [HW_ACC_ESRAM1] = "hwacc-esram1-ret",
+ [HW_ACC_ESRAM2] = "hwacc-esram2-ret",
+ [HW_ACC_ESRAM3] = "hwacc-esram3-ret",
+ [HW_ACC_ESRAM4] = "hwacc-esram4-ret",
+};
+
/*
* Used by MCDE to setup all necessary PRCMU registers
*/
@@ -493,55 +536,51 @@ static struct {
} prcmu_version;
-int prcmu_enable_dsipll(void)
+int db8500_prcmu_enable_dsipll(void)
{
int i;
unsigned int plldsifreq;
/* Clear DSIPLL_RESETN */
- writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_CLR));
+ writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
/* Unclamp DSIPLL in/out */
- writel(PRCMU_UNCLAMP_DSIPLL, (_PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR));
+ writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
if (prcmu_is_u8400())
plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
else
plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
/* Set DSI PLL FREQ */
- writel(plldsifreq, (_PRCMU_BASE + PRCM_PLLDSI_FREQ));
- writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
- (_PRCMU_BASE + PRCM_DSI_PLLOUT_SEL));
+ writel(plldsifreq, PRCM_PLLDSI_FREQ);
+ writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
/* Enable Escape clocks */
- writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV,
- (_PRCMU_BASE + PRCM_DSITVCLK_DIV));
+ writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
/* Start DSI PLL */
- writel(PRCMU_ENABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
+ writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
/* Reset DSI PLL */
- writel(PRCMU_DSI_RESET_SW, (_PRCMU_BASE + PRCM_DSI_SW_RESET));
+ writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
for (i = 0; i < 10; i++) {
- if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
- PRCMU_PLLDSI_LOCKP_LOCKED)
+ if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
== PRCMU_PLLDSI_LOCKP_LOCKED)
break;
udelay(100);
}
/* Set DSIPLL_RESETN */
- writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_SET));
+ writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
return 0;
}
-int prcmu_disable_dsipll(void)
+int db8500_prcmu_disable_dsipll(void)
{
/* Disable dsi pll */
- writel(PRCMU_DISABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
+ writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
/* Disable escapeclock */
- writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV,
- (_PRCMU_BASE + PRCM_DSITVCLK_DIV));
+ writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
return 0;
}
-int prcmu_set_display_clocks(void)
+int db8500_prcmu_set_display_clocks(void)
{
unsigned long flags;
unsigned int dsiclk;
@@ -554,15 +593,15 @@ int prcmu_set_display_clocks(void)
spin_lock_irqsave(&clk_mgt_lock, flags);
/* Grab the HW semaphore. */
- while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
- writel(dsiclk, (_PRCMU_BASE + PRCM_HDMICLK_MGT));
- writel(PRCMU_DSI_LP_CLOCK_SETTING, (_PRCMU_BASE + PRCM_TVCLK_MGT));
- writel(PRCMU_DPI_CLOCK_SETTING, (_PRCMU_BASE + PRCM_LCDCLK_MGT));
+ writel(dsiclk, PRCM_HDMICLK_MGT);
+ writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
+ writel(PRCMU_DPI_CLOCK_SETTING, PRCM_LCDCLK_MGT);
/* Release the HW semaphore. */
- writel(0, (_PRCMU_BASE + PRCM_SEM));
+ writel(0, PRCM_SEM);
spin_unlock_irqrestore(&clk_mgt_lock, flags);
@@ -578,8 +617,8 @@ void prcmu_enable_spi2(void)
unsigned long flags;
spin_lock_irqsave(&gpiocr_lock, flags);
- reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
- writel(reg | PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
+ reg = readl(PRCM_GPIOCR);
+ writel(reg | PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
spin_unlock_irqrestore(&gpiocr_lock, flags);
}
@@ -592,8 +631,8 @@ void prcmu_disable_spi2(void)
unsigned long flags;
spin_lock_irqsave(&gpiocr_lock, flags);
- reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
- writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
+ reg = readl(PRCM_GPIOCR);
+ writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
spin_unlock_irqrestore(&gpiocr_lock, flags);
}
@@ -701,7 +740,7 @@ int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
spin_lock_irqsave(&clkout_lock, flags);
- val = readl(_PRCMU_BASE + PRCM_CLKOCR);
+ val = readl(PRCM_CLKOCR);
if (val & div_mask) {
if (div) {
if ((val & mask) != bits) {
@@ -715,7 +754,7 @@ int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
}
}
}
- writel((bits | (val & ~mask)), (_PRCMU_BASE + PRCM_CLKOCR));
+ writel((bits | (val & ~mask)), PRCM_CLKOCR);
requests[clkout] += (div ? 1 : -1);
unlock_and_return:
@@ -724,7 +763,7 @@ unlock_and_return:
return r;
}
-int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
+int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
{
unsigned long flags;
@@ -732,7 +771,7 @@ int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
spin_lock_irqsave(&mb0_transfer.lock, flags);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
@@ -741,7 +780,7 @@ int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
writeb((keep_ulp_clk ? 1 : 0),
(tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
- writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
@@ -770,18 +809,18 @@ static void config_wakeups(void)
return;
for (i = 0; i < 2; i++) {
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
- writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
}
last_dbb_events = dbb_events;
last_abb_events = abb_events;
}
-void prcmu_enable_wakeups(u32 wakeups)
+void db8500_prcmu_enable_wakeups(u32 wakeups)
{
unsigned long flags;
u32 bits;
@@ -802,7 +841,7 @@ void prcmu_enable_wakeups(u32 wakeups)
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
-void prcmu_config_abb_event_readout(u32 abb_events)
+void db8500_prcmu_config_abb_event_readout(u32 abb_events)
{
unsigned long flags;
@@ -814,7 +853,7 @@ void prcmu_config_abb_event_readout(u32 abb_events)
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
-void prcmu_get_abb_event_buffer(void __iomem **buf)
+void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
{
if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
@@ -823,13 +862,13 @@ void prcmu_get_abb_event_buffer(void __iomem **buf)
}
/**
- * prcmu_set_arm_opp - set the appropriate ARM OPP
+ * db8500_prcmu_set_arm_opp - set the appropriate ARM OPP
* @opp: The new ARM operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the ARM.
*/
-int prcmu_set_arm_opp(u8 opp)
+int db8500_prcmu_set_arm_opp(u8 opp)
{
int r;
@@ -840,14 +879,14 @@ int prcmu_set_arm_opp(u8 opp)
mutex_lock(&mb1_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
- writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
@@ -860,11 +899,11 @@ int prcmu_set_arm_opp(u8 opp)
}
/**
- * prcmu_get_arm_opp - get the current ARM OPP
+ * db8500_prcmu_get_arm_opp - get the current ARM OPP
*
* Returns: the current ARM OPP
*/
-int prcmu_get_arm_opp(void)
+int db8500_prcmu_get_arm_opp(void)
{
return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
}
@@ -876,7 +915,7 @@ int prcmu_get_arm_opp(void)
*/
int prcmu_get_ddr_opp(void)
{
- return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
+ return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}
/**
@@ -892,7 +931,7 @@ int prcmu_set_ddr_opp(u8 opp)
return -EINVAL;
/* Changing the DDR OPP can hang the hardware pre-v21 */
if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
- writeb(opp, (_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW));
+ writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
return 0;
}
@@ -909,14 +948,14 @@ int prcmu_set_ape_opp(u8 opp)
mutex_lock(&mb1_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
- writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
@@ -966,12 +1005,12 @@ int prcmu_request_ape_opp_100_voltage(bool enable)
header = MB1H_RELEASE_APE_OPP_100_VOLT;
}
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
- writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != header) ||
@@ -995,13 +1034,13 @@ int prcmu_release_usb_wakeup_state(void)
mutex_lock(&mb1_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_RELEASE_USB_WAKEUP,
(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
- writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
@@ -1013,15 +1052,169 @@ int prcmu_release_usb_wakeup_state(void)
return r;
}
+static int request_pll(u8 clock, bool enable)
+{
+ int r = 0;
+
+ if (clock == PRCMU_PLLSOC1)
+ clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
+ else
+ return -EINVAL;
+
+ mutex_lock(&mb1_transfer.lock);
+
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ cpu_relax();
+
+ writeb(MB1H_PLL_ON_OFF, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
+ writeb(clock, (tcdm_base + PRCM_REQ_MB1_PLL_ON_OFF));
+
+ writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
+ wait_for_completion(&mb1_transfer.work);
+
+ if (mb1_transfer.ack.header != MB1H_PLL_ON_OFF)
+ r = -EIO;
+
+ mutex_unlock(&mb1_transfer.lock);
+
+ return r;
+}
+
/**
- * prcmu_set_epod - set the state of a EPOD (power domain)
+ * prcmu_set_hwacc - set the power state of a h/w accelerator
+ * @hwacc_dev: The hardware accelerator (enum hw_acc_dev).
+ * @state: The new power state (enum hw_acc_state).
+ *
+ * This function sets the power state of a hardware accelerator.
+ * This function should not be called from interrupt context.
+ *
+ * NOTE! Deprecated, to be removed when all users have switched over to use the
+ * regulator framework API.
+ */
+int prcmu_set_hwacc(u16 hwacc_dev, u8 state)
+{
+ int r = 0;
+ bool ram_retention = false;
+ bool enable, enable_ret;
+
+ /* check argument */
+ BUG_ON(hwacc_dev >= NUM_HW_ACC);
+
+ /* get state of switches */
+ enable = hwacc_enabled[hwacc_dev];
+ enable_ret = hwacc_ret_enabled[hwacc_dev];
+
+ /* set flag if retention is possible */
+ switch (hwacc_dev) {
+ case HW_ACC_SVAMMDSP:
+ case HW_ACC_SIAMMDSP:
+ case HW_ACC_ESRAM1:
+ case HW_ACC_ESRAM2:
+ case HW_ACC_ESRAM3:
+ case HW_ACC_ESRAM4:
+ ram_retention = true;
+ break;
+ }
+
+ /* check argument */
+ BUG_ON(state > HW_ON);
+ BUG_ON(state == HW_OFF_RAMRET && !ram_retention);
+
+ /* modify enable flags */
+ switch (state) {
+ case HW_OFF:
+ enable_ret = false;
+ enable = false;
+ break;
+ case HW_ON:
+ enable = true;
+ break;
+ case HW_OFF_RAMRET:
+ enable_ret = true;
+ enable = false;
+ break;
+ }
+
+ /* get regulator (lazy) */
+ if (hwacc_regulator[hwacc_dev] == NULL) {
+ hwacc_regulator[hwacc_dev] = regulator_get(NULL,
+ hwacc_regulator_name[hwacc_dev]);
+ if (IS_ERR(hwacc_regulator[hwacc_dev])) {
+ pr_err("prcmu: failed to get supply %s\n",
+ hwacc_regulator_name[hwacc_dev]);
+ r = PTR_ERR(hwacc_regulator[hwacc_dev]);
+ goto out;
+ }
+ }
+
+ if (ram_retention) {
+ if (hwacc_ret_regulator[hwacc_dev] == NULL) {
+ hwacc_ret_regulator[hwacc_dev] = regulator_get(NULL,
+ hwacc_ret_regulator_name[hwacc_dev]);
+ if (IS_ERR(hwacc_ret_regulator[hwacc_dev])) {
+ pr_err("prcmu: failed to get supply %s\n",
+ hwacc_ret_regulator_name[hwacc_dev]);
+ r = PTR_ERR(hwacc_ret_regulator[hwacc_dev]);
+ goto out;
+ }
+ }
+ }
+
+ /* set regulators */
+ if (ram_retention) {
+ if (enable_ret && !hwacc_ret_enabled[hwacc_dev]) {
+ r = regulator_enable(hwacc_ret_regulator[hwacc_dev]);
+ if (r < 0) {
+ pr_err("prcmu_set_hwacc: ret enable failed\n");
+ goto out;
+ }
+ hwacc_ret_enabled[hwacc_dev] = true;
+ }
+ }
+
+ if (enable && !hwacc_enabled[hwacc_dev]) {
+ r = regulator_enable(hwacc_regulator[hwacc_dev]);
+ if (r < 0) {
+ pr_err("prcmu_set_hwacc: enable failed\n");
+ goto out;
+ }
+ hwacc_enabled[hwacc_dev] = true;
+ }
+
+ if (!enable && hwacc_enabled[hwacc_dev]) {
+ r = regulator_disable(hwacc_regulator[hwacc_dev]);
+ if (r < 0) {
+ pr_err("prcmu_set_hwacc: disable failed\n");
+ goto out;
+ }
+ hwacc_enabled[hwacc_dev] = false;
+ }
+
+ if (ram_retention) {
+ if (!enable_ret && hwacc_ret_enabled[hwacc_dev]) {
+ r = regulator_disable(hwacc_ret_regulator[hwacc_dev]);
+ if (r < 0) {
+ pr_err("prcmu_set_hwacc: ret disable failed\n");
+ goto out;
+ }
+ hwacc_ret_enabled[hwacc_dev] = false;
+ }
+ }
+
+out:
+ return r;
+}
+EXPORT_SYMBOL(prcmu_set_hwacc);
+
+/**
+ * db8500_prcmu_set_epod - set the state of an EPOD (power domain)
* @epod_id: The EPOD to set
* @epod_state: The new EPOD state
*
* This function sets the state of an EPOD (power domain). It may not be called
* from interrupt context.
*/
-int prcmu_set_epod(u16 epod_id, u8 epod_state)
+int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
{
int r = 0;
bool ram_retention = false;
@@ -1048,7 +1241,7 @@ int prcmu_set_epod(u16 epod_id, u8 epod_state)
mutex_lock(&mb2_transfer.lock);
/* wait for mailbox */
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
cpu_relax();
/* fill in mailbox */
@@ -1058,7 +1251,7 @@ int prcmu_set_epod(u16 epod_id, u8 epod_state)
writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));
- writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);
/*
* The current firmware version does not handle errors correctly,
@@ -1145,13 +1338,13 @@ static int request_sysclk(bool enable)
spin_lock_irqsave(&mb3_transfer.lock, flags);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
cpu_relax();
writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));
writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
- writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb3_transfer.lock, flags);
@@ -1177,7 +1370,7 @@ static int request_timclk(bool enable)
if (!enable)
val |= PRCM_TCR_STOP_TIMERS;
- writel(val, (_PRCMU_BASE + PRCM_TCR));
+ writel(val, PRCM_TCR);
return 0;
}
@@ -1190,7 +1383,7 @@ static int request_reg_clock(u8 clock, bool enable)
spin_lock_irqsave(&clk_mgt_lock, flags);
/* Grab the HW semaphore. */
- while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
@@ -1203,34 +1396,61 @@ static int request_reg_clock(u8 clock, bool enable)
writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
/* Release the HW semaphore. */
- writel(0, (_PRCMU_BASE + PRCM_SEM));
+ writel(0, PRCM_SEM);
spin_unlock_irqrestore(&clk_mgt_lock, flags);
return 0;
}
+static int request_sga_clock(u8 clock, bool enable)
+{
+ u32 val;
+ int ret;
+
+ if (enable) {
+ val = readl(PRCM_CGATING_BYPASS);
+ writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
+ }
+
+ ret = request_reg_clock(clock, enable);
+
+ if (!ret && !enable) {
+ val = readl(PRCM_CGATING_BYPASS);
+ writel(val & ~PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
+ }
+
+ return ret;
+}
+
/**
- * prcmu_request_clock() - Request for a clock to be enabled or disabled.
+ * db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
* @clock: The clock for which the request is made.
* @enable: Whether the clock should be enabled (true) or disabled (false).
*
* This function should only be used by the clock implementation.
* Do not use it from any other place!
*/
-int prcmu_request_clock(u8 clock, bool enable)
+int db8500_prcmu_request_clock(u8 clock, bool enable)
{
- if (clock < PRCMU_NUM_REG_CLOCKS)
- return request_reg_clock(clock, enable);
- else if (clock == PRCMU_TIMCLK)
+ switch (clock) {
+ case PRCMU_SGACLK:
+ return request_sga_clock(clock, enable);
+ case PRCMU_TIMCLK:
return request_timclk(enable);
- else if (clock == PRCMU_SYSCLK)
+ case PRCMU_SYSCLK:
return request_sysclk(enable);
- else
- return -EINVAL;
+ case PRCMU_PLLSOC1:
+ return request_pll(clock, enable);
+ default:
+ break;
+ }
+ if (clock < PRCMU_NUM_REG_CLOCKS)
+ return request_reg_clock(clock, enable);
+ return -EINVAL;
}
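
For illustration only, here is a minimal caller-side sketch of the dispatch above; it is not taken from the patch, and the header include is an assumption about where the prototypes live. The switch is evaluated before the generic PRCMU_NUM_REG_CLOCKS range check, so PRCMU_SGACLK goes through request_sga_clock(), which toggles the ICN-2 clock-gating bypass around the common clk_mgt register write, even though the clock also has a clk_mgt entry.

    #include <linux/mfd/db8500-prcmu.h>	/* assumed location of the prototypes */

    /* Illustrative only: enable and later disable the SGA clock via the PRCMU. */
    static int example_sga_clock_on(void)
    {
            /* request_sga_clock(): lift ICN-2 gating, then program the clk_mgt register. */
            return db8500_prcmu_request_clock(PRCMU_SGACLK, true);
    }

    static void example_sga_clock_off(void)
    {
            /* Disable path: program the clk_mgt register first, then re-engage ICN-2 gating. */
            db8500_prcmu_request_clock(PRCMU_SGACLK, false);
    }
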
-int prcmu_config_esram0_deep_sleep(u8 state)
+int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
(state < ESRAM0_DEEP_SLEEP_STATE_OFF))
@@ -1238,7 +1458,7 @@ int prcmu_config_esram0_deep_sleep(u8 state)
mutex_lock(&mb4_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
@@ -1248,7 +1468,7 @@ int prcmu_config_esram0_deep_sleep(u8 state)
(tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));
- writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
@@ -1260,13 +1480,13 @@ int prcmu_config_hotdog(u8 threshold)
{
mutex_lock(&mb4_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
- writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
@@ -1278,7 +1498,7 @@ int prcmu_config_hotmon(u8 low, u8 high)
{
mutex_lock(&mb4_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
@@ -1287,7 +1507,7 @@ int prcmu_config_hotmon(u8 low, u8 high)
(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
- writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
@@ -1299,13 +1519,13 @@ static int config_hot_period(u16 val)
{
mutex_lock(&mb4_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
- writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
@@ -1326,6 +1546,78 @@ int prcmu_stop_temp_sense(void)
return config_hot_period(0xFFFF);
}
+static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
+{
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writeb(d0, (tcdm_base + PRCM_REQ_MB4_A9WDOG_0));
+ writeb(d1, (tcdm_base + PRCM_REQ_MB4_A9WDOG_1));
+ writeb(d2, (tcdm_base + PRCM_REQ_MB4_A9WDOG_2));
+ writeb(d3, (tcdm_base + PRCM_REQ_MB4_A9WDOG_3));
+
+ writeb(cmd, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
+
+ writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
+ wait_for_completion(&mb4_transfer.work);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return 0;
+
+}
+
+int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+{
+ BUG_ON(num == 0 || num > 0xf);
+ return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0,
+ sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
+ A9WDOG_AUTO_OFF_DIS);
+}
+
+int prcmu_enable_a9wdog(u8 id)
+{
+ return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
+}
+
+int prcmu_disable_a9wdog(u8 id)
+{
+ return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
+}
+
+int prcmu_kick_a9wdog(u8 id)
+{
+ return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
+}
+
+/*
+ * timeout is 28 bit, in ms.
+ */
+#define MAX_WATCHDOG_TIMEOUT 131000
+int prcmu_load_a9wdog(u8 id, u32 timeout)
+{
+ if (timeout > MAX_WATCHDOG_TIMEOUT)
+ /*
+ * Due to a calculation bug in the prcmu fw, timeouts
+ * can't be bigger than 131 seconds.
+ */
+ return -EINVAL;
+
+ return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
+ (id & A9WDOG_ID_MASK) |
+ /*
+ * Put the lowest 28 bits of timeout at
+ * offset 4. The first four bits are used for the id.
+ */
+ (u8)((timeout << 4) & 0xf0),
+ (u8)((timeout >> 4) & 0xff),
+ (u8)((timeout >> 12) & 0xff),
+ (u8)((timeout >> 20) & 0xff));
+}
+
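
A quick way to sanity-check the byte layout used by prcmu_load_a9wdog() above is to reproduce the packing in plain C. The sketch below is illustrative only; A9WDOG_ID_MASK is assumed to be 0xf, matching the num > 0xf check in prcmu_config_a9wdog(). The low nibble of the first byte carries the watchdog id, and the remaining 28 bits, spread over d0[7:4], d1, d2 and d3, carry the timeout in milliseconds.

    #include <stdio.h>

    #define A9WDOG_ID_MASK 0xf	/* assumed to match the driver's definition */

    int main(void)
    {
            unsigned int id = 1;
            unsigned int timeout = 120000;	/* ms; must stay below 131000 */

            /* Same packing as prcmu_load_a9wdog(). */
            unsigned int d0 = (id & A9WDOG_ID_MASK) | ((timeout << 4) & 0xf0);
            unsigned int d1 = (timeout >> 4) & 0xff;
            unsigned int d2 = (timeout >> 12) & 0xff;
            unsigned int d3 = (timeout >> 20) & 0xff;

            /* Reassemble to confirm the 28-bit round trip. */
            unsigned int back = (d0 >> 4) | (d1 << 4) | (d2 << 12) | (d3 << 20);

            printf("id=%u timeout=%u -> %02x %02x %02x %02x (back=%u)\n",
                   id, timeout, d0, d1, d2, d3, back);
            return 0;
    }
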
/**
* prcmu_set_clock_divider() - Configure the clock divider.
* @clock: The clock for which the request is made.
@@ -1345,7 +1637,7 @@ int prcmu_set_clock_divider(u8 clock, u8 divider)
spin_lock_irqsave(&clk_mgt_lock, flags);
/* Grab the HW semaphore. */
- while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
@@ -1354,7 +1646,7 @@ int prcmu_set_clock_divider(u8 clock, u8 divider)
writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
/* Release the HW semaphore. */
- writel(0, (_PRCMU_BASE + PRCM_SEM));
+ writel(0, PRCM_SEM);
spin_unlock_irqrestore(&clk_mgt_lock, flags);
@@ -1380,7 +1672,7 @@ int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
@@ -1388,7 +1680,7 @@ int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
- writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
if (!wait_for_completion_timeout(&mb5_transfer.work,
msecs_to_jiffies(20000))) {
@@ -1426,7 +1718,7 @@ int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
@@ -1434,7 +1726,7 @@ int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
- writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
if (!wait_for_completion_timeout(&mb5_transfer.work,
msecs_to_jiffies(20000))) {
@@ -1456,21 +1748,44 @@ int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
void prcmu_ac_wake_req(void)
{
u32 val;
+ u32 status;
mutex_lock(&mb0_transfer.ac_wake_lock);
- val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
+ val = readl(PRCM_HOSTACCESS_REQ);
if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
goto unlock_and_return;
atomic_set(&ac_wake_req_state, 1);
- writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
- (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
+retry:
+ writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), PRCM_HOSTACCESS_REQ);
if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
- msecs_to_jiffies(20000))) {
- pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
+ msecs_to_jiffies(5000))) {
+ pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
+ __func__);
+ goto unlock_and_return;
+ }
+
+ /*
+ * The modem can generate an AC_WAKE_ACK, and then still go to sleep.
+ * As a workaround, we wait, and then check that the modem is indeed
+ * awake (in terms of the value of the PRCM_MOD_AWAKE_STATUS
+ * register, which may not be the whole truth).
+ */
+ udelay(400);
+ status = (readl(PRCM_MOD_AWAKE_STATUS) & BITS(0, 2));
+ if (status != (PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE |
+ PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE)) {
+ pr_err("prcmu: %s received ack, but modem not awake (0x%X).\n",
+ __func__, status);
+ udelay(1200);
+ writel(val, PRCM_HOSTACCESS_REQ);
+ if (wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
+ msecs_to_jiffies(5000)))
+ goto retry;
+ pr_crit("prcmu: %s timed out (5 s) waiting for AC_SLEEP_ACK.\n",
__func__);
}
@@ -1487,16 +1802,16 @@ void prcmu_ac_sleep_req()
mutex_lock(&mb0_transfer.ac_wake_lock);
- val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
+ val = readl(PRCM_HOSTACCESS_REQ);
if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
goto unlock_and_return;
writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
- (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
+ PRCM_HOSTACCESS_REQ);
if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
- msecs_to_jiffies(20000))) {
- pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
+ msecs_to_jiffies(5000))) {
+ pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
__func__);
}
@@ -1506,21 +1821,32 @@ unlock_and_return:
mutex_unlock(&mb0_transfer.ac_wake_lock);
}
-bool prcmu_is_ac_wake_requested(void)
+bool db8500_prcmu_is_ac_wake_requested(void)
{
return (atomic_read(&ac_wake_req_state) != 0);
}
/**
- * prcmu_system_reset - System reset
+ * db8500_prcmu_system_reset - System reset
*
- * Saves the reset reason code and then sets the APE_SOFRST register which
+ * Saves the reset reason code and then sets the APE_SOFTRST register which
* fires interrupt to fw
*/
-void prcmu_system_reset(u16 reset_code)
+void db8500_prcmu_system_reset(u16 reset_code)
{
writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
- writel(1, (_PRCMU_BASE + PRCM_APE_SOFTRST));
+ writel(1, PRCM_APE_SOFTRST);
+}
+
+/**
+ * db8500_prcmu_get_reset_code - Retrieve SW reset reason code
+ *
+ * Retrieves the reset reason code stored by prcmu_system_reset() before
+ * last restart.
+ */
+u16 db8500_prcmu_get_reset_code(void)
+{
+ return readw(tcdm_base + PRCM_SW_RST_REASON);
}
/**
@@ -1530,11 +1856,11 @@ void prcmu_modem_reset(void)
{
mutex_lock(&mb1_transfer.lock);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
- writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
/*
@@ -1551,11 +1877,11 @@ static void ack_dbb_wakeup(void)
spin_lock_irqsave(&mb0_transfer.lock, flags);
- while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
- writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+ writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
@@ -1600,7 +1926,7 @@ static bool read_mailbox_0(void)
r = false;
break;
}
- writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
return r;
}
@@ -1613,7 +1939,7 @@ static bool read_mailbox_1(void)
PRCM_ACK_MB1_CURRENT_APE_OPP);
mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
- writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
complete(&mb1_transfer.work);
return false;
}
@@ -1621,14 +1947,14 @@ static bool read_mailbox_1(void)
static bool read_mailbox_2(void)
{
mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
- writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
complete(&mb2_transfer.work);
return false;
}
static bool read_mailbox_3(void)
{
- writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
return false;
}
@@ -1643,6 +1969,11 @@ static bool read_mailbox_4(void)
case MB4H_HOTDOG:
case MB4H_HOTMON:
case MB4H_HOT_PERIOD:
+ case MB4H_A9WDOG_CONF:
+ case MB4H_A9WDOG_EN:
+ case MB4H_A9WDOG_DIS:
+ case MB4H_A9WDOG_LOAD:
+ case MB4H_A9WDOG_KICK:
break;
default:
print_unknown_header_warning(4, header);
@@ -1650,7 +1981,7 @@ static bool read_mailbox_4(void)
break;
}
- writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
if (do_complete)
complete(&mb4_transfer.work);
@@ -1662,20 +1993,20 @@ static bool read_mailbox_5(void)
{
mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
- writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
complete(&mb5_transfer.work);
return false;
}
static bool read_mailbox_6(void)
{
- writel(MBOX_BIT(6), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_7(void)
{
- writel(MBOX_BIT(7), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
return false;
}
@@ -1696,7 +2027,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
u8 n;
irqreturn_t r;
- bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
+ bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
if (unlikely(!bits))
return IRQ_NONE;
@@ -1768,7 +2099,7 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
-void __init prcmu_early_init(void)
+void __init db8500_prcmu_early_init(void)
{
unsigned int i;
@@ -1826,6 +2157,16 @@ void __init prcmu_early_init(void)
}
}
+static void __init db8500_prcmu_init_clkforce(void)
+{
+ u32 val;
+
+ val = readl(PRCM_A9PL_FORCE_CLKEN);
+ val &= ~(PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN |
+ PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN);
+ writel(val, (PRCM_A9PL_FORCE_CLKEN));
+}
+
/*
* Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
*/
@@ -1861,7 +2202,42 @@ static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
REGULATOR_SUPPLY("vsupply", "b2r2.0"),
- REGULATOR_SUPPLY("vsupply", "mcde.0"),
+ REGULATOR_SUPPLY("vsupply", "mcde"),
+};
+
+/* SVA MMDSP regulator switch */
+static struct regulator_consumer_supply db8500_svammdsp_consumers[] = {
+ REGULATOR_SUPPLY("sva-mmdsp", "cm_control"),
+};
+
+/* SVA pipe regulator switch */
+static struct regulator_consumer_supply db8500_svapipe_consumers[] = {
+ REGULATOR_SUPPLY("sva-pipe", "cm_control"),
+};
+
+/* SIA MMDSP regulator switch */
+static struct regulator_consumer_supply db8500_siammdsp_consumers[] = {
+ REGULATOR_SUPPLY("sia-mmdsp", "cm_control"),
+};
+
+/* SIA pipe regulator switch */
+static struct regulator_consumer_supply db8500_siapipe_consumers[] = {
+ REGULATOR_SUPPLY("sia-pipe", "cm_control"),
+};
+
+static struct regulator_consumer_supply db8500_sga_consumers[] = {
+ REGULATOR_SUPPLY("v-mali", NULL),
+};
+
+/* ESRAM1 and 2 regulator switch */
+static struct regulator_consumer_supply db8500_esram12_consumers[] = {
+ REGULATOR_SUPPLY("esram12", "cm_control"),
+};
+
+/* ESRAM3 and 4 regulator switch */
+static struct regulator_consumer_supply db8500_esram34_consumers[] = {
+ REGULATOR_SUPPLY("v-esram34", "mcde"),
+ REGULATOR_SUPPLY("esram34", "cm_control"),
};
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
@@ -1923,6 +2299,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.name = "db8500-sva-mmdsp",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .consumer_supplies = db8500_svammdsp_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db8500_svammdsp_consumers),
},
[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
.constraints = {
@@ -1937,6 +2315,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.name = "db8500-sva-pipe",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .consumer_supplies = db8500_svapipe_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers),
},
[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
.supply_regulator = "db8500-vape",
@@ -1944,6 +2324,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.name = "db8500-sia-mmdsp",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .consumer_supplies = db8500_siammdsp_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db8500_siammdsp_consumers),
},
[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
.constraints = {
@@ -1957,6 +2339,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.name = "db8500-sia-pipe",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .consumer_supplies = db8500_siapipe_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db8500_siapipe_consumers),
},
[DB8500_REGULATOR_SWITCH_SGA] = {
.supply_regulator = "db8500-vape",
@@ -1964,6 +2348,9 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.name = "db8500-sga",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .consumer_supplies = db8500_sga_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db8500_sga_consumers),
+
},
[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
.supply_regulator = "db8500-vape",
@@ -1980,6 +2367,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.name = "db8500-esram12",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .consumer_supplies = db8500_esram12_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db8500_esram12_consumers),
},
[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
.constraints = {
@@ -1993,6 +2382,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.name = "db8500-esram34",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .consumer_supplies = db8500_esram34_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db8500_esram34_consumers),
},
[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
.constraints = {
@@ -2024,8 +2415,10 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
if (ux500_is_svp())
return -ENODEV;
+ db8500_prcmu_init_clkforce();
+
/* Clean up the mailbox interrupts after pre-kernel code. */
- writel(ALL_MBOX_BITS, (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
+ writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
diff --git a/drivers/mfd/db5500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index 9a8e9e4ddd3..ec22e9f15d3 100644
--- a/drivers/mfd/db5500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -10,11 +10,49 @@
* PRCM Unit registers
*/
-#ifndef __MACH_PRCMU_REGS_H
-#define __MACH_PRCMU_REGS_H
+#ifndef __DB8500_PRCMU_REGS_H
+#define __DB8500_PRCMU_REGS_H
#include <mach/hardware.h>
+#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
+
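
The new BITS() helper builds an inclusive bitmask from _start to _end; since the arithmetic is easy to misread, here is a small stand-alone check (illustrative only, with the macros reproduced and extra parentheses added) that prints two of the masks used later in this header.

    #include <stdio.h>

    #define BIT(n)			(1UL << (n))
    #define BITS(_start, _end)	((BIT(_end) - BIT(_start)) + BIT(_end))

    int main(void)
    {
            printf("BITS(0, 2)   = 0x%lx\n", BITS(0, 2));	/* 0x7: modem-awake status bits */
            printf("BITS(16, 29) = 0x%lx\n", BITS(16, 29));	/* 0x3fff0000: CLKOUT1 field */
            return 0;
    }
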
+#define PRCM_SVACLK_MGT_OFF 0x008
+#define PRCM_SIACLK_MGT_OFF 0x00C
+#define PRCM_SGACLK_MGT_OFF 0x014
+#define PRCM_UARTCLK_MGT_OFF 0x018
+#define PRCM_MSP02CLK_MGT_OFF 0x01C
+#define PRCM_I2CCLK_MGT_OFF 0x020
+#define PRCM_SDMMCCLK_MGT_OFF 0x024
+#define PRCM_SLIMCLK_MGT_OFF 0x028
+#define PRCM_PER1CLK_MGT_OFF 0x02C
+#define PRCM_PER2CLK_MGT_OFF 0x030
+#define PRCM_PER3CLK_MGT_OFF 0x034
+#define PRCM_PER5CLK_MGT_OFF 0x038
+#define PRCM_PER6CLK_MGT_OFF 0x03C
+#define PRCM_PER7CLK_MGT_OFF 0x040
+#define PRCM_PWMCLK_MGT_OFF 0x044 /* for DB5500 */
+#define PRCM_IRDACLK_MGT_OFF 0x048 /* for DB5500 */
+#define PRCM_IRRCCLK_MGT_OFF 0x04C /* for DB5500 */
+#define PRCM_LCDCLK_MGT_OFF 0x044
+#define PRCM_BMLCLK_MGT_OFF 0x04C
+#define PRCM_HSITXCLK_MGT_OFF 0x050
+#define PRCM_HSIRXCLK_MGT_OFF 0x054
+#define PRCM_HDMICLK_MGT_OFF 0x058
+#define PRCM_APEATCLK_MGT_OFF 0x05C
+#define PRCM_APETRACECLK_MGT_OFF 0x060
+#define PRCM_MCDECLK_MGT_OFF 0x064
+#define PRCM_IPI2CCLK_MGT_OFF 0x068
+#define PRCM_DSIALTCLK_MGT_OFF 0x06C
+#define PRCM_DMACLK_MGT_OFF 0x074
+#define PRCM_B2R2CLK_MGT_OFF 0x078
+#define PRCM_TVCLK_MGT_OFF 0x07C
+#define PRCM_UNIPROCLK_MGT_OFF 0x278
+#define PRCM_SSPCLK_MGT_OFF 0x280
+#define PRCM_RNGCLK_MGT_OFF 0x284
+#define PRCM_UICCCLK_MGT_OFF 0x27C
+#define PRCM_MSP1CLK_MGT_OFF 0x288
+
#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f
#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xf
@@ -30,11 +68,15 @@
#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON 0x100
#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0)
+#define PRCM_A9PL_FORCE_CLKEN (_PRCMU_BASE + 0x19C)
#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4)
#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0)
#define PRCM_ARM_LS_CLAMP (_PRCMU_BASE + 0x30c)
#define PRCM_SRAM_A9 (_PRCMU_BASE + 0x308)
+#define PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN BIT(0)
+#define PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN BIT(1)
+
/* ARM WFI Standby signal register */
#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130)
#define PRCM_IOCR (_PRCMU_BASE + 0x310)
@@ -61,12 +103,18 @@
#define PRCM_ARMITVAL127TO96 (_PRCMU_BASE + 0x26C)
#define PRCM_HOSTACCESS_REQ (_PRCMU_BASE + 0x334)
+#define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ 0x1
#define ARM_WAKEUP_MODEM 0x1
-#define PRCM_ARM_IT1_CLEAR (_PRCMU_BASE + 0x48C)
+#define PRCM_ARM_IT1_CLR (_PRCMU_BASE + 0x48C)
#define PRCM_ARM_IT1_VAL (_PRCMU_BASE + 0x494)
#define PRCM_HOLD_EVT (_PRCMU_BASE + 0x174)
+#define PRCM_MOD_AWAKE_STATUS (_PRCMU_BASE + 0x4A0)
+#define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE BIT(0)
+#define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE BIT(1)
+#define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_VMODEM_OFF_ISO BIT(2)
+
#define PRCM_ITSTATUS0 (_PRCMU_BASE + 0x148)
#define PRCM_ITSTATUS1 (_PRCMU_BASE + 0x150)
#define PRCM_ITSTATUS2 (_PRCMU_BASE + 0x158)
@@ -87,16 +135,21 @@
#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
-#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044)
-#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064)
-#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058)
-#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c)
+#define PRCM_LCDCLK_MGT (_PRCMU_BASE + PRCM_LCDCLK_MGT_OFF)
+#define PRCM_MCDECLK_MGT (_PRCMU_BASE + PRCM_MCDECLK_MGT_OFF)
+#define PRCM_HDMICLK_MGT (_PRCMU_BASE + PRCM_HDMICLK_MGT_OFF)
+#define PRCM_TVCLK_MGT (_PRCMU_BASE + PRCM_TVCLK_MGT_OFF)
#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4)
#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8)
+
#define PRCM_CLKOCR (_PRCMU_BASE + 0x1CC)
+#define PRCM_CLKOCR_CLKOUT0_REF_CLK (1 << 0)
+#define PRCM_CLKOCR_CLKOUT0_MASK BITS(0, 13)
+#define PRCM_CLKOCR_CLKOUT1_REF_CLK (1 << 16)
+#define PRCM_CLKOCR_CLKOUT1_MASK BITS(16, 29)
/* ePOD and memory power signal control registers */
#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410)
@@ -111,5 +164,41 @@
#define PRCM_GPIOCR_DBG_STM_MOD_CMD1 0x800
#define PRCM_GPIOCR_DBG_UARTMOD_CMD0 0x1
+/* PRCMU HW semaphore */
+#define PRCM_SEM (_PRCMU_BASE + 0x400)
+#define PRCM_SEM_PRCM_SEM BIT(0)
+
+#define PRCM_TCR (_PRCMU_BASE + 0x1C8)
+#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
+#define PRCM_TCR_STOP_TIMERS BIT(16)
+#define PRCM_TCR_DOZE_MODE BIT(17)
+
+#define PRCM_CLKOCR_CLKODIV0_SHIFT 0
+#define PRCM_CLKOCR_CLKODIV0_MASK BITS(0, 5)
+#define PRCM_CLKOCR_CLKOSEL0_SHIFT 6
+#define PRCM_CLKOCR_CLKOSEL0_MASK BITS(6, 8)
+#define PRCM_CLKOCR_CLKODIV1_SHIFT 16
+#define PRCM_CLKOCR_CLKODIV1_MASK BITS(16, 21)
+#define PRCM_CLKOCR_CLKOSEL1_SHIFT 22
+#define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24)
+#define PRCM_CLKOCR_CLK1TYPE BIT(28)
+
+#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
+#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
+#define PRCM_CLK_MGT_CLKEN BIT(8)
+
+/* GPIOCR register */
+#define PRCM_GPIOCR_SPI2_SELECT BIT(23)
+
+#define PRCM_DDR_SUBSYS_APE_MINBW (_PRCMU_BASE + 0x438)
+#define PRCM_CGATING_BYPASS (_PRCMU_BASE + 0x134)
+#define PRCM_CGATING_BYPASS_ICN2 BIT(6)
+
+/* Miscellaneous unit registers */
+#define PRCM_RESOUTN_SET (_PRCMU_BASE + 0x214)
+#define PRCM_RESOUTN_CLR (_PRCMU_BASE + 0x218)
+
+/* System reset register */
+#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228)
-#endif /* __MACH_PRCMU__REGS_H */
+#endif /* __DB8500_PRCMU_REGS_H */
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 3d4a861976c..8ad88da647b 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/leds.h>
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
new file mode 100644
index 00000000000..97c27762174
--- /dev/null
+++ b/drivers/mfd/intel_msic.c
@@ -0,0 +1,502 @@
+/*
+ * Driver for Intel MSIC
+ *
+ * Copyright (C) 2011, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/intel_scu_ipc.h>
+
+#define MSIC_VENDOR(id) ((id >> 6) & 3)
+#define MSIC_VERSION(id) (id & 0x3f)
+#define MSIC_MAJOR(id) ('A' + ((id >> 3) & 7))
+#define MSIC_MINOR(id) (id & 7)
+
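
The four macros above decode the MSIC ID registers: bits 6-7 give the vendor, bits 0-5 the version, and the version byte splits further into a major letter (bits 3-5) and a minor number (bits 0-2), which is how the probe routine prints the version as a letter plus a digit. A small stand-alone check follows; it is illustrative only, the ID value is made up, and parentheses have been added around the macro arguments.

    #include <stdio.h>

    #define MSIC_VENDOR(id)		(((id) >> 6) & 3)
    #define MSIC_VERSION(id)	((id) & 0x3f)
    #define MSIC_MAJOR(id)		('A' + (((id) >> 3) & 7))
    #define MSIC_MINOR(id)		((id) & 7)

    int main(void)
    {
            unsigned int id0 = 0x21;	/* hypothetical ID0 readout */

            printf("vendor %u, version %c%u\n", MSIC_VENDOR(id0),
                   MSIC_MAJOR(MSIC_VERSION(id0)), MSIC_MINOR(MSIC_VERSION(id0)));
            /* prints: vendor 0, version E1 */
            return 0;
    }
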
+/*
+ * MSIC interrupt tree is readable from SRAM at INTEL_MSIC_IRQ_PHYS_BASE.
+ * Since the IRQ block starts at address 0x002, we need to subtract that from
+ * the actual IRQ status register address.
+ */
+#define MSIC_IRQ_STATUS(x) (INTEL_MSIC_IRQ_PHYS_BASE + ((x) - 2))
+#define MSIC_IRQ_STATUS_ACCDET MSIC_IRQ_STATUS(INTEL_MSIC_ACCDET)
+
+/*
+ * The SCU hardware has a limitation of 16 bytes per read/write buffer on
+ * Medfield.
+ */
+#define SCU_IPC_RWBUF_LIMIT 16
+
+/**
+ * struct intel_msic - an MSIC MFD instance
+ * @pdev: pointer to the platform device
+ * @vendor: vendor ID
+ * @version: chip version
+ * @irq_base: base address of the mapped MSIC SRAM interrupt tree
+ */
+struct intel_msic {
+ struct platform_device *pdev;
+ unsigned vendor;
+ unsigned version;
+ void __iomem *irq_base;
+};
+
+static struct resource msic_touch_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource msic_adc_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource msic_battery_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource msic_gpio_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource msic_audio_resources[] = {
+ {
+ .name = "IRQ",
+ .flags = IORESOURCE_IRQ,
+ },
+ /*
+ * We will pass IRQ_BASE to the driver now but this can be removed
+ * when/if the driver starts to use intel_msic_irq_read().
+ */
+ {
+ .name = "IRQ_BASE",
+ .flags = IORESOURCE_MEM,
+ .start = MSIC_IRQ_STATUS_ACCDET,
+ .end = MSIC_IRQ_STATUS_ACCDET,
+ },
+};
+
+static struct resource msic_hdmi_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource msic_thermal_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource msic_power_btn_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource msic_ocd_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/*
+ * Devices that are part of the MSIC and are available via the
+ * firmware-populated SFI DEVS table.
+ */
+static struct mfd_cell msic_devs[] = {
+ [INTEL_MSIC_BLOCK_TOUCH] = {
+ .name = "msic_touch",
+ .num_resources = ARRAY_SIZE(msic_touch_resources),
+ .resources = msic_touch_resources,
+ },
+ [INTEL_MSIC_BLOCK_ADC] = {
+ .name = "msic_adc",
+ .num_resources = ARRAY_SIZE(msic_adc_resources),
+ .resources = msic_adc_resources,
+ },
+ [INTEL_MSIC_BLOCK_BATTERY] = {
+ .name = "msic_battery",
+ .num_resources = ARRAY_SIZE(msic_battery_resources),
+ .resources = msic_battery_resources,
+ },
+ [INTEL_MSIC_BLOCK_GPIO] = {
+ .name = "msic_gpio",
+ .num_resources = ARRAY_SIZE(msic_gpio_resources),
+ .resources = msic_gpio_resources,
+ },
+ [INTEL_MSIC_BLOCK_AUDIO] = {
+ .name = "msic_audio",
+ .num_resources = ARRAY_SIZE(msic_audio_resources),
+ .resources = msic_audio_resources,
+ },
+ [INTEL_MSIC_BLOCK_HDMI] = {
+ .name = "msic_hdmi",
+ .num_resources = ARRAY_SIZE(msic_hdmi_resources),
+ .resources = msic_hdmi_resources,
+ },
+ [INTEL_MSIC_BLOCK_THERMAL] = {
+ .name = "msic_thermal",
+ .num_resources = ARRAY_SIZE(msic_thermal_resources),
+ .resources = msic_thermal_resources,
+ },
+ [INTEL_MSIC_BLOCK_POWER_BTN] = {
+ .name = "msic_power_btn",
+ .num_resources = ARRAY_SIZE(msic_power_btn_resources),
+ .resources = msic_power_btn_resources,
+ },
+ [INTEL_MSIC_BLOCK_OCD] = {
+ .name = "msic_ocd",
+ .num_resources = ARRAY_SIZE(msic_ocd_resources),
+ .resources = msic_ocd_resources,
+ },
+};
+
+/*
+ * Other MSIC-related devices which are not directly available via the SFI
+ * DEVS table. These can be pseudo devices, regulators, etc., which are needed for
+ * different purposes.
+ *
+ * These devices appear only after the MSIC driver itself is initialized so
+ * we can guarantee that the SCU IPC interface is ready.
+ */
+static struct mfd_cell msic_other_devs[] = {
+ /* Audio codec in the MSIC */
+ {
+ .id = -1,
+ .name = "sn95031",
+ },
+};
+
+/**
+ * intel_msic_reg_read - read a single MSIC register
+ * @reg: register to read
+ * @val: register value is placed here
+ *
+ * Read a single register from MSIC. Returns %0 on success and negative
+ * errno in case of failure.
+ *
+ * Function may sleep.
+ */
+int intel_msic_reg_read(unsigned short reg, u8 *val)
+{
+ return intel_scu_ipc_ioread8(reg, val);
+}
+EXPORT_SYMBOL_GPL(intel_msic_reg_read);
+
+/**
+ * intel_msic_reg_write - write a single MSIC register
+ * @reg: register to write
+ * @val: value to write to that register
+ *
+ * Write a single MSIC register. Returns %0 on success and negative
+ * errno in case of failure.
+ *
+ * Function may sleep.
+ */
+int intel_msic_reg_write(unsigned short reg, u8 val)
+{
+ return intel_scu_ipc_iowrite8(reg, val);
+}
+EXPORT_SYMBOL_GPL(intel_msic_reg_write);
+
+/**
+ * intel_msic_reg_update - update a single MSIC register
+ * @reg: register to update
+ * @val: value to write to the register
+ * @mask: specifies which of the bits are updated (%0 = don't update,
+ * %1 = update)
+ *
+ * Perform an update to a register @reg. @mask is used to specify which
+ * bits are updated. Returns %0 in case of success and negative errno in
+ * case of failure.
+ *
+ * Function may sleep.
+ */
+int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask)
+{
+ return intel_scu_ipc_update_register(reg, val, mask);
+}
+EXPORT_SYMBOL_GPL(intel_msic_reg_update);
+
+/**
+ * intel_msic_bulk_read - read an array of registers
+ * @reg: array of register addresses to read
+ * @buf: array where the read values are placed
+ * @count: number of registers to read
+ *
+ * Function reads @count registers from the MSIC using addresses passed in
+ * @reg. Read values are placed in @buf. Reads are performed atomically
+ * wrt. MSIC.
+ *
+ * Returns %0 in case of success and negative errno in case of failure.
+ *
+ * Function may sleep.
+ */
+int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count)
+{
+ if (WARN_ON(count > SCU_IPC_RWBUF_LIMIT))
+ return -EINVAL;
+
+ return intel_scu_ipc_readv(reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(intel_msic_bulk_read);
+
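
A hedged sketch of how a subdevice driver might use the bulk helper above while staying under the 16-byte SCU IPC limit; only the illustrative wrapper below is new, while the register names and the intel_msic_bulk_read() signature come from the code in this patch.

    #include <linux/kernel.h>
    #include <linux/mfd/intel_msic.h>

    /* Illustrative only: read both MSIC ID registers in one atomic IPC transfer. */
    static int example_read_msic_ids(u8 *id0, u8 *id1)
    {
            unsigned short regs[] = { INTEL_MSIC_ID0, INTEL_MSIC_ID1 };
            u8 vals[ARRAY_SIZE(regs)];
            int ret;

            /* A count above SCU_IPC_RWBUF_LIMIT (16) would return -EINVAL. */
            ret = intel_msic_bulk_read(regs, vals, ARRAY_SIZE(regs));
            if (ret)
                    return ret;

            *id0 = vals[0];
            *id1 = vals[1];
            return 0;
    }
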
+/**
+ * intel_msic_bulk_write - write an array of values to the MSIC registers
+ * @reg: array of registers to write
+ * @buf: values to write to each register
+ * @count: number of registers to write
+ *
+ * Function writes @count registers in @buf to MSIC. Writes are performed
+ * atomically wrt MSIC. Returns %0 in case of success and negative errno in
+ * case of failure.
+ *
+ * Function may sleep.
+ */
+int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count)
+{
+ if (WARN_ON(count > SCU_IPC_RWBUF_LIMIT))
+ return -EINVAL;
+
+ return intel_scu_ipc_writev(reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(intel_msic_bulk_write);
+
+/**
+ * intel_msic_irq_read - read a register from an MSIC interrupt tree
+ * @msic: MSIC instance
+ * @reg: interrupt register (between %INTEL_MSIC_IRQLVL1 and
+ * %INTEL_MSIC_RESETIRQ2)
+ * @val: value of the register is placed here
+ *
+ * This function can be used by an MSIC subdevice interrupt handler to read
+ * a register value from the MSIC interrupt tree. In this way subdevice
+ * drivers don't have to map in the interrupt tree themselves but can just
+ * call this function instead.
+ *
+ * Function doesn't sleep and is callable from interrupt context.
+ *
+ * Returns %-EINVAL if @reg is outside of the allowed register region.
+ */
+int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg, u8 *val)
+{
+ if (WARN_ON(reg < INTEL_MSIC_IRQLVL1 || reg > INTEL_MSIC_RESETIRQ2))
+ return -EINVAL;
+
+ *val = readb(msic->irq_base + (reg - INTEL_MSIC_IRQLVL1));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_msic_irq_read);
+
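
A hedged sketch of the call pattern described in the kernel-doc above: a subdevice interrupt handler reading a first-level status register from the mapped SRAM copy instead of issuing an IPC call. The handler, its registration and the choice of INTEL_MSIC_IRQLVL1 are illustrative; the msic pointer is assumed to be handed to request_irq() as the dev_id.

    #include <linux/interrupt.h>
    #include <linux/mfd/intel_msic.h>

    /* Illustrative only: first-level IRQ status read from a subdevice handler. */
    static irqreturn_t example_subdev_irq(int irq, void *data)
    {
            struct intel_msic *msic = data;	/* assumed to be passed as dev_id */
            u8 lvl1;

            /* No IPC here; reads the SRAM interrupt tree, safe in hard IRQ context. */
            if (intel_msic_irq_read(msic, INTEL_MSIC_IRQLVL1, &lvl1))
                    return IRQ_NONE;

            return lvl1 ? IRQ_HANDLED : IRQ_NONE;
    }
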
+static int __devinit intel_msic_init_devices(struct intel_msic *msic)
+{
+ struct platform_device *pdev = msic->pdev;
+ struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
+ int ret, i;
+
+ if (pdata->gpio) {
+ struct mfd_cell *cell = &msic_devs[INTEL_MSIC_BLOCK_GPIO];
+
+ cell->platform_data = pdata->gpio;
+ cell->pdata_size = sizeof(*pdata->gpio);
+ }
+
+ if (pdata->ocd) {
+ unsigned gpio = pdata->ocd->gpio;
+
+ ret = gpio_request_one(gpio, GPIOF_IN, "ocd_gpio");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register OCD GPIO\n");
+ return ret;
+ }
+
+ ret = gpio_to_irq(gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no IRQ number for OCD GPIO\n");
+ gpio_free(gpio);
+ return ret;
+ }
+
+ /* Update the IRQ number for the OCD */
+ pdata->irq[INTEL_MSIC_BLOCK_OCD] = ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(msic_devs); i++) {
+ if (!pdata->irq[i])
+ continue;
+
+ ret = mfd_add_devices(&pdev->dev, -1, &msic_devs[i], 1, NULL,
+ pdata->irq[i]);
+ if (ret)
+ goto fail;
+ }
+
+ ret = mfd_add_devices(&pdev->dev, 0, msic_other_devs,
+ ARRAY_SIZE(msic_other_devs), NULL, 0);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ mfd_remove_devices(&pdev->dev);
+ if (pdata->ocd)
+ gpio_free(pdata->ocd->gpio);
+
+ return ret;
+}
+
+static void __devexit intel_msic_remove_devices(struct intel_msic *msic)
+{
+ struct platform_device *pdev = msic->pdev;
+ struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
+
+ mfd_remove_devices(&pdev->dev);
+
+ if (pdata->ocd)
+ gpio_free(pdata->ocd->gpio);
+}
+
+static int __devinit intel_msic_probe(struct platform_device *pdev)
+{
+ struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
+ struct intel_msic *msic;
+ struct resource *res;
+ u8 id0, id1;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data passed\n");
+ return -EINVAL;
+ }
+
+ /* First validate that we have an MSIC in place */
+ ret = intel_scu_ipc_ioread8(INTEL_MSIC_ID0, &id0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to identify the MSIC chip (ID0)\n");
+ return -ENXIO;
+ }
+
+ ret = intel_scu_ipc_ioread8(INTEL_MSIC_ID1, &id1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to identify the MSIC chip (ID1)\n");
+ return -ENXIO;
+ }
+
+ if (MSIC_VENDOR(id0) != MSIC_VENDOR(id1)) {
+ dev_err(&pdev->dev, "invalid vendor ID: %x, %x\n", id0, id1);
+ return -ENXIO;
+ }
+
+ msic = kzalloc(sizeof(*msic), GFP_KERNEL);
+ if (!msic)
+ return -ENOMEM;
+
+ msic->vendor = MSIC_VENDOR(id0);
+ msic->version = MSIC_VERSION(id0);
+ msic->pdev = pdev;
+
+ /*
+ * Map in the MSIC interrupt tree area in SRAM. This is exposed to
+ * the clients via intel_msic_irq_read().
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
+ ret = -ENODEV;
+ goto fail_free_msic;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ ret = -EBUSY;
+ goto fail_free_msic;
+ }
+
+ msic->irq_base = ioremap_nocache(res->start, resource_size(res));
+ if (!msic->irq_base) {
+ dev_err(&pdev->dev, "failed to map SRAM memory\n");
+ ret = -ENOMEM;
+ goto fail_release_region;
+ }
+
+ platform_set_drvdata(pdev, msic);
+
+ ret = intel_msic_init_devices(msic);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
+ goto fail_unmap_mem;
+ }
+
+ dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
+ MSIC_MAJOR(msic->version), MSIC_MINOR(msic->version),
+ msic->vendor);
+
+ return 0;
+
+fail_unmap_mem:
+ iounmap(msic->irq_base);
+fail_release_region:
+ release_mem_region(res->start, resource_size(res));
+fail_free_msic:
+ kfree(msic);
+
+ return ret;
+}
+
+static int __devexit intel_msic_remove(struct platform_device *pdev)
+{
+ struct intel_msic *msic = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ intel_msic_remove_devices(msic);
+ platform_set_drvdata(pdev, NULL);
+ iounmap(msic->irq_base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(msic);
+
+ return 0;
+}
+
+static struct platform_driver intel_msic_driver = {
+ .probe = intel_msic_probe,
+ .remove = __devexit_p(intel_msic_remove),
+ .driver = {
+ .name = "intel_msic",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init intel_msic_init(void)
+{
+ return platform_driver_register(&intel_msic_driver);
+}
+module_init(intel_msic_init);
+
+static void __exit intel_msic_exit(void)
+{
+ platform_driver_unregister(&intel_msic_driver);
+}
+module_exit(intel_msic_exit);
+
+MODULE_DESCRIPTION("Driver for Intel MSIC");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 563654c9b19..1e9ee533eac 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -328,7 +328,7 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz4740_adc_driver = {
+static struct platform_driver jz4740_adc_driver = {
.probe = jz4740_adc_probe,
.remove = __devexit_p(jz4740_adc_remove),
.driver = {
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index f83103b8970..5be53ae9b61 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -23,7 +23,9 @@
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8997.h>
@@ -142,7 +144,6 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
max8997->irq_base = pdata->irq_base;
max8997->ono = pdata->ono;
- max8997->wakeup = pdata->wakeup;
mutex_init(&max8997->iolock);
@@ -169,6 +170,9 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err_mfd;
+ /* MAX8997 has a power button input. */
+ device_init_wakeup(max8997->dev, pdata->wakeup);
+
return ret;
err_mfd:
@@ -398,7 +402,29 @@ static int max8997_restore(struct device *dev)
return 0;
}
+static int max8997_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max8997->irq, 1);
+ return 0;
+}
+
+static int max8997_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max8997->irq, 0);
+ return max8997_irq_resume(max8997);
+}
+
const struct dev_pm_ops max8997_pm = {
+ .suspend = max8997_suspend,
+ .resume = max8997_resume,
.freeze = max8997_freeze,
.restore = max8997_restore,
};
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 7e4d44bf92a..e9619acc023 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -26,20 +26,10 @@ struct mc13xxx {
irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
void *irqdata[MC13XXX_NUM_IRQ];
-};
-
-struct mc13783 {
- struct mc13xxx mc13xxx;
int adcflags;
};
-struct mc13xxx *mc13783_to_mc13xxx(struct mc13783 *mc13783)
-{
- return &mc13783->mc13xxx;
-}
-EXPORT_SYMBOL(mc13783_to_mc13xxx);
-
#define MC13XXX_IRQSTAT0 0
#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0)
#define MC13XXX_IRQSTAT0_ADCBISDONEI (1 << 1)
@@ -136,14 +126,14 @@ EXPORT_SYMBOL(mc13783_to_mc13xxx);
#define MC13XXX_REVISION_FAB (0x03 << 11)
#define MC13XXX_REVISION_ICIDCODE (0x3f << 13)
-#define MC13783_ADC1 44
-#define MC13783_ADC1_ADEN (1 << 0)
-#define MC13783_ADC1_RAND (1 << 1)
-#define MC13783_ADC1_ADSEL (1 << 3)
-#define MC13783_ADC1_ASC (1 << 20)
-#define MC13783_ADC1_ADTRIGIGN (1 << 21)
+#define MC13XXX_ADC1 44
+#define MC13XXX_ADC1_ADEN (1 << 0)
+#define MC13XXX_ADC1_RAND (1 << 1)
+#define MC13XXX_ADC1_ADSEL (1 << 3)
+#define MC13XXX_ADC1_ASC (1 << 20)
+#define MC13XXX_ADC1_ADTRIGIGN (1 << 21)
-#define MC13783_ADC2 45
+#define MC13XXX_ADC2 45
#define MC13XXX_NUMREGS 0x3f
@@ -487,7 +477,7 @@ enum mc13xxx_id {
MC13XXX_ID_INVALID,
};
-const char *mc13xxx_chipname[] = {
+static const char *mc13xxx_chipname[] = {
[MC13XXX_ID_MC13783] = "mc13783",
[MC13XXX_ID_MC13892] = "mc13892",
};
@@ -558,8 +548,6 @@ static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
return mc13xxx_chipname[devid->driver_data];
}
-#include <linux/mfd/mc13783.h>
-
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
{
struct mc13xxx_platform_data *pdata =
@@ -569,15 +557,15 @@ int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
}
EXPORT_SYMBOL(mc13xxx_get_flags);
-#define MC13783_ADC1_CHAN0_SHIFT 5
-#define MC13783_ADC1_CHAN1_SHIFT 8
+#define MC13XXX_ADC1_CHAN0_SHIFT 5
+#define MC13XXX_ADC1_CHAN1_SHIFT 8
struct mc13xxx_adcdone_data {
struct mc13xxx *mc13xxx;
struct completion done;
};
-static irqreturn_t mc13783_handler_adcdone(int irq, void *data)
+static irqreturn_t mc13xxx_handler_adcdone(int irq, void *data)
{
struct mc13xxx_adcdone_data *adcdone_data = data;
@@ -588,12 +576,11 @@ static irqreturn_t mc13783_handler_adcdone(int irq, void *data)
return IRQ_HANDLED;
}
-#define MC13783_ADC_WORKING (1 << 0)
+#define MC13XXX_ADC_WORKING (1 << 0)
-int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
+int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
unsigned int channel, unsigned int *sample)
{
- struct mc13xxx *mc13xxx = &mc13783->mc13xxx;
u32 adc0, adc1, old_adc0;
int i, ret;
struct mc13xxx_adcdone_data adcdone_data = {
@@ -605,51 +592,51 @@ int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
mc13xxx_lock(mc13xxx);
- if (mc13783->adcflags & MC13783_ADC_WORKING) {
+ if (mc13xxx->adcflags & MC13XXX_ADC_WORKING) {
ret = -EBUSY;
goto out;
}
- mc13783->adcflags |= MC13783_ADC_WORKING;
+ mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
- mc13xxx_reg_read(mc13xxx, MC13783_ADC0, &old_adc0);
+ mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
- adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2;
- adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN | MC13783_ADC1_ASC;
+ adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
+ adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
if (channel > 7)
- adc1 |= MC13783_ADC1_ADSEL;
+ adc1 |= MC13XXX_ADC1_ADSEL;
switch (mode) {
- case MC13783_ADC_MODE_TS:
- adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_TSMOD0 |
- MC13783_ADC0_TSMOD1;
- adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ case MC13XXX_ADC_MODE_TS:
+ adc0 |= MC13XXX_ADC0_ADREFEN | MC13XXX_ADC0_TSMOD0 |
+ MC13XXX_ADC0_TSMOD1;
+ adc1 |= 4 << MC13XXX_ADC1_CHAN1_SHIFT;
break;
- case MC13783_ADC_MODE_SINGLE_CHAN:
- adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
- adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT;
- adc1 |= MC13783_ADC1_RAND;
+ case MC13XXX_ADC_MODE_SINGLE_CHAN:
+ adc0 |= old_adc0 & MC13XXX_ADC0_TSMOD_MASK;
+ adc1 |= (channel & 0x7) << MC13XXX_ADC1_CHAN0_SHIFT;
+ adc1 |= MC13XXX_ADC1_RAND;
break;
- case MC13783_ADC_MODE_MULT_CHAN:
- adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
- adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ case MC13XXX_ADC_MODE_MULT_CHAN:
+ adc0 |= old_adc0 & MC13XXX_ADC0_TSMOD_MASK;
+ adc1 |= 4 << MC13XXX_ADC1_CHAN1_SHIFT;
break;
default:
- mc13783_unlock(mc13783);
+ mc13xxx_unlock(mc13xxx);
return -EINVAL;
}
- dev_dbg(&mc13783->mc13xxx.spidev->dev, "%s: request irq\n", __func__);
- mc13xxx_irq_request(mc13xxx, MC13783_IRQ_ADCDONE,
- mc13783_handler_adcdone, __func__, &adcdone_data);
- mc13xxx_irq_ack(mc13xxx, MC13783_IRQ_ADCDONE);
+ dev_dbg(&mc13xxx->spidev->dev, "%s: request irq\n", __func__);
+ mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
+ mc13xxx_handler_adcdone, __func__, &adcdone_data);
+ mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
- mc13xxx_reg_write(mc13xxx, MC13783_ADC0, adc0);
- mc13xxx_reg_write(mc13xxx, MC13783_ADC1, adc1);
+ mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0);
+ mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1);
mc13xxx_unlock(mc13xxx);
@@ -660,27 +647,27 @@ int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
mc13xxx_lock(mc13xxx);
- mc13xxx_irq_free(mc13xxx, MC13783_IRQ_ADCDONE, &adcdone_data);
+ mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_ADCDONE, &adcdone_data);
if (ret > 0)
for (i = 0; i < 4; ++i) {
ret = mc13xxx_reg_read(mc13xxx,
- MC13783_ADC2, &sample[i]);
+ MC13XXX_ADC2, &sample[i]);
if (ret)
break;
}
- if (mode == MC13783_ADC_MODE_TS)
+ if (mode == MC13XXX_ADC_MODE_TS)
/* restore TSMOD */
- mc13xxx_reg_write(mc13xxx, MC13783_ADC0, old_adc0);
+ mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, old_adc0);
- mc13783->adcflags &= ~MC13783_ADC_WORKING;
+ mc13xxx->adcflags &= ~MC13XXX_ADC_WORKING;
out:
mc13xxx_unlock(mc13xxx);
return ret;
}
-EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
+EXPORT_SYMBOL_GPL(mc13xxx_adc_do_conversion);
static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
const char *format, void *pdata, size_t pdata_size)
@@ -716,6 +703,11 @@ static int mc13xxx_probe(struct spi_device *spi)
enum mc13xxx_id id;
int ret;
+ if (!pdata) {
+ dev_err(&spi->dev, "invalid platform data\n");
+ return -EINVAL;
+ }
+
mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
if (!mc13xxx)
return -ENOMEM;
@@ -763,10 +755,8 @@ err_revision:
if (pdata->flags & MC13XXX_USE_CODEC)
mc13xxx_add_subdevice(mc13xxx, "%s-codec");
- if (pdata->flags & MC13XXX_USE_REGULATOR) {
- mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
- &pdata->regulators, sizeof(pdata->regulators));
- }
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
+ &pdata->regulators, sizeof(pdata->regulators));
if (pdata->flags & MC13XXX_USE_RTC)
mc13xxx_add_subdevice(mc13xxx, "%s-rtc");
@@ -774,10 +764,14 @@ err_revision:
if (pdata->flags & MC13XXX_USE_TOUCHSCREEN)
mc13xxx_add_subdevice(mc13xxx, "%s-ts");
- if (pdata->flags & MC13XXX_USE_LED)
+ if (pdata->leds)
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
pdata->leds, sizeof(*pdata->leds));
+ if (pdata->buttons)
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-pwrbutton",
+ pdata->buttons, sizeof(*pdata->buttons));
+
return 0;
}
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 9cee8e7f0bc..cb4910ac4d1 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -44,7 +44,7 @@
#include <asm/mach/irq.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <plat/menelaus.h>
#define DRIVER_NAME "menelaus"
@@ -1226,7 +1226,7 @@ static int menelaus_probe(struct i2c_client *client,
menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);
if (client->irq > 0) {
- err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
+ err = request_irq(client->irq, menelaus_irq, 0,
DRIVER_NAME, menelaus);
if (err) {
dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 0902523af62..0f5922812bf 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -17,6 +17,7 @@
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/module.h>
int mfd_cell_enable(struct platform_device *pdev)
{
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 57868416c76..ff1a7e741ec 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -23,45 +23,22 @@
#include <linux/i2c.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
#include <linux/mfd/pcf50633/core.h>
-static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
-{
- int ret;
-
- ret = i2c_smbus_read_i2c_block_data(pcf->i2c_client, reg,
- num, data);
- if (ret < 0)
- dev_err(pcf->dev, "Error reading %d regs at %d\n", num, reg);
-
- return ret;
-}
-
-static int __pcf50633_write(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
-{
- int ret;
-
- ret = i2c_smbus_write_i2c_block_data(pcf->i2c_client, reg,
- num, data);
- if (ret < 0)
- dev_err(pcf->dev, "Error writing %d regs at %d\n", num, reg);
-
- return ret;
-
-}
-
/* Read a block of up to 32 regs */
int pcf50633_read_block(struct pcf50633 *pcf, u8 reg,
int nr_regs, u8 *data)
{
int ret;
- mutex_lock(&pcf->lock);
- ret = __pcf50633_read(pcf, reg, nr_regs, data);
- mutex_unlock(&pcf->lock);
+ ret = regmap_raw_read(pcf->regmap, reg, data, nr_regs);
+ if (ret != 0)
+ return ret;
- return ret;
+ return nr_regs;
}
EXPORT_SYMBOL_GPL(pcf50633_read_block);
@@ -71,21 +48,22 @@ int pcf50633_write_block(struct pcf50633 *pcf , u8 reg,
{
int ret;
- mutex_lock(&pcf->lock);
- ret = __pcf50633_write(pcf, reg, nr_regs, data);
- mutex_unlock(&pcf->lock);
+ ret = regmap_raw_write(pcf->regmap, reg, data, nr_regs);
+ if (ret != 0)
+ return ret;
- return ret;
+ return nr_regs;
}
EXPORT_SYMBOL_GPL(pcf50633_write_block);
u8 pcf50633_reg_read(struct pcf50633 *pcf, u8 reg)
{
- u8 val;
+ unsigned int val;
+ int ret;
- mutex_lock(&pcf->lock);
- __pcf50633_read(pcf, reg, 1, &val);
- mutex_unlock(&pcf->lock);
+ ret = regmap_read(pcf->regmap, reg, &val);
+ if (ret < 0)
+ return -1;
return val;
}
@@ -93,56 +71,19 @@ EXPORT_SYMBOL_GPL(pcf50633_reg_read);
int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val)
{
- int ret;
-
- mutex_lock(&pcf->lock);
- ret = __pcf50633_write(pcf, reg, 1, &val);
- mutex_unlock(&pcf->lock);
-
- return ret;
+ return regmap_write(pcf->regmap, reg, val);
}
EXPORT_SYMBOL_GPL(pcf50633_reg_write);
int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val)
{
- int ret;
- u8 tmp;
-
- val &= mask;
-
- mutex_lock(&pcf->lock);
- ret = __pcf50633_read(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- tmp &= ~mask;
- tmp |= val;
- ret = __pcf50633_write(pcf, reg, 1, &tmp);
-
-out:
- mutex_unlock(&pcf->lock);
-
- return ret;
+ return regmap_update_bits(pcf->regmap, reg, mask, val);
}
EXPORT_SYMBOL_GPL(pcf50633_reg_set_bit_mask);
int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 val)
{
- int ret;
- u8 tmp;
-
- mutex_lock(&pcf->lock);
- ret = __pcf50633_read(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- tmp &= ~val;
- ret = __pcf50633_write(pcf, reg, 1, &tmp);
-
-out:
- mutex_unlock(&pcf->lock);
-
- return ret;
+ return regmap_update_bits(pcf->regmap, reg, val, 0);
}
EXPORT_SYMBOL_GPL(pcf50633_reg_clear_bits);
@@ -251,6 +192,11 @@ static int pcf50633_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
+static struct regmap_config pcf50633_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int __devinit pcf50633_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
@@ -272,16 +218,23 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
mutex_init(&pcf->lock);
+ pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config);
+ if (IS_ERR(pcf->regmap)) {
+ ret = PTR_ERR(pcf->regmap);
+ dev_err(pcf->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err_free;
+ }
+
i2c_set_clientdata(client, pcf);
pcf->dev = &client->dev;
- pcf->i2c_client = client;
version = pcf50633_reg_read(pcf, 0);
variant = pcf50633_reg_read(pcf, 1);
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err_free;
+ goto err_regmap;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -328,6 +281,8 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return 0;
+err_regmap:
+ regmap_exit(pcf->regmap);
err_free:
kfree(pcf);
@@ -351,6 +306,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
+ regmap_exit(pcf->regmap);
kfree(pcf);
return 0;
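The pcf50633 conversion above replaces the driver's hand-rolled, mutex-protected SMBus helpers with the generic regmap API, which provides its own locking. A minimal, self-contained sketch of the same pattern, using a hypothetical I2C client and register addresses rather than the driver's actual probe path:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

/* 8-bit register addresses holding 8-bit values, as in the driver above */
static const struct regmap_config demo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int demo_setup(struct i2c_client *client)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	/* regmap owns the locking and the underlying I2C transfers */
	map = regmap_init_i2c(client, &demo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, 0x00, &val);	/* single register read */
	if (!ret)
		ret = regmap_update_bits(map, 0x01, 0x0f, 0x03); /* RMW */

	regmap_exit(map);	/* pairs with regmap_init_i2c() */
	return ret;
}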
diff --git a/drivers/mfd/pcf50633-irq.c b/drivers/mfd/pcf50633-irq.c
index 1b0192f1eff..048a3b903b0 100644
--- a/drivers/mfd/pcf50633-irq.c
+++ b/drivers/mfd/pcf50633-irq.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mfd/pcf50633/core.h>
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index c27e515b072..de979742c6f 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -357,6 +357,7 @@ static int __devexit tc3589x_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
static int tc3589x_suspend(struct device *dev)
{
struct tc3589x *tc3589x = dev_get_drvdata(dev);
@@ -387,6 +388,7 @@ static int tc3589x_resume(struct device *dev)
static const SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend,
tc3589x_resume);
+#endif
static const struct i2c_device_id tc3589x_id[] = {
{ "tc3589x", 24 },
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 696879e2eef..02d65692ceb 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -697,7 +697,7 @@ static int __devinit timb_probe(struct pci_dev *dev,
dev_err(&dev->dev, "The driver supports an older "
"version of the FPGA, please update the driver to "
"support %d.%d\n", priv->fw.major, priv->fw.minor);
- goto err_ioremap;
+ goto err_config;
}
if (priv->fw.major < TIMB_SUPPORTED_MAJOR ||
priv->fw.minor < TIMB_REQUIRED_MINOR) {
@@ -705,13 +705,13 @@ static int __devinit timb_probe(struct pci_dev *dev,
"please upgrade the FPGA to at least: %d.%d\n",
priv->fw.major, priv->fw.minor,
TIMB_SUPPORTED_MAJOR, TIMB_REQUIRED_MINOR);
- goto err_ioremap;
+ goto err_config;
}
msix_entries = kzalloc(TIMBERDALE_NR_IRQS * sizeof(*msix_entries),
GFP_KERNEL);
if (!msix_entries)
- goto err_ioremap;
+ goto err_config;
for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
msix_entries[i].entry = i;
@@ -825,6 +825,8 @@ err_mfd:
err_create_file:
pci_disable_msix(dev);
err_msix:
+ kfree(msix_entries);
+err_config:
iounmap(priv->ctl_membase);
err_ioremap:
release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
@@ -833,7 +835,6 @@ err_request:
err_start:
pci_disable_device(dev);
err_enable:
- kfree(msix_entries);
kfree(priv);
pci_set_drvdata(dev, NULL);
return -ENODEV;
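The timberdale fix above reorders the error labels so that msix_entries is freed only on the paths where it was actually allocated. The underlying idiom is the usual reverse-order goto unwind; a generic sketch, not the driver's code:

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_probe(void)
{
	void *a, *b;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto err_free_a;

	/* a real probe would stash a and b in driver data here */
	return 0;

	/* unwind strictly in reverse order of acquisition */
err_free_a:
	kfree(a);
	return -ENOMEM;
}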
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
index eddc19ae464..83af78c1b0e 100644
--- a/drivers/mfd/tmio_core.c
+++ b/drivers/mfd/tmio_core.c
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <linux/mfd/tmio.h>
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 955bc00e4b2..5fec23a9ac0 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -131,9 +131,6 @@ int tps65912_device_init(struct tps65912 *tps65912)
if (init_data == NULL)
return -ENOMEM;
- init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq;
-
mutex_init(&tps65912->io_mutex);
dev_set_drvdata(tps65912->dev, tps65912);
@@ -153,10 +150,13 @@ int tps65912_device_init(struct tps65912 *tps65912)
if (ret < 0)
goto err;
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
if (ret < 0)
goto err;
+ kfree(init_data);
return ret;
err:
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 01ecfeee652..bfbd66021af 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -109,7 +110,7 @@
#define twl_has_watchdog() false
#endif
-#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
+#if defined(CONFIG_MFD_TWL4030_AUDIO) || defined(CONFIG_MFD_TWL4030_AUDIO_MODULE) ||\
defined(CONFIG_TWL6040_CORE) || defined(CONFIG_TWL6040_CORE_MODULE)
#define twl_has_codec() true
#else
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 8a7ee3139b8..f062c8cc6c3 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -30,7 +30,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/i2c/twl.h>
@@ -278,59 +277,6 @@ static const struct sih sih_modules_twl5031[8] = {
static unsigned twl4030_irq_base;
-static struct completion irq_event;
-
-/*
- * This thread processes interrupts reported by the Primary Interrupt Handler.
- */
-static int twl4030_irq_thread(void *data)
-{
- long irq = (long)data;
- static unsigned i2c_errors;
- static const unsigned max_i2c_errors = 100;
-
-
- current->flags |= PF_NOFREEZE;
-
- while (!kthread_should_stop()) {
- int ret;
- int module_irq;
- u8 pih_isr;
-
- /* Wait for IRQ, then read PIH irq status (also blocking) */
- wait_for_completion_interruptible(&irq_event);
-
- ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
- REG_PIH_ISR_P1);
- if (ret) {
- pr_warning("twl4030: I2C error %d reading PIH ISR\n",
- ret);
- if (++i2c_errors >= max_i2c_errors) {
- printk(KERN_ERR "Maximum I2C error count"
- " exceeded. Terminating %s.\n",
- __func__);
- break;
- }
- complete(&irq_event);
- continue;
- }
-
- /* these handlers deal with the relevant SIH irq status */
- local_irq_disable();
- for (module_irq = twl4030_irq_base;
- pih_isr;
- pih_isr >>= 1, module_irq++) {
- if (pih_isr & 0x1)
- generic_handle_irq(module_irq);
- }
- local_irq_enable();
-
- enable_irq(irq);
- }
-
- return 0;
-}
-
/*
* handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt.
* This is a chained interrupt, so there is no desc->action method for it.
@@ -342,9 +288,25 @@ static int twl4030_irq_thread(void *data)
*/
static irqreturn_t handle_twl4030_pih(int irq, void *devid)
{
- /* Acknowledge, clear *AND* mask the interrupt... */
- disable_irq_nosync(irq);
- complete(devid);
+ int module_irq;
+ irqreturn_t ret;
+ u8 pih_isr;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
+ REG_PIH_ISR_P1);
+ if (ret) {
+ pr_warning("twl4030: I2C error %d reading PIH ISR\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* these handlers deal with the relevant SIH irq status */
+ for (module_irq = twl4030_irq_base;
+ pih_isr;
+ pih_isr >>= 1, module_irq++) {
+ if (pih_isr & 0x1)
+ handle_nested_irq(module_irq);
+ }
+
return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
@@ -460,113 +422,17 @@ static inline void activate_irq(int irq)
/*----------------------------------------------------------------------*/
-static DEFINE_SPINLOCK(sih_agent_lock);
-
-static struct workqueue_struct *wq;
-
struct sih_agent {
int irq_base;
const struct sih *sih;
u32 imr;
bool imr_change_pending;
- struct work_struct mask_work;
-
- u32 edge_change;
- struct work_struct edge_work;
-};
-
-static void twl4030_sih_do_mask(struct work_struct *work)
-{
- struct sih_agent *agent;
- const struct sih *sih;
- union {
- u8 bytes[4];
- u32 word;
- } imr;
- int status;
- agent = container_of(work, struct sih_agent, mask_work);
-
- /* see what work we have */
- spin_lock_irq(&sih_agent_lock);
- if (agent->imr_change_pending) {
- sih = agent->sih;
- /* byte[0] gets overwritten as we write ... */
- imr.word = cpu_to_le32(agent->imr << 8);
- agent->imr_change_pending = false;
- } else
- sih = NULL;
- spin_unlock_irq(&sih_agent_lock);
- if (!sih)
- return;
-
- /* write the whole mask ... simpler than subsetting it */
- status = twl_i2c_write(sih->module, imr.bytes,
- sih->mask[irq_line].imr_offset, sih->bytes_ixr);
- if (status)
- pr_err("twl4030: %s, %s --> %d\n", __func__,
- "write", status);
-}
-
-static void twl4030_sih_do_edge(struct work_struct *work)
-{
- struct sih_agent *agent;
- const struct sih *sih;
- u8 bytes[6];
u32 edge_change;
- int status;
-
- agent = container_of(work, struct sih_agent, edge_work);
-
- /* see what work we have */
- spin_lock_irq(&sih_agent_lock);
- edge_change = agent->edge_change;
- agent->edge_change = 0;
- sih = edge_change ? agent->sih : NULL;
- spin_unlock_irq(&sih_agent_lock);
- if (!sih)
- return;
-
- /* Read, reserving first byte for write scratch. Yes, this
- * could be cached for some speedup ... but be careful about
- * any processor on the other IRQ line, EDR registers are
- * shared.
- */
- status = twl_i2c_read(sih->module, bytes + 1,
- sih->edr_offset, sih->bytes_edr);
- if (status) {
- pr_err("twl4030: %s, %s --> %d\n", __func__,
- "read", status);
- return;
- }
-
- /* Modify only the bits we know must change */
- while (edge_change) {
- int i = fls(edge_change) - 1;
- struct irq_data *idata = irq_get_irq_data(i + agent->irq_base);
- int byte = 1 + (i >> 2);
- int off = (i & 0x3) * 2;
- unsigned int type;
-
- bytes[byte] &= ~(0x03 << off);
- type = irqd_get_trigger_type(idata);
- if (type & IRQ_TYPE_EDGE_RISING)
- bytes[byte] |= BIT(off + 1);
- if (type & IRQ_TYPE_EDGE_FALLING)
- bytes[byte] |= BIT(off + 0);
-
- edge_change &= ~BIT(i);
- }
-
- /* Write */
- status = twl_i2c_write(sih->module, bytes,
- sih->edr_offset, sih->bytes_edr);
- if (status)
- pr_err("twl4030: %s, %s --> %d\n", __func__,
- "write", status);
-}
+ struct mutex irq_lock;
+};
/*----------------------------------------------------------------------*/
@@ -579,50 +445,125 @@ static void twl4030_sih_do_edge(struct work_struct *work)
static void twl4030_sih_mask(struct irq_data *data)
{
- struct sih_agent *sih = irq_data_get_irq_chip_data(data);
- unsigned long flags;
-
- spin_lock_irqsave(&sih_agent_lock, flags);
- sih->imr |= BIT(data->irq - sih->irq_base);
- sih->imr_change_pending = true;
- queue_work(wq, &sih->mask_work);
- spin_unlock_irqrestore(&sih_agent_lock, flags);
+ struct sih_agent *agent = irq_data_get_irq_chip_data(data);
+
+ agent->imr |= BIT(data->irq - agent->irq_base);
+ agent->imr_change_pending = true;
}
static void twl4030_sih_unmask(struct irq_data *data)
{
- struct sih_agent *sih = irq_data_get_irq_chip_data(data);
- unsigned long flags;
-
- spin_lock_irqsave(&sih_agent_lock, flags);
- sih->imr &= ~BIT(data->irq - sih->irq_base);
- sih->imr_change_pending = true;
- queue_work(wq, &sih->mask_work);
- spin_unlock_irqrestore(&sih_agent_lock, flags);
+ struct sih_agent *agent = irq_data_get_irq_chip_data(data);
+
+ agent->imr &= ~BIT(data->irq - agent->irq_base);
+ agent->imr_change_pending = true;
}
static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
{
- struct sih_agent *sih = irq_data_get_irq_chip_data(data);
- unsigned long flags;
+ struct sih_agent *agent = irq_data_get_irq_chip_data(data);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
- spin_lock_irqsave(&sih_agent_lock, flags);
- if (irqd_get_trigger_type(data) != trigger) {
- sih->edge_change |= BIT(data->irq - sih->irq_base);
- queue_work(wq, &sih->edge_work);
- }
- spin_unlock_irqrestore(&sih_agent_lock, flags);
+ if (irqd_get_trigger_type(data) != trigger)
+ agent->edge_change |= BIT(data->irq - agent->irq_base);
+
return 0;
}
+static void twl4030_sih_bus_lock(struct irq_data *data)
+{
+ struct sih_agent *agent = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&agent->irq_lock);
+}
+
+static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
+{
+ struct sih_agent *agent = irq_data_get_irq_chip_data(data);
+ const struct sih *sih = agent->sih;
+ int status;
+
+ if (agent->imr_change_pending) {
+ union {
+ u32 word;
+ u8 bytes[4];
+ } imr;
+
+ /* byte[0] gets overwritten as we write ... */
+ imr.word = cpu_to_le32(agent->imr << 8);
+ agent->imr_change_pending = false;
+
+ /* write the whole mask ... simpler than subsetting it */
+ status = twl_i2c_write(sih->module, imr.bytes,
+ sih->mask[irq_line].imr_offset,
+ sih->bytes_ixr);
+ if (status)
+ pr_err("twl4030: %s, %s --> %d\n", __func__,
+ "write", status);
+ }
+
+ if (agent->edge_change) {
+ u32 edge_change;
+ u8 bytes[6];
+
+ edge_change = agent->edge_change;
+ agent->edge_change = 0;
+
+ /*
+ * Read, reserving first byte for write scratch. Yes, this
+ * could be cached for some speedup ... but be careful about
+ * any processor on the other IRQ line, EDR registers are
+ * shared.
+ */
+ status = twl_i2c_read(sih->module, bytes + 1,
+ sih->edr_offset, sih->bytes_edr);
+ if (status) {
+ pr_err("twl4030: %s, %s --> %d\n", __func__,
+ "read", status);
+ return;
+ }
+
+ /* Modify only the bits we know must change */
+ while (edge_change) {
+ int i = fls(edge_change) - 1;
+ struct irq_data *idata;
+ int byte = 1 + (i >> 2);
+ int off = (i & 0x3) * 2;
+ unsigned int type;
+
+ idata = irq_get_irq_data(i + agent->irq_base);
+
+ bytes[byte] &= ~(0x03 << off);
+
+ type = irqd_get_trigger_type(idata);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ bytes[byte] |= BIT(off + 1);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ bytes[byte] |= BIT(off + 0);
+
+ edge_change &= ~BIT(i);
+ }
+
+ /* Write */
+ status = twl_i2c_write(sih->module, bytes,
+ sih->edr_offset, sih->bytes_edr);
+ if (status)
+ pr_err("twl4030: %s, %s --> %d\n", __func__,
+ "write", status);
+ }
+
+ mutex_unlock(&agent->irq_lock);
+}
+
static struct irq_chip twl4030_sih_irq_chip = {
.name = "twl4030",
- .irq_mask = twl4030_sih_mask,
+ .irq_mask = twl4030_sih_mask,
.irq_unmask = twl4030_sih_unmask,
.irq_set_type = twl4030_sih_set_type,
+ .irq_bus_lock = twl4030_sih_bus_lock,
+ .irq_bus_sync_unlock = twl4030_sih_bus_sync_unlock,
};
/*----------------------------------------------------------------------*/
@@ -655,9 +596,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
int isr;
/* reading ISR acks the IRQs, using clear-on-read mode */
- local_irq_enable();
isr = sih_read_isr(sih);
- local_irq_disable();
if (isr < 0) {
pr_err("twl4030: %s SIH, read ISR error %d\n",
@@ -672,7 +611,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
isr &= ~BIT(irq);
if (irq < sih->bits)
- generic_handle_irq(agent->irq_base + irq);
+ handle_nested_irq(agent->irq_base + irq);
else
pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
sih->name, irq);
@@ -718,15 +657,14 @@ int twl4030_sih_setup(int module)
agent->irq_base = irq_base;
agent->sih = sih;
agent->imr = ~0;
- INIT_WORK(&agent->mask_work, twl4030_sih_do_mask);
- INIT_WORK(&agent->edge_work, twl4030_sih_do_edge);
+ mutex_init(&agent->irq_lock);
for (i = 0; i < sih->bits; i++) {
irq = irq_base + i;
+ irq_set_chip_data(irq, agent);
irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip,
handle_edge_irq);
- irq_set_chip_data(irq, agent);
activate_irq(irq);
}
@@ -758,7 +696,6 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
int status;
int i;
- struct task_struct *task;
/*
* Mask and clear all TWL4030 interrupts since initially we do
@@ -768,12 +705,6 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
if (status < 0)
return status;
- wq = create_singlethread_workqueue("twl4030-irqchip");
- if (!wq) {
- pr_err("twl4030: workqueue FAIL\n");
- return -ESRCH;
- }
-
twl4030_irq_base = irq_base;
/* install an irq handler for each of the SIH modules;
@@ -787,6 +718,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
for (i = irq_base; i < irq_end; i++) {
irq_set_chip_and_handler(i, &twl4030_irq_chip,
handle_simple_irq);
+ irq_set_nested_thread(i, 1);
activate_irq(i);
}
twl4030_irq_next = i;
@@ -801,34 +733,22 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
}
/* install an irq handler to demultiplex the TWL4030 interrupt */
-
-
- init_completion(&irq_event);
-
- status = request_irq(irq_num, handle_twl4030_pih, IRQF_DISABLED,
- "TWL4030-PIH", &irq_event);
+ status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
+ "TWL4030-PIH", NULL);
if (status < 0) {
pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
}
- task = kthread_run(twl4030_irq_thread, (void *)(long)irq_num,
- "twl4030-irq");
- if (IS_ERR(task)) {
- pr_err("twl4030: could not create irq %d thread!\n", irq_num);
- status = PTR_ERR(task);
- goto fail_kthread;
- }
return status;
-fail_kthread:
- free_irq(irq_num, &irq_event);
fail_rqirq:
/* clean up twl4030_sih_setup */
fail:
- for (i = irq_base; i < irq_end; i++)
+ for (i = irq_base; i < irq_end; i++) {
+ irq_set_nested_thread(i, 0);
irq_set_chip_and_handler(i, NULL, NULL);
- destroy_workqueue(wq);
- wq = NULL;
+ }
+
return status;
}
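The twl4030-irq rework above drops the private kthread and workqueue in favour of the kernel's nested-IRQ support: the primary handler is requested with request_threaded_irq() and dispatches children with handle_nested_irq(), while slow I2C mask updates are deferred to irq_bus_sync_unlock(). A condensed sketch of that structure with hypothetical names, not the twl4030 code itself:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_chip {
	struct mutex irq_lock;	/* held between irq_bus_lock/unlock */
	unsigned int irq_base;	/* first virtual IRQ of this chip */
	u32 imr;		/* cached interrupt mask */
	bool imr_dirty;		/* flush to the chip on bus unlock */
};

static void demo_irq_mask(struct irq_data *d)
{
	struct demo_chip *chip = irq_data_get_irq_chip_data(d);

	/* only touch the cache here; sleeping bus access is not allowed */
	chip->imr |= BIT(d->irq - chip->irq_base);
	chip->imr_dirty = true;
}

static void demo_irq_bus_lock(struct irq_data *d)
{
	struct demo_chip *chip = irq_data_get_irq_chip_data(d);

	mutex_lock(&chip->irq_lock);
}

static void demo_irq_bus_sync_unlock(struct irq_data *d)
{
	struct demo_chip *chip = irq_data_get_irq_chip_data(d);

	if (chip->imr_dirty) {
		/* a real driver writes chip->imr over I2C here; may sleep */
		chip->imr_dirty = false;
	}
	mutex_unlock(&chip->irq_lock);
}

static struct irq_chip demo_irq_chip = {
	.name			= "demo",
	.irq_mask		= demo_irq_mask,
	.irq_bus_lock		= demo_irq_bus_lock,
	.irq_bus_sync_unlock	= demo_irq_bus_sync_unlock,
};

/* threaded primary handler: runs in process context, so it may sleep */
static irqreturn_t demo_pih_thread(int irq, void *devid)
{
	struct demo_chip *chip = devid;
	u32 pending = ~chip->imr & 0xff;	/* stands in for a slow ISR read */
	int i;

	for (i = 0; pending; pending >>= 1, i++)
		if (pending & 1)
			handle_nested_irq(chip->irq_base + i);

	return IRQ_HANDLED;
}

static int demo_init(struct demo_chip *chip, int parent_irq)
{
	int i;

	mutex_init(&chip->irq_lock);
	for (i = 0; i < 8; i++) {
		int virq = chip->irq_base + i;

		irq_set_chip_data(virq, chip);
		irq_set_chip_and_handler(virq, &demo_irq_chip,
					 handle_simple_irq);
		irq_set_nested_thread(virq, 1);
	}

	return request_threaded_irq(parent_irq, NULL, demo_pih_thread, 0,
				    "demo-pih", chip);
}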
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 7cbf2aa9e64..834f824d3c1 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -740,6 +740,28 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
TWL4030_BCI_BCICTL1);
goto err_i2c;
}
+
+ /* Check that MADC clock is on */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n",
+ TWL4030_REG_GPBR1);
+ goto err_i2c;
+ }
+
+ /* If MADC clk is not on, turn it on */
+ if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) {
+ dev_info(&pdev->dev, "clk disabled, enabling\n");
+ regval |= TWL4030_GPBR1_MADC_HFCLK_EN;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval,
+ TWL4030_REG_GPBR1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n",
+ TWL4030_REG_GPBR1);
+ goto err_i2c;
+ }
+ }
+
platform_set_drvdata(pdev, madc);
mutex_init(&madc->lock);
ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index eb3b5f88e56..3eee45ffb09 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -32,11 +32,13 @@
*/
#include <linux/init.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/i2c/twl.h>
#include <linux/platform_device.h>
+#include <linux/suspend.h>
#include "twl-core.h"
@@ -83,8 +85,48 @@ static int twl6030_interrupt_mapping[24] = {
/*----------------------------------------------------------------------*/
static unsigned twl6030_irq_base;
+static int twl_irq;
+static bool twl_irq_wake_enabled;
static struct completion irq_event;
+static atomic_t twl6030_wakeirqs = ATOMIC_INIT(0);
+
+static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ int chained_wakeups;
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ chained_wakeups = atomic_read(&twl6030_wakeirqs);
+
+ if (chained_wakeups && !twl_irq_wake_enabled) {
+ if (enable_irq_wake(twl_irq))
+ pr_err("twl6030 IRQ wake enable failed\n");
+ else
+ twl_irq_wake_enabled = true;
+ } else if (!chained_wakeups && twl_irq_wake_enabled) {
+ disable_irq_wake(twl_irq);
+ twl_irq_wake_enabled = false;
+ }
+
+ disable_irq(twl_irq);
+ break;
+
+ case PM_POST_SUSPEND:
+ enable_irq(twl_irq);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block twl6030_irq_pm_notifier_block = {
+ .notifier_call = twl6030_irq_pm_notifier,
+};
/*
* This thread processes interrupts reported by the Primary Interrupt Handler.
@@ -187,6 +229,16 @@ static inline void activate_irq(int irq)
#endif
}
+int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ if (on)
+ atomic_inc(&twl6030_wakeirqs);
+ else
+ atomic_dec(&twl6030_wakeirqs);
+
+ return 0;
+}
+
/*----------------------------------------------------------------------*/
static unsigned twl6030_irq_next;
@@ -318,10 +370,12 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
twl6030_irq_chip = dummy_irq_chip;
twl6030_irq_chip.name = "twl6030";
twl6030_irq_chip.irq_set_type = NULL;
+ twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
for (i = irq_base; i < irq_end; i++) {
irq_set_chip_and_handler(i, &twl6030_irq_chip,
handle_simple_irq);
+ irq_set_chip_data(i, (void *)irq_num);
activate_irq(i);
}
@@ -331,6 +385,14 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
/* install an irq handler to demultiplex the TWL6030 interrupt */
init_completion(&irq_event);
+
+ status = request_irq(irq_num, handle_twl6030_pih, 0,
+ "TWL6030-PIH", &irq_event);
+ if (status < 0) {
+ pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
+ goto fail_irq;
+ }
+
task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
if (IS_ERR(task)) {
pr_err("twl6030: could not create irq %d thread!\n", irq_num);
@@ -338,17 +400,14 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
goto fail_kthread;
}
- status = request_irq(irq_num, handle_twl6030_pih, IRQF_DISABLED,
- "TWL6030-PIH", &irq_event);
- if (status < 0) {
- pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
- goto fail_irq;
- }
+ twl_irq = irq_num;
+ register_pm_notifier(&twl6030_irq_pm_notifier_block);
return status;
-fail_irq:
- free_irq(irq_num, &irq_event);
fail_kthread:
+ free_irq(irq_num, &irq_event);
+
+fail_irq:
for (i = irq_base; i < irq_end; i++)
irq_set_chip_and_handler(i, NULL, NULL);
return status;
@@ -356,6 +415,7 @@ fail_kthread:
int twl6030_exit_irq(void)
{
+ unregister_pm_notifier(&twl6030_irq_pm_notifier_block);
if (twl6030_irq_base) {
pr_err("twl6030: can't yet clean up IRQs?\n");
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 24d436c2fe4..268f80fd043 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -34,7 +34,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
-static struct platform_device *twl6040_dev;
+#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
{
@@ -42,10 +42,16 @@ int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
u8 val = 0;
mutex_lock(&twl6040->io_mutex);
- ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
- if (ret < 0) {
- mutex_unlock(&twl6040->io_mutex);
- return ret;
+ /* Vibra control registers from cache */
+ if (unlikely(reg == TWL6040_REG_VIBCTLL ||
+ reg == TWL6040_REG_VIBCTLR)) {
+ val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)];
+ } else {
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret < 0) {
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+ }
}
mutex_unlock(&twl6040->io_mutex);
@@ -59,6 +65,9 @@ int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
mutex_lock(&twl6040->io_mutex);
ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+ /* Cache the vibra control registers */
+ if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR)
+ twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val;
mutex_unlock(&twl6040->io_mutex);
return ret;
@@ -203,11 +212,11 @@ static irqreturn_t twl6040_naudint_handler(int irq, void *data)
if (intid & TWL6040_THINT) {
status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
if (status & TWL6040_TSHUTDET) {
- dev_warn(&twl6040_dev->dev,
+ dev_warn(twl6040->dev,
"Thermal shutdown, powering-off");
twl6040_power(twl6040, 0);
} else {
- dev_warn(&twl6040_dev->dev,
+ dev_warn(twl6040->dev,
"Leaving thermal shutdown, powering-on");
twl6040_power(twl6040, 1);
}
@@ -227,7 +236,7 @@ static int twl6040_power_up_completion(struct twl6040 *twl6040,
if (!time_left) {
intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
if (!(intid & TWL6040_READYINT)) {
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"timeout waiting for READYINT\n");
return -ETIMEDOUT;
}
@@ -255,7 +264,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* wait for power-up completion */
ret = twl6040_power_up_completion(twl6040, naudint);
if (ret) {
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"automatic power-down failed\n");
twl6040->power_count = 0;
goto out;
@@ -264,7 +273,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* use manual power-up sequence */
ret = twl6040_power_up(twl6040);
if (ret) {
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"manual power-up failed\n");
twl6040->power_count = 0;
goto out;
@@ -276,7 +285,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
} else {
/* already powered-down */
if (!twl6040->power_count) {
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"device is already powered-off\n");
ret = -EPERM;
goto out;
@@ -326,7 +335,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
lppllctl &= ~TWL6040_LPLLFIN;
break;
default:
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"freq_out %d not supported\n", freq_out);
ret = -EINVAL;
goto pll_out;
@@ -347,7 +356,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
hppllctl);
break;
default:
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"freq_in %d not supported\n", freq_in);
ret = -EINVAL;
goto pll_out;
@@ -356,7 +365,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
case TWL6040_SYSCLK_SEL_HPPLL:
/* high-performance PLL can provide only 19.2 MHz */
if (freq_out != 19200000) {
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"freq_out %d not supported\n", freq_out);
ret = -EINVAL;
goto pll_out;
@@ -389,7 +398,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
TWL6040_HPLLENA;
break;
default:
- dev_err(&twl6040_dev->dev,
+ dev_err(twl6040->dev,
"freq_in %d not supported\n", freq_in);
ret = -EINVAL;
goto pll_out;
@@ -406,7 +415,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
break;
default:
- dev_err(&twl6040_dev->dev, "unknown pll id %d\n", pll_id);
+ dev_err(twl6040->dev, "unknown pll id %d\n", pll_id);
ret = -EINVAL;
goto pll_out;
}
@@ -435,6 +444,18 @@ unsigned int twl6040_get_sysclk(struct twl6040 *twl6040)
}
EXPORT_SYMBOL(twl6040_get_sysclk);
+/* Get the combined status of the vibra control register */
+int twl6040_get_vibralr_status(struct twl6040 *twl6040)
+{
+ u8 status;
+
+ status = twl6040->vibra_ctrl_cache[0] | twl6040->vibra_ctrl_cache[1];
+ status &= (TWL6040_VIBENA | TWL6040_VIBSEL);
+
+ return status;
+}
+EXPORT_SYMBOL(twl6040_get_vibralr_status);
+
static struct resource twl6040_vibra_rsrc[] = {
{
.flags = IORESOURCE_IRQ,
@@ -471,9 +492,7 @@ static int __devinit twl6040_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, twl6040);
- twl6040_dev = pdev;
twl6040->dev = &pdev->dev;
- twl6040->audpwron = pdata->audpwron_gpio;
twl6040->irq = pdata->naudint_irq;
twl6040->irq_base = pdata->irq_base;
@@ -483,6 +502,12 @@ static int __devinit twl6040_probe(struct platform_device *pdev)
twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
+ /* ERRATA: Automatic power-up is not possible in ES1.0 */
+ if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
+ twl6040->audpwron = pdata->audpwron_gpio;
+ else
+ twl6040->audpwron = -EINVAL;
+
if (gpio_is_valid(twl6040->audpwron)) {
ret = gpio_request(twl6040->audpwron, "audpwron");
if (ret)
@@ -493,10 +518,6 @@ static int __devinit twl6040_probe(struct platform_device *pdev)
goto gpio2_err;
}
- /* ERRATA: Automatic power-up is not possible in ES1.0 */
- if (twl6040->rev == TWL6040_REV_ES1_0)
- twl6040->audpwron = -EINVAL;
-
/* codec interrupt */
ret = twl6040_irq_init(twl6040);
if (ret)
@@ -566,7 +587,6 @@ gpio2_err:
gpio1_err:
platform_set_drvdata(pdev, NULL);
kfree(twl6040);
- twl6040_dev = NULL;
return ret;
}
@@ -586,7 +606,6 @@ static int __devexit twl6040_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
platform_set_drvdata(pdev, NULL);
kfree(twl6040);
- twl6040_dev = NULL;
return 0;
}
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index d97a8694517..f39b756df56 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -22,6 +22,7 @@
#include <linux/mfd/wl1273-core.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRIVER_DESC "WL1273 FM Radio Core"
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index ada1835a545..f4747a4a9a9 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -420,12 +420,19 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
+ wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_RISING:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
+ wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_FALLING:
wm831x->gpio_update[irq] = 0x10000;
+ wm831x->gpio_level[irq] = false;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
+ wm831x->gpio_level[irq] = true;
break;
default:
return -EINVAL;
@@ -449,7 +456,7 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
{
struct wm831x *wm831x = data;
unsigned int i;
- int primary, status_addr;
+ int primary, status_addr, ret;
int status_regs[WM831X_NUM_IRQ_REGS] = { 0 };
int read[WM831X_NUM_IRQ_REGS] = { 0 };
int *status;
@@ -507,6 +514,19 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
if (*status & wm831x_irqs[i].mask)
handle_nested_irq(wm831x->irq_base + i);
+
+ /* Simulate an edge triggered IRQ by polling the input
+ * status. This is sucky but improves interoperability.
+ */
+ if (primary == WM831X_GP_INT &&
+ wm831x->gpio_level[i - WM831X_IRQ_GPIO_1]) {
+ ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
+ while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) {
+ handle_nested_irq(wm831x->irq_base + i);
+ ret = wm831x_reg_read(wm831x,
+ WM831X_GPIO_LEVEL);
+ }
+ }
}
out:
@@ -596,8 +616,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
"No interrupt specified - functionality limited\n");
}
-
-
/* Enable top level interrupts, we mask at secondary level */
wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0);
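The wm831x-irq change above emulates level-triggered GPIO interrupts on edge-only hardware by re-reading the input level and re-dispatching the nested IRQ while the line stays asserted. A condensed sketch of that loop, with a hypothetical accessor standing in for wm831x_reg_read():

#include <linux/bitops.h>
#include <linux/irq.h>

/* hypothetical stand-in for wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL) */
static int demo_read_level(void)
{
	return 0;	/* a real driver reads the GPIO level register here */
}

/*
 * Called from the chip's threaded IRQ handler: handle_nested_irq() runs
 * the child handler directly in this thread, so process context is required.
 */
static void demo_dispatch_level(unsigned int virq, unsigned int bit)
{
	int level = demo_read_level();

	/* keep servicing the nested IRQ while the input is still high */
	while (level >= 0 && (level & BIT(bit))) {
		handle_nested_irq(virq);
		level = demo_read_level();
	}
}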
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index e06ba9440cd..62b4626f456 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/module.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index bfde4e8ec63..5d6ba132837 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -167,6 +167,18 @@ static struct mfd_cell wm8994_devs[] = {
* and should be handled via the standard regulator API supply
* management.
*/
+static const char *wm1811_main_supplies[] = {
+ "DBVDD1",
+ "DBVDD2",
+ "DBVDD3",
+ "DCVDD",
+ "AVDD1",
+ "AVDD2",
+ "CPVDD",
+ "SPKVDD1",
+ "SPKVDD2",
+};
+
static const char *wm8994_main_supplies[] = {
"DBVDD",
"DCVDD",
@@ -205,6 +217,47 @@ static int wm8994_suspend(struct device *dev)
return 0;
}
+ ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_4);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read power status: %d\n", ret);
+ } else if (ret & (WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA |
+ WM8994_AIF1ADC2L_ENA | WM8994_AIF1ADC2R_ENA |
+ WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC1R_ENA)) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+
+ ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_5);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read power status: %d\n", ret);
+ } else if (ret & (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA |
+ WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA |
+ WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA)) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+
+ switch (wm8994->type) {
+ case WM8958:
+ ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read power status: %d\n", ret);
+ } else if (ret & WM8958_MICD_ENA) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Disable LDO pulldowns while the device is suspended if we
+ * don't know that something will be driving them. */
+ if (!wm8994->ldo_ena_always_driven)
+ wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
+ WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
+ WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD);
+
/* GPIO configuration state is saved here since we may be configuring
* the GPIO alternate functions even if we're not using the gpiolib
* driver for them.
@@ -274,6 +327,11 @@ static int wm8994_resume(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
+ /* Disable LDO pulldowns while the device is active */
+ wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
+ WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
+ 0);
+
wm8994->suspended = false;
return 0;
@@ -329,6 +387,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
}
switch (wm8994->type) {
+ case WM1811:
+ wm8994->num_supplies = ARRAY_SIZE(wm1811_main_supplies);
+ break;
case WM8994:
wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
break;
@@ -349,6 +410,10 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
}
switch (wm8994->type) {
+ case WM1811:
+ for (i = 0; i < ARRAY_SIZE(wm1811_main_supplies); i++)
+ wm8994->supplies[i].supply = wm1811_main_supplies[i];
+ break;
case WM8994:
for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
wm8994->supplies[i].supply = wm8994_main_supplies[i];
@@ -382,6 +447,13 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_enable;
}
switch (ret) {
+ case 0x1811:
+ devname = "WM1811";
+ if (wm8994->type != WM1811)
+ dev_warn(wm8994->dev, "Device registered as type %d\n",
+ wm8994->type);
+ wm8994->type = WM1811;
+ break;
case 0x8994:
devname = "WM8994";
if (wm8994->type != WM8994)
@@ -441,8 +513,15 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
pdata->gpio_defaults[i]);
}
}
+
+ wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;
}
+ /* Disable LDO pulldowns while the device is active */
+ wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
+ WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
+ 0);
+
/* In some system designs where the regulators are not in use,
* we can achieve a small reduction in leakage currents by
* floating LDO outputs. This bit makes no difference if the
@@ -539,6 +618,7 @@ static int wm8994_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id wm8994_i2c_id[] = {
+ { "wm1811", WM1811 },
{ "wm8994", WM8994 },
{ "wm8958", WM8958 },
{ }
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2d6423c2d19..d593878d66d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -63,7 +63,7 @@ config AD525X_DPOT_SPI
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
- depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
+ depends on HAVE_CLK
help
This option enables device driver support for the PWM channels
on certain Atmel processors. Pulse Width Modulation is used for
@@ -506,5 +506,6 @@ source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
+source "drivers/misc/altera-stapl/Kconfig"
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8f3efb68a14..b26495a0255 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
+obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 35903154ca2..2208a9d5262 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -10,6 +10,7 @@
#include <linux/pwm.h>
#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
+#include <linux/module.h>
/*
* PWM Out generators
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 4ff73c21574..a39e0555df6 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -98,6 +98,7 @@ static const struct i2c_device_id ad_dpot_id[] = {
{"ad5282", AD5282_ID},
{"adn2860", ADN2860_ID},
{"ad5273", AD5273_ID},
+ {"ad5161", AD5161_ID},
{"ad5171", AD5171_ID},
{"ad5170", AD5170_ID},
{"ad5172", AD5172_ID},
diff --git a/drivers/staging/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig
index b6537321ed7..7f01d8e9399 100644
--- a/drivers/staging/altera-stapl/Kconfig
+++ b/drivers/misc/altera-stapl/Kconfig
@@ -1,3 +1,5 @@
+comment "Altera FPGA firmware download module"
+
config ALTERA_STAPL
tristate "Altera FPGA firmware download module"
depends on I2C
diff --git a/drivers/misc/altera-stapl/Makefile b/drivers/misc/altera-stapl/Makefile
new file mode 100644
index 00000000000..055f61ee781
--- /dev/null
+++ b/drivers/misc/altera-stapl/Makefile
@@ -0,0 +1,3 @@
+altera-stapl-objs = altera-lpt.o altera-jtag.o altera-comp.o altera.o
+
+obj-$(CONFIG_ALTERA_STAPL) += altera-stapl.o
diff --git a/drivers/staging/altera-stapl/altera-comp.c b/drivers/misc/altera-stapl/altera-comp.c
index 49b103bedaa..49b103bedaa 100644
--- a/drivers/staging/altera-stapl/altera-comp.c
+++ b/drivers/misc/altera-stapl/altera-comp.c
diff --git a/drivers/staging/altera-stapl/altera-exprt.h b/drivers/misc/altera-stapl/altera-exprt.h
index 39c38d84a67..39c38d84a67 100644
--- a/drivers/staging/altera-stapl/altera-exprt.h
+++ b/drivers/misc/altera-stapl/altera-exprt.h
diff --git a/drivers/staging/altera-stapl/altera-jtag.c b/drivers/misc/altera-stapl/altera-jtag.c
index 8b1620b1b2d..f4bf2009697 100644
--- a/drivers/staging/altera-stapl/altera-jtag.c
+++ b/drivers/misc/altera-stapl/altera-jtag.c
@@ -26,7 +26,7 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
-#include "altera.h"
+#include <misc/altera.h>
#include "altera-exprt.h"
#include "altera-jtag.h"
diff --git a/drivers/staging/altera-stapl/altera-jtag.h b/drivers/misc/altera-stapl/altera-jtag.h
index 2f97e36a2fb..2f97e36a2fb 100644
--- a/drivers/staging/altera-stapl/altera-jtag.h
+++ b/drivers/misc/altera-stapl/altera-jtag.h
diff --git a/drivers/staging/altera-stapl/altera-lpt.c b/drivers/misc/altera-stapl/altera-lpt.c
index 91456a03612..91456a03612 100644
--- a/drivers/staging/altera-stapl/altera-lpt.c
+++ b/drivers/misc/altera-stapl/altera-lpt.c
diff --git a/drivers/staging/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index c2eff6a82db..24272e022be 100644
--- a/drivers/staging/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -29,7 +29,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "altera.h"
+#include <misc/altera.h>
#include "altera-exprt.h"
#include "altera-jtag.h"
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 769a4e8e10d..5bb18778107 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -16,6 +16,7 @@
#include <linux/spinlock.h>
#include <linux/atmel-ssc.h>
#include <linux/slab.h>
+#include <linux/module.h>
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index a844810b50f..4bcfc375973 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/export.h>
/* Number of bytes to reserve for the iomem resource */
#define ATMEL_TC_IOMEM_SIZE 256
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 82fe2d06782..bfeea9ba702 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -22,6 +22,7 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/module.h>
#define BH1780_REG_CONTROL 0x80
#define BH1780_REG_PARTID 0x8A
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index 27dc0d21aaf..f6586d53e1a 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -400,7 +400,8 @@ static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw)
return ret;
}
- device_init_wakeup(&client->dev, pdata->wakeup);
+ if (pdata)
+ device_init_wakeup(&client->dev, pdata->wakeup);
}
return 0;
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 8cebec5e85e..3f7ad83ed74 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -102,6 +102,7 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/module.h>
#define v1printk(a...) do { \
if (verbose) \
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 8b51cd62d06..29d12a70eb1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -163,7 +163,7 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
int i;
if (lis3->blkread) {
- if (lis3_dev.whoami == WAI_12B) {
+ if (lis3->whoami == WAI_12B) {
u16 data[3];
lis3->blkread(lis3, OUTX_L, 6, (u8 *)data);
for (i = 0; i < 3; i++)
@@ -195,18 +195,30 @@ static int lis3_8_rates[2] = {100, 400};
static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
/* ODR is Output Data Rate */
-static int lis3lv02d_get_odr(void)
+static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
{
u8 ctrl;
int shift;
- lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
- ctrl &= lis3_dev.odr_mask;
- shift = ffs(lis3_dev.odr_mask) - 1;
- return lis3_dev.odrs[(ctrl >> shift)];
+ lis3->read(lis3, CTRL_REG1, &ctrl);
+ ctrl &= lis3->odr_mask;
+ shift = ffs(lis3->odr_mask) - 1;
+ return lis3->odrs[(ctrl >> shift)];
}
-static int lis3lv02d_set_odr(int rate)
+static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
+{
+ int div = lis3lv02d_get_odr(lis3);
+
+ if (WARN_ONCE(div == 0, "device returned spurious data"))
+ return -ENXIO;
+
+ /* LIS3 power on delay is quite long */
+ msleep(lis3->pwron_delay / div);
+ return 0;
+}
+
+static int lis3lv02d_set_odr(struct lis3lv02d *lis3, int rate)
{
u8 ctrl;
int i, len, shift;
@@ -214,14 +226,14 @@ static int lis3lv02d_set_odr(int rate)
if (!rate)
return -EINVAL;
- lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
- ctrl &= ~lis3_dev.odr_mask;
- len = 1 << hweight_long(lis3_dev.odr_mask); /* # of possible values */
- shift = ffs(lis3_dev.odr_mask) - 1;
+ lis3->read(lis3, CTRL_REG1, &ctrl);
+ ctrl &= ~lis3->odr_mask;
+ len = 1 << hweight_long(lis3->odr_mask); /* # of possible values */
+ shift = ffs(lis3->odr_mask) - 1;
for (i = 0; i < len; i++)
- if (lis3_dev.odrs[i] == rate) {
- lis3_dev.write(&lis3_dev, CTRL_REG1,
+ if (lis3->odrs[i] == rate) {
+ lis3->write(lis3, CTRL_REG1,
ctrl | (i << shift));
return 0;
}
@@ -240,12 +252,12 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
mutex_lock(&lis3->mutex);
irq_cfg = lis3->irq_cfg;
- if (lis3_dev.whoami == WAI_8B) {
+ if (lis3->whoami == WAI_8B) {
lis3->data_ready_count[IRQ_LINE0] = 0;
lis3->data_ready_count[IRQ_LINE1] = 0;
/* Change interrupt cfg to data ready for selftest */
- atomic_inc(&lis3_dev.wake_thread);
+ atomic_inc(&lis3->wake_thread);
lis3->irq_cfg = LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY;
lis3->read(lis3, CTRL_REG3, &ctrl_reg_data);
lis3->write(lis3, CTRL_REG3, (ctrl_reg_data &
@@ -253,12 +265,12 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
(LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY));
}
- if (lis3_dev.whoami == WAI_3DC) {
+ if (lis3->whoami == WAI_3DC) {
ctlreg = CTRL_REG4;
selftest = CTRL4_ST0;
} else {
ctlreg = CTRL_REG1;
- if (lis3_dev.whoami == WAI_12B)
+ if (lis3->whoami == WAI_12B)
selftest = CTRL1_ST;
else
selftest = CTRL1_STP;
@@ -266,7 +278,9 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
lis3->read(lis3, ctlreg, &reg);
lis3->write(lis3, ctlreg, (reg | selftest));
- msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+ ret = lis3lv02d_get_pwron_wait(lis3);
+ if (ret)
+ goto fail;
/* Read directly to avoid axis remap */
x = lis3->read_data(lis3, OUTX);
@@ -275,7 +289,9 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
/* back to normal settings */
lis3->write(lis3, ctlreg, reg);
- msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+ ret = lis3lv02d_get_pwron_wait(lis3);
+ if (ret)
+ goto fail;
results[0] = x - lis3->read_data(lis3, OUTX);
results[1] = y - lis3->read_data(lis3, OUTY);
@@ -283,9 +299,9 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
ret = 0;
- if (lis3_dev.whoami == WAI_8B) {
+ if (lis3->whoami == WAI_8B) {
/* Restore original interrupt configuration */
- atomic_dec(&lis3_dev.wake_thread);
+ atomic_dec(&lis3->wake_thread);
lis3->write(lis3, CTRL_REG3, ctrl_reg_data);
lis3->irq_cfg = irq_cfg;
@@ -363,8 +379,9 @@ void lis3lv02d_poweroff(struct lis3lv02d *lis3)
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
-void lis3lv02d_poweron(struct lis3lv02d *lis3)
+int lis3lv02d_poweron(struct lis3lv02d *lis3)
{
+ int err;
u8 reg;
lis3->init(lis3);
@@ -384,35 +401,41 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
lis3->write(lis3, CTRL_REG2, reg);
}
- /* LIS3 power on delay is quite long */
- msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+ err = lis3lv02d_get_pwron_wait(lis3);
+ if (err)
+ return err;
if (lis3->reg_ctrl)
lis3_context_restore(lis3);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
{
+ struct lis3lv02d *lis3 = pidev->private;
int x, y, z;
- mutex_lock(&lis3_dev.mutex);
- lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ mutex_lock(&lis3->mutex);
+ lis3lv02d_get_xyz(lis3, &x, &y, &z);
input_report_abs(pidev->input, ABS_X, x);
input_report_abs(pidev->input, ABS_Y, y);
input_report_abs(pidev->input, ABS_Z, z);
input_sync(pidev->input);
- mutex_unlock(&lis3_dev.mutex);
+ mutex_unlock(&lis3->mutex);
}
static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
{
- if (lis3_dev.pm_dev)
- pm_runtime_get_sync(lis3_dev.pm_dev);
+ struct lis3lv02d *lis3 = pidev->private;
- if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev)
- atomic_set(&lis3_dev.wake_thread, 1);
+ if (lis3->pm_dev)
+ pm_runtime_get_sync(lis3->pm_dev);
+
+ if (lis3->pdata && lis3->whoami == WAI_8B && lis3->idev)
+ atomic_set(&lis3->wake_thread, 1);
/*
* Update coordinates for the case where poll interval is 0 and
* the chip is running purely under interrupt control
@@ -422,14 +445,18 @@ static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
{
- atomic_set(&lis3_dev.wake_thread, 0);
- if (lis3_dev.pm_dev)
- pm_runtime_put(lis3_dev.pm_dev);
+ struct lis3lv02d *lis3 = pidev->private;
+
+ atomic_set(&lis3->wake_thread, 0);
+ if (lis3->pm_dev)
+ pm_runtime_put(lis3->pm_dev);
}
-static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
+static irqreturn_t lis302dl_interrupt(int irq, void *data)
{
- if (!test_bit(0, &lis3_dev.misc_opened))
+ struct lis3lv02d *lis3 = data;
+
+ if (!test_bit(0, &lis3->misc_opened))
goto out;
/*
@@ -437,12 +464,12 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
* the lid is closed. This leads to interrupts as soon as a little move
* is done.
*/
- atomic_inc(&lis3_dev.count);
+ atomic_inc(&lis3->count);
- wake_up_interruptible(&lis3_dev.misc_wait);
- kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
+ wake_up_interruptible(&lis3->misc_wait);
+ kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
out:
- if (atomic_read(&lis3_dev.wake_thread))
+ if (atomic_read(&lis3->wake_thread))
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
@@ -514,28 +541,37 @@ static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
{
- if (test_and_set_bit(0, &lis3_dev.misc_opened))
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ if (test_and_set_bit(0, &lis3->misc_opened))
return -EBUSY; /* already open */
- if (lis3_dev.pm_dev)
- pm_runtime_get_sync(lis3_dev.pm_dev);
+ if (lis3->pm_dev)
+ pm_runtime_get_sync(lis3->pm_dev);
- atomic_set(&lis3_dev.count, 0);
+ atomic_set(&lis3->count, 0);
return 0;
}
static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
- fasync_helper(-1, file, 0, &lis3_dev.async_queue);
- clear_bit(0, &lis3_dev.misc_opened); /* release the device */
- if (lis3_dev.pm_dev)
- pm_runtime_put(lis3_dev.pm_dev);
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ fasync_helper(-1, file, 0, &lis3->async_queue);
+ clear_bit(0, &lis3->misc_opened); /* release the device */
+ if (lis3->pm_dev)
+ pm_runtime_put(lis3->pm_dev);
return 0;
}
static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
DECLARE_WAITQUEUE(wait, current);
u32 data;
unsigned char byte_data;
@@ -544,10 +580,10 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
if (count < 1)
return -EINVAL;
- add_wait_queue(&lis3_dev.misc_wait, &wait);
+ add_wait_queue(&lis3->misc_wait, &wait);
while (true) {
set_current_state(TASK_INTERRUPTIBLE);
- data = atomic_xchg(&lis3_dev.count, 0);
+ data = atomic_xchg(&lis3->count, 0);
if (data)
break;
@@ -577,22 +613,28 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
out:
__set_current_state(TASK_RUNNING);
- remove_wait_queue(&lis3_dev.misc_wait, &wait);
+ remove_wait_queue(&lis3->misc_wait, &wait);
return retval;
}
static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
{
- poll_wait(file, &lis3_dev.misc_wait, wait);
- if (atomic_read(&lis3_dev.count))
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ poll_wait(file, &lis3->misc_wait, wait);
+ if (atomic_read(&lis3->count))
return POLLIN | POLLRDNORM;
return 0;
}
static int lis3lv02d_misc_fasync(int fd, struct file *file, int on)
{
- return fasync_helper(fd, file, on, &lis3_dev.async_queue);
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ return fasync_helper(fd, file, on, &lis3->async_queue);
}
static const struct file_operations lis3lv02d_misc_fops = {
@@ -605,85 +647,80 @@ static const struct file_operations lis3lv02d_misc_fops = {
.fasync = lis3lv02d_misc_fasync,
};
-static struct miscdevice lis3lv02d_misc_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "freefall",
- .fops = &lis3lv02d_misc_fops,
-};
-
-int lis3lv02d_joystick_enable(void)
+int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
{
struct input_dev *input_dev;
int err;
int max_val, fuzz, flat;
int btns[] = {BTN_X, BTN_Y, BTN_Z};
- if (lis3_dev.idev)
+ if (lis3->idev)
return -EINVAL;
- lis3_dev.idev = input_allocate_polled_device();
- if (!lis3_dev.idev)
+ lis3->idev = input_allocate_polled_device();
+ if (!lis3->idev)
return -ENOMEM;
- lis3_dev.idev->poll = lis3lv02d_joystick_poll;
- lis3_dev.idev->open = lis3lv02d_joystick_open;
- lis3_dev.idev->close = lis3lv02d_joystick_close;
- lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
- lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
- lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
- input_dev = lis3_dev.idev->input;
+ lis3->idev->poll = lis3lv02d_joystick_poll;
+ lis3->idev->open = lis3lv02d_joystick_open;
+ lis3->idev->close = lis3lv02d_joystick_close;
+ lis3->idev->poll_interval = MDPS_POLL_INTERVAL;
+ lis3->idev->poll_interval_min = MDPS_POLL_MIN;
+ lis3->idev->poll_interval_max = MDPS_POLL_MAX;
+ lis3->idev->private = lis3;
+ input_dev = lis3->idev->input;
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0;
- input_dev->dev.parent = &lis3_dev.pdev->dev;
+ input_dev->dev.parent = &lis3->pdev->dev;
set_bit(EV_ABS, input_dev->evbit);
- max_val = (lis3_dev.mdps_max_val * lis3_dev.scale) / LIS3_ACCURACY;
- if (lis3_dev.whoami == WAI_12B) {
+ max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
+ if (lis3->whoami == WAI_12B) {
fuzz = LIS3_DEFAULT_FUZZ_12B;
flat = LIS3_DEFAULT_FLAT_12B;
} else {
fuzz = LIS3_DEFAULT_FUZZ_8B;
flat = LIS3_DEFAULT_FLAT_8B;
}
- fuzz = (fuzz * lis3_dev.scale) / LIS3_ACCURACY;
- flat = (flat * lis3_dev.scale) / LIS3_ACCURACY;
+ fuzz = (fuzz * lis3->scale) / LIS3_ACCURACY;
+ flat = (flat * lis3->scale) / LIS3_ACCURACY;
input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
- lis3_dev.mapped_btns[0] = lis3lv02d_get_axis(abs(lis3_dev.ac.x), btns);
- lis3_dev.mapped_btns[1] = lis3lv02d_get_axis(abs(lis3_dev.ac.y), btns);
- lis3_dev.mapped_btns[2] = lis3lv02d_get_axis(abs(lis3_dev.ac.z), btns);
+ lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
+ lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
+ lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
- err = input_register_polled_device(lis3_dev.idev);
+ err = input_register_polled_device(lis3->idev);
if (err) {
- input_free_polled_device(lis3_dev.idev);
- lis3_dev.idev = NULL;
+ input_free_polled_device(lis3->idev);
+ lis3->idev = NULL;
}
return err;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
-void lis3lv02d_joystick_disable(void)
+void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
{
- if (lis3_dev.irq)
- free_irq(lis3_dev.irq, &lis3_dev);
- if (lis3_dev.pdata && lis3_dev.pdata->irq2)
- free_irq(lis3_dev.pdata->irq2, &lis3_dev);
+ if (lis3->irq)
+ free_irq(lis3->irq, lis3);
+ if (lis3->pdata && lis3->pdata->irq2)
+ free_irq(lis3->pdata->irq2, lis3);
- if (!lis3_dev.idev)
+ if (!lis3->idev)
return;
- if (lis3_dev.irq)
- misc_deregister(&lis3lv02d_misc_device);
- input_unregister_polled_device(lis3_dev.idev);
- input_free_polled_device(lis3_dev.idev);
- lis3_dev.idev = NULL;
+ if (lis3->irq)
+ misc_deregister(&lis3->miscdev);
+ input_unregister_polled_device(lis3->idev);
+ input_free_polled_device(lis3->idev);
+ lis3->idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -708,6 +745,7 @@ static void lis3lv02d_sysfs_poweron(struct lis3lv02d *lis3)
static ssize_t lis3lv02d_selftest_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
s16 values[3];
static const char ok[] = "OK";
@@ -715,8 +753,8 @@ static ssize_t lis3lv02d_selftest_show(struct device *dev,
static const char irq[] = "FAIL_IRQ";
const char *res;
- lis3lv02d_sysfs_poweron(&lis3_dev);
- switch (lis3lv02d_selftest(&lis3_dev, values)) {
+ lis3lv02d_sysfs_poweron(lis3);
+ switch (lis3lv02d_selftest(lis3, values)) {
case SELFTEST_FAIL:
res = fail;
break;
@@ -735,33 +773,37 @@ static ssize_t lis3lv02d_selftest_show(struct device *dev,
static ssize_t lis3lv02d_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
int x, y, z;
- lis3lv02d_sysfs_poweron(&lis3_dev);
- mutex_lock(&lis3_dev.mutex);
- lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- mutex_unlock(&lis3_dev.mutex);
+ lis3lv02d_sysfs_poweron(lis3);
+ mutex_lock(&lis3->mutex);
+ lis3lv02d_get_xyz(lis3, &x, &y, &z);
+ mutex_unlock(&lis3->mutex);
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
static ssize_t lis3lv02d_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- lis3lv02d_sysfs_poweron(&lis3_dev);
- return sprintf(buf, "%d\n", lis3lv02d_get_odr());
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+
+ lis3lv02d_sysfs_poweron(lis3);
+ return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
}
static ssize_t lis3lv02d_rate_set(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
unsigned long rate;
if (strict_strtoul(buf, 0, &rate))
return -EINVAL;
- lis3lv02d_sysfs_poweron(&lis3_dev);
- if (lis3lv02d_set_odr(rate))
+ lis3lv02d_sysfs_poweron(lis3);
+ if (lis3lv02d_set_odr(lis3, rate))
return -EINVAL;
return count;
@@ -790,6 +832,7 @@ static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
if (IS_ERR(lis3->pdev))
return PTR_ERR(lis3->pdev);
+ platform_set_drvdata(lis3->pdev, lis3);
return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
}
@@ -803,7 +846,7 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
/* SYSFS may have left chip running. Turn off if necessary */
if (!pm_runtime_suspended(lis3->pm_dev))
- lis3lv02d_poweroff(&lis3_dev);
+ lis3lv02d_poweroff(lis3);
pm_runtime_disable(lis3->pm_dev);
pm_runtime_set_suspended(lis3->pm_dev);
@@ -813,24 +856,24 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
-static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
+static void lis3lv02d_8b_configure(struct lis3lv02d *lis3,
struct lis3lv02d_platform_data *p)
{
int err;
int ctrl2 = p->hipass_ctrl;
if (p->click_flags) {
- dev->write(dev, CLICK_CFG, p->click_flags);
- dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
- dev->write(dev, CLICK_LATENCY, p->click_latency);
- dev->write(dev, CLICK_WINDOW, p->click_window);
- dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
- dev->write(dev, CLICK_THSY_X,
+ lis3->write(lis3, CLICK_CFG, p->click_flags);
+ lis3->write(lis3, CLICK_TIMELIMIT, p->click_time_limit);
+ lis3->write(lis3, CLICK_LATENCY, p->click_latency);
+ lis3->write(lis3, CLICK_WINDOW, p->click_window);
+ lis3->write(lis3, CLICK_THSZ, p->click_thresh_z & 0xf);
+ lis3->write(lis3, CLICK_THSY_X,
(p->click_thresh_x & 0xf) |
(p->click_thresh_y << 4));
- if (dev->idev) {
- struct input_dev *input_dev = lis3_dev.idev->input;
+ if (lis3->idev) {
+ struct input_dev *input_dev = lis3->idev->input;
input_set_capability(input_dev, EV_KEY, BTN_X);
input_set_capability(input_dev, EV_KEY, BTN_Y);
input_set_capability(input_dev, EV_KEY, BTN_Z);
@@ -838,22 +881,22 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
}
if (p->wakeup_flags) {
- dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
- dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+ lis3->write(lis3, FF_WU_CFG_1, p->wakeup_flags);
+ lis3->write(lis3, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
/* pdata value + 1 to keep this backward compatible*/
- dev->write(dev, FF_WU_DURATION_1, p->duration1 + 1);
+ lis3->write(lis3, FF_WU_DURATION_1, p->duration1 + 1);
ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata*/
}
if (p->wakeup_flags2) {
- dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2);
- dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
+ lis3->write(lis3, FF_WU_CFG_2, p->wakeup_flags2);
+ lis3->write(lis3, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
/* pdata value + 1 to keep this backward compatible*/
- dev->write(dev, FF_WU_DURATION_2, p->duration2 + 1);
+ lis3->write(lis3, FF_WU_DURATION_2, p->duration2 + 1);
ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata*/
}
/* Configure hipass filters */
- dev->write(dev, CTRL_REG2, ctrl2);
+ lis3->write(lis3, CTRL_REG2, ctrl2);
if (p->irq2) {
err = request_threaded_irq(p->irq2,
@@ -861,7 +904,7 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
lis302dl_interrupt_thread2_8b,
IRQF_TRIGGER_RISING | IRQF_ONESHOT |
(p->irq_flags2 & IRQF_TRIGGER_MASK),
- DRIVER_NAME, &lis3_dev);
+ DRIVER_NAME, lis3);
if (err < 0)
pr_err("No second IRQ. Limited functionality\n");
}
@@ -871,93 +914,97 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
* Initialise the accelerometer and the various subsystems.
* Should be rather independent of the bus system.
*/
-int lis3lv02d_init_device(struct lis3lv02d *dev)
+int lis3lv02d_init_device(struct lis3lv02d *lis3)
{
int err;
irq_handler_t thread_fn;
int irq_flags = 0;
- dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
+ lis3->whoami = lis3lv02d_read_8(lis3, WHO_AM_I);
- switch (dev->whoami) {
+ switch (lis3->whoami) {
case WAI_12B:
pr_info("12 bits sensor found\n");
- dev->read_data = lis3lv02d_read_12;
- dev->mdps_max_val = 2048;
- dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
- dev->odrs = lis3_12_rates;
- dev->odr_mask = CTRL1_DF0 | CTRL1_DF1;
- dev->scale = LIS3_SENSITIVITY_12B;
- dev->regs = lis3_wai12_regs;
- dev->regs_size = ARRAY_SIZE(lis3_wai12_regs);
+ lis3->read_data = lis3lv02d_read_12;
+ lis3->mdps_max_val = 2048;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
+ lis3->odrs = lis3_12_rates;
+ lis3->odr_mask = CTRL1_DF0 | CTRL1_DF1;
+ lis3->scale = LIS3_SENSITIVITY_12B;
+ lis3->regs = lis3_wai12_regs;
+ lis3->regs_size = ARRAY_SIZE(lis3_wai12_regs);
break;
case WAI_8B:
pr_info("8 bits sensor found\n");
- dev->read_data = lis3lv02d_read_8;
- dev->mdps_max_val = 128;
- dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
- dev->odrs = lis3_8_rates;
- dev->odr_mask = CTRL1_DR;
- dev->scale = LIS3_SENSITIVITY_8B;
- dev->regs = lis3_wai8_regs;
- dev->regs_size = ARRAY_SIZE(lis3_wai8_regs);
+ lis3->read_data = lis3lv02d_read_8;
+ lis3->mdps_max_val = 128;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ lis3->odrs = lis3_8_rates;
+ lis3->odr_mask = CTRL1_DR;
+ lis3->scale = LIS3_SENSITIVITY_8B;
+ lis3->regs = lis3_wai8_regs;
+ lis3->regs_size = ARRAY_SIZE(lis3_wai8_regs);
break;
case WAI_3DC:
pr_info("8 bits 3DC sensor found\n");
- dev->read_data = lis3lv02d_read_8;
- dev->mdps_max_val = 128;
- dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
- dev->odrs = lis3_3dc_rates;
- dev->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3;
- dev->scale = LIS3_SENSITIVITY_8B;
+ lis3->read_data = lis3lv02d_read_8;
+ lis3->mdps_max_val = 128;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ lis3->odrs = lis3_3dc_rates;
+ lis3->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3;
+ lis3->scale = LIS3_SENSITIVITY_8B;
break;
default:
- pr_err("unknown sensor type 0x%X\n", dev->whoami);
+ pr_err("unknown sensor type 0x%X\n", lis3->whoami);
return -EINVAL;
}
- dev->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
+ lis3->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
sizeof(lis3_wai12_regs)), GFP_KERNEL);
- if (dev->reg_cache == NULL) {
+ if (lis3->reg_cache == NULL) {
printk(KERN_ERR DRIVER_NAME "out of memory\n");
return -ENOMEM;
}
- mutex_init(&dev->mutex);
- atomic_set(&dev->wake_thread, 0);
+ mutex_init(&lis3->mutex);
+ atomic_set(&lis3->wake_thread, 0);
- lis3lv02d_add_fs(dev);
- lis3lv02d_poweron(dev);
+ lis3lv02d_add_fs(lis3);
+ err = lis3lv02d_poweron(lis3);
+ if (err) {
+ lis3lv02d_remove_fs(lis3);
+ return err;
+ }
- if (dev->pm_dev) {
- pm_runtime_set_active(dev->pm_dev);
- pm_runtime_enable(dev->pm_dev);
+ if (lis3->pm_dev) {
+ pm_runtime_set_active(lis3->pm_dev);
+ pm_runtime_enable(lis3->pm_dev);
}
- if (lis3lv02d_joystick_enable())
+ if (lis3lv02d_joystick_enable(lis3))
pr_err("joystick initialization failed\n");
/* passing in platform specific data is purely optional and only
* used by the SPI transport layer at the moment */
- if (dev->pdata) {
- struct lis3lv02d_platform_data *p = dev->pdata;
+ if (lis3->pdata) {
+ struct lis3lv02d_platform_data *p = lis3->pdata;
- if (dev->whoami == WAI_8B)
- lis3lv02d_8b_configure(dev, p);
+ if (lis3->whoami == WAI_8B)
+ lis3lv02d_8b_configure(lis3, p);
irq_flags = p->irq_flags1 & IRQF_TRIGGER_MASK;
- dev->irq_cfg = p->irq_cfg;
+ lis3->irq_cfg = p->irq_cfg;
if (p->irq_cfg)
- dev->write(dev, CTRL_REG3, p->irq_cfg);
+ lis3->write(lis3, CTRL_REG3, p->irq_cfg);
if (p->default_rate)
- lis3lv02d_set_odr(p->default_rate);
+ lis3lv02d_set_odr(lis3, p->default_rate);
}
/* bail if we did not get an IRQ from the bus layer */
- if (!dev->irq) {
+ if (!lis3->irq) {
pr_debug("No IRQ. Disabling /dev/freefall\n");
goto out;
}
@@ -973,23 +1020,27 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
* io-apic is not configurable (and generates a warning) but I keep it
* in case of support for other hardware.
*/
- if (dev->pdata && dev->whoami == WAI_8B)
+ if (lis3->pdata && lis3->whoami == WAI_8B)
thread_fn = lis302dl_interrupt_thread1_8b;
else
thread_fn = NULL;
- err = request_threaded_irq(dev->irq, lis302dl_interrupt,
+ err = request_threaded_irq(lis3->irq, lis302dl_interrupt,
thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT |
irq_flags,
- DRIVER_NAME, &lis3_dev);
+ DRIVER_NAME, lis3);
if (err < 0) {
pr_err("Cannot get IRQ\n");
goto out;
}
- if (misc_register(&lis3lv02d_misc_device))
+ lis3->miscdev.minor = MISC_DYNAMIC_MINOR;
+ lis3->miscdev.name = "freefall";
+ lis3->miscdev.fops = &lis3lv02d_misc_fops;
+
+ if (misc_register(&lis3->miscdev))
pr_err("misc_register failed\n");
out:
return 0;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index a1939589eb2..2b1482ad3f1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
#include <linux/regulator/consumer.h>
+#include <linux/miscdevice.h>
/*
* This driver tries to support the "digital" accelerometer chips from
@@ -273,6 +274,8 @@ struct lis3lv02d {
struct fasync_struct *async_queue; /* queue for the misc device */
wait_queue_head_t misc_wait; /* Wait queue for the misc device */
unsigned long misc_opened; /* bit0: whether the device is open */
+ struct miscdevice miscdev;
+
int data_ready_count[2];
atomic_t wake_thread;
unsigned char irq_cfg;
@@ -282,10 +285,10 @@ struct lis3lv02d {
};
int lis3lv02d_init_device(struct lis3lv02d *lis3);
-int lis3lv02d_joystick_enable(void);
-void lis3lv02d_joystick_disable(void);
+int lis3lv02d_joystick_enable(struct lis3lv02d *lis3);
+void lis3lv02d_joystick_disable(struct lis3lv02d *lis3);
void lis3lv02d_poweroff(struct lis3lv02d *lis3);
-void lis3lv02d_poweron(struct lis3lv02d *lis3);
+int lis3lv02d_poweron(struct lis3lv02d *lis3);
int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
extern struct lis3lv02d lis3_dev;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index b20dfb4522d..c02fea029dc 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -79,8 +79,7 @@ static int lis3_i2c_init(struct lis3lv02d *lis3)
u8 reg;
int ret;
- if (lis3->reg_ctrl)
- lis3_reg_ctrl(lis3, LIS3_REG_ON);
+ lis3_reg_ctrl(lis3, LIS3_REG_ON);
lis3->read(lis3, WHO_AM_I, &reg);
if (reg != lis3->whoami)
@@ -106,10 +105,6 @@ static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
if (pdata) {
- /* Regulator control is optional */
- if (pdata->driver_features & LIS3_USE_REGULATOR_CTRL)
- lis3_dev.reg_ctrl = lis3_reg_ctrl;
-
if ((pdata->driver_features & LIS3_USE_BLOCK_READ) &&
(i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK)))
@@ -131,15 +126,13 @@ static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
goto fail;
}
- if (lis3_dev.reg_ctrl) {
- lis3_dev.regulators[0].supply = reg_vdd;
- lis3_dev.regulators[1].supply = reg_vdd_io;
- ret = regulator_bulk_get(&client->dev,
- ARRAY_SIZE(lis3_dev.regulators),
- lis3_dev.regulators);
- if (ret < 0)
- goto fail;
- }
+ lis3_dev.regulators[0].supply = reg_vdd;
+ lis3_dev.regulators[1].supply = reg_vdd_io;
+ ret = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(lis3_dev.regulators),
+ lis3_dev.regulators);
+ if (ret < 0)
+ goto fail;
lis3_dev.pdata = pdata;
lis3_dev.bus_priv = client;
@@ -153,16 +146,19 @@ static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, &lis3_dev);
/* Provide power over the init call */
- if (lis3_dev.reg_ctrl)
- lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON);
+ lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON);
ret = lis3lv02d_init_device(&lis3_dev);
- if (lis3_dev.reg_ctrl)
- lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF);
+ lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF);
- if (ret == 0)
- return 0;
+ if (ret)
+ goto fail2;
+ return 0;
+
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(lis3_dev.regulators),
+ lis3_dev.regulators);
fail:
if (pdata && pdata->release_resources)
pdata->release_resources();
@@ -177,12 +173,11 @@ static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
if (pdata && pdata->release_resources)
pdata->release_resources();
- lis3lv02d_joystick_disable();
+ lis3lv02d_joystick_disable(lis3);
lis3lv02d_remove_fs(&lis3_dev);
- if (lis3_dev.reg_ctrl)
- regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
- lis3_dev.regulators);
+ regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
+ lis3_dev.regulators);
return 0;
}
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index c1f8a8fbf69..b2c1be12d16 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -83,7 +83,7 @@ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
static int __devexit lis302dl_spi_remove(struct spi_device *spi)
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
- lis3lv02d_joystick_disable();
+ lis3lv02d_joystick_disable(lis3);
lis3lv02d_poweroff(lis3);
return lis3lv02d_remove_fs(&lis3_dev);
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 9e9bddaa95a..913de07e577 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 3a3580566df..43ef8d162f2 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -35,6 +35,7 @@
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
+#include <linux/module.h>
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index a6ef18259da..ba247902267 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -11,6 +11,7 @@
#include <linux/tifm.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#define DRIVER_NAME "tifm_7xx1"
#define DRIVER_VERSION "0.8"
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 44d4475a09d..0bd5349b042 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/idr.h>
+#include <linux/module.h>
#define DRIVER_NAME "tifm_core"
#define DRIVER_VERSION "0.8"
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 053d36caf95..cd41d403c9d 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -151,7 +151,7 @@ MODULE_LICENSE("GPL");
struct vmballoon_stats {
unsigned int timer;
- /* allocation statustics */
+ /* allocation statistics */
unsigned int alloc;
unsigned int alloc_fail;
unsigned int sleep_alloc;
@@ -412,6 +412,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
gfp_t flags;
unsigned int hv_status;
bool locked = false;
+ flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
do {
if (!can_sleep)
@@ -419,7 +420,6 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
else
STATS_INC(b->stats.sleep_alloc);
- flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
page = alloc_page(flags);
if (!page) {
if (!can_sleep)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4c1a648d00f..a1cb21f9530 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -94,6 +94,11 @@ struct mmc_blk_data {
unsigned int read_only;
unsigned int part_type;
unsigned int name_idx;
+ unsigned int reset_done;
+#define MMC_BLK_READ BIT(0)
+#define MMC_BLK_WRITE BIT(1)
+#define MMC_BLK_DISCARD BIT(2)
+#define MMC_BLK_SECDISCARD BIT(3)
/*
* Only set in main mmc_blk_data associated
@@ -109,11 +114,11 @@ static DEFINE_MUTEX(open_lock);
enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
- MMC_BLK_RETRY,
- MMC_BLK_RETRY_SINGLE,
- MMC_BLK_DATA_ERR,
MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
};
module_param(perdev_minors, int, 0444);
@@ -291,7 +296,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_card *card;
struct mmc_command cmd = {0};
struct mmc_data data = {0};
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct scatterlist sg;
int err;
@@ -442,19 +447,24 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
{
int ret;
struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+
if (main_md->part_curr == md->part_type)
return 0;
if (mmc_card_mmc(card)) {
- card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
- card->ext_csd.part_config |= md->part_type;
+ u8 part_config = card->ext_csd.part_config;
+
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ part_config |= md->part_type;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+ EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
if (ret)
return ret;
-}
+
+ card->ext_csd.part_config = part_config;
+ }
main_md->part_curr = md->part_type;
return 0;
@@ -466,7 +476,7 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
u32 result;
__be32 *blocks;
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
unsigned int timeout_us;
@@ -616,7 +626,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq)
+ struct mmc_blk_request *brq, int *ecc_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -641,6 +651,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
if (err)
return ERR_ABORT;
+ /* Flag ECC errors */
+ if ((status & R1_CARD_ECC_FAILED) ||
+ (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+ (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+ *ecc_err = 1;
+
/*
* Check the current card state. If it is in some data transfer
* mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -658,6 +674,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
*/
if (err)
return ERR_ABORT;
+ if (stop_status & R1_CARD_ECC_FAILED)
+ *ecc_err = 1;
}
/* Check for set block count errors */
@@ -670,6 +688,10 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
prev_cmd_status_valid, status);
+ /* Data errors */
+ if (!brq->stop.error)
+ return ERR_CONTINUE;
+
/* Now for stop errors. These aren't fatal to the transfer. */
pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, brq->stop.error,
@@ -686,12 +708,45 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return ERR_CONTINUE;
}
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+ int type)
+{
+ int err;
+
+ if (md->reset_done & type)
+ return -EEXIST;
+
+ md->reset_done |= type;
+ err = mmc_hw_reset(host);
+ /* Ensure we switch back to the correct partition */
+ if (err != -EOPNOTSUPP) {
+ struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+ int part_err;
+
+ main_md->part_curr = main_md->part_type;
+ part_err = mmc_blk_part_switch(host->card, md);
+ if (part_err) {
+ /*
+ * We have failed to get back into the correct
+ * partition, so we need to abort the whole request.
+ */
+ return -ENODEV;
+ }
+ }
+ return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+ md->reset_done &= ~type;
+}
+
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
+ int err = 0, type = MMC_BLK_DISCARD;
if (!mmc_can_erase(card)) {
err = -EOPNOTSUPP;
@@ -701,11 +756,13 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_trim(card))
+ if (mmc_can_discard(card))
+ arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
arg = MMC_TRIM_ARG;
else
arg = MMC_ERASE_ARG;
-
+retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
@@ -718,6 +775,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
}
err = mmc_erase(card, from, nr, arg);
out:
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
@@ -731,13 +792,20 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
+ int err = 0, type = MMC_BLK_SECDISCARD;
- if (!mmc_can_secure_erase_trim(card)) {
+ if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
err = -EOPNOTSUPP;
goto out;
}
+ /* The sanitize operation is supported at v4.5 only */
+ if (mmc_can_sanitize(card)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1, 0);
+ goto out;
+ }
+
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
@@ -745,7 +813,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
arg = MMC_SECURE_TRIM1_ARG;
else
arg = MMC_SECURE_ERASE_ARG;
-
+retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
@@ -769,6 +837,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
}
out:
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
@@ -779,16 +851,18 @@ out:
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int ret = 0;
+
+ ret = mmc_flush_cache(card);
+ if (ret)
+ ret = -EIO;
- /*
- * No-op, only service this because we need REQ_FUA for reliable
- * writes.
- */
spin_lock_irq(&md->lock);
- __blk_end_request_all(req, 0);
+ __blk_end_request_all(req, ret);
spin_unlock_irq(&md->lock);
- return 1;
+ return ret ? 0 : 1;
}
/*
@@ -825,11 +899,11 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
static int mmc_blk_err_check(struct mmc_card *card,
struct mmc_async_req *areq)
{
- enum mmc_blk_status ret = MMC_BLK_SUCCESS;
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
+ int ecc_err = 0;
/*
* sbc.error indicates a problem with the set block count
@@ -841,8 +915,9 @@ static int mmc_blk_err_check(struct mmc_card *card,
* stop.error indicates a problem with the stop command. Data
* may have been transferred, or may still be transferring.
*/
- if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq)) {
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
case ERR_RETRY:
return MMC_BLK_RETRY;
case ERR_ABORT:
@@ -873,7 +948,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
do {
int err = get_card_status(card, &status, 5);
if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
+ pr_err("%s: error %d requesting status\n",
req->rq_disk->disk_name, err);
return MMC_BLK_CMD_ERR;
}
@@ -894,23 +969,21 @@ static int mmc_blk_err_check(struct mmc_card *card,
brq->cmd.resp[0], brq->stop.resp[0]);
if (rq_data_dir(req) == READ) {
- if (brq->data.blocks > 1) {
- /* Redo read one sector at a time */
- pr_warning("%s: retrying using single block read\n",
- req->rq_disk->disk_name);
- return MMC_BLK_RETRY_SINGLE;
- }
+ if (ecc_err)
+ return MMC_BLK_ECC_ERR;
return MMC_BLK_DATA_ERR;
} else {
return MMC_BLK_CMD_ERR;
}
}
- if (ret == MMC_BLK_SUCCESS &&
- blk_rq_bytes(req) != brq->data.bytes_xfered)
- ret = MMC_BLK_PARTIAL;
+ if (!brq->data.bytes_xfered)
+ return MMC_BLK_RETRY;
- return ret;
+ if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+ return MMC_BLK_PARTIAL;
+
+ return MMC_BLK_SUCCESS;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -957,13 +1030,20 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
if (brq->data.blocks > card->host->max_blk_count)
brq->data.blocks = card->host->max_blk_count;
- /*
- * After a read error, we redo the request one sector at a time
- * in order to accurately determine which sectors can be read
- * successfully.
- */
- if (disable_multi && brq->data.blocks > 1)
- brq->data.blocks = 1;
+ if (brq->data.blocks > 1) {
+ /*
+ * After a read error, we redo the request one sector
+ * at a time in order to accurately determine which
+ * sectors can be read successfully.
+ */
+ if (disable_multi)
+ brq->data.blocks = 1;
+
+ /* Some controllers can't do multiblock reads due to hw bugs */
+ if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
+ rq_data_dir(req) == READ)
+ brq->data.blocks = 1;
+ }
if (brq->data.blocks > 1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
@@ -1049,12 +1129,41 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_queue_bounce_pre(mqrq);
}
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+ struct mmc_blk_request *brq, struct request *req,
+ int ret)
+{
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+ *
+ * If the card is not SD, we can still ok written sectors
+ * as reported by the controller (which might be less than
+ * the real number of written sectors, but never more).
+ */
+ if (mmc_card_sd(card)) {
+ u32 blocks;
+
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1) {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, blocks << 9);
+ spin_unlock_irq(&md->lock);
+ }
+ } else {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
+ return ret;
+}
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
- int ret = 1, disable_multi = 0, retry = 0;
+ int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
struct request *req;
@@ -1076,6 +1185,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
req = mq_rq->req;
+ type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
switch (status) {
@@ -1084,18 +1194,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
/*
* A block was successfully transferred.
*/
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
ret = __blk_end_request(req, 0,
brq->data.bytes_xfered);
spin_unlock_irq(&md->lock);
+ /*
+ * If the blk_end_request function returns non-zero even
+ * though all data has been transferred and no errors
+ * were returned by the host controller, it's a bug.
+ */
if (status == MMC_BLK_SUCCESS && ret) {
- /*
- * The blk_end_request has returned non zero
- * even though all data is transfered and no
- * erros returned by host.
- * If this happen it's a bug.
- */
- printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+ pr_err("%s BUG rq_tot %d d_xfer %d\n",
__func__, blk_rq_bytes(req),
brq->data.bytes_xfered);
rqc = NULL;
@@ -1103,16 +1213,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
}
break;
case MMC_BLK_CMD_ERR:
- goto cmd_err;
- case MMC_BLK_RETRY_SINGLE:
- disable_multi = 1;
- break;
+ ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
case MMC_BLK_RETRY:
if (retry++ < 5)
break;
+ /* Fall through */
case MMC_BLK_ABORT:
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
goto cmd_abort;
- case MMC_BLK_DATA_ERR:
+ case MMC_BLK_DATA_ERR: {
+ int err;
+
+ err = mmc_blk_reset(md, card->host, type);
+ if (!err)
+ break;
+ if (err == -ENODEV)
+ goto cmd_abort;
+ /* Fall through */
+ }
+ case MMC_BLK_ECC_ERR:
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ disable_multi = 1;
+ break;
+ }
/*
* After an error, we redo I/O one sector at a
* time, so we only reach here after trying to
@@ -1129,7 +1259,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
if (ret) {
/*
- * In case of a none complete request
+ * In case of an incomplete request
* prepare it again and resend.
*/
mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
@@ -1139,30 +1269,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 1;
- cmd_err:
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
- * If the card is not SD, we can still ok written sectors
- * as reported by the controller (which might be less than
- * the real number of written sectors, but never more).
- */
- if (mmc_card_sd(card)) {
- u32 blocks;
-
- blocks = mmc_sd_num_wr_blocks(card);
- if (blocks != (u32)-1) {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, blocks << 9);
- spin_unlock_irq(&md->lock);
- }
- } else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
- }
-
cmd_abort:
spin_lock_irq(&md->lock);
while (ret)
@@ -1190,6 +1296,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_part_switch(card, md);
if (ret) {
+ if (req) {
+ spin_lock_irq(&md->lock);
+ __blk_end_request_all(req, -EIO);
+ spin_unlock_irq(&md->lock);
+ }
ret = 0;
goto out;
}
@@ -1374,32 +1485,35 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- printk(KERN_INFO "%s: %s %s partition %u %s\n",
+ pr_info("%s: %s %s partition %u %s\n",
part_md->disk->disk_name, mmc_card_id(card),
mmc_card_name(card), part_md->part_type, cap_str);
return 0;
}
+/* MMC Physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD, a block device will be allocated
+ * to provide access to the partition.
+ */
+
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int ret = 0;
+ int idx, ret = 0;
if (!mmc_card_mmc(card))
return 0;
- if (card->ext_csd.boot_size) {
- ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
- card->ext_csd.boot_size >> 9,
- true,
- "boot0");
- if (ret)
- return ret;
- ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
- card->ext_csd.boot_size >> 9,
- true,
- "boot1");
- if (ret)
- return ret;
+ for (idx = 0; idx < card->nr_parts; idx++) {
+ if (card->part[idx].size) {
+ ret = mmc_blk_alloc_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].force_ro,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -1415,7 +1529,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
mmc_release_host(card->host);
if (err) {
- printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
+ pr_err("%s: unable to set block size to 512: %d\n",
md->disk->disk_name, err);
return -EINVAL;
}
@@ -1517,7 +1631,7 @@ static int mmc_blk_probe(struct mmc_card *card)
string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- printk(KERN_INFO "%s: %s %s %s %s\n",
+ pr_info("%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? "(ro)" : "");
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 2bf229acd3b..b038c4a9468 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -22,6 +22,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#define RESULT_OK 0
#define RESULT_FAIL 1
@@ -250,7 +251,7 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- printk(KERN_INFO "%s: Warning: Host did not "
+ pr_info("%s: Warning: Host did not "
"wait for busy state to end.\n",
mmc_hostname(test->card->host));
}
@@ -552,7 +553,7 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
rate = mmc_test_rate(bytes, &ts);
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
- printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
@@ -578,7 +579,7 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
- printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+ pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
"%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
@@ -1408,7 +1409,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
- printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
+ pr_info("%s: Highmem not configured - test skipped\n",
mmc_hostname(test->card->host));
return 0;
}
@@ -1435,7 +1436,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
t->max_seg_sz, &t->sg_len, min_sg_len);
}
if (err)
- printk(KERN_INFO "%s: Failed to map sg list\n",
+ pr_info("%s: Failed to map sg list\n",
mmc_hostname(test->card->host));
return err;
}
@@ -2135,7 +2136,7 @@ static int mmc_test_rw_multiple(struct mmc_test_card *test,
return ret;
err:
- printk(KERN_INFO "[%s] error\n", __func__);
+ pr_info("[%s] error\n", __func__);
return ret;
}
@@ -2149,7 +2150,7 @@ static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
if (rw->do_nonblock_req &&
((!pre_req && post_req) || (pre_req && !post_req))) {
- printk(KERN_INFO "error: only one of pre/post is defined\n");
+ pr_info("error: only one of pre/post is defined\n");
return -EINVAL;
}
@@ -2328,6 +2329,31 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
return mmc_test_rw_multiple_sg_len(test, &test_data);
}
+/*
+ * eMMC hardware reset.
+ */
+static int mmc_test_hw_reset(struct mmc_test_card *test)
+{
+ struct mmc_card *card = test->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ err = mmc_hw_reset_check(host);
+ if (!err)
+ return RESULT_OK;
+
+ if (err == -ENOSYS)
+ return RESULT_FAIL;
+
+ if (err != -EOPNOTSUPP)
+ return err;
+
+ if (!mmc_can_reset(card))
+ return RESULT_UNSUP_CARD;
+
+ return RESULT_UNSUP_HOST;
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2650,6 +2676,11 @@ static const struct mmc_test_case mmc_test_cases[] = {
.run = mmc_test_profile_sglen_r_nonblock_perf,
.cleanup = mmc_test_area_cleanup,
},
+
+ {
+ .name = "eMMC hardware reset",
+ .run = mmc_test_hw_reset,
+ },
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2660,7 +2691,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
int i, ret;
- printk(KERN_INFO "%s: Starting tests of card %s...\n",
+ pr_info("%s: Starting tests of card %s...\n",
mmc_hostname(test->card->host), mmc_card_id(test->card));
mmc_claim_host(test->card->host);
@@ -2671,14 +2702,14 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (testcase && ((i + 1) != testcase))
continue;
- printk(KERN_INFO "%s: Test case %d. %s...\n",
+ pr_info("%s: Test case %d. %s...\n",
mmc_hostname(test->card->host), i + 1,
mmc_test_cases[i].name);
if (mmc_test_cases[i].prepare) {
ret = mmc_test_cases[i].prepare(test);
if (ret) {
- printk(KERN_INFO "%s: Result: Prepare "
+ pr_info("%s: Result: Prepare "
"stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
@@ -2708,25 +2739,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
ret = mmc_test_cases[i].run(test);
switch (ret) {
case RESULT_OK:
- printk(KERN_INFO "%s: Result: OK\n",
+ pr_info("%s: Result: OK\n",
mmc_hostname(test->card->host));
break;
case RESULT_FAIL:
- printk(KERN_INFO "%s: Result: FAILED\n",
+ pr_info("%s: Result: FAILED\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_HOST:
- printk(KERN_INFO "%s: Result: UNSUPPORTED "
+ pr_info("%s: Result: UNSUPPORTED "
"(by host)\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_CARD:
- printk(KERN_INFO "%s: Result: UNSUPPORTED "
+ pr_info("%s: Result: UNSUPPORTED "
"(by card)\n",
mmc_hostname(test->card->host));
break;
default:
- printk(KERN_INFO "%s: Result: ERROR (%d)\n",
+ pr_info("%s: Result: ERROR (%d)\n",
mmc_hostname(test->card->host), ret);
}
@@ -2737,7 +2768,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (mmc_test_cases[i].cleanup) {
ret = mmc_test_cases[i].cleanup(test);
if (ret) {
- printk(KERN_INFO "%s: Warning: Cleanup "
+ pr_info("%s: Warning: Cleanup "
"stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
@@ -2747,7 +2778,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_release_host(test->card->host);
- printk(KERN_INFO "%s: Tests completed.\n",
+ pr_info("%s: Tests completed.\n",
mmc_hostname(test->card->host));
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 45fb362e3f0..dcad59cbfef 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -108,7 +108,7 @@ static void mmc_request(struct request_queue *q)
wake_up_process(mq->thread);
}
-struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
struct scatterlist *sg;
@@ -140,7 +140,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
q->limits.discard_granularity = 0;
- if (mmc_can_secure_erase_trim(card))
+ if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
@@ -197,13 +197,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (bouncesz > 512) {
mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
if (!mqrq_cur->bounce_buf) {
- printk(KERN_WARNING "%s: unable to "
+ pr_warning("%s: unable to "
"allocate bounce cur buffer\n",
mmc_card_name(card));
}
mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
if (!mqrq_prev->bounce_buf) {
- printk(KERN_WARNING "%s: unable to "
+ pr_warning("%s: unable to "
"allocate bounce prev buffer\n",
mmc_card_name(card));
kfree(mqrq_cur->bounce_buf);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index c8c9edb3d7c..2c151e18c9e 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -1082,7 +1082,7 @@ static int sdio_uart_probe(struct sdio_func *func,
return -ENOMEM;
if (func->class == SDIO_CLASS_UART) {
- printk(KERN_WARNING "%s: need info on UART class basic setup\n",
+ pr_warning("%s: need info on UART class basic setup\n",
sdio_func_id(func));
kfree(port);
return -ENOSYS;
@@ -1101,23 +1101,23 @@ static int sdio_uart_probe(struct sdio_func *func,
break;
}
if (!tpl) {
- printk(KERN_WARNING
+ pr_warning(
"%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
sdio_func_id(func));
kfree(port);
return -EINVAL;
}
- printk(KERN_DEBUG "%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
+ pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
sdio_func_id(func), tpl->data[2], tpl->data[3]);
port->regs_offset = (tpl->data[4] << 0) |
(tpl->data[5] << 8) |
(tpl->data[6] << 16);
- printk(KERN_DEBUG "%s: regs offset = 0x%x\n",
+ pr_debug("%s: regs offset = 0x%x\n",
sdio_func_id(func), port->regs_offset);
port->uartclk = tpl->data[7] * 115200;
if (port->uartclk == 0)
port->uartclk = 115200;
- printk(KERN_DEBUG "%s: clk %d baudcode %u 4800-div %u\n",
+ pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
sdio_func_id(func), port->uartclk,
tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
} else {
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 393d817ed04..6be49249895 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -11,9 +11,11 @@
* MMC card bus driver model
*/
+#include <linux/export.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
@@ -295,7 +297,7 @@ int mmc_add_card(struct mmc_card *card)
}
if (mmc_host_is_spi(card->host)) {
- printk(KERN_INFO "%s: new %s%s%s card on SPI\n",
+ pr_info("%s: new %s%s%s card on SPI\n",
mmc_hostname(card->host),
mmc_card_highspeed(card) ? "high speed " : "",
mmc_card_ddr_mode(card) ? "DDR " : "",
@@ -334,10 +336,10 @@ void mmc_remove_card(struct mmc_card *card)
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
- printk(KERN_INFO "%s: SPI card removed\n",
+ pr_info("%s: SPI card removed\n",
mmc_hostname(card->host));
} else {
- printk(KERN_INFO "%s: card %04x removed\n",
+ pr_info("%s: card %04x removed\n",
mmc_hostname(card->host), card->rca);
}
device_del(&card->dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b27b94078c2..5278ffb20e7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -24,6 +24,8 @@
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
+#include <linux/fault-inject.h>
+#include <linux/random.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -83,6 +85,43 @@ static void mmc_flush_scheduled_work(void)
flush_workqueue(workqueue);
}
+#ifdef CONFIG_FAIL_MMC_REQUEST
+
+/*
+ * Internal function. Inject random data errors.
+ * If mmc_data is NULL no errors are injected.
+ */
+static void mmc_should_fail_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ static const int data_errors[] = {
+ -ETIMEDOUT,
+ -EILSEQ,
+ -EIO,
+ };
+
+ if (!data)
+ return;
+
+ if (cmd->error || data->error ||
+ !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
+ return;
+
+ data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
+ data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
+}
+
+#else /* CONFIG_FAIL_MMC_REQUEST */
+
+static inline void mmc_should_fail_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+}
+
+#endif /* CONFIG_FAIL_MMC_REQUEST */
+
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
@@ -102,13 +141,15 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
}
if (err && cmd->retries) {
- pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
- mmc_hostname(host), cmd->opcode, err);
-
- cmd->retries--;
- cmd->error = 0;
- host->ops->request(host, mrq);
+ /*
+ * Request starter must handle retries - see
+ * mmc_wait_for_req_done().
+ */
+ if (mrq->done)
+ mrq->done(mrq);
} else {
+ mmc_should_fail_request(host, mrq);
+
led_trigger_event(host->led, LED_OFF);
pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
@@ -212,7 +253,21 @@ static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_request *mrq)
{
- wait_for_completion(&mrq->completion);
+ struct mmc_command *cmd;
+
+ while (1) {
+ wait_for_completion(&mrq->completion);
+
+ cmd = mrq->cmd;
+ if (!cmd->error || !cmd->retries)
+ break;
+
+ pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
+ mmc_hostname(host), cmd->opcode, cmd->error);
+ cmd->retries--;
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ }
}
/**
@@ -279,8 +334,14 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_wait_for_req_done(host, host->areq->mrq);
err = host->areq->err_check(host->card, host->areq);
if (err) {
+ /* post process the completed failed request */
mmc_post_req(host, host->areq->mrq, 0);
if (areq)
+ /*
+ * Cancel the new prepared request, because
+ * it can't run until the failed
+ * request has been properly handled.
+ */
mmc_post_req(host, areq->mrq, -EINVAL);
host->areq = NULL;
@@ -319,6 +380,63 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
EXPORT_SYMBOL(mmc_wait_for_req);
/**
+ * mmc_interrupt_hpi - Issue a High Priority Interrupt
+ * @card: the MMC card associated with the HPI transfer
+ *
+ * Issue a High Priority Interrupt, and keep checking the card status
+ * until it is out of prg-state.
+ */
+int mmc_interrupt_hpi(struct mmc_card *card)
+{
+ int err;
+ u32 status;
+
+ BUG_ON(!card);
+
+ if (!card->ext_csd.hpi_en) {
+ pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
+ return 1;
+ }
+
+ mmc_claim_host(card->host);
+ err = mmc_send_status(card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+ goto out;
+ }
+
+ /*
+ * If the card status is in PRG-state, we can send the HPI command.
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
+ do {
+ /*
+ * We don't know when the HPI command will finish
+ * processing, so we need to resend HPI until out
+ * of prg-state, and keep checking the card status
+ * with SEND_STATUS. If a timeout error occurs when
+ * sending the HPI command, we are already out of
+ * prg-state.
+ */
+ err = mmc_send_hpi_cmd(card, &status);
+ if (err)
+ pr_debug("%s: abort HPI (%d error)\n",
+ mmc_hostname(card->host), err);
+
+ err = mmc_send_status(card, &status);
+ if (err)
+ break;
+ } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+ } else
+ pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
+
+out:
+ mmc_release_host(card->host);
+ return err;
+}
+EXPORT_SYMBOL(mmc_interrupt_hpi);
+
+/**
* mmc_wait_for_cmd - start a command and wait for completion
* @host: MMC host to start command
* @cmd: MMC command to start
@@ -330,7 +448,7 @@ EXPORT_SYMBOL(mmc_wait_for_req);
*/
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
WARN_ON(!host->claimed);
@@ -1119,13 +1237,11 @@ static void mmc_power_up(struct mmc_host *host)
bit = fls(host->ocr_avail) - 1;
host->ios.vdd = bit;
- if (mmc_host_is_spi(host)) {
+ if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
- host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
- } else {
+ else
host->ios.chip_select = MMC_CS_DONTCARE;
- host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
- }
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
host->ios.power_mode = MMC_POWER_UP;
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
@@ -1151,13 +1267,45 @@ static void mmc_power_up(struct mmc_host *host)
mmc_host_clk_release(host);
}
-static void mmc_power_off(struct mmc_host *host)
+void mmc_power_off(struct mmc_host *host)
{
+ struct mmc_card *card;
+ unsigned int notify_type;
+ unsigned int timeout;
+ int err;
+
mmc_host_clk_hold(host);
+ card = host->card;
host->ios.clock = 0;
host->ios.vdd = 0;
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+
/*
* Reset ocr mask to be the highest possible voltage supported for
* this mmc host. This value will be used at next power up.
@@ -1173,6 +1321,13 @@ static void mmc_power_off(struct mmc_host *host)
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
+ /*
+ * Some configurations, such as the 802.11 SDIO card in the OLPC
+ * XO-1.5, require a short delay after poweroff before the card
+ * can be successfully turned on again.
+ */
+ mmc_delay(1);
+
mmc_host_clk_release(host);
}
@@ -1241,8 +1396,7 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
}
/*
- * Remove the current bus handler from a host. Assumes that there are
- * no interesting cards left, so the bus is powered down.
+ * Remove the current bus handler from a host.
*/
void mmc_detach_bus(struct mmc_host *host)
{
@@ -1259,8 +1413,6 @@ void mmc_detach_bus(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
- mmc_power_off(host);
-
mmc_bus_put(host);
}
@@ -1478,9 +1630,9 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
- printk(KERN_ERR "mmc_erase: group start error %d, "
+ pr_err("mmc_erase: group start error %d, "
"status %#x\n", err, cmd.resp[0]);
- err = -EINVAL;
+ err = -EIO;
goto out;
}
@@ -1493,9 +1645,9 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
- printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
+ pr_err("mmc_erase: group end error %d, status %#x\n",
err, cmd.resp[0]);
- err = -EINVAL;
+ err = -EIO;
goto out;
}
@@ -1506,7 +1658,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
- printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
+ pr_err("mmc_erase: erase error %d, status %#x\n",
err, cmd.resp[0]);
err = -EIO;
goto out;
@@ -1523,7 +1675,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
/* Do not retry else we can't see errors */
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err || (cmd.resp[0] & 0xFDF92000)) {
- printk(KERN_ERR "error %d requesting status %#x\n",
+ pr_err("error %d requesting status %#x\n",
err, cmd.resp[0]);
err = -EIO;
goto out;
@@ -1614,10 +1766,32 @@ int mmc_can_trim(struct mmc_card *card)
{
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
return 1;
+ if (mmc_can_discard(card))
+ return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
+int mmc_can_discard(struct mmc_card *card)
+{
+ /*
+ * As there's no way to detect the discard support bit at v4.5,
+ * use the s/w feature support field.
+ */
+ if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_discard);
+
+int mmc_can_sanitize(struct mmc_card *card)
+{
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_sanitize);
+
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
@@ -1727,6 +1901,94 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
}
EXPORT_SYMBOL(mmc_set_blocklen);
+static void mmc_hw_reset_for_init(struct mmc_host *host)
+{
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return;
+ mmc_host_clk_hold(host);
+ host->ops->hw_reset(host);
+ mmc_host_clk_release(host);
+}
+
+int mmc_can_reset(struct mmc_card *card)
+{
+ u8 rst_n_function;
+
+ if (!mmc_card_mmc(card))
+ return 0;
+ rst_n_function = card->ext_csd.rst_n_function;
+ if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(mmc_can_reset);
+
+static int mmc_do_hw_reset(struct mmc_host *host, int check)
+{
+ struct mmc_card *card = host->card;
+
+ if (!host->bus_ops->power_restore)
+ return -EOPNOTSUPP;
+
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return -EOPNOTSUPP;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!mmc_can_reset(card))
+ return -EOPNOTSUPP;
+
+ mmc_host_clk_hold(host);
+ mmc_set_clock(host, host->f_init);
+
+ host->ops->hw_reset(host);
+
+ /* If the reset has happened, then a status command will fail */
+ if (check) {
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(card->host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (!err) {
+ mmc_host_clk_release(host);
+ return -ENOSYS;
+ }
+ }
+
+ host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
+ if (mmc_host_is_spi(host)) {
+ host->ios.chip_select = MMC_CS_HIGH;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ } else {
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ }
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ mmc_set_ios(host);
+
+ mmc_host_clk_release(host);
+
+ return host->bus_ops->power_restore(host);
+}
+
+int mmc_hw_reset(struct mmc_host *host)
+{
+ return mmc_do_hw_reset(host, 0);
+}
+EXPORT_SYMBOL(mmc_hw_reset);
+
+int mmc_hw_reset_check(struct mmc_host *host)
+{
+ return mmc_do_hw_reset(host, 1);
+}
+EXPORT_SYMBOL(mmc_hw_reset_check);
+
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
host->f_init = freq;
@@ -1738,6 +2000,12 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
mmc_power_up(host);
/*
+ * Some eMMCs (with VCCQ always on) may not be reset after power up, so
+ * do a hardware reset if possible.
+ */
+ mmc_hw_reset_for_init(host);
+
+ /*
* sdio_reset sends CMD52 to reset card. Since we do not know
* if the card is being re-initialized, just send it. CMD52
* should be ignored by SD/eMMC cards.
@@ -1845,6 +2113,7 @@ void mmc_stop_host(struct mmc_host *host)
mmc_claim_host(host);
mmc_detach_bus(host);
+ mmc_power_off(host);
mmc_release_host(host);
mmc_bus_put(host);
return;
@@ -1946,6 +2215,65 @@ int mmc_card_can_sleep(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_card_can_sleep);
+/*
+ * Flush the cache to the non-volatile storage.
+ */
+int mmc_flush_cache(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
+ return err;
+
+ if (mmc_card_mmc(card) &&
+ (card->ext_csd.cache_size > 0) &&
+ (card->ext_csd.cache_ctrl & 1)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1, 0);
+ if (err)
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(card->host), err);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_flush_cache);
+
+/*
+ * Turn the cache ON/OFF.
+ * Turning the cache OFF shall trigger flushing of the data
+ * to the non-volatile storage.
+ */
+int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
+{
+ struct mmc_card *card = host->card;
+ int err = 0;
+
+ if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
+ mmc_card_is_removable(host))
+ return err;
+
+ if (card && mmc_card_mmc(card) &&
+ (card->ext_csd.cache_size > 0)) {
+ enable = !!enable;
+
+ if (card->ext_csd.cache_ctrl ^ enable)
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, enable, 0);
+ if (err)
+ pr_err("%s: cache %s error %d\n",
+ mmc_hostname(card->host),
+ enable ? "on" : "off",
+ err);
+ else
+ card->ext_csd.cache_ctrl = enable;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cache_ctrl);
+
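A hedged sketch of the intended call pattern for the new cache helpers before removing power from the card; the wrapper name example_prepare_power_off() is illustrative. Both helpers return 0 immediately unless the host advertises MMC_CAP2_CACHE_CTRL.

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

static int example_prepare_power_off(struct mmc_card *card)
{
	int err;

	err = mmc_flush_cache(card);			/* flush dirty data first */
	if (!err)
		err = mmc_cache_ctrl(card->host, 0);	/* OFF also triggers a flush */
	return err;
}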
#ifdef CONFIG_PM
/**
@@ -1960,23 +2288,39 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
+ err = mmc_cache_ctrl(host, 0);
+ if (err)
+ goto out;
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
- if (host->bus_ops->suspend)
- err = host->bus_ops->suspend(host);
- if (err == -ENOSYS || !host->bus_ops->resume) {
- /*
- * We simply "remove" the card in this case.
- * It will be redetected on resume.
- */
- if (host->bus_ops->remove)
- host->bus_ops->remove(host);
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_release_host(host);
- host->pm_flags = 0;
- err = 0;
+
+ /*
+ * A long response time is not acceptable for device drivers
+ * during suspend. Rather than calling mmc_claim_host(), which
+ * could wait "forever", try to pre-claim the host and bail out
+ * if it is busy.
+ */
+ if (mmc_try_claim_host(host)) {
+ if (host->bus_ops->suspend)
+ err = host->bus_ops->suspend(host);
+ if (err == -ENOSYS || !host->bus_ops->resume) {
+ /*
+ * We simply "remove" the card in this case.
+ * It will be redetected on resume.
+ */
+ if (host->bus_ops->remove)
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ host->pm_flags = 0;
+ err = 0;
+ }
+ mmc_do_release_host(host);
+ } else {
+ err = -EBUSY;
}
}
mmc_bus_put(host);
@@ -1984,6 +2328,7 @@ int mmc_suspend_host(struct mmc_host *host)
if (!err && !mmc_card_keep_power(host))
mmc_power_off(host);
+out:
return err;
}
@@ -2018,7 +2363,7 @@ int mmc_resume_host(struct mmc_host *host)
BUG_ON(!host->bus_ops->resume);
err = host->bus_ops->resume(host);
if (err) {
- printk(KERN_WARNING "%s: error %d during resume "
+ pr_warning("%s: error %d during resume "
"(card was removed?)\n",
mmc_hostname(host), err);
err = 0;
@@ -2049,6 +2394,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 1;
+ host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
spin_unlock_irqrestore(&host->lock, flags);
cancel_delayed_work_sync(&host->detect);
@@ -2061,6 +2407,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
host->bus_ops->remove(host);
mmc_detach_bus(host);
+ mmc_power_off(host);
mmc_release_host(host);
host->pm_flags = 0;
break;
@@ -2071,6 +2418,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
+ host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
spin_unlock_irqrestore(&host->lock, flags);
mmc_detect_change(host, 0);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index d9411ed2a39..14664f1fb16 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -43,6 +43,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
bool cmd11);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
+void mmc_power_off(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 998797ed67a..3923880118b 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -7,11 +7,14 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/fault-inject.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -19,6 +22,14 @@
#include "core.h"
#include "mmc_ops.h"
+#ifdef CONFIG_FAIL_MMC_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_default_attr);
+static char *fail_request;
+module_param(fail_request, charp, 0);
+
+#endif /* CONFIG_FAIL_MMC_REQUEST */
+
/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
static int mmc_ios_show(struct seq_file *s, void *data)
{
@@ -113,6 +124,15 @@ static int mmc_ios_show(struct seq_file *s, void *data)
case MMC_TIMING_SD_HS:
str = "sd high-speed";
break;
+ case MMC_TIMING_UHS_SDR50:
+ str = "sd uhs SDR50";
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ str = "sd uhs SDR104";
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ str = "sd uhs DDR50";
+ break;
default:
str = "invalid";
break;
@@ -188,6 +208,15 @@ void mmc_add_host_debugfs(struct mmc_host *host)
root, &host->clk_delay))
goto err_node;
#endif
+#ifdef CONFIG_FAIL_MMC_REQUEST
+ if (fail_request)
+ setup_fault_attr(&fail_default_attr, fail_request);
+ host->fail_mmc_request = fail_default_attr;
+ if (IS_ERR(fault_create_debugfs_attr("fail_mmc_request",
+ root,
+ &host->fail_mmc_request)))
+ goto err_node;
+#endif
return;
err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 793d0a0dad8..e8a5eb38748 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pagemap.h>
+#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>
@@ -301,6 +302,17 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
+ /*
+ * Enable runtime power management by default. This flag was added due
+ * to runtime power management causing disruption for some users, but
+ * the power on/off code has been improved since then.
+ *
+ * We'll enable this flag by default as an experiment, and if no
+ * problems are reported, we will follow up later and remove the flag
+ * altogether.
+ */
+ host->caps = MMC_CAP_POWER_OFF_CARD;
+
return host;
free:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5700b1cbdfe..dbf421a6279 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -101,7 +102,7 @@ static int mmc_decode_cid(struct mmc_card *card)
break;
default:
- printk(KERN_ERR "%s: card has unknown MMCA version %d\n",
+ pr_err("%s: card has unknown MMCA version %d\n",
mmc_hostname(card->host), card->csd.mmca_vsn);
return -EINVAL;
}
@@ -135,7 +136,7 @@ static int mmc_decode_csd(struct mmc_card *card)
*/
csd->structure = UNSTUFF_BITS(resp, 126, 2);
if (csd->structure == 0) {
- printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
+ pr_err("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd->structure);
return -EINVAL;
}
@@ -195,7 +196,7 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
*/
ext_csd = kmalloc(512, GFP_KERNEL);
if (!ext_csd) {
- printk(KERN_ERR "%s: could not allocate a buffer to "
+ pr_err("%s: could not allocate a buffer to "
"receive the ext_csd.\n", mmc_hostname(card->host));
return -ENOMEM;
}
@@ -217,12 +218,12 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
* stored in their CSD.
*/
if (card->csd.capacity == (4096 * 512)) {
- printk(KERN_ERR "%s: unable to read EXT_CSD "
+ pr_err("%s: unable to read EXT_CSD "
"on a possible high capacity card. "
"Card will be ignored.\n",
mmc_hostname(card->host));
} else {
- printk(KERN_WARNING "%s: unable to read "
+ pr_warning("%s: unable to read "
"EXT_CSD, performance might "
"suffer.\n",
mmc_hostname(card->host));
@@ -239,7 +240,9 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
*/
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
- int err = 0;
+ int err = 0, idx;
+ unsigned int part_size;
+ u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;
BUG_ON(!card);
@@ -250,7 +253,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
if (card->csd.structure == 3) {
if (card->ext_csd.raw_ext_csd_structure > 2) {
- printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
+ pr_err("%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
card->ext_csd.raw_ext_csd_structure);
err = -EINVAL;
@@ -260,7 +263,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
if (card->ext_csd.rev > 6) {
- printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
+ pr_err("%s: unrecognised EXT_CSD revision %d\n",
mmc_hostname(card->host), card->ext_csd.rev);
err = -EINVAL;
goto out;
@@ -306,7 +309,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
break;
default:
/* MMC v4 spec says this cannot happen */
- printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
+ pr_warning("%s: card is mmc v4 but doesn't "
"support any high-speed modes.\n",
mmc_hostname(card->host));
}
@@ -340,7 +343,14 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
* There are two boot regions of equal size, defined in
* multiples of 128K.
*/
- card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
+ if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
+ for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
+ part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
+ mmc_part_add(card, part_size,
+ EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
+ "boot%d", idx, true);
+ }
+ }
}
card->ext_csd.raw_hc_erase_gap_size =
@@ -359,11 +369,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
* card has the Enhanced area enabled. If so, export enhanced
* area offset and size to user by adding sysfs interface.
*/
+ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
(ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
- u8 hc_erase_grp_sz =
+ hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
- u8 hc_wp_grp_sz =
+ hc_wp_grp_sz =
ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
card->ext_csd.enhanced_area_en = 1;
@@ -392,6 +403,41 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.enhanced_area_offset = -EINVAL;
card->ext_csd.enhanced_area_size = -EINVAL;
}
+
+ /*
+ * General purpose partition feature support --
+ * If the EXT_CSD reports general purpose partition sizes,
+ * record each partition's size, part_cfg and name in mmc_part.
+ */
+ if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
+ EXT_CSD_PART_SUPPORT_PART_EN) {
+ if (card->ext_csd.enhanced_area_en != 1) {
+ hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ card->ext_csd.enhanced_area_en = 1;
+ }
+
+ for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
+ if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
+ continue;
+ part_size =
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
+ << 16) +
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
+ << 8) +
+ ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
+ part_size *= (size_t)(hc_erase_grp_sz *
+ hc_wp_grp_sz);
+ mmc_part_add(card, part_size << 19,
+ EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
+ "gp%d", idx, false);
+ }
+ }
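The loop above assembles the 3-byte GP_SIZE_MULT field and converts it to bytes; the boot regions a few lines earlier use the simpler BOOT_MULT * 128 KiB rule. A hedged restatement of the general-purpose arithmetic, with an illustrative helper name:

#include <linux/types.h>

/* bytes = GP_SIZE_MULT * HC_ERASE_GRP_SIZE * HC_WP_GRP_SIZE * 512 KiB */
static u64 example_gp_partition_bytes(u32 size_mult, u8 hc_erase_grp_sz,
				      u8 hc_wp_grp_sz)
{
	return ((u64)size_mult * hc_erase_grp_sz * hc_wp_grp_sz) << 19;
}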
card->ext_csd.sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.sec_erase_mult =
@@ -402,14 +448,48 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_TRIM_MULT];
}
- if (card->ext_csd.rev >= 5)
+ if (card->ext_csd.rev >= 5) {
+ /* check whether the eMMC card supports HPI */
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
+ card->ext_csd.hpi = 1;
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+ card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+ else
+ card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+ /*
+ * Indicate the maximum timeout to close
+ * a command interrupted by HPI
+ */
+ card->ext_csd.out_of_int_time =
+ ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+ }
+
card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
+ card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
+ }
+ card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
card->erased_byte = 0xFF;
else
card->erased_byte = 0x0;
+ /* eMMC v4.5 or later */
+ if (card->ext_csd.rev >= 6) {
+ card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
+
+ card->ext_csd.generic_cmd6_time = 10 *
+ ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
+ card->ext_csd.power_off_longtime = 10 *
+ ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
+
+ card->ext_csd.cache_size =
+ ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
+ }
+
out:
return err;
}
@@ -530,6 +610,86 @@ static struct device_type mmc_type = {
};
/*
+ * Select the power class for the current bus width.
+ * If a power class is defined for the 4/8 bit bus in the
+ * extended CSD register, select it by executing the
+ * mmc_switch command.
+ */
+static int mmc_select_powerclass(struct mmc_card *card,
+ unsigned int bus_width, u8 *ext_csd)
+{
+ int err = 0;
+ unsigned int pwrclass_val;
+ unsigned int index = 0;
+ struct mmc_host *host;
+
+ BUG_ON(!card);
+
+ host = card->host;
+ BUG_ON(!host);
+
+ if (ext_csd == NULL)
+ return 0;
+
+ /* Power class selection is supported for versions >= 4.0 */
+ if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+ return 0;
+
+ /* Power class values are defined only for 4/8 bit bus */
+ if (bus_width == EXT_CSD_BUS_WIDTH_1)
+ return 0;
+
+ switch (1 << host->ios.vdd) {
+ case MMC_VDD_165_195:
+ if (host->ios.clock <= 26000000)
+ index = EXT_CSD_PWR_CL_26_195;
+ else if (host->ios.clock <= 52000000)
+ index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ EXT_CSD_PWR_CL_52_195 :
+ EXT_CSD_PWR_CL_DDR_52_195;
+ else if (host->ios.clock <= 200000000)
+ index = EXT_CSD_PWR_CL_200_195;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
+ if (host->ios.clock <= 26000000)
+ index = EXT_CSD_PWR_CL_26_360;
+ else if (host->ios.clock <= 52000000)
+ index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ EXT_CSD_PWR_CL_52_360 :
+ EXT_CSD_PWR_CL_DDR_52_360;
+ else if (host->ios.clock <= 200000000)
+ index = EXT_CSD_PWR_CL_200_360;
+ break;
+ default:
+ pr_warning("%s: Voltage range not supported "
+ "for power class.\n", mmc_hostname(host));
+ return -EINVAL;
+ }
+
+ pwrclass_val = ext_csd[index];
+
+ if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
+ EXT_CSD_PWR_CL_8BIT_SHIFT;
+ else
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
+ EXT_CSD_PWR_CL_4BIT_SHIFT;
+
+ /* If the power class is different from the default value */
+ if (pwrclass_val > 0) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_CLASS,
+ pwrclass_val,
+ card->ext_csd.generic_cmd6_time);
+ }
+
+ return err;
+}
+
+/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
@@ -548,11 +708,16 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
BUG_ON(!host);
WARN_ON(!host->claimed);
+ /* Set correct bus mode for MMC before attempting init */
+ if (!mmc_host_is_spi(host))
+ mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
+
/*
* Since we're changing the OCR value, we seem to
* need to tell some cards to go back to the idle
* state. We wait 1ms to give cards time to
* respond.
+ * mmc_go_idle() is also needed for eMMC devices that are asleep.
*/
mmc_go_idle(host);
@@ -668,7 +833,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (card->ext_csd.enhanced_area_en) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_ERASE_GROUP_DEF, 1, 0);
+ EXT_CSD_ERASE_GROUP_DEF, 1,
+ card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
@@ -706,17 +872,35 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * If the host supports the power_off_notify capability, then
+ * set the notification byte in the EXT_CSD register of the device.
+ */
+ if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
+ (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ EXT_CSD_POWER_ON,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ }
+
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+
+ /*
* Activate high speed (if supported)
*/
if ((card->ext_csd.hs_max_dtr != 0) &&
(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 1, 0);
+ EXT_CSD_HS_TIMING, 1,
+ card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
if (err) {
- printk(KERN_WARNING "%s: switch to highspeed failed\n",
+ pr_warning("%s: switch to highspeed failed\n",
mmc_hostname(card->host));
err = 0;
} else {
@@ -726,6 +910,22 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Enable HPI feature (if supported)
+ */
+ if (card->ext_csd.hpi) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HPI_MGMT, 1, 0);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warning("%s: Enabling HPI failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+ } else
+ card->ext_csd.hpi_en = 1;
+ }
+
+ /*
* Compute bus speed.
*/
max_dtr = (unsigned int)-1;
@@ -780,10 +980,18 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
bus_width = bus_widths[idx];
if (bus_width == MMC_BUS_WIDTH_1)
ddr = 0; /* no DDR for 1-bit width */
+ err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
+ ext_csd);
+ if (err)
+ pr_err("%s: power class selection to "
+ "bus width %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width);
+
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits[idx][0],
- 0);
+ card->ext_csd.generic_cmd6_time);
if (!err) {
mmc_set_bus_width(card->host, bus_width);
@@ -803,13 +1011,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
if (!err && ddr) {
+ err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
+ ext_csd);
+ if (err)
+ pr_err("%s: power class selection to "
+ "bus width %d ddr %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width, ddr);
+
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits[idx][1],
- 0);
+ card->ext_csd.generic_cmd6_time);
}
if (err) {
- printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
+ pr_warning("%s: switch to bus width %d ddr %d "
"failed\n", mmc_hostname(card->host),
1 << bus_width, ddr);
goto free_card;
@@ -840,6 +1056,23 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
}
+ /*
+ * A cache size greater than 0 indicates that the device
+ * has a cache, which can then be turned on.
+ */
+ if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
+ card->ext_csd.cache_size > 0) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 1, 0);
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ /*
+ * The cache is considered enabled only if the switch succeeded.
+ */
+ card->ext_csd.cache_ctrl = err ? 0 : 1;
+ }
+
if (!oldcard)
host->card = card;
@@ -891,6 +1124,7 @@ static void mmc_detect(struct mmc_host *host)
mmc_claim_host(host);
mmc_detach_bus(host);
+ mmc_power_off(host);
mmc_release_host(host);
}
}
@@ -900,16 +1134,20 @@ static void mmc_detect(struct mmc_host *host)
*/
static int mmc_suspend(struct mmc_host *host)
{
+ int err = 0;
+
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- if (!mmc_host_is_spi(host))
+ if (mmc_card_can_sleep(host))
+ err = mmc_card_sleep(host);
+ else if (!mmc_host_is_spi(host))
mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_release_host(host);
- return 0;
+ return err;
}
/*
@@ -1016,6 +1254,10 @@ int mmc_attach_mmc(struct mmc_host *host)
BUG_ON(!host);
WARN_ON(!host->claimed);
+ /* Set correct bus mode for MMC before attempting attach */
+ if (!mmc_host_is_spi(host))
+ mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
+
err = mmc_send_op_cond(host, 0, &ocr);
if (err)
return err;
@@ -1038,7 +1280,7 @@ int mmc_attach_mmc(struct mmc_host *host)
* support.
*/
if (ocr & 0x7F) {
- printk(KERN_WARNING "%s: card claims to support voltages "
+ pr_warning("%s: card claims to support voltages "
"below the defined range. These will be ignored.\n",
mmc_hostname(host));
ocr &= ~0x7F;
@@ -1077,7 +1319,7 @@ remove_card:
err:
mmc_detach_bus(host);
- printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
+ pr_err("%s: error %d whilst initialising MMC card\n",
mmc_hostname(host), err);
return err;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 770c3d06f5d..4d41fa984c9 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -10,6 +10,7 @@
*/
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
@@ -233,7 +234,7 @@ static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
u32 opcode, void *buf, unsigned len)
{
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
@@ -414,7 +415,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
return -EBADMSG;
} else {
if (status & 0xFDFFA000)
- printk(KERN_WARNING "%s: unexpected status %#x after "
+ pr_warning("%s: unexpected status %#x after "
"switch", mmc_hostname(card->host), status);
if (status & R1_SWITCH_ERROR)
return -EBADMSG;
@@ -454,7 +455,7 @@ static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
{
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
@@ -476,7 +477,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
else if (len == 4)
test_buf = testdata_4bit;
else {
- printk(KERN_ERR "%s: Invalid bus_width %d\n",
+ pr_err("%s: Invalid bus_width %d\n",
mmc_hostname(host), len);
kfree(data_buf);
return -EINVAL;
@@ -547,3 +548,34 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
return err;
}
+
+int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ unsigned int opcode;
+ unsigned int flags;
+ int err;
+
+ opcode = card->ext_csd.hpi_cmd;
+ if (opcode == MMC_STOP_TRANSMISSION)
+ flags = MMC_RSP_R1 | MMC_CMD_AC;
+ else if (opcode == MMC_SEND_STATUS)
+ flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ cmd.opcode = opcode;
+ cmd.arg = card->rca << 16 | 1;
+ cmd.flags = flags;
+ cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_warn("%s: error %d interrupting operation. "
+ "HPI command response %#x\n", mmc_hostname(card->host),
+ err, cmd.resp[0]);
+ return err;
+ }
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
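A hedged sketch of how a caller could interrupt a long-running foreground operation once HPI has been enabled during init (see the EXT_CSD_HPI_MGMT switch earlier in this series); the wrapper name is illustrative.

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "mmc_ops.h"

static int example_interrupt_foreground_op(struct mmc_card *card)
{
	u32 status;
	int err;

	if (!card->ext_csd.hpi_en)
		return -EOPNOTSUPP;	/* HPI was not enabled on this card */

	err = mmc_send_hpi_cmd(card, &status);
	if (!err)
		pr_debug("%s: HPI sent, status %#x\n",
			 mmc_hostname(card->host), status);
	return err;
}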
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 9276946fa5b..3dd8941c298 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_card_sleepawake(struct mmc_host *host, int sleep);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
+int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
#endif
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 3a596217029..06ee1aeaace 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/mmc/card.h>
#ifndef SDIO_VENDOR_ID_TI
@@ -21,6 +22,14 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+#ifndef SDIO_VENDOR_ID_STE
+#define SDIO_VENDOR_ID_STE 0x0020
+#endif
+
+#ifndef SDIO_DEVICE_ID_STE_CW1200
+#define SDIO_DEVICE_ID_STE_CW1200 0x2280
+#endif
+
/*
* This hook just adds a quirk for all sdio devices
*/
@@ -46,6 +55,9 @@ static const struct mmc_fixup mmc_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_DISABLE_CD),
+ SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
+ add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 0370e03e314..f2a05ea40f2 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -163,7 +164,7 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->erase_size = 1;
break;
default:
- printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
+ pr_err("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd_struct);
return -EINVAL;
}
@@ -187,7 +188,7 @@ static int mmc_decode_scr(struct mmc_card *card)
scr_struct = UNSTUFF_BITS(resp, 60, 4);
if (scr_struct != 0) {
- printk(KERN_ERR "%s: unrecognised SCR structure version %d\n",
+ pr_err("%s: unrecognised SCR structure version %d\n",
mmc_hostname(card->host), scr_struct);
return -EINVAL;
}
@@ -218,7 +219,7 @@ static int mmc_read_ssr(struct mmc_card *card)
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
- printk(KERN_WARNING "%s: card lacks mandatory SD Status "
+ pr_warning("%s: card lacks mandatory SD Status "
"function.\n", mmc_hostname(card->host));
return 0;
}
@@ -229,7 +230,7 @@ static int mmc_read_ssr(struct mmc_card *card)
err = mmc_app_sd_status(card, ssr);
if (err) {
- printk(KERN_WARNING "%s: problem reading SD Status "
+ pr_warning("%s: problem reading SD Status "
"register.\n", mmc_hostname(card->host));
err = 0;
goto out;
@@ -253,7 +254,7 @@ static int mmc_read_ssr(struct mmc_card *card)
card->ssr.erase_offset = eo * 1000;
}
} else {
- printk(KERN_WARNING "%s: SD Status: Invalid Allocation Unit "
+ pr_warning("%s: SD Status: Invalid Allocation Unit "
"size.\n", mmc_hostname(card->host));
}
out:
@@ -273,7 +274,7 @@ static int mmc_read_switch(struct mmc_card *card)
return 0;
if (!(card->csd.cmdclass & CCC_SWITCH)) {
- printk(KERN_WARNING "%s: card lacks mandatory switch "
+ pr_warning("%s: card lacks mandatory switch "
"function, performance might suffer.\n",
mmc_hostname(card->host));
return 0;
@@ -283,7 +284,7 @@ static int mmc_read_switch(struct mmc_card *card)
status = kmalloc(64, GFP_KERNEL);
if (!status) {
- printk(KERN_ERR "%s: could not allocate a buffer for "
+ pr_err("%s: could not allocate a buffer for "
"switch capabilities.\n",
mmc_hostname(card->host));
return -ENOMEM;
@@ -299,13 +300,16 @@ static int mmc_read_switch(struct mmc_card *card)
if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
goto out;
- printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n",
+ pr_warning("%s: problem reading Bus Speed modes.\n",
mmc_hostname(card->host));
err = 0;
goto out;
}
+ if (status[13] & UHS_SDR50_BUS_SPEED)
+ card->sw_caps.hs_max_dtr = 50000000;
+
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
@@ -319,7 +323,7 @@ static int mmc_read_switch(struct mmc_card *card)
if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
goto out;
- printk(KERN_WARNING "%s: problem reading "
+ pr_warning("%s: problem reading "
"Driver Strength.\n",
mmc_hostname(card->host));
err = 0;
@@ -339,7 +343,7 @@ static int mmc_read_switch(struct mmc_card *card)
if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
goto out;
- printk(KERN_WARNING "%s: problem reading "
+ pr_warning("%s: problem reading "
"Current Limit.\n",
mmc_hostname(card->host));
err = 0;
@@ -348,9 +352,6 @@ static int mmc_read_switch(struct mmc_card *card)
}
card->sw_caps.sd3_curr_limit = status[7];
- } else {
- if (status[13] & 0x02)
- card->sw_caps.hs_max_dtr = 50000000;
}
out:
@@ -383,7 +384,7 @@ int mmc_sd_switch_hs(struct mmc_card *card)
status = kmalloc(64, GFP_KERNEL);
if (!status) {
- printk(KERN_ERR "%s: could not allocate a buffer for "
+ pr_err("%s: could not allocate a buffer for "
"switch capabilities.\n", mmc_hostname(card->host));
return -ENOMEM;
}
@@ -393,7 +394,7 @@ int mmc_sd_switch_hs(struct mmc_card *card)
goto out;
if ((status[16] & 0xF) != 1) {
- printk(KERN_WARNING "%s: Problem switching card "
+ pr_warning("%s: Problem switching card "
"into high-speed mode!\n",
mmc_hostname(card->host));
err = 0;
@@ -459,7 +460,7 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
return err;
if ((status[15] & 0xF) != drive_strength) {
- printk(KERN_WARNING "%s: Problem setting drive strength!\n",
+ pr_warning("%s: Problem setting drive strength!\n",
mmc_hostname(card->host));
return 0;
}
@@ -538,7 +539,7 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
return err;
if ((status[16] & 0xF) != card->sd_bus_speed)
- printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
+ pr_warning("%s: Problem setting bus speed mode!\n",
mmc_hostname(card->host));
else {
mmc_set_timing(card->host, timing);
@@ -600,7 +601,7 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
return err;
if (((status[15] >> 4) & 0x0F) != current_limit)
- printk(KERN_WARNING "%s: Problem setting current limit!\n",
+ pr_warning("%s: Problem setting current limit!\n",
mmc_hostname(card->host));
return 0;
@@ -622,7 +623,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
status = kmalloc(64, GFP_KERNEL);
if (!status) {
- printk(KERN_ERR "%s: could not allocate a buffer for "
+ pr_err("%s: could not allocate a buffer for "
"switch capabilities.\n", mmc_hostname(card->host));
return -ENOMEM;
}
@@ -852,7 +853,7 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
ro = host->ops->get_ro(host);
if (ro < 0) {
- printk(KERN_WARNING "%s: host does not "
+ pr_warning("%s: host does not "
"support reading read-only "
"switch. assuming write-enable.\n",
mmc_hostname(host));
@@ -929,8 +930,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
err = mmc_send_relative_addr(host, &card->rca);
if (err)
return err;
-
- mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
if (!oldcard) {
@@ -1043,6 +1042,7 @@ static void mmc_sd_detect(struct mmc_host *host)
mmc_claim_host(host);
mmc_detach_bus(host);
+ mmc_power_off(host);
mmc_release_host(host);
}
}
@@ -1167,7 +1167,7 @@ int mmc_attach_sd(struct mmc_host *host)
* support.
*/
if (ocr & 0x7F) {
- printk(KERN_WARNING "%s: card claims to support voltages "
+ pr_warning("%s: card claims to support voltages "
"below the defined range. These will be ignored.\n",
mmc_hostname(host));
ocr &= ~0x7F;
@@ -1175,7 +1175,7 @@ int mmc_attach_sd(struct mmc_host *host)
if ((ocr & MMC_VDD_165_195) &&
!(host->ocr_avail_sd & MMC_VDD_165_195)) {
- printk(KERN_WARNING "%s: SD card claims to support the "
+ pr_warning("%s: SD card claims to support the "
"incompletely defined 'low voltage range'. This "
"will be ignored.\n", mmc_hostname(host));
ocr &= ~MMC_VDD_165_195;
@@ -1214,7 +1214,7 @@ remove_card:
err:
mmc_detach_bus(host);
- printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
+ pr_err("%s: error %d whilst initialising SD card\n",
mmc_hostname(host), err);
return err;
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 021fed15380..274ef00b446 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
@@ -67,7 +68,7 @@ EXPORT_SYMBOL_GPL(mmc_app_cmd);
int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
struct mmc_command *cmd, int retries)
{
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
int i, err;
@@ -244,7 +245,7 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
{
int err;
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
@@ -303,7 +304,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp)
{
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
@@ -348,7 +349,7 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
int err;
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 262fff01917..3ab565e32a6 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -111,8 +111,8 @@ static int sdio_read_cccr(struct mmc_card *card)
cccr_vsn = data & 0x0f;
- if (cccr_vsn > SDIO_CCCR_REV_1_20) {
- printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n",
+ if (cccr_vsn > SDIO_CCCR_REV_3_00) {
+ pr_err("%s: unrecognised CCCR structure version %d\n",
mmc_hostname(card->host), cccr_vsn);
return -EINVAL;
}
@@ -408,8 +408,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
*/
if (oldcard)
oldcard->rca = card->rca;
-
- mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
/*
@@ -597,6 +595,7 @@ out:
mmc_claim_host(host);
mmc_detach_bus(host);
+ mmc_power_off(host);
mmc_release_host(host);
}
}
@@ -778,7 +777,7 @@ int mmc_attach_sdio(struct mmc_host *host)
* support.
*/
if (ocr & 0x7F) {
- printk(KERN_WARNING "%s: card claims to support voltages "
+ pr_warning("%s: card claims to support voltages "
"below the defined range. These will be ignored.\n",
mmc_hostname(host));
ocr &= ~0x7F;
@@ -875,7 +874,7 @@ remove:
err:
mmc_detach_bus(host);
- printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
+ pr_err("%s: error %d whilst initialising SDIO card\n",
mmc_hostname(host), err);
return err;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index e4e6822d09e..40989e6bb53 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
@@ -173,7 +174,7 @@ static int sdio_bus_remove(struct device *dev)
drv->remove(func);
if (func->irq_handler) {
- printk(KERN_WARNING "WARNING: driver %s did not remove "
+ pr_warning("WARNING: driver %s did not remove "
"its interrupt handler!\n", drv->name);
sdio_claim_host(func);
sdio_release_irq(func);
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 541bdb89e0c..f1c7ed8f4d8 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -132,7 +132,7 @@ static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
ret = -EINVAL;
}
if (ret && ret != -EILSEQ && ret != -ENOENT) {
- printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n",
+ pr_err("%s: bad %s tuple 0x%02x (%u bytes)\n",
mmc_hostname(card->host), tpl_descr, code, size);
}
} else {
@@ -313,7 +313,7 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
if (ret == -ENOENT) {
/* warn about unknown tuples */
- printk(KERN_WARNING "%s: queuing unknown"
+ pr_warning("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
mmc_hostname(card->host),
tpl_code, tpl_link);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 0f687cdeb06..b1f3168f791 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -9,6 +9,7 @@
* your option) any later version.
*/
+#include <linux/export.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 03ead028d2c..68f81b9ee0f 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>
@@ -45,7 +46,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
if (ret) {
- printk(KERN_DEBUG "%s: error %d reading SDIO_CCCR_INTx\n",
+ pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
mmc_card_id(card), ret);
return ret;
}
@@ -55,7 +56,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
if (pending & (1 << i)) {
func = card->sdio_func[i - 1];
if (!func) {
- printk(KERN_WARNING "%s: pending IRQ for "
+ pr_warning("%s: pending IRQ for "
"non-existent function\n",
mmc_card_id(card));
ret = -EINVAL;
@@ -63,7 +64,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
func->irq_handler(func);
count++;
} else {
- printk(KERN_WARNING "%s: pending IRQ with no handler\n",
+ pr_warning("%s: pending IRQ with no handler\n",
sdio_func_id(func));
ret = -EINVAL;
}
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index f087d876c57..b0517cc0620 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -121,7 +121,7 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
{
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
@@ -144,8 +144,11 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
cmd.arg |= fn << 28;
cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
cmd.arg |= addr << 9;
- if (blocks == 1 && blksz <= 512)
- cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */
+ if (blocks == 1 && blksz < 512)
+ cmd.arg |= blksz; /* byte mode */
+ else if (blocks == 1 && blksz == 512 &&
+ !(mmc_card_broken_byte_mode_512(card)))
+ cmd.arg |= 0; /* byte mode, 0==512 */
else
cmd.arg |= 0x08000000 | blocks; /* block mode */
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
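The change above only affects the count field of the CMD53 argument: byte mode uses counts 1-511 with 0 encoding 512, block mode sets bit 27, and the new quirk forces 512-byte transfers into block mode. A hedged restatement of just that logic (helper name illustrative):

#include <linux/types.h>

static u32 example_cmd53_count_field(unsigned int blocks, unsigned int blksz,
				     bool broken_byte_mode_512)
{
	if (blocks == 1 && blksz < 512)
		return blksz;			/* byte mode, count 1..511 */
	if (blocks == 1 && blksz == 512 && !broken_byte_mode_512)
		return 0;			/* byte mode, 0 encodes 512 */
	return 0x08000000 | blocks;		/* block mode, bit 27 set */
}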
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8c87096531e..cf444b0ca2c 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -130,13 +130,13 @@ config MMC_SDHCI_CNS3XXX
If unsure, say N.
config MMC_SDHCI_ESDHC_IMX
- tristate "SDHCI platform support for the Freescale eSDHC i.MX controller"
- depends on ARCH_MX25 || ARCH_MX35 || ARCH_MX5
+ tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
+ depends on ARCH_MXC
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
- This selects the Freescale eSDHC controller support on the platform
- bus, found on platforms like mx35/51.
+ This selects the Freescale eSDHC/uSDHC controller support
+ found on i.MX25, i.MX35, i.MX5x and i.MX6x.
If you have a controller with this interface, say Y or M here.
@@ -263,7 +263,7 @@ config MMC_WBSD
config MMC_AU1X
tristate "Alchemy AU1XX0 MMC Card Interface support"
- depends on SOC_AU1200
+ depends on MIPS_ALCHEMY
help
This selects the AMD Alchemy(R) Multimedia card interface.
If you have a Alchemy platform with a MMC slot, say Y or M here.
@@ -326,11 +326,11 @@ config MMC_MSM
support for SDIO devices.
config MMC_MXC
- tristate "Freescale i.MX2/3 Multimedia Card Interface support"
- depends on MACH_MX21 || MACH_MX27 || ARCH_MX31
+ tristate "Freescale i.MX21/27/31 Multimedia Card Interface support"
+ depends on ARCH_MXC
help
- This selects the Freescale i.MX2/3 Multimedia card Interface.
- If you have a i.MX platform with a Multimedia Card slot,
+ This selects the Freescale i.MX21, i.MX27 and i.MX31 Multimedia card
+ Interface. If you have an i.MX platform with a Multimedia Card slot,
say Y or M here.
If unsure, say N.
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index a4aa3af86fe..a8b4d2aa18e 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -869,7 +869,11 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
struct at91mci_host *host = _host;
- int present = !gpio_get_value(irq_to_gpio(irq));
+ int present;
+
+ /*
+ * Entering this ISR means that det_pin has been configured,
+ * so its value from the board structure can be used.
+ */
+ present = !gpio_get_value(host->board->det_pin);
/*
* we expect this irq on both insert and remove,
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index fc8a0fe7c5c..000b3ad0f5c 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -17,112 +17,126 @@
#define __DRIVERS_MMC_ATMEL_MCI_H__
/* MCI Register Definitions */
-#define MCI_CR 0x0000 /* Control */
-# define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */
-# define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */
-# define MCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */
-# define MCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */
-# define MCI_CR_SWRST ( 1 << 7) /* Software Reset */
-#define MCI_MR 0x0004 /* Mode */
-# define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
-# define MCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
-# define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */
-# define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */
-# define MCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */
-# define MCI_MR_PDCPADV ( 1 << 14) /* Padding Value */
-# define MCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */
-#define MCI_DTOR 0x0008 /* Data Timeout */
-# define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
-# define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
-#define MCI_SDCR 0x000c /* SD Card / SDIO */
-# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */
-# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */
-# define MCI_SDCSEL_MASK ( 3 << 0)
-# define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
-# define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
-# define MCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */
-# define MCI_SDCBUS_MASK ( 3 << 6)
-#define MCI_ARGR 0x0010 /* Command Argument */
-#define MCI_CMDR 0x0014 /* Command */
-# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
-# define MCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */
-# define MCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */
-# define MCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */
-# define MCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */
-# define MCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */
-# define MCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */
-# define MCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */
-# define MCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */
-# define MCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */
-# define MCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */
-# define MCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */
-# define MCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */
-# define MCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */
-# define MCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */
-# define MCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */
-# define MCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */
-# define MCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */
-# define MCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */
-# define MCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */
-# define MCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */
-# define MCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */
-#define MCI_BLKR 0x0018 /* Block */
-# define MCI_BCNT(x) ((x) << 0) /* Data Block Count */
-# define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
-#define MCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
-# define MCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
-# define MCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
-#define MCI_RSPR 0x0020 /* Response 0 */
-#define MCI_RSPR1 0x0024 /* Response 1 */
-#define MCI_RSPR2 0x0028 /* Response 2 */
-#define MCI_RSPR3 0x002c /* Response 3 */
-#define MCI_RDR 0x0030 /* Receive Data */
-#define MCI_TDR 0x0034 /* Transmit Data */
-#define MCI_SR 0x0040 /* Status */
-#define MCI_IER 0x0044 /* Interrupt Enable */
-#define MCI_IDR 0x0048 /* Interrupt Disable */
-#define MCI_IMR 0x004c /* Interrupt Mask */
-# define MCI_CMDRDY ( 1 << 0) /* Command Ready */
-# define MCI_RXRDY ( 1 << 1) /* Receiver Ready */
-# define MCI_TXRDY ( 1 << 2) /* Transmitter Ready */
-# define MCI_BLKE ( 1 << 3) /* Data Block Ended */
-# define MCI_DTIP ( 1 << 4) /* Data Transfer In Progress */
-# define MCI_NOTBUSY ( 1 << 5) /* Data Not Busy */
-# define MCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */
-# define MCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */
-# define MCI_RINDE ( 1 << 16) /* Response Index Error */
-# define MCI_RDIRE ( 1 << 17) /* Response Direction Error */
-# define MCI_RCRCE ( 1 << 18) /* Response CRC Error */
-# define MCI_RENDE ( 1 << 19) /* Response End Bit Error */
-# define MCI_RTOE ( 1 << 20) /* Response Time-Out Error */
-# define MCI_DCRCE ( 1 << 21) /* Data CRC Error */
-# define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */
-# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */
-# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */
-#define MCI_DMA 0x0050 /* DMA Configuration[2] */
-# define MCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
-# define MCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
-# define MCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */
-#define MCI_CFG 0x0054 /* Configuration[2] */
-# define MCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */
-# define MCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */
-# define MCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */
-# define MCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */
-#define MCI_WPMR 0x00e4 /* Write Protection Mode[2] */
-# define MCI_WP_EN ( 1 << 0) /* WP Enable */
-# define MCI_WP_KEY (0x4d4349 << 8) /* WP Key */
-#define MCI_WPSR 0x00e8 /* Write Protection Status[2] */
-# define MCI_GET_WP_VS(x) ((x) & 0x0f)
-# define MCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
-#define MCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
+#define ATMCI_CR 0x0000 /* Control */
+# define ATMCI_CR_MCIEN ( 1 << 0) /* MCI Enable */
+# define ATMCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */
+# define ATMCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */
+# define ATMCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */
+# define ATMCI_CR_SWRST ( 1 << 7) /* Software Reset */
+#define ATMCI_MR 0x0004 /* Mode */
+# define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
+# define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
+# define ATMCI_MR_RDPROOF ( 1 << 11) /* Read Proof */
+# define ATMCI_MR_WRPROOF ( 1 << 12) /* Write Proof */
+# define ATMCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */
+# define ATMCI_MR_PDCPADV ( 1 << 14) /* Padding Value */
+# define ATMCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */
+#define ATMCI_DTOR 0x0008 /* Data Timeout */
+# define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
+# define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
+#define ATMCI_SDCR 0x000c /* SD Card / SDIO */
+# define ATMCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */
+# define ATMCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot B */
+# define ATMCI_SDCSEL_MASK ( 3 << 0)
+# define ATMCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
+# define ATMCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
+# define ATMCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */
+# define ATMCI_SDCBUS_MASK ( 3 << 6)
+#define ATMCI_ARGR 0x0010 /* Command Argument */
+#define ATMCI_CMDR 0x0014 /* Command */
+# define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
+# define ATMCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */
+# define ATMCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */
+# define ATMCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */
+# define ATMCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */
+# define ATMCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */
+# define ATMCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */
+# define ATMCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */
+# define ATMCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */
+# define ATMCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */
+# define ATMCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */
+# define ATMCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */
+# define ATMCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */
+# define ATMCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */
+# define ATMCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */
+# define ATMCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */
+# define ATMCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */
+# define ATMCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */
+# define ATMCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */
+# define ATMCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */
+# define ATMCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */
+# define ATMCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */
+#define ATMCI_BLKR 0x0018 /* Block */
+# define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */
+# define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
+#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
+# define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
+# define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
+#define ATMCI_RSPR 0x0020 /* Response 0 */
+#define ATMCI_RSPR1 0x0024 /* Response 1 */
+#define ATMCI_RSPR2 0x0028 /* Response 2 */
+#define ATMCI_RSPR3 0x002c /* Response 3 */
+#define ATMCI_RDR 0x0030 /* Receive Data */
+#define ATMCI_TDR 0x0034 /* Transmit Data */
+#define ATMCI_SR 0x0040 /* Status */
+#define ATMCI_IER 0x0044 /* Interrupt Enable */
+#define ATMCI_IDR 0x0048 /* Interrupt Disable */
+#define ATMCI_IMR 0x004c /* Interrupt Mask */
+# define ATMCI_CMDRDY ( 1 << 0) /* Command Ready */
+# define ATMCI_RXRDY ( 1 << 1) /* Receiver Ready */
+# define ATMCI_TXRDY ( 1 << 2) /* Transmitter Ready */
+# define ATMCI_BLKE ( 1 << 3) /* Data Block Ended */
+# define ATMCI_DTIP ( 1 << 4) /* Data Transfer In Progress */
+# define ATMCI_NOTBUSY ( 1 << 5) /* Data Not Busy */
+# define ATMCI_ENDRX ( 1 << 6) /* End of RX Buffer */
+# define ATMCI_ENDTX ( 1 << 7) /* End of TX Buffer */
+# define ATMCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */
+# define ATMCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */
+# define ATMCI_SDIOWAIT ( 1 << 12) /* SDIO Read Wait Operation Status */
+# define ATMCI_CSRCV ( 1 << 13) /* CE-ATA Completion Signal Received */
+# define ATMCI_RXBUFF ( 1 << 14) /* RX Buffer Full */
+# define ATMCI_TXBUFE ( 1 << 15) /* TX Buffer Empty */
+# define ATMCI_RINDE ( 1 << 16) /* Response Index Error */
+# define ATMCI_RDIRE ( 1 << 17) /* Response Direction Error */
+# define ATMCI_RCRCE ( 1 << 18) /* Response CRC Error */
+# define ATMCI_RENDE ( 1 << 19) /* Response End Bit Error */
+# define ATMCI_RTOE ( 1 << 20) /* Response Time-Out Error */
+# define ATMCI_DCRCE ( 1 << 21) /* Data CRC Error */
+# define ATMCI_DTOE ( 1 << 22) /* Data Time-Out Error */
+# define ATMCI_CSTOE ( 1 << 23) /* Completion Signal Time-out Error */
+# define ATMCI_BLKOVRE ( 1 << 24) /* DMA Block Overrun Error */
+# define ATMCI_DMADONE ( 1 << 25) /* DMA Transfer Done */
+# define ATMCI_FIFOEMPTY ( 1 << 26) /* FIFO Empty Flag */
+# define ATMCI_XFRDONE ( 1 << 27) /* Transfer Done Flag */
+# define ATMCI_ACKRCV ( 1 << 28) /* Boot Operation Acknowledge Received */
+# define ATMCI_ACKRCVE ( 1 << 29) /* Boot Operation Acknowledge Error */
+# define ATMCI_OVRE ( 1 << 30) /* RX Overrun Error */
+# define ATMCI_UNRE ( 1 << 31) /* TX Underrun Error */
+#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */
+# define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
+# define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
+# define ATMCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */
+#define ATMCI_CFG 0x0054 /* Configuration[2] */
+# define ATMCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */
+# define ATMCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */
+# define ATMCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */
+# define ATMCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */
+#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */
+# define ATMCI_WP_EN ( 1 << 0) /* WP Enable */
+# define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */
+#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */
+# define ATMCI_GET_WP_VS(x) ((x) & 0x0f)
+# define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
+#define ATMCI_VERSION 0x00FC /* Version */
+#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
/* This is not including the FIFO Aperture on MCI2 */
-#define MCI_REGS_SIZE 0x100
+#define ATMCI_REGS_SIZE 0x100
/* Register access macros */
-#define mci_readl(port,reg) \
- __raw_readl((port)->regs + MCI_##reg)
-#define mci_writel(port,reg,value) \
- __raw_writel((value), (port)->regs + MCI_##reg)
+#define atmci_readl(port,reg) \
+ __raw_readl((port)->regs + reg)
+#define atmci_writel(port,reg,value) \
+ __raw_writel((value), (port)->regs + reg)
#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
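With the rename, the accessor macros take the full ATMCI_* register offset instead of token-pasting an MCI_ prefix onto a bare name. A hedged usage sketch, assuming a struct atmel_mci *host as defined in atmel-mci.c; the reset-and-enable sequence is only illustrative:

#include "atmel-mci-regs.h"

static void example_atmci_soft_reset(struct atmel_mci *host)
{
	u32 status;

	atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);	/* software reset */
	atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);	/* re-enable controller */
	status = atmci_readl(host, ATMCI_SR);		/* read status register */
	(void)status;
}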
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fa8cae1d700..a7ee5027146 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -30,6 +30,7 @@
#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
+#include <linux/atmel_pdc.h>
#include <asm/io.h>
#include <asm/unaligned.h>
@@ -39,7 +40,7 @@
#include "atmel-mci-regs.h"
-#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
+#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
#define ATMCI_DMA_THRESHOLD 16
enum {
@@ -58,18 +59,35 @@ enum atmel_mci_state {
STATE_DATA_ERROR,
};
+enum atmci_xfer_dir {
+ XFER_RECEIVE = 0,
+ XFER_TRANSMIT,
+};
+
+enum atmci_pdc_buf {
+ PDC_FIRST_BUF = 0,
+ PDC_SECOND_BUF,
+};
+
+struct atmel_mci_caps {
+ bool has_dma;
+ bool has_pdc;
+ bool has_cfg_reg;
+ bool has_cstor_reg;
+ bool has_highspeed;
+ bool has_rwproof;
+};
+
struct atmel_mci_dma {
-#ifdef CONFIG_MMC_ATMELMCI_DMA
struct dma_chan *chan;
struct dma_async_tx_descriptor *data_desc;
-#endif
};
/**
* struct atmel_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
* @regs: Pointer to MMIO registers.
- * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @sg: Scatterlist entry currently being processed by PIO or PDC code.
* @pio_offset: Offset into the current scatterlist entry.
* @cur_slot: The slot which is currently using the controller.
* @mrq: The request currently being processed on @cur_slot,
@@ -77,6 +95,7 @@ struct atmel_mci_dma {
* @cmd: The command currently being sent to the card, or NULL.
* @data: The data currently being transferred, or NULL if no data
* transfer is in progress.
+ * @data_size: number of bytes in the transfer (data->blocks * data->blksz).
* @dma: DMA client state.
* @data_chan: DMA channel being used for the current data transfer.
* @cmd_status: Snapshot of SR taken upon completion of the current
@@ -103,6 +122,13 @@ struct atmel_mci_dma {
* @mck: The peripheral bus clock hooked up to the MMC controller.
* @pdev: Platform device associated with the MMC controller.
* @slot: Slots sharing this MMC controller.
+ * @caps: MCI capabilities depending on MCI version.
+ * @prepare_data: function to set up the MCI before a data transfer;
+ * depends on MCI capabilities.
+ * @submit_data: function to start a data transfer; depends on MCI
+ * capabilities.
+ * @stop_transfer: function to stop a data transfer; depends on MCI
+ * capabilities.
*
* Locking
* =======
@@ -143,6 +169,7 @@ struct atmel_mci {
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
+ unsigned int data_size;
struct atmel_mci_dma dma;
struct dma_chan *data_chan;
@@ -166,7 +193,13 @@ struct atmel_mci {
struct clk *mck;
struct platform_device *pdev;
- struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS];
+ struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];
+
+ struct atmel_mci_caps caps;
+
+ u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
+ void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
+ void (*stop_transfer)(struct atmel_mci *host);
};
/**
@@ -220,31 +253,6 @@ struct atmel_mci_slot {
set_bit(event, &host->pending_events)
/*
- * Enable or disable features/registers based on
- * whether the processor supports them
- */
-static bool mci_has_rwproof(void)
-{
- if (cpu_is_at91sam9261() || cpu_is_at91rm9200())
- return false;
- else
- return true;
-}
-
-/*
- * The new MCI2 module isn't 100% compatible with the old MCI module,
- * and it has a few nice features which we want to use...
- */
-static inline bool atmci_is_mci2(void)
-{
- if (cpu_is_at91sam9g45())
- return true;
-
- return false;
-}
-
-
-/*
* The debugfs stuff below is mostly optimized away when
* CONFIG_DEBUG_FS is not set.
*/
@@ -352,7 +360,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
struct atmel_mci *host = s->private;
u32 *buf;
- buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL);
+ buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -363,47 +371,50 @@ static int atmci_regs_show(struct seq_file *s, void *v)
*/
spin_lock_bh(&host->lock);
clk_enable(host->mck);
- memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
+ memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
clk_disable(host->mck);
spin_unlock_bh(&host->lock);
seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
- buf[MCI_MR / 4],
- buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "",
- buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "",
- buf[MCI_MR / 4] & 0xff);
- seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]);
- seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]);
- seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]);
+ buf[ATMCI_MR / 4],
+ buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
+ buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "",
+ buf[ATMCI_MR / 4] & 0xff);
+ seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
+ seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
+ seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
- buf[MCI_BLKR / 4],
- buf[MCI_BLKR / 4] & 0xffff,
- (buf[MCI_BLKR / 4] >> 16) & 0xffff);
- if (atmci_is_mci2())
- seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]);
+ buf[ATMCI_BLKR / 4],
+ buf[ATMCI_BLKR / 4] & 0xffff,
+ (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
+ if (host->caps.has_cstor_reg)
+ seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
/* Don't read RSPR and RDR; it will consume the data there */
- atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
- atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
+ atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
+ atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
- if (atmci_is_mci2()) {
+ if (host->caps.has_dma) {
u32 val;
- val = buf[MCI_DMA / 4];
+ val = buf[ATMCI_DMA / 4];
seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
val, val & 3,
((val >> 4) & 3) ?
1 << (((val >> 4) & 3) + 1) : 1,
- val & MCI_DMAEN ? " DMAEN" : "");
+ val & ATMCI_DMAEN ? " DMAEN" : "");
+ }
+ if (host->caps.has_cfg_reg) {
+ u32 val;
- val = buf[MCI_CFG / 4];
+ val = buf[ATMCI_CFG / 4];
seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
val,
- val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
- val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
- val & MCI_CFG_HSMODE ? " HSMODE" : "",
- val & MCI_CFG_LSYNC ? " LSYNC" : "");
+ val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
+ val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
+ val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
+ val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
}
kfree(buf);
@@ -466,7 +477,7 @@ err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
-static inline unsigned int ns_to_clocks(struct atmel_mci *host,
+static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
unsigned int ns)
{
return (ns * (host->bus_hz / 1000000) + 999) / 1000;
@@ -482,7 +493,8 @@ static void atmci_set_timeout(struct atmel_mci *host,
unsigned dtocyc;
unsigned dtomul;
- timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks;
+ timeout = atmci_ns_to_clocks(host, data->timeout_ns)
+ + data->timeout_clks;
for (dtomul = 0; dtomul < 8; dtomul++) {
unsigned shift = dtomul_to_shift[dtomul];
@@ -498,7 +510,7 @@ static void atmci_set_timeout(struct atmel_mci *host,
dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
dtocyc << dtomul_to_shift[dtomul]);
- mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
+ atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
}
/*
@@ -512,13 +524,13 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
cmd->error = -EINPROGRESS;
- cmdr = MCI_CMDR_CMDNB(cmd->opcode);
+ cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
- cmdr |= MCI_CMDR_RSPTYP_136BIT;
+ cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
else
- cmdr |= MCI_CMDR_RSPTYP_48BIT;
+ cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
}
/*
@@ -526,34 +538,34 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
* it's too difficult to determine whether this is an ACMD or
* not. Better make it 64.
*/
- cmdr |= MCI_CMDR_MAXLAT_64CYC;
+ cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
- cmdr |= MCI_CMDR_OPDCMD;
+ cmdr |= ATMCI_CMDR_OPDCMD;
data = cmd->data;
if (data) {
- cmdr |= MCI_CMDR_START_XFER;
+ cmdr |= ATMCI_CMDR_START_XFER;
if (cmd->opcode == SD_IO_RW_EXTENDED) {
- cmdr |= MCI_CMDR_SDIO_BLOCK;
+ cmdr |= ATMCI_CMDR_SDIO_BLOCK;
} else {
if (data->flags & MMC_DATA_STREAM)
- cmdr |= MCI_CMDR_STREAM;
+ cmdr |= ATMCI_CMDR_STREAM;
else if (data->blocks > 1)
- cmdr |= MCI_CMDR_MULTI_BLOCK;
+ cmdr |= ATMCI_CMDR_MULTI_BLOCK;
else
- cmdr |= MCI_CMDR_BLOCK;
+ cmdr |= ATMCI_CMDR_BLOCK;
}
if (data->flags & MMC_DATA_READ)
- cmdr |= MCI_CMDR_TRDIR_READ;
+ cmdr |= ATMCI_CMDR_TRDIR_READ;
}
return cmdr;
}
-static void atmci_start_command(struct atmel_mci *host,
+static void atmci_send_command(struct atmel_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
WARN_ON(host->cmd);
@@ -563,43 +575,119 @@ static void atmci_start_command(struct atmel_mci *host,
"start command: ARGR=0x%08x CMDR=0x%08x\n",
cmd->arg, cmd_flags);
- mci_writel(host, ARGR, cmd->arg);
- mci_writel(host, CMDR, cmd_flags);
+ atmci_writel(host, ATMCI_ARGR, cmd->arg);
+ atmci_writel(host, ATMCI_CMDR, cmd_flags);
}
-static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
+static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
- atmci_start_command(host, data->stop, host->stop_cmdr);
- mci_writel(host, IER, MCI_CMDRDY);
+ atmci_send_command(host, data->stop, host->stop_cmdr);
+ atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
-#ifdef CONFIG_MMC_ATMELMCI_DMA
-static void atmci_dma_cleanup(struct atmel_mci *host)
+/*
+ * Configure the given PDC buffer, taking care of alignment issues.
+ * Update host->data_size and host->sg.
+ */
+static void atmci_pdc_set_single_buf(struct atmel_mci *host,
+ enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
+{
+ u32 pointer_reg, counter_reg;
+
+ if (dir == XFER_RECEIVE) {
+ pointer_reg = ATMEL_PDC_RPR;
+ counter_reg = ATMEL_PDC_RCR;
+ } else {
+ pointer_reg = ATMEL_PDC_TPR;
+ counter_reg = ATMEL_PDC_TCR;
+ }
+
+ if (buf_nb == PDC_SECOND_BUF) {
+ pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
+ counter_reg += ATMEL_PDC_SCND_BUF_OFF;
+ }
+
+ atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+ if (host->data_size <= sg_dma_len(host->sg)) {
+ if (host->data_size & 0x3) {
+ /* If the size is not a multiple of 4, transfer in bytes */
+ atmci_writel(host, counter_reg, host->data_size);
+ atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
+ } else {
+ /* Otherwise, transfer 32-bit words */
+ atmci_writel(host, counter_reg, host->data_size / 4);
+ }
+ host->data_size = 0;
+ } else {
+ /* We assume the size of a page is 32-bit aligned */
+ atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
+ host->data_size -= sg_dma_len(host->sg);
+ if (host->data_size)
+ host->sg = sg_next(host->sg);
+ }
+}
+
+/*
+ * Configure the PDC buffers according to the data size, i.e. configure one or
+ * two buffers. Don't use this function if you want to configure only the
+ * second buffer. In that case, use atmci_pdc_set_single_buf.
+ */
+static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
{
- struct mmc_data *data = host->data;
+ atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
+ if (host->data_size)
+ atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
+}
+
+/*
+ * Unmap the sg lists; called when the transfer is finished.
+ */
+static void atmci_pdc_cleanup(struct atmel_mci *host)
+{
+ struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(host->dma.chan->device->dev,
- data->sg, data->sg_len,
- ((data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ dma_unmap_sg(&host->pdev->dev,
+ data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
-static void atmci_stop_dma(struct atmel_mci *host)
+/*
+ * Disable PDC transfers. Set the EVENT_XFER_COMPLETE pending flag after
+ * receiving the ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable the
+ * ATMCI_NOTBUSY interrupt, which is needed for both transfer directions.
+ */
+static void atmci_pdc_complete(struct atmel_mci *host)
{
- struct dma_chan *chan = host->data_chan;
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+ atmci_pdc_cleanup(host);
- if (chan) {
- dmaengine_terminate_all(chan);
- atmci_dma_cleanup(host);
- } else {
- /* Data transfer was stopped by the interrupt handler */
+ /*
+ * If the card was removed, data will be NULL. No point trying
+ * to send the stop command or waiting for NBUSY in this case.
+ */
+ if (host->data) {
atmci_set_pending(host, EVENT_XFER_COMPLETE);
- mci_writel(host, IER, MCI_NOTBUSY);
+ tasklet_schedule(&host->tasklet);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
-/* This function is called by the DMA driver from tasklet context. */
+static void atmci_dma_cleanup(struct atmel_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (data)
+ dma_unmap_sg(host->dma.chan->device->dev,
+ data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+/*
+ * This function is called by the DMA driver from tasklet context.
+ */
static void atmci_dma_complete(void *arg)
{
struct atmel_mci *host = arg;
@@ -607,9 +695,9 @@ static void atmci_dma_complete(void *arg)
dev_vdbg(&host->pdev->dev, "DMA complete\n");
- if (atmci_is_mci2())
+ if (host->caps.has_dma)
/* Disable DMA hardware handshaking on MCI */
- mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN);
+ atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
atmci_dma_cleanup(host);
@@ -641,11 +729,93 @@ static void atmci_dma_complete(void *arg)
* completion callback" rule of the dma engine
* framework.
*/
- mci_writel(host, IER, MCI_NOTBUSY);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
-static int
+/*
+ * Returns a mask of interrupt flags to be enabled after the whole
+ * request has been prepared.
+ */
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+{
+ u32 iflags;
+
+ data->error = -EINPROGRESS;
+
+ host->sg = data->sg;
+ host->data = data;
+ host->data_chan = NULL;
+
+ iflags = ATMCI_DATA_ERROR_FLAGS;
+
+ /*
+ * Errata: MMC data write operation with less than 12
+ * bytes is impossible.
+ *
+ * Errata: MCI Transmit Data Register (TDR) FIFO
+ * corruption when length is not multiple of 4.
+ */
+ if (data->blocks * data->blksz < 12
+ || (data->blocks * data->blksz) & 3)
+ host->need_reset = true;
+
+ host->pio_offset = 0;
+ if (data->flags & MMC_DATA_READ)
+ iflags |= ATMCI_RXRDY;
+ else
+ iflags |= ATMCI_TXRDY;
+
+ return iflags;
+}
+
+/*
+ * Set interrupt flags and set the block length in the MCI mode register even
+ * though this value is also accessible in the MCI block register. It seems to
+ * be necessary on versions prior to the High Speed MCI. It also maps the sg
+ * list and configures the PDC registers.
+ */
+static u32
+atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
+{
+ u32 iflags, tmp;
+ unsigned int sg_len;
+ enum dma_data_direction dir;
+
+ data->error = -EINPROGRESS;
+
+ host->data = data;
+ host->sg = data->sg;
+ iflags = ATMCI_DATA_ERROR_FLAGS;
+
+ /* Enable PDC mode */
+ atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
+ } else {
+ dir = DMA_TO_DEVICE;
+ iflags |= ATMCI_ENDTX | ATMCI_TXBUFE;
+ }
+
+ /* Set BLKLEN */
+ tmp = atmci_readl(host, ATMCI_MR);
+ tmp &= 0x0000ffff;
+ tmp |= ATMCI_BLKLEN(data->blksz);
+ atmci_writel(host, ATMCI_MR, tmp);
+
+ /* Configure PDC */
+ host->data_size = data->blocks * data->blksz;
+ sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+ if (host->data_size)
+ atmci_pdc_set_both_buf(host,
+ ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
+
+ return iflags;
+}
+
+static u32
atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan;
@@ -654,6 +824,15 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
unsigned int i;
enum dma_data_direction direction;
unsigned int sglen;
+ u32 iflags;
+
+ data->error = -EINPROGRESS;
+
+ WARN_ON(host->data);
+ host->sg = NULL;
+ host->data = data;
+
+ iflags = ATMCI_DATA_ERROR_FLAGS;
/*
* We don't do DMA on "complex" transfers, i.e. with
@@ -661,13 +840,13 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
* with all the DMA setup overhead for short transfers.
*/
if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
- return -EINVAL;
+ return atmci_prepare_data(host, data);
if (data->blksz & 3)
- return -EINVAL;
+ return atmci_prepare_data(host, data);
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3)
- return -EINVAL;
+ return atmci_prepare_data(host, data);
}
/* If we don't have a channel, we can't do DMA */
@@ -678,8 +857,8 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (!chan)
return -ENODEV;
- if (atmci_is_mci2())
- mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN);
+ if (host->caps.has_dma)
+ atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
@@ -687,7 +866,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
direction = DMA_TO_DEVICE;
sglen = dma_map_sg(chan->device->dev, data->sg,
- data->sg_len, direction);
+ data->sg_len, direction);
desc = chan->device->device_prep_slave_sg(chan,
data->sg, sglen, direction,
@@ -699,13 +878,32 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
desc->callback = atmci_dma_complete;
desc->callback_param = host;
- return 0;
+ return iflags;
unmap_exit:
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
return -ENOMEM;
}
-static void atmci_submit_data(struct atmel_mci *host)
+static void
+atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
+{
+ return;
+}
+
+/*
+ * Start PDC according to transfer direction.
+ */
+static void
+atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_READ)
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
+ else
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+}
+
+static void
+atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan = host->data_chan;
struct dma_async_tx_descriptor *desc = host->dma.data_desc;
@@ -716,64 +914,39 @@ static void atmci_submit_data(struct atmel_mci *host)
}
}
-#else /* CONFIG_MMC_ATMELMCI_DMA */
-
-static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
-{
- return -ENOSYS;
-}
-
-static void atmci_submit_data(struct atmel_mci *host) {}
-
-static void atmci_stop_dma(struct atmel_mci *host)
+static void atmci_stop_transfer(struct atmel_mci *host)
{
- /* Data transfer was stopped by the interrupt handler */
atmci_set_pending(host, EVENT_XFER_COMPLETE);
- mci_writel(host, IER, MCI_NOTBUSY);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
-
/*
- * Returns a mask of interrupt flags to be enabled after the whole
- * request has been prepared.
+ * Stop the data transfer because an error occurred.
*/
-static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
- u32 iflags;
-
- data->error = -EINPROGRESS;
-
- WARN_ON(host->data);
- host->sg = NULL;
- host->data = data;
-
- iflags = ATMCI_DATA_ERROR_FLAGS;
- if (atmci_prepare_data_dma(host, data)) {
- host->data_chan = NULL;
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+}
- /*
- * Errata: MMC data write operation with less than 12
- * bytes is impossible.
- *
- * Errata: MCI Transmit Data Register (TDR) FIFO
- * corruption when length is not multiple of 4.
- */
- if (data->blocks * data->blksz < 12
- || (data->blocks * data->blksz) & 3)
- host->need_reset = true;
+static void atmci_stop_transfer_dma(struct atmel_mci *host)
+{
+ struct dma_chan *chan = host->data_chan;
- host->sg = data->sg;
- host->pio_offset = 0;
- if (data->flags & MMC_DATA_READ)
- iflags |= MCI_RXRDY;
- else
- iflags |= MCI_TXRDY;
+ if (chan) {
+ dmaengine_terminate_all(chan);
+ atmci_dma_cleanup(host);
+ } else {
+ /* Data transfer was stopped by the interrupt handler */
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
-
- return iflags;
}
+/*
+ * Start a request: prepare data if needed, prepare the command and activate
+ * interrupts.
+ */
static void atmci_start_request(struct atmel_mci *host,
struct atmel_mci_slot *slot)
{
@@ -792,24 +965,24 @@ static void atmci_start_request(struct atmel_mci *host,
host->data_status = 0;
if (host->need_reset) {
- mci_writel(host, CR, MCI_CR_SWRST);
- mci_writel(host, CR, MCI_CR_MCIEN);
- mci_writel(host, MR, host->mode_reg);
- if (atmci_is_mci2())
- mci_writel(host, CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
host->need_reset = false;
}
- mci_writel(host, SDCR, slot->sdc_reg);
+ atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
- iflags = mci_readl(host, IMR);
- if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
+ iflags = atmci_readl(host, ATMCI_IMR);
+ if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
/* Send init sequence (74 clock cycles) */
- mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
- while (!(mci_readl(host, SR) & MCI_CMDRDY))
+ atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
+ while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
cpu_relax();
}
iflags = 0;
@@ -818,31 +991,31 @@ static void atmci_start_request(struct atmel_mci *host,
atmci_set_timeout(host, slot, data);
/* Must set block count/size before sending command */
- mci_writel(host, BLKR, MCI_BCNT(data->blocks)
- | MCI_BLKLEN(data->blksz));
+ atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
+ | ATMCI_BLKLEN(data->blksz));
dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
- MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
+ ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
- iflags |= atmci_prepare_data(host, data);
+ iflags |= host->prepare_data(host, data);
}
- iflags |= MCI_CMDRDY;
+ iflags |= ATMCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
- atmci_start_command(host, cmd, cmdflags);
+ atmci_send_command(host, cmd, cmdflags);
if (data)
- atmci_submit_data(host);
+ host->submit_data(host, data);
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
- host->stop_cmdr |= MCI_CMDR_STOP_XFER;
+ host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
if (!(data->flags & MMC_DATA_WRITE))
- host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
+ host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
if (data->flags & MMC_DATA_STREAM)
- host->stop_cmdr |= MCI_CMDR_STREAM;
+ host->stop_cmdr |= ATMCI_CMDR_STREAM;
else
- host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK;
+ host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
}
/*
@@ -851,7 +1024,7 @@ static void atmci_start_request(struct atmel_mci *host,
* conditions (e.g. command and data complete, but stop not
* prepared yet.)
*/
- mci_writel(host, IER, iflags);
+ atmci_writel(host, ATMCI_IER, iflags);
}
static void atmci_queue_request(struct atmel_mci *host,
@@ -909,13 +1082,13 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct atmel_mci *host = slot->host;
unsigned int i;
- slot->sdc_reg &= ~MCI_SDCBUS_MASK;
+ slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
- slot->sdc_reg |= MCI_SDCBUS_1BIT;
+ slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
break;
case MMC_BUS_WIDTH_4:
- slot->sdc_reg |= MCI_SDCBUS_4BIT;
+ slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
break;
}
@@ -926,10 +1099,10 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_bh(&host->lock);
if (!host->mode_reg) {
clk_enable(host->mck);
- mci_writel(host, CR, MCI_CR_SWRST);
- mci_writel(host, CR, MCI_CR_MCIEN);
- if (atmci_is_mci2())
- mci_writel(host, CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
/*
@@ -937,7 +1110,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* core ios update when finding the minimum.
*/
slot->clock = ios->clock;
- for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i] && host->slot[i]->clock
&& host->slot[i]->clock < clock_min)
clock_min = host->slot[i]->clock;
@@ -952,28 +1125,28 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clkdiv = 255;
}
- host->mode_reg = MCI_MR_CLKDIV(clkdiv);
+ host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
/*
* WRPROOF and RDPROOF prevent overruns/underruns by
* stopping the clock when the FIFO is full/empty.
* This state is not expected to last for long.
*/
- if (mci_has_rwproof())
- host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
+ if (host->caps.has_rwproof)
+ host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
- if (atmci_is_mci2()) {
+ if (host->caps.has_cfg_reg) {
/* setup High Speed mode in relation with card capacity */
if (ios->timing == MMC_TIMING_SD_HS)
- host->cfg_reg |= MCI_CFG_HSMODE;
+ host->cfg_reg |= ATMCI_CFG_HSMODE;
else
- host->cfg_reg &= ~MCI_CFG_HSMODE;
+ host->cfg_reg &= ~ATMCI_CFG_HSMODE;
}
if (list_empty(&host->queue)) {
- mci_writel(host, MR, host->mode_reg);
- if (atmci_is_mci2())
- mci_writel(host, CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
} else {
host->need_clock_update = true;
}
@@ -984,16 +1157,16 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_bh(&host->lock);
slot->clock = 0;
- for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i] && host->slot[i]->clock) {
any_slot_active = true;
break;
}
}
if (!any_slot_active) {
- mci_writel(host, CR, MCI_CR_MCIDIS);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
if (host->mode_reg) {
- mci_readl(host, MR);
+ atmci_readl(host, ATMCI_MR);
clk_disable(host->mck);
}
host->mode_reg = 0;
@@ -1057,9 +1230,9 @@ static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct atmel_mci *host = slot->host;
if (enable)
- mci_writel(host, IER, slot->sdio_irq);
+ atmci_writel(host, ATMCI_IER, slot->sdio_irq);
else
- mci_writel(host, IDR, slot->sdio_irq);
+ atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
}
static const struct mmc_host_ops atmci_ops = {
@@ -1086,9 +1259,9 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
* busy transferring data.
*/
if (host->need_clock_update) {
- mci_writel(host, MR, host->mode_reg);
- if (atmci_is_mci2())
- mci_writel(host, CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
host->cur_slot->mrq = NULL;
@@ -1117,16 +1290,16 @@ static void atmci_command_complete(struct atmel_mci *host,
u32 status = host->cmd_status;
/* Read the response from the card (up to 16 bytes) */
- cmd->resp[0] = mci_readl(host, RSPR);
- cmd->resp[1] = mci_readl(host, RSPR);
- cmd->resp[2] = mci_readl(host, RSPR);
- cmd->resp[3] = mci_readl(host, RSPR);
+ cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
+ cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
+ cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
+ cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
- if (status & MCI_RTOE)
+ if (status & ATMCI_RTOE)
cmd->error = -ETIMEDOUT;
- else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE))
+ else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
cmd->error = -EILSEQ;
- else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE))
+ else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
cmd->error = -EIO;
else
cmd->error = 0;
@@ -1136,10 +1309,10 @@ static void atmci_command_complete(struct atmel_mci *host,
"command error: status=0x%08x\n", status);
if (cmd->data) {
- atmci_stop_dma(host);
+ host->stop_transfer(host);
host->data = NULL;
- mci_writel(host, IDR, MCI_NOTBUSY
- | MCI_TXRDY | MCI_RXRDY
+ atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
+ | ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
}
}
@@ -1191,11 +1364,11 @@ static void atmci_detect_change(unsigned long data)
* Reset controller to terminate any ongoing
* commands or data transfers.
*/
- mci_writel(host, CR, MCI_CR_SWRST);
- mci_writel(host, CR, MCI_CR_MCIEN);
- mci_writel(host, MR, host->mode_reg);
- if (atmci_is_mci2())
- mci_writel(host, CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
host->data = NULL;
host->cmd = NULL;
@@ -1210,7 +1383,7 @@ static void atmci_detect_change(unsigned long data)
/* fall through */
case STATE_SENDING_DATA:
mrq->data->error = -ENOMEDIUM;
- atmci_stop_dma(host);
+ host->stop_transfer(host);
break;
case STATE_DATA_BUSY:
case STATE_DATA_ERROR:
@@ -1261,7 +1434,7 @@ static void atmci_tasklet_func(unsigned long priv)
dev_vdbg(&host->pdev->dev,
"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
state, host->pending_events, host->completed_events,
- mci_readl(host, IMR));
+ atmci_readl(host, ATMCI_IMR));
do {
prev_state = state;
@@ -1289,9 +1462,9 @@ static void atmci_tasklet_func(unsigned long priv)
case STATE_SENDING_DATA:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
- atmci_stop_dma(host);
+ host->stop_transfer(host);
if (data->stop)
- send_stop_cmd(host, data);
+ atmci_send_stop_cmd(host, data);
state = STATE_DATA_ERROR;
break;
}
@@ -1313,11 +1486,11 @@ static void atmci_tasklet_func(unsigned long priv)
atmci_set_completed(host, EVENT_DATA_COMPLETE);
status = host->data_status;
if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
- if (status & MCI_DTOE) {
+ if (status & ATMCI_DTOE) {
dev_dbg(&host->pdev->dev,
"data timeout error\n");
data->error = -ETIMEDOUT;
- } else if (status & MCI_DCRCE) {
+ } else if (status & ATMCI_DCRCE) {
dev_dbg(&host->pdev->dev,
"data CRC error\n");
data->error = -EILSEQ;
@@ -1330,7 +1503,7 @@ static void atmci_tasklet_func(unsigned long priv)
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS);
+ atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS);
}
if (!data->stop) {
@@ -1340,7 +1513,7 @@ static void atmci_tasklet_func(unsigned long priv)
prev_state = state = STATE_SENDING_STOP;
if (!data->error)
- send_stop_cmd(host, data);
+ atmci_send_stop_cmd(host, data);
/* fall through */
case STATE_SENDING_STOP:
@@ -1380,7 +1553,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
unsigned int nbytes = 0;
do {
- value = mci_readl(host, RDR);
+ value = atmci_readl(host, ATMCI_RDR);
if (likely(offset + 4 <= sg->length)) {
put_unaligned(value, (u32 *)(buf + offset));
@@ -1412,9 +1585,9 @@ static void atmci_read_data_pio(struct atmel_mci *host)
nbytes += offset;
}
- status = mci_readl(host, SR);
+ status = atmci_readl(host, ATMCI_SR);
if (status & ATMCI_DATA_ERROR_FLAGS) {
- mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
+ atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
@@ -1423,7 +1596,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
tasklet_schedule(&host->tasklet);
return;
}
- } while (status & MCI_RXRDY);
+ } while (status & ATMCI_RXRDY);
host->pio_offset = offset;
data->bytes_xfered += nbytes;
@@ -1431,8 +1604,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
return;
done:
- mci_writel(host, IDR, MCI_RXRDY);
- mci_writel(host, IER, MCI_NOTBUSY);
+ atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
data->bytes_xfered += nbytes;
smp_wmb();
atmci_set_pending(host, EVENT_XFER_COMPLETE);
@@ -1451,7 +1624,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
do {
if (likely(offset + 4 <= sg->length)) {
value = get_unaligned((u32 *)(buf + offset));
- mci_writel(host, TDR, value);
+ atmci_writel(host, ATMCI_TDR, value);
offset += 4;
nbytes += 4;
@@ -1472,20 +1645,20 @@ static void atmci_write_data_pio(struct atmel_mci *host)
host->sg = sg = sg_next(sg);
if (!sg) {
- mci_writel(host, TDR, value);
+ atmci_writel(host, ATMCI_TDR, value);
goto done;
}
offset = 4 - remaining;
buf = sg_virt(sg);
memcpy((u8 *)&value + remaining, buf, offset);
- mci_writel(host, TDR, value);
+ atmci_writel(host, ATMCI_TDR, value);
nbytes += offset;
}
- status = mci_readl(host, SR);
+ status = atmci_readl(host, ATMCI_SR);
if (status & ATMCI_DATA_ERROR_FLAGS) {
- mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
+ atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
@@ -1494,7 +1667,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
tasklet_schedule(&host->tasklet);
return;
}
- } while (status & MCI_TXRDY);
+ } while (status & ATMCI_TXRDY);
host->pio_offset = offset;
data->bytes_xfered += nbytes;
@@ -1502,8 +1675,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
return;
done:
- mci_writel(host, IDR, MCI_TXRDY);
- mci_writel(host, IER, MCI_NOTBUSY);
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
data->bytes_xfered += nbytes;
smp_wmb();
atmci_set_pending(host, EVENT_XFER_COMPLETE);
@@ -1511,7 +1684,7 @@ done:
static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
{
- mci_writel(host, IDR, MCI_CMDRDY);
+ atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
host->cmd_status = status;
smp_wmb();
@@ -1523,7 +1696,7 @@ static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
{
int i;
- for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
struct atmel_mci_slot *slot = host->slot[i];
if (slot && (status & slot->sdio_irq)) {
mmc_signal_sdio_irq(slot->mmc);
@@ -1539,40 +1712,92 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
unsigned int pass_count = 0;
do {
- status = mci_readl(host, SR);
- mask = mci_readl(host, IMR);
+ status = atmci_readl(host, ATMCI_SR);
+ mask = atmci_readl(host, ATMCI_IMR);
pending = status & mask;
if (!pending)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
- mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
- | MCI_RXRDY | MCI_TXRDY);
- pending &= mci_readl(host, IMR);
+ atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
+ | ATMCI_RXRDY | ATMCI_TXRDY);
+ pending &= atmci_readl(host, ATMCI_IMR);
host->data_status = status;
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
tasklet_schedule(&host->tasklet);
}
- if (pending & MCI_NOTBUSY) {
- mci_writel(host, IDR,
- ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY);
+
+ if (pending & ATMCI_TXBUFE) {
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
+ /*
+ * We can receive this interrupt before having configured
+ * the second PDC buffer, so we need to reconfigure the
+ * first and second buffers again.
+ */
+ if (host->data_size) {
+ atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
+ atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
+ } else {
+ atmci_pdc_complete(host);
+ }
+ } else if (pending & ATMCI_ENDTX) {
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
+
+ if (host->data_size) {
+ atmci_pdc_set_single_buf(host,
+ XFER_TRANSMIT, PDC_SECOND_BUF);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
+ }
+ }
+
+ if (pending & ATMCI_RXBUFF) {
+ atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
+ /*
+ * We can receive this interrupt before having configured
+ * the second PDC buffer, so we need to reconfigure the
+ * first and second buffers again.
+ */
+ if (host->data_size) {
+ atmci_pdc_set_both_buf(host, XFER_RECEIVE);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
+ atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
+ } else {
+ atmci_pdc_complete(host);
+ }
+ } else if (pending & ATMCI_ENDRX) {
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
+
+ if (host->data_size) {
+ atmci_pdc_set_single_buf(host,
+ XFER_RECEIVE, PDC_SECOND_BUF);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
+ }
+ }
+
+ if (pending & ATMCI_NOTBUSY) {
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY);
if (!host->data_status)
host->data_status = status;
smp_wmb();
atmci_set_pending(host, EVENT_DATA_COMPLETE);
tasklet_schedule(&host->tasklet);
}
- if (pending & MCI_RXRDY)
+ if (pending & ATMCI_RXRDY)
atmci_read_data_pio(host);
- if (pending & MCI_TXRDY)
+ if (pending & ATMCI_TXRDY)
atmci_write_data_pio(host);
- if (pending & MCI_CMDRDY)
+ if (pending & ATMCI_CMDRDY)
atmci_cmd_interrupt(host, status);
- if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
+ if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
atmci_sdio_interrupt(host, status);
} while (pass_count++ < 5);
@@ -1621,7 +1846,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (sdio_irq)
mmc->caps |= MMC_CAP_SDIO_IRQ;
- if (atmci_is_mci2())
+ if (host->caps.has_highspeed)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
if (slot_data->bus_width >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1704,8 +1929,7 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
mmc_free_host(slot->mmc);
}
-#ifdef CONFIG_MMC_ATMELMCI_DMA
-static bool filter(struct dma_chan *chan, void *slave)
+static bool atmci_filter(struct dma_chan *chan, void *slave)
{
struct mci_dma_data *sl = slave;
@@ -1730,14 +1954,14 @@ static void atmci_configure_dma(struct atmel_mci *host)
dma_cap_mask_t mask;
setup_dma_addr(pdata->dma_slave,
- host->mapbase + MCI_TDR,
- host->mapbase + MCI_RDR);
+ host->mapbase + ATMCI_TDR,
+ host->mapbase + ATMCI_RDR);
/* Try to grab a DMA channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->dma.chan =
- dma_request_channel(mask, filter, pdata->dma_slave);
+ dma_request_channel(mask, atmci_filter, pdata->dma_slave);
}
if (!host->dma.chan)
dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
@@ -1746,9 +1970,60 @@ static void atmci_configure_dma(struct atmel_mci *host)
"Using %s for DMA transfers\n",
dma_chan_name(host->dma.chan));
}
+
+static inline unsigned int atmci_get_version(struct atmel_mci *host)
+{
+ return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
+}
+
+/*
+ * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
+ * module. HSMCI provides DMA support and a new config register but no longer
+ * supports the PDC.
+ */
+static void __init atmci_get_cap(struct atmel_mci *host)
+{
+ unsigned int version;
+
+ version = atmci_get_version(host);
+ dev_info(&host->pdev->dev,
+ "version: 0x%x\n", version);
+
+ host->caps.has_dma = 0;
+ host->caps.has_pdc = 0;
+ host->caps.has_cfg_reg = 0;
+ host->caps.has_cstor_reg = 0;
+ host->caps.has_highspeed = 0;
+ host->caps.has_rwproof = 0;
+
+ /* keep only major version number */
+ switch (version & 0xf00) {
+ case 0x100:
+ case 0x200:
+ host->caps.has_pdc = 1;
+ host->caps.has_rwproof = 1;
+ break;
+ case 0x300:
+ case 0x400:
+ case 0x500:
+#ifdef CONFIG_AT_HDMAC
+ host->caps.has_dma = 1;
#else
-static void atmci_configure_dma(struct atmel_mci *host) {}
+ host->caps.has_dma = 0;
+ dev_info(&host->pdev->dev,
+ "has DMA capability but DMA engine is not selected, using PIO\n");
#endif
+ host->caps.has_cfg_reg = 1;
+ host->caps.has_cstor_reg = 1;
+ host->caps.has_highspeed = 1;
+ host->caps.has_rwproof = 1;
+ break;
+ default:
+ dev_warn(&host->pdev->dev,
+ "Unknown MCI version, using minimum capabilities\n");
+ break;
+ }
+}
static int __init atmci_probe(struct platform_device *pdev)
{
@@ -1789,7 +2064,7 @@ static int __init atmci_probe(struct platform_device *pdev)
goto err_ioremap;
clk_enable(host->mck);
- mci_writel(host, CR, MCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
host->bus_hz = clk_get_rate(host->mck);
clk_disable(host->mck);
@@ -1801,7 +2076,27 @@ static int __init atmci_probe(struct platform_device *pdev)
if (ret)
goto err_request_irq;
- atmci_configure_dma(host);
+ /* Get MCI capabilities and set operations according to it */
+ atmci_get_cap(host);
+ if (host->caps.has_dma) {
+ dev_info(&pdev->dev, "using DMA\n");
+ host->prepare_data = &atmci_prepare_data_dma;
+ host->submit_data = &atmci_submit_data_dma;
+ host->stop_transfer = &atmci_stop_transfer_dma;
+ } else if (host->caps.has_pdc) {
+ dev_info(&pdev->dev, "using PDC\n");
+ host->prepare_data = &atmci_prepare_data_pdc;
+ host->submit_data = &atmci_submit_data_pdc;
+ host->stop_transfer = &atmci_stop_transfer_pdc;
+ } else {
+ dev_info(&pdev->dev, "no DMA, no PDC\n");
+ host->prepare_data = &atmci_prepare_data;
+ host->submit_data = &atmci_submit_data;
+ host->stop_transfer = &atmci_stop_transfer;
+ }
+
+ if (host->caps.has_dma)
+ atmci_configure_dma(host);
platform_set_drvdata(pdev, host);
@@ -1810,13 +2105,13 @@ static int __init atmci_probe(struct platform_device *pdev)
ret = -ENODEV;
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
- 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
+ 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
if (!ret)
nr_slots++;
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
- 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
+ 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
if (!ret)
nr_slots++;
}
@@ -1833,10 +2128,8 @@ static int __init atmci_probe(struct platform_device *pdev)
return 0;
err_init_slot:
-#ifdef CONFIG_MMC_ATMELMCI_DMA
if (host->dma.chan)
dma_release_channel(host->dma.chan);
-#endif
free_irq(irq, host);
err_request_irq:
iounmap(host->regs);
@@ -1854,15 +2147,15 @@ static int __exit atmci_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
}
clk_enable(host->mck);
- mci_writel(host, IDR, ~0UL);
- mci_writel(host, CR, MCI_CR_MCIDIS);
- mci_readl(host, SR);
+ atmci_writel(host, ATMCI_IDR, ~0UL);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
+ atmci_readl(host, ATMCI_SR);
clk_disable(host->mck);
#ifdef CONFIG_MMC_ATMELMCI_DMA
@@ -1885,7 +2178,7 @@ static int atmci_suspend(struct device *dev)
struct atmel_mci *host = dev_get_drvdata(dev);
int i;
- for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
struct atmel_mci_slot *slot = host->slot[i];
int ret;
@@ -1916,7 +2209,7 @@ static int atmci_resume(struct device *dev)
int i;
int ret = 0;
- for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
struct atmel_mci_slot *slot = host->slot[i];
int err;
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index ef72e874ca3..5d3b9ae6452 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -55,7 +55,7 @@
#ifdef DEBUG
#define DBG(fmt, idx, args...) \
- printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args)
+ pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...) do {} while (0)
#endif
@@ -64,11 +64,8 @@
#define AU1XMMC_DESCRIPTOR_COUNT 1
/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
-#ifdef CONFIG_SOC_AU1100
-#define AU1XMMC_DESCRIPTOR_SIZE 0x0000ffff
-#else /* Au1200 */
-#define AU1XMMC_DESCRIPTOR_SIZE 0x003fffff
-#endif
+#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
+#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff
#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
@@ -127,6 +124,7 @@ struct au1xmmc_host {
#define HOST_F_XMIT 0x0001
#define HOST_F_RECV 0x0002
#define HOST_F_DMA 0x0010
+#define HOST_F_DBDMA 0x0020
#define HOST_F_ACTIVE 0x0100
#define HOST_F_STOP 0x1000
@@ -151,6 +149,16 @@ struct au1xmmc_host {
#define DMA_CHANNEL(h) \
(((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
+static inline int has_dbdma(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1200:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
u32 val = au_readl(HOST_CONFIG(host));
@@ -268,7 +276,7 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
mmccmd |= SD_CMD_RT_3;
break;
default:
- printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
+ pr_info("au1xmmc: unhandled response type %02x\n",
mmc_resp_type(cmd));
return -EINVAL;
}
@@ -353,14 +361,12 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
data->bytes_xfered = 0;
if (!data->error) {
- if (host->flags & HOST_F_DMA) {
-#ifdef CONFIG_SOC_AU1200 /* DBDMA */
+ if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
u32 chan = DMA_CHANNEL(host);
chan_tab_t *c = *((chan_tab_t **)chan);
au1x_dma_chan_t *cp = c->chan_ptr;
data->bytes_xfered = cp->ddma_bytecnt;
-#endif
} else
data->bytes_xfered =
(data->blocks * data->blksz) - host->pio.len;
@@ -570,11 +576,10 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
host->status = HOST_S_DATA;
- if (host->flags & HOST_F_DMA) {
-#ifdef CONFIG_SOC_AU1200 /* DBDMA */
+ if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
u32 channel = DMA_CHANNEL(host);
- /* Start the DMA as soon as the buffer gets something in it */
+ /* Start the DBDMA as soon as the buffer gets something in it */
if (host->flags & HOST_F_RECV) {
u32 mask = SD_STATUS_DB | SD_STATUS_NE;
@@ -584,7 +589,6 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
}
au1xxx_dbdma_start(channel);
-#endif
}
}
@@ -633,8 +637,7 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,
au_writel(data->blksz - 1, HOST_BLKSIZE(host));
- if (host->flags & HOST_F_DMA) {
-#ifdef CONFIG_SOC_AU1200 /* DBDMA */
+ if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
int i;
u32 channel = DMA_CHANNEL(host);
@@ -663,7 +666,6 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,
datalen -= len;
}
-#endif
} else {
host->pio.index = 0;
host->pio.offset = 0;
@@ -838,7 +840,6 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_SOC_AU1200
/* 8bit memory DMA device */
static dbdev_tab_t au1xmmc_mem_dbdev = {
.dev_id = DSCR_CMD0_ALWAYS,
@@ -905,7 +906,7 @@ static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
/* DBDMA is good to go */
- host->flags |= HOST_F_DMA;
+ host->flags |= HOST_F_DMA | HOST_F_DBDMA;
return 0;
}
@@ -918,7 +919,6 @@ static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
au1xxx_dbdma_chan_free(host->rx_chan);
}
}
-#endif
static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
{
@@ -997,8 +997,16 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
mmc->f_min = 450000;
mmc->f_max = 24000000;
- mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
- mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1100:
+ mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
+ mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+ break;
+ case ALCHEMY_CPU_AU1200:
+ mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
+ mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+ break;
+ }
mmc->max_blk_size = 2048;
mmc->max_blk_count = 512;
@@ -1028,11 +1036,11 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
(unsigned long)host);
-#ifdef CONFIG_SOC_AU1200
- ret = au1xmmc_dbdma_init(host);
- if (ret)
- printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n");
-#endif
+ if (has_dbdma()) {
+ ret = au1xmmc_dbdma_init(host);
+ if (ret)
+ pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n");
+ }
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led) {
@@ -1056,7 +1064,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
+ pr_info(DRIVER_NAME ": MMC Controller %d set up at %8.8X"
" (mode=%s)\n", pdev->id, host->iobase,
host->flags & HOST_F_DMA ? "dma" : "pio");
@@ -1073,9 +1081,8 @@ out5:
au_writel(0, HOST_CONFIG2(host));
au_sync();
-#ifdef CONFIG_SOC_AU1200
- au1xmmc_dbdma_shutdown(host);
-#endif
+ if (host->flags & HOST_F_DBDMA)
+ au1xmmc_dbdma_shutdown(host);
tasklet_kill(&host->data_task);
tasklet_kill(&host->finish_task);
@@ -1120,9 +1127,9 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
tasklet_kill(&host->data_task);
tasklet_kill(&host->finish_task);
-#ifdef CONFIG_SOC_AU1200
- au1xmmc_dbdma_shutdown(host);
-#endif
+ if (host->flags & HOST_F_DBDMA)
+ au1xmmc_dbdma_shutdown(host);
+
au1xmmc_set_power(host, 0);
free_irq(host->irq, host);
@@ -1181,24 +1188,23 @@ static struct platform_driver au1xmmc_driver = {
static int __init au1xmmc_init(void)
{
-#ifdef CONFIG_SOC_AU1200
- /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
- * of 8 bits. And since devices are shared, we need to create
- * our own to avoid freaking out other devices.
- */
- memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
- if (!memid)
- printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n");
-#endif
+ if (has_dbdma()) {
+ /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
+ * of 8 bits. And since devices are shared, we need to create
+ * our own to avoid freaking out other devices.
+ */
+ memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
+ if (!memid)
+ pr_err("au1xmmc: cannot add memory dbdma\n");
+ }
return platform_driver_register(&au1xmmc_driver);
}
static void __exit au1xmmc_exit(void)
{
-#ifdef CONFIG_SOC_AU1200
- if (memid)
+ if (has_dbdma() && memid)
au1xxx_ddma_del_device(memid);
-#endif
+
platform_driver_unregister(&au1xmmc_driver);
}
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 0076c7448fe..64a8325a4a8 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -807,12 +807,25 @@ static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmc_davinci_host *host = mmc_priv(mmc);
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
dev_dbg(mmc_dev(host->mmc),
"clock %dHz busmode %d powermode %d Vdd %04x\n",
ios->clock, ios->bus_mode, ios->power_mode,
ios->vdd);
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (config && config->set_power)
+ config->set_power(pdev->id, false);
+ break;
+ case MMC_POWER_UP:
+ if (config && config->set_power)
+ config->set_power(pdev->id, true);
+ break;
+ }
+
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ff0f714b012..3aaeb084191 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -764,11 +764,29 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
return present;
}
+static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ u32 int_mask;
+
+ /* Enable/disable Slot Specific SDIO interrupt */
+ int_mask = mci_readl(host, INTMASK);
+ if (enb) {
+ mci_writel(host, INTMASK,
+ (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+ } else {
+ mci_writel(host, INTMASK,
+ (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+ }
+}
+
static const struct mmc_host_ops dw_mci_ops = {
- .request = dw_mci_request,
- .set_ios = dw_mci_set_ios,
- .get_ro = dw_mci_get_ro,
- .get_cd = dw_mci_get_cd,
+ .request = dw_mci_request,
+ .set_ios = dw_mci_set_ios,
+ .get_ro = dw_mci_get_ro,
+ .get_cd = dw_mci_get_cd,
+ .enable_sdio_irq = dw_mci_enable_sdio_irq,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -1025,7 +1043,8 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
buf += len;
cnt -= len;
if (!sg_next(host->sg) || host->part_buf_count == 2) {
- mci_writew(host, DATA, host->part_buf16);
+ mci_writew(host, DATA(host->data_offset),
+ host->part_buf16);
host->part_buf_count = 0;
}
}
@@ -1042,21 +1061,23 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
- mci_writew(host, DATA, aligned_buf[i]);
+ mci_writew(host, DATA(host->data_offset),
+ aligned_buf[i]);
}
} else
#endif
{
u16 *pdata = buf;
for (; cnt >= 2; cnt -= 2)
- mci_writew(host, DATA, *pdata++);
+ mci_writew(host, DATA(host->data_offset), *pdata++);
buf = pdata;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
if (!sg_next(host->sg))
- mci_writew(host, DATA, host->part_buf16);
+ mci_writew(host, DATA(host->data_offset),
+ host->part_buf16);
}
}
@@ -1071,7 +1092,8 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
int items = len >> 1;
int i;
for (i = 0; i < items; ++i)
- aligned_buf[i] = mci_readw(host, DATA);
+ aligned_buf[i] = mci_readw(host,
+ DATA(host->data_offset));
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
@@ -1082,11 +1104,11 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
u16 *pdata = buf;
for (; cnt >= 2; cnt -= 2)
- *pdata++ = mci_readw(host, DATA);
+ *pdata++ = mci_readw(host, DATA(host->data_offset));
buf = pdata;
}
if (cnt) {
- host->part_buf16 = mci_readw(host, DATA);
+ host->part_buf16 = mci_readw(host, DATA(host->data_offset));
dw_mci_pull_final_bytes(host, buf, cnt);
}
}
@@ -1099,7 +1121,8 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
buf += len;
cnt -= len;
if (!sg_next(host->sg) || host->part_buf_count == 4) {
- mci_writel(host, DATA, host->part_buf32);
+ mci_writel(host, DATA(host->data_offset),
+ host->part_buf32);
host->part_buf_count = 0;
}
}
@@ -1116,21 +1139,23 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
- mci_writel(host, DATA, aligned_buf[i]);
+ mci_writel(host, DATA(host->data_offset),
+ aligned_buf[i]);
}
} else
#endif
{
u32 *pdata = buf;
for (; cnt >= 4; cnt -= 4)
- mci_writel(host, DATA, *pdata++);
+ mci_writel(host, DATA(host->data_offset), *pdata++);
buf = pdata;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
if (!sg_next(host->sg))
- mci_writel(host, DATA, host->part_buf32);
+ mci_writel(host, DATA(host->data_offset),
+ host->part_buf32);
}
}
@@ -1145,7 +1170,8 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
int items = len >> 2;
int i;
for (i = 0; i < items; ++i)
- aligned_buf[i] = mci_readl(host, DATA);
+ aligned_buf[i] = mci_readl(host,
+ DATA(host->data_offset));
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
@@ -1156,11 +1182,11 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
u32 *pdata = buf;
for (; cnt >= 4; cnt -= 4)
- *pdata++ = mci_readl(host, DATA);
+ *pdata++ = mci_readl(host, DATA(host->data_offset));
buf = pdata;
}
if (cnt) {
- host->part_buf32 = mci_readl(host, DATA);
+ host->part_buf32 = mci_readl(host, DATA(host->data_offset));
dw_mci_pull_final_bytes(host, buf, cnt);
}
}
@@ -1173,7 +1199,8 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
buf += len;
cnt -= len;
if (!sg_next(host->sg) || host->part_buf_count == 8) {
- mci_writew(host, DATA, host->part_buf);
+ mci_writew(host, DATA(host->data_offset),
+ host->part_buf);
host->part_buf_count = 0;
}
}
@@ -1190,21 +1217,23 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
- mci_writeq(host, DATA, aligned_buf[i]);
+ mci_writeq(host, DATA(host->data_offset),
+ aligned_buf[i]);
}
} else
#endif
{
u64 *pdata = buf;
for (; cnt >= 8; cnt -= 8)
- mci_writeq(host, DATA, *pdata++);
+ mci_writeq(host, DATA(host->data_offset), *pdata++);
buf = pdata;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
if (!sg_next(host->sg))
- mci_writeq(host, DATA, host->part_buf);
+ mci_writeq(host, DATA(host->data_offset),
+ host->part_buf);
}
}
@@ -1219,7 +1248,8 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
int items = len >> 3;
int i;
for (i = 0; i < items; ++i)
- aligned_buf[i] = mci_readq(host, DATA);
+ aligned_buf[i] = mci_readq(host,
+ DATA(host->data_offset));
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
@@ -1230,11 +1260,11 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
u64 *pdata = buf;
for (; cnt >= 8; cnt -= 8)
- *pdata++ = mci_readq(host, DATA);
+ *pdata++ = mci_readq(host, DATA(host->data_offset));
buf = pdata;
}
if (cnt) {
- host->part_buf = mci_readq(host, DATA);
+ host->part_buf = mci_readq(host, DATA(host->data_offset));
dw_mci_pull_final_bytes(host, buf, cnt);
}
}
@@ -1406,6 +1436,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
struct dw_mci *host = dev_id;
u32 status, pending;
unsigned int pass_count = 0;
+ int i;
do {
status = mci_readl(host, RINTSTS);
@@ -1477,6 +1508,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
queue_work(dw_mci_card_workqueue, &host->card_work);
}
+ /* Handle SDIO Interrupts */
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ if (pending & SDMMC_INT_SDIO(i)) {
+ mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
+ mmc_signal_sdio_irq(slot->mmc);
+ }
+ }
+
} while (pass_count++ < 5);
#ifdef CONFIG_MMC_DW_IDMAC
@@ -1673,7 +1713,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
if (IS_ERR(host->vmmc)) {
- printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
} else
regulator_enable(host->vmmc);
@@ -1924,6 +1964,18 @@ static int dw_mci_probe(struct platform_device *pdev)
}
/*
+ * In the 2.40a spec, the data offset changed.
+ * Check the version ID and set the data offset for the DATA register.
+ */
+ host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
+ dev_info(&pdev->dev, "Version ID is %04x\n", host->verid);
+
+ if (host->verid < DW_MMC_240A)
+ host->data_offset = DATA_OFFSET;
+ else
+ host->data_offset = DATA_240A_OFFSET;
+
+ /*
* Enable interrupts for command done, data over, data empty, card det,
* receive ready and error such as transmit, receive timeout, crc error
*/
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 027d3773539..72c071f6e00 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -14,6 +14,8 @@
#ifndef _DW_MMC_H_
#define _DW_MMC_H_
+#define DW_MMC_240A 0x240a
+
#define SDMMC_CTRL 0x000
#define SDMMC_PWREN 0x004
#define SDMMC_CLKDIV 0x008
@@ -51,7 +53,14 @@
#define SDMMC_IDINTEN 0x090
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
-#define SDMMC_DATA 0x100
+#define SDMMC_DATA(x) (x)
+
+/*
+ * The data register offset differs according to the version:
+ * lower than 2.40a, the data register offset is 0x100.
+ */
+#define DATA_OFFSET 0x100
+#define DATA_240A_OFFSET 0x200
/* shift bit field */
#define _SBF(f, v) ((v) << (f))
@@ -82,7 +91,7 @@
#define SDMMC_CTYPE_4BIT BIT(0)
#define SDMMC_CTYPE_1BIT 0
/* Interrupt status & mask register defines */
-#define SDMMC_INT_SDIO BIT(16)
+#define SDMMC_INT_SDIO(n) BIT(16 + (n))
#define SDMMC_INT_EBE BIT(15)
#define SDMMC_INT_ACD BIT(14)
#define SDMMC_INT_SBE BIT(13)
@@ -130,6 +139,8 @@
#define SDMMC_IDMAC_ENABLE BIT(7)
#define SDMMC_IDMAC_FB BIT(1)
#define SDMMC_IDMAC_SWRESET BIT(0)
+/* Version ID register define */
+#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Register access macros */
#define mci_readl(dev, reg) \
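
A minimal sketch of how the pieces introduced above fit together; the constants are the ones from this hunk, and pick_data_offset() is only an illustrative helper, not part of the driver:

	#define DW_MMC_240A		0x240a
	#define DATA_OFFSET		0x100
	#define DATA_240A_OFFSET	0x200
	#define SDMMC_DATA(x)		(x)
	#define SDMMC_GET_VERID(x)	((x) & 0xFFFF)

	/* Illustrative helper: map the raw VERID register value to the DATA
	 * register offset that the mci_readq()/mci_writeq() calls should use. */
	static unsigned int pick_data_offset(unsigned int verid_reg)
	{
		unsigned int verid = SDMMC_GET_VERID(verid_reg);

		/* controllers older than 2.40a keep the DATA register at 0x100 */
		if (verid < DW_MMC_240A)
			return SDMMC_DATA(DATA_OFFSET);

		/* 2.40a and later moved it to 0x200 */
		return SDMMC_DATA(DATA_240A_OFFSET);
	}

Assuming the mci_* helpers paste an SDMMC_ prefix onto their register argument as dw_mmc.h does, DATA(host->data_offset) then resolves through SDMMC_DATA(x) to whichever offset was chosen at probe time.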
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 881f7ba545a..ea0f3cedef2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -942,7 +942,7 @@ static int __init imxmci_probe(struct platform_device *pdev)
int ret = 0, irq;
u16 rev_no;
- printk(KERN_INFO "i.MX mmc driver\n");
+ pr_info("i.MX mmc driver\n");
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 7c1e16aaf17..92946b84e9f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/crc7.h>
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 56e9a416826..50b5f9926f6 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -29,6 +29,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
+#include <linux/pm_runtime.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -170,6 +171,7 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
* back into the driver...
*/
spin_unlock(&host->lock);
+ pm_runtime_put(mmc_dev(host->mmc));
mmc_request_done(host->mmc, mrq);
spin_lock(&host->lock);
}
@@ -464,7 +466,7 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
struct mmci_host_next *next = &host->next_data;
if (data->host_cookie && data->host_cookie != next->cookie) {
- printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+ pr_warning("[%s] invalid cookie: data->host_cookie %d"
" host->next_data.cookie %d\n",
__func__, data->host_cookie, host->next_data.cookie);
data->host_cookie = 0;
@@ -529,7 +531,7 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (chan) {
if (err)
dmaengine_terminate_all(chan);
- if (err || data->host_cookie)
+ if (data->host_cookie)
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, dir);
mrq->data->host_cookie = 0;
@@ -984,6 +986,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
return;
}
+ pm_runtime_get_sync(mmc_dev(mmc));
+
spin_lock_irqsave(&host->lock, flags);
host->mrq = mrq;
@@ -1156,10 +1160,14 @@ static int __devinit mmci_probe(struct amba_device *dev,
goto host_free;
}
- ret = clk_enable(host->clk);
+ ret = clk_prepare(host->clk);
if (ret)
goto clk_free;
+ ret = clk_enable(host->clk);
+ if (ret)
+ goto clk_unprep;
+
host->plat = plat;
host->variant = variant;
host->mclk = clk_get_rate(host->clk);
@@ -1327,6 +1335,8 @@ static int __devinit mmci_probe(struct amba_device *dev,
mmci_dma_setup(host);
+ pm_runtime_put(&dev->dev);
+
mmc_add_host(mmc);
return 0;
@@ -1345,6 +1355,8 @@ static int __devinit mmci_probe(struct amba_device *dev,
iounmap(host->base);
clk_disable:
clk_disable(host->clk);
+ clk_unprep:
+ clk_unprepare(host->clk);
clk_free:
clk_put(host->clk);
host_free:
@@ -1364,6 +1376,12 @@ static int __devexit mmci_remove(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ /*
+ * Undo pm_runtime_put() in probe. We use the _sync
+ * version here so that we can access the primecell.
+ */
+ pm_runtime_get_sync(&dev->dev);
+
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
@@ -1386,6 +1404,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
iounmap(host->base);
clk_disable(host->clk);
+ clk_unprepare(host->clk);
clk_put(host->clk);
if (host->vcc)
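
The clk_prepare()/clk_unprepare() additions above follow the common clock framework pairing: prepare (which may sleep) before enable, and unwind in the opposite order on error and in remove, which is why the probe error path gains a clk_unprep label. A minimal sketch of that pairing, not mmci itself; the "example" names and the NULL clock id are placeholders:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_clk_up(struct device *dev, struct clk **out)
	{
		struct clk *clk = clk_get(dev, NULL);	/* clock id is illustrative */
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare(clk);		/* may sleep, so done before clk_enable() */
		if (ret)
			goto err_put;

		ret = clk_enable(clk);		/* atomic-safe counterpart */
		if (ret)
			goto err_unprepare;

		*out = clk;
		return 0;

	err_unprepare:
		clk_unprepare(clk);
	err_put:
		clk_put(clk);
		return ret;
	}

	static void example_clk_down(struct clk *clk)
	{
		clk_disable(clk);
		clk_unprepare(clk);
		clk_put(clk);
	}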
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index a4c865a5286..80d8eb143b4 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -213,7 +213,8 @@ msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
MMCIDATALENGTH);
- msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) &
+ (~MCI_IRQ_PIO)) | host->cmd_pio_irqmask, MMCIMASK0);
msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
if (host->cmd_cmd) {
@@ -388,7 +389,7 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
host->dma.num_ents, host->dma.dir);
if (n == 0) {
- printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+ pr_err("%s: Unable to map in all sg elements\n",
mmc_hostname(host->mmc));
host->dma.sg = NULL;
host->dma.num_ents = 0;
@@ -474,7 +475,7 @@ msmsdcc_start_command_deferred(struct msmsdcc_host *host,
*c |= MCI_CSPM_MCIABORT;
if (host->curr.cmd != NULL) {
- printk(KERN_ERR "%s: Overlapping command requests\n",
+ pr_err("%s: Overlapping command requests\n",
mmc_hostname(host->mmc));
}
host->curr.cmd = cmd;
@@ -543,7 +544,9 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
- msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) &
+ (~MCI_IRQ_PIO)) | pio_irqmask, MMCIMASK0);
+
msmsdcc_writel(host, datactrl, MMCIDATACTRL);
if (cmd) {
@@ -659,8 +662,13 @@ msmsdcc_pio_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
uint32_t status;
+ u32 mci_mask0;
status = msmsdcc_readl(host, MMCISTATUS);
+ mci_mask0 = msmsdcc_readl(host, MMCIMASK0);
+
+ if (((mci_mask0 & status) & MCI_IRQ_PIO) == 0)
+ return IRQ_NONE;
do {
unsigned long flags;
@@ -719,10 +727,12 @@ msmsdcc_pio_irq(int irq, void *dev_id)
} while (1);
if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
- msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
+ msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) |
+ MCI_RXDATAAVLBLMASK, MMCIMASK0);
if (!host->curr.xfer_remain)
- msmsdcc_writel(host, 0, MMCIMASK1);
+ msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | 0,
+ MMCIMASK0);
return IRQ_HANDLED;
}
@@ -854,6 +864,8 @@ msmsdcc_irq(int irq, void *dev_id)
do {
status = msmsdcc_readl(host, MMCISTATUS);
status &= msmsdcc_readl(host, MMCIMASK0);
+ if ((status & (~MCI_IRQ_PIO)) == 0)
+ break;
msmsdcc_writel(host, status, MMCICLEAR);
if (status & MCI_SDIOINTR)
@@ -939,7 +951,7 @@ static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
struct msm_mmc_gpio_data *curr;
int i, rc = 0;
- if (!host->plat->gpio_data && host->gpio_config_status == enable)
+ if (!host->plat->gpio_data || host->gpio_config_status == enable)
return;
curr = host->plat->gpio_data;
@@ -1052,10 +1064,19 @@ static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void msmsdcc_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct msmsdcc_host *host = mmc_priv(mmc);
+
+ if (host->plat->init_card)
+ host->plat->init_card(card);
+}
+
static const struct mmc_host_ops msmsdcc_ops = {
.request = msmsdcc_request,
.set_ios = msmsdcc_set_ios,
.enable_sdio_irq = msmsdcc_enable_sdio_irq,
+ .init_card = msmsdcc_init_card,
};
static void
@@ -1092,7 +1113,7 @@ msmsdcc_platform_status_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
- printk(KERN_DEBUG "%s: %d\n", __func__, irq);
+ pr_debug("%s: %d\n", __func__, irq);
msmsdcc_check_status((unsigned long) host);
return IRQ_HANDLED;
}
@@ -1102,7 +1123,7 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
- printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
+ pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc),
card_present);
msmsdcc_check_status((unsigned long) host);
}
@@ -1150,7 +1171,6 @@ msmsdcc_probe(struct platform_device *pdev)
struct msmsdcc_host *host;
struct mmc_host *mmc;
struct resource *cmd_irqres = NULL;
- struct resource *pio_irqres = NULL;
struct resource *stat_irqres = NULL;
struct resource *memres = NULL;
struct resource *dmares = NULL;
@@ -1175,12 +1195,10 @@ msmsdcc_probe(struct platform_device *pdev)
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
"cmd_irq");
- pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "pio_irq");
stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
"status_irq");
- if (!cmd_irqres || !pio_irqres || !memres) {
+ if (!cmd_irqres || !memres) {
pr_err("%s: Invalid resource\n", __func__);
return -ENXIO;
}
@@ -1200,17 +1218,20 @@ msmsdcc_probe(struct platform_device *pdev)
host->plat = plat;
host->mmc = mmc;
host->curr.cmd = NULL;
+ init_timer(&host->busclk_timer);
+ host->busclk_timer.data = (unsigned long) host;
+ host->busclk_timer.function = msmsdcc_busclk_expired;
+
host->cmdpoll = 1;
host->base = ioremap(memres->start, PAGE_SIZE);
if (!host->base) {
ret = -ENOMEM;
- goto out;
+ goto host_free;
}
host->cmd_irqres = cmd_irqres;
- host->pio_irqres = pio_irqres;
host->memres = memres;
host->dmares = dmares;
spin_lock_init(&host->lock);
@@ -1221,13 +1242,19 @@ msmsdcc_probe(struct platform_device *pdev)
/*
* Setup DMA
*/
- msmsdcc_init_dma(host);
+ if (host->dmares) {
+ ret = msmsdcc_init_dma(host);
+ if (ret)
+ goto ioremap_free;
+ } else {
+ host->dma.channel = -1;
+ }
/* Get our clocks */
host->pclk = clk_get(&pdev->dev, "sdc_pclk");
if (IS_ERR(host->pclk)) {
ret = PTR_ERR(host->pclk);
- goto host_free;
+ goto dma_free;
}
host->clk = clk_get(&pdev->dev, "sdc_clk");
@@ -1236,17 +1263,17 @@ msmsdcc_probe(struct platform_device *pdev)
goto pclk_put;
}
- /* Enable clocks */
- ret = msmsdcc_enable_clocks(host);
- if (ret)
- goto clk_put;
-
ret = clk_set_rate(host->clk, msmsdcc_fmin);
if (ret) {
pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
- goto clk_disable;
+ goto clk_put;
}
+ /* Enable clocks */
+ ret = msmsdcc_enable_clocks(host);
+ if (ret)
+ goto clk_put;
+
host->pclk_rate = clk_get_rate(host->pclk);
host->clk_rate = clk_get_rate(host->clk);
@@ -1316,16 +1343,12 @@ msmsdcc_probe(struct platform_device *pdev)
host->eject = !host->oldstat;
}
- init_timer(&host->busclk_timer);
- host->busclk_timer.data = (unsigned long) host;
- host->busclk_timer.function = msmsdcc_busclk_expired;
-
ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
DRIVER_NAME " (cmd)", host);
if (ret)
goto stat_irq_free;
- ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
+ ret = request_irq(cmd_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
DRIVER_NAME " (pio)", host);
if (ret)
goto cmd_irq_free;
@@ -1368,6 +1391,13 @@ msmsdcc_probe(struct platform_device *pdev)
clk_put(host->clk);
pclk_put:
clk_put(host->pclk);
+dma_free:
+ if (host->dmares)
+ dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata),
+ host->dma.nc, host->dma.nc_busaddr);
+ioremap_free:
+ tasklet_kill(&host->dma_tlet);
+ iounmap(host->base);
host_free:
mmc_free_host(mmc);
out:
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 42d7bbc977c..402028d16b8 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -140,6 +140,11 @@
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK)
+#define MCI_IRQ_PIO \
+ (MCI_RXDATAAVLBLMASK | MCI_TXDATAAVLBLMASK | MCI_RXFIFOEMPTYMASK | \
+ MCI_TXFIFOEMPTYMASK | MCI_RXFIFOFULLMASK | MCI_TXFIFOFULLMASK | \
+ MCI_RXFIFOHALFFULLMASK | MCI_TXFIFOHALFEMPTYMASK | \
+ MCI_RXACTIVEMASK | MCI_TXACTIVEMASK)
/*
* The size of the FIFO in bytes.
*/
@@ -202,7 +207,6 @@ struct msmsdcc_stats {
struct msmsdcc_host {
struct resource *cmd_irqres;
- struct resource *pio_irqres;
struct resource *memres;
struct resource *dmares;
void __iomem *base;
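
With the separate "pio_irq" resource gone, msmsdcc_irq() and msmsdcc_pio_irq() now share cmd_irqres->start via IRQF_SHARED, which is why each handler checks only its own bits (MCI_IRQ_PIO above) and returns IRQ_NONE when the interrupt is not for it. A minimal sketch of that shared-handler pattern; the register offset and bit mask here are hypothetical:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	#define EXAMPLE_STATUS_REG	0x34		/* hypothetical status register */
	#define EXAMPLE_MY_BITS		0x00ff0000	/* bits this handler services */

	static irqreturn_t example_shared_handler(int irq, void *dev_id)
	{
		void __iomem *base = dev_id;
		u32 status = readl(base + EXAMPLE_STATUS_REG);

		/* Not our bits: let the other handler on this shared line run. */
		if (!(status & EXAMPLE_MY_BITS))
			return IRQ_NONE;

		/* ... service and clear EXAMPLE_MY_BITS here ... */
		return IRQ_HANDLED;
	}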
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index a5bf60e01af..211a4959c29 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -117,7 +117,7 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
host->pio_size = data->blocks * data->blksz;
host->pio_ptr = sg_virt(data->sg);
if (!nodma)
- printk(KERN_DEBUG "%s: fallback to PIO for data "
+ pr_debug("%s: fallback to PIO for data "
"at 0x%p size %d\n",
mmc_hostname(host->mmc),
host->pio_ptr, host->pio_size);
@@ -471,7 +471,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
if (mrq->data)
err_status = mvsd_finish_data(host, mrq->data, err_status);
if (err_status) {
- printk(KERN_ERR "%s: unhandled error status %#04x\n",
+ pr_err("%s: unhandled error status %#04x\n",
mmc_hostname(host->mmc), err_status);
cmd->error = -ENOMSG;
}
@@ -489,7 +489,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
if (irq_handled)
return IRQ_HANDLED;
- printk(KERN_ERR "%s: unhandled interrupt status=0x%04x en=0x%04x "
+ pr_err("%s: unhandled interrupt status=0x%04x en=0x%04x "
"pio=%d\n", mmc_hostname(host->mmc), intr_status,
host->intr_en, host->pio_size);
return IRQ_NONE;
@@ -505,9 +505,9 @@ static void mvsd_timeout_timer(unsigned long data)
spin_lock_irqsave(&host->lock, flags);
mrq = host->mrq;
if (mrq) {
- printk(KERN_ERR "%s: Timeout waiting for hardware interrupt.\n",
+ pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
- printk(KERN_ERR "%s: hw_state=0x%04x, intr_status=0x%04x "
+ pr_err("%s: hw_state=0x%04x, intr_status=0x%04x "
"intr_en=0x%04x\n", mmc_hostname(host->mmc),
mvsd_read(MVSD_HW_STATE),
mvsd_read(MVSD_NOR_INTR_STATUS),
@@ -762,7 +762,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host);
if (ret) {
- printk(KERN_ERR "%s: cannot assign irq %d\n", DRIVER_NAME, irq);
+ pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
goto out;
} else
host->irq = irq;
@@ -802,7 +802,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (ret)
goto out;
- printk(KERN_NOTICE "%s: %s driver initialized, ",
+ pr_notice("%s: %s driver initialized, ",
mmc_hostname(mmc), DRIVER_NAME);
if (host->gpio_card_detect)
printk("using GPIO %d for card detection\n",
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 14aa213b00d..325ea61e12d 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -40,6 +40,7 @@
#include <mach/mmc.h>
#include <mach/dma.h>
+#include <mach/hardware.h>
#define DRIVER_NAME "mxc-mmc"
@@ -842,7 +843,7 @@ static int mxcmci_probe(struct platform_device *pdev)
int ret = 0, irq;
dma_cap_mask_t mask;
- printk(KERN_INFO "i.MX SDHC driver\n");
+ pr_info("i.MX SDHC driver\n");
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index d513d47364d..99b449d26a4 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -37,6 +37,7 @@
#include <linux/mmc/sdio.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include <mach/mxs.h>
#include <mach/common.h>
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index a6c32904014..2dba999caf2 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -33,7 +33,7 @@
#include <plat/board.h>
#include <plat/mmc.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <plat/dma.h>
#include <plat/mux.h>
#include <plat/fpga.h>
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 21e4a799df4..101cd31c822 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -450,15 +450,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
* framework is fixed, we need a workaround like this
* (which is safe for MMC, but not in general).
*/
- if (regulator_is_enabled(host->vcc) > 0) {
- regulator_enable(host->vcc);
- regulator_disable(host->vcc);
- }
- if (host->vcc_aux) {
- if (regulator_is_enabled(reg) > 0) {
- regulator_enable(reg);
- regulator_disable(reg);
- }
+ if (regulator_is_enabled(host->vcc) > 0 ||
+ (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
+ int vdd = ffs(mmc_slot(host).ocr_mask) - 1;
+
+ mmc_slot(host).set_power(host->dev, host->slot_id,
+ 1, vdd);
+ mmc_slot(host).set_power(host->dev, host->slot_id,
+ 0, 0);
}
}
@@ -1264,14 +1263,14 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
host->reqs_blocked = 0;
if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) {
if (host->protect_card) {
- printk(KERN_INFO "%s: cover is closed, "
+ pr_info("%s: cover is closed, "
"card is now accessible\n",
mmc_hostname(host->mmc));
host->protect_card = 0;
}
} else {
if (!host->protect_card) {
- printk(KERN_INFO "%s: cover is open, "
+ pr_info("%s: cover is open, "
"card is now inaccessible\n",
mmc_hostname(host->mmc));
host->protect_card = 1;
@@ -1422,7 +1421,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
if (!next && data->host_cookie &&
data->host_cookie != host->next_data.cookie) {
- printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+ pr_warning("[%s] invalid cookie: data->host_cookie %d"
" host->next_data.cookie %d\n",
__func__, data->host_cookie, host->next_data.cookie);
data->host_cookie = 0;
@@ -1943,6 +1942,10 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
mmc->caps |= MMC_CAP_DISABLE;
+ if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
+ dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
+ mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
+ }
pm_runtime_enable(host->dev);
pm_runtime_get_sync(host->dev);
@@ -2015,7 +2018,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
}
/* Request IRQ for MMC operations */
- ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED,
+ ret = request_irq(host->irq, omap_hsmmc_irq, 0,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
@@ -2043,8 +2046,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if ((mmc_slot(host).card_detect_irq)) {
ret = request_irq(mmc_slot(host).card_detect_irq,
omap_hsmmc_cd_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
- | IRQF_DISABLED,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc),
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 7257738fd7d..fc4356e00d4 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -558,7 +558,7 @@ static void pxamci_dma_irq(int dma, void *devid)
if (dcsr & DCSR_ENDINTR) {
writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
} else {
- printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
+ pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
mmc_hostname(host->mmc), dma, dcsr);
host->data->error = -EIO;
pxamci_data_done(host, 0);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index a04f87d7ee3..720f99334a7 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -247,7 +247,7 @@ static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
{
if (host->sdio_irqen) {
if (gpio_get_value(S3C2410_GPE(8)) == 0) {
- printk(KERN_DEBUG "%s: signalling irq\n", __func__);
+ pr_debug("%s: signalling irq\n", __func__);
mmc_signal_sdio_irq(host->mmc);
}
}
@@ -344,7 +344,7 @@ static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
local_irq_save(flags);
- //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer);
+ /* pr_debug("%s: transfer %d\n", __func__, transfer); */
host->irq_disabled = transfer;
@@ -913,9 +913,9 @@ request_done:
}
static void s3cmci_dma_setup(struct s3cmci_host *host,
- enum s3c2410_dmasrc source)
+ enum dma_data_direction source)
{
- static enum s3c2410_dmasrc last_source = -1;
+ static enum dma_data_direction last_source = -1;
static int setup_ok;
if (last_source == source)
@@ -1087,7 +1087,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
- s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW);
+ s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 4dc0028086a..4b976f00ea8 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -32,6 +32,16 @@
/* VENDOR SPEC register */
#define SDHCI_VENDOR_SPEC 0xC0
#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
+#define SDHCI_WTMK_LVL 0x44
+#define SDHCI_MIX_CTRL 0x48
+
+/*
+ * The INT DMA ERR bit differs between the fsl eSDHC and the standard
+ * SDHC spec: the standard uses bit 25, which is reserved in the fsl
+ * eSDHC design, while the eSDHC signals DMA errors on bit 28 instead.
+ * Define the vendor-specific DMA error interrupt bit for fsl eSDHC.
+ */
+#define SDHCI_INT_VENDOR_SPEC_DMA_ERR 0x10000000
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
@@ -51,6 +61,7 @@ enum imx_esdhc_type {
IMX35_ESDHC,
IMX51_ESDHC,
IMX53_ESDHC,
+ IMX6Q_USDHC,
};
struct pltfm_imx_data {
@@ -74,6 +85,9 @@ static struct platform_device_id imx_esdhc_devtype[] = {
.name = "sdhci-esdhc-imx53",
.driver_data = IMX53_ESDHC,
}, {
+ .name = "sdhci-usdhc-imx6q",
+ .driver_data = IMX6Q_USDHC,
+ }, {
/* sentinel */
}
};
@@ -84,6 +98,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], },
{ .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], },
{ .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], },
+ { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -108,6 +123,11 @@ static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
return data->devtype == IMX53_ESDHC;
}
+static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX6Q_USDHC;
+}
+
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
{
void __iomem *base = host->ioaddr + (reg & ~0x3);
@@ -135,6 +155,27 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
val |= SDHCI_CARD_PRESENT;
}
+ if (unlikely(reg == SDHCI_CAPABILITIES)) {
+ /* In FSL esdhc IC module, only bit20 is used to indicate the
+ * ADMA2 capability of esdhc, but this bit is messed up on
+ * some SOCs (e.g. on MX25, MX35 this bit is set, but they
+ * don't actually support ADMA2). So set the BROKEN_ADMA
+ * quirk on MX25/35 platforms.
+ */
+
+ if (val & SDHCI_CAN_DO_ADMA1) {
+ val &= ~SDHCI_CAN_DO_ADMA1;
+ val |= SDHCI_CAN_DO_ADMA2;
+ }
+ }
+
+ if (unlikely(reg == SDHCI_INT_STATUS)) {
+ if (val & SDHCI_INT_VENDOR_SPEC_DMA_ERR) {
+ val &= ~SDHCI_INT_VENDOR_SPEC_DMA_ERR;
+ val |= SDHCI_INT_ADMA_ERROR;
+ }
+ }
+
return val;
}
@@ -179,13 +220,28 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
}
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (val & SDHCI_INT_ADMA_ERROR) {
+ val &= ~SDHCI_INT_ADMA_ERROR;
+ val |= SDHCI_INT_VENDOR_SPEC_DMA_ERR;
+ }
+ }
+
writel(val, host->ioaddr + reg);
}
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
- if (unlikely(reg == SDHCI_HOST_VERSION))
- reg ^= 2;
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+ u16 val = readw(host->ioaddr + (reg ^ 2));
+ /*
+	 * uSDHC supports SDHCI v3.0, but encodes it as value 0x3 in the
+	 * host controller version register, which does not match the
+	 * SDHCI_SPEC_300 definition. Work around that here.
+ */
+ if ((val & SDHCI_SPEC_VER_MASK) == 3)
+ return --val;
+ }
return readw(host->ioaddr + reg);
}
@@ -216,8 +272,17 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
&& (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
val |= SDHCI_CMD_ABORTCMD;
- writel(val << 16 | imx_data->scratchpad,
- host->ioaddr + SDHCI_TRANSFER_MODE);
+
+ if (is_imx6q_usdhc(imx_data)) {
+ u32 m = readl(host->ioaddr + SDHCI_MIX_CTRL);
+ m = imx_data->scratchpad | (m & 0xffff0000);
+ writel(m, host->ioaddr + SDHCI_MIX_CTRL);
+ writel(val << 16,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ } else {
+ writel(val << 16 | imx_data->scratchpad,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ }
return;
case SDHCI_BLOCK_SIZE:
val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
@@ -311,9 +376,10 @@ static struct sdhci_ops sdhci_esdhc_ops = {
};
static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC
| SDHCI_QUIRK_BROKEN_CARD_DETECTION,
- /* ADMA has issues. Might be fixable */
.ops = &sdhci_esdhc_ops,
};
@@ -405,11 +471,19 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
- host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
+ | SDHCI_QUIRK_BROKEN_ADMA;
if (is_imx53_esdhc(imx_data))
imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
+ /*
+ * The imx6q ROM code will change the default watermark level setting
+ * to something insane. Change it back here.
+ */
+ if (is_imx6q_usdhc(imx_data))
+ writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL);
+
boarddata = &imx_data->boarddata;
if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
if (!host->mmc->parent->platform_data) {
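
A minimal sketch of the bit translation the esdhc_readl_le()/esdhc_writel_le() hunks above perform so that the common sdhci core only ever sees the standard ADMA error bit. Bit 25 (0x02000000) is the standard-spec position described in the INT DMA ERR comment; the helper names are illustrative, not the driver's:

	#include <linux/types.h>

	#define ESDHC_VENDOR_DMA_ERR	0x10000000	/* bit 28, fsl eSDHC */
	#define STD_ADMA_ERR		0x02000000	/* bit 25, SDHCI spec */

	/* Read path: report a vendor DMA error as the standard ADMA error. */
	static u32 example_int_status_to_std(u32 val)
	{
		if (val & ESDHC_VENDOR_DMA_ERR) {
			val &= ~ESDHC_VENDOR_DMA_ERR;
			val |= STD_ADMA_ERR;
		}
		return val;
	}

	/* Write path: enabling the standard bit must set the vendor bit. */
	static u32 example_int_enable_to_vendor(u32 val)
	{
		if (val & STD_ADMA_ERR) {
			val &= ~STD_ADMA_ERR;
			val |= ESDHC_VENDOR_DMA_ERR;
		}
		return val;
	}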
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fe604df6501..59e9d003e58 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -1,7 +1,7 @@
/*
* Freescale eSDHC controller driver.
*
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2007, 2010 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
*
* Authors: Xiaobo Xie <X.Xie@freescale.com>
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -22,11 +23,21 @@
static u16 esdhc_readw(struct sdhci_host *host, int reg)
{
u16 ret;
+ int base = reg & ~0x3;
+ int shift = (reg & 0x2) * 8;
if (unlikely(reg == SDHCI_HOST_VERSION))
- ret = in_be16(host->ioaddr + reg);
+ ret = in_be32(host->ioaddr + base) & 0xffff;
else
- ret = sdhci_be32bs_readw(host, reg);
+ ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff;
+ return ret;
+}
+
+static u8 esdhc_readb(struct sdhci_host *host, int reg)
+{
+ int base = reg & ~0x3;
+ int shift = (reg & 0x3) * 8;
+ u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
return ret;
}
@@ -74,7 +85,7 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw,
- .read_b = sdhci_be32bs_readb,
+ .read_b = esdhc_readb,
.write_l = sdhci_be32bs_writel,
.write_w = esdhc_writew,
.write_b = esdhc_writeb,
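
The new esdhc_readw()/esdhc_readb() above only ever issue aligned 32-bit reads and then extract the requested lane. A small standalone sketch of that base/shift arithmetic for a few example byte offsets (the offsets themselves are arbitrary, chosen just to show the math):

	#include <stdio.h>

	int main(void)
	{
		const int regs[] = { 0x28, 0x29, 0x2e, 0xfe };	/* arbitrary byte offsets */

		for (int i = 0; i < 4; i++) {
			int reg = regs[i];
			int base = reg & ~0x3;			/* 32-bit aligned base */
			int shift_w = (reg & 0x2) * 8;		/* 16-bit lane shift: 0 or 16 */
			int shift_b = (reg & 0x3) * 8;		/* 8-bit lane shift: 0..24 */

			printf("reg 0x%02x -> base 0x%02x, word shift %2d, byte shift %2d\n",
			       reg, base, shift_w, shift_b);
		}
		return 0;
	}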
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 735be131dca..9b0d794a4f6 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -20,6 +20,7 @@
*/
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 26c528648f3..d833d9c2f7e 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/highmem.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -21,6 +22,9 @@
#include <linux/mmc/host.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/sfi.h>
+#include <linux/pm_runtime.h>
#include "sdhci.h"
@@ -43,6 +47,7 @@ struct sdhci_pci_slot;
struct sdhci_pci_fixes {
unsigned int quirks;
+ bool allow_runtime_pm;
int (*probe) (struct sdhci_pci_chip *);
@@ -59,12 +64,16 @@ struct sdhci_pci_slot {
struct sdhci_host *host;
int pci_bar;
+ int rst_n_gpio;
+ int cd_gpio;
+ int cd_irq;
};
struct sdhci_pci_chip {
struct pci_dev *pdev;
unsigned int quirks;
+ bool allow_runtime_pm;
const struct sdhci_pci_fixes *fixes;
int num_slots; /* Slots on controller */
@@ -163,12 +172,129 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip)
return 0;
}
+/* Medfield eMMC hardware reset GPIOs */
+static int mfd_emmc0_rst_gpio = -EINVAL;
+static int mfd_emmc1_rst_gpio = -EINVAL;
+
+static int mfd_emmc_gpio_parse(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
+ struct sfi_gpio_table_entry *entry;
+ int i, num;
+
+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
+ entry = (struct sfi_gpio_table_entry *)sb->pentry;
+
+ for (i = 0; i < num; i++, entry++) {
+ if (!strncmp(entry->pin_name, "emmc0_rst", SFI_NAME_LEN))
+ mfd_emmc0_rst_gpio = entry->pin_no;
+ else if (!strncmp(entry->pin_name, "emmc1_rst", SFI_NAME_LEN))
+ mfd_emmc1_rst_gpio = entry->pin_no;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+
+static irqreturn_t mfd_sd_cd(int irq, void *dev_id)
+{
+ struct sdhci_pci_slot *slot = dev_id;
+ struct sdhci_host *host = slot->host;
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ return IRQ_HANDLED;
+}
+
+#define MFLD_SD_CD_PIN 69
+
+static int mfd_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err, irq, gpio = MFLD_SD_CD_PIN;
+
+ slot->cd_gpio = -EINVAL;
+ slot->cd_irq = -EINVAL;
+
+ err = gpio_request(gpio, "sd_cd");
+ if (err < 0)
+ goto out;
+
+ err = gpio_direction_input(gpio);
+ if (err < 0)
+ goto out_free;
+
+ irq = gpio_to_irq(gpio);
+ if (irq < 0)
+ goto out_free;
+
+ err = request_irq(irq, mfd_sd_cd, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING, "sd_cd", slot);
+ if (err)
+ goto out_free;
+
+ slot->cd_gpio = gpio;
+ slot->cd_irq = irq;
+ slot->host->quirks2 |= SDHCI_QUIRK2_OWN_CARD_DETECTION;
+
+ return 0;
+
+out_free:
+ gpio_free(gpio);
+out:
+ dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
+ return 0;
+}
+
+static void mfd_sd_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+ if (slot->cd_irq >= 0)
+ free_irq(slot->cd_irq, slot);
+ gpio_free(slot->cd_gpio);
+}
+
+#else
+
+#define mfd_sd_probe_slot NULL
+#define mfd_sd_remove_slot NULL
+
+#endif
+
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
- slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ const char *name = NULL;
+ int gpio = -EINVAL;
+
+ sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, mfd_emmc_gpio_parse);
+
+ switch (slot->chip->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
+ gpio = mfd_emmc0_rst_gpio;
+ name = "eMMC0_reset";
+ break;
+ case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
+ gpio = mfd_emmc1_rst_gpio;
+ name = "eMMC1_reset";
+ break;
+ }
+
+ if (!gpio_request(gpio, name)) {
+ gpio_direction_output(gpio, 1);
+ slot->rst_n_gpio = gpio;
+ slot->host->mmc->caps |= MMC_CAP_HW_RESET;
+ }
+
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
+
+ slot->host->mmc->caps2 = MMC_CAP2_BOOTPART_NOACC;
+
return 0;
}
+static void mfd_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+ gpio_free(slot->rst_n_gpio);
+}
+
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
.probe_slot = mrst_hc_probe_slot,
@@ -181,15 +307,21 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .allow_runtime_pm = true,
+ .probe_slot = mfd_sd_probe_slot,
+ .remove_slot = mfd_sd_remove_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .allow_runtime_pm = true,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .allow_runtime_pm = true,
.probe_slot = mfd_emmc_probe_slot,
+ .remove_slot = mfd_emmc_remove_slot,
};
/* O2Micro extra registers */
@@ -832,9 +964,25 @@ static int sdhci_pci_8bit_width(struct sdhci_host *host, int width)
return 0;
}
+static void sdhci_pci_hw_reset(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ int rst_n_gpio = slot->rst_n_gpio;
+
+ if (!gpio_is_valid(rst_n_gpio))
+ return;
+ gpio_set_value_cansleep(rst_n_gpio, 0);
+ /* For eMMC, minimum is 1us but give it 10us for good measure */
+ udelay(10);
+ gpio_set_value_cansleep(rst_n_gpio, 1);
+ /* For eMMC, minimum is 200us but give it 300us for good measure */
+ usleep_range(300, 1000);
+}
+
static struct sdhci_ops sdhci_pci_ops = {
.enable_dma = sdhci_pci_enable_dma,
.platform_8bit_width = sdhci_pci_8bit_width,
+ .hw_reset = sdhci_pci_hw_reset,
};
/*****************************************************************************\
@@ -944,6 +1092,95 @@ static int sdhci_pci_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_RUNTIME
+
+static int sdhci_pci_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot;
+ pm_message_t state = { .event = PM_EVENT_SUSPEND };
+ int i, ret;
+
+ chip = pci_get_drvdata(pdev);
+ if (!chip)
+ return 0;
+
+ for (i = 0; i < chip->num_slots; i++) {
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
+
+ ret = sdhci_runtime_suspend_host(slot->host);
+
+ if (ret) {
+ for (i--; i >= 0; i--)
+ sdhci_runtime_resume_host(chip->slots[i]->host);
+ return ret;
+ }
+ }
+
+ if (chip->fixes && chip->fixes->suspend) {
+ ret = chip->fixes->suspend(chip, state);
+ if (ret) {
+ for (i = chip->num_slots - 1; i >= 0; i--)
+ sdhci_runtime_resume_host(chip->slots[i]->host);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sdhci_pci_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
+
+ chip = pci_get_drvdata(pdev);
+ if (!chip)
+ return 0;
+
+ if (chip->fixes && chip->fixes->resume) {
+ ret = chip->fixes->resume(chip);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < chip->num_slots; i++) {
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
+
+ ret = sdhci_runtime_resume_host(slot->host);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sdhci_pci_runtime_idle(struct device *dev)
+{
+ return 0;
+}
+
+#else
+
+#define sdhci_pci_runtime_suspend NULL
+#define sdhci_pci_runtime_resume NULL
+#define sdhci_pci_runtime_idle NULL
+
+#endif
+
+static const struct dev_pm_ops sdhci_pci_pm_ops = {
+ .runtime_suspend = sdhci_pci_runtime_suspend,
+ .runtime_resume = sdhci_pci_runtime_resume,
+ .runtime_idle = sdhci_pci_runtime_idle,
+};
+
/*****************************************************************************\
* *
* Device probing/removal *
@@ -988,6 +1225,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
slot->chip = chip;
slot->host = host;
slot->pci_bar = bar;
+ slot->rst_n_gpio = -EINVAL;
host->hw_name = "PCI";
host->ops = &sdhci_pci_ops;
@@ -1058,6 +1296,21 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
sdhci_free_host(slot->host);
}
+static void __devinit sdhci_pci_runtime_pm_allow(struct device *dev)
+{
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_suspend_ignore_children(dev, 1);
+}
+
+static void __devexit sdhci_pci_runtime_pm_forbid(struct device *dev)
+{
+ pm_runtime_forbid(dev);
+ pm_runtime_get_noresume(dev);
+}
+
static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1107,8 +1360,10 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
chip->pdev = pdev;
chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
- if (chip->fixes)
+ if (chip->fixes) {
chip->quirks = chip->fixes->quirks;
+ chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
+ }
chip->num_slots = slots;
pci_set_drvdata(pdev, chip);
@@ -1133,6 +1388,9 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
chip->slots[i] = slot;
}
+ if (chip->allow_runtime_pm)
+ sdhci_pci_runtime_pm_allow(&pdev->dev);
+
return 0;
free:
@@ -1152,6 +1410,9 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
chip = pci_get_drvdata(pdev);
if (chip) {
+ if (chip->allow_runtime_pm)
+ sdhci_pci_runtime_pm_forbid(&pdev->dev);
+
for (i = 0; i < chip->num_slots; i++)
sdhci_pci_remove_slot(chip->slots[i]);
@@ -1169,6 +1430,9 @@ static struct pci_driver sdhci_driver = {
.remove = __devexit_p(sdhci_pci_remove),
.suspend = sdhci_pci_suspend,
.resume = sdhci_pci_resume,
+ .driver = {
+ .pm = &sdhci_pci_pm_ops
+ },
};
/*****************************************************************************\
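
sdhci_pci_runtime_pm_allow()/..._forbid() above are the usual runtime PM bracket for a PCI driver: probe drops the usage count it was handed and allows autosuspend, remove forbids runtime PM and takes the count back before tearing the device down. A hedged sketch of that pairing with hypothetical names:

	#include <linux/pm_runtime.h>

	static void example_runtime_pm_allow(struct device *dev)
	{
		pm_runtime_put_noidle(dev);		/* drop probe's usage count */
		pm_runtime_allow(dev);			/* permit runtime suspend */
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(dev);
	}

	static void example_runtime_pm_forbid(struct device *dev)
	{
		pm_runtime_forbid(dev);			/* resume and block further suspends */
		pm_runtime_get_noresume(dev);		/* rebalance the usage count */
	}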
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 6414efeddca..a9e12ea0558 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
*/
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
#ifdef CONFIG_PPC
#include <asm/machdep.h>
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 38f58994f79..d4bf6d30c7b 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/mmc/card.h>
@@ -59,7 +60,7 @@ static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
* tune timing of read data/command when crc error happen
* no performance impact
*/
- if (pdata->clk_delay_sel == 1) {
+ if (pdata && pdata->clk_delay_sel == 1) {
tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT);
@@ -71,7 +72,7 @@ static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
}
- if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING) {
+ if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) {
tmp = readw(host->ioaddr + SD_FIFO_PARAM);
tmp &= ~CLK_GATE_SETTING_BITS;
writew(tmp, host->ioaddr + SD_FIFO_PARAM);
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index fc7e4a51562..cff4ad3e7a5 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -27,6 +27,7 @@
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index fe886d6c474..3d00e722efc 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -203,17 +203,23 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
}
- /* reconfigure the hardware for new clock rate */
-
- {
- struct mmc_ios ios;
-
- ios.clock = clock;
-
- if (ourhost->pdata->cfg_card)
- (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
- &ios, NULL);
- }
+ /* reprogram default hardware configuration */
+ writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA,
+ host->ioaddr + S3C64XX_SDHCI_CONTROL4);
+
+ ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+ ctrl |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
+ S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
+ S3C_SDHCI_CTRL2_ENFBCLKRX |
+ S3C_SDHCI_CTRL2_DFCNT_NONE |
+ S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+
+ /* reconfigure the controller for new clock rate */
+ ctrl = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
+ if (clock < 25 * 1000000)
+ ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
}
/**
@@ -561,8 +567,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
err_req_regs:
for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
- clk_disable(sc->clk_bus[ptr]);
- clk_put(sc->clk_bus[ptr]);
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+ }
}
err_no_busclks:
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 60a4c97d3d1..63cc8b6a1c9 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/highmem.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
@@ -177,8 +178,6 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
sdhci->data->card_power_gpio);
goto err_pgpio_direction;
}
-
- gpio_set_value(sdhci->data->card_power_gpio, 1);
}
if (sdhci->data->card_int_gpio >= 0) {
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 18b0bd31de7..89699e861fc 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -13,15 +13,21 @@
*/
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
+
+#include <mach/gpio-tegra.h>
#include <mach/sdhci.h>
#include "sdhci-pltfm.h"
@@ -73,10 +79,8 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
{
- struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
- struct tegra_sdhci_platform_data *plat;
-
- plat = pdev->dev.platform_data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
+ struct tegra_sdhci_platform_data *plat = pltfm_host->priv;
if (!gpio_is_valid(plat->wp_gpio))
return -1;
@@ -94,12 +98,10 @@ static irqreturn_t carddetect_irq(int irq, void *data)
static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
{
- struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
- struct tegra_sdhci_platform_data *plat;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct tegra_sdhci_platform_data *plat = pltfm_host->priv;
u32 ctrl;
- plat = pdev->dev.platform_data;
-
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
@@ -131,6 +133,36 @@ static struct sdhci_pltfm_data sdhci_tegra_pdata = {
.ops = &tegra_sdhci_ops,
};
+static const struct of_device_id sdhci_tegra_dt_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-sdhci", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
+
+static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
+ struct platform_device *pdev)
+{
+ struct tegra_sdhci_platform_data *plat;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return NULL;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat) {
+ dev_err(&pdev->dev, "Can't allocate platform data\n");
+ return NULL;
+ }
+
+ plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
+ if (of_find_property(np, "support-8bit", NULL))
+ plat->is_8bit = 1;
+
+ return plat;
+}
+
static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -147,12 +179,17 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
plat = pdev->dev.platform_data;
+ if (plat == NULL)
+ plat = sdhci_tegra_dt_parse_pdata(pdev);
+
if (plat == NULL) {
dev_err(mmc_dev(host->mmc), "missing platform data\n");
rc = -ENXIO;
goto err_no_plat;
}
+ pltfm_host->priv = plat;
+
if (gpio_is_valid(plat->power_gpio)) {
rc = gpio_request(plat->power_gpio, "sdhci_power");
if (rc) {
@@ -247,13 +284,11 @@ static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct tegra_sdhci_platform_data *plat;
+ struct tegra_sdhci_platform_data *plat = pltfm_host->priv;
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
- plat = pdev->dev.platform_data;
-
if (gpio_is_valid(plat->wp_gpio)) {
tegra_gpio_disable(plat->wp_gpio);
gpio_free(plat->wp_gpio);
@@ -282,6 +317,7 @@ static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
.owner = THIS_MODULE,
+ .of_match_table = sdhci_tegra_dt_match,
},
.probe = sdhci_tegra_probe,
.remove = __devexit_p(sdhci_tegra_remove),
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0e02cc1df12..6d8eea32354 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -16,10 +16,12 @@
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/leds.h>
@@ -41,6 +43,7 @@
#define MAX_TUNING_LOOP 40
static unsigned int debug_quirks = 0;
+static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);
@@ -49,53 +52,67 @@ static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc);
static void sdhci_tuning_timer(unsigned long data);
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_runtime_pm_get(struct sdhci_host *host);
+static int sdhci_runtime_pm_put(struct sdhci_host *host);
+#else
+static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
+{
+ return 0;
+}
+static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
+{
+ return 0;
+}
+#endif
+
static void sdhci_dumpregs(struct sdhci_host *host)
{
- printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
mmc_hostname(host->mmc));
- printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
sdhci_readl(host, SDHCI_DMA_ADDRESS),
sdhci_readw(host, SDHCI_HOST_VERSION));
- printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
sdhci_readw(host, SDHCI_BLOCK_SIZE),
sdhci_readw(host, SDHCI_BLOCK_COUNT));
- printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
sdhci_readl(host, SDHCI_ARGUMENT),
sdhci_readw(host, SDHCI_TRANSFER_MODE));
- printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
sdhci_readl(host, SDHCI_PRESENT_STATE),
sdhci_readb(host, SDHCI_HOST_CONTROL));
- printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
sdhci_readb(host, SDHCI_POWER_CONTROL),
sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
- printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
sdhci_readw(host, SDHCI_CLOCK_CONTROL));
- printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
sdhci_readl(host, SDHCI_INT_STATUS));
- printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
sdhci_readl(host, SDHCI_INT_ENABLE),
sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
- printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
sdhci_readw(host, SDHCI_ACMD12_ERR),
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES),
sdhci_readl(host, SDHCI_CAPABILITIES_1));
- printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT));
- printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
sdhci_readw(host, SDHCI_HOST_CONTROL2));
if (host->flags & SDHCI_USE_ADMA)
- printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
readl(host->ioaddr + SDHCI_ADMA_ERROR),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
- printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
+ pr_debug(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
@@ -132,6 +149,9 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
return;
+ if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION)
+ return;
+
present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
SDHCI_CARD_PRESENT;
irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
@@ -180,7 +200,7 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
/* hw clears the bit when it's done */
while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
if (timeout == 0) {
- printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
+ pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
sdhci_dumpregs(host);
return;
@@ -251,11 +271,14 @@ static void sdhci_led_control(struct led_classdev *led,
spin_lock_irqsave(&host->lock, flags);
+ if (host->runtime_suspended)
+ goto out;
+
if (brightness == LED_OFF)
sdhci_deactivate_led(host);
else
sdhci_activate_led(host);
-
+out:
spin_unlock_irqrestore(&host->lock, flags);
}
#endif
@@ -654,7 +677,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
}
if (count >= 0xF) {
- printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
+ pr_warning("%s: Too large timeout requested for CMD%d!\n",
mmc_hostname(host->mmc), cmd->opcode);
count = 0xE;
}
@@ -949,7 +972,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
if (timeout == 0) {
- printk(KERN_ERR "%s: Controller never released "
+ pr_err("%s: Controller never released "
"inhibit bit(s).\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
cmd->error = -EIO;
@@ -971,7 +994,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_set_transfer_mode(host, cmd);
if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
- printk(KERN_ERR "%s: Unsupported response type!\n",
+ pr_err("%s: Unsupported response type!\n",
mmc_hostname(host->mmc));
cmd->error = -EINVAL;
tasklet_schedule(&host->finish_tasklet);
@@ -1121,7 +1144,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
- printk(KERN_ERR "%s: Internal clock never "
+ pr_err("%s: Internal clock never "
"stabilised.\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
return;
@@ -1209,6 +1232,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host = mmc_priv(mmc);
+ sdhci_runtime_pm_get(host);
+
spin_lock_irqsave(&host->lock, flags);
WARN_ON(host->mrq != NULL);
@@ -1269,14 +1294,11 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
- struct sdhci_host *host;
unsigned long flags;
u8 ctrl;
- host = mmc_priv(mmc);
-
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
@@ -1426,7 +1448,16 @@ out:
spin_unlock_irqrestore(&host->lock, flags);
}
-static int check_ro(struct sdhci_host *host)
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_runtime_pm_get(host);
+ sdhci_do_set_ios(host, ios);
+ sdhci_runtime_pm_put(host);
+}
+
+static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
int is_readonly;
@@ -1450,19 +1481,16 @@ static int check_ro(struct sdhci_host *host)
#define SAMPLE_COUNT 5
-static int sdhci_get_ro(struct mmc_host *mmc)
+static int sdhci_do_get_ro(struct sdhci_host *host)
{
- struct sdhci_host *host;
int i, ro_count;
- host = mmc_priv(mmc);
-
if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
- return check_ro(host);
+ return sdhci_check_ro(host);
ro_count = 0;
for (i = 0; i < SAMPLE_COUNT; i++) {
- if (check_ro(host)) {
+ if (sdhci_check_ro(host)) {
if (++ro_count > SAMPLE_COUNT / 2)
return 1;
}
@@ -1471,38 +1499,64 @@ static int sdhci_get_ro(struct mmc_host *mmc)
return 0;
}
-static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+static void sdhci_hw_reset(struct mmc_host *mmc)
{
- struct sdhci_host *host;
- unsigned long flags;
+ struct sdhci_host *host = mmc_priv(mmc);
- host = mmc_priv(mmc);
+ if (host->ops && host->ops->hw_reset)
+ host->ops->hw_reset(host);
+}
- spin_lock_irqsave(&host->lock, flags);
+static int sdhci_get_ro(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int ret;
+ sdhci_runtime_pm_get(host);
+ ret = sdhci_do_get_ro(host);
+ sdhci_runtime_pm_put(host);
+ return ret;
+}
+
+static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+{
if (host->flags & SDHCI_DEVICE_DEAD)
goto out;
if (enable)
+ host->flags |= SDHCI_SDIO_IRQ_ENABLED;
+ else
+ host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
+
+ /* SDIO IRQ will be enabled as appropriate in runtime resume */
+ if (host->runtime_suspended)
+ goto out;
+
+ if (enable)
sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
else
sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
out:
mmiowb();
+}
+
+static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
}
-static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
- struct mmc_ios *ios)
+static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+ struct mmc_ios *ios)
{
- struct sdhci_host *host;
u8 pwr;
u16 clk, ctrl;
u32 present_state;
- host = mmc_priv(mmc);
-
/*
* Signal Voltage Switching is only applicable for Host Controllers
* v3.00 and above.
@@ -1528,7 +1582,7 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (!(ctrl & SDHCI_CTRL_VDD_180))
return 0;
else {
- printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
+ pr_info(DRIVER_NAME ": Switching to 3.3V "
"signalling voltage failed\n");
return -EIO;
}
@@ -1587,7 +1641,7 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
+ pr_info(DRIVER_NAME ": Switching to 1.8V signalling "
"voltage failed, retrying with S18R set to 0\n");
return -EAGAIN;
} else
@@ -1595,6 +1649,20 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return 0;
}
+static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err;
+
+ if (host->version < SDHCI_SPEC_300)
+ return 0;
+ sdhci_runtime_pm_get(host);
+ err = sdhci_do_start_signal_voltage_switch(host, ios);
+ sdhci_runtime_pm_put(host);
+ return err;
+}
+
static int sdhci_execute_tuning(struct mmc_host *mmc)
{
struct sdhci_host *host;
@@ -1606,6 +1674,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
host = mmc_priv(mmc);
+ sdhci_runtime_pm_get(host);
disable_irq(host->irq);
spin_lock(&host->lock);
@@ -1623,6 +1692,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
else {
spin_unlock(&host->lock);
enable_irq(host->irq);
+ sdhci_runtime_pm_put(host);
return 0;
}
@@ -1648,7 +1718,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
timeout = 150;
do {
struct mmc_command cmd = {0};
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
if (!tuning_loop_counter && !timeout)
break;
@@ -1694,7 +1764,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
spin_lock(&host->lock);
if (!host->tuning_done) {
- printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
+ pr_info(DRIVER_NAME ": Timeout waiting for "
"Buffer Read Ready interrupt during tuning "
"procedure, falling back to fixed sampling "
"clock\n");
@@ -1724,7 +1794,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
} else {
if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
- printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
+ pr_info(DRIVER_NAME ": Tuning procedure"
" failed, falling back to fixed sampling"
" clock\n");
err = -EIO;
@@ -1766,18 +1836,16 @@ out:
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
spin_unlock(&host->lock);
enable_irq(host->irq);
+ sdhci_runtime_pm_put(host);
return err;
}
-static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
+static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable)
{
- struct sdhci_host *host;
u16 ctrl;
unsigned long flags;
- host = mmc_priv(mmc);
-
/* Host Controller v3.00 defines preset value registers */
if (host->version < SDHCI_SPEC_300)
return;
@@ -1793,18 +1861,30 @@ static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ host->flags |= SDHCI_PV_ENABLED;
} else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ host->flags &= ~SDHCI_PV_ENABLED;
}
spin_unlock_irqrestore(&host->lock, flags);
}
+static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_runtime_pm_get(host);
+ sdhci_do_enable_preset_value(host, enable);
+ sdhci_runtime_pm_put(host);
+}
+
static const struct mmc_host_ops sdhci_ops = {
.request = sdhci_request,
.set_ios = sdhci_set_ios,
.get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.execute_tuning = sdhci_execute_tuning,
@@ -1826,19 +1906,19 @@ static void sdhci_tasklet_card(unsigned long param)
spin_lock_irqsave(&host->lock, flags);
- if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
- if (host->mrq) {
- printk(KERN_ERR "%s: Card removed during transfer!\n",
- mmc_hostname(host->mmc));
- printk(KERN_ERR "%s: Resetting controller.\n",
- mmc_hostname(host->mmc));
+ /* Check host->mrq first in case we are runtime suspended */
+ if (host->mrq &&
+ !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
+ pr_err("%s: Card removed during transfer!\n",
+ mmc_hostname(host->mmc));
+ pr_err("%s: Resetting controller.\n",
+ mmc_hostname(host->mmc));
- sdhci_reset(host, SDHCI_RESET_CMD);
- sdhci_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset(host, SDHCI_RESET_CMD);
+ sdhci_reset(host, SDHCI_RESET_DATA);
- host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
- }
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1854,14 +1934,16 @@ static void sdhci_tasklet_finish(unsigned long param)
host = (struct sdhci_host*)param;
+ spin_lock_irqsave(&host->lock, flags);
+
/*
* If this tasklet gets rescheduled while running, it will
* be run again afterwards but without any active request.
*/
- if (!host->mrq)
+ if (!host->mrq) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
-
- spin_lock_irqsave(&host->lock, flags);
+ }
del_timer(&host->timer);
@@ -1905,6 +1987,7 @@ static void sdhci_tasklet_finish(unsigned long param)
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
+ sdhci_runtime_pm_put(host);
}
static void sdhci_timeout_timer(unsigned long data)
@@ -1917,7 +2000,7 @@ static void sdhci_timeout_timer(unsigned long data)
spin_lock_irqsave(&host->lock, flags);
if (host->mrq) {
- printk(KERN_ERR "%s: Timeout waiting for hardware "
+ pr_err("%s: Timeout waiting for hardware "
"interrupt.\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
@@ -1963,7 +2046,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
BUG_ON(intmask == 0);
if (!host->cmd) {
- printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
+ pr_err("%s: Got command interrupt 0x%08x even "
"though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2063,7 +2146,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
}
- printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
+ pr_err("%s: Got data interrupt 0x%08x even "
"though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2080,7 +2163,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
!= MMC_BUS_TEST_R)
host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) {
- printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
+ pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
sdhci_show_adma_error(host);
host->data->error = -EIO;
}
@@ -2136,12 +2219,19 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
irqreturn_t result;
- struct sdhci_host* host = dev_id;
+ struct sdhci_host *host = dev_id;
u32 intmask;
int cardint = 0;
spin_lock(&host->lock);
+ if (host->runtime_suspended) {
+ spin_unlock(&host->lock);
+ pr_warning("%s: got irq while runtime suspended\n",
+ mmc_hostname(host->mmc));
+ return IRQ_HANDLED;
+ }
+
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
if (!intmask || intmask == 0xffffffff) {
@@ -2194,7 +2284,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
intmask &= ~SDHCI_INT_ERROR;
if (intmask & SDHCI_INT_BUS_POWER) {
- printk(KERN_ERR "%s: Card is consuming too much power!\n",
+ pr_err("%s: Card is consuming too much power!\n",
mmc_hostname(host->mmc));
sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
}
@@ -2207,7 +2297,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
intmask &= ~SDHCI_INT_CARD_INT;
if (intmask) {
- printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
+ pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), intmask);
sdhci_dumpregs(host);
@@ -2275,7 +2365,6 @@ int sdhci_resume_host(struct sdhci_host *host)
return ret;
}
-
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
@@ -2314,6 +2403,90 @@ EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_RUNTIME
+
+static int sdhci_runtime_pm_get(struct sdhci_host *host)
+{
+ return pm_runtime_get_sync(host->mmc->parent);
+}
+
+static int sdhci_runtime_pm_put(struct sdhci_host *host)
+{
+ pm_runtime_mark_last_busy(host->mmc->parent);
+ return pm_runtime_put_autosuspend(host->mmc->parent);
+}
+
+int sdhci_runtime_suspend_host(struct sdhci_host *host)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable tuning since we are suspending */
+ if (host->version >= SDHCI_SPEC_300 &&
+ host->tuning_mode == SDHCI_TUNING_MODE_1) {
+ del_timer_sync(&host->tuning_timer);
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ synchronize_irq(host->irq);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
+
+int sdhci_runtime_resume_host(struct sdhci_host *host)
+{
+ unsigned long flags;
+ int ret = 0, host_flags = host->flags;
+
+ if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if (host->ops->enable_dma)
+ host->ops->enable_dma(host);
+ }
+
+ sdhci_init(host, 0);
+
+ /* Force clock and power re-program */
+ host->pwr = 0;
+ host->clock = 0;
+ sdhci_do_set_ios(host, &host->mmc->ios);
+
+ sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
+ if (host_flags & SDHCI_PV_ENABLED)
+ sdhci_do_enable_preset_value(host, true);
+
+ /* Set the re-tuning expiration flag */
+ if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
+ (host->tuning_mode == SDHCI_TUNING_MODE_1))
+ host->flags |= SDHCI_NEEDS_RETUNING;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->runtime_suspended = false;
+
+ /* Enable SDIO IRQ */
+ if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
+ sdhci_enable_sdio_irq_nolock(host, true);
+
+ /* Enable Card Detection */
+ sdhci_enable_card_detection(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
+
+#endif
+
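For context, a minimal sketch of how a bus-glue driver could wire the new sdhci_runtime_suspend_host()/sdhci_runtime_resume_host() helpers into runtime PM; the foo_sdhci naming, the 50 ms autosuspend delay, and the assumption that drvdata holds the sdhci_host are illustrative, not part of this patch:

#include <linux/pm_runtime.h>
#include "sdhci.h"

static int foo_sdhci_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	/* Core helper masks all IRQs and marks the host runtime-suspended */
	return sdhci_runtime_suspend_host(host);
}

static int foo_sdhci_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	/* Re-programs clock/power and re-arms SDIO IRQ and card detection */
	return sdhci_runtime_resume_host(host);
}

static const struct dev_pm_ops foo_sdhci_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_sdhci_runtime_suspend,
			   foo_sdhci_runtime_resume, NULL)
};

static void foo_sdhci_enable_runtime_pm(struct device *dev)
{
	/* Call once sdhci_add_host() has succeeded */
	pm_runtime_set_active(dev);
	pm_runtime_set_autosuspend_delay(dev, 50);	/* assumed delay */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}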
/*****************************************************************************\
* *
* Device allocation/registration *
@@ -2356,6 +2529,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (debug_quirks)
host->quirks = debug_quirks;
+ if (debug_quirks2)
+ host->quirks2 = debug_quirks2;
sdhci_reset(host, SDHCI_RESET_ALL);
@@ -2363,7 +2538,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->version = (host->version & SDHCI_SPEC_VER_MASK)
>> SDHCI_SPEC_VER_SHIFT;
if (host->version > SDHCI_SPEC_300) {
- printk(KERN_ERR "%s: Unknown controller version (%d). "
+ pr_err("%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
host->version);
}
@@ -2400,7 +2575,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma) {
if (host->ops->enable_dma(host)) {
- printk(KERN_WARNING "%s: No suitable DMA "
+ pr_warning("%s: No suitable DMA "
"available. Falling back to PIO.\n",
mmc_hostname(mmc));
host->flags &=
@@ -2420,7 +2595,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (!host->adma_desc || !host->align_buffer) {
kfree(host->adma_desc);
kfree(host->align_buffer);
- printk(KERN_WARNING "%s: Unable to allocate ADMA "
+ pr_warning("%s: Unable to allocate ADMA "
"buffers. Falling back to standard DMA.\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
@@ -2448,8 +2623,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->max_clk == 0 || host->quirks &
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
if (!host->ops->get_max_clock) {
- printk(KERN_ERR
- "%s: Hardware doesn't specify base clock "
+ pr_err("%s: Hardware doesn't specify base clock "
"frequency.\n", mmc_hostname(mmc));
return -ENODEV;
}
@@ -2495,8 +2669,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->timeout_clk = host->ops->get_timeout_clock(host);
} else if (!(host->quirks &
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- printk(KERN_ERR
- "%s: Hardware doesn't specify timeout clock "
+ pr_err("%s: Hardware doesn't specify timeout clock "
"frequency.\n", mmc_hostname(mmc));
return -ENODEV;
}
@@ -2566,6 +2739,15 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[1] & SDHCI_DRIVER_TYPE_D)
mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
+ /*
+ * If Power Off Notify capability is enabled by the host,
+ * set notify to short power off notify timeout value.
+ */
+ if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
+ mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
+ else
+ mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
+
/* Initial value for re-tuning timer count */
host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
SDHCI_RETUNING_TIMER_COUNT_SHIFT;
@@ -2655,7 +2837,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
if (mmc->ocr_avail == 0) {
- printk(KERN_ERR "%s: Hardware doesn't report any "
+ pr_err("%s: Hardware doesn't report any "
"support voltages.\n", mmc_hostname(mmc));
return -ENODEV;
}
@@ -2703,7 +2885,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
SDHCI_MAX_BLOCK_SHIFT;
if (mmc->max_blk_size >= 3) {
- printk(KERN_WARNING "%s: Invalid maximum block size, "
+ pr_warning("%s: Invalid maximum block size, "
"assuming 512 bytes\n", mmc_hostname(mmc));
mmc->max_blk_size = 0;
}
@@ -2742,7 +2924,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
if (IS_ERR(host->vmmc)) {
- printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
} else {
regulator_enable(host->vmmc);
@@ -2771,7 +2953,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_add_host(mmc);
- printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
+ pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -2804,7 +2986,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
host->flags |= SDHCI_DEVICE_DEAD;
if (host->mrq) {
- printk(KERN_ERR "%s: Controller removed during "
+ pr_err("%s: Controller removed during "
" transfer!\n", mmc_hostname(host->mmc));
host->mrq->cmd->error = -ENOMEDIUM;
@@ -2863,9 +3045,9 @@ EXPORT_SYMBOL_GPL(sdhci_free_host);
static int __init sdhci_drv_init(void)
{
- printk(KERN_INFO DRIVER_NAME
+ pr_info(DRIVER_NAME
": Secure Digital Host Controller Interface driver\n");
- printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
+ pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
return 0;
}
@@ -2878,9 +3060,11 @@ module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);
module_param(debug_quirks, uint, 0444);
+module_param(debug_quirks2, uint, 0444);
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
+MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 745c42fa41e..0a5b65460d8 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -273,7 +273,7 @@ struct sdhci_ops {
void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
-
+ void (*hw_reset)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -379,4 +379,9 @@ extern int sdhci_resume_host(struct sdhci_host *host);
extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
#endif
+#ifdef CONFIG_PM_RUNTIME
+extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
+extern int sdhci_runtime_resume_host(struct sdhci_host *host);
+#endif
+
#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index 496b7efbc6b..7009f17ad6c 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -26,6 +26,7 @@
*/
#include <linux/delay.h>
#include <linux/highmem.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/scatterlist.h>
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 557886bee9c..369366c8e20 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
+#include <linux/module.h>
#define DRIVER_NAME "sh_mmcif"
#define DRIVER_VERSION "2010-04-28"
@@ -165,6 +166,8 @@ struct sh_mmcif_host {
struct mmc_host *mmc;
struct mmc_data *data;
struct platform_device *pd;
+ struct sh_dmae_slave dma_slave_tx;
+ struct sh_dmae_slave dma_slave_rx;
struct clk *hclk;
unsigned int clk;
int bus_width;
@@ -323,25 +326,35 @@ static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata)
{
+ struct sh_dmae_slave *tx, *rx;
host->dma_active = false;
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
+ dev_warn(&host->pd->dev,
+ "Update your platform to use embedded DMA slave IDs\n");
+ tx = &pdata->dma->chan_priv_tx;
+ rx = &pdata->dma->chan_priv_rx;
+ } else {
+ tx = &host->dma_slave_tx;
+ tx->slave_id = pdata->slave_id_tx;
+ rx = &host->dma_slave_rx;
+ rx->slave_id = pdata->slave_id_rx;
+ }
+ if (tx->slave_id > 0 && rx->slave_id > 0) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
- &pdata->dma->chan_priv_tx);
+ host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
if (!host->chan_tx)
return;
- host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
- &pdata->dma->chan_priv_rx);
+ host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
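For reference, a board migrating away from the deprecated chan_priv_tx/chan_priv_rx pointers would simply fill the two new slave-ID fields in its platform data; a sketch, where the SHDMA_SLAVE_MMCIF_TX/RX identifiers and the OCR mask are board-specific assumptions:

#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>

static struct sh_mmcif_plat_data mmcif_pdata = {
	.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34, /* assumed supply */
	.caps		= MMC_CAP_8_BIT_DATA,
	.slave_id_tx	= SHDMA_SLAVE_MMCIF_TX,	/* hypothetical SoC DMA IDs */
	.slave_id_rx	= SHDMA_SLAVE_MMCIF_RX,
};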
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 0c4a672f5db..41ae6466bd8 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
@@ -96,7 +97,8 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
struct tmio_mmc_host *host;
char clk_name[8];
- int i, irq, ret;
+ int irq, ret, i = 0;
+ bool multiplexed_isr = true;
priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
if (priv == NULL) {
@@ -153,27 +155,60 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
if (ret < 0)
goto eprobe;
- for (i = 0; i < 3; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- if (i) {
- continue;
- } else {
- ret = irq;
- goto eirq;
- }
- }
- ret = request_irq(irq, tmio_mmc_irq, 0,
+ /*
+ * Allow one or more specific (named) ISRs or
+ * one or more multiplexed (un-named) ISRs.
+ */
+
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
+ if (irq >= 0) {
+ multiplexed_isr = false;
+ ret = request_irq(irq, tmio_mmc_card_detect_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto eirq_card_detect;
+ }
+
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
+ if (irq >= 0) {
+ multiplexed_isr = false;
+ ret = request_irq(irq, tmio_mmc_sdio_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto eirq_sdio;
+ }
+
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
+ if (irq >= 0) {
+ multiplexed_isr = false;
+ ret = request_irq(irq, tmio_mmc_sdcard_irq, 0,
dev_name(&pdev->dev), host);
- if (ret) {
- while (i--) {
- irq = platform_get_irq(pdev, i);
- if (irq >= 0)
- free_irq(irq, host);
- }
- goto eirq;
+ if (ret)
+ goto eirq_sdcard;
+ } else if (!multiplexed_isr) {
+ dev_err(&pdev->dev,
+ "Principal SD-card IRQ is missing among named interrupts\n");
+ ret = irq;
+ goto eirq_sdcard;
+ }
+
+ if (multiplexed_isr) {
+ while (1) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+ i++;
+ ret = request_irq(irq, tmio_mmc_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto eirq_multiplexed;
}
+
+ /* There must be at least one IRQ source */
+ if (!i) {
+ ret = irq;
+ goto eirq_multiplexed;
+ }

}
+
dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
mmc_hostname(host->mmc), (unsigned long)
(platform_get_resource(pdev,IORESOURCE_MEM, 0)->start),
@@ -181,7 +216,20 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
return ret;
-eirq:
+eirq_multiplexed:
+ while (i--) {
+ irq = platform_get_irq(pdev, i);
+ free_irq(irq, host);
+ }
+eirq_sdcard:
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
+ if (irq >= 0)
+ free_irq(irq, host);
+eirq_sdio:
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
+ if (irq >= 0)
+ free_irq(irq, host);
+eirq_card_detect:
tmio_mmc_host_remove(host);
eprobe:
clk_disable(priv->clk);
@@ -197,16 +245,17 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- int i, irq;
+ int i = 0, irq;
p->pdata = NULL;
tmio_mmc_host_remove(host);
- for (i = 0; i < 3; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq >= 0)
- free_irq(irq, host);
+ while (1) {
+ irq = platform_get_irq(pdev, i++);
+ if (irq < 0)
+ break;
+ free_irq(irq, host);
}
clk_disable(priv->clk);
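As a point of reference, the named-IRQ path added above only triggers when the platform device declares its interrupt resources by name (the SH_MOBILE_SDHI_IRQ_* strings come from the sh_mobile_sdhi header used in the probe code); a sketch of such a declaration, with placeholder register base and vector numbers:

#include <linux/ioport.h>
#include <linux/mmc/sh_mobile_sdhi.h>

static struct resource sdhi0_resources[] = {
	{
		.start	= 0xe6850000,	/* placeholder MMIO window */
		.end	= 0xe68500ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= SH_MOBILE_SDHI_IRQ_CARD_DETECT,
		.start	= 100,		/* placeholder vectors */
		.flags	= IORESOURCE_IRQ,
	}, {
		.name	= SH_MOBILE_SDHI_IRQ_SDCARD,
		.start	= 101,
		.flags	= IORESOURCE_IRQ,
	}, {
		.name	= SH_MOBILE_SDHI_IRQ_SDIO,
		.start	= 102,
		.flags	= IORESOURCE_IRQ,
	},
};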
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 457c26ea09d..f70d04664ca 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -16,6 +16,7 @@
#include <linux/mmc/host.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
+#include <linux/module.h>
#include <asm/io.h>
#define DRIVER_NAME "tifm_sd"
@@ -631,7 +632,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
if (host->req) {
- printk(KERN_ERR "%s : unfinished request detected\n",
+ pr_err("%s : unfinished request detected\n",
dev_name(&sock->dev));
mrq->cmd->error = -ETIMEDOUT;
goto err_out;
@@ -671,7 +672,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
r_data->flags & MMC_DATA_WRITE
? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE)) {
- printk(KERN_ERR "%s : scatterlist map failed\n",
+ pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
mrq->cmd->error = -ENOMEM;
goto err_out;
@@ -683,7 +684,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE);
if (host->sg_len < 1) {
- printk(KERN_ERR "%s : scatterlist map failed\n",
+ pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
tifm_unmap_sg(sock, &host->bounce_buf, 1,
r_data->flags & MMC_DATA_WRITE
@@ -747,7 +748,7 @@ static void tifm_sd_end_cmd(unsigned long data)
host->req = NULL;
if (!mrq) {
- printk(KERN_ERR " %s : no request to complete?\n",
+ pr_err(" %s : no request to complete?\n",
dev_name(&sock->dev));
spin_unlock_irqrestore(&sock->lock, flags);
return;
@@ -786,8 +787,7 @@ static void tifm_sd_abort(unsigned long data)
{
struct tifm_sd *host = (struct tifm_sd*)data;
- printk(KERN_ERR
- "%s : card failed to respond for a long period of time "
+ pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags);
@@ -905,7 +905,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
}
if (rc) {
- printk(KERN_ERR "%s : controller failed to reset\n",
+ pr_err("%s : controller failed to reset\n",
dev_name(&sock->dev));
return -ENODEV;
}
@@ -931,8 +931,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
}
if (rc) {
- printk(KERN_ERR
- "%s : card not ready - probe failed on initialization\n",
+ pr_err("%s : card not ready - probe failed on initialization\n",
dev_name(&sock->dev));
return -ENODEV;
}
@@ -953,7 +952,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
if (!(TIFM_SOCK_STATE_OCCUPIED
& readl(sock->addr + SOCK_PRESENT_STATE))) {
- printk(KERN_WARNING "%s : card gone, unexpectedly\n",
+ pr_warning("%s : card gone, unexpectedly\n",
dev_name(&sock->dev));
return rc;
}
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 44a9668c4b7..a4ea1024278 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -88,8 +88,8 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
if (ret)
goto cell_disable;
- ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED |
- IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host);
+ ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING,
+ dev_name(&pdev->dev), host);
if (ret)
goto host_remove;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index eeaf64391fb..3020f98218f 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -79,6 +79,10 @@ struct tmio_mmc_host {
struct delayed_work delayed_reset_work;
struct work_struct done;
+ /* Cache IRQ mask */
+ u32 sdcard_irq_mask;
+ u32 sdio_irq_mask;
+
spinlock_t lock; /* protect host private data */
unsigned long last_req_ts;
struct mutex ios_lock; /* protect set_ios() context */
@@ -93,6 +97,9 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
irqreturn_t tmio_mmc_irq(int irq, void *devid);
+irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid);
+irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid);
+irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid);
static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 1f16357e730..d85a60cda16 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -48,14 +48,14 @@
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
- u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
- sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
+ host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
+ sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
- u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
- sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
+ host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
+ sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
@@ -92,7 +92,7 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
static void pr_debug_status(u32 status)
{
int i = 0;
- printk(KERN_DEBUG "status: %08x = ", status);
+ pr_debug("status: %08x = ", status);
STATUS_TO_TEXT(CARD_REMOVE, status, i);
STATUS_TO_TEXT(CARD_INSERT, status, i);
STATUS_TO_TEXT(SIGSTATE, status, i);
@@ -127,11 +127,13 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
if (enable) {
host->sdio_irq_enabled = 1;
+ host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
+ ~TMIO_SDIO_STAT_IOIRQ;
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
- sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
- (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
} else {
- sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
+ host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
host->sdio_irq_enabled = 0;
}
@@ -543,45 +545,20 @@ out:
spin_unlock(&host->lock);
}
-irqreturn_t tmio_mmc_irq(int irq, void *devid)
+static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
+ int *ireg, int *status)
{
- struct tmio_mmc_host *host = devid;
- struct mmc_host *mmc = host->mmc;
- struct tmio_mmc_data *pdata = host->pdata;
- unsigned int ireg, irq_mask, status;
- unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
-
- pr_debug("MMC IRQ begin\n");
-
- status = sd_ctrl_read32(host, CTL_STATUS);
- irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
- ireg = status & TMIO_MASK_IRQ & ~irq_mask;
-
- sdio_ireg = 0;
- if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
- sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
- sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
- sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
+ *status = sd_ctrl_read32(host, CTL_STATUS);
+ *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
- sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
-
- if (sdio_ireg && !host->sdio_irq_enabled) {
- pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
- sdio_status, sdio_irq_mask, sdio_ireg);
- tmio_mmc_enable_sdio_irq(mmc, 0);
- goto out;
- }
-
- if (mmc->caps & MMC_CAP_SDIO_IRQ &&
- sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
- mmc_signal_sdio_irq(mmc);
-
- if (sdio_ireg)
- goto out;
- }
+ pr_debug_status(*status);
+ pr_debug_status(*ireg);
+}
- pr_debug_status(status);
- pr_debug_status(ireg);
+static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
+ int ireg, int status)
+{
+ struct mmc_host *mmc = host->mmc;
/* Card insert / remove attempts */
if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
@@ -591,43 +568,102 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
!work_pending(&mmc->detect.work))
mmc_detect_change(host->mmc, msecs_to_jiffies(100));
- goto out;
+ return true;
}
- /* CRC and other errors */
-/* if (ireg & TMIO_STAT_ERR_IRQ)
- * handled |= tmio_error_irq(host, irq, stat);
- */
+ return false;
+}
+
+irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
+{
+ unsigned int ireg, status;
+ struct tmio_mmc_host *host = devid;
+
+ tmio_mmc_card_irq_status(host, &ireg, &status);
+ __tmio_mmc_card_detect_irq(host, ireg, status);
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
+
+static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
+ int ireg, int status)
+{
/* Command completion */
if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
tmio_mmc_ack_mmc_irqs(host,
TMIO_STAT_CMDRESPEND |
TMIO_STAT_CMDTIMEOUT);
tmio_mmc_cmd_irq(host, status);
- goto out;
+ return true;
}
/* Data transfer */
if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
tmio_mmc_pio_irq(host);
- goto out;
+ return true;
}
/* Data transfer completion */
if (ireg & TMIO_STAT_DATAEND) {
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
tmio_mmc_data_irq(host);
- goto out;
+ return true;
}
- pr_warning("tmio_mmc: Spurious irq, disabling! "
- "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
- pr_debug_status(status);
- tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
+ return false;
+}
+
+irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
+{
+ unsigned int ireg, status;
+ struct tmio_mmc_host *host = devid;
+
+ tmio_mmc_card_irq_status(host, &ireg, &status);
+ __tmio_mmc_sdcard_irq(host, ireg, status);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
+
+irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
+{
+ struct tmio_mmc_host *host = devid;
+ struct mmc_host *mmc = host->mmc;
+ struct tmio_mmc_data *pdata = host->pdata;
+ unsigned int ireg, status;
+
+ if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
+ return IRQ_HANDLED;
+
+ status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+ ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
+
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL);
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
+ mmc_signal_sdio_irq(mmc);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(tmio_mmc_sdio_irq);
+
+irqreturn_t tmio_mmc_irq(int irq, void *devid)
+{
+ struct tmio_mmc_host *host = devid;
+ unsigned int ireg, status;
+
+ pr_debug("MMC IRQ begin\n");
+
+ tmio_mmc_card_irq_status(host, &ireg, &status);
+ if (__tmio_mmc_card_detect_irq(host, ireg, status))
+ return IRQ_HANDLED;
+ if (__tmio_mmc_sdcard_irq(host, ireg, status))
+ return IRQ_HANDLED;
+
+ tmio_mmc_sdio_irq(irq, devid);
-out:
return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
@@ -882,6 +918,7 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
tmio_mmc_clk_stop(_host);
tmio_mmc_reset(_host);
+ _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
tmio_mmc_enable_sdio_irq(mmc, 0);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 4dfe2c02ea9..4b83c43f950 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/delay.h>
@@ -1191,7 +1192,7 @@ static void __devexit via_sd_remove(struct pci_dev *pcidev)
mmiowb();
if (sdhost->mrq) {
- printk(KERN_ERR "%s: Controller removed during "
+ pr_err("%s: Controller removed during "
"transfer\n", mmc_hostname(sdhost->mmc));
/* make sure all DMA is stopped */
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 62e5a4d171e..64acd9ce141 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -194,7 +194,7 @@ static void wbsd_reset(struct wbsd_host *host)
{
u8 setup;
- printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));
+ pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc));
/*
* Soft reset of chip (SD/MMC part).
@@ -721,7 +721,7 @@ static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
* Any leftover data?
*/
if (count) {
- printk(KERN_ERR "%s: Incomplete DMA transfer. "
+ pr_err("%s: Incomplete DMA transfer. "
"%d bytes left.\n",
mmc_hostname(host->mmc), count);
@@ -803,7 +803,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
default:
#ifdef CONFIG_MMC_DEBUG
- printk(KERN_WARNING "%s: Data command %d is not "
+ pr_warning("%s: Data command %d is not "
"supported by this controller.\n",
mmc_hostname(host->mmc), cmd->opcode);
#endif
@@ -1029,7 +1029,7 @@ static void wbsd_tasklet_card(unsigned long param)
host->flags &= ~WBSD_FCARD_PRESENT;
if (host->mrq) {
- printk(KERN_ERR "%s: Card removed during transfer!\n",
+ pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
wbsd_reset(host);
@@ -1429,7 +1429,7 @@ free:
free_dma(dma);
err:
- printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
+ pr_warning(DRIVER_NAME ": Unable to allocate DMA %d. "
"Falling back on FIFO.\n", dma);
}
@@ -1664,7 +1664,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
ret = wbsd_scan(host);
if (ret) {
if (pnp && (ret == -ENODEV)) {
- printk(KERN_WARNING DRIVER_NAME
+ pr_warning(DRIVER_NAME
": Unable to confirm device presence. You may "
"experience lock-ups.\n");
} else {
@@ -1688,7 +1688,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
*/
if (pnp) {
if ((host->config != 0) && !wbsd_chip_validate(host)) {
- printk(KERN_WARNING DRIVER_NAME
+ pr_warning(DRIVER_NAME
": PnP active but chip not configured! "
"You probably have a buggy BIOS. "
"Configuring chip manually.\n");
@@ -1720,7 +1720,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
mmc_add_host(mmc);
- printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
+ pr_info("%s: W83L51xD", mmc_hostname(mmc));
if (host->chip_id != 0)
printk(" id %x", (int)host->chip_id);
printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
@@ -1909,7 +1909,7 @@ static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
*/
if (host->config != 0) {
if (!wbsd_chip_validate(host)) {
- printk(KERN_WARNING DRIVER_NAME
+ pr_warning(DRIVER_NAME
": PnP active but chip not configured! "
"You probably have a buggy BIOS. "
"Configuring chip manually.\n");
@@ -1973,9 +1973,9 @@ static int __init wbsd_drv_init(void)
{
int result;
- printk(KERN_INFO DRIVER_NAME
+ pr_info(DRIVER_NAME
": Winbond W83L51xD SD/MMC card interface driver\n");
- printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
+ pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
#ifdef CONFIG_PNP
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 66b616ebe53..318a869286a 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -12,27 +12,17 @@ menuconfig MTD
if MTD
-config MTD_DEBUG
- bool "Debugging"
- help
- This turns on low-level debugging for the entire MTD sub-system.
- Normally, you should say 'N'.
-
-config MTD_DEBUG_VERBOSE
- int "Debugging verbosity (0 = quiet, 3 = noisy)"
- depends on MTD_DEBUG
- default "0"
- help
- Determines the verbosity level of the MTD debugging messages.
-
config MTD_TESTS
- tristate "MTD tests support"
+ tristate "MTD tests support (DANGEROUS)"
depends on m
help
This option includes various MTD tests into compilation. The tests
should normally be compiled as kernel modules. The modules perform
various checks and verifications when loaded.
+ WARNING: some of the tests will ERASE the entire MTD device that they
+ test. Do not use these tests unless you really know what you are doing.
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
---help---
@@ -137,7 +127,8 @@ config MTD_AFS_PARTS
'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
config MTD_OF_PARTS
- def_bool y
+ tristate "OpenFirmware partitioning information support"
+ default y
depends on OF
help
This provides a partition parsing function which derives
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 39664c4229f..9aaac3ac89f 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -5,8 +5,8 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
-mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 302372c08b5..89a02f6f65d 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -162,8 +162,8 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
}
static int parse_afs_partitions(struct mtd_info *mtd,
- struct mtd_partition **pparts,
- unsigned long origin)
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
struct mtd_partition *parts;
u_int mask, off, idx, sz;
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 6697a1ec72d..f40ea454755 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -27,6 +27,7 @@
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
#include <linux/magic.h>
+#include <linux/module.h>
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
@@ -46,7 +47,7 @@ struct ar7_bin_rec {
static int create_mtd_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
- unsigned long origin)
+ struct mtd_part_parser_data *data)
{
struct ar7_bin_rec header;
unsigned int offset;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 23175edd563..8d70895a58d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -145,8 +145,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
if (((major << 8) | minor) < 0x3131) {
/* CFI version 1.0 => don't trust bootloc */
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
+ pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
map->name, cfi->mfr, cfi->id);
/* AFAICS all 29LV400 with a bottom boot block have a device ID
@@ -166,8 +165,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
* the 8-bit device ID.
*/
(cfi->mfr == CFI_MFR_MACRONIX)) {
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: Macronix MX29LV400C with bottom boot block"
+ pr_debug("%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
extp->TopBottom = 2; /* bottom boot */
} else
@@ -178,8 +176,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
extp->TopBottom = 2; /* bottom boot */
}
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: AMD CFI PRI V%c.%c has no boot block field;"
+ pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
" deduced %s from Device ID\n", map->name, major, minor,
extp->TopBottom == 2 ? "bottom" : "top");
}
@@ -191,7 +188,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
- DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
+ pr_debug("Using buffer write method\n" );
mtd->write = cfi_amdstd_write_buffers;
}
}
@@ -443,8 +440,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->writesize = 1;
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
- __func__, mtd->writebufsize);
+ pr_debug("MTD %s(): write buffer size %d\n", __func__,
+ mtd->writebufsize);
mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
@@ -1163,7 +1160,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
return ret;
}
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0] );
/*
@@ -1174,7 +1171,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
*/
oldd = map_read(map, adr);
if (map_word_equal(map, oldd, datum)) {
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
+ pr_debug("MTD %s(): NOP\n",
__func__);
goto op_done;
}
@@ -1400,7 +1397,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
datum = map_word_load(map, buf);
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0] );
XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1587,7 +1584,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
return ret;
}
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
__func__, chip->start );
XIP_INVAL_CACHED_RANGE(map, adr, map->size);
@@ -1675,7 +1672,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
return ret;
}
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
__func__, adr );
XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1801,8 +1798,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
goto out_unlock;
chip->state = FL_LOCKING;
- DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
- __func__, adr, len);
+ pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
@@ -1837,8 +1833,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
goto out_unlock;
chip->state = FL_UNLOCKING;
- DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
- __func__, adr, len);
+ pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 5e3cc80128a..89c6595454a 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -34,8 +34,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
/* Refuse the operation if the we cannot look behind the chip */
if (chip->start < 0x400000) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
+ pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
__func__, chip->start );
return -EIO;
}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index ea832ea0e4a..c443f527a53 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1914,11 +1914,10 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
* (oh and incidentaly the jedec spec - 3.5.3.3) the reset
* sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
* 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
- * as they will ignore the writes and dont care what address
+ * as they will ignore the writes and don't care what address
* the F0 is written to */
if (cfi->addr_unlock1) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "reset unlock called %x %x \n",
+ pr_debug( "reset unlock called %x %x \n",
cfi->addr_unlock1,cfi->addr_unlock2);
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -1941,7 +1940,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in
uint8_t uaddr;
if (!(jedec_table[index].devtypes & cfi->device_type)) {
- DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
+ pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
jedec_table[index].name, 4 * (1<<cfi->device_type));
return 0;
}
@@ -2021,7 +2020,7 @@ static inline int jedec_match( uint32_t base,
* there aren't.
*/
if (finfo->dev_id > 0xff) {
- DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n",
+ pr_debug("%s(): ID is not 8bit\n",
__func__);
goto match_done;
}
@@ -2045,12 +2044,10 @@ static inline int jedec_match( uint32_t base,
}
/* the part size must fit in the memory window */
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
+ pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
__func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
__func__, finfo->mfr_id, finfo->dev_id,
1 << finfo->dev_size );
goto match_done;
@@ -2061,13 +2058,12 @@ static inline int jedec_match( uint32_t base,
uaddr = finfo->uaddr;
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
+ pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
__func__, cfi->addr_unlock1, cfi->addr_unlock2 );
if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
&& ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): 0x%.4x 0x%.4x did not match\n",
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
__func__,
unlock_addrs[uaddr].addr1,
unlock_addrs[uaddr].addr2);
@@ -2083,15 +2079,13 @@ static inline int jedec_match( uint32_t base,
* FIXME - write a driver that takes all of the chip info as
* module parameters, doesn't probe but forces a load.
*/
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): check ID's disappear when not in ID mode\n",
+ pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
__func__ );
jedec_reset( base, map, cfi );
mfr = jedec_read_mfr( map, base, cfi );
id = jedec_read_id( map, base, cfi );
if ( mfr == cfi->mfr && id == cfi->id ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
+ pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
"You might need to manually specify JEDEC parameters.\n",
__func__, cfi->mfr, cfi->id );
goto match_done;
@@ -2104,7 +2098,7 @@ static inline int jedec_match( uint32_t base,
* Put the device back in ID mode - only need to do this if we
* were truly frobbing a real device.
*/
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
+ pr_debug("MTD %s(): return to ID mode\n", __func__ );
if (cfi->addr_unlock1) {
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -2167,13 +2161,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
cfi->mfr = jedec_read_mfr(map, base, cfi);
cfi->id = jedec_read_id(map, base, cfi);
- DEBUG(MTD_DEBUG_LEVEL3,
- "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
+ pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
- DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
+ pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
__func__, cfi->mfr, cfi->id,
cfi->addr_unlock1, cfi->addr_unlock2 );
if (!cfi_jedec_setup(map, cfi, i))
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index e790f38893b..ddf9ec6d916 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -43,6 +43,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
+#include <linux/module.h>
/* error message prefix */
#define ERRP "mtd: "
@@ -188,10 +189,7 @@ static struct mtd_partition * newpart(char *s,
extra_mem_size;
parts = kzalloc(alloc_size, GFP_KERNEL);
if (!parts)
- {
- printk(KERN_ERR ERRP "out of memory\n");
return NULL;
- }
extra_mem = (unsigned char *)(parts + *num_parts);
}
/* enter this partition (offset will be calculated later if it is zero at this point) */
@@ -316,8 +314,8 @@ static int mtdpart_setup_real(char *s)
* the first one in the chain if a NULL mtd_id is passed in.
*/
static int parse_cmdline_partitions(struct mtd_info *master,
- struct mtd_partition **pparts,
- unsigned long origin)
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
unsigned long offset;
int i;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce77fb..283d887f782 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -249,6 +249,16 @@ config MTD_DOC2001PLUS
under "NAND Flash Device Drivers" (currently that driver does not
support all Millennium Plus devices).
+config MTD_DOCG3
+ tristate "M-Systems Disk-On-Chip G3"
+ ---help---
+ This provides an MTD device driver for the M-Systems DiskOnChip
+ G3 devices.
+
+ The driver provides access to G3 DiskOnChip, distributed by
+ M-Systems and now Sandisk. The support is very experimental,
+ and doesn't give access to any write operations.
+
config MTD_DOCPROBE
tristate
select MTD_DOCECC
@@ -268,8 +278,7 @@ config MTD_DOCPROBE_ADVANCED
config MTD_DOCPROBE_ADDRESS
hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
depends on MTD_DOCPROBE
- default "0x0000" if MTD_DOCPROBE_ADVANCED
- default "0" if !MTD_DOCPROBE_ADVANCED
+ default "0x0"
---help---
By default, the probe for DiskOnChip devices will look for a
DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1d38f..56c7cd462f1 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
obj-$(CONFIG_MTD_DOC2001) += doc2001.o
obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
+obj-$(CONFIG_MTD_DOCG3) += docg3.o
obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
obj-$(CONFIG_MTD_DOCECC) += docecc.o
obj-$(CONFIG_MTD_SLRAM) += slram.o
@@ -17,3 +18,5 @@ obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
+
+CFLAGS_docg3.o += -I$(src)
\ No newline at end of file
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index f7fbf6025ef..e9fad915121 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -82,8 +82,7 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
void __iomem *docptr = doc->virtadr;
unsigned long timeo = jiffies + (HZ * 10);
- DEBUG(MTD_DEBUG_LEVEL3,
- "_DoC_WaitReady called for out-of-line wait\n");
+ pr_debug("_DoC_WaitReady called for out-of-line wait\n");
/* Out-of-line routine to wait for chip response */
while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
@@ -92,7 +91,7 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
DoC_Delay(doc, 2);
if (time_after(jiffies, timeo)) {
- DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+ pr_debug("_DoC_WaitReady timed out.\n");
return -EIO;
}
udelay(1);
@@ -323,8 +322,7 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
/* Reset the chip */
if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
- DEBUG(MTD_DEBUG_LEVEL2,
- "DoC_Command (reset) for %d,%d returned true\n",
+ pr_debug("DoC_Command (reset) for %d,%d returned true\n",
floor, chip);
return 0;
}
@@ -332,8 +330,7 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
/* Read the NAND chip ID: 1. Send ReadID command */
if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
- DEBUG(MTD_DEBUG_LEVEL2,
- "DoC_Command (ReadID) for %d,%d returned true\n",
+ pr_debug("DoC_Command (ReadID) for %d,%d returned true\n",
floor, chip);
return 0;
}
@@ -699,7 +696,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
#ifdef ECC_DEBUG
printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
#endif
- /* Read the ECC syndrom through the DiskOnChip ECC
+ /* Read the ECC syndrome through the DiskOnChip ECC
logic. These syndrome will be all ZERO when there
is no error */
for (i = 0; i < 6; i++) {
@@ -930,7 +927,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
uint8_t *buf = ops->oobbuf;
size_t len = ops->len;
- BUG_ON(ops->mode != MTD_OOB_PLACE);
+ BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
ofs += ops->ooboffs;
@@ -1094,7 +1091,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
struct DiskOnChip *this = mtd->priv;
int ret;
- BUG_ON(ops->mode != MTD_OOB_PLACE);
+ BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
mutex_lock(&this->lock);
ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 241192f05bc..a3f7a27499b 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -55,15 +55,14 @@ static int _DoC_WaitReady(void __iomem * docptr)
{
unsigned short c = 0xffff;
- DEBUG(MTD_DEBUG_LEVEL3,
- "_DoC_WaitReady called for out-of-line wait\n");
+ pr_debug("_DoC_WaitReady called for out-of-line wait\n");
/* Out-of-line routine to wait for chip response */
while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
;
if (c == 0)
- DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+ pr_debug("_DoC_WaitReady timed out.\n");
return (c == 0);
}
@@ -464,7 +463,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
#ifdef ECC_DEBUG
printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
#endif
- /* Read the ECC syndrom through the DiskOnChip ECC logic.
+ /* Read the ECC syndrome through the DiskOnChip ECC logic.
These syndrome will be all ZERO when there is no error */
for (i = 0; i < 6; i++) {
syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
@@ -632,7 +631,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
uint8_t *buf = ops->oobbuf;
size_t len = ops->len;
- BUG_ON(ops->mode != MTD_OOB_PLACE);
+ BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
ofs += ops->ooboffs;
@@ -690,7 +689,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
uint8_t *buf = ops->oobbuf;
size_t len = ops->len;
- BUG_ON(ops->mode != MTD_OOB_PLACE);
+ BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
ofs += ops->ooboffs;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 09ae0adc3ad..99351bc3e0e 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -61,15 +61,14 @@ static int _DoC_WaitReady(void __iomem * docptr)
{
unsigned int c = 0xffff;
- DEBUG(MTD_DEBUG_LEVEL3,
- "_DoC_WaitReady called for out-of-line wait\n");
+ pr_debug("_DoC_WaitReady called for out-of-line wait\n");
/* Out-of-line routine to wait for chip response */
while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
;
if (c == 0)
- DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+ pr_debug("_DoC_WaitReady timed out.\n");
return (c == 0);
}
@@ -655,7 +654,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
#ifdef ECC_DEBUG
printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
#endif
- /* Read the ECC syndrom through the DiskOnChip ECC logic.
+ /* Read the ECC syndrome through the DiskOnChip ECC logic.
These syndrome will be all ZERO when there is no error */
for (i = 0; i < 6; i++)
syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
@@ -835,7 +834,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
uint8_t *buf = ops->oobbuf;
size_t len = ops->len;
- BUG_ON(ops->mode != MTD_OOB_PLACE);
+ BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
ofs += ops->ooboffs;
@@ -920,7 +919,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
uint8_t *buf = ops->oobbuf;
size_t len = ops->len;
- BUG_ON(ops->mode != MTD_OOB_PLACE);
+ BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
ofs += ops->ooboffs;
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 37ef29a73ee..4a1c39b6f37 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -2,7 +2,7 @@
* ECC algorithm for M-systems disk on chip. We use the excellent Reed
* Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the
* GNU GPL License. The rest is simply to convert the disk on chip
- * syndrom into a standard syndom.
+ * syndrome into a standard syndrome.
*
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
new file mode 100644
index 00000000000..bdcf5df982e
--- /dev/null
+++ b/drivers/mtd/devices/docg3.c
@@ -0,0 +1,1114 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define CREATE_TRACE_POINTS
+#include "docg3.h"
+
+/*
+ * This driver handles the DiskOnChip G3 flash memory.
+ *
+ * As no specification is available from M-Systems/Sandisk, this driver lacks
+ * several functions available on the chip, such as:
+ * - block erase
+ * - page write
+ * - IPL write
+ * - ECC fixing (lack of BCH algorithm understanding)
+ * - powerdown / powerup
+ *
+ * The bus data width (8 bits versus 16 bits) is not handled (if_cfg flag), and
+ * the driver assumes a 16-bit data bus.
+ *
+ * DocG3 relies on 2 ECC algorithms, which are handled in hardware:
+ * - a 1 byte Hamming code stored in the OOB for each page
+ * - a 7 bytes BCH code stored in the OOB for each page
+ * The BCH part is only used for checking purposes; no correction is available,
+ * as some information is missing. What is known is that:
+ * - BCH is in GF(2^14)
+ * - BCH is over data of 520 bytes (512 page + 7 page_info bytes
+ * + 1 hamming byte)
+ * - BCH can correct up to 4 bits (t = 4)
+ * - BCH syndromes are calculated in hardware, and checked in hardware as well
+ *
+ */
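+
+/*
+ * Rough per-page layout, as a sketch deduced from the DOC_LAYOUT_* constants
+ * in docg3.h (no datasheet is available to confirm it):
+ *
+ *   | 512 bytes data | 16 bytes OOB | 8 bytes wear counters (extra mode) |
+ *   OOB = 7 bytes page info + 1 byte Hamming ECC + 7 bytes BCH + 1 unused
+ *
+ * Hence the 520 BCH-covered bytes mentioned above:
+ *   512 data + 7 page info + 1 Hamming = 520.
+ */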
+
+static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
+{
+ u8 val = readb(docg3->base + reg);
+
+ trace_docg3_io(0, 8, reg, (int)val);
+ return val;
+}
+
+static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
+{
+ u16 val = readw(docg3->base + reg);
+
+ trace_docg3_io(0, 16, reg, (int)val);
+ return val;
+}
+
+static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
+{
+ writeb(val, docg3->base + reg);
+ trace_docg3_io(1, 8, reg, val);
+}
+
+static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
+{
+ writew(val, docg3->base + reg);
+ trace_docg3_io(1, 16, reg, val);
+}
+
+static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
+{
+ doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
+}
+
+static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
+{
+ doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
+}
+
+static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
+{
+ doc_writeb(docg3, addr, DOC_FLASHADDRESS);
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int doc_register_readb(struct docg3 *docg3, int reg)
+{
+ u8 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readb(docg3, reg);
+ doc_vdbg("Read register %04x : %02x\n", reg, val);
+ return val;
+}
+
+static int doc_register_readw(struct docg3 *docg3, int reg)
+{
+ u16 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readw(docg3, reg);
+ doc_vdbg("Read register %04x : %04x\n", reg, val);
+ return val;
+}
+
+/**
+ * doc_delay - delay docg3 operations
+ * @docg3: the device
+ * @nbNOPs: the number of NOPs to issue
+ *
+ * As no specification is available, the right timings between chip commands are
+ * unknown. The only available piece of information is the NOP count observed
+ * on a working docg3 chip.
+ * Therefore, doc_delay relies on a busy loop of NOPs, instead of the more
+ * scheduler-friendly msleep() or a blocking mdelay().
+ */
+static void doc_delay(struct docg3 *docg3, int nbNOPs)
+{
+ int i;
+
+ doc_dbg("NOP x %d\n", nbNOPs);
+ for (i = 0; i < nbNOPs; i++)
+ doc_writeb(docg3, 0, DOC_NOP);
+}
+
+static int is_prot_seq_error(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
+}
+
+static int doc_is_ready(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & DOC_CTRL_FLASHREADY;
+}
+
+static int doc_wait_ready(struct docg3 *docg3)
+{
+ int maxWaitCycles = 100;
+
+ do {
+ doc_delay(docg3, 4);
+ cpu_relax();
+ } while (!doc_is_ready(docg3) && maxWaitCycles--);
+ doc_delay(docg3, 2);
+ if (maxWaitCycles > 0)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int doc_reset_seq(struct docg3 *docg3)
+{
+ int ret;
+
+ doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
+ doc_flash_sequence(docg3, DOC_SEQ_RESET);
+ doc_flash_command(docg3, DOC_CMD_RESET);
+ doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+
+ doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
+ return ret;
+}
+
+/**
+ * doc_read_data_area - Read data from data area
+ * @docg3: the device
+ * @buf: the buffer to fill in
+ * @len: the length to read
+ * @first: first time read, DOC_READADDRESS should be set
+ *
+ * Reads bytes from flash data. Handles the word-wide and single byte reads.
+ */
+static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
+ int first)
+{
+ int i, cdr, len4;
+ u16 data16, *dst16;
+ u8 data8, *dst8;
+
+ doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ if (first)
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ dst16 = buf;
+ for (i = 0; i < len4; i += 2) {
+ data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
+ *dst16 = data16;
+ dst16++;
+ }
+
+ if (cdr) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_delay(docg3, 1);
+ dst8 = (u8 *)dst16;
+ for (i = 0; i < cdr; i++) {
+ data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
+ *dst8 = data8;
+ dst8++;
+ }
+ }
+}
+
+/**
+ * doc_set_reliable_mode - Sets the flash to reliable data mode
+ * @docg3: the device
+ *
+ * The reliable data mode is a bit slower than the fast mode, but fewer errors
+ * occur. Entering the reliable mode cannot be done without entering the fast
+ * mode first.
+ */
+static void doc_set_reliable_mode(struct docg3 *docg3)
+{
+ doc_dbg("doc_set_reliable_mode()\n");
+ doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_set_asic_mode - Set the ASIC mode
+ * @docg3: the device
+ * @mode: the mode
+ *
+ * The ASIC can work in 3 modes:
+ * - RESET: all registers are zeroed
+ * - NORMAL: receives and handles commands
+ * - POWERDOWN: minimal power use, flash parts shut off
+ */
+static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
+{
+ int i;
+
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+
+ mode |= DOC_ASICMODE_MDWREN;
+ doc_dbg("doc_set_asic_mode(%02x)\n", mode);
+ doc_writeb(docg3, mode, DOC_ASICMODE);
+ doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_set_device_id - Sets the device id for cascaded G3 chips
+ * @docg3: the device
+ * @id: the chip to select (amongst 0, 1, 2, 3)
+ *
+ * There can be 4 cascaded G3 chips. This function selects the one which will
+ * be the active one.
+ */
+static void doc_set_device_id(struct docg3 *docg3, int id)
+{
+ u8 ctrl;
+
+ doc_dbg("doc_set_device_id(%d)\n", id);
+ doc_writeb(docg3, id, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+ ctrl &= ~DOC_CTRL_VIOLATION;
+ ctrl |= DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+}
+
+/**
+ * doc_set_extra_page_mode - Change flash page layout
+ * @docg3: the device
+ *
+ * Normally, the flash page is split into the data (512 bytes) and the out of
+ * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
+ * leveling counters are stored. To access this last area of 4 bytes, a special
+ * mode must be input to the flash ASIC.
+ *
+ * Returns 0 if no error occurred, -EIO otherwise.
+ */
+static int doc_set_extra_page_mode(struct docg3 *docg3)
+{
+ int fctrl;
+
+ doc_dbg("doc_set_extra_page_mode()\n");
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
+ doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
+ doc_delay(docg3, 2);
+
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
+ return -EIO;
+ else
+ return 0;
+}
+
+/**
+ * doc_read_seek - Set both flash planes to the specified block, page for reading
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @wear: if true, read will occur on the 4 extra bytes of the wear area
+ * @ofs: offset in page to read
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int wear, int ofs)
+{
+ int sector, ret = 0;
+
+ doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
+ block0, block1, page, ofs, wear);
+
+ if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_set_reliable_mode(docg3);
+ if (wear)
+ ret = doc_set_extra_page_mode(docg3);
+ if (ret)
+ goto out;
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_sequence(docg3, DOC_SEQ_READ);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 2);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_read_page_ecc_init - Initialize hardware ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function initializes the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH syndromes (on 7 bytes).
+ *
+ * Returns 0 on success, -EIO on error
+ */
+static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_read_page_prepare - Prepares reading data from a flash page
+ * @docg3: the device
+ * @block0: the first plane block index on flash memory
+ * @block1: the second plane block index on flash memory
+ * @page: the page index in the block
+ * @offset: the offset in the page (must be a multiple of 4)
+ *
+ * Prepares the page to be read in the flash memory :
+ * - tell ASIC to map the flash pages
+ * - tell ASIC to be in read mode
+ *
+ * After a call to this method, a call to doc_read_page_finish is mandatory,
+ * to end the read cycle of the flash.
+ *
+ * Read data from a flash page. The length to be read must be between 0 and
+ * (page_size + oob_size + wear_size), i.e. 532, and a multiple of 4 (because
+ * reading the extra bytes is not implemented).
+ *
+ * As pages are grouped by 2 (in 2 planes), reading from a page must be done
+ * in two steps:
+ * - one read of 512 bytes at offset 0
+ * - one read of 512 bytes at offset 512 + 16
+ *
+ * Returns 0 if successful, -EIO if a read error occurred.
+ */
+static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
+ int page, int offset)
+{
+ int wear_area = 0, ret = 0;
+
+ doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
+ block0, block1, page, offset);
+ if (offset >= DOC_LAYOUT_WEAR_OFFSET)
+ wear_area = 1;
+ if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
+ if (ret)
+ goto err;
+
+ doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
+ doc_delay(docg3, 2);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
+ doc_delay(docg3, 1);
+ if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
+ offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
+ doc_flash_address(docg3, offset >> 2);
+ doc_delay(docg3, 1);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_READ_FLASH);
+
+ return 0;
+err:
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+ return -EIO;
+}
+
+/**
+ * doc_read_page_getbytes - Reads bytes from a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be read (must be a multiple of 4)
+ * @buf: the buffer to be filled in
+ * @first: 1 if first time read, DOC_READADDRESS should be set
+ *
+ */
+static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
+ int first)
+{
+ doc_read_data_area(docg3, buf, len, first);
+ doc_delay(docg3, 2);
+ return len;
+}
+
+/**
+ * doc_get_hw_bch_syndroms - Get the hardware-calculated BCH syndromes
+ * @docg3: the device
+ * @syns: the array of 7 integers where the syndromes will be stored
+ */
+static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
+{
+ int i;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
+}
+
+/**
+ * doc_read_page_finish - Ends reading of a flash page
+ * @docg3: the device
+ *
+ * As a side effect, resets the chip selector to 0. This ensures that after each
+ * read operation, floor 0 is selected. Therefore, if the system halts, the
+ * next boot will come up on floor 0, where the IPL is.
+ */
+static void doc_read_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+ doc_set_device_id(docg3, 0);
+}
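+
+/*
+ * Typical chaining of the helpers above, as a sketch only (doc_read() below
+ * is the authoritative user):
+ *
+ *   doc_read_page_prepare(docg3, block0, block1, page, ofs);
+ *   doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+ *   doc_read_page_getbytes(docg3, 512, buf, 1);   read the page data
+ *   doc_read_page_getbytes(docg3, 16, oob, 0);    read the OOB area
+ *   doc_get_hw_bch_syndroms(docg3, syn);          check only, no correction
+ *   doc_read_page_finish(docg3);
+ */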
+
+/**
+ * calc_block_sector - Calculate blocks, pages and ofs.
+ *
+ * @from: offset in flash
+ * @block0: first plane block index calculated
+ * @block1: second plane block index calculated
+ * @page: page calculated
+ * @ofs: offset in page
+ */
+static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
+ int *ofs)
+{
+ uint sector;
+
+ sector = from / DOC_LAYOUT_PAGE_SIZE;
+ *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
+ * DOC_LAYOUT_NBPLANES;
+ *block1 = *block0 + 1;
+ *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
+ *page /= DOC_LAYOUT_NBPLANES;
+ if (sector % 2)
+ *ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
+ else
+ *ofs = 0;
+}
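+
+/*
+ * Worked example (numbers follow from the DOC_LAYOUT_* constants: 512-byte
+ * pages, 64 pages per block, 2 planes): from = 0x41e00 is sector 527, so
+ * block0 = 527 / 128 * 2 = 8, block1 = 9, page = (527 % 128) / 2 = 7, and
+ * ofs = 528 because the sector is odd.
+ */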
+
+/**
+ * doc_read - Read bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
+ *
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
+ *
+ * Returns 0 if the read is successful, or -EIO, -EINVAL if an error occurred
+ */
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, readlen, ret, ofs = 0;
+ int syn[DOC_ECC_BCH_SIZE], eccconf1;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ ret = -EINVAL;
+ doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
+ if (from % DOC_LAYOUT_PAGE_SIZE)
+ goto err;
+ if (len % 4)
+ goto err;
+ calc_block_sector(from, &block0, &block1, &page, &ofs);
+ if (block1 > docg3->max_block)
+ goto err;
+
+ *retlen = 0;
+ ret = 0;
+ readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ while (!ret && len > 0) {
+ readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
+ if (ret < 0)
+ goto err;
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+ if (ret < 0)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
+ if (ret < readlen)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
+ oob, 0);
+ if (ret < DOC_LAYOUT_OOB_SIZE)
+ goto err_in_read;
+
+ *retlen += readlen;
+ buf += readlen;
+ len -= readlen;
+
+ ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
+ if (ofs == 0)
+ page += 2;
+ if (page > DOC_ADDR_PAGE_MASK) {
+ page = 0;
+ block0 += 2;
+ block1 += 2;
+ }
+
+ /*
+ * There should be a BCH bitstream fixing algorithm here ...
+ * For now, a page read failure is triggered by a BCH error
+ */
+ doc_get_hw_bch_syndroms(docg3, syn);
+ eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+
+ doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oob[0], oob[1], oob[2], oob[3], oob[4],
+ oob[5], oob[6]);
+ doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
+ doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oob[8], oob[9], oob[10], oob[11], oob[12],
+ oob[13], oob[14]);
+ doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
+ doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
+ doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
+
+ ret = -EBADMSG;
+ if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
+ if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
+ goto err_in_read;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ }
+ doc_read_page_finish(docg3);
+ }
+
+ return 0;
+err_in_read:
+ doc_read_page_finish(docg3);
+err:
+ return ret;
+}
+
+/**
+ * doc_read_oob - Read out of band bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Reads flash memory OOB area of pages.
+ *
+ * Returns 0 if the read is successful, or -EIO, -EINVAL if an error occurred
+ */
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ofs, ret;
+ u8 *buf = ops->oobbuf;
+ size_t len = ops->ooblen;
+
+ doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
+ if (len != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ buf += ops->ooboffs;
+ break;
+ default:
+ break;
+ }
+
+ calc_block_sector(from, &block0, &block1, &page, &ofs);
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ ret = doc_read_page_prepare(docg3, block0, block1, page,
+ ofs + DOC_LAYOUT_PAGE_SIZE);
+ if (!ret)
+ ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
+ if (!ret)
+ ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
+ buf, 1);
+ doc_read_page_finish(docg3);
+
+ if (ret > 0)
+ ops->oobretlen = ret;
+ else
+ ops->oobretlen = 0;
+ return (ret > 0) ? 0 : ret;
+}
+
+static int doc_reload_bbt(struct docg3 *docg3)
+{
+ int block = DOC_LAYOUT_BLOCK_BBT;
+ int ret = 0, nbpages, page;
+ u_char *buf = docg3->bbt;
+
+ nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
+ for (page = 0; !ret && (page < nbpages); page++) {
+ ret = doc_read_page_prepare(docg3, block, block + 1,
+ page + DOC_LAYOUT_PAGE_BBT, 0);
+ if (!ret)
+ ret = doc_read_page_ecc_init(docg3,
+ DOC_LAYOUT_PAGE_SIZE);
+ if (!ret)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
+ buf, 1);
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ }
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_block_isbad - Checks whether a block is good or not
+ * @mtd: the device
+ * @from: the offset to find the correct block
+ *
+ * Returns 1 if block is bad, 0 if block is good
+ */
+static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ofs, is_good;
+
+ calc_block_sector(from, &block0, &block1, &page, &ofs);
+ doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
+ from, block0, block1, page, ofs);
+
+ if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
+ return 0;
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
+ return !is_good;
+}
+
+/**
+ * doc_get_erase_count - Get block erase count
+ * @docg3: the device
+ * @from: the offset in which the block is.
+ *
+ * Get the number of times a block was erased. The number is the maximum of
+ * erase times between the first and second plane (which should normally be equal).
+ *
+ * Returns the number of erases, or -EINVAL or -EIO on error.
+ */
+static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
+{
+ u8 buf[DOC_LAYOUT_WEAR_SIZE];
+ int ret, plane1_erase_count, plane2_erase_count;
+ int block0, block1, page, ofs;
+
+ doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
+ if (from % DOC_LAYOUT_PAGE_SIZE)
+ return -EINVAL;
+ calc_block_sector(from, &block0, &block1, &page, &ofs);
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ ret = doc_reset_seq(docg3);
+ if (!ret)
+ ret = doc_read_page_prepare(docg3, block0, block1, page,
+ ofs + DOC_LAYOUT_WEAR_OFFSET);
+ if (!ret)
+ ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
+ buf, 1);
+ doc_read_page_finish(docg3);
+
+ if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
+ return -EIO;
+ plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
+ | ((u8)(~buf[5]) << 16);
+ plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
+ | ((u8)(~buf[7]) << 16);
+
+ return max(plane1_erase_count, plane2_erase_count);
+}
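+
+/*
+ * The wear bytes store the counters bit-inverted. As a sketch: buf[1] = 0xfe
+ * with buf[4] = buf[5] = 0xff decodes to
+ *   plane1_erase_count = ~0xfe | (~0xff << 8) | (~0xff << 16) = 1.
+ */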
+
+/*
+ * Debugfs entries
+ */
+static int dbg_flashctrl_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+
+ int pos = 0;
+ u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+ pos += seq_printf(s,
+ "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+ fctrl,
+ fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+ fctrl & DOC_CTRL_CE ? "active" : "inactive",
+ fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+ fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+ fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+ return pos;
+}
+DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
+
+static int dbg_asicmode_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+
+ int pos = 0;
+ int pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+ int mode = pctrl & 0x03;
+
+ pos += seq_printf(s,
+ "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
+ pctrl,
+ pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
+ pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
+ pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
+ mode >> 1, mode & 0x1);
+
+ switch (mode) {
+ case DOC_ASICMODE_RESET:
+ pos += seq_printf(s, "reset");
+ break;
+ case DOC_ASICMODE_NORMAL:
+ pos += seq_printf(s, "normal");
+ break;
+ case DOC_ASICMODE_POWERDOWN:
+ pos += seq_printf(s, "powerdown");
+ break;
+ }
+ pos += seq_printf(s, ")\n");
+ return pos;
+}
+DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
+
+static int dbg_device_id_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+ int pos = 0;
+ int id = doc_register_readb(docg3, DOC_DEVICESELECT);
+
+ pos += seq_printf(s, "DeviceId = %d\n", id);
+ return pos;
+}
+DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
+
+static int dbg_protection_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+ int pos = 0;
+ int protect = doc_register_readb(docg3, DOC_PROTECTION);
+ int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
+ int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
+ int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
+ int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
+
+ pos += seq_printf(s, "Protection = 0x%02x (",
+ protect);
+ if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
+ pos += seq_printf(s, "FOUNDRY_OTP_LOCK,");
+ if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
+ pos += seq_printf(s, "CUSTOMER_OTP_LOCK,");
+ if (protect & DOC_PROTECT_LOCK_INPUT)
+ pos += seq_printf(s, "LOCK_INPUT,");
+ if (protect & DOC_PROTECT_STICKY_LOCK)
+ pos += seq_printf(s, "STICKY_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ENABLED)
+ pos += seq_printf(s, "PROTECTION ON,");
+ if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
+ pos += seq_printf(s, "IPL_DOWNLOAD_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ERROR)
+ pos += seq_printf(s, "PROTECT_ERR,");
+ else
+ pos += seq_printf(s, "NO_PROTECT_ERR");
+ pos += seq_printf(s, ")\n");
+
+ pos += seq_printf(s, "DPS0 = 0x%02x : "
+ "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
+ "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps0, dps0_low, dps0_high,
+ !!(dps0 & DOC_DPS_OTP_PROTECTED),
+ !!(dps0 & DOC_DPS_READ_PROTECTED),
+ !!(dps0 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps0 & DOC_DPS_KEY_OK));
+ pos += seq_printf(s, "DPS1 = 0x%02x : "
+ "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
+ "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps1, dps1_low, dps1_high,
+ !!(dps1 & DOC_DPS_OTP_PROTECTED),
+ !!(dps1 & DOC_DPS_READ_PROTECTED),
+ !!(dps1 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps1 & DOC_DPS_KEY_OK));
+ return pos;
+}
+DEBUGFS_RO_ATTR(protection, dbg_protection_show);
+
+static int __init doc_dbg_register(struct docg3 *docg3)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("docg3", NULL);
+ if (!root)
+ return -ENOMEM;
+
+ entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3,
+ &flashcontrol_fops);
+ if (entry)
+ entry = debugfs_create_file("asic_mode", S_IRUSR, root,
+ docg3, &asic_mode_fops);
+ if (entry)
+ entry = debugfs_create_file("device_id", S_IRUSR, root,
+ docg3, &device_id_fops);
+ if (entry)
+ entry = debugfs_create_file("protection", S_IRUSR, root,
+ docg3, &protection_fops);
+ if (entry) {
+ docg3->debugfs_root = root;
+ return 0;
+ } else {
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+ }
+}
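+
+/*
+ * With debugfs mounted at its usual location, the entries created above are
+ * expected to appear as /sys/kernel/debug/docg3/{flashcontrol,asic_mode,
+ * device_id,protection}, readable by root only (S_IRUSR).
+ */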
+
+static void __exit doc_dbg_unregister(struct docg3 *docg3)
+{
+ debugfs_remove_recursive(docg3->debugfs_root);
+}
+
+/**
+ * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
+ * @chip_id: The chip ID of the supported chip
+ * @mtd: The structure to fill
+ */
+static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int cfg;
+
+ cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
+ docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ mtd->name = "DiskOnChip G3";
+ docg3->max_block = 2047;
+ break;
+ }
+ mtd->type = MTD_NANDFLASH;
+ /*
+ * Once write methods are added, the correct flags will be set.
+ * mtd->flags = MTD_CAP_NANDFLASH;
+ */
+ mtd->flags = MTD_CAP_ROM;
+ mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+ mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
+ mtd->owner = THIS_MODULE;
+ mtd->erase = NULL;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = doc_read;
+ mtd->write = NULL;
+ mtd->read_oob = doc_read_oob;
+ mtd->write_oob = NULL;
+ mtd->sync = NULL;
+ mtd->block_isbad = doc_block_isbad;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources.
+ *
+ * Returns 0 on success; -ENOMEM, -ENXIO or -ENODEV on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ int ret, bbt_nbpages;
+ u16 chip_id, chip_id_inv;
+
+ ret = -ENOMEM;
+ docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
+ if (!docg3)
+ goto nomem1;
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ goto nomem2;
+ mtd->priv = docg3;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ goto noress;
+ }
+ docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+
+ docg3->dev = &pdev->dev;
+ docg3->device_id = 0;
+ doc_set_device_id(docg3, docg3->device_id);
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
+
+ chip_id = doc_register_readw(docg3, DOC_CHIPID);
+ chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
+
+ ret = -ENODEV;
+ if (chip_id != (u16)(~chip_id_inv)) {
+ doc_info("No device found at IO addr %p\n",
+ (void *)ress->start);
+ goto nochipfound;
+ }
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ doc_info("Found a G3 DiskOnChip at addr %p\n",
+ (void *)ress->start);
+ break;
+ default:
+ doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
+ goto nochipfound;
+ }
+
+ doc_set_driver_info(chip_id, mtd);
+ platform_set_drvdata(pdev, mtd);
+
+ ret = -ENOMEM;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nochipfound;
+ doc_reload_bbt(docg3);
+
+ ret = mtd_device_parse_register(mtd, part_probes,
+ NULL, NULL, 0);
+ if (ret)
+ goto register_error;
+
+ doc_dbg_register(docg3);
+ return 0;
+
+register_error:
+ kfree(docg3->bbt);
+nochipfound:
+ iounmap(docg3->base);
+noress:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ret;
+}
+
+/**
+ * docg3_release - Release the driver
+ * @pdev: the platform device
+ *
+ * Returns 0
+ */
+static int __exit docg3_release(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = mtd->priv;
+
+ doc_dbg_unregister(docg3);
+ mtd_device_unregister(mtd);
+ iounmap(docg3->base);
+ kfree(docg3->bbt);
+ kfree(docg3);
+ kfree(mtd);
+ return 0;
+}
+
+static struct platform_driver g3_driver = {
+ .driver = {
+ .name = "docg3",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(docg3_release),
+};
+
+static int __init docg3_init(void)
+{
+ return platform_driver_probe(&g3_driver, docg3_probe);
+}
+module_init(docg3_init);
+
+
+static void __exit docg3_exit(void)
+{
+ platform_driver_unregister(&g3_driver);
+}
+module_exit(docg3_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
new file mode 100644
index 00000000000..0d407be2459
--- /dev/null
+++ b/drivers/mtd/devices/docg3.h
@@ -0,0 +1,297 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _MTD_DOCG3_H
+#define _MTD_DOCG3_H
+
+/*
+ * Flash memory areas :
+ * - 0x0000 .. 0x07ff : IPL
+ * - 0x0800 .. 0x0fff : Data area
+ * - 0x1000 .. 0x17ff : Registers
+ * - 0x1800 .. 0x1fff : Unknown
+ */
+#define DOC_IOSPACE_IPL 0x0000
+#define DOC_IOSPACE_DATA 0x0800
+#define DOC_IOSPACE_SIZE 0x2000
+
+/*
+ * DOC G3 layout and addressing scheme
+ * A page address for the block "b", plane "P" and page "p":
+ * address = [bbbb bPpp pppp]
+ */
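+
+/*
+ * For example (illustrative only): plane block 8, page 7 is addressed as
+ * (8 << DOC_ADDR_BLOCK_SHIFT) | (7 & DOC_ADDR_PAGE_MASK) = 0x207, which is
+ * how doc_read_seek() programs the flash address registers.
+ */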
+
+#define DOC_ADDR_PAGE_MASK 0x3f
+#define DOC_ADDR_BLOCK_SHIFT 6
+#define DOC_LAYOUT_NBPLANES 2
+#define DOC_LAYOUT_PAGES_PER_BLOCK 64
+#define DOC_LAYOUT_PAGE_SIZE 512
+#define DOC_LAYOUT_OOB_SIZE 16
+#define DOC_LAYOUT_WEAR_SIZE 8
+#define DOC_LAYOUT_PAGE_OOB_SIZE \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE)
+#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
+#define DOC_LAYOUT_BLOCK_SIZE \
+ (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
+#define DOC_ECC_BCH_SIZE 7
+#define DOC_ECC_BCH_COVERED_BYTES \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
+ DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
+
+/*
+ * Blocks distribution
+ */
+#define DOC_LAYOUT_BLOCK_BBT 0
+#define DOC_LAYOUT_BLOCK_OTP 0
+#define DOC_LAYOUT_BLOCK_FIRST_DATA 6
+
+#define DOC_LAYOUT_PAGE_BBT 4
+
+/*
+ * Extra page OOB (16 bytes wide) layout
+ */
+#define DOC_LAYOUT_OOB_PAGEINFO_OFS 0
+#define DOC_LAYOUT_OOB_HAMMING_OFS 7
+#define DOC_LAYOUT_OOB_BCH_OFS 8
+#define DOC_LAYOUT_OOB_UNUSED_OFS 15
+#define DOC_LAYOUT_OOB_PAGEINFO_SZ 7
+#define DOC_LAYOUT_OOB_HAMMING_SZ 1
+#define DOC_LAYOUT_OOB_BCH_SZ 7
+#define DOC_LAYOUT_OOB_UNUSED_SZ 1
+
+
+#define DOC_CHIPID_G3 0x200
+#define DOC_ERASE_MARK 0xaa
+/*
+ * Flash registers
+ */
+#define DOC_CHIPID 0x1000
+#define DOC_TEST 0x1004
+#define DOC_BUSLOCK 0x1006
+#define DOC_ENDIANCONTROL 0x1008
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_CONFIGURATION 0x100e
+#define DOC_INTERRUPTCONTROL 0x1010
+#define DOC_READADDRESS 0x101a
+#define DOC_DATAEND 0x101e
+#define DOC_INTERRUPTSTATUS 0x1020
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_NOP 0x103e
+
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_ECCPRESET 0x1044
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_SYNDROM(idx) (0x1048 + ((idx) << 1))
+
+#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_ADDRLOW 0x1060
+#define DOC_DPS0_ADDRHIGH 0x1062
+#define DOC_DPS1_ADDRLOW 0x1064
+#define DOC_DPS1_ADDRHIGH 0x1066
+#define DOC_DPS0_STATUS 0x106c
+#define DOC_DPS1_STATUS 0x106e
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+
+/*
+ * Flash sequences
+ * A sequence is preset before one or more commands are input to the chip.
+ */
+#define DOC_SEQ_RESET 0x00
+#define DOC_SEQ_PAGE_SIZE_532 0x03
+#define DOC_SEQ_SET_MODE 0x09
+#define DOC_SEQ_READ 0x12
+#define DOC_SEQ_SET_PLANE1 0x0e
+#define DOC_SEQ_SET_PLANE2 0x10
+#define DOC_SEQ_PAGE_SETUP 0x1d
+
+/*
+ * Flash commands
+ */
+#define DOC_CMD_READ_PLANE1 0x00
+#define DOC_CMD_SET_ADDR_READ 0x05
+#define DOC_CMD_READ_ALL_PLANES 0x30
+#define DOC_CMD_READ_PLANE2 0x50
+#define DOC_CMD_READ_FLASH 0xe0
+#define DOC_CMD_PAGE_SIZE_532 0x3c
+
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOC_CMD_PROG_CYCLE1 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_ERASECYCLE2 0xd0
+
+#define DOC_CMD_RELIABLE_MODE 0x22
+#define DOC_CMD_FAST_MODE 0xa2
+
+#define DOC_CMD_RESET 0xff
+
+/*
+ * Flash register : DOC_FLASHCONTROL
+ */
+#define DOC_CTRL_VIOLATION 0x20
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN_BITS 0x08
+#define DOC_CTRL_PROTECTION_ERROR 0x04
+#define DOC_CTRL_SEQUENCE_ERROR 0x02
+#define DOC_CTRL_FLASHREADY 0x01
+
+/*
+ * Flash register : DOC_ASICMODE
+ */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/*
+ * Flash register : DOC_ECCCONF0
+ */
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
+#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
+#define DOC_ECCCONF0_BCH_ENABLE 0x0800
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/*
+ * Flash register : DOC_ECCCONF1
+ */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_UNKOWN1 0x40
+#define DOC_ECCCONF1_UNKOWN2 0x20
+#define DOC_ECCCONF1_UNKOWN3 0x10
+#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
+
+/*
+ * Flash register : DOC_PROTECTION
+ */
+#define DOC_PROTECT_FOUNDRY_OTP_LOCK 0x01
+#define DOC_PROTECT_CUSTOMER_OTP_LOCK 0x02
+#define DOC_PROTECT_LOCK_INPUT 0x04
+#define DOC_PROTECT_STICKY_LOCK 0x08
+#define DOC_PROTECT_PROTECTION_ENABLED 0x10
+#define DOC_PROTECT_IPL_DOWNLOAD_LOCK 0x20
+#define DOC_PROTECT_PROTECTION_ERROR 0x80
+
+/*
+ * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS
+ */
+#define DOC_DPS_OTP_PROTECTED 0x01
+#define DOC_DPS_READ_PROTECTED 0x02
+#define DOC_DPS_WRITE_PROTECTED 0x04
+#define DOC_DPS_HW_LOCK_ENABLED 0x08
+#define DOC_DPS_KEY_OK 0x80
+
+/*
+ * Flash register : DOC_CONFIGURATION
+ */
+#define DOC_CONF_IF_CFG 0x80
+#define DOC_CONF_MAX_ID_MASK 0x30
+#define DOC_CONF_VCCQ_3V 0x01
+
+/*
+ * Flash register : DOC_READADDRESS
+ */
+#define DOC_READADDR_INC 0x8000
+#define DOC_READADDR_ONE_BYTE 0x4000
+#define DOC_READADDR_ADDR_MASK 0x1fff
+
+/**
+ * struct docg3 - DiskOnChip driver private data
+ * @dev: the device currently under control
+ * @base: mapped IO space
+ * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
+ * @if_cfg: if true, reads are on 16 bits, else reads are on 8 bits
+ * @max_block: highest valid block index on the flash
+ * @bbt: bad block table cache
+ * @debugfs_root: debugfs root node
+ */
+struct docg3 {
+ struct device *dev;
+ void __iomem *base;
+ unsigned int device_id:4;
+ unsigned int if_cfg:1;
+ int max_block;
+ u8 *bbt;
+ struct dentry *debugfs_root;
+};
+
+#define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg)
+#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
+#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
+#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
+
+#define DEBUGFS_RO_ATTR(name, show_fct) \
+ static int name##_open(struct inode *inode, struct file *file) \
+ { return single_open(file, show_fct, inode->i_private); } \
+ static const struct file_operations name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = name##_open, \
+ .llseek = seq_lseek, \
+ .read = seq_read, \
+ .release = single_release \
+ };
+#endif
+
+/*
+ * Trace events part
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM docg3
+
+#if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define _MTD_DOCG3_TRACE
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(docg3_io,
+ TP_PROTO(int op, int width, u16 reg, int val),
+ TP_ARGS(op, width, reg, val),
+ TP_STRUCT__entry(
+ __field(int, op)
+ __field(unsigned char, width)
+ __field(u16, reg)
+ __field(int, val)),
+ TP_fast_assign(
+ __entry->op = op;
+ __entry->width = width;
+ __entry->reg = reg;
+ __entry->val = val;),
+ TP_printk("docg3: %s%02d reg=%04x, val=%04x",
+ __entry->op ? "write" : "read", __entry->width,
+ __entry->reg, __entry->val)
+ );
+#endif
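+
+/*
+ * Assuming the usual tracefs/debugfs mount point, the docg3_io event above
+ * can typically be enabled at run time with:
+ *   echo 1 > /sys/kernel/debug/tracing/events/docg3/docg3_io/enable
+ */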
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE docg3
+#include <trace/define_trace.h>
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index d374603493a..45116bb3029 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -50,11 +50,6 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/doc2000.h>
-/* Where to look for the devices? */
-#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
-#define CONFIG_MTD_DOCPROBE_ADDRESS 0
-#endif
-
static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
module_param(doc_config_location, ulong, 0);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 772a0ff89e0..3a11ea628e5 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -34,9 +34,6 @@
/* debugging */
//#define LART_DEBUG
-/* partition support */
-#define HAVE_PARTITIONS
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -44,9 +41,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
-#ifdef HAVE_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#ifndef CONFIG_SA1100_LART
#error This is for LART architecture only
@@ -598,7 +593,6 @@ static struct mtd_erase_region_info erase_regions[] = {
}
};
-#ifdef HAVE_PARTITIONS
static struct mtd_partition lart_partitions[] = {
/* blob */
{
@@ -619,7 +613,7 @@ static struct mtd_partition lart_partitions[] = {
.size = INITRD_LEN, /* MTDPART_SIZ_FULL */
}
};
-#endif
+#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)
static int __init lart_flash_init (void)
{
@@ -668,7 +662,6 @@ static int __init lart_flash_init (void)
result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
result,mtd.eraseregions[result].numblocks);
-#ifdef HAVE_PARTITIONS
printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
@@ -681,25 +674,16 @@ static int __init lart_flash_init (void)
result,lart_partitions[result].offset,
result,lart_partitions[result].size,lart_partitions[result].size / 1024);
#endif
-#endif
-#ifndef HAVE_PARTITIONS
- result = mtd_device_register(&mtd, NULL, 0);
-#else
result = mtd_device_register(&mtd, lart_partitions,
ARRAY_SIZE(lart_partitions));
-#endif
return (result);
}
static void __exit lart_flash_exit (void)
{
-#ifndef HAVE_PARTITIONS
- mtd_device_unregister(&mtd);
-#else
mtd_device_unregister(&mtd);
-#endif
}
module_init (lart_flash_init);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 35180e475c4..884904d3f9d 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -30,6 +30,7 @@
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_platform.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
@@ -88,7 +89,6 @@ struct m25p {
struct spi_device *spi;
struct mutex lock;
struct mtd_info mtd;
- unsigned partitioned:1;
u16 page_size;
u16 addr_width;
u8 erase_opcode;
@@ -209,9 +209,8 @@ static int wait_till_ready(struct m25p *flash)
*/
static int erase_chip(struct m25p *flash)
{
- DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
- dev_name(&flash->spi->dev), __func__,
- (long long)(flash->mtd.size >> 10));
+ pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__,
+ (long long)(flash->mtd.size >> 10));
/* Wait until finished previous write command. */
if (wait_till_ready(flash))
@@ -250,9 +249,8 @@ static int m25p_cmdsz(struct m25p *flash)
*/
static int erase_sector(struct m25p *flash, u32 offset)
{
- DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
- dev_name(&flash->spi->dev), __func__,
- flash->mtd.erasesize / 1024, offset);
+ pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev),
+ __func__, flash->mtd.erasesize / 1024, offset);
/* Wait until finished previous write command. */
if (wait_till_ready(flash))
@@ -286,9 +284,9 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
u32 addr,len;
uint32_t rem;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
- dev_name(&flash->spi->dev), __func__, "at",
- (long long)instr->addr, (long long)instr->len);
+ pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev),
+ __func__, (long long)instr->addr,
+ (long long)instr->len);
/* sanity checks */
if (instr->addr + instr->len > flash->mtd.size)
@@ -348,9 +346,8 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer t[2];
struct spi_message m;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- dev_name(&flash->spi->dev), __func__, "from",
- (u32)from, len);
+ pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
+ __func__, (u32)from, len);
/* sanity checks */
if (!len)
@@ -417,9 +414,8 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer t[2];
struct spi_message m;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- dev_name(&flash->spi->dev), __func__, "to",
- (u32)to, len);
+ pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
+ __func__, (u32)to, len);
*retlen = 0;
@@ -510,9 +506,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t actual;
int cmd_sz, ret;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- dev_name(&flash->spi->dev), __func__, "to",
- (u32)to, len);
+ pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
+ __func__, (u32)to, len);
*retlen = 0;
@@ -661,6 +656,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
@@ -671,6 +667,7 @@ static const struct spi_device_id m25p_ids[] = {
/* EON -- en25xxx */
{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
/* Intel/Numonyx -- xxxs33b */
@@ -788,8 +785,8 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
*/
tmp = spi_write_then_read(spi, &code, 1, id, 5);
if (tmp < 0) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
- dev_name(&spi->dev), tmp);
+ pr_debug("%s: error %d reading JEDEC ID\n",
+ dev_name(&spi->dev), tmp);
return ERR_PTR(tmp);
}
jedec = id[0];
@@ -825,8 +822,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
struct m25p *flash;
struct flash_info *info;
unsigned i;
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
+ struct mtd_part_parser_data ppdata;
+
+#ifdef CONFIG_MTD_OF_PARTS
+ if (!of_device_is_available(spi->dev.of_node))
+ return -ENODEV;
+#endif
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -928,6 +929,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
if (info->flags & M25P_NO_ERASE)
flash->mtd.flags |= MTD_NO_ERASE;
+ ppdata.of_node = spi->dev.of_node;
flash->mtd.dev.parent = &spi->dev;
flash->page_size = info->page_size;
@@ -945,8 +947,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
(long long)flash->mtd.size >> 10);
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd .name = %s, .size = 0x%llx (%lldMiB) "
+ pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
flash->mtd.name,
(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -955,8 +956,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
if (flash->mtd.numeraseregions)
for (i = 0; i < flash->mtd.numeraseregions; i++)
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, "
".erasesize = 0x%.8x (%uKiB), "
".numblocks = %d }\n",
i, (long long)flash->mtd.eraseregions[i].offset,
@@ -968,41 +968,9 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* partitions should match sector boundaries; and it may be good to
* use readonly partitions for writeprotected sectors (BP2..BP0).
*/
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
-
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes, &parts, 0);
- }
-
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
-
-#ifdef CONFIG_MTD_OF_PARTS
- if (nr_parts <= 0 && spi->dev.of_node) {
- nr_parts = of_mtd_parse_partitions(&spi->dev,
- spi->dev.of_node, &parts);
- }
-#endif
-
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
- flash->partitioned = 1;
- }
-
- return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
- -ENODEV : 0;
+ return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
+ data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
}
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 13749d458a3..d75c7af18a6 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -17,6 +17,8 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
@@ -24,7 +26,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-
/*
* DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in
* each chip, which may be used for double buffered I/O; but this driver
@@ -98,6 +99,16 @@ struct dataflash {
struct mtd_info mtd;
};
+#ifdef CONFIG_OF
+static const struct of_device_id dataflash_dt_ids[] = {
+ { .compatible = "atmel,at45", },
+ { .compatible = "atmel,dataflash", },
+ { /* sentinel */ }
+};
+#else
+#define dataflash_dt_ids NULL
+#endif
+
/* ......................................................................... */
/*
@@ -122,7 +133,7 @@ static int dataflash_waitready(struct spi_device *spi)
for (;;) {
status = dataflash_status(spi);
if (status < 0) {
- DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
+ pr_debug("%s: status %d?\n",
dev_name(&spi->dev), status);
status = 0;
}
@@ -149,7 +160,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
uint8_t *command;
uint32_t rem;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
+ pr_debug("%s: erase addr=0x%llx len 0x%llx\n",
dev_name(&spi->dev), (long long)instr->addr,
(long long)instr->len);
@@ -187,7 +198,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
command[2] = (uint8_t)(pageaddr >> 8);
command[3] = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n",
+ pr_debug("ERASE %s: (%x) %x %x %x [%i]\n",
do_block ? "block" : "page",
command[0], command[1], command[2], command[3],
pageaddr);
@@ -238,8 +249,8 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
uint8_t *command;
int status;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
- dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
+ pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
+ (unsigned)from, (unsigned)(from + len));
*retlen = 0;
@@ -255,7 +266,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
command = priv->command;
- DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
+ pr_debug("READ: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
spi_message_init(&msg);
@@ -287,7 +298,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
*retlen = msg.actual_length - 8;
status = 0;
} else
- DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
+ pr_debug("%s: read %x..%x --> %d\n",
dev_name(&priv->spi->dev),
(unsigned)from, (unsigned)(from + len),
status);
@@ -314,7 +325,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
int status = -EINVAL;
uint8_t *command;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
+ pr_debug("%s: write 0x%x..0x%x\n",
dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
*retlen = 0;
@@ -340,7 +351,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&priv->lock);
while (remaining > 0) {
- DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n",
+ pr_debug("write @ %i:%i len=%i\n",
pageaddr, offset, writelen);
/* REVISIT:
@@ -368,12 +379,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
+ pr_debug("TRANSFER: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
+ pr_debug("%s: xfer %u -> %d\n",
dev_name(&spi->dev), addr, status);
(void) dataflash_waitready(priv->spi);
@@ -386,7 +397,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = (addr & 0x000000FF);
- DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n",
+ pr_debug("PROGRAM: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
x[1].tx_buf = writebuf;
@@ -395,7 +406,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = spi_sync(spi, &msg);
spi_transfer_del(x + 1);
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
+ pr_debug("%s: pgm %u/%u -> %d\n",
dev_name(&spi->dev), addr, writelen, status);
(void) dataflash_waitready(priv->spi);
@@ -410,12 +421,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
+ pr_debug("COMPARE: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
+ pr_debug("%s: compare %u -> %d\n",
dev_name(&spi->dev), addr, status);
status = dataflash_waitready(priv->spi);
@@ -634,11 +645,10 @@ add_dataflash_otp(struct spi_device *spi, char *name,
{
struct dataflash *priv;
struct mtd_info *device;
+ struct mtd_part_parser_data ppdata;
struct flash_platform_data *pdata = spi->dev.platform_data;
char *otp_tag = "";
int err = 0;
- struct mtd_partition *parts;
- int nr_parts = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
@@ -677,28 +687,11 @@ add_dataflash_otp(struct spi_device *spi, char *name,
pagesize, otp_tag);
dev_set_drvdata(&spi->dev, priv);
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[] = { "cmdlinepart", NULL, };
-
- nr_parts = parse_mtd_partitions(device, part_probes, &parts,
- 0);
- }
+ ppdata.of_node = spi->dev.of_node;
+ err = mtd_device_parse_register(device, NULL, &ppdata,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
- if (nr_parts <= 0 && pdata && pdata->parts) {
- parts = pdata->parts;
- nr_parts = pdata->nr_parts;
- }
-
- if (nr_parts > 0) {
- priv->partitioned = 1;
- err = mtd_device_register(device, parts, nr_parts);
- goto out;
- }
-
- if (mtd_device_register(device, NULL, 0) == 1)
- err = -ENODEV;
-
-out:
if (!err)
return 0;
@@ -787,7 +780,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
*/
tmp = spi_write_then_read(spi, &code, 1, id, 3);
if (tmp < 0) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
+ pr_debug("%s: error %d reading JEDEC ID\n",
dev_name(&spi->dev), tmp);
return ERR_PTR(tmp);
}
@@ -804,7 +797,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
tmp < ARRAY_SIZE(dataflash_data);
tmp++, info++) {
if (info->jedec_id == jedec) {
- DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n",
+ pr_debug("%s: OTP, sector protect%s\n",
dev_name(&spi->dev),
(info->flags & SUP_POW2PS)
? ", binary pagesize" : ""
@@ -812,8 +805,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
if (info->flags & SUP_POW2PS) {
status = dataflash_status(spi);
if (status < 0) {
- DEBUG(MTD_DEBUG_LEVEL1,
- "%s: status error %d\n",
+ pr_debug("%s: status error %d\n",
dev_name(&spi->dev), status);
return ERR_PTR(status);
}
@@ -878,7 +870,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
*/
status = dataflash_status(spi);
if (status <= 0 || status == 0xff) {
- DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
+ pr_debug("%s: status error %d\n",
dev_name(&spi->dev), status);
if (status == 0 || status == 0xff)
status = -ENODEV;
@@ -914,14 +906,14 @@ static int __devinit dataflash_probe(struct spi_device *spi)
break;
/* obsolete AT45DB1282 not (yet?) supported */
default:
- DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
- dev_name(&spi->dev), status & 0x3c);
+ pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev),
+ status & 0x3c);
status = -ENODEV;
}
if (status < 0)
- DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
- dev_name(&spi->dev), status);
+ pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev),
+ status);
return status;
}
@@ -931,7 +923,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
struct dataflash *flash = dev_get_drvdata(&spi->dev);
int status;
- DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
+ pr_debug("%s: remove\n", dev_name(&spi->dev));
status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
@@ -946,6 +938,7 @@ static struct spi_driver dataflash_driver = {
.name = "mtd_dataflash",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .of_match_table = dataflash_dt_ids,
},
.probe = dataflash_probe,
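For reference, the registration pattern that the mtd_dataflash.c hunks above converge on can be condensed into a short sketch. This is illustrative only and not taken from the patch; the helper name example_register() and its arguments are assumptions, while mtd_device_parse_register(), struct mtd_part_parser_data and its of_node field are the 3.2-era MTD calls the hunks themselves make.

#include <linux/device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/*
 * Illustrative sketch, assuming the 3.2-era MTD core: passing NULL
 * probe types lets the core try its default partition parsers and
 * fall back to the supplied platform partition table, or to the
 * whole device when nr_parts is 0.
 */
static int example_register(struct mtd_info *mtd, struct device *dev,
			    struct mtd_partition *parts, int nr_parts)
{
	struct mtd_part_parser_data ppdata = { };

	/* Hand the OF node to parsers, as add_dataflash_otp() now does. */
	ppdata.of_node = dev->of_node;

	return mtd_device_parse_register(mtd, NULL, &ppdata, parts, nr_parts);
}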
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 83e80c65d6e..d38ef3bffe8 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -52,8 +52,6 @@ struct sst25l_flash {
struct spi_device *spi;
struct mutex lock;
struct mtd_info mtd;
-
- int partitioned;
};
struct flash_info {
@@ -381,8 +379,6 @@ static int __devinit sst25l_probe(struct spi_device *spi)
struct sst25l_flash *flash;
struct flash_platform_data *data;
int ret, i;
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
flash_info = sst25l_match_device(spi);
if (!flash_info)
@@ -414,8 +410,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
(long long)flash->mtd.size >> 10);
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd .name = %s, .size = 0x%llx (%lldMiB) "
+ pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
flash->mtd.name,
(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -423,37 +418,10 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.numeraseregions);
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[] = {"cmdlinepart", NULL};
-
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes,
- &parts, 0);
- }
-
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
-
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
-
- flash->partitioned = 1;
- return mtd_device_register(&flash->mtd, parts,
- nr_parts);
- }
-
- ret = mtd_device_register(&flash->mtd, NULL, 0);
- if (ret == 1) {
+ ret = mtd_device_parse_register(&flash->mtd, NULL, 0,
+ data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (ret) {
kfree(flash);
dev_set_drvdata(&spi->dev, NULL);
return -ENODEV;
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 037b399df3f..c7382bb686c 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -339,7 +339,7 @@ static int erase_xfer(partition_t *part,
struct erase_info *erase;
xfer = &part->XferInfo[xfernum];
- DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
+ pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
xfer->state = XFER_ERASING;
/* Is there a free erase slot? Always in MTD. */
@@ -415,7 +415,7 @@ static int prepare_xfer(partition_t *part, int i)
xfer = &part->XferInfo[i];
xfer->state = XFER_FAILED;
- DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
+ pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
/* Write the transfer unit header */
header = part->header;
@@ -476,7 +476,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
eun = &part->EUNInfo[srcunit];
xfer = &part->XferInfo[xferunit];
- DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
+ pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
eun->Offset, xfer->Offset);
@@ -598,7 +598,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
unit with the fewest erases, and usually pick the data unit with
the most deleted blocks. But with a small probability, pick the
oldest data unit instead. This means that we generally postpone
- the next reclaimation as long as possible, but shuffle static
+ the next reclamation as long as possible, but shuffle static
stuff around a bit for wear leveling.
======================================================================*/
@@ -609,8 +609,8 @@ static int reclaim_block(partition_t *part)
uint32_t best;
int queued, ret;
- DEBUG(0, "ftl_cs: reclaiming space...\n");
- DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits);
+ pr_debug("ftl_cs: reclaiming space...\n");
+ pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
/* Pick the least erased transfer unit */
best = 0xffffffff; xfer = 0xffff;
do {
@@ -618,22 +618,22 @@ static int reclaim_block(partition_t *part)
for (i = 0; i < part->header.NumTransferUnits; i++) {
int n=0;
if (part->XferInfo[i].state == XFER_UNKNOWN) {
- DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i);
+ pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
n=1;
erase_xfer(part, i);
}
if (part->XferInfo[i].state == XFER_ERASING) {
- DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i);
+ pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
n=1;
queued = 1;
}
else if (part->XferInfo[i].state == XFER_ERASED) {
- DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i);
+ pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
n=1;
prepare_xfer(part, i);
}
if (part->XferInfo[i].state == XFER_PREPARED) {
- DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i);
+ pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
n=1;
if (part->XferInfo[i].EraseCount <= best) {
best = part->XferInfo[i].EraseCount;
@@ -641,12 +641,12 @@ static int reclaim_block(partition_t *part)
}
}
if (!n)
- DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
+ pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
}
if (xfer == 0xffff) {
if (queued) {
- DEBUG(1, "ftl_cs: waiting for transfer "
+ pr_debug("ftl_cs: waiting for transfer "
"unit to be prepared...\n");
if (part->mbd.mtd->sync)
part->mbd.mtd->sync(part->mbd.mtd);
@@ -656,7 +656,7 @@ static int reclaim_block(partition_t *part)
printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
"suitable transfer units!\n");
else
- DEBUG(1, "ftl_cs: reclaim failed: no "
+ pr_debug("ftl_cs: reclaim failed: no "
"suitable transfer units!\n");
return -EIO;
@@ -666,7 +666,7 @@ static int reclaim_block(partition_t *part)
eun = 0;
if ((jiffies % shuffle_freq) == 0) {
- DEBUG(1, "ftl_cs: recycling freshest block...\n");
+ pr_debug("ftl_cs: recycling freshest block...\n");
best = 0xffffffff;
for (i = 0; i < part->DataUnits; i++)
if (part->EUNInfo[i].EraseCount <= best) {
@@ -686,7 +686,7 @@ static int reclaim_block(partition_t *part)
printk(KERN_NOTICE "ftl_cs: reclaim failed: "
"no free blocks!\n");
else
- DEBUG(1,"ftl_cs: reclaim failed: "
+ pr_debug("ftl_cs: reclaim failed: "
"no free blocks!\n");
return -EIO;
@@ -771,7 +771,7 @@ static uint32_t find_free(partition_t *part)
printk(KERN_NOTICE "ftl_cs: bad free list!\n");
return 0;
}
- DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
+ pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
return blk;
} /* find_free */
@@ -791,7 +791,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
int ret;
size_t offset, retlen;
- DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
+ pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
part, sector, nblocks);
if (!(part->state & FTL_FORMATTED)) {
printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -840,7 +840,7 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
int ret;
size_t retlen, offset;
- DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
+ pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
part, log_addr, virt_addr);
bsize = 1 << part->header.EraseUnitSize;
eun = log_addr / bsize;
@@ -905,7 +905,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
int ret;
size_t retlen, offset;
- DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
+ pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
part, sector, nblocks);
if (!(part->state & FTL_FORMATTED)) {
printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -1011,7 +1011,7 @@ static int ftl_discardsect(struct mtd_blktrans_dev *dev,
partition_t *part = (void *)dev;
uint32_t bsize = 1 << part->header.EraseUnitSize;
- DEBUG(1, "FTL erase sector %ld for %d sectors\n",
+ pr_debug("FTL erase sector %ld for %d sectors\n",
sector, nr_sects);
while (nr_sects) {
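The ftl.c hunks above, like the rest of the series, drop the numeric verbosity levels because pr_debug() takes no level argument: it produces no output unless the file opts in with DEBUG or the call site is enabled via CONFIG_DYNAMIC_DEBUG. A minimal sketch of that convention (illustrative, not part of the patch; example_trace() is an assumed name):

/*
 * Opt this translation unit in to pr_debug() output; without this
 * define (or dynamic debug enabling the call site at run time) the
 * call below is silent.
 */
#define DEBUG
#include <linux/kernel.h>
#include <linux/printk.h>

static void example_trace(unsigned int offset)
{
	/* Replaces the old DEBUG(1, "ftl_cs: ...") style; no level argument. */
	pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", offset);
}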
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index d7592e67d04..dd034efd187 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -63,14 +63,12 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
+ pr_debug("INFTL: add_mtd for %s\n", mtd->name);
inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
- if (!inftl) {
- printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
+ if (!inftl)
return;
- }
inftl->mbd.mtd = mtd;
inftl->mbd.devnum = -1;
@@ -133,7 +131,7 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
{
struct INFTLrecord *inftl = (void *)dev;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum);
+ pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
@@ -154,7 +152,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & (mtd->writesize - 1);
ops.ooblen = len;
ops.oobbuf = buf;
@@ -174,7 +172,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & (mtd->writesize - 1);
ops.ooblen = len;
ops.oobbuf = buf;
@@ -194,7 +192,7 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
@@ -215,16 +213,16 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
u16 pot = inftl->LastFreeEUN;
int silly = inftl->nb_blocks;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
- "desperate=%d)\n", inftl, desperate);
+ pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n",
+ inftl, desperate);
/*
* Normally, we force a fold to happen before we run out of free
* blocks completely.
*/
if (!desperate && inftl->numfreeEUNs < 2) {
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
- "EUNs (%d)\n", inftl->numfreeEUNs);
+ pr_debug("INFTL: there are too few free EUNs (%d)\n",
+ inftl->numfreeEUNs);
return BLOCK_NIL;
}
@@ -259,8 +257,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
struct inftl_oob oob;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
- "pending=%d)\n", inftl, thisVUC, pendingblock);
+ pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n",
+ inftl, thisVUC, pendingblock);
memset(BlockMap, 0xff, sizeof(BlockMap));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
@@ -323,8 +321,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
* Chain, and the Erase Unit into which we are supposed to be copying.
* Go for it.
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
- thisVUC, targetEUN);
+ pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
unsigned char movebuf[SECTORSIZE];
@@ -349,14 +346,13 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
(block * SECTORSIZE), SECTORSIZE, &retlen,
movebuf);
- if (ret < 0 && ret != -EUCLEAN) {
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
ret = mtd->read(mtd,
(inftl->EraseSize * BlockMap[block]) +
(block * SECTORSIZE), SECTORSIZE,
&retlen, movebuf);
if (ret != -EIO)
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
- "away on retry?\n");
+ pr_debug("INFTL: error went away on retry?\n");
}
memset(&oob, 0xff, sizeof(struct inftl_oob));
oob.b.Status = oob.b.Status1 = SECTOR_USED;
@@ -372,8 +368,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
* is important, by doing oldest first if we crash/reboot then it
* it is relatively simple to clean up the mess).
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
- thisVUC);
+ pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC);
for (;;) {
/* Find oldest unit in chain. */
@@ -421,7 +416,7 @@ static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
u16 ChainLength = 0, thislen;
u16 chain, EUN;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
+ pr_debug("INFTL: INFTL_makefreeblock(inftl=%p,"
"pending=%d)\n", inftl, pendingblock);
for (chain = 0; chain < inftl->nb_blocks; chain++) {
@@ -484,8 +479,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
size_t retlen;
int silly, silly2 = 3;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
- "block=%d)\n", inftl, block);
+ pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n",
+ inftl, block);
do {
/*
@@ -501,8 +496,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
blockofs, 8, &retlen, (char *)&bci);
status = bci.Status | bci.Status1;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
- "EUN %d is %x\n", block , writeEUN, status);
+ pr_debug("INFTL: status of block %d in EUN %d is %x\n",
+ block , writeEUN, status);
switch(status) {
case SECTOR_FREE:
@@ -555,9 +550,9 @@ hitused:
* Hopefully we free something, lets try again.
* This time we are desperate...
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 "
- "to find free EUN to accommodate write to "
- "VUC %d\n", thisVUC);
+ pr_debug("INFTL: using desperate==1 to find free EUN "
+ "to accommodate write to VUC %d\n",
+ thisVUC);
writeEUN = INFTL_findfreeblock(inftl, 1);
if (writeEUN == BLOCK_NIL) {
/*
@@ -647,7 +642,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
+ pr_debug("INFTL: INFTL_trydeletechain(inftl=%p,"
"thisVUC=%d)\n", inftl, thisVUC);
memset(BlockUsed, 0, sizeof(BlockUsed));
@@ -711,7 +706,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
* For each block in the chain free it and make it available
* for future use. Erase from the oldest unit first.
*/
- DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC);
+ pr_debug("INFTL: deleting empty VUC %d\n", thisVUC);
for (;;) {
u16 *prevEUN = &inftl->VUtable[thisVUC];
@@ -719,7 +714,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
/* If the chain is all gone already, we're done */
if (thisEUN == BLOCK_NIL) {
- DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
+ pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
return;
}
@@ -731,7 +726,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
thisEUN = *prevEUN;
}
- DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
+ pr_debug("Deleting EUN %d from VUC %d\n",
thisEUN, thisVUC);
if (INFTL_formatblock(inftl, thisEUN) < 0) {
@@ -767,7 +762,7 @@ static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
size_t retlen;
struct inftl_bci bci;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
+ pr_debug("INFTL: INFTL_deleteblock(inftl=%p,"
"block=%d)\n", inftl, block);
while (thisEUN < inftl->nb_blocks) {
@@ -826,7 +821,7 @@ static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
struct inftl_oob oob;
char *p, *pend;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
+ pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld,"
"buffer=%p)\n", inftl, block, buffer);
/* Is block all zero? */
@@ -876,7 +871,7 @@ static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
+ pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld,"
"buffer=%p)\n", inftl, block, buffer);
while (thisEUN < inftl->nb_blocks) {
@@ -922,7 +917,7 @@ foundit:
int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
/* Handle corrected bit flips gracefully */
- if (ret < 0 && ret != -EUCLEAN)
+ if (ret < 0 && !mtd_is_bitflip(ret))
return -EIO;
}
return 0;
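The inftlcore.c hunks also replace the open-coded -EUCLEAN comparison with mtd_is_bitflip(). A condensed sketch of the resulting error handling (illustrative only; example_classify() is an assumed name, while mtd_is_bitflip() is the helper the patch itself uses, standing in for the old ret == -EUCLEAN test):

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int example_classify(int ret)
{
	if (ret >= 0)
		return 0;	/* read succeeded outright */
	if (mtd_is_bitflip(ret))
		return 0;	/* ECC corrected bit-flips; data is still usable */
	return -EIO;		/* hard failure, as inftl_readblock() now reports */
}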
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 104052e774b..2ff601f816c 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -53,7 +53,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
struct INFTLPartition *ip;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
+ pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl);
/*
* Assume logical EraseSize == physical erasesize for starting the
@@ -139,24 +139,20 @@ static int find_boot_record(struct INFTLrecord *inftl)
mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
- printk("INFTL: Media Header ->\n"
- " bootRecordID = %s\n"
- " NoOfBootImageBlocks = %d\n"
- " NoOfBinaryPartitions = %d\n"
- " NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
- " FormatFlgs = %d\n"
- " OsakVersion = 0x%x\n"
- " PercentUsed = %d\n",
- mh->bootRecordID, mh->NoOfBootImageBlocks,
- mh->NoOfBinaryPartitions,
- mh->NoOfBDTLPartitions,
- mh->BlockMultiplierBits, mh->FormatFlags,
- mh->OsakVersion, mh->PercentUsed);
- }
-#endif
+ pr_debug("INFTL: Media Header ->\n"
+ " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplerBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = 0x%x\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ mh->OsakVersion, mh->PercentUsed);
if (mh->NoOfBDTLPartitions == 0) {
printk(KERN_WARNING "INFTL: Media Header sanity check "
@@ -200,19 +196,15 @@ static int find_boot_record(struct INFTLrecord *inftl)
ip->spareUnits = le32_to_cpu(ip->spareUnits);
ip->Reserved0 = le32_to_cpu(ip->Reserved0);
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
- printk(" PARTITION[%d] ->\n"
- " virtualUnits = %d\n"
- " firstUnit = %d\n"
- " lastUnit = %d\n"
- " flags = 0x%x\n"
- " spareUnits = %d\n",
- i, ip->virtualUnits, ip->firstUnit,
- ip->lastUnit, ip->flags,
- ip->spareUnits);
- }
-#endif
+ pr_debug(" PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
if (ip->Reserved0 != ip->firstUnit) {
struct erase_info *instr = &inftl->instr;
@@ -375,7 +367,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
*
* Return: 0 when succeed, -1 on error.
*
- * ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ * ToDo: 1. Is it necessary to check_free_sector after erasing ??
*/
int INFTL_formatblock(struct INFTLrecord *inftl, int block)
{
@@ -385,8 +377,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
struct mtd_info *mtd = inftl->mbd.mtd;
int physblock;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
- "block=%d)\n", inftl, block);
+ pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
memset(instr, 0, sizeof(struct erase_info));
@@ -476,30 +467,30 @@ void INFTL_dumptables(struct INFTLrecord *s)
{
int i;
- printk("-------------------------------------------"
+ pr_debug("-------------------------------------------"
"----------------------------------\n");
- printk("VUtable[%d] ->", s->nb_blocks);
+ pr_debug("VUtable[%d] ->", s->nb_blocks);
for (i = 0; i < s->nb_blocks; i++) {
if ((i % 8) == 0)
- printk("\n%04x: ", i);
- printk("%04x ", s->VUtable[i]);
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->VUtable[i]);
}
- printk("\n-------------------------------------------"
+ pr_debug("\n-------------------------------------------"
"----------------------------------\n");
- printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
+ pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
for (i = 0; i <= s->lastEUN; i++) {
if ((i % 8) == 0)
- printk("\n%04x: ", i);
- printk("%04x ", s->PUtable[i]);
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->PUtable[i]);
}
- printk("\n-------------------------------------------"
+ pr_debug("\n-------------------------------------------"
"----------------------------------\n");
- printk("INFTL ->\n"
+ pr_debug("INFTL ->\n"
" EraseSize = %d\n"
" h/s/c = %d/%d/%d\n"
" numvunits = %d\n"
@@ -513,7 +504,7 @@ void INFTL_dumptables(struct INFTLrecord *s)
s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
- printk("\n-------------------------------------------"
+ pr_debug("\n-------------------------------------------"
"----------------------------------\n");
}
@@ -521,25 +512,25 @@ void INFTL_dumpVUchains(struct INFTLrecord *s)
{
int logical, block, i;
- printk("-------------------------------------------"
+ pr_debug("-------------------------------------------"
"----------------------------------\n");
- printk("INFTL Virtual Unit Chains:\n");
+ pr_debug("INFTL Virtual Unit Chains:\n");
for (logical = 0; logical < s->nb_blocks; logical++) {
block = s->VUtable[logical];
if (block > s->nb_blocks)
continue;
- printk(" LOGICAL %d --> %d ", logical, block);
+ pr_debug(" LOGICAL %d --> %d ", logical, block);
for (i = 0; i < s->nb_blocks; i++) {
if (s->PUtable[block] == BLOCK_NIL)
break;
block = s->PUtable[block];
- printk("%d ", block);
+ pr_debug("%d ", block);
}
- printk("\n");
+ pr_debug("\n");
}
- printk("-------------------------------------------"
+ pr_debug("-------------------------------------------"
"----------------------------------\n");
}
@@ -555,7 +546,7 @@ int INFTL_mount(struct INFTLrecord *s)
int i;
u8 *ANACtable, ANAC;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
+ pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s);
/* Search for INFTL MediaHeader and Spare INFTL Media Header */
if (find_boot_record(s) < 0) {
@@ -585,7 +576,7 @@ int INFTL_mount(struct INFTLrecord *s)
* NOTEXPLORED state. Then at the end we will try to format it and
* mark it as free.
*/
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n");
+ pr_debug("INFTL: pass 1, explore each unit\n");
for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
continue;
@@ -717,17 +708,14 @@ int INFTL_mount(struct INFTLrecord *s)
logical_block = BLOCK_NIL;
}
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- INFTL_dumptables(s);
-#endif
+ INFTL_dumptables(s);
/*
* Second pass, check for infinite loops in chains. These are
* possible because we don't update the previous pointers when
* we fold chains. No big deal, just fix them up in PUtable.
*/
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n");
+ pr_debug("INFTL: pass 2, validate virtual chains\n");
for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
block = s->VUtable[logical_block];
last_block = BLOCK_NIL;
@@ -772,12 +760,8 @@ int INFTL_mount(struct INFTLrecord *s)
}
}
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- INFTL_dumptables(s);
- if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- INFTL_dumpVUchains(s);
-#endif
+ INFTL_dumptables(s);
+ INFTL_dumpVUchains(s);
/*
* Third pass, format unreferenced blocks and init free block count.
@@ -785,7 +769,7 @@ int INFTL_mount(struct INFTLrecord *s)
s->numfreeEUNs = 0;
s->LastFreeEUN = BLOCK_NIL;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n");
+ pr_debug("INFTL: pass 3, format unused blocks\n");
for (block = s->firstEUN; block <= s->lastEUN; block++) {
if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
printk("INFTL: unreferenced block %d, formatting it\n",
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 65655dd59e1..1dca31d9a8b 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -27,6 +27,7 @@
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
+#include <linux/module.h>
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, u_char *buf);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index c0c328c5b13..8e0c4bf9f7f 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -41,8 +41,6 @@ config MTD_PHYSMAP_START
are mapped on your particular target board. Refer to the
memory map which should hopefully be in the documentation for
your board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_LEN
hex "Physical length of flash mapping"
@@ -55,8 +53,6 @@ config MTD_PHYSMAP_LEN
than the total amount of flash present. Refer to the memory
map which should hopefully be in the documentation for your
board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_BANKWIDTH
int "Bank width in octets"
@@ -67,8 +63,6 @@ config MTD_PHYSMAP_BANKWIDTH
in octets. For example, if you have a data bus width of 32
bits, you would set the bus width octet value to 4. This is
used internally by the CFI drivers.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_OF
tristate "Flash device in physical memory map based on OF description"
@@ -260,7 +254,6 @@ config MTD_BCM963XX
config MTD_LANTIQ
tristate "Lantiq SoC NOR support"
depends on LANTIQ
- select MTD_PARTITIONS
help
Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
@@ -339,10 +332,6 @@ config MTD_SOLUTIONENGINE
This enables access to the flash chips on the Hitachi SolutionEngine and
similar boards. Say 'Y' if you are building a kernel for such a board.
-config MTD_ARM_INTEGRATOR
- tristate "CFI Flash device mapped on ARM Integrator/P720T"
- depends on ARM && MTD_CFI
-
config MTD_CDB89712
tristate "Cirrus CDB89712 evaluation board mappings"
depends on MTD_CFI && ARCH_CDB89712
@@ -398,13 +387,6 @@ config MTD_AUTCPU12
This enables access to the NV-RAM on autronix autcpu12 board.
If you have such a board, say 'Y'.
-config MTD_EDB7312
- tristate "CFI Flash device mapped on EDB7312"
- depends on ARCH_EDB7312 && MTD_CFI
- help
- This enables access to the CFI Flash on the Cogent EDB7312 board.
- If you have such a board, say 'Y' here.
-
config MTD_IMPA7
tristate "JEDEC Flash device mapped on impA7"
depends on ARM && MTD_JEDECPROBE
@@ -412,14 +394,6 @@ config MTD_IMPA7
This enables access to the NOR Flash on the impA7 board of
implementa GmbH. If you have such a board, say 'Y' here.
-config MTD_CEIVA
- tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame"
- depends on MTD_JEDECPROBE && ARCH_CEIVA
- help
- This enables access to the flash chips on the Ceiva/Polaroid
- PhotoMax Digital Picture Frame.
- If you have such a device, say 'Y'.
-
config MTD_H720X
tristate "Hynix evaluation board mappings"
depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index cb48b11afff..45dcb8b14f2 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
obj-$(CONFIG_MTD_MBX860) += mbx860.o
-obj-$(CONFIG_MTD_CEIVA) += ceiva.o
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
@@ -40,7 +39,6 @@ obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
-obj-$(CONFIG_MTD_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 608967fe74c..736ca10ca9f 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 67815eed2f0..6d6b2b5674e 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,7 +41,6 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
- struct mtd_partition *parts;
};
static void switch_to_flash(struct async_state *state)
@@ -165,18 +164,8 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
- if (ret > 0) {
- pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
- mtd_device_register(state->mtd, pdata->parts, ret);
- state->parts = pdata->parts;
- } else if (pdata->nr_parts) {
- pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
- mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
- } else {
- pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
- mtd_device_register(state->mtd, NULL, 0);
- }
+ mtd_device_parse_register(state->mtd, part_probe_types, 0,
+ pdata->parts, pdata->nr_parts);
platform_set_drvdata(pdev, state);
@@ -188,7 +177,6 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
mtd_device_unregister(state->mtd);
- kfree(state->parts);
map_destroy(state->mtd);
kfree(state);
return 0;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
deleted file mode 100644
index 06f9c981572..00000000000
--- a/drivers/mtd/maps/ceiva.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Ceiva flash memory driver.
- * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
- *
- * Note: this driver supports jedec compatible devices. Modification
- * for CFI compatible devices should be straight forward: change
- * jedec_probe to cfi_probe.
- *
- * Based on: sa1100-flash.c, which has the following copyright:
- * Flash memory access on SA11x0 based devices
- *
- * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
-
-/*
- * This isn't complete yet, so...
- */
-#define CONFIG_MTD_CEIVA_STATICMAP
-
-#ifdef CONFIG_MTD_CEIVA_STATICMAP
-/*
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * Please note:
- * 1. The flash size given should be the largest flash size that can
- * be accommodated.
- *
- * 2. The bus width must defined in clps_setup_flash.
- *
- * The MTD layer will detect flash chip aliasing and reduce the size of
- * the map accordingly.
- *
- */
-
-#ifdef CONFIG_ARCH_CEIVA
-/* Flash / Partition sizing */
-/* For the 28F8003, we use the block mapping to calcuate the sizes */
-#define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128))
-#define BOOT_PARTITION_SIZE_KiB (16)
-#define PARAMS_PARTITION_SIZE_KiB (8)
-#define KERNEL_PARTITION_SIZE_KiB (4*128)
-/* Use both remaining portion of first flash, and all of second flash */
-#define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128)
-
-static struct mtd_partition ceiva_partitions[] = {
- {
- .name = "Ceiva BOOT partition",
- .size = BOOT_PARTITION_SIZE_KiB*1024,
- .offset = 0,
-
- },{
- .name = "Ceiva parameters partition",
- .size = PARAMS_PARTITION_SIZE_KiB*1024,
- .offset = (16 + 8) * 1024,
- },{
- .name = "Ceiva kernel partition",
- .size = (KERNEL_PARTITION_SIZE_KiB)*1024,
- .offset = 0x20000,
-
- },{
- .name = "Ceiva root filesystem partition",
- .offset = MTDPART_OFS_APPEND,
- .size = (ROOT_PARTITION_SIZE_KiB)*1024,
- }
-};
-#endif
-
-static int __init clps_static_partitions(struct mtd_partition **parts)
-{
- int nb_parts = 0;
-
-#ifdef CONFIG_ARCH_CEIVA
- if (machine_is_ceiva()) {
- *parts = ceiva_partitions;
- nb_parts = ARRAY_SIZE(ceiva_partitions);
- }
-#endif
- return nb_parts;
-}
-#endif
-
-struct clps_info {
- unsigned long base;
- unsigned long size;
- int width;
- void *vbase;
- struct map_info *map;
- struct mtd_info *mtd;
- struct resource *res;
-};
-
-#define NR_SUBMTD 4
-
-static struct clps_info info[NR_SUBMTD];
-
-static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
-{
- struct mtd_info *subdev[nr];
- struct map_info *maps;
- int i, found = 0, ret = 0;
-
- /*
- * Allocate the map_info structs in one go.
- */
- maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
- if (!maps)
- return -ENOMEM;
- /*
- * Claim and then map the memory regions.
- */
- for (i = 0; i < nr; i++) {
- if (clps[i].base == (unsigned long)-1)
- break;
-
- clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
- if (!clps[i].res) {
- ret = -EBUSY;
- break;
- }
-
- clps[i].map = maps + i;
-
- clps[i].map->name = "clps flash";
- clps[i].map->phys = clps[i].base;
-
- clps[i].vbase = ioremap(clps[i].base, clps[i].size);
- if (!clps[i].vbase) {
- ret = -ENOMEM;
- break;
- }
-
- clps[i].map->virt = (void __iomem *)clps[i].vbase;
- clps[i].map->bankwidth = clps[i].width;
- clps[i].map->size = clps[i].size;
-
- simple_map_init(&clps[i].map);
-
- clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
- if (clps[i].mtd == NULL) {
- ret = -ENXIO;
- break;
- }
- clps[i].mtd->owner = THIS_MODULE;
- subdev[i] = clps[i].mtd;
-
- printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
- "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
- clps[i].width * 8);
- found += 1;
- }
-
- /*
- * ENXIO is special. It means we didn't find a chip when
- * we probed. We need to tear down the mapping, free the
- * resource and mark it as such.
- */
- if (ret == -ENXIO) {
- iounmap(clps[i].vbase);
- clps[i].vbase = NULL;
- release_resource(clps[i].res);
- clps[i].res = NULL;
- }
-
- /*
- * If we found one device, don't bother with concat support.
- * If we found multiple devices, use concat if we have it
- * available, otherwise fail.
- */
- if (ret == 0 || ret == -ENXIO) {
- if (found == 1) {
- *rmtd = subdev[0];
- ret = 0;
- } else if (found > 1) {
- /*
- * We detected multiple devices. Concatenate
- * them together.
- */
- *rmtd = mtd_concat_create(subdev, found,
- "clps flash");
- if (*rmtd == NULL)
- ret = -ENXIO;
- }
- }
-
- /*
- * If we failed, clean up.
- */
- if (ret) {
- do {
- if (clps[i].mtd)
- map_destroy(clps[i].mtd);
- if (clps[i].vbase)
- iounmap(clps[i].vbase);
- if (clps[i].res)
- release_resource(clps[i].res);
- } while (i--);
-
- kfree(maps);
- }
-
- return ret;
-}
-
-static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
-{
- int i;
-
- mtd_device_unregister(mtd);
-
- if (mtd != clps[0].mtd)
- mtd_concat_destroy(mtd);
-
- for (i = NR_SUBMTD; i >= 0; i--) {
- if (clps[i].mtd)
- map_destroy(clps[i].mtd);
- if (clps[i].vbase)
- iounmap(clps[i].vbase);
- if (clps[i].res)
- release_resource(clps[i].res);
- }
- kfree(clps[0].map);
-}
-
-/*
- * We define the memory space, size, and width for the flash memory
- * space here.
- */
-
-static int __init clps_setup_flash(void)
-{
- int nr = 0;
-
-#ifdef CONFIG_ARCH_CEIVA
- if (machine_is_ceiva()) {
- info[0].base = CS0_PHYS_BASE;
- info[0].size = SZ_32M;
- info[0].width = CEIVA_FLASH_WIDTH;
- info[1].base = CS1_PHYS_BASE;
- info[1].size = SZ_32M;
- info[1].width = CEIVA_FLASH_WIDTH;
- nr = 2;
- }
-#endif
- return nr;
-}
-
-static struct mtd_partition *parsed_parts;
-static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
-
-static void __init clps_locate_partitions(struct mtd_info *mtd)
-{
- const char *part_type = NULL;
- int nr_parts = 0;
- do {
- /*
- * Partition selection stuff.
- */
- nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0);
- if (nr_parts > 0) {
- part_type = "command line";
- break;
- }
-#ifdef CONFIG_MTD_CEIVA_STATICMAP
- nr_parts = clps_static_partitions(&parsed_parts);
- if (nr_parts > 0) {
- part_type = "static";
- break;
- }
- printk("found: %d partitions\n", nr_parts);
-#endif
- } while (0);
-
- if (nr_parts == 0) {
- printk(KERN_NOTICE "clps flash: no partition info "
- "available, registering whole flash\n");
- mtd_device_register(mtd, NULL, 0);
- } else {
- printk(KERN_NOTICE "clps flash: using %s partition "
- "definition\n", part_type);
- mtd_device_register(mtd, parsed_parts, nr_parts);
- }
-
- /* Always succeeds. */
-}
-
-static void __exit clps_destroy_partitions(void)
-{
- kfree(parsed_parts);
-}
-
-static struct mtd_info *mymtd;
-
-static int __init clps_mtd_init(void)
-{
- int ret;
- int nr;
-
- nr = clps_setup_flash();
- if (nr < 0)
- return nr;
-
- ret = clps_setup_mtd(info, nr, &mymtd);
- if (ret)
- return ret;
-
- clps_locate_partitions(mymtd);
-
- return 0;
-}
-
-static void __exit clps_mtd_cleanup(void)
-{
- clps_destroy_mtd(info, mymtd);
- clps_destroy_partitions();
-}
-
-module_init(clps_mtd_init);
-module_exit(clps_mtd_cleanup);
-
-MODULE_AUTHOR("Rob Scott");
-MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 7a9e1989c97..f43b365b848 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,14 +145,10 @@ static struct map_info dc21285_map = {
/* Partition stuff */
-static struct mtd_partition *dc21285_parts;
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_dc21285(void)
{
-
- int nrparts;
-
/* Determine bankwidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
case SA110_CNTL_ROMWIDTH_8:
@@ -200,8 +196,7 @@ static int __init init_dc21285(void)
dc21285_mtd->owner = THIS_MODULE;
- nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
- mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
+ mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0);
if(machine_is_ebsa285()) {
/*
@@ -224,8 +219,6 @@ static int __init init_dc21285(void)
static void __exit cleanup_dc21285(void)
{
mtd_device_unregister(dc21285_mtd);
- if (dc21285_parts)
- kfree(dc21285_parts);
map_destroy(dc21285_mtd);
iounmap(dc21285_map.virt);
}
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
deleted file mode 100644
index fe42a212bb3..00000000000
--- a/drivers/mtd/maps/edb7312.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Handle mapping of the NOR flash on Cogent EDB7312 boards
- *
- * Copyright 2002 SYSGO Real-Time Solutions GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
-#define WINDOW_SIZE 0x01000000
-#define BUSWIDTH 2
-#define FLASH_BLOCKSIZE_MAIN 0x20000
-#define FLASH_NUMBLOCKS_MAIN 128
-/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
-#define PROBETYPES { "cfi_probe", NULL }
-
-#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
-#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
-
-static struct mtd_info *mymtd;
-
-struct map_info edb7312nor_map = {
- .name = "NOR flash on EDB7312",
- .size = WINDOW_SIZE,
- .bankwidth = BUSWIDTH,
- .phys = WINDOW_ADDR,
-};
-
-/*
- * MTD partitioning stuff
- */
-static struct mtd_partition static_partitions[3] =
-{
- {
- .name = "ARMboot",
- .size = 0x40000,
- .offset = 0
- },
- {
- .name = "Kernel",
- .size = 0x200000,
- .offset = 0x40000
- },
- {
- .name = "RootFS",
- .size = 0xDC0000,
- .offset = 0x240000
- },
-};
-
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-static int mtd_parts_nb = 0;
-static struct mtd_partition *mtd_parts = 0;
-
-static int __init init_edb7312nor(void)
-{
- static const char *rom_probe_types[] = PROBETYPES;
- const char **type;
- const char *part_type = 0;
-
- printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
- WINDOW_SIZE, WINDOW_ADDR);
- edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
- if (!edb7312nor_map.virt) {
- printk(MSG_PREFIX "failed to ioremap\n");
- return -EIO;
- }
-
- simple_map_init(&edb7312nor_map);
-
- mymtd = 0;
- type = rom_probe_types;
- for(; !mymtd && *type; type++) {
- mymtd = do_map_probe(*type, &edb7312nor_map);
- }
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
- if (mtd_parts_nb > 0)
- part_type = "detected";
-
- if (mtd_parts_nb == 0) {
- mtd_parts = static_partitions;
- mtd_parts_nb = ARRAY_SIZE(static_partitions);
- part_type = "static";
- }
-
- if (mtd_parts_nb == 0)
- printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
- else
- printk(KERN_NOTICE MSG_PREFIX
- "using %s partition definition\n", part_type);
- /* Register the whole device first. */
- mtd_device_register(mymtd, NULL, 0);
- mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
- return 0;
- }
-
- iounmap((void *)edb7312nor_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_edb7312nor(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (edb7312nor_map.virt) {
- iounmap((void *)edb7312nor_map.virt);
- edb7312nor_map.virt = 0;
- }
-}
-
-module_init(init_edb7312nor);
-module_exit(cleanup_edb7312nor);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
-MODULE_DESCRIPTION("Generic configurable MTD map driver");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 7568c5f8b8a..1ec66f031c5 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -187,7 +187,6 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
*/
static int __devinit gpio_flash_probe(struct platform_device *pdev)
{
- int nr_parts;
size_t i, arr_size;
struct physmap_flash_data *pdata;
struct resource *memory;
@@ -252,20 +251,9 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
- nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
- &pdata->parts, 0);
- if (nr_parts > 0) {
- pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
- kfree(pdata->parts);
- } else if (pdata->nr_parts) {
- pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
- nr_parts = pdata->nr_parts;
- } else {
- pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
- nr_parts = 0;
- }
- mtd_device_register(state->mtd, pdata->parts, nr_parts);
+ mtd_device_parse_register(state->mtd, part_probe_types, 0,
+ pdata->parts, pdata->nr_parts);
return 0;
}
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 7f035860a36..49c14187fc6 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -58,18 +58,11 @@ static struct mtd_partition h720x_partitions[] = {
#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
-static int nr_mtd_parts;
-static struct mtd_partition *mtd_parts;
-static const char *probes[] = { "cmdlinepart", NULL };
-
/*
* Initialize FLASH support
*/
static int __init h720x_mtd_init(void)
{
-
- char *part_type = NULL;
-
h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
if (!h720x_map.virt) {
@@ -92,16 +85,8 @@ static int __init h720x_mtd_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
- nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
- if (nr_mtd_parts > 0)
- part_type = "command line";
- if (nr_mtd_parts <= 0) {
- mtd_parts = h720x_partitions;
- nr_mtd_parts = NUM_PARTITIONS;
- part_type = "builtin";
- }
- printk(KERN_INFO "Using %s partition table\n", part_type);
- mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
+ mtd_device_parse_register(mymtd, NULL, 0,
+ h720x_partitions, NUM_PARTITIONS);
return 0;
}
@@ -120,10 +105,6 @@ static void __exit h720x_mtd_cleanup(void)
map_destroy(mymtd);
}
- /* Free partition info, if commandline partition was used */
- if (mtd_parts && (mtd_parts != h720x_partitions))
- kfree (mtd_parts);
-
if (h720x_map.virt) {
iounmap((void *)h720x_map.virt);
h720x_map.virt = 0;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 404a50cbafa..f47aedb2436 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -49,7 +49,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
/*
* MTD partitioning stuff
*/
-static struct mtd_partition static_partitions[] =
+static struct mtd_partition partitions[] =
{
{
.name = "FileSystem",
@@ -58,16 +58,10 @@ static struct mtd_partition static_partitions[] =
},
};
-static int mtd_parts_nb[NUM_FLASHBANKS];
-static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
-
-static const char *probes[] = { "cmdlinepart", NULL };
-
static int __init init_impa7(void)
{
static const char *rom_probe_types[] = PROBETYPES;
const char **type;
- const char *part_type = 0;
int i;
static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
{ WINDOW_ADDR0, WINDOW_SIZE0 },
@@ -97,23 +91,9 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
- mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
- probes,
- &mtd_parts[i],
- 0);
- if (mtd_parts_nb[i] > 0) {
- part_type = "command line";
- } else {
- mtd_parts[i] = static_partitions;
- mtd_parts_nb[i] = ARRAY_SIZE(static_partitions);
- part_type = "static";
- }
-
- printk(KERN_NOTICE MSG_PREFIX
- "using %s partition definition\n",
- part_type);
- mtd_device_register(impa7_mtd[i],
- mtd_parts[i], mtd_parts_nb[i]);
+ mtd_device_parse_register(impa7_mtd[i], NULL, 0,
+ partitions,
+ ARRAY_SIZE(partitions));
}
else
iounmap((void *)impa7_map[i].virt);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index d2f47be8754..08c239604ee 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -44,7 +44,6 @@ struct vr_nor_mtd {
void __iomem *csr_base;
struct map_info map;
struct mtd_info *info;
- int nr_parts;
struct pci_dev *dev;
};
@@ -71,13 +70,9 @@ static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
- struct mtd_partition *parts;
- static const char *part_probes[] = { "cmdlinepart", NULL };
-
/* register the flash bank */
/* partition the flash bank */
- p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
- return mtd_device_register(p->info, parts, p->nr_parts);
+ return mtd_device_parse_register(p->info, NULL, 0, NULL, 0);
}
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 1594a802631..437fcd2f352 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -38,7 +38,6 @@
struct ixp2000_flash_info {
struct mtd_info *mtd;
struct map_info map;
- struct mtd_partition *partitions;
struct resource *res;
};
@@ -125,8 +124,6 @@ static int ixp2000_flash_remove(struct platform_device *dev)
if (info->map.map_priv_1)
iounmap((void *) info->map.map_priv_1);
- kfree(info->partitions);
-
if (info->res) {
release_resource(info->res);
kfree(info->res);
@@ -229,13 +226,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
- if (err > 0) {
- err = mtd_device_register(info->mtd, info->partitions, err);
- if(err)
- dev_err(&dev->dev, "Could not parse partitions\n");
- }
-
+ err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
if (err)
goto Error;
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 155b21942f4..30409015a3d 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -145,7 +145,6 @@ static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
struct ixp4xx_flash_info {
struct mtd_info *mtd;
struct map_info map;
- struct mtd_partition *partitions;
struct resource *res;
};
@@ -168,8 +167,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
if (info->map.virt)
iounmap(info->map.virt);
- kfree(info->partitions);
-
if (info->res) {
release_resource(info->res);
kfree(info->res);
@@ -185,8 +182,6 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
{
struct flash_platform_data *plat = dev->dev.platform_data;
struct ixp4xx_flash_info *info;
- const char *part_type = NULL;
- int nr_parts = 0;
int err = -1;
if (!plat)
@@ -252,28 +247,12 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
/* Use the fast version */
info->map.write = ixp4xx_write16;
- nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
- dev->resource->start);
- if (nr_parts > 0) {
- part_type = "dynamic";
- } else {
- info->partitions = plat->parts;
- nr_parts = plat->nr_parts;
- part_type = "static";
- }
- if (nr_parts == 0)
- printk(KERN_NOTICE "IXP4xx flash: no partition info "
- "available, registering whole flash\n");
- else
- printk(KERN_NOTICE "IXP4xx flash: using %s partition "
- "definition\n", part_type);
-
- err = mtd_device_register(info->mtd, info->partitions, nr_parts);
- if (err)
+ err = mtd_device_parse_register(info->mtd, probes, dev->resource->start,
+ plat->parts, plat->nr_parts);
+ if (err) {
printk(KERN_ERR "Could not parse partitions\n");
-
- if (err)
goto Error;
+ }
return 0;
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index a90cabd7b84..4f10e27ada5 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -107,16 +107,12 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static const char const *part_probe_types[] = { "cmdlinepart", NULL };
-
static int __init
ltq_mtd_probe(struct platform_device *pdev)
{
struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
struct ltq_mtd *ltq_mtd;
- struct mtd_partition *parts;
struct resource *res;
- int nr_parts = 0;
struct cfi_private *cfi;
int err;
@@ -172,17 +168,8 @@ ltq_mtd_probe(struct platform_device *pdev)
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
- nr_parts = parse_mtd_partitions(ltq_mtd->mtd,
- part_probe_types, &parts, 0);
- if (nr_parts > 0) {
- dev_info(&pdev->dev,
- "using %d partitions from cmdline", nr_parts);
- } else {
- nr_parts = ltq_mtd_data->nr_parts;
- parts = ltq_mtd_data->parts;
- }
-
- err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts);
+ err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0,
+ ltq_mtd_data->parts, ltq_mtd_data->nr_parts);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
@@ -208,7 +195,7 @@ ltq_mtd_remove(struct platform_device *pdev)
if (ltq_mtd) {
if (ltq_mtd->mtd) {
- del_mtd_partitions(ltq_mtd->mtd);
+ mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
if (ltq_mtd->map->virt)
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 5936c466e90..119baa7d747 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -33,9 +33,6 @@ struct latch_addr_flash_info {
/* cache; could be found out of res */
unsigned long win_mask;
- int nr_parts;
- struct mtd_partition *parts;
-
spinlock_t lock;
};
@@ -97,8 +94,6 @@ static void lf_copy_from(struct map_info *map, void *to,
static char *rom_probe_types[] = { "cfi_probe", NULL };
-static char *part_probe_types[] = { "cmdlinepart", NULL };
-
static int latch_addr_flash_remove(struct platform_device *dev)
{
struct latch_addr_flash_info *info;
@@ -112,8 +107,6 @@ static int latch_addr_flash_remove(struct platform_device *dev)
latch_addr_data = dev->dev.platform_data;
if (info->mtd != NULL) {
- if (info->nr_parts)
- kfree(info->parts);
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
@@ -206,21 +199,8 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- mtd_device_register(info->mtd, info->parts, err);
- return 0;
- }
- if (latch_addr_data->nr_parts) {
- pr_notice("Using latch-addr-flash partition information\n");
- mtd_device_register(info->mtd,
- latch_addr_data->parts,
- latch_addr_data->nr_parts);
- return 0;
- }
-
- mtd_device_register(info->mtd, NULL, 0);
+ mtd_device_parse_register(info->mtd, NULL, 0,
+ latch_addr_data->parts, latch_addr_data->nr_parts);
return 0;
iounmap:
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index bbe168b65c2..e8e9fec2355 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -22,22 +22,6 @@
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
-#ifdef CONFIG_MTD_DEBUG
-static int debug = CONFIG_MTD_DEBUG_VERBOSE;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
-#undef DEBUG
-#define DEBUG(n, format, arg...) \
- if (n <= debug) { \
- printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \
- }
-
-#else
-#undef DEBUG
-#define DEBUG(n, arg...)
-static const int debug = 0;
-#endif
-
#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
#define DRIVER_DESC "PCMCIA Flash memory card driver"
@@ -105,13 +89,13 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
int ret;
if (!pcmcia_dev_present(dev->p_dev)) {
- DEBUG(1, "device removed");
+ pr_debug("device removed\n");
return 0;
}
offset = to & ~(dev->win_size-1);
if (offset != dev->offset) {
- DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
+ pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n",
dev->offset, offset);
ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
if (ret != 0)
@@ -132,7 +116,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]);
return d;
}
@@ -147,7 +131,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]);
return d;
}
@@ -157,7 +141,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
while(len) {
int toread = win_size - (from & (win_size-1));
caddr_t addr;
@@ -169,7 +153,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
if(!addr)
return;
- DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread);
+ pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
memcpy_fromio(to, addr, toread);
len -= toread;
to += toread;
@@ -185,7 +169,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]);
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]);
writeb(d.x[0], addr);
}
@@ -196,7 +180,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]);
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]);
writew(d.x[0], addr);
}
@@ -206,7 +190,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
while(len) {
int towrite = win_size - (to & (win_size-1));
caddr_t addr;
@@ -218,7 +202,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
if(!addr)
return;
- DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite);
+ pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite);
memcpy_toio(addr, from, towrite);
len -= towrite;
to += towrite;
@@ -240,7 +224,7 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n",
ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -255,7 +239,7 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n",
ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -268,7 +252,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
memcpy_fromio(to, win_base + from, len);
}
@@ -280,7 +264,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx",
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n",
adr, win_base + adr, d.x[0]);
writeb(d.x[0], win_base + adr);
}
@@ -293,7 +277,7 @@ static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx",
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n",
adr, win_base + adr, d.x[0]);
writew(d.x[0], win_base + adr);
}
@@ -306,7 +290,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
memcpy_toio(win_base + to, from, len);
}
@@ -316,7 +300,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
struct pcmcia_device *link = dev->p_dev;
- DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
+ pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
pcmcia_fixup_vpp(link, on ? dev->vpp : 0);
}
@@ -325,7 +309,7 @@ static void pcmciamtd_release(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
- DEBUG(3, "link = 0x%p", link);
+ pr_debug("link = 0x%p\n", link);
if (link->resource[2]->end) {
if(dev->win_base) {
@@ -337,7 +321,6 @@ static void pcmciamtd_release(struct pcmcia_device *link)
}
-#ifdef CONFIG_MTD_DEBUG
static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv_data)
@@ -347,7 +330,7 @@ static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_format_t *t = &parse.format;
(void)t; /* Shut up, gcc */
- DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
+ pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n",
t->type, t->edc, t->offset, t->length);
}
return -ENOSPC;
@@ -363,12 +346,11 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_jedec_t *t = &parse.jedec;
for (i = 0; i < t->nid; i++)
- DEBUG(2, "JEDEC: 0x%02x 0x%02x",
+ pr_debug("JEDEC: 0x%02x 0x%02x\n",
t->id[i].mfr, t->id[i].info);
}
return -ENOSPC;
}
-#endif
static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
tuple_t *tuple,
@@ -382,14 +364,14 @@ static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
if (pcmcia_parse_tuple(tuple, &parse))
return -EINVAL;
- DEBUG(2, "Common memory:");
+ pr_debug("Common memory:\n");
dev->pcmcia_map.size = t->dev[0].size;
/* from here on: DEBUG only */
for (i = 0; i < t->ndev; i++) {
- DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
- DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
- DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
- DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
+ pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
+ pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
+ pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
+ pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
}
return 0;
}
@@ -409,12 +391,12 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
/* from here on: DEBUG only */
for (i = 0; i < t->ngeo; i++) {
- DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
- DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
- DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
- DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
- DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
- DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
+ pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
+ pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
+ pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
+ pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
+ pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
+ pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
}
return 0;
}
@@ -432,13 +414,11 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
if (p_dev->prod_id[i])
strcat(dev->mtd_name, p_dev->prod_id[i]);
}
- DEBUG(2, "Found name: %s", dev->mtd_name);
+ pr_debug("Found name: %s\n", dev->mtd_name);
}
-#ifdef CONFIG_MTD_DEBUG
pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
-#endif
pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
@@ -450,12 +430,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
if(force_size) {
dev->pcmcia_map.size = force_size << 20;
- DEBUG(2, "size forced to %dM", force_size);
+ pr_debug("size forced to %dM\n", force_size);
}
if(bankwidth) {
dev->pcmcia_map.bankwidth = bankwidth;
- DEBUG(2, "bankwidth forced to %d", bankwidth);
+ pr_debug("bankwidth forced to %d\n", bankwidth);
}
dev->pcmcia_map.name = dev->mtd_name;
@@ -464,7 +444,7 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
*new_name = 1;
}
- DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
+ pr_debug("Device: Size: %lu Width:%d Name: %s\n",
dev->pcmcia_map.size,
dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
}
@@ -479,7 +459,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
static char *probes[] = { "jedec_probe", "cfi_probe" };
int new_name = 0;
- DEBUG(3, "link=0x%p", link);
+ pr_debug("link=0x%p\n", link);
card_settings(dev, link, &new_name);
@@ -512,11 +492,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
do {
int ret;
- DEBUG(2, "requesting window with size = %luKiB memspeed = %d",
+ pr_debug("requesting window with size = %luKiB memspeed = %d\n",
(unsigned long) resource_size(link->resource[2]) >> 10,
mem_speed);
ret = pcmcia_request_window(link, link->resource[2], mem_speed);
- DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
+ pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size);
if(ret) {
j++;
link->resource[2]->start = 0;
@@ -524,21 +504,21 @@ static int pcmciamtd_config(struct pcmcia_device *link)
force_size << 20 : MAX_PCMCIA_ADDR;
link->resource[2]->end >>= j;
} else {
- DEBUG(2, "Got window of size %luKiB", (unsigned long)
+ pr_debug("Got window of size %luKiB\n", (unsigned long)
resource_size(link->resource[2]) >> 10);
dev->win_size = resource_size(link->resource[2]);
break;
}
} while (link->resource[2]->end >= 0x1000);
- DEBUG(2, "dev->win_size = %d", dev->win_size);
+ pr_debug("dev->win_size = %d\n", dev->win_size);
if(!dev->win_size) {
dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
pcmciamtd_release(link);
return -ENODEV;
}
- DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
+ pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10);
/* Get write protect status */
dev->win_base = ioremap(link->resource[2]->start,
@@ -549,7 +529,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
pcmciamtd_release(link);
return -ENODEV;
}
- DEBUG(1, "mapped window dev = %p @ %pR, base = %p",
+ pr_debug("mapped window dev = %p @ %pR, base = %p\n",
dev, link->resource[2], dev->win_base);
dev->offset = 0;
@@ -564,7 +544,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
}
link->config_index = 0;
- DEBUG(2, "Setting Configuration");
+ pr_debug("Setting Configuration\n");
ret = pcmcia_enable_device(link);
if (ret != 0) {
if (dev->win_base) {
@@ -580,17 +560,17 @@ static int pcmciamtd_config(struct pcmcia_device *link)
mtd = do_map_probe("map_rom", &dev->pcmcia_map);
} else {
for(i = 0; i < ARRAY_SIZE(probes); i++) {
- DEBUG(1, "Trying %s", probes[i]);
+ pr_debug("Trying %s\n", probes[i]);
mtd = do_map_probe(probes[i], &dev->pcmcia_map);
if(mtd)
break;
- DEBUG(1, "FAILED: %s", probes[i]);
+ pr_debug("FAILED: %s\n", probes[i]);
}
}
if(!mtd) {
- DEBUG(1, "Can not find an MTD");
+ pr_debug("Can not find an MTD\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -617,7 +597,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
/* If the memory found fits completely into the mapped PCMCIA window,
use the faster non-remapping read/write functions */
if(mtd->size <= dev->win_size) {
- DEBUG(1, "Using non remapping memory functions");
+ pr_debug("Using non remapping memory functions\n");
dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
if (dev->pcmcia_map.bankwidth == 1) {
dev->pcmcia_map.read = pcmcia_read8;
@@ -645,7 +625,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
static int pcmciamtd_suspend(struct pcmcia_device *dev)
{
- DEBUG(2, "EVENT_PM_RESUME");
+ pr_debug("EVENT_PM_RESUME\n");
/* get_lock(link); */
@@ -654,7 +634,7 @@ static int pcmciamtd_suspend(struct pcmcia_device *dev)
static int pcmciamtd_resume(struct pcmcia_device *dev)
{
- DEBUG(2, "EVENT_PM_SUSPEND");
+ pr_debug("EVENT_PM_SUSPEND\n");
/* free_lock(link); */
@@ -666,7 +646,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
- DEBUG(3, "link=0x%p", link);
+ pr_debug("link=0x%p\n", link);
if(dev->mtd_info) {
mtd_device_unregister(dev->mtd_info);
@@ -686,7 +666,7 @@ static int pcmciamtd_probe(struct pcmcia_device *link)
/* Create new memory card device */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) return -ENOMEM;
- DEBUG(1, "dev=0x%p", dev);
+ pr_debug("dev=0x%p\n", dev);
dev->p_dev = link;
link->priv = dev;
@@ -755,7 +735,7 @@ static int __init init_pcmciamtd(void)
static void __exit exit_pcmciamtd(void)
{
- DEBUG(1, DRIVER_DESC " unloading");
+ pr_debug(DRIVER_DESC " unloading");
pcmcia_unregister_driver(&pcmciamtd_driver);
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index f64cee4a3bf..66e8200079c 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,8 +27,6 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
- int nr_parts;
- struct mtd_partition *parts;
};
static int physmap_flash_remove(struct platform_device *dev)
@@ -46,8 +44,6 @@ static int physmap_flash_remove(struct platform_device *dev)
if (info->cmtd) {
mtd_device_unregister(info->cmtd);
- if (info->nr_parts)
- kfree(info->parts);
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
}
@@ -175,23 +171,8 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
- err = parse_mtd_partitions(info->cmtd, part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- mtd_device_register(info->cmtd, info->parts, err);
- info->nr_parts = err;
- return 0;
- }
-
- if (physmap_data->nr_parts) {
- printk(KERN_NOTICE "Using physmap partition information\n");
- mtd_device_register(info->cmtd, physmap_data->parts,
- physmap_data->nr_parts);
- return 0;
- }
-
- mtd_device_register(info->cmtd, NULL, 0);
-
+ mtd_device_parse_register(info->cmtd, part_probe_types, 0,
+ physmap_data->parts, physmap_data->nr_parts);
return 0;
err_out:
@@ -245,21 +226,6 @@ static struct platform_device physmap_flash = {
.num_resources = 1,
.resource = &physmap_flash_resource,
};
-
-void physmap_configure(unsigned long addr, unsigned long size,
- int bankwidth, void (*set_vpp)(struct map_info *, int))
-{
- physmap_flash_resource.start = addr;
- physmap_flash_resource.end = addr + size - 1;
- physmap_flash_data.width = bankwidth;
- physmap_flash_data.set_vpp = set_vpp;
-}
-
-void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
-{
- physmap_flash_data.nr_parts = num_parts;
- physmap_flash_data.parts = parts;
-}
#endif
static int __init physmap_init(void)
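The physmap.c hunk above also drops the exported physmap_configure() and physmap_set_partitions() helpers, which let board code point the CONFIG_MTD_PHYSMAP_COMPAT device at its flash at run time. A board needing the equivalent declares an ordinary "physmap-flash" platform device instead; the sketch below is a hypothetical board file with made-up addresses and partition names:

	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/mtd/partitions.h>
	#include <linux/mtd/physmap.h>

	static struct mtd_partition board_parts[] = {
		{ .name = "boot",   .offset = 0,                  .size = 512 * 1024 },
		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
	};

	static struct physmap_flash_data board_flash_data = {
		.width		= 2,			/* 16-bit wide flash */
		.parts		= board_parts,
		.nr_parts	= ARRAY_SIZE(board_parts),
	};

	static struct resource board_flash_resource = {
		.start	= 0x08000000,			/* board-specific base */
		.end	= 0x08ffffff,			/* 16 MiB */
		.flags	= IORESOURCE_MEM,
	};

	static struct platform_device board_flash_device = {
		.name		= "physmap-flash",
		.id		= 0,
		.resource	= &board_flash_resource,
		.num_resources	= 1,
		.dev		= {
			.platform_data = &board_flash_data,
		},
	};

The device would be registered from the board's init code with platform_device_register(&board_flash_device).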
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index d251d1db129..7d65f9d3e69 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,58 +34,10 @@ struct of_flash_list {
struct of_flash {
struct mtd_info *cmtd;
- struct mtd_partition *parts;
int list_size; /* number of elements in of_flash_list */
struct of_flash_list list[0];
};
-#define OF_FLASH_PARTS(info) ((info)->parts)
-static int parse_obsolete_partitions(struct platform_device *dev,
- struct of_flash *info,
- struct device_node *dp)
-{
- int i, plen, nr_parts;
- const struct {
- __be32 offset, len;
- } *part;
- const char *names;
-
- part = of_get_property(dp, "partitions", &plen);
- if (!part)
- return 0; /* No partitions found */
-
- dev_warn(&dev->dev, "Device tree uses obsolete partition map binding\n");
-
- nr_parts = plen / sizeof(part[0]);
-
- info->parts = kzalloc(nr_parts * sizeof(*info->parts), GFP_KERNEL);
- if (!info->parts)
- return -ENOMEM;
-
- names = of_get_property(dp, "partition-names", &plen);
-
- for (i = 0; i < nr_parts; i++) {
- info->parts[i].offset = be32_to_cpu(part->offset);
- info->parts[i].size = be32_to_cpu(part->len) & ~1;
- if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */
- info->parts[i].mask_flags = MTD_WRITEABLE;
-
- if (names && (plen > 0)) {
- int len = strlen(names) + 1;
-
- info->parts[i].name = (char *)names;
- plen -= len;
- names += len;
- } else {
- info->parts[i].name = "unnamed";
- }
-
- part++;
- }
-
- return nr_parts;
-}
-
static int of_flash_remove(struct platform_device *dev)
{
struct of_flash *info;
@@ -101,11 +53,8 @@ static int of_flash_remove(struct platform_device *dev)
mtd_concat_destroy(info->cmtd);
}
- if (info->cmtd) {
- if (OF_FLASH_PARTS(info))
- kfree(OF_FLASH_PARTS(info));
+ if (info->cmtd)
mtd_device_unregister(info->cmtd);
- }
for (i = 0; i < info->list_size; i++) {
if (info->list[i].mtd)
@@ -165,7 +114,8 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
specifies the list of partition probers to use. If none is given then the
default is used. These take precedence over other device tree
information. */
-static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
+static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
+ "ofpart", "ofoldpart", NULL };
static const char ** __devinit of_get_probes(struct device_node *dp)
{
const char *cp;
@@ -218,6 +168,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
int reg_tuple_size;
struct mtd_info **mtd_list = NULL;
resource_size_t res_size;
+ struct mtd_part_parser_data ppdata;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
@@ -331,29 +282,12 @@ static int __devinit of_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
+ ppdata.of_node = dp;
part_probe_types = of_get_probes(dp);
- err = parse_mtd_partitions(info->cmtd, part_probe_types,
- &info->parts, 0);
- if (err < 0) {
- of_free_probes(part_probe_types);
- goto err_out;
- }
+ mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
+ NULL, 0);
of_free_probes(part_probe_types);
- if (err == 0) {
- err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
- if (err < 0)
- goto err_out;
- }
-
- if (err == 0) {
- err = parse_obsolete_partitions(dev, info, dp);
- if (err < 0)
- goto err_out;
- }
-
- mtd_device_register(info->cmtd, info->parts, err);
-
kfree(mtd_list);
return 0;
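In physmap_of.c the hand-rolled device-tree partition parsing (including parse_obsolete_partitions() for the old flat "partitions" property) is replaced by the generic "ofpart" and "ofoldpart" parsers; the driver only has to pass the flash node to the parsers through struct mtd_part_parser_data. A sketch of the resulting probe flow, with illustrative names:

	#include <linux/of.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	static int register_of_flash(struct mtd_info *cmtd,
				     struct device_node *dp,
				     const char **part_probe_types)
	{
		struct mtd_part_parser_data ppdata;

		ppdata.of_node = dp;	/* consumed by "ofpart"/"ofoldpart" */
		return mtd_device_parse_register(cmtd, part_probe_types,
						 &ppdata, NULL, 0);
	}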
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 9ca1eccba4b..94f55348972 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -44,8 +44,6 @@ struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
- struct mtd_partition *partitions;
- bool free_partitions;
struct resource *area;
struct platdata_mtd_ram *pdata;
};
@@ -95,10 +93,6 @@ static int platram_remove(struct platform_device *pdev)
if (info->mtd) {
mtd_device_unregister(info->mtd);
- if (info->partitions) {
- if (info->free_partitions)
- kfree(info->partitions);
- }
map_destroy(info->mtd);
}
@@ -228,21 +222,8 @@ static int platram_probe(struct platform_device *pdev)
/* check to see if there are any available partitions, or whether
* to add this device whole */
- if (!pdata->nr_partitions) {
- /* try to probe using the supplied probe type */
- if (pdata->probes) {
- err = parse_mtd_partitions(info->mtd, pdata->probes,
- &info->partitions, 0);
- info->free_partitions = 1;
- if (err > 0)
- err = mtd_device_register(info->mtd,
- info->partitions, err);
- }
- }
- /* use the static mapping */
- else
- err = mtd_device_register(info->mtd, pdata->partitions,
- pdata->nr_partitions);
+ err = mtd_device_parse_register(info->mtd, pdata->probes, 0,
+ pdata->partitions, pdata->nr_partitions);
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7ae137d4b99..411a17df9fc 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -41,8 +41,6 @@ static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
}
struct pxa2xx_flash_info {
- struct mtd_partition *parts;
- int nr_parts;
struct mtd_info *mtd;
struct map_info map;
};
@@ -55,9 +53,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
{
struct flash_platform_data *flash = pdev->dev.platform_data;
struct pxa2xx_flash_info *info;
- struct mtd_partition *parts;
struct resource *res;
- int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -71,8 +67,6 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
info->map.bankwidth = flash->width;
info->map.phys = res->start;
info->map.size = resource_size(res);
- info->parts = flash->parts;
- info->nr_parts = flash->nr_parts;
info->map.virt = ioremap(info->map.phys, info->map.size);
if (!info->map.virt) {
@@ -104,18 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
-
- if (ret > 0) {
- info->nr_parts = ret;
- info->parts = parts;
- }
-
- if (!info->nr_parts)
- printk("Registering %s as whole device\n",
- info->map.name);
-
- mtd_device_register(info->mtd, info->parts, info->nr_parts);
+ mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
platform_set_drvdata(pdev, info);
return 0;
@@ -133,7 +116,6 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
iounmap(info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
- kfree(info->parts);
kfree(info);
return 0;
}
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 761fb459d2c..0237f197fd1 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,8 +25,6 @@
struct rbtx4939_flash_info {
struct mtd_info *mtd;
struct map_info map;
- int nr_parts;
- struct mtd_partition *parts;
};
static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,8 +39,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
if (info->mtd) {
struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
- if (info->nr_parts)
- kfree(info->parts);
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
@@ -50,7 +46,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
}
static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
-static const char *part_probe_types[] = { "cmdlinepart", NULL };
static int rbtx4939_flash_probe(struct platform_device *dev)
{
@@ -107,22 +102,11 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
info->mtd->owner = THIS_MODULE;
if (err)
goto err_out;
+ err = mtd_device_parse_register(info->mtd, NULL, 0,
+ pdata->parts, pdata->nr_parts);
- err = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- mtd_device_register(info->mtd, info->parts, err);
- info->nr_parts = err;
- return 0;
- }
-
- if (pdata->nr_parts) {
- pr_notice("Using rbtx4939 partition information\n");
- mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
- return 0;
- }
-
- mtd_device_register(info->mtd, NULL, 0);
+ if (err)
+ goto err_out;
return 0;
err_out:
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a9b5e0e5c4c..fa9c0a9670c 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -131,10 +131,8 @@ struct sa_subdev_info {
};
struct sa_info {
- struct mtd_partition *parts;
struct mtd_info *mtd;
int num_subdev;
- unsigned int nr_parts;
struct sa_subdev_info subdev[0];
};
@@ -231,8 +229,6 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
mtd_concat_destroy(info->mtd);
}
- kfree(info->parts);
-
for (i = info->num_subdev - 1; i >= 0; i--)
sa1100_destroy_subdev(&info->subdev[i]);
kfree(info);
@@ -341,10 +337,8 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
{
struct flash_platform_data *plat = pdev->dev.platform_data;
- struct mtd_partition *parts;
- const char *part_type = NULL;
struct sa_info *info;
- int err, nr_parts = 0;
+ int err;
if (!plat)
return -ENODEV;
@@ -358,26 +352,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
/*
* Partition selection stuff.
*/
- nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
- if (nr_parts > 0) {
- info->parts = parts;
- part_type = "dynamic";
- } else {
- parts = plat->parts;
- nr_parts = plat->nr_parts;
- part_type = "static";
- }
-
- if (nr_parts == 0)
- printk(KERN_NOTICE "SA1100 flash: no partition info "
- "available, registering whole flash\n");
- else
- printk(KERN_NOTICE "SA1100 flash: using %s partition "
- "definition\n", part_type);
-
- mtd_device_register(info->mtd, parts, nr_parts);
-
- info->nr_parts = nr_parts;
+ mtd_device_parse_register(info->mtd, part_probes, 0,
+ plat->parts, plat->nr_parts);
platform_set_drvdata(pdev, info);
err = 0;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index cbf6bade935..496c40704af 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -19,8 +19,6 @@
static struct mtd_info *flash_mtd;
static struct mtd_info *eprom_mtd;
-static struct mtd_partition *parsed_parts;
-
struct map_info soleng_eprom_map = {
.name = "Solution Engine EPROM",
.size = 0x400000,
@@ -51,12 +49,14 @@ static struct mtd_partition superh_se_partitions[] = {
.size = MTDPART_SIZ_FULL,
}
};
+#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions)
+#else
+#define superh_se_partitions NULL
+#define NUM_PARTITIONS 0
#endif /* CONFIG_MTD_SUPERH_RESERVE */
static int __init init_soleng_maps(void)
{
- int nr_parts = 0;
-
/* First probe at offset 0 */
soleng_flash_map.phys = 0;
soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
@@ -92,21 +92,8 @@ static int __init init_soleng_maps(void)
mtd_device_register(eprom_mtd, NULL, 0);
}
- nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
-
-#ifdef CONFIG_MTD_SUPERH_RESERVE
- if (nr_parts <= 0) {
- printk(KERN_NOTICE "Using configured partition at 0x%08x.\n",
- CONFIG_MTD_SUPERH_RESERVE);
- parsed_parts = superh_se_partitions;
- nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts);
- }
-#endif /* CONFIG_MTD_SUPERH_RESERVE */
-
- if (nr_parts > 0)
- mtd_device_register(flash_mtd, parsed_parts, nr_parts);
- else
- mtd_device_register(flash_mtd, NULL, 0);
+ mtd_device_parse_register(flash_mtd, probes, 0,
+ superh_se_partitions, NUM_PARTITIONS);
return 0;
}
@@ -118,10 +105,7 @@ static void __exit cleanup_soleng_maps(void)
map_destroy(eprom_mtd);
}
- if (parsed_parts)
- mtd_device_unregister(flash_mtd);
- else
- mtd_device_unregister(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
}
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 901ce968efa..aa7e0cb2893 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -20,7 +20,6 @@
#include <asm/immap_cpm2.h>
static struct mtd_info *sbcmtd[3];
-static struct mtd_partition *sbcmtd_parts[3];
struct map_info sbc82xx_flash_map[3] = {
{.name = "Boot flash"},
@@ -101,6 +100,7 @@ static int __init init_sbc82xx_flash(void)
for (i=0; i<3; i++) {
int8_t flashcs[3] = { 0, 6, 1 };
int nr_parts;
+ struct mtd_partition *defparts;
printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
sbc82xx_flash_map[i].name,
@@ -113,7 +113,8 @@ static int __init init_sbc82xx_flash(void)
}
printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);
- sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size);
+ sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys,
+ sbc82xx_flash_map[i].size);
if (!sbc82xx_flash_map[i].virt) {
printk("Failed to ioremap\n");
@@ -129,24 +130,20 @@ static int __init init_sbc82xx_flash(void)
sbcmtd[i]->owner = THIS_MODULE;
- nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
- &sbcmtd_parts[i], 0);
- if (nr_parts > 0) {
- mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
- nr_parts);
- continue;
- }
-
/* No partitioning detected. Use default */
if (i == 2) {
- mtd_device_register(sbcmtd[i], NULL, 0);
+ defparts = NULL;
+ nr_parts = 0;
} else if (i == bigflash) {
- mtd_device_register(sbcmtd[i], bigflash_parts,
- ARRAY_SIZE(bigflash_parts));
+ defparts = bigflash_parts;
+ nr_parts = ARRAY_SIZE(bigflash_parts);
} else {
- mtd_device_register(sbcmtd[i], smallflash_parts,
- ARRAY_SIZE(smallflash_parts));
+ defparts = smallflash_parts;
+ nr_parts = ARRAY_SIZE(smallflash_parts);
}
+
+ mtd_device_parse_register(sbcmtd[i], part_probes, 0,
+ defparts, nr_parts);
}
return 0;
}
@@ -159,12 +156,8 @@ static void __exit cleanup_sbc82xx_flash(void)
if (!sbcmtd[i])
continue;
- if (i<2 || sbcmtd_parts[i])
- mtd_device_unregister(sbcmtd[i]);
- else
- mtd_device_unregister(sbcmtd[i]);
+ mtd_device_unregister(sbcmtd[i]);
- kfree(sbcmtd_parts[i]);
map_destroy(sbcmtd[i]);
iounmap((void *)sbc82xx_flash_map[i].virt);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ca385697446..ed8b5e744b1 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -426,6 +426,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+
if (tr->discard) {
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
new->rq->limits.max_discard_sectors = UINT_MAX;
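The one-line addition above marks every mtd block translation queue as non-rotational: flash has no seek penalty, so the block layer's rotational heuristics are pointless, and userspace sees queue/rotational as 0 in sysfs. The call in isolation, for reference:

	#include <linux/blkdev.h>

	/* flash-backed queue: no seek penalty, advertise it as such */
	static void mark_queue_nonrot(struct request_queue *rq)
	{
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	}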
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 3326615ad66..7c1dc908a17 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -44,7 +44,7 @@ struct mtdblk_dev {
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};
-static struct mutex mtdblks_lock;
+static DEFINE_MUTEX(mtdblks_lock);
/*
* Cache stuff...
@@ -119,7 +119,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
if (mtdblk->cache_state != STATE_DIRTY)
return 0;
- DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
+ pr_debug("mtdblock: writing cached data for \"%s\" "
"at 0x%lx, size 0x%x\n", mtd->name,
mtdblk->cache_offset, mtdblk->cache_size);
@@ -148,7 +148,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
size_t retlen;
int ret;
- DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
+ pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
@@ -218,7 +218,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
size_t retlen;
int ret;
- DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+ pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
@@ -283,7 +283,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
- DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
+ pr_debug("mtdblock_open\n");
mutex_lock(&mtdblks_lock);
if (mtdblk->count) {
@@ -303,7 +303,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblks_lock);
- DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+ pr_debug("ok\n");
return 0;
}
@@ -312,7 +312,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
- DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
+ pr_debug("mtdblock_release\n");
mutex_lock(&mtdblks_lock);
@@ -329,7 +329,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblks_lock);
- DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+ pr_debug("ok\n");
return 0;
}
@@ -389,8 +389,6 @@ static struct mtd_blktrans_ops mtdblock_tr = {
static int __init init_mtdblock(void)
{
- mutex_init(&mtdblks_lock);
-
return register_mtd_blktrans(&mtdblock_tr);
}
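In mtdblock.c the cache lock becomes a statically initialized DEFINE_MUTEX(), so the mutex_init() call in init_mtdblock() (and the brief window in which the lock existed but was uninitialized) disappears. The idiom in isolation:

	#include <linux/mutex.h>

	/* statically initialized: valid from load time, no init call needed */
	static DEFINE_MUTEX(mtdblks_lock);

	static void example_touch_shared_state(void)
	{
		mutex_lock(&mtdblks_lock);
		/* ... manipulate the shared mtdblk table ... */
		mutex_unlock(&mtdblks_lock);
	}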
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 795a8c0a05b..0470a6e8630 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
+#include <linux/module.h>
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f1af2228a1b..e7dc732ddab 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -43,7 +43,7 @@ static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
* Data structure to hold the pointer to the mtd device as well
- * as mode information ofr various use cases.
+ * as mode information of various use cases.
*/
struct mtd_file_info {
struct mtd_info *mtd;
@@ -86,7 +86,7 @@ static int mtd_open(struct inode *inode, struct file *file)
struct mtd_file_info *mfi;
struct inode *mtd_ino;
- DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
+ pr_debug("MTD_open\n");
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
@@ -151,7 +151,7 @@ static int mtd_close(struct inode *inode, struct file *file)
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
- DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
+ pr_debug("MTD_close\n");
/* Only sync if opened RW */
if ((file->f_mode & FMODE_WRITE) && mtd->sync)
@@ -195,7 +195,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
size_t size = count;
char *kbuf;
- DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
+ pr_debug("MTD_read\n");
if (*ppos + count > mtd->size)
count = mtd->size - *ppos;
@@ -211,17 +211,17 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
len = min_t(size_t, count, size);
switch (mfi->mode) {
- case MTD_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_FACTORY:
ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
break;
- case MTD_MODE_OTP_USER:
+ case MTD_FILE_MODE_OTP_USER:
ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
break;
- case MTD_MODE_RAW:
+ case MTD_FILE_MODE_RAW:
{
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
ops.oobbuf = NULL;
ops.len = len;
@@ -233,16 +233,16 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
default:
ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
}
- /* Nand returns -EBADMSG on ecc errors, but it returns
+ /* Nand returns -EBADMSG on ECC errors, but it returns
* the data. For our userspace tools it is important
- * to dump areas with ecc errors !
+ * to dump areas with ECC errors!
* For kernel internal usage it also might return -EUCLEAN
* to signal the caller that a bitflip has occurred and has
* been corrected by the ECC algorithm.
* Userspace software which accesses NAND this way
* must be aware of the fact that it deals with NAND
*/
- if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
+ if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
*ppos += retlen;
if (copy_to_user(buf, kbuf, retlen)) {
kfree(kbuf);
@@ -278,7 +278,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
int ret=0;
int len;
- DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
+ pr_debug("MTD_write\n");
if (*ppos == mtd->size)
return -ENOSPC;
@@ -302,10 +302,10 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
}
switch (mfi->mode) {
- case MTD_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_FACTORY:
ret = -EROFS;
break;
- case MTD_MODE_OTP_USER:
+ case MTD_FILE_MODE_OTP_USER:
if (!mtd->write_user_prot_reg) {
ret = -EOPNOTSUPP;
break;
@@ -313,13 +313,14 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
break;
- case MTD_MODE_RAW:
+ case MTD_FILE_MODE_RAW:
{
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
ops.oobbuf = NULL;
+ ops.ooboffs = 0;
ops.len = len;
ret = mtd->write_oob(mtd, *ppos, &ops);
@@ -367,13 +368,13 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
if (!mtd->read_fact_prot_reg)
ret = -EOPNOTSUPP;
else
- mfi->mode = MTD_MODE_OTP_FACTORY;
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
if (!mtd->read_fact_prot_reg)
ret = -EOPNOTSUPP;
else
- mfi->mode = MTD_MODE_OTP_USER;
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
default:
ret = -EINVAL;
@@ -390,6 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
+ struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops;
uint32_t retlen;
int ret = 0;
@@ -409,9 +411,10 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return ret;
ops.ooblen = length;
- ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
return -EINVAL;
@@ -420,7 +423,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (IS_ERR(ops.oobbuf))
return PTR_ERR(ops.oobbuf);
- start &= ~((uint64_t)mtd->oobsize - 1);
+ start &= ~((uint64_t)mtd->writesize - 1);
ret = mtd->write_oob(mtd, start, &ops);
if (ops.oobretlen > 0xFFFFFFFFU)
@@ -433,9 +436,11 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return ret;
}
-static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
- uint32_t length, void __user *ptr, uint32_t __user *retp)
+static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
{
+ struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops;
int ret = 0;
@@ -451,9 +456,10 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
return ret;
ops.ooblen = length;
- ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
return -EINVAL;
@@ -462,7 +468,7 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
if (!ops.oobbuf)
return -ENOMEM;
- start &= ~((uint64_t)mtd->oobsize - 1);
+ start &= ~((uint64_t)mtd->writesize - 1);
ret = mtd->read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
@@ -472,13 +478,29 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
ret = -EFAULT;
kfree(ops.oobbuf);
+
+ /*
+ * NAND returns -EBADMSG on ECC errors, but it returns the OOB
+ * data. For our userspace tools it is important to dump areas
+ * with ECC errors!
+ * For kernel internal usage it also might return -EUCLEAN
+ * to signal the caller that a bitflip has occurred and has
+ * been corrected by the ECC algorithm.
+ *
+ * Note: currently the standard NAND function, nand_read_oob_std,
+ * does not calculate ECC for the OOB area, so do not rely on
+ * this behavior unless you have replaced it with your own.
+ */
+ if (mtd_is_bitflip_or_eccerr(ret))
+ return 0;
+
return ret;
}
/*
* Copies (and truncates, if necessary) data from the larger struct,
* nand_ecclayout, to the smaller, deprecated layout struct,
- * nand_ecclayout_user. This is necessary only to suppport the deprecated
+ * nand_ecclayout_user. This is necessary only to support the deprecated
* API ioctl ECCGETLAYOUT while allowing all new functionality to use
* nand_ecclayout flexibly (i.e. the struct may change size in new
* releases without requiring major rewrites).
@@ -544,6 +566,55 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
}
}
+static int mtd_write_ioctl(struct mtd_info *mtd,
+ struct mtd_write_req __user *argp)
+{
+ struct mtd_write_req req;
+ struct mtd_oob_ops ops;
+ void __user *usr_data, *usr_oob;
+ int ret;
+
+ if (copy_from_user(&req, argp, sizeof(req)) ||
+ !access_ok(VERIFY_READ, req.usr_data, req.len) ||
+ !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
+ return -EFAULT;
+ if (!mtd->write_oob)
+ return -EOPNOTSUPP;
+
+ ops.mode = req.mode;
+ ops.len = (size_t)req.len;
+ ops.ooblen = (size_t)req.ooblen;
+ ops.ooboffs = 0;
+
+ usr_data = (void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (void __user *)(uintptr_t)req.usr_oob;
+
+ if (req.usr_data) {
+ ops.datbuf = memdup_user(usr_data, ops.len);
+ if (IS_ERR(ops.datbuf))
+ return PTR_ERR(ops.datbuf);
+ } else {
+ ops.datbuf = NULL;
+ }
+
+ if (req.usr_oob) {
+ ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
+ if (IS_ERR(ops.oobbuf)) {
+ kfree(ops.datbuf);
+ return PTR_ERR(ops.oobbuf);
+ }
+ } else {
+ ops.oobbuf = NULL;
+ }
+
+ ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
+
+ kfree(ops.datbuf);
+ kfree(ops.oobbuf);
+
+ return ret;
+}
+
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -553,7 +624,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
u_long size;
struct mtd_info_user info;
- DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
+ pr_debug("MTD_ioctl\n");
size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
if (cmd & IOC_IN) {
@@ -601,8 +672,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
info.erasesize = mtd->erasesize;
info.writesize = mtd->writesize;
info.oobsize = mtd->oobsize;
- /* The below fields are obsolete */
- info.ecctype = -1;
+ /* The below field is obsolete */
+ info.padding = 0;
if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
return -EFAULT;
break;
@@ -698,7 +769,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->start);
break;
}
@@ -725,12 +796,19 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
}
+ case MEMWRITE:
+ {
+ ret = mtd_write_ioctl(mtd,
+ (struct mtd_write_req __user *)arg);
+ break;
+ }
+
case MEMLOCK:
{
struct erase_info_user einfo;
@@ -827,7 +905,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&mode, argp, sizeof(int)))
return -EFAULT;
- mfi->mode = MTD_MODE_NORMAL;
+ mfi->mode = MTD_FILE_MODE_NORMAL;
ret = otp_select_filemode(mfi, mode);
@@ -843,11 +921,11 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return -ENOMEM;
ret = -EOPNOTSUPP;
switch (mfi->mode) {
- case MTD_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_FACTORY:
if (mtd->get_fact_prot_info)
ret = mtd->get_fact_prot_info(mtd, buf, 4096);
break;
- case MTD_MODE_OTP_USER:
+ case MTD_FILE_MODE_OTP_USER:
if (mtd->get_user_prot_info)
ret = mtd->get_user_prot_info(mtd, buf, 4096);
break;
@@ -871,7 +949,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct otp_info oinfo;
- if (mfi->mode != MTD_MODE_OTP_USER)
+ if (mfi->mode != MTD_FILE_MODE_OTP_USER)
return -EINVAL;
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
@@ -882,7 +960,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
}
#endif
- /* This ioctl is being deprecated - it truncates the ecc layout */
+ /* This ioctl is being deprecated - it truncates the ECC layout */
case ECCGETLAYOUT:
{
struct nand_ecclayout_user *usrlay;
@@ -915,17 +993,17 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
mfi->mode = 0;
switch(arg) {
- case MTD_MODE_OTP_FACTORY:
- case MTD_MODE_OTP_USER:
+ case MTD_FILE_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_USER:
ret = otp_select_filemode(mfi, arg);
break;
- case MTD_MODE_RAW:
+ case MTD_FILE_MODE_RAW:
if (!mtd->read_oob || !mtd->write_oob)
return -EOPNOTSUPP;
mfi->mode = arg;
- case MTD_MODE_NORMAL:
+ case MTD_FILE_MODE_NORMAL:
break;
default:
ret = -EINVAL;
@@ -1011,7 +1089,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(mtd, buf.start,
+ ret = mtd_do_readoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->start);
break;
@@ -1144,7 +1222,7 @@ static void mtdchar_notify_remove(struct mtd_info *mtd)
if (mtd_ino) {
/* Destroy the inode if it exists */
- mtd_ino->i_nlink = 0;
+ clear_nlink(mtd_ino);
iput(mtd_ino);
}
}
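The new MEMWRITE ioctl handled by mtd_write_ioctl() above lets userspace write page data and OOB in one request, with an explicit mode (MTD_OPS_PLACE_OOB, MTD_OPS_AUTO_OOB or MTD_OPS_RAW). A hedged userspace sketch follows; it assumes the struct mtd_write_req layout introduced by this series in <mtd/mtd-abi.h> (start/len/ooblen plus usr_data/usr_oob pointer fields and a mode byte), and the function name is made up:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-user.h>	/* MEMWRITE, struct mtd_write_req, MTD_OPS_* */

	/* write one page and its OOB area in a single call (sketch) */
	static int write_page_with_oob(int fd, uint64_t offs,
				       const void *data, size_t datalen,
				       const void *oob, size_t ooblen)
	{
		struct mtd_write_req req;

		memset(&req, 0, sizeof(req));
		req.start    = offs;
		req.len      = datalen;
		req.ooblen   = ooblen;
		req.usr_data = (uintptr_t)data;
		req.usr_oob  = (uintptr_t)oob;
		req.mode     = MTD_OPS_PLACE_OOB;  /* or MTD_OPS_AUTO_OOB / MTD_OPS_RAW */

		return ioctl(fd, MEMWRITE, &req);
	}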
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index e601672a530..6df4d4d4eb9 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -95,10 +95,10 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Save information about bitflips! */
if (unlikely(err)) {
- if (err == -EBADMSG) {
+ if (mtd_is_eccerr(err)) {
mtd->ecc_stats.failed++;
ret = err;
- } else if (err == -EUCLEAN) {
+ } else if (mtd_is_bitflip(err)) {
mtd->ecc_stats.corrected++;
/* Do not overwrite -EBADMSG !! */
if (!ret)
@@ -279,10 +279,10 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
/* Save information about bitflips! */
if (unlikely(err)) {
- if (err == -EBADMSG) {
+ if (mtd_is_eccerr(err)) {
mtd->ecc_stats.failed++;
ret = err;
- } else if (err == -EUCLEAN) {
+ } else if (mtd_is_bitflip(err)) {
mtd->ecc_stats.corrected++;
/* Do not overwrite -EBADMSG !! */
if (!ret)
@@ -770,7 +770,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
/*
* Set up the new "super" device's MTD object structure, check for
- * incompatibilites between the subdevices.
+ * incompatibilities between the subdevices.
*/
concat->mtd.type = subdev[0]->type;
concat->mtd.flags = subdev[0]->flags;
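The mtdconcat changes above are part of a tree-wide switch from open-coded -EUCLEAN/-EBADMSG comparisons to the mtd_is_bitflip(), mtd_is_eccerr() and mtd_is_bitflip_or_eccerr() helpers. A sketch of the read-path idiom these helpers support (the function name is illustrative):

	#include <linux/mtd/mtd.h>

	static int read_checked(struct mtd_info *mtd, loff_t from, size_t len,
				u_char *buf)
	{
		size_t retlen;
		int err = mtd->read(mtd, from, len, &retlen, buf);

		if (mtd_is_bitflip(err))	/* -EUCLEAN */
			return 0;	/* corrected; data in buf is valid */
		if (mtd_is_eccerr(err))		/* -EBADMSG */
			return err;	/* uncorrectable; caller decides */
		return err;
	}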
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c510aff289a..b01993ea260 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -362,7 +362,7 @@ int add_mtd_device(struct mtd_info *mtd)
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
- DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
+ pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
@@ -429,27 +429,63 @@ out_error:
}
/**
- * mtd_device_register - register an MTD device.
+ * mtd_device_parse_register - parse partitions and register an MTD device.
*
- * @master: the MTD device to register
- * @parts: the partitions to register - only valid if nr_parts > 0
- * @nr_parts: the number of partitions in parts. If zero then the full MTD
- * device is registered
+ * @mtd: the MTD device to register
+ * @types: the list of MTD partition probes to try, see
+ * 'parse_mtd_partitions()' for more information
+ * @parser_data: MTD partition parser-specific data
+ * @parts: fallback partition information to register, if parsing fails;
+ * only valid if %nr_parts > %0
+ * @nr_parts: the number of partitions in parts, if zero then the full
+ * MTD device is registered if no partition info is found
*
- * Register an MTD device with the system and optionally, a number of
- * partitions. If nr_parts is 0 then the whole device is registered, otherwise
- * only the partitions are registered. To register both the full device *and*
- * the partitions, call mtd_device_register() twice, once with nr_parts == 0
- * and once equal to the number of partitions.
+ * This function aggregates MTD partitions parsing (done by
+ * 'parse_mtd_partitions()') and MTD device and partitions registering. It
+ * basically follows the most common pattern found in many MTD drivers:
+ *
+ * * It first tries to probe partitions on MTD device @mtd using parsers
+ * specified in @types (if @types is %NULL, then the default list of parsers
+ * is used, see 'parse_mtd_partitions()' for more information). If none are
+ * found, this function tries to fall back to the information specified in
+ * @parts/@nr_parts.
+ * * If any partitioning info was found, this function registers the found
+ * partitions.
+ * * If no partitions were found this function just registers the MTD device
+ * @mtd and exits.
+ *
+ * Returns zero in case of success and a negative error code in case of failure.
*/
-int mtd_device_register(struct mtd_info *master,
- const struct mtd_partition *parts,
- int nr_parts)
+int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *parts,
+ int nr_parts)
{
- return parts ? add_mtd_partitions(master, parts, nr_parts) :
- add_mtd_device(master);
+ int err;
+ struct mtd_partition *real_parts;
+
+ err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+ if (err <= 0 && nr_parts && parts) {
+ real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
+ GFP_KERNEL);
+ if (!real_parts)
+ err = -ENOMEM;
+ else
+ err = nr_parts;
+ }
+
+ if (err > 0) {
+ err = add_mtd_partitions(mtd, real_parts, err);
+ kfree(real_parts);
+ } else if (err == 0) {
+ err = add_mtd_device(mtd);
+ if (err == 1)
+ err = -ENODEV;
+ }
+
+ return err;
}
-EXPORT_SYMBOL_GPL(mtd_device_register);
+EXPORT_SYMBOL_GPL(mtd_device_parse_register);
/**
* mtd_device_unregister - unregister an existing MTD device.
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 0ed6126b4c1..961a3840854 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -15,6 +15,9 @@ extern int del_mtd_device(struct mtd_info *mtd);
extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
int);
extern int del_mtd_partitions(struct mtd_info *);
+extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index e3e40f44032..1e2fa623670 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -258,7 +258,7 @@ static void find_next_position(struct mtdoops_context *cxt)
ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
&retlen, (u_char *) &count[0]);
if (retlen != MTDOOPS_HEADER_SIZE ||
- (ret < 0 && ret != -EUCLEAN)) {
+ (ret < 0 && !mtd_is_bitflip(ret))) {
printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
page * record_size, retlen,
MTDOOPS_HEADER_SIZE, ret);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 630be3e7da0..a0bd2de4752 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -73,9 +73,9 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
res = part->master->read(part->master, from + part->offset,
len, retlen, buf);
if (unlikely(res)) {
- if (res == -EUCLEAN)
+ if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
- if (res == -EBADMSG)
+ if (mtd_is_eccerr(res))
mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
}
return res;
@@ -130,7 +130,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
if (ops->oobbuf) {
size_t len, pages;
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
len = mtd->oobavail;
else
len = mtd->oobsize;
@@ -142,9 +142,9 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
res = part->master->read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
- if (res == -EUCLEAN)
+ if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
- if (res == -EBADMSG)
+ if (mtd_is_eccerr(res))
mtd->ecc_stats.failed++;
}
return res;
@@ -479,6 +479,19 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
}
}
+ if (slave->offset == MTDPART_OFS_RETAIN) {
+ slave->offset = cur_offset;
+ if (master->size - slave->offset >= slave->mtd.size) {
+ slave->mtd.size = master->size - slave->offset
+ - slave->mtd.size;
+ } else {
+ printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
+ part->name, master->size - slave->offset,
+ slave->mtd.size);
+ /* register to preserve ordering */
+ goto out_register;
+ }
+ }
if (slave->mtd.size == MTDPART_SIZ_FULL)
slave->mtd.size = master->size - slave->offset;
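The new MTDPART_OFS_RETAIN handling above reads the entry's .size as the amount of space to leave after the partition and grows the partition itself to cover everything else from the current offset. A hypothetical partition table using it (names and sizes invented):

	#include <linux/mtd/partitions.h>

	static struct mtd_partition example_parts[] = {
		{
			.name	= "kernel",
			.offset	= 0,
			.size	= 4 * 1024 * 1024,
		}, {
			.name	= "rootfs",
			.offset	= MTDPART_OFS_RETAIN,
			.size	= 1024 * 1024,	/* retain 1 MiB for "data" */
		}, {
			.name	= "data",
			.offset	= MTDPART_OFS_APPEND,
			.size	= MTDPART_SIZ_FULL,	/* the retained 1 MiB */
		},
	};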
@@ -693,6 +706,8 @@ static struct mtd_part_parser *get_partition_parser(const char *name)
return ret;
}
+#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+
int register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
@@ -712,19 +727,51 @@ int deregister_mtd_parser(struct mtd_part_parser *p)
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
+/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+static const char *default_mtd_part_types[] = {
+ "cmdlinepart",
+ "ofpart",
+ NULL
+};
+
+/**
+ * parse_mtd_partitions - parse MTD partitions
+ * @master: the master partition (describes whole MTD device)
+ * @types: names of partition parsers to try or %NULL
+ * @pparts: array of partitions found is returned here
+ * @data: MTD partition parser-specific data
+ *
+ * This function tries to find partitions on MTD device @master. It uses MTD
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+ * o zero if no partitions were found
+ * o a positive number of found partitions, in which case on exit @pparts will
+ * point to an array containing this number of &struct mtd_partition objects.
+ */
int parse_mtd_partitions(struct mtd_info *master, const char **types,
- struct mtd_partition **pparts, unsigned long origin)
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
struct mtd_part_parser *parser;
int ret = 0;
+ if (!types)
+ types = default_mtd_part_types;
+
for ( ; ret <= 0 && *types; types++) {
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
parser = get_partition_parser(*types);
if (!parser)
continue;
- ret = (*parser->parse_fn)(master, pparts, origin);
+ ret = (*parser->parse_fn)(master, pparts, data);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
ret, parser->name, master->name);
@@ -733,7 +780,6 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
}
return ret;
}
-EXPORT_SYMBOL_GPL(parse_mtd_partitions);
int mtd_is_partition(struct mtd_info *mtd)
{
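With the mtdpart.c changes above, parse_mtd_partitions() is no longer exported (its declaration moves to mtdcore.h) and a parser's parse_fn() now receives a struct mtd_part_parser_data pointer instead of the old 'unsigned long origin' cookie. A sketch of a trivial parser against the new prototype; the parser name and the single partition it reports are invented:

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	static int example_parse(struct mtd_info *master,
				 struct mtd_partition **pparts,
				 struct mtd_part_parser_data *data)
	{
		struct mtd_partition *parts;

		parts = kzalloc(sizeof(*parts), GFP_KERNEL);
		if (!parts)
			return -ENOMEM;

		parts[0].name	= "whole-device";
		parts[0].offset	= 0;
		parts[0].size	= master->size;

		*pparts = parts;	/* the array is kfree()d by mtdcore */
		return 1;		/* number of partitions found */
	}

	static struct mtd_part_parser example_parser = {
		.owner		= THIS_MODULE,
		.parse_fn	= example_parse,
		.name		= "example",
	};

It would be registered with register_mtd_parser(&example_parser) and then selected by name in a driver's probe list.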
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 16b02a1fc10..a90bfe79916 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -14,6 +14,7 @@
#include <linux/mtd/super.h>
#include <linux/namei.h>
+#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/slab.h>
@@ -26,12 +27,12 @@ static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
struct mtd_info *mtd = _mtd;
if (sb->s_mtd == mtd) {
- DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n",
+ pr_debug("MTDSB: Match on device %d (\"%s\")\n",
mtd->index, mtd->name);
return 1;
}
- DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+ pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
return 0;
}
@@ -70,7 +71,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
goto already_mounted;
/* fresh new superblock */
- DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n",
+ pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
sb->s_flags = flags;
@@ -87,7 +88,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
/* new mountpoint for an already mounted superblock */
already_mounted:
- DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
+ pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name);
put_mtd_device(mtd);
return dget(sb->s_root);
@@ -108,7 +109,7 @@ static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
mtd = get_mtd_device(NULL, mtdnr);
if (IS_ERR(mtd)) {
- DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
+ pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
return ERR_CAST(mtd);
}
@@ -131,7 +132,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
if (!dev_name)
return ERR_PTR(-EINVAL);
- DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);
+ pr_debug("MTDSB: dev_name \"%s\"\n", dev_name);
/* the preferred way of mounting in future; especially when
* CONFIG_BLOCK=n - we specify the underlying MTD device by number or
@@ -142,7 +143,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
struct mtd_info *mtd;
/* mount by MTD device name */
- DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
+ pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
dev_name + 4);
mtd = get_mtd_device_nm(dev_name + 4);
@@ -163,7 +164,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
if (!*endptr) {
/* It was a valid number */
- DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n",
+ pr_debug("MTDSB: mtd%%d, mtdnr %d\n",
mtdnr);
return mount_mtd_nr(fs_type, flags,
dev_name, data,
@@ -179,10 +180,10 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
bdev = lookup_bdev(dev_name);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
- DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
+ pr_debug("MTDSB: lookup_bdev() returned %d\n", ret);
return ERR_PTR(ret);
}
- DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");
+ pr_debug("MTDSB: lookup_bdev() returned 0\n");
ret = -EINVAL;
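For context on the DEBUG() to pr_debug() conversion above: pr_debug() compiles to a no-op unless DEBUG is defined for the file, or CONFIG_DYNAMIC_DEBUG is enabled and the call site is switched on at run time. A small sketch of the usual way to enable it per file; the example function is hypothetical:

/* Define DEBUG before the includes (or add "ccflags-y += -DDEBUG" to the
 * Makefile) so that pr_debug() expands to a real printk. */
#define DEBUG
#include <linux/printk.h>

static void example_match(int index, const char *name)
{
	/* Emitted only when DEBUG or dynamic debug enables this site. */
	pr_debug("MTDSB: Match on device %d (\"%s\")\n", index, name);
}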
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fd788532761..bd9590c723e 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -86,7 +86,7 @@ struct swap_eb {
unsigned int flags;
unsigned int active_count;
unsigned int erase_count;
- unsigned int pad; /* speeds up pointer decremtnt */
+ unsigned int pad; /* speeds up pointer decrement */
};
#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
@@ -314,7 +314,7 @@ static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
{
int ret = d->mtd->read_oob(d->mtd, from, ops);
- if (ret == -EUCLEAN)
+ if (mtd_is_bitflip(ret))
return ret;
if (ret) {
@@ -350,11 +350,11 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
ops.oobbuf = d->oob_buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ret = mtdswap_read_oob(d, offset, &ops);
- if (ret && ret != -EUCLEAN)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
data = (struct mtdswap_oobdata *)d->oob_buf;
@@ -363,7 +363,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
eb->erase_count = le32_to_cpu(data->count);
- if (ret == -EUCLEAN)
+ if (mtd_is_bitflip(ret))
ret = MTDSWAP_SCANNED_BITFLIP;
else {
if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
@@ -389,7 +389,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
ops.ooboffs = 0;
ops.oobbuf = (uint8_t *)&n;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.datbuf = NULL;
if (marker == MTDSWAP_TYPE_CLEAN) {
@@ -408,7 +408,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
if (ret) {
dev_warn(d->dev, "Write OOB failed for block at %08llx "
"error %d\n", offset, ret);
- if (ret == -EIO || ret == -EBADMSG)
+ if (ret == -EIO || mtd_is_eccerr(ret))
mtdswap_handle_write_error(d, eb);
return ret;
}
@@ -628,7 +628,7 @@ static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
TREE_COUNT(d, CLEAN)--;
ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
- } while (ret == -EIO || ret == -EBADMSG);
+ } while (ret == -EIO || mtd_is_eccerr(ret));
if (ret)
return ret;
@@ -678,7 +678,7 @@ retry:
ret = mtdswap_map_free_block(d, page, bp);
eb = d->eb_data + (*bp / d->pages_per_eblk);
- if (ret == -EIO || ret == -EBADMSG) {
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write = NULL;
eb->active_count--;
d->revmap[*bp] = PAGE_UNDEF;
@@ -690,7 +690,7 @@ retry:
writepos = (loff_t)*bp << PAGE_SHIFT;
ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
- if (ret == -EIO || ret == -EBADMSG) {
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write_pos--;
eb->active_count--;
d->revmap[*bp] = PAGE_UNDEF;
@@ -738,7 +738,7 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
retry:
ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
- if (ret < 0 && ret != -EUCLEAN) {
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
oldeb = d->eb_data + oldblock / d->pages_per_eblk;
oldeb->flags |= EBLOCK_READERR;
@@ -931,7 +931,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = mtd->writesize;
ops.ooblen = mtd->ecclayout->oobavail;
ops.ooboffs = 0;
@@ -1016,7 +1016,7 @@ static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
if (ret == 0)
mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
- else if (ret != -EIO && ret != -EBADMSG)
+ else if (ret != -EIO && !mtd_is_eccerr(ret))
mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
return 0;
@@ -1164,7 +1164,7 @@ retry:
ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
d->mtd_read_count++;
- if (ret == -EUCLEAN) {
+ if (mtd_is_bitflip(ret)) {
eb->flags |= EBLOCK_BITFLIP;
mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
ret = 0;
@@ -1374,11 +1374,10 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
goto revmap_fail;
eblk_bytes = sizeof(struct swap_eb)*d->eblks;
- d->eb_data = vmalloc(eblk_bytes);
+ d->eb_data = vzalloc(eblk_bytes);
if (!d->eb_data)
goto eb_data_fail;
- memset(d->eb_data, 0, eblk_bytes);
for (i = 0; i < pages; i++)
d->page_data[i] = BLOCK_UNDEF;
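The mtdswap hunks above replace open-coded errno tests with the mtd_is_bitflip()/mtd_is_eccerr() helpers introduced by this series. They are essentially thin wrappers over the same errno values, roughly as sketched below (simplified from the include/linux/mtd/mtd.h of this era):

#include <linux/errno.h>

static inline int mtd_is_bitflip(int err)
{
	/* ECC corrected the data; the read result is still usable. */
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err)
{
	/* Uncorrectable ECC error; the data cannot be trusted. */
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err)
{
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}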
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4c3425235ad..cce7b70824c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -83,16 +83,9 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
scratch register here to enable this feature. On Intel Moorestown
boards, the scratch register is at 0xFF108018.
-config MTD_NAND_EDB7312
- tristate "Support for Cirrus Logic EBD7312 evaluation board"
- depends on ARCH_EDB7312
- help
- This enables the driver for the Cirrus Logic EBD7312 evaluation
- board to access the onboard NAND Flash.
-
config MTD_NAND_H1900
tristate "iPAQ H1900 flash"
- depends on ARCH_PXA
+ depends on ARCH_PXA && BROKEN
help
This enables the driver for the iPAQ h1900 flash.
@@ -116,10 +109,11 @@ config MTD_NAND_AMS_DELTA
Support for NAND flash on Amstrad E3 (Delta).
config MTD_NAND_OMAP2
- tristate "NAND Flash device on OMAP2 and OMAP3"
- depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3)
+ tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
+ depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
help
- Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
+ platforms.
config MTD_NAND_IDS
tristate
@@ -138,7 +132,7 @@ config MTD_NAND_RICOH
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
- depends on SOC_AU1200 || SOC_AU1550
+ depends on MIPS_ALCHEMY
help
This enables the driver for the NAND flash controller on the
AMD/Alchemy 1550 SOC.
@@ -423,6 +417,19 @@ config MTD_NAND_NANDSIM
The simulator may simulate various NAND flash chips for the
MTD nand layer.
+config MTD_NAND_GPMI_NAND
+ bool "GPMI NAND Flash Controller driver"
+ depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+ select MTD_PARTITIONS
+ select MTD_CMDLINE_PARTS
+ help
+ Enables NAND Flash support for IMX23 or IMX28.
+ The GPMI controller is very powerful; with the help of the BCH
+ module it can do hardware ECC. The GPMI supports several NAND
+ flash chips at the same time. The GPMI may conflict with other
+ blocks, such as the SD card, so pay attention when you enable
+ the GPMI.
+
config MTD_NAND_PLATFORM
tristate "Support for generic platform NAND driver"
help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 5745d831168..618f4ba2369 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
-obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
@@ -49,5 +48,6 @@ obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 78017eb9318..9e6b498c9be 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -26,7 +26,7 @@
#include <asm/io.h>
#include <mach/hardware.h>
#include <asm/sizes.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <plat/board-ams-delta.h>
/*
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 55da20ccc7a..23e5d77c39f 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -161,37 +161,6 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
!!host->board->rdy_pin_active_low;
}
-/*
- * Minimal-overhead PIO for data access.
- */
-static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
-{
- struct nand_chip *nand_chip = mtd->priv;
-
- __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
-}
-
-static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
-{
- struct nand_chip *nand_chip = mtd->priv;
-
- __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
-}
-
-static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
-{
- struct nand_chip *nand_chip = mtd->priv;
-
- __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
-}
-
-static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
-{
- struct nand_chip *nand_chip = mtd->priv;
-
- __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
-}
-
static void dma_complete_func(void *completion)
{
complete(completion);
@@ -266,33 +235,27 @@ err_buf:
static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
- struct atmel_nand_host *host = chip->priv;
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
return;
- if (host->board->bus_width_16)
- atmel_read_buf16(mtd, buf, len);
- else
- atmel_read_buf8(mtd, buf, len);
+ /* if no DMA operation possible, use PIO */
+ memcpy_fromio(buf, chip->IO_ADDR_R, len);
}
static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
- struct atmel_nand_host *host = chip->priv;
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
return;
- if (host->board->bus_width_16)
- atmel_write_buf16(mtd, buf, len);
- else
- atmel_write_buf8(mtd, buf, len);
+ /* if no DMA operation possible, use PIO */
+ memcpy_toio(chip->IO_ADDR_W, buf, len);
}
/*
@@ -481,10 +444,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
}
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
-
/*
* Probe for the NAND device.
*/
@@ -496,8 +455,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
struct resource *regs;
struct resource *mem;
int res;
- struct mtd_partition *partitions = NULL;
- int num_partitions = 0;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -583,7 +540,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (on_flash_bbt) {
printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
- nand_chip->options |= NAND_USE_FLASH_BBT;
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
}
if (!cpu_has_dma())
@@ -594,7 +551,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
- host->dma_chan = dma_request_channel(mask, 0, NULL);
+ host->dma_chan = dma_request_channel(mask, NULL, NULL);
if (!host->dma_chan) {
dev_err(host->dev, "Failed to request DMA channel\n");
use_dma = 0;
@@ -655,27 +612,12 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_scan_tail;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "atmel_nand";
- num_partitions = parse_mtd_partitions(mtd, part_probes,
- &partitions, 0);
-#endif
- if (num_partitions <= 0 && host->board->partition_info)
- partitions = host->board->partition_info(mtd->size,
- &num_partitions);
-
- if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
- res = -ENXIO;
- goto err_no_partitions;
- }
-
- res = mtd_device_register(mtd, partitions, num_partitions);
+ res = mtd_device_parse_register(mtd, NULL, 0,
+ host->board->parts, host->board->num_parts);
if (!res)
return res;
-err_no_partitions:
- nand_release(mtd);
err_scan_tail:
err_scan_ident:
err_no_card:
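The atmel_nand hunks above drop the bus-width-specific __raw_reads*/__raw_writes* helpers in favour of one path: try DMA only for transfers larger than the OOB area, and fall back to memcpy_fromio()/memcpy_toio() PIO otherwise. A condensed view of the resulting read path; use_dma and atmel_nand_dma_op() are the driver's own symbols, so this is a sketch of the code above rather than standalone code:

static void example_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
	struct nand_chip *chip = mtd->priv;

	/* DMA only pays off for transfers bigger than the OOB area. */
	if (use_dma && len > mtd->oobsize &&
	    atmel_nand_dma_op(mtd, buf, len, 1) == 0)
		return;

	/* No DMA available (or it failed): plain PIO from the data port. */
	memcpy_fromio(buf, chip->IO_ADDR_R, len);
}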
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index e7767eef450..7dd3700f230 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -19,7 +19,11 @@
#include <linux/mtd/partitions.h>
#include <asm/io.h>
-#include <asm/mach-au1x00/au1xxx.h>
+#ifdef CONFIG_MIPS_PB1550
+#include <asm/mach-pb1x00/pb1550.h>
+#elif defined(CONFIG_MIPS_DB1550)
+#include <asm/mach-db1x00/db1x00.h>
+#endif
#include <asm/mach-db1x00/bcsr.h>
/*
@@ -48,7 +52,7 @@ static const struct mtd_partition partition_info[] = {
* au_read_byte - read one byte from the chip
* @mtd: MTD device structure
*
- * read function for 8bit buswith
+ * read function for 8bit buswidth
*/
static u_char au_read_byte(struct mtd_info *mtd)
{
@@ -63,7 +67,7 @@ static u_char au_read_byte(struct mtd_info *mtd)
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
- * write function for 8it buswith
+ * write function for 8bit buswidth
*/
static void au_write_byte(struct mtd_info *mtd, u_char byte)
{
@@ -73,11 +77,10 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte)
}
/**
- * au_read_byte16 - read one byte endianess aware from the chip
+ * au_read_byte16 - read one byte endianness aware from the chip
* @mtd: MTD device structure
*
- * read function for 16bit buswith with
- * endianess conversion
+ * read function for 16bit buswidth with endianness conversion
*/
static u_char au_read_byte16(struct mtd_info *mtd)
{
@@ -88,12 +91,11 @@ static u_char au_read_byte16(struct mtd_info *mtd)
}
/**
- * au_write_byte16 - write one byte endianess aware to the chip
+ * au_write_byte16 - write one byte endianness aware to the chip
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
- * write function for 16bit buswith with
- * endianess conversion
+ * write function for 16bit buswidth with endianness conversion
*/
static void au_write_byte16(struct mtd_info *mtd, u_char byte)
{
@@ -106,8 +108,7 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte)
* au_read_word - read one word from the chip
* @mtd: MTD device structure
*
- * read function for 16bit buswith without
- * endianess conversion
+ * read function for 16bit buswidth without endianness conversion
*/
static u16 au_read_word(struct mtd_info *mtd)
{
@@ -123,7 +124,7 @@ static u16 au_read_word(struct mtd_info *mtd)
* @buf: data buffer
* @len: number of bytes to write
*
- * write function for 8bit buswith
+ * write function for 8bit buswidth
*/
static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
@@ -142,7 +143,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
* @buf: buffer to store date
* @len: number of bytes to read
*
- * read function for 8bit buswith
+ * read function for 8bit buswidth
*/
static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
@@ -161,7 +162,7 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
* @buf: buffer containing the data to compare
* @len: number of bytes to compare
*
- * verify function for 8bit buswith
+ * verify function for 8bit buswidth
*/
static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
@@ -183,7 +184,7 @@ static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
* @buf: data buffer
* @len: number of bytes to write
*
- * write function for 16bit buswith
+ * write function for 16bit buswidth
*/
static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
@@ -205,7 +206,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
* @buf: buffer to store date
* @len: number of bytes to read
*
- * read function for 16bit buswith
+ * read function for 16bit buswidth
*/
static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
@@ -226,7 +227,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
* @buf: buffer containing the data to compare
* @len: number of bytes to compare
*
- * verify function for 16bit buswith
+ * verify function for 16bit buswidth
*/
static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index eddc9a22498..2e42ec2e8ff 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -172,9 +172,9 @@ static int __init autcpu12_init(void)
/* Enable the following for a flash based bad block table */
/*
- this->options = NAND_USE_FLASH_BBT;
+ this->bbt_options = NAND_BBT_USE_FLASH;
*/
- this->options = NAND_USE_FLASH_BBT;
+ this->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
if (nand_scan(autcpu12_mtd, 1)) {
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 8c569e454dc..46b58d67284 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,8 +52,6 @@
static const __devinitconst char gBanner[] = KERN_INFO \
"BCM UMI MTD NAND Driver: 1.00\n";
-const char *part_probes[] = { "cmdlinepart", NULL };
-
#if NAND_ECC_BCH
static uint8_t scan_ff_pattern[] = { 0xff };
@@ -376,16 +374,18 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
+ if (!r) {
+ err = -ENXIO;
+ goto out_free;
+ }
/* map physical address */
bcm_umi_io_base = ioremap(r->start, resource_size(r));
if (!bcm_umi_io_base) {
printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
- kfree(board_mtd);
- return -EIO;
+ err = -EIO;
+ goto out_free;
}
/* Get pointer to private data */
@@ -401,9 +401,8 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
/* Initialize the NAND hardware. */
if (bcm_umi_nand_inithw() < 0) {
printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return -EIO;
+ err = -EIO;
+ goto out_unmap;
}
/* Set address of NAND IO lines */
@@ -436,7 +435,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
#if USE_DMA
err = nand_dma_init();
if (err != 0)
- return err;
+ goto out_unmap;
#endif
/* Figure out the size of the device that we have.
@@ -447,9 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
err = nand_scan_ident(board_mtd, 1, NULL);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return err;
+ goto out_unmap;
}
/* Now that we know the nand size, we can setup the ECC layout */
@@ -468,13 +465,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
{
printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
board_mtd->writesize);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_unmap;
}
}
#if NAND_ECC_BCH
if (board_mtd->writesize > 512) {
- if (this->options & NAND_USE_FLASH_BBT)
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
this->badblock_pattern = &largepage_bbt;
}
@@ -485,33 +483,20 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
err = nand_scan_tail(board_mtd);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return err;
+ goto out_unmap;
}
/* Register the partitions */
- {
- int nr_partitions;
- struct mtd_partition *partition_info;
-
- board_mtd->name = "bcm_umi-nand";
- nr_partitions =
- parse_mtd_partitions(board_mtd, part_probes,
- &partition_info, 0);
-
- if (nr_partitions <= 0) {
- printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
- nr_partitions);
- iounmap(bcm_umi_io_base);
- kfree(board_mtd);
- return -EIO;
- }
- mtd_device_register(board_mtd, partition_info, nr_partitions);
- }
+ board_mtd->name = "bcm_umi-nand";
+ mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0);
/* Return happy */
return 0;
+out_unmap:
+ iounmap(bcm_umi_io_base);
+out_free:
+ kfree(board_mtd);
+ return err;
}
static int bcm_umi_nand_remove(struct platform_device *pdev)
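The bcm_umi_nand probe rework above follows the standard kernel error-unwind idiom: one exit path with labels in reverse order of acquisition, so every failure branch releases exactly what has been set up so far. A compressed, self-contained sketch of that shape; the private structure and example_init_hw() step are hypothetical:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *base;
};

static int example_init_hw(struct example_priv *priv)
{
	return 0;	/* hypothetical hardware init step */
}

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *r;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		err = -ENXIO;
		goto out_free;
	}

	priv->base = ioremap(r->start, resource_size(r));
	if (!priv->base) {
		err = -EIO;
		goto out_free;
	}

	err = example_init_hw(priv);
	if (err)
		goto out_unmap;

	platform_set_drvdata(pdev, priv);
	return 0;

out_unmap:
	iounmap(priv->base);
out_free:
	kfree(priv);
	return err;
}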
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 87ebb4e5b0c..72d3f23490c 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/io.h>
#define CAFE_NAND_CTRL1 0x00
@@ -57,7 +58,6 @@
struct cafe_priv {
struct nand_chip nand;
- struct mtd_partition *parts;
struct pci_dev *pdev;
void __iomem *mmio;
struct rs_control *rs;
@@ -371,7 +371,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
return 1;
}
/**
- * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read
+ * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
@@ -630,8 +630,6 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
- struct mtd_partition *parts;
- int nr_parts;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -686,7 +684,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.chip_delay = 0;
/* Enable the following for a flash based bad block table */
- cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+ cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+ cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -799,18 +798,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mtd);
- /* We register the whole device first, separate from the partitions */
- mtd_device_register(mtd, NULL, 0);
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "cafe_nand";
-#endif
- nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
- if (nr_parts > 0) {
- cafe->parts = parts;
- dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
- mtd_device_register(mtd, parts, nr_parts);
- }
+ mtd_device_parse_register(mtd, part_probes, 0, NULL, 0);
+
goto out;
out_irq:
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6fc043a30d1..737ef9a04fd 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -22,6 +22,7 @@
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -50,8 +51,6 @@ static struct mtd_partition partition_info[] = {
};
#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
-const char *part_probes[] = { "cmdlinepart", NULL };
-
static u_char cmx270_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
@@ -151,9 +150,6 @@ static int cmx270_device_ready(struct mtd_info *mtd)
static int __init cmx270_init(void)
{
struct nand_chip *this;
- const char *part_type;
- struct mtd_partition *mtd_parts;
- int mtd_parts_nb = 0;
int ret;
if (!(machine_is_armcore() && cpu_is_pxa27x()))
@@ -222,23 +218,9 @@ static int __init cmx270_init(void)
goto err_scan;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes,
- &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (!mtd_parts_nb) {
- mtd_parts = partition_info;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
/* Register the partitions */
- pr_notice("Using %s partition definition\n", part_type);
- ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
+ ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0,
+ partition_info, NUM_PARTITIONS);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index f59ad1f2d5d..414afa79356 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -239,7 +239,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
this->ecc.correct = nand_correct_data;
/* Enable the following for a flash based bad block table */
- this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+ this->bbt_options = NAND_BBT_USE_FLASH;
+ this->options = NAND_NO_AUTOINCR;
/* Scan to find existence of the device */
if (nand_scan(new_mtd, 1)) {
@@ -277,15 +278,11 @@ static int is_geode(void)
return 0;
}
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
static int __init cs553x_init(void)
{
int err = -ENXIO;
int i;
uint64_t val;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = NULL;
/* If the CPU isn't a Geode GX or LX, abort */
if (!is_geode())
@@ -315,13 +312,9 @@ static int __init cs553x_init(void)
do mtdconcat etc. if we want to. */
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
if (cs553x_mtd[i]) {
-
/* If any devices registered, return success. Else the last error. */
- mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- printk(KERN_NOTICE "Using command line partition definition\n");
- mtd_device_register(cs553x_mtd[i], mtd_parts,
- mtd_parts_nb);
+ mtd_device_parse_register(cs553x_mtd[i], NULL, 0,
+ NULL, 0);
err = 0;
}
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 1f34951ae1a..c153e1f77f9 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -57,7 +57,6 @@ struct davinci_nand_info {
struct device *dev;
struct clk *clk;
- bool partitioned;
bool is_readmode;
@@ -530,8 +529,6 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
int ret;
uint32_t val;
nand_ecc_modes_t ecc_mode;
- struct mtd_partition *mtd_parts = NULL;
- int mtd_parts_nb = 0;
/* insist on board-specific configuration */
if (!pdata)
@@ -581,7 +578,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.chip_delay = 0;
info->chip.select_chip = nand_davinci_select_chip;
- /* options such as NAND_USE_FLASH_BBT or 16-bit widths */
+ /* options such as NAND_BBT_USE_FLASH */
+ info->chip.bbt_options = pdata->bbt_options;
+ /* options such as 16-bit widths */
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
@@ -751,33 +750,8 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- if (mtd_has_cmdlinepart()) {
- static const char *probes[] __initconst = {
- "cmdlinepart", NULL
- };
-
- mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
- &mtd_parts, 0);
- }
-
- if (mtd_parts_nb <= 0) {
- mtd_parts = pdata->parts;
- mtd_parts_nb = pdata->nr_parts;
- }
-
- /* Register any partitions */
- if (mtd_parts_nb > 0) {
- ret = mtd_device_register(&info->mtd, mtd_parts,
- mtd_parts_nb);
- if (ret == 0)
- info->partitioned = true;
- }
-
- /* If there's no partition info, just package the whole chip
- * as a single MTD device.
- */
- if (!info->partitioned)
- ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
+ ret = mtd_device_parse_register(&info->mtd, NULL, 0,
+ pdata->parts, pdata->nr_parts);
if (ret < 0)
goto err_scan;
@@ -816,9 +790,6 @@ err_nomem:
static int __exit nand_davinci_remove(struct platform_device *pdev)
{
struct davinci_nand_info *info = platform_get_drvdata(pdev);
- int status;
-
- status = mtd_device_unregister(&info->mtd);
spin_lock_irq(&davinci_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
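A pattern that repeats across this series (davinci above; also cafe, cs553x, denali, diskonchip and others): the flash-based bad-block-table selection moves out of chip->options into the new chip->bbt_options field, with NAND_USE_FLASH_BBT renamed to NAND_BBT_USE_FLASH, while options keeps only the generic flags such as NAND_NO_AUTOINCR. A minimal sketch of the new split:

#include <linux/mtd/nand.h>

static void example_configure_chip(struct nand_chip *chip)
{
	/* Previously: chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; */
	chip->bbt_options = NAND_BBT_USE_FLASH;	/* BBT-specific flags */
	chip->options = NAND_NO_AUTOINCR;	/* generic NAND options */
}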
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index d5276218945..3984d488f9a 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1346,6 +1346,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
* */
denali->bbtskipbytes = ioread32(denali->flash_reg +
SPARE_AREA_SKIP_BYTES);
+ detect_max_banks(denali);
denali_nand_reset(denali);
iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG,
@@ -1356,7 +1357,6 @@ static void denali_hw_init(struct denali_nand_info *denali)
/* Should set value for these registers when init */
iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, denali->flash_reg + ECC_ENABLE);
- detect_max_banks(denali);
denali_nand_timing_set(denali);
denali_irq_init(denali);
}
@@ -1577,7 +1577,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->nand.bbt_md = &bbt_mirror_descr;
/* skip the scan for now until we have OOB read and write support */
- denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+ denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
+ denali->nand.options |= NAND_SKIP_BBTSCAN;
denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
/* Denali Controller only support 15bit and 8bit ECC in MRST,
@@ -1676,7 +1677,6 @@ static void denali_pci_remove(struct pci_dev *dev)
struct denali_nand_info *denali = pci_get_drvdata(dev);
nand_release(&denali->mtd);
- mtd_device_unregister(&denali->mtd);
denali_irq_cleanup(dev->irq, denali);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 7837728d02f..5780dbab611 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -31,6 +31,7 @@
#include <linux/mtd/doc2000.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/inftl.h>
+#include <linux/module.h>
/* Where to look for the devices? */
#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
@@ -132,7 +133,7 @@ static struct rs_control *rs_decoder;
/*
* The HW decoder in the DoC ASIC's provides us a error syndrome,
- * which we must convert to a standard syndrom usable by the generic
+ * which we must convert to a standard syndrome usable by the generic
* Reed-Solomon library code.
*
* Fabrice Bellard figured this out in the old docecc code. I added
@@ -153,7 +154,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
parity = ecc[1];
- /* Initialize the syndrom buffer */
+ /* Initialize the syndrome buffer */
for (i = 0; i < NROOTS; i++)
s[i] = ds[0];
/*
@@ -1031,7 +1032,7 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
else
WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
- if (no_ecc_failures && (ret == -EBADMSG)) {
+ if (no_ecc_failures && mtd_is_eccerr(ret)) {
printk(KERN_ERR "suppressing ECC failure\n");
ret = 0;
}
@@ -1652,7 +1653,7 @@ static int __init doc_probe(unsigned long physadr)
nand->ecc.mode = NAND_ECC_HW_SYNDROME;
nand->ecc.size = 512;
nand->ecc.bytes = 6;
- nand->options = NAND_USE_FLASH_BBT;
+ nand->bbt_options = NAND_BBT_USE_FLASH;
doc->physadr = physadr;
doc->virtadr = virtadr;
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
deleted file mode 100644
index 8400d0f6dad..00000000000
--- a/drivers/mtd/nand/edb7312.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * drivers/mtd/nand/edb7312.c
- *
- * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- *
- * Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
- * a 64Mibit (8MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
-#include <asm/sizes.h>
-#include <asm/hardware/clps7111.h>
-
-/*
- * MTD structure for EDB7312 board
- */
-static struct mtd_info *ep7312_mtd = NULL;
-
-/*
- * Values specific to the EDB7312 board (used with EP7312 processor)
- */
-#define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */
-#define EP7312_PXDR 0x0001 /*
- * IO offset to Port B data register
- * where the CLE, ALE and NCE pins
- * are wired to.
- */
-#define EP7312_PXDDR 0x0041 /*
- * IO offset to Port B data direction
- * register so we can control the IO
- * lines.
- */
-
-/*
- * Module stuff
- */
-
-static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
-static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
-static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
- {.name = "EP7312 Nand Flash",
- .offset = 0,
- .size = 8 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific access to control-lines
- *
- * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1)
- * NAND_CLE: bit 1 -> bit 4
- * NAND_ALE: bit 2 -> bit 5
- */
-static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned char bits = 0x80;
-
- bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3;
- bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40;
-
- clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits,
- ep7312_pxdr);
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-static int ep7312_device_ready(struct mtd_info *mtd)
-{
- return 1;
-}
-
-const char *part_probes[] = { "cmdlinepart", NULL };
-
-/*
- * Main initialization routine
- */
-static int __init ep7312_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
- void __iomem *ep7312_fio_base;
-
- /* Allocate memory for MTD device structure and private data */
- ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ep7312_mtd) {
- printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* map physical address */
- ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
- if (!ep7312_fio_base) {
- printk("ioremap EDB7312 NAND flash failed\n");
- kfree(ep7312_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ep7312_mtd[1]);
-
- /* Initialize structures */
- memset(ep7312_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ep7312_mtd->priv = this;
- ep7312_mtd->owner = THIS_MODULE;
-
- /*
- * Set GPIO Port B control register so that the pins are configured
- * to be outputs for controlling the NAND flash.
- */
- clps_writeb(0xf0, ep7312_pxddr);
-
- /* insert callbacks */
- this->IO_ADDR_R = ep7312_fio_base;
- this->IO_ADDR_W = ep7312_fio_base;
- this->cmd_ctrl = ep7312_hwcontrol;
- this->dev_ready = ep7312_device_ready;
- /* 15 us command delay time */
- this->chip_delay = 15;
-
- /* Scan to find existence of the device */
- if (nand_scan(ep7312_mtd, 1)) {
- iounmap((void *)ep7312_fio_base);
- kfree(ep7312_mtd);
- return -ENXIO;
- }
- ep7312_mtd->name = "edb7312-nand";
- mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ep7312_init);
-
-/*
- * Clean up routine
- */
-static void __exit ep7312_cleanup(void)
-{
- struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1];
-
- /* Release resources, unregister device */
- nand_release(ap7312_mtd);
-
- /* Release io resource */
- iounmap(this->IO_ADDR_R);
-
- /* Free the MTD device structure */
- kfree(ep7312_mtd);
-}
-
-module_exit(ep7312_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
-MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 33d8aad8bba..eedd8ee2c9a 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,7 +75,6 @@ struct fsl_elbc_fcm_ctrl {
unsigned int use_mdr; /* Non zero if the MDR is to be set */
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int counter; /* counter for the initializations */
- char *oob_poi; /* Place to write ECC after read back */
};
/* These map to the positions used by the FCM hardware ECC generator */
@@ -244,6 +243,25 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
return -EIO;
}
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+ uint32_t lteccr = in_be32(&lbc->lteccr);
+ /*
+ * If the command was a full page read and the ELBC
+ * has the LTECCR register, then bits 12-15 (PPC order) of
+ * LTECCR indicate which 512-byte sub-pages had corrected errors.
+ * Bits 28-31 flag uncorrectable errors, which are reported elsewhere.
+ * For small-page NAND only one bit is used.
+ * If the ELBC doesn't have the LTECCR register it reads as 0.
+ */
+ if (lteccr & 0x000F000F)
+ out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+ if (lteccr & 0x000F0000)
+ mtd->ecc_stats.corrected++;
+ }
+
return 0;
}
@@ -435,7 +453,6 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
- int full_page;
dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
"writing %d bytes.\n", elbc_fcm_ctrl->index);
@@ -445,34 +462,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
* write so the HW generates the ECC.
*/
if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
- elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) {
+ elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
- full_page = 0;
- } else {
+ else
out_be32(&lbc->fbcr, 0);
- full_page = 1;
- }
fsl_elbc_run_command(mtd);
-
- /* Read back the page in order to fill in the ECC for the
- * caller. Is this really needed?
- */
- if (full_page && elbc_fcm_ctrl->oob_poi) {
- out_be32(&lbc->fbcr, 3);
- set_addr(mtd, 6, page_addr, 1);
-
- elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
-
- fsl_elbc_do_read(chip, 1);
- fsl_elbc_run_command(mtd);
-
- memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
- &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
- elbc_fcm_ctrl->index += 3;
- }
-
- elbc_fcm_ctrl->oob_poi = NULL;
return;
}
@@ -752,13 +747,8 @@ static void fsl_elbc_write_page(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf)
{
- struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
-
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- elbc_fcm_ctrl->oob_poi = chip->oob_poi;
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -791,8 +781,8 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->bbt_md = &bbt_mirror_descr;
/* set up nand options */
- chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
- NAND_USE_FLASH_BBT;
+ chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
chip->controller = &elbc_fcm_ctrl->controller;
chip->priv = priv;
@@ -829,7 +819,6 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
elbc_fcm_ctrl->chips[priv->bank] = NULL;
kfree(priv);
- kfree(elbc_fcm_ctrl);
return 0;
}
@@ -842,13 +831,14 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
struct resource res;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
static const char *part_probe_types[]
- = { "cmdlinepart", "RedBoot", NULL };
- struct mtd_partition *parts;
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
int ret;
int bank;
struct device *dev;
struct device_node *node = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ ppdata.of_node = pdev->dev.of_node;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
lbc = fsl_lbc_ctrl_dev->regs;
@@ -934,17 +924,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
- if (ret < 0)
- goto err;
-
- if (ret == 0) {
- ret = of_mtd_parse_partitions(priv->dev, node, &parts);
- if (ret < 0)
- goto err;
- }
-
- mtd_device_register(&priv->mtd, parts, ret);
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
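The LTECCR handling added above feeds corrected bit-flips into mtd->ecc_stats.corrected, and the MTD read path then typically reports such reads as -EUCLEAN rather than as hard failures, which is what callers test for with mtd_is_bitflip() (see the mtdswap hunks earlier). A caller-side sketch, with a hypothetical helper name and PAGE_SIZE-sized reads as in mtdswap:

static int example_read_page(struct mtd_info *mtd, loff_t offset, u8 *buf)
{
	size_t retlen;
	int ret;

	ret = mtd->read(mtd, offset, PAGE_SIZE, &retlen, buf);
	if (mtd_is_bitflip(ret))
		return 0;	/* corrected by ECC: data is usable */
	if (mtd_is_eccerr(ret))
		return -EIO;	/* uncorrectable: treat the page as lost */
	return ret;
}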
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 23752fd5bc5..b4f3cc9f32f 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -158,7 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
{
int ret;
struct device_node *flash_np;
- static const char *part_types[] = { "cmdlinepart", NULL, };
+ struct mtd_part_parser_data ppdata;
fun->chip.IO_ADDR_R = fun->io_base;
fun->chip.IO_ADDR_W = fun->io_base;
@@ -192,18 +192,12 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
if (ret)
goto err;
- ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
-
-#ifdef CONFIG_MTD_OF_PARTS
- if (ret == 0) {
- ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
- if (ret < 0)
- goto err;
- }
-#endif
- ret = mtd_device_register(&fun->mtd, fun->parts, ret);
+ ppdata.of_node = flash_np;
+ ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0);
err:
of_node_put(flash_np);
+ if (ret)
+ kfree(fun->mtd.name);
return ret;
}
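Both fsl_elbc and fsl_upm above now hand the device-tree node to the partition code through struct mtd_part_parser_data, so the "ofpart" parser can pull partitions from the flash node when it is in the probe list (or in the core's defaults). A sketch of that registration, assuming a platform device with a valid of_node:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>

static int example_register_of(struct platform_device *pdev,
			       struct mtd_info *mtd)
{
	struct mtd_part_parser_data ppdata;

	/* "ofpart" reads partition subnodes from this device-tree node. */
	ppdata.of_node = pdev->dev.of_node;

	/* NULL probe list: use the core's default parser order, no fallback. */
	return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
}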
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e9b275ac381..e53b7606413 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -146,7 +146,7 @@ static struct mtd_partition partition_info_16KB_blk[] = {
{
.name = "Root File System",
.offset = 0x460000,
- .size = 0,
+ .size = MTDPART_SIZ_FULL,
},
};
@@ -173,13 +173,10 @@ static struct mtd_partition partition_info_128KB_blk[] = {
{
.name = "Root File System",
.offset = 0x800000,
- .size = 0,
+ .size = MTDPART_SIZ_FULL,
},
};
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
@@ -187,8 +184,6 @@ const char *part_probes[] = { "cmdlinepart", NULL };
* @pid: Part ID on the AMBA PrimeCell format
* @mtd: MTD info for a NAND flash.
* @nand: Chip related info for a NAND flash.
- * @partitions: Partition info for a NAND Flash.
- * @nr_partitions: Total number of partition of a NAND flash.
*
* @ecc_place: ECC placing locations in oobfree type format.
* @bank: Bank number for probed device.
@@ -203,8 +198,6 @@ struct fsmc_nand_data {
u32 pid;
struct mtd_info mtd;
struct nand_chip nand;
- struct mtd_partition *partitions;
- unsigned int nr_partitions;
struct fsmc_eccplace *ecc_place;
unsigned int bank;
@@ -716,65 +709,17 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* platform data,
* default partition information present in driver.
*/
-#ifdef CONFIG_MTD_CMDLINE_PARTS
/*
- * Check if partition info passed via command line
+ * Check for partition info passed
*/
host->mtd.name = "nand";
- host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
- &host->partitions, 0);
- if (host->nr_partitions <= 0) {
-#endif
- /*
- * Check if partition info passed via command line
- */
- if (pdata->partitions) {
- host->partitions = pdata->partitions;
- host->nr_partitions = pdata->nr_partitions;
- } else {
- struct mtd_partition *partition;
- int i;
-
- /* Select the default partitions info */
- switch (host->mtd.size) {
- case 0x01000000:
- case 0x02000000:
- case 0x04000000:
- host->partitions = partition_info_16KB_blk;
- host->nr_partitions =
- sizeof(partition_info_16KB_blk) /
- sizeof(struct mtd_partition);
- break;
- case 0x08000000:
- case 0x10000000:
- case 0x20000000:
- case 0x40000000:
- host->partitions = partition_info_128KB_blk;
- host->nr_partitions =
- sizeof(partition_info_128KB_blk) /
- sizeof(struct mtd_partition);
- break;
- default:
- ret = -ENXIO;
- pr_err("Unsupported NAND size\n");
- goto err_probe;
- }
-
- partition = host->partitions;
- for (i = 0; i < host->nr_partitions; i++, partition++) {
- if (partition->size == 0) {
- partition->size = host->mtd.size -
- partition->offset;
- break;
- }
- }
- }
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- }
-#endif
-
- ret = mtd_device_register(&host->mtd, host->partitions,
- host->nr_partitions);
+ ret = mtd_device_parse_register(&host->mtd, NULL, 0,
+ host->mtd.size <= 0x04000000 ?
+ partition_info_16KB_blk :
+ partition_info_128KB_blk,
+ host->mtd.size <= 0x04000000 ?
+ ARRAY_SIZE(partition_info_16KB_blk) :
+ ARRAY_SIZE(partition_info_128KB_blk));
if (ret)
goto err_probe;
@@ -822,7 +767,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (host) {
- mtd_device_unregister(&host->mtd);
+ nand_release(&host->mtd);
clk_disable(host->clk);
clk_put(host->clk);
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
new file mode 100644
index 00000000000..3a462487c35
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
+gpmi_nand-objs += gpmi-nand.o
+gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
new file mode 100644
index 00000000000..4effb8c579d
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -0,0 +1,84 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL 0x00000000
+#define HW_BCH_CTRL_SET 0x00000004
+#define HW_BCH_CTRL_CLR 0x00000008
+#define HW_BCH_CTRL_TOG 0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
+
+#define HW_BCH_STATUS0 0x00000010
+#define HW_BCH_MODE 0x00000020
+#define HW_BCH_ENCODEPTR 0x00000030
+#define HW_BCH_DATAPTR 0x00000040
+#define HW_BCH_METAPTR 0x00000050
+#define HW_BCH_LAYOUTSELECT 0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0 0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0 12
+#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+
+#define HW_BCH_FLASH0LAYOUT1 0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
+ (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN 12
+#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#endif
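The new bch-regs.h above follows the usual Freescale BP_/BM_/BF_ convention: BP_ is a field's bit position, BM_ its mask, and BF_(v) shifts a value into place and masks it to the field width. A tiny usage sketch mirroring what bch_set_geometry() does further down; the geometry numbers are illustrative and bch_regs is assumed to be the ioremapped BCH register base:

static void example_set_layout0(void __iomem *bch_regs)
{
	/* Pack illustrative geometry values into the layout-0 register. */
	u32 layout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(7)	    /* chunk count - 1 */
		    | BF_BCH_FLASH0LAYOUT0_META_SIZE(10)    /* metadata bytes */
		    | BF_BCH_FLASH0LAYOUT0_ECC0(8)	    /* ECC strength >> 1 */
		    | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(512); /* ECC chunk size */

	writel(layout0, bch_regs + HW_BCH_FLASH0LAYOUT0);
}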
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 00000000000..de4db7604a3
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1057 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/mtd/gpmi-nand.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <mach/mxs.h>
+
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+struct timing_threshod timing_default_threshold = {
+ .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
+ BP_GPMI_TIMING0_DATA_SETUP),
+ .internal_data_setup_in_ns = 0,
+ .max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
+ BP_GPMI_CTRL1_RDN_DELAY),
+ .max_dll_clock_period_in_ns = 32,
+ .max_dll_delay_in_ns = 16,
+};
+
+/*
+ * Clear the bit and poll until it is cleared. This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ /* clear the bit */
+ __mxs_clrl(mask, addr);
+
+ /*
+ * SFTRST needs 3 GPMI clocks to settle; the reference manual
+ * recommends waiting 1 us.
+ */
+ udelay(1);
+
+ /* poll the bit becoming clear */
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+#define MODULE_CLKGATE (1 << 30)
+#define MODULE_SFTRST (1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+ * In most of the cases, it's ok. But there is a hardware bug in the BCH block.
+ * If you try to soft reset the BCH block, it becomes unusable until
+ * the next hard reset. This case occurs in NAND boot mode: when the board
+ * boots from NAND, the ROM of the chip initializes the BCH block itself.
+ * So if the driver tries to reset the BCH again, the BCH will not work anymore.
+ * You will see a DMA timeout in this case.
+ *
+ * To avoid this bug, just add a new parameter `just_enable` for
+ * the mxs_reset_block(), and rewrite it here.
+ */
+int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ __mxs_clrl(MODULE_CLKGATE, reset_addr);
+
+ if (!just_enable) {
+ /* set SFTRST to reset the block */
+ __mxs_setl(MODULE_SFTRST, reset_addr);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+ }
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+
+int gpmi_init(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = clk_enable(r->clock);
+ if (ret)
+ goto err_out;
+ ret = gpmi_reset_block(r->gpmi_regs, false);
+ if (ret)
+ goto err_out;
+
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Set the IRQ polarity. */
+ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Disable Write-Protection. */
+ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Select BCH ECC. */
+ writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ clk_disable(r->clock);
+ return 0;
+err_out:
+ return ret;
+}
+
+/* This function is very useful. It is called only when a bug occurs. */
+void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *geo = &this->bch_geometry;
+ u32 reg;
+ int i;
+
+ pr_err("Show GPMI registers :\n");
+ for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+ reg = readl(r->gpmi_regs + i * 0x10);
+ pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+
+ /* start to print out the BCH info */
+ pr_err("BCH Geometry :\n");
+ pr_err("GF length : %u\n", geo->gf_len);
+ pr_err("ECC Strength : %u\n", geo->ecc_strength);
+ pr_err("Page Size in Bytes : %u\n", geo->page_size);
+ pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
+ pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
+ pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
+ pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
+ pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
+ pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
+ pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
+ pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
+}
+
+/* Configures the geometry for BCH. */
+int bch_set_geometry(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ unsigned int block_count;
+ unsigned int block_size;
+ unsigned int metadata_size;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ int ret;
+
+ if (common_nfc_set_geometry(this))
+ return !0;
+
+ block_count = bch_geo->ecc_chunk_count - 1;
+ block_size = bch_geo->ecc_chunk_size;
+ metadata_size = bch_geo->metadata_size;
+ ecc_strength = bch_geo->ecc_strength >> 1;
+ page_size = bch_geo->page_size;
+
+ ret = clk_enable(r->clock);
+ if (ret)
+ goto err_out;
+
+ ret = gpmi_reset_block(r->bch_regs, true);
+ if (ret)
+ goto err_out;
+
+ /* Configure layout 0. */
+ writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
+ | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
+ | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
+ | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+ writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
+ | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
+ | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ /* Set *all* chip selects to use layout 0. */
+ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+ /* Enable interrupts. */
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ r->bch_regs + HW_BCH_CTRL_SET);
+
+ clk_disable(r->clock);
+ return 0;
+err_out:
+ return ret;
+}
+
+/* Converts time in nanoseconds to cycles. */
+static unsigned int ns_to_cycles(unsigned int time,
+ unsigned int period, unsigned int min)
+{
+ unsigned int k;
+
+ k = (time + period - 1) / period;
+ return max(k, min);
+}
+
+/* Apply timing to current hardware conditions. */
+static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
+ struct gpmi_nfc_hardware_timing *hw)
+{
+ struct gpmi_nand_platform_data *pdata = this->pdata;
+ struct timing_threshod *nfc = &timing_default_threshold;
+ struct nand_chip *nand = &this->nand;
+ struct nand_timing target = this->timing;
+ bool improved_timing_is_available;
+ unsigned long clock_frequency_in_hz;
+ unsigned int clock_period_in_ns;
+ bool dll_use_half_periods;
+ unsigned int dll_delay_shift;
+ unsigned int max_sample_delay_in_ns;
+ unsigned int address_setup_in_cycles;
+ unsigned int data_setup_in_ns;
+ unsigned int data_setup_in_cycles;
+ unsigned int data_hold_in_cycles;
+ int ideal_sample_delay_in_ns;
+ unsigned int sample_delay_factor;
+ int tEYE;
+ unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
+ unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+
+ /*
+ * If there are multiple chips, we need to relax the timings to allow
+ * for signal distortion due to higher capacitance.
+ */
+ if (nand->numchips > 2) {
+ target.data_setup_in_ns += 10;
+ target.data_hold_in_ns += 10;
+ target.address_setup_in_ns += 10;
+ } else if (nand->numchips > 1) {
+ target.data_setup_in_ns += 5;
+ target.data_hold_in_ns += 5;
+ target.address_setup_in_ns += 5;
+ }
+
+ /* Check if improved timing information is available. */
+ improved_timing_is_available =
+ (target.tREA_in_ns >= 0) &&
+ (target.tRLOH_in_ns >= 0) &&
+ (target.tRHOH_in_ns >= 0) ;
+
+ /* Inspect the clock. */
+ clock_frequency_in_hz = nfc->clock_frequency_in_hz;
+ clock_period_in_ns = 1000000000 / clock_frequency_in_hz;
+
+ /*
+ * The NFC quantizes setup and hold parameters in terms of clock cycles.
+ * Here, we quantize the setup and hold timing parameters to the
+ * next-highest clock period to make sure we apply at least the
+ * specified times.
+ *
+ * For data setup and data hold, the hardware interprets a value of zero
+ * as the largest possible delay. This is not what's intended by a zero
+ * in the input parameter, so we impose a minimum of one cycle.
+ */
+ data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
+ clock_period_in_ns, 1);
+ data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
+ clock_period_in_ns, 1);
+ address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
+ clock_period_in_ns, 0);
+
+ /*
+ * The clock's period affects the sample delay in a number of ways:
+ *
+ * (1) The NFC HAL tells us the maximum clock period the sample delay
+ * DLL can tolerate. If the clock period is greater than half that
+ * maximum, we must configure the DLL to be driven by half periods.
+ *
+ * (2) We need to convert from an ideal sample delay, in ns, to a
+ * "sample delay factor," which the NFC uses. This factor depends on
+ * whether we're driving the DLL with full or half periods.
+ * Paraphrasing the reference manual:
+ *
+ * AD = SDF x 0.125 x RP
+ *
+ * where:
+ *
+ * AD is the applied delay, in ns.
+ * SDF is the sample delay factor, which is dimensionless.
+ * RP is the reference period, in ns, which is a full clock period
+ * if the DLL is being driven by full periods, or half that if
+ * the DLL is being driven by half periods.
+ *
+ * Let's re-arrange this in a way that's more useful to us:
+ *
+ * 8
+ * SDF = AD x ----
+ * RP
+ *
+ * The reference period is either the clock period or half that, so this
+ * is:
+ *
+ * 8 AD x DDF
+ * SDF = AD x ----- = --------
+ * f x P P
+ *
+ * where:
+ *
+ * f is 1 or 1/2, depending on how we're driving the DLL.
+ * P is the clock period.
+ * DDF is the DLL Delay Factor, a dimensionless value that
+ * incorporates all the constants in the conversion.
+ *
+ * DDF will be either 8 or 16, both of which are powers of two. We can
+ * reduce the cost of this conversion by using bit shifts instead of
+ * multiplication or division. Thus:
+ *
+ * AD << DDS
+ * SDF = ---------
+ * P
+ *
+ * or
+ *
+ * AD = (SDF >> DDS) x P
+ *
+ * where:
+ *
+ * DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
+ */
+ if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
+ dll_use_half_periods = true;
+ dll_delay_shift = 3 + 1;
+ } else {
+ dll_use_half_periods = false;
+ dll_delay_shift = 3;
+ }
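+
+ /*
+ * Illustrative example: assuming the DLL tolerates clock periods up to
+ * 32 ns (an assumed threshold), a 25 ns period exceeds half of that, so
+ * we drive the DLL with half periods and DDS = 3 + 1 = 4 (DDF = 16).
+ */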
+
+ /*
+ * Compute the maximum sample delay the NFC allows, under current
+ * conditions. If the clock is running too slowly, no sample delay is
+ * possible.
+ */
+ if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
+ max_sample_delay_in_ns = 0;
+ else {
+ /*
+ * Compute the delay implied by the largest sample delay factor
+ * the NFC allows.
+ */
+ max_sample_delay_in_ns =
+ (nfc->max_sample_delay_factor * clock_period_in_ns) >>
+ dll_delay_shift;
+
+ /*
+ * Check if the implied sample delay is larger than the NFC
+ * actually allows.
+ */
+ if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
+ max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
+ }
+
+ /*
+ * Check if improved timing information is available. If not, we have to
+ * use a less-sophisticated algorithm.
+ */
+ if (!improved_timing_is_available) {
+ /*
+ * Fold the read setup time required by the NFC into the ideal
+ * sample delay.
+ */
+ ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
+ nfc->internal_data_setup_in_ns;
+
+ /*
+ * The ideal sample delay may be greater than the maximum
+ * allowed by the NFC. If so, we can trade off sample delay time
+ * for more data setup time.
+ *
+ * In each iteration of the following loop, we add a cycle to
+ * the data setup time and subtract a corresponding amount from
+ * the sample delay until we've satisfied the constraints or
+ * can't do any better.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ data_setup_in_cycles++;
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds most closely
+ * to the ideal sample delay. If the result is too large for the
+ * NFC, use the maximum value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the
+ * sample delay factor. We do this because the form of the
+ * computation is the same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /* Skip to the part where we return our results. */
+ goto return_results;
+ }
+
+ /*
+ * If control arrives here, we have more detailed timing information,
+ * so we can use a better algorithm.
+ */
+
+ /*
+ * Fold the read setup time required by the NFC into the maximum
+ * propagation delay.
+ */
+ max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
+
+ /*
+ * Earlier, we computed the number of clock cycles required to satisfy
+ * the data setup time. Now, we need to know the actual nanoseconds.
+ */
+ data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
+
+ /*
+ * Compute tEYE, the width of the data eye when reading from the NAND
+ * Flash. The eye width is fundamentally determined by the data setup
+ * time, perturbed by propagation delays and some characteristics of the
+ * NAND Flash device.
+ *
+ * start of the eye = max_prop_delay + tREA
+ * end of the eye = min_prop_delay + tRHOH + data_setup
+ */
+ tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
+ (int)data_setup_in_ns;
+
+ tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
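+
+ /*
+ * Illustrative numbers (assumed, not from any datasheet): with
+ * min_prop_delay = 5 ns, max_prop_delay = 9 ns, tRHOH = 15 ns,
+ * tREA = 20 ns and data_setup = 30 ns, this gives
+ * tEYE = 5 + 15 + 30 - (9 + 20) = 21 ns.
+ */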
+
+ /*
+ * The eye must be open. If it's not, we can try to open it by
+ * increasing its main forcer, the data setup time.
+ *
+ * In each iteration of the following loop, we increase the data setup
+ * time by a single clock cycle. We do this until either the eye is
+ * open or we run into NFC limits.
+ */
+ while ((tEYE <= 0) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+ }
+
+ /*
+ * When control arrives here, the eye is open. The ideal time to sample
+ * the data is in the center of the eye:
+ *
+ * end of the eye + start of the eye
+ * --------------------------------- - data_setup
+ * 2
+ *
+ * After some algebra, this simplifies to the code immediately below.
+ */
+ ideal_sample_delay_in_ns =
+ ((int)max_prop_delay_in_ns +
+ (int)target.tREA_in_ns +
+ (int)min_prop_delay_in_ns +
+ (int)target.tRHOH_in_ns -
+ (int)data_setup_in_ns) >> 1;
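+
+ /*
+ * With the same illustrative numbers as above, this evaluates to
+ * (9 + 20 + 5 + 15 - 30) >> 1 = 9 ns.
+ */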
+
+ /*
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ *
+ * __ _____________________________________
+ * RDN \_________________/
+ *
+ * <---- tEYE ----->
+ * /-----------------\
+ * Read Data ----------------------------< >---------
+ * \-----------------/
+ * ^ ^ ^ ^
+ * | | | |
+ * |<--Data Setup -->|<--Delay Time -->| |
+ * | | | |
+ * | | |
+ * | |<-- Quantized Delay Time -->|
+ * | | |
+ *
+ *
+ * We have some issues we must now address:
+ *
+ * (1) The *ideal* sample delay time must not be negative. If it is, we
+ * jam it to zero.
+ *
+ * (2) The *ideal* sample delay time must not be greater than that
+ * allowed by the NFC. If it is, we can increase the data setup
+ * time, which will reduce the delay between the end of the data
+ * setup and the center of the eye. It will also make the eye
+ * larger, which might help with the next issue...
+ *
+ * (3) The *quantized* sample delay time must not fall either before the
+ * eye opens or after it closes (the latter is the problem
+ * illustrated in the above figure).
+ */
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * Extend the data setup as needed to reduce the ideal sample delay
+ * below the maximum permitted by the NFC.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds to the ideal sample
+ * delay. If the result is too large, then use the maximum allowed
+ * value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the sample
+ * delay factor. We do this because the form of the computation is the
+ * same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /*
+ * These macros conveniently encapsulate a computation we'll use to
+ * continuously evaluate whether or not the data sample delay is inside
+ * the eye.
+ */
+ #define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)
+
+ #define QUANTIZED_DELAY \
+ ((int) ((sample_delay_factor * clock_period_in_ns) >> \
+ dll_delay_shift))
+
+ #define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))
+
+ #define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))
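+
+ /*
+ * Illustrative example: with sample_delay_factor = 7, a 10 ns clock
+ * period and dll_delay_shift = 3, QUANTIZED_DELAY is (7 * 10) >> 3
+ * = 8 ns; if IDEAL_DELAY is 9 ns, DELAY_ERROR is 1 ns, which is inside
+ * the eye as long as tEYE is at least 2 ns.
+ */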
+
+ /*
+ * While the quantized sample time falls outside the eye, reduce the
+ * sample delay or extend the data setup to move the sampling point back
+ * toward the eye. Do not allow the number of data setup cycles to
+ * exceed the maximum allowed by the NFC.
+ */
+ while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * outside the eye. Check if it's before the eye opens, or after
+ * the eye closes.
+ */
+ if (QUANTIZED_DELAY > IDEAL_DELAY) {
+ /*
+ * If control arrives here, the quantized sample delay
+ * falls after the eye closes. Decrease the quantized
+ * delay time and then go back to re-evaluate.
+ */
+ if (sample_delay_factor != 0)
+ sample_delay_factor--;
+ continue;
+ }
+
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * before the eye opens. Shift the sample point by increasing
+ * data setup time. This will also make the eye larger.
+ */
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* ...and one less period for the delay time. */
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * We have a new ideal sample delay, so re-compute the quantized
+ * delay.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+ }
+
+ /* Control arrives here when we're ready to return our results. */
+return_results:
+ hw->data_setup_in_cycles = data_setup_in_cycles;
+ hw->data_hold_in_cycles = data_hold_in_cycles;
+ hw->address_setup_in_cycles = address_setup_in_cycles;
+ hw->use_half_periods = dll_use_half_periods;
+ hw->sample_delay_factor = sample_delay_factor;
+
+ /* Return success. */
+ return 0;
+}
+
+/* Begin the I/O */
+void gpmi_begin(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct timing_threshod *nfc = &timing_default_threshold;
+ unsigned char *gpmi_regs = r->gpmi_regs;
+ unsigned int clock_period_in_ns;
+ uint32_t reg;
+ unsigned int dll_wait_time_in_us;
+ struct gpmi_nfc_hardware_timing hw;
+ int ret;
+
+ /* Enable the clock. */
+ ret = clk_enable(r->clock);
+ if (ret) {
+ pr_err("Failed to enable the clock\n");
+ goto err_out;
+ }
+
+ /* set ready/busy timeout */
+ writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
+ gpmi_regs + HW_GPMI_TIMING1);
+
+ /* Get the timing information we need. */
+ nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
+ clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;
+
+ gpmi_nfc_compute_hardware_timing(this, &hw);
+
+ /* Set up all the simple timing parameters. */
+ reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
+ BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
+ BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ;
+
+ writel(reg, gpmi_regs + HW_GPMI_TIMING0);
+
+ /*
+ * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
+ */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Clear out the DLL control fields. */
+ writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* If no sample delay is called for, return immediately. */
+ if (!hw.sample_delay_factor)
+ return;
+
+ /* Configure the HALF_PERIOD flag. */
+ if (hw.use_half_periods)
+ writel(BM_GPMI_CTRL1_HALF_PERIOD,
+ gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Set the delay factor. */
+ writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
+ gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Enable the DLL. */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * After we enable the GPMI DLL, we have to wait 64 clock cycles before
+ * we can use the GPMI.
+ *
+ * Calculate the amount of time we need to wait, in microseconds.
+ */
+ dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
+
+ if (!dll_wait_time_in_us)
+ dll_wait_time_in_us = 1;
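+
+ /*
+ * For example, at an assumed 100 MHz GPMI clock the period is 10 ns,
+ * so 64 cycles take 640 ns; the integer division yields 0, and we wait
+ * the one-microsecond minimum instead.
+ */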
+
+ /* Wait for the DLL to settle. */
+ udelay(dll_wait_time_in_us);
+
+err_out:
+ return;
+}
+
+void gpmi_end(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ clk_disable(r->clock);
+}
+
+/* Clears a BCH interrupt. */
+void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+/* Returns the Ready/Busy status of the given chip. */
+int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
+{
+ struct resources *r = &this->resources;
+ uint32_t mask = 0;
+ uint32_t reg = 0;
+
+ if (GPMI_IS_MX23(this)) {
+ mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
+ reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
+ } else if (GPMI_IS_MX28(this)) {
+ mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
+ reg = readl(r->gpmi_regs + HW_GPMI_STAT);
+ } else
+ pr_err("unknown arch.\n");
+ return reg & mask;
+}
+
+static inline void set_dma_type(struct gpmi_nand_data *this,
+ enum dma_ops_type type)
+{
+ this->last_dma_type = this->dma_type;
+ this->dma_type = type;
+}
+
+int gpmi_send_command(struct gpmi_nand_data *this)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl;
+ int chip = this->current_chip;
+ u32 pio[3];
+
+ /* [1] send out the PIO words */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+ | BM_GPMI_CTRL0_ADDRESS_INCREMENT
+ | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
+ pio[1] = pio[2] = 0;
+ desc = channel->device->device_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
+ sgl = &this->cmd_sgl;
+
+ sg_init_one(sgl, this->cmd_buffer, this->command_length);
+ dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+ desc = channel->device->device_prep_slave_sg(channel,
+ sgl, 1, DMA_TO_DEVICE, 1);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_COMMAND);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ uint32_t command_mode;
+ uint32_t address;
+ u32 pio[2];
+
+ /* [1] PIO */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = channel->device->device_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] send DMA request */
+ prepare_data_dma(this, DMA_TO_DEVICE);
+ desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_TO_DEVICE, 1);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_WRITE_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_read_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[2];
+
+ /* [1] : send PIO */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = channel->device->device_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] : send DMA request */
+ prepare_data_dma(this, DMA_FROM_DEVICE);
+ desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_FROM_DEVICE, 1);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+
+ /* [3] : submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* A DMA descriptor that does an ECC page write (BCH encode). */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+ BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_NONE, 0);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+ set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
+
+int gpmi_read_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* [1] Wait for the chip to report ready. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ desc = channel->device->device_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 2, DMA_NONE, 0);
+ if (!desc) {
+ pr_err("step 1 error\n");
+ return -1;
+ }
+
+ /* [2] Enable the BCH block and read. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+ | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+ desc = channel->device->device_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_NONE, 1);
+ if (!desc) {
+ pr_err("step 2 error\n");
+ return -1;
+ }
+
+ /* [3] Disable the BCH block */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+ pio[1] = 0;
+ desc = channel->device->device_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 2, DMA_NONE, 1);
+ if (!desc) {
+ pr_err("step 3 error\n");
+ return -1;
+ }
+
+ /* [4] submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 00000000000..071b63420f0
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,1619 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/gpmi-nand.h>
+#include <linux/mtd/partitions.h>
+
+#include "gpmi-nand.h"
+
+/* add our own bbt descriptor */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+/* We will use all the (page + OOB). */
+static struct nand_ecclayout gpmi_hw_ecclayout = {
+ .eccbytes = 0,
+ .eccpos = { 0, },
+ .oobfree = { {.offset = 0, .length = 0} }
+};
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+ struct gpmi_nand_data *this = cookie;
+
+ gpmi_clear_bch(this);
+ complete(&this->bch_done);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Calculate the ECC strength by hand:
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+ * N : The chunk count of per page.
+ * O : the oobsize of the NAND chip.
+ * M : the metasize of per page.
+ *
+ * The formula is :
+ * E * G * N
+ * ------------ <= (O - M)
+ * 8
+ *
+ * So, we get E by:
+ * (O - M) * 8
+ * E <= -------------
+ * G * N
+ */
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ int ecc_strength;
+
+ ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+
+ /* Round down to the nearest even number. */
+ return round_down(ecc_strength, 2);
+}
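+
+/*
+ * Worked example (illustrative geometry): a 2 KiB page with a 64-byte OOB,
+ * metadata_size = 10, gf_len = 13 and ecc_chunk_count = 4 gives
+ * E <= (64 - 10) * 8 / (13 * 4) = 8.3, which is rounded down to 8.
+ */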
+
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int metadata_size;
+ unsigned int status_size;
+ unsigned int block_mark_bit_offset;
+
+ /*
+ * The size of the metadata can be changed, though we set it to 10
+ * bytes now. But it can't be too large, because we have to save
+ * enough space for BCH.
+ */
+ geo->metadata_size = 10;
+
+ /* The default for the length of Galois Field. */
+ geo->gf_len = 13;
+
+ /* The default chunk size. There is no oobsize greater than 512. */
+ geo->ecc_chunk_size = 512;
+ while (geo->ecc_chunk_size < mtd->oobsize)
+ geo->ecc_chunk_size *= 2; /* keep C >= O */
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /* We use the same ECC strength for all chunks. */
+ geo->ecc_strength = get_ecc_strength(this);
+ if (!geo->ecc_strength) {
+ pr_err("Computed an invalid ECC strength.\n");
+ return -EINVAL;
+ }
+
+ geo->page_size = mtd->writesize + mtd->oobsize;
+ geo->payload_size = mtd->writesize;
+
+ /*
+ * The auxiliary buffer contains the metadata and the ECC status. The
+ * metadata is padded to the nearest 32-bit boundary. The ECC status
+ * contains one byte for every ECC chunk, and is also padded to the
+ * nearest 32-bit boundary.
+ */
+ metadata_size = ALIGN(geo->metadata_size, 4);
+ status_size = ALIGN(geo->ecc_chunk_count, 4);
+
+ geo->auxiliary_size = metadata_size + status_size;
+ geo->auxiliary_status_offset = metadata_size;
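+
+ /*
+ * Continuing the illustrative geometry above: ALIGN(10, 4) = 12 and
+ * ALIGN(4, 4) = 4, so the auxiliary buffer is 16 bytes long and the
+ * ECC status starts at offset 12.
+ */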
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /*
+ * We need to compute the byte and bit offsets of
+ * the physical block mark within the ECC-based view of the page.
+ *
+ * NAND chip with 2K page shows below:
+ * (Block Mark)
+ * | |
+ * | D |
+ * |<---->|
+ * V V
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ * | M | data |E| data |E| data |E| data |E|
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ *
+ * The position of the block mark moves forward in the ECC-based view
+ * of the page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * With the formula to compute the ECC strength, and the condition
+ * : C >= O (C is the ecc chunk size)
+ *
+ * It's easy to deduce to the following result:
+ *
+ * E * G (O - M) C - M C - M
+ * ----------- <= ------- <= -------- < ---------
+ * 8 N N (N - 1)
+ *
+ * So, we get:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M) < C
+ * 8
+ *
+ * The above inequality means the position of the block mark
+ * within the ECC-based view of the page is still in the data chunk,
+ * and it's NOT in the ECC bits of the chunk.
+ *
+ * Use the following to compute the bit position of the
+ * physical block mark within the ECC-based view of the page:
+ * (page_size - D) * 8
+ *
+ * --Huang Shijie
+ */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
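+
+ /*
+ * With the illustrative geometry above (2 KiB page, ECC strength 8,
+ * gf_len 13, 4 chunks, 10-byte metadata) this works out to
+ * 2048 * 8 - (8 * 13 * 3 + 10 * 8) = 15992 bits, i.e. byte 1999, bit 0.
+ */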
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
+
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+ int chipnr = this->current_chip;
+
+ return this->dma_chans[chipnr];
+}
+
+/* Check whether we can use the upper layer's buffer directly for DMA. */
+void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+{
+ struct scatterlist *sgl = &this->data_sgl;
+ int ret;
+
+ this->direct_dma_map_ok = true;
+
+ /* first try to map the upper buffer directly */
+ sg_init_one(sgl, this->upper_buf, this->upper_len);
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0) {
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
+
+ if (dr == DMA_TO_DEVICE)
+ memcpy(this->data_buffer_dma, this->upper_buf,
+ this->upper_len);
+
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0)
+ pr_err("map failed.\n");
+
+ this->direct_dma_map_ok = false;
+ }
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+ complete(dma_c);
+
+ switch (this->dma_type) {
+ case DMA_FOR_COMMAND:
+ dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+ if (this->direct_dma_map_ok == false)
+ memcpy(this->upper_buf, this->data_buffer_dma,
+ this->upper_len);
+ break;
+
+ case DMA_FOR_WRITE_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_ECC_PAGE:
+ case DMA_FOR_WRITE_ECC_PAGE:
+ /* We have to wait for the BCH interrupt to finish. */
+ break;
+
+ default:
+ pr_err("unexpected DMA operation type.\n");
+ }
+}
+
+int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *dma_c = &this->dma_done;
+ int err;
+
+ init_completion(dma_c);
+
+ desc->callback = dma_irq_callback;
+ desc->callback_param = this;
+ dmaengine_submit(desc);
+
+ /* Wait for the interrupt from the DMA block. */
+ err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+ if (!err) {
+ pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * This function is used in BCH reading or BCH writing pages.
+ * It will wait for the BCH interrupt for at most ONE second.
+ * Actually, we must wait for two interrupts :
+ * [1] firstly the DMA interrupt and
+ * [2] secondly the BCH interrupt.
+ */
+int start_dma_with_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *bch_c = &this->bch_done;
+ int err;
+
+ /* Prepare to receive an interrupt from the BCH block. */
+ init_completion(bch_c);
+
+ /* start the DMA */
+ start_dma_without_bch_irq(this, desc);
+
+ /* Wait for the interrupt from the BCH block. */
+ err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+ if (!err) {
+ pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int __devinit
+acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ struct resource *r;
+ void *p;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+ if (!r) {
+ pr_err("Can't get resource for %s\n", res_name);
+ return -ENXIO;
+ }
+
+ p = ioremap(r->start, resource_size(r));
+ if (!p) {
+ pr_err("Can't remap %s\n", res_name);
+ return -ENOMEM;
+ }
+
+ if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+ res->gpmi_regs = p;
+ else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+ res->bch_regs = p;
+ else
+ pr_err("unknown resource name : %s\n", res_name);
+
+ return 0;
+}
+
+static void release_register_block(struct gpmi_nand_data *this)
+{
+ struct resources *res = &this->resources;
+ if (res->gpmi_regs)
+ iounmap(res->gpmi_regs);
+ if (res->bch_regs)
+ iounmap(res->bch_regs);
+ res->gpmi_regs = NULL;
+ res->bch_regs = NULL;
+}
+
+static int __devinit
+acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+ struct resource *r;
+ int err;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+ if (!r) {
+ pr_err("Can't get resource for %s\n", res_name);
+ return -ENXIO;
+ }
+
+ err = request_irq(r->start, irq_h, 0, res_name, this);
+ if (err) {
+ pr_err("Can't own %s\n", res_name);
+ return err;
+ }
+
+ res->bch_low_interrupt = r->start;
+ res->bch_high_interrupt = r->end;
+ return 0;
+}
+
+static void release_bch_irq(struct gpmi_nand_data *this)
+{
+ struct resources *res = &this->resources;
+ int i = res->bch_low_interrupt;
+
+ for (; i <= res->bch_high_interrupt; i++)
+ free_irq(i, this);
+}
+
+static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct resource *r = this->private;
+
+ if (!mxs_dma_is_apbh(chan))
+ return false;
+ /*
+ * only catch the GPMI dma channels :
+ * for mx23 : MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
+ * (These four channels share the same IRQ!)
+ *
+ * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
+ * (These eight channels share the same IRQ!)
+ */
+ if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+ chan->private = &this->dma_data;
+ return true;
+ }
+ return false;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+ unsigned int i;
+ for (i = 0; i < DMA_CHANS; i++)
+ if (this->dma_chans[i]) {
+ dma_release_channel(this->dma_chans[i]);
+ this->dma_chans[i] = NULL;
+ }
+}
+
+static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
+{
+ struct platform_device *pdev = this->pdev;
+ struct gpmi_nand_platform_data *pdata = this->pdata;
+ struct resources *res = &this->resources;
+ struct resource *r, *r_dma;
+ unsigned int i;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ GPMI_NAND_DMA_CHANNELS_RES_NAME);
+ r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ GPMI_NAND_DMA_INTERRUPT_RES_NAME);
+ if (!r || !r_dma) {
+ pr_err("Can't get resource for DMA\n");
+ return -ENXIO;
+ }
+
+ /* used in gpmi_dma_filter() */
+ this->private = r;
+
+ for (i = r->start; i <= r->end; i++) {
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+
+ if (i - r->start >= pdata->max_chip_count)
+ break;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* get the DMA interrupt */
+ if (r_dma->start == r_dma->end) {
+ /* only register the first. */
+ if (i == r->start)
+ this->dma_data.chan_irq = r_dma->start;
+ else
+ this->dma_data.chan_irq = NO_IRQ;
+ } else
+ this->dma_data.chan_irq = r_dma->start + (i - r->start);
+
+ dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+ if (!dma_chan)
+ goto acquire_err;
+
+ /* fill the first empty item */
+ this->dma_chans[i - r->start] = dma_chan;
+ }
+
+ res->dma_low_channel = r->start;
+ res->dma_high_channel = i;
+ return 0;
+
+acquire_err:
+ pr_err("Can't acquire DMA channel %u\n", i);
+ release_dma_channels(this);
+ return -EINVAL;
+}
+
+static int __devinit acquire_resources(struct gpmi_nand_data *this)
+{
+ struct resources *res = &this->resources;
+ int ret;
+
+ ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_bch_irq(this, bch_irq);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_dma_channels(this);
+ if (ret)
+ goto exit_dma_channels;
+
+ res->clock = clk_get(&this->pdev->dev, NULL);
+ if (IS_ERR(res->clock)) {
+ pr_err("can not get the clock\n");
+ ret = -ENOENT;
+ goto exit_clock;
+ }
+ return 0;
+
+exit_clock:
+ release_dma_channels(this);
+exit_dma_channels:
+ release_bch_irq(this);
+exit_regs:
+ release_register_block(this);
+ return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+
+ clk_put(r->clock);
+ release_register_block(this);
+ release_bch_irq(this);
+ release_dma_channels(this);
+}
+
+static int __devinit init_hardware(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /*
+ * This structure contains the "safe" GPMI timing that should succeed
+ * with any NAND Flash device
+ * (although, with less-than-optimal performance).
+ */
+ struct nand_timing safe_timing = {
+ .data_setup_in_ns = 80,
+ .data_hold_in_ns = 60,
+ .address_setup_in_ns = 25,
+ .gpmi_sample_delay_in_ns = 6,
+ .tREA_in_ns = -1,
+ .tRLOH_in_ns = -1,
+ .tRHOH_in_ns = -1,
+ };
+
+ /* Initialize the hardware. */
+ ret = gpmi_init(this);
+ if (ret)
+ return ret;
+
+ this->timing = safe_timing;
+ return 0;
+}
+
+static int read_page_prepare(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(destination)) {
+ dma_addr_t dest_phys;
+
+ dest_phys = dma_map_single(dev, destination,
+ length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dest_phys)) {
+ if (alt_size < length) {
+ pr_err("Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = destination;
+ *use_phys = dest_phys;
+ this->direct_dma_map_ok = true;
+ return 0;
+ }
+
+map_failed:
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ this->direct_dma_map_ok = false;
+ return 0;
+}
+
+static inline void read_page_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (this->direct_dma_map_ok)
+ dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
+}
+
+static inline void read_page_swap_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (!this->direct_dma_map_ok)
+ memcpy(destination, alt_virt, length);
+}
+
+static int send_page_prepare(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(source)) {
+ dma_addr_t source_phys;
+
+ source_phys = dma_map_single(dev, (void *)source, length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, source_phys)) {
+ if (alt_size < length) {
+ pr_err("Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = source;
+ *use_phys = source_phys;
+ return 0;
+ }
+map_failed:
+ /*
+ * Copy the content of the source buffer into the alternate
+ * buffer and set up the return values accordingly.
+ */
+ memcpy(alt_virt, source, length);
+
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ return 0;
+}
+
+static void send_page_end(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void *used_virt, dma_addr_t used_phys)
+{
+ struct device *dev = this->dev;
+ if (used_virt == source)
+ dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+
+ if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
+ dma_free_coherent(dev, this->page_buffer_size,
+ this->page_buffer_virt,
+ this->page_buffer_phys);
+ kfree(this->cmd_buffer);
+ kfree(this->data_buffer_dma);
+
+ this->cmd_buffer = NULL;
+ this->data_buffer_dma = NULL;
+ this->page_buffer_virt = NULL;
+ this->page_buffer_size = 0;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct device *dev = this->dev;
+
+ /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
+ this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA);
+ if (this->cmd_buffer == NULL)
+ goto error_alloc;
+
+ /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
+ this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA);
+ if (this->data_buffer_dma == NULL)
+ goto error_alloc;
+
+ /*
+ * [3] Allocate the page buffer.
+ *
+ * Both the payload buffer and the auxiliary buffer must appear on
+ * 32-bit boundaries. We presume the size of the payload buffer is a
+ * power of two and is much larger than four, which guarantees the
+ * auxiliary buffer will appear on a 32-bit boundary.
+ */
+ this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
+ this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
+ &this->page_buffer_phys, GFP_DMA);
+ if (!this->page_buffer_virt)
+ goto error_alloc;
+
+
+ /* Slice up the page buffer. */
+ this->payload_virt = this->page_buffer_virt;
+ this->payload_phys = this->page_buffer_phys;
+ this->auxiliary_virt = this->payload_virt + geo->payload_size;
+ this->auxiliary_phys = this->payload_phys + geo->payload_size;
+ return 0;
+
+error_alloc:
+ gpmi_free_dma_buffer(this);
+ pr_err("Error allocating DMA buffers!\n");
+ return -ENOMEM;
+}
+
+static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret;
+
+ /*
+ * Every operation begins with a command byte and a series of zero or
+ * more address bytes. These are distinguished by either the Address
+ * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
+ * asserted. When MTD is ready to execute the command, it will deassert
+ * both latch enables.
+ *
+ * Rather than run a separate DMA operation for every single byte, we
+ * queue them up and run a single DMA operation for the entire series
+ * of command and data bytes. NAND_CMD_NONE means the END of the queue.
+ */
+ if ((ctrl & (NAND_ALE | NAND_CLE))) {
+ if (data != NAND_CMD_NONE)
+ this->cmd_buffer[this->command_length++] = data;
+ return;
+ }
+
+ if (!this->command_length)
+ return;
+
+ ret = gpmi_send_command(this);
+ if (ret)
+ pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
+
+ this->command_length = 0;
+}
+
+static int gpmi_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ return gpmi_is_ready(this, this->current_chip);
+}
+
+static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ if ((this->current_chip < 0) && (chipnr >= 0))
+ gpmi_begin(this);
+ else if ((this->current_chip >= 0) && (chipnr < 0))
+ gpmi_end(this);
+
+ this->current_chip = chipnr;
+}
+
+static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ pr_debug("len is %d\n", len);
+ this->upper_buf = buf;
+ this->upper_len = len;
+
+ gpmi_read_data(this);
+}
+
+static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ pr_debug("len is %d\n", len);
+ this->upper_buf = (uint8_t *)buf;
+ this->upper_len = len;
+
+ gpmi_send_data(this);
+}
+
+static uint8_t gpmi_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ uint8_t *buf = this->data_buffer_dma;
+
+ gpmi_read_buf(mtd, buf, 1);
+ return buf[0];
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called when swapping the block mark, or when swapping it back,
+ * because the operations are the same.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+ void *payload, void *auxiliary)
+{
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ unsigned char *p;
+ unsigned char *a;
+ unsigned int bit;
+ unsigned char mask;
+ unsigned char from_data;
+ unsigned char from_oob;
+
+ if (!this->swap_block_mark)
+ return;
+
+ /*
+ * If control arrives here, we're swapping. Make some convenience
+ * variables.
+ */
+ bit = nfc_geo->block_mark_bit_offset;
+ p = payload + nfc_geo->block_mark_byte_offset;
+ a = auxiliary;
+
+ /*
+ * Get the byte from the data area that overlays the block mark. Since
+ * the ECC engine applies its own view to the bits in the page, the
+ * physical block mark won't (in general) appear on a byte boundary in
+ * the data.
+ */
+ from_data = (p[0] >> bit) | (p[1] << (8 - bit));
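+ /*
+ * For example, with bit == 3 the mark byte is rebuilt from the top
+ * five bits of p[0] and the low three bits of p[1].
+ */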
+
+ /* Get the byte from the OOB. */
+ from_oob = a[0];
+
+ /* Swap them. */
+ a[0] = from_data;
+
+ mask = (0x1 << bit) - 1;
+ p[0] = (p[0] & mask) | (from_oob << bit);
+
+ mask = ~0 << bit;
+ p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
+
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ void *payload_virt;
+ dma_addr_t payload_phys;
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ unsigned int i;
+ unsigned char *status;
+ unsigned int failed;
+ unsigned int corrected;
+ int ret;
+
+ pr_debug("page number is : %d\n", page);
+ ret = read_page_prepare(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ pr_err("Inadequate DMA buffer\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* go! */
+ ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
+ read_page_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ if (ret) {
+ pr_err("Error in ECC-based read: %d\n", ret);
+ goto exit_nfc;
+ }
+
+ /* handle the block mark swapping */
+ block_mark_swapping(this, payload_virt, auxiliary_virt);
+
+ /* Loop over status bytes, accumulating ECC status. */
+ failed = 0;
+ corrected = 0;
+ status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+
+ for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
+ if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+ continue;
+
+ if (*status == STATUS_UNCORRECTABLE) {
+ failed++;
+ continue;
+ }
+ corrected += *status;
+ }
+
+ /*
+ * Propagate ECC status to the owning MTD only when uncorrectable errors
+ * occurred or the corrected count nearly reaches our correction threshold.
+ */
+ if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
+ mtd->ecc_stats.failed += failed;
+ mtd->ecc_stats.corrected += corrected;
+ }
+
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
+ * details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the block
+ * mark to the caller's buffer. Note that, if block mark swapping was
+ * necessary, it has already been done, so we can rely on the first
+ * byte of the auxiliary buffer to contain the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+
+ read_page_swap_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+exit_nfc:
+ return ret;
+}
+
+static void gpmi_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ const void *payload_virt;
+ dma_addr_t payload_phys;
+ const void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ int ret;
+
+ pr_debug("ecc write page.\n");
+ if (this->swap_block_mark) {
+ /*
+ * If control arrives here, we're doing block mark swapping.
+ * Since we can't modify the caller's buffers, we must copy them
+ * into our own.
+ */
+ memcpy(this->payload_virt, buf, mtd->writesize);
+ payload_virt = this->payload_virt;
+ payload_phys = this->payload_phys;
+
+ memcpy(this->auxiliary_virt, chip->oob_poi,
+ nfc_geo->auxiliary_size);
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* Handle block mark swapping. */
+ block_mark_swapping(this,
+ (void *) payload_virt, (void *) auxiliary_virt);
+ } else {
+ /*
+ * If control arrives here, we're not doing block mark swapping,
+ * so we can try to use the caller's buffers.
+ */
+ ret = send_page_prepare(this,
+ buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ pr_err("Inadequate payload DMA buffer\n");
+ return;
+ }
+
+ ret = send_page_prepare(this,
+ chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ &auxiliary_virt, &auxiliary_phys);
+ if (ret) {
+ pr_err("Inadequate auxiliary DMA buffer\n");
+ goto exit_auxiliary;
+ }
+ }
+
+ /* Ask the NFC. */
+ ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
+ if (ret)
+ pr_err("Error in ECC-based write: %d\n", ret);
+
+ if (!this->swap_block_mark) {
+ send_page_end(this, chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ auxiliary_virt, auxiliary_phys);
+exit_auxiliary:
+ send_page_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ }
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ * true state of the block mark, no matter where that block mark appears in
+ * the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ * return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ * page, using the conventional definition of which bytes are data and which
+ * are OOB. This gives the caller a way to see the actual, physical bytes
+ * in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ * | Raw | ECC-based |
+ * -------------+-------------------------+-------------------------+
+ * | Read the conventional | |
+ * | OOB at the end of the | |
+ * Swapping | page and return it. It | |
+ * | contains exactly what | |
+ * | we want. | Read the block mark and |
+ * -------------+-------------------------+ return it in a buffer |
+ * | Read the conventional | full of set bits. |
+ * | OOB at the end of the | |
+ * | page and also the block | |
+ * Transcribing | mark in the metadata. | |
+ * | Copy the block mark | |
+ * | into the first byte of | |
+ * | the OOB. | |
+ * -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ *
+ * Since MTD assumes the OOB is not covered by ECC, there is no pair of
+ * ECC-based/raw functions for reading or writing the OOB. The fact that the
+ * caller wants an ECC-based or raw view of the page is not propagated down to
+ * this driver.
+ */
+static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ struct gpmi_nand_data *this = chip->priv;
+
+ pr_debug("page number is %d\n", page);
+ /* clear the OOB buffer */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+
+ /* Read out the conventional OOB. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /*
+ * Now, we want to make sure the block mark is correct. In the
+ * Swapping/Raw case, we already have it. Otherwise, we need to
+ * explicitly read it.
+ */
+ if (!this->swap_block_mark) {
+ /* Read the block mark into the first byte of the OOB buffer. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ chip->oob_poi[0] = chip->read_byte(mtd);
+ }
+
+ /*
+ * Return true, indicating that the next call to this function must send
+ * a command.
+ */
+ return true;
+}
+
+static int
+gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ /*
+ * The BCH will use all the (page + oob).
+ * Our gpmi_hw_ecclayout can only prevent JFFS2 from writing the oob.
+ * But it cannot stop some ioctls, such as MEMWRITEOOB, which use
+ * MTD_OPS_PLACE_OOB. So we have to implement this function to prohibit
+ * these ioctls too.
+ */
+ return -EPERM;
+}
+
+static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int block, ret = 0;
+ uint8_t *block_mark;
+ int column, page, status, chipnr;
+
+ /* Get block number */
+ block = (int)(ofs >> chip->bbt_erase_shift);
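+
+ /*
+ * The in-memory bad block table stores two bits per block, so the
+ * shift below locates this block's entry and flags it as bad.
+ */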
+ if (chip->bbt)
+ chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* Do we have a flash based bad block table ? */
+ if (chip->options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(mtd, ofs);
+ else {
+ chipnr = (int)(ofs >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ column = this->swap_block_mark ? mtd->writesize : 0;
+
+ /* Write the block mark. */
+ block_mark = this->data_buffer_dma;
+ block_mark[0] = 0; /* bad block marker */
+
+ /* Shift to get page */
+ page = (int)(ofs >> chip->page_shift);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+ chip->write_buf(mtd, block_mark, 1);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
+ chip->select_chip(mtd, -1);
+ }
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+ /*
+ * Set the boot block stride size.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->stride_size_in_pages = 64;
+
+ /*
+ * Set the search area stride exponent.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->search_area_stride_exponent = 2;
+ return 0;
+}
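For a sense of scale, the defaults above give a first search area of 2^2 = 4 strides of 64 pages each. A minimal arithmetic sketch; the 2 KiB page size is an assumption for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int stride_size_in_pages = 64;		/* default above */
	unsigned int search_area_stride_exponent = 2;	/* default above */
	unsigned int page_size = 2048;			/* assumed page size */

	unsigned int strides = 1u << search_area_stride_exponent;	/* 4 strides */
	unsigned int pages = strides * stride_size_in_pages;		/* 256 pages */

	printf("search area: %u strides, %u pages, %u bytes\n",
	       strides, pages, pages * page_size);			/* 524288 bytes */
	return 0;
}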
+
+static const char *fingerprint = "STMP";
+static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int search_area_size_in_strides;
+ unsigned int stride;
+ unsigned int page;
+ loff_t byte;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int found_an_ncb_fingerprint = false;
+
+ /* Compute the number of strides in a search area. */
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /*
+ * Loop through the first search area, looking for the NCB fingerprint.
+ */
+ dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page and byte addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+ byte = page * mtd->writesize;
+
+ dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+ /*
+ * Read the NCB fingerprint. The fingerprint is four bytes long
+ * and starts in the 12th byte of the page.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ chip->read_buf(mtd, buffer, strlen(fingerprint));
+
+ /* Look for the fingerprint. */
+ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+ found_an_ncb_fingerprint = true;
+ break;
+ }
+
+ }
+
+ chip->select_chip(mtd, saved_chip_number);
+
+ if (found_an_ncb_fingerprint)
+ dev_dbg(dev, "\tFound a fingerprint\n");
+ else
+ dev_dbg(dev, "\tNo fingerprint found\n");
+ return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int block_size_in_pages;
+ unsigned int search_area_size_in_strides;
+ unsigned int search_area_size_in_pages;
+ unsigned int search_area_size_in_blocks;
+ unsigned int block;
+ unsigned int stride;
+ unsigned int page;
+ loff_t byte;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int status;
+
+ /* Compute the search area geometry. */
+ block_size_in_pages = mtd->erasesize / mtd->writesize;
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+ search_area_size_in_pages = search_area_size_in_strides *
+ rom_geo->stride_size_in_pages;
+ search_area_size_in_blocks =
+ (search_area_size_in_pages + (block_size_in_pages - 1)) /
+ block_size_in_pages;
+
+ dev_dbg(dev, "Search Area Geometry :\n");
+ dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+ dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+ dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
+
+ /* Select chip 0. */
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /* Loop over blocks in the first search area, erasing them. */
+ dev_dbg(dev, "Erasing the search area...\n");
+
+ for (block = 0; block < search_area_size_in_blocks; block++) {
+ /* Compute the page address. */
+ page = block * block_size_in_pages;
+
+ /* Erase this block. */
+ dev_dbg(dev, "\tErasing block 0x%x\n", block);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ /* Wait for the erase to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Erase failed.\n", __func__);
+ }
+
+ /* Write the NCB fingerprint into the page buffer. */
+ memset(buffer, ~0, mtd->writesize);
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+ /* Loop through the first search area, writing NCB fingerprints. */
+ dev_dbg(dev, "Writing NCB fingerprints...\n");
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page and byte addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+ byte = page * mtd->writesize;
+
+ /* Write the first page of the current stride. */
+ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ chip->ecc.write_page_raw(mtd, chip, buffer);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ /* Wait for the write to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Write failed.\n", __func__);
+ }
+
+ /* Deselect chip 0. */
+ chip->select_chip(mtd, saved_chip_number);
+ return 0;
+}
+
+static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int block_count;
+ unsigned int block;
+ int chipnr;
+ int page;
+ loff_t byte;
+ uint8_t block_mark;
+ int ret = 0;
+
+ /*
+ * If control arrives here, we can't use block mark swapping, which
+ * means we're forced to use transcription. First, scan for the
+ * transcription stamp. If we find it, then we don't have to do
+ * anything -- the block marks are already transcribed.
+ */
+ if (mx23_check_transcription_stamp(this))
+ return 0;
+
+ /*
+ * If control arrives here, we couldn't find a transcription stamp, so
+ * we presume the block marks are in the conventional location.
+ */
+ dev_dbg(dev, "Transcribing bad block marks...\n");
+
+ /* Compute the number of blocks in the entire medium. */
+ block_count = chip->chipsize >> chip->phys_erase_shift;
+
+ /*
+ * Loop over all the blocks in the medium, transcribing block marks as
+ * we go.
+ */
+ for (block = 0; block < block_count; block++) {
+ /*
+ * Compute the chip, page and byte addresses for this block's
+ * conventional mark.
+ */
+ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+ page = block << (chip->phys_erase_shift - chip->page_shift);
+ byte = block << chip->phys_erase_shift;
+
+ /* Send the command to read the conventional block mark. */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ block_mark = chip->read_byte(mtd);
+ chip->select_chip(mtd, -1);
+
+ /*
+ * Check if the block is marked bad. If so, we need to mark it
+ * again, but this time the result will be a mark in the
+ * location where we transcribe block marks.
+ */
+ if (block_mark != 0xff) {
+ dev_dbg(dev, "Transcribing mark in block %u\n", block);
+ ret = chip->block_markbad(mtd, byte);
+ if (ret)
+ dev_err(dev, "Failed to mark block bad with "
+ "ret %d\n", ret);
+ }
+ }
+
+ /* Write the stamp that indicates we've transcribed the block marks. */
+ mx23_write_transcription_stamp(this);
+ return 0;
+}
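A worked example of the chip/page/byte shift arithmetic used in the transcription loop above; the shift values are assumptions (128 KiB erase blocks, 2 KiB pages, 1 GiB chips), not read from any particular part:

#include <stdio.h>

int main(void)
{
	const unsigned int phys_erase_shift = 17;	/* 128 KiB blocks (assumed) */
	const unsigned int page_shift = 11;		/* 2 KiB pages (assumed)    */
	const unsigned int chip_shift = 30;		/* 1 GiB per chip (assumed) */
	unsigned int block = 5;

	unsigned int chipnr = block >> (chip_shift - phys_erase_shift);	  /* 0       */
	unsigned int page = block << (phys_erase_shift - page_shift);		  /* 320     */
	unsigned long long byte = (unsigned long long)block << phys_erase_shift; /* 0xa0000 */

	printf("block %u -> chip %u, page %u, byte 0x%llx\n", block, chipnr, page, byte);
	return 0;
}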
+
+static int __devinit nand_boot_init(struct gpmi_nand_data *this)
+{
+ nand_boot_set_geometry(this);
+
+ /* This is ROM arch-specific initialization before the BBT scanning. */
+ if (GPMI_IS_MX23(this))
+ return mx23_boot_init(this);
+ return 0;
+}
+
+static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Free the temporary DMA memory for reading ID. */
+ gpmi_free_dma_buffer(this);
+
+ /* Set up the NFC geometry which is used by BCH. */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ pr_err("set geometry ret : %d\n", ret);
+ return ret;
+ }
+
+ /* Alloc the new DMA buffers according to the pagesize and oobsize */
+ return gpmi_alloc_dma_buffer(this);
+}
+
+static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
+ if (GPMI_IS_MX23(this))
+ this->swap_block_mark = false;
+ else
+ this->swap_block_mark = true;
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
+ if (ret)
+ return ret;
+
+ /* NAND boot init, depends on the gpmi_set_geometry(). */
+ return nand_boot_init(this);
+}
+
+static int gpmi_scan_bbt(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret;
+
+ /* Prepare for the BBT scan. */
+ ret = gpmi_pre_bbt_scan(this);
+ if (ret)
+ return ret;
+
+ /* use the default BBT implementation */
+ return nand_default_bbt(mtd);
+}
+
+void gpmi_nfc_exit(struct gpmi_nand_data *this)
+{
+ nand_release(&this->mtd);
+ gpmi_free_dma_buffer(this);
+}
+
+static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
+{
+ struct gpmi_nand_platform_data *pdata = this->pdata;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ int ret;
+
+ /* init current chip */
+ this->current_chip = -1;
+
+ /* init the MTD data structures */
+ mtd->priv = chip;
+ mtd->name = "gpmi-nand";
+ mtd->owner = THIS_MODULE;
+
+ /* Init the nand_chip{}; we don't support a 16-bit NAND Flash bus. */
+ chip->priv = this;
+ chip->select_chip = gpmi_select_chip;
+ chip->cmd_ctrl = gpmi_cmd_ctrl;
+ chip->dev_ready = gpmi_dev_ready;
+ chip->read_byte = gpmi_read_byte;
+ chip->read_buf = gpmi_read_buf;
+ chip->write_buf = gpmi_write_buf;
+ chip->ecc.read_page = gpmi_ecc_read_page;
+ chip->ecc.write_page = gpmi_ecc_write_page;
+ chip->ecc.read_oob = gpmi_ecc_read_oob;
+ chip->ecc.write_oob = gpmi_ecc_write_oob;
+ chip->scan_bbt = gpmi_scan_bbt;
+ chip->badblock_pattern = &gpmi_bbt_descr;
+ chip->block_markbad = gpmi_block_markbad;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 1;
+ chip->ecc.layout = &gpmi_hw_ecclayout;
+
+ /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
+ this->bch_geometry.payload_size = 1024;
+ this->bch_geometry.auxiliary_size = 128;
+ ret = gpmi_alloc_dma_buffer(this);
+ if (ret)
+ goto err_out;
+
+ ret = nand_scan(mtd, pdata->max_chip_count);
+ if (ret) {
+ pr_err("Chip scan failed\n");
+ goto err_out;
+ }
+
+ ret = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata->partitions, pdata->partition_count);
+ if (ret)
+ goto err_out;
+ return 0;
+
+err_out:
+ gpmi_nfc_exit(this);
+ return ret;
+}
+
+static int __devinit gpmi_nand_probe(struct platform_device *pdev)
+{
+ struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct gpmi_nand_data *this;
+ int ret;
+
+ this = kzalloc(sizeof(*this), GFP_KERNEL);
+ if (!this) {
+ pr_err("Failed to allocate per-device memory\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, this);
+ this->pdev = pdev;
+ this->dev = &pdev->dev;
+ this->pdata = pdata;
+
+ if (pdata->platform_init) {
+ ret = pdata->platform_init();
+ if (ret)
+ goto platform_init_error;
+ }
+
+ ret = acquire_resources(this);
+ if (ret)
+ goto exit_acquire_resources;
+
+ ret = init_hardware(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ ret = gpmi_nfc_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ return 0;
+
+exit_nfc_init:
+ release_resources(this);
+platform_init_error:
+exit_acquire_resources:
+ platform_set_drvdata(pdev, NULL);
+ kfree(this);
+ return ret;
+}
+
+static int __exit gpmi_nand_remove(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+
+ gpmi_nfc_exit(this);
+ release_resources(this);
+ platform_set_drvdata(pdev, NULL);
+ kfree(this);
+ return 0;
+}
+
+static const struct platform_device_id gpmi_ids[] = {
+ {
+ .name = "imx23-gpmi-nand",
+ .driver_data = IS_MX23,
+ }, {
+ .name = "imx28-gpmi-nand",
+ .driver_data = IS_MX28,
+ }, {},
+};
+
+static struct platform_driver gpmi_nand_driver = {
+ .driver = {
+ .name = "gpmi-nand",
+ },
+ .probe = gpmi_nand_probe,
+ .remove = __exit_p(gpmi_nand_remove),
+ .id_table = gpmi_ids,
+};
+
+static int __init gpmi_nand_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&gpmi_nand_driver);
+ if (err == 0)
+ printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n");
+ else
+ pr_err("i.MX GPMI NAND driver registration failed\n");
+ return err;
+}
+
+static void __exit gpmi_nand_exit(void)
+{
+ platform_driver_unregister(&gpmi_nand_driver);
+}
+
+module_init(gpmi_nand_init);
+module_exit(gpmi_nand_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 00000000000..e023bccb778
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,273 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <mach/dma.h>
+
+struct resources {
+ void *gpmi_regs;
+ void *bch_regs;
+ unsigned int bch_low_interrupt;
+ unsigned int bch_high_interrupt;
+ unsigned int dma_low_channel;
+ unsigned int dma_high_channel;
+ struct clk *clock;
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len: The length of Galois Field. (e.g., 13 or 14)
+ * @ecc_strength: The ECC strength: the maximum number of bit errors
+ * the BCH engine can correct per ECC chunk.
+ * @page_size: The size, in bytes, of a physical page, including
+ * both data and OOB.
+ * @metadata_size: The size, in bytes, of the metadata.
+ * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note
+ * the first chunk in the page includes both data and
+ * metadata, so it's a bit larger than this value.
+ * @ecc_chunk_count: The number of ECC chunks in the page.
+ * @payload_size: The size, in bytes, of the payload buffer.
+ * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset: The offset into the auxiliary buffer at which
+ * the ECC status appears.
+ * @block_mark_byte_offset: The byte offset in the ECC-based page view at
+ * which the underlying physical block mark appears.
+ * @block_mark_bit_offset: The bit offset into the ECC-based page view at
+ * which the underlying physical block mark appears.
+ */
+struct bch_geometry {
+ unsigned int gf_len;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int metadata_size;
+ unsigned int ecc_chunk_size;
+ unsigned int ecc_chunk_count;
+ unsigned int payload_size;
+ unsigned int auxiliary_size;
+ unsigned int auxiliary_status_offset;
+ unsigned int block_mark_byte_offset;
+ unsigned int block_mark_bit_offset;
+};
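A hypothetical instance of this structure, only to make the field relationships concrete; all numbers are assumptions, not values computed by this driver:

/* 2 KiB data + 64 B OOB, split into 512 B ECC chunks; chunk 0 also covers the metadata. */
static const struct bch_geometry example_geo = {
	.gf_len          = 13,
	.ecc_strength    = 8,
	.page_size       = 2048 + 64,
	.metadata_size   = 10,
	.ecc_chunk_size  = 512,
	.ecc_chunk_count = 4,		/* 2048 / 512 */
};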
+
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages: The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ * search area in boot block strides.
+ */
+struct boot_rom_geometry {
+ unsigned int stride_size_in_pages;
+ unsigned int search_area_stride_exponent;
+};
+
+/* DMA operations types */
+enum dma_ops_type {
+ DMA_FOR_COMMAND = 1,
+ DMA_FOR_READ_DATA,
+ DMA_FOR_WRITE_DATA,
+ DMA_FOR_READ_ECC_PAGE,
+ DMA_FOR_WRITE_ECC_PAGE
+};
+
+/**
+ * struct nand_timing - Fundamental timing attributes for NAND.
+ * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the
+ * maximum of tDS and tWP. A negative value
+ * indicates this characteristic isn't known.
+ * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the
+ * maximum of tDH, tWH and tREH. A negative value
+ * indicates this characteristic isn't known.
+ * @address_setup_in_ns: The address setup time, in nanoseconds. Usually
+ * the maximum of tCLS, tCS and tALS. A negative
+ * value indicates this characteristic isn't known.
+ * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value
+ * indicates this characteristic isn't known.
+ * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ */
+struct nand_timing {
+ int8_t data_setup_in_ns;
+ int8_t data_hold_in_ns;
+ int8_t address_setup_in_ns;
+ int8_t gpmi_sample_delay_in_ns;
+ int8_t tREA_in_ns;
+ int8_t tRLOH_in_ns;
+ int8_t tRHOH_in_ns;
+};
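A hypothetical, conservative initializer showing the "negative means unknown" convention described above; the values are assumptions, not taken from any data sheet:

static const struct nand_timing example_safe_timing = {
	.data_setup_in_ns        = 80,
	.data_hold_in_ns         = 60,
	.address_setup_in_ns     = 25,
	.gpmi_sample_delay_in_ns = 6,
	.tREA_in_ns              = -1,	/* not known */
	.tRLOH_in_ns             = -1,	/* not known */
	.tRHOH_in_ns             = -1,	/* not known */
};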
+
+struct gpmi_nand_data {
+ /* System Interface */
+ struct device *dev;
+ struct platform_device *pdev;
+ struct gpmi_nand_platform_data *pdata;
+
+ /* Resources */
+ struct resources resources;
+
+ /* Flash Hardware */
+ struct nand_timing timing;
+
+ /* BCH */
+ struct bch_geometry bch_geometry;
+ struct completion bch_done;
+
+ /* NAND Boot issue */
+ bool swap_block_mark;
+ struct boot_rom_geometry rom_geometry;
+
+ /* MTD / NAND */
+ struct nand_chip nand;
+ struct mtd_info mtd;
+
+ /* General-use Variables */
+ int current_chip;
+ unsigned int command_length;
+
+ /* passed from upper layer */
+ uint8_t *upper_buf;
+ int upper_len;
+
+ /* for DMA operations */
+ bool direct_dma_map_ok;
+
+ struct scatterlist cmd_sgl;
+ char *cmd_buffer;
+
+ struct scatterlist data_sgl;
+ char *data_buffer_dma;
+
+ void *page_buffer_virt;
+ dma_addr_t page_buffer_phys;
+ unsigned int page_buffer_size;
+
+ void *payload_virt;
+ dma_addr_t payload_phys;
+
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+
+ /* DMA channels */
+#define DMA_CHANS 8
+ struct dma_chan *dma_chans[DMA_CHANS];
+ struct mxs_dma_data dma_data;
+ enum dma_ops_type last_dma_type;
+ enum dma_ops_type dma_type;
+ struct completion dma_done;
+
+ /* private */
+ void *private;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @data_setup_in_cycles: The data setup time, in cycles.
+ * @data_hold_in_cycles: The data hold time, in cycles.
+ * @address_setup_in_cycles: The address setup time, in cycles.
+ * @use_half_periods: Indicates the clock is running slowly, so the
+ * NFC DLL should use half-periods.
+ * @sample_delay_factor: The sample delay factor.
+ */
+struct gpmi_nfc_hardware_timing {
+ uint8_t data_setup_in_cycles;
+ uint8_t data_hold_in_cycles;
+ uint8_t address_setup_in_cycles;
+ bool use_half_periods;
+ uint8_t sample_delay_factor;
+};
+
+/**
+ * struct timing_threshod - Timing threshold
+ * @max_data_setup_cycles: The maximum number of data setup cycles that
+ * can be expressed in the hardware.
+ * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
+ * for data read internal setup. In the Reference
+ * Manual, see the chapter "High-Speed NAND
+ * Timing" for more details.
+ * @max_sample_delay_factor: The maximum sample delay factor that can be
+ * expressed in the hardware.
+ * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
+ * sample delay DLL hardware can possibly work
+ * with (the DLL is unusable with longer periods).
+ * If the full-cycle period is greater than HALF
+ * this value, the DLL must be configured to use
+ * half-periods.
+ * @max_dll_delay_in_ns: The maximum amount of delay, in ns, that the
+ * DLL can implement.
+ * @clock_frequency_in_hz: The clock frequency, in Hz, during the current
+ * I/O transaction. If no I/O transaction is in
+ * progress, this is the clock frequency during
+ * the most recent I/O transaction.
+ */
+struct timing_threshod {
+ const unsigned int max_chip_count;
+ const unsigned int max_data_setup_cycles;
+ const unsigned int internal_data_setup_in_ns;
+ const unsigned int max_sample_delay_factor;
+ const unsigned int max_dll_clock_period_in_ns;
+ const unsigned int max_dll_delay_in_ns;
+ unsigned long clock_frequency_in_hz;
+};
+
+/* Common Services */
+extern int common_nfc_set_geometry(struct gpmi_nand_data *);
+extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+extern void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+
+/* GPMI-NAND helper function library */
+extern int gpmi_init(struct gpmi_nand_data *);
+extern void gpmi_clear_bch(struct gpmi_nand_data *);
+extern void gpmi_dump_info(struct gpmi_nand_data *);
+extern int bch_set_geometry(struct gpmi_nand_data *);
+extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+extern int gpmi_send_command(struct gpmi_nand_data *);
+extern void gpmi_begin(struct gpmi_nand_data *);
+extern void gpmi_end(struct gpmi_nand_data *);
+extern int gpmi_read_data(struct gpmi_nand_data *);
+extern int gpmi_send_data(struct gpmi_nand_data *);
+extern int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+extern int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD 0x00
+#define STATUS_ERASED 0xff
+#define STATUS_UNCORRECTABLE 0xfe
+
+/* Use the platform_id to distinguish different Archs. */
+#define IS_MX23 0x1
+#define IS_MX28 0x2
+#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
+#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
+#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 00000000000..83431240e2f
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,172 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
+#define HW_GPMI_CTRL0 0x00000000
+#define HW_GPMI_CTRL0_SET 0x00000004
+#define HW_GPMI_CTRL0_CLR 0x00000008
+#define HW_GPMI_CTRL0_TOG 0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE 24
+#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
+ (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
+
+/*
+ * Difference in LOCK_CS between imx23 and imx28:
+ * This bit may impact power consumption, so some chips do not set it.
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
+#define LOCK_CS_ENABLE 0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS 20
+#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
+ (GPMI_IS_MX23((x)) \
+ ? MX23_BM_GPMI_CTRL0_CS \
+ : MX28_BM_GPMI_CTRL0_CS))
+
+#define BP_GPMI_CTRL0_ADDRESS 17
+#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v) \
+ (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT 0
+#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v) \
+ (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
+
+#define HW_GPMI_COMPARE 0x00000010
+
+#define HW_GPMI_ECCCTRL 0x00000020
+#define HW_GPMI_ECCCTRL_SET 0x00000024
+#define HW_GPMI_ECCCTRL_CLR 0x00000028
+#define HW_GPMI_ECCCTRL_TOG 0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD 13
+#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
+ (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
+ (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
+
+#define HW_GPMI_ECCCOUNT 0x00000030
+#define HW_GPMI_PAYLOAD 0x00000040
+#define HW_GPMI_AUXILIARY 0x00000050
+#define HW_GPMI_CTRL1 0x00000060
+#define HW_GPMI_CTRL1_SET 0x00000064
+#define HW_GPMI_CTRL1_CLR 0x00000068
+#define HW_GPMI_CTRL1_TOG 0x0000006c
+
+#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE 17
+#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD 16
+#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY 12
+#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v) \
+ (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
+
+#define HW_GPMI_TIMING0 0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD 8
+#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP 0
+#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1 0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
+
+#define HW_GPMI_TIMING2 0x00000090
+#define HW_GPMI_DATA 0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT 0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY 24
+#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
+ (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG 0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0 28
+#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
+#endif
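A hedged sketch of how the two READY sources above could be polled. The helper name and the raw register-base argument are assumptions; the driver itself reaches these registers through its own resources structure.

static bool example_chip_is_ready(void __iomem *gpmi_regs, unsigned int chip, bool is_mx23)
{
	u32 reg, mask;

	if (is_mx23) {
		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
		reg = readl(gpmi_regs + HW_GPMI_DEBUG);
	} else {
		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
		reg = readl(gpmi_regs + HW_GPMI_STAT);
	}
	return reg & mask;
}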
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 02a03e67109..5dc6f0d92f1 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -81,9 +81,6 @@ static int h1910_device_ready(struct mtd_info *mtd)
static int __init h1910_init(void)
{
struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
void __iomem *nandaddr;
if (!machine_is_h1900())
@@ -136,22 +133,10 @@ static int __init h1910_init(void)
iounmap((void *)nandaddr);
return -ENXIO;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
/* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_parse_register(h1910_nand_mtd, NULL, 0,
+ partition_info, NUM_PARTITIONS);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 6e813daed06..e2664073a89 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,10 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
return 0;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = {"cmdline", NULL};
-#endif
-
static int jz_nand_ioremap_resource(struct platform_device *pdev,
const char *name, struct resource **res, void __iomem **base)
{
@@ -299,8 +295,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
- struct mtd_partition *partition_info;
- int num_partitions = 0;
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
if (!nand) {
@@ -373,15 +367,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
goto err_gpio_free;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- num_partitions = parse_mtd_partitions(mtd, part_probes,
- &partition_info, 0);
-#endif
- if (num_partitions <= 0 && pdata) {
- num_partitions = pdata->num_partitions;
- partition_info = pdata->partitions;
- }
- ret = mtd_device_register(mtd, partition_info, num_partitions);
+ ret = mtd_device_parse_register(mtd, NULL, 0,
+ pdata ? pdata->partitions : NULL,
+ pdata ? pdata->num_partitions : 0);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index eb1fbac63eb..5ede6470634 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,8 +131,6 @@ struct mpc5121_nfc_prv {
static void mpc5121_nfc_done(struct mtd_info *mtd);
-static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
-
/* Read NFC register */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
{
@@ -656,13 +654,13 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
struct mpc5121_nfc_prv *prv;
struct resource res;
struct mtd_info *mtd;
- struct mtd_partition *parts;
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
const __be32 *chips_no;
int resettime = 0;
int retval = 0;
int rev, len;
+ struct mtd_part_parser_data ppdata;
/*
* Check SoC revision. This driver supports only NFC
@@ -727,6 +725,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
}
mtd->name = "MPC5121 NAND";
+ ppdata.of_node = dn;
chip->dev_ready = mpc5121_nfc_dev_ready;
chip->cmdfunc = mpc5121_nfc_command;
chip->read_byte = mpc5121_nfc_read_byte;
@@ -735,7 +734,8 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
chip->write_buf = mpc5121_nfc_write_buf;
chip->verify_buf = mpc5121_nfc_verify_buf;
chip->select_chip = mpc5121_nfc_select_chip;
- chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->options = NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
/* Support external chip-select logic on ADS5121 board */
@@ -837,19 +837,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
dev_set_drvdata(dev, mtd);
/* Register device in MTD */
- retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
-#ifdef CONFIG_MTD_OF_PARTS
- if (retval == 0)
- retval = of_mtd_parse_partitions(dev, dn, &parts);
-#endif
- if (retval < 0) {
- dev_err(dev, "Error parsing MTD partitions!\n");
- devm_free_irq(dev, prv->irq, mtd);
- retval = -EINVAL;
- goto error;
- }
-
- retval = mtd_device_register(mtd, parts, retval);
+ retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 90df34c4d26..74a43b818d0 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -41,7 +41,7 @@
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2() cpu_is_mx51()
+#define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53())
#define nfc_is_v3() nfc_is_v3_2()
/* Addresses for NFC registers */
@@ -143,7 +143,6 @@
struct mxc_nand_host {
struct mtd_info mtd;
struct nand_chip nand;
- struct mtd_partition *parts;
struct device *dev;
void *spare0;
@@ -350,8 +349,7 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
udelay(1);
}
if (max_retries < 0)
- DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n",
- __func__);
+ pr_debug("%s: INT not set\n", __func__);
}
}
@@ -371,7 +369,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
- DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+ pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -387,8 +385,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
- DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
- __func__);
+ pr_debug("%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@@ -411,7 +408,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
- DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast);
+ pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -561,8 +558,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
return -1;
}
@@ -849,7 +845,7 @@ static void preset_v1_v2(struct mtd_info *mtd)
writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
} else if (nfc_is_v1()) {
writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
} else
BUG();
@@ -932,8 +928,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- DEBUG(MTD_DEBUG_LEVEL3,
- "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
@@ -1044,7 +1039,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- int err = 0, __maybe_unused nr_parts = 0;
+ int err = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
/* Allocate memory for MTD device structure and private data */
@@ -1179,7 +1174,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
/* update flash based bbt */
- this->options |= NAND_USE_FLASH_BBT;
+ this->bbt_options |= NAND_BBT_USE_FLASH;
}
init_completion(&host->op_completion);
@@ -1231,16 +1226,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
/* Register the partitions */
- nr_parts =
- parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
- if (nr_parts > 0)
- mtd_device_register(mtd, host->parts, nr_parts);
- else if (pdata->parts)
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
- else {
- pr_info("Registering %s as whole device\n", mtd->name);
- mtd_device_register(mtd, NULL, 0);
- }
+ mtd_device_parse_register(mtd, part_probes, 0,
+ pdata->parts, pdata->nr_parts);
platform_set_drvdata(pdev, host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a46e9bb847b..3ed9c5e4d34 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -21,7 +21,7 @@
* TODO:
* Enable cached programming for 2k page size chips
* Check, if mtd->ecctype should be set to MTD_ECC_HW
- * if we have HW ecc support.
+ * if we have HW ECC support.
* The AG-AND chips have nice features for speed improvement,
* which are not supported yet. Read / program 4 pages in one go.
* BBT table is not serialized, has to be fixed
@@ -113,21 +113,19 @@ static int check_offs_len(struct mtd_info *mtd,
/* Start address must align on block boundary */
if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ pr_debug("%s: unaligned address\n", __func__);
ret = -EINVAL;
}
/* Length must align on block boundary */
if (len & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
- __func__);
+ pr_debug("%s: length not block aligned\n", __func__);
ret = -EINVAL;
}
/* Do not allow past end of device */
if (ofs + len > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
- __func__);
+ pr_debug("%s: past end of device\n", __func__);
ret = -EINVAL;
}
@@ -136,9 +134,9 @@ static int check_offs_len(struct mtd_info *mtd,
/**
* nand_release_device - [GENERIC] release chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Deselect, release chip lock and wake up anyone waiting on the device
+ * Deselect, release chip lock and wake up anyone waiting on the device.
*/
static void nand_release_device(struct mtd_info *mtd)
{
@@ -157,9 +155,9 @@ static void nand_release_device(struct mtd_info *mtd)
/**
* nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Default read function for 8bit buswith
+ * Default read function for 8bit buswidth
*/
static uint8_t nand_read_byte(struct mtd_info *mtd)
{
@@ -169,10 +167,11 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
/**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
- * @mtd: MTD device structure
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
*
- * Default read function for 16bit buswith with
- * endianess conversion
*/
static uint8_t nand_read_byte16(struct mtd_info *mtd)
{
@@ -182,10 +181,9 @@ static uint8_t nand_read_byte16(struct mtd_info *mtd)
/**
* nand_read_word - [DEFAULT] read one word from the chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Default read function for 16bit buswith without
- * endianess conversion
+ * Default read function for 16bit buswidth without endianness conversion.
*/
static u16 nand_read_word(struct mtd_info *mtd)
{
@@ -195,8 +193,8 @@ static u16 nand_read_word(struct mtd_info *mtd)
/**
* nand_select_chip - [DEFAULT] control CE line
- * @mtd: MTD device structure
- * @chipnr: chipnumber to select, -1 for deselect
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
*
* Default select function for 1 chip devices.
*/
@@ -218,11 +216,11 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
/**
* nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
*
- * Default write function for 8bit buswith
+ * Default write function for 8bit buswidth.
*/
static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
@@ -235,11 +233,11 @@ static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
/**
* nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
*
- * Default read function for 8bit buswith
+ * Default read function for 8bit buswidth.
*/
static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
@@ -252,11 +250,11 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
/**
* nand_verify_buf - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
*
- * Default verify function for 8bit buswith
+ * Default verify function for 8bit buswidth.
*/
static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
@@ -271,11 +269,11 @@ static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
/**
* nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
*
- * Default write function for 16bit buswith
+ * Default write function for 16bit buswidth.
*/
static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
@@ -291,11 +289,11 @@ static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
/**
* nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
*
- * Default read function for 16bit buswith
+ * Default read function for 16bit buswidth.
*/
static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
@@ -310,11 +308,11 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
/**
* nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
*
- * Default verify function for 16bit buswith
+ * Default verify function for 16bit buswidth.
*/
static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
@@ -332,9 +330,9 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
*
* Check, if the block is bad.
*/
@@ -344,7 +342,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
struct nand_chip *chip = mtd->priv;
u16 bad;
- if (chip->options & NAND_BBT_SCANLASTPAGE)
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -384,11 +382,11 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
/**
* nand_default_block_markbad - [DEFAULT] mark a block bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
*
- * This is the default implementation, which can be overridden by
- * a hardware specific driver.
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver.
*/
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
@@ -396,7 +394,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
uint8_t buf[2] = { 0, 0 };
int block, ret, i = 0;
- if (chip->options & NAND_BBT_SCANLASTPAGE)
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
/* Get block number */
@@ -404,33 +402,31 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (chip->bbt)
chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
- /* Do we have a flash based bad block table ? */
- if (chip->options & NAND_USE_FLASH_BBT)
+ /* Do we have a flash based bad block table? */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
ret = nand_update_bbt(mtd, ofs);
else {
+ struct mtd_oob_ops ops;
+
nand_get_device(chip, mtd, FL_WRITING);
- /* Write to first two pages and to byte 1 and 6 if necessary.
- * If we write to more than one location, the first error
- * encountered quits the procedure. We write two bytes per
- * location, so we dont have to mess with 16 bit access.
+ /*
+ * Write to first two pages if necessary. If we write to more
+ * than one location, the first error encountered quits the
+ * procedure. We write two bytes per location, so we don't have
+ * to mess with 16 bit access.
*/
+ ops.len = ops.ooblen = 2;
+ ops.datbuf = NULL;
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos & ~0x01;
+ ops.mode = MTD_OPS_PLACE_OOB;
do {
- chip->ops.len = chip->ops.ooblen = 2;
- chip->ops.datbuf = NULL;
- chip->ops.oobbuf = buf;
- chip->ops.ooboffs = chip->badblockpos & ~0x01;
-
- ret = nand_do_write_oob(mtd, ofs, &chip->ops);
+ ret = nand_do_write_oob(mtd, ofs, &ops);
- if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) {
- chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS
- & ~0x01;
- ret = nand_do_write_oob(mtd, ofs, &chip->ops);
- }
i++;
ofs += mtd->writesize;
- } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) &&
+ } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) &&
i < 2);
nand_release_device(mtd);
@@ -443,16 +439,16 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- * Check, if the device is write protected
+ * @mtd: MTD device structure
*
- * The function expects, that the device is already selected
+ * Check if the device is write protected. The function expects that the
+ * device is already selected.
*/
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- /* broken xD cards report WP despite being writable */
+ /* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
@@ -463,10 +459,10 @@ static int nand_check_wp(struct mtd_info *mtd)
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
- * @allowbbt: 1, if its allowed to access the bbt area
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if its allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
@@ -485,8 +481,8 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
/**
* panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
- * @timeo: Timeout
+ * @mtd: MTD device structure
+ * @timeo: Timeout
*
* Helper function for nand_wait_ready used when needing to wait in interrupt
* context.
@@ -505,10 +501,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
}
}
-/*
- * Wait for the ready pin, after a command
- * The timeout is catched later.
- */
+/* Wait for the ready pin, after a command. The timeout is caught later. */
void nand_wait_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
@@ -519,7 +512,7 @@ void nand_wait_ready(struct mtd_info *mtd)
return panic_nand_wait_ready(mtd, 400);
led_trigger_event(nand_led_trigger, LED_FULL);
- /* wait until command is processed or timeout occures */
+ /* Wait until command is processed or timeout occurs */
do {
if (chip->dev_ready(mtd))
break;
@@ -531,13 +524,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
* nand_command - [DEFAULT] Send command to NAND device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
*
- * Send command to NAND device. This function is used for small page
- * devices (256/512 Bytes per page)
+ * Send command to NAND device. This function is used for small page devices
+ * (256/512 Bytes per page).
*/
static void nand_command(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
@@ -545,9 +538,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
register struct nand_chip *chip = mtd->priv;
int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
- /*
- * Write out the command to the device.
- */
+ /* Write out the command to the device */
if (command == NAND_CMD_SEQIN) {
int readcmd;
@@ -567,9 +558,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
}
chip->cmd_ctrl(mtd, command, ctrl);
- /*
- * Address cycle, when necessary
- */
+ /* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
/* Serially input address */
if (column != -1) {
@@ -590,8 +579,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
/*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
+ * Program and erase have their own busy handlers; status and sequential
+ * in need no delay.
*/
switch (command) {
@@ -625,8 +614,10 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
return;
}
}
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
ndelay(100);
nand_wait_ready(mtd);
@@ -634,14 +625,14 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
/**
* nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This is the version for the new large page
- * devices We dont have the separate regions as we have in the small page
- * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
*/
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
@@ -683,8 +674,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
/*
- * program and erase have their own busy handlers
- * status, sequential in, and deplete1 need no delay
+ * Program and erase have their own busy handlers; status, sequential
+ * in, and deplete1 need no delay.
*/
switch (command) {
@@ -698,14 +689,12 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_DEPLETE1:
return;
- /*
- * read error status commands require only a short delay
- */
case NAND_CMD_STATUS_ERROR:
case NAND_CMD_STATUS_ERROR0:
case NAND_CMD_STATUS_ERROR1:
case NAND_CMD_STATUS_ERROR2:
case NAND_CMD_STATUS_ERROR3:
+ /* Read error status commands require only a short delay */
udelay(chip->chip_delay);
return;
@@ -739,7 +728,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
default:
/*
* If we don't have access to the busy pin, we apply the given
- * command delay
+ * command delay.
*/
if (!chip->dev_ready) {
udelay(chip->chip_delay);
@@ -747,8 +736,10 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
}
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
ndelay(100);
nand_wait_ready(mtd);
@@ -756,25 +747,25 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
/**
* panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
*
* Used when in panic, no locks are taken.
*/
static void panic_nand_get_device(struct nand_chip *chip,
struct mtd_info *mtd, int new_state)
{
- /* Hardware controller shared among independend devices */
+ /* Hardware controller shared among independent devices */
chip->controller->active = chip;
chip->state = new_state;
}
/**
* nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
@@ -812,10 +803,10 @@ retry:
}
/**
- * panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
- * @timeo: Timeout
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
*
* Wait for command done. This is a helper function for nand_wait used when
* we are in interrupt context. May happen when in panic and trying to write
@@ -838,13 +829,13 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
*
- * Wait for command done. This applies to erase and program only
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
+ * Wait for command done. This applies to erase and program only. Erase can
+ * take up to 400ms and program up to 20ms according to general NAND and
+ * SmartMedia specs.
*/
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
@@ -859,8 +850,10 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
led_trigger_event(nand_led_trigger, LED_FULL);
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
ndelay(100);
if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
@@ -890,16 +883,15 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
/**
* __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- *
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
- * @invert: when = 0, unlock the range of blocks within the lower and
- * upper boundary address
- * when = 1, unlock the range of blocks outside the boundaries
- * of the lower and upper boundary address
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
*
- * return - unlock status
+ * Returns unlock status.
*/
static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
uint64_t len, int invert)
@@ -919,10 +911,9 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
- udelay(1000);
/* See if device thinks it succeeded */
if (status & 0x01) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
}
@@ -932,12 +923,11 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
/**
* nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- *
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
*
- * return - unlock status
+ * Returns unlock status.
*/
int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
@@ -945,7 +935,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
int chipnr;
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
@@ -964,7 +954,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ pr_debug("%s: device is write protected!\n",
__func__);
ret = -EIO;
goto out;
@@ -981,18 +971,16 @@ EXPORT_SYMBOL(nand_unlock);
/**
* nand_lock - [REPLACEABLE] locks all blocks present in the device
- *
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
*
- * return - lock status
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it allows only locking all blocks, not a specified
+ * range of blocks. For now the 'lock' feature is implemented by making use
+ * of 'unlock'.
*
- * This feature is not supported in many NAND parts. 'Micron' NAND parts
- * do have this feature, but it allows only to lock all blocks, not for
- * specified range for block.
- *
- * Implementing 'lock' feature by making use of 'unlock', for now.
+ * Returns lock status.
*/
int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
@@ -1000,7 +988,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
int chipnr, status, page;
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
@@ -1015,7 +1003,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ pr_debug("%s: device is write protected!\n",
__func__);
status = MTD_ERASE_FAILED;
ret = -EIO;
@@ -1028,10 +1016,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
- udelay(1000);
/* See if device thinks it succeeded */
if (status & 0x01) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
goto out;
@@ -1047,13 +1034,13 @@ out:
EXPORT_SYMBOL(nand_lock);
/**
- * nand_read_page_raw - [Intern] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
*
- * Not for syndrome calculating ecc controllers, which use a special oob layout
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
@@ -1064,11 +1051,11 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
*
* We need a special oob layout and handling even when OOB isn't used.
*/
@@ -1107,11 +1094,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
}
/**
- * nand_read_page_swecc - [REPLACABLE] software ecc based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
*/
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
@@ -1148,12 +1135,12 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @data_offs: offset of requested data within the page
- * @readlen: data length
- * @bufpoi: buffer to store read data
+ * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
*/
static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
@@ -1166,12 +1153,12 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
int index = 0;
- /* Column address wihin the page aligned to ECC size (256bytes). */
+ /* Column address within the page aligned to ECC size (256 bytes) */
start_step = data_offs / chip->ecc.size;
end_step = (data_offs + readlen - 1) / chip->ecc.size;
num_steps = end_step - start_step + 1;
- /* Data size aligned to ECC ecc.size*/
+ /* Data size aligned to ECC ecc.size */
datafrag_len = num_steps * chip->ecc.size;
eccfrag_len = num_steps * chip->ecc.bytes;
@@ -1183,13 +1170,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
p = bufpoi + data_col_addr;
chip->read_buf(mtd, p, datafrag_len);
- /* Calculate ECC */
+ /* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
- /* The performance is faster if to position offsets
- according to ecc.pos. Let make sure here that
- there are no gaps in ecc positions */
+ /*
+ * The performance is faster if we position offsets according to
+ * ecc.pos. Let's make sure that there are no gaps in ECC positions.
+ */
for (i = 0; i < eccfrag_len - 1; i++) {
if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
eccpos[i + start_step * chip->ecc.bytes + 1]) {
@@ -1201,8 +1189,10 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
} else {
- /* send the command to read the particular ecc bytes */
- /* take care about buswidth alignment in read_buf */
+ /*
+ * Send the command to read the particular ECC bytes, taking care
+ * about buswidth alignment in read_buf.
+ */
index = start_step * chip->ecc.bytes;
aligned_pos = eccpos[index] & ~(busw - 1);
@@ -1235,13 +1225,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
*
- * Not for syndrome calculating ecc controllers which need a special oob layout
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
*/
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
@@ -1280,18 +1270,17 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
*
- * Hardware ECC for large page chips, require OOB to be read first.
- * For this ECC mode, the write_page method is re-used from ECC_HW.
- * These methods read/write ECC from the OOB area, unlike the
- * ECC_HW_SYNDROME support with multiple ECC steps, follows the
- * "infix ECC" scheme and reads/writes ECC from the data area, by
- * overwriting the NAND manufacturer bad block markings.
+ * Hardware ECC for large page chips requires OOB to be read first. For this
+ * ECC mode, the write_page method is re-used from ECC_HW. These methods
+ * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
+ * multiple ECC steps, which follows the "infix ECC" scheme and reads/writes ECC
+ * from the data area, overwriting the NAND manufacturer bad block markings.
*/
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf, int page)
@@ -1329,14 +1318,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
}
/**
- * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @page: page number to read
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
*
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
*/
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
@@ -1384,29 +1373,29 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_transfer_oob - [Internal] Transfer oob to client buffer
- * @chip: nand chip structure
- * @oob: oob destination address
- * @ops: oob ops structure
- * @len: size of oob to transfer
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
*/
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
memcpy(oob, chip->oob_poi + ops->ooboffs, len);
return oob + len;
- case MTD_OOB_AUTO: {
+ case MTD_OPS_AUTO_OOB: {
struct nand_oobfree *free = chip->ecc.layout->oobfree;
uint32_t boffs = 0, roffs = ops->ooboffs;
size_t bytes = 0;
for (; free->length && len; free++, len -= bytes) {
- /* Read request not from offset 0 ? */
+ /* Read request not from offset 0? */
if (unlikely(roffs)) {
if (roffs >= free->length) {
roffs -= free->length;
@@ -1432,11 +1421,10 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
}
/**
- * nand_do_read_ops - [Internal] Read data with ECC
- *
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob ops structure
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
*
* Internal function. Called with chip held.
*/
@@ -1451,7 +1439,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
- uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
+ uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
mtd->oobavail : mtd->oobsize;
uint8_t *bufpoi, *oob, *buf;
@@ -1473,7 +1461,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
- /* Is the current page in the buffer ? */
+ /* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
bufpoi = aligned ? buf : chip->buffers->databuf;
@@ -1483,7 +1471,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
}
/* Now read the page into the buffer */
- if (unlikely(ops->mode == MTD_OOB_RAW))
+ if (unlikely(ops->mode == MTD_OPS_RAW))
ret = chip->ecc.read_page_raw(mtd, chip,
bufpoi, page);
else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
@@ -1492,14 +1480,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
else
ret = chip->ecc.read_page(mtd, chip, bufpoi,
page);
- if (ret < 0)
+ if (ret < 0) {
+ if (!aligned)
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
break;
+ }
/* Transfer not aligned data */
if (!aligned) {
if (!NAND_SUBPAGE_READ(chip) && !oob &&
- !(mtd->ecc_stats.failed - stats.failed))
+ !(mtd->ecc_stats.failed - stats.failed) &&
+ (ops->mode != MTD_OPS_RAW))
chip->pagebuf = realpage;
+ else
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
memcpy(buf, chip->buffers->databuf + col, bytes);
}
@@ -1539,7 +1535,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (!readlen)
break;
- /* For subsequent reads align to page boundary. */
+ /* For subsequent reads align to page boundary */
col = 0;
/* Increment page address */
realpage++;
@@ -1552,8 +1548,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
+ /*
+ * Check if the chip supports auto page increment or if we
+ * have hit a block boundary.
*/
if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
sndcmd = 1;
@@ -1574,18 +1571,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/**
* nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
*
- * Get hold of the chip and call nand_do_read
+ * Get hold of the chip and call nand_do_read.
*/
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, uint8_t *buf)
{
struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
int ret;
/* Do not allow reads past end of device */
@@ -1596,13 +1594,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
nand_get_device(chip, mtd, FL_READING);
- chip->ops.len = len;
- chip->ops.datbuf = buf;
- chip->ops.oobbuf = NULL;
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ops.mode = 0;
- ret = nand_do_read_ops(mtd, from, &chip->ops);
+ ret = nand_do_read_ops(mtd, from, &ops);
- *retlen = chip->ops.retlen;
+ *retlen = ops.retlen;
nand_release_device(mtd);
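
The hunk above drops the shared chip->ops scratch area in favour of an on-stack
struct mtd_oob_ops, so concurrent readers no longer stomp on each other's
request descriptors. As a reference, a minimal sketch of the same pattern from
an MTD user's point of view; the helper name and error handling are
illustrative only and not part of this patch:

	#include <linux/mtd/mtd.h>

	/* Illustrative helper: data-only read, mirroring what nand_read() now does. */
	static int example_read_data(struct mtd_info *mtd, loff_t from,
				     size_t len, uint8_t *buf, size_t *retlen)
	{
		struct mtd_oob_ops ops = { };	/* on the stack, no shared chip->ops */
		int ret;

		ops.mode   = MTD_OPS_PLACE_OOB;	/* 0, same as "ops.mode = 0" above */
		ops.len    = len;
		ops.datbuf = buf;
		ops.oobbuf = NULL;		/* no OOB transfer */

		ret = mtd->read_oob(mtd, from, &ops);	/* dispatches to nand_read_oob() */
		*retlen = ops.retlen;
		return ret;
	}
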
@@ -1610,11 +1609,11 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
}
/**
- * nand_read_oob_std - [REPLACABLE] the most common OOB data read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
*/
static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page, int sndcmd)
@@ -1628,12 +1627,12 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
* with syndromes
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
*/
static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page, int sndcmd)
@@ -1667,10 +1666,10 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_write_oob_std - [REPLACABLE] the most common OOB data write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
*/
static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
@@ -1690,11 +1689,11 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC
- * with syndrome - only for large page flash !
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
*/
static int nand_write_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
@@ -1749,34 +1748,37 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
}
/**
- * nand_do_read_oob - [Intern] NAND read out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operations description structure
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
*
- * NAND read out-of-band data from the spare area
+ * NAND read out-of-band data from the spare area.
*/
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
int page, realpage, chipnr, sndcmd = 1;
struct nand_chip *chip = mtd->priv;
+ struct mtd_ecc_stats stats;
int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
int readlen = ops->ooblen;
int len;
uint8_t *buf = ops->oobbuf;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
__func__, (unsigned long long)from, readlen);
- if (ops->mode == MTD_OOB_AUTO)
+ stats = mtd->ecc_stats;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
len = chip->ecc.layout->oobavail;
else
len = mtd->oobsize;
if (unlikely(ops->ooboffs >= len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read "
- "outside oob\n", __func__);
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
return -EINVAL;
}
@@ -1784,8 +1786,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (unlikely(from >= mtd->size ||
ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
(from >> chip->page_shift)) * len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end "
- "of device\n", __func__);
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
return -EINVAL;
}
@@ -1797,7 +1799,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
page = realpage & chip->pagemask;
while (1) {
- sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+ if (ops->mode == MTD_OPS_RAW)
+ sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+ else
+ sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
@@ -1830,24 +1835,29 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
+ /*
+ * Check if the chip supports auto page increment or if we
+ * have hit a block boundary.
*/
if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
sndcmd = 1;
}
ops->oobretlen = ops->ooblen;
- return 0;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
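
With the ecc_stats bookkeeping added above, an OOB read can now return
-EUCLEAN (bitflips were corrected) or -EBADMSG (uncorrectable error) instead
of an unconditional 0. A sketch of how a caller might act on that; whether the
OOB path actually reports corrections depends on the driver's read_oob
implementation, and the policy shown here is only an example:

	#include <linux/mtd/mtd.h>

	/* Illustrative: OOB-only read that distinguishes the new return codes. */
	static int example_check_oob(struct mtd_info *mtd, loff_t from, uint8_t *oobbuf)
	{
		struct mtd_oob_ops ops = { };
		int ret;

		ops.mode   = MTD_OPS_PLACE_OOB;
		ops.ooblen = mtd->oobsize;
		ops.oobbuf = oobbuf;		/* datbuf stays NULL: OOB only */

		ret = mtd->read_oob(mtd, from, &ops);
		if (ret == -EUCLEAN)
			return 0;		/* corrected bitflips; data is still valid */
		if (ret == -EBADMSG)
			return ret;		/* uncorrectable ECC error */
		return ret;			/* 0 on clean read, or another failure */
	}
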
/**
* nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
*
- * NAND read data and/or out-of-band data
+ * NAND read data and/or out-of-band data.
*/
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@@ -1859,17 +1869,17 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
/* Do not allow reads past end of device */
if (ops->datbuf && (from + ops->len) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read "
- "beyond end of device\n", __func__);
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
return -EINVAL;
}
nand_get_device(chip, mtd, FL_READING);
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
break;
default:
@@ -1888,12 +1898,12 @@ out:
/**
- * nand_write_page_raw - [Intern] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
*
- * Not for syndrome calculating ecc controllers, which use a special oob layout
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf)
@@ -1903,10 +1913,10 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_write_page_raw_syndrome - [Intern] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
@@ -1942,10 +1952,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
chip->write_buf(mtd, oob, size);
}
/**
- * nand_write_page_swecc - [REPLACABLE] software ecc based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
*/
static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf)
@@ -1957,7 +1967,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *p = buf;
uint32_t *eccpos = chip->ecc.layout->eccpos;
- /* Software ecc calculation */
+ /* Software ECC calculation */
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1968,10 +1978,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
*/
static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf)
@@ -1996,13 +2006,13 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
*
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
*/
static void nand_write_page_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf)
@@ -2041,12 +2051,12 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
/**
* nand_write_page - [REPLACEABLE] write one page
- * @mtd: MTD device structure
- * @chip: NAND chip descriptor
- * @buf: the data to write
- * @page: page number to write
- * @cached: cached programming
- * @raw: use _raw version of write_page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @buf: the data to write
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int page, int cached, int raw)
@@ -2061,8 +2071,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.write_page(mtd, chip, buf);
/*
- * Cached progamming disabled for now, Not sure if its worth the
- * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
+ * Cached programming disabled for now. Not sure if it's worth the
+ * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
*/
cached = 0;
@@ -2072,7 +2082,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
status = chip->waitfunc(mtd, chip);
/*
* See if operation failed and additional status checks are
- * available
+ * available.
*/
if ((status & NAND_STATUS_FAIL) && (chip->errstat))
status = chip->errstat(mtd, chip, FL_WRITING, status,
@@ -2096,29 +2106,37 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_fill_oob - [Internal] Transfer client buffer to oob
- * @chip: nand chip structure
- * @oob: oob data buffer
- * @len: oob data write length
- * @ops: oob ops structure
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
*/
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
- struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
{
+ struct nand_chip *chip = mtd->priv;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
memcpy(chip->oob_poi + ops->ooboffs, oob, len);
return oob + len;
- case MTD_OOB_AUTO: {
+ case MTD_OPS_AUTO_OOB: {
struct nand_oobfree *free = chip->ecc.layout->oobfree;
uint32_t boffs = 0, woffs = ops->ooboffs;
size_t bytes = 0;
for (; free->length && len; free++, len -= bytes) {
- /* Write request not from offset 0 ? */
+ /* Write request not from offset 0? */
if (unlikely(woffs)) {
if (woffs >= free->length) {
woffs -= free->length;
@@ -2146,12 +2164,12 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
/**
- * nand_do_write_ops - [Internal] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operations description structure
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
*
- * NAND write with ECC
+ * NAND write with ECC.
*/
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
@@ -2161,7 +2179,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
- uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
+ uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
mtd->oobavail : mtd->oobsize;
uint8_t *oob = ops->oobbuf;
@@ -2172,10 +2190,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (!writelen)
return 0;
- /* reject writes, which are not page aligned */
+ /* Reject writes, which are not page aligned */
if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
- printk(KERN_NOTICE "%s: Attempt to write not "
- "page aligned data\n", __func__);
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
return -EINVAL;
}
@@ -2201,10 +2219,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
(chip->pagebuf << chip->page_shift) < (to + ops->len))
chip->pagebuf = -1;
- /* If we're not given explicit OOB data, let it be 0xFF */
- if (likely(!oob))
- memset(chip->oob_poi, 0xff, mtd->oobsize);
-
/* Don't allow multipage oob writes with offset */
if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
return -EINVAL;
@@ -2214,7 +2228,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
- /* Partial page write ? */
+ /* Partial page write? */
if (unlikely(column || writelen < (mtd->writesize - 1))) {
cached = 0;
bytes = min_t(int, bytes - column, (int) writelen);
@@ -2226,12 +2240,15 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (unlikely(oob)) {
size_t len = min(oobwritelen, oobmaxlen);
- oob = nand_fill_oob(chip, oob, len, ops);
+ oob = nand_fill_oob(mtd, oob, len, ops);
oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
}
ret = chip->write_page(mtd, chip, wbuf, page, cached,
- (ops->mode == MTD_OOB_RAW));
+ (ops->mode == MTD_OPS_RAW));
if (ret)
break;
@@ -2260,11 +2277,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
/**
* panic_nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
*
* NAND write with ECC. Used when performing writes in interrupt context, this
* may for example be called by mtdoops when writing an oops while in panic.
@@ -2273,6 +2290,7 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
int ret;
/* Do not allow reads past end of device */
@@ -2281,36 +2299,38 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
if (!len)
return 0;
- /* Wait for the device to get ready. */
+ /* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
- /* Grab the device. */
+ /* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
- chip->ops.len = len;
- chip->ops.datbuf = (uint8_t *)buf;
- chip->ops.oobbuf = NULL;
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ops.mode = 0;
- ret = nand_do_write_ops(mtd, to, &chip->ops);
+ ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = chip->ops.retlen;
+ *retlen = ops.retlen;
return ret;
}
/**
* nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
*
- * NAND write with ECC
+ * NAND write with ECC.
*/
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
int ret;
/* Do not allow reads past end of device */
@@ -2321,13 +2341,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
nand_get_device(chip, mtd, FL_WRITING);
- chip->ops.len = len;
- chip->ops.datbuf = (uint8_t *)buf;
- chip->ops.oobbuf = NULL;
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ops.mode = 0;
- ret = nand_do_write_ops(mtd, to, &chip->ops);
+ ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = chip->ops.retlen;
+ *retlen = ops.retlen;
nand_release_device(mtd);
@@ -2336,11 +2357,11 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
*
- * NAND write out-of-band
+ * NAND write out-of-band.
*/
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
@@ -2348,24 +2369,24 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
int chipnr, page, status, len;
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+ pr_debug("%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int)to, (int)ops->ooblen);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
len = chip->ecc.layout->oobavail;
else
len = mtd->oobsize;
/* Do not allow write past end of page */
if ((ops->ooboffs + ops->ooblen) > len) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write "
- "past end of page\n", __func__);
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
return -EINVAL;
}
if (unlikely(ops->ooboffs >= len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start "
- "write outside oob\n", __func__);
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
return -EINVAL;
}
@@ -2374,8 +2395,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
ops->ooboffs + ops->ooblen >
((mtd->size >> chip->page_shift) -
(to >> chip->page_shift)) * len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
- "end of device\n", __func__);
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
return -EINVAL;
}
@@ -2401,10 +2422,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
if (page == chip->pagebuf)
chip->pagebuf = -1;
- memset(chip->oob_poi, 0xff, mtd->oobsize);
- nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
- status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
- memset(chip->oob_poi, 0xff, mtd->oobsize);
+ nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
if (status)
return status;
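
The hunk above teaches nand_do_write_oob() to honour MTD_OPS_RAW by dispatching
to chip->ecc.write_oob_raw, while nand_fill_oob() now pre-fills the whole OOB
with 0xff. A caller-side sketch of such an OOB write; the helper is
hypothetical, ooblen must fit the free OOB area (or the full OOB size for a raw
write), and a raw write assumes the driver provides or inherits a raw OOB
handler:

	#include <linux/types.h>
	#include <linux/mtd/mtd.h>

	/* Illustrative: write a few OOB bytes, either through the ECC layout or raw. */
	static int example_write_oob(struct mtd_info *mtd, loff_t to,
				     uint8_t *oob, size_t len, bool raw)
	{
		struct mtd_oob_ops ops = { };

		ops.mode    = raw ? MTD_OPS_RAW : MTD_OPS_AUTO_OOB;
		ops.ooblen  = len;
		ops.ooboffs = 0;
		ops.oobbuf  = oob;	/* datbuf stays NULL: OOB only */

		/* Untouched OOB bytes end up as 0xff (see nand_fill_oob() above). */
		return mtd->write_oob(mtd, to, &ops);
	}
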
@@ -2416,9 +2439,9 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
/**
* nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
*/
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
@@ -2430,17 +2453,17 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
/* Do not allow writes past end of device */
if (ops->datbuf && (to + ops->len) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
- "end of device\n", __func__);
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
return -EINVAL;
}
nand_get_device(chip, mtd, FL_WRITING);
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
break;
default:
@@ -2458,11 +2481,11 @@ out:
}
/**
- * single_erease_cmd - [GENERIC] NAND standard block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
+ * single_erase_cmd - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
*
- * Standard erase command for NAND chips
+ * Standard erase command for NAND chips.
*/
static void single_erase_cmd(struct mtd_info *mtd, int page)
{
@@ -2473,12 +2496,11 @@ static void single_erase_cmd(struct mtd_info *mtd, int page)
}
/**
- * multi_erease_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
+ * multi_erase_cmd - [GENERIC] AND specific block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
*
- * AND multi block erase command function
- * Erase 4 consecutive blocks
+ * AND multi block erase command function. Erase 4 consecutive blocks.
*/
static void multi_erase_cmd(struct mtd_info *mtd, int page)
{
@@ -2493,10 +2515,10 @@ static void multi_erase_cmd(struct mtd_info *mtd, int page)
/**
* nand_erase - [MTD Interface] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
+ * @mtd: MTD device structure
+ * @instr: erase instruction
*
- * Erase one ore more blocks
+ * Erase one or more blocks.
*/
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
@@ -2505,12 +2527,12 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
#define BBT_PAGE_MASK 0xffffff3f
/**
- * nand_erase_nand - [Internal] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- * @allowbbt: allow erasing the bbt area
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
*
- * Erase one ore more blocks
+ * Erase one or more blocks.
*/
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt)
@@ -2521,9 +2543,9 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
unsigned int bbt_masked_page = 0xffffffff;
loff_t len;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
- __func__, (unsigned long long)instr->addr,
- (unsigned long long)instr->len);
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
@@ -2545,8 +2567,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
- __func__);
+ pr_debug("%s: device is write protected!\n",
+ __func__);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
@@ -2555,7 +2577,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
* If BBT requires refresh, set the BBT page mask to see if the BBT
* should be rewritten. Otherwise the mask is set to 0xffffffff which
* can not be matched. This is also done when the bbt is actually
- * erased to avoid recusrsive updates
+ * erased to avoid recursive updates.
*/
if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
@@ -2566,20 +2588,18 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
instr->state = MTD_ERASING;
while (len) {
- /*
- * heck if we have a bad block, we do not erase bad blocks !
- */
+ /* Check if we have a bad block; we do not erase bad blocks! */
if (nand_block_checkbad(mtd, ((loff_t) page) <<
chip->page_shift, 0, allowbbt)) {
- printk(KERN_WARNING "%s: attempt to erase a bad block "
- "at page 0x%08x\n", __func__, page);
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
/*
* Invalidate the page cache, if we erase the block which
- * contains the current cached page
+ * contains the current cached page.
*/
if (page <= chip->pagebuf && chip->pagebuf <
(page + pages_per_block))
@@ -2599,8 +2619,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/* See if block erase succeeded */
if (status & NAND_STATUS_FAIL) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, "
- "page 0x%08x\n", __func__, page);
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
instr->state = MTD_ERASE_FAILED;
instr->fail_addr =
((loff_t)page << chip->page_shift);
@@ -2609,7 +2629,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/*
* If BBT requires refresh, set the BBT rewrite flag to the
- * page being erased
+ * page being erased.
*/
if (bbt_masked_page != 0xffffffff &&
(page & BBT_PAGE_MASK) == bbt_masked_page)
@@ -2628,7 +2648,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/*
* If BBT requires refresh and BBT-PERCHIP, set the BBT
- * page mask to see if this BBT should be rewritten
+ * page mask to see if this BBT should be rewritten.
*/
if (bbt_masked_page != 0xffffffff &&
(chip->bbt_td->options & NAND_BBT_PERCHIP))
@@ -2651,7 +2671,7 @@ erase_exit:
/*
* If BBT requires refresh and erase was successful, rewrite any
- * selected bad block tables
+ * selected bad block tables.
*/
if (bbt_masked_page == 0xffffffff || ret)
return ret;
@@ -2659,10 +2679,10 @@ erase_exit:
for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
if (!rewrite_bbt[chipnr])
continue;
- /* update the BBT for chip */
- DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt "
- "(%d:0x%0llx 0x%0x)\n", __func__, chipnr,
- rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]);
+ /* Update the BBT for chip */
+ pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
+ __func__, chipnr, rewrite_bbt[chipnr],
+ chip->bbt_td->pages[chipnr]);
nand_update_bbt(mtd, rewrite_bbt[chipnr]);
}
@@ -2672,15 +2692,15 @@ erase_exit:
/**
* nand_sync - [MTD Interface] sync
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Sync is actually a wait for chip ready function
+ * Sync is actually a wait for chip ready function.
*/
static void nand_sync(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
+ pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
nand_get_device(chip, mtd, FL_SYNCING);
@@ -2690,8 +2710,8 @@ static void nand_sync(struct mtd_info *mtd)
/**
* nand_block_isbad - [MTD Interface] Check if block at offset is bad
- * @mtd: MTD device structure
- * @offs: offset relative to mtd start
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
*/
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
@@ -2704,8 +2724,8 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
/**
* nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
*/
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
@@ -2714,7 +2734,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
ret = nand_block_isbad(mtd, ofs);
if (ret) {
- /* If it was bad already, return success and do nothing. */
+ /* If it was bad already, return success and do nothing */
if (ret > 0)
return 0;
return ret;
@@ -2725,7 +2745,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*/
static int nand_suspend(struct mtd_info *mtd)
{
@@ -2736,7 +2756,7 @@ static int nand_suspend(struct mtd_info *mtd)
/**
* nand_resume - [MTD Interface] Resume the NAND flash
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*/
static void nand_resume(struct mtd_info *mtd)
{
@@ -2745,13 +2765,11 @@ static void nand_resume(struct mtd_info *mtd)
if (chip->state == FL_PM_SUSPENDED)
nand_release_device(mtd);
else
- printk(KERN_ERR "%s called for a chip which is not "
- "in suspended state\n", __func__);
+ pr_err("%s called for a chip which is not in suspended state\n",
+ __func__);
}
-/*
- * Set default functions
- */
+/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip, int busw)
{
/* check for proper chip_delay setup, set 20us if not */
@@ -2793,23 +2811,21 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
}
-/*
- * sanitize ONFI strings so we can safely print them
- */
+/* Sanitize ONFI strings so we can safely print them */
static void sanitize_string(uint8_t *s, size_t len)
{
ssize_t i;
- /* null terminate */
+ /* Null terminate */
s[len - 1] = 0;
- /* remove non printable chars */
+ /* Remove non printable chars */
for (i = 0; i < len - 1; i++) {
if (s[i] < ' ' || s[i] > 127)
s[i] = '?';
}
- /* remove trailing spaces */
+ /* Remove trailing spaces */
strim(s);
}
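
sanitize_string() above null-terminates, replaces non-printable bytes with '?'
and trims trailing padding so the fixed-width, space-padded ONFI strings can be
printed safely. A sketch of the intended use, as it could sit next to
sanitize_string() in this file; the field names follow struct nand_onfi_params
(p->model is referenced later in this patch, p->manufacturer is assumed here):

	/* Illustrative: make the ONFI manufacturer/model fields printable. */
	static void example_print_onfi_ids(struct nand_onfi_params *p)
	{
		sanitize_string(p->manufacturer, sizeof(p->manufacturer));
		sanitize_string(p->model, sizeof(p->model));
		pr_info("ONFI device: %s %s\n", p->manufacturer, p->model);
	}
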
@@ -2826,28 +2842,28 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
}
/*
- * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
- int busw)
+ int *busw)
{
struct nand_onfi_params *p = &chip->onfi_params;
int i;
int val;
- /* try ONFI for unknow chip or LP */
+ /* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
return 0;
- printk(KERN_INFO "ONFI flash detected\n");
+ pr_info("ONFI flash detected\n");
chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
- printk(KERN_INFO "ONFI param page %d valid\n", i);
+ pr_info("ONFI param page %d valid\n", i);
break;
}
}
@@ -2855,7 +2871,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
if (i == 3)
return 0;
- /* check version */
+ /* Check version */
val = le16_to_cpu(p->revision);
if (val & (1 << 5))
chip->onfi_version = 23;
@@ -2871,8 +2887,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->onfi_version = 0;
if (!chip->onfi_version) {
- printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
- __func__, val);
+ pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
return 0;
}
@@ -2884,9 +2899,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
- busw = 0;
+ *busw = 0;
if (le16_to_cpu(p->features) & 1)
- busw = NAND_BUSWIDTH_16;
+ *busw = NAND_BUSWIDTH_16;
chip->options &= ~NAND_CHIPOPTIONS_MSK;
chip->options |= (NAND_NO_READRDY |
@@ -2896,7 +2911,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
}
/*
- * Get the flash and manufacturer id and lookup if the type is supported
+ * Get the flash and manufacturer id and lookup if the type is supported.
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
@@ -2913,7 +2928,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
- * after power-up
+ * after power-up.
*/
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
@@ -2924,7 +2939,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
*maf_id = chip->read_byte(mtd);
*dev_id = chip->read_byte(mtd);
- /* Try again to make sure, as some systems the bus-hold or other
+ /*
+ * Try again to make sure, as on some systems the bus-hold or other
* interface concerns can cause random data which looks like a
* possibly credible NAND flash to appear. If the two results do
* not match, ignore the device completely.
@@ -2936,9 +2952,9 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
id_data[i] = chip->read_byte(mtd);
if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
- printk(KERN_INFO "%s: second ID read did not match "
- "%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, *dev_id, id_data[0], id_data[1]);
+ pr_info("%s: second ID read did not match "
+ "%02x,%02x against %02x,%02x\n", __func__,
+ *maf_id, *dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
@@ -2952,7 +2968,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->onfi_version = 0;
if (!type->name || !type->pagesize) {
/* Check is chip is ONFI compliant */
- ret = nand_flash_detect_onfi(mtd, chip, busw);
+ ret = nand_flash_detect_onfi(mtd, chip, &busw);
if (ret)
goto ident_done;
}
@@ -2973,7 +2989,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chipsize = (uint64_t)type->chipsize << 20;
if (!type->pagesize && chip->init_size) {
- /* set the pagesize, oobsize, erasesize by the driver*/
+ /* Set the pagesize, oobsize, erasesize by the driver */
busw = chip->init_size(mtd, chip, id_data);
} else if (!type->pagesize) {
int extid;
@@ -3033,7 +3049,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
}
} else {
/*
- * Old devices have chip data hardcoded in the device id table
+ * Old devices have chip data hardcoded in the device id table.
*/
mtd->erasesize = type->erasesize;
mtd->writesize = type->pagesize;
@@ -3043,7 +3059,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/*
* Check for Spansion/AMD ID + repeating 5th, 6th byte since
* some Spansion chips have erasesize that conflicts with size
- * listed in nand_ids table
+ * listed in nand_ids table.
* Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
*/
if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
@@ -3057,15 +3073,16 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->options &= ~NAND_CHIPOPTIONS_MSK;
chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
- /* Check if chip is a not a samsung device. Do not clear the
- * options for chips which are not having an extended id.
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+ * options for chips which do not have an extended id.
*/
if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
ident_done:
/*
- * Set chip as a default. Board drivers can override it, if necessary
+ * Set chip as a default. Board drivers can override it, if necessary.
*/
chip->options |= NAND_NO_AUTOINCR;
@@ -3077,21 +3094,21 @@ ident_done:
/*
* Check, if buswidth is correct. Hardware drivers should set
- * chip correct !
+ * chip correct!
*/
if (busw != (chip->options & NAND_BUSWIDTH_16)) {
- printk(KERN_INFO "NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
- *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
- printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
- (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
- busw ? 16 : 8);
+ pr_info("NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
+ *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
+ pr_warn("NAND bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
return ERR_PTR(-EINVAL);
}
/* Calculate the address shift from the page size */
chip->page_shift = ffs(mtd->writesize) - 1;
- /* Convert chipsize to number of pages per chip -1. */
+ /* Convert chipsize to number of pages per chip -1 */
chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
chip->bbt_erase_shift = chip->phys_erase_shift =
@@ -3121,7 +3138,7 @@ ident_done:
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX))
- chip->options |= NAND_BBT_SCANLASTPAGE;
+ chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX ||
@@ -3129,17 +3146,7 @@ ident_done:
*maf_id == NAND_MFR_AMD)) ||
(mtd->writesize == 2048 &&
*maf_id == NAND_MFR_MICRON))
- chip->options |= NAND_BBT_SCAN2NDPAGE;
-
- /*
- * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6
- */
- if (!(busw & NAND_BUSWIDTH_16) &&
- *maf_id == NAND_MFR_STMICRO &&
- mtd->writesize == 2048) {
- chip->options |= NAND_BBT_SCANBYTE1AND6;
- chip->badblockpos = 0;
- }
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
/* Check for AND chips with 4 page planes */
if (chip->options & NAND_4PAGE_ARRAY)
@@ -3147,12 +3154,11 @@ ident_done:
else
chip->erase_cmd = single_erase_cmd;
- /* Do not replace user supplied command function ! */
+ /* Do not replace user supplied command function! */
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- /* TODO onfi flash name */
- printk(KERN_INFO "NAND device: Manufacturer ID:"
+ pr_info("NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
nand_manuf_ids[maf_idx].name,
chip->onfi_version ? chip->onfi_params.model : type->name);
@@ -3162,12 +3168,12 @@ ident_done:
/**
* nand_scan_ident - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
- * @table: Alternative NAND ID table
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
*
- * This is the first phase of the normal nand_scan() function. It
- * reads the flash ID and sets up MTD fields accordingly.
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
@@ -3189,7 +3195,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
- printk(KERN_WARNING "No NAND device found.\n");
+ pr_warn("No NAND device found\n");
chip->select_chip(mtd, -1);
return PTR_ERR(type);
}
@@ -3207,7 +3213,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
break;
}
if (i > 1)
- printk(KERN_INFO "%d NAND chips detected\n", i);
+ pr_info("%d NAND chips detected\n", i);
/* Store the number of chips and calc total size for mtd */
chip->numchips = i;
@@ -3220,11 +3226,11 @@ EXPORT_SYMBOL(nand_scan_ident);
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * This is the second phase of the normal nand_scan() function. It
- * fills out all the uninitialized function pointers with the defaults
- * and scans for a bad block table if appropriate.
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
*/
int nand_scan_tail(struct mtd_info *mtd)
{
@@ -3240,7 +3246,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->oob_poi = chip->buffers->databuf + mtd->writesize;
/*
- * If no default placement scheme is given, select an appropriate one
+ * If no default placement scheme is given, select an appropriate one.
*/
if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
switch (mtd->oobsize) {
@@ -3257,8 +3263,8 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.layout = &nand_oob_128;
break;
default:
- printk(KERN_WARNING "No oob scheme defined for "
- "oobsize %d\n", mtd->oobsize);
+ pr_warn("No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
BUG();
}
}
@@ -3267,7 +3273,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->write_page = nand_write_page;
/*
- * check ECC mode, default to software if 3byte/512byte hardware ECC is
+ * Check ECC mode, default to software if 3byte/512byte hardware ECC is
* selected and we have 256 byte pagesize, fall back to software ECC
*/
@@ -3276,15 +3282,15 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Similar to NAND_ECC_HW, but a separate read_page handle */
if (!chip->ecc.calculate || !chip->ecc.correct ||
!chip->ecc.hwctl) {
- printk(KERN_WARNING "No ECC functions supplied; "
- "Hardware ECC not possible\n");
+ pr_warn("No ECC functions supplied; "
+ "hardware ECC not possible\n");
BUG();
}
if (!chip->ecc.read_page)
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
case NAND_ECC_HW:
- /* Use standard hwecc read page function ? */
+ /* Use standard hwecc read page function? */
if (!chip->ecc.read_page)
chip->ecc.read_page = nand_read_page_hwecc;
if (!chip->ecc.write_page)
@@ -3305,11 +3311,11 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.read_page == nand_read_page_hwecc ||
!chip->ecc.write_page ||
chip->ecc.write_page == nand_write_page_hwecc)) {
- printk(KERN_WARNING "No ECC functions supplied; "
- "Hardware ECC not possible\n");
+ pr_warn("No ECC functions supplied; "
+ "hardware ECC not possible\n");
BUG();
}
- /* Use standard syndrome read/write page function ? */
+ /* Use standard syndrome read/write page function? */
if (!chip->ecc.read_page)
chip->ecc.read_page = nand_read_page_syndrome;
if (!chip->ecc.write_page)
@@ -3325,9 +3331,9 @@ int nand_scan_tail(struct mtd_info *mtd)
if (mtd->writesize >= chip->ecc.size)
break;
- printk(KERN_WARNING "%d byte HW ECC not possible on "
- "%d byte page size, fallback to SW ECC\n",
- chip->ecc.size, mtd->writesize);
+ pr_warn("%d byte HW ECC not possible on "
+ "%d byte page size, fallback to SW ECC\n",
+ chip->ecc.size, mtd->writesize);
chip->ecc.mode = NAND_ECC_SOFT;
case NAND_ECC_SOFT:
@@ -3347,7 +3353,7 @@ int nand_scan_tail(struct mtd_info *mtd)
case NAND_ECC_SOFT_BCH:
if (!mtd_nand_has_bch()) {
- printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n");
+ pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
BUG();
}
chip->ecc.calculate = nand_bch_calculate_ecc;
@@ -3362,8 +3368,8 @@ int nand_scan_tail(struct mtd_info *mtd)
/*
* Board driver should supply ecc.size and ecc.bytes values to
* select how many bits are correctable; see nand_bch_init()
- * for details.
- * Otherwise, default to 4 bits for large page devices
+ * for details. Otherwise, default to 4 bits for large page
+ * devices.
*/
if (!chip->ecc.size && (mtd->oobsize >= 64)) {
chip->ecc.size = 512;
@@ -3374,14 +3380,14 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.bytes,
&chip->ecc.layout);
if (!chip->ecc.priv) {
- printk(KERN_WARNING "BCH ECC initialization failed!\n");
+ pr_warn("BCH ECC initialization failed!\n");
BUG();
}
break;
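As the comment above notes, a board driver that opts into software BCH picks the correction strength by filling in ecc.size and ecc.bytes before nand_scan_tail() runs. A minimal sketch of an 8-bit-per-512-byte configuration (the 13-byte figure follows the usual 13-bits-per-correctable-bit rule for 512-byte codewords; nand_bch_init() remains the authority on which combinations are accepted):

	chip->ecc.mode  = NAND_ECC_SOFT_BCH;
	chip->ecc.size  = 512;	/* data bytes per ECC step */
	chip->ecc.bytes = 13;	/* 8 * 13 bits of parity -> 8 correctable bits per step */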
case NAND_ECC_NONE:
- printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. "
- "This is not recommended !!\n");
+ pr_warn("NAND_ECC_NONE selected by board driver. "
+ "This is not recommended!\n");
chip->ecc.read_page = nand_read_page_raw;
chip->ecc.write_page = nand_write_page_raw;
chip->ecc.read_oob = nand_read_oob_std;
@@ -3393,14 +3399,19 @@ int nand_scan_tail(struct mtd_info *mtd)
break;
default:
- printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
- chip->ecc.mode);
+ pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
BUG();
}
+ /* For many systems, the standard OOB read/write also works for raw access */
+ if (!chip->ecc.read_oob_raw)
+ chip->ecc.read_oob_raw = chip->ecc.read_oob;
+ if (!chip->ecc.write_oob_raw)
+ chip->ecc.write_oob_raw = chip->ecc.write_oob;
+
/*
* The number of bytes available for a client to place data into
- * the out of band area
+ * the out of band area.
*/
chip->ecc.layout->oobavail = 0;
for (i = 0; chip->ecc.layout->oobfree[i].length
@@ -3411,19 +3422,16 @@ int nand_scan_tail(struct mtd_info *mtd)
/*
* Set the number of read / write steps for one page depending on ECC
- * mode
+ * mode.
*/
chip->ecc.steps = mtd->writesize / chip->ecc.size;
if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
- printk(KERN_WARNING "Invalid ecc parameters\n");
+ pr_warn("Invalid ECC parameters\n");
BUG();
}
chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
- /*
- * Allow subpage writes up to ecc.steps. Not possible for MLC
- * FLASH.
- */
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
!(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
switch (chip->ecc.steps) {
@@ -3481,9 +3489,11 @@ int nand_scan_tail(struct mtd_info *mtd)
}
EXPORT_SYMBOL(nand_scan_tail);
-/* is_module_text_address() isn't exported, and it's mostly a pointless
+/*
+ * is_module_text_address() isn't exported, and it's mostly a pointless
* test if this is a module _anyway_ -- they'd have to try _really_ hard
- * to call us from in-kernel code if the core NAND support is modular. */
+ * to call us from in-kernel code if the core NAND support is modular.
+ */
#ifdef MODULE
#define caller_is_module() (1)
#else
@@ -3493,15 +3503,13 @@ EXPORT_SYMBOL(nand_scan_tail);
/**
* nand_scan - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
- *
- * This fills out all the uninitialized function pointers
- * with the defaults.
- * The flash ID is read and the mtd/chip structures are
- * filled with the appropriate values.
- * The mtd->owner field must be set to the module of the caller
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
*
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values. The mtd->owner field must be set to the module of the
+ * caller.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
@@ -3509,8 +3517,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
/* Many callers got this wrong, so check for it for a while... */
if (!mtd->owner && caller_is_module()) {
- printk(KERN_CRIT "%s called with NULL mtd->owner!\n",
- __func__);
+ pr_crit("%s called with NULL mtd->owner!\n", __func__);
BUG();
}
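The mtd->owner check above is why board drivers must set the owner before scanning. Drivers that need to tune ECC between identification and finalisation typically use the two-phase interface documented earlier in this file roughly as follows; this is a sketch only, board_nand_scan() is a hypothetical helper, the soft-ECC choice is arbitrary, and the third argument of nand_scan_ident() is the optional alternative ID table:

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int board_nand_scan(struct mtd_info *mtd, struct nand_chip *chip)
{
	int ret;

	mtd->priv = chip;
	mtd->owner = THIS_MODULE;	/* required, see the check above */

	/* Phase 1: read the flash ID and fill in the geometry fields */
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	/* Driver-specific ECC tuning goes here */
	chip->ecc.mode = NAND_ECC_SOFT;

	/* Phase 2: install default function pointers and scan for a BBT */
	return nand_scan_tail(mtd);
}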
@@ -3523,8 +3530,8 @@ EXPORT_SYMBOL(nand_scan);
/**
* nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd: MTD device structure
-*/
+ * @mtd: MTD device structure
+ */
void nand_release(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
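Taken together, the nand_base.c hunks above move every BBT-related flag out of chip->options and into the new chip->bbt_options field. For board drivers the conversion is mechanical; a flash-based, no-OOB table request, for example, changes like this (old names taken from the lines being removed in this patch):

	/* before this patch */
	chip->options |= NAND_USE_FLASH_BBT | NAND_USE_FLASH_BBT_NO_OOB;

	/* after this patch */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;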
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index ccbeaa1e4a8..69148ae3bf5 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -14,7 +14,7 @@
*
* When nand_scan_bbt is called, then it tries to find the bad block table
* depending on the options in the BBT descriptor(s). If no flash based BBT
- * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
* marked good / bad blocks. This information is used to create a memory BBT.
* Once a new bad block is discovered then the "factory" information is updated
* on the device.
@@ -36,9 +36,9 @@
* The table is marked in the OOB area with an ident pattern and a version
* number which indicates which of both tables is more up to date. If the NAND
* controller needs the complete OOB area for the ECC information then the
- * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern
- * and the version byte into the data area and the OOB area will remain
- * untouched.
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
*
* The table uses 2 bits per block
* 11b: block is good
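The same 2-bits-per-block layout is used for the in-memory table that nand_scan_bbt() allocates, so a status lookup is a shift-and-mask, exactly as nand_isbad_bbt() near the end of this file does (restated here for orientation; 0x00 means good, non-zero values mark bad or reserved blocks):

	block = (int)(offs >> (this->bbt_erase_shift - 1));
	res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;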
@@ -67,6 +67,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
{
@@ -80,17 +81,15 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
/**
* check_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @len: the length of buffer to search
- * @paglen: the pagelength
- * @td: search pattern descriptor
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
*
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers.
- * If the SCAN_EMPTY option is set then check, if all bytes except the
- * pattern area contain 0xff
- *
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. If the SCAN_EMPTY option is set then check if
+ * all bytes except the pattern area contain 0xff.
+ */
static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
int i, end = 0;
@@ -109,32 +108,8 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
p += end;
/* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[i] != td->pattern[i])
- return -1;
- }
-
- /* Check both positions 1 and 6 for pattern? */
- if (td->options & NAND_BBT_SCANBYTE1AND6) {
- if (td->options & NAND_BBT_SCANEMPTY) {
- p += td->len;
- end += NAND_SMALL_BADBLOCK_POS - td->offs;
- /* Check region between positions 1 and 6 */
- for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len;
- i++) {
- if (*p++ != 0xff)
- return -1;
- }
- }
- else {
- p += NAND_SMALL_BADBLOCK_POS - td->offs;
- }
- /* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[i] != td->pattern[i])
- return -1;
- }
- }
+ if (memcmp(p, td->pattern, td->len))
+ return -1;
if (td->options & NAND_BBT_SCANEMPTY) {
p += td->len;
@@ -149,14 +124,13 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @td: search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers. Same as check_pattern, but
- * no optional empty check
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
*
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
{
int i;
@@ -167,21 +141,14 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
if (p[td->offs + i] != td->pattern[i])
return -1;
}
- /* Need to check location 1 AND 6? */
- if (td->options & NAND_BBT_SCANBYTE1AND6) {
- for (i = 0; i < td->len; i++) {
- if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i])
- return -1;
- }
- }
return 0;
}
/**
* add_marker_len - compute the length of the marker in data area
- * @td: BBT descriptor used for computation
+ * @td: BBT descriptor used for computation
*
- * The length will be 0 if the markeris located in OOB area.
+ * The length will be 0 if the marker is located in OOB area.
*/
static u32 add_marker_len(struct nand_bbt_descr *td)
{
@@ -198,34 +165,33 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @page: the starting page
- * @num: the number of bbt descriptors to read
- * @td: the bbt describtion table
- * @offs: offset in the memory table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: offset in the memory table
*
* Read the bad block table starting from page.
- *
*/
static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
struct nand_bbt_descr *td, int offs)
{
- int res, i, j, act = 0;
+ int res, ret = 0, i, j, act = 0;
struct nand_chip *this = mtd->priv;
size_t retlen, len, totlen;
loff_t from;
int bits = td->options & NAND_BBT_NRBITS_MSK;
- uint8_t msk = (uint8_t) ((1 << bits) - 1);
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
u32 marker_len;
int reserved_block_code = td->reserved_block_code;
totlen = (num * bits) >> 3;
marker_len = add_marker_len(td);
- from = ((loff_t) page) << this->page_shift;
+ from = ((loff_t)page) << this->page_shift;
while (totlen) {
- len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
+ len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
if (marker_len) {
/*
* In case the BBT marker is not in the OOB area it
@@ -237,11 +203,18 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
}
res = mtd->read(mtd, from, len, &retlen, buf);
if (res < 0) {
- if (retlen != len) {
- printk(KERN_INFO "nand_bbt: Error reading bad block table\n");
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
return res;
}
- printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
}
/* Analyse data */
@@ -252,17 +225,19 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
- printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+ (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
mtd->ecc_stats.bbtblocks++;
continue;
}
- /* Leave it for now, if its matured we can move this
- * message to MTD_DEBUG_LEVEL0 */
- printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
- /* Factory marked bad or worn out ? */
+ /*
+ * Leave it for now, if it's matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+ (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ /* Factory marked bad or worn out? */
if (tmp == 0)
this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
else
@@ -273,20 +248,20 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
totlen -= len;
from += len;
}
- return 0;
+ return ret;
}
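The mtd_is_eccerr()/mtd_is_bitflip() helpers introduced into this read path classify the MTD error codes: a corrected bitflip is reported but tolerated (and later triggers a scrub), while a hard ECC error aborts the read. Conceptually the helpers are thin wrappers of this shape (a sketch of the convention, not the literal header text):

static inline int mtd_is_bitflip(int err)		/* corrected; scrub advised */
{
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err)		/* uncorrectable ECC failure */
{
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err)
{
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}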
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @chip: read the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
*
- * Read the bad block table for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
-*/
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
{
struct nand_chip *this = mtd->priv;
@@ -312,9 +287,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
return 0;
}
-/*
- * BBT marker is in the first page, no OOB.
- */
+/* BBT marker is in the first page, no OOB */
static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
struct nand_bbt_descr *td)
{
@@ -328,35 +301,26 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
return mtd->read(mtd, offs, len, &retlen, buf);
}
-/*
- * Scan read raw data from flash
- */
+/* Scan read raw data from flash */
static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
-
while (len > 0) {
- if (len <= mtd->writesize) {
- ops.oobbuf = buf + len;
- ops.datbuf = buf;
- ops.len = len;
- return mtd->read_oob(mtd, offs, &ops);
- } else {
- ops.oobbuf = buf + mtd->writesize;
- ops.datbuf = buf;
- ops.len = mtd->writesize;
- res = mtd->read_oob(mtd, offs, &ops);
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
- if (res)
- return res;
- }
+ res = mtd->read_oob(mtd, offs, &ops);
+
+ if (res)
+ return res;
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
@@ -373,15 +337,13 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
return scan_read_raw_oob(mtd, buf, offs, len);
}
-/*
- * Scan write data with oob to flash
- */
+/* Scan write data with oob to flash */
static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.datbuf = buf;
@@ -402,15 +364,14 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
*
- * Read the bad block table(s) for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
- *
-*/
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
@@ -421,8 +382,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
mtd->writesize, td);
td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
- printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
- td->pages[0], td->version[0]);
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
}
/* Read the mirror version, if available */
@@ -430,15 +391,13 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
mtd->writesize, td);
md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
- printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
- md->pages[0], md->version[0]);
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
}
return 1;
}
-/*
- * Scan a given block full
- */
+/* Scan a given block full */
static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, size_t readlen,
int scanlen, int len)
@@ -446,7 +405,8 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
int ret, j;
ret = scan_read_raw_oob(mtd, buf, offs, readlen);
- if (ret)
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
for (j = 0; j < len; j++, buf += scanlen) {
@@ -456,9 +416,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
return 0;
}
-/*
- * Scan a given block partially
- */
+/* Scan a given block partially */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, int len)
{
@@ -469,16 +427,16 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
ops.oobbuf = buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
for (j = 0; j < len; j++) {
/*
- * Read the full oob until read_oob is fixed to
- * handle single byte reads for 16 bit
- * buswidth
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
*/
ret = mtd->read_oob(mtd, offs, &ops);
- if (ret)
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
if (check_short_pattern(buf, bd))
@@ -491,14 +449,14 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
- * @chip: create the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
*
- * Create a bad block table by scanning the device
- * for the given good/bad block identify pattern
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
*/
static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
@@ -509,7 +467,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
loff_t from;
size_t readlen;
- printk(KERN_INFO "Scanning device for bad blocks\n");
+ pr_info("Scanning device for bad blocks\n");
if (bd->options & NAND_BBT_SCANALLPAGES)
len = 1 << (this->bbt_erase_shift - this->page_shift);
@@ -529,14 +487,16 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
}
if (chip == -1) {
- /* Note that numblocks is 2 * (real numblocks) here, see i+=2
- * below as it makes shifting and masking less painful */
+ /*
+ * Note that numblocks is 2 * (real numblocks) here, see i+=2
+ * below as it makes shifting and masking less painful
+ */
numblocks = mtd->size >> (this->bbt_erase_shift - 1);
startblock = 0;
from = 0;
} else {
if (chip >= this->numchips) {
- printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
chip + 1, this->numchips);
return -EINVAL;
}
@@ -546,7 +506,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
- if (this->options & NAND_BBT_SCANLASTPAGE)
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
from += mtd->erasesize - (mtd->writesize * len);
for (i = startblock; i < numblocks;) {
@@ -565,8 +525,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
if (ret) {
this->bbt[i >> 3] |= 0x03 << (i & 0x6);
- printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
- i >> 1, (unsigned long long)from);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i >> 1, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
}
@@ -578,20 +538,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
*
- * Read the bad block table by searching for a given ident pattern.
- * Search is preformed either from the beginning up or from the end of
- * the device downwards. The search starts always at the start of a
- * block.
- * If the option NAND_BBT_PERCHIP is given, each chip is searched
- * for a bbt, which contains the bad block information of this chip.
- * This is necessary to provide support for certain DOC devices.
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search starts always at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
*
- * The bbt ident pattern resides in the oob area of the first page
- * in a block.
+ * The bbt ident pattern resides in the oob area of the first page in a block.
*/
static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
{
@@ -602,7 +560,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int bbtblocks;
int blocktopage = this->bbt_erase_shift - this->page_shift;
- /* Search direction top -> down ? */
+ /* Search direction top -> down? */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = (mtd->size >> this->bbt_erase_shift) - 1;
dir = -1;
@@ -611,7 +569,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
dir = 1;
}
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
bbtblocks = this->chipsize >> this->bbt_erase_shift;
@@ -650,23 +608,23 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/* Check, if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
if (td->pages[i] == -1)
- printk(KERN_WARNING "Bad block table not found for chip %d\n", i);
+ pr_warn("Bad block table not found for chip %d\n", i);
else
- printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i],
- td->version[i]);
+ pr_info("Bad block table found at page %d, version "
+ "0x%02X\n", td->pages[i], td->version[i]);
}
return 0;
}
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
*
- * Search and read the bad block table(s)
-*/
+ * Search and read the bad block table(s).
+ */
static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
/* Search the primary table */
@@ -682,16 +640,14 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt
/**
* write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
*
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- * @chipsel: selector for a specific chip, -1 for all
- *
- * (Re)write the bad block table
- *
-*/
+ * (Re)write the bad block table.
+ */
static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
@@ -710,14 +666,14 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
ops.ooblen = mtd->oobsize;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
if (!rcode)
rcode = 0xff;
- /* Write bad block table per chip rather than per device ? */
+ /* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
- /* Full device write or specific chip ? */
+ /* Full device write or specific chip? */
if (chipsel == -1) {
nrchips = this->numchips;
} else {
@@ -731,8 +687,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Loop through the chips */
for (; chip < nrchips; chip++) {
-
- /* There was already a version of the table, reuse the page
+ /*
+ * There was already a version of the table, reuse the page
* This applies for absolute placement too, as we have the
* page nr. in td->pages.
*/
@@ -741,8 +697,10 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
goto write;
}
- /* Automatic placement of the bad block table */
- /* Search direction top -> down ? */
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = numblocks * (chip + 1) - 1;
dir = -1;
@@ -766,7 +724,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (!md || md->pages[chip] != page)
goto write;
}
- printk(KERN_ERR "No space left to write bad block table\n");
+ pr_err("No space left to write bad block table\n");
return -ENOSPC;
write:
@@ -791,24 +749,22 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
bbtoffs = chip * (numblocks >> 2);
- to = ((loff_t) page) << this->page_shift;
+ to = ((loff_t)page) << this->page_shift;
- /* Must we save the block contents ? */
+ /* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
/* Make it block aligned */
- to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
+ to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
res = mtd->read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
- printk(KERN_INFO "nand_bbt: Error "
- "reading block for writing "
- "the bad block table\n");
+ pr_info("nand_bbt: error reading block "
+ "for writing the bad block table\n");
return res;
}
- printk(KERN_WARNING "nand_bbt: ECC error "
- "while reading block for writing "
- "bad block table\n");
+ pr_warn("nand_bbt: ECC error while reading "
+ "block for writing bad block table\n");
}
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
@@ -821,19 +777,19 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
pageoffs = page - (int)(to >> this->page_shift);
offs = pageoffs << this->page_shift;
/* Preset the bbt area with 0xff */
- memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
ooboffs = len + (pageoffs * mtd->oobsize);
} else if (td->options & NAND_BBT_NO_OOB) {
ooboffs = 0;
offs = td->len;
- /* the version byte */
+ /* The version byte */
if (td->options & NAND_BBT_VERSION)
offs++;
/* Calc length */
- len = (size_t) (numblocks >> sft);
+ len = (size_t)(numblocks >> sft);
len += offs;
- /* Make it page aligned ! */
+ /* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len);
@@ -841,8 +797,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
memcpy(buf, td->pattern, td->len);
} else {
/* Calc length */
- len = (size_t) (numblocks >> sft);
- /* Make it page aligned ! */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len +
@@ -856,13 +812,13 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (td->options & NAND_BBT_VERSION)
buf[ooboffs + td->veroffs] = td->version[chip];
- /* walk through the memory table */
+ /* Walk through the memory table */
for (i = 0; i < numblocks;) {
uint8_t dat;
dat = this->bbt[bbtoffs + (i >> 2)];
for (j = 0; j < 4; j++, i++) {
int sftcnt = (i << (3 - sft)) & sftmsk;
- /* Do not store the reserved bbt blocks ! */
+ /* Do not store the reserved bbt blocks! */
buf[offs + (i >> sft)] &=
~(msk[dat & 0x03] << sftcnt);
dat >>= 2;
@@ -883,8 +839,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (res < 0)
goto outerr;
- printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
- "0x%02X\n", (unsigned long long)to, td->version[chip]);
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
/* Mark it as used */
td->pages[chip] = page;
@@ -892,19 +848,18 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
return 0;
outerr:
- printk(KERN_WARNING
- "nand_bbt: Error while writing bad block table %d\n", res);
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
return res;
}
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
- * The function creates a memory based bbt by scanning the device
- * for manufacturer / software marked good / bad blocks
-*/
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
@@ -915,25 +870,24 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
*
- * The function checks the results of the previous call to read_bbt
- * and creates / updates the bbt(s) if necessary
- * Creation is necessary if no bbt was found for the chip/device
- * Update is necessary if one of the tables is missing or the
- * version nr. of one table is less than the other
-*/
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ */
static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
{
- int i, chips, writeops, chipsel, res;
+ int i, chips, writeops, create, chipsel, res, res2;
struct nand_chip *this = mtd->priv;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
struct nand_bbt_descr *rd, *rd2;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
chips = this->numchips;
else
@@ -941,86 +895,98 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
for (i = 0; i < chips; i++) {
writeops = 0;
+ create = 0;
rd = NULL;
rd2 = NULL;
- /* Per chip or per device ? */
+ res = res2 = 0;
+ /* Per chip or per device? */
chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
- /* Mirrored table available ? */
+ /* Mirrored table available? */
if (md) {
if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
writeops = 0x03;
- goto create;
- }
-
- if (td->pages[i] == -1) {
+ } else if (td->pages[i] == -1) {
rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
- goto writecheck;
- }
-
- if (md->pages[i] == -1) {
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
- goto writecheck;
- }
-
- if (td->version[i] == md->version[i]) {
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
rd = td;
if (!(td->options & NAND_BBT_VERSION))
rd2 = md;
- goto writecheck;
- }
-
- if (((int8_t) (td->version[i] - md->version[i])) > 0) {
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
+ writeops = 0x02;
} else {
rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
+ writeops = 0x01;
}
-
- goto writecheck;
-
} else {
if (td->pages[i] == -1) {
+ create = 1;
writeops = 0x01;
- goto create;
+ } else {
+ rd = td;
}
- rd = td;
- goto writecheck;
}
- create:
- /* Create the bad block table by scanning the device ? */
- if (!(td->options & NAND_BBT_CREATE))
- continue;
- /* Create the table in memory by scanning the chip(s) */
- if (!(this->options & NAND_CREATE_EMPTY_BBT))
- create_bbt(mtd, buf, bd, chipsel);
-
- td->version[i] = 1;
- if (md)
- md->version[i] = 1;
- writecheck:
- /* read back first ? */
- if (rd)
- read_abs_bbt(mtd, buf, rd, chipsel);
- /* If they weren't versioned, read both. */
- if (rd2)
- read_abs_bbt(mtd, buf, rd2, chipsel);
-
- /* Write the bad block table to the device ? */
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(mtd, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
return res;
}
- /* Write the mirror bad block table to the device ? */
+ /* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
if (res < 0)
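The rewritten check_create() above flattens the old goto chain into one pass per chip, with writeops acting as a two-bit mask: bit 0 requests a rewrite of the main table, bit 1 of the mirror. A compressed paraphrase of the cases handled in the hunk (for reading the diff, not a literal copy of the code):

/*
 * td and md both missing       -> create in RAM,  writeops = 0x03 (write both)
 * only td missing              -> read md,        writeops = 0x01
 * only md missing              -> read td,        writeops = 0x02
 * versions equal               -> read td (and md if unversioned), no write
 * td newer than md             -> read td,        writeops = 0x02
 * md newer than td             -> read md,        writeops = 0x01
 * any read reported a bitflip  -> writeops = 0x03 (scrub both copies)
 */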
@@ -1032,20 +998,19 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/**
* mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
- * @td: bad block table descriptor
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
*
- * The bad block table regions are marked as "bad" to prevent
- * accidental erasures / writes. The regions are identified by
- * the mark 0x02.
-*/
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
struct nand_chip *this = mtd->priv;
int i, j, chips, block, nrblocks, update;
uint8_t oldval, newval;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
@@ -1082,9 +1047,11 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
update = 1;
block += 2;
}
- /* If we want reserved blocks to be recorded to flash, and some
- new ones have been marked, then we need to update the stored
- bbts. This should only happen once. */
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
if (update && td->reserved_block_code)
nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
}
@@ -1092,8 +1059,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
/**
* verify_bbt_descr - verify the bad block description
- * @mtd: MTD device structure
- * @bd: the table to verify
+ * @mtd: MTD device structure
+ * @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
* table.
@@ -1111,16 +1078,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
pattern_len = bd->len;
bits = bd->options & NAND_BBT_NRBITS_MSK;
- BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) &&
- !(this->options & NAND_USE_FLASH_BBT));
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
BUG_ON(!bits);
if (bd->options & NAND_BBT_VERSION)
pattern_len++;
if (bd->options & NAND_BBT_NO_OOB) {
- BUG_ON(!(this->options & NAND_USE_FLASH_BBT));
- BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB));
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
BUG_ON(bd->offs);
if (bd->options & NAND_BBT_VERSION)
BUG_ON(bd->veroffs != bd->len);
@@ -1140,18 +1107,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function checks, if a bad block table(s) is/are already
- * available. If not it scans the device for manufacturer
- * marked good / bad blocks and writes the bad block table(s) to
- * the selected place.
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
- * The bad block table memory is allocated here. It must be freed
- * by calling the nand_free_bbt function.
+ * The function checks whether a bad block table is already available. If
+ * not, it scans the device for manufacturer marked good / bad blocks and writes
+ * the bad block table(s) to the selected place.
*
-*/
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
@@ -1161,19 +1126,21 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
struct nand_bbt_descr *md = this->bbt_md;
len = mtd->size >> (this->bbt_erase_shift + 2);
- /* Allocate memory (2bit per block) and clear the memory bad block table */
+ /*
+ * Allocate memory (2bit per block) and clear the memory bad block
+ * table.
+ */
this->bbt = kzalloc(len, GFP_KERNEL);
- if (!this->bbt) {
- printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
+ if (!this->bbt)
return -ENOMEM;
- }
- /* If no primary table decriptor is given, scan the device
- * to build a memory based bad block table
+ /*
+ * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
*/
if (!td) {
if ((res = nand_memory_bbt(mtd, bd))) {
- printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n");
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
kfree(this->bbt);
this->bbt = NULL;
}
@@ -1187,13 +1154,12 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
len += (len >> this->page_shift) * mtd->oobsize;
buf = vmalloc(len);
if (!buf) {
- printk(KERN_ERR "nand_bbt: Out of memory\n");
kfree(this->bbt);
this->bbt = NULL;
return -ENOMEM;
}
- /* Is the bbt at a given page ? */
+ /* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
res = read_abs_bbts(mtd, buf, td, md);
} else {
@@ -1215,15 +1181,15 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/**
* nand_update_bbt - [NAND Interface] update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
*
- * The function updates the bad block table(s)
-*/
+ * The function updates the bad block table(s).
+ */
int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
{
struct nand_chip *this = mtd->priv;
- int len, res = 0, writeops = 0;
+ int len, res = 0;
int chip, chipsel;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
@@ -1236,14 +1202,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "nand_update_bbt: Out of memory\n");
+ if (!buf)
return -ENOMEM;
- }
-
- writeops = md != NULL ? 0x03 : 0x01;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chip = (int)(offs >> this->chip_shift);
chipsel = chip;
@@ -1256,14 +1218,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
if (md)
md->version[chip]++;
- /* Write the bad block table to the device ? */
- if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
goto out;
}
- /* Write the mirror bad block table to the device ? */
- if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
}
@@ -1272,8 +1234,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
return res;
}
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks. */
+/*
+ * Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
@@ -1285,8 +1249,7 @@ static struct nand_bbt_descr agand_flashbased = {
.pattern = scan_agand_pattern
};
-/* Generic flash bbt decriptors
-*/
+/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
@@ -1330,31 +1293,27 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
.pattern = mirror_pattern
};
-#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \
- NAND_BBT_SCANBYTE1AND6)
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
/**
- * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure
- * @this: NAND chip to create descriptor for
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
*
* This function allocates and initializes a nand_bbt_descr for BBM detection
- * based on the properties of "this". The new descriptor is stored in
+ * based on the properties of @this. The new descriptor is stored in
* this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
* passed to this function.
- *
*/
-static int nand_create_default_bbt_descr(struct nand_chip *this)
+static int nand_create_badblock_pattern(struct nand_chip *this)
{
struct nand_bbt_descr *bd;
if (this->badblock_pattern) {
- printk(KERN_WARNING "BBT descr already allocated; not replacing.\n");
+ pr_warn("Bad block pattern already allocated; not replacing\n");
return -EINVAL;
}
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
- if (!bd) {
- printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n");
+ if (!bd)
return -ENOMEM;
- }
- bd->options = this->options & BBT_SCAN_OPTIONS;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
bd->offs = this->badblockpos;
bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
bd->pattern = scan_ff_pattern;
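For a common large-page x16 part, the descriptor assembled here ends up equivalent to the following (illustrative values only; offs comes from this->badblockpos and options from whatever bbt_options survive BADBLOCK_SCAN_MASK):

	static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

	static struct nand_bbt_descr badblock_pattern_example = {
		.options = NAND_BBT_SCAN2NDPAGE,
		.offs	 = 0,		/* badblockpos for large-page devices */
		.len	 = 2,		/* two marker bytes on a 16-bit bus */
		.pattern = scan_ff_pattern,
	};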
@@ -1365,22 +1324,20 @@ static int nand_create_default_bbt_descr(struct nand_chip *this)
/**
* nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
- *
- * This function selects the default bad block table
- * support for the device and calls the nand_scan_bbt function
+ * @mtd: MTD device structure
*
-*/
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
int nand_default_bbt(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
- /* Default for AG-AND. We must use a flash based
- * bad block table as the devices have factory marked
- * _good_ blocks. Erasing those blocks leads to loss
- * of the good / bad information, so we _must_ store
- * this information in a good / bad table during
- * startup
+ /*
+ * Default for AG-AND. We must use a flash based bad block table as the
+ * devices have factory marked _good_ blocks. Erasing those blocks
+ * leads to loss of the good / bad information, so we _must_ store this
+ * information in a good / bad table during startup.
*/
if (this->options & NAND_IS_AND) {
/* Use the default pattern descriptors */
@@ -1388,15 +1345,15 @@ int nand_default_bbt(struct mtd_info *mtd)
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
}
- this->options |= NAND_USE_FLASH_BBT;
+ this->bbt_options |= NAND_BBT_USE_FLASH;
return nand_scan_bbt(mtd, &agand_flashbased);
}
- /* Is a flash based bad block table requested ? */
- if (this->options & NAND_USE_FLASH_BBT) {
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
/* Use the default pattern descriptors */
if (!this->bbt_td) {
- if (this->options & NAND_USE_FLASH_BBT_NO_OOB) {
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
this->bbt_td = &bbt_main_no_bbt_descr;
this->bbt_md = &bbt_mirror_no_bbt_descr;
} else {
@@ -1410,18 +1367,17 @@ int nand_default_bbt(struct mtd_info *mtd)
}
if (!this->badblock_pattern)
- nand_create_default_bbt_descr(this);
+ nand_create_badblock_pattern(this);
return nand_scan_bbt(mtd, this->badblock_pattern);
}
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd: MTD device structure
- * @offs: offset in the device
- * @allowbbt: allow access to bad block table region
- *
-*/
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
struct nand_chip *this = mtd->priv;
@@ -1432,8 +1388,9 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
block = (int)(offs >> (this->bbt_erase_shift - 1));
res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
- DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
- (unsigned int)offs, block >> 1, res);
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
+ "(block %d) 0x%02x\n",
+ (unsigned int)offs, block >> 1, res);
switch ((int)res) {
case 0x00:
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index 0f931e75711..3803e0bba23 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -93,8 +93,8 @@ int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
/* else error in ecc, no action needed */
- DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n",
- __func__, errloc[i]);
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
}
} else if (count < 0) {
printk(KERN_ERR "ecc unrecoverable error\n");
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 271b8e735e8..b7cfe0d3712 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -110,7 +110,7 @@ static const char bitsperbyte[256] = {
/*
* addressbits is a lookup table to filter out the bits from the xor-ed
- * ecc data that identify the faulty location.
+ * ECC data that identify the faulty location.
* this is only used for repairing parity
* see the comments in nand_correct_data for more details
*/
@@ -153,7 +153,7 @@ static const char addressbits[256] = {
* __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
* block
* @buf: input buffer with raw data
- * @eccsize: data bytes per ecc step (256 or 512)
+ * @eccsize: data bytes per ECC step (256 or 512)
* @code: output buffer with ECC
*/
void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
@@ -348,7 +348,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
rp17 = (par ^ rp16) & 0xff;
/*
- * Finally calculate the ecc bits.
+ * Finally calculate the ECC bits.
* Again here it might seem that there are performance optimisations
* possible, but benchmarks showed that on the system this is developed
* the code below is the fastest
@@ -436,7 +436,7 @@ EXPORT_SYMBOL(nand_calculate_ecc);
* @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
- * @eccsize: data bytes per ecc step (256 or 512)
+ * @eccsize: data bytes per ECC step (256 or 512)
*
* Detect and correct a 1 bit error for eccsize byte block
*/
@@ -505,7 +505,7 @@ int __nand_correct_data(unsigned char *buf,
}
/* count nr of bits; use table lookup, faster than calculating it */
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
- return 1; /* error in ecc data; no action needed */
+ return 1; /* error in ECC data; no action needed */
printk(KERN_ERR "uncorrectable error : ");
return -1;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 357e8c5252a..34c03be7730 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2273,9 +2273,9 @@ static int __init ns_init_module(void)
switch (bbt) {
case 2:
- chip->options |= NAND_USE_FLASH_BBT_NO_OOB;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
case 1:
- chip->options |= NAND_USE_FLASH_BBT;
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
case 0:
break;
default:
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ea2dea8a9c8..ee1713907b9 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -42,7 +42,6 @@ struct ndfc_controller {
struct nand_chip chip;
int chip_select;
struct nand_hw_control ndfc_control;
- struct mtd_partition *parts;
};
static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -159,13 +158,9 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
static int ndfc_chip_init(struct ndfc_controller *ndfc,
struct device_node *node)
{
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- static const char *part_types[] = { "cmdlinepart", NULL };
-#else
- static const char *part_types[] = { NULL };
-#endif
struct device_node *flash_np;
struct nand_chip *chip = &ndfc->chip;
+ struct mtd_part_parser_data ppdata;
int ret;
chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
@@ -193,6 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (!flash_np)
return -ENODEV;
+ ppdata.of_node = flash_np;
ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
dev_name(&ndfc->ofdev->dev), flash_np->name);
if (!ndfc->mtd.name) {
@@ -204,18 +200,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (ret)
goto err;
- ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
- if (ret < 0)
- goto err;
-
- if (ret == 0) {
- ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
- &ndfc->parts);
- if (ret < 0)
- goto err;
- }
-
- ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
+ ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);
err:
of_node_put(flash_np);
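Several drivers in this series (ndfc here, omap2 and orion further down) drop their open-coded parse_mtd_partitions() sequences in favour of the consolidated helper. The shape of the new registration call, as used in this hunk, is roughly (the NULL parser-type list selects the kernel's default set, ppdata hands the device-tree node to OF-aware parsers, and the trailing NULL/0 are the fallback partition array and its length):

	struct mtd_part_parser_data ppdata = { };

	ppdata.of_node = flash_np;
	ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);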
@@ -288,6 +273,7 @@ static int __devexit ndfc_remove(struct platform_device *ofdev)
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
nand_release(&ndfc->mtd);
+ kfree(ndfc->mtd.name);
return 0;
}
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index b6a5c86ab31..b463ecfb4c1 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -187,6 +187,7 @@ static int nomadik_nand_remove(struct platform_device *pdev)
pdata->exit();
if (host) {
+ nand_release(&host->mtd);
iounmap(host->cmd_va);
iounmap(host->data_va);
iounmap(host->addr_va);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 9c30a0b0317..fa8faedfad6 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -339,6 +339,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev)
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
+ nand_release(&nuc900_nand->mtd);
iounmap(nuc900_nand->reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0db2c0e7656..f745f00f316 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
@@ -94,8 +95,6 @@
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
@@ -114,7 +113,6 @@ struct omap_nand_info {
struct nand_hw_control controller;
struct omap_nand_platform_data *pdata;
struct mtd_info mtd;
- struct mtd_partition *parts;
struct nand_chip nand;
struct platform_device *pdev;
@@ -744,12 +742,12 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
case 1:
/* Uncorrectable error */
- DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ pr_debug("ECC UNCORRECTED_ERROR 1\n");
return -1;
case 11:
/* UN-Correctable error */
- DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+ pr_debug("ECC UNCORRECTED_ERROR B\n");
return -1;
case 12:
@@ -766,8 +764,8 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
- DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
- "offset: %d, bit: %d\n", find_byte, find_bit);
+ pr_debug("Correcting single bit ECC error at offset: "
+ "%d, bit: %d\n", find_byte, find_bit);
page_data[find_byte] ^= (1 << find_bit);
@@ -779,7 +777,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
ecc_data2[2] == 0)
return 0;
}
- DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+ pr_debug("UNCORRECTED_ERROR default\n");
return -1;
}
}
@@ -1103,13 +1101,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
- if (err > 0)
- mtd_device_register(&info->mtd, info->parts, err);
- else if (pdata->parts)
- mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
- else
- mtd_device_register(&info->mtd, NULL, 0);
+ mtd_device_parse_register(&info->mtd, NULL, 0,
+ pdata->parts, pdata->nr_parts);
platform_set_drvdata(pdev, &info->mtd);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 7794d0680f9..29f505adaf8 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,8 +21,6 @@
#include <mach/hardware.h>
#include <plat/orion_nand.h>
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *nc = mtd->priv;
@@ -81,8 +79,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *io_base;
int ret = 0;
- struct mtd_partition *partitions = NULL;
- int num_part = 0;
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
@@ -132,17 +128,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
goto no_dev;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "orion_nand";
- num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
-#endif
- /* If cmdline partitions have been passed, let them be used */
- if (num_part <= 0) {
- num_part = board->nr_parts;
- partitions = board->parts;
- }
-
- ret = mtd_device_register(mtd, partitions, num_part);
+ ret = mtd_device_parse_register(mtd, NULL, 0,
+ board->parts, board->nr_parts);
if (ret) {
nand_release(mtd);
goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index b1aa41b8a4e..a97264ececd 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,8 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
chip->ecc.mode = NAND_ECC_SOFT;
/* Enable the following for a flash based bad block table */
- chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+ chip->options = NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
if (nand_scan(pasemi_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 633c04bf76f..ea8e1234e0e 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,8 +21,6 @@ struct plat_nand_data {
struct nand_chip chip;
struct mtd_info mtd;
void __iomem *io_base;
- int nr_parts;
- struct mtd_partition *parts;
};
/*
@@ -79,6 +77,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
data->chip.read_buf = pdata->ctrl.read_buf;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
+ data->chip.bbt_options |= pdata->chip.bbt_options;
data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
data->chip.ecc.layout = pdata->chip.ecclayout;
@@ -99,23 +98,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
goto out;
}
- if (pdata->chip.part_probe_types) {
- err = parse_mtd_partitions(&data->mtd,
- pdata->chip.part_probe_types,
- &data->parts, 0);
- if (err > 0) {
- mtd_device_register(&data->mtd, data->parts, err);
- return 0;
- }
- }
- if (pdata->chip.set_parts)
- pdata->chip.set_parts(data->mtd.size, &pdata->chip);
- if (pdata->chip.partitions) {
- data->parts = pdata->chip.partitions;
- err = mtd_device_register(&data->mtd, data->parts,
- pdata->chip.nr_partitions);
- } else
- err = mtd_device_register(&data->mtd, NULL, 0);
+ err = mtd_device_parse_register(&data->mtd,
+ pdata->chip.part_probe_types, 0,
+ pdata->chip.partitions, pdata->chip.nr_partitions);
if (!err)
return err;
@@ -145,8 +130,6 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
- if (data->parts && data->parts != pdata->chip.partitions)
- kfree(data->parts);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 3bbb796b451..7e52af51a19 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -99,8 +99,6 @@ static struct mtd_partition partition_info_evb[] = {
#define NUM_PARTITIONS 1
-extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
-
/*
* hardware specific access to control-lines
*/
@@ -187,18 +185,12 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
}
#endif
-const char *part_probes[] = { "cmdlinepart", NULL };
-const char *part_probes_evb[] = { "cmdlinepart", NULL };
-
/*
* Main initialization routine
*/
static int __init ppchameleonevb_init(void)
{
struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
void __iomem *ppchameleon_fio_base;
void __iomem *ppchameleonevb_fio_base;
@@ -281,24 +273,13 @@ static int __init ppchameleonevb_init(void)
#endif
ppchameleon_mtd->name = "ppchameleon-nand";
- mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-
- if (mtd_parts_nb == 0) {
- if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
- mtd_parts = partition_info_me;
- else
- mtd_parts = partition_info_hi;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
/* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_parse_register(ppchameleon_mtd, NULL, 0,
+ ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+ partition_info_me :
+ partition_info_hi,
+ NUM_PARTITIONS);
nand_evb_init:
/****************************
@@ -382,21 +363,13 @@ static int __init ppchameleonevb_init(void)
}
ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
- mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info_evb;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
/* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0,
+ partition_info_evb,
+ NUM_PARTITIONS);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 1fb3b3a8058..9eb7f879969 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -110,6 +110,7 @@ enum {
enum {
STATE_IDLE = 0,
+ STATE_PREPARED,
STATE_CMD_HANDLE,
STATE_DMA_READING,
STATE_DMA_WRITING,
@@ -120,21 +121,40 @@ enum {
STATE_READY,
};
-struct pxa3xx_nand_info {
- struct nand_chip nand_chip;
+struct pxa3xx_nand_host {
+ struct nand_chip chip;
+ struct pxa3xx_nand_cmdset *cmdset;
+ struct mtd_info *mtd;
+ void *info_data;
+
+ /* page size of attached chip */
+ unsigned int page_size;
+ int use_ecc;
+ int cs;
+ /* calculated from pxa3xx_nand_flash data */
+ unsigned int col_addr_cycles;
+ unsigned int row_addr_cycles;
+ size_t read_id_bytes;
+
+ /* cached register value */
+ uint32_t reg_ndcr;
+ uint32_t ndtr0cs0;
+ uint32_t ndtr1cs0;
+};
+
+struct pxa3xx_nand_info {
struct nand_hw_control controller;
struct platform_device *pdev;
- struct pxa3xx_nand_cmdset *cmdset;
struct clk *clk;
void __iomem *mmio_base;
unsigned long mmio_phys;
+ struct completion cmd_complete;
unsigned int buf_start;
unsigned int buf_count;
- struct mtd_info *mtd;
/* DMA information */
int drcmr_dat;
int drcmr_cmd;
@@ -142,44 +162,27 @@ struct pxa3xx_nand_info {
unsigned char *data_buff;
unsigned char *oob_buff;
dma_addr_t data_buff_phys;
- size_t data_buff_size;
int data_dma_ch;
struct pxa_dma_desc *data_desc;
dma_addr_t data_desc_addr;
- uint32_t reg_ndcr;
-
- /* saved column/page_addr during CMD_SEQIN */
- int seqin_column;
- int seqin_page_addr;
-
- /* relate to the command */
+ struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
unsigned int state;
+ int cs;
int use_ecc; /* use HW ECC ? */
int use_dma; /* use DMA ? */
int is_ready;
unsigned int page_size; /* page size of attached chip */
unsigned int data_size; /* data size in FIFO */
+ unsigned int oob_size;
int retcode;
- struct completion cmd_complete;
/* generated NDCBx register values */
uint32_t ndcb0;
uint32_t ndcb1;
uint32_t ndcb2;
-
- /* timing calcuted from setting */
- uint32_t ndtr0cs0;
- uint32_t ndtr1cs0;
-
- /* calculated from pxa3xx_nand_flash data */
- size_t oob_size;
- size_t read_id_bytes;
-
- unsigned int col_addr_cycles;
- unsigned int row_addr_cycles;
};
static int use_dma = 1;
@@ -225,7 +228,7 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
-const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
+const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
@@ -241,9 +244,10 @@ const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
-static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
const struct pxa3xx_nand_timing *t)
{
+ struct pxa3xx_nand_info *info = host->info_data;
unsigned long nand_clk = clk_get_rate(info->clk);
uint32_t ndtr0, ndtr1;
@@ -258,23 +262,24 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
- info->ndtr0cs0 = ndtr0;
- info->ndtr1cs0 = ndtr1;
+ host->ndtr0cs0 = ndtr0;
+ host->ndtr1cs0 = ndtr1;
nand_writel(info, NDTR0CS0, ndtr0);
nand_writel(info, NDTR1CS0, ndtr1);
}
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
{
- int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
- info->data_size = info->page_size;
+ info->data_size = host->page_size;
if (!oob_enable) {
info->oob_size = 0;
return;
}
- switch (info->page_size) {
+ switch (host->page_size) {
case 2048:
info->oob_size = (info->use_ecc) ? 40 : 64;
break;
@@ -292,9 +297,10 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
*/
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
uint32_t ndcr;
- ndcr = info->reg_ndcr;
+ ndcr = host->reg_ndcr;
ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
ndcr |= NDCR_ND_RUN;
@@ -359,7 +365,7 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
DIV_ROUND_UP(info->oob_size, 4));
break;
default:
- printk(KERN_ERR "%s: invalid state %d\n", __func__,
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
BUG();
}
@@ -385,7 +391,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
break;
default:
- printk(KERN_ERR "%s: invalid state %d\n", __func__,
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
BUG();
}
@@ -416,6 +422,15 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
struct pxa3xx_nand_info *info = devid;
unsigned int status, is_completed = 0;
+ unsigned int ready, cmd_done;
+
+ if (info->cs == 0) {
+ ready = NDSR_FLASH_RDY;
+ cmd_done = NDSR_CS0_CMDD;
+ } else {
+ ready = NDSR_RDY;
+ cmd_done = NDSR_CS1_CMDD;
+ }
status = nand_readl(info, NDSR);
@@ -437,11 +452,11 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
handle_data_pio(info);
}
}
- if (status & NDSR_CS0_CMDD) {
+ if (status & cmd_done) {
info->state = STATE_CMD_DONE;
is_completed = 1;
}
- if (status & NDSR_FLASH_RDY) {
+ if (status & ready) {
info->is_ready = 1;
info->state = STATE_READY;
}
@@ -463,12 +478,6 @@ NORMAL_IRQ_EXIT:
return IRQ_HANDLED;
}
-static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
-{
- struct pxa3xx_nand_info *info = mtd->priv;
- return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
-}
-
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
for (; len > 0; len--)
@@ -481,10 +490,12 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
uint16_t column, int page_addr)
{
uint16_t cmd;
- int addr_cycle, exec_cmd, ndcb0;
- struct mtd_info *mtd = info->mtd;
+ int addr_cycle, exec_cmd;
+ struct pxa3xx_nand_host *host;
+ struct mtd_info *mtd;
- ndcb0 = 0;
+ host = info->host[info->cs];
+ mtd = host->mtd;
addr_cycle = 0;
exec_cmd = 1;
@@ -495,6 +506,10 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
info->use_ecc = 0;
info->is_ready = 0;
info->retcode = ERR_NONE;
+ if (info->cs != 0)
+ info->ndcb0 = NDCB0_CSEL;
+ else
+ info->ndcb0 = 0;
switch (command) {
case NAND_CMD_READ0:
@@ -512,20 +527,19 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
}
- info->ndcb0 = ndcb0;
- addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
- + info->col_addr_cycles);
+ addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+ + host->col_addr_cycles);
switch (command) {
case NAND_CMD_READOOB:
case NAND_CMD_READ0:
- cmd = info->cmdset->read1;
+ cmd = host->cmdset->read1;
if (command == NAND_CMD_READOOB)
info->buf_start = mtd->writesize + column;
else
info->buf_start = column;
- if (unlikely(info->page_size < PAGE_CHUNK_SIZE))
+ if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
info->ndcb0 |= NDCB0_CMD_TYPE(0)
| addr_cycle
| (cmd & NDCB0_CMD1_MASK);
@@ -537,7 +551,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
case NAND_CMD_SEQIN:
/* small page addr setting */
- if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) {
+ if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
| (column & 0xFF);
@@ -564,7 +578,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
}
- cmd = info->cmdset->program;
+ cmd = host->cmdset->program;
info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
| NDCB0_AUTO_RS
| NDCB0_ST_ROW_EN
@@ -574,8 +588,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
case NAND_CMD_READID:
- cmd = info->cmdset->read_id;
- info->buf_count = info->read_id_bytes;
+ cmd = host->cmdset->read_id;
+ info->buf_count = host->read_id_bytes;
info->ndcb0 |= NDCB0_CMD_TYPE(3)
| NDCB0_ADDR_CYC(1)
| cmd;
@@ -583,7 +597,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
info->data_size = 8;
break;
case NAND_CMD_STATUS:
- cmd = info->cmdset->read_status;
+ cmd = host->cmdset->read_status;
info->buf_count = 1;
info->ndcb0 |= NDCB0_CMD_TYPE(4)
| NDCB0_ADDR_CYC(1)
@@ -593,7 +607,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
case NAND_CMD_ERASE1:
- cmd = info->cmdset->erase;
+ cmd = host->cmdset->erase;
info->ndcb0 |= NDCB0_CMD_TYPE(2)
| NDCB0_AUTO_RS
| NDCB0_ADDR_CYC(3)
@@ -604,7 +618,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
case NAND_CMD_RESET:
- cmd = info->cmdset->reset;
+ cmd = host->cmdset->reset;
info->ndcb0 |= NDCB0_CMD_TYPE(5)
| cmd;
@@ -616,8 +630,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
default:
exec_cmd = 0;
- printk(KERN_ERR "pxa3xx-nand: non-supported"
- " command %x\n", command);
+ dev_err(&info->pdev->dev, "non-supported command %x\n",
+ command);
break;
}
@@ -627,7 +641,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
int column, int page_addr)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
int ret, exec_cmd;
/*
@@ -635,9 +650,21 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
* "byte" address into a "word" address appropriate
* for indexing a word-oriented device
*/
- if (info->reg_ndcr & NDCR_DWIDTH_M)
+ if (host->reg_ndcr & NDCR_DWIDTH_M)
column /= 2;
+ /*
+ * Different NAND chips may be attached to different chip
+ * selects, so if the chip select has changed, reload the
+ * timing settings for the newly selected chip.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, host->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, host->ndtr1cs0);
+ }
+
+ info->state = STATE_PREPARED;
exec_cmd = prepare_command_pool(info, command, column, page_addr);
if (exec_cmd) {
init_completion(&info->cmd_complete);
@@ -646,12 +673,12 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
ret = wait_for_completion_timeout(&info->cmd_complete,
CHIP_DELAY_TIMEOUT);
if (!ret) {
- printk(KERN_ERR "Wait time out!!!\n");
+ dev_err(&info->pdev->dev, "Wait time out!!!\n");
/* Stop State Machine for next command cycle */
pxa3xx_nand_stop(info);
}
- info->state = STATE_IDLE;
}
+ info->state = STATE_IDLE;
}
static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
@@ -664,7 +691,8 @@ static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf, int page)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
chip->read_buf(mtd, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -685,6 +713,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
* OOB, ignore such double bit errors
*/
if (is_buf_blank(buf, mtd->writesize))
+ info->retcode = ERR_NONE;
+ else
mtd->ecc_stats.failed++;
}
@@ -693,7 +723,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
char retval = 0xFF;
if (info->buf_start < info->buf_count)
@@ -705,7 +736,8 @@ static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
u16 retval = 0xFFFF;
if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
@@ -717,7 +749,8 @@ static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
memcpy(buf, info->data_buff + info->buf_start, real_len);
@@ -727,7 +760,8 @@ static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
const uint8_t *buf, int len)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
memcpy(info->data_buff + info->buf_start, buf, real_len);
@@ -747,7 +781,8 @@ static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
/* pxa3xx_nand_send_command has waited for command complete */
if (this->state == FL_WRITING || this->state == FL_ERASING) {
@@ -770,54 +805,70 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
{
struct platform_device *pdev = info->pdev;
struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct pxa3xx_nand_host *host = info->host[info->cs];
uint32_t ndcr = 0x0; /* enable all interrupts */
- if (f->page_size != 2048 && f->page_size != 512)
+ if (f->page_size != 2048 && f->page_size != 512) {
+ dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
return -EINVAL;
+ }
- if (f->flash_width != 16 && f->flash_width != 8)
+ if (f->flash_width != 16 && f->flash_width != 8) {
+ dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
return -EINVAL;
+ }
/* calculate flash information */
- info->cmdset = &default_cmdset;
- info->page_size = f->page_size;
- info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
+ host->cmdset = &default_cmdset;
+ host->page_size = f->page_size;
+ host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
/* calculate addressing information */
- info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
+ host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
if (f->num_blocks * f->page_per_block > 65536)
- info->row_addr_cycles = 3;
+ host->row_addr_cycles = 3;
else
- info->row_addr_cycles = 2;
+ host->row_addr_cycles = 2;
ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
- ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+ ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
- ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
+ ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
ndcr |= NDCR_SPARE_EN; /* enable spare by default */
- info->reg_ndcr = ndcr;
+ host->reg_ndcr = ndcr;
- pxa3xx_nand_set_timing(info, f->timing);
+ pxa3xx_nand_set_timing(host, f->timing);
return 0;
}
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
+ /*
+ * Chip select 0 is hard-coded here because keep_config is not
+ * supported when more than one chip is attached to the controller.
+ */
+ struct pxa3xx_nand_host *host = info->host[0];
uint32_t ndcr = nand_readl(info, NDCR);
- info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
- /* set info fields needed to read id */
- info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
- info->reg_ndcr = ndcr;
- info->cmdset = &default_cmdset;
- info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
- info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
+ if (ndcr & NDCR_PAGE_SZ) {
+ host->page_size = 2048;
+ host->read_id_bytes = 4;
+ } else {
+ host->page_size = 512;
+ host->read_id_bytes = 2;
+ }
+
+ host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+ host->cmdset = &default_cmdset;
+
+ host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+ host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
return 0;
}
@@ -847,7 +898,6 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
return -ENOMEM;
}
- info->data_buff_size = MAX_BUFF_SIZE;
info->data_desc = (void *)info->data_buff + data_desc_offset;
info->data_desc_addr = info->data_buff_phys + data_desc_offset;
@@ -855,7 +905,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
pxa3xx_nand_data_dma_irq, info);
if (info->data_dma_ch < 0) {
dev_err(&pdev->dev, "failed to request data dma\n");
- dma_free_coherent(&pdev->dev, info->data_buff_size,
+ dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
info->data_buff, info->data_buff_phys);
return info->data_dma_ch;
}
@@ -865,24 +915,28 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
- struct mtd_info *mtd = info->mtd;
- struct nand_chip *chip = mtd->priv;
-
+ struct mtd_info *mtd;
+ int ret;
+ mtd = info->host[info->cs]->mtd;
/* use the common timing to make a try */
- pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
- chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+ ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
+ if (ret)
+ return ret;
+
+ pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
if (info->is_ready)
- return 1;
- else
return 0;
+
+ return -ENODEV;
}
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
- struct pxa3xx_nand_info *info = mtd->priv;
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
struct platform_device *pdev = info->pdev;
struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
- struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} };
+ struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
const struct pxa3xx_nand_flash *f = NULL;
struct nand_chip *chip = mtd->priv;
uint32_t id = -1;
@@ -893,22 +947,20 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
goto KEEP_CONFIG;
ret = pxa3xx_nand_sensing(info);
- if (!ret) {
- kfree(mtd);
- info->mtd = NULL;
- printk(KERN_INFO "There is no nand chip on cs 0!\n");
+ if (ret) {
+ dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
+ info->cs);
- return -EINVAL;
+ return ret;
}
chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
id = *((uint16_t *)(info->data_buff));
if (id != 0)
- printk(KERN_INFO "Detect a flash id %x\n", id);
+ dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
else {
- kfree(mtd);
- info->mtd = NULL;
- printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
+ dev_warn(&info->pdev->dev,
+ "Read out ID 0, potential timing set wrong!!\n");
return -EINVAL;
}
@@ -926,14 +978,17 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
}
if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
- kfree(mtd);
- info->mtd = NULL;
- printk(KERN_ERR "ERROR!! flash not defined!!!\n");
+ dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
return -EINVAL;
}
- pxa3xx_nand_config_flash(info, f);
+ ret = pxa3xx_nand_config_flash(info, f);
+ if (ret) {
+ dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
+ return ret;
+ }
+
pxa3xx_flash_ids[0].name = f->name;
pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
pxa3xx_flash_ids[0].pagesize = f->page_size;
@@ -942,62 +997,78 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
if (f->flash_width == 16)
pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
+ pxa3xx_flash_ids[1].name = NULL;
+ def = pxa3xx_flash_ids;
KEEP_CONFIG:
- if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids))
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = host->page_size;
+
+ chip->options = NAND_NO_AUTOINCR;
+ chip->options |= NAND_NO_READRDY;
+ if (host->reg_ndcr & NDCR_DWIDTH_M)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (nand_scan_ident(mtd, 1, def))
return -ENODEV;
/* calculate addressing information */
- info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
+ if (mtd->writesize >= 2048)
+ host->col_addr_cycles = 2;
+ else
+ host->col_addr_cycles = 1;
+
info->oob_buff = info->data_buff + mtd->writesize;
if ((mtd->size >> chip->page_shift) > 65536)
- info->row_addr_cycles = 3;
+ host->row_addr_cycles = 3;
else
- info->row_addr_cycles = 2;
- mtd->name = mtd_names[0];
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = f->page_size;
-
- chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
- chip->options |= NAND_NO_AUTOINCR;
- chip->options |= NAND_NO_READRDY;
+ host->row_addr_cycles = 2;
+ mtd->name = mtd_names[0];
return nand_scan_tail(mtd);
}
-static
-struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
+static int alloc_nand_resource(struct platform_device *pdev)
{
+ struct pxa3xx_nand_platform_data *pdata;
struct pxa3xx_nand_info *info;
+ struct pxa3xx_nand_host *host;
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *r;
- int ret, irq;
+ int ret, irq, cs;
- mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
- GFP_KERNEL);
- if (!mtd) {
+ pdata = pdev->dev.platform_data;
+ info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
+ sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+ if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- return NULL;
+ return -ENOMEM;
}
- info = (struct pxa3xx_nand_info *)(&mtd[1]);
- chip = (struct nand_chip *)(&mtd[1]);
info->pdev = pdev;
- info->mtd = mtd;
- mtd->priv = info;
- mtd->owner = THIS_MODULE;
-
- chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
- chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
- chip->controller = &info->controller;
- chip->waitfunc = pxa3xx_nand_waitfunc;
- chip->select_chip = pxa3xx_nand_select_chip;
- chip->dev_ready = pxa3xx_nand_dev_ready;
- chip->cmdfunc = pxa3xx_nand_cmdfunc;
- chip->read_word = pxa3xx_nand_read_word;
- chip->read_byte = pxa3xx_nand_read_byte;
- chip->read_buf = pxa3xx_nand_read_buf;
- chip->write_buf = pxa3xx_nand_write_buf;
- chip->verify_buf = pxa3xx_nand_verify_buf;
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = (struct mtd_info *)((unsigned int)&info[1] +
+ (sizeof(*mtd) + sizeof(*host)) * cs);
+ chip = (struct nand_chip *)(&mtd[1]);
+ host = (struct pxa3xx_nand_host *)chip;
+ info->host[cs] = host;
+ host->mtd = mtd;
+ host->cs = cs;
+ host->info_data = info;
+ mtd->priv = host;
+ mtd->owner = THIS_MODULE;
+
+ chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
+ chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
+ chip->controller = &info->controller;
+ chip->waitfunc = pxa3xx_nand_waitfunc;
+ chip->select_chip = pxa3xx_nand_select_chip;
+ chip->cmdfunc = pxa3xx_nand_cmdfunc;
+ chip->read_word = pxa3xx_nand_read_word;
+ chip->read_byte = pxa3xx_nand_read_byte;
+ chip->read_buf = pxa3xx_nand_read_buf;
+ chip->write_buf = pxa3xx_nand_write_buf;
+ chip->verify_buf = pxa3xx_nand_verify_buf;
+ }
spin_lock_init(&chip->controller->lock);
init_waitqueue_head(&chip->controller->wq);
@@ -1070,13 +1141,13 @@ struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- return info;
+ return 0;
fail_free_buf:
free_irq(irq, info);
if (use_dma) {
pxa_free_dma(info->data_dma_ch);
- dma_free_coherent(&pdev->dev, info->data_buff_size,
+ dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
info->data_buff, info->data_buff_phys);
} else
kfree(info->data_buff);
@@ -1088,17 +1159,21 @@ fail_put_clk:
clk_disable(info->clk);
clk_put(info->clk);
fail_free_mtd:
- kfree(mtd);
- return NULL;
+ kfree(info);
+ return ret;
}
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct mtd_info *mtd = info->mtd;
+ struct pxa3xx_nand_platform_data *pdata;
struct resource *r;
- int irq;
+ int irq, cs;
+ if (!info)
+ return 0;
+
+ pdata = pdev->dev.platform_data;
platform_set_drvdata(pdev, NULL);
irq = platform_get_irq(pdev, 0);
@@ -1106,7 +1181,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
free_irq(irq, info);
if (use_dma) {
pxa_free_dma(info->data_dma_ch);
- dma_free_writecombine(&pdev->dev, info->data_buff_size,
+ dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
info->data_buff, info->data_buff_phys);
} else
kfree(info->data_buff);
@@ -1118,10 +1193,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
clk_disable(info->clk);
clk_put(info->clk);
- if (mtd) {
- mtd_device_unregister(mtd);
- kfree(mtd);
- }
+ for (cs = 0; cs < pdata->num_cs; cs++)
+ nand_release(info->host[cs]->mtd);
+ kfree(info);
return 0;
}
@@ -1129,6 +1203,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
{
struct pxa3xx_nand_platform_data *pdata;
struct pxa3xx_nand_info *info;
+ int ret, cs, probe_success;
pdata = pdev->dev.platform_data;
if (!pdata) {
@@ -1136,52 +1211,88 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
- info = alloc_nand_resource(pdev);
- if (info == NULL)
- return -ENOMEM;
-
- if (pxa3xx_nand_scan(info->mtd)) {
- dev_err(&pdev->dev, "failed to scan nand\n");
- pxa3xx_nand_remove(pdev);
- return -ENODEV;
+ ret = alloc_nand_resource(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "alloc nand resource failed\n");
+ return ret;
}
- if (mtd_has_cmdlinepart()) {
- const char *probes[] = { "cmdlinepart", NULL };
- struct mtd_partition *parts;
- int nr_parts;
+ info = platform_get_drvdata(pdev);
+ probe_success = 0;
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ info->cs = cs;
+ ret = pxa3xx_nand_scan(info->host[cs]->mtd);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
+ cs);
+ continue;
+ }
- nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
+ ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
+ pdata->parts[cs], pdata->nr_parts[cs]);
+ if (!ret)
+ probe_success = 1;
+ }
- if (nr_parts)
- return mtd_device_register(info->mtd, parts, nr_parts);
+ if (!probe_success) {
+ pxa3xx_nand_remove(pdev);
+ return -ENODEV;
}
- return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
+ return 0;
}
#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct mtd_info *mtd = info->mtd;
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
+ pdata = pdev->dev.platform_data;
if (info->state) {
dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
return -EAGAIN;
}
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd->suspend(mtd);
+ }
+
return 0;
}
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct mtd_info *mtd = info->mtd;
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
- nand_writel(info, NDTR0CS0, info->ndtr0cs0);
- nand_writel(info, NDTR1CS0, info->ndtr1cs0);
- clk_enable(info->clk);
+ pdata = pdev->dev.platform_data;
+ /* We don't want to handle interrupt without calling mtd routine */
+ disable_int(info, NDCR_INT_MASK);
+
+ /*
+ * Set the chip select to an invalid value so that the driver
+ * reloads the timing for the current chip select at the
+ * beginning of cmdfunc.
+ */
+ info->cs = 0xff;
+
+ /*
+ * According to the spec, NDSR is updated to 0x1800 when the
+ * NAND clock is disabled and re-enabled. Clear all status bits
+ * before resuming so this does not corrupt the driver's state
+ * machine.
+ */
+ nand_writel(info, NDSR, NDSR_MASK);
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd->resume(mtd);
+ }
return 0;
}
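The pxa3xx rework above splits the old all-in-one pxa3xx_nand_info into a shared controller structure plus one pxa3xx_nand_host per chip select. A minimal sketch of the resulting pointer chain, using the types defined in the hunks; the example_* helper is illustrative only:

/* Sketch only: mtd->priv now points at the per-chip-select host, and the
 * shared controller state is reached through host->info_data; this is how
 * cmdfunc() knows to reload timings when the active chip select changes. */
static struct pxa3xx_nand_info *example_mtd_to_info(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;

	return host->info_data;
}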
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index cae2e013c98..f20f393bfda 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -1027,7 +1027,7 @@ void r852_shutdown(struct pci_dev *pci_dev)
}
#ifdef CONFIG_PM
-int r852_suspend(struct device *device)
+static int r852_suspend(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
@@ -1048,7 +1048,7 @@ int r852_suspend(struct device *device)
return 0;
}
-int r852_resume(struct device *device)
+static int r852_resume(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
@@ -1092,7 +1092,7 @@ static const struct pci_device_id r852_pci_id_tbl[] = {
MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
-SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
static struct pci_driver r852_pci_driver = {
.name = DRV_NAME,
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index c9f9127ff77..f309addc2fa 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -351,7 +351,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
return 0;
}
- /* Read the syndrom pattern from the FPGA and correct the bitorder */
+ /* Read the syndrome pattern from the FPGA and correct the bitorder */
rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
for (i = 0; i < 8; i++) {
ecc[i] = bitrev8(*rs_ecc);
@@ -380,7 +380,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
/* Let the library code do its magic. */
res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
if (res > 0) {
- DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
+ pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
}
return res;
}
@@ -444,7 +444,6 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
len = mtd->writesize;
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
- printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n");
er_stat = 1;
goto out;
}
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 4405468f196..868685db671 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -723,7 +723,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
/* free the common resources */
- if (info->clk != NULL && !IS_ERR(info->clk)) {
+ if (!IS_ERR(info->clk)) {
s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
clk_put(info->clk);
}
@@ -744,26 +744,15 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
return 0;
}
-const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
- struct mtd_partition *part_info;
- int nr_part = 0;
+ if (set)
+ mtd->mtd.name = set->name;
- if (set == NULL)
- return mtd_device_register(&mtd->mtd, NULL, 0);
-
- mtd->mtd.name = set->name;
- nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
-
- if (nr_part <= 0 && set->nr_partitions > 0) {
- nr_part = set->nr_partitions;
- part_info = set->partitions;
- }
-
- return mtd_device_register(&mtd->mtd, part_info, nr_part);
+ return mtd_device_parse_register(&mtd->mtd, NULL, 0,
+ set->partitions, set->nr_partitions);
}
/**
@@ -880,8 +869,10 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
/* If you use u-boot BBT creation code, specifying this flag will
* let the kernel fish out the BBT from the NAND, and also skip the
* full NAND scan that can take 1/2s or so. Little things... */
- if (set->flash_bbt)
- chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+ if (set->flash_bbt) {
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->options |= NAND_SKIP_BBTSCAN;
+ }
}
/**
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 19e24ed089e..619d2a50478 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,16 +103,12 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
return readb(sharpsl->io + ECCCNTR) != 0;
}
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
/*
* Main initialization routine
*/
static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
- struct mtd_partition *sharpsl_partition_info;
- int nr_partitions;
struct resource *r;
int err = 0;
struct sharpsl_nand *sharpsl;
@@ -184,14 +180,9 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* Register the partitions */
sharpsl->mtd.name = "sharpsl-nand";
- nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
- if (nr_partitions <= 0) {
- nr_partitions = data->nr_partitions;
- sharpsl_partition_info = data->partitions;
- }
- err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
- nr_partitions);
+ err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0,
+ data->partitions, data->nr_partitions);
if (err)
goto err_add;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index b6332e83b28..32ae5af7444 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
+#include <linux/module.h>
#include "sm_common.h"
static struct nand_ecclayout nand_oob_sm = {
@@ -47,7 +48,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* As long as this function is called on erase block boundaries
it will work correctly for 256 byte nand */
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = (void *)&oob;
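The sm_common hunk is one of several that track the mtd_oob_ops mode rename (MTD_OOB_PLACE becomes MTD_OPS_PLACE_OOB). A minimal sketch of an OOB-only read using the new constant; the example_* name and the zero OOB offset are illustrative only:

#include <linux/mtd/mtd.h>

static int example_read_oob(struct mtd_info *mtd, loff_t ofs,
			    uint8_t *buf, size_t len)
{
	struct mtd_oob_ops ops = {
		.mode	 = MTD_OPS_PLACE_OOB,	/* was MTD_OOB_PLACE */
		.ooboffs = 0,
		.ooblen	 = len,
		.oobbuf	 = buf,
		.datbuf	 = NULL,		/* OOB area only, no main data */
	};

	return mtd->read_oob(mtd, ofs, &ops);
}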
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index ca2d0555729..0fb24f9c232 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,8 +155,6 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
return 1;
}
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
/*
* Probe for the NAND device.
*/
@@ -166,8 +164,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int res;
- struct mtd_partition *partitions = NULL;
- int num_partitions = 0;
+ struct mtd_part_parser_data ppdata;
/* Allocate memory for the device structure (and zero it) */
host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -193,6 +190,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
mtd->name = "socrates_nand";
mtd->owner = THIS_MODULE;
mtd->dev.parent = &ofdev->dev;
+ ppdata.of_node = ofdev->dev.of_node;
/*should never be accessed directly */
nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
@@ -225,30 +223,10 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto out;
}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- num_partitions = parse_mtd_partitions(mtd, part_probes,
- &partitions, 0);
- if (num_partitions < 0) {
- res = num_partitions;
- goto release;
- }
-#endif
-
- if (num_partitions == 0) {
- num_partitions = of_mtd_parse_partitions(&ofdev->dev,
- ofdev->dev.of_node,
- &partitions);
- if (num_partitions < 0) {
- res = num_partitions;
- goto release;
- }
- }
-
- res = mtd_device_register(mtd, partitions, num_partitions);
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (!res)
return res;
-release:
nand_release(mtd);
out:
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 11e8371b568..beebd95f769 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -121,9 +121,6 @@ struct tmio_nand {
#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*--------------------------------------------------------------------------*/
@@ -381,8 +378,6 @@ static int tmio_probe(struct platform_device *dev)
struct tmio_nand *tmio;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
- struct mtd_partition *parts;
- int nbparts = 0;
int retval;
if (data == NULL)
@@ -461,15 +456,9 @@ static int tmio_probe(struct platform_device *dev)
goto err_scan;
}
/* Register the partitions */
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
-#endif
- if (nbparts <= 0 && data) {
- parts = data->partition;
- nbparts = data->num_partitions;
- }
-
- retval = mtd_device_register(mtd, parts, nbparts);
+ retval = mtd_device_parse_register(mtd, NULL, 0,
+ data ? data->partition : NULL,
+ data ? data->num_partitions : 0);
if (!retval)
return retval;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index bfba4e39a6c..ace46fdaef5 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,7 +74,6 @@ struct txx9ndfmc_drvdata {
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
struct nand_hw_control hw_control;
- struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -287,7 +286,6 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
- static const char *probes[] = { "cmdlinepart", NULL };
int hold, spw;
int i;
struct txx9ndfmc_drvdata *drvdata;
@@ -333,7 +331,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
struct txx9ndfmc_priv *txx9_priv;
struct nand_chip *chip;
struct mtd_info *mtd;
- int nr_parts;
if (!(plat->ch_mask & (1 << i)))
continue;
@@ -393,9 +390,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
}
mtd->name = txx9_priv->mtdname;
- nr_parts = parse_mtd_partitions(mtd, probes,
- &drvdata->parts[i], 0);
- mtd_device_register(mtd, drvdata->parts[i], nr_parts);
+ mtd_device_parse_register(mtd, NULL, 0, NULL, 0);
drvdata->mtds[i] = mtd;
}
@@ -421,7 +416,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
txx9_priv = chip->priv;
nand_release(mtd);
- kfree(drvdata->parts[i]);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index b155666acfb..cda77b562ad 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -63,14 +63,12 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);
+ pr_debug("NFTL: add_mtd for %s\n", mtd->name);
nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
- if (!nftl) {
- printk(KERN_WARNING "NFTL: out of memory for data structures\n");
+ if (!nftl)
return;
- }
nftl->mbd.mtd = mtd;
nftl->mbd.devnum = -1;
@@ -132,7 +130,7 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
{
struct NFTLrecord *nftl = (void *)dev;
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum);
+ pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
@@ -149,7 +147,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
@@ -170,7 +168,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
@@ -193,7 +191,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
struct mtd_oob_ops ops;
int res;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
@@ -220,7 +218,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
+ pr_debug("NFTL_findfreeblock: there are too few free EUNs\n");
return BLOCK_NIL;
}
@@ -291,8 +289,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (block == 2) {
foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
if (foldmark == FOLD_MARK_IN_PROGRESS) {
- DEBUG(MTD_DEBUG_LEVEL1,
- "Write Inhibited on EUN %d\n", thisEUN);
+ pr_debug("Write Inhibited on EUN %d\n", thisEUN);
inplace = 0;
} else {
/* There's no other reason not to do inplace,
@@ -357,7 +354,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockLastState[block] != SECTOR_FREE &&
BlockMap[block] != BLOCK_NIL &&
BlockMap[block] != targetEUN) {
- DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, "
+ pr_debug("Setting inplace to 0. VUC %d, "
"block %d was %x lastEUN, "
"and is in EUN %d (%s) %d\n",
thisVUC, block, BlockLastState[block],
@@ -373,14 +370,14 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
SECTOR_FREE) {
- DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. "
+ pr_debug("Pending write not free in EUN %d. "
"Folding out of place.\n", targetEUN);
inplace = 0;
}
}
if (!inplace) {
- DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. "
+ pr_debug("Cannot fold Virtual Unit Chain %d in place. "
"Trying out-of-place\n", thisVUC);
/* We need to find a targetEUN to fold into. */
targetEUN = NFTL_findfreeblock(nftl, 1);
@@ -410,7 +407,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
and the Erase Unit into which we are supposed to be copying.
Go for it.
*/
- DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN);
+ pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < nftl->EraseSize / 512 ; block++) {
unsigned char movebuf[512];
int ret;
@@ -428,7 +425,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
512, &retlen, movebuf);
- if (ret < 0 && ret != -EUCLEAN) {
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
+ (block * 512), 512, &retlen,
movebuf);
@@ -457,7 +454,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
has duplicate chains, we need to free one of the chains because it's not necessary any more.
*/
thisEUN = nftl->EUNtable[thisVUC];
- DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n");
+ pr_debug("Want to erase\n");
/* For each block in the old chain (except the targetEUN of course),
free it and make it available for future use */
@@ -570,7 +567,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
(writeEUN * nftl->EraseSize) + blockofs,
8, &retlen, (char *)&bci);
- DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n",
+ pr_debug("Status of block %d in EUN %d is %x\n",
block , writeEUN, le16_to_cpu(bci.Status));
status = bci.Status | bci.Status1;
@@ -623,7 +620,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
but they are reserved for when we're
desperate. Well, now we're desperate.
*/
- DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
+ pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
writeEUN = NFTL_findfreeblock(nftl, 1);
}
if (writeEUN == BLOCK_NIL) {
@@ -776,7 +773,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
size_t retlen;
int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
- if (res < 0 && res != -EUCLEAN)
+ if (res < 0 && !mtd_is_bitflip(res))
return -EIO;
}
return 0;
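The nftlcore hunks replace open-coded comparisons against -EUCLEAN with the new mtd_is_bitflip() helper: a read that only reports corrected bitflips is still a usable read. A minimal sketch of the check; the example_* name is illustrative only:

#include <linux/mtd/mtd.h>

/* Sketch only: treat corrected bitflips (-EUCLEAN) as success and any
 * other negative return as a hard read failure. */
static bool example_read_failed(int ret)
{
	return ret < 0 && !mtd_is_bitflip(ret);
}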
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index e3cd1ffad2f..ac4092591ae 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -32,7 +32,7 @@
/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
* various device information of the NFTL partition and Bad Unit Table. Update
- * the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[]
+ * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[]
* is used for management of Erase Unit in other routines in nftl.c and nftlmount.c
*/
static int find_boot_record(struct NFTLrecord *nftl)
@@ -297,7 +297,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
*
* Return: 0 when succeed, -1 on error.
*
- * ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ * ToDo: 1. Is it necessary to check_free_sector after erasing ??
*/
int NFTL_formatblock(struct NFTLrecord *nftl, int block)
{
@@ -337,7 +337,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
nb_erases = le32_to_cpu(uci.WearInfo);
nb_erases++;
- /* wrap (almost impossible with current flashs) or free block */
+ /* wrap (almost impossible with current flash) or free block */
if (nb_erases == 0)
nb_erases = 1;
@@ -363,10 +363,10 @@ fail:
* Mark as 'IGNORE' each incorrect sector. This check is only done if the chain
* was being folded when NFTL was interrupted.
*
- * The check_free_sectors in this function is neceressary. There is a possible
+ * The check_free_sectors in this function is necessary. There is a possible
* situation that after writing the Data area, the Block Control Information is
* not updated according (due to power failure or something) which leaves the block
- * in an umconsistent state. So we have to check if a block is really FREE in this
+ * in an inconsistent state. So we have to check if a block is really FREE in this
* case. */
static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
{
@@ -428,7 +428,7 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
for (;;) {
length++;
- /* avoid infinite loops, although this is guaranted not to
+ /* avoid infinite loops, although this is guaranteed not to
happen because of the previous checks */
if (length >= nftl->nb_blocks) {
printk("nftl: length too long %d !\n", length);
@@ -447,11 +447,11 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a
* Virtual Unit Chain, i.e. all the units are disconnected.
*
- * It is not stricly correct to begin from the first block of the chain because
+ * It is not strictly correct to begin from the first block of the chain because
* if we stop the code, we may see again a valid chain if there was a first_block
* flag in a block inside it. But is it really a problem ?
*
- * FixMe: Figure out what the last statesment means. What if power failure when we are
+ * FixMe: Figure out what the last statement means. What if power failure when we are
* in the for (;;) loop formatting blocks ??
*/
static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
@@ -485,7 +485,7 @@ static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
* totally free (only 0xff).
*
* Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the
- * following critia:
+ * following criteria:
* 1. */
static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
{
@@ -502,7 +502,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
if (erase_mark != ERASE_MARK) {
/* if no erase mark, the block must be totally free. This is
- possible in two cases : empty filsystem or interrupted erase (very unlikely) */
+ possible in two cases : empty filesystem or interrupted erase (very unlikely) */
if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
return -1;
@@ -544,7 +544,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
/* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS
* to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2
* is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted
- * for some reason. A clean up/check of the VUC is neceressary in this case.
+ * for some reason. A clean up/check of the VUC is necessary in this case.
*
* WARNING: return 0 if read error
*/
@@ -657,7 +657,7 @@ int NFTL_mount(struct NFTLrecord *s)
printk("Block %d: incorrect logical block: %d expected: %d\n",
block, logical_block, first_logical_block);
/* the chain is incorrect : we must format it,
- but we need to read it completly */
+ but we need to read it completely */
do_format_chain = 1;
}
if (is_first_block) {
@@ -669,7 +669,7 @@ int NFTL_mount(struct NFTLrecord *s)
printk("Block %d: incorrectly marked as first block in chain\n",
block);
/* the chain is incorrect : we must format it,
- but we need to read it completly */
+ but we need to read it completely */
do_format_chain = 1;
} else {
printk("Block %d: folding in progress - ignoring first block flag\n",
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index a996718fa6b..64be8f0848b 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,14 +20,23 @@
#include <linux/slab.h>
#include <linux/mtd/partitions.h>
-int __devinit of_mtd_parse_partitions(struct device *dev,
- struct device_node *node,
- struct mtd_partition **pparts)
+static int parse_ofpart_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
+ struct device_node *node;
const char *partname;
struct device_node *pp;
int nr_parts, i;
+
+ if (!data)
+ return 0;
+
+ node = data->of_node;
+ if (!node)
+ return 0;
+
/* First count the subnodes */
pp = NULL;
nr_parts = 0;
@@ -69,7 +78,7 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
if (!i) {
of_node_put(pp);
- dev_err(dev, "No valid partition found on %s\n", node->full_name);
+ pr_err("No valid partition found on %s\n", node->full_name);
kfree(*pparts);
*pparts = NULL;
return -EINVAL;
@@ -77,6 +86,99 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
return nr_parts;
}
-EXPORT_SYMBOL(of_mtd_parse_partitions);
+
+static struct mtd_part_parser ofpart_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_ofpart_partitions,
+ .name = "ofpart",
+};
+
+static int parse_ofoldpart_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct device_node *dp;
+ int i, plen, nr_parts;
+ const struct {
+ __be32 offset, len;
+ } *part;
+ const char *names;
+
+ if (!data)
+ return 0;
+
+ dp = data->of_node;
+ if (!dp)
+ return 0;
+
+ part = of_get_property(dp, "partitions", &plen);
+ if (!part)
+ return 0; /* No partitions found */
+
+ pr_warning("Device tree uses obsolete partition map binding: %s\n",
+ dp->full_name);
+
+ nr_parts = plen / sizeof(part[0]);
+
+ *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ names = of_get_property(dp, "partition-names", &plen);
+
+ for (i = 0; i < nr_parts; i++) {
+ (*pparts)[i].offset = be32_to_cpu(part->offset);
+ (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
+ /* bit 0 set signifies read only partition */
+ if (be32_to_cpu(part->len) & 1)
+ (*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+ if (names && (plen > 0)) {
+ int len = strlen(names) + 1;
+
+ (*pparts)[i].name = (char *)names;
+ plen -= len;
+ names += len;
+ } else {
+ (*pparts)[i].name = "unnamed";
+ }
+
+ part++;
+ }
+
+ return nr_parts;
+}
+
+static struct mtd_part_parser ofoldpart_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_ofoldpart_partitions,
+ .name = "ofoldpart",
+};
+
+static int __init ofpart_parser_init(void)
+{
+ int rc;
+ rc = register_mtd_parser(&ofpart_parser);
+ if (rc)
+ goto out;
+
+ rc = register_mtd_parser(&ofoldpart_parser);
+ if (!rc)
+ return 0;
+
+ deregister_mtd_parser(&ofpart_parser);
+out:
+ return rc;
+}
+
+module_init(ofpart_parser_init);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
+MODULE_AUTHOR("Vitaly Wool, David Gibson");
+/*
+ * When MTD core cannot find the requested parser, it tries to load the module
+ * with the same name. Since we provide the ofoldpart parser, we should have
+ * the corresponding alias.
+ */
+MODULE_ALIAS("ofoldpart");
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 2d70d354d84..7813095264a 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,11 +30,8 @@
*/
#define DRIVER_NAME "onenand-flash"
-static const char *part_probes[] = { "cmdlinepart", NULL, };
-
struct onenand_info {
struct mtd_info mtd;
- struct mtd_partition *parts;
struct onenand_chip onenand;
};
@@ -73,13 +70,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
- err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
- if (err > 0)
- mtd_device_register(&info->mtd, info->parts, err);
- else if (err <= 0 && pdata && pdata->parts)
- mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
- else
- err = mtd_device_register(&info->mtd, NULL, 0);
+ err = mtd_device_parse_register(&info->mtd, NULL, 0,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, info);
@@ -104,7 +97,6 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (info) {
- mtd_device_unregister(&info->mtd);
onenand_release(&info->mtd);
release_mem_region(res->start, size);
iounmap(info->onenand.base);
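
The pattern above repeats in every driver below: the private part_probes[]/parts bookkeeping and the open-coded parse_mtd_partitions() sequence disappear in favour of a single mtd_device_parse_register() call. A condensed sketch of the new call shape, assuming <linux/mtd/mtd.h> and <linux/mtd/partitions.h> and with my_parts/my_nr_parts standing in for whatever platform data the driver already holds:

static int my_mtd_register(struct mtd_info *mtd,
			   struct mtd_partition *my_parts, int my_nr_parts)
{
	/* NULL, 0: let the core run its default parsers (the list includes
	 * "cmdlinepart", so the old part_probes[] arrays become redundant)
	 * and fall back to the my_parts table only if they find nothing. */
	return mtd_device_parse_register(mtd, NULL, 0,
					 my_parts, my_nr_parts);
}
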
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index a916dec2921..7e9ea6852b6 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -40,7 +40,7 @@
#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <plat/dma.h>
@@ -57,7 +57,6 @@ struct omap2_onenand {
unsigned long phys_base;
int gpio_irq;
struct mtd_info mtd;
- struct mtd_partition *parts;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
@@ -67,8 +66,6 @@ struct omap2_onenand {
struct regulator *regulator;
};
-static const char *part_probes[] = { "cmdlinepart", NULL, };
-
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
struct omap2_onenand *c = data;
@@ -741,6 +738,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
c->regulator = regulator_get(&pdev->dev, "vonenand");
if (IS_ERR(c->regulator)) {
dev_err(&pdev->dev, "Failed to get regulator\n");
+ r = PTR_ERR(c->regulator);
goto err_release_dma;
}
c->onenand.enable = omap2_onenand_enable;
@@ -753,13 +751,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
- if (r > 0)
- r = mtd_device_register(&c->mtd, c->parts, r);
- else if (pdata->parts != NULL)
- r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
- else
- r = mtd_device_register(&c->mtd, NULL, 0);
+ r = mtd_device_parse_register(&c->mtd, NULL, 0,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
if (r)
goto err_release_onenand;
@@ -786,7 +780,6 @@ err_release_mem_region:
err_free_cs:
gpmc_cs_free(c->gpmc_cs);
err_kfree:
- kfree(c->parts);
kfree(c);
return r;
@@ -809,7 +802,6 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
iounmap(c->onenand.base);
release_mem_region(c->phys_base, ONENAND_IO_SIZE);
gpmc_cs_free(c->gpmc_cs);
- kfree(c->parts);
kfree(c);
return 0;
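
Buried among the cleanups above is one real bug fix: the regulator error path previously left r at 0, so a failing regulator_get() was reported as success. The corrected idiom is the usual IS_ERR()/PTR_ERR() pairing, as the hunk shows:

	c->regulator = regulator_get(&pdev->dev, "vonenand");
	if (IS_ERR(c->regulator)) {
		dev_err(&pdev->dev, "Failed to get regulator\n");
		r = PTR_ERR(c->regulator);	/* propagate the real errno, not 0 */
		goto err_release_dma;
	}
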
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ac9e959802a..a8394730b4b 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1015,7 +1015,7 @@ static void onenand_release_device(struct mtd_info *mtd)
}
/**
- * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer
+ * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
* @param mtd MTD device structure
* @param buf destination address
* @param column oob offset to read from
@@ -1079,7 +1079,7 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
return status;
/* check if we failed due to uncorrectable error */
- if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
return status;
/* check if address lies in MLC region */
@@ -1122,10 +1122,10 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
int ret = 0;
int writesize = this->writesize;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
- __func__, (unsigned int) from, (int) len);
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1159,7 +1159,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (unlikely(ret))
ret = onenand_recover_lsb(mtd, from, ret);
onenand_update_bufferram(mtd, from, !ret);
- if (ret == -EBADMSG)
+ if (mtd_is_eccerr(ret))
ret = 0;
if (ret)
break;
@@ -1170,7 +1170,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
thisooblen = oobsize - oobcolumn;
thisooblen = min_t(int, thisooblen, ooblen - oobread);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1226,10 +1226,10 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
int ret = 0, boundary = 0;
int writesize = this->writesize;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
- __func__, (unsigned int) from, (int) len);
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1255,7 +1255,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
- if (ret == -EBADMSG)
+ if (mtd_is_eccerr(ret))
ret = 0;
}
}
@@ -1291,7 +1291,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
thisooblen = oobsize - oobcolumn;
thisooblen = min_t(int, thisooblen, ooblen - oobread);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1315,7 +1315,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
- if (ret == -EBADMSG)
+ if (mtd_is_eccerr(ret))
ret = 0;
}
@@ -1351,19 +1351,19 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_ecc_stats stats;
int read = 0, thislen, column, oobsize;
size_t len = ops->ooblen;
- mtd_oob_mode_t mode = ops->mode;
+ unsigned int mode = ops->mode;
u_char *buf = ops->oobbuf;
int ret = 0, readcmd;
from += ops->ooboffs;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
- __func__, (unsigned int) from, (int) len);
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
/* Initialize return length value */
ops->oobretlen = 0;
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1403,13 +1403,13 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
if (unlikely(ret))
ret = onenand_recover_lsb(mtd, from, ret);
- if (ret && ret != -EBADMSG) {
+ if (ret && !mtd_is_eccerr(ret)) {
printk(KERN_ERR "%s: read failed = 0x%x\n",
__func__, ret);
break;
}
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, buf, column, thislen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
@@ -1487,10 +1487,10 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
int ret;
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
break;
- case MTD_OOB_RAW:
+ case MTD_OPS_RAW:
/* Not implemented yet */
default:
return -EINVAL;
@@ -1576,8 +1576,8 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
- __func__, (unsigned int) from, len);
+ pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
+ len);
/* Initialize return value */
ops->oobretlen = 0;
@@ -1750,8 +1750,8 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Wait for any existing operation to clear */
onenand_panic_wait(mtd);
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int) to, (int) len);
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
/* Initialize retlen, in case of early exit */
*retlen = 0;
@@ -1821,7 +1821,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
- * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
+ * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
* @param mtd MTD device structure
* @param oob_buf oob buffer
* @param buf source address
@@ -1883,8 +1883,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
u_char *oobbuf;
int ret = 0, cmd;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int) to, (int) len);
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
/* Initialize retlen, in case of early exit */
ops->retlen = 0;
@@ -1908,7 +1908,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
if (!len)
return 0;
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -1945,7 +1945,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/* We send data to spare ram with oobsize
* to prevent byte access */
memset(oobbuf, 0xff, mtd->oobsize);
- if (ops->mode == MTD_OOB_AUTO)
+ if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
else
memcpy(oobbuf + oobcolumn, oob, thisooblen);
@@ -2055,7 +2055,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/**
- * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band
+ * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
* @param mtd MTD device structure
* @param to offset to write to
* @param len number of bytes to write
@@ -2074,17 +2074,17 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
- mtd_oob_mode_t mode = ops->mode;
+ unsigned int mode = ops->mode;
to += ops->ooboffs;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int) to, (int) len);
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
/* Initialize retlen, in case of early exit */
ops->oobretlen = 0;
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
oobsize = this->ecclayout->oobavail;
else
oobsize = mtd->oobsize;
@@ -2128,7 +2128,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
/* We send data to spare ram with oobsize
* to prevent byte access */
memset(oobbuf, 0xff, mtd->oobsize);
- if (mode == MTD_OOB_AUTO)
+ if (mode == MTD_OPS_AUTO_OOB)
onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
else
memcpy(oobbuf + column, buf, thislen);
@@ -2217,10 +2217,10 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
int ret;
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
break;
- case MTD_OOB_RAW:
+ case MTD_OPS_RAW:
/* Not implemented yet */
default:
return -EINVAL;
@@ -2281,7 +2281,7 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
}
/**
- * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
+ * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
* @param mtd MTD device structure
* @param instr erase instruction
* @param region erase region
@@ -2397,7 +2397,7 @@ static int onenand_multiblock_erase(struct mtd_info *mtd,
/**
- * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
+ * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
* @param mtd MTD device structure
* @param instr erase instruction
* @param region erase region
@@ -2489,8 +2489,9 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
struct mtd_erase_region_info *region = NULL;
loff_t region_offset = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
- (unsigned long long) instr->addr, (unsigned long long) instr->len);
+ pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
+ (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
/* Do not allow erase past end of device */
if (unlikely((len + addr) > mtd->size)) {
@@ -2558,7 +2559,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
*/
static void onenand_sync(struct mtd_info *mtd)
{
- DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
+ pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_SYNCING);
@@ -2602,7 +2603,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct bbm_info *bbm = this->bbm;
u_char buf[2] = {0, 0};
struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
+ .mode = MTD_OPS_PLACE_OOB,
.ooblen = 2,
.oobbuf = buf,
.ooboffs = 0,
@@ -2922,7 +2923,7 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
}
/**
- * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
+ * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
* @param mtd MTD device structure
* @param to offset to write to
* @param len number of bytes to write
@@ -3170,7 +3171,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
this->wait(mtd, FL_RESETING);
} else {
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = len;
ops.oobbuf = buf;
ops.ooboffs = 0;
@@ -3429,6 +3430,19 @@ static void onenand_check_features(struct mtd_info *mtd)
else if (numbufs == 1) {
this->options |= ONENAND_HAS_4KB_PAGE;
this->options |= ONENAND_HAS_CACHE_PROGRAM;
+ /*
+ * There are two different 4KiB pagesize chips
+ * and no way to tell them apart by H/W config values.
+ *
+ * To detect the correct NOP for each chip,
+ * check the version ID as a workaround.
+ *
+ * Known devices so far:
+ * KFM4G16Q4M has NOP 4 with version ID 0x0131
+ * KFM4G16Q5M has NOP 1 with version ID 0x013e
+ */
+ if ((this->version_id & 0xf) == 0xe)
+ this->options |= ONENAND_HAS_NOP_1;
}
case ONENAND_DEVICE_DENSITY_2Gb:
@@ -3663,7 +3677,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
int i, ret;
int block;
struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
+ .mode = MTD_OPS_PLACE_OOB,
.ooboffs = 0,
.ooblen = mtd->oobsize,
.datbuf = NULL,
@@ -4054,6 +4068,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
this->ecclayout = &onenand_oob_128;
mtd->subpage_sft = 2;
}
+ if (ONENAND_IS_NOP_1(this))
+ mtd->subpage_sft = 0;
break;
case 64:
this->ecclayout = &onenand_oob_64;
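
The error-code comparisons replaced throughout onenand_base.c go through new helpers from include/linux/mtd/mtd.h. They are, at this point in the tree, simple inline wrappers along these lines (paraphrased from the header, not quoted verbatim), so the conversion is mechanical:

static inline int mtd_is_bitflip(int err)
{
	return err == -EUCLEAN;		/* corrected bit-flip, data is good */
}

static inline int mtd_is_eccerr(int err)
{
	return err == -EBADMSG;		/* uncorrectable ECC error */
}

static inline int mtd_is_bitflip_or_eccerr(int err)
{
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
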
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index fc2c16a0fd1..66fe3b7e785 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
+#include <linux/export.h>
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
@@ -80,7 +81,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
startblock = 0;
from = 0;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = readlen;
ops.oobbuf = buf;
ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -153,7 +154,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
- DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int) offs, block >> 1, res);
switch ((int) res) {
@@ -188,10 +189,8 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
- if (!bbm->bbt) {
- printk(KERN_ERR "onenand_scan_bbt: Out of memory\n");
+ if (!bbm->bbt)
return -ENOMEM;
- }
/* Set the bad block position */
bbm->badblockpos = ONENAND_BADBLOCK_POS;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 3306b5b3c73..5474547eafc 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,7 +147,6 @@ struct s3c_onenand {
struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
- struct mtd_partition *parts;
};
#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -157,8 +156,6 @@ struct s3c_onenand {
static struct s3c_onenand *onenand;
-static const char *part_probes[] = { "cmdlinepart", NULL, };
-
static inline int s3c_read_reg(int offset)
{
return readl(onenand->base + offset);
@@ -1017,13 +1014,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
- err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
- if (err > 0)
- mtd_device_register(mtd, onenand->parts, err);
- else if (err <= 0 && pdata && pdata->parts)
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
- else
- err = mtd_device_register(mtd, NULL, 0);
+ err = mtd_device_parse_register(mtd, NULL, 0,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 7a87d07cd79..e366b1d84ea 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -28,6 +28,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/module.h>
struct fis_image_desc {
unsigned char name[16]; // Null terminated name
@@ -56,8 +57,8 @@ static inline int redboot_checksum(struct fis_image_desc *img)
}
static int parse_redboot_partitions(struct mtd_info *master,
- struct mtd_partition **pparts,
- unsigned long fis_origin)
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
{
int nrparts = 0;
struct fis_image_desc *buf;
@@ -197,11 +198,10 @@ static int parse_redboot_partitions(struct mtd_info *master,
goto out;
}
new_fl->img = &buf[i];
- if (fis_origin) {
- buf[i].flash_base -= fis_origin;
- } else {
- buf[i].flash_base &= master->size-1;
- }
+ if (data && data->origin)
+ buf[i].flash_base -= data->origin;
+ else
+ buf[i].flash_base &= master->size-1;
/* I'm sure the JFFS2 code has done me permanent damage.
* I now think the following is _normal_
@@ -297,6 +297,9 @@ static struct mtd_part_parser redboot_parser = {
.name = "RedBoot",
};
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("RedBoot");
+
static int __init redboot_parser_init(void)
{
return register_mtd_parser(&redboot_parser);
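
The MODULE_ALIAS("RedBoot") addition exists for the same reason as the "ofoldpart" alias earlier: the MTD core looks parsers up by name and, if none is registered, calls request_module() with that exact string, so a modular parser is only autoloaded if the module answers to the parser name. A caller typically selects it like this (illustrative fragment; the probes array is not from this patch):

static int my_probe_partitions(struct mtd_info *mtd)
{
	static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };

	/* If "RedBoot" is not yet registered, the core does
	 * request_module("RedBoot"), which resolves via the alias above. */
	return mtd_device_parse_register(mtd, probes, 0, NULL, 0);
}
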
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index cc4d1805b86..73ae217a425 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -18,6 +18,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <asm/types.h>
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index ed3d6cd2c6d..fddb714e323 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -34,7 +34,7 @@ module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
-/* ------------------- sysfs attributtes ---------------------------------- */
+/* ------------------- sysfs attributes ---------------------------------- */
struct sm_sysfs_attribute {
struct device_attribute dev_attr;
char *data;
@@ -138,7 +138,7 @@ static int sm_get_lba(uint8_t *lba)
if ((lba[0] & 0xF8) != 0x10)
return -2;
- /* check parity - endianess doesn't matter */
+ /* check parity - endianness doesn't matter */
if (hweight16(*(uint16_t *)lba) & 1)
return -2;
@@ -147,7 +147,7 @@ static int sm_get_lba(uint8_t *lba)
/*
- * Read LBA asscociated with block
+ * Read LBA associated with block
* returns -1, if block is erased
* returns -2 if error happens
*/
@@ -252,11 +252,11 @@ static int sm_read_sector(struct sm_ftl *ftl,
return 0;
}
- /* User might not need the oob, but we do for data vertification */
+ /* User might not need the oob, but we do for data verification */
if (!oob)
oob = &tmp_oob;
- ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
@@ -276,12 +276,12 @@ again:
return ret;
}
- /* Unfortunelly, oob read will _always_ succeed,
+ /* Unfortunately, oob read will _always_ succeed,
despite card removal..... */
ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */
- if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
+ if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
dbg("read of block %d at zone %d, failed due to error (%d)",
block, zone, ret);
goto again;
@@ -306,7 +306,7 @@ again:
}
/* Test ECC*/
- if (ret == -EBADMSG ||
+ if (mtd_is_eccerr(ret) ||
(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
dbg("read of block %d at zone %d, failed due to ECC error",
@@ -336,7 +336,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
if (ftl->unstable)
return -EIO;
- ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
ops.len = SM_SECTOR_SIZE;
ops.datbuf = buffer;
ops.ooboffs = 0;
@@ -447,14 +447,14 @@ static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
/* We aren't checking the return value, because we don't care */
/* This also fails on fake xD cards, but I guess these won't expose
- any bad blocks till fail completly */
+ any bad blocks till fail completely */
for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
/*
* Erase a block within a zone
- * If erase succedes, it updates free block fifo, otherwise marks block as bad
+ * If erase succeeds, it updates free block fifo, otherwise marks block as bad
*/
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
int put_free)
@@ -510,7 +510,7 @@ static void sm_erase_callback(struct erase_info *self)
complete(&ftl->erase_completion);
}
-/* Throughtly test that block is valid. */
+/* Thoroughly test that block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
int boffset;
@@ -526,7 +526,7 @@ static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
for (boffset = 0; boffset < ftl->block_size;
boffset += SM_SECTOR_SIZE) {
- /* This shoudn't happen anyway */
+ /* This shouldn't happen anyway */
if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
return -2;
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 5cd18979333..976e3d28b96 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -135,8 +135,7 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
/* Found */
cis_sector = (int)(offset >> SECTOR_SHIFT);
} else {
- DEBUG(MTD_DEBUG_LEVEL1,
- "SSFDC_RO: CIS/IDI sector not found"
+ pr_debug("SSFDC_RO: CIS/IDI sector not found"
" on %s (mtd%d)\n", mtd->name,
mtd->index);
}
@@ -170,7 +169,7 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.ooblen = OOB_SIZE;
ops.oobbuf = buf;
@@ -221,8 +220,7 @@ static int get_logical_address(uint8_t *oob_buf)
block_address >>= 1;
if (get_parity(block_address, 10) != parity) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "SSFDC_RO: logical address field%d"
+ pr_debug("SSFDC_RO: logical address field%d"
"parity error(0x%04X)\n", j+1,
block_address);
} else {
@@ -235,7 +233,7 @@ static int get_logical_address(uint8_t *oob_buf)
if (!ok)
block_address = -2;
- DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n",
+ pr_debug("SSFDC_RO: get_logical_address() %d\n",
block_address);
return block_address;
@@ -249,7 +247,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
int ret, block_address, phys_block;
struct mtd_info *mtd = ssfdc->mbd.mtd;
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
+ pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
ssfdc->map_len,
(unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
@@ -262,8 +260,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
ret = read_raw_oob(mtd, offset, oob_buf);
if (ret < 0) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "SSFDC_RO: mtd read_oob() failed at %lu\n",
+ pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n",
offset);
return -1;
}
@@ -279,8 +276,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
ssfdc->logic_block_map[block_address] =
(unsigned short)phys_block;
- DEBUG(MTD_DEBUG_LEVEL2,
- "SSFDC_RO: build_block_map() phys_block=%d,"
+ pr_debug("SSFDC_RO: build_block_map() phys_block=%d,"
"logic_block_addr=%d, zone=%d\n",
phys_block, block_address, zone_index);
}
@@ -304,11 +300,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
- if (!ssfdc) {
- printk(KERN_WARNING
- "SSFDC_RO: out of memory for data structures\n");
+ if (!ssfdc)
return;
- }
ssfdc->mbd.mtd = mtd;
ssfdc->mbd.devnum = -1;
@@ -319,8 +312,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
ssfdc->erase_size = mtd->erasesize;
ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
- DEBUG(MTD_DEBUG_LEVEL1,
- "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
+ pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
@@ -331,7 +323,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
((long)ssfdc->sectors * (long)ssfdc->heads));
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
+ pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
(long)ssfdc->cylinders * (long)ssfdc->heads *
(long)ssfdc->sectors);
@@ -342,11 +334,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
/* Allocate logical block map */
ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len, GFP_KERNEL);
- if (!ssfdc->logic_block_map) {
- printk(KERN_WARNING
- "SSFDC_RO: out of memory for data structures\n");
+ if (!ssfdc->logic_block_map)
goto out_err;
- }
memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len);
@@ -371,7 +360,7 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
{
struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
+ pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
@@ -387,8 +376,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
offset = (int)(logic_sect_no % sectors_per_block);
block_address = (int)(logic_sect_no / sectors_per_block);
- DEBUG(MTD_DEBUG_LEVEL3,
- "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
+ pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
" block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
block_address);
@@ -397,8 +385,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
block_address = ssfdc->logic_block_map[block_address];
- DEBUG(MTD_DEBUG_LEVEL3,
- "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
block_address);
if (block_address < 0xffff) {
@@ -407,8 +394,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
sect_no = (unsigned long)block_address * sectors_per_block +
offset;
- DEBUG(MTD_DEBUG_LEVEL3,
- "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
sect_no);
if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
@@ -424,7 +410,7 @@ static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
- DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
+ pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
geo->heads = ssfdc->heads;
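
The DEBUG(MTD_DEBUG_LEVEL*) conversions in this file and in onenand_bbt.c above swap MTD's private debug levels for the kernel's standard pr_debug(), which is compiled out unless the file is built with -DDEBUG or the message is enabled through dynamic debug. A converted call and the runtime knob, for reference (the debugfs path assumes CONFIG_DYNAMIC_DEBUG and the usual mount point):

	/* silent by default; enable at run time with e.g.
	 *   echo 'file ssfdc.c +p' > /sys/kernel/debug/dynamic_debug/control */
	pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
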
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index dec92ae6111..933f7e5f32d 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -30,7 +30,7 @@
#define PRINT_PREF KERN_INFO "mtd_oobtest: "
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -131,7 +131,7 @@ static int write_eraseblock(int ebnum)
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
set_random_data(writebuf, use_len);
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
@@ -184,7 +184,7 @@ static int verify_eraseblock(int ebnum)
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
set_random_data(writebuf, use_len);
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
@@ -211,7 +211,7 @@ static int verify_eraseblock(int ebnum)
if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
int k;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
@@ -276,7 +276,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
size_t len = mtd->ecclayout->oobavail * pgcnt;
set_random_data(writebuf, len);
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = len;
@@ -366,6 +366,13 @@ static int __init mtd_oobtest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
printk(PRINT_PREF "MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
@@ -507,7 +514,7 @@ static int __init mtd_oobtest_init(void)
addr0 += mtd->erasesize;
/* Attempt to write off end of OOB */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
@@ -527,7 +534,7 @@ static int __init mtd_oobtest_init(void)
}
/* Attempt to read off end of OOB */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
@@ -551,7 +558,7 @@ static int __init mtd_oobtest_init(void)
"block is bad\n");
else {
/* Attempt to write off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -571,7 +578,7 @@ static int __init mtd_oobtest_init(void)
}
/* Attempt to read off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -595,7 +602,7 @@ static int __init mtd_oobtest_init(void)
goto out;
/* Attempt to write off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
@@ -615,7 +622,7 @@ static int __init mtd_oobtest_init(void)
}
/* Attempt to read off end of device */
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
@@ -655,7 +662,7 @@ static int __init mtd_oobtest_init(void)
addr = (i + 1) * mtd->erasesize - mtd->writesize;
for (pg = 0; pg < cnt; ++pg) {
set_random_data(writebuf, sz);
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = sz;
@@ -683,7 +690,7 @@ static int __init mtd_oobtest_init(void)
continue;
set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
addr = (i + 1) * mtd->erasesize - mtd->writesize;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail * 2;
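
Every mtd_*test module below gains the same two-part guard: dev now defaults to -EINVAL instead of 0, and init bails out until the user names a device, so the destructive tests can no longer wipe mtd0 just because the module was loaded without arguments. Distilled to its essentials (a sketch of the shared pattern, not a literal copy of any one file):

static int dev = -EINVAL;		/* no implicit default device */
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");

static int __init my_test_init(void)
{
	if (dev < 0) {
		pr_info("Please specify a valid MTD device via the 'dev' module parameter\n");
		return -EINVAL;
	}
	/* ... get_mtd_device(NULL, dev) and run the test ... */
	return 0;
}

A run therefore looks like "modprobe mtd_oobtest dev=5" rather than relying on the old implicit device 0.
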
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 00b937e38c1..afafb6935fd 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -30,7 +30,7 @@
#define PRINT_PREF KERN_INFO "mtd_pagetest: "
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -128,7 +128,7 @@ static int verify_eraseblock(int ebnum)
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
err = mtd->read(mtd, addr0, bufsize, &read, twopages);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -136,7 +136,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -146,7 +146,7 @@ static int verify_eraseblock(int ebnum)
memset(twopages, 0, bufsize);
read = 0;
err = mtd->read(mtd, addr, bufsize, &read, twopages);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -164,7 +164,7 @@ static int verify_eraseblock(int ebnum)
unsigned long oldnext = next;
/* Do a read to set the internal dataRAMs to different data */
err = mtd->read(mtd, addr0, bufsize, &read, twopages);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -172,7 +172,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -182,7 +182,7 @@ static int verify_eraseblock(int ebnum)
memset(twopages, 0, bufsize);
read = 0;
err = mtd->read(mtd, addr, bufsize, &read, twopages);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -231,7 +231,7 @@ static int crosstest(void)
read = 0;
addr = addrn - pgsize - pgsize;
err = mtd->read(mtd, addr, pgsize, &read, pp1);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -244,7 +244,7 @@ static int crosstest(void)
read = 0;
addr = addrn - pgsize - pgsize - pgsize;
err = mtd->read(mtd, addr, pgsize, &read, pp1);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -258,7 +258,7 @@ static int crosstest(void)
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
err = mtd->read(mtd, addr, pgsize, &read, pp2);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -272,7 +272,7 @@ static int crosstest(void)
addr = addrn - pgsize;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
err = mtd->read(mtd, addr, pgsize, &read, pp3);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -286,7 +286,7 @@ static int crosstest(void)
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
err = mtd->read(mtd, addr, pgsize, &read, pp4);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -345,7 +345,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -383,7 +383,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -439,7 +439,7 @@ static int erasetest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
err = mtd->read(mtd, addr0, pgsize, &read, twopages);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -504,6 +504,13 @@ static int __init mtd_pagetest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
printk(PRINT_PREF "MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index afe71aa15c4..550fe51225a 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -29,7 +29,7 @@
#define PRINT_PREF KERN_INFO "mtd_readtest: "
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -66,7 +66,7 @@ static int read_eraseblock_by_page(int ebnum)
if (mtd->oobsize) {
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobsize;
@@ -75,7 +75,8 @@ static int read_eraseblock_by_page(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = oobbuf;
ret = mtd->read_oob(mtd, addr, &ops);
- if (ret || ops.oobretlen != mtd->oobsize) {
+ if ((ret && !mtd_is_bitflip(ret)) ||
+ ops.oobretlen != mtd->oobsize) {
printk(PRINT_PREF "error: read oob failed at "
"%#llx\n", (long long)addr);
if (!err)
@@ -169,6 +170,12 @@ static int __init mtd_readtest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ return -EINVAL;
+ }
+
printk(PRINT_PREF "MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 627d4e2466a..493b367bdd3 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -29,7 +29,7 @@
#define PRINT_PREF KERN_INFO "mtd_speedtest: "
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -216,7 +216,7 @@ static int read_eraseblock(int ebnum)
err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
/* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != mtd->erasesize) {
printk(PRINT_PREF "error: read failed at %#llx\n", addr);
@@ -237,7 +237,7 @@ static int read_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
err = mtd->read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -263,7 +263,7 @@ static int read_eraseblock_by_2pages(int ebnum)
for (i = 0; i < n; i++) {
err = mtd->read(mtd, addr, sz, &read, buf);
/* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != sz) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -278,7 +278,7 @@ static int read_eraseblock_by_2pages(int ebnum)
if (pgcnt % 2) {
err = mtd->read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -361,6 +361,13 @@ static int __init mtd_speedtest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
if (count)
printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
else
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 531625fc925..52ffd9120e0 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -30,7 +30,7 @@
#define PRINT_PREF KERN_INFO "mtd_stresstest: "
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -154,7 +154,7 @@ static int do_read(void)
}
addr = eb * mtd->erasesize + offs;
err = mtd->read(mtd, addr, len, &read, readbuf);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
err = 0;
if (unlikely(err || read != len)) {
printk(PRINT_PREF "error: read failed at 0x%llx\n",
@@ -250,6 +250,13 @@ static int __init mtd_stresstest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
printk(PRINT_PREF "MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 334eae53a3d..1a05bfac4ee 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -29,7 +29,7 @@
#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -198,7 +198,7 @@ static int verify_eraseblock(int ebnum)
read = 0;
err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
- if (err == -EUCLEAN && read == subpgsize) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
(long long)addr);
err = 0;
@@ -226,7 +226,7 @@ static int verify_eraseblock(int ebnum)
read = 0;
err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
- if (err == -EUCLEAN && read == subpgsize) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
(long long)addr);
err = 0;
@@ -264,7 +264,7 @@ static int verify_eraseblock2(int ebnum)
read = 0;
err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
- if (err == -EUCLEAN && read == subpgsize * k) {
+ if (mtd_is_bitflip(err) && read == subpgsize * k) {
printk(PRINT_PREF "ECC correction at %#llx\n",
(long long)addr);
err = 0;
@@ -298,7 +298,7 @@ static int verify_eraseblock_ff(int ebnum)
read = 0;
err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
- if (err == -EUCLEAN && read == subpgsize) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
(long long)addr);
err = 0;
@@ -379,6 +379,13 @@ static int __init mtd_subpagetest_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
printk(PRINT_PREF "MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index 5c6c3d24890..03ab649a696 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -46,7 +46,7 @@ static int pgcnt;
module_param(pgcnt, int, S_IRUGO);
MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
-static int dev;
+static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -138,7 +138,7 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
retry:
err = mtd->read(mtd, addr, len, &read, check_buf);
- if (err == -EUCLEAN)
+ if (mtd_is_bitflip(err))
printk(PRINT_PREF "single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
else if (err) {
@@ -213,6 +213,13 @@ static int __init tort_init(void)
printk(KERN_INFO "=================================================\n");
printk(PRINT_PREF "Warning: this program is trying to wear out your "
"flash, stop it if this is not wanted.\n");
+
+ if (dev < 0) {
+ printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
printk(PRINT_PREF "MTD device: %d\n", dev);
printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
ebcnt, eb, eb + ebcnt - 1, dev);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4be67181501..fb7f19b62d9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -443,7 +443,7 @@ retry:
if (err == UBI_IO_BITFLIPS) {
scrub = 1;
err = 0;
- } else if (err == -EBADMSG) {
+ } else if (mtd_is_eccerr(err)) {
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
goto out_unlock;
scrub = 1;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 6ba55c23587..f20b6f22f24 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -172,9 +172,9 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
retry:
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
if (err) {
- const char *errstr = (err == -EBADMSG) ? " (ECC error)" : "";
+ const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
- if (err == -EUCLEAN) {
+ if (mtd_is_bitflip(err)) {
/*
* -EUCLEAN is reported if there was a bit-flip which
* was corrected, so this is harmless.
@@ -205,7 +205,7 @@ retry:
* all the requested data. But some buggy drivers might do
* this, so we change it to -EIO.
*/
- if (read != len && err == -EBADMSG) {
+ if (read != len && mtd_is_eccerr(err)) {
ubi_assert(0);
err = -EIO;
}
@@ -469,7 +469,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
out:
mutex_unlock(&ubi->buf_mutex);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* If a bit-flip or data integrity error was detected, the test
* has not passed because it happened on a freshly erased
@@ -760,7 +760,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
if (read_err) {
- if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
+ if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
/*
@@ -776,7 +776,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
- if (read_err == -EBADMSG)
+ if (mtd_is_eccerr(read_err))
return UBI_IO_BAD_HDR_EBADMSG;
/*
@@ -1032,12 +1032,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
p = (char *)vid_hdr - ubi->vid_hdr_shift;
read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
- if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
+ if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
- if (read_err == -EBADMSG)
+ if (mtd_is_eccerr(read_err))
return UBI_IO_BAD_HDR_EBADMSG;
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
@@ -1219,7 +1219,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
return -ENOMEM;
err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -1306,7 +1306,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -1358,7 +1358,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
}
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
- if (err && err != -EUCLEAN)
+ if (err && !mtd_is_bitflip(err))
goto out_free;
for (i = 0; i < len; i++) {
@@ -1422,7 +1422,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
}
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
- if (err && err != -EUCLEAN) {
+ if (err && !mtd_is_bitflip(err)) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
goto error;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d39716e5b20..1a35fc5e3b4 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -410,7 +410,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
return 0;
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
- if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn("mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
}
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index ff2a65c37f6..f6a7d7ac4b9 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -81,7 +81,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
if (err) {
- if (err == -EBADMSG)
+ if (mtd_is_eccerr(err))
err = 1;
break;
}
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index a3a198f9b98..0cb17d936b5 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -395,7 +395,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
}
err = ubi_io_read_data(ubi, buf, pnum, 0, len);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto out_free_buf;
data_crc = be32_to_cpu(vid_hdr->data_crc);
@@ -793,7 +793,7 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
ubi->leb_size);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* Bit-flips or integrity errors while reading the data area.
* It is difficult to say for sure what type of corruption is
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 97e093d1967..863835f4aef 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -26,6 +26,7 @@
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "ubi.h"
#ifdef CONFIG_MTD_UBI_DEBUG
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 4b50a3029b8..9ad18da1891 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -423,7 +423,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
ubi->vtbl_size);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
/*
* Scrub the PEB later. Note, -EBADMSG indicates an
* uncorrectable ECC error, but we have our own CRC and
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c34cc1e7c6f..b0c57725648 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -550,7 +550,7 @@ down:
/*
* Get link speed and duplex from the slave's base driver
* using ethtool. If for some reason the call fails or the
- * values are invalid, fake speed and duplex to 100/Full
+ * values are invalid, set speed and duplex to -1,
* and return error.
*/
static int bond_update_speed_duplex(struct slave *slave)
@@ -560,9 +560,8 @@ static int bond_update_speed_duplex(struct slave *slave)
u32 slave_speed;
int res;
- /* Fake speed and duplex */
- slave->speed = SPEED_100;
- slave->duplex = DUPLEX_FULL;
+ slave->speed = SPEED_UNKNOWN;
+ slave->duplex = DUPLEX_UNKNOWN;
res = __ethtool_get_settings(slave_dev, &ecmd);
if (res < 0)
@@ -1751,16 +1750,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->link = BOND_LINK_DOWN;
}
- if (bond_update_speed_duplex(new_slave) &&
- (new_slave->link != BOND_LINK_DOWN)) {
- pr_warning("%s: Warning: failed to get speed and duplex from %s, assumed to be 100Mb/sec and Full.\n",
- bond_dev->name, new_slave->dev->name);
-
- if (bond->params.mode == BOND_MODE_8023AD) {
- pr_warning("%s: Warning: Operation of 802.3ad mode requires ETHTOOL support in base driver for proper aggregator selection.\n",
- bond_dev->name);
- }
- }
+ bond_update_speed_duplex(new_slave);
if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
@@ -3220,6 +3210,7 @@ static int bond_slave_netdev_event(unsigned long event,
{
struct net_device *bond_dev = slave_dev->master;
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave = NULL;
switch (event) {
case NETDEV_UNREGISTER:
@@ -3230,20 +3221,16 @@ static int bond_slave_netdev_event(unsigned long event,
bond_release(bond_dev, slave_dev);
}
break;
+ case NETDEV_UP:
case NETDEV_CHANGE:
- if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) {
- struct slave *slave;
+ slave = bond_get_slave_by_dev(bond, slave_dev);
+ if (slave) {
+ u32 old_speed = slave->speed;
+ u8 old_duplex = slave->duplex;
- slave = bond_get_slave_by_dev(bond, slave_dev);
- if (slave) {
- u32 old_speed = slave->speed;
- u8 old_duplex = slave->duplex;
-
- bond_update_speed_duplex(slave);
-
- if (bond_is_lb(bond))
- break;
+ bond_update_speed_duplex(slave);
+ if (bond->params.mode == BOND_MODE_8023AD) {
if (old_speed != slave->speed)
bond_3ad_adapter_speed_changed(slave);
if (old_duplex != slave->duplex)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 95de93b9038..ad284baafe8 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -1,4 +1,5 @@
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "bonding.h"
@@ -157,8 +158,16 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n",
(slave->link == BOND_LINK_UP) ? "up" : "down");
- seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
- seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
+ if (slave->speed == SPEED_UNKNOWN)
+ seq_printf(seq, "Speed: %s\n", "Unknown");
+ else
+ seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
+
+ if (slave->duplex == DUPLEX_UNKNOWN)
+ seq_printf(seq, "Duplex: %s\n", "Unknown");
+ else
+ seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
+
seq_printf(seq, "Link Failure Count: %u\n",
slave->link_failure_count);
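Together the bond_main.c and bond_procfs.c hunks above stop faking 100/Full for slaves whose drivers cannot report link parameters: the slave now carries the SPEED_UNKNOWN/DUPLEX_UNKNOWN sentinels and procfs prints "Unknown" for them. A minimal sketch of the query side of that pattern, assuming the linux/ethtool.h sentinels (SPEED_UNKNOWN is -1, DUPLEX_UNKNOWN is 0xff); the function name is illustrative, not bonding's:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Default to "unknown" and only overwrite when ethtool reports sane values. */
static int query_speed_duplex(struct net_device *dev, u32 *speed, u8 *duplex)
{
        struct ethtool_cmd ecmd;
        u32 s;

        *speed = SPEED_UNKNOWN;
        *duplex = DUPLEX_UNKNOWN;

        if (__ethtool_get_settings(dev, &ecmd) < 0)
                return -1;

        s = ethtool_cmd_speed(&ecmd);
        if (s == 0 || s == (u32)-1)
                return -1;
        if (ecmd.duplex != DUPLEX_FULL && ecmd.duplex != DUPLEX_HALF)
                return -1;

        *speed = s;
        *duplex = ecmd.duplex;
        return 0;
}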
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6dff5a0e733..597f4d45c63 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -159,6 +159,7 @@ config S6GMAC
will be called s6gmac.
source "drivers/net/ethernet/seeq/Kconfig"
+source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c53ad3afc99..be5dde04026 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
+obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 82386677bb8..4865ff14beb 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -541,19 +541,17 @@ static void au1000_reset_mac(struct net_device *dev)
* these are not descriptors sitting in memory.
*/
static void
-au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
{
int i;
for (i = 0; i < NUM_RX_DMA; i++) {
- aup->rx_dma_ring[i] =
- (struct rx_dma *)
- (rx_base + sizeof(struct rx_dma)*i);
+ aup->rx_dma_ring[i] = (struct rx_dma *)
+ (tx_base + 0x100 + sizeof(struct rx_dma) * i);
}
for (i = 0; i < NUM_TX_DMA; i++) {
- aup->tx_dma_ring[i] =
- (struct tx_dma *)
- (tx_base + sizeof(struct tx_dma)*i);
+ aup->tx_dma_ring[i] = (struct tx_dma *)
+ (tx_base + sizeof(struct tx_dma) * i);
}
}
@@ -1026,7 +1024,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
struct net_device *dev = NULL;
struct db_dest *pDB, *pDBfree;
int irq, i, err = 0;
- struct resource *base, *macen;
+ struct resource *base, *macen, *macdma;
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base) {
@@ -1049,6 +1047,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
goto out;
}
+ macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!macdma) {
+ dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
+ err = -ENODEV;
+ goto out;
+ }
+
if (!request_mem_region(base->start, resource_size(base),
pdev->name)) {
dev_err(&pdev->dev, "failed to request memory region for base registers\n");
@@ -1063,6 +1068,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
goto err_request;
}
+ if (!request_mem_region(macdma->start, resource_size(macdma),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
+ err = -ENXIO;
+ goto err_macdma;
+ }
+
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
dev_err(&pdev->dev, "alloc_etherdev failed\n");
@@ -1109,10 +1121,14 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
aup->mac_id = pdev->id;
- if (pdev->id == 0)
- au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- else if (pdev->id == 1)
- au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+ aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
+ if (!aup->macdma) {
+ dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
+ err = -ENXIO;
+ goto err_remap3;
+ }
+
+ au1000_setup_hw_rings(aup, aup->macdma);
/* set a random MAC now in case platform_data doesn't provide one */
random_ether_addr(dev->dev_addr);
@@ -1252,6 +1268,8 @@ err_out:
err_mdiobus_reg:
mdiobus_free(aup->mii_bus);
err_mdiobus_alloc:
+ iounmap(aup->macdma);
+err_remap3:
iounmap(aup->enable);
err_remap2:
iounmap(aup->mac);
@@ -1261,6 +1279,8 @@ err_remap1:
err_vaddr:
free_netdev(dev);
err_alloc:
+ release_mem_region(macdma->start, resource_size(macdma));
+err_macdma:
release_mem_region(macen->start, resource_size(macen));
err_request:
release_mem_region(base->start, resource_size(base));
@@ -1293,9 +1313,13 @@ static int __devexit au1000_remove(struct platform_device *pdev)
(NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr);
+ iounmap(aup->macdma);
iounmap(aup->mac);
iounmap(aup->enable);
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ release_mem_region(base->start, resource_size(base));
+
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(base->start, resource_size(base));
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 6229c774552..4b7f7ad62bb 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -124,7 +124,7 @@ struct au1000_private {
*/
struct mac_reg *mac; /* mac registers */
u32 *enable; /* address of MAC Enable Register */
-
+ void __iomem *macdma; /* base of MAC DMA port */
u32 vaddr; /* virtual address of rx/tx buffers */
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
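The au1000 probe above gains a third platform resource (the MACDMA register block) and two matching error labels (err_remap3/err_macdma), so a failure after any acquisition unwinds exactly what was obtained so far. A stripped-down sketch of that acquire/unwind pattern; the resource index and names are illustrative, not the au1000 driver's:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;
        int err;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        if (!res)
                return -ENODEV;

        if (!request_mem_region(res->start, resource_size(res), pdev->name))
                return -ENXIO;

        regs = ioremap_nocache(res->start, resource_size(res));
        if (!regs) {
                err = -ENXIO;
                goto err_release;        /* undo the request_mem_region() */
        }

        /* ... hand regs to the rest of the driver; kept mapped while bound ... */
        return 0;

err_release:
        release_mem_region(res->start, resource_size(res));
        return err;
}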
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index a759d5483ab..1375e2dc946 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -52,18 +52,6 @@ config BMAC
To compile this driver as a module, choose M here: the module
will be called bmac.
-config MAC89x0
- tristate "Macintosh CS89x0 based ethernet cards"
- depends on MAC
- ---help---
- Support for CS89x0 chipset based Ethernet cards. If you have a
- Nubus or LC-PDS network (Ethernet) card of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. This module will
- be called mac89x0.
-
config MACMACE
bool "Macintosh (AV) onboard MACE ethernet"
depends on MAC
diff --git a/drivers/net/ethernet/apple/Makefile b/drivers/net/ethernet/apple/Makefile
index 0d3a5919c95..86eaa17af0f 100644
--- a/drivers/net/ethernet/apple/Makefile
+++ b/drivers/net/ethernet/apple/Makefile
@@ -4,5 +4,4 @@
obj-$(CONFIG_MACE) += mace.o
obj-$(CONFIG_BMAC) += bmac.o
-obj-$(CONFIG_MAC89x0) += mac89x0.o
obj-$(CONFIG_MACMACE) += macmace.o
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 161cbbb4814..bf4074167d6 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 120
+#define TG3_MIN_NUM 121
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "August 18, 2011"
+#define DRV_MODULE_RELDATE "November 2, 2011"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -628,19 +628,23 @@ static void tg3_ape_lock_init(struct tg3 *tp)
regbase = TG3_APE_PER_LOCK_GRANT;
/* Make sure the driver hasn't any stale locks. */
- for (i = 0; i < 8; i++) {
- if (i == TG3_APE_LOCK_GPIO)
- continue;
- tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
+ for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
+ switch (i) {
+ case TG3_APE_LOCK_PHY0:
+ case TG3_APE_LOCK_PHY1:
+ case TG3_APE_LOCK_PHY2:
+ case TG3_APE_LOCK_PHY3:
+ bit = APE_LOCK_GRANT_DRIVER;
+ break;
+ default:
+ if (!tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+ }
+ tg3_ape_write32(tp, regbase + 4 * i, bit);
}
- /* Clear the correct bit of the GPIO lock too. */
- if (!tp->pci_fn)
- bit = APE_LOCK_GRANT_DRIVER;
- else
- bit = 1 << tp->pci_fn;
-
- tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
@@ -658,6 +662,10 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return 0;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
+ if (!tp->pci_fn)
+ bit = APE_LOCK_REQ_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
break;
default:
return -EINVAL;
@@ -673,11 +681,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
off = 4 * locknum;
- if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
- bit = APE_LOCK_REQ_DRIVER;
- else
- bit = 1 << tp->pci_fn;
-
tg3_ape_write32(tp, req + off, bit);
/* Wait for up to 1 millisecond to acquire lock. */
@@ -710,6 +713,10 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
+ if (!tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
break;
default:
return;
@@ -720,11 +727,6 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
else
gnt = TG3_APE_PER_LOCK_GRANT;
- if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
- bit = APE_LOCK_GRANT_DRIVER;
- else
- bit = 1 << tp->pci_fn;
-
tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
@@ -5927,6 +5929,18 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
return work_done;
}
+static inline void tg3_reset_task_schedule(struct tg3 *tp)
+{
+ if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+ schedule_work(&tp->reset_task);
+}
+
+static inline void tg3_reset_task_cancel(struct tg3 *tp)
+{
+ cancel_work_sync(&tp->reset_task);
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+}
+
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5967,7 +5981,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
tx_recovery:
/* work_done is guaranteed to be less than budget. */
napi_complete(napi);
- schedule_work(&tp->reset_task);
+ tg3_reset_task_schedule(tp);
return work_done;
}
@@ -6002,7 +6016,7 @@ static void tg3_process_error(struct tg3 *tp)
tg3_dump_state(tp);
tg3_flag_set(tp, ERROR_PROCESSED);
- schedule_work(&tp->reset_task);
+ tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
@@ -6049,7 +6063,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
tx_recovery:
/* work_done is guaranteed to be less than budget. */
napi_complete(napi);
- schedule_work(&tp->reset_task);
+ tg3_reset_task_schedule(tp);
return work_done;
}
@@ -6338,11 +6352,11 @@ static void tg3_reset_task(struct work_struct *work)
{
struct tg3 *tp = container_of(work, struct tg3, reset_task);
int err;
- unsigned int restart_timer;
tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) {
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
return;
}
@@ -6355,9 +6369,6 @@ static void tg3_reset_task(struct work_struct *work)
tg3_full_lock(tp, 1);
- restart_timer = tg3_flag(tp, RESTART_TIMER);
- tg3_flag_clear(tp, RESTART_TIMER);
-
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
tp->write32_tx_mbox = tg3_write32_tx_mbox;
tp->write32_rx_mbox = tg3_write_flush_reg32;
@@ -6372,14 +6383,13 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_start(tp);
- if (restart_timer)
- mod_timer(&tp->timer, jiffies + 1);
-
out:
tg3_full_unlock(tp);
if (!err)
tg3_phy_start(tp);
+
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static void tg3_tx_timeout(struct net_device *dev)
@@ -6391,7 +6401,7 @@ static void tg3_tx_timeout(struct net_device *dev)
tg3_dump_state(tp);
}
- schedule_work(&tp->reset_task);
+ tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
@@ -6442,31 +6452,26 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
hwbug = 1;
if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ u32 prvidx = *entry;
u32 tmp_flag = flags & ~TXD_FLAG_END;
- while (len > TG3_TX_BD_DMA_MAX) {
+ while (len > TG3_TX_BD_DMA_MAX && *budget) {
u32 frag_len = TG3_TX_BD_DMA_MAX;
len -= TG3_TX_BD_DMA_MAX;
- if (len) {
- tnapi->tx_buffers[*entry].fragmented = true;
- /* Avoid the 8byte DMA problem */
- if (len <= 8) {
- len += TG3_TX_BD_DMA_MAX / 2;
- frag_len = TG3_TX_BD_DMA_MAX / 2;
- }
- } else
- tmp_flag = flags;
-
- if (*budget) {
- tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
- frag_len, tmp_flag, mss, vlan);
- (*budget)--;
- *entry = NEXT_TX(*entry);
- } else {
- hwbug = 1;
- break;
+ /* Avoid the 8byte DMA problem */
+ if (len <= 8) {
+ len += TG3_TX_BD_DMA_MAX / 2;
+ frag_len = TG3_TX_BD_DMA_MAX / 2;
}
+ tnapi->tx_buffers[*entry].fragmented = true;
+
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ frag_len, tmp_flag, mss, vlan);
+ *budget -= 1;
+ prvidx = *entry;
+ *entry = NEXT_TX(*entry);
+
map += frag_len;
}
@@ -6474,10 +6479,11 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
if (*budget) {
tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
len, flags, mss, vlan);
- (*budget)--;
+ *budget -= 1;
*entry = NEXT_TX(*entry);
} else {
hwbug = 1;
+ tnapi->tx_buffers[prvidx].fragmented = false;
}
}
} else {
@@ -6509,7 +6515,7 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
txb = &tnapi->tx_buffers[entry];
}
- for (i = 0; i < last; i++) {
+ for (i = 0; i <= last; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);
@@ -6559,6 +6565,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
dev_kfree_skb(new_skb);
ret = -1;
} else {
+ u32 save_entry = *entry;
+
base_flags |= TXD_FLAG_END;
tnapi->tx_buffers[*entry].skb = new_skb;
@@ -6568,7 +6576,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
new_skb->len, base_flags,
mss, vlan)) {
- tg3_tx_skb_unmap(tnapi, *entry, 0);
+ tg3_tx_skb_unmap(tnapi, save_entry, -1);
dev_kfree_skb(new_skb);
ret = -1;
}
@@ -6758,11 +6766,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
- mss, vlan))
+ mss, vlan)) {
would_hit_hwbug = 1;
-
/* Now loop through additional data fragments, and queue them. */
- if (skb_shinfo(skb)->nr_frags > 0) {
+ } else if (skb_shinfo(skb)->nr_frags > 0) {
u32 tmp_mss = mss;
if (!tg3_flag(tp, HW_TSO_1) &&
@@ -6784,11 +6791,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(&tp->pdev->dev, mapping))
goto dma_error;
- if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+ if (!budget ||
+ tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
len, base_flags |
((i == last) ? TXD_FLAG_END : 0),
- tmp_mss, vlan))
+ tmp_mss, vlan)) {
would_hit_hwbug = 1;
+ break;
+ }
}
}
@@ -6828,7 +6838,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
dma_error:
- tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
dev_kfree_skb(skb);
@@ -7281,7 +7291,8 @@ static void tg3_free_rings(struct tg3 *tp)
if (!skb)
continue;
- tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
+ tg3_tx_skb_unmap(tnapi, i,
+ skb_shinfo(skb)->nr_frags - 1);
dev_kfree_skb_any(skb);
}
@@ -9200,7 +9211,7 @@ static void tg3_timer(unsigned long __opaque)
{
struct tg3 *tp = (struct tg3 *) __opaque;
- if (tp->irq_sync)
+ if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
goto restart_timer;
spin_lock(&tp->lock);
@@ -9223,10 +9234,9 @@ static void tg3_timer(unsigned long __opaque)
}
if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- tg3_flag_set(tp, RESTART_TIMER);
spin_unlock(&tp->lock);
- schedule_work(&tp->reset_task);
- return;
+ tg3_reset_task_schedule(tp);
+ goto restart_timer;
}
}
@@ -9674,15 +9684,14 @@ static int tg3_open(struct net_device *dev)
struct tg3_napi *tnapi = &tp->napi[i];
err = tg3_request_irq(tp, i);
if (err) {
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
+ tnapi = &tp->napi[i];
free_irq(tnapi->irq_vec, tnapi);
- break;
+ }
+ goto err_out2;
}
}
- if (err)
- goto err_out2;
-
tg3_full_lock(tp, 0);
err = tg3_init_hw(tp, 1);
@@ -9783,7 +9792,7 @@ static int tg3_close(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
tg3_napi_disable(tp);
- cancel_work_sync(&tp->reset_task);
+ tg3_reset_task_cancel(tp);
netif_tx_stop_all_queues(dev);
@@ -11520,7 +11529,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
break;
}
- tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
dev_kfree_skb(skb);
if (tx_idx != tnapi->tx_prod)
@@ -14228,12 +14237,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
- if (tg3_flag(tp, PCIX_MODE)) {
- pci_read_config_dword(tp->pdev,
- tp->pcix_cap + PCI_X_STATUS, &val);
- tp->pci_fn = val & 0x7;
- } else {
- tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+ tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ tg3_flag(tp, 5780_CLASS)) {
+ if (tg3_flag(tp, PCIX_MODE)) {
+ pci_read_config_dword(tp->pdev,
+ tp->pcix_cap + PCI_X_STATUS,
+ &val);
+ tp->pci_fn = val & 0x7;
+ }
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
+ if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
+ NIC_SRAM_CPMUSTAT_SIG) {
+ tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
+ tp->pci_fn = tp->pci_fn ? 1 : 0;
+ }
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
+ if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
+ NIC_SRAM_CPMUSTAT_SIG) {
+ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
+ TG3_CPMU_STATUS_FSHFT_5719;
+ }
}
/* Get eeprom hw config before calling tg3_set_power_state().
@@ -15665,7 +15692,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
if (tp->fw)
release_firmware(tp->fw);
- cancel_work_sync(&tp->reset_task);
+ tg3_reset_task_cancel(tp);
if (tg3_flag(tp, USE_PHYLIB)) {
tg3_phy_fini(tp);
@@ -15699,7 +15726,7 @@ static int tg3_suspend(struct device *device)
if (!netif_running(dev))
return 0;
- flush_work_sync(&tp->reset_task);
+ tg3_reset_task_cancel(tp);
tg3_phy_stop(tp);
tg3_netif_stop(tp);
@@ -15812,12 +15839,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_netif_stop(tp);
del_timer_sync(&tp->timer);
- tg3_flag_clear(tp, RESTART_TIMER);
/* Want to make sure that the reset task doesn't run */
- cancel_work_sync(&tp->reset_task);
+ tg3_reset_task_cancel(tp);
tg3_flag_clear(tp, TX_RECOVERY_PENDING);
- tg3_flag_clear(tp, RESTART_TIMER);
netif_device_detach(netdev);
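Several tg3 call sites above funnel into the two new helpers tg3_reset_task_schedule()/tg3_reset_task_cancel(), which replace bare schedule_work()/cancel_work_sync() calls with a pending-bit guard so the reset work is queued at most once and the old RESTART_TIMER flag becomes unnecessary. A self-contained sketch of the pattern; names are illustrative, not tg3's:

#include <linux/bitops.h>
#include <linux/workqueue.h>

struct example_dev {
        unsigned long flags;
#define EXAMPLE_RESET_PENDING 0
        struct work_struct reset_task;        /* INIT_WORK()ed at setup time */
};

/* Queue the reset worker only if one is not already pending. */
static void example_reset_schedule(struct example_dev *ed)
{
        if (!test_and_set_bit(EXAMPLE_RESET_PENDING, &ed->flags))
                schedule_work(&ed->reset_task);
}

/* The worker clears the bit when it finishes; teardown clears it too. */
static void example_reset_cancel(struct example_dev *ed)
{
        cancel_work_sync(&ed->reset_task);
        clear_bit(EXAMPLE_RESET_PENDING, &ed->flags);
}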
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index f32f288134c..94b4bd049a3 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1095,6 +1095,11 @@
#define TG3_CPMU_CLCK_ORIDE 0x00003624
#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+#define TG3_CPMU_STATUS 0x0000362c
+#define TG3_CPMU_STATUS_FMSK_5717 0x20000000
+#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000
+#define TG3_CPMU_STATUS_FSHFT_5719 30
+
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -2128,6 +2133,10 @@
#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
+#define NIC_SRAM_CPMU_STATUS 0x00000e00
+#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c
+#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff
+
#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -2344,9 +2353,13 @@
#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
/* APE convenience enumerations. */
-#define TG3_APE_LOCK_GRC 1
-#define TG3_APE_LOCK_MEM 4
-#define TG3_APE_LOCK_GPIO 7
+#define TG3_APE_LOCK_PHY0 0
+#define TG3_APE_LOCK_GRC 1
+#define TG3_APE_LOCK_PHY1 2
+#define TG3_APE_LOCK_PHY2 3
+#define TG3_APE_LOCK_MEM 4
+#define TG3_APE_LOCK_PHY3 5
+#define TG3_APE_LOCK_GPIO 7
#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
@@ -2866,7 +2879,6 @@ enum TG3_FLAGS {
TG3_FLAG_JUMBO_CAPABLE,
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
- TG3_FLAG_RESTART_TIMER,
TG3_FLAG_TSO_BUG,
TG3_FLAG_IS_5788,
TG3_FLAG_MAX_RXPEND_64,
@@ -2909,6 +2921,7 @@ enum TG3_FLAGS {
TG3_FLAG_APE_HAS_NCSI,
TG3_FLAG_5717_PLUS,
TG3_FLAG_4K_FIFO_LIMIT,
+ TG3_FLAG_RESET_TASK_PENDING,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
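The new TG3_CPMU_STATUS_* definitions above are consumed by the tg3_get_invariants() hunk earlier in this diff: on 5719/5720 class chips the PCI function number is recovered from bits 31:30 of the CPMU status word. A small worked sketch of that mask-and-shift decode, using the masks defined above:

/*
 * Example: val = 0x8000362c
 *   (val & TG3_CPMU_STATUS_FMSK_5719) == 0x80000000
 *   >> TG3_CPMU_STATUS_FSHFT_5719 (30) == 2, i.e. PCI function 2.
 */
static inline u32 cpmu_status_to_pci_fn_5719(u32 val)
{
        return (val & TG3_CPMU_STATUS_FMSK_5719) >> TG3_CPMU_STATUS_FSHFT_5719;
}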
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 5d7872ecff5..7f3091e7eb4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -25,6 +25,7 @@
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
+#include <linux/module.h>
#include "bnad.h"
#include "bna.h"
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 1b0ba8c819f..56624d30348 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -35,7 +35,7 @@
#include <asm/mach-types.h>
#include <mach/at91rm9200_emac.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <mach/board.h>
#include "at91_ether.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index da5a5d9b8af..90ff1318cc0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -40,6 +40,7 @@
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include "common.h"
#include "regs.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 41540978a17..70fec8b1140 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -35,6 +35,7 @@
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a2d323c473f..6ac77a62f36 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -37,6 +37,9 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
@@ -503,10 +506,6 @@ struct l2t_data *t4_init_l2t(void)
return d;
}
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
struct l2t_entry *l2tab = seq->private;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ddc16985d0f..140254c7cba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -40,6 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
+#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include "cxgb4.h"
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 6cbb81ccc02..1f8648f099c 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_CIRRUS
bool "Cirrus devices"
default y
depends on ISA || EISA || MACH_IXDP2351 || ARCH_IXDP2X01 \
- || MACH_MX31ADS || MACH_QQ2440 || (ARM && ARCH_EP93XX)
+ || MACH_MX31ADS || MACH_QQ2440 || (ARM && ARCH_EP93XX) || MAC
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -47,4 +47,16 @@ config EP93XX_ETH
This is a driver for the ethernet hardware included in EP93xx CPUs.
Say Y if you are building a kernel for EP93xx based devices.
+config MAC89x0
+ tristate "Macintosh CS89x0 based ethernet cards"
+ depends on MAC
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ Nubus or LC-PDS network (Ethernet) card of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. This module will
+ be called mac89x0.
+
endif # NET_VENDOR_CIRRUS
diff --git a/drivers/net/ethernet/cirrus/Makefile b/drivers/net/ethernet/cirrus/Makefile
index 14bd77e0cb5..ca245e2b5d9 100644
--- a/drivers/net/ethernet/cirrus/Makefile
+++ b/drivers/net/ethernet/cirrus/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
+obj-$(CONFIG_MAC89x0) += mac89x0.o
diff --git a/drivers/net/ethernet/apple/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 83781f316d1..83781f316d1 100644
--- a/drivers/net/ethernet/apple/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 824b8e6021f..2c7b36673df 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -318,8 +318,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- if (!lancer_chip(adapter))
- be_detect_dump_ue(adapter);
+ be_detect_dump_ue(adapter);
return -1;
}
@@ -1540,7 +1539,14 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MULTICAST);
- req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
+
+ /* Reset mcast promisc mode if already set by setting mask
+ * and not setting flags field
+ */
+ req->if_flags_mask |=
+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+
+ req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
netdev_for_each_mc_addr(ha, adapter->netdev)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index fbc8a915519..f2c89e3ccab 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -48,6 +48,8 @@
/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
#define SLIPORT_STATUS_OFFSET 0x404
#define SLIPORT_CONTROL_OFFSET 0x408
+#define SLIPORT_ERROR1_OFFSET 0x40C
+#define SLIPORT_ERROR2_OFFSET 0x410
#define SLIPORT_STATUS_ERR_MASK 0x80000000
#define SLIPORT_STATUS_RN_MASK 0x01000000
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 21804972fa2..bf266a00c77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -16,6 +16,7 @@
*/
#include <linux/prefetch.h>
+#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
@@ -1905,6 +1906,8 @@ loop_continue:
be_rx_stats_update(rxo, rxcp);
}
+ be_cq_notify(adapter, rx_cq->id, false, work_done);
+
/* Refill the queue */
if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
be_post_rx_frags(rxo, GFP_ATOMIC);
@@ -1912,10 +1915,8 @@ loop_continue:
/* All consumed */
if (work_done < budget) {
napi_complete(napi);
- be_cq_notify(adapter, rx_cq->id, true, work_done);
- } else {
- /* More to be consumed; continue with interrupts disabled */
- be_cq_notify(adapter, rx_cq->id, false, work_done);
+ /* Arm CQ */
+ be_cq_notify(adapter, rx_cq->id, true, 0);
}
return work_done;
}
@@ -1977,42 +1978,62 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
void be_detect_dump_ue(struct be_adapter *adapter)
{
- u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
+ u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
+ u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_status_lo);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_status_hi);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
+ if (lancer_chip(adapter)) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ sliport_err1 = ioread32(adapter->db +
+ SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db +
+ SLIPORT_ERROR2_OFFSET);
+ }
+ } else {
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_LOW, &ue_lo);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_HIGH, &ue_hi);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
- ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
- ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+ ue_lo = (ue_lo & (~ue_lo_mask));
+ ue_hi = (ue_hi & (~ue_hi_mask));
+ }
- if (ue_status_lo || ue_status_hi) {
+ if (ue_lo || ue_hi ||
+ sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->ue_detected = true;
adapter->eeh_err = true;
dev_err(&adapter->pdev->dev, "UE Detected!!\n");
}
- if (ue_status_lo) {
- for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
- if (ue_status_lo & 1)
+ if (ue_lo) {
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
dev_err(&adapter->pdev->dev,
"UE: %s bit set\n", ue_status_low_desc[i]);
}
}
- if (ue_status_hi) {
- for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
- if (ue_status_hi & 1)
+ if (ue_hi) {
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
dev_err(&adapter->pdev->dev,
"UE: %s bit set\n", ue_status_hi_desc[i]);
}
}
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+ "sliport status 0x%x\n", sliport_status);
+ dev_err(&adapter->pdev->dev,
+ "sliport error1 0x%x\n", sliport_err1);
+ dev_err(&adapter->pdev->dev,
+ "sliport error2 0x%x\n", sliport_err2);
+ }
}
static void be_worker(struct work_struct *work)
@@ -2022,7 +2043,7 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
- if (!adapter->ue_detected && !lancer_chip(adapter))
+ if (!adapter->ue_detected)
be_detect_dump_ue(adapter);
/* when interrupts are not yet enabled, just reap any pending
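be_detect_dump_ue() above now branches on the chip family: Lancer parts report errors through the SLIPORT_* MMIO registers, while the others keep the PCI-config UE status/mask pair and decode whichever bits remain after masking. A compact sketch of that masked-status decode; the register offsets and description table are placeholders, not the be2net ones:

#include <linux/pci.h>

/* Read status and mask, drop masked bits, then name every bit still set. */
static void dump_ue_bits(struct pci_dev *pdev, int status_off, int mask_off,
                         const char * const *desc)
{
        u32 status, mask, i;

        pci_read_config_dword(pdev, status_off, &status);
        pci_read_config_dword(pdev, mask_off, &mask);
        status &= ~mask;

        for (i = 0; status; status >>= 1, i++)
                if (status & 1)
                        dev_err(&pdev->dev, "UE: %s bit set\n", desc[i]);
}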
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index bdb348a5ccf..251b635fe75 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -22,6 +22,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/module.h>
#include <net/ethoc.h>
static int buffer_size = 0x8000; /* 32 KBytes */
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 1cf671643d1..c520cfd3b29 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,8 +7,7 @@ config NET_VENDOR_FREESCALE
default y
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || ARCH_MXS || \
- (PPC_MPC52xx && PPC_BESTCOMM)
+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 46d690a92c0..b5dc0273a1d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
index 40e1a175fce..ba82a266051 100644
--- a/drivers/net/ethernet/i825xx/3c505.c
+++ b/drivers/net/ethernet/i825xx/3c505.c
@@ -126,15 +126,13 @@
*
*********************************************************/
-#define filename __FILE__
-
#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
#define TIMEOUT_MSG(lineno) \
- pr_notice(timeout_msg, filename, __func__, (lineno))
+ pr_notice(timeout_msg, __FILE__, __func__, (lineno))
#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
#define INVALID_PCB_MSG(len) \
- pr_notice(invalid_pcb_msg, (len), filename, __func__, __LINE__)
+ pr_notice(invalid_pcb_msg, (len), __FILE__, __func__, __LINE__)
#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 61029dc7fa6..76213162fbe 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -5,7 +5,11 @@
config NET_VENDOR_INTEL
bool "Intel devices"
default y
- depends on PCI || PCI_MSI
+ depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
+ ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
+ GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
+ (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
+ EXPERIMENTAL
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ae17cd1a907..5a2fdf7a00c 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2810,6 +2810,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
e100_get_defaults(nic);
+ /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
+ if (nic->mac < mac_82558_D101_A4)
+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
+
/* locks must be initialized before calling hw_reset */
spin_lock_init(&nic->cb_lock);
spin_lock_init(&nic->cmd_lock);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 6a17c62cb86..e2a80a283fd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -866,8 +866,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
&hw->adapter->state)) {
- WARN(1, "e1000e: %s: contention for Phy access\n",
- hw->adapter->netdev->name);
+ e_dbg("contention for Phy access\n");
return -E1000_ERR_PHY;
}
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 4dd9b63273f..20e93b08e7f 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -27,6 +27,7 @@
*******************************************************************************/
#include <linux/netdevice.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 7edf31efe75..b17d7c20f81 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1687,7 +1687,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
if (ret_val)
goto out;
- is_cm = !(phy_data & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
/* Populate the phy structure with cable length in meters */
phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 834f044be4c..f1365fef4ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3344,7 +3344,7 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length)
{
- u32 hicr, i;
+ u32 hicr, i, bi;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
u8 buf_len, dword_len;
@@ -3398,9 +3398,9 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
- for (i = 0; i < dword_len; i++) {
- buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
- le32_to_cpus(&buffer[i]);
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&buffer[bi]);
}
/* If there is any thing in data position pull it in */
@@ -3414,12 +3414,14 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
goto out;
}
- /* Calculate length in DWORDs, add one for odd lengths */
- dword_len = (buf_len + 1) >> 2;
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
- /* Pull in the rest of the buffer (i is where we left off)*/
- for (; i < buf_len; i++)
- buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ /* Pull in the rest of the buffer (bi is where we left off)*/
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&buffer[bi]);
+ }
out:
return ret_val;
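The ixgbe_host_interface_command() fix above corrects the byte-to-dword conversion: covering buf_len bytes needs (buf_len + 3) >> 2 dwords, e.g. 9 bytes round up to 3 dwords, whereas the old (buf_len + 1) >> 2 yields only 2 and truncates the tail. A minimal sketch of the rounding implied by the fix (helper name is illustrative):

#include <linux/types.h>

/* Round a byte count up to whole 32-bit words: add (4 - 1) before the
 * shift so a partial trailing word is still counted.
 */
static inline u32 bytes_to_dwords(u32 buf_len)
{
        return (buf_len + 3) >> 2;
}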
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 3631d639d86..33b93ffb87c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -561,11 +561,12 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+ ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
+
/* No IEEE PFC settings available */
if (!my_ets)
- return -EINVAL;
+ return 0;
- ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
@@ -621,11 +622,12 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
int i;
+ pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
+
/* No IEEE PFC settings available */
if (!my_pfc)
- return -EINVAL;
+ return 0;
- pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
pfc->pfc_en = my_pfc->pfc_en;
pfc->mbc = my_pfc->mbc;
pfc->delay = my_pfc->delay;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 09b8e88b299..8ef92d1a6aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3345,34 +3345,25 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
- /* reconfigure the hardware */
- if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
- max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
+
+ /* reconfigure the hardware */
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_TX_CONFIG);
ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_RX_CONFIG);
ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
- } else {
- struct net_device *dev = adapter->netdev;
-
- if (adapter->ixgbe_ieee_ets) {
- struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
- int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
- ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
- }
-
- if (adapter->ixgbe_ieee_pfc) {
- struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc;
- u8 *prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
-
- ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en,
- prio_tc);
- }
+ } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
+ ixgbe_dcb_hw_ets(&adapter->hw,
+ adapter->ixgbe_ieee_ets,
+ max_frame);
+ ixgbe_dcb_hw_pfc_config(&adapter->hw,
+ adapter->ixgbe_ieee_pfc->pfc_en,
+ adapter->ixgbe_ieee_ets->prio_tc);
}
/* Enable RSS Hash per TC */
@@ -6125,7 +6116,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
autoneg = hw->phy.autoneg_advertised;
if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
- hw->mac.autotry_restart = false;
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
@@ -7589,13 +7579,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_eeprom;
}
- /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.disable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
- hw->mac.ops.disable_tx_laser(hw);
-
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
@@ -7693,6 +7676,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_register;
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.disable_tx_laser(hw);
+
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index db95731863d..00fcd39ad66 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -442,12 +442,14 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
{
+#ifdef CONFIG_PCI_IOV
int i;
for (i = 0; i < adapter->num_vfs; i++) {
if (adapter->vfinfo[i].vfdev->dev_flags &
PCI_DEV_FLAGS_ASSIGNED)
return true;
}
+#endif
return false;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 5a7e1eb3359..df04f1a3857 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -43,9 +43,11 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+#endif
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 5e92cc2079b..4c8e19951d5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -54,7 +54,7 @@ char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.1.0-k"
+#define DRV_VERSION "2.2.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2010 Intel Corporation.";
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cbd026f3bc5..fdc6c394c68 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -366,17 +366,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
}
} else {
- if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
- u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
-
- /* enable PHY Reverse Auto-Negotiation */
- ctrl2 |= 1u << 13;
-
- /* Write PHY changes (SW-reset must follow) */
- gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
- }
-
-
/* disable energy detect */
ctrl &= ~PHY_M_PC_EN_DET_MSK;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 116cae334da..8be20e7ea3d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 32f947154c3..45aea9c3ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -32,6 +32,7 @@
*/
#include <linux/workqueue.h>
+#include <linux/module.h>
#include "mlx4.h"
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 23cee7b6af9..78f5a1a0b8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index bd8ef9f2fa7..499a5168892 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -35,6 +35,7 @@
*/
#include <linux/hardirq.h>
+#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 90f2cd24faa..d901b426753 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -39,6 +39,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
+#include <linux/moduleparam.h>
#include "mlx4_en.h"
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1ad1f6029af..24ee9677599 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -33,6 +33,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
@@ -484,7 +485,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
mlx4_mtt_cleanup(dev, &eq->mtt);
for (i = 0; i < npages; ++i)
- pci_free_consistent(dev->pdev, PAGE_SIZE,
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
eq->page_list[i].map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index ed452ddfe34..435ca6e4973 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -33,6 +33,7 @@
*/
#include <linux/mlx4/cmd.h>
+#include <linux/module.h>
#include <linux/cache.h>
#include "fw.h"
@@ -205,6 +206,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
+#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
+#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
@@ -319,6 +322,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->reserved_pds = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
dev_cap->max_pds = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
+ dev_cap->reserved_xrcds = field >> 4;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
+ dev_cap->max_xrcds = 1 << (field & 0x1f);
MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
dev_cap->rdmarc_entry_sz = size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 1e8ecc3708e..bf5ec228652 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -93,6 +93,8 @@ struct mlx4_dev_cap {
int max_mcgs;
int reserved_pds;
int max_pds;
+ int reserved_xrcds;
+ int max_xrcds;
int qpc_entry_sz;
int rdmarc_entry_sz;
int altc_entry_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 73c94fcdfdd..ca6feb55bd9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -32,6 +32,7 @@
*/
#include <linux/slab.h>
+#include <linux/export.h>
#include "mlx4.h"
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ee35df4dd..94bbc85a532 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -96,6 +96,8 @@ MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+/* Log2 max number of VLANs per ETH port (0-7) */
+#define MLX4_LOG_NUM_VLANS 7
static int use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
@@ -220,6 +222,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
dev->caps.reserved_uars = dev_cap->reserved_uars;
dev->caps.reserved_pds = dev_cap->reserved_pds;
+ dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
+ dev_cap->reserved_xrcds : 0;
+ dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
+ dev_cap->max_xrcds : 0;
dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
@@ -230,7 +236,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.log_num_macs = log_num_mac;
- dev->caps.log_num_vlans = log_num_vlan;
+ dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
@@ -912,11 +918,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_kar_unmap;
}
+ err = mlx4_init_xrcd_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "reliable connection domain table, aborting.\n");
+ goto err_pd_table_free;
+ }
+
err = mlx4_init_mr_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"memory region table, aborting.\n");
- goto err_pd_table_free;
+ goto err_xrcd_table_free;
}
err = mlx4_init_eq_table(dev);
@@ -998,6 +1011,13 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
"ib capabilities (%d). Continuing with "
"caps = 0\n", port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+ err = mlx4_check_ext_port_caps(dev, port);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d extended "
+ "port capabilities support info (%d)."
+ " Assuming not supported\n", port, err);
+
err = mlx4_SET_PORT(dev, port);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
@@ -1033,6 +1053,9 @@ err_eq_table_free:
err_mr_table_free:
mlx4_cleanup_mr_table(dev);
+err_xrcd_table_free:
+ mlx4_cleanup_xrcd_table(dev);
+
err_pd_table_free:
mlx4_cleanup_pd_table(dev);
@@ -1355,6 +1378,7 @@ err_port:
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
@@ -1416,6 +1440,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
iounmap(priv->kar);
@@ -1489,10 +1514,9 @@ static int __init mlx4_verify_params(void)
return -1;
}
- if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
- pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);
- return -1;
- }
+ if (log_num_vlan != 0)
+ pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
+ MLX4_LOG_NUM_VLANS);
if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index cd1784593a3..978688c3104 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
+#include <linux/export.h>
#include "mlx4.h"
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a2fcd8402d3..5dfa68ffc11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -335,6 +335,7 @@ struct mlx4_priv {
struct mlx4_cmd cmd;
struct mlx4_bitmap pd_bitmap;
+ struct mlx4_bitmap xrcd_bitmap;
struct mlx4_uar_table uar_table;
struct mlx4_mr_table mr_table;
struct mlx4_cq_table cq_table;
@@ -384,6 +385,7 @@ int mlx4_alloc_eq_table(struct mlx4_dev *dev);
void mlx4_free_eq_table(struct mlx4_dev *dev);
int mlx4_init_pd_table(struct mlx4_dev *dev);
+int mlx4_init_xrcd_table(struct mlx4_dev *dev);
int mlx4_init_uar_table(struct mlx4_dev *dev);
int mlx4_init_mr_table(struct mlx4_dev *dev);
int mlx4_init_eq_table(struct mlx4_dev *dev);
@@ -393,6 +395,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev);
int mlx4_init_mcg_table(struct mlx4_dev *dev);
void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
+void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev);
void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
@@ -450,6 +453,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
+int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, enum mlx4_steer_type steer);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index fca66165110..8fda331c65d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -581,8 +581,9 @@ extern const struct ethtool_ops mlx4_en_ethtool_ops;
* printk / logging functions
*/
+__printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv,
- const char *format, ...) __attribute__ ((format (printf, 3, 4)));
+ const char *format, ...);
#define en_dbg(mlevel, priv, format, arg...) \
do { \
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 9c188bdd7f4..efa3e77355e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -33,6 +33,7 @@
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
@@ -139,7 +140,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
GFP_KERNEL);
- buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
+ buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
if (!buddy->bits || !buddy->num_free)
goto err_out;
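The kcalloc() change above also corrects the element size: the old call sized the array by sizeof(int *) rather than by the element it actually stores. A hedged sketch of the idiom, with illustrative names that are not from the driver:

#include <linux/slab.h>

/* Allocate a zeroed array of counters, sizing each slot by the element
 * itself rather than by a pointer type, so the code stays correct if the
 * element type ever changes. kcalloc() also checks n * size for overflow. */
static int example_alloc_counters(unsigned int max_order, unsigned int **out)
{
	unsigned int *num_free;

	num_free = kcalloc(max_order + 1, sizeof(*num_free), GFP_KERNEL);
	if (!num_free)
		return -ENOMEM;

	*out = num_free;
	return 0;
}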
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 1286b886dce..260ed259ce9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -32,6 +32,7 @@
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/io-mapping.h>
#include <asm/page.h>
@@ -61,6 +62,24 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
+int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap);
+ if (*xrcdn == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
+
+void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+}
+EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
+
int mlx4_init_pd_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -74,6 +93,18 @@ void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
}
+int mlx4_init_xrcd_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16),
+ (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0);
+}
+
+void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap);
+}
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
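The new mlx4_xrcd_alloc()/mlx4_xrcd_free() exports above hand out XRC domain numbers from the xrcd_bitmap. A hypothetical consumer might use them as sketched below; this is illustrative code, not taken from the patch.

/* Sketch of a caller that obtains and later releases an XRC domain number. */
static int example_create_xrc_domain(struct mlx4_dev *dev, u32 *xrcdn)
{
	int err;

	err = mlx4_xrcd_alloc(dev, xrcdn);	/* draws a number from xrcd_bitmap */
	if (err)
		return err;			/* -ENOMEM when the bitmap is exhausted */

	/* ... program the domain number into the object that needs it ... */
	return 0;
}

static void example_destroy_xrc_domain(struct mlx4_dev *dev, u32 xrcdn)
{
	mlx4_xrcd_free(dev, xrcdn);		/* returns the number to the bitmap */
}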
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 163a314c148..d942aea4927 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -32,6 +32,7 @@
#include <linux/errno.h>
#include <linux/if_ether.h>
+#include <linux/export.h>
#include <linux/mlx4/cmd.h>
@@ -148,22 +149,26 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
- if (!err) {
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return -ENOMEM;
- }
- entry->mac = mac;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err) {
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return err;
- }
- } else
+ if (err)
return err;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+ return -ENOMEM;
+ }
+
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err) {
+ kfree(entry);
+ mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+ return err;
+ }
}
+
mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+
mutex_lock(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
if (free < 0 && !table->refs[i]) {
@@ -465,6 +470,48 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
return err;
}
+int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
+{
+ struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
+ u8 *inbuf, *outbuf;
+ int err, packet_error;
+
+ inmailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inmailbox))
+ return PTR_ERR(inmailbox);
+
+ outmailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outmailbox)) {
+ mlx4_free_cmd_mailbox(dev, inmailbox);
+ return PTR_ERR(outmailbox);
+ }
+
+ inbuf = inmailbox->buf;
+ outbuf = outmailbox->buf;
+ memset(inbuf, 0, 256);
+ memset(outbuf, 0, 256);
+ inbuf[0] = 1;
+ inbuf[1] = 1;
+ inbuf[2] = 1;
+ inbuf[3] = 1;
+
+ *(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO;
+ *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
+
+ err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+
+ packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
+
+ dev->caps.ext_port_cap[port] = (!err && !packet_error) ?
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO
+ : 0;
+
+ mlx4_free_cmd_mailbox(dev, inmailbox);
+ mlx4_free_cmd_mailbox(dev, outmailbox);
+ return err;
+}
+
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
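The reworked mlx4_register_mac() hunk above flattens the nesting and, importantly, frees the just-allocated tracking entry when radix_tree_insert() fails, which the old code leaked. The general allocate-insert-or-free pattern, sketched here with illustrative names:

#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
	u64 mac;
};

/* Insert a tracking entry keyed by QP number; on any failure, undo the
 * allocation so nothing leaks. */
static int example_track_qpn(struct radix_tree_root *tree, int qpn, u64 mac)
{
	struct example_entry *entry;
	int err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->mac = mac;
	err = radix_tree_insert(tree, qpn, entry);
	if (err) {
		kfree(entry);		/* the fix: free on insert failure */
		return err;
	}
	return 0;
}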
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index ec9350e5f21..15f870cb259 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -34,6 +34,7 @@
*/
#include <linux/gfp.h>
+#include <linux/export.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -280,6 +281,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
* We reserve 2 extra QPs per port for the special QPs. The
* block of special QPs must be aligned to a multiple of 8, so
* round up.
+ *
+ * We also reserve the MSB of the 24-bit QP number to indicate
+ * that a QP is an XRC QP.
*/
dev->caps.sqp_start =
ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
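The comment added above notes that the MSB of the 24-bit QP number is reserved to mark XRC QPs. Purely as an illustration of that convention (these helpers are not part of the patch), tagging and testing the bit could look like:

#include <linux/types.h>

#define EXAMPLE_QPN_XRC_BIT	(1u << 23)	/* MSB of a 24-bit QP number */

static inline u32 example_qpn_tag_xrc(u32 qpn)
{
	return qpn | EXAMPLE_QPN_XRC_BIT;
}

static inline bool example_qpn_is_xrc(u32 qpn)
{
	return !!(qpn & EXAMPLE_QPN_XRC_BIT);
}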
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 3b07b80a045..9cbf3fce014 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -32,6 +32,7 @@
*/
#include <linux/mlx4/cmd.h>
+#include <linux/export.h>
#include <linux/gfp.h>
#include "mlx4.h"
@@ -40,20 +41,20 @@
struct mlx4_srq_context {
__be32 state_logsize_srqn;
u8 logstride;
- u8 reserved1[3];
- u8 pg_offset;
- u8 reserved2[3];
- u32 reserved3;
+ u8 reserved1;
+ __be16 xrcd;
+ __be32 pg_offset_cqn;
+ u32 reserved2;
u8 log_page_size;
- u8 reserved4[2];
+ u8 reserved3[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
__be32 pd;
__be16 limit_watermark;
__be16 wqe_cnt;
- u16 reserved5;
+ u16 reserved4;
__be16 wqe_counter;
- u32 reserved6;
+ u32 reserved5;
__be64 db_rec_addr;
};
@@ -109,8 +110,8 @@ static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
MLX4_CMD_TIME_CLASS_A);
}
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
- u64 db_rec, struct mlx4_srq *srq)
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_cmd_mailbox *mailbox;
@@ -148,6 +149,8 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
srq->srqn);
srq_context->logstride = srq->wqe_shift - 4;
+ srq_context->xrcd = cpu_to_be16(xrcd);
+ srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);
srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
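mlx4_srq_alloc() above grows two parameters, a bound CQ number and an XRC domain; the CQN is masked to 24 bits and stored big-endian in the context alongside the XRC domain. A hypothetical call site for an ordinary (non-XRC) SRQ, sketched rather than taken from the patch, would simply pass zero for both new arguments:

/* Plain SRQ: no CQ binding and no XRC domain, so 0 is passed for the
 * new cqn and xrcd arguments (illustrative wrapper, not driver code). */
static int example_create_basic_srq(struct mlx4_dev *dev, u32 pdn,
				    struct mlx4_mtt *mtt, u64 db_rec,
				    struct mlx4_srq *srq)
{
	return mlx4_srq_alloc(dev, pdn, 0 /* cqn */, 0 /* xrcd */,
			      mtt, db_rec, srq);
}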
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index 4a6b9fd073b..eb836f770f5 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -5,7 +5,10 @@
config NET_VENDOR_NATSEMI
bool "National Semi-conductor devices"
default y
- depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000
+ depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
+ ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \
+ MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \
+ XTENSA_PLATFORM_XT2000 || ZORRO
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 671e166b5af..a83197d757c 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -55,6 +55,7 @@
#include <linux/firmware.h>
#include <linux/net_tstamp.h>
#include <linux/prefetch.h>
+#include <linux/module.h>
#include "vxge-main.h"
#include "vxge-reg.h"
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e37eb98c4e..1dca57013cb 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1682,6 +1682,7 @@ static void nv_get_hw_stats(struct net_device *dev)
np->estats.tx_pause += readl(base + NvRegTxPause);
np->estats.rx_pause += readl(base + NvRegRxPause);
np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ np->estats.rx_errors_total += np->estats.rx_drop_frame;
}
if (np->driver_data & DEV_HAS_STATISTICS_V3) {
@@ -1706,11 +1707,14 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
nv_get_hw_stats(dev);
/* copy to net_device stats */
+ dev->stats.tx_packets = np->estats.tx_packets;
+ dev->stats.rx_bytes = np->estats.rx_bytes;
dev->stats.tx_bytes = np->estats.tx_bytes;
dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
dev->stats.rx_over_errors = np->estats.rx_over_errors;
+ dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
dev->stats.rx_errors = np->estats.rx_errors_total;
dev->stats.tx_errors = np->estats.tx_errors_total;
}
@@ -2099,10 +2103,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
- ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2141,13 +2145,13 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* setup the fragments */
for (i = 0; i < fragments; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = skb_frag_size(frag);
+ u32 frag_size = skb_frag_size(frag);
offset = 0;
do {
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
- bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
np->put_tx_ctx->dma = skb_frag_dma_map(
&np->pci_dev->dev,
frag, offset,
@@ -2159,12 +2163,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
offset += bcnt;
- size -= bcnt;
+ frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
put_tx = np->first_tx.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
np->put_tx_ctx = np->first_tx_ctx;
- } while (size);
+ } while (frag_size);
}
/* set last fragment flag */
@@ -2213,10 +2217,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
- ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2257,13 +2261,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* setup the fragments */
for (i = 0; i < fragments; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = skb_frag_size(frag);
+ u32 frag_size = skb_frag_size(frag);
offset = 0;
do {
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
- bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
np->put_tx_ctx->dma = skb_frag_dma_map(
&np->pci_dev->dev,
frag, offset,
@@ -2276,12 +2280,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
offset += bcnt;
- size -= bcnt;
+ frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
put_tx = np->first_tx.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
np->put_tx_ctx = np->first_tx_ctx;
- } while (size);
+ } while (frag_size);
}
/* set last fragment flag */
@@ -2374,16 +2378,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
if (flags & NV_TX_ERROR) {
- if (flags & NV_TX_UNDERFLOW)
- dev->stats.tx_fifo_errors++;
- if (flags & NV_TX_CARRIERLOST)
- dev->stats.tx_carrier_errors++;
if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
- dev->stats.tx_errors++;
- } else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
}
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
@@ -2392,16 +2388,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
} else {
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if (flags & NV_TX2_UNDERFLOW)
- dev->stats.tx_fifo_errors++;
- if (flags & NV_TX2_CARRIERLOST)
- dev->stats.tx_carrier_errors++;
if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
- dev->stats.tx_errors++;
- } else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
}
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
@@ -2434,9 +2422,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
- if (!(flags & NV_TX2_ERROR))
- dev->stats.tx_packets++;
- else {
+ if (flags & NV_TX2_ERROR) {
if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
nv_gear_backoff_reseed(dev);
@@ -2636,7 +2622,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
- dev->stats.rx_errors++;
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2650,11 +2635,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
else {
if (flags & NV_RX_MISSEDFRAME)
dev->stats.rx_missed_errors++;
- if (flags & NV_RX_CRCERR)
- dev->stats.rx_crc_errors++;
- if (flags & NV_RX_OVERFLOW)
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2670,7 +2650,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
- dev->stats.rx_errors++;
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2682,11 +2661,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* the rest are hard errors */
else {
- if (flags & NV_RX2_CRCERR)
- dev->stats.rx_crc_errors++;
- if (flags & NV_RX2_OVERFLOW)
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2704,7 +2678,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
np->get_rx.orig = np->first_rx.orig;
@@ -2787,9 +2760,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
__vlan_hwaccel_put_tag(skb, vid);
}
napi_gro_receive(&np->napi, skb);
-
dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
} else {
dev_kfree_skb(skb);
}
@@ -2962,11 +2933,11 @@ static void nv_set_multicast(struct net_device *dev)
struct netdev_hw_addr *ha;
netdev_for_each_mc_addr(ha, dev) {
- unsigned char *addr = ha->addr;
+ unsigned char *hw_addr = ha->addr;
u32 a, b;
- a = le32_to_cpu(*(__le32 *) addr);
- b = le16_to_cpu(*(__le16 *) (&addr[4]));
+ a = le32_to_cpu(*(__le32 *) hw_addr);
+ b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
alwaysOn[0] &= a;
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
@@ -3398,7 +3369,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
- writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+ writel(events, base + NvRegMSIXIrqStatus);
+ netdev_dbg(dev, "tx irq events: %08x\n", events);
if (!(events & np->irqmask))
break;
@@ -3509,7 +3481,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
- writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+ writel(events, base + NvRegMSIXIrqStatus);
+ netdev_dbg(dev, "rx irq events: %08x\n", events);
if (!(events & np->irqmask))
break;
@@ -3553,7 +3526,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
- writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+ writel(events, base + NvRegMSIXIrqStatus);
+ netdev_dbg(dev, "irq events: %08x\n", events);
if (!(events & np->irqmask))
break;
@@ -3617,10 +3591,10 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
- writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
+ writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
} else {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
- writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
+ writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
}
pci_push(base);
if (!(events & NVREG_IRQ_TIMER))
@@ -4566,7 +4540,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e
struct fe_priv *np = netdev_priv(dev);
/* update stats */
- nv_do_stats_poll((unsigned long)dev);
+ nv_get_hw_stats(dev);
memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}
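Several of the forcedeth interrupt handlers above now write back only the status bits they actually read (writel(events, ...)) instead of the full mask, so bits that become set between the read and the acknowledge stay pending and re-trigger instead of being silently cleared. A generic sketch of that acknowledge-only-what-you-saw pattern, with placeholder register names:

#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_IRQ_STATUS	0x000	/* placeholder register offset */
#define EXAMPLE_IRQ_MASK	0xffff	/* placeholder bit mask */

static irqreturn_t example_irq(int irq, void *data)
{
	void __iomem *base = data;
	u32 events;

	events = readl(base + EXAMPLE_IRQ_STATUS) & EXAMPLE_IRQ_MASK;
	if (!events)
		return IRQ_NONE;

	/* Acknowledge only the bits we observed. */
	writel(events, base + EXAMPLE_IRQ_STATUS);

	/* ... process 'events' ... */
	return IRQ_HANDLED;
}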
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index bc1d946b797..212f43b308a 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -9,6 +9,7 @@
#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index b89f3a684ae..48406ca382f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -20,6 +20,7 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
+#include <linux/module.h>
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 5b5d90a47e2..9c075ea2682 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/module.h> /* for __MODULE_STRING */
#include "pch_gbe.h"
#define OPTION_UNSET -1
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 84083ec6e61..0578859a3c7 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -115,16 +115,4 @@ config R8169
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
-config SC92031
- tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
- select CRC32
- ---help---
- This is a driver for the Fast Ethernet PCI network cards based on
- the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
- have one of these, say Y here.
-
- To compile this driver as a module, choose M here: the module
- will be called sc92031. This is recommended.
-
endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index e48cfb6ac42..71b1da30ecb 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,4 +6,3 @@ obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
obj-$(CONFIG_R8169) += r8169.o
-obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index de9afebe183..d5731f1fe6d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2229,13 +2229,15 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
- {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
.driver_data = (unsigned long) &falcon_a1_nic_type},
- {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
- {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
.driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
.driver_data = (unsigned long) &siena_a0_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 442f4d0c247..4764793ed23 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,10 +15,6 @@
#include "filter.h"
/* PCI IDs */
-#define EFX_VENDID_SFC 0x1924
-#define FALCON_A_P_DEVID 0x0703
-#define FALCON_A_S_DEVID 0x6703
-#define FALCON_B_P_DEVID 0x0710
#define BETHPAGE_A_P_DEVID 0x0803
#define SIENA_A_P_DEVID 0x0813
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 4dd1748a19c..97b606b92e8 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1426,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
dev = pci_dev_get(efx->pci_dev);
- while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
+ while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
dev))) {
if (dev->bus == efx->pci_dev->bus &&
dev->devfn == efx->pci_dev->devfn + 1) {
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index b9cc846811d..6cc16b8cc6f 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -764,7 +764,8 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
if (board->type) {
netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
- (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
+ (efx->pci_dev->subsystem_vendor ==
+ PCI_VENDOR_ID_SOLARFLARE)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
return 0;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index adbda182f15..752d521c09b 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -15,6 +15,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
+#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig
new file mode 100644
index 00000000000..ae1ce170864
--- /dev/null
+++ b/drivers/net/ethernet/silan/Kconfig
@@ -0,0 +1,33 @@
+#
+# Silan device configuration
+#
+
+config NET_VENDOR_SILAN
+ bool "Silan devices"
+ default y
+ depends on PCI && EXPERIMENTAL
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Silan devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_SILAN
+
+config SC92031
+ tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
+ have one of these, say Y here.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sc92031. This is recommended.
+
+endif # NET_VENDOR_SILAN
diff --git a/drivers/net/ethernet/silan/Makefile b/drivers/net/ethernet/silan/Makefile
new file mode 100644
index 00000000000..4ad3523dcb9
--- /dev/null
+++ b/drivers/net/ethernet/silan/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Silan network device drivers.
+#
+
+obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/ethernet/realtek/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index a284d644053..a284d644053 100644
--- a/drivers/net/ethernet/realtek/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 4f15680849f..edb24b0e337 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -28,6 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include "smsc9420.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index ac6f190743d..22745d7bf53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -29,15 +29,6 @@ config STMMAC_DA
By default, the DMA arbitration scheme is based on Round-robin
(rx:tx priority is 1:1).
-config STMMAC_DUAL_MAC
- bool "STMMAC: dual mac support (EXPERIMENTAL)"
- default n
- depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
- ---help---
- Some ST SoCs (for example the stx7141 and stx7200c2) have two
- Ethernet Controllers. This option turns on the second Ethernet
- device on this kind of platforms.
-
config STMMAC_TIMER
bool "STMMAC Timer optimisation"
default n
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ddb33cfd354..7bf1e201578 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1674,6 +1674,9 @@ static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
int result;
pr_debug("%s: called\n", __func__);
+
+ udbg_shutdown_ps3gelic();
+
result = ps3_open_hv_device(dev);
if (result) {
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index d3fadfbc3bc..a93df6ac190 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,6 +359,12 @@ static inline void *port_priv(struct gelic_port *port)
return port->priv;
}
+#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
+extern void udbg_shutdown_ps3gelic(void);
+#else
+static inline void udbg_shutdown_ps3gelic(void) {}
+#endif
+
extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
extern void gelic_card_up(struct gelic_card *card);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 4d1658e78de..caf3659e173 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -716,8 +716,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
cur_p->phys = dma_map_single(ndev->dev.parent,
skb_frag_address(frag),
- frag_size(frag), DMA_TO_DEVICE);
- cur_p->len = frag_size(frag);
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ cur_p->len = skb_frag_size(frag);
cur_p->app0 = 0;
frag++;
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index ec96d910e9a..f45c85a8426 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -35,6 +35,7 @@
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <mach/ixp46x_ts.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index a40fab44b9a..d423d18b4ad 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -314,7 +314,7 @@ config TOSHIBA_FIR
config AU1000_FIR
tristate "Alchemy Au1000 SIR/FIR"
- depends on SOC_AU1000 && IRDA
+ depends on IRDA && MIPS_ALCHEMY
config SMC_IRCC_FIR
tristate "SMSC IrCC (EXPERIMENTAL)"
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3ce3d4561e..74134970b70 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -192,6 +192,13 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
*/
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA);
+ else {
+ /* forward to original port. */
+ vlan = src;
+ ret = macvlan_broadcast_one(skb, vlan, eth, 0);
+ goto out;
+ }
+
return RX_HANDLER_PASS;
}
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a4eae750a41..f414ffb5b72 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -14,6 +14,7 @@
*
*/
#include <linux/phy.h>
+#include <linux/module.h>
#define RTL821x_PHYSR 0x11
#define RTL821x_PHYSR_DUPLEX 0x2000
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 3bb13113703..7145714a5ec 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -88,8 +88,8 @@ static struct rio_dev **rionet_active;
#define dev_rionet_capable(dev) \
is_rionet_capable(dev->src_ops, dev->dst_ops)
-#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
-#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
+#define RIONET_MAC_MATCH(x) (!memcmp((x), "\00\01\00\01", 4))
+#define RIONET_GET_DESTID(x) ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))
static int rionet_rx_clean(struct net_device *ndev)
{
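The RIONET_GET_DESTID() rewrite above assembles the 16-bit destination ID byte by byte instead of casting to u16 and dereferencing, which avoids an unaligned, endian-dependent load from the middle of a MAC address. Written out long-hand as an illustrative sketch (not part of the patch):

#include <linux/types.h>

/* Bytes 4 and 5 of the MAC hold the destination ID in big-endian order. */
static inline u16 example_get_destid(const u8 *mac)
{
	return ((u16)mac[4] << 8) | mac[5];
}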
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 1e722195105..d43db32f947 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -27,6 +27,7 @@
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
+#include <linux/module.h>
/*
* The device has a CDC ACM port for modem control (it claims to be
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7d6082160bc..fae0fbd8bc8 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1057,7 +1057,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
unsigned long flags;
int retval;
- skb_tx_timestamp(skb);
+ if (skb)
+ skb_tx_timestamp(skb);
// some devices want funky USB-level framing, for
// win32 driver (usually) and/or hardware quirks
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5b23767ea81..ef883e97cee 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>
+#include <linux/module.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 91039ab1672..6ee8410443c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -925,12 +925,10 @@ static void virtnet_update_status(struct virtnet_info *vi)
{
u16 v;
- if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
- return;
-
- vi->vdev->config->get(vi->vdev,
+ if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
offsetof(struct virtio_net_config, status),
- &v, sizeof(v));
+ &v) < 0)
+ return;
/* Ignore unknown (future) status bits */
v &= VIRTIO_NET_S_LINK_UP;
@@ -1006,11 +1004,9 @@ static int virtnet_probe(struct virtio_device *vdev)
}
/* Configuration may specify what MAC to use. Otherwise random. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
- vdev->config->get(vdev,
+ if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len);
- } else
+ dev->dev_addr, dev->addr_len) < 0)
random_ether_addr(dev->dev_addr);
/* Set up our device-specific information */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b771ebac0f0..d96bfb1ac20 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -24,6 +24,7 @@
*
*/
+#include <linux/module.h>
#include <net/ip6_checksum.h>
#include "vmxnet3_int.h"
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 727d728649b..2fea02b35b2 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -78,6 +78,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/wimax/i2400m.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
#define D_SUBMODULE control
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 9c70b5fa3f5..129ba36bd04 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -26,6 +26,7 @@
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/export.h>
#include "i2400m.h"
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 85dadd5bf4b..7cbd7d231e1 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -158,6 +158,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/export.h>
#include "i2400m.h"
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 2edd8fe1c1f..64a110604ad 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -76,6 +76,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#include <linux/export.h>
#include "i2400m.h"
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 2f94a872101..37becfcc98f 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -149,6 +149,8 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
#include "i2400m.h"
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index be428cae28d..21a9edd6e75 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -55,6 +55,7 @@
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"
#include <linux/wimax/i2400m.h>
+#include <linux/module.h>
#define D_SUBMODULE main
#include "sdio-debug-levels.h"
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 4b30ed11d78..4b9ecb20dee 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -245,6 +245,7 @@
*/
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "i2400m.h"
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 9a644d052f1..2c1b8b68764 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -67,6 +67,7 @@
#include <linux/wimax/i2400m.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define D_SUBMODULE usb
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index c1c0678b1fb..98db76196b5 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
obj-$(CONFIG_IWLWIFI) += iwlwifi/
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
+obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 3b752d9fb3c..f5ce5623da9 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/module.h>
#include <net/mac80211.h>
#include "adm8211.h"
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 39322d4121b..4045e5ab055 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -517,7 +517,7 @@ static char *hex2str(void *buf, size_t len)
goto exit;
while (len--) {
- obuf = pack_hex_byte(obuf, *ibuf++);
+ obuf = hex_byte_pack(obuf, *ibuf++);
*obuf++ = '-';
}
obuf--;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 07354883641..09602241901 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,6 +1,6 @@
menuconfig ATH_COMMON
tristate "Atheros Wireless Cards"
- depends on CFG80211
+ depends on CFG80211 && (!UML || BROKEN)
---help---
This will enable the support for the Atheros wireless drivers.
ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index fe4bf4da255..c1d699fd571 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -152,6 +152,7 @@ struct ath_common {
struct ath_cycle_counters cc_survey;
struct ath_regulatory regulatory;
+ struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
@@ -173,8 +174,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);
-extern __attribute__((format (printf, 2, 3)))
-void ath_printk(const char *level, const char *fmt, ...);
+extern __printf(2, 3) void ath_printk(const char *level, const char *fmt, ...);
#define _ath_printk(level, common, fmt, ...) \
do { \
@@ -215,6 +215,10 @@ do { \
* @ATH_DBG_HWTIMER: hardware timer handling
 * @ATH_DBG_BTCOEX: bluetooth coexistence
* @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ * used exclusively for WLAN-BT coexistence starting from
+ * AR9462.
+ * @ATH_DBG_DFS: radar detection
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -241,6 +245,7 @@ enum ATH_DEBUG {
ATH_DBG_WMI = 0x00004000,
ATH_DBG_BSTUCK = 0x00008000,
ATH_DBG_MCI = 0x00010000,
+ ATH_DBG_DFS = 0x00020000,
ATH_DBG_ANY = 0xffffffff
};
@@ -259,7 +264,7 @@ do { \
#else
-static inline __attribute__((format (printf, 3, 4)))
+static inline __attribute__ ((format (printf, 3, 4)))
void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
const char *fmt, ...)
{
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e5be7e70181..ee7ea572b06 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (to_platform_device(ah->dev)->id == 0 &&
(bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
(BD_WLAN1 | BD_WLAN0))
- __set_bit(ATH_STAT_2G_DISABLED, ah->status);
+ ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+ else
+ ah->ah_capabilities.cap_needs_2GHz_ovr = false;
}
ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index bea90e6be70..bf674161a21 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -27,15 +27,21 @@
* or reducing sensitivity as necessary.
*
* The parameters are:
+ *
* - "noise immunity"
+ *
* - "spur immunity"
+ *
* - "firstep level"
+ *
* - "OFDM weak signal detection"
+ *
* - "CCK weak signal detection"
*
 * Basically we look at the amount of OFDM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
+ *
* Newer chipsets have PHY error counters in hardware which will generate a MIB
 * interrupt when they overflow. Older hardware has to enable PHY error frames
 * by setting an RX flag and then count every single PHY error. When a specified
@@ -45,11 +51,13 @@
*/
-/*** ANI parameter control ***/
+/***********************\
+* ANI parameter control *
+\***********************/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
@@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @max_spur_level (the maximum level is dependent
- * on the chip revision).
+ * on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
@@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
- * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
- * detection
- *
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
on ? "on" : "off");
}
-
/**
- * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
- *
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
}
-/*** ANI algorithm ***/
+/***************\
+* ANI algorithm *
+\***************/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
- *
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
* @ofdm_trigger: If this is true we are called because of too many OFDM errors,
- * the algorithm will tune more parameters then.
+ * the algorithm will tune more parameters then.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
*/
}
-
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
}
}
-
/**
* ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function.
@@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
return listen;
}
-
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
return 1;
}
-
/**
* ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
-ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
as->listen_time = 0;
}
-
/**
* ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
}
}
-/*** INTERRUPT HANDLER ***/
+/*******************\
+* Interrupt handler *
+\*******************/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
tasklet_schedule(&ah->ani_tasklet);
}
-
/**
- * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
}
-/*** INIT ***/
+/****************\
+* Initialization *
+\****************/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
@@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
@@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_ani_init() - Initialize ANI
- * @mode: Which mode to use (auto, manual high, manual low, off)
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
*
* Initialize ANI according to mode.
*/
@@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
}
-/*** DEBUG ***/
+/**************\
+* Debug output *
+\**************/
#ifdef CONFIG_ATH5K_DEBUG
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 7358b6c83c6..21aa355460b 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
* enum ath5k_ani_mode - mode for ANI / noise sensitivity
*
* @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
- * algorithm after it has been on auto mode.
- * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
- * maximizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
- * minimizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
- * amount of OFDM and CCK frame errors (default).
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
*/
enum ath5k_ani_mode {
ATH5K_ANI_MODE_OFF = 0,
@@ -58,8 +58,22 @@ enum ath5k_ani_mode {
/**
* struct ath5k_ani_state - ANI state and associated counters
- *
- * @max_spur_level: the maximum spur level is chip dependent
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
*/
struct ath5k_ani_state {
enum ath5k_ani_mode ani_mode;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index fecbcd9a425..e564e585b22 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -187,10 +187,9 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
-
#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -262,16 +261,34 @@
#define AR5K_AGC_SETTLING_TURBO 37
-/* GENERIC CHIPSET DEFINITIONS */
-/* MAC Chips */
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
enum ath5k_version {
AR5K_AR5210 = 0,
AR5K_AR5211 = 1,
AR5K_AR5212 = 2,
};
-/* PHY Chips */
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
enum ath5k_radio {
AR5K_RF5110 = 0,
AR5K_RF5111 = 1,
@@ -303,11 +320,11 @@ enum ath5k_radio {
#define AR5K_SREV_AR5213A 0x59 /* Hainan */
#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
#define AR5K_SREV_AR2414 0x70 /* Griffin */
-#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
-#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
#define AR5K_SREV_AR5424 0x90 /* Condor */
-#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
-#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
#define AR5K_SREV_AR5414 0xa0 /* Eagle */
#define AR5K_SREV_AR2415 0xb0 /* Talon */
@@ -344,32 +361,40 @@ enum ath5k_radio {
/* TODO add support to mac80211 for vendor-specific rates and modes */
-/*
+/**
+ * DOC: Atheros XR
+ *
* Some of this information is based on Documentation from:
*
* http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
*
- * Modulation for Atheros' eXtended Range - range enhancing extension that is
- * supposed to double the distance an Atheros client device can keep a
- * connection with an Atheros access point. This is achieved by increasing
- * the receiver sensitivity up to, -105dBm, which is about 20dB above what
- * the 802.11 specifications demand. In addition, new (proprietary) data rates
- * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device with an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
*
 * Please note that you can either use XR or TURBO but you cannot use both,
* they are exclusive.
*
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
*/
-#define MODULATION_XR 0x00000200
-/*
- * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
- * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
- * signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also support it.
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide a throughput transmission speed up to 40Mbit/s
+ * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
* There is also a distinction between "static" and "dynamic" turbo modes:
*
* - Static: is the dumb version: devices set to this mode stick to it until
* the mode is turned off.
+ *
* - Dynamic: is the intelligent version, the network decides itself if it
* is ok to use turbo. As soon as traffic is detected on adjacent channels
* (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@ enum ath5k_radio {
*
* http://www.pcworld.com/article/id,113428-page,1/article.html
*
- * The channel bonding seems to be driver specific though. In addition to
- * deciding what channels will be used, these "Turbo" modes are accomplished
- * by also enabling the following features:
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
*
* - Bursting: allows multiple frames to be sent at once, rather than pausing
* after each frame. Bursting is a standards-compliant feature that can be
* used with any Access Point.
+ *
* - Fast frames: increases the amount of information that can be sent per
* frame, also resulting in a reduction of transmission overhead. It is a
* proprietary feature that needs to be supported by the Access Point.
+ *
* - Compression: data frames are compressed in real time using a Lempel Ziv
* algorithm. This is done transparently. Once this feature is enabled,
* compression and decompression takes place inside the chipset, without
* putting additional load on the host CPU.
*
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
*/
-#define MODULATION_TURBO 0x00000080
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here, we use these as
+ * array indices and they also map to EEPROM structures.
+ */
enum ath5k_driver_mode {
AR5K_MODE_11A = 0,
AR5K_MODE_11B = 1,
@@ -408,30 +448,64 @@ enum ath5k_driver_mode {
AR5K_MODE_MAX = 3
};
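/*
 * Illustrative sketch, not part of the patch: because the mode values
 * above double as array indices, per-mode tables can be indexed with
 * them directly. The table below is a made-up example.
 */
static const char * const ath5k_mode_names[AR5K_MODE_MAX] = {
	[AR5K_MODE_11A] = "802.11a",
	[AR5K_MODE_11B] = "802.11b",
	[AR5K_MODE_11G] = "802.11g",
};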
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode (A -> Rx, B -> Tx)
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control check out phy.c
+ */
enum ath5k_ant_mode {
- AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */
- AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */
- AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */
- AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */
- AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */
- AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */
- AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */
+ AR5K_ANTMODE_DEFAULT = 0,
+ AR5K_ANTMODE_FIXED_A = 1,
+ AR5K_ANTMODE_FIXED_B = 2,
+ AR5K_ANTMODE_SINGLE_AP = 3,
+ AR5K_ANTMODE_SECTOR_AP = 4,
+ AR5K_ANTMODE_SECTOR_STA = 5,
+ AR5K_ANTMODE_DEBUG = 6,
AR5K_ANTMODE_MAX,
};
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
enum ath5k_bw_mode {
- AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */
- AR5K_BWMODE_5MHZ = 1, /* Quarter rate */
- AR5K_BWMODE_10MHZ = 2, /* Half rate */
- AR5K_BWMODE_40MHZ = 3 /* Turbo */
+ AR5K_BWMODE_DEFAULT = 0,
+ AR5K_BWMODE_5MHZ = 1,
+ AR5K_BWMODE_10MHZ = 2,
+ AR5K_BWMODE_40MHZ = 3
};
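/*
 * Illustrative sketch, not part of the patch (the helper name is made up):
 * mapping a bandwidth mode to its nominal channel width in MHz, as
 * documented above.
 */
static inline int ath5k_bwmode_to_mhz(enum ath5k_bw_mode bwmode)
{
	switch (bwmode) {
	case AR5K_BWMODE_5MHZ:
		return 5;
	case AR5K_BWMODE_10MHZ:
		return 10;
	case AR5K_BWMODE_40MHZ:
		return 40;
	case AR5K_BWMODE_DEFAULT:
	default:
		return 20;
	}
}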
+
+
/****************\
TX DEFINITIONS
\****************/
-/*
- * TX Status descriptor
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
*/
struct ath5k_tx_status {
u16 ts_seqnum;
@@ -454,7 +528,6 @@ struct ath5k_tx_status {
* enum ath5k_tx_queue - Queue types used to classify tx queues.
* @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
* @AR5K_TX_QUEUE_DATA: A normal data queue
- * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
* @AR5K_TX_QUEUE_BEACON: The beacon queue
* @AR5K_TX_QUEUE_CAB: The after-beacon queue
* @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@ struct ath5k_tx_status {
enum ath5k_tx_queue {
AR5K_TX_QUEUE_INACTIVE = 0,
AR5K_TX_QUEUE_DATA,
- AR5K_TX_QUEUE_XR_DATA,
AR5K_TX_QUEUE_BEACON,
AR5K_TX_QUEUE_CAB,
AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@ enum ath5k_tx_queue {
#define AR5K_NUM_TX_QUEUES 10
#define AR5K_NUM_TX_QUEUES_NOQCU 2
-/*
- * Queue syb-types to classify normal data queues.
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
* These are the 4 Access Categories as defined in
 * WME spec. 0 is the lowest priority and 3 is the
 * highest. Normal data that hasn't been classified
* goes to the Best Effort AC.
*/
enum ath5k_tx_queue_subtype {
- AR5K_WME_AC_BK = 0, /*Background traffic*/
- AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/
- AR5K_WME_AC_VI, /*Video traffic*/
- AR5K_WME_AC_VO, /*Voice traffic*/
+ AR5K_WME_AC_BK = 0,
+ AR5K_WME_AC_BE,
+ AR5K_WME_AC_VI,
+ AR5K_WME_AC_VO,
};
-/*
- * Queue ID numbers as returned by the hw functions, each number
- * represents a hw queue. If hw does not support hw queues
- * (eg 5210) all data goes in one queue. These match
- * d80211 definitions (net80211/MadWiFi don't use them).
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
*/
enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
- AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
- AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
- AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
- AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
- AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
- AR5K_TX_QUEUE_ID_UAPSD = 8,
- AR5K_TX_QUEUE_ID_XR_DATA = 9,
+ AR5K_TX_QUEUE_ID_DATA_MIN = 0,
+ AR5K_TX_QUEUE_ID_DATA_MAX = 3,
+ AR5K_TX_QUEUE_ID_UAPSD = 7,
+ AR5K_TX_QUEUE_ID_CAB = 8,
+ AR5K_TX_QUEUE_ID_BEACON = 9,
};
/*
@@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
-/*
- * Data transmit queue state. One of these exists for each
- * hardware transmit queue. Packets sent to us from above
- * are assigned to queues based on their priority. Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len: Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority. Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
*/
struct ath5k_txq {
- unsigned int qnum; /* hardware q number */
- u32 *link; /* link ptr in last TX desc */
- struct list_head q; /* transmit queue */
- spinlock_t lock; /* lock on q and link */
+ unsigned int qnum;
+ u32 *link;
+ struct list_head q;
+ spinlock_t lock;
bool setup;
- int txq_len; /* number of queued buffers */
- int txq_max; /* max allowed num of queued buffers */
+ int txq_len;
+ int txq_max;
bool txq_poll_mark;
- unsigned int txq_stuck; /* informational counter */
+ unsigned int txq_stuck;
};
-/*
- * A struct to hold tx queue's parameters
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
*/
struct ath5k_txq_info {
enum ath5k_tx_queue tqi_type;
enum ath5k_tx_queue_subtype tqi_subtype;
- u16 tqi_flags; /* Tx queue flags (see above) */
- u8 tqi_aifs; /* Arbitrated Interframe Space */
- u16 tqi_cw_min; /* Minimum Contention Window */
- u16 tqi_cw_max; /* Maximum Contention Window */
- u32 tqi_cbr_period; /* Constant bit rate period */
+ u16 tqi_flags;
+ u8 tqi_aifs;
+ u16 tqi_cw_min;
+ u16 tqi_cw_max;
+ u32 tqi_cbr_period;
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Time queue waits after an event */
+ u32 tqi_ready_time;
};
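/*
 * Illustrative sketch, not part of the patch: a queue parameter set for
 * a plain best-effort data queue. The numeric values are example EDCA
 * defaults, not values taken from the driver.
 */
static const struct ath5k_txq_info example_be_qinfo = {
	.tqi_type	= AR5K_TX_QUEUE_DATA,
	.tqi_subtype	= AR5K_WME_AC_BE,
	.tqi_aifs	= 2,	/* example AIFS */
	.tqi_cw_min	= 15,	/* example CWmin */
	.tqi_cw_max	= 1023,	/* example CWmax */
};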
-/*
- * Transmit packet types.
- * used on tx control descriptor
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ *
+ * Used on the tx control descriptor.
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@@ -583,27 +689,23 @@ enum ath5k_pkt_type {
(ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
)
-/*
- * DMA size definitions (2^(n+2))
- */
-enum ath5k_dmasize {
- AR5K_DMASIZE_4B = 0,
- AR5K_DMASIZE_8B,
- AR5K_DMASIZE_16B,
- AR5K_DMASIZE_32B,
- AR5K_DMASIZE_64B,
- AR5K_DMASIZE_128B,
- AR5K_DMASIZE_256B,
- AR5K_DMASIZE_512B
-};
/****************\
RX DEFINITIONS
\****************/
-/*
- * RX Status descriptor
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dBm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
*/
struct ath5k_rx_status {
u16 rs_datalen;
@@ -645,10 +747,18 @@ struct ath5k_rx_status {
#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
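/*
 * Illustrative note, not part of the patch: a TU (time unit) is 1024us,
 * so the shift by 10 divides the microsecond TSF by 1024, e.g.
 * TSF_TO_TU(2048000) == 2000.
 */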
+
/*******************************\
GAIN OPTIMIZATION DEFINITIONS
\*******************************/
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
enum ath5k_rfgain {
AR5K_RFGAIN_INACTIVE = 0,
AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@ enum ath5k_rfgain {
AR5K_RFGAIN_NEED_CHANGE,
};
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
struct ath5k_gain {
u8 g_step_idx;
u8 g_current;
@@ -666,6 +786,8 @@ struct ath5k_gain {
u8 g_state;
};
+
+
/********************\
COMMON DEFINITIONS
\********************/
@@ -674,9 +796,14 @@ struct ath5k_gain {
#define AR5K_SLOT_TIME_20 880
#define AR5K_SLOT_TIME_MAX 0xffff
-/*
- * The following structure is used to map 2GHz channels to
- * 5GHz Atheros channels.
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHz map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on the 2111 frequency converter
+ * that comes together with the RF5111.
* TODO: Clean up
*/
struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
u16 a2_athchan;
};
+/**
+ * enum ath5k_dmasize - DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4 Bytes,
+ * so be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+ AR5K_DMASIZE_4B = 0,
+ AR5K_DMASIZE_8B,
+ AR5K_DMASIZE_16B,
+ AR5K_DMASIZE_32B,
+ AR5K_DMASIZE_64B,
+ AR5K_DMASIZE_128B,
+ AR5K_DMASIZE_256B,
+ AR5K_DMASIZE_512B
+};
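/*
 * Illustrative sketch, not part of the patch (the helper name is made up):
 * the burst size in bytes follows directly from the 2^(n+2) encoding
 * documented above.
 */
static inline unsigned int ath5k_dmasize_to_bytes(enum ath5k_dmasize size)
{
	return 4U << size;	/* AR5K_DMASIZE_4B -> 4 ... AR5K_DMASIZE_512B -> 512 */
}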
+
+
/******************\
RATE DEFINITIONS
\******************/
/**
+ * DOC: Rate codes
+ *
* Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
*
* The rate code is used to get the RX rate or set the TX rate on the
* hardware descriptors. It is also used for internal modulation control
* and settings.
*
- * This is the hardware rate map we are aware of:
- *
- * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
- * rate_kbps 3000 1000 ? ? ? 2000 500 48000
- *
- * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10
- * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ?
+ * This is the hardware rate map we are aware of (html unfriendly):
*
- * rate_code 17 18 19 20 21 22 23 24
- * rate_kbps ? ? ? ? ? ? ? 11000
+ * Rate code Rate (Kbps)
+ * --------- -----------
+ * 0x01 3000 (XR)
+ * 0x02 1000 (XR)
+ * 0x03 250 (XR)
+ * 0x04 - 05 -Reserved-
+ * 0x06 2000 (XR)
+ * 0x07 500 (XR)
+ * 0x08 48000 (OFDM)
+ * 0x09 24000 (OFDM)
+ * 0x0A 12000 (OFDM)
+ * 0x0B 6000 (OFDM)
+ * 0x0C 54000 (OFDM)
+ * 0x0D 36000 (OFDM)
+ * 0x0E 18000 (OFDM)
+ * 0x0F 9000 (OFDM)
+ * 0x10 - 17 -Reserved-
+ * 0x18 11000L (CCK)
+ * 0x19 5500L (CCK)
+ * 0x1A 2000L (CCK)
+ * 0x1B 1000L (CCK)
+ * 0x1C 11000S (CCK)
+ * 0x1D 5500S (CCK)
+ * 0x1E 2000S (CCK)
+ * 0x1F -Reserved-
*
- * rate_code 25 26 27 28 29 30 31 32
- * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ?
- *
- * "S" indicates CCK rates with short preamble.
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
*
* AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
- * lowest 4 bits, so they are the same as below with a 0xF mask.
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
* (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
* We handle this in ath5k_setup_bands().
*/
@@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
#define ATH5K_RATE_CODE_36M 0x0D
#define ATH5K_RATE_CODE_48M 0x08
#define ATH5K_RATE_CODE_54M 0x0C
-/* XR */
-#define ATH5K_RATE_CODE_XR_500K 0x07
-#define ATH5K_RATE_CODE_XR_1M 0x02
-#define ATH5K_RATE_CODE_XR_2M 0x06
-#define ATH5K_RATE_CODE_XR_3M 0x01
-/* adding this flag to rate_code enables short preamble */
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
#define AR5K_SET_SHORT_PREAMBLE 0x04
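/*
 * Illustrative sketch, not part of the patch: the rate code values come
 * from the table above; the AR5211 variant (0xF mask) is handled by the
 * driver in ath5k_setup_bands() as noted there.
 */
static const u8 example_rate_11m_long  = 0x18;				/* 11M CCK, long preamble */
static const u8 example_rate_11m_short = 0x18 | AR5K_SET_SHORT_PREAMBLE;	/* 0x1C, short preamble */
static const u8 example_rate_11m_5211  = 0x18 & 0xF;			/* 0x08 on AR5211 */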
/*
@@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
/**
* enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ * not always fatal, on some chips we can continue operation
+ * without resetting the card, that's why %AR5K_INT_FATAL is not
+ * common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
+ *
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ * Queue Control Unit (QCU) signals an EOL interrupt only if a
+ * descriptor's LinkPtr is NULL. For more details, refer to:
+ * "http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such a case we should
+ * increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
*
- * @AR5K_INT_RX: mask to identify received frame interrupts, of type
- * AR5K_ISR_RXOK or AR5K_ISR_RXERR
- * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
- * @AR5K_INT_RXNOFRM: No frame received (?)
- * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- * LinkPtr is NULL. For more details, refer to:
- * http://www.freepatentsonline.com/20030225739.html
- * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- * Note that Rx overrun is not always fatal, on some chips we can continue
- * operation without resetting the card, that's why int_fatal is not
- * common for all chips.
- * @AR5K_INT_TX: mask to identify received frame interrupts, of type
- * AR5K_ISR_TXOK or AR5K_ISR_TXERR
- * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
- * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- * We currently do increments on interrupt by
- * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
 * @AR5K_INT_MIB: Indicates that either the Management Information Base counters or
- * one of the PHY error counters reached the maximum value and should be
- * read and cleared.
+ * one of the PHY error counters reached the maximum value and
+ * should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- * beacon that must be handled in software. The alternative is if you
- * have VEOL support, in that case you let the hardware deal with things.
+ * beacon that must be handled in software. The alternative is if
+ * you have VEOL support, in that case you let the hardware deal
+ * with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
* @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- * beacons from the AP have associated with, we should probably try to
- * reassociate. When in IBSS mode this might mean we have not received
- * any beacons from any local stations. Note that every station in an
- * IBSS schedules to send beacons at the Target Beacon Transmission Time
- * (TBTT) with a random backoff.
- * @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
- * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- * until properly handled
- * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- * errors. These types of errors we can enable seem to be of type
- * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ * beacons from the AP we have associated with, we should probably
+ * try to reassociate. When in IBSS mode this might mean we have
+ * not received any beacons from any local stations. Note that
+ * every station in an IBSS schedules to send beacons at the
+ * Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ * our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ * nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue got its CBR counter expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ * errors. Indicates we need to reset the card.
* @AR5K_INT_GLOBAL: Used to clear and set the IER
- * @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
- * bit value
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ * bit value
*
* These are mapped to take advantage of some common bits
* between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@ enum ath5k_int {
AR5K_INT_GPIO = 0x01000000,
AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
- AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */
- AR5K_INT_QCBRORN = 0x10000000, /* Non common */
- AR5K_INT_QCBRURN = 0x20000000, /* Non common */
- AR5K_INT_QTRIG = 0x40000000, /* Non common */
+ AR5K_INT_QCBRORN = 0x08000000, /* Non common */
+ AR5K_INT_QCBRURN = 0x10000000, /* Non common */
+ AR5K_INT_QTRIG = 0x20000000, /* Non common */
AR5K_INT_GLOBAL = 0x80000000,
AR5K_INT_TX_ALL = AR5K_INT_TXOK
| AR5K_INT_TXDESC
| AR5K_INT_TXERR
+ | AR5K_INT_TXNOFRM
| AR5K_INT_TXEOL
| AR5K_INT_TXURN,
@@ -891,15 +1074,32 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
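/*
 * Illustrative sketch, not part of the patch: the grouped masks above let
 * code test a whole class of interrupts at once (the driver's actual ISR
 * in base.c checks individual bits), e.g.:
 *
 *	if (status & AR5K_INT_RX_ALL)
 *		ath5k_schedule_rx(ah);
 */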
-/* mask which calibration is active at the moment */
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
enum ath5k_calibration_mask {
AR5K_CALIBRATION_FULL = 0x01,
AR5K_CALIBRATION_SHORT = 0x02,
- AR5K_CALIBRATION_ANI = 0x04,
+ AR5K_CALIBRATION_NF = 0x04,
+ AR5K_CALIBRATION_ANI = 0x08,
};
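/*
 * Illustrative sketch, not part of the patch (the helper name is made up):
 * the calibration poll/work code below checks these bits roughly like
 * this before starting a new calibration run.
 */
static inline bool ath5k_cal_active(u8 cal_mask)
{
	return cal_mask & (AR5K_CALIBRATION_FULL | AR5K_CALIBRATION_SHORT);
}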
-/*
- * Power management
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow the card to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used; FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are known to have problems on some cards. This is not a big
+ * problem though because we can have almost the same effect as
+ * FULL_SLEEP by putting the card on warm reset (it's almost powered down).
*/
enum ath5k_power_mode {
AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@ struct ath5k_capabilities {
} cap_queues;
bool cap_has_phyerr_counters;
+ bool cap_has_mrr_support;
+ bool cap_needs_2GHz_ovr;
};
/* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@ struct ath5k_hw {
dma_addr_t desc_daddr; /* DMA (physical) address */
size_t desc_len; /* size of TX/RX descriptors */
- DECLARE_BITMAP(status, 6);
+ DECLARE_BITMAP(status, 4);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
-#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
-#define ATH_STAT_PROMISC 2
-#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
-#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
+#define ATH_STAT_PROMISC 1
+#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
+#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
struct ieee80211_channel *curchan; /* current h/w channel */
@@ -1097,6 +1297,7 @@ struct ath5k_hw {
led_on; /* pin setting for LED on */
struct work_struct reset_work; /* deferred chip reset */
+ struct work_struct calib_work; /* deferred phy calibration */
struct list_head rxbuf; /* receive buffer */
spinlock_t rxbuflock;
@@ -1113,8 +1314,6 @@ struct ath5k_hw {
struct ath5k_rfkill rf_kill;
- struct tasklet_struct calib; /* calibration tasklet */
-
spinlock_t block; /* protects beacon */
struct tasklet_struct beacontq; /* beacon intr tasklet */
struct list_head bcbuf; /* beacon buffer */
@@ -1144,7 +1343,7 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
struct ieee80211_channel *ah_current_channel;
- bool ah_calibration;
+ bool ah_iq_cal_needed;
bool ah_single_chip;
enum ath5k_version ah_version;
@@ -1187,7 +1386,13 @@ struct ath5k_hw {
u32 ah_txq_imr_cbrurn;
u32 ah_txq_imr_qtrig;
u32 ah_txq_imr_nofrm;
- u32 ah_txq_isr;
+
+ u32 ah_txq_isr_txok_all;
+ u32 ah_txq_isr_txurn;
+ u32 ah_txq_isr_qcborn;
+ u32 ah_txq_isr_qcburn;
+ u32 ah_txq_isr_qtrig;
+
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
size_t ah_rf_regs_count;
@@ -1228,8 +1433,8 @@ struct ath5k_hw {
/* Calibration timestamp */
unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_short;
unsigned long ah_cal_next_ani;
- unsigned long ah_cal_next_nf;
/* Calibration mask */
u8 ah_cal_mask;
@@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+ u32 interval);
bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
/* Init function */
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode);
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
/* Queue Control Unit, DFS Control Unit Functions */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 91627dd2c26..d7114c75fe9 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -27,8 +27,7 @@
#include "debug.h"
/**
- * ath5k_hw_post - Power On Self Test helper function
- *
+ * ath5k_hw_post() - Power On Self Test helper function
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_init - Check if hw is supported and init the needed structs
- *
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
* @ah: The &struct ath5k_hw associated with the device
*
* Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
/* Reset SERDES to load new settings */
ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Get misc capabilities */
@@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
goto err;
}
- if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
- __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
- __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
- }
-
/* Crypto settings */
common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@ err:
}
/**
- * ath5k_hw_deinit - Free the ath5k_hw struct
- *
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index b346d049200..178a4dd1031 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -80,6 +80,11 @@ static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+static int ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
.flags = 0 },
- /* XR missing */
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (ret)
goto err_unmap;
- memset(mrr_rate, 0, sizeof(mrr_rate));
- memset(mrr_tries, 0, sizeof(mrr_tries));
- for (i = 0; i < 3; i++) {
- rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
- if (!rate)
- break;
+ /* Set up MRR descriptor */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
+ memset(mrr_rate, 0, sizeof(mrr_rate));
+ memset(mrr_tries, 0, sizeof(mrr_tries));
+ for (i = 0; i < 3; i++) {
+ rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+ if (!rate)
+ break;
- mrr_rate[i] = rate->hw_value;
- mrr_tries[i] = info->control.rates[i + 1].count;
- }
+ mrr_rate[i] = rate->hw_value;
+ mrr_tries[i] = info->control.rates[i + 1].count;
+ }
- ath5k_hw_setup_mrr_tx_desc(ah, ds,
- mrr_rate[0], mrr_tries[0],
- mrr_rate[1], mrr_tries[1],
- mrr_rate[2], mrr_tries[2]);
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
+ mrr_rate[0], mrr_tries[0],
+ mrr_rate[1], mrr_tries[1],
+ mrr_rate[2], mrr_tries[2]);
+ }
ds->ds_link = 0;
ds->ds_data = bf->skbaddr;
@@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
struct ath5k_hw *ah = (void *)data;
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
- if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+ if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
ath5k_tx_processq(ah, &ah->txqs[i]);
ah->tx_pending = false;
@@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
ah->nexttbtt = nexttbtt;
intval |= AR5K_BEACON_ENA;
- ath5k_hw_init_beacon(ah, nexttbtt, intval);
+ ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
/*
* debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@ static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
- !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
- /* run ANI only when full calibration is not active */
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run ANI only when calibration is not active */
+
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
tasklet_schedule(&ah->ani_tasklet);
- } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
- ah->ah_cal_next_full = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
- tasklet_schedule(&ah->calib);
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run calibration only when another calibration
+ * is not running.
+ *
+ * Note: This is for both full/short calibration,
+ * if it's time for a full one, ath5k_calibrate_work will deal
+ * with it. */
+
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+ ieee80211_queue_work(ah->hw, &ah->calib_work);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
@@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
enum ath5k_int status;
unsigned int counter = 1000;
+
+ /*
+ * If hw is not ready (or detached) and we get an
+ * interrupt, or if we have no interrupts pending
+ * (that means it's not for us), skip it.
+ *
+ * NOTE: Group 0/1 PCI interface registers are not
+ * supported on WiSOCs, so we can't check for pending
+ * interrupts (ISR belongs to another register group
+ * so we are ok).
+ */
if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
- ((ath5k_get_bus_type(ah) != ATH_AHB) &&
- !ath5k_hw_is_intr_pending(ah))))
+ ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+ !ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
+ /** Main loop **/
do {
- ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+ ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+
ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
status, ah->imask);
+
+ /*
+ * Fatal hw error -> Log and reset
+ *
+ * Fatal errors are unrecoverable so we have to
+ * reset the card. These errors include bus and
+ * dma errors.
+ */
if (unlikely(status & AR5K_INT_FATAL)) {
- /*
- * Fatal errors are unrecoverable.
- * Typically these are caused by DMA errors.
- */
+
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fatal int, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+ /*
+ * RX Overrun -> Count and reset if needed
+ *
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ */
} else if (unlikely(status & AR5K_INT_RXORN)) {
+
/*
- * Receive buffers are full. Either the bus is busy or
- * the CPU is not fast enough to process all received
- * frames.
* Older chipsets need a reset to come out of this
* condition, but we treat it as RX for newer chips.
- * We don't know exactly which versions need a reset -
+ * We don't know exactly which versions need a reset
* this guess is copied from the HAL.
*/
ah->stats.rxorn_intr++;
+
if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"rx overrun, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
} else
ath5k_schedule_rx(ah);
+
} else {
+
+ /* Software Beacon Alert -> Schedule beacon tasklet */
if (status & AR5K_INT_SWBA)
tasklet_hi_schedule(&ah->beacontq);
- if (status & AR5K_INT_RXEOL) {
- /*
- * NB: the hardware should re-read the link when
- * RXE bit is written, but it doesn't work at
- * least on older hardware revs.
- */
+ /*
+ * No more RX descriptors -> Just count
+ *
+ * NB: the hardware should re-read the link when
+ * RXE bit is written, but it doesn't work at
+ * least on older hardware revs.
+ */
+ if (status & AR5K_INT_RXEOL)
ah->stats.rxeol_intr++;
- }
- if (status & AR5K_INT_TXURN) {
- /* bump tx trigger level */
+
+
+ /* TX Underrun -> Bump tx trigger level */
+ if (status & AR5K_INT_TXURN)
ath5k_hw_update_tx_triglevel(ah, true);
- }
+
+ /* RX -> Schedule rx tasklet */
if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
ath5k_schedule_rx(ah);
- if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
- | AR5K_INT_TXERR | AR5K_INT_TXEOL))
+
+ /* TX -> Schedule tx tasklet */
+ if (status & (AR5K_INT_TXOK
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXERR
+ | AR5K_INT_TXEOL))
ath5k_schedule_tx(ah);
- if (status & AR5K_INT_BMISS) {
- /* TODO */
- }
+
+ /* Missed beacon -> TODO
+ if (status & AR5K_INT_BMISS)
+ */
+
+ /* MIB event -> Update counters and notify ANI */
if (status & AR5K_INT_MIB) {
ah->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
+
+ /* GPIO -> Notify RFKill layer */
if (status & AR5K_INT_GPIO)
tasklet_schedule(&ah->rf_kill.toggleq);
@@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
+ /*
+ * Until we handle rx/tx interrupts mask them on IMR
+ *
+ * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+ * and unset after we've handled the interrupts.
+ */
if (ah->rx_pending || ah->tx_pending)
ath5k_set_current_imask(ah);
if (unlikely(!counter))
ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
+ /* Fire up calibration poll */
ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
@@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
* for temperature/environment changes.
*/
static void
-ath5k_tasklet_calibrate(unsigned long data)
+ath5k_calibrate_work(struct work_struct *work)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+ calib_work);
+
+ /* Should we run a full calibration ? */
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "running full calibration\n");
+
+ if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+ /*
+ * Rfgain is out of bounds, reset the chip
+ * to load new gain values.
+ */
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "got new rfgain, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ }
+
+ /* TODO: On full calibration we should stop TX here,
+ * so that it doesn't interfere (mostly due to gain_f
+ * calibration that messes with tx packets -see phy.c).
+ *
+ * NOTE: Stopping the queues from above is not enough
+ * to stop TX but saves us from disconnecting (at least
+ * we don't lose packets). */
+ ieee80211_stop_queues(ah->hw);
+ } else
+ ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
- /* Only full calibration for now */
- ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
ieee80211_frequency_to_channel(ah->curchan->center_freq),
ah->curchan->hw_value);
- if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
- /*
- * Rfgain is out of bounds, reset the chip
- * to load new gain values.
- */
- ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ieee80211_queue_work(ah->hw, &ah->reset_work);
- }
if (ath5k_hw_phy_calibrate(ah, ah->curchan))
ATH5K_ERR(ah, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
ah->curchan->center_freq));
- /* Noise floor calibration interrupts rx/tx path while I/Q calibration
- * doesn't.
- * TODO: We should stop TX here, so that it doesn't interfere.
- * Note that stopping the queues is not enough to stop TX! */
- if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
- ah->ah_cal_next_nf = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
- ath5k_hw_update_noise_floor(ah);
- }
-
- ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ /* Clear calibration flags */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+ ieee80211_wake_queues(ah->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
@@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
if (ret)
goto err_irq;
- /* set up multi-rate retry capabilities */
- if (ah->ah_version == AR5K_AR5212) {
+ /* Set up multi-rate retry capabilities */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
hw->max_rates = 4;
hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
ah->curchan = ah->hw->conf.channel;
- ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
- AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+ ah->imask = AR5K_INT_RXOK
+ | AR5K_INT_RXERR
+ | AR5K_INT_RXEOL
+ | AR5K_INT_RXORN
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXEOL
+ | AR5K_INT_FATAL
+ | AR5K_INT_GLOBAL
+ | AR5K_INT_MIB;
ret = ath5k_reset(ah, NULL, false);
if (ret)
goto done;
- ath5k_rfkill_hw_start(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_start(ah);
/*
* Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
ah->tx_pending = false;
tasklet_kill(&ah->rxtq);
tasklet_kill(&ah->txtq);
- tasklet_kill(&ah->calib);
tasklet_kill(&ah->beacontq);
tasklet_kill(&ah->ani_tasklet);
}
@@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ah->tx_complete_work);
- ath5k_rfkill_hw_stop(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_stop(ah);
}
/*
@@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
ath5k_ani_init(ah, ani_mode);
- ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
- ah->ah_cal_next_ani = jiffies;
- ah->ah_cal_next_nf = jiffies;
+ /*
+ * Set calibration intervals
+ *
+ * Note: We don't need to run calibration immediately
+ * since some initial calibration is done on reset
+ * even for fast channel switching. Also on scanning
+ * this will get set again and again and it won't get
+ * executed unless we connect somewhere and spend some
+ * time on the channel (that's what calibration needs
+ * anyway to be accurate).
+ */
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
/* clear survey data and cycle counters */
@@ -2745,20 +2852,6 @@ ath5k_init(struct ieee80211_hw *hw)
/*
- * Check if the MAC has multi-rate retry support.
- * We do this by trying to setup a fake extended
- * descriptor. MACs that don't have support will
- * return false w/o doing anything. MACs that do
- * support it will return true w/o doing anything.
- */
- ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
-
- if (ret < 0)
- goto err;
- if (ret > 0)
- __set_bit(ATH_STAT_MRRETRY, ah->status);
-
- /*
* Collect the channel list. The 802.11 layer
* is responsible for filtering this list based
* on settings like the phy mode and regulatory
@@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
- tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
+ INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 810fba96702..994169ad39c 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
- if (AR5K_EEPROM_HDR_11B(ee_header))
- __set_bit(AR5K_MODE_11B, caps->cap_mode);
-
- if (AR5K_EEPROM_HDR_11G(ee_header) &&
- ah->ah_version != AR5K_AR5211)
- __set_bit(AR5K_MODE_11G, caps->cap_mode);
+ /* Override 2GHz modes on SoCs that need it
+ * NOTE: cap_needs_2GHz_ovr gets set from
+ * ath_ahb_probe */
+ if (!caps->cap_needs_2GHz_ovr) {
+ if (AR5K_EEPROM_HDR_11B(ee_header))
+ __set_bit(AR5K_MODE_11B,
+ caps->cap_mode);
+
+ if (AR5K_EEPROM_HDR_11G(ee_header) &&
+ ah->ah_version != AR5K_AR5211)
+ __set_bit(AR5K_MODE_11G,
+ caps->cap_mode);
+ }
}
}
@@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
- /* newer hardware has PHY error counters */
+ /* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
+ /* MACs since AR5212 have MRR support */
+ if (ah->ah_version == AR5K_AR5212)
+ caps->cap_has_mrr_support = true;
+ else
+ caps->cap_has_mrr_support = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index fce8c904eea..8c5ce8b0c73 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -57,8 +57,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
+#include <linux/moduleparam.h>
-#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 7f37df3125f..0a3f916a1ef 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -141,10 +141,10 @@ ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf);
#include <linux/compiler.h>
-static inline void __attribute__ ((format (printf, 3, 4)))
+static inline __printf(3, 4) void
ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {}
-static inline void __attribute__ ((format (printf, 3, 4)))
+static inline __printf(3, 4) void
ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...)
{}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7e88dda8222..f8bfa3ac2af 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,20 +26,61 @@
#include "debug.h"
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kind of
+ * descriptors for RX and TX, control descriptors tell the hw how to send or
+ * receive a packet where to read/write it from/to etc and status descriptors
+ * that contain information about how the packet was sent or received (errors
+ * included).
+ *
+ * Descriptor format is not exactly the same for each MAC chip version so we
+ * have function pointers on &struct ath5k_hw we initialize at runtime based on
+ * the chip used.
+ */
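/*
 * Illustrative note, not part of the patch: callers use the function
 * pointers that ath5k_hw_init_desc_functions() fills in (e.g.
 * ah->ah_setup_tx_desc), so the AR5210/5211 vs AR5212 variants below
 * stay hidden behind a common interface.
 */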
+
+
/************************\
* TX Control descriptors *
\************************/
-/*
- * Initialize the 2-word tx control descriptor on 5210/5211
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, int padsize,
- enum ath5k_pkt_type type,
- unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
- unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate, unsigned int rtscts_duration)
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Initialize the 4-word tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
- struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
- int padsize,
- enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
- unsigned int tx_tries0, unsigned int key_index,
- unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate,
- unsigned int rtscts_duration)
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
@@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
return 0;
}
-/*
- * Initialize a 4-word multi rate retry tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs, they are part of the normal 4-word tx control descriptor (see above)
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
int
-ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
- u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u_int tx_rate1, u_int tx_tries1,
+ u_int tx_rate2, u_int tx_tries2,
+ u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
@@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
* TX Status descriptors *
\***********************/
-/*
- * Process the tx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_2w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process a tx status descriptor on 5212
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
* RX Descriptors *
\****************/
-/*
- * Initialize an rx control descriptor
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
*/
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags)
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
@@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Process the rx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MACs.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
@@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process the rx status descriptor on 5212
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MACs.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc,
- struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
* Attach *
\********/
-/*
- * Init function pointers inside ath5k_hw struct
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers on ah, used
+ * from above. This is used as an abstraction layer to handle the various chips
+ * the same way.
*/
-int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index cfd529b548f..8d6c01a49ea 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -20,25 +20,30 @@
* RX/TX descriptor structures
*/
-/*
- * Common hardware RX control descriptor
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
*/
struct ath5k_hw_rx_ctl {
- u32 rx_control_0; /* RX control word 0 */
- u32 rx_control_1; /* RX control word 1 */
+ u32 rx_control_0;
+ u32 rx_control_1;
} __packed __aligned(4);
/* RX control word 1 fields/flags */
#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
-/*
- * Common hardware RX status descriptor
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
* 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
- u32 rx_status_0; /* RX status word 0 */
- u32 rx_status_1; /* RX status word 1 */
+ u32 rx_status_0;
+ u32 rx_status_1;
} __packed __aligned(4);
/* 5210/5211 */
@@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
/**
* enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
*/
enum ath5k_phy_error_code {
- AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
- AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
- AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
- AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
- AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
- AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
- AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
- AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
- /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0,
+ AR5K_RX_PHY_ERROR_TIMING = 1,
+ AR5K_RX_PHY_ERROR_PARITY = 2,
+ AR5K_RX_PHY_ERROR_RATE = 3,
+ AR5K_RX_PHY_ERROR_LENGTH = 4,
+ AR5K_RX_PHY_ERROR_RADAR = 5,
+ AR5K_RX_PHY_ERROR_SERVICE = 6,
+ AR5K_RX_PHY_ERROR_TOR = 7,
AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
@@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
};
-/*
- * 5210/5211 hardware 2-word TX control descriptor
+/**
+ * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
*/
struct ath5k_hw_2w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
+ u32 tx_control_0;
+ u32 tx_control_1;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
-/*
- * 5212 hardware 4-word TX control descriptor
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
*/
struct ath5k_hw_4w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
- u32 tx_control_2; /* TX control word 2 */
- u32 tx_control_3; /* TX control word 3 */
+ u32 tx_control_0;
+ u32 tx_control_1;
+ u32 tx_control_2;
+ u32 tx_control_3;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
-/*
- * Common TX status descriptor
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
*/
struct ath5k_hw_tx_status {
- u32 tx_status_0; /* TX status word 0 */
- u32 tx_status_1; /* TX status word 1 */
+ u32 tx_status_0;
+ u32 tx_status_1;
} __packed __aligned(4);
/* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
-/*
- * 5210/5211 hardware TX descriptor
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_2w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * 5212 hardware TX descriptor
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_4w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * Common hardware RX descriptor
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
*/
struct ath5k_hw_all_rx_desc {
struct ath5k_hw_rx_ctl rx_ctl;
struct ath5k_hw_rx_status rx_stat;
} __packed __aligned(4);
-/*
- * Atheros hardware DMA descriptor
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
* This is read and written to by the hardware
*/
struct ath5k_desc {
- u32 ds_link; /* physical address of the next descriptor */
- u32 ds_data; /* physical address of data buffer (skb) */
+ u32 ds_link;
+ u32 ds_data;
union {
struct ath5k_hw_5210_tx_desc ds_tx5210;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 2481f9c7f4b..5cc9aa81469 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -20,16 +20,13 @@
* DMA and interrupt masking functions *
\*************************************/
-/*
- * dma.c - DMA and interrupt masking functions
+/**
+ * DOC: DMA and interrupt masking functions
*
* Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
* handle queue setup for 5210 chipset (rest are handled on qcu.c).
* Also we setup interrupt mask register (IMR) and read the various interrupt
* status registers (ISR).
- *
- * TODO: Handle SISR on 5211+ and introduce a function to return the queue
- * number that resulted the interrupt.
*/
#include "ath5k.h"
@@ -42,22 +39,22 @@
\*********/
/**
- * ath5k_hw_start_rx_dma - Start DMA receive
- *
+ * ath5k_hw_start_rx_dma() - Start DMA receive
* @ah: The &struct ath5k_hw
*/
-void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
/**
- * ath5k_hw_stop_rx_dma - Stop DMA receive
- *
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
* @ah: The &struct ath5k_hw
*/
-static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
@@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_rxdp - Get RX Descriptor's address
- *
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
* @ah: The &struct ath5k_hw
*/
-u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
/**
- * ath5k_hw_set_rxdp - Set RX Descriptor's address
- *
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
* Returns -EIO if rx is active
*/
-int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
\**********/
/**
- * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
- *
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
* NOTE: Must be called after setting up tx control descriptor for that
* queue (see below).
*/
-int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
@@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
- *
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Stop DMA transmit on a specific hw queue and drain queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
* -EINVAL if queue number is out of range or inactive.
- *
*/
-static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
@@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_beacon_queue - Stop beacon queue
- *
- * @ah The &struct ath5k_hw
- * @queue The queue number
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
*
* Returns -EIO if queue didn't stop
*/
-int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
*
* XXX: Is TXDP read and clear ?
*/
-u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
@@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
+ * @phys_addr: The physical address
*
* Set TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
* Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
* active.
*/
-int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
@@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
}
/**
- * ath5k_hw_update_tx_triglevel - Update tx trigger level
- *
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
* @ah: The &struct ath5k_hw
* @increase: Flag to force increase of trigger level
*
@@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
* buffer (aka FIFO threshold) that is used to indicate when PCU flushes
* the buffer and transmits its data. Lowering this results sending small
* frames more quickly but can lead to tx underruns, raising it a lot can
- * result other problems (i think bmiss is related). Right now we start with
- * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have reached maximum/minimum.
+ * result in other problems. Right now we start with the lowest possible
+ * (64Bytes) and if we get tx underrun we increase it using the increase
+ * flag. Returns -EIO if we have reached maximum/minimum.
*
* XXX: Link this with tx DMA size ?
- * XXX: Use it to save interrupts ?
- * TODO: Needs testing, i think it's related to bmiss...
+ * XXX2: Use it to save interrupts ?
*/
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
u32 trigger_level, imr;
int ret = -EIO;
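For illustration, a hedged sketch of how a caller might react to a TX underrun using the helper above (function name and placement are assumptions, not part of the patch):

/* Hypothetical ISR fragment: raise the FIFO trigger level on TX underrun,
 * as the comment above suggests. */
static void example_handle_txurn(struct ath5k_hw *ah, enum ath5k_int status)
{
	if (status & AR5K_INT_TXURN) {
		/* Returns -EIO once the maximum level has been reached */
		if (ath5k_hw_update_tx_triglevel(ah, true))
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				  "TX trigger level already at maximum\n");
	}
}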
@@ -498,21 +494,20 @@ done:
\*******************/
/**
- * ath5k_hw_is_intr_pending - Check if we have pending interrupts
- *
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
* @ah: The &struct ath5k_hw
*
* Check if we have pending interrupts to process. Returns 1 if we
* have pending interrupts and 0 if we haven't.
*/
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
/**
- * ath5k_hw_get_isr - Get interrupt status
- *
+ * ath5k_hw_get_isr() - Get interrupt status
* @ah: The @struct ath5k_hw
* @interrupt_mask: Driver's interrupt mask used to filter out
* interrupts in sw.
@@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
* being mapped on some standard non hw-specific positions
* (check out &ath5k_int).
*
- * NOTE: We use read-and-clear register, so after this function is called ISR
- * is zeroed.
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
*/
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
- u32 data;
+ u32 data = 0;
/*
- * Read interrupt status from the Interrupt Status register
- * on 5210
+ * Read interrupt status from Primary Interrupt
+ * Register.
+ *
+ * Note: PISR/SISR Not available on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
- data = ath5k_hw_reg_read(ah, AR5K_ISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ u32 isr = 0;
+ isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+ if (unlikely(isr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = isr;
return -ENODEV;
}
- } else {
+
/*
- * Read interrupt status from Interrupt
- * Status Register shadow copy (Read And Clear)
- *
- * Note: PISR/SISR Not available on 5210
+ * Filter out the non-common bits from the interrupt
+ * status.
*/
- data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+ /* Handle INT_FATAL */
+ if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+ | AR5K_ISR_DPERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*
+ * XXX: BMISS interrupts may occur after association.
+ * I found this on 5210 code but it needs testing. If this is
+ * true we should disable them before assoc and re-enable them
+ * after a successful assoc + some jiffies.
+ interrupt_mask &= ~AR5K_INT_BMISS;
+ */
+
+ data = isr;
+ } else {
+ u32 pisr = 0;
+ u32 pisr_clear = 0;
+ u32 sisr0 = 0;
+ u32 sisr1 = 0;
+ u32 sisr2 = 0;
+ u32 sisr3 = 0;
+ u32 sisr4 = 0;
+
+ /* Read PISR and SISRs... */
+ pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+ if (unlikely(pisr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = pisr;
return -ENODEV;
}
- }
- /*
- * Get abstract interrupt mask (driver-compatible)
- */
- *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+ sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+ sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+ sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+ sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+ sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
- if (ah->ah_version != AR5K_AR5210) {
- u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
+ /*
+ * PISR holds the logical OR of interrupt bits
+ * from SISR registers:
+ *
+ * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
+ * per-queue bits on SISR0
+ *
+ * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+ * per-queue bits on SISR1
+ *
+ * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+ *
+ * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
+ *
+ * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
+ * BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+ * (and TSFOOR ?) bits on SISR2
+ *
+ * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+ * QCBRURN per-queue bits on SISR3
+ * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+ *
+ * If we clear these bits on PISR we'll also clear all the
+ * related bits from the SISRs, e.g. if we write the TXOK bit on
+ * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
+ * interrupt got fired for another queue while we were reading
+ * the interrupt registers and we write back the TXOK bit on
+ * PISR we'll lose it. So make sure that we don't write back
+ * on PISR any bits that come from SISRs. Clearing them from
+ * SISRs will also clear PISR, so no need to worry here.
+ */
- /*HIU = Host Interface Unit (PCI etc)*/
- if (unlikely(data & (AR5K_ISR_HIUERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
- /*Beacon Not Ready*/
- if (unlikely(data & (AR5K_ISR_BNR)))
- *interrupt_mask |= AR5K_INT_BNR;
+ /*
+ * Write to clear them...
+ * Note: This means that each bit we write back
+ * to the registers will get cleared, leaving the
+ * rest unaffected. So this won't affect new interrupts
+ * we didn't catch while reading/processing, we 'll get
+ * them next time get_isr gets called.
+ */
+ ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+ ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+ ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+ ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+ ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+ ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+ /* Flush previous write */
+ ath5k_hw_reg_read(ah, AR5K_PISR);
- if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
- AR5K_SISR2_DPERR |
- AR5K_SISR2_MCABT)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ /*
+ * Filter out the non-common bits from the interrupt
+ * status.
+ */
+ *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+
+
+ /* We treat TXOK, TXDESC, TXERR and TXEOL
+ * the same way (schedule the tx tasklet)
+ * so we track them all together per queue */
+ if (pisr & AR5K_ISR_TXOK)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXOK);
- if (data & AR5K_ISR_TIM)
+ if (pisr & AR5K_ISR_TXDESC)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXDESC);
+
+ if (pisr & AR5K_ISR_TXERR)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXERR);
+
+ if (pisr & AR5K_ISR_TXEOL)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXEOL);
+
+ /* Currently this is not very useful since we treat
+ * all queues the same way if we get a TXURN (update
+ * tx trigger level) but we might need it later on */
+ if (pisr & AR5K_ISR_TXURN)
+ ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+ AR5K_SISR2_QCU_TXURN);
+
+ /* Misc Beacon related interrupts */
+
+ /* For AR5211 */
+ if (pisr & AR5K_ISR_TIM)
*interrupt_mask |= AR5K_INT_TIM;
- if (data & AR5K_ISR_BCNMISC) {
+ /* For AR5212+ */
+ if (pisr & AR5K_ISR_BCNMISC) {
if (sisr2 & AR5K_SISR2_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
}
- if (data & AR5K_ISR_RXDOPPLER)
- *interrupt_mask |= AR5K_INT_RX_DOPPLER;
- if (data & AR5K_ISR_QCBRORN) {
+ /* Below interrupts are unlikely to happen */
+
+ /* HIU = Host Interface Unit (PCI etc)
+ * Can be one of MCABT, SSERR, DPERR from SISR2 */
+ if (unlikely(pisr & (AR5K_ISR_HIUERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*Beacon Not Ready*/
+ if (unlikely(pisr & (AR5K_ISR_BNR)))
+ *interrupt_mask |= AR5K_INT_BNR;
+
+ /* A queue got CBR overrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRORN);
+ ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRORN);
}
- if (data & AR5K_ISR_QCBRURN) {
+
+ /* A queue got CBR underrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRURN);
+ ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRURN);
}
- if (data & AR5K_ISR_QTRIG) {
+
+ /* A queue got triggered */
+ if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
- AR5K_SISR4_QTRIG);
+ ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+ AR5K_SISR4_QTRIG);
}
- if (data & AR5K_ISR_TXOK)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXOK);
-
- if (data & AR5K_ISR_TXDESC)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXDESC);
-
- if (data & AR5K_ISR_TXERR)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXERR);
-
- if (data & AR5K_ISR_TXEOL)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXEOL);
-
- if (data & AR5K_ISR_TXURN)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
- AR5K_SISR2_QCU_TXURN);
- } else {
- if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
- | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
-
- /*
- * XXX: BMISS interrupts may occur after association.
- * I found this on 5210 code but it needs testing. If this is
- * true we should disable them before assoc and re-enable them
- * after a successful assoc + some jiffies.
- interrupt_mask &= ~AR5K_INT_BMISS;
- */
+ data = pisr;
}
/*
@@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
}
/**
- * ath5k_hw_set_imr - Set interrupt mask
- *
+ * ath5k_hw_set_imr() - Set interrupt mask
* @ah: The &struct ath5k_hw
* @new_mask: The new interrupt mask to be set
*
@@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* ath5k_int bits to hw-specific bits to remove abstraction and writing
* Interrupt Mask Register.
*/
-enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
enum ath5k_int old_mask, int_mask;
@@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
& AR5K_SIMR2_QCU_TXURN;
+ /* Fatal interrupt abstraction for 5211+ */
if (new_mask & AR5K_INT_FATAL) {
int_mask |= AR5K_IMR_HIUERR;
simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
| AR5K_SIMR2_DPERR);
}
- /*Beacon Not Ready*/
- if (new_mask & AR5K_INT_BNR)
- int_mask |= AR5K_INT_BNR;
-
+ /* Misc beacon related interrupts */
if (new_mask & AR5K_INT_TIM)
int_mask |= AR5K_IMR_TIM;
@@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
if (new_mask & AR5K_INT_CAB_TIMEOUT)
simr2 |= AR5K_SISR2_CAB_TIMEOUT;
- if (new_mask & AR5K_INT_RX_DOPPLER)
- int_mask |= AR5K_IMR_RXDOPPLER;
+ /*Beacon Not Ready*/
+ if (new_mask & AR5K_INT_BNR)
+ int_mask |= AR5K_INT_BNR;
/* Note: Per queue interrupt masks
* are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
} else {
+ /* Fatal interrupt abstraction for 5210 */
if (new_mask & AR5K_INT_FATAL)
int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
+ /* Only common interrupts left for 5210 (no SIMRs) */
ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
}
@@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
\********************/
/**
- * ath5k_hw_dma_init - Initialize DMA unit
- *
+ * ath5k_hw_dma_init() - Initialize DMA unit
* @ah: The &struct ath5k_hw
*
* Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
*
* XXX: Save/restore RXDP/TXDP registers ?
*/
-void ath5k_hw_dma_init(struct ath5k_hw *ah)
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
{
/*
* Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_dma_stop - stop DMA unit
- *
+ * ath5k_hw_dma_stop() - stop DMA unit
* @ah: The &struct ath5k_hw
*
* Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
* stuck frames on tx queues, only a reset
* can fix that.
*/
-int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
int i, qmax, err;
err = 0;
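As a side note on the get_isr() rework earlier in this file, the write-to-clear sequence can be summarized with a small standalone sketch (a hypothetical helper that simply mirrors the register writes shown in the hunk above):

/* Hypothetical helper: acknowledge interrupts by writing back the values we
 * read to the SISRs, and write to PISR only the bits that do not originate
 * from a SISR, so per-queue events raised in the meantime are not lost. */
static void example_ack_isr(struct ath5k_hw *ah, u32 pisr, u32 sisr[5])
{
	u32 pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;

	ath5k_hw_reg_write(ah, sisr[0], AR5K_SISR0);
	ath5k_hw_reg_write(ah, sisr[1], AR5K_SISR1);
	ath5k_hw_reg_write(ah, sisr[2], AR5K_SISR2);
	ath5k_hw_reg_write(ah, sisr[3], AR5K_SISR3);
	ath5k_hw_reg_write(ah, sisr[4], AR5K_SISR4);
	ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);

	/* Flush the posted writes */
	ath5k_hw_reg_read(ah, AR5K_PISR);
}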
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 85929781191..73d3dd8a306 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -24,10 +24,33 @@
#include "reg.h"
#include "debug.h"
-/*
- * Set led state
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * We can set a GPIO pin to be an input or an output pin on GPIO control
+ * register and then read or set its status from GPIO data input/output
+ * registers.
+ *
+ * We also control the two LED pins provided by the hw: LED_0 is our
+ * "power" LED and LED_1 is our "network activity" LED, but many scenarios
+ * are available from hw. Vendors might also provide LEDs connected to the
+ * GPIO pins; we handle them through the LED subsystem in led.c.
+ */
+
+
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only
+ * works for the LEDs connected to the LED_0 and LED_1 pins,
+ * not the GPIO-based ones.
*/
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
u32 led;
/*5210 has different led mode handling*/
@@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}
-/*
- * Set GPIO inputs
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
*/
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Set GPIO outputs
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
*/
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Get GPIO state
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
*/
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
@@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
0x1;
}
-/*
- * Set GPIO state
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
*/
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
@@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
return 0;
}
-/*
- * Initialize the GPIO interrupt (RFKill switch)
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored in EEPROM.
+ * It can either open or close the circuit to indicate that we should disable
+ * RF/Wireless to save power (we also get that from EEPROM).
*/
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level)
{
u32 data;
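For illustration, a brief usage sketch of the GPIO helpers documented above (pin number and LED polarity are assumptions; not part of the patch):

/* Hypothetical usage: drive a vendor LED wired to a GPIO pin. */
static int example_gpio_led_on(struct ath5k_hw *ah, u32 gpio_pin)
{
	int ret;

	ret = ath5k_hw_set_gpio_output(ah, gpio_pin);
	if (ret)
		return ret;	/* pin number out of range */

	/* Assuming an active-low LED here */
	return ath5k_hw_set_gpio(ah, gpio_pin, 0);
}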
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 1ffecc0fd3e..a1ea78e05b4 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -23,24 +23,27 @@
#include "reg.h"
#include "debug.h"
-/*
- * Mode-independent initial register writes
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
*/
-
struct ath5k_ini {
u16 ini_register;
u32 ini_value;
enum {
AR5K_INI_WRITE = 0, /* Default */
- AR5K_INI_READ = 1, /* Cleared on read */
+ AR5K_INI_READ = 1,
} ini_mode;
};
-/*
- * Mode specific initial register values
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
*/
-
struct ath5k_ini_mode {
u16 mode_register;
u32 mode_value[3];
@@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
/* Initial mode-specific settings for AR5211
* 5211 supports OFDM-only g (draft g) but we
- * need to test it !
- */
+ * need to test it ! */
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ AR5K_TXCFG,
- /* A/XR B G */
+ /* A B G */
{ 0x00000015, 0x0000001d, 0x00000015 } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ 0x00000010, 0x00000010, 0x00000010 } },
};
-/* Initial register settings for AR5212 */
+/* Initial register settings for AR5212 and newer chips */
static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_RXDP, 0x00000000 },
{ AR5K_RXCFG, 0x00000005 },
@@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ 0x00000000, 0x00000000, 0x00000108 } },
};
-/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ 0x1883800a, 0x1873800a, 0x1883800a } },
};
+/* Common for all modes */
static const struct ath5k_ini rf5111_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
@@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
};
-/*
- * Write initial register dump
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
*/
-static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
const struct ath5k_ini *ini_regs, bool skip_pcu)
{
unsigned int i;
@@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
}
}
-static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
unsigned int size, const struct ath5k_ini_mode *ini_mode,
u8 mode)
{
@@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
}
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write initial chip-specific register dump, to get the chipset on a
+ * clean and ready-to-work state after warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
/*
* Write initial register settings
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index c1dff2ced04..849fa060ebc 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/etherdevice.h>
+#include <linux/module.h>
#include "../ath.h"
#include "ath5k.h"
#include "debug.h"
@@ -97,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
0xffff);
return true;
}
- udelay(15);
+ usleep_range(15, 20);
}
return false;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index a7eafa3edc2..cebfd6fd31d 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -30,11 +30,47 @@
#include "reg.h"
#include "debug.h"
-/*
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The Protocol Control Unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from
+ * the baseband. To be more specific, PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in sw above for debugging or research. For more info check out the
+ * PCU registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
- * This is a mapping between G rates (that cover both
+ * There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
@@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
\*******************/
/**
- * ath5k_hw_get_frame_duration - Get tx time of a frame
- *
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
+ * @shortpre: Indicate short preamble
*
* Calculate tx duration of a frame given it's rate and length
* It extends ieee80211_generic_frame_duration for non standard
* bwmodes.
*/
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
@@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int slot_time;
@@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int sifs;
@@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
- *
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Is called after a
* MIB interrupt, because one of these counters might have reached their maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
- * Is called in interrupt context!
+ * NOTE: Is called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
struct ath5k_statistics *stats = &ah->stats;
@@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
\******************/
/**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
* that include all OFDM and CCK rates.
*
*/
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
struct ieee80211_rate *rate;
unsigned int i;
@@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
- *
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
<= timeout)
@@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
- *
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
<= timeout)
@@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
\*******************/
/**
- * ath5k_hw_set_lladdr - Set station id
- *
+ * ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
- * @mac: The card's mac address
+ * @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 low_id, high_id;
@@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
}
/**
- * ath5k_hw_set_bssid - Set current BSSID on hw
- *
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
-void ath5k_hw_set_bssid(struct ath5k_hw *ah)
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
struct ath_common *common = ath5k_hw_common(ah);
u16 tim_offset = 0;
@@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
ath5k_hw_enable_pspoll(ah, NULL, 0);
}
-void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+/**
+ * ath5k_hw_set_bssid_mask() - Filter out bssids we listen
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
struct ath_common *common = ath5k_hw_common(ah);
@@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
ath_hw_setbssidmask(common);
}
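For illustration, one way the per-bit logic described above could be derived for multiple virtual interfaces (a hypothetical helper, not part of the driver): a mask bit stays set only where all BSSIDs agree.

static void example_compute_bssid_mask(const u8 addrs[][ETH_ALEN], int naddrs,
					u8 mask[ETH_ALEN])
{
	int i, b;

	/* Start with "match every bit" and clear the bits that differ */
	memset(mask, 0xff, ETH_ALEN);
	for (i = 1; i < naddrs; i++)
		for (b = 0; b < ETH_ALEN; b++)
			mask[b] &= ~(addrs[0][b] ^ addrs[i][b]);
}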
-/*
- * Set multicast filter
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32bits of multicast filter
+ * @filter1: Higher 16bits of multicast filter
*/
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/**
- * ath5k_hw_get_rx_filter - Get current rx filter
- *
+ * ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
@@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_rx_filter - Set rx filter
- *
+ * ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
@@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
@@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
#define ATH5K_MAX_TSF_READ 10
/**
- * ath5k_hw_get_tsf64 - Get the full 64bit TSF
- *
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
u32 tsf_lower, tsf_upper1, tsf_upper2;
int i;
@@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
return ((u64)tsf_upper1 << 32) | tsf_lower;
}
+#undef ATH5K_MAX_TSF_READ
+
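For illustration, the idea behind the bounded re-read in ath5k_hw_get_tsf64() above can be sketched as follows (simplified to an unbounded loop; register names as in the hunk):

/* Hypothetical sketch: re-read the upper half until it is stable so a carry
 * from the lower 32 bits between the two reads is not missed. The driver
 * bounds this with ATH5K_MAX_TSF_READ instead of looping forever. */
static u64 example_read_tsf64(struct ath5k_hw *ah)
{
	u32 upper, lower;

	do {
		upper = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
		lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
	} while (upper != ath5k_hw_reg_read(ah, AR5K_TSF_U32));

	return ((u64)upper << 32) | lower;
}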
/**
- * ath5k_hw_set_tsf64 - Set a new 64bit TSF
- *
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
-void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
/**
- * ath5k_hw_reset_tsf - Force a TSF reset
- *
+ * ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
@@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
-/*
- * Initialize beacon timers
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
*/
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
@@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
/**
- * ath5k_check_timer_win - Check if timer B is timer A + window
- *
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
@@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
}
/**
- * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
- *
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
- * This is a workaround for IBSS mode:
+ * This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
}
/**
- * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
- *
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
-void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
/* As defined by IEEE 802.11-2007 17.3.8.6 */
int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
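A quick worked example of that formula (values assumed for illustration): with the default 9 usec OFDM slot time and coverage class 3, slot_time = 9 + 3 * 3 = 18 usec, and the ACK/CTS timeouts mentioned in the kerneldoc above scale with the enlarged slot time.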
@@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
\***************************/
/**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
+ * ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
+ * ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * ath5k_hw_set_opmode - Set PCU operating mode
- *
+ * ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
return 0;
}
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode)
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
/* Set bssid and bssid mask */
ath5k_hw_set_bssid(ah);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 01cb72de44c..e1f8613426a 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1,6 +1,4 @@
/*
- * PHY functions
- *
* Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
@@ -20,6 +18,10 @@
*
*/
+/***********************\
+* PHY related functions *
+\***********************/
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -31,14 +33,53 @@
#include "../regd.h"
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also keep in mind that we never got documentation for most of these
+ * functions; what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Get the PHY Chip revision
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
*/
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
{
unsigned int i;
u32 srev;
@@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return 0;
}
- mdelay(2);
+ usleep_range(2000, 2500);
/* ...wait until PHY is ready and read the selected radio revision */
ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
@@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return ret;
}
-/*
- * Check if a channel is supported
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
*/
-bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
u16 freq = channel->center_freq;
@@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
return false;
}
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u8 refclk_freq;
@@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
return false;
}
-/*
- * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: Indicate we need to swap data
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * info.
*/
-static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
- const struct ath5k_rf_reg *rf_regs,
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
u32 val, u8 reg_id, bool set)
{
const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
* @ah: the &struct ath5k_hw
* @channel: the currently set channel upon reset
*
@@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
* mantissa and provide these values on hw.
*
* For more infos i think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * "http://www.freepatentsonline.com/7184495.html"
*/
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
{
/* Get exponent and mantissa and set it */
u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
return 0;
}
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_phy_disable(struct ath5k_hw *ah)
{
/*Just a try M.F.*/
@@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
-/*
- * Wait for synth to settle
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
/*
@@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
delay = delay << 2;
/* XXX: /2 on turbo ? Let's be safe
* for now */
- udelay(100 + delay);
+ usleep_range(100 + delay, 100 + (2 * delay));
} else {
- mdelay(1);
+ usleep_range(1000, 1500);
}
}
@@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* RF Gain optimization *
\**********************/
-/*
+/**
+ * DOC: RF Gain optimization
+ *
* This code is used to optimize RF gain on different environments
* (temperature mostly) based on feedback from a power detector.
*
@@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* no gain optimization ladder-.
*
* For more info check out this patent doc
- * http://www.freepatentsonline.com/7400691.html
+ * "http://www.freepatentsonline.com/7400691.html"
*
* This paper describes power drops as seen on the receiver due to
* probe packets
- * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
- * %20of%20Power%20Control.pdf
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
*
* And this is the MadWiFi bug entry related to the above
- * http://madwifi-project.org/ticket/1659
+ * "http://madwifi-project.org/ticket/1659"
* with various measurements and diagrams
- *
- * TODO: Deal with power drops due to probes by setting an appropriate
- * tx power on the probe packets ! Make this part of the calibration process.
*/
-/* Initialize ah_gain during attach */
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
{
/* Initialize the gain optimization values */
@@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
return 0;
}
-/* Schedule a gain probe check on the next transmitted packet.
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
* That means our next packet is going to be sent with lower
* tx power and a Peak to Average Power Detector (PAPD) will try
* to measure the gain.
*
- * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc)
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
* just after we enable the probe so that we don't mess with
- * standard traffic ? Maybe it's time to use sw interrupts and
- * a probe tasklet !!!
+ * standard traffic.
*/
-static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
{
/* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
}
-/* Calculate gain_F measurement correction
- * based on the current step for RF5112 rev. 2 */
-static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
u32 *rf;
@@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
return ah->ah_gain.g_f_corr;
}
-/* Check if current gain_F measurement is in the range of our
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
* power detector windows. If we get a measurement outside range
* we know it's not accurate (detectors can't measure anything outside
- * their detection window) so we must ignore it */
-static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
ah->ah_gain.g_current <= level[3]);
}
-/* Perform gain_F adjustment by choosing the right set
- * of parameters from RF gain optimization ladder */
-static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
{
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@ done:
return ret;
}
-/* Main callback for thermal RF gain calibration engine
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
* Check for a new gain reading and schedule an adjustment
* if needed.
*
- * TODO: Use sw interrupt to schedule reset if gain_F needs
- * adjustment */
-enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
{
u32 data, type;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@ done:
return ah->ah_gain.g_state;
}
-/* Write initial RF gain table to set the RF sensitivity
- * this one works on all RF chips and has nothing to do
- * with gain_F calibration */
-static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
unsigned int i, size, index;
@@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
}
-
/********************\
* RF Registers setup *
\********************/
-/*
- * Setup RF registers by writing RF buffer on hw
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing RF buffer on hw. For
+ * more info on this, check out rfbuffer.h
*/
-static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel, unsigned int mode)
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode)
{
const struct ath5k_rf_reg *rf_regs;
const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
PHY/RF channel functions
\**************************/
-/*
- * Conversion needed for RF5110
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
*/
-static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
{
u32 athchan;
- /*
- * Convert IEEE channel/MHz to an internal channel value used
- * by the AR5210 chipset. This has not been verified with
- * newer chipsets like the AR5212A who have a completely
- * different RF/PHY part.
- */
athchan = (ath5k_hw_bitswap(
(ieee80211_frequency_to_channel(
channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
return athchan;
}
-/*
- * Set channel on RF5110
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data;
@@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
data = ath5k_hw_rf5110_chan2athchan(channel);
ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
- mdelay(1);
+ usleep_range(1000, 1500);
return 0;
}
-/*
- * Conversion needed for 5111
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
*/
-static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
struct ath5k_athchan_2ghz *athchan)
{
int channel;
@@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
return 0;
}
-/*
- * Set channel on 5111
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set channel on 5112 and newer
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value directly to the chip, after a few
+ * modifications.
+ *
+ * NOTE: Make sure the channel frequency given is within our range or else
+ * we might damage the chip! Use ath5k_channel_ok before calling this one.
*/
-static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
data = data0 = data1 = data2 = 0;
c = channel->center_freq;
+ /* My guess based on code:
+ * 2GHz RF has 2 synth modes, one with a Local Oscillator
+ * at 2224 MHz and one with a LO at 2192 MHz. IF is 1520 MHz
+ * (3040/2). data0 is used to set the PLL divider and data1
+ * selects synth mode. */
if (c < 4800) {
+ /* Channel 14 and all frequencies with 2 MHz spacing
+ * below/above (non-standard channels) */
if (!((c - 2224) % 5)) {
+ /* Same as (c - 2224) / 5 */
data0 = ((2 * (c - 704)) - 3040) / 10;
data1 = 1;
+ /* Channel 1 and all frequencies with 5 MHz spacing
+ * below/above (standard channels without channel 14) */
} else if (!((c - 2192) % 5)) {
+ /* Same as (c - 2192) / 5 */
data0 = ((2 * (c - 672)) - 3040) / 10;
data1 = 0;
} else
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+ /* This is more complex, we have a single synthesizer with
+ * 4 reference clock settings (?) based on frequency spacing
+ * and set using data2. LO is at 4800 MHz and data0 is again used
+ * to set some divider.
+ *
+ * NOTE: There is an old Atheros presentation at Stanford
+ * that mentions a method called dual direct conversion
+ * with 1GHz sliding IF for RF5110. Maybe that's what we
+ * have here, or an updated version. */
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return 0;
}
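Quick numeric check of the synth-mode comments above (illustrative only, derived from the code as shown):

/* c = 2412 (channel 1): (2412 - 2192) % 5 == 0, so the second branch runs:
 *   data0 = ((2 * (2412 - 672)) - 3040) / 10 = (3480 - 3040) / 10 = 44
 * which agrees with the simplified form (c - 2192) / 5 = 220 / 5 = 44,
 * before the final bitswap is applied. */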
-/*
- * Set the channel on the RF2425
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF, so the code differs
+ * a little from RF5112.
*/
-static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data2;
@@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set a channel on the radio chip
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
*/
-static int ath5k_hw_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
return 0;
}
+
/*****************\
PHY calibration
\*****************/
-static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL bit, hw disconnects the antennas and does
+ * a calibration on the DC offsets of the ADCs. During this period
+ * rx/tx gets disabled so we have to deal with it on the driver
+ * side.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix QAM constellation by sampling data from rxed frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more info on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
{
s32 val;
@@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
}
-void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
{
int i;
@@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
}
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
{
struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
hist->nfval[hist->index] = noise_floor;
}
-static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
{
s16 sort[ATH5K_NF_CAL_HIST_MAX];
s16 tmp;
@@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
}
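For reference, a standalone sketch of the median-of-history idea used above (assumptions: a small odd-sized buffer; this is not the driver's exact routine):

static s16 example_median_nf(const s16 *hist, int len)
{
	s16 sort[ATH5K_NF_CAL_HIST_MAX];
	int i, j;

	for (i = 0; i < len; i++)
		sort[i] = hist[i];

	/* insertion sort is fine for a handful of samples */
	for (i = 1; i < len; i++) {
		s16 tmp = sort[i];

		for (j = i - 1; j >= 0 && sort[j] > tmp; j--)
			sort[j + 1] = sort[j];
		sort[j + 1] = tmp;
	}

	return sort[(len - 1) / 2];
}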
-/*
- * When we tell the hardware to perform a noise floor calibration
- * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
- * sample-and-hold the minimum noise level seen at the antennas.
- * This value is then stored in a ring buffer of recently measured
- * noise floor values so we have a moving window of the last few
- * samples.
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
*
- * The median of the values in the history is then loaded into the
- * hardware for its own use for RSSI and CCA measurements.
+ * This is the main function we call to perform a NF calibration,
+ * it reads NF from hardware, calculates the median and updates
+ * NF on hw.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
return;
}
+ ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
/* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_noise_floor = nf;
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor calibrated: %d\n", nf);
}
-/*
- * Perform a PHY calibration on RF5110
- * -Fix BPSK/QAM Constellation (I/Q correction)
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
*/
-static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 phy_sig, phy_agc, phy_sat, beacon;
int ret;
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+ return 0;
+
/*
* Disable beacons and RX/TX queues, wait
*/
@@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
- mdelay(2);
+ usleep_range(2000, 2500);
/*
* Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
* Activate PHY and wait
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
@@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
return 0;
}
-/*
- * Perform I/Q calibration on RF5111/5112 and newer chips
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
*/
static int
ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
int i;
- if (!ah->ah_calibration ||
- ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
- return 0;
+ /* Skip if I/Q calibration is not needed or if it's still running */
+ if (!ah->ah_iq_cal_needed)
+ return -EINVAL;
+ else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "I/Q calibration still running");
+ return -EBUSY;
+ }
/* Calibration has finished, get the results and re-run */
- /* work around empty results which can apparently happen on 5212 */
+
+ /* Workaround for empty results which can apparently happen on 5212:
+ * Read registers up to 10 times until we get both i_pwr and q_pwr */
for (i = 0; i <= 10; i++) {
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
else
q_coffd = q_pwr >> 7;
- /* protect against divide by 0 and loss of sign bits */
+ /* In case i_coffd became zero, cancel the calibration:
+ * not only is it too small, it would also result in a divide
+ * by zero later on. */
if (i_coffd == 0 || q_coffd < 2)
- return 0;
+ return -ECANCELED;
+
+ /* Protect against loss of sign bits */
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
return 0;
}
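A numeric walk-through of the I/Q correction math visible in this hunk (made-up measurements, illustrative only):

/*   i_pwr = 25600   -> i_coffd = i_pwr >> 7 = 200
 *   iq_corr = -1300 -> i_coff = (-iq_corr) / i_coffd = 1300 / 200 = 6
 *   clamp(6, -32, 31) = 6, which fits the signed 6-bit field.
 * With iq_corr = 9000 and i_pwr = 12800:
 *   i_coffd = 100, i_coff = -90, clamped to -32. */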
-/*
- * Perform a PHY calibration
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
*/
-int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
return ath5k_hw_rf5110_calibrate(ah, channel);
ret = ath5k_hw_rf511x_iq_calibrate(ah);
+ if (ret) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "No I/Q correction performed (%uMHz)\n",
+ channel->center_freq);
+
+ /* Happens all the time if there is not much
+ * traffic, consider it normal behaviour. */
+ ret = 0;
+ }
+
+ /* On full calibration do an AGC calibration and
+ * request a PAPD probe for gainf calibration if
+ * needed */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
- if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
- (channel->hw_value != AR5K_MODE_11B))
- ath5k_hw_request_rfgain_probe(ah);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL);
+
+ ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
+ 0, false);
+ if (ret) {
+ ATH5K_ERR(ah,
+ "gain calibration timeout (%uMHz)\n",
+ channel->center_freq);
+ }
+
+ if ((ah->ah_radio == AR5K_RF5111 ||
+ ah->ah_radio == AR5K_RF5112)
+ && (channel->hw_value != AR5K_MODE_11B))
+ ath5k_hw_request_rfgain_probe(ah);
+ }
+
+ /* Update noise floor
+ * XXX: Only do this after AGC calibration */
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+ ath5k_hw_update_noise_floor(ah);
return ret;
}
@@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
* Spur mitigation functions *
\***************************/
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. Spur is noise
+ * generated due to "reflection" effects, for more information on this
+ * method check out patent US7643810
+ */
static void
ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
* Antenna control *
\*****************/
-static void /*TODO:Boundary check*/
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas! I haven't found any card that implements
+ * that; the maximum number of antennas I've seen is 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> STA locked on a single AP
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode (A -> Rx, B -> Tx)
+ *
+ * Also note that when the antenna on the tx descriptor is set to F, the card
+ * inverts the current tx antenna.
+ */
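Illustration only (the real policy lives in ath5k_hw_set_antenna_mode(), which is not shown in this hunk): the DOC block above boils down to OR-ing a subset of the STA_ID1 bits it lists, e.g.:

/* Hypothetical combination: track ACKs for the default antenna and
 * use it for descriptors that request automatic selection. */
u32 sta_id1_flags = AR5K_STA_ID1_DEFAULT_ANTENNA |
		    AR5K_STA_ID1_DESC_ANTENNA;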
+
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-/*
- * Enable/disable fast rx antenna diversity
+/**
+ * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
*/
static void
ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
}
}
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
void
ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
AR5K_PHY_ANT_SWITCH_TABLE_1);
}
-/*
- * Set antenna operating mode
+/**
+ * ath5k_hw_set_antenna_mode() - Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
*/
void
ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
* Helper functions
*/
-/*
- * Do linear interpolation between two given (x, y) points
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
*/
static s16
ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
return result;
}
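A worked example for the interpolation helper (the rounding detail of ath5k_get_interpolated_value() is outside this hunk, so treat this as the textbook formula only):

/*   y = y_left + (target - x_left) * (y_right - y_left) / (x_right - x_left)
 *
 * e.g. target = 15, left = (10, 20), right = (20, 40):
 *   y = 20 + (15 - 10) * (40 - 20) / (20 - 10) = 20 + 100 / 10 = 30 */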
-/*
- * Find vertical boundary (min pwr) for the linear PCDAC curve.
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
*
* Since we have the top of the curve and we draw the line below
* until we reach 1 (1 pcdac step) we need to know which point
- * (x value) that is so that we don't go below y axis and have negative
- * pcdac values when creating the curve, or fill the table with zeroes.
+ * (x value) that is so that we don't go below x axis and have negative
+ * pcdac values when creating the curve, or fill the table with zeros.
*/
static s16
ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
return max(min_pwrL, min_pwrR);
}
-/*
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* Interpolate (pwr,vpd) points to create a Power to PDADC or a
* Power to PCDAC curve.
*
@@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
}
}
-/*
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
* Get the surrounding per-channel power calibration piers
* for a given frequency so that we can interpolate between
* them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@ done:
*pcinfo_r = &pcinfo[idx_r];
}
-/*
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
* Get the surrounding per-rate power calibration data
* for a given frequency and interpolate between power
* values to set max target power supported by hw for
- * each rate.
+ * each rate on this frequency.
*/
static void
ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@ done:
rpinfo[idx_r].target_power_54);
}
-/*
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Get the max edge power for this channel if
* we have such data from EEPROM's Conformance Test
* Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
* Power to PCDAC table functions
*/
-/*
- * Fill Power to PCDAC table on RF5111
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we use its calibrated points (piers) if we have them,
+ * or -if we don't have calibration data for this specific channel- we derive
+ * them by linearly interpolating between the surrounding channels we do have
+ * calibration data for. Then, having the calibrated points for this channel,
+ * we do another linear interpolation between them to get the
+ * whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
*
* No further processing is needed for RF5111, the only thing we have to
* do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
}
-/*
- * Combine available XPD Curves and fill Linear Power to PCDAC table
- * on RF5112
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
*
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
* RFX112 can have up to 2 curves (one for low txpower range and one for
* higher txpower range). We need to put them both on pcdac_out and place
* them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
}
}
-/* Write PCDAC values on hw */
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
static void
ath5k_write_pcdac_table(struct ath5k_hw *ah)
{
@@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
* Power to PDADC table functions
*/
-/*
- * Set the gain boundaries and create final Power to PDADC table
+/**
+ * DOC: Power to PDADC table functions
+ *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5 db steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
*
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table.
* We can have up to 4 pd curves, so we need to do a similar process
* as we do for RF5112. This time we don't have an edge_flag but we
* set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
}
-/* Write PDADC values on hw */
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
* Common code for PCDAC/PDADC tables
*/
-/*
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* This is the main function that uses all of the above
* to set PCDAC/PDADC table on hw for the current channel.
* This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
return 0;
}
-/* Write power table for current channel to hw */
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
static void
ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
{
@@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
ath5k_write_pcdac_table(ah);
}
-/*
- * Per-rate tx power setting
+
+/**
+ * DOC: Per-rate tx power setting
*
- * This is the code that sets the desired tx power (below
+ * This is the code that sets the desired tx power limit (below
* maximum) on hw for each rate (we also have TPC that sets
- * power per packet). We do that by providing an index on the
- * PCDAC/PDADC table we set up.
- */
-
-/*
- * Set rate power table
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
*
* For now we only limit txpower based on maximum tx power
- * supported by hw (what's inside rate_info). We need to limit
- * this even more, based on regulatory domain etc.
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc. to be safe. Normally this is done from above so we don't care
+ * here; all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on PA etc).
*
- * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
- * and is indexed as follows:
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
* rates[0] - rates[7] -> OFDM rates
* rates[8] - rates[14] -> CCK rates
* rates[15] -> XR rates (they all have the same power)
*/
+
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
}
-/*
- * Set transmission power
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
*/
static int
ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return 0;
}
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower to the driver in
+ * case the user or an application changes it while the PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
"changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
}
+
/*************\
Init function
\*************/
-int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver, it assumes PHY is in a
+ * warm reset state!
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 mode, bool fast)
{
struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Write RF buffer
@@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
}
} else if (ah->ah_version == AR5K_AR5210) {
- mdelay(1);
+ usleep_range(1000, 1500);
/* Disable phy and wait */
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Set channel on PHY */
@@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
for (i = 0; i <= 20; i++) {
if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
break;
- udelay(200);
+ usleep_range(200, 250);
}
ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
@@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* At the same time start I/Q calibration for QAM constellation
* -no need for CCK- */
- ah->ah_calibration = false;
+ ah->ah_iq_cal_needed = false;
if (!(mode == AR5K_MODE_11B)) {
- ah->ah_calibration = true;
+ ah->ah_iq_cal_needed = true;
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 776654228ea..30b50f93417 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -17,23 +17,48 @@
*/
/********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
\********************************************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we set up parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DFS parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs allowing us to have different DFS settings for each queue.
+ *
+ * When a frame goes into a TX queue, QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have inside
+ * its buffer or -if it's a beacon queue- if it's time to fire up the queue
+ * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler
+ * (arbitrator) decides the priority of each QCU based on its configuration
+ * (e.g. beacons are always transmitted when they leave DCU bypassing all other
+ * frames from other queues waiting to be transmitted). After a frame leaves
+ * the DCU it goes to PCU for further processing and then to PHY for
+ * the actual transmission.
+ */
/******************\
* Helper functions *
\******************/
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
return pending;
}
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
@@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
-/*
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
* Make sure cw is a power of 2 minus 1 and smaller than 1024
*/
-static u16 ath5k_cw_validate(u16 cw_req)
+static u16
+ath5k_cw_validate(u16 cw_req)
{
- u32 cw = 1;
cw_req = min(cw_req, (u16)1023);
- while (cw < cw_req)
- cw = (cw << 1) | 1;
+ /* Check if cw_req + 1 is a power of 2 */
+ if (is_power_of_2(cw_req + 1))
+ return cw_req;
- return cw;
+ /* Check if cw_req is a power of 2 */
+ if (is_power_of_2(cw_req))
+ return cw_req - 1;
+
+ /* If none of the above is correct
+ * find the closest power of 2 */
+ cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+ return cw_req;
}
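Sample inputs/outputs for the validation helper above, derived from the code as shown:

/* ath5k_cw_validate(63)   -> 63    (63 + 1 is a power of 2)
 * ath5k_cw_validate(64)   -> 63    (64 is a power of 2, subtract 1)
 * ath5k_cw_validate(100)  -> 127   (roundup_pow_of_two(100) - 1)
 * ath5k_cw_validate(5000) -> 1023  (clamped to 1023; 1024 is 2^10) */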
-/*
- * Get properties for a transmit queue
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
*/
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
*/
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *qinfo)
{
struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
return 0;
}
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
*/
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
@@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
* Single QCU/DCU initialization *
\*******************************/
-/*
- * Set tx retry limits on DCU
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
*/
-void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
unsigned int queue)
{
/* Single data queue on AR5210 */
@@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*
- * @ah The &struct ath5k_hw
- * @queue The hw queue number
- *
- * Set DFS properties for the given transmit queue on DCU
+ * Set DCF properties for the given transmit queue on DCU
* and configures all queue-specific parameters.
*/
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
\**************************/
/**
- * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
- *
- * @ah The &struct ath5k_hw
- * @slot_time Slot time in us
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
*
* Sets the global IFS intervals on DCU (also works on AR5210) for
* the given slot time and the current bwmode.
@@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
}
-int ath5k_hw_init_queues(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on information on
+ * ah->ah_txq* set by the driver
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
{
int i, ret;
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index f5c1000045d..0ea1608b47f 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -280,6 +280,10 @@
* 5211/5212 we have one primary and 4 secondary registers.
* So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
* Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR from per-queue interrupt bits found on SISR registers
+ * (see below).
*/
#define AR5K_ISR 0x001c /* Register Address [5210] */
#define AR5K_PISR 0x0080 /* Register Address [5211+] */
@@ -292,7 +296,10 @@
#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
-#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */
+#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)
+ * NOTE: We don't have per-queue info for this
+ * one, but we can enable it per-queue through
+ * TXNOFRM_QCU field on TXNOFRM register */
#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
@@ -302,21 +309,29 @@
#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
-#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
+#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+]
+ * 'or' of MCABT, SSERR, DPERR from SISR2 */
#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
-#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
+#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */
#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
-#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
- CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt
+ * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+ * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
+#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+ AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+ AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+ AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+ AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
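Illustration only (not part of the patch): a summary mask like this is typically used in the interrupt path to decide whether the secondary status registers need to be read at all; the helper name below is a placeholder, not a real ath5k call.

/* Hypothetical sketch of using the mask: */
if (pisr & AR5K_ISR_BITS_FROM_SISRS)
	example_read_sisr_details(ah);	/* placeholder helper */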
/*
* Secondary status registers [5211+] (0 - 4)
*
@@ -347,7 +362,7 @@
#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
-#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */
+#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */
#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 2abac257b4b..4aed3a3ab10 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,9 +19,9 @@
*
*/
-/*****************************\
- Reset functions and helpers
-\*****************************/
+/****************************\
+ Reset function and helpers
+\****************************/
#include <asm/unaligned.h>
@@ -33,14 +33,36 @@
#include "debug.h"
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state and ready to receive. We also handle routines
+ * that don't fit on other places such as clock, sleep and power control
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Check if a register write has been completed
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us or 0
*/
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
bool is_set)
{
int i;
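A condensed sketch of the poll-with-timeout pattern the kernel-doc above describes (the real loop body is not part of this hunk; AR5K_TUNE_REGISTER_TIMEOUT and the 15us step come from the comment, the rest is illustrative):

for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
	u32 data = ath5k_hw_reg_read(ah, reg);

	if (is_set && (data & flag))
		break;
	else if ((data & flag) == val)
		break;
	udelay(15);
}

return i > 0 ? 0 : -EAGAIN;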
@@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
\*************************/
/**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
* @ah: The &struct ath5k_hw
* @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
*/
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
{
struct ath_common *common = ath5k_hw_common(ah);
return usec * common->clockrate;
}
/**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
* @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
*/
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
{
struct ath_common *common = ath5k_hw_common(ah);
return clock / common->clockrate;
}
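A small usage note for the two helpers above (assuming common->clockrate is expressed in MHz, e.g. 40 for a 40 MHz core clock):

/* ath5k_hw_htoclock(ah, 9)   -> 360 clock units (9 us slot time at 40 MHz)
 * ath5k_hw_clocktoh(ah, 360) -> 9 us
 * The two are exact inverses only for multiples of the clock rate. */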
/**
- * ath5k_hw_init_core_clock - Initialize core clock
- *
- * @ah The &struct ath5k_hw
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
*
- * Initialize core clock parameters (usec, usec32, latencies etc).
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
*/
-static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
}
}
-/*
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
* If there is an external 32KHz crystal available, use it
* as ref. clock instead of 32/40MHz clock and baseband clocks
* to save power during sleep or restore normal 32/40MHz
* operation.
*
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- * 123 - 127) require delay on access.
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
*/
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 scal, spending, sclock;
@@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
* Reset/Sleep control *
\*********************/
-/*
- * Reset chipset
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
{
int ret;
u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
/* Wait at least 128 PCI clocks */
- udelay(15);
+ usleep_range(15, 20);
if (ah->ah_version == AR5K_AR5210) {
val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
return ret;
}
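[Editor's note: the pattern used above (write a reset mask, then poll the same register until every requested flag clears, giving up after a bounded wait) can be sketched in isolation as follows. The register accessors, the 10 us step and the time budget are hypothetical stand-ins, not the driver's ath5k_hw_register_timeout().]

#include <stdint.h>
#include <errno.h>

/* Hypothetical MMIO-like accessors backed by a plain variable; real
 * hardware would clear the reset bits once the units come out of reset. */
static uint32_t fake_reset_reg;

static uint32_t reg_read(void)           { return fake_reset_reg; }
static void     reg_write(uint32_t val)  { fake_reset_reg = val; }
static void     wait_us(unsigned int us) { (void)us; /* busy-wait stub */ }

/*
 * Write @mask to the reset register, then poll until every requested
 * flag reads back as cleared or the time budget runs out.  Returns 0
 * on success or -EAGAIN on timeout, mirroring the contract documented
 * for ath5k_hw_nic_reset() above.
 */
static int reset_and_wait(uint32_t mask, unsigned int budget_us)
{
	reg_write(mask);

	while (budget_us) {
		if ((reg_read() & mask) == 0)
			return 0;
		wait_us(10);
		budget_us = (budget_us > 10) ? budget_us - 10 : 0;
	}
	return -EAGAIN;
}

int main(void)
{
	/* With the stub register nothing clears the bits, so this times out. */
	return reset_and_wait(0x3, 100) == -EAGAIN ? 0 : 1;
}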
-/*
- * Reset AHB chipset
- * AR5K_RESET_CTL_PCU flag resets WMAC
- * AR5K_RESET_CTL_BASEBAND flag resets WBB
+/**
+ * ath5k_hw_wisoc_reset() - Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB-based devices
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
{
u32 mask = flags ? flags : ~0U;
u32 __iomem *reg;
@@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = __raw_readl(reg);
__raw_writel(regval | val, reg);
regval = __raw_readl(reg);
- udelay(100);
+ usleep_range(100, 150);
/* Bring BB/MAC out of reset */
__raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
return 0;
}
-
-/*
- * Sleep control
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information, check out the sleep control
+ * register in reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
*/
-static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
bool set_chip, u16 sleep_duration)
{
unsigned int i;
@@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
- udelay(15);
+ usleep_range(15, 20);
for (i = 200; i > 0; i--) {
/* Check if the chip did wake up */
@@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
break;
/* Wait a bit and retry */
- udelay(50);
+ usleep_range(50, 75);
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
}
@@ -523,17 +589,20 @@ commit:
return 0;
}
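[Editor's note: the wake-up path above retries a fixed number of times, re-asserting the wake bit and pausing between reads until the power-down flag clears. A standalone sketch of that bounded-retry shape; the flag value, retry count and helper names are illustrative only.]

#include <stdint.h>
#include <errno.h>

#define PWR_DOWN_FLAG	0x1	/* hypothetical "still powered down" bit */

static uint32_t status_reg = PWR_DOWN_FLAG;

static uint32_t read_status(void)         { return status_reg; }
static void     request_wake(void)        { /* stub: hw would clear the flag */ }
static void     sleep_us(unsigned int us) { (void)us; }

/* Retry up to @retries times, mirroring the "for (i = 200; i > 0; i--)"
 * loop above: check the flag, wait a bit, poke the wake bit again. */
static int wait_for_wakeup(int retries)
{
	request_wake();

	while (retries--) {
		if (!(read_status() & PWR_DOWN_FLAG))
			return 0;		/* chip is awake */
		sleep_us(50);
		request_wake();			/* re-assert wake request */
	}
	return -EIO;				/* matches the -EIO documented above */
}

int main(void)
{
	/* The stub never clears the flag, so this reports a timeout. */
	return wait_for_wakeup(200) == -EIO ? 0 : 1;
}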
-/*
- * Put device on hold
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
*
- * Put MAC and Baseband on warm reset and
- * keep that state (don't clean sleep control
- * register). After this MAC and Baseband are
- * disabled and a full reset is needed to come
- * back. This way we save as much power as possible
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clear the sleep control register). After this, MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
* without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
*/
-int ath5k_hw_on_hold(struct ath5k_hw *ah)
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
{
struct pci_dev *pdev = ah->pdev;
u32 bus_flags;
@@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return 0;
/* Make sure device is awake */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
}
/* ...wakeup again!*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to put device on hold\n");
return ret;
@@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return ret;
}
-/*
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Bring up MAC + PHY Chips and program PLL
- * Channel is NULL for the initial wakeup.
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel info
*/
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
/* Wakeup the device */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
}
/* ...wakeup again!...*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
return ret;
@@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
/* ...update PLL if needed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
- udelay(300);
+ usleep_range(300, 350);
}
/* ...set the PHY operating mode */
@@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
* Post-initvals register modifications *
\**************************************/
-/* TODO: Half/Quarter rate */
-static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled by the initvals, e.g. bwmode
+ * settings, some PHY settings, workarounds etc. that in general
+ * don't fit anywhere else or are too small to introduce a separate
+ * function for each one. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
}
}
-static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use settings stored in EEPROM to properly initialize the card
+ * based on the available information and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
* Main reset function *
\*********************/
-int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool fast, bool skip_pcu)
{
u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/*
* Initialize PCU
*/
- ath5k_hw_pcu_init(ah, op_mode, mode);
+ ath5k_hw_pcu_init(ah, op_mode);
/*
* Initialize PHY
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 5d11c23b429..aed34d9954c 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -18,7 +18,9 @@
*/
-/*
+/**
+ * DOC: RF Buffer registers
+ *
* There are some special registers on the RF chip
* that control various operation settings related mostly to
* the analog parts (channel, gain adjustment etc).
@@ -44,40 +46,63 @@
*/
-/*
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
* Struct to hold default mode specific RF
- * register values (RF Banks)
+ * register values (RF Banks) for each chip.
*/
struct ath5k_ini_rfbuffer {
- u8 rfb_bank; /* RF Bank number */
- u16 rfb_ctrl_register; /* RF Buffer control register */
- u32 rfb_mode_data[3]; /* RF Buffer data for each mode */
+ u8 rfb_bank;
+ u16 rfb_ctrl_register;
+ u32 rfb_mode_data[3];
};
-/*
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Offset on the raw packet
+ * @col: Used for shifting
+ *
* Struct to hold RF Buffer field
* infos used to access certain RF
* analog registers
*/
struct ath5k_rfb_field {
- u8 len; /* Field length */
- u16 pos; /* Offset on the raw packet */
- u8 col; /* Column -used for shifting */
+ u8 len;
+ u16 pos;
+ u8 col;
};
-/*
- * RF analog register definition
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index in ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity because we can refer to an RF
+ * register by its index no matter what chip we work with
+ * as long as it has that register.
*/
struct ath5k_rf_reg {
- u8 bank; /* RF Buffer Bank number */
- u8 index; /* Register's index on rf_regs_idx */
- struct ath5k_rfb_field field; /* RF Buffer field for this register */
+ u8 bank;
+ u8 index;
+ struct ath5k_rfb_field field;
};
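[Editor's note: the @len/@pos pair above describes where a register's bits live inside the serialized RF bank. As an illustration of that kind of addressing, the generic bit-packing sketch below writes @len bits at bit offset @pos of a raw buffer; it is not the driver's actual buffer-modification routine and ignores the @col shift.]

#include <stdint.h>
#include <stdio.h>

/* Write the low @len bits of @val into @buf starting at bit @pos.
 * Purely illustrative: the real driver also handles the column shift
 * and the per-bank word layout. */
static void rfb_write_field(uint8_t *buf, unsigned int pos,
			    unsigned int len, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		unsigned int bit = pos + i;

		if (val & (1u << i))
			buf[bit / 8] |= 1u << (bit % 8);
		else
			buf[bit / 8] &= ~(1u << (bit % 8));
	}
}

int main(void)
{
	uint8_t buf[8] = { 0 };

	rfb_write_field(buf, 10, 3, 0x5);	/* write 0b101 at bit offset 10 */
	printf("buf[1] = 0x%02x\n", buf[1]);	/* bits 10 and 12 set -> 0x14 */
	return 0;
}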
-/* Map RF registers to indexes
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
* We do this to handle common bits and make our
* life easier by using an index for each register
- * instead of a full rfb_field */
+ * instead of a full rfb_field
+ */
enum ath5k_rf_regs_idx {
/* BANK 2 */
AR5K_RF_TURBO = 0,
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index ebfae052d89..4d21df0e597 100644
--- a/drivers/net/wireless/ath/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -18,13 +18,17 @@
*
*/
-/*
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5 and 2GHz
+ *
* Mode-specific RF Gain table (64bytes) for RF5111/5112
* (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
* RF Gain values are included in AR5K_AR5210_INI)
*/
struct ath5k_ini_rfgain {
- u16 rfg_register; /* RF Gain register address */
+ u16 rfg_register;
u32 rfg_value[2]; /* [freq (see below)] */
};
@@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
#define AR5K_GAIN_CHECK_ADJUST(_g) \
((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
struct ath5k_gain_opt_step {
s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
s8 gos_gain;
};
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
struct ath5k_gain_opt {
u8 go_default;
u8 go_steps_count;
const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
};
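[Editor's note: a gain ladder as described above is just an ordered array of steps plus a default index; moving up or down the ladder trades the knob settings in gos_param for more or less gain. A small sketch with made-up step values; the numbers and sizes below are illustrative, not taken from the RF5111/RF5112 tables.]

#include <stdio.h>

#define MAX_FIX_BITS	4	/* hypothetical, stands in for AR5K_GAIN_CRN_MAX_FIX_BITS */

struct gain_step {
	signed char param[MAX_FIX_BITS];	/* knob settings for this step */
	signed char gain;			/* resulting gain for this step */
};

static const struct gain_step ladder[] = {
	{ { 0, 0, 0, 0 }, -4 },
	{ { 1, 0, 0, 0 },  0 },	/* default step */
	{ { 1, 1, 0, 0 },  4 },
	{ { 1, 1, 1, 0 },  8 },
};

int main(void)
{
	unsigned int def = 1;	/* index of the default step */

	printf("default gain value: %d\n", ladder[def].gain);
	printf("one step up       : %d\n", ladder[def + 1].gain);
	return 0;
}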
+
/*
+ * RF5111
* Parameters on gos_param:
* 1) Tx clip PHY register
* 2) PWD 90 RF register
@@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
};
/*
+ * RF5112
* Parameters on gos_param:
* 1) Mixgain ovr RF register
* 2) PWD 138 RF register
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index abe3af3c618..6c59a217b1a 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -14,6 +14,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/moduleparam.h>
+
#include "core.h"
#include "cfg80211.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index cf513a80b06..eb808b46f94 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -19,6 +19,7 @@
#include <linux/circ_buf.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include "debug.h"
#include "target.h"
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 9b779aac83e..e569c652e35 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -46,8 +46,8 @@ enum ATH6K_DEBUG_MASK {
};
extern unsigned int debug_mask;
-extern int ath6kl_printk(const char *level, const char *fmt, ...)
- __attribute__ ((format (printf, 2, 3)));
+extern __printf(2, 3)
+int ath6kl_printk(const char *level, const char *fmt, ...);
#define ath6kl_info(fmt, ...) \
ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index c614049d7b2..368ecbd172a 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1663,6 +1663,7 @@ int ath6kl_core_init(struct ath6kl *ar)
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 07903e6114d..15c3f56caf4 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7b4c074e12f..1b4786ae00a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,9 @@ config ATH9K_HW
tristate
config ATH9K_COMMON
tristate
+config ATH9K_DFS_DEBUGFS
+ def_bool y
+ depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
config ATH9K
tristate "Atheros 802.11n wireless cards support"
@@ -51,6 +54,25 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH9K && EXPERT
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath9k. There is no way to dynamically detect if a card was DFS
+ certified and as such this is left as a build time option. This
+ option should only be enabled by system integrators that can
+ guarantee that all the platforms that their kernel will run on
+ have obtained appropriate regulatory body certification for a
+ respective Atheros card by using ath9k on the target shipping
+ platforms.
+
+ This is currently only a placeholder for future DFS support,
+ as DFS support requires more components that still need to be
+ developed. At this point enabling this option won't do anything
+ except increase code size.
+
config ATH9K_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 49d3f25f509..da02242499a 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -10,6 +10,8 @@ ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
obj-$(CONFIG_ATH9K) += ath9k.o
@@ -34,7 +36,8 @@ ath9k_hw-y:= \
ar9002_mac.o \
ar9003_mac.o \
ar9003_eeprom.o \
- ar9003_paprd.o
+ ar9003_paprd.o \
+ ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 85a54cd2b08..5e47ca6d16a 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -19,6 +19,7 @@
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>
+#include <linux/module.h>
#include "ath9k.h"
static const struct platform_device_id ath9k_platform_id_table[] = {
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2776c3c1f50..a639b94f764 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include "hw.h"
#include "hw-ops.h"
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 88279e325dc..157337febc2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -203,7 +203,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
i);
ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 626d547d2f0..11f192a1ceb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/moduleparam.h>
#include "hw.h"
#include "ar5008_initvals.h"
#include "ar9001_initvals.h"
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index f7d8e516a2a..b5920168606 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include <linux/export.h>
#define AR_BufLen 0x00000fff
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 12a730dcb50..23b3a6c5780 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,7 @@
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
+#include "ar9003_mci.h"
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
@@ -225,7 +226,7 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
i);
ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -824,7 +825,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
chan_info_tab[i] + offset);
ath_dbg(common, ATH_DBG_CALIBRATE,
- "IQ RES[%d]=0x%x"
+ "IQ_RES[%d]=0x%x "
"IQ_RES[%d]=0x%x\n",
idx, iq_res[idx], idx + 1,
iq_res[idx + 1]);
@@ -934,10 +935,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
bool txiqcal_done = false, txclcal_done = false;
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1005,6 +1008,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ /* send CAL_REQ only when BT is AWAKE. */
+ ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
+ mci_hw->wlan_cal_seq);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+ /* Wait BT_CAL_GRANT for 50ms */
+ ath_dbg(common, ATH_DBG_MCI, "MCI wait for BT_CAL_GRANT");
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+ ath_dbg(common, ATH_DBG_MCI, "MCI got BT_CAL_GRANT");
+ else {
+ is_reusable = false;
+ ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is not responding");
+ }
+ }
+
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
@@ -1022,6 +1050,21 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
}
+
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
+ mci_hw->wlan_cal_done);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+ }
+
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index a93bd63ad23..4ba6f52943a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4779,7 +4779,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11a[] = {
@@ -4880,6 +4880,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
ctlNum = AR9300_NUM_CTLS_5G;
}
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
ath_dbg(common, ATH_DBG_REGULATORY,
"LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index b363cc06cfd..631fe4f2e49 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -13,6 +13,7 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "hw.h"
#include "ar9003_mac.h"
@@ -174,20 +175,24 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 isr = 0;
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 sync_cause = 0, async_cause;
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+ if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
}
+
sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
*masked = 0;
- if (!isr && !sync_cause)
+ if (!isr && !sync_cause && !async_cause)
return false;
if (isr) {
@@ -293,6 +298,35 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_hw_bb_watchdog_read(ah);
}
+ if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
+ u32 raw_intr, rx_msg_intr;
+
+ rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+ if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI gets 0xdeadbeef during MCI int processing"
+ "new raw_intr=0x%08x, new rx_msg_raw=0x%08x, "
+ "raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
+ raw_intr, rx_msg_intr, mci->raw_intr,
+ mci->rx_msg_intr);
+ else {
+ mci->rx_msg_intr |= rx_msg_intr;
+ mci->raw_intr |= raw_intr;
+ *masked |= ATH9K_INT_MCI;
+
+ if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+ mci->cont_status =
+ REG_READ(ah, AR_MCI_CONT_STATUS);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+ ath_dbg(common, ATH_DBG_MCI, "AR_INTR_SYNC_MCI\n");
+
+ }
+ }
+
if (sync_cause) {
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644
index 00000000000..8599822dc83
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -0,0 +1,1464 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+ if (!AR_SREV_9462_20(ah))
+ return;
+
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+ udelay(1);
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+ u32 bit_position, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ while (time_out) {
+
+ if (REG_READ(ah, address) & bit_position) {
+
+ REG_WRITE(ah, address, bit_position);
+
+ if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
+
+ if (bit_position &
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+ ar9003_mci_reset_req_wakeup(ah);
+
+ if (bit_position &
+ (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_RX_MSG);
+ }
+ break;
+ }
+
+ udelay(10);
+ time_out -= 10;
+
+ if (time_out < 0)
+ break;
+ }
+
+ if (time_out <= 0) {
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Wait for Reg 0x%08x = 0x%08x timeout.\n",
+ address, bit_position);
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x",
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ time_out = 0;
+ }
+
+ return time_out;
+}
+
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+ wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x00000000;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x70000000;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+ MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (!mci->bt_version_known &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version query\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ }
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version response\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_RESPONSE);
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+ mci->wlan_ver_major;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+ mci->wlan_ver_minor;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload = &mci->wlan_channels[0];
+
+ if ((mci->wlan_channels_update == true) &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+ }
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+ bool wait_done, u8 query_type)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+ bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Send Coex BT Status Query 0x%02X\n", query_type);
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+
+ *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+ /*
+ * If bt_status_query message is not sent successfully,
+ * then need_flush_btinfo should be set again.
+ */
+ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true)) {
+ if (query_btinfo) {
+ mci->need_flush_btinfo = true;
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI send bt_status_query fail, "
+ "set flush flag again\n");
+ }
+ }
+
+ if (query_btinfo)
+ mci->query_bt = false;
+ }
+}
+
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex %s BT GPM.\n",
+ (halt) ? "halt" : "unhalt");
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+
+ if (halt) {
+ mci->query_bt = true;
+ /* Send next unhalt no matter halt sent or not */
+ mci->unhalt_bt_gpm = true;
+ mci->need_flush_btinfo = true;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_HALT;
+ } else
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_UNHALT;
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
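[Editor's note: the GPM messages above are fixed 16-byte payloads addressed as four u32 words but filled byte-by-byte via casts like *(((u8 *)payload) + OFFSET). A standalone sketch of that packing idiom; the offset and values here are hypothetical, not the real MCI_GPM_COEX_* layout.]

#include <stdint.h>
#include <stdio.h>

#define B_HALT_STATE	4	/* hypothetical byte offset inside the payload */
#define HALT		1

int main(void)
{
	uint32_t payload[4] = { 0, 0, 0, 0 };	/* 16-byte GPM payload */

	/* Fill one byte of the word-addressed payload, as the driver does
	 * with *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) = ... */
	*(((uint8_t *)payload) + B_HALT_STATE) = HALT;

	printf("word[1] = 0x%08x\n", payload[1]);	/* 0x00000001 on little-endian */
	return 0;
}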
+
+
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 saved_mci_int_en;
+ u32 mci_timeout = 150;
+
+ mci->bt_state = MCI_BT_SLEEP;
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+ /* Remote Reset */
+ ath_dbg(common, ATH_DBG_MCI, "MCI Reset sequence start\n");
+ ath_dbg(common, ATH_DBG_MCI, "MCI send REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+
+ /*
+ * This delay is required for the reset delay worst case value 255 in
+ * MCI_COMMAND2 register
+ */
+
+ if (AR_SREV_9462_10(ah))
+ udelay(252);
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send REQ_WAKE to remoter(BT)\n");
+ ar9003_mci_send_req_wake(ah, true);
+
+ if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI SYS_WAKING from remote(BT)\n");
+ mci->bt_state = MCI_BT_AWAKE;
+
+ if (AR_SREV_9462_10(ah))
+ udelay(10);
+ /*
+ * We don't need to send more remote_resets at this moment.
+ * If BT receives the first remote_reset, then BT HW will
+ * be cleaned up, will be able to receive the req_wake,
+ * and will respond with sys_waking.
+ * In this case, WLAN will receive BT's HW sys_waking.
+ * Otherwise, if BT SW missed the initial remote_reset,
+ * that remote_reset will still clean up BT MCI RX,
+ * the req_wake will wake BT up,
+ * and BT SW will respond to this req_wake with a remote_reset and
+ * sys_waking. In this case, WLAN will receive BT's SW
+ * sys_waking. In either case, BT's RX is cleaned up, so we
+ * don't need to reply to BT's remote_reset now, if any.
+ * Similarly, if WLAN can receive BT's sys_waking in either case,
+ * that means WLAN's RX is also fine.
+ */
+
+ /* Send SYS_WAKING to BT */
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI send SW SYS_WAKING to remote BT\n");
+
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(10);
+
+ /*
+ * Set BT priority interrupt value to be 0xff to
+ * avoid having too many BT PRIORITY interrupts.
+ */
+
+ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+
+ /*
+ * A contention reset will be received after send out
+ * sys_waking. Also BT priority interrupt bits will be set.
+ * Clear those bits before the next step.
+ */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_BT_PRI);
+
+ if (AR_SREV_9462_10(ah) || mci->is_2g) {
+ /* Send LNA_TRANS */
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI send LNA_TRANS to BT\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+ }
+
+ if (AR_SREV_9462_10(ah) || (mci->is_2g &&
+ !mci->update_2g5g)) {
+ if (ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI WLAN has control over the LNA & "
+ "BT obeys it\n");
+ else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT didn't respond to"
+ "LNA_TRANS\n");
+ }
+
+ if (AR_SREV_9462_10(ah)) {
+ /* Send another remote_reset to deassert BT clk_req. */
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI another remote_reset to "
+ "deassert clk_req\n");
+ ar9003_mci_remote_reset(ah, true);
+ udelay(252);
+ }
+ }
+
+ /* Clear the extra redundant SYS_WAKING from BT */
+ if ((mci->bt_state == MCI_BT_AWAKE) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+ }
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+ u32 intr;
+
+ intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ return ((intr & ints) == ints);
+}
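[Editor's note: ar9003_mci_check_int() requires every bit in @ints to be raised, not just any of them; (intr & ints) == ints is the all-set test. A compile-only sketch contrasting the two checks, outside any driver context.]

#include <stdbool.h>
#include <stdint.h>

/* true only if every bit of @wanted is set in @raw */
static bool all_set(uint32_t raw, uint32_t wanted)
{
	return (raw & wanted) == wanted;
}

/* true if at least one bit of @wanted is set in @raw */
static bool any_set(uint32_t raw, uint32_t wanted)
{
	return (raw & wanted) != 0;
}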
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ *raw_intr = mci->raw_intr;
+ *rx_msg_intr = mci->rx_msg_intr;
+
+ /* Clean int bits after the values are read. */
+ mci->raw_intr = 0;
+ mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!mci->update_2g5g &&
+ (mci->is_2g != is_2g))
+ mci->update_2g5g = true;
+
+ mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload;
+ u32 recv_type, offset;
+
+ if (msg_index == MCI_GPM_INVALID)
+ return false;
+
+ offset = msg_index << 4;
+
+ payload = (u32 *)(mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(payload);
+
+ if (recv_type == MCI_GPM_RSVD_PATTERN) {
+ ath_dbg(common, ATH_DBG_MCI, "MCI Skip RSVD GPM\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+
+ ath9k_hw_cfg_output(ah, 3,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else
+ return;
+
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_DS_JTAG_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_WLAN_UART_INTF_EN, 0);
+ REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
+ ATH_MCI_CONFIG_MCI_OBS_GPIO);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+ REG_WRITE(ah, AR_OBS, 0x4b);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+ AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+ u8 opcode, u32 bt_flags)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 pld[4] = {0, 0, 0, 0};
+
+ MCI_GPM_SET_TYPE_OPCODE(pld,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
+ *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
+ (opcode == MCI_GPM_COEX_BT_FLAGS_READ) ? "READ" :
+ ((opcode == MCI_GPM_COEX_BT_FLAGS_SET) ? "SET" : "CLEAR"),
+ bt_flags);
+
+ return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+ wait_done, true);
+}
+
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 regval, thresh;
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI full_sleep = %d, is_2g = %d\n",
+ is_full_sleep, is_2g);
+
+ /*
+ * GPM buffer and scheduling message buffer are not allocated
+ */
+
+ if (!mci->gpm_addr && !mci->sched_addr) {
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI GPM and schedule buffers are not allocated");
+ return;
+ }
+
+ if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI it's deadbeef, quit mci_reset\n");
+ return;
+ }
+
+ /* Program MCI DMA related registers */
+ REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+ REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+ REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+ /*
+ * To avoid the MCI state machine being affected by incoming remote MCI msgs,
+ * MCI mode will be enabled later, right before resetting the MCI TX and RX.
+ */
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ if (is_2g && (AR_SREV_9462_20(ah)) &&
+ !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+
+ regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI sched one step look ahead\n");
+
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+
+ thresh = MS(mci->config,
+ ATH_MCI_CONFIG_AGGR_THRESH);
+ thresh &= 7;
+ regval |= SM(1,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
+ regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
+
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ } else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI sched aggr thresh: off\n");
+ } else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI SCHED one step look ahead off\n");
+
+ if (AR_SREV_9462_10(ah))
+ regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_SPDT_ENABLE);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+ AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+ REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+ thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+ REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+ /* Resetting the Rx and Tx paths of MCI */
+ regval = REG_READ(ah, AR_MCI_COMMAND2);
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ udelay(1);
+
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ if (is_full_sleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(100);
+ }
+
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+ udelay(1);
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+ (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+ SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah))
+ ar9003_mci_observation_set_up(ah);
+
+ mci->ready = true;
+ ar9003_mci_prep_interface(ah);
+
+ if (en_int)
+ ar9003_mci_enable_interrupt(ah);
+}
+
+void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* disable all MCI messages */
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ /* wait for pending HW messages to flush out */
+ udelay(10);
+
+ /*
+ * Send LNA_TAKE and SYS_SLEEPING when
+ * 1. the reset does not follow a resume from full sleep
+ * 2. before resetting MCI RX, to quiet BT and avoid MCI RX misalignment
+ */
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+
+ udelay(5);
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send sys sleeping\n");
+ ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 cur_bt_state;
+
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+ if (mci->bt_state != cur_bt_state) {
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT state mismatches. old: %d, new: %d\n",
+ mci->bt_state, cur_bt_state);
+ mci->bt_state = cur_bt_state;
+ }
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm == true) {
+ ath_dbg(common, ATH_DBG_MCI, "MCI unhalt BT GPM");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+ }
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 new_flags, to_set, to_clear;
+
+ if (AR_SREV_9462_20(ah) &&
+ mci->update_2g5g &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+
+ if (mci->is_2g) {
+ new_flags = MCI_2G_FLAGS;
+ to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+ to_set = MCI_2G_FLAGS_SET_MASK;
+ } else {
+ new_flags = MCI_5G_FLAGS;
+ to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+ to_set = MCI_5G_FLAGS_SET_MASK;
+ }
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
+ mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
+
+ if (to_clear)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
+
+ if (to_set)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+ }
+
+ if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
+ mci->update_2g5g = false;
+}
+
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+ u32 *payload, bool queue)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 type, opcode;
+
+ if (queue) {
+
+ if (payload)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
+ header,
+ *(((u8 *)payload) + 4),
+ *(((u8 *)payload) + 5),
+ *(((u8 *)payload) + 6));
+ else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI ERROR: Send fail: %02x\n", header);
+ }
+
+ /* check if the message is to be queued */
+ if (header != MCI_GPM)
+ return;
+
+ type = MCI_GPM_TYPE(payload);
+ opcode = MCI_GPM_OPCODE(payload);
+
+ if (type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (opcode) {
+ case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+
+ if (AR_SREV_9462_10(ah))
+ break;
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+ MCI_GPM_COEX_BT_FLAGS_READ)
+ break;
+
+ mci->update_2g5g = queue;
+
+ if (queue)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <queued> %s.\n",
+ mci->is_2g ? "2G" : "5G");
+ else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <sent> %s.\n",
+ mci->is_2g ? "2G" : "5G");
+
+ break;
+
+ case MCI_GPM_COEX_WLAN_CHANNELS:
+
+ mci->wlan_channels_update = queue;
+ if (queue)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI WLAN channel map <queued>\n");
+ else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI WLAN channel map <sent>\n");
+ break;
+
+ case MCI_GPM_COEX_HALT_BT_GPM:
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_UNHALT) {
+
+ mci->unhalt_bt_gpm = queue;
+
+ if (queue)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI UNHALT BT GPM <queued>\n");
+ else {
+ mci->halted_bt_gpm = false;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI UNHALT BT GPM <sent>\n");
+ }
+ }
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_HALT) {
+
+ mci->halted_bt_gpm = !queue;
+
+ if (queue)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI HALT BT GPM <not sent>\n");
+ else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI UNHALT BT GPM <sent>\n");
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (mci->update_2g5g) {
+ if (mci->is_2g) {
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA trans\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_OSLA)) {
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+ }
+ } else {
+ ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
+
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ }
+ }
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ bool msg_sent = false;
+ u32 regval;
+ u32 saved_mci_int_en;
+ int i;
+
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+ regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Not sending 0x%x. MCI is not enabled. "
+ "full_sleep = %d\n", header,
+ (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+
+ } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Don't send message 0x%x. BT is in sleep state\n", header);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+ /* Need to clear SW_MSG_DONE raw bit before wait */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ (AR_MCI_INTERRUPT_SW_MSG_DONE |
+ AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
+ if (payload) {
+ for (i = 0; (i * 4) < len; i++)
+ REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+ *(payload + i));
+ }
+
+ REG_WRITE(ah, AR_MCI_COMMAND0,
+ (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+ AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+ SM(len, AR_MCI_COMMAND0_LEN) |
+ SM(header, AR_MCI_COMMAND0_HEADER)));
+
+ if (wait_done &&
+ !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ else {
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+ msg_sent = true;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+ return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
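[Editor's note: ar9003_mci_send_message() copies its byte-length payload into consecutive 32-bit payload registers with the "for (i = 0; (i * 4) < len; i++)" loop, i.e. it rounds the length up to whole words. A minimal sketch of that split; the destination array is a hypothetical stand-in for the AR_MCI_TX_PAYLOAD0.. registers.]

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the consecutive TX payload registers. */
static uint32_t tx_payload_regs[4];

/* Copy a payload of @len bytes as whole 32-bit words, one register
 * per word, like the (i * 4) < len loop in the function above. */
static void write_payload(const uint32_t *payload, unsigned int len)
{
	unsigned int i;

	for (i = 0; (i * 4) < len; i++)
		tx_payload_regs[i] = payload[i];
}

int main(void)
{
	uint32_t pld[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };

	write_payload(pld, 16);	/* 16 bytes -> 4 register writes */
	printf("reg0 = 0x%08x\n", tx_payload_regs[0]);
	return 0;
}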
+
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+
+ mci->gpm_addr = gpm_addr;
+ mci->gpm_buf = gpm_buf;
+ mci->gpm_len = len;
+ mci->sched_addr = sched_addr;
+ mci->sched_buf = sched_buf;
+
+ ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Turn off MCI and Jupiter mode. */
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+ ath_dbg(common, ATH_DBG_MCI, "MCI ar9003_mci_cleanup\n");
+ ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, u32 *p_gpm)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 *p_data = (u8 *) p_gpm;
+
+ if (gpm_type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (gpm_opcode) {
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX Version Query\n");
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX Version Response\n");
+ mci->bt_ver_major =
+ *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+ mci->bt_ver_minor =
+ *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+ mci->bt_version_known = true;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT Coex version: %d.%d\n",
+ mci->bt_ver_major,
+ mci->bt_ver_minor);
+ break;
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02X.\n",
+ *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ mci->query_bt = true;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX BT_Profile_Info\n");
+ break;
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ mci->query_bt = true;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX BT_Status_Update "
+ "SEQ=%d (drop&query)\n", *(p_gpm + 3));
+ break;
+ default:
+ break;
+ }
+}
+
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *p_gpm = NULL, mismatch = 0, more_data;
+ u32 offset;
+ u8 recv_type = 0, recv_opcode = 0;
+ bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+ more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+ while (time_out > 0) {
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (more_data != MCI_GPM_MORE)
+ time_out = ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM,
+ time_out);
+
+ if (!time_out)
+ break;
+
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ continue;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+
+ if (recv_type == gpm_type) {
+
+ if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+ !b_is_bt_cal_done) {
+ gpm_type = MCI_GPM_BT_CAL_GRANT;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv BT_CAL_DONE"
+ "wait BT_CAL_GRANT\n");
+ continue;
+ }
+
+ break;
+ }
+ } else if ((recv_type == gpm_type) &&
+ (recv_opcode == gpm_opcode))
+ break;
+
+ /* not expected message */
+
+ /*
+ * check if it's cal_grant
+ *
+ * When we're waiting for cal_grant in the reset routine,
+ * it's possible that BT sends out cal_request at the
+ * same time. Since BT's calibration doesn't happen
+ * that often, we let BT complete its calibration and
+ * then continue to wait for cal_grant from BT.
+ * Original: wait for BT_CAL_GRANT.
+ * New: receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait
+ * for BT_CAL_DONE -> wait for BT_CAL_GRANT.
+ */
+
+ if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+ (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+ u32 payload[4] = {0, 0, 0, 0};
+
+ gpm_type = MCI_GPM_BT_CAL_DONE;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
+
+ MCI_GPM_SET_CAL_TYPE(payload,
+ MCI_GPM_WLAN_CAL_GRANT);
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ false, false);
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI now wait for BT_CAL_DONE\n");
+
+ continue;
+ } else {
+ ath_dbg(common, ATH_DBG_MCI, "MCI GPM subtype"
+ "not match 0x%x\n", *(p_gpm + 1));
+ mismatch++;
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+ }
+ }
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (time_out <= 0) {
+ time_out = 0;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI GPM received timeout, mismatch = %d\n", mismatch);
+ } else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Receive GPM type=0x%x, code=0x%x\n",
+ gpm_type, gpm_opcode);
+
+ while (more_data == MCI_GPM_MORE) {
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI discard remaining GPM\n");
+ offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+ &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+
+ MCI_GPM_RECYCLE(p_gpm);
+ }
+
+ return time_out;
+}
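A usage sketch, illustrative only: callers name a single target GPM and a timeout, and the CAL_REQ/WLAN_CAL_GRANT/CAL_DONE detour described in the comment above is handled inside this function. The timeout is assumed to be in microseconds, consistent with the 25000 (25 ms) value used in hw.c later in this diff; a zero return means the wait timed out.

static bool example_wait_bt_cal_grant(struct ath_hw *ah)
{
	/* 50 ms assumed; non-zero means the GPM arrived in time */
	return ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0,
				       50000) != 0;
}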
+
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 value = 0, more_gpm = 0, gpm_ptr;
+ u8 query_type;
+
+ switch (state_type) {
+ case MCI_STATE_ENABLE:
+ if (mci->ready) {
+
+ value = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((value == 0xdeadbeef) || (value == 0xffffffff))
+ value = 0;
+ }
+ value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+ break;
+ case MCI_STATE_INIT_GPM_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI GPM initial WRITE_PTR=%d\n", value);
+ mci->gpm_idx = value;
+ break;
+ case MCI_STATE_NEXT_GPM_OFFSET:
+ case MCI_STATE_LAST_GPM_OFFSET:
+ /*
+ * Clearing the GPM RX interrupt here avoids a new GPM message
+ * interrupt that could lead to a spurious interrupt after power
+ * sleep, or to multiple entries into ath_mci_intr().
+ * Adding an empty-GPM check (returning MCI_GPM_INVALID) could
+ * alleviate this as well, but clearing the GPM RX interrupt bit
+ * is safe: whether this is called from hw or driver code, an
+ * interrupt bit must have been set/triggered initially.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ value = gpm_ptr;
+
+ if (value == 0)
+ value = mci->gpm_len - 1;
+ else if (value >= mci->gpm_len) {
+ if (value != 0xFFFF) {
+ value = 0;
+ ath_dbg(common, ATH_DBG_MCI, "MCI GPM offset"
+ "out of range\n");
+ }
+ } else
+ value--;
+
+ if (value == 0xFFFF) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+ ath_dbg(common, ATH_DBG_MCI, "MCI GPM ptr invalid"
+ "@ptr=%d, offset=%d, more=GPM_NOMORE\n",
+ gpm_ptr, value);
+ } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
+
+ if (gpm_ptr == mci->gpm_idx) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI GPM message"
+ "not available @ptr=%d, @offset=%d,"
+ "more=GPM_NOMORE\n", gpm_ptr, value);
+ } else {
+ for (;;) {
+
+ u32 temp_index;
+
+ /* skip reserved GPM if any */
+
+ if (value != mci->gpm_idx)
+ more_gpm = MCI_GPM_MORE;
+ else
+ more_gpm = MCI_GPM_NOMORE;
+
+ temp_index = mci->gpm_idx;
+ mci->gpm_idx++;
+
+ if (mci->gpm_idx >=
+ mci->gpm_len)
+ mci->gpm_idx = 0;
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI GPM message got ptr=%d,"
+ "@offset=%d, more=%d\n",
+ gpm_ptr, temp_index,
+ (more_gpm == MCI_GPM_MORE));
+
+ if (ar9003_mci_is_gpm_valid(ah,
+ temp_index)) {
+ value = temp_index;
+ break;
+ }
+
+ if (more_gpm == MCI_GPM_NOMORE) {
+ value = MCI_GPM_INVALID;
+ break;
+ }
+ }
+ }
+ if (p_data)
+ *p_data = more_gpm;
+ }
+
+ if (value != MCI_GPM_INVALID)
+ value <<= 4;
+
+ break;
+ case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+ /* Make it in bytes */
+ value <<= 4;
+ break;
+
+ case MCI_STATE_REMOTE_SLEEP:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_REMOTE_SLEEP) ?
+ MCI_BT_SLEEP : MCI_BT_AWAKE;
+ break;
+
+ case MCI_STATE_CONT_RSSI_POWER:
+ value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
+ break;
+
+ case MCI_STATE_CONT_PRIORITY:
+ value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
+ break;
+
+ case MCI_STATE_CONT_TXRX:
+ value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
+ break;
+
+ case MCI_STATE_BT:
+ value = mci->bt_state;
+ break;
+
+ case MCI_STATE_SET_BT_SLEEP:
+ mci->bt_state = MCI_BT_SLEEP;
+ break;
+
+ case MCI_STATE_SET_BT_AWAKE:
+ mci->bt_state = MCI_BT_AWAKE;
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm) {
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI unhalt BT GPM\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_CAL_START:
+ mci->bt_state = MCI_BT_CAL_START;
+ break;
+
+ case MCI_STATE_SET_BT_CAL:
+ mci->bt_state = MCI_BT_CAL;
+ break;
+
+ case MCI_STATE_RESET_REQ_WAKE:
+ ar9003_mci_reset_req_wakeup(ah);
+ mci->update_2g5g = true;
+
+ if ((AR_SREV_9462_20_OR_LATER(ah)) &&
+ (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+ /* Check if we still have control of the GPIOs */
+ if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI reconfigure observation");
+ ar9003_mci_observation_set_up(ah);
+ }
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_COEX_VERSION:
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_COEX_VERSION:
+
+ if (!p_data)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Set BT Coex version with NULL data!!\n");
+ else {
+ mci->bt_ver_major = (*p_data >> 8) & 0xff;
+ mci->bt_ver_minor = (*p_data) & 0xff;
+ mci->bt_version_known = true;
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT version set: %d.%d\n",
+ mci->bt_ver_major,
+ mci->bt_ver_minor);
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_CHANNELS:
+ if (p_data) {
+ if (((mci->wlan_channels[1] & 0xffff0000) ==
+ (*(p_data + 1) & 0xffff0000)) &&
+ (mci->wlan_channels[2] == *(p_data + 2)) &&
+ (mci->wlan_channels[3] == *(p_data + 3)))
+ break;
+
+ mci->wlan_channels[0] = *p_data++;
+ mci->wlan_channels[1] = *p_data++;
+ mci->wlan_channels[2] = *p_data++;
+ mci->wlan_channels[3] = *p_data++;
+ }
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+
+ case MCI_STATE_SEND_VERSION_QUERY:
+ ar9003_mci_send_coex_version_query(ah, true);
+ break;
+
+ case MCI_STATE_SEND_STATUS_QUERY:
+ query_type = (AR_SREV_9462_10(ah)) ?
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO :
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+
+ ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+ break;
+
+ case MCI_STATE_NEED_FLUSH_BT_INFO:
+ /*
+ * btcoex_hw.mci.unhalt_bt_gpm indicates whether an UNHALT
+ * message needs to be sent. It is set whenever there is a
+ * request to send a HALT message. halted_bt_gpm indicates
+ * whether the HALT message was sent out successfully.
+ *
+ * Checking (mci->unhalt_bt_gpm == false) instead of
+ * (mci->halted_bt_gpm == false) makes sure we are
+ * currently in UNHALT-ed mode, so that BT can respond
+ * to a status query.
+ */
+ value = (!mci->unhalt_bt_gpm &&
+ mci->need_flush_btinfo) ? 1 : 0;
+ if (p_data)
+ mci->need_flush_btinfo =
+ (*p_data != 0) ? true : false;
+ break;
+
+ case MCI_STATE_RECOVER_RX:
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI hw RECOVER_RX\n");
+ ar9003_mci_prep_interface(ah);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_NEED_FTP_STOMP:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+ break;
+
+ case MCI_STATE_NEED_TUNING:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
+ break;
+
+ default:
+ break;
+
+ }
+
+ return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
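A sketch of draining the GPM ring through MCI_STATE_NEXT_GPM_OFFSET, mirroring the loop in ar9003_mci_wait_for_gpm() above; process_gpm() is a hypothetical placeholder. The state call returns a byte offset (ring index << 4) or MCI_GPM_INVALID, and *p_data reports whether more entries are pending.

static void example_drain_gpm(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 offset, more = MCI_GPM_MORE;

	while (more == MCI_GPM_MORE) {
		offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
					  &more);
		if (offset == MCI_GPM_INVALID)
			break;

		process_gpm((u32 *)(mci->gpm_buf + offset)); /* hypothetical */

		/* mark the consumed 16-byte slot as free again */
		MCI_GPM_RECYCLE((u32 *)(mci->gpm_buf + offset));
	}
}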
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644
index 00000000000..798da116a44
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0
+
+enum mci_gpm_coex_query_type {
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0),
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1),
+ MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+ MCI_GPM_COEX_BT_GPM_UNHALT,
+ MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+ MCI_GPM_COEX_BT_FLAGS_READ,
+ MCI_GPM_COEX_BT_FLAGS_SET,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS 79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER 0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+ MCI_BT_MCI_FLAGS_UPDATE_HDR | \
+ MCI_BT_MCI_FLAGS_UPDATE_PLD | \
+ MCI_BT_MCI_FLAGS_MCI_MODE)
+
+#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000
+#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK 0x00000000
+#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \
+ ~MCI_TOGGLE_BT_MCI_FLAGS)
+
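A worked expansion of the flag masks above, for illustration only:

/*
 *   MCI_TOGGLE_BT_MCI_FLAGS = 0x02 | 0x04 | 0x08 | 0x400 = 0x0000040e
 *   MCI_2G_FLAGS            = MCI_DEFAULT_BT_MCI_FLAGS   = 0x00011dde
 *   MCI_5G_FLAGS            = 0x00011dde & ~0x0000040e   = 0x000119d0
 *
 * i.e. the UPDATE_CORR/HDR/PLD and MCI_MODE bits are set while operating
 * on 2 GHz and cleared on 5 GHz; all other default flags stay untouched.
 */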
+/*
+ * Default value for AR9462 is 0x00002201
+ */
+#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S 8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S 12
+#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
+ ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+ ATH_MCI_CONFIG_MCI_OBS_BT)
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 0c462c904cb..a4450cba065 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "hw.h"
#include "ar9003_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 04b060af508..e41d26939ab 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "hw.h"
#include "ar9003_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 497d7461838..ed64114571f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -490,6 +490,8 @@
#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
@@ -1001,6 +1003,7 @@
/* GLB Registers */
#define AR_GLB_BASE 0x20000
+#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
(AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 259a6f312af..dc2054f0378 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
static const u32 ar9462_2p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
- {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
- {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x5ac640de},
- {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x0796be89},
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x92c84d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+ {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x15262820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
- {0x00009e54, 0xe4c355c7},
- {0x00009e58, 0xfd897735},
+ {0x00009e54, 0xe4c555c2},
+ {0x00009e58, 0xfd857722},
{0x00009e5c, 0xe9198724},
{0x00009fc0, 0x803e4788},
{0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a40c, 0x00820820},
{0x0000a414, 0x1ce739ce},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
{0x0000a43c, 0x00100000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 93b45b4b303..130e5dba955 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -159,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
/* return block-ack bitmap index given sequence and starting sequence */
#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
+/* return the seqno for _start + _offset */
+#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
+
/* returns delimiter padding required given the packet length */
#define ATH_AGGR_GET_NDELIM(_len) \
(((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
@@ -238,6 +241,7 @@ struct ath_atx_tid {
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
+ int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
@@ -252,9 +256,9 @@ struct ath_atx_tid {
struct ath_node {
#ifdef CONFIG_ATH9K_DEBUGFS
struct list_head list; /* for sc->nodes */
+#endif
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
-#endif
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
int ps_key;
@@ -276,7 +280,6 @@ struct ath_tx_control {
};
#define ATH_TX_ERROR 0x01
-#define ATH_TX_BAR 0x02
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -462,7 +465,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_9287 8
#define ATH_LED_PIN_9300 10
#define ATH_LED_PIN_9485 6
-#define ATH_LED_PIN_9462 0
+#define ATH_LED_PIN_9462 4
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
@@ -542,7 +545,7 @@ struct ath_ant_comb {
#define DEFAULT_CACHELINE 32
#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES 10
+#define ATH_MAX_SW_RETRIES 30
#define ATH_CHAN_MAX 255
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
@@ -647,6 +650,7 @@ struct ath_softc {
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
+ struct ath_mci_coex mci_coex;
struct ath_descdma txsdma;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 5a6361da981..bbb20810ec1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -14,13 +14,14 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "hw.h"
enum ath_bt_mode {
ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
- ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
+ ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */
};
struct ath_btcoex_config {
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index d5e5db1faad..278361c867c 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -54,8 +54,39 @@ enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_MCI,
};
+struct ath9k_hw_mci {
+ u32 raw_intr;
+ u32 rx_msg_intr;
+ u32 cont_status;
+ u32 gpm_addr;
+ u32 gpm_len;
+ u32 gpm_idx;
+ u32 sched_addr;
+ u32 wlan_channels[4];
+ u32 wlan_cal_seq;
+ u32 wlan_cal_done;
+ u32 config;
+ u8 *gpm_buf;
+ u8 *sched_buf;
+ bool ready;
+ bool update_2g5g;
+ bool is_2g;
+ bool query_bt;
+ bool unhalt_bt_gpm; /* need send UNHALT */
+ bool halted_bt_gpm; /* HALT sent */
+ bool need_flush_btinfo;
+ bool bt_version_known;
+ bool wlan_channels_update;
+ u8 wlan_ver_major;
+ u8 wlan_ver_minor;
+ u8 bt_ver_major;
+ u8 bt_ver_minor;
+ u8 bt_state;
+};
+
struct ath_btcoex_hw {
enum ath_btcoex_scheme scheme;
+ struct ath9k_hw_mci mci;
bool enabled;
u8 wlanactive_gpio;
u8 btactive_gpio;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index ebaf304f464..99538810a31 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -16,6 +16,7 @@
#include "hw.h"
#include "hw-ops.h"
+#include <linux/export.h>
/* Common calibration code */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 8e7e57ccbe9..68d972bf232 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -855,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
- if (flags & ATH_TX_BAR)
+ if (flags & ATH_TX_ERROR)
TX_STAT_INC(qnum, a_xretries);
else
TX_STAT_INC(qnum, a_completed);
@@ -1629,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_debug);
#endif
+
+ ath9k_dfs_init_debug(sc);
+
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 356352ac2d6..776a24ada60 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -19,6 +19,7 @@
#include "hw.h"
#include "rc.h"
+#include "dfs_debug.h"
struct ath_txq;
struct ath_buf;
@@ -187,6 +188,7 @@ struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
+ struct ath_dfs_stats dfs_stats;
u32 reset[__RESET_TYPE_MAX];
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644
index 00000000000..e4e84a9e627
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ath9k.h"
+#include "dfs.h"
+#include "dfs_debug.h"
+
+/*
+ * TODO: move into or synchronize this with generic header
+ * as soon as IF is defined
+ */
+struct dfs_radar_pulse {
+ u16 freq;
+ u64 ts;
+ u32 width;
+ u8 rssi;
+};
+
+/* internal struct to pass radar data */
+struct ath_radar_data {
+ u8 pulse_bw_info;
+ u8 rssi;
+ u8 ext_rssi;
+ u8 pulse_length_ext;
+ u8 pulse_length_pri;
+};
+
+/* convert pulse duration to usecs, considering clock mode */
+static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
+{
+ const u32 AR93X_NSECS_PER_DUR = 800;
+ const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
+ u32 nsecs;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
+ nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
+ else
+ nsecs = dur * AR93X_NSECS_PER_DUR;
+
+ return (nsecs + 500) / 1000;
+}
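A worked example of the conversion, for illustration only:

/*
 * dur = 50:
 *   normal clock: (50 * 800 + 500) / 1000         = 40 us
 *   fast clock:   (50 * (8000 / 11) + 500) / 1000 = 36 us
 * (8000 / 11 is integer division, i.e. 727 ns per duration unit)
 */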
+
+#define PRI_CH_RADAR_FOUND 0x01
+#define EXT_CH_RADAR_FOUND 0x02
+static bool
+ath9k_postprocess_radar_event(struct ath_softc *sc,
+ struct ath_radar_data *are,
+ struct dfs_radar_pulse *drp)
+{
+ u8 rssi;
+ u16 dur;
+
+ ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_DFS,
+ "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
+ are->pulse_bw_info,
+ are->pulse_length_pri, are->rssi,
+ are->pulse_length_ext, are->ext_rssi);
+
+ /*
+ * Only the last 2 bits of the BW info are relevant, they indicate
+ * which channel the radar was detected in.
+ */
+ are->pulse_bw_info &= 0x03;
+
+ switch (are->pulse_bw_info) {
+ case PRI_CH_RADAR_FOUND:
+ /* radar in ctrl channel */
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, pri_phy_errors);
+ /*
+ * cannot use ctrl channel RSSI
+ * if extension channel is stronger
+ */
+ rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+ break;
+ case EXT_CH_RADAR_FOUND:
+ /* radar in extension channel */
+ dur = are->pulse_length_ext;
+ DFS_STAT_INC(sc, ext_phy_errors);
+ /*
+ * cannot use extension channel RSSI
+ * if control channel is stronger
+ */
+ rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+ break;
+ case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
+ /*
+ * Conducted testing, when pulse is on DC, both pri and ext
+ * durations are reported to be same
+ *
+ * Radiated testing, when pulse is on DC, different pri and
+ * ext durations are reported, so take the larger of the two
+ */
+ if (are->pulse_length_ext >= are->pulse_length_pri)
+ dur = are->pulse_length_ext;
+ else
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, dc_phy_errors);
+
+ /* when both are present use stronger one */
+ rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+ break;
+ default:
+ /*
+ * Bogus bandwidth info was received in descriptor,
+ * so ignore this PHY error
+ */
+ DFS_STAT_INC(sc, bwinfo_discards);
+ return false;
+ }
+
+ if (rssi == 0) {
+ DFS_STAT_INC(sc, rssi_discards);
+ return false;
+ }
+
+ /*
+ * TODO: check chirping pulses
+ * checks for chirping are dependent on the DFS regulatory domain
+ * used, which is yet TBD
+ */
+
+ /* convert duration to usecs */
+ drp->width = dur_to_usecs(sc->sc_ah, dur);
+ drp->rssi = rssi;
+
+ DFS_STAT_INC(sc, pulses_detected);
+ return true;
+}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
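A worked example of the per-channel RSSI rules above, for illustration only:

/*
 * PRI_CH_RADAR_FOUND with rssi = 30, ext_rssi = 34:
 *   ext_rssi >= rssi + 3, so rssi is forced to 0 and the pulse is later
 *   dropped and counted under rssi_discards.
 * PRI_CH_RADAR_FOUND with rssi = 30, ext_rssi = 31:
 *   the primary-channel rssi of 30 is kept and the pulse is reported.
 */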
+
+/*
+ * DFS: check PHY-error for radar pulse and feed the detector
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime)
+{
+ struct ath_radar_data ard;
+ u16 datalen;
+ char *vdata_end;
+ struct dfs_radar_pulse drp;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+ (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
+ ath_dbg(common, ATH_DBG_DFS,
+ "Error: rs_phyerr=0x%x not a radar error\n",
+ rs->rs_phyerr);
+ return;
+ }
+
+ datalen = rs->rs_datalen;
+ if (datalen == 0) {
+ DFS_STAT_INC(sc, datalen_discards);
+ return;
+ }
+
+ ard.rssi = rs->rs_rssi_ctl0;
+ ard.ext_rssi = rs->rs_rssi_ext0;
+
+ /*
+ * hardware stores this as 8 bit signed value.
+ * we will cap it at 0 if it is a negative number
+ */
+ if (ard.rssi & 0x80)
+ ard.rssi = 0;
+ if (ard.ext_rssi & 0x80)
+ ard.ext_rssi = 0;
+
+ vdata_end = (char *)data + datalen;
+ ard.pulse_bw_info = vdata_end[-1];
+ ard.pulse_length_ext = vdata_end[-2];
+ ard.pulse_length_pri = vdata_end[-3];
+
+ ath_dbg(common, ATH_DBG_DFS,
+ "bw_info=%d, length_pri=%d, length_ext=%d, "
+ "rssi_pri=%d, rssi_ext=%d\n",
+ ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
+ ard.rssi, ard.ext_rssi);
+
+ drp.freq = ah->curchan->channel;
+ drp.ts = mactime;
+ if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+ static u64 last_ts;
+ ath_dbg(common, ATH_DBG_DFS,
+ "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
+ "width=%d, rssi=%d, delta_ts=%llu\n",
+ drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
+ last_ts = drp.ts;
+ /*
+ * TODO: forward pulse to pattern detector
+ *
+ * ieee80211_add_radar_pulse(drp.freq, drp.ts,
+ * drp.width, drp.rssi);
+ */
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644
index 00000000000..c2412857f12
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_DFS_H
+#define ATH9K_DFS_H
+
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+/**
+ * ath9k_dfs_process_phyerr - process radar PHY error
+ * @sc: ath_softc
+ * @data: RX payload data
+ * @rs: RX status after processing descriptor
+ * @mactime: receive time
+ *
+ * This function is called whenever the HW DFS module detects a radar
+ * pulse and reports it as a PHY error.
+ *
+ * The radar information provided as raw payload data is validated and
+ * filtered for false pulses. Events passing all tests are forwarded to
+ * the upper layer for pattern detection.
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime);
+#else
+static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime) { }
+#endif
+
+#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644
index 00000000000..106d031d834
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ath9k.h"
+#include "dfs_debug.h"
+
+#define ATH9K_DFS_STAT(s, p) \
+ len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
+
+static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ "enabled" : "disabled");
+ ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
+ ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
+ ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
+ ATH9K_DFS_STAT("BW info discards ", bwinfo_discards);
+ ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
+ ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
+ ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = read_file_dfs,
+ .open = ath9k_dfs_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_dfs_init_debug(struct ath_softc *sc)
+{
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_dfs_stats);
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644
index 00000000000..6e1e2a71659
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef DFS_DEBUG_H
+#define DFS_DEBUG_H
+
+#include "hw.h"
+
+/**
+ * struct ath_dfs_stats - DFS Statistics
+ *
+ * @pulses_detected: No. of pulses detected so far
+ * @datalen_discards: No. of pulses discarded due to invalid datalen
+ * @rssi_discards: No. of pulses discarded due to invalid RSSI
+ * @bwinfo_discards: No. of pulses discarded due to invalid BW info
+ * @pri_phy_errors: No. of pulses reported for primary channel
+ * @ext_phy_errors: No. of pulses reported for extension channel
+ * @dc_phy_errors: No. of pulses reported for primary + extension channel
+ */
+struct ath_dfs_stats {
+ u32 pulses_detected;
+ u32 datalen_discards;
+ u32 rssi_discards;
+ u32 bwinfo_discards;
+ u32 pri_phy_errors;
+ u32 ext_phy_errors;
+ u32 dc_phy_errors;
+};
+
+#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
+
+#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
+void ath9k_dfs_init_debug(struct ath_softc *sc);
+
+#else
+
+#define DFS_STAT_INC(sc, c) do { } while (0)
+static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
+
+#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
+
+#endif /* DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9a7520f987f..61fcab0e2d7 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -473,7 +473,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
int i;
u16 twiceMinEdgePower;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
u16 scaledPower = 0, minCtlPower;
u16 numCtlModes;
const u16 *pCtlMode;
@@ -542,9 +542,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 4f5c50a87ce..0981c073471 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -569,7 +569,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data_ar9287 *rep;
struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +669,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
+ twiceMaxEdgePower = MAX_RATE_POWER;
/* Walk through the CTL indices stored in EEPROM */
for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
struct cal_ctl_edges *pRdEdgesPower;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 81e62967167..55a21d39167 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1000,7 +1000,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data *rep;
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1121,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
if ((((cfgCtl & ~CTL_MODE_M) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0b9a0e8a495..f8ce4ea6f65 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -808,7 +808,8 @@ void ath9k_htc_ani_work(struct work_struct *work)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ if (ah->config.enable_ani &&
+ (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -838,7 +839,7 @@ set_timer:
* short calibration and long calibration.
*/
cal_interval = ATH_LONG_CALINTERVAL;
- if (priv->ah->config.enable_ani)
+ if (ah->config.enable_ani)
cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index e74c233757a..c4ad0b06bdb 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
ini_reloaded);
}
+
+static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->set_radar_params)
+ return;
+
+ ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
+}
+
#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 27471f80d8b..8cda9a1513a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include "hw.h"
@@ -503,7 +504,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return ecode;
}
- if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
+ if (ah->config.enable_ani) {
ath9k_hw_ani_setup(ah);
ath9k_hw_ani_init(ah);
}
@@ -609,6 +610,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+ /* disable ANI for 9340 */
+ if (AR_SREV_9340(ah))
+ ah->config.enable_ani = false;
+
ath9k_hw_init_mode_regs(ah);
if (!ah->is_pciexpress)
@@ -1349,6 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
+ bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1360,13 +1366,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
switch (type) {
case ATH9K_RESET_POWER_ON:
- return ath9k_hw_set_reset_power_on(ah);
+ ret = ath9k_hw_set_reset_power_on(ah);
+ break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:
- return ath9k_hw_set_reset(ah, type);
+ ret = ath9k_hw_set_reset(ah, type);
+ break;
default:
- return false;
+ break;
}
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+ return ret;
}
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1505,6 +1518,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool bChannelChange)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 saveLedState;
struct ath9k_channel *curchan = ah->curchan;
u32 saveDefAntenna;
@@ -1512,6 +1526,53 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u64 tsf = 0;
int i, r;
bool allow_fbs = false;
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+ bool save_fullsleep = ah->chip_fullsleep;
+
+ if (mci) {
+
+ ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+ if (mci_hw->bt_state == MCI_BT_CAL_START) {
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI stop rx for BT CAL");
+
+ mci_hw->bt_state = MCI_BT_CAL;
+
+ /*
+ * MCI FIX: disable mci interrupt here. This is to avoid
+ * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and
+ * lead to mci_intr reentry.
+ */
+
+ ar9003_mci_disable_interrupt(ah);
+
+ ath_dbg(common, ATH_DBG_MCI, "send WLAN_CAL_GRANT");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+ 16, true, false);
+
+ ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is calibrating");
+
+ /* Wait BT calibration to be completed for 25ms */
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+ 0, 25000))
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI got BT_CAL_DONE\n");
+ else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI ### BT cal takes to long, force"
+ "bt_state to be bt_awake\n");
+ mci_hw->bt_state = MCI_BT_AWAKE;
+ /* MCI FIX: enable mci interrupt here */
+ ar9003_mci_enable_interrupt(ah);
+
+ return true;
+ }
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1549,12 +1610,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready)
+ ar9003_mci_2g5g_switch(ah, true);
+
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
return 0;
}
}
+ if (mci) {
+ ar9003_mci_disable_interrupt(ah);
+
+ if (mci_hw->ready && !save_fullsleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(20);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+ }
+
+ mci_hw->bt_state = MCI_BT_SLEEP;
+ mci_hw->ready = false;
+ }
+
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
if (saveDefAntenna == 0)
saveDefAntenna = 1;
@@ -1610,6 +1688,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ if (mci)
+ ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
/*
* Some AR91xx SoC devices frequently fail to accept TSF writes
* right after the chip reset. When that happens, write a new
@@ -1727,6 +1808,55 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready) {
+
+ if (IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_SLEEP)) {
+
+ if (ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+ ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+ /*
+ * BT is sleeping. Check if BT wakes up during
+ * WLAN calibration. If BT wakes up during
+ * WLAN calibration, need to go through all
+ * message exchanges again and recal.
+ */
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI BT wakes up"
+ "during WLAN calibration\n");
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+ ath_dbg(common, ATH_DBG_MCI, "MCI send"
+ "REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI re-cal\n");
+
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_hist.num_readings = 0;
+ }
+
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
+
+ }
+ }
+ ar9003_mci_enable_interrupt(ah);
+ }
+
ENABLE_REGWRITE_BUFFER(ah);
ath9k_hw_restore_chainmask(ah);
@@ -1769,6 +1899,21 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
+ if (mci && mci_hw->ready) {
+ /*
+ * check BT state again to make
+ * sure it's not changed.
+ */
+
+ ar9003_mci_sync_bt_state(ah);
+ ar9003_mci_2g5g_switch(ah, true);
+
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (mci_hw->query_bt == true)) {
+ mci_hw->need_flush_btinfo = true;
+ }
+ }
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@@ -1826,7 +1971,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
@@ -1932,6 +2078,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
int status = true, setChip = true;
static const char *modes[] = {
"AWAKE",
@@ -1949,12 +2096,35 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
switch (mode) {
case ATH9K_PM_AWAKE:
status = ath9k_hw_set_power_awake(ah, setChip);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
break;
case ATH9K_PM_FULL_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+ (mci->bt_state != MCI_BT_SLEEP) &&
+ !mci->halted_bt_gpm) {
+ ath_dbg(common, ATH_DBG_MCI, "MCI halt BT GPM"
+ "(full_sleep)");
+ ar9003_mci_send_coex_halt_bt_gpm(ah,
+ true, true);
+ }
+
+ mci->ready = false;
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+ }
+
ath9k_set_power_sleep(ah, setChip);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
@@ -2107,6 +2277,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
return chip_chainmask;
}
+/**
+ * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
+ * @ah: the atheros hardware data structure
+ *
+ * We enable DFS support upstream on chipsets which have passed a series
+ * of tests. The testing requirements are going to be documented. Desired
+ * test requirements are documented at:
+ *
+ * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ *
+ * Once a new chipset gets properly tested an individual commit can be used
+ * to document the testing for DFS for that chipset.
+ */
+static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
+{
+
+ switch (ah->hw_version.macVersion) {
+ /* AR9580 will likely be our first target to get testing on */
+ case AR_SREV_VERSION_9580:
+ default:
+ return false;
+ }
+}
+
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2147,6 +2341,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
chip_chainmask = 1;
+ else if (AR_SREV_9462(ah))
+ chip_chainmask = 3;
else if (!AR_SREV_9280_20_OR_LATER(ah))
chip_chainmask = 7;
else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2203,12 +2399,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->num_gpio_pins = AR_NUM_GPIO;
- if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
- pCap->hw_caps |= ATH9K_HW_CAP_CST;
+ if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
- } else {
+ else
pCap->rts_aggr_limit = (8 * 1024);
- }
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2232,7 +2426,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
if (common->btcoex_enabled) {
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (AR_SREV_9462(ah))
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+ else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2316,6 +2512,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->pcie_lcr_offset = 0x80;
}
+ if (ath9k_hw_dfs_tested(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_DFS;
+
tx_chainmask = pCap->tx_chainmask;
rx_chainmask = pCap->rx_chainmask;
while (tx_chainmask || rx_chainmask) {
@@ -2330,7 +2529,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
- if (!AR_SREV_9330(ah))
+ if (AR_SREV_9485_OR_LATER(ah))
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
if (AR_SREV_9462(ah))
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 3cb878c28cc..615cc839f0d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -126,6 +126,16 @@
#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4
#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
#define AR_GPIOD_MASK 0x00001FFF
#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
@@ -186,21 +196,21 @@ enum ath_ini_subsys {
enum ath9k_hw_caps {
ATH9K_HW_CAP_HT = BIT(0),
ATH9K_HW_CAP_RFSILENT = BIT(1),
- ATH9K_HW_CAP_CST = BIT(2),
- ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
- ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
- ATH9K_HW_CAP_EDMA = BIT(6),
- ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7),
- ATH9K_HW_CAP_LDPC = BIT(8),
- ATH9K_HW_CAP_FASTCLOCK = BIT(9),
- ATH9K_HW_CAP_SGI_20 = BIT(10),
- ATH9K_HW_CAP_PAPRD = BIT(11),
- ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
- ATH9K_HW_CAP_2GHZ = BIT(13),
- ATH9K_HW_CAP_5GHZ = BIT(14),
- ATH9K_HW_CAP_APM = BIT(15),
- ATH9K_HW_CAP_RTT = BIT(16),
- ATH9K_HW_CAP_MCI = BIT(17),
+ ATH9K_HW_CAP_AUTOSLEEP = BIT(2),
+ ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3),
+ ATH9K_HW_CAP_EDMA = BIT(4),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5),
+ ATH9K_HW_CAP_LDPC = BIT(6),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(7),
+ ATH9K_HW_CAP_SGI_20 = BIT(8),
+ ATH9K_HW_CAP_PAPRD = BIT(9),
+ ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10),
+ ATH9K_HW_CAP_2GHZ = BIT(11),
+ ATH9K_HW_CAP_5GHZ = BIT(12),
+ ATH9K_HW_CAP_APM = BIT(13),
+ ATH9K_HW_CAP_RTT = BIT(14),
+ ATH9K_HW_CAP_MCI = BIT(15),
+ ATH9K_HW_CAP_DFS = BIT(16),
};
struct ath9k_hw_capabilities {
@@ -266,6 +276,7 @@ enum ath9k_int {
ATH9K_INT_TX = 0x00000040,
ATH9K_INT_TXDESC = 0x00000080,
ATH9K_INT_TIM_TIMER = 0x00000100,
+ ATH9K_INT_MCI = 0x00000200,
ATH9K_INT_BB_WATCHDOG = 0x00000400,
ATH9K_INT_TXURN = 0x00000800,
ATH9K_INT_MIB = 0x00001000,
@@ -417,6 +428,25 @@ enum ath9k_rx_qtype {
ATH9K_RX_QUEUE_MAX,
};
+enum mci_message_header { /* length of payload */
+ MCI_LNA_CTRL = 0x10, /* len = 0 */
+ MCI_CONT_NACK = 0x20, /* len = 0 */
+ MCI_CONT_INFO = 0x30, /* len = 4 */
+ MCI_CONT_RST = 0x40, /* len = 0 */
+ MCI_SCHD_INFO = 0x50, /* len = 16 */
+ MCI_CPU_INT = 0x60, /* len = 4 */
+ MCI_SYS_WAKING = 0x70, /* len = 0 */
+ MCI_GPM = 0x80, /* len = 16 */
+ MCI_LNA_INFO = 0x90, /* len = 1 */
+ MCI_LNA_STATE = 0x94,
+ MCI_LNA_TAKE = 0x98,
+ MCI_LNA_TRANS = 0x9c,
+ MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
+ MCI_REQ_WAKE = 0xc0, /* len = 0 */
+ MCI_DEBUG_16 = 0xfe, /* len = 2 */
+ MCI_REMOTE_RESET = 0xff /* len = 16 */
+};
+
enum ath_mci_gpm_coex_profile_type {
MCI_GPM_COEX_PROFILE_UNKNOWN,
MCI_GPM_COEX_PROFILE_RFCOMM,
@@ -427,6 +457,132 @@ enum ath_mci_gpm_coex_profile_type {
MCI_GPM_COEX_PROFILE_MAX
};
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+ MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
+ MCI_GPM_COEX_B_GPM_TYPE = 4,
+ MCI_GPM_COEX_B_GPM_OPCODE = 5,
+ /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+ MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
+
+ /* MCI_GPM_COEX_VERSION_QUERY */
+ /* MCI_GPM_COEX_VERSION_RESPONSE */
+ MCI_GPM_COEX_B_MAJOR_VERSION = 6,
+ MCI_GPM_COEX_B_MINOR_VERSION = 7,
+ /* MCI_GPM_COEX_STATUS_QUERY */
+ MCI_GPM_COEX_B_BT_BITMAP = 6,
+ MCI_GPM_COEX_B_WLAN_BITMAP = 7,
+ /* MCI_GPM_COEX_HALT_BT_GPM */
+ MCI_GPM_COEX_B_HALT_STATE = 6,
+ /* MCI_GPM_COEX_WLAN_CHANNELS */
+ MCI_GPM_COEX_B_CHANNEL_MAP = 6,
+ /* MCI_GPM_COEX_BT_PROFILE_INFO */
+ MCI_GPM_COEX_B_PROFILE_TYPE = 6,
+ MCI_GPM_COEX_B_PROFILE_LINKID = 7,
+ MCI_GPM_COEX_B_PROFILE_STATE = 8,
+ MCI_GPM_COEX_B_PROFILE_ROLE = 9,
+ MCI_GPM_COEX_B_PROFILE_RATE = 10,
+ MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
+ MCI_GPM_COEX_H_PROFILE_T = 12,
+ MCI_GPM_COEX_B_PROFILE_W = 14,
+ MCI_GPM_COEX_B_PROFILE_A = 15,
+ /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+ MCI_GPM_COEX_B_STATUS_TYPE = 6,
+ MCI_GPM_COEX_B_STATUS_LINKID = 7,
+ MCI_GPM_COEX_B_STATUS_STATE = 8,
+ /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+ MCI_GPM_COEX_W_BT_FLAGS = 6,
+ MCI_GPM_COEX_B_BT_FLAGS_OP = 10
+};
+
+enum mci_gpm_subtype {
+ MCI_GPM_BT_CAL_REQ = 0,
+ MCI_GPM_BT_CAL_GRANT = 1,
+ MCI_GPM_BT_CAL_DONE = 2,
+ MCI_GPM_WLAN_CAL_REQ = 3,
+ MCI_GPM_WLAN_CAL_GRANT = 4,
+ MCI_GPM_WLAN_CAL_DONE = 5,
+ MCI_GPM_COEX_AGENT = 0x0c,
+ MCI_GPM_RSVD_PATTERN = 0xfe,
+ MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
+ MCI_GPM_BT_DEBUG = 0xff
+};
+
+enum mci_bt_state {
+ MCI_BT_SLEEP,
+ MCI_BT_AWAKE,
+ MCI_BT_CAL_START,
+ MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+ MCI_STATE_ENABLE,
+ MCI_STATE_INIT_GPM_OFFSET,
+ MCI_STATE_NEXT_GPM_OFFSET,
+ MCI_STATE_LAST_GPM_OFFSET,
+ MCI_STATE_BT,
+ MCI_STATE_SET_BT_SLEEP,
+ MCI_STATE_SET_BT_AWAKE,
+ MCI_STATE_SET_BT_CAL_START,
+ MCI_STATE_SET_BT_CAL,
+ MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ MCI_STATE_REMOTE_SLEEP,
+ MCI_STATE_CONT_RSSI_POWER,
+ MCI_STATE_CONT_PRIORITY,
+ MCI_STATE_CONT_TXRX,
+ MCI_STATE_RESET_REQ_WAKE,
+ MCI_STATE_SEND_WLAN_COEX_VERSION,
+ MCI_STATE_SET_BT_COEX_VERSION,
+ MCI_STATE_SEND_WLAN_CHANNELS,
+ MCI_STATE_SEND_VERSION_QUERY,
+ MCI_STATE_SEND_STATUS_QUERY,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_SET_CONCUR_TX_PRI,
+ MCI_STATE_RECOVER_RX,
+ MCI_STATE_NEED_FTP_STOMP,
+ MCI_STATE_NEED_TUNING,
+ MCI_STATE_DEBUG,
+ MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+ MCI_GPM_COEX_VERSION_QUERY,
+ MCI_GPM_COEX_VERSION_RESPONSE,
+ MCI_GPM_COEX_STATUS_QUERY,
+ MCI_GPM_COEX_HALT_BT_GPM,
+ MCI_GPM_COEX_WLAN_CHANNELS,
+ MCI_GPM_COEX_BT_PROFILE_INFO,
+ MCI_GPM_COEX_BT_STATUS_UPDATE,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE 0
+#define MCI_GPM_MORE 1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm) do { \
+ *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+ MCI_GPM_RSVD_PATTERN32; \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -954,7 +1110,6 @@ bool ath9k_hw_disable(struct ath_hw *ah);
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_hw *ah);
void ath9k_hw_write_associd(struct ath_hw *ah);
u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1047,6 +1202,32 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt);
+void ar9003_mci_mute_bt(struct ath_hw *ah);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done);
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out);
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
+void ar9003_mci_disable_interrupt(struct ath_hw *ah);
+void ar9003_mci_enable_interrupt(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep);
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_sync_bt_state(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr);
+
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
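
Note: the MCI_GPM_* helpers added to hw.h above all index into a single 16-byte GPM entry: byte 4 carries the type, byte 5 the opcode, and dword 1 is overwritten with the reserved pattern when an entry is recycled. A minimal sketch of how they combine, assuming the hw.h definitions above are in scope (the function name and local buffer are hypothetical, not part of the patch):

	/* Illustrative only: build, inspect and recycle one 16-byte GPM entry. */
	static void example_gpm_entry(void)
	{
		u32 gpm[4] = { 0, 0, 0, 0 };

		/* byte 4 = type, byte 5 = opcode */
		MCI_GPM_SET_TYPE_OPCODE(gpm, MCI_GPM_COEX_AGENT,
					MCI_GPM_COEX_STATUS_QUERY);

		if (MCI_GPM_TYPE(gpm) == MCI_GPM_COEX_AGENT &&
		    MCI_GPM_OPCODE(gpm) == MCI_GPM_COEX_STATUS_QUERY)
			MCI_GPM_RECYCLE(gpm); /* overwrite dword 1 with the reserved pattern */
	}
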
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 5cb0599b01c..c5df98139c4 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
+#include <linux/module.h>
#include "ath9k.h"
@@ -257,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc,
if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
max_streams = 1;
+ else if (AR_SREV_9462(ah))
+ max_streams = 2;
else if (AR_SREV_9300_20_OR_LATER(ah))
max_streams = 3;
else
@@ -294,9 +297,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath_softc *sc = hw->priv;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ int ret;
+
+ ret = ath_reg_notifier_apply(wiphy, request, reg);
+
+ /* Set tx power */
+ if (ah->curchan) {
+ sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+ sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
+ ath9k_ps_restore(sc);
+ }
- return ath_reg_notifier_apply(wiphy, request, reg);
+ return ret;
}
/*
@@ -407,6 +423,7 @@ fail:
static int ath9k_init_btcoex(struct ath_softc *sc)
{
struct ath_txq *txq;
+ struct ath_hw *ah = sc->sc_ah;
int r;
switch (sc->sc_ah->btcoex_hw.scheme) {
@@ -423,8 +440,37 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ break;
+ case ATH_BTCOEX_CFG_MCI:
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+ r = ath_mci_setup(sc);
+ if (r)
+ return r;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ ah->btcoex_hw.mci.ready = false;
+ ah->btcoex_hw.mci.bt_state = 0;
+ ah->btcoex_hw.mci.bt_ver_major = 3;
+ ah->btcoex_hw.mci.bt_ver_minor = 0;
+ ah->btcoex_hw.mci.bt_version_known = false;
+ ah->btcoex_hw.mci.update_2g5g = true;
+ ah->btcoex_hw.mci.is_2g = true;
+ ah->btcoex_hw.mci.wlan_channels_update = false;
+ ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+ ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+ ah->btcoex_hw.mci.query_bt = true;
+ ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+ ah->btcoex_hw.mci.halted_bt_gpm = false;
+ ah->btcoex_hw.mci.need_flush_btinfo = false;
+ ah->btcoex_hw.mci.wlan_cal_seq = 0;
+ ah->btcoex_hw.mci.wlan_cal_done = 0;
+ ah->btcoex_hw.mci.config = 0x2201;
+ }
break;
default:
WARN_ON(1);
@@ -838,6 +884,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+ if (sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_MCI)
+ ath_mci_cleanup(sc);
+
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
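
Note: the regulatory notifier change in init.c above scales the channel's max_power by two because ath9k keeps its tx power limits in half-dBm units (the same "2 * conf->power_level" scaling appears in ath9k_config further down). A hypothetical helper only to make the unit conversion explicit:

	/* Illustrative only: dBm from mac80211/regulatory -> the driver's 0.5 dBm units. */
	static inline u16 example_dbm_to_txpowlimit(int power_dbm)
	{
		return 2 * power_dbm;
	}
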
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 6a8fdf33a52..0e4fbb3bea3 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -16,6 +16,7 @@
#include "hw.h"
#include "hw-ops.h"
+#include <linux/export.h>
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
struct ath9k_tx_queue_info *qi)
@@ -759,7 +760,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
return true;
host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+
+ if (((host_isr & AR_INTR_MAC_IRQ) ||
+ (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+ (host_isr != AR_INTR_SPURIOUS))
return true;
host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -797,6 +801,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
+ u32 async_mask;
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
@@ -811,13 +816,16 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
if (AR_SREV_9340(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
+ async_mask = AR_INTR_MAC_IRQ;
+
+ if (ah->imask & ATH9K_INT_MCI)
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
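
Note: with AR_INTR_ASYNC_MASK_MCI folded into the async mask above, MCI interrupts surface through the same AR_INTR_ASYNC_CAUSE register as the MAC IRQ. A sketch of an ISR-side check under that assumption (illustrative helper, not the driver's actual dispatch path):

	/* Illustrative only: is an MCI interrupt pending? */
	static bool example_mci_pending(struct ath_hw *ah)
	{
		u32 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
		u32 raw_intr, rx_msg_intr;

		if (!(host_isr & AR_INTR_ASYNC_CAUSE_MCI))
			return false;

		/* latch the raw and rx-message MCI interrupt causes */
		ar9003_mci_get_interrupt(ah, &raw_intr, &rx_msg_intr);

		return raw_intr != 0 || rx_msg_intr != 0;
	}
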
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e43c41cff25..7fbc4bdd4ef 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_idle)
+ if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
mode = ATH9K_PM_FULL_SLEEP;
else if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_start_ani(common);
}
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
@@ -332,7 +332,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
hchan = ah->curchan;
}
- if (fastcc && !ath9k_hw_check_alive(ah))
+ if (fastcc && (ah->chip_fullsleep ||
+ !ath9k_hw_check_alive(ah)))
fastcc = false;
if (!ath_prepare_reset(sc, retry_tx, flush))
@@ -561,7 +562,6 @@ void ath_ani_calibrate(unsigned long data)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
longcal = true;
- ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -569,8 +569,6 @@ void ath_ani_calibrate(unsigned long data)
if (!common->ani.caldone) {
if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
shortcal = true;
- ath_dbg(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -584,8 +582,9 @@ void ath_ani_calibrate(unsigned long data)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >=
- ah->config.ani_poll_interval) {
+ if (sc->sc_ah->config.enable_ani
+ && (timestamp - common->ani.checkani_timer) >=
+ ah->config.ani_poll_interval) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -605,6 +604,11 @@ void ath_ani_calibrate(unsigned long data)
ah->rxchainmask, longcal);
}
+ ath_dbg(common, ATH_DBG_ANI,
+ "Calibration @%lu finished: %s %s %s, caldone: %s\n", jiffies,
+ longcal ? "long" : "", shortcal ? "short" : "",
+ aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
ath9k_ps_restore(sc);
set_timer:
@@ -640,9 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
spin_lock(&sc->nodes_lock);
list_add(&an->list, &sc->nodes);
spin_unlock(&sc->nodes_lock);
+#endif
an->sta = sta;
an->vif = vif;
-#endif
if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -742,6 +746,9 @@ void ath9k_tasklet(unsigned long data)
if (status & ATH9K_INT_GENTIMER)
ath_gen_timer_isr(sc->sc_ah);
+ if (status & ATH9K_INT_MCI)
+ ath_mci_intr(sc);
+
out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
@@ -764,7 +771,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
ATH9K_INT_TSFOOR | \
- ATH9K_INT_GENTIMER)
+ ATH9K_INT_GENTIMER | \
+ ATH9K_INT_MCI)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
@@ -882,82 +890,6 @@ chip_reset:
#undef SCHED_INTR
}
-static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- atomic_set(&ah->intr_ref_cnt, -1);
-
- ath9k_hw_configpcipowersave(ah, false);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(common,
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath_complete_reset(sc, true);
-
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- ath9k_ps_restore(sc);
-}
-
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
-
- ath_cancel_work(sc);
-
- spin_lock_bh(&sc->sc_pcu_lock);
-
- /*
- * Keep the LED on when the radio is disabled
- * during idle unassociated state.
- */
- if (!sc->ps_idle) {
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
- }
-
- ath_prepare_reset(sc, false, true);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath9k_hw_phy_disable(ah);
-
- ath9k_hw_configpcipowersave(ah, true);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
- ath9k_ps_restore(sc);
-}
-
static int ath_reset(struct ath_softc *sc, bool retry_tx)
{
int r;
@@ -1093,6 +1025,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
spin_lock_bh(&sc->sc_pcu_lock);
+
+ atomic_set(&ah->intr_ref_cnt, -1);
+
r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (r) {
ath_err(common,
@@ -1119,6 +1054,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ah->imask |= ATH9K_INT_MCI;
+
sc->sc_flags &= ~SC_OP_INVALID;
sc->sc_ah->is_monitoring = false;
@@ -1131,6 +1069,18 @@ static int ath9k_start(struct ieee80211_hw *hw)
goto mutex_unlock;
}
+ if (ah->led_pin >= 0) {
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ /*
+ * Reset key cache to sane defaults (all entries cleared) instead of
+ * semi-random values after suspend/resume.
+ */
+ ath9k_cmn_init_crypto(sc->sc_ah);
+
spin_unlock_bh(&sc->sc_pcu_lock);
if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
@@ -1176,6 +1126,13 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
}
+ /*
+ * Cannot tx while the hardware is in full sleep; it first needs a full
+ * chip reset to recover from that state.
+ */
+ if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
+ goto exit;
+
if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
/*
* We are using PS-Poll and mac80211 can request TX while in
@@ -1222,6 +1179,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ bool prev_idle;
mutex_lock(&sc->mutex);
@@ -1252,35 +1210,45 @@ static void ath9k_stop(struct ieee80211_hw *hw)
* before setting the invalid flag. */
ath9k_hw_disable_interrupts(ah);
- if (!(sc->sc_flags & SC_OP_INVALID)) {
- ath_drain_all_txq(sc, false);
- ath_stoprecv(sc);
- ath9k_hw_phy_disable(ah);
- } else
- sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ /* we can now sync irq and kill any running tasklets, since we already
+ * disabled interrupts and are not holding a spin lock */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ prev_idle = sc->ps_idle;
+ sc->ps_idle = true;
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ if (ah->led_pin >= 0) {
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ ath_prepare_reset(sc, false, true);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
sc->rx.frag = NULL;
}
- /* disable HAL and put h/w to sleep */
- ath9k_hw_disable(ah);
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+ ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+ ath9k_hw_phy_disable(ah);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ ath9k_hw_configpcipowersave(ah, true);
- /* we can now sync irq and kill any running tasklets, since we already
- * disabled interrupts and not holding a spin lock */
- synchronize_irq(sc->irq);
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
+ spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
-
sc->sc_flags |= SC_OP_INVALID;
+ sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@ -1620,8 +1588,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
- bool disable_radio = false;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
/*
@@ -1632,13 +1600,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- if (!sc->ps_idle) {
- ath_radio_enable(sc, hw);
- ath_dbg(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
- } else {
- disable_radio = true;
- }
+ if (sc->ps_idle)
+ ath_cancel_work(sc);
}
/*
@@ -1745,18 +1708,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_dbg(common, ATH_DBG_CONFIG,
"Set power: %d\n", conf->power_level);
sc->config.txpowlimit = 2 * conf->power_level;
- ath9k_ps_wakeup(sc);
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
- ath9k_ps_restore(sc);
- }
-
- if (disable_radio) {
- ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
- ath_radio_disable(sc, hw);
}
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return 0;
}
@@ -1916,7 +1873,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
- if (vif->type == NL80211_IFTYPE_ADHOC &&
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
@@ -2324,9 +2282,6 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
return;
}
- if (drop)
- timeout = 1;
-
for (j = 0; j < timeout; j++) {
bool npend = false;
@@ -2344,21 +2299,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
}
if (!npend)
- goto out;
+ break;
}
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- drain_txq = ath_drain_all_txq(sc, false);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ if (drop) {
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ drain_txq = ath_drain_all_txq(sc, false);
+ spin_unlock_bh(&sc->sc_pcu_lock);
- if (!drain_txq)
- ath_reset(sc, false);
+ if (!drain_txq)
+ ath_reset(sc, false);
- ath9k_ps_restore(sc);
- ieee80211_wake_queues(hw);
+ ath9k_ps_restore(sc);
+ ieee80211_wake_queues(hw);
+ }
-out:
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 0fbb141bc30..691bf47906e 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -14,6 +14,9 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
#include "ath9k.h"
#include "mci.h"
@@ -181,8 +184,58 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
ath9k_btcoex_timer_resume(sc);
}
-void ath_mci_process_profile(struct ath_softc *sc,
- struct ath_mci_profile_info *info)
+
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 payload[4] = {0, 0, 0, 0};
+
+ switch (opcode) {
+ case MCI_GPM_BT_CAL_REQ:
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_REQ\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ } else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI state mismatch: %d\n",
+ ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+
+ break;
+
+ case MCI_GPM_BT_CAL_DONE:
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_DONE\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
+ ath_dbg(common, ATH_DBG_MCI, "MCI error illegal!\n");
+ else
+ ath_dbg(common, ATH_DBG_MCI, "MCI BT not in CAL state\n");
+
+ break;
+
+ case MCI_GPM_BT_CAL_GRANT:
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_GRANT\n");
+
+ /* Send WLAN_CAL_DONE for now */
+ ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_DONE\n");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+ ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+ 16, false, true);
+ break;
+
+ default:
+ ath_dbg(common, ATH_DBG_MCI, "MCI Unknown GPM CAL message\n");
+ break;
+ }
+}
+
+static void ath_mci_process_profile(struct ath_softc *sc,
+ struct ath_mci_profile_info *info)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -208,8 +261,8 @@ void ath_mci_process_profile(struct ath_softc *sc,
ath_mci_update_scheme(sc);
}
-void ath_mci_process_status(struct ath_softc *sc,
- struct ath_mci_profile_status *status)
+static void ath_mci_process_status(struct ath_softc *sc,
+ struct ath_mci_profile_status *status)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -252,3 +305,369 @@ void ath_mci_process_status(struct ath_softc *sc,
if (old_num_mgmt != mci->num_mgmt)
ath_mci_update_scheme(sc);
}
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_profile_info profile_info;
+ struct ath_mci_profile_status profile_status;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 version;
+ u8 major;
+ u8 minor;
+ u32 seq_num;
+
+ switch (opcode) {
+
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX Version Query.\n");
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+ break;
+
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX Version Response.\n");
+ major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+ minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT Coex version: %d.%d\n", major, minor);
+ version = (major << 8) + minor;
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_COEX_VERSION, &version);
+ break;
+
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02x.\n",
+ *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
+ ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+ break;
+
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM Coex BT profile info\n");
+ memcpy(&profile_info,
+ (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+ if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
+ || (profile_info.type >=
+ MCI_GPM_COEX_PROFILE_MAX)) {
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "illegal profile type = %d, "
+ "state = %d\n", profile_info.type,
+ profile_info.start);
+ break;
+ }
+
+ ath_mci_process_profile(sc, &profile_info);
+ break;
+
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ profile_status.is_link = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_TYPE);
+ profile_status.conn_handle = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_LINKID);
+ profile_status.is_critical = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_STATE);
+
+ seq_num = *((u32 *)(rx_payload + 12));
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Recv GPM COEX BT_Status_Update: "
+ "is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+ profile_status.is_link, profile_status.conn_handle,
+ profile_status.is_critical, seq_num);
+
+ ath_mci_process_status(sc, &profile_status);
+ break;
+
+ default:
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Unknown GPM COEX message = 0x%02x\n", opcode);
+ break;
+ }
+}
+
+static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ int error = 0;
+
+ buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
+ &buf->bf_paddr, GFP_KERNEL);
+
+ if (buf->bf_addr == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ memset(buf, 0, sizeof(*buf));
+ return error;
+}
+
+static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ if (buf->bf_addr) {
+ dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
+ buf->bf_paddr);
+ memset(buf, 0, sizeof(*buf));
+ }
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ int error = 0;
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+
+ if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+ ath_dbg(common, ATH_DBG_FATAL, "MCI buffer alloc failed\n");
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
+ memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
+ mci->sched_buf.bf_len);
+
+ mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+ mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
+ mci->sched_buf.bf_len;
+ mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+ /* initialize the buffer */
+ memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
+
+ ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+ mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+ mci->sched_buf.bf_paddr);
+fail:
+ return error;
+}
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_coex *mci = &sc->mci_coex;
+
+ /*
+ * The scheduler and GPM buffers share one allocation,
+ * so freeing the scheduler buffer releases both of them.
+ */
+ ath_mci_buf_free(sc, &mci->sched_buf);
+ ar9003_mci_cleanup(ah);
+}
+
+void ath_mci_intr(struct ath_softc *sc)
+{
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 mci_int, mci_int_rxmsg;
+ u32 offset, subtype, opcode;
+ u32 *pgpm;
+ u32 more_data = MCI_GPM_MORE;
+ bool skip_gpm = false;
+
+ ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
+
+ ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI interrupt but MCI disabled\n");
+
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
+ mci_int, mci_int_rxmsg);
+ return;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+ u32 payload[4] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffff00};
+
+ /*
+ * The following REMOTE_RESET and SYS_WAKING used to be sent
+ * only when BT woke up. Now they are always sent, as a
+ * recovery method to reset the BT MCI's RX alignment.
+ */
+ ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send REMOTE_RESET\n");
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+ payload, 16, true, false);
+ ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send SYS_WAKING\n");
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+ NULL, 0, true, false);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+ ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+
+ /*
+ * Always do this, for recovery as well as for 2G/5G toggling and LNA_TRANS.
+ */
+ ath_dbg(common, ATH_DBG_MCI, "MCI Set BT state to AWAKE.\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+
+ /* Processing SYS_WAKING/SYS_SLEEPING */
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_SLEEP)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT stays in sleep mode\n");
+ else {
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI Set BT state to AWAKE.\n");
+ ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+ } else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT stays in AWAKE mode.\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_AWAKE)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT stays in AWAKE mode.\n");
+ else {
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI SetBT state to SLEEP\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
+ NULL);
+ }
+ } else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI BT stays in SLEEP mode\n");
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+
+ ath_dbg(common, ATH_DBG_MCI, "MCI RX broken, skip GPM msgs\n");
+ ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+ skip_gpm = true;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+ offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ NULL);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+ while (more_data == MCI_GPM_MORE) {
+
+ pgpm = mci->gpm_buf.bf_addr;
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ pgpm += (offset >> 2);
+
+ /*
+ * The first dword is the timer.
+ * The real data starts from the 2nd dword.
+ */
+
+ subtype = MCI_GPM_TYPE(pgpm);
+ opcode = MCI_GPM_OPCODE(pgpm);
+
+ if (!skip_gpm) {
+
+ if (MCI_GPM_IS_CAL_TYPE(subtype))
+ ath_mci_cal_msg(sc, subtype,
+ (u8 *) pgpm);
+ else {
+ switch (subtype) {
+ case MCI_GPM_COEX_AGENT:
+ ath_mci_msg(sc, opcode,
+ (u8 *) pgpm);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ MCI_GPM_RECYCLE(pgpm);
+ }
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+ ath_dbg(common, ATH_DBG_MCI, "MCI LNA_INFO\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+
+ int value_dbm = ar9003_mci_state(ah,
+ MCI_STATE_CONT_RSSI_POWER, NULL);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+ if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI CONT_INFO: "
+ "(tx) pri = %d, pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ else
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI CONT_INFO: "
+ "(rx) pri = %d, pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+ ath_dbg(common, ATH_DBG_MCI, "MCI CONT_NACK\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+ ath_dbg(common, ATH_DBG_MCI, "MCI CONT_RST\n");
+ }
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+ mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+
+ if (mci_int_rxmsg & 0xfffffffe)
+ ath_dbg(common, ATH_DBG_MCI,
+ "MCI not processed mci_int_rxmsg = 0x%x\n",
+ mci_int_rxmsg);
+}
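
Note: the GPM receive loop in ath_mci_intr() above walks the buffer in 16-byte steps (offset >> 2 appears to convert a byte offset into a dword index) and reads the type/opcode out of the second dword. The implied entry layout, written out as a struct purely for documentation (this struct does not exist in the driver):

	/* Illustrative only: layout of one 16-byte GPM entry. */
	struct example_mci_gpm_entry {
		u32 timer;       /* dword 0: timer, per the comment in ath_mci_intr() */
		u8  type;        /* byte 4: read via MCI_GPM_COEX_B_GPM_TYPE */
		u8  opcode;      /* byte 5: read via MCI_GPM_COEX_B_GPM_OPCODE */
		u8  payload[10]; /* remaining bytes of the 16-byte entry */
	} __packed;
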
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index 9590c61822d..29e3e51d078 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -17,6 +17,9 @@
#ifndef MCI_H
#define MCI_H
+#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dwords each */
+#define ATH_MCI_GPM_MAX_ENTRY 16
+#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
#define ATH_MCI_DEF_BT_PERIOD 40
#define ATH_MCI_BDR_DUTY_CYCLE 20
#define ATH_MCI_MAX_DUTY_CYCLE 90
@@ -110,9 +113,22 @@ struct ath_mci_profile {
u8 num_bdr;
};
+
+struct ath_mci_buf {
+ void *bf_addr; /* virtual addr of buffer */
+ dma_addr_t bf_paddr; /* physical addr of buffer */
+ u32 bf_len; /* len of data */
+};
+
+struct ath_mci_coex {
+ atomic_t mci_cal_flag;
+ struct ath_mci_buf sched_buf;
+ struct ath_mci_buf gpm_buf;
+ u32 bt_cal_start;
+};
+
void ath_mci_flush_profile(struct ath_mci_profile *mci);
-void ath_mci_process_profile(struct ath_softc *sc,
- struct ath_mci_profile_info *info);
-void ath_mci_process_status(struct ath_softc *sc,
- struct ath_mci_profile_status *status);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
#endif
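
Note: ath_mci_setup() above services both regions from one dma_alloc_coherent() call: the scheduler area comes first, the GPM area immediately after it, and the GPM length is passed to ar9003_mci_setup() as a count of 16-byte entries (bf_len >> 4). A sketch of that carve-up under those assumptions (helper name is hypothetical):

	/* Illustrative only: how the single coherent buffer is split. */
	static void example_mci_buf_layout(struct ath_mci_coex *mci)
	{
		u8 *base = mci->sched_buf.bf_addr;
		void *gpm_virt = base + ATH_MCI_SCHED_BUF_SIZE;
		dma_addr_t gpm_phys = mci->sched_buf.bf_paddr +
				      ATH_MCI_SCHED_BUF_SIZE;
		u16 gpm_entries = ATH_MCI_GPM_BUF_SIZE >> 4; /* 16-byte entries */

		(void)gpm_virt;
		(void)gpm_phys;
		(void)gpm_entries;
	}
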
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index edb0b4b3da3..a439edc5dc0 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/ath9k_platform.h>
+#include <linux/module.h>
#include "ath9k.h"
static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
@@ -306,12 +307,11 @@ static int ath_pci_suspend(struct device *device)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
/* The device has to be moved to FULLSLEEP forcibly.
* Otherwise the chip never moved to full sleep,
* when no interface is up.
*/
+ ath9k_hw_disable(sc->sc_ah);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
return 0;
@@ -320,8 +320,6 @@ static int ath_pci_suspend(struct device *device)
static int ath_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_softc *sc = hw->priv;
u32 val;
/*
@@ -333,22 +331,6 @@ static int ath_pci_resume(struct device *device)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- ath9k_ps_wakeup(sc);
- /* Enable LED */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-
- /*
- * Reset key cache to sane defaults (all entries cleared) instead of
- * semi-random values after suspend/resume.
- */
- ath9k_cmn_init_crypto(sc->sc_ah);
- ath9k_ps_restore(sc);
-
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 8448281dd06..528d5f3e868 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -16,6 +16,7 @@
*/
#include <linux/slab.h>
+#include <linux/export.h>
#include "ath9k.h"
@@ -1270,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->max_valid_rate = k;
ath_rc_sort_validrates(rate_table, ath_rc_priv);
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+ ath_rc_priv->rate_max_phy = (k > 4) ?
+ ath_rc_priv->valid_rate_index[k-4] :
+ ath_rc_priv->valid_rate_index[k-1];
ath_rc_priv->rate_table = rate_table;
ath_dbg(common, ATH_DBG_CONFIG,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 67b862cdae6..ad5176de07d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
return rfilt;
-#undef RX_FILTER_PRESERVE
}
int ath_startrecv(struct ath_softc *sc)
@@ -1824,6 +1823,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
if (ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
!compare_ether_addr(hdr->addr3, common->curbssid))
rs.is_mybeacon = true;
else
@@ -1838,11 +1838,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (sc->sc_flags & SC_OP_RXFLUSH)
goto requeue_drop_frag;
- retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
- rxs, &decrypt_error);
- if (retval)
- goto requeue_drop_frag;
-
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1847,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
+ retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+ rxs, &decrypt_error);
+ if (retval)
+ goto requeue_drop_frag;
+
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1923,15 +1923,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb = hdr_skb;
}
- /*
- * change the default rx antenna if rx diversity chooses the
- * other antenna 3 times in a row.
- */
- if (sc->rx.defant != rs.rs_antenna) {
- if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rs.rs_antenna);
- } else {
- sc->rx.rxotherant = 0;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+
+ /*
+ * change the default rx antenna if rx diversity
+ * chooses the other antenna 3 times in a row.
+ */
+ if (sc->rx.defant != rs.rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
+ ath_setdefantenna(sc, rs.rs_antenna);
+ } else {
+ sc->rx.rxotherant = 0;
+ }
+
}
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 45910975d85..6e2f18861f5 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1006,6 +1006,8 @@ enum {
#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_MASK_GPIO_S 18
+#define AR_INTR_ASYNC_MASK_MCI 0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S 7
#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
@@ -1013,6 +1015,14 @@ enum {
#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
+#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
+ AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S 7
+
#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
@@ -1269,6 +1279,8 @@ enum {
#define AR_RTC_INTR_MASK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+#define AR_RTC_KEEP_AWAKE 0x7034
+
/* RTC_DERIVED_* - only for AR9100 */
#define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@ enum {
#define AR_DIAG_FRAME_NV0 0x00020000
#define AR_DIAG_OBS_PT_SEL1 0x000C0000
#define AR_DIAG_OBS_PT_SEL1_S 18
+#define AR_DIAG_OBS_PT_SEL2 0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S 27
#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */
#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
@@ -1929,37 +1943,277 @@ enum {
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
/* MCI Registers */
-#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+
+#define AR_MCI_COMMAND0 0x1800
+#define AR_MCI_COMMAND0_HEADER 0xFF
+#define AR_MCI_COMMAND0_HEADER_S 0
+#define AR_MCI_COMMAND0_LEN 0x1f00
+#define AR_MCI_COMMAND0_LEN_S 8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
+
+#define AR_MCI_COMMAND1 0x1804
+
+#define AR_MCI_COMMAND2 0x1808
+#define AR_MCI_COMMAND2_RESET_TX 0x01
+#define AR_MCI_COMMAND2_RESET_TX_S 0
+#define AR_MCI_COMMAND2_RESET_RX 0x02
+#define AR_MCI_COMMAND2_RESET_RX_S 1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
+
+#define AR_MCI_RX_CTRL 0x180c
+
+#define AR_MCI_TX_CTRL 0x1810
+/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
+#define AR_MCI_TX_CTRL_CLK_DIV 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S 0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
+
+#define AR_MCI_SCHD_TABLE_0 0x1818
+#define AR_MCI_SCHD_TABLE_1 0x181c
+#define AR_MCI_GPM_0 0x1820
+#define AR_MCI_GPM_1 0x1824
+#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S 16
+#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S 0
+
+#define AR_MCI_INTERRUPT_RAW 0x1828
+#define AR_MCI_INTERRUPT_EN 0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
+#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S 9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
+#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S 11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
+#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S 28
+#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S 29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
+ AR_MCI_INTERRUPT_RX_INVALID_HDR | \
+ AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_MSG | \
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_REMOTE_CPU_INT 0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
+ AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#define AR_MCI_CPU_INT 0x1840
+
+#define AR_MCI_RX_STATUS 0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
+#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S 12
+#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S 13
+
+#define AR_MCI_CONT_STATUS 0x1848
+#define AR_MCI_CONT_RSSI_POWER 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S 0
+#define AR_MCI_CONT_RRIORITY 0x0000FF00
+#define AR_MCI_CONT_RRIORITY_S 8
+#define AR_MCI_CONT_TXRX 0x00010000
+#define AR_MCI_CONT_TXRX_S 16
+
+#define AR_MCI_BT_PRI0 0x184c
+#define AR_MCI_BT_PRI1 0x1850
+#define AR_MCI_BT_PRI2 0x1854
+#define AR_MCI_BT_PRI3 0x1858
+#define AR_MCI_BT_PRI 0x185c
+#define AR_MCI_WL_FREQ0 0x1860
+#define AR_MCI_WL_FREQ1 0x1864
+#define AR_MCI_WL_FREQ2 0x1868
+#define AR_MCI_GAIN 0x186c
+#define AR_MCI_WBTIMER1 0x1870
+#define AR_MCI_WBTIMER2 0x1874
+#define AR_MCI_WBTIMER3 0x1878
+#define AR_MCI_WBTIMER4 0x187c
+#define AR_MCI_MAXGAIN 0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0 0x1888
+#define AR_MCI_HW_SCHD_TBL_D1 0x188c
+#define AR_MCI_HW_SCHD_TBL_D2 0x1890
+#define AR_MCI_HW_SCHD_TBL_D3 0x1894
+#define AR_MCI_TX_PAYLOAD0 0x1898
+#define AR_MCI_TX_PAYLOAD1 0x189c
+#define AR_MCI_TX_PAYLOAD2 0x18a0
+#define AR_MCI_TX_PAYLOAD3 0x18a4
+#define AR_BTCOEX_WBTIMER 0x18a8
+
+#define AR_BTCOEX_CTRL 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
+#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
+#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
+#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S 4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
+
+#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
+#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA 0x1940
+#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+
+#define AR_BTCOEX_CTRL2 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S 17
+#define AR_GLB_DS_JTAG_DISABLE 0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S 18
+
+#define AR_BTCOEX_RC 0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG 0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
+
+#define AR_MCI_SCHD_TABLE_2 0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
+
+#define AR_BTCOEX_CTRL3 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+
#endif
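
Note: the new MCI/BTCOEX register definitions above follow the driver's usual mask/shift pairing: each field FOO comes with a companion FOO_S giving its bit offset, so a field is extracted as (value & FOO) >> FOO_S. For example, reading the BT remote-sleep flag out of AR_MCI_RX_STATUS could look like this (illustrative helper, not part of the patch):

	/* Illustrative only: extract a field using the FOO / FOO_S pair. */
	static bool example_bt_remote_sleep(struct ath_hw *ah)
	{
		u32 status = REG_READ(ah, AR_MCI_RX_STATUS);

		return !!((status & AR_MCI_RX_REMOTE_SLEEP) >>
			  AR_MCI_RX_REMOTE_SLEEP_S);
	}
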
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 55d077e7135..23e80e63bca 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar);
+ struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -150,6 +150,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
+static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+{
+ ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
+ seqno << IEEE80211_SEQ_SEQ_SHIFT);
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -158,28 +164,33 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
+ bool sendbar = false;
INIT_LIST_HEAD(&bf_head);
memset(&ts, 0, sizeof(ts));
- spin_lock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&tid->buf_q))) {
fi = get_frame_info(skb);
bf = fi->bf;
- spin_unlock_bh(&txq->axq_lock);
if (bf && fi->retries) {
list_add_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ sendbar = true;
} else {
ath_tx_send_normal(sc, txq, NULL, skb);
}
- spin_lock_bh(&txq->axq_lock);
}
- spin_unlock_bh(&txq->axq_lock);
+ if (tid->baw_head == tid->baw_tail) {
+ tid->state &= ~AGGR_ADDBA_COMPLETE;
+ tid->state &= ~AGGR_CLEANUP;
+ }
+
+ if (sendbar)
+ ath_send_bar(tid, tid->seq_start);
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -195,6 +206,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+ if (tid->bar_index >= 0)
+ tid->bar_index--;
}
}
@@ -238,9 +251,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
- spin_unlock(&txq->axq_lock);
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
- spin_lock(&txq->axq_lock);
continue;
}
@@ -249,24 +260,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
if (fi->retries)
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- spin_unlock(&txq->axq_lock);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
tid->seq_next = tid->seq_start;
tid->baw_tail = tid->baw_head;
+ tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
- struct sk_buff *skb)
+ struct sk_buff *skb, int count)
{
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_buf *bf = fi->bf;
struct ieee80211_hdr *hdr;
+ int prev = fi->retries;
TX_STAT_INC(txq->axq_qnum, a_retries);
- if (fi->retries++ > 0)
+ fi->retries += count;
+
+ if (prev > 0)
return;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -365,7 +378,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
- u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
@@ -374,6 +387,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int nframes;
u8 tidno;
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ int i, retries;
+ int bar_index = -1;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,6 +397,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memcpy(rates, tx_info->control.rates, sizeof(rates));
+ retries = ts->ts_longretry + 1;
+ for (i = 0; i < ts->ts_rateindex; i++)
+ retries += rates[i].count;
+
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
@@ -395,8 +414,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
bf = bf_next;
}
@@ -406,6 +424,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
an = (struct ath_node *)sta->drv_priv;
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(an, tidno);
+ seq_first = tid->seq_start;
/*
* The hardware occasionally sends a tx status for the wrong TID.
@@ -455,25 +474,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
+ } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+ /*
+ * cleanup in progress, just fail
+ * the un-acked sub-frames
+ */
+ txfail = 1;
+ } else if (flush) {
+ txpending = 1;
+ } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+ if (txok || !an->sleeping)
+ ath_tx_set_retry(sc, txq, bf->bf_mpdu,
+ retries);
+
+ txpending = 1;
} else {
- if ((tid->state & AGGR_CLEANUP) || !retry) {
- /*
- * cleanup in progress, just fail
- * the un-acked sub-frames
- */
- txfail = 1;
- } else if (flush) {
- txpending = 1;
- } else if (fi->retries < ATH_MAX_SW_RETRIES) {
- if (txok || !an->sleeping)
- ath_tx_set_retry(sc, txq, bf->bf_mpdu);
-
- txpending = 1;
- } else {
- txfail = 1;
- sendbar = 1;
- txfail_cnt++;
- }
+ txfail = 1;
+ txfail_cnt++;
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
}
/*
@@ -490,9 +509,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- spin_lock_bh(&txq->axq_lock);
ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -501,33 +518,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- !txfail, sendbar);
+ !txfail);
} else {
/* retry the un-acked ones */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
- if (bf->bf_next == NULL && bf_last->bf_stale) {
- struct ath_buf *tbf;
-
- tbf = ath_clone_txbuf(sc, bf_last);
- /*
- * Update tx baw and complete the
- * frame with failed status if we
- * run out of tx buf.
- */
- if (!tbf) {
- spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
-
- ath_tx_complete_buf(sc, bf, txq,
- &bf_head,
- ts, 0,
- !flush);
- break;
- }
-
- fi->bf = tbf;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ bf->bf_next == NULL && bf_last->bf_stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ ath_tx_update_baw(sc, tid, seqno);
+
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head, ts, 0);
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
+ break;
}
+
+ fi->bf = tbf;
}
/*
@@ -540,12 +554,18 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf = bf_next;
}
+ if (bar_index >= 0) {
+ u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
+ ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
+ if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
+ tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
+ }
+
/* prepend un-acked frames to the beginning of the pending frame queue */
if (!skb_queue_empty(&bf_pending)) {
if (an->sleeping)
ieee80211_sta_set_buffered(sta, tid->tidno, true);
- spin_lock_bh(&txq->axq_lock);
skb_queue_splice(&bf_pending, &tid->buf_q);
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
@@ -553,18 +573,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (ts->ts_status & ATH9K_TXERR_FILT)
tid->ac->clear_ps_filter = true;
}
- spin_unlock_bh(&txq->axq_lock);
}
- if (tid->state & AGGR_CLEANUP) {
+ if (tid->state & AGGR_CLEANUP)
ath_tx_flush_tid(sc, tid);
- if (tid->baw_head == tid->baw_tail) {
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
- }
- }
-
rcu_read_unlock();
if (needreset) {
@@ -618,24 +631,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
for (i = 0; i < 4; i++) {
- if (rates[i].count) {
- int modeidx;
- if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
- legacy = 1;
- break;
- }
+ int modeidx;
- if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- modeidx = MCS_HT40;
- else
- modeidx = MCS_HT20;
-
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- modeidx++;
+ if (!rates[i].count)
+ continue;
- frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
- max_4ms_framelen = min(max_4ms_framelen, frmlen);
+ if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
+ legacy = 1;
+ break;
}
+
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ modeidx = MCS_HT40;
+ else
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
+
+ frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+ max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
/*
@@ -771,8 +786,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
- if (!bf_first)
- bf_first = bf;
/* do not step over block-ack window */
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -780,6 +793,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
+ if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
+ struct ath_tx_status ts = {};
+ struct list_head bf_head;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add(&bf->list, &bf_head);
+ __skb_unlink(skb, &tid->buf_q);
+ ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ continue;
+ }
+
+ if (!bf_first)
+ bf_first = bf;
+
if (!rl) {
aggr_limit = ath_lookup_rate(sc, bf, tid);
rl = 1;
@@ -1122,6 +1150,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
+ txtid->bar_index = -1;
memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
txtid->baw_head = txtid->baw_tail = 0;
@@ -1156,9 +1185,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
txtid->state |= AGGR_CLEANUP;
else
txtid->state &= ~AGGR_ADDBA_COMPLETE;
- spin_unlock_bh(&txq->axq_lock);
ath_tx_flush_tid(sc, txtid);
+ spin_unlock_bh(&txq->axq_lock);
}
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1400,8 +1429,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *list, bool retry_tx)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1428,13 +1455,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
retry_tx);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock_bh(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
}
@@ -1561,11 +1586,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
break;
}
- if (!list_empty(&ac->tid_q)) {
- if (!ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
- }
+ if (!list_empty(&ac->tid_q) && !ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
}
if (ac == last_ac ||
@@ -1708,10 +1731,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0;
- /* update starting sequence number for subsequent ADDBA request */
- if (tid)
- INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1819,7 +1838,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- spin_lock_bh(&txctl->txq->axq_lock);
if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1838,7 +1856,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
} else {
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
if (!bf)
- goto out;
+ return;
bf->bf_state.bfs_paprd = txctl->paprd;
@@ -1847,9 +1865,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
ath_tx_send_normal(sc, txctl->txq, tid, skb);
}
-
-out:
- spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
@@ -1916,9 +1931,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
ieee80211_stop_queue(sc->hw, q);
txq->stopped = 1;
}
- spin_unlock_bh(&txq->axq_lock);
ath_tx_start_dma(sc, skb, txctl);
+
+ spin_unlock_bh(&txq->axq_lock);
+
return 0;
}
@@ -1937,9 +1954,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
- if (tx_flags & ATH_TX_BAR)
- tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
tx_info->flags |= IEEE80211_TX_STAT_ACK;
@@ -1955,7 +1969,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_dbg(common, ATH_DBG_PS,
"Going back to sleep after having received TX status (0x%lx)\n",
@@ -1967,7 +1981,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.txq_map[q]) {
- spin_lock_bh(&txq->axq_lock);
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
@@ -1975,7 +1988,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ieee80211_wake_queue(sc->hw, q);
txq->stopped = 0;
}
- spin_unlock_bh(&txq->axq_lock);
}
ieee80211_tx_status(hw, skb);
@@ -1983,16 +1995,13 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar)
+ struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
unsigned long flags;
int tx_flags = 0;
- if (sendbar)
- tx_flags = ATH_TX_BAR;
-
if (!txok)
tx_flags |= ATH_TX_ERROR;
@@ -2084,8 +2093,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
int txok;
@@ -2095,16 +2102,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
-
if (!bf_isampdu(bf)) {
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
- spin_lock_bh(&txq->axq_lock);
-
if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
}
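
The ath9k changes above replace the per-frame "sendbar" flag with a bar_index that is tracked relative to the start of the block-ack window, using modulo-4096 sequence arithmetic (IEEE80211_SEQ_MAX). As a rough, standalone illustration of what ATH_BA_INDEX / ATH_BA_INDEX2SEQ / BAW_WITHIN-style helpers compute, here is a userspace C sketch; the helper names, the exact macro definitions and the 64-frame window size are assumptions for the example, not taken from the patch itself.

/*
 * Standalone sketch of block-ack window index arithmetic.
 * Sequence numbers are 12-bit (0..4095); an index is the distance
 * from the window start, computed modulo 4096.  These helpers mirror
 * what the ATH_BA_* macros are assumed to do; they are not the
 * driver's definitions.
 */
#include <stdio.h>

#define SEQ_MAX 4096u	/* IEEE 802.11 sequence number space */

static unsigned ba_index(unsigned seq_start, unsigned seqno)
{
	return (seqno - seq_start) & (SEQ_MAX - 1);
}

static unsigned ba_index2seq(unsigned seq_start, unsigned index)
{
	return (seq_start + index) & (SEQ_MAX - 1);
}

static int baw_within(unsigned seq_start, unsigned baw_size, unsigned seqno)
{
	return ba_index(seq_start, seqno) < baw_size;
}

int main(void)
{
	unsigned seq_first = 4090;	/* window start near the wrap point */
	unsigned failed_seq = 3;	/* a frame that exhausted sw retries */
	unsigned bar_index = ba_index(seq_first, failed_seq);

	/* The BAR start sequence is the frame right after the failed one. */
	printf("bar_index = %u\n", bar_index);
	printf("BAR start seq = %u\n", ba_index2seq(seq_first, bar_index + 1));
	printf("within 64-frame window: %d\n",
	       baw_within(seq_first, 64, failed_seq));
	return 0;
}

The wrap-around case (seq_first near 4095, seqno small) is the reason the subtraction is masked rather than compared directly, which is also why bar_index is decremented in ath_tx_update_baw() as the window slides forward.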
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index f4cae1cccbf..cba9d0435dc 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/crc32.h>
+#include <linux/module.h>
#include "carl9170.h"
#include "fwcmd.h"
#include "version.h"
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 59472e1605c..d19a9ee9d05 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
* feedback either [CTL_REQ_TX_STATUS not set]
*/
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
return;
} else {
/*
@@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
err_free:
ar->tx_dropped++;
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
}
void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index 5367b1086e0..508eccf5d98 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "ath.h"
const char *ath_opmode_to_string(enum nl80211_iftype opmode)
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 3f508e59f14..19befb33107 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <asm/unaligned.h>
#include "ath.h"
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 17b0efd86f9..4cf7c5eb481 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -15,6 +15,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index f1be57f0f5b..10dea37431b 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -15,11 +15,14 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "regd.h"
#include "regd_common.h"
+static int __ath_regd_init(struct ath_regulatory *reg);
+
/*
* This is a set of common rules used by our world regulatory domains.
* We have 12 world regulatory domains. To save space we consolidate
@@ -346,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
}
}
+static u16 ath_regd_find_country_by_name(char *alpha2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (!memcmp(allCountries[i].isoName, alpha2, 2))
+ return allCountries[i].countryCode;
+ }
+
+ return -1;
+}
+
int ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ u16 country_code;
+
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
@@ -362,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
return 0;
switch (request->initiator) {
- case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE:
+ /*
+ * If common->reg_world_copy holds a world regulatory domain, we were
+ * world roaming before this request, so restore that data now.

+ */
+ if (!ath_is_world_regd(&common->reg_world_copy))
+ break;
+
+ memcpy(reg, &common->reg_world_copy,
+ sizeof(struct ath_regulatory));
+ break;
+ case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_USER:
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (ath_is_world_regd(reg))
- ath_reg_apply_world_flags(wiphy, request->initiator,
- reg);
+ if (!ath_is_world_regd(reg))
+ break;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ break;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
+ reg->current_rd);
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
break;
}
@@ -507,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg)
reg->current_rd = 0x64;
}
-int
-ath_regd_init(struct ath_regulatory *reg,
- struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+static int __ath_regd_init(struct ath_regulatory *reg)
{
struct country_code_to_enum_rd *country = NULL;
u16 regdmn;
@@ -582,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg,
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
reg->regpair->regDmnEnum);
+ return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ int r;
+
+ r = __ath_regd_init(reg);
+ if (r)
+ return r;
+
+ if (ath_is_world_regd(reg))
+ memcpy(&common->reg_world_copy, reg,
+ sizeof(struct ath_regulatory));
+
ath_regd_init_wiphy(reg, wiphy, reg_notifier);
+
return 0;
}
EXPORT_SYMBOL(ath_regd_init);
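
The country-IE branch added above follows a simple pattern: look the ISO alpha2 string up in a static table, OR the resulting country code together with a "country" flag bit into current_rd, and re-run the regdomain init. A minimal userspace sketch of that pattern follows; the table entries, the flag value and the function names are invented for illustration (the real data lives in allCountries[] and COUNTRY_ERD_FLAG, which are not reproduced here).

/*
 * Illustrative sketch only: map a two-letter country string to a
 * numeric country code and fold it into a regulatory-domain word
 * with a "country" flag bit set, as the CountryIE branch does.
 */
#include <stdio.h>
#include <string.h>

#define COUNTRY_FLAG 0x8000u	/* placeholder for COUNTRY_ERD_FLAG */

struct country_map {
	const char *iso;
	unsigned short code;
};

static const struct country_map countries[] = {
	{ "US", 840 },
	{ "DE", 276 },
	{ "JP", 392 },
};

static unsigned short find_country(const char *alpha2)
{
	size_t i;

	for (i = 0; i < sizeof(countries) / sizeof(countries[0]); i++)
		if (!memcmp(countries[i].iso, alpha2, 2))
			return countries[i].code;
	return (unsigned short)-1;	/* not found */
}

int main(void)
{
	unsigned short code = find_country("DE");
	unsigned current_rd;

	if (code == (unsigned short)-1)
		return 1;
	current_rd = COUNTRY_FLAG | code;
	printf("regdomain 0x%04x updated by CountryIE\n", current_rd);
	return 0;
}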
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 447a2307c9d..37110dfd2c9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -1011,14 +1011,10 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
}
/* Message printing */
-void b43info(struct b43_wl *wl, const char *fmt, ...)
- __attribute__ ((format(printf, 2, 3)));
-void b43err(struct b43_wl *wl, const char *fmt, ...)
- __attribute__ ((format(printf, 2, 3)));
-void b43warn(struct b43_wl *wl, const char *fmt, ...)
- __attribute__ ((format(printf, 2, 3)));
-void b43dbg(struct b43_wl *wl, const char *fmt, ...)
- __attribute__ ((format(printf, 2, 3)));
+__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
+__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
+__printf(2, 3) void b43warn(struct b43_wl *wl, const char *fmt, ...);
+__printf(2, 3) void b43dbg(struct b43_wl *wl, const char *fmt, ...);
/* A WARN_ON variant that vanishes when b43 debugging is disabled.
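
The __printf(2, 3) shorthand used in the new declarations expands to the compiler's printf-format attribute, which lets gcc/clang type-check the variadic arguments against the format string. A small standalone example of the underlying attribute (the wrapper name my_log and the context struct are invented):

/*
 * Demo of the format-checking attribute behind __printf(2, 3):
 * argument 2 is the format string, checking starts at argument 3.
 * With -Wformat enabled, mismatched arguments produce a warning.
 */
#include <stdarg.h>
#include <stdio.h>

struct ctx { const char *name; };

__attribute__((format(printf, 2, 3)))
static void my_log(struct ctx *c, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", c->name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct ctx c = { "b43-demo" };

	my_log(&c, "chan %d, rssi %d\n", 6, -42);
	/* my_log(&c, "chan %d\n", "six");  <- would trigger -Wformat */
	return 0;
}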
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7cf4125a162..5634d9a9965 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -34,7 +34,7 @@
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 12b6b4067a3..714cad649c4 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -25,6 +25,7 @@
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b17d9b6c33a..c8fa2cd97e6 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -228,10 +228,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
static void b43_radio_2056_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ u16 offset;
+ u8 i;
+ u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+
B43_WARN_ON(dev->phy.rev < 3);
b43_chantab_radio_2056_upload(dev, e);
- /* TODO */
+ b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+
+ if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ if (dev->dev->chip_id == 0x4716) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
+ } else {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
+ }
+ }
+ if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
+ }
+
+ if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < 2; i++) {
+ offset = i ? B2056_TX1 : B2056_TX0;
+ if (dev->phy.rev >= 5) {
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_IDAC, 0xcc);
+
+ if (dev->dev->chip_id == 0x4716) {
+ bias = 0x40;
+ cbias = 0x45;
+ pag_boost = 0x5;
+ pgag_boost = 0x33;
+ mixg_boost = 0x55;
+ } else {
+ bias = 0x25;
+ cbias = 0x20;
+ pag_boost = 0x4;
+ pgag_boost = 0x03;
+ mixg_boost = 0x65;
+ }
+ padg_boost = 0x77;
+
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ cbias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_BOOST_TUNE,
+ pag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PGAG_BOOST_TUNE,
+ pgag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_BOOST_TUNE,
+ padg_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_MIXG_BOOST_TUNE,
+ mixg_boost);
+ } else {
+ bias = dev->phy.is_40mhz ? 0x40 : 0x20;
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ 0x30);
+ }
+ b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
+ }
+ } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+ /* TODO */
+ }
+
udelay(50);
/* VCO calibration */
b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
@@ -387,7 +475,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- if (dev->phy.rev >= 3) {
+ if (dev->phy.rev >= 7) {
+ txpi[0] = txpi[1] = 30;
+ } else if (dev->phy.rev >= 3) {
txpi[0] = 40;
txpi[1] = 40;
} else if (sprom->revision < 4) {
@@ -411,6 +501,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
txpi[1] = 91;
}
}
+ if (dev->phy.rev < 7 &&
+ (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
+ txpi[0] = txpi[1] = 91;
/*
for (i = 0; i < 2; i++) {
@@ -421,15 +514,31 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
for (i = 0; i < 2; i++) {
if (dev->phy.rev >= 3) {
- /* FIXME: support 5GHz */
- txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ if (b43_nphy_ipa(dev)) {
+ txgain = *(b43_nphy_get_ipa_gain_table(dev) +
+ txpi[i]);
+ } else if (b43_current_band(dev->wl) ==
+ IEEE80211_BAND_5GHZ) {
+ /* FIXME: use 5GHz tables */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ } else {
+ if (dev->phy.rev >= 5 &&
+ sprom->fem.ghz5.extpa_gain == 3)
+ ; /* FIXME: 5GHz_txgain_HiPwrEPA */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ }
radio_gain = (txgain >> 16) & 0x1FFFF;
} else {
txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
radio_gain = (txgain >> 16) & 0x1FFF;
}
- dac_gain = (txgain >> 8) & 0x3F;
+ if (dev->phy.rev >= 7)
+ dac_gain = (txgain >> 8) & 0x7;
+ else
+ dac_gain = (txgain >> 8) & 0x3F;
bbmult = txgain & 0xFF;
if (dev->phy.rev >= 3) {
@@ -459,7 +568,8 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
u32 tmp32;
u16 reg = (i == 0) ?
B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
- tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i]));
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
+ 576 + txpi[i]));
b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
b43_phy_set(dev, reg, 0x4);
}
@@ -1493,8 +1603,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* TX to RX */
- u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
- u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
/* RX to TX */
u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F };
@@ -1505,6 +1615,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
u16 tmp16;
u32 tmp32;
+ b43_phy_write(dev, 0x23f, 0x1f8);
+ b43_phy_write(dev, 0x240, 0x1f8);
+
tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
tmp32 &= 0xffffff;
b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
@@ -1520,12 +1633,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_phy_write(dev, 0x2AE, 0x000C);
/* TX to RX */
- b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9);
+ b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
+ ARRAY_SIZE(tx2rx_events));
/* RX to TX */
if (b43_nphy_ipa(dev))
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa,
- rx2tx_delays_ipa, 9);
+ b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+ rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
if (nphy->hw_phyrxchain != 3 &&
nphy->hw_phyrxchain != nphy->hw_phytxchain) {
if (b43_nphy_ipa(dev)) {
@@ -1533,7 +1647,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
rx2tx_delays[6] = 1;
rx2tx_events[7] = 0x1F;
}
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9);
+ b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+ ARRAY_SIZE(rx2tx_events));
}
tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
@@ -1547,8 +1662,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_nphy_gain_ctrl_workarounds(dev);
- b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
- b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
+ b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
+ b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
/* TODO */
@@ -1560,6 +1675,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
@@ -1584,18 +1701,18 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
0x70);
}
- b43_phy_write(dev, 0x224, 0x039C);
- b43_phy_write(dev, 0x225, 0x0357);
- b43_phy_write(dev, 0x226, 0x0317);
- b43_phy_write(dev, 0x227, 0x02D7);
- b43_phy_write(dev, 0x228, 0x039C);
- b43_phy_write(dev, 0x229, 0x0357);
- b43_phy_write(dev, 0x22A, 0x0317);
- b43_phy_write(dev, 0x22B, 0x02D7);
- b43_phy_write(dev, 0x22C, 0x039C);
- b43_phy_write(dev, 0x22D, 0x0357);
- b43_phy_write(dev, 0x22E, 0x0317);
- b43_phy_write(dev, 0x22F, 0x02D7);
+ b43_phy_write(dev, 0x224, 0x03eb);
+ b43_phy_write(dev, 0x225, 0x03eb);
+ b43_phy_write(dev, 0x226, 0x0341);
+ b43_phy_write(dev, 0x227, 0x0341);
+ b43_phy_write(dev, 0x228, 0x042b);
+ b43_phy_write(dev, 0x229, 0x042b);
+ b43_phy_write(dev, 0x22a, 0x0381);
+ b43_phy_write(dev, 0x22b, 0x0381);
+ b43_phy_write(dev, 0x22c, 0x042b);
+ b43_phy_write(dev, 0x22d, 0x042b);
+ b43_phy_write(dev, 0x22e, 0x0381);
+ b43_phy_write(dev, 0x22f, 0x0381);
}
static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -3928,6 +4045,76 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
+static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
+{
+ struct bcma_drv_cc *cc;
+ u32 pmu_ctl;
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ cc = &dev->dev->bdev->bus->drv_cc;
+ if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else if (dev->dev->chip_id == 0x4716) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
+ BCMA_CC_PMU_CTL_NOILPONW;
+ } else if (dev->dev->chip_id == 0x4322 ||
+ dev->dev->chip_id == 0x4340 ||
+ dev->dev->chip_id == 0x4341) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100070);
+ bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888854);
+ if (avoid)
+ bcma_chipco_pll_write(cc, 0x2, 0x05201828);
+ else
+ bcma_chipco_pll_write(cc, 0x2, 0x05001828);
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else {
+ return;
+ }
+ bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* FIXME */
+ break;
+#endif
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void b43_nphy_channel_setup(struct b43_wldev *dev,
const struct b43_phy_n_sfo_cfg *e,
@@ -3935,6 +4122,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = dev->phy.n;
+ int ch = new_channel->hw_value;
u16 old_band_5ghz;
u32 tmp32;
@@ -3974,8 +4162,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_nphy_tx_lp_fbw(dev);
- if (dev->phy.rev >= 3 && 0) {
- /* TODO */
+ if (dev->phy.rev >= 3 &&
+ dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
+ bool avoid = false;
+ if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
+ avoid = true;
+ } else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
+ if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
+ avoid = true;
+ } else { /* 40MHz */
+ if (nphy->aband_spurwar_en &&
+ (ch == 38 || ch == 102 || ch == 118))
+ avoid = dev->dev->chip_id == 0x4716;
+ }
+
+ b43_nphy_pmu_spur_avoid(dev, avoid);
+
+ if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
+ dev->dev->chip_id == 43225) {
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
+ avoid ? 0x5341 : 0x8889);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+ }
+
+ if (dev->phy.rev == 3 || dev->phy.rev == 4)
+ ; /* TODO: reset PLL */
+
+ if (avoid)
+ b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
+ else
+ b43_phy_mask(dev, B43_NPHY_BBCFG,
+ ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
+
+ b43_nphy_reset_cca(dev);
+
+ /* wl sets useless phy_isspuravoid here */
}
b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
@@ -4055,10 +4276,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
memset(nphy, 0, sizeof(*nphy));
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+ nphy->spur_avoid = (phy->rev >= 3) ?
+ B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4067,6 +4291,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
* 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
nphy->tx_pwr_idx[0] = 128;
nphy->tx_pwr_idx[1] = 128;
+
+ /* Hardware TX power control and 5GHz power gain */
+ nphy->txpwrctrl = false;
+ nphy->pwg_gain_5ghz = false;
+ if (dev->phy.rev >= 3 ||
+ (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+ (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
+ nphy->txpwrctrl = true;
+ nphy->pwg_gain_5ghz = true;
+ } else if (sprom->revision >= 4) {
+ if (dev->phy.rev >= 2 &&
+ (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
+ nphy->txpwrctrl = true;
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
+ struct pci_dev *pdev =
+ dev->dev->sdev->bus->host_pci;
+ if (pdev->device == 0x4328 ||
+ pdev->device == 0x432a)
+ nphy->pwg_gain_5ghz = true;
+ }
+#endif
+ } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
+ nphy->pwg_gain_5ghz = true;
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
+ nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
+ }
}
static void b43_nphy_op_free(struct b43_wldev *dev)
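
The channel-setup hunk above decides whether to enable PLL spur avoidance from the spur-avoid mode, the channel bandwidth and the channel number. Here is a hedged, standalone sketch of that decision; the channel lists are copied from the hunk, while the function and enum names are illustrative rather than the driver's.

/*
 * Sketch of the spur-avoidance decision in b43_nphy_channel_setup():
 * force mode always avoids, 20 MHz avoids on channels 5-8, 13 and 14,
 * and the 40 MHz A-band channels are tied to one chip id.
 */
#include <stdbool.h>
#include <stdio.h>

enum spur_mode { SPUR_DISABLE, SPUR_AUTO, SPUR_FORCE };

static bool spur_avoid_needed(enum spur_mode mode, bool is_40mhz,
			      bool aband_spurwar_en, int ch, int chip_id)
{
	if (mode == SPUR_DISABLE)
		return false;
	if (mode == SPUR_FORCE)
		return true;
	if (!is_40mhz)
		return (ch >= 5 && ch <= 8) || ch == 13 || ch == 14;
	/* 40 MHz: only a few A-band channels, and only on BCM4716 */
	if (aband_spurwar_en && (ch == 38 || ch == 102 || ch == 118))
		return chip_id == 0x4716;
	return false;
}

int main(void)
{
	printf("ch 6 / 20 MHz: %d\n",
	       spur_avoid_needed(SPUR_AUTO, false, false, 6, 0x4322));
	printf("ch 102 / 40 MHz on 4716: %d\n",
	       spur_avoid_needed(SPUR_AUTO, true, true, 102, 0x4716));
	return 0;
}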
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fbf520285bd..56ef97b5b81 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -716,6 +716,12 @@
struct b43_wldev;
+enum b43_nphy_spur_avoid {
+ B43_SPUR_AVOID_DISABLE,
+ B43_SPUR_AVOID_AUTO,
+ B43_SPUR_AVOID_FORCE,
+};
+
struct b43_chanspec {
u16 center_freq;
enum nl80211_channel_type channel_type;
@@ -785,6 +791,7 @@ struct b43_phy_n {
u16 mphase_txcal_bestcoeffs[11];
bool txpwrctrl;
+ bool pwg_gain_5ghz;
u8 tx_pwr_idx[2];
u16 adj_pwr_tbl[84];
u16 txcal_bbmult;
@@ -803,6 +810,7 @@ struct b43_phy_n {
u16 classifier_state;
u16 clip_state[2];
+ enum b43_nphy_spur_avoid spur_avoid;
bool aband_spurwar_en;
bool gband_spurwar_en;
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index a01f776ca4d..ce037fb6789 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
[B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
- [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
- [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
[B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
[B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
[B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
B2056_RX1, pts->rx, pts->rx_length);
}
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
+{
+ struct b2056_inittabs_pts *pts;
+ const struct b2056_inittab_entry *e;
+
+ if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ B43_WARN_ON(1);
+ return;
+ }
+ pts = &b2056_inittabs[dev->phy.rev];
+ e = &pts->syn[B2056_SYN_PLL_CP2];
+
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
+}
+
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
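
The new b2056_upload_syn_pll_cp2() helper picks one init-table entry and writes either its 5 GHz or its 2.4 GHz value depending on the current band. The following toy model shows that per-band selection with the register write stubbed out; the struct layout is a simplification of b2056_inittab_entry, and the table contents are made up.

/*
 * Toy model of a per-band radio init table: each entry carries a
 * 5 GHz value, a 2.4 GHz value and an upload flag (the flag is only
 * consulted by the full-table upload path, not exercised here).
 */
#include <stdbool.h>
#include <stdio.h>

struct inittab_entry {
	unsigned short ghz5;
	unsigned short ghz2;
	bool upload;
};

/* placeholder table indexed by register number */
static const struct inittab_entry syn_tab[] = {
	[0] = { 0x0000, 0x0000, false },
	[1] = { 0x003f, 0x003f, true },	/* e.g. a PLL charge-pump register */
};

static void radio_write(unsigned reg, unsigned short val)
{
	/* stand-in for b43_radio_write() */
	printf("radio[0x%02x] <= 0x%04x\n", reg, val);
}

static void upload_one(unsigned reg, bool ghz5)
{
	const struct inittab_entry *e = &syn_tab[reg];

	radio_write(reg, ghz5 ? e->ghz5 : e->ghz2);
}

int main(void)
{
	upload_one(1, true);	/* 5 GHz band */
	upload_one(1, false);	/* 2.4 GHz band */
	return 0;
}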
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index a7159d8578b..5b86673459f 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 {
void b2056_upload_inittabs(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag);
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
/* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 7b326f2efdc..3252560e9fa 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = {
0x0000, 0x0000,
};
+/* volatile tables, PHY revision >= 3 */
+
+/* indexed by antswctl2g */
+static const u16 b43_ntab_antswctl2g_r3[4][32] = {
+ {
+ 0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
+ 0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
+ 0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
+ 0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
+ 0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
+ 0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
+ 0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
+ 0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
+ 0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
+ 0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
+ 0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x03cc,
+ }
+};
+
/* TX gain tables */
const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
const s16 tbl_tx_filter_coef_rev4[7][15] = {
{ -377, 137, -407, 208, -1527,
956, 93, 186, 93, 230,
- -44, 230, 20, -191, 201 },
+ -44, 230, 201, -191, 201 },
{ -77, 20, -98, 49, -93,
60, 56, 111, 56, 26,
-5, 26, 34, -32, 34 },
@@ -2838,9 +2880,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
break;
case B43_NTAB_32BIT:
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
- value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- value <<= 16;
- value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
break;
default:
B43_WARN_ON(1);
@@ -2864,6 +2905,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
*data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
@@ -2874,9 +2921,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
data += 2;
break;
case B43_NTAB_32BIT:
- *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- *((u32 *)data) <<= 16;
- *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) =
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) |=
+ b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
data += 4;
break;
default:
@@ -2932,6 +2980,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
+ dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
value = *data;
@@ -2999,6 +3054,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
} while (0)
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
/* Static tables */
ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
@@ -3029,7 +3086,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
/* Volatile tables */
- /* TODO */
+ if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
+ ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
+ b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
+ else
+ B43_WARN_ON(1);
}
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
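
The table-access hunks above change the order in which the two 16-bit halves of a 32-bit table word are read: the low data register first, then the high register shifted into the upper 16 bits. A self-contained sketch of assembling a value that way, with the PHY register reads stubbed by static variables (the stub names are invented):

/*
 * Sketch of assembling a 32-bit N-PHY table word from two 16-bit
 * data registers, reading DATALO first and then DATAHI, as the
 * reordered b43_ntab_read() does.
 */
#include <stdint.h>
#include <stdio.h>

/* fake data registers holding one 32-bit table word */
static uint16_t fake_datalo = 0x2b44;
static uint16_t fake_datahi = 0x03cc;

static uint16_t phy_read_lo(void) { return fake_datalo; }
static uint16_t phy_read_hi(void) { return fake_datahi; }

static uint32_t ntab_read32(void)
{
	uint32_t value;

	value = phy_read_lo();			/* low 16 bits first */
	value |= (uint32_t)phy_read_hi() << 16;	/* then the high 16 bits */
	return value;
}

int main(void)
{
	/* prints 0x03cc2b44, the first entry of b43_ntab_tx_gain_rev0_1_2 */
	printf("table word = 0x%08x\n", ntab_read32());
	return 0;
}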
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index a81696bff0e..97038c48193 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+/* Volatile N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */
+
/* Static N-PHY tables, PHY revision >= 3 */
-#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
-#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
-#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
-#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
-#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
+#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */
+#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */
+#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
+#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
+#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
+#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
-#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
+#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
-#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
-#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
-#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
-#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
-#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
-#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
-#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
+#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */
+#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */
+#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */
+#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */
+#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
+#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */
+#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
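
One reason the zero-padded offsets in these macros are worth avoiding: in C, a leading zero makes an integer literal octal, so 064 is 52 decimal, not 64. Whether the original 064 was meant as decimal 64 is for the patch to decide; the two-line demonstration below only shows the language rule.

/* Leading zeroes make C integer literals octal: 064 == 52, not 64. */
#include <stdio.h>

int main(void)
{
	printf("064 = %d, 64 = %d\n", 064, 64);	/* prints "064 = 52, 64 = 64" */
	return 0;
}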
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 12b51825158..1d4fc9db7f5 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -810,15 +810,15 @@ struct b43legacy_lopair *b43legacy_get_lopair(struct b43legacy_phy *phy,
/* Message printing */
-void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...)
- __attribute__((format(printf, 2, 3)));
-void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...)
- __attribute__((format(printf, 2, 3)));
-void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...)
- __attribute__((format(printf, 2, 3)));
+__printf(2, 3)
+void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...);
+__printf(2, 3)
+void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...);
+__printf(2, 3)
+void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...);
#if B43legacy_DEBUG
-void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...)
- __attribute__((format(printf, 2, 3)));
+__printf(2, 3)
+void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...);
#else /* DEBUG */
# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0)
#endif /* DEBUG */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index a3b72cd72c6..20f02437af8 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -31,7 +31,7 @@
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 2069fc8f7ad..8f54c2eb682 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -3,9 +3,8 @@ config BRCMUTIL
config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
- depends on PCI
depends on MAC80211
- depends on BCMA=n
+ depends on BCMA
select BRCMUTIL
select FW_LOADER
select CRC_CCITT
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644
index cecb5e5f412..00000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmchip_h_
-#define _bcmchip_h_
-
-/* bcm4329 */
-/* firmware name */
-#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin"
-#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt"
-
-#endif /* _bcmchip_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index bff9dcd6fad..6c85d668c9d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
@@ -221,19 +222,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
return sdiodev->regfail;
}
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
+static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
+ uint flags, uint width, u32 *addr)
{
- int status;
- uint incr_fix;
- uint width;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
-
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
@@ -246,29 +240,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
sdiodev->sbwad = bar0;
}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ if (width == 4)
+ *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return 0;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+ if (!err)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+}
+
+int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
+
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+ fn, addr, pkt);
+
+ return err;
+}
+
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pktq->qlen);
+
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- if (width == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
- status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
- fn, addr, width, nbytes, buf, pkt);
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
+ pktq);
- return status;
+ return err;
}
int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ memcpy(mypkt->data, buf, nbytes);
+ err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+
+}
+
+int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
@@ -290,18 +369,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
- addr, width, nbytes, buf, pkt);
+ addr, pkt);
}
int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
u8 *buf, uint nbytes)
{
+ struct sk_buff *mypkt;
+ bool write = rw ? SDIOH_WRITE : SDIOH_READ;
+ int err;
+
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
- (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
- addr, 4, nbytes, buf, NULL);
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, buf, nbytes);
+
+ err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
+ SDIO_FUNC_1, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!err && !write)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
}
int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -332,7 +432,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->sbwad = SI_ENUM_BASE;
/* try to attach to the target device */
- sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+ sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
if (!sdiodev->bus) {
brcmf_dbg(ERROR, "device attach failed\n");
ret = -ENODEV;
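
The new brcmf_sdcard_recv_prepare() splits an absolute backplane address into a window base plus an in-window offset, re-programs the window register only when the base changes, and tags 4-byte accesses with a flag bit. A rough userspace model of that split follows; the mask and flag values are placeholders standing in for the SBSDIO_* constants, and the window write is reduced to a printf.

/*
 * Model of an SDIO backplane address window: split the address into
 * a window base and an offset, touch the (simulated) window register
 * only when the base moves, and mark 32-bit accesses.
 */
#include <stdint.h>
#include <stdio.h>

#define OFT_ADDR_MASK	0x07fffu	/* in-window offset bits (placeholder) */
#define ACCESS_4B_FLAG	0x08000u	/* 32-bit access marker (placeholder) */

static uint32_t cur_window;		/* models sdiodev->sbwad */

static void set_window(uint32_t base)
{
	printf("program window to 0x%08x\n", base);
	cur_window = base;
}

static uint32_t prepare_addr(uint32_t addr, unsigned width)
{
	uint32_t base = addr & ~OFT_ADDR_MASK;

	if (base != cur_window)		/* only touch hw when the window moves */
		set_window(base);

	addr &= OFT_ADDR_MASK;
	if (width == 4)
		addr |= ACCESS_4B_FLAG;
	return addr;
}

int main(void)
{
	printf("offset = 0x%05x\n", prepare_addr(0x18009abc, 4));
	printf("offset = 0x%05x\n", prepare_addr(0x18009ac0, 2));	/* same window */
	return 0;
}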
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index bbaeb2d5c93..b895f198a95 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -40,6 +40,7 @@
#define DMA_ALIGN_MASK 0x03
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -47,6 +48,7 @@
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -204,62 +206,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
return err_ret;
}
+/* precondition: host controller is claimed */
static int
-brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
- uint write, uint func, uint addr,
- struct sk_buff *pkt)
+brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
+ uint func, uint addr, struct sk_buff *pkt, uint pktlen)
+{
+ int err_ret = 0;
+
+ if ((write) && (!fifo)) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (write) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (fifo) {
+ err_ret = sdio_readsb(sdiodev->func[func],
+ ((u8 *) (pkt->data)), addr, pktlen);
+ } else {
+ err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+ ((u8 *) (pkt->data)),
+ addr, pktlen);
+ }
+
+ return err_ret;
+}
+
+/*
+ * This function takes a queue of packets. The packets on the queue
+ * are assumed to be properly aligned by the caller.
+ */
+int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
int err_ret = 0;
- struct sk_buff *pnext;
+ struct sk_buff *pkt;
brcmf_dbg(TRACE, "Enter\n");
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
/* Claim host controller */
sdio_claim_host(sdiodev->func[func]);
- for (pnext = pkt; pnext; pnext = pnext->next) {
- uint pkt_len = pnext->len;
+
+ skb_queue_walk(pktq, pkt) {
+ uint pkt_len = pkt->len;
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
- if ((write) && (!fifo)) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (write) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (fifo) {
- err_ret = sdio_readsb(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- } else {
- err_ret = sdio_memcpy_fromio(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- }
-
+ err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
if (err_ret) {
brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len, err_ret);
} else {
brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len);
}
-
if (!fifo)
addr += pkt_len;
- SGCount++;
+ SGCount++;
}
/* Release host controller */
@@ -270,91 +285,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
}
/*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case, it is copied to a new
- * aligned packet.
- *
+ * This function takes a single DMA-able packet.
*/
int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint write, uint func, uint addr,
- uint reg_width, uint buflen_u, u8 *buffer,
struct sk_buff *pkt)
{
- int Status;
- struct sk_buff *mypkt = NULL;
+ int status;
+ uint pkt_len;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
brcmf_dbg(TRACE, "Enter\n");
+ if (pkt == NULL)
+ return -EINVAL;
+
+ pkt_len = pkt->len;
+
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Case 1: we don't have a packet. */
- if (pkt == NULL) {
- brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
- write ? "TX" : "RX", buflen_u);
- mypkt = brcmu_pkt_buf_get_skb(buflen_u);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- buflen_u);
- return -EIO;
- }
-
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, buffer, buflen_u);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
-
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(buffer, mypkt->data, buflen_u);
-
- brcmu_pkt_buf_free_skb(mypkt);
- } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
- /*
- * Case 2: We have a packet, but it is unaligned.
- * In this case, we cannot have a chain (pkt->next == NULL)
- */
- brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
- write ? "TX" : "RX", pkt->len);
- mypkt = brcmu_pkt_buf_get_skb(pkt->len);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- pkt->len);
- return -EIO;
- }
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, pkt->data, pkt->len);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(pkt->data, mypkt->data, mypkt->len);
+ pkt_len += 3;
+ pkt_len &= (uint)~3;
- brcmu_pkt_buf_free_skb(mypkt);
- } else { /* case 3: We have a packet and
- it is aligned. */
- brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
- write ? "Tx" : "Rx");
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, pkt);
+ status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
+ if (status) {
+ brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len, status);
+ } else {
+ brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len);
}
- return Status;
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ return status;
}
/* Read client card reg */
@@ -494,6 +463,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
{
int ret = 0;
struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_bus *bus_if;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(TRACE, "func->class=%x\n", func->class);
brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,17 +475,25 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(ERROR, "card private drvdata occupied\n");
return -ENXIO;
}
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev)
+ if (!sdiodev) {
+ kfree(bus_if);
return -ENOMEM;
+ }
sdiodev->func[0] = func->card->sdio_func[0];
sdiodev->func[1] = func;
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv = sdiodev;
+ bus_if->type = SDIO_BUS;
dev_set_drvdata(&func->card->dev, sdiodev);
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait);
init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_packet_wait);
+ init_waitqueue_head(&sdiodev->request_chain_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
}
@@ -525,6 +503,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
return -ENODEV;
sdiodev->func[2] = func;
+ bus_if = sdiodev->bus_if;
+ sdiodev->dev = &func->dev;
+ dev_set_drvdata(&func->dev, bus_if);
+
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
ret = brcmf_sdio_probe(sdiodev);
}
@@ -534,6 +516,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
+ struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +525,13 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
if (func->num == 2) {
- sdiodev = dev_get_drvdata(&func->card->dev);
+ bus_if = dev_get_drvdata(&func->dev);
+ sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev);
dev_set_drvdata(&func->card->dev, NULL);
+ dev_set_drvdata(&func->dev, NULL);
+ kfree(bus_if);
kfree(sdiodev);
}
}
@@ -554,14 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
static int brcmf_sdio_suspend(struct device *dev)
{
mmc_pm_flag_t sdio_flags;
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
int ret = 0;
brcmf_dbg(TRACE, "\n");
- sdiodev = dev_get_drvdata(&func->card->dev);
-
atomic_set(&sdiodev->suspend, true);
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
@@ -583,10 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev)
static int brcmf_sdio_resume(struct device *dev)
{
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
- sdiodev = dev_get_drvdata(&func->card->dev);
brcmf_sdio_wdtmr_enable(sdiodev, true);
atomic_set(&sdiodev->suspend, false);
return 0;
@@ -610,17 +593,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
#endif /* CONFIG_PM_SLEEP */
};
-/* bus register interface */
-int brcmf_bus_register(void)
+static void __exit brcmf_sdio_exit(void)
{
brcmf_dbg(TRACE, "Enter\n");
- return sdio_register_driver(&brcmf_sdmmc_driver);
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
}
-void brcmf_bus_unregister(void)
+static int __init brcmf_sdio_init(void)
{
+ int ret;
+
brcmf_dbg(TRACE, "Enter\n");
- sdio_unregister_driver(&brcmf_sdmmc_driver);
+ ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+ if (ret)
+ brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+ return ret;
}
+
+module_init(brcmf_sdio_init);
+module_exit(brcmf_sdio_exit);
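Both brcmf_sdioh_request_chain() and brcmf_sdioh_request_buffer() above round the CMD53 transfer length up to a 4-byte boundary (pkt_len += 3; pkt_len &= ~3). A minimal illustrative sketch of that rounding, not part of this commit; the helper name sdio_pad_len4 is hypothetical:

/* Round a transfer length up to the next multiple of four bytes,
 * matching the pkt_len += 3; pkt_len &= ~3 sequence used above.
 * e.g. 1501 -> 1504, 64 -> 64. */
static inline unsigned int sdio_pad_len4(unsigned int len)
{
	return (len + 3) & ~3u;
}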
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 6da519e7578..ed60f4d6962 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -87,7 +87,7 @@
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
-#define BRCMF_BSS_INFO_VERSION 108 /* curr ver of brcmf_bss_info_le struct */
+#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -571,8 +571,14 @@ struct brcmf_dcmd {
uint needed; /* bytes needed (optional) */
};
+struct brcmf_bus {
+ u8 type; /* bus type */
+ void *bus_priv; /* pointer to bus private structure */
+ enum brcmf_bus_state state;
+};
+
/* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus; /* device bus info */
+struct brcmf_sdio; /* device bus info */
struct brcmf_proto; /* device communication protocol info */
struct brcmf_info; /* device driver info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
@@ -580,15 +586,16 @@ struct brcmf_cfg80211_dev; /* cfg80211 device info */
/* Common structure for module and instance linkage */
struct brcmf_pub {
/* Linkage pointers */
- struct brcmf_bus *bus;
+ struct brcmf_sdio *bus;
+ struct brcmf_bus *bus_if;
struct brcmf_proto *prot;
struct brcmf_info *info;
struct brcmf_cfg80211_dev *config;
+ struct device *dev; /* fullmac dongle device pointer */
/* Internal brcmf items */
bool up; /* Driver up/down (to OS) */
bool txoff; /* Transmit flow-controlled */
- enum brcmf_bus_state busstate;
uint hdrlen; /* Total BRCMF header length (proto + bus) */
uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
@@ -656,7 +663,6 @@ struct brcmf_pub {
u8 country_code[BRCM_CNTRY_BUF_SZ];
char eventmask[BRCMF_EVENTING_MASK_LEN];
-
};
struct brcmf_if_event {
@@ -681,8 +687,8 @@ extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
* Returned structure should have bus and prot pointers filled in.
* bus_hdrlen specifies required headroom for bus module header.
*/
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
- uint bus_hdrlen);
+extern struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus,
+ uint bus_hdrlen, struct device *dev);
extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
@@ -699,7 +705,16 @@ extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
/* Receive frame for delivery to OS. Callee disposes of rxp. */
extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
- struct sk_buff *rxp, int numpkt);
+ struct sk_buff_head *rxlist);
+static inline void brcmf_rx_packet(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *pkt)
+{
+ struct sk_buff_head q;
+
+ skb_queue_head_init(&q);
+ skb_queue_tail(&q, pkt);
+ brcmf_rx_frame(drvr, ifidx, &q);
+}
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -724,8 +739,6 @@ extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
void *pktdata, struct brcmf_event_msg *,
void **data_ptr);
-extern void brcmf_c_init(void);
-
extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
char *name, u8 *mac_addr);
extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
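The header change above switches brcmf_rx_frame() from an skb chain plus count to a struct sk_buff_head, with brcmf_rx_packet() wrapping the single-packet case. A minimal sketch (not from this commit) of how a bus driver could hand up a burst of frames through the new interface; example_deliver_burst() is a hypothetical name:

static void example_deliver_burst(struct brcmf_pub *drvr, int ifidx,
				  struct sk_buff **skbs, int n)
{
	struct sk_buff_head q;	/* emptied by brcmf_rx_frame(), so a stack head is fine */
	int i;

	skb_queue_head_init(&q);
	for (i = 0; i < n; i++)
		skb_queue_tail(&q, skbs[i]);

	brcmf_rx_frame(drvr, ifidx, &q);	/* callee disposes of the skbs */
}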
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a249407c9a1..1841f996110 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -27,31 +27,24 @@
* Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
*/
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
-
-/* obtain linux device object providing bus function */
-extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
-
/* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+extern void brcmf_sdbrcm_bus_stop(struct device *dev);
/* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+extern int brcmf_sdbrcm_bus_init(struct device *dev);
/* Send a data frame to the dongle. Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+extern int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *txp);
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
*/
extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen);
extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen);
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
#endif /* _BRCMF_BUS_H_ */
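With the prototypes above now taking a struct device *, every SDIO entry point in this series recovers its state the same way: dev_get_drvdata() returns the struct brcmf_bus stored by brcmf_ops_sdio_probe(), bus_priv points at the brcmf_sdio_dev, and sdiodev->bus is the bus-private brcmf_sdio. A minimal sketch of that chain; example_bus_from_dev() is a hypothetical helper, the fields are the ones added by this patch:

static struct brcmf_sdio *example_bus_from_dev(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;

	return sdiodev->bus;
}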
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index e34c5c3d1d5..ebd53aa7202 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd {
* Used on data packets to convey priority across USB.
*/
#define BDC_HEADER_LEN 4
-#define BDC_PROTO_VER 1 /* Protocol version */
+#define BDC_PROTO_VER 2 /* Protocol version */
#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
@@ -77,7 +77,7 @@ struct brcmf_proto_bdc_header {
u8 flags;
u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
u8 flags2;
- u8 rssi;
+ u8 data_offset;
};
@@ -116,7 +116,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
len = CDC_MAX_MSG_SIZE;
/* Send request */
- return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
+ return brcmf_sdbrcm_bus_txctl(drvr->dev, (unsigned char *)&prot->msg,
len);
}
@@ -128,7 +128,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
brcmf_dbg(TRACE, "Enter\n");
do {
- ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+ ret = brcmf_sdbrcm_bus_rxctl(drvr->dev,
(unsigned char *)&prot->msg,
len + sizeof(struct brcmf_proto_cdc_dcmd));
if (ret < 0)
@@ -280,7 +280,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
struct brcmf_proto *prot = drvr->prot;
int ret = -1;
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
return ret;
}
@@ -372,7 +372,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0;
- h->rssi = 0;
+ h->data_offset = 0;
BDC_SET_IF_IDX(h, ifidx);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 40928e58b6a..69f335aeb25 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,8 +32,6 @@
#define PKTFILTER_BUF_SIZE 2048
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
-int brcmf_msg_level;
-
#define MSGTRACE_VERSION 1
#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,19 +83,6 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
return len;
}
-void brcmf_c_init(void)
-{
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behaviour since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
struct sk_buff *pkt, int prec)
{
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 719fd9397eb..58d92bca9ca 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -43,7 +43,6 @@
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "wl_cfg80211.h"
-#include "bcmchip.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -77,6 +76,7 @@ struct brcmf_info {
};
/* Error bits */
+int brcmf_msg_level = BRCMF_ERROR_VAL;
module_param(brcmf_msg_level, int, 0);
int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
@@ -292,7 +292,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
struct brcmf_info *drvr_priv = drvr->info;
/* Reject if down */
- if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+ if (!drvr->up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
return -ENODEV;
/* Update multicast statistic */
@@ -310,7 +310,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
/* Use bus module to send data frame */
- return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+ return brcmf_sdbrcm_bus_txdata(drvr->dev, pktbuf);
}
static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
@@ -322,9 +322,11 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
brcmf_dbg(TRACE, "Enter\n");
/* Reject if down */
- if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
- brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
- drvr_priv->pub.up, drvr_priv->pub.busstate);
+ if (!drvr_priv->pub.up ||
+ (drvr_priv->pub.bus_if->state == BRCMF_BUS_DOWN)) {
+ brcmf_dbg(ERROR, "xmit rejected pub.up=%d state=%d\n",
+ drvr_priv->pub.up,
+ drvr_priv->pub.bus_if->state);
netif_stop_queue(ndev);
return -ENODEV;
}
@@ -397,26 +399,21 @@ static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
return bcmerror;
}
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
- int numpkt)
+void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff_head *skb_list)
{
struct brcmf_info *drvr_priv = drvr->info;
unsigned char *eth;
uint len;
void *data;
- struct sk_buff *pnext, *save_pktbuf;
- int i;
+ struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_event_msg event;
brcmf_dbg(TRACE, "Enter\n");
- save_pktbuf = skb;
-
- for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
- pnext = skb->next;
- skb->next = NULL;
+ skb_queue_walk_safe(skb_list, skb, pnext) {
+ skb_unlink(skb, skb_list);
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
@@ -437,6 +434,12 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
if (ifp == NULL)
ifp = drvr_priv->iflist[0];
+ if (!ifp || !ifp->ndev ||
+ ifp->ndev->reg_state != NETREG_REGISTERED) {
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
+
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
@@ -605,9 +608,7 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
sprintf(info->driver, KBUILD_MODNAME);
sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
- sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
- sprintf(info->bus_info, "%s",
- dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+ sprintf(info->bus_info, "%s", dev_name(drvr_priv->pub.dev));
}
static struct ethtool_ops brcmf_ethtool_ops = {
@@ -761,7 +762,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
/* send to dongle (must be up, and wl) */
- if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+ if ((drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA)) {
brcmf_dbg(ERROR, "DONGLE_DOWN\n");
err = -EIO;
goto done;
@@ -940,7 +941,8 @@ void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
}
}
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus, uint bus_hdrlen,
+ struct device *dev)
{
struct brcmf_info *drvr_priv = NULL;
@@ -959,6 +961,8 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
/* Link to bus module */
drvr_priv->pub.bus = bus;
drvr_priv->pub.hdrlen = bus_hdrlen;
+ drvr_priv->pub.bus_if = dev_get_drvdata(dev);
+ drvr_priv->pub.dev = dev;
/* Attach and link in the protocol */
if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
@@ -988,14 +992,14 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "\n");
/* Bring up the bus */
- ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+ ret = brcmf_sdbrcm_bus_init(drvr_priv->pub.dev);
if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
return ret;
}
/* If bus is not ready, can't come up */
- if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ if (drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA) {
brcmf_dbg(ERROR, "failed bus is not ready\n");
return -ENODEV;
}
@@ -1077,10 +1081,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
/* attach to cfg80211 for primary interface */
if (!ifidx) {
- drvr->config =
- brcmf_cfg80211_attach(ndev,
- brcmf_bus_get_device(drvr->bus),
- drvr);
+ drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
if (drvr->config == NULL) {
brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
goto fail;
@@ -1114,7 +1115,7 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
brcmf_proto_stop(&drvr_priv->pub);
/* Stop the bus module */
- brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
+ brcmf_sdbrcm_bus_stop(drvr_priv->pub.dev);
}
}
}
@@ -1148,34 +1149,6 @@ void brcmf_detach(struct brcmf_pub *drvr)
}
}
-static void __exit brcmf_module_cleanup(void)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
-{
- int error;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- error = brcmf_bus_register();
-
- if (error) {
- brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
- goto failed;
- }
- return 0;
-
-failed:
- return -EINVAL;
-}
-
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
-
int brcmf_os_proto_block(struct brcmf_pub *drvr)
{
struct brcmf_info *drvr_priv = drvr->info;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 22913af26db..43ba0dd4835 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -91,7 +91,6 @@ struct rte_console {
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
-#include <bcmchip.h>
#define TXQLEN 2048 /* bulk tx queue length */
#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@@ -310,6 +309,11 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin"
+#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt"
+MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
+MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
+
/*
* Conversion of 802.1D priority to precedence level
*/
@@ -445,7 +449,7 @@ struct sdpcm_shared_le {
/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
-struct brcmf_bus {
+struct brcmf_sdio {
struct brcmf_pub *drvr;
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
@@ -562,9 +566,7 @@ struct brcmf_bus {
struct semaphore sdsem;
- const char *fw_name;
const struct firmware *firmware;
- const char *nv_name;
u32 fw_ptr;
};
@@ -602,7 +604,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
}
/* To check if there's window offered */
-static bool data_ok(struct brcmf_bus *bus)
+static bool data_ok(struct brcmf_sdio *bus)
{
return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
@@ -613,7 +615,7 @@ static bool data_ok(struct brcmf_bus *bus)
* addresses on the 32 bit backplane bus.
*/
static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
@@ -633,7 +635,7 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
}
static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
@@ -658,14 +660,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
/* Packet free applicable unconditionally for sdio and sdspi.
* Conditional if bufpool was present for gspi bus.
*/
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
if (bus->usebufpool)
brcmu_pkt_buf_free_skb(pkt);
}
/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
@@ -786,7 +788,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -799,7 +801,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
}
/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef BCMDBG
uint oldstate = bus->clkstate;
@@ -855,7 +857,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
return 0;
}
-static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
{
uint retries = 0;
@@ -927,13 +929,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
return 0;
}
-static void bus_wake(struct brcmf_bus *bus)
+static void bus_wake(struct brcmf_sdio *bus)
{
if (bus->sleeping)
brcmf_sdbrcm_bussleep(bus, false);
}
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
u32 hmb_data;
@@ -1009,7 +1011,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
return intstatus;
}
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
uint retries = 0;
u16 lastrbc;
@@ -1066,11 +1068,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
/* If we can't reach the device, signal failure */
if (err || brcmf_sdcard_regfail(bus->sdiodev))
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
/* copy a buffer into a pkt buffer chain */
-static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
+static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
{
uint n, ret = 0;
struct sk_buff *p;
@@ -1093,7 +1095,7 @@ static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
}
/* return total length of buffer chain */
-static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
+static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
{
struct sk_buff *p;
uint total;
@@ -1104,7 +1106,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
return total;
}
-static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
{
struct sk_buff *cur, *next;
@@ -1114,13 +1116,13 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
}
}
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
u16 sublen, check;
- struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+ struct sk_buff *pfirst, *pnext;
int errcode;
u8 chan, seq, doff, sfdoff;
@@ -1137,7 +1139,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
- pfirst = plast = pnext = NULL;
+ pfirst = pnext = NULL;
dlen = (u16) (bus->glomd->len);
dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
@@ -1228,17 +1230,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
* packet and copy into the chain.
*/
if (usechain) {
- errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, (u8 *) pfirst->data, dlen,
- pfirst);
+ SDIO_FUNC_2, F2SYNC, &bus->glom);
} else if (bus->dataptr) {
errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, bus->dataptr, dlen,
- NULL);
+ SDIO_FUNC_2, F2SYNC,
+ bus->dataptr, dlen);
sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
if (sublen != dlen) {
brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
@@ -1338,10 +1337,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Remove superframe header, remember offset */
skb_pull(pfirst, doff);
sfdoff = doff;
+ num = 0;
/* Validate all the subframe headers */
- for (num = 0, pnext = pfirst; pnext && !errcode;
- num++, pnext = pnext->next) {
+ skb_queue_walk(&bus->glom, pnext) {
+ /* leave when invalid subframe is found */
+ if (errcode)
+ break;
+
dptr = (u8 *) (pnext->data);
dlen = (u16) (pnext->len);
sublen = get_unaligned_le16(dptr);
@@ -1374,6 +1377,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, doff, sublen, SDPCM_HDRLEN);
errcode = -1;
}
+ /* increase the subframe count */
+ num++;
}
if (errcode) {
@@ -1394,13 +1399,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
/* Basic SD framing looks ok - process each packet (header) */
- save_pfirst = pfirst;
- plast = NULL;
-
- for (num = 0; pfirst; rxseq++, pfirst = pnext) {
- pnext = pfirst->next;
- pfirst->next = NULL;
+ skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1420,6 +1420,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
bus->rx_badseq++;
rxseq = seq;
}
+ rxseq++;
+
#ifdef BCMDBG
if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1432,36 +1434,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
skb_pull(pfirst, doff);
if (pfirst->len == 0) {
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
} else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
pfirst) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
bus->drvr->rx_errors++;
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
}
- /* this packet will go up, link back into
- chain and count it */
- pfirst->next = pnext;
- plast = pfirst;
- num++;
-
#ifdef BCMDBG
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
- num, pfirst, pfirst->data,
+ bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1470,19 +1458,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
#endif /* BCMDBG */
}
- if (num) {
+ /* send any remaining packets up */
+ if (bus->glom.qlen) {
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+ brcmf_rx_frame(bus->drvr, ifidx, &bus->glom);
down(&bus->sdsem);
}
bus->rxglomframes++;
- bus->rxglompkts += num;
+ bus->rxglompkts += bus->glom.qlen;
}
return num;
}
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
bool *pending)
{
DECLARE_WAITQUEUE(wait, current);
@@ -1504,7 +1493,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
return timeout;
}
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1512,7 +1501,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
return 0;
}
static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
@@ -1570,8 +1559,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
- F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
- NULL);
+ F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
bus->f2rxdata++;
/* Control frame failures need retransmission */
@@ -1602,7 +1590,7 @@ done:
}
/* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1615,7 +1603,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
}
static void
-brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
struct sk_buff **pkt, u8 **rxbuf)
{
int sdret; /* Return code from calls */
@@ -1627,9 +1615,8 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
*rxbuf = (u8 *) ((*pkt)->data);
/* Read the entire frame */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC,
- *rxbuf, rdlen, *pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, *pkt);
bus->f2rxdata++;
if (sdret < 0) {
@@ -1648,7 +1635,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
/* Checks the header */
static int
-brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
u8 rxseq, u16 nextlen, u16 *len)
{
u16 check;
@@ -1704,7 +1691,7 @@ fail:
/* Return true if there may be more frames to read */
static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
{
u16 len, check; /* Extracted hardware header fields */
u8 chan, seq, doff; /* Extracted software header fields */
@@ -1727,7 +1714,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
*finished = false;
for (rxseq = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+ !bus->rxskip && rxleft &&
+ bus->drvr->bus_if->state != BRCMF_BUS_DOWN;
rxseq++, rxleft--) {
/* Handle glomming separately */
@@ -1857,7 +1845,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Read frame header (hardware and software) */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
- BRCMF_FIRSTREAD, NULL);
+ BRCMF_FIRSTREAD);
bus->f2rxhdrs++;
if (sdret < 0) {
@@ -2006,9 +1994,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
pkt_align(pkt, rdlen, BRCMF_SDALIGN);
/* Read the remaining frame data */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
- rdlen, pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2rxdata++;
if (sdret < 0) {
@@ -2075,7 +2062,7 @@ deliver:
/* Unlock during rx call */
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+ brcmf_rx_packet(bus->drvr, ifidx, pkt);
down(&bus->sdsem);
}
rxcount = maxframes - rxleft;
@@ -2095,16 +2082,8 @@ deliver:
return rxcount;
}
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
-{
- return brcmf_sdcard_send_buf
- (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
-}
-
static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
{
up(&bus->sdsem);
wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2114,7 +2093,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
}
static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
@@ -2123,7 +2102,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan, bool free_pkt)
{
int ret;
@@ -2212,9 +2191,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (len & (ALIGNMENT - 1))
len = roundup(len, ALIGNMENT);
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame,
- len, pkt);
+ ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2txdata++;
if (ret < 0) {
@@ -2261,7 +2239,7 @@ done:
return ret;
}
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
@@ -2309,14 +2287,14 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
+ if (drvr->up && (drvr->bus_if->state == BRCMF_BUS_DATA) &&
drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
brcmf_txflowcontrol(drvr, 0, OFF);
return cnt;
}
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
u32 intstatus, newstatus = 0;
uint retries = 0;
@@ -2344,7 +2322,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
#endif /* BCMDBG */
@@ -2354,7 +2332,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading CSR: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2367,7 +2345,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2375,7 +2353,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
} else {
@@ -2477,9 +2455,9 @@ clkwait:
(bus->clkstate == CLK_AVAIL)) {
int ret, i;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len, NULL);
+ (u32) bus->ctrl_frame_len);
if (ret < 0) {
/* On failure, abort the command and
@@ -2531,11 +2509,11 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
- if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+ if ((bus->drvr->bus_if->state == BRCMF_BUS_DOWN) ||
brcmf_sdcard_regfail(bus->sdiodev)) {
brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
brcmf_sdcard_regfail(bus->sdiodev));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2562,7 +2540,7 @@ clkwait:
static int brcmf_sdbrcm_dpc_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -2572,12 +2550,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
/* Call bus dpc unless it indicated down
(then clean stop) */
- if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (bus->drvr->bus_if->state != BRCMF_BUS_DOWN) {
if (brcmf_sdbrcm_dpc(bus))
complete(&bus->dpc_wait);
} else {
/* after stopping the bus, exit thread */
- brcmf_sdbrcm_bus_stop(bus);
+ brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
bus->dpc_tsk = NULL;
break;
}
@@ -2587,10 +2565,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
return 0;
}
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2638,7 +2619,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
uint size)
{
int bcmerror = 0;
@@ -2699,7 +2680,7 @@ xfer_done:
#ifdef BCMDBG
#define CONSOLE_LINE_MAX 192
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
{
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@@ -2776,14 +2757,14 @@ break2:
}
#endif /* BCMDBG */
-static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
int i;
int ret;
bus->ctrl_frame_stat = false;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame, len);
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
@@ -2819,7 +2800,7 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
}
int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
@@ -2827,6 +2808,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
uint retries = 0;
u8 doff = 0;
int ret = -1;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2934,11 +2918,14 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
}
int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
bool pending;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2971,7 +2958,7 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
{
int bcmerror = 0;
@@ -3004,7 +2991,7 @@ err:
return bcmerror;
}
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
{
int bcmerror = 0;
u32 varsize;
@@ -3091,7 +3078,7 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
return bcmerror;
}
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
uint retries;
int bcmerror = 0;
@@ -3134,13 +3121,13 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = false;
- bus->drvr->busstate = BRCMF_BUS_LOAD;
+ bus->drvr->bus_if->state = BRCMF_BUS_LOAD;
}
fail:
return bcmerror;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
{
if (bus->firmware->size < bus->fw_ptr + len)
len = bus->firmware->size - bus->fw_ptr;
@@ -3150,10 +3137,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
return len;
}
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
int offset = 0;
uint len;
@@ -3162,8 +3146,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
brcmf_dbg(INFO, "Enter\n");
- bus->fw_name = BCM4329_FW_NAME;
- ret = request_firmware(&bus->firmware, bus->fw_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3253,15 +3236,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
return buf_len;
}
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
uint len;
char *memblock = NULL;
char *bufp;
int ret;
- bus->nv_name = BCM4329_NV_NAME;
- ret = request_firmware(&bus->firmware, bus->nv_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3301,7 +3283,7 @@ err:
return ret;
}
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
int bcmerror = -1;
@@ -3334,7 +3316,7 @@ err:
}
static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
bool ret;
@@ -3348,12 +3330,15 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
return ret;
}
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
+void brcmf_sdbrcm_bus_stop(struct device *dev)
{
u32 local_hostintmask;
u8 saveclk;
uint retries;
int err;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3382,7 +3367,7 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
bus->hostintmask = 0;
/* Change our idea of bus state */
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
/* Force clocks on backplane to be sure F2 interrupt propagates */
saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
@@ -3426,9 +3411,11 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
up(&bus->sdsem);
}
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
+int brcmf_sdbrcm_bus_init(struct device *dev)
{
- struct brcmf_bus *bus = drvr->bus;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
uint retries = 0;
u8 ready, enable;
@@ -3438,7 +3425,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
/* try to download image and nvram to the dongle */
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus_if->state == BRCMF_BUS_DOWN) {
if (!(brcmf_sdbrcm_download_firmware(bus)))
return -1;
}
@@ -3504,7 +3491,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_WATERMARK, 8, &err);
/* Set bus state according to enable result */
- drvr->busstate = BRCMF_BUS_DATA;
+ bus_if->state = BRCMF_BUS_DATA;
}
else {
@@ -3519,7 +3506,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
/* If we didn't come up, turn off backplane clock */
- if (drvr->busstate != BRCMF_BUS_DATA)
+ if (bus_if->state != BRCMF_BUS_DATA)
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
@@ -3530,7 +3517,7 @@ exit:
void brcmf_sdbrcm_isr(void *arg)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
brcmf_dbg(TRACE, "Enter\n");
@@ -3539,7 +3526,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
- if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
return;
}
@@ -3562,14 +3549,14 @@ void brcmf_sdbrcm_isr(void *arg)
complete(&bus->dpc_wait);
}
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
{
- struct brcmf_bus *bus;
+#ifdef BCMDBG
+ struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
+#endif /* BCMDBG */
brcmf_dbg(TIMER, "Enter\n");
- bus = drvr->bus;
-
/* Ignore the timer if simulating bus down */
if (bus->sleeping)
return false;
@@ -3613,7 +3600,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
}
#ifdef BCMDBG
/* Poll for console output periodically */
- if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+ if (bus_if->state == BRCMF_BUS_DATA &&
+ bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
@@ -3648,10 +3636,12 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{
if (chipid == BCM4329_CHIP_ID)
return true;
+ if (chipid == BCM4330_CHIP_ID)
+ return true;
return false;
}
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3663,7 +3653,7 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
bus->databuf = NULL;
}
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3699,7 +3689,7 @@ fail:
}
static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
{
u8 clkctl = 0;
int err = 0;
@@ -3784,7 +3774,7 @@ fail:
return false;
}
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3792,7 +3782,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
@@ -3819,7 +3809,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
static int
brcmf_sdbrcm_watchdog_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -3827,7 +3817,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- brcmf_sdbrcm_bus_watchdog(bus->drvr);
+ brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
bus->drvr->tickcnt++;
} else
@@ -3839,7 +3829,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
static void
brcmf_sdbrcm_watchdog(unsigned long data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@@ -3850,7 +3840,7 @@ brcmf_sdbrcm_watchdog(unsigned long data)
}
}
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3867,7 +3857,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
}
/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3889,21 +3879,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
brcmf_dbg(TRACE, "Disconnected\n");
}
-void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev)
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
- struct brcmf_bus *bus;
-
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behavior since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_c_init();
+ struct brcmf_sdio *bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3911,7 +3890,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
* regsva == SI_ENUM_BASE*/
/* Allocate private bus interface state */
- bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
goto fail;
@@ -3963,7 +3942,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
}
/* Attach to the brcmf/OS/network interface */
- bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
+ bus->drvr = brcmf_attach(bus, SDPCM_RESERVE, bus->sdiodev->dev);
if (!bus->drvr) {
brcmf_dbg(ERROR, "brcmf_attach failed\n");
goto fail;
@@ -4015,7 +3994,7 @@ fail:
void brcmf_sdbrcm_disconnect(void *ptr)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
brcmf_dbg(TRACE, "Enter\n");
@@ -4025,13 +4004,8 @@ void brcmf_sdbrcm_disconnect(void *ptr)
brcmf_dbg(TRACE, "Disconnected\n");
}
-struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
-{
- return &bus->sdiodev->func[2]->dev;
-}
-
void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid == true) {
@@ -4042,7 +4016,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
}
/* don't start the wd until fw is loaded */
- if (bus->drvr->busstate == BRCMF_BUS_DOWN)
+ if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN)
return;
if (wdtick) {
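The rxglom rework above replaces the hand-maintained plast/pnext pointer surgery with skb_queue_walk()/skb_queue_walk_safe() over bus->glom. A minimal sketch (not from this commit) of the unlink-and-free idiom it relies on; example_drop_empty() is a hypothetical name, and the _safe variant is what permits removing the current skb mid-walk:

static void example_drop_empty(struct sk_buff_head *q)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(q, skb, tmp) {
		if (skb->len == 0) {
			skb_unlink(skb, q);	/* detach before freeing */
			brcmu_pkt_buf_free_skb(skb);
		}
	}
}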
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index f6b1822031f..a6048d78d29 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -59,37 +59,17 @@ struct sdiod_drive_str {
u8 strength; /* Pad Drive Strength in mA */
u8 sel; /* Chip-specific select value */
};
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
- {
- 4, 0x2}, {
- 2, 0x3}, {
- 1, 0x0}, {
- 0, 0x0}
- };
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
- {
- 12, 0x7}, {
- 10, 0x6}, {
- 8, 0x5}, {
- 6, 0x4}, {
- 4, 0x2}, {
- 2, 0x1}, {
- 0, 0x0}
- };
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
- {
- 32, 0x7}, {
- 26, 0x6}, {
- 22, 0x5}, {
- 16, 0x4}, {
- 12, 0x3}, {
- 8, 0x2}, {
- 4, 0x1}, {
- 0, 0x0}
- };
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
u8
brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
@@ -396,6 +376,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
ci->ramsize = BCM4329_RAMSIZE;
break;
+ case BCM4330_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x07004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x0d080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->ramsize = 0x48000;
+ break;
default:
brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
@@ -569,19 +566,8 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
return;
switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
- str_mask = 0x30000000;
- str_shift = 28;
- break;
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
str_mask = 0x00003800;
str_shift = 11;
break;
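
The replacement table above pairs a pad drive strength in mA with a chip-specific select value, and the switch now keys on the BCM4330 / PMU rev 12 combination with mask 0x00003800 and shift 11. As a rough illustration of how such a table is consumed (a sketch only, with a hypothetical helper name; the actual register write path inside brcmf_sdio_chip_drivestrengthinit is not shown here):

/* Illustrative helper: pick the first entry whose strength (mA) does
 * not exceed the request, then merge its 'sel' code into the register
 * field selected by the mask/shift chosen in the switch above.
 */
static u32 drvstr_to_regval(const struct sdiod_drive_str *tab, int n,
			    u32 reg, u8 want_ma, u32 mask, u32 shift)
{
	u8 sel = tab[n - 1].sel;	/* weakest setting as fallback */
	int i;

	for (i = 0; i < n; i++) {
		if (want_ma >= tab[i].strength) {
			sel = tab[i].sel;
			break;
		}
	}
	return (reg & ~mask) | (sel << shift);
}

/* e.g. drvstr_to_regval(sdiod_drvstr_tab1_1v8,
 *			 ARRAY_SIZE(sdiod_drvstr_tab1_1v8),
 *			 reg, 12, 0x00003800, 11)
 * selects 0x2 for a 12 mA request.
 */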
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 726fa898111..d36a2a855a6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -132,9 +132,10 @@ struct brcmf_sdio_dev {
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
- wait_queue_head_t request_packet_wait;
+ wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait;
-
+ struct device *dev;
+ struct brcmf_bus *bus_if;
};
/* Register/deregister device interrupt handler. */
@@ -182,11 +183,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
* NOTE: Async operation is not currently supported.
*/
extern int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
+extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+
+extern int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
extern int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+extern int
+brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq);
/* Flags bits */
@@ -237,16 +248,18 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
/* read or write any buffer using cmd53 */
extern int
brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
- uint fix_inc, uint rw, uint fnc_num,
- u32 addr, uint regwidth,
- u32 buflen, u8 *buffer, struct sk_buff *pkt);
+ uint fix_inc, uint rw, uint fnc_num, u32 addr,
+ struct sk_buff *pkt);
+extern int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq);
/* Watchdog timer interface for pm ops */
extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
bool enable);
-extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
extern void brcmf_sdbrcm_disconnect(void *ptr);
extern void brcmf_sdbrcm_isr(void *arg);
#endif /* _BRCM_SDH_H_ */
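
With the header now split into buffer, single-packet and packet-chain variants, a receive path that gathers several sub-frames in one transfer would hand the new chain helper a pre-built sk_buff queue. A minimal, hypothetical caller (flag values, buffer sizing and error handling are placeholders; the real contract is defined by the SDIO glom code that uses these prototypes):

#include <linux/skbuff.h>

static int example_recv_glom(struct brcmf_sdio_dev *sdiodev, u32 addr,
			     uint fn, uint flags,
			     struct sk_buff_head *pktq)
{
	int err;

	/* pktq is assumed to already contain skbs sized for the
	 * expected sub-frames; brcmf_sdcard_recv_chain() fills them
	 * in one cmd53 transfer instead of one call per buffer.
	 */
	err = brcmf_sdcard_recv_chain(sdiodev, addr, fn, flags, pktq);
	if (err)
		return err;	/* caller still owns and frees pktq */

	return 0;
}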
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index cc19a733ac6..f23b0c3e4ea 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type, s32 dbm)
+ enum nl80211_tx_power_setting type, s32 mbm)
{
struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
+ s32 dbm = MBM_TO_DBM(mbm);
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
case NL80211_TX_POWER_AUTOMATIC:
break;
case NL80211_TX_POWER_LIMITED:
- if (dbm < 0) {
- WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
- err = -EINVAL;
- goto done;
- }
- break;
case NL80211_TX_POWER_FIXED:
if (dbm < 0) {
WL_ERR("TX_POWER_FIXED - dbm is negative\n");
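
The callback parameter changes from dBm to mBm (hundredths of a dBm), which is what nl80211 passes down, and the conversion is done once at the top of the function. For reference, cfg80211's helper is just an integer division, so the sign is preserved and the negative-power check that now covers both LIMITED and FIXED still works:

/* roughly what MBM_TO_DBM() boils down to in cfg80211's headers */
#define EXAMPLE_MBM_TO_DBM(mbm)	((mbm) / 100)

/* 1750 mBm -> 17 dBm; -200 mBm -> -2 dBm, which trips the dbm < 0 check */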
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 39e305443d7..ab9bb11abfb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -318,37 +318,13 @@
#define BADIDX (SI_MAXCORES + 1)
-/* Newer chips can access PCI/PCIE and CC core without requiring to change
- * PCI BAR0 WIN
- */
-#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
- (((si)->pub.buscoretype == PCI_CORE_ID) && \
- (si)->pub.buscorerev >= 13))
-
-#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
- PCI_16KB0_CCREGS_OFFSET))
-
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-/*
- * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
- * before after core switching to avoid invalid register accesss inside ISR.
- */
-#define INTR_OFF(si, intr_val) \
- if ((si)->intrsoff_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
-
-#define INTR_RESTORE(si, intr_val) \
- if ((si)->intrsrestore_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
+#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID)
+#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
-#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID)
-#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
+#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
#ifdef BCMDBG
#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
@@ -360,9 +336,6 @@
(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
IS_ALIGNED((x), SI_CORE_SIZE))
-#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
- PCI_16KB0_PCIREGS_OFFSET)
-
struct aidmp {
u32 oobselina30; /* 0x000 */
u32 oobselina74; /* 0x004 */
@@ -481,406 +454,13 @@ struct aidmp {
u32 componentid3; /* 0xffc */
};
-/* EROM parsing */
-
-static u32
-get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
-{
- u32 ent;
- uint inv = 0, nom = 0;
-
- while (true) {
- ent = R_REG(*eromptr);
- (*eromptr)++;
-
- if (mask == 0)
- break;
-
- if ((ent & ER_VALID) == 0) {
- inv++;
- continue;
- }
-
- if (ent == (ER_END | ER_VALID))
- break;
-
- if ((ent & mask) == match)
- break;
-
- nom++;
- }
-
- return ent;
-}
-
-static u32
-get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
- u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
-{
- u32 asd, sz, szd;
-
- asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
- if (((asd & ER_TAG1) != ER_ADD) ||
- (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
- ((asd & AD_ST_MASK) != st)) {
- /* This is not what we want, "push" it back */
- (*eromptr)--;
- return 0;
- }
- *addrl = asd & AD_ADDR_MASK;
- if (asd & AD_AG32)
- *addrh = get_erom_ent(sih, eromptr, 0, 0);
- else
- *addrh = 0;
- *sizeh = 0;
- sz = asd & AD_SZ_MASK;
- if (sz == AD_SZ_SZD) {
- szd = get_erom_ent(sih, eromptr, 0, 0);
- *sizel = szd & SD_SZ_MASK;
- if (szd & SD_SG32)
- *sizeh = get_erom_ent(sih, eromptr, 0, 0);
- } else
- *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
-
- return asd;
-}
-
-static void ai_hwfixup(struct si_info *sii)
-{
-}
-
-/* parse the enumeration rom to identify all cores */
-static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- u32 erombase;
- u32 __iomem *eromptr, *eromlim;
- void __iomem *regs = cc;
-
- erombase = R_REG(&cc->eromptr);
-
- /* Set wrappers address */
- sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
-
- /* Now point the window at the erom */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
- eromptr = regs;
- eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
-
- while (eromptr < eromlim) {
- u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
- u32 mpd, asd, addrl, addrh, sizel, sizeh;
- u32 __iomem *base;
- uint i, j, idx;
- bool br;
-
- br = false;
-
- /* Grok a component */
- cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
- if (cia == (ER_END | ER_VALID)) {
- /* Found END of erom */
- ai_hwfixup(sii);
- return;
- }
- base = eromptr - 1;
- cib = get_erom_ent(sih, &eromptr, 0, 0);
-
- if ((cib & ER_TAG) != ER_CI) {
- /* CIA not followed by CIB */
- goto error;
- }
-
- cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
- mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
- crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
- nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
- nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
- nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
- nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
- if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
- continue;
- if ((nmw + nsw == 0)) {
- /* A component which is not a core */
- if (cid == OOB_ROUTER_CORE_ID) {
- asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd != 0)
- sii->oob_router = addrl;
- }
- continue;
- }
-
- idx = sii->numcores;
-/* sii->eromptr[idx] = base; */
- sii->cia[idx] = cia;
- sii->cib[idx] = cib;
- sii->coreid[idx] = cid;
-
- for (i = 0; i < nmp; i++) {
- mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
- if ((mpd & ER_TAG) != ER_MP) {
- /* Not enough MP entries for component */
- goto error;
- }
- }
-
- /* First Slave Address Descriptor should be port 0:
- * the main register space for the core
- */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
- &sizel, &sizeh);
- if (asd == 0) {
- /* Try again to see if it is a bridge */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd != 0)
- br = true;
- else if ((addrh != 0) || (sizeh != 0)
- || (sizel != SI_CORE_SIZE)) {
- /* First Slave ASD for core malformed */
- goto error;
- }
- }
- sii->coresba[idx] = addrl;
- sii->coresba_size[idx] = sizel;
- /* Get any more ASDs in port 0 */
- j = 1;
- do {
- asd =
- get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
- &addrh, &sizel, &sizeh);
- if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
- sii->coresba2[idx] = addrl;
- sii->coresba2_size[idx] = sizel;
- }
- j++;
- } while (asd != 0);
-
- /* Go through the ASDs for other slave ports */
- for (i = 1; i < nsp; i++) {
- j = 0;
- do {
- asd =
- get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- } while (asd != 0);
- if (j == 0) {
- /* SP has no address descriptors */
- goto error;
- }
- }
-
- /* Now get master wrappers */
- for (i = 0; i < nmw; i++) {
- asd =
- get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for MW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Master wrapper %d is not 4KB */
- goto error;
- }
- if (i == 0)
- sii->wrapba[idx] = addrl;
- }
-
- /* And finally slave wrappers */
- for (i = 0; i < nsw; i++) {
- uint fwp = (nsp == 1) ? 0 : 1;
- asd =
- get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for SW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Slave wrapper is not 4KB */
- goto error;
- }
- if ((nmw == 0) && (i == 0))
- sii->wrapba[idx] = addrl;
- }
-
- /* Don't record bridges */
- if (br)
- continue;
-
- /* Done with core */
- sii->numcores++;
- }
-
- error:
- /* Reached end of erom without finding END */
- sii->numcores = 0;
- return;
-}
-
-/*
- * This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address. Since each core starts with the
- * same set of registers (BIST, clock control, etc), the returned address
- * contains the first register of this 'common' register block (not to be
- * confused with 'common core').
- */
-void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 addr = sii->coresba[coreidx];
- u32 wrap = sii->wrapba[coreidx];
-
- if (coreidx >= sii->numcores)
- return NULL;
-
- /* point bar0 window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
- /* point bar0 2nd 4KB window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
- sii->curidx = coreidx;
-
- return sii->curmap;
-}
-
-/* Return the number of address spaces in current core */
-int ai_numaddrspaces(struct si_pub *sih)
-{
- return 2;
-}
-
-/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba[cidx];
- else if (asidx == 1)
- return sii->coresba2[cidx];
- else {
- /* Need to parse the erom again to find addr space */
- return 0;
- }
-}
-
-/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba_size[cidx];
- else if (asidx == 1)
- return sii->coresba2_size[cidx];
- else {
- /* Need to parse the erom again to find addr */
- return 0;
- }
-}
-
-uint ai_flag(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return R_REG(&ai->oobselouta30) & 0x1f;
-}
-
-void ai_setint(struct si_pub *sih, int siflag)
-{
-}
-
-uint ai_corevendor(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cia;
-
- sii = (struct si_info *)sih;
- cia = sii->cia[sii->curidx];
- return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cib;
-
- sii = (struct si_info *)sih;
- cib = sii->cib[sii->curidx];
- return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-bool ai_iscoreup(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
- SICF_CLOCK_EN)
- && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
-}
-
-void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-}
-
-u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-
- return R_REG(&ai->ioctrl);
-}
-
/* return true if PCIE capability exists in the pci config space */
static bool ai_ispcie(struct si_info *sii)
{
u8 cap_ptr;
cap_ptr =
- pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
+ pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
NULL);
if (!cap_ptr)
return false;
@@ -896,117 +476,69 @@ static bool ai_buscore_prep(struct si_info *sii)
return true;
}
-u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->iostatus) & ~mask) | val);
- W_REG(&ai->iostatus, w);
- }
-
- return R_REG(&ai->iostatus);
-}
-
static bool
-ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
+ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
{
- bool pci, pcie;
- uint i;
- uint pciidx, pcieidx, pcirev, pcierev;
- struct chipcregs __iomem *cc;
+ struct bcma_device *pci = NULL;
+ struct bcma_device *pcie = NULL;
+ struct bcma_device *core;
- cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
+
+ /* no cores found, bail out */
+ if (cc->bus->nr_cores == 0)
+ return false;
/* get chipcommon rev */
- sii->pub.ccrev = (int)ai_corerev(&sii->pub);
+ sii->pub.ccrev = cc->id.rev;
/* get chipcommon chipstatus */
- if (sii->pub.ccrev >= 11)
- sii->pub.chipst = R_REG(&cc->chipstatus);
+ if (ai_get_ccrev(&sii->pub) >= 11)
+ sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
/* get chipcommon capabilities */
- sii->pub.cccaps = R_REG(&cc->capabilities);
- /* get chipcommon extended capabilities */
-
- if (sii->pub.ccrev >= 35)
- sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
+ sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
/* get pmu rev and caps */
- if (sii->pub.cccaps & CC_CAP_PMU) {
- sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
+ sii->pub.pmucaps = bcma_read32(cc,
+ CHIPCREGOFFS(pmucapabilities));
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
- /* figure out bus/orignal core idx */
- sii->pub.buscoretype = NODEV_CORE_ID;
- sii->pub.buscorerev = NOREV;
- sii->pub.buscoreidx = BADIDX;
-
- pci = pcie = false;
- pcirev = pcierev = NOREV;
- pciidx = pcieidx = BADIDX;
-
- for (i = 0; i < sii->numcores; i++) {
+ /* figure out buscore */
+ list_for_each_entry(core, &cc->bus->cores, list) {
uint cid, crev;
- ai_setcoreidx(&sii->pub, i);
- cid = ai_coreid(&sii->pub);
- crev = ai_corerev(&sii->pub);
+ cid = core->id.id;
+ crev = core->id.rev;
if (cid == PCI_CORE_ID) {
- pciidx = i;
- pcirev = crev;
- pci = true;
+ pci = core;
} else if (cid == PCIE_CORE_ID) {
- pcieidx = i;
- pcierev = crev;
- pcie = true;
+ pcie = core;
}
-
- /* find the core idx before entering this func. */
- if ((savewin && (savewin == sii->coresba[i])) ||
- (cc == sii->regs[i]))
- *origidx = i;
}
if (pci && pcie) {
if (ai_ispcie(sii))
- pci = false;
+ pci = NULL;
else
- pcie = false;
+ pcie = NULL;
}
if (pci) {
- sii->pub.buscoretype = PCI_CORE_ID;
- sii->pub.buscorerev = pcirev;
- sii->pub.buscoreidx = pciidx;
+ sii->buscore = pci;
} else if (pcie) {
- sii->pub.buscoretype = PCIE_CORE_ID;
- sii->pub.buscorerev = pcierev;
- sii->pub.buscoreidx = pcieidx;
+ sii->buscore = pcie;
}
/* fixup necessary chip/core configurations */
- if (SI_FAST(sii)) {
- if (!sii->pch) {
- sii->pch = pcicore_init(&sii->pub, sii->pbus,
- (__iomem void *)PCIEREGS(sii));
- if (sii->pch == NULL)
- return false;
- }
+ if (!sii->pch) {
+ sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
+ if (sii->pch == NULL)
+ return false;
}
- if (ai_pci_fixcfg(&sii->pub)) {
- /* si_doattach: si_pci_fixcfg failed */
+ if (ai_pci_fixcfg(&sii->pub))
return false;
- }
-
- /* return to the original core */
- ai_setcoreidx(&sii->pub, *origidx);
return true;
}
@@ -1019,39 +551,27 @@ static __used void ai_nvram_process(struct si_info *sii)
uint w = 0;
/* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
sii->pub.boardvendor = w & 0xffff;
sii->pub.boardtype = (w >> 16) & 0xffff;
- sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
}
static struct si_info *ai_doattach(struct si_info *sii,
- void __iomem *regs, struct pci_dev *pbus)
+ struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
u32 w, savewin;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint socitype;
- uint origidx;
-
- memset((unsigned char *) sii, 0, sizeof(struct si_info));
savewin = 0;
- sih->buscoreidx = BADIDX;
-
- sii->curmap = regs;
- sii->pbus = pbus;
+ sii->icbus = pbus;
+ sii->pcibus = pbus->host_pci;
- /* find Chipcommon address */
- pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
- if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
- savewin = SI_ENUM_BASE;
-
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
- SI_ENUM_BASE);
- cc = (struct chipcregs __iomem *) regs;
+ /* switch to Chipcommon core */
+ cc = pbus->drv_cc.core;
/* bus/core/clk setup for register access */
if (!ai_buscore_prep(sii))
@@ -1064,89 +584,69 @@ static struct si_info *ai_doattach(struct si_info *sii,
* hosts w/o chipcommon), some way of recognizing them needs to
* be added here.
*/
- w = R_REG(&cc->chipid);
+ w = bcma_read32(cc, CHIPCREGOFFS(chipid));
socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
/* Might as well fill in chip id rev & pkg */
sih->chip = w & CID_ID_MASK;
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
- sih->issim = false;
-
/* scan for cores */
- if (socitype == SOCI_AI) {
- SI_MSG("Found chip type AI (0x%08x)\n", w);
- /* pass chipc address instead of original core base */
- ai_scan(&sii->pub, cc);
- } else {
- /* Found chip of unknown type */
- return NULL;
- }
- /* no cores found, bail out */
- if (sii->numcores == 0)
+ if (socitype != SOCI_AI)
return NULL;
- /* bus/core/clk setup */
- origidx = SI_CC_IDX;
- if (!ai_buscore_setup(sii, savewin, &origidx))
+ SI_MSG("Found chip type AI (0x%08x)\n", w);
+ if (!ai_buscore_setup(sii, cc))
goto exit;
/* Init nvram from sprom/otp if they exist */
- if (srom_var_init(&sii->pub, cc))
+ if (srom_var_init(&sii->pub))
goto exit;
ai_nvram_process(sii);
/* === NVRAM, clock is ready === */
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
- W_REG(&cc->gpiopullup, 0);
- W_REG(&cc->gpiopulldown, 0);
- ai_setcoreidx(sih, origidx);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
/* PMU specific initializations */
- if (sih->cccaps & CC_CAP_PMU) {
- u32 xtalfreq;
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
si_pmu_init(sih);
- si_pmu_chip_init(sih);
-
- xtalfreq = si_pmu_measure_alpclk(sih);
- si_pmu_pll_init(sih, xtalfreq);
+ (void)si_pmu_measure_alpclk(sih);
si_pmu_res_init(sih);
- si_pmu_swreg_init(sih);
}
/* setup the GPIO based LED powersave register */
w = getintvar(sih, BRCMS_SROM_LEDDC);
if (w == 0)
w = DEFAULT_GPIOTIMERVAL;
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
- ~0, w);
+ ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
+ ~0, w);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_attach(sii->pch, SI_DOATTACH);
- if (sih->chip == BCM43224_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
/*
* enable 12 mA drive strength for 43224 and
* set chipControl register bit 15
*/
- if (sih->chiprev == 0) {
+ if (ai_get_chiprev(sih) == 0) {
SI_MSG("Applying 43224A0 WARs\n");
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- CCTRL43224_GPIO_TOGGLE,
- CCTRL43224_GPIO_TOGGLE);
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
+ CCTRL43224_GPIO_TOGGLE,
+ CCTRL43224_GPIO_TOGGLE);
si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
CCTRL_43224A0_12MA_LED_DRIVE);
}
- if (sih->chiprev >= 1) {
+ if (ai_get_chiprev(sih) >= 1) {
SI_MSG("Applying 43224B0+ WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE);
}
}
- if (sih->chip == BCM4313_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
/*
* enable 12 mA drive strength for 4313 and
* set chipControl register bit 1
@@ -1167,22 +667,19 @@ static struct si_info *ai_doattach(struct si_info *sii,
}
/*
- * Allocate a si handle.
- * devid - pci device id (used to determine chip#)
- * osh - opaque OS handle
- * regs - virtual address of initial core registers
+ * Allocate a si handle and do the attach.
*/
struct si_pub *
-ai_attach(void __iomem *regs, struct pci_dev *sdh)
+ai_attach(struct bcma_bus *pbus)
{
struct si_info *sii;
/* alloc struct si_info */
- sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
+ sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
if (sii == NULL)
return NULL;
- if (ai_doattach(sii, regs, sdh) == NULL) {
+ if (ai_doattach(sii, pbus) == NULL) {
kfree(sii);
return NULL;
}
@@ -1211,292 +708,66 @@ void ai_detach(struct si_pub *sih)
kfree(sii);
}
-/* register driver interrupt disabling and restoring callback functions */
-void
-ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intr_arg = intr_arg;
- sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
- sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
- sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
- /* save current core id. when this function called, the current core
- * must be the core which provides driver functions(il, et, wl, etc.)
- */
- sii->dev_coreid = sii->coreid[sii->curidx];
-}
-
-void ai_deregister_intr_callback(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intrsoff_fn = NULL;
-}
-
-uint ai_coreid(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->curidx;
-}
-
-bool ai_backplane64(struct si_pub *sih)
-{
- return (sih->cccaps & CC_CAP_BKPLN64) != 0;
-}
-
/* return index of coreid or BADIDX if not found */
-uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
+struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
{
+ struct bcma_device *core;
struct si_info *sii;
uint found;
- uint i;
sii = (struct si_info *)sih;
found = 0;
- for (i = 0; i < sii->numcores; i++)
- if (sii->coreid[i] == coreid) {
+ list_for_each_entry(core, &sii->icbus->cores, list)
+ if (core->id.id == coreid) {
if (found == coreunit)
- return i;
+ return core;
found++;
}
- return BADIDX;
-}
-
-/*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
- */
-void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
-{
- uint idx;
-
- idx = ai_findcoreidx(sih, coreid, coreunit);
- if (idx >= SI_MAXCORES)
- return NULL;
-
- return ai_setcoreidx(sih, idx);
-}
-
-/* Turn off interrupt as required by ai_setcore, before switch core */
-void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
- uint *intr_val)
-{
- void __iomem *cc;
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
-
- if (SI_FAST(sii)) {
- /* Overloading the origidx variable to remember the coreid,
- * this works because the core ids cannot be confused with
- * core indices.
- */
- *origidx = coreid;
- if (coreid == CC_CORE_ID)
- return CCREGS_FAST(sii);
- else if (coreid == sih->buscoretype)
- return PCIEREGS(sii);
- }
- INTR_OFF(sii, *intr_val);
- *origidx = sii->curidx;
- cc = ai_setcore(sih, coreid, 0);
- return cc;
-}
-
-/* restore coreidx and restore interrupt */
-void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- if (SI_FAST(sii)
- && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
- return;
-
- ai_setcoreidx(sih, coreid);
- INTR_RESTORE(sii, intr_val);
-}
-
-void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 *w = (u32 *) sii->curwrap;
- W_REG(w + (offset / 4), val);
- return;
+ return NULL;
}
/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
+ * read/modify chipcommon core register.
*/
-uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val)
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
{
- uint origidx = 0;
- u32 __iomem *r = NULL;
- uint w;
- uint intr_val = 0;
- bool fast = false;
+ struct bcma_device *cc;
+ u32 w;
struct si_info *sii;
sii = (struct si_info *)sih;
-
- if (coreidx >= SI_MAXCORES)
- return 0;
-
- /*
- * If pci/pcie, we can get at pci/pcie regs
- * and on newer cores to chipc
- */
- if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
- /* Chipc registers are mapped at 12KB */
- fast = true;
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_CCREGS_OFFSET + regoff);
- } else if (sii->pub.buscoreidx == coreidx) {
- /*
- * pci registers are at either in the last 2KB of
- * an 8KB window or, in pcie and pci rev 13 at 8KB
- */
- fast = true;
- if (SI_FAST(sii))
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET + regoff);
- else
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- ((regoff >= SBCONFIGOFF) ?
- PCI_BAR0_PCISBR_OFFSET :
- PCI_BAR0_PCIREGS_OFFSET) + regoff);
- }
-
- if (!fast) {
- INTR_OFF(sii, intr_val);
-
- /* save current core index */
- origidx = ai_coreidx(&sii->pub);
-
- /* switch core */
- r = (u32 __iomem *) ((unsigned char __iomem *)
- ai_setcoreidx(&sii->pub, coreidx) + regoff);
- }
+ cc = sii->icbus->drv_cc.core;
/* mask and set */
if (mask || val) {
- w = (R_REG(r) & ~mask) | val;
- W_REG(r, w);
+ bcma_maskset32(cc, regoff, ~mask, val);
}
/* readback */
- w = R_REG(r);
-
- if (!fast) {
- /* restore core index */
- if (origidx != coreidx)
- ai_setcoreidx(&sii->pub, origidx);
-
- INTR_RESTORE(sii, intr_val);
- }
+ w = bcma_read32(cc, regoff);
return w;
}
-void ai_core_disable(struct si_pub *sih, u32 bits)
-{
- struct si_info *sii;
- u32 dummy;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- /* if core is already in reset, just return */
- if (R_REG(&ai->resetctrl) & AIRC_RESET)
- return;
-
- W_REG(&ai->ioctrl, bits);
- dummy = R_REG(&ai->ioctrl);
- udelay(10);
-
- W_REG(&ai->resetctrl, AIRC_RESET);
- udelay(1);
-}
-
-/* reset and re-enable a core
- * inputs:
- * bits - core specific bits that are set during and after reset sequence
- * resetbits - core specific bits that are set only during reset sequence
- */
-void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 dummy;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- /*
- * Must do the disable sequence first to work
- * for arbitrary current core state.
- */
- ai_core_disable(sih, (bits | resetbits));
-
- /*
- * Now do the initialization sequence.
- */
- W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- W_REG(&ai->resetctrl, 0);
- udelay(1);
-
- W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- udelay(1);
-}
-
/* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(struct si_info *sii)
+static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
{
- struct chipcregs __iomem *cc;
+ struct si_info *sii;
u32 val;
- if (sii->pub.ccrev < 6) {
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
+ sii = (struct si_info *)sih;
+ if (ai_get_ccrev(&sii->pub) < 6) {
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
&val);
if (val & PCI_CFG_GPIO_SCS)
return SCC_SS_PCI;
return SCC_SS_XTAL;
- } else if (sii->pub.ccrev < 10) {
- cc = (struct chipcregs __iomem *)
- ai_setcoreidx(&sii->pub, sii->curidx);
- return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
+ } else if (ai_get_ccrev(&sii->pub) < 10) {
+ return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_SS_MASK;
} else /* Insta-clock */
return SCC_SS_XTAL;
}
@@ -1505,24 +776,24 @@ static uint ai_slowclk_src(struct si_info *sii)
* return the ILP (slowclock) min or max frequency
* precondition: we've established the chip has dynamic clk control
*/
-static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
- struct chipcregs __iomem *cc)
+static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
+ struct bcma_device *cc)
{
u32 slowclk;
uint div;
- slowclk = ai_slowclk_src(sii);
- if (sii->pub.ccrev < 6) {
+ slowclk = ai_slowclk_src(sih, cc);
+ if (ai_get_ccrev(sih) < 6) {
if (slowclk == SCC_SS_PCI)
return max_freq ? (PCIMAXFREQ / 64)
: (PCIMINFREQ / 64);
else
return max_freq ? (XTALMAXFREQ / 32)
: (XTALMINFREQ / 32);
- } else if (sii->pub.ccrev < 10) {
+ } else if (ai_get_ccrev(sih) < 10) {
div = 4 *
- (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
- SCC_CD_SHIFT) + 1);
+ (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
if (slowclk == SCC_SS_LPO)
return max_freq ? LPOMAXFREQ : LPOMINFREQ;
else if (slowclk == SCC_SS_XTAL)
@@ -1533,15 +804,15 @@ static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
: (PCIMINFREQ / div);
} else {
/* Chipc rev 10 is InstaClock */
- div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
- div = 4 * (div + 1);
+ div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+ div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
}
return 0;
}
static void
-ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
+ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
{
uint slowmaxfreq, pll_delay, slowclk;
uint pll_on_delay, fref_sel_delay;
@@ -1554,55 +825,40 @@ ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
* powered down by dynamic clk control logic.
*/
- slowclk = ai_slowclk_src(sii);
+ slowclk = ai_slowclk_src(sih, cc);
if (slowclk != SCC_SS_XTAL)
pll_delay += XTAL_ON_DELAY;
/* Starting with 4318 it is ILP that is used for the delays */
slowmaxfreq =
- ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
+ ai_slowclk_freq(sih,
+ (ai_get_ccrev(sih) >= 10) ? false : true, cc);
pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
- W_REG(&cc->pll_on_delay, pll_on_delay);
- W_REG(&cc->fref_sel_delay, fref_sel_delay);
+ bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay);
+ bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay);
}
/* initialize power control delay registers */
void ai_clkctl_init(struct si_pub *sih)
{
- struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
- bool fast;
+ struct bcma_device *cc;
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return;
- sii = (struct si_info *)sih;
- fast = SI_FAST(sii);
- if (!fast) {
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- return;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- return;
- }
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ if (cc == NULL)
+ return;
/* set all Instaclk chip ILP to 1 MHz */
- if (sih->ccrev >= 10)
- SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
- (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+ if (ai_get_ccrev(sih) >= 10)
+ bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
- ai_clkctl_setdelay(sii, cc);
-
- if (!fast)
- ai_setcoreidx(sih, origidx);
+ ai_clkctl_setdelay(sih, cc);
}
/*
@@ -1612,47 +868,25 @@ void ai_clkctl_init(struct si_pub *sih)
u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
{
struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint slowminfreq;
u16 fpdelay;
- uint intr_val = 0;
- bool fast;
sii = (struct si_info *)sih;
- if (sih->cccaps & CC_CAP_PMU) {
- INTR_OFF(sii, intr_val);
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
fpdelay = si_pmu_fast_pwrup_delay(sih);
- INTR_RESTORE(sii, intr_val);
return fpdelay;
}
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return 0;
- fast = SI_FAST(sii);
fpdelay = 0;
- if (!fast) {
- origidx = sii->curidx;
- INTR_OFF(sii, intr_val);
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- goto done;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
-
- slowminfreq = ai_slowclk_freq(sii, false, cc);
- fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
- (slowminfreq - 1)) / slowminfreq;
-
- done:
- if (!fast) {
- ai_setcoreidx(sih, origidx);
- INTR_RESTORE(sii, intr_val);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
+ if (cc) {
+ slowminfreq = ai_slowclk_freq(sih, false, cc);
+ fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
+ * 1000000) + (slowminfreq - 1)) / slowminfreq;
}
return fpdelay;
}
@@ -1666,12 +900,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
sii = (struct si_info *)sih;
/* pcie core doesn't have any mapping to control the xtal pu */
- if (PCIE(sii))
+ if (PCIE(sih))
return -1;
- pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
/*
* Avoid glitching the clock if GPRS is already using it.
@@ -1692,9 +926,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out |= PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
udelay(XTAL_ON_DELAY);
}
@@ -1702,7 +936,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* turn pll on */
if (what & PLL) {
out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
mdelay(2);
}
@@ -1711,9 +945,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out &= ~PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
}
@@ -1723,63 +957,52 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* clk control mechanism through chipcommon, no policy checking */
static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
{
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
u32 scc;
- uint intr_val = 0;
- bool fast = SI_FAST(sii);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sii->pub.ccrev < 6)
+ if (ai_get_ccrev(&sii->pub) < 6)
return false;
- if (!fast) {
- INTR_OFF(sii, intr_val);
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(&sii->pub, CC_CORE_ID, 0);
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
- goto done;
+ if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
+ (ai_get_ccrev(&sii->pub) < 20))
+ return mode == CLK_FAST;
switch (mode) {
case CLK_FAST: /* FORCEHT, fast (pll) clock */
- if (sii->pub.ccrev < 10) {
+ if (ai_get_ccrev(&sii->pub) < 10) {
/*
* don't forget to force xtal back
* on before we clear SCC_DYN_XTAL..
*/
ai_clkctl_xtal(&sii->pub, XTAL, ON);
- SET_REG(&cc->slow_clk_ctl,
- (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
- } else if (sii->pub.ccrev < 20) {
- OR_REG(&cc->system_clk_ctl, SYCC_HR);
+ bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
+ (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
+ bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
} else {
- OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
+ bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
}
/* wait for the PLL */
- if (sii->pub.cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
u32 htavail = CCS_HTAVAIL;
- SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
- == 0), PMU_MAX_TRANSITION_DLY);
+ SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
+ htavail) == 0), PMU_MAX_TRANSITION_DLY);
} else {
udelay(PLL_DELAY);
}
break;
case CLK_DYNAMIC: /* enable dynamic clock control */
- if (sii->pub.ccrev < 10) {
- scc = R_REG(&cc->slow_clk_ctl);
+ if (ai_get_ccrev(&sii->pub) < 10) {
+ scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
scc &= ~(SCC_FS | SCC_IP | SCC_XC);
if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
scc |= SCC_XC;
- W_REG(&cc->slow_clk_ctl, scc);
+ bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
/*
* for dynamic control, we have to
@@ -1787,11 +1010,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
*/
if (scc & SCC_XC)
ai_clkctl_xtal(&sii->pub, XTAL, OFF);
- } else if (sii->pub.ccrev < 20) {
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
/* Instaclock */
- AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
+ bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
} else {
- AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
+ bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
}
break;
@@ -1799,11 +1022,6 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
break;
}
- done:
- if (!fast) {
- ai_setcoreidx(&sii->pub, origidx);
- INTR_RESTORE(sii, intr_val);
- }
return mode == CLK_FAST;
}
@@ -1822,46 +1040,25 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode)
sii = (struct si_info *)sih;
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sih->ccrev < 6)
+ if (ai_get_ccrev(sih) < 6)
return false;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
return mode == CLK_FAST;
return _ai_clkctl_cc(sii, mode);
}
-/* Build device path */
-int ai_devpath(struct si_pub *sih, char *path, int size)
-{
- int slen;
-
- if (!path || size <= 0)
- return -1;
-
- slen = snprintf(path, (size_t) size, "pci/%u/%u/",
- ((struct si_info *)sih)->pbus->bus->number,
- PCI_SLOT(((struct pci_dev *)
- (((struct si_info *)(sih))->pbus))->devfn));
-
- if (slen < 0 || slen >= size) {
- path[0] = '\0';
- return -1;
- }
-
- return 0;
-}
-
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
sii = (struct si_info *)sih;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_FAST);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_up(sii->pch, SI_PCIUP);
}
@@ -1884,7 +1081,7 @@ void ai_pci_down(struct si_pub *sih)
sii = (struct si_info *)sih;
/* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_DYNAMIC);
pcicore_down(sii->pch, SI_PCIDOWN);
@@ -1897,42 +1094,23 @@ void ai_pci_down(struct si_pub *sih)
void ai_pci_setup(struct si_pub *sih, uint coremask)
{
struct si_info *sii;
- struct sbpciregs __iomem *regs = NULL;
- u32 siflag = 0, w;
- uint idx = 0;
+ u32 w;
sii = (struct si_info *)sih;
- if (PCI(sii)) {
- /* get current core index */
- idx = sii->curidx;
-
- /* we interrupt on this backplane flag number */
- siflag = ai_flag(sih);
-
- /* switch over to pci core */
- regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
- }
-
/*
* Enable sb->pci interrupts. Assume
* PCI rev 2.3 support was added in pci core rev 6 and things changed..
*/
- if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+ if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
/* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
+ pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
- } else {
- /* set sbintvec bit for our flag number */
- ai_setint(sih, siflag);
+ pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
}
- if (PCI(sii)) {
- pcicore_pci_setup(sii->pch, regs);
-
- /* switch back to previous core */
- ai_setcoreidx(sih, idx);
+ if (PCI(sih)) {
+ pcicore_pci_setup(sii->pch);
}
}
@@ -1942,25 +1120,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
*/
int ai_pci_fixcfg(struct si_pub *sih)
{
- uint origidx;
- void __iomem *regs = NULL;
struct si_info *sii = (struct si_info *)sih;
/* Fixup PI in SROM shadow area to enable the correct PCI core access */
- /* save the current index */
- origidx = ai_coreidx(&sii->pub);
-
/* check 'pi' is correct and fix it if not */
- regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
- if (sii->pub.buscoretype == PCIE_CORE_ID)
- pcicore_fixcfg_pcie(sii->pch,
- (struct sbpcieregs __iomem *)regs);
- else if (sii->pub.buscoretype == PCI_CORE_ID)
- pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
-
- /* restore the original index */
- ai_setcoreidx(&sii->pub, origidx);
-
+ pcicore_fixcfg(sii->pch);
pcicore_hwup(sii->pch);
return 0;
}
@@ -1971,58 +1135,42 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
uint regoff;
regoff = offsetof(struct chipcregs, gpiocontrol);
- return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
+ return ai_cc_reg(sih, regoff, mask, val);
}
void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
u32 val;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-
- val = R_REG(&cc->chipcontrol);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
if (on) {
- if (sih->chippkg == 9 || sih->chippkg == 0xb)
+ if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
/* Ext PA Controls for 4331 12x9 Package */
- W_REG(&cc->chipcontrol, val |
- CCTRL4331_EXTPA_EN |
- CCTRL4331_EXTPA_ON_GPIO2_5);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN |
+ CCTRL4331_EXTPA_ON_GPIO2_5);
else
/* Ext PA Controls for 4331 12x12 Package */
- W_REG(&cc->chipcontrol,
- val | CCTRL4331_EXTPA_EN);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN);
} else {
val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
- W_REG(&cc->chipcontrol, val);
+ bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
+ ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
}
-
- ai_setcoreidx(sih, origidx);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = ai_setcore(sih, CC_CORE_ID, 0);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
/* EPA Fix */
- W_REG(&cc->gpiocontrol,
- R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
-
- ai_setcoreidx(sih, origidx);
+ bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
}
/* check if the device is removed */
@@ -2033,7 +1181,7 @@ bool ai_deviceremoved(struct si_pub *sih)
sii = (struct si_info *)sih;
- pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
return true;
@@ -2042,26 +1190,23 @@ bool ai_deviceremoved(struct si_pub *sih)
bool ai_is_sprom_available(struct si_pub *sih)
{
- if (sih->ccrev >= 31) {
- struct si_info *sii;
- uint origidx;
- struct chipcregs __iomem *cc;
+ struct si_info *sii = (struct si_info *)sih;
+
+ if (ai_get_ccrev(sih) >= 31) {
+ struct bcma_device *cc;
u32 sromctrl;
- if ((sih->cccaps & CC_CAP_SROM) == 0)
+ if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
return false;
- sii = (struct si_info *)sih;
- origidx = sii->curidx;
- cc = ai_setcoreidx(sih, SI_CC_IDX);
- sromctrl = R_REG(&cc->sromcontrol);
- ai_setcoreidx(sih, origidx);
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
return sromctrl & SRC_PRESENT;
}
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
default:
return true;
}
@@ -2069,9 +1214,11 @@ bool ai_is_sprom_available(struct si_pub *sih)
bool ai_is_otp_disabled(struct si_pub *sih)
{
- switch (sih->chip) {
+ struct si_info *sii = (struct si_info *)sih;
+
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+ return (sii->chipst & CST4313_OTP_PRESENT) == 0;
/* These chips always have their OTP on */
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
@@ -2079,3 +1226,15 @@ bool ai_is_otp_disabled(struct si_pub *sih)
return false;
}
}
+
+uint ai_get_buscoretype(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.id;
+}
+
+uint ai_get_buscorerev(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.rev;
+}
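
Most of this file's churn is a mechanical move from the old memory-mapped accessors to the bcma core handle, with CHIPCREGOFFS() turning a struct chipcregs field into a byte offset. The equivalences assumed by the conversion, sketched side by side (illustrative only; 'mask' and 'bits' are placeholders, and 'cc' is a struct chipcregs __iomem * in the old style but a struct bcma_device * in the new one):

/* old style: dereference a pointer into the ioremapped core */
val = R_REG(&cc->chipstatus);
W_REG(&cc->gpiopullup, 0);
OR_REG(&cc->system_clk_ctl, SYCC_HR);		/* set bits   */
AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);		/* clear bits */

/* new style: offset-based access through the bcma device */
val = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);

/* read-modify-write: bcma_maskset32(core, off, mask, set) writes
 * (reg & mask) | set, hence the ~mask in ai_cc_reg() above.
 */
bcma_maskset32(cc, CHIPCREGOFFS(chipcontrol), ~mask, bits);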
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index b51d1e421e2..f84c6f78169 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -17,6 +17,8 @@
#ifndef _BRCM_AIUTILS_H_
#define _BRCM_AIUTILS_H_
+#include <linux/bcma/bcma.h>
+
#include "types.h"
/*
@@ -144,26 +146,15 @@
* public (read-only) portion of aiutils handle returned by si_attach()
*/
struct si_pub {
- uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
- uint buscorerev; /* buscore rev */
- uint buscoreidx; /* buscore index */
int ccrev; /* chip common core rev */
u32 cccaps; /* chip common capabilities */
- u32 cccaps_ext; /* chip common capabilities extension */
int pmurev; /* pmu core rev */
u32 pmucaps; /* pmu capabilities */
uint boardtype; /* board type */
uint boardvendor; /* board vendor */
- uint boardflags; /* board flags */
- uint boardflags2; /* board flags2 */
uint chip; /* chip number */
uint chiprev; /* chip revision */
uint chippkg; /* chip package option */
- u32 chipst; /* chip status */
- bool issim; /* chip is in simulation or emulation */
- uint socirev; /* SOC interconnect rev */
- bool pci_pr32414;
-
};
struct pci_dev;
@@ -179,38 +170,13 @@ struct gpioh_item {
/* misc si info needed by some of the routines */
struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
- struct pci_dev *pbus; /* handle to pci bus */
- uint dev_coreid; /* the core provides driver functions */
- void *intr_arg; /* interrupt callback function arg */
- u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
- /* restore chip interrupts */
- void (*intrsrestore_fn) (void *intr_arg, u32 arg);
- /* check if interrupts are enabled */
- bool (*intrsenabled_fn) (void *intr_arg);
-
+ struct bcma_bus *icbus; /* handle to soc interconnect bus */
+ struct pci_dev *pcibus; /* handle to pci bus */
struct pcicore_info *pch; /* PCI/E core handle */
-
+ struct bcma_device *buscore;
struct list_head var_list; /* list of srom variables */
- void __iomem *curmap; /* current regs va */
- void __iomem *regs[SI_MAXCORES]; /* other regs va */
-
- uint curidx; /* current core index */
- uint numcores; /* # discovered cores */
- uint coreid[SI_MAXCORES]; /* id of each core */
- u32 coresba[SI_MAXCORES]; /* backplane address of each core */
- void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
- u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
- u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
- u32 coresba2_size[SI_MAXCORES]; /* second address space size */
-
- void *curwrap; /* current wrapper va */
- void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
- u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
-
- u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
- u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
- u32 oob_router; /* oob router registers for axi */
+ u32 chipst; /* chip status */
};
/*
@@ -223,52 +189,15 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern uint ai_flag(struct si_pub *sih);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corevendor(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern int ai_numaddrspaces(struct si_pub *sih);
-extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
-extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
-extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
+extern struct bcma_device *ai_findcore(struct si_pub *sih,
+ u16 coreid, u16 coreunit);
+extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
-extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh);
+extern struct si_pub *ai_attach(struct bcma_bus *pbus);
extern void ai_detach(struct si_pub *sih);
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
- uint *origidx, uint *intr_val);
-extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern u32 ai_alp_clock(struct si_pub *sih);
-extern u32 ai_ilp_clock(struct si_pub *sih);
+extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
extern void ai_pci_setup(struct si_pub *sih, uint coremask);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern bool ai_backplane64(struct si_pub *sih);
-extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(struct si_pub *sih);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
@@ -283,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih);
/* SPROM availability */
extern bool ai_is_sprom_available(struct si_pub *sih);
-/*
- * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
- * The returned path is NULL terminated and has trailing '/'.
- * Return 0 on success, nonzero otherwise.
- */
-extern int ai_devpath(struct si_pub *sih, char *path, int size);
-
extern void ai_pci_sleep(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih);
@@ -299,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
+extern uint ai_get_buscoretype(struct si_pub *sih);
+extern uint ai_get_buscorerev(struct si_pub *sih);
+
+static inline int ai_get_ccrev(struct si_pub *sih)
+{
+ return sih->ccrev;
+}
+
+static inline u32 ai_get_cccaps(struct si_pub *sih)
+{
+ return sih->cccaps;
+}
+
+static inline int ai_get_pmurev(struct si_pub *sih)
+{
+ return sih->pmurev;
+}
+
+static inline u32 ai_get_pmucaps(struct si_pub *sih)
+{
+ return sih->pmucaps;
+}
+
+static inline uint ai_get_boardtype(struct si_pub *sih)
+{
+ return sih->boardtype;
+}
+
+static inline uint ai_get_boardvendor(struct si_pub *sih)
+{
+ return sih->boardvendor;
+}
+
+static inline uint ai_get_chip_id(struct si_pub *sih)
+{
+ return sih->chip;
+}
+
+static inline uint ai_get_chiprev(struct si_pub *sih)
+{
+ return sih->chiprev;
+}
+
+static inline uint ai_get_chippkg(struct si_pub *sih)
+{
+ return sih->chippkg;
+}
+
#endif /* _BRCM_AIUTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 43f7a724dda..90911eec0cf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -1118,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
u8 status_delay = 0;
/* wait till the next 8 bytes of txstatus is available */
- while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
+ s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
+ while ((s1 & TXS_V) == 0) {
udelay(1);
status_delay++;
if (status_delay > 10)
return; /* error condition */
+ s1 = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(frmtxstatus));
}
- s2 = R_REG(&wlc->regs->frmtxstatus2);
+ s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
}
if (scb) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index ed51616abc8..1948cb2771e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -430,6 +430,9 @@ struct d11regs {
u16 PAD[0x380]; /* 0x800 - 0xEFE */
};
+/* d11 register field offset */
+#define D11REGOFFS(field) offsetof(struct d11regs, field)
+
#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
/* biststatus */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 0bb8c37e979..b4cf617276c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -27,6 +27,13 @@
#include "soc.h"
/*
+ * dma register field offset calculation
+ */
+#define DMA64REGOFFS(field) offsetof(struct dma64regs, field)
+#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field))
+#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field))
+
+/*
* DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
* a contiguous 8kB physical address.
*/
@@ -220,15 +227,16 @@ struct dma_info {
uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
- struct pci_dev *pbus; /* bus handle */
+ struct bcma_device *core;
+ struct device *dmadev;
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
/* 64-bit dma tx engine registers */
- struct dma64regs __iomem *d64txregs;
+ uint d64txregbase;
/* 64-bit dma rx engine registers */
- struct dma64regs __iomem *d64rxregs;
+ uint d64rxregbase;
/* pointer to dma64 tx descriptor ring */
struct dma64desc *txd64;
/* pointer to dma64 rx descriptor ring */
@@ -375,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
if (dmactrlflags & DMA_CTRL_PEN) {
u32 control;
- control = R_REG(&di->d64txregs->control);
- W_REG(&di->d64txregs->control,
+ control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
control | D64_XC_PD);
- if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+ if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
+ D64_XC_PD)
/* We *can* disable it so it is supported,
* restore control register
*/
- W_REG(&di->d64txregs->control,
- control);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
+ control);
else
/* Not supported, don't allow it to be enabled */
dmactrlflags &= ~DMA_CTRL_PEN;
@@ -394,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
return dmactrlflags;
}
-static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
u32 w;
- OR_REG(&dma64regs->control, D64_XC_AE);
- w = R_REG(&dma64regs->control);
- AND_REG(&dma64regs->control, ~D64_XC_AE);
+ bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+ w = bcma_read32(di->core, ctrl_offset);
+ bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
return (w & D64_XC_AE) == D64_XC_AE;
}
@@ -412,13 +421,13 @@ static bool _dma_isaddrext(struct dma_info *di)
/* DMA64 supports full 32- or 64-bit operation. AE is always valid */
/* not all tx or rx channel are available */
- if (di->d64txregs != NULL) {
- if (!_dma64_addrext(di->d64txregs))
+ if (di->d64txregbase != 0) {
+ if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
di->name);
return true;
- } else if (di->d64rxregs != NULL) {
- if (!_dma64_addrext(di->d64rxregs))
+ } else if (di->d64rxregbase != 0) {
+ if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
di->name);
return true;
@@ -432,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
u32 addrl;
/* Check to see if the descriptors need to be aligned on 4K/8K or not */
- if (di->d64txregs != NULL) {
- W_REG(&di->d64txregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64txregs->addrlow);
+ if (di->d64txregbase != 0) {
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
- } else if (di->d64rxregs != NULL) {
- W_REG(&di->d64rxregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64rxregs->addrlow);
+ } else if (di->d64rxregbase != 0) {
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
}
@@ -450,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
* Descriptor table must start at the DMA hardware dictated alignment, so
* allocated memory must be large enough to support this requirement.
*/
-static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+static void *dma_alloc_consistent(struct dma_info *di, uint size,
u16 align_bits, uint *alloced,
dma_addr_t *pap)
{
@@ -460,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
size += align;
*alloced = size;
}
- return pci_alloc_consistent(pdev, size, pap);
+ return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}
static
@@ -486,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
u32 desc_strtaddr;
u32 alignbytes = 1 << *alignbits;
- va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+ va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
if (NULL == va)
return NULL;
@@ -495,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
& boundary)) {
*alignbits = dma_align_sizetobits(size);
- pci_free_consistent(di->pbus, size, va, *descpa);
- va = dma_alloc_consistent(di->pbus, size, *alignbits,
+ dma_free_coherent(di->dmadev, size, va, *descpa);
+ va = dma_alloc_consistent(di, size, *alignbits,
alloced, descpa);
}
return va;
@@ -556,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
}
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level)
+ struct bcma_device *core,
+ uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level)
{
struct dma_info *di;
+ u8 rev = core->id.rev;
uint size;
/* allocate private info structure */
@@ -572,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->msg_level = msg_level ? msg_level : &dma_msg_level;
- di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+ di->dma64 =
+ ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
- /* init dma reg pointer */
- di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
- di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+ /* init dma reg info */
+ di->core = core;
+ di->d64txregbase = txregbase;
+ di->d64rxregbase = rxregbase;
/*
* Default flags (which can be changed by the driver calling
@@ -585,16 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
- DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
- name, "DMA64",
+ DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
+ "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+ "txregbase %u rxregbase %u\n", name, "DMA64",
di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
- rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);
+ rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL);
di->name[MAXNAMEL - 1] = '\0';
- di->pbus = ((struct si_info *)sih)->pbus;
+ di->dmadev = core->dma_dev;
/* save tunables */
di->ntxd = (u16) ntxd;
@@ -626,11 +639,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh;
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
- if ((ai_coreid(sih) == SDIOD_CORE_ID)
- && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
+ if ((core->id.id == SDIOD_CORE_ID)
+ && ((rev > 0) && (rev <= 2)))
di->addrext = 0;
- else if ((ai_coreid(sih) == I2S_CORE_ID) &&
- ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
+ else if ((core->id.id == I2S_CORE_ID) &&
+ ((rev == 0) || (rev == 1)))
di->addrext = 0;
else
di->addrext = _dma_isaddrext(di);
@@ -749,13 +762,13 @@ void dma_detach(struct dma_pub *pub)
/* free dma descriptor rings */
if (di->txd64)
- pci_free_consistent(di->pbus, di->txdalloc,
- ((s8 *)di->txd64 - di->txdalign),
- (di->txdpaorig));
+ dma_free_coherent(di->dmadev, di->txdalloc,
+ ((s8 *)di->txd64 - di->txdalign),
+ (di->txdpaorig));
if (di->rxd64)
- pci_free_consistent(di->pbus, di->rxdalloc,
- ((s8 *)di->rxd64 - di->rxdalign),
- (di->rxdpaorig));
+ dma_free_coherent(di->dmadev, di->rxdalloc,
+ ((s8 *)di->rxd64 - di->rxdalign),
+ (di->rxdpaorig));
/* free packet pointer vectors */
kfree(di->txp);
@@ -780,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
if ((di->ddoffsetlow == 0)
|| !(pa & PCI32ADDR_HIGH)) {
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
}
} else {
/* DMA64 32bits address extension */
@@ -795,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
pa &= ~PCI32ADDR_HIGH;
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64txregs->control,
- D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
+ D64_XC_AE, (ae << D64_XC_AE_SHIFT));
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64rxregs->control,
- D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
+ D64_RC_AE, (ae << D64_RC_AE_SHIFT));
}
}
}
@@ -815,9 +836,9 @@ static void _dma_rxenable(struct dma_info *di)
DMA_TRACE("%s:\n", di->name);
- control =
- (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
- D64_RC_RE;
+ control = D64_RC_RE | (bcma_read32(di->core,
+ DMA64RXREGOFFS(di, control)) &
+ D64_RC_AE);
if ((dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_RC_PD;
@@ -825,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
if (dmactrlflags & DMA_CTRL_ROC)
control |= D64_RC_OC;
- W_REG(&di->d64rxregs->control,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control),
((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
@@ -868,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
return NULL;
curr =
- B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
/* ignore curr if forceall */
@@ -882,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
/* clear this packet from the descriptor ring */
- pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -950,12 +972,12 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
if (resid > 0) {
uint cur;
cur =
- B2I(((R_REG(&di->d64rxregs->status0) &
- D64_RS0_CD_MASK) -
- di->rcvptrbase) & D64_RS0_CD_MASK,
- struct dma64desc);
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_CD_MASK) - di->rcvptrbase) &
+ D64_RS0_CD_MASK, struct dma64desc);
DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
- di->rxin, di->rxout, cur);
+ di->rxin, di->rxout, cur);
}
#endif /* BCMDBG */
@@ -983,8 +1005,10 @@ static bool dma64_rxidle(struct dma_info *di)
if (di->nrxd == 0)
return true;
- return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
- (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+ return ((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
+ D64_RS0_CD_MASK));
}
/*
@@ -1048,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
*/
*(u32 *) (p->data) = 0;
- pa = pci_map_single(di->pbus, p->data,
- di->rxbufsize, PCI_DMA_FROMDEVICE);
+ pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+ DMA_FROM_DEVICE);
/* save the free packet pointer */
di->rxp[rxout] = p;
@@ -1067,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
di->rxout = rxout;
/* update the chip lastdscr pointer */
- W_REG(&di->d64rxregs->ptr,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
di->rcvptrbase + I2B(rxout, struct dma64desc));
return ring_empty;
@@ -1128,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_XC_PD;
- OR_REG(&di->d64txregs->control, control);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
/* DMA engine with alignment requirement requires table to be inited
* before enabling the engine
@@ -1146,7 +1170,7 @@ void dma_txsuspend(struct dma_pub *pub)
if (di->ntxd == 0)
return;
- OR_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}
void dma_txresume(struct dma_pub *pub)
@@ -1158,7 +1182,7 @@ void dma_txresume(struct dma_pub *pub)
if (di->ntxd == 0)
return;
- AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+ bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}
bool dma_txsuspended(struct dma_pub *pub)
@@ -1166,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
return (di->ntxd == 0) ||
- ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
- D64_XC_SE);
+ ((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
+ D64_XC_SE);
}
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1200,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
return true;
/* suspend tx DMA first */
- W_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
- && (status != D64_XS0_XS_STOPPED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
+ (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
+ 10000);
- W_REG(&di->d64txregs->control, 0);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
/* wait for the last transaction to complete */
udelay(300);
@@ -1225,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
if (di->nrxd == 0)
return true;
- W_REG(&di->d64rxregs->control, 0);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
- != D64_RS0_RS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
return status == D64_RS0_RS_DISABLED;
}
@@ -1239,10 +1265,9 @@ bool dma_rxreset(struct dma_pub *pub)
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
struct dma_info *di = (struct dma_info *)pub;
- struct sk_buff *p, *next;
unsigned char *data;
uint len;
u16 txout;
@@ -1254,57 +1279,44 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
txout = di->txout;
/*
- * Walk the chain of packet buffers
- * allocating and initializing transmit descriptor entries.
+ * obtain and initialize transmit descriptor entry.
*/
- for (p = p0; p; p = next) {
- data = p->data;
- len = p->len;
- next = p->next;
-
- /* return nonzero if out of tx descriptors */
- if (nexttxd(di, txout) == di->txin)
- goto outoftxd;
-
- if (len == 0)
- continue;
+ data = p->data;
+ len = p->len;
- /* get physical address of buffer start */
- pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+ /* no use to transmit a zero length packet */
+ if (len == 0)
+ return 0;
- flags = 0;
- if (p == p0)
- flags |= D64_CTRL1_SOF;
+ /* return nonzero if out of tx descriptors */
+ if (nexttxd(di, txout) == di->txin)
+ goto outoftxd;
- /* With a DMA segment list, Descriptor table is filled
- * using the segment list instead of looping over
- * buffers in multi-chain DMA. Therefore, EOF for SGLIST
- * is when end of segment list is reached.
- */
- if (next == NULL)
- flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
- if (txout == (di->ntxd - 1))
- flags |= D64_CTRL1_EOT;
+ /* get physical address of buffer start */
+ pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
- dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+ /* With a DMA segment list, Descriptor table is filled
+ * using the segment list instead of looping over
+ * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+ * is when end of segment list is reached.
+ */
+ flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
- txout = nexttxd(di, txout);
- }
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
- /* if last txd eof not set, fix it */
- if (!(flags & D64_CTRL1_EOF))
- di->txd64[prevtxd(di, txout)].ctrl1 =
- cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+ txout = nexttxd(di, txout);
/* save the packet */
- di->txp[prevtxd(di, txout)] = p0;
+ di->txp[prevtxd(di, txout)] = p;
/* bump the tx descriptor index */
di->txout = txout;
/* kick the chip */
if (commit)
- W_REG(&di->d64txregs->ptr,
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
di->xmtptrbase + I2B(txout, struct dma64desc));
/* tx flow control */
@@ -1314,7 +1326,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
outoftxd:
DMA_ERROR("%s: out of txds !!!\n", di->name);
- brcmu_pkt_buf_free_skb(p0);
+ brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0;
di->dma.txnobuf++;
return -1;
@@ -1352,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
if (range == DMA_RANGE_ALL)
end = di->txout;
else {
- struct dma64regs __iomem *dregs = di->d64txregs;
-
- end = (u16) (B2I(((R_REG(&dregs->status0) &
- D64_XS0_CD_MASK) -
- di->xmtptrbase) & D64_XS0_CD_MASK,
- struct dma64desc));
+ end = (u16) (B2I(((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_CD_MASK) - di->xmtptrbase) &
+ D64_XS0_CD_MASK, struct dma64desc));
if (range == DMA_RANGE_TRANSFERED) {
active_desc =
- (u16) (R_REG(&dregs->status1) &
+ (u16)(bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status1)) &
D64_XS1_AD_MASK);
active_desc =
(active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1390,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
txp = di->txp[i];
di->txp[i] = NULL;
- pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+ dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
}
di->txin = i;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index d317c7c12f9..cc269ee5c49 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -75,10 +75,11 @@ struct dma_pub {
};
extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level);
+ struct bcma_device *d11core,
+ uint txregbase, uint rxregbase,
+ uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level);
void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
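Note: a hedged sketch of how the reworked dma_attach() prototype above is used once the mapped register pointers become byte offsets; it mirrors the brcms_b_attach_dmapio() hunk later in this patch, and the local names (name, sih, d11core, di) plus the NTXD/NRXD/RXBUFSZ/NRXBUFPOST/BRCMS_HWRXOFF tunables are taken from that context rather than defined here.

/* sketch only: attach the first (AC_BK tx / rx) DMA channel using
 * offsets into struct d11regs instead of ioremapped dma64regs pointers. */
struct dma_pub *di;

di = dma_attach(name, sih, d11core,
		offsetof(struct d11regs, fifo64regs[0].dmaxmt),
		offsetof(struct d11regs, fifo64regs[0].dmarcv),
		NTXD, NRXD, RXBUFSZ, -1, NRXBUFPOST,
		BRCMS_HWRXOFF, &brcm_msg_level);
if (di == NULL)
	return false;	/* caller tears down any channels already attached */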
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 6d3c7b6c5aa..77fdc45b43e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -17,10 +17,11 @@
#define __UNDEF_NO_VERSION__
#include <linux/etherdevice.h>
-#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/bcma/bcma.h>
#include <net/mac80211.h>
#include <defs.h>
#include "nicpci.h"
@@ -39,10 +40,10 @@
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | \
FIF_FCSFAIL | \
- FIF_PLCPFAIL | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
- FIF_BCN_PRBRESP_PROMISC)
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PSPOLL)
#define CHAN2GHZ(channel, freqency, chflags) { \
.band = IEEE80211_BAND_2GHZ, \
@@ -86,16 +87,14 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-/* recognized PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
- {0}
-};
-MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
+/* recognized BCMA Core IDs */
+static struct bcma_device_id brcms_coreid_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
#ifdef BCMDBG
static int msglevel = 0xdeadbeef;
@@ -372,7 +371,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+ wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
__func__, conf->flags & IEEE80211_CONF_MONITOR ?
"true" : "false");
if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -549,29 +548,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
+
if (changed_flags & FIF_PROMISC_IN_BSS)
- wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+ wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
- wiphy_err(wiphy, "FIF_ALLMULTI\n");
+ wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
- wiphy_err(wiphy, "FIF_FCSFAIL\n");
- if (changed_flags & FIF_PLCPFAIL)
- wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+ wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
if (changed_flags & FIF_CONTROL)
- wiphy_err(wiphy, "FIF_CONTROL\n");
+ wiphy_dbg(wiphy, "FIF_CONTROL\n");
if (changed_flags & FIF_OTHER_BSS)
- wiphy_err(wiphy, "FIF_OTHER_BSS\n");
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- spin_lock_bh(&wl->lock);
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
- wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
- brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
- } else {
- brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
- wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
- }
- spin_unlock_bh(&wl->lock);
- }
+ wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+ if (changed_flags & FIF_PSPOLL)
+ wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
+ wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+
+ spin_lock_bh(&wl->lock);
+ brcms_c_mac_promisc(wl->wlc, *total_flags);
+ spin_unlock_bh(&wl->lock);
return;
}
@@ -727,7 +722,7 @@ static const struct ieee80211_ops brcms_ops = {
};
/*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
{
@@ -867,25 +862,15 @@ static void brcms_free(struct brcms_info *wl)
#endif
kfree(t);
}
-
- /*
- * unregister_netdev() calls get_stats() which may read chip
- * registers so we cannot unmap the chip registers until
- * after calling unregister_netdev() .
- */
- if (wl->regsva)
- iounmap(wl->regsva);
-
- wl->regsva = NULL;
}
/*
* called from both kernel as from this kernel module (error flow on attach)
* precondition: perimeter lock is not acquired.
*/
-static void brcms_remove(struct pci_dev *pdev)
+static void brcms_remove(struct bcma_device *pdev)
{
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
struct brcms_info *wl = hw->priv;
if (wl->wlc) {
@@ -893,11 +878,10 @@ static void brcms_remove(struct pci_dev *pdev)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
ieee80211_unregister_hw(hw);
}
- pci_disable_device(pdev);
brcms_free(wl);
- pci_set_drvdata(pdev, NULL);
+ bcma_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
}
@@ -1005,11 +989,9 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* it as static.
*
*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
-static struct brcms_info *brcms_attach(u16 vendor, u16 device,
- resource_size_t regs,
- struct pci_dev *btparam, uint irq)
+static struct brcms_info *brcms_attach(struct bcma_device *pdev)
{
struct brcms_info *wl = NULL;
int unit, err;
@@ -1023,7 +1005,7 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
return NULL;
/* allocate private info */
- hw = pci_get_drvdata(btparam); /* btparam == pdev */
+ hw = bcma_get_drvdata(pdev);
if (hw != NULL)
wl = hw->priv;
if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
@@ -1035,26 +1017,20 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
- wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
- if (wl->regsva == NULL) {
- wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
- goto fail;
- }
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
/* prepare ucode */
- if (brcms_request_fw(wl, btparam) < 0) {
+ if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
"%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
brcms_release_fw(wl);
- brcms_remove(btparam);
+ brcms_remove(pdev);
return NULL;
}
/* common load-time initialization */
- wl->wlc = brcms_c_attach(wl, vendor, device, unit, false,
- wl->regsva, btparam, &err);
+ wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
brcms_release_fw(wl);
if (!wl->wlc) {
wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
@@ -1066,11 +1042,12 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
wl->pub->ieee_hw = hw;
/* register our interrupt handler */
- if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+ if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+ IRQF_SHARED, KBUILD_MODNAME, wl)) {
wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
goto fail;
}
- wl->irq = irq;
+ wl->irq = pdev->bus->host_pci->irq;
/* register module */
brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1117,37 +1094,18 @@ fail:
*
* Perimeter lock is initialized in the course of this function.
*/
-static int __devinit
-brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
{
- int rc;
struct brcms_info *wl;
struct ieee80211_hw *hw;
- u32 val;
-
- dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq);
- if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
- ((pdev->device != 0x0576) &&
- ((pdev->device & 0xff00) != 0x4300) &&
- ((pdev->device & 0xff00) != 0x4700) &&
- ((pdev->device < 43000) || (pdev->device > 43999))))
- return -ENODEV;
+ dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
+ pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
+ pdev->bus->host_pci->irq);
- rc = pci_enable_device(pdev);
- if (rc) {
- pr_err("%s: Cannot enable device %d-%d_%d\n",
- __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
+ (pdev->id.id != BCMA_CORE_80211))
return -ENODEV;
- }
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
if (!hw) {
@@ -1157,14 +1115,11 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
+ bcma_set_drvdata(pdev, hw);
memset(hw->priv, 0, sizeof(*wl));
- wl = brcms_attach(pdev->vendor, pdev->device,
- pci_resource_start(pdev, 0), pdev,
- pdev->irq);
-
+ wl = brcms_attach(pdev);
if (!wl) {
pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
__func__);
@@ -1173,16 +1128,23 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
-static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
+static int brcms_pci_suspend(struct pci_dev *pdev)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ return pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
- hw = pci_get_drvdata(pdev);
+ hw = bcma_get_drvdata(pdev);
wl = hw->priv;
if (!wl) {
wiphy_err(wl->wiphy,
- "brcms_suspend: pci_get_drvdata failed\n");
+ "brcms_suspend: bcma_get_drvdata failed\n");
return -ENODEV;
}
@@ -1191,25 +1153,14 @@ static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
wl->pub->hw_up = false;
spin_unlock_bh(&wl->lock);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- return pci_set_power_state(pdev, PCI_D3hot);
+ /* temporarily do suspend ourselves */
+ return brcms_pci_suspend(pdev->bus->host_pci);
}
-static int brcms_resume(struct pci_dev *pdev)
+static int brcms_pci_resume(struct pci_dev *pdev)
{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
int err = 0;
- u32 val;
-
- hw = pci_get_drvdata(pdev);
- wl = hw->priv;
- if (!wl) {
- wiphy_err(wl->wiphy,
- "wl: brcms_resume: pci_get_drvdata failed\n");
- return -ENODEV;
- }
+ uint val;
err = pci_set_power_state(pdev, PCI_D0);
if (err)
@@ -1227,24 +1178,28 @@ static int brcms_resume(struct pci_dev *pdev)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ return 0;
+}
+
+static int brcms_resume(struct bcma_device *pdev)
+{
/*
- * done. driver will be put in up state
- * in brcms_ops_add_interface() call.
+ * just do pci resume for now until bcma supports it.
*/
- return err;
+ return brcms_pci_resume(pdev->bus->host_pci);
}
-static struct pci_driver brcms_pci_driver = {
+static struct bcma_driver brcms_bcma_driver = {
.name = KBUILD_MODNAME,
- .probe = brcms_pci_probe,
+ .probe = brcms_bcma_probe,
.suspend = brcms_suspend,
.resume = brcms_resume,
.remove = __devexit_p(brcms_remove),
- .id_table = brcms_pci_id_table,
+ .id_table = brcms_coreid_table,
};
/**
- * This is the main entry point for the WL driver.
+ * This is the main entry point for the brcmsmac driver.
*
* This function determines if a device pointed to by pdev is a WL device,
* and if so, performs a brcms_attach() on it.
@@ -1259,26 +1214,24 @@ static int __init brcms_module_init(void)
brcm_msg_level = msglevel;
#endif /* BCMDBG */
- error = pci_register_driver(&brcms_pci_driver);
+ error = bcma_driver_register(&brcms_bcma_driver);
+ printk(KERN_ERR "%s: register returned %d\n", __func__, error);
if (!error)
return 0;
-
-
return error;
}
/**
- * This function unloads the WL driver from the system.
+ * This function unloads the brcmsmac driver from the system.
*
- * This function unconditionally unloads the WL driver module from the
+ * This function unconditionally unloads the brcmsmac driver module from the
* system.
*
*/
static void __exit brcms_module_exit(void)
{
- pci_unregister_driver(&brcms_pci_driver);
-
+ bcma_driver_unregister(&brcms_bcma_driver);
}
module_init(brcms_module_init);
@@ -1549,11 +1502,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
- *pbuf = kmalloc(len, GFP_ATOMIC);
+ *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
if (*pbuf == NULL)
goto fail;
- memcpy(*pbuf, pdata, len);
return 0;
}
}
@@ -1566,7 +1518,7 @@ fail:
}
/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
@@ -1606,7 +1558,7 @@ void brcms_ucode_free_buf(void *p)
/*
* checks validity of all firmware images loaded from user space
*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_check_firmwares(struct brcms_info *wl)
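Note: the PCI-to-BCMA conversion in this file reduces to the skeleton below; a minimal sketch with hypothetical example_* names, assuming the 2011-era <linux/bcma/bcma.h> API used by this patch (BCMA_CORE(), BCMA_CORETABLE_END, bcma_driver_register()/bcma_driver_unregister()) together with <linux/module.h>.

/* sketch: core-id match table plus driver registration, mirroring
 * brcms_coreid_table and brcms_bcma_driver above. */
static struct bcma_device_id example_id_table[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, example_id_table);

static int example_probe(struct bcma_device *core)
{
	dev_info(&core->dev, "802.11 core rev %d probed\n", core->id.rev);
	return 0;
}

static void example_remove(struct bcma_device *core)
{
	/* undo whatever example_probe() set up */
}

static struct bcma_driver example_driver = {
	.name = KBUILD_MODNAME,
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_id_table,
};

static int __init example_init(void)
{
	return bcma_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	bcma_driver_unregister(&example_driver);
}
module_exit(example_exit);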
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 6242f188b71..8f60419c37b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,8 +68,6 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
- /* regsva for unmap in brcms_free() */
- void __iomem *regsva; /* opaque chip registers virtual address */
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 36e3e063830..f7ed34034f8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -388,10 +388,13 @@ static u16 get_sifs(struct brcms_band *band)
*/
static bool brcms_deviceremoved(struct brcms_c_info *wlc)
{
+ u32 macctrl;
+
if (!wlc->hw->clk)
return ai_deviceremoved(wlc->hw->sih);
- return (R_REG(&wlc->hw->regs->maccontrol) &
- (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
+ macctrl = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(maccontrol));
+ return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
}
/* sum the individual fifo tx pending packet counts */
@@ -582,17 +585,15 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
bool shortslot)
{
- struct d11regs __iomem *regs;
-
- regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
if (shortslot) {
/* 11g short slot: 11a timing */
- W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
} else {
/* 11g long slot: 11b timing */
- W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
}
}
@@ -672,24 +673,22 @@ static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
const struct d11init *inits)
{
+ struct bcma_device *core = wlc_hw->d11core;
int i;
- u8 __iomem *base;
- u8 __iomem *addr;
+ uint offset;
u16 size;
u32 value;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- base = (u8 __iomem *)wlc_hw->regs;
-
for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
size = le16_to_cpu(inits[i].size);
- addr = base + le16_to_cpu(inits[i].addr);
+ offset = le16_to_cpu(inits[i].addr);
value = le32_to_cpu(inits[i].value);
if (size == 2)
- W_REG((u16 __iomem *)addr, value);
+ bcma_write16(core, offset, value);
else if (size == 4)
- W_REG((u32 __iomem *)addr, value);
+ bcma_write32(core, offset, value);
else
break;
}
@@ -739,6 +738,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
}
}
+static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
+{
+ struct bcma_device *core = wlc_hw->d11core;
+ u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
+
+ bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
+}
+
static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -747,17 +754,17 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
if (OFF == clk) { /* clear gmode bit, put phy into reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
- (SICF_PRST | SICF_FGC));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
+ (SICF_PRST | SICF_FGC));
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
udelay(1);
} else { /* take phy out of reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
udelay(1);
}
@@ -778,9 +785,14 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
/* set gmode core flag */
- if (wlc_hw->sbclk && !wlc_hw->noreset)
- ai_core_cflags(wlc_hw->sih, SICF_GMODE,
- ((bandunit == 0) ? SICF_GMODE : 0));
+ if (wlc_hw->sbclk && !wlc_hw->noreset) {
+ u32 gmode = 0;
+
+ if (bandunit == 0)
+ gmode = SICF_GMODE;
+
+ brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
+ }
}
/* switch to new band but leave it inactive */
@@ -788,10 +800,12 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
+ u32 macctrl;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ macctrl = bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol));
+ WARN_ON((macctrl & MCTL_EN_MAC) != 0);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
@@ -955,8 +969,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
brcms_c_txfifo_complete(wlc, queue, 1);
if (lastframe) {
- p->next = NULL;
- p->prev = NULL;
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
@@ -984,7 +996,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
struct brcms_c_info *wlc = wlc_hw->wlc;
- struct d11regs __iomem *regs;
+ struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
uint n = 0;
@@ -997,18 +1009,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
txs = &txstatus;
- regs = wlc_hw->regs;
+ core = wlc_hw->d11core;
*fatal = false;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
while (!(*fatal)
- && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
+ && (s1 & TXS_V)) {
if (s1 == 0xffffffff) {
wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
wlc_hw->unit, __func__);
return morepending;
}
-
- s2 = R_REG(&regs->frmtxstatus2);
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
@@ -1021,6 +1033,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
/* !give others some time to run! */
if (++n >= max_tx_num)
break;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
}
if (*fatal)
@@ -1065,12 +1078,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
}
}
-static struct dma64regs __iomem *
-dmareg(struct brcms_hardware *hw, uint direction, uint fifonum)
+static uint
+dmareg(uint direction, uint fifonum)
{
if (direction == DMA_TX)
- return &(hw->regs->fifo64regs[fifonum].dmaxmt);
- return &(hw->regs->fifo64regs[fifonum].dmarcv);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
}
static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
@@ -1096,9 +1109,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_BK_FIFO (TX AC Background data packets)
* RX: RX_FIFO (RX data packets)
*/
- wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
- (wme ? dmareg(wlc_hw, DMA_TX, 0) :
- NULL), dmareg(wlc_hw, DMA_RX, 0),
+ wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ (wme ? dmareg(DMA_TX, 0) : 0),
+ dmareg(DMA_RX, 0),
(wme ? NTXD : 0), NRXD,
RXBUFSZ, -1, NRXBUFPOST,
BRCMS_HWRXOFF, &brcm_msg_level);
@@ -1110,8 +1123,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* (legacy) TX_DATA_FIFO (TX data packets)
* RX: UNUSED
*/
- wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 1), NULL,
+ wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 1), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[1]);
@@ -1121,8 +1134,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VI_FIFO (TX AC Video data packets)
* RX: UNUSED
*/
- wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 2), NULL,
+ wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 2), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[2]);
@@ -1131,9 +1144,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VO_FIFO (TX AC Voice data packets)
* (legacy) TX_CTL_FIFO (TX control & mgmt packets)
*/
- wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 3),
- NULL, NTXD, 0, 0, -1,
+ wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 3),
+ 0, NTXD, 0, 0, -1,
0, 0, &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[3]);
/* Cleaner to leave this as if with AP defined */
@@ -1207,7 +1220,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
/* control chip clock to save power, enable dynamic clock or force fast clock */
static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
{
- if (wlc_hw->sih->cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
* on backplane, but mac core will still run on ALP(not HT) when
* it enters powersave mode, which means the FCA bit may not be
@@ -1216,29 +1229,33 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
if (wlc_hw->clk) {
if (mode == CLK_FAST) {
- OR_REG(&wlc_hw->regs->clk_ctl_st,
- CCS_FORCEHT);
+ bcma_set32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
+ CCS_FORCEHT);
udelay(64);
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL) == 0),
- PMU_MAX_TRANSITION_DLY);
- WARN_ON(!(R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL));
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ WARN_ON(!(bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL));
} else {
- if ((wlc_hw->sih->pmurev == 0) &&
- (R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL)
- == 0),
- PMU_MAX_TRANSITION_DLY);
- AND_REG(&wlc_hw->regs->clk_ctl_st,
+ if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
+ (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ (CCS_FORCEHT | CCS_HTAREQ)))
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ offsetof(struct d11regs,
+ clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ bcma_mask32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
~CCS_FORCEHT);
}
}
@@ -1253,7 +1270,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
/* check fast clock is available (if core is not in reset) */
if (wlc_hw->forcefastclk && wlc_hw->clk)
- WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
+ WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
SISF_FCLKA));
/*
@@ -1370,7 +1387,8 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
maccontrol |= MCTL_INFRA;
}
- W_REG(&wlc_hw->regs->maccontrol, maccontrol);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
+ maccontrol);
}
/* set or clear maccontrol bits */
@@ -1464,7 +1482,7 @@ static void
brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
const u8 *addr)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 mac_l;
u16 mac_m;
u16 mac_h;
@@ -1472,38 +1490,36 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
wlc_hw->unit);
- regs = wlc_hw->regs;
mac_l = addr[0] | (addr[1] << 8);
mac_m = addr[2] | (addr[3] << 8);
mac_h = addr[4] | (addr[5] << 8);
/* enter the MAC addr into the RXE match registers */
- W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
- W_REG(&regs->rcm_mat_data, mac_l);
- W_REG(&regs->rcm_mat_data, mac_m);
- W_REG(&regs->rcm_mat_data, mac_h);
-
+ bcma_write16(core, D11REGOFFS(rcm_ctl),
+ RCM_INC_DATA | match_reg_offset);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
}
void
brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
void *buf)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 word;
__le32 word_le;
__be32 word_be;
bool be_bit;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
- W_REG(&regs->tplatewrptr, offset);
+ bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
/* if MCTL_BIGEND bit set in mac control register,
* the chip swaps data in fifo, as well as data in
* template ram
*/
- be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
+ be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
while (len > 0) {
memcpy(&word, buf, sizeof(u32));
@@ -1516,7 +1532,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
word = *(u32 *)&word_le;
}
- W_REG(&regs->tplatewrdata, word);
+ bcma_write32(core, D11REGOFFS(tplatewrdata), word);
buf = (u8 *) buf + sizeof(u32);
len -= sizeof(u32);
@@ -1527,18 +1543,20 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
{
wlc_hw->band->CWmin = newmin;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmin);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMIN);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
}
static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
{
wlc_hw->band->CWmax = newmax;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmax);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMAX);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
}
void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
@@ -1704,17 +1722,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr), ~0, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 4);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 4);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
}
@@ -1728,18 +1746,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
return;
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
else
- ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
}
void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
{
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
else
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
}
void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
@@ -1759,7 +1777,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
NREV_LE(wlc_hw->band->phyrev, 4)) {
/* Set the PHY bandwidth */
- ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
+ brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
udelay(1);
@@ -1767,13 +1785,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_reset(wlc_hw);
/* reset the PHY */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
- (SICF_PRST | SICF_PCLKE));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
+ (SICF_PRST | SICF_PCLKE));
phy_in_reset = true;
} else {
- ai_core_cflags(wlc_hw->sih,
- (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
- (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
+ brcms_b_core_ioctl(wlc_hw,
+ (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
+ (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
}
udelay(2);
@@ -1790,8 +1808,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
u32 macintmask;
/* Enable the d11 core before accessing it */
- if (!ai_iscoreup(wlc_hw->sih)) {
- ai_core_reset(wlc_hw->sih, 0, 0);
+ if (!bcma_core_is_enabled(wlc_hw->d11core)) {
+ bcma_core_enable(wlc_hw->d11core, 0);
brcms_c_mctrl_reset(wlc_hw);
}
@@ -1817,7 +1835,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
brcms_intrsrestore(wlc->wl, macintmask);
/* ucode should still be suspended.. */
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0);
}
static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
@@ -1845,7 +1864,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
uint b2 = boardrev & 0xf;
/* boards from other vendors are always considered valid */
- if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+ if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
return true;
/* do some boardrev sanity checks when boardvendor is Broadcom */
@@ -1917,7 +1936,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
{
bool v, clk, xtal;
- u32 resetbits = 0, flags = 0;
+ u32 flags = 0;
xtal = wlc_hw->sbclk;
if (!xtal)
@@ -1934,22 +1953,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
flags |= SICF_PCLKE;
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+
+ bcma_core_enable(wlc_hw->d11core, flags);
brcms_c_mctrl_reset(wlc_hw);
}
- v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
+ v = ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
/* put core back into reset */
if (!clk)
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
if (!xtal)
brcms_b_xtal(wlc_hw, OFF);
@@ -1973,25 +1992,21 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
*/
void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
{
- struct d11regs __iomem *regs;
uint i;
bool fastclk;
- u32 resetbits = 0;
if (flags == BRCMS_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
/* reset the dma engines except first time thru */
- if (ai_iscoreup(wlc_hw->sih)) {
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
for (i = 0; i < NFIFO; i++)
if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
@@ -2029,14 +2044,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
* they may touch chipcommon as well.
*/
wlc_hw->clk = false;
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+ bcma_core_enable(wlc_hw->d11core, flags);
wlc_hw->clk = true;
if (wlc_hw->band && wlc_hw->band->pi)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
brcms_c_mctrl_reset(wlc_hw);
- if (wlc_hw->sih->cccaps & CC_CAP_PMU)
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
brcms_b_phy_reset(wlc_hw);
@@ -2057,7 +2072,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
*/
static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 fifo_nu;
u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
u16 txfifo_def, txfifo_def1;
@@ -2078,11 +2093,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
txfifo_cmd =
TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
- W_REG(&regs->xmtfifodef, txfifo_def);
- W_REG(&regs->xmtfifodef1, txfifo_def1);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
+ bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
}
@@ -2117,27 +2132,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
+ (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x2082);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x5341);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else { /* 120Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x8889);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
}
} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
} else { /* 80Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
}
}
}
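
Note: the tsf_clk_frac_h/tsf_clk_frac_l pairs written above look consistent with the TSF clock-period fraction being stored as roughly 2^26 / f_MHz, split across a low and a high 16-bit register; for the 126 MHz case, 0x8:0x2082 combines to 0x82082 = 532610, and 67108864 / 126 is about 532610. This encoding is inferred from the constants, not documented in the patch, and the helper below is a hypothetical decode sketch under that assumption.

/* Hypothetical decode helper: assumes tsf_clk_frac holds ~2^26 / f_MHz,
 * split into a low and a high 16-bit register (e.g. 0x8:0x2082 -> 126 MHz).
 */
static u32 tsf_frac_to_mhz(u16 frac_h, u16 frac_l)
{
	u32 frac = ((u32)frac_h << 16) | frac_l;

	return DIV_ROUND_CLOSEST(1U << 26, frac);	/* 0x82082 -> 126 */
}
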
@@ -2146,11 +2161,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
static void brcms_c_gpio_init(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
u32 gc, gm;
- regs = wlc_hw->regs;
-
/* use GPIO select 0 to get all gpio signals from the gpio out reg */
brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
@@ -2181,10 +2193,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
* The board itself is powered by these GPIOs
* (when not sending pattern) so set them high
*/
- OR_REG(&regs->psm_gpio_oe,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
- OR_REG(&regs->psm_gpio_out,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
/* Enable antenna diversity, use 2x4 mode */
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
@@ -2211,7 +2223,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
const __le32 ucode[], const size_t nbytes)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
uint i;
uint count;
@@ -2219,10 +2231,11 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
count = (nbytes / sizeof(u32));
- W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
for (i = 0; i < count; i++)
- W_REG(&regs->objdata, le32_to_cpu(ucode[i]));
+ bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
}
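
The brcms_ucode_write() conversion above streams the microcode image through the d11 object-memory window: objaddr selects the microcode memory (OBJADDR_UCM_SEL) with auto-increment enabled, the throwaway read of objaddr flushes the posted write, and each 32-bit word is then pushed through objdata. A minimal sketch of the same pattern for an arbitrary word buffer; the helper name is hypothetical and not part of this patch.

/* Hypothetical sketch of the auto-increment object-memory upload used by
 * brcms_ucode_write(): select the target memory once, then stream words.
 */
static void d11_objmem_upload(struct bcma_device *core, u32 sel,
			      const u32 *words, size_t nwords)
{
	size_t i;

	/* select memory, enable address auto-increment */
	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_AUTO_INC | sel);
	(void)bcma_read32(core, D11REGOFFS(objaddr));	/* flush posted write */

	for (i = 0; i < nwords; i++)
		bcma_write32(core, D11REGOFFS(objdata), words[i]);
}
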
@@ -2288,7 +2301,7 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
bool fatal = false;
uint unit;
uint intstatus, idx;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
unit = wlc_hw->unit;
@@ -2296,7 +2309,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
for (idx = 0; idx < NFIFO; idx++) {
/* read intstatus register and ignore any non-error bits */
intstatus =
- R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
+ bcma_read32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus)) &
+ I_ERRORS;
if (!intstatus)
continue;
@@ -2341,8 +2356,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
break;
} else
- W_REG(&regs->intctrlregs[idx].intstatus,
- intstatus);
+ bcma_write32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus),
+ intstatus);
}
}
@@ -2350,28 +2366,7 @@ void brcms_c_intrson(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
wlc->macintmask = wlc->defmacintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-/*
- * callback for siutils.c, which has only wlc handler, no wl they both check
- * up, not only because there is no need to off/restore d11 interrupt but also
- * because per-port code may require sync with valid interrupt.
- */
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
-{
- if (!wlc->hw->up)
- return 0;
-
- return brcms_intrsoff(wlc->wl);
-}
-
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
- if (!wlc->hw->up)
- return;
-
- brcms_intrsrestore(wlc->wl, macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
@@ -2384,8 +2379,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
macintmask = wlc->macintmask; /* isr can still happen */
- W_REG(&wlc_hw->regs->macintmask, 0);
- (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
udelay(1); /* ensure int line is no longer driven */
wlc->macintmask = 0;
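
The write of 0 to macintmask followed by a throwaway read of the same register is the usual posted-write flush: the readback forces the write out to the device before the udelay() that lets the interrupt line settle. A generic sketch of the idiom, with a hypothetical wrapper name, for any 32-bit d11 register:

/* Hypothetical wrapper around the write-then-readback pattern above: make
 * sure the register write has reached the device before continuing.
 */
static void d11_write32_flush(struct bcma_device *core, u16 offset, u32 val)
{
	bcma_write32(core, offset, val);
	(void)bcma_read32(core, offset);	/* readback flushes the posted write */
}
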
@@ -2400,7 +2395,7 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
return;
wlc->macintmask = macintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
/* assumes that the d11 MAC is enabled */
@@ -2512,11 +2507,11 @@ brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 macintstatus;
/* macintstatus includes a DMA interrupt summary bit */
- macintstatus = R_REG(&regs->macintstatus);
+ macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
macintstatus);
@@ -2543,12 +2538,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* consequences
*/
/* turn off the interrupts */
- W_REG(&regs->macintmask, 0);
- (void)R_REG(&regs->macintmask); /* sync readback */
+ bcma_write32(core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(core, D11REGOFFS(macintmask));
wlc->macintmask = 0;
/* clear device interrupts */
- W_REG(&regs->macintstatus, macintstatus);
+ bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
/* MI_DMAINT is indication of non-zero intstatus */
if (macintstatus & MI_DMAINT)
@@ -2557,8 +2552,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* RX_FIFO. If MI_DMAINT is set, assume it
* is set and clear the interrupt.
*/
- W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
- DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
+ DEF_RXINTMASK);
return macintstatus;
}
@@ -2621,7 +2616,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
struct wiphy *wiphy = wlc->wiphy;
@@ -2638,7 +2633,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
/* force the core awake */
brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
@@ -2650,7 +2645,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
WARN_ON(!(mc & MCTL_PSM_RUN));
WARN_ON(!(mc & MCTL_EN_MAC));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
if (mi == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2661,21 +2656,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
- SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
+ SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
BRCMS_MAX_MAC_SUSPEND);
- if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
+ if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
" and MI_MACSSPNDD is still not on.\n",
wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
"psm_brc 0x%04x\n", wlc_hw->unit,
- R_REG(&regs->psmdebug),
- R_REG(&regs->phydebug),
- R_REG(&regs->psm_brc));
+ bcma_read32(core, D11REGOFFS(psmdebug)),
+ bcma_read32(core, D11REGOFFS(phydebug)),
+ bcma_read16(core, D11REGOFFS(psm_brc)));
}
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2690,7 +2685,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
void brcms_c_enable_mac(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
@@ -2703,20 +2698,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
if (wlc_hw->mac_suspend_depth > 0)
return;
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(mc & MCTL_EN_MAC);
WARN_ON(!(mc & MCTL_PSM_RUN));
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
- W_REG(&regs->macintstatus, MI_MACSSPNDD);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(!(mc & MCTL_EN_MAC));
WARN_ON(!(mc & MCTL_PSM_RUN));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
WARN_ON(mi & MI_MACSSPNDD);
brcms_c_ucode_wake_override_clear(wlc_hw,
@@ -2733,55 +2728,53 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 w, val;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* Validate dchip register access */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- w = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ w = bcma_read32(core, D11REGOFFS(objdata));
/* Can we write and read back a 32bit register? */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0xaa5555aa);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0xaa5555aa) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0xaa5555aa\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0x55aaaa55);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0x55aaaa55) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0x55aaaa55\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, w);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), w);
/* clear CFPStart */
- W_REG(&regs->tsf_cfpstart, 0);
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
- w = R_REG(&regs->maccontrol);
+ w = bcma_read32(core, D11REGOFFS(maccontrol));
if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
(w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
@@ -2798,38 +2791,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 tmp;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
tmp = 0;
- regs = wlc_hw->regs;
if (on) {
- if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
- CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
- (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ CCS_ERSRC_REQ_HT |
+ CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
+ CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
- if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
- (CCS_ERSRC_AVAIL_HT))
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
+ if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
" PLL failed\n", __func__);
} else {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ tmp | CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL)) !=
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
if ((tmp &
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
!=
@@ -2843,8 +2836,9 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
* be requesting it; so we'll deassert the request but
* not wait for status to comply.
*/
- AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
- tmp = R_REG(&regs->clk_ctl_st);
+ bcma_mask32(core, D11REGOFFS(clk_ctl_st),
+ ~CCS_ERSRC_REQ_PHYPLL);
+ (void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
}
}
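
brcms_b_core_phypll_ctl() requests clock resources by setting CCS_ERSRC_REQ_* bits in clk_ctl_st and then spins (SPINWAIT) until the matching CCS_ERSRC_AVAIL_* bits report the resource as up; turning the PLL off only clears the request bit and does not wait. A condensed sketch of the request-and-wait half, assuming the same register and bit definitions; the helper itself is hypothetical.

/* Hypothetical condensed form of the "request resource, poll for avail"
 * sequence in brcms_b_core_phypll_ctl(); returns true when the resource
 * became available within the timeout.
 */
static bool d11_request_clk_resource(struct bcma_device *core,
				     u32 req_bits, u32 avail_bits)
{
	bcma_set32(core, D11REGOFFS(clk_ctl_st), req_bits);
	SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) & avail_bits) !=
		 avail_bits, PHYPLL_WAIT_US);

	return (bcma_read32(core, D11REGOFFS(clk_ctl_st)) & avail_bits) ==
	       avail_bits;
}
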
@@ -2872,7 +2866,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_ctl(wlc_hw, false);
wlc_hw->clk = false;
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
}
@@ -2896,35 +2890,31 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
static u16
brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
- u16 v;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- v = R_REG(objdata_hi);
- else
- v = R_REG(objdata_lo);
+ objoff += 2;
- return v;
+ return bcma_read16(core, objoff);
}
static void
brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- W_REG(objdata_hi, v);
- else
- W_REG(objdata_lo, v);
+ objoff += 2;
+
+ bcma_write16(core, objoff, v);
}
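
brcms_b_read_objmem()/brcms_b_write_objmem() address shared memory in 32-bit object words (offset >> 2 goes into objaddr) but transfer 16 bits at a time: bit 1 of the byte offset selects the low or high half of the 32-bit objdata window, which is what the objoff += 2 adjustment implements. A sketch of how a full 32-bit word could be read on top of these helpers; such a 32-bit helper is not part of this patch.

/* Hypothetical: read one aligned 32-bit object-memory word using the
 * 16-bit accessor above (low halfword at offset, high halfword at offset+2).
 */
static u32 brcms_b_read_objmem32(struct brcms_hardware *wlc_hw, uint offset,
				 u32 sel)
{
	u16 lo = brcms_b_read_objmem(wlc_hw, offset, sel);
	u16 hi = brcms_b_read_objmem(wlc_hw, offset + 2, sel);

	return ((u32)hi << 16) | lo;
}
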
/*
@@ -3010,14 +3000,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
/* write retry limit to SCR, shouldn't need to suspend */
if (wlc_hw->up) {
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
}
}
@@ -3064,7 +3054,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
return false;
/* disallow PS when one of these meets when not scanning */
- if (wlc->monitor)
+ if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
return false;
if (cfg->associated) {
@@ -3199,9 +3189,9 @@ void brcms_c_init_scb(struct scb *scb)
static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 sflags;
- uint bcnint_us;
+ u32 bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
int err = 0;
@@ -3209,8 +3199,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
struct wiphy *wiphy = wlc->wiphy;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
- regs = wlc_hw->regs;
-
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* reset PSM */
@@ -3223,20 +3211,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
fifosz_fixup = true;
/* let the PSM run to the suspended state, set mode to BSS STA */
- W_REG(&regs->macintstatus, -1);
+ bcma_write32(core, D11REGOFFS(macintstatus), -1);
brcms_b_mctrl(wlc_hw, ~0,
(MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
/* wait for ucode to self-suspend after auto-init */
- SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
- 1000 * 1000);
- if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
+ SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
+ MI_MACSSPNDD) == 0), 1000 * 1000);
+ if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
"suspend!\n", wlc_hw->unit);
brcms_c_gpio_init(wlc);
- sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
+ sflags = bcma_aread32(core, BCMA_IOST);
if (D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3300,7 +3288,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
wlc_hw->xmtfifo_sz[i], i);
/* make sure we can still talk to the mac */
- WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
+ WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
/* band-specific inits done by wlc_bsinit() */
@@ -3309,7 +3297,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
/* enable one rx interrupt per received frame */
- W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
+ bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
/* set the station mode (BSS STA) */
brcms_b_mctrl(wlc_hw,
@@ -3318,19 +3306,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
/* set up Beacon interval */
bcnint_us = 0x8000 << 10;
- W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
- W_REG(&regs->tsf_cfpstart, bcnint_us);
- W_REG(&regs->macintstatus, MI_GP1);
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ (bcnint_us << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
/* write interrupt mask */
- W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
+ DEF_RXINTMASK);
/* allow the MAC to control the PHY clock (dynamic on/off) */
brcms_b_macphyclk_set(wlc_hw, ON);
/* program dynamic clock control fast powerup delay register */
wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
- W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
+ bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
/* tell the ucode the corerev */
brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
@@ -3343,19 +3333,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
machwcap >> 16) & 0xffff));
/* write retry limits to SCR, this done after PSM init */
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->SRL);
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->LRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
/* write rate fallback retry limits */
brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
- AND_REG(&regs->ifs_ctl, 0x0FFF);
- W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
+ bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
+ bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
/* init the tx dma engines */
for (i = 0; i < NFIFO; i++) {
@@ -3584,29 +3576,31 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
}
/*
- * Set or clear maccontrol bits MCTL_PROMISC, MCTL_BCNS_PROMISC and
- * MCTL_KEEPCONTROL
+ * Set or clear filtering related maccontrol bits based on
+ * specified filter flags
*/
-static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
{
u32 promisc_bits = 0;
- if (wlc->bcnmisc_monitor)
+ wlc->filter_flags = filter_flags;
+
+ if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+ promisc_bits |= MCTL_PROMISC;
+
+ if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
promisc_bits |= MCTL_BCNS_PROMISC;
- if (wlc->monitor)
- promisc_bits |=
- MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL;
+ if (filter_flags & FIF_FCSFAIL)
+ promisc_bits |= MCTL_KEEPBADFCS;
- brcms_b_mctrl(wlc->hw,
- MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL,
- promisc_bits);
-}
+ if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
+ promisc_bits |= MCTL_KEEPCONTROL;
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
- wlc->bcnmisc_monitor = promisc;
- brcms_c_mac_promisc(wlc);
+ brcms_b_mctrl(wlc->hw,
+ MCTL_PROMISC | MCTL_BCNS_PROMISC |
+ MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
+ promisc_bits);
}
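
The rewritten brcms_c_mac_promisc() takes mac80211 FIF_* filter flags directly and maps them to d11 maccontrol bits: FIF_PROMISC_IN_BSS/FIF_OTHER_BSS select MCTL_PROMISC, FIF_BCN_PRBRESP_PROMISC selects MCTL_BCNS_PROMISC, FIF_FCSFAIL selects MCTL_KEEPBADFCS, and FIF_CONTROL/FIF_PSPOLL select MCTL_KEEPCONTROL. A hedged example of how a configure_filter path might drive it; the handler shown is illustrative only, the driver's real callback lives in mac80211_if.c and is not part of the hunk above.

/* Illustrative only: feed mac80211 filter flags straight into
 * brcms_c_mac_promisc(), keeping just the flags the low-level code honours.
 */
static void example_configure_filter(struct brcms_c_info *wlc,
				     unsigned int *total_flags)
{
	*total_flags &= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_FCSFAIL |
			FIF_CONTROL | FIF_PSPOLL;

	brcms_c_mac_promisc(wlc, *total_flags);
}
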
/*
@@ -3636,9 +3630,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
} else {
/* disable an active IBSS if we are not on the home channel */
}
-
- /* update the various promisc bits */
- brcms_c_mac_promisc(wlc);
}
static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3813,7 +3804,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
- v1 = R_REG(&wlc->regs->maccontrol);
+ v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
v2 = MCTL_WAKE;
if (hps)
v2 |= MCTL_HPS;
@@ -4132,7 +4123,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
acp_shm.cwmax = params->cw_max;
acp_shm.cwcur = acp_shm.cwmin;
acp_shm.bslots =
- R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
+ bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
+ acp_shm.cwcur;
acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
/* Indicate the new params to the ucode */
acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
@@ -4440,21 +4432,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
* initialize software state for each core and band
* put the whole chip in reset(driver down state), no clock
*/
-static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
- uint unit, bool piomode, void __iomem *regsva,
- struct pci_dev *btparam)
+static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
+ uint unit, bool piomode)
{
struct brcms_hardware *wlc_hw;
- struct d11regs __iomem *regs;
char *macaddr = NULL;
uint err = 0;
uint j;
bool wme = false;
struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
+ struct pci_dev *pcidev = core->bus->host_pci;
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
- device);
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ pcidev->vendor,
+ pcidev->device);
wme = true;
@@ -4471,7 +4463,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
* Do the hardware portion of the attach. Also initialize software
* state that depends on the particular hardware we are running.
*/
- wlc_hw->sih = ai_attach(regsva, btparam);
+ wlc_hw->sih = ai_attach(core->bus);
if (wlc_hw->sih == NULL) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
unit);
@@ -4480,25 +4472,19 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
}
/* verify again the device is supported */
- if (!brcms_c_chipmatch(vendor, device)) {
+ if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
- unit, vendor, device);
+ unit, pcidev->vendor, pcidev->device);
err = 12;
goto fail;
}
- wlc_hw->vendorid = vendor;
- wlc_hw->deviceid = device;
-
- /* set bar0 window to point at D11 core */
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- wlc_hw->corerev = ai_corerev(wlc_hw->sih);
-
- regs = wlc_hw->regs;
+ wlc_hw->vendorid = pcidev->vendor;
+ wlc_hw->deviceid = pcidev->device;
- wlc->regs = wlc_hw->regs;
+ wlc_hw->d11core = core;
+ wlc_hw->corerev = core->id.rev;
/* validate chip, chiprev and corerev */
if (!brcms_c_isgoodchip(wlc_hw)) {
@@ -4533,8 +4519,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->boardrev = (u16) j;
if (!brcms_c_validboardtype(wlc_hw)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
- "board type (0x%x)" " or revision level (0x%x)\n",
- unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
+ "board type (0x%x)" " or revision level (0x%x)\n",
+ unit, ai_get_boardtype(wlc_hw->sih),
+ wlc_hw->boardrev);
err = 15;
goto fail;
}
@@ -4555,7 +4542,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
else
wlc_hw->_nbands = 1;
- if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4587,16 +4574,14 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
sha_params.corerev = wlc_hw->corerev;
sha_params.vid = wlc_hw->vendorid;
sha_params.did = wlc_hw->deviceid;
- sha_params.chip = wlc_hw->sih->chip;
- sha_params.chiprev = wlc_hw->sih->chiprev;
- sha_params.chippkg = wlc_hw->sih->chippkg;
+ sha_params.chip = ai_get_chip_id(wlc_hw->sih);
+ sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
+ sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
sha_params.sromrev = wlc_hw->sromrev;
- sha_params.boardtype = wlc_hw->sih->boardtype;
+ sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
sha_params.boardrev = wlc_hw->boardrev;
- sha_params.boardvendor = wlc_hw->sih->boardvendor;
sha_params.boardflags = wlc_hw->boardflags;
sha_params.boardflags2 = wlc_hw->boardflags2;
- sha_params.buscorerev = wlc_hw->sih->buscorerev;
/* alloc and save pointer to shared phy state area */
wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
@@ -4618,9 +4603,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->band->bandunit = j;
wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
- wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+ wlc->core->coreidx = core->core_index;
- wlc_hw->machwcap = R_REG(&regs->machwcap);
+ wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
@@ -4629,7 +4614,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Get a phy for this band */
wlc_hw->band->pi =
- wlc_phy_attach(wlc_hw->phy_sh, regs,
+ wlc_phy_attach(wlc_hw->phy_sh, core,
wlc_hw->band->bandtype,
wlc->wiphy);
if (wlc_hw->band->pi == NULL) {
@@ -4703,10 +4688,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Match driver "down" state */
ai_pci_down(wlc_hw->sih);
- /* register sb interrupt callback functions */
- ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
- (void *)brcms_c_wlintrsrestore, NULL, wlc);
-
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@@ -4737,10 +4718,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
goto fail;
}
- BCMMSG(wlc->wiphy,
- "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- wlc_hw->deviceid, wlc_hw->_nbands,
- wlc_hw->sih->boardtype, macaddr);
+ BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+ wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
+ macaddr);
return err;
@@ -4978,7 +4958,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
* and per-port interrupt object may has been freed. this must
* be done before sb core switch
*/
- ai_deregister_intr_callback(wlc_hw->sih);
ai_pci_sleep(wlc_hw->sih);
}
@@ -5073,13 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
ai_pci_fixcfg(wlc_hw->sih);
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
/*
* Inform phy that a POR reset has occurred so
@@ -5091,7 +5068,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
- && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5186,7 +5163,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
- && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5323,9 +5300,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
} else {
/* Reset and disable the core */
- if (ai_iscoreup(wlc_hw->sih)) {
- if (R_REG(&wlc_hw->regs->maccontrol) &
- MCTL_EN_MAC)
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
+ if (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
callbacks += brcms_reset(wlc_hw->wlc->wl);
brcms_c_coredisable(wlc_hw);
@@ -7482,11 +7459,11 @@ static void
brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
/* read the tsf timer low, then high to get an atomic read */
- *tsf_l_ptr = R_REG(&regs->tsf_timerlow);
- *tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
+ *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
+ *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
}
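
brcms_b_read_tsf() reads tsf_timerlow first because, per the comment, reading the low word presumably latches a consistent snapshot of the high word; the two halves can then be combined into a 64-bit TSF value. A trivial usage sketch; the combining helper is hypothetical.

/* Hypothetical caller combining the two 32-bit halves into a 64-bit TSF. */
static u64 brcms_b_read_tsf64(struct brcms_hardware *wlc_hw)
{
	u32 tsf_l, tsf_h;

	brcms_b_read_tsf(wlc_hw, &tsf_l, &tsf_h);
	return ((u64)tsf_h << 32) | tsf_l;
}
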
/*
@@ -8074,14 +8051,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
len = p->len;
if (rxh->RxStatus1 & RXS_FCSERR) {
- if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
- wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
- " tossing\n");
- goto toss;
- } else {
- wiphy_err(wlc->wiphy, "RCSERR!!!\n");
+ if (!(wlc->filter_flags & FIF_FCSFAIL))
goto toss;
- }
}
/* check received pkt has at least frame control field */
@@ -8165,7 +8136,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
{
u32 macintstatus;
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc->wiphy;
if (brcms_deviceremoved(wlc)) {
@@ -8201,7 +8172,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
BCMMSG(wlc->wiphy, "end of ATIM window\n");
- OR_REG(&regs->maccommand, wlc->qvalid);
+ bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
wlc->qvalid = 0;
}
@@ -8219,17 +8190,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
if (macintstatus & MI_GP0) {
wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
- "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
+ "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
- __func__, wlc_hw->sih->chip,
- wlc_hw->sih->chiprev);
+ __func__, ai_get_chip_id(wlc_hw->sih),
+ ai_get_chiprev(wlc_hw->sih));
brcms_fatal_error(wlc_hw->wlc->wl);
}
/* gptimer timeout */
if (macintstatus & MI_TO)
- W_REG(&regs->gptimer, 0);
+ bcma_write32(core, D11REGOFFS(gptimer), 0);
if (macintstatus & MI_RFDISABLE) {
BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
@@ -8251,13 +8222,11 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc->hw->d11core;
u16 chanspec;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- regs = wlc->regs;
-
/*
* This will happen if a big-hammer was executed. In
* that case, we want to go back to the channel that
@@ -8287,8 +8256,8 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
* update since init path would reset
* to default value
*/
- W_REG(&regs->tsf_cfprep,
- (bi << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ bi << CFPREP_CBI_SHIFT);
/* Update maccontrol PM related bits */
brcms_c_set_ps_ctrl(wlc);
@@ -8318,7 +8287,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
brcms_c_bsinit(wlc);
/* Enable EDCF mode (while the MAC is suspended) */
- OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
+ bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
brcms_c_edcf_setparams(wlc, false);
/* Init precedence maps for empty FIFOs */
@@ -8342,7 +8311,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
brcms_c_txflowcontrol_reset(wlc);
/* enable the RF Disable Delay timer */
- W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
+ bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
/*
* Initialize WME parameters; if they haven't been set by some other
@@ -8362,9 +8331,8 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
* The common driver entry routine. Error codes should be unique
*/
struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr)
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr)
{
struct brcms_c_info *wlc;
uint err = 0;
@@ -8372,7 +8340,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
+ wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
@@ -8399,8 +8367,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
* low level attach steps(all hw accesses go
* inside, no more in rest of the attach)
*/
- err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
- btparam);
+ err = brcms_b_attach(wlc, core, unit, piomode);
if (err)
goto fail;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index 251c350b316..adb136ec1f0 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -334,7 +334,7 @@ struct brcms_hardware {
u32 machwcap_backup; /* backup of machwcap */
struct si_pub *sih; /* SI handle (cookie for siutils calls) */
- struct d11regs __iomem *regs; /* pointer to device registers */
+ struct bcma_device *d11core; /* pointer to 802.11 core */
struct phy_shim_info *physhim; /* phy shim layer handler */
struct shared_phy *phy_sh; /* pointer to shared phy state */
struct brcms_hw_band *band;/* pointer to active per-band state */
@@ -400,7 +400,6 @@ struct brcms_txq_info {
*
* pub: pointer to driver public state.
* wl: pointer to specific private state.
- * regs: pointer to device registers.
* hw: HW related state.
* clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
* fastpwrup_dly: time in us needed to bring up d11 fast clock.
@@ -477,7 +476,6 @@ struct brcms_txq_info {
struct brcms_c_info {
struct brcms_pub *pub;
struct brcms_info *wl;
- struct d11regs __iomem *regs;
struct brcms_hardware *hw;
/* clock */
@@ -519,8 +517,7 @@ struct brcms_c_info {
struct brcms_timer *radio_timer;
/* promiscuous */
- bool monitor;
- bool bcnmisc_monitor;
+ uint filter_flags;
/* driver feature */
bool _rifs;
@@ -658,8 +655,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
#endif
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
- bool promisc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
extern void brcms_c_send_q(struct brcms_c_info *wlc);
extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
uint *fifo);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
index 0bcb2679204..7fad6dc1925 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
@@ -139,6 +139,9 @@
#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
#define SRSH_PI_SHIFT 12 /* bit 15:12 */
+#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
+#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
+
/* Sonics side: PCI core and host control registers */
struct sbpciregs {
u32 control; /* PCI control */
@@ -205,11 +208,7 @@ struct sbpcieregs {
};
struct pcicore_info {
- union {
- struct sbpcieregs __iomem *pcieregs;
- struct sbpciregs __iomem *pciregs;
- } regs; /* Memory mapped register to the core */
-
+ struct bcma_device *core;
struct si_pub *sih; /* System interconnect handle */
struct pci_dev *dev;
u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
@@ -224,9 +223,9 @@ struct pcicore_info {
};
#define PCIE_ASPM(sih) \
- (((sih)->buscoretype == PCIE_CORE_ID) && \
- (((sih)->buscorerev >= 3) && \
- ((sih)->buscorerev <= 5)))
+ ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \
+ ((ai_get_buscorerev(sih) >= 3) && \
+ (ai_get_buscorerev(sih) <= 5)))
/* delay needed between the mdio control/ mdiodata register data access */
@@ -238,8 +237,7 @@ static void pr28829_delay(void)
/* Initialize the PCI core.
* It's caller's responsibility to make sure that this is done only once
*/
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
- void __iomem *regs)
+struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
{
struct pcicore_info *pi;
@@ -249,17 +247,15 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
return NULL;
pi->sih = sih;
- pi->dev = pdev;
+ pi->dev = core->bus->host_pci;
+ pi->core = core;
- if (sih->buscoretype == PCIE_CORE_ID) {
+ if (core->id.id == PCIE_CORE_ID) {
u8 cap_ptr;
- pi->regs.pcieregs = regs;
cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
NULL, NULL);
pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
- } else
- pi->regs.pciregs = regs;
-
+ }
return pi;
}
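
With the register structs no longer ioremapped, nicpci.c now addresses them by byte offset: PCIREGOFFS()/PCIEREGOFFS(), defined earlier in this diff, are plain offsetof() into the old struct layouts, and the resulting offsets are handed to the bcma_read*/bcma_write* accessors on pi->core. A self-contained illustration of the same technique, using a made-up register block:

#include <stddef.h>	/* offsetof */

/* Purely illustrative register block and offset macro, mirroring how
 * PCIEREGOFFS(field) turns a struct layout into bcma byte offsets.
 */
struct example_regs {
	unsigned int control;		/* 0x00 */
	unsigned int status;		/* 0x04 */
	unsigned short sprom[64];	/* 0x08.. */
};

#define EXAMPLE_REGOFFS(field)	offsetof(struct example_regs, field)

/* EXAMPLE_REGOFFS(status) == 0x04 and EXAMPLE_REGOFFS(sprom[2]) == 0x0c;
 * values like these are what get passed as the offset argument to
 * bcma_read16()/bcma_write32() and friends.
 */
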
@@ -334,37 +330,37 @@ end:
/* ***** Register Access API */
static uint
-pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
{
uint retval = 0xFFFFFFFF;
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG(&pcieregs->configaddr, offset);
- (void)R_REG((&pcieregs->configaddr));
- retval = R_REG(&pcieregs->configdata);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(configaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(configdata));
break;
case PCIE_PCIEREGS:
- W_REG(&pcieregs->pcieindaddr, offset);
- (void)R_REG(&pcieregs->pcieindaddr);
- retval = R_REG(&pcieregs->pcieinddata);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
break;
}
return retval;
}
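
pcie_readreg()/pcie_writereg() give indirect access to two spaces behind the PCIe core: PCIE_CONFIGREGS goes through the configaddr/configdata pair and PCIE_PCIEREGS through pcieindaddr/pcieinddata; in both cases the address register is written first, with a flushing readback on the read path, and the data register then carries the payload. As an illustration, a protocol-space read such as the PCIE_DLLP_PMTHRESHREG accesses later in this file boils down to the sketch below; the wrapper name is hypothetical.

/* Illustrative: what pcie_readreg(core, PCIE_PCIEREGS, offset) expands to
 * for the protocol-register space (pcieindaddr/pcieinddata pair).
 */
static u32 example_pcie_proto_read(struct bcma_device *core, uint offset)
{
	bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
	(void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));	/* flush */
	return bcma_read32(core, PCIEREGOFFS(pcieinddata));
}
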
-static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
+static uint pcie_writereg(struct bcma_device *core, uint addrtype,
uint offset, uint val)
{
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG((&pcieregs->configaddr), offset);
- W_REG((&pcieregs->configdata), val);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(configdata), val);
break;
case PCIE_PCIEREGS:
- W_REG((&pcieregs->pcieindaddr), offset);
- W_REG((&pcieregs->pcieinddata), val);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
break;
default:
break;
@@ -374,7 +370,6 @@ static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata, i = 0;
uint pcie_serdes_spinwait = 200;
@@ -382,12 +377,13 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
(MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
(MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
(blk << 4));
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE)
break;
udelay(1000);
@@ -404,15 +400,15 @@ static int
pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
uint *val)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata;
uint i = 0;
uint pcie_serdes_spinwait = 10;
/* enable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
+ MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
- if (pi->sih->buscorerev >= 10) {
+ if (ai_get_buscorerev(pi->sih) >= 10) {
/* new serdes is slower in rw,
* using two layers of reg address mapping
*/
@@ -432,20 +428,22 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
*val);
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE) {
if (!write) {
pr28829_delay();
- *val = (R_REG(&pcieregs->mdiodata) &
+ *val = (bcma_read32(pi->core,
+ PCIEREGOFFS(mdiodata)) &
MDIODATA_MASK);
}
/* Disable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 0;
}
udelay(1000);
@@ -453,7 +451,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
}
/* Timed out. Disable mdio access to SERDES. */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 1;
}
@@ -502,18 +500,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
u32 w;
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7)
+ if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
+ ai_get_buscorerev(sih) < 7)
return;
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
if (extend)
w |= PCIE_ASPMTIMER_EXTEND;
else
w &= ~PCIE_ASPMTIMER_EXTEND;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}
/* centralized clkreq control policy */
@@ -527,25 +525,27 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
pcie_clkreq(pi, 1, 0);
break;
case SI_PCIDOWN:
- if (sih->buscorerev == 6) { /* turn on serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0);
+ /* turn on serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0);
} else if (pi->pcie_pr42767) {
pcie_clkreq(pi, 1, 1);
}
break;
case SI_PCIUP:
- if (sih->buscorerev == 6) { /* turn off serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0x40);
+ /* turn off serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0x40);
} else if (PCIE_ASPM(sih)) { /* disable clkreq */
pcie_clkreq(pi, 1, 0);
}
@@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi)
if (pi->pcie_polarity != 0)
return;
- w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
/* Detect the current polarity at attach and force that polarity and
* disable changing the polarity
@@ -581,18 +581,15 @@ static void pcie_war_polarity(struct pcicore_info *pi)
*/
static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
struct si_pub *sih = pi->sih;
u16 val16;
- u16 __iomem *reg16;
u32 w;
if (!PCIE_ASPM(sih))
return;
/* bypass this on QT or VSIM */
- reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
val16 &= ~SRSH_ASPM_ENB;
if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
@@ -602,15 +599,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
val16 |= SRSH_ASPM_L0s_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
w &= ~PCIE_ASPM_ENAB;
w |= pi->pcie_war_aspm_ovr;
pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
- reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
val16 |= SRSH_CLKREQ_ENB;
@@ -618,7 +615,8 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
} else
val16 &= ~SRSH_CLKREQ_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
+ val16);
}
/* Apply the polarity determined at the start */
@@ -642,16 +640,15 @@ static void pcie_war_serdes(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u16 val16;
- u16 __iomem *reg16;
- reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
val16 |= SRSH_L23READY_EXIT_NOPERST;
- W_REG(reg16, val16);
+ bcma_write16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
}
}
@@ -659,62 +656,57 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- u16 __iomem *reg16;
-
/* turn off serdes PLL down */
- ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
- CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
+ ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
+ CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
/* clear srom shadow backdoor */
- reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
- W_REG(reg16, 0);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_pci_setup(struct pcicore_info *pi)
{
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u32 w;
- if (sih->buscorerev == 0 || sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG);
w |= 0x8;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG, w);
}
- if (sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
+ if (ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
w |= 0x40;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
}
- if (sih->buscorerev == 0) {
+ if (ai_get_buscorerev(sih) == 0) {
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
} else if (PCIE_ASPM(sih)) {
/* Change the L1 threshold for better performance */
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG);
w &= ~PCIE_L1THRESHOLDTIME_MASK;
w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG, w);
pcie_war_serdes(pi);
pcie_war_aspm_clkreq(pi);
- } else if (pi->sih->buscorerev == 7)
+ } else if (ai_get_buscorerev(pi->sih) == 7)
pcie_war_noplldown(pi);
/* Note that the fix is actually in the SROM,
* that's why this is open-ended
*/
- if (pi->sih->buscorerev >= 6)
+ if (ai_get_buscorerev(pi->sih) >= 6)
pcie_misc_config_fixup(pi);
}
@@ -745,7 +737,7 @@ void pcicore_attach(struct pcicore_info *pi, int state)
void pcicore_hwup(struct pcicore_info *pi)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_war_pci_setup(pi);
@@ -753,7 +745,7 @@ void pcicore_hwup(struct pcicore_info *pi)
void pcicore_up(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
/* Restore L1 timer for better performance */
@@ -781,7 +773,7 @@ void pcicore_sleep(struct pcicore_info *pi)
void pcicore_down(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_clkreq_upd(pi, state);
@@ -790,46 +782,45 @@ void pcicore_down(struct pcicore_info *pi, int state)
pcie_extendL1timer(pi, false);
}
-/* precondition: current core is sii->buscoretype */
-static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
+void pcicore_fixcfg(struct pcicore_info *pi)
{
- struct si_info *sii = (struct si_info *)(pi->sih);
+ struct bcma_device *core = pi->core;
u16 val16;
- uint pciidx;
+ uint regoff;
- pciidx = ai_coreidx(&sii->pub);
- val16 = R_REG(reg16);
- if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
- val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
- (val16 & ~SRSH_PI_MASK);
- W_REG(reg16, val16);
- }
-}
+ switch (pi->core->id.id) {
+ case BCMA_CORE_PCI:
+ regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void
-pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
-{
- pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
-}
+ case BCMA_CORE_PCIE:
+ regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void pcicore_fixcfg_pcie(struct pcicore_info *pi,
- struct sbpcieregs __iomem *pcieregs)
-{
- pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
+ default:
+ return;
+ }
+
+ val16 = bcma_read16(pi->core, regoff);
+ if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
+ (u16)core->core_index) {
+ val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
+ (val16 & ~SRSH_PI_MASK);
+ bcma_write16(pi->core, regoff, val16);
+ }
}
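
pcicore_fixcfg() patches the "PI" field of the SPROM shadow word, bits 15:12 of sprom[SRSH_PI_OFFSET] per SRSH_PI_MASK/SRSH_PI_SHIFT, so it matches the index of the PCI/PCIe core actually in use, and writes the word back only when the value changes. A small worked example of the bit manipulation, with made-up values:

/* Worked example (hypothetical values): update the PI field from core
 * index 3 to core index 1 while preserving the other twelve bits.
 */
static u16 example_fix_pi_field(void)
{
	u16 val16 = 0x3abc;	/* shadow word with PI == 3 */
	u16 core_index = 1;

	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != core_index)
		val16 = (core_index << SRSH_PI_SHIFT) |
			(val16 & ~SRSH_PI_MASK);

	return val16;		/* 0x1abc, then written back with bcma_write16() */
}
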
/* precondition: current core is pci core */
void
-pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
+pcicore_pci_setup(struct pcicore_info *pi)
{
- u32 w;
-
- OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
-
- if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) {
- OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
- w = R_REG(&pciregs->clkrun);
- W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
- w = R_REG(&pciregs->clkrun);
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_PREF | SBTOPCI_BURST);
+
+ if (pi->core->id.rev >= 11) {
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_RC_READMULTI);
+ bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
+ (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
index 58aa80dc332..9fc3ead540a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
@@ -62,8 +62,7 @@ struct sbpciregs;
struct sbpcieregs;
extern struct pcicore_info *pcicore_init(struct si_pub *sih,
- struct pci_dev *pdev,
- void __iomem *regs);
+ struct bcma_device *core);
extern void pcicore_deinit(struct pcicore_info *pch);
extern void pcicore_attach(struct pcicore_info *pch, int state);
extern void pcicore_hwup(struct pcicore_info *pch);
@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch);
extern void pcicore_down(struct pcicore_info *pch, int state);
extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg_pci(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
-extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
- struct sbpcieregs __iomem *pciregs);
-extern void pcicore_pci_setup(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
+extern void pcicore_fixcfg(struct pcicore_info *pch);
+extern void pcicore_pci_setup(struct pcicore_info *pch);
#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
index edf551561fd..f1ca1262586 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
@@ -77,7 +77,7 @@ struct otp_fn_s {
};
struct otpinfo {
- uint ccrev; /* chipc revision */
+ struct bcma_device *core; /* chipc core */
const struct otp_fn_s *fn; /* OTP functions */
struct si_pub *sih; /* Saved sb handle */
@@ -133,9 +133,10 @@ struct otpinfo {
#define OTP_SZ_FU_144 (144/8) /* 144 bits */
static u16
-ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn)
+ipxotp_otpr(struct otpinfo *oi, uint wn)
{
- return R_REG(&cc->sromotp[wn]);
+ return bcma_read16(oi->core,
+ CHIPCREGOFFS(sromotp[wn]));
}
/*
@@ -146,7 +147,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
{
int ret = 0;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -161,19 +162,21 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
return ret;
}
-static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
+static void _ipxotp_init(struct otpinfo *oi)
{
uint k;
u32 otpp, st;
+ int ccrev = ai_get_ccrev(oi->sih);
+
/*
* record word offset of General Use Region
* for various chipcommon revs
*/
- if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
- || oi->sih->ccrev == 27) {
+ if (ccrev == 21 || ccrev == 24
+ || ccrev == 27) {
oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (oi->sih->ccrev == 36) {
+ } else if (ccrev == 36) {
/*
* OTP size greater than equal to 2KB (128 words),
* otpgu_base is similar to rev23
@@ -182,7 +185,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->otpgu_base = REVB8_OTPGU_BASE;
else
oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+ } else if (ccrev == 23 || ccrev >= 25) {
oi->otpgu_base = REVB8_OTPGU_BASE;
}
@@ -190,24 +193,21 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
otpp =
OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
- W_REG(&cc->otpprog, otpp);
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
+ bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
+ for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
if (k >= OTPP_TRIES)
return;
/* Read OTP lock bits and subregion programmed indication bits */
- oi->status = R_REG(&cc->otpstatus);
+ oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
- if ((oi->sih->chip == BCM43224_CHIP_ID)
- || (oi->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
+ || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
u32 p_bits;
- p_bits =
- (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK)
- >> OTPGU_P_SHIFT;
+ p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
+ OTPGU_P_MSK) >> OTPGU_P_SHIFT;
oi->status |= (p_bits << OTPS_GUP_SHIFT);
}
@@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->hwlim = oi->wsize;
if (oi->status & OTPS_GUP_HW) {
oi->hwlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
oi->swbase = oi->hwlim;
} else
oi->swbase = oi->hwbase;
@@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
if (oi->status & OTPS_GUP_SW) {
oi->swlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
oi->fbase = oi->swlim;
} else
oi->fbase = oi->swbase;
@@ -240,11 +240,8 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
{
- uint idx;
- struct chipcregs __iomem *cc;
-
/* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(sih->ccrev))
+ if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
return -EBADE;
/* Make sure OTP is not disabled */
@@ -252,7 +249,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
return -EBADE;
/* Check for otp size */
- switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+ switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
case 0:
/* Nothing there */
return -EBADE;
@@ -282,21 +279,13 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
}
/* Retrieve OTP region info */
- idx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- _ipxotp_init(oi, cc);
-
- ai_setcoreidx(sih, idx);
-
+ _ipxotp_init(oi);
return 0;
}
static int
ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
{
- uint idx;
- struct chipcregs __iomem *cc;
uint base, i, sz;
/* Validate region selection */
@@ -365,14 +354,10 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
return -EINVAL;
}
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
/* Read the data */
for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oi, cc, base + i);
+ data[i] = ipxotp_otpr(oi, base + i);
- ai_setcoreidx(oi->sih, idx);
*wlen = sz;
return 0;
}
@@ -384,14 +369,13 @@ static const struct otp_fn_s ipxotp_fn = {
static int otp_init(struct si_pub *sih, struct otpinfo *oi)
{
-
int ret;
memset(oi, 0, sizeof(struct otpinfo));
- oi->ccrev = sih->ccrev;
+ oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (OTPTYPE_IPX(oi->ccrev))
+ if (OTPTYPE_IPX(ai_get_ccrev(sih)))
oi->fn = &ipxotp_fn;
if (oi->fn == NULL)
@@ -399,7 +383,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi)
oi->sih = sih;
- ret = (oi->fn->init) (sih, oi);
+ ret = (oi->fn->init)(sih, oi);
return ret;
}
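The _ipxotp_init() hunk above turns the old single-line poll into an explicit bounded loop: write the INIT command to otpprog, then keep re-reading the register until OTPP_START_BUSY clears or OTPP_TRIES is exhausted. A self-contained model of that bounded busy-wait follows; it is user-space code with a stubbed register read, and the busy-bit value and try budget are illustrative, not the driver's constants.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_START_BUSY 0x80000000u /* illustrative busy bit */
#define MODEL_TRIES      10000       /* illustrative try budget */

/* Stand-in for bcma_read32(core, CHIPCREGOFFS(otpprog)); models hardware
 * that clears the busy bit after a few polls. */
static uint32_t read_otpprog_model(void)
{
	static int busy_reads_left = 3;

	if (busy_reads_left > 0) {
		busy_reads_left--;
		return MODEL_START_BUSY;
	}
	return 0;
}

/* Same shape as the rewritten poll in _ipxotp_init(): read once before the
 * loop, then keep re-reading while still busy and under the try budget. */
static bool wait_otp_idle(void)
{
	uint32_t st = read_otpprog_model();
	unsigned int k;

	for (k = 0; (st & MODEL_START_BUSY) && (k < MODEL_TRIES); k++)
		st = read_otpprog_model();

	return k < MODEL_TRIES;
}

int main(void)
{
	printf("OTP controller idle: %s\n", wait_otp_idle() ? "yes" : "timed out");
	return 0;
}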
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index e17edf7e683..264f8c4c703 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -109,7 +109,7 @@ static const struct chan_info_basic chan_info_all[] = {
{204, 5020},
{208, 5040},
{212, 5060},
- {216, 50800}
+ {216, 5080}
};
static const u8 ofdm_rate_lookup[] = {
@@ -149,9 +149,8 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih)
void wlc_radioreg_exit(struct brcms_phy_pub *pih)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- u16 dummy;
- dummy = R_REG(&pi->regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->phy_wreg = 0;
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
}
@@ -186,11 +185,11 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- data = R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
- data = R_REG(&pi->regs->phy4wdatalo);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
}
pi->phy_wreg = 0;
@@ -203,15 +202,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- W_REG(&pi->regs->radioregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
- W_REG(&pi->regs->phy4wdatalo, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
}
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
pi->phy_wreg = 0;
}
}
@@ -223,19 +222,20 @@ static u32 read_radio_id(struct brcms_phy *pi)
if (D11REV_GE(pi->sh->corerev, 24)) {
u32 b0, b1, b2;
- W_REG_FLUSH(&pi->regs->radioregaddr, 0);
- b0 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 1);
- b1 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 2);
- b2 = (u32) R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0);
+ b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1);
+ b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2);
+ b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
& 0xf);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE);
- id = (u32) R_REG(&pi->regs->phy4wdatalo);
- id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE);
+ id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
+ id |= (u32) bcma_read16(pi->d11core,
+ D11REGOFFS(phy4wdatahi)) << 16;
}
pi->phy_wreg = 0;
return id;
@@ -275,75 +275,52 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
void write_phy_channel_reg(struct brcms_phy *pi, uint val)
{
- W_REG(&pi->regs->phychannel, val);
+ bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
}
u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
pi->phy_wreg = 0;
- return R_REG(&regs->phyregdata);
+ return bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
}
void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
#ifdef CONFIG_BCM47XX
- W_REG_FLUSH(&regs->phyregaddr, addr);
- W_REG(&regs->phyregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
if (addr == 0x72)
- (void)R_REG(&regs->phyregdata);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
#else
- W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16));
+ bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
pi->phy_wreg = 0;
- (void)R_REG(&regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
}
#endif
}
void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata,
- ((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
+ val &= mask;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val);
pi->phy_wreg = 0;
}
@@ -404,10 +381,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
sh->sromrev = shp->sromrev;
sh->boardtype = shp->boardtype;
sh->boardrev = shp->boardrev;
- sh->boardvendor = shp->boardvendor;
sh->boardflags = shp->boardflags;
sh->boardflags2 = shp->boardflags2;
- sh->buscorerev = shp->buscorerev;
sh->fast_timer = PHY_SW_TIMER_FAST;
sh->slow_timer = PHY_SW_TIMER_SLOW;
@@ -450,7 +425,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
}
struct brcms_phy_pub *
-wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
+wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy)
{
struct brcms_phy *pi;
@@ -462,7 +437,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (D11REV_IS(sh->corerev, 4))
sflags = SISF_2G_PHY | SISF_5G_PHY;
else
- sflags = ai_core_sflags(sh->sih, 0, 0);
+ sflags = bcma_aread32(d11core, BCMA_IOST);
if (bandtype == BRCM_BAND_5G) {
if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
@@ -480,7 +455,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (pi == NULL)
return NULL;
pi->wiphy = wiphy;
- pi->regs = regs;
+ pi->d11core = d11core;
pi->sh = sh;
pi->phy_init_por = true;
pi->phy_wreg_limit = PHY_WREG_LIMIT;
@@ -495,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.coreflags = SICF_GMODE;
wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
- phyversion = R_REG(&pi->regs->phyversion);
+ phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->pubpi.phy_type = PHY_TYPE(phyversion);
pi->pubpi.phy_rev = phyversion & PV_PV_MASK;
@@ -507,8 +482,8 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
- if (!pi->pubpi.phy_type == PHY_TYPE_N &&
- !pi->pubpi.phy_type == PHY_TYPE_LCN)
+ if (pi->pubpi.phy_type != PHY_TYPE_N &&
+ pi->pubpi.phy_type != PHY_TYPE_LCN)
goto err;
if (bandtype == BRCM_BAND_5G) {
@@ -779,14 +754,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec)
pi->radio_chanspec = chanspec;
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
return;
if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
- if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA),
+ if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA),
"HW error SISF_FCLKA\n"))
return;
@@ -825,8 +800,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih)
struct brcms_phy *pi = (struct brcms_phy *) pih;
void (*cal_init)(struct brcms_phy *) = NULL;
- if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
- "HW error: MAC enabled during phy cal\n"))
+ if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n"))
return;
if (!pi->initialized) {
@@ -1017,7 +992,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi,
void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
{
#define DUMMY_PKT_LEN 20
- struct d11regs __iomem *regs = pi->regs;
+ struct bcma_device *core = pi->d11core;
int i, count;
u8 ofdmpkt[DUMMY_PKT_LEN] = {
0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1033,26 +1008,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN,
dummypkt);
- W_REG(&regs->xmtsel, 0);
+ bcma_write16(core, D11REGOFFS(xmtsel), 0);
if (D11REV_GE(pi->sh->corerev, 11))
- W_REG(&regs->wepctl, 0x100);
+ bcma_write16(core, D11REGOFFS(wepctl), 0x100);
else
- W_REG(&regs->wepctl, 0);
+ bcma_write16(core, D11REGOFFS(wepctl), 0);
- W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
+ bcma_write16(core, D11REGOFFS(txe_phyctl),
+ (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_phyctl1, 0x1A02);
+ bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02);
- W_REG(&regs->txe_wm_0, 0);
- W_REG(&regs->txe_wm_1, 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_0), 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_1), 0);
- W_REG(&regs->xmttplatetxptr, 0);
- W_REG(&regs->xmttxcnt, DUMMY_PKT_LEN);
+ bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0);
+ bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN);
- W_REG(&regs->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2));
+ bcma_write16(core, D11REGOFFS(xmtsel),
+ ((8 << 8) | (1 << 5) | (1 << 2) | 2));
- W_REG(&regs->txe_ctl, 0);
+ bcma_write16(core, D11REGOFFS(txe_ctl), 0);
if (!pa_on) {
if (ISNPHY(pi))
@@ -1060,27 +1037,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
}
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_aux, 0xD0);
+ bcma_write16(core, D11REGOFFS(txe_aux), 0xD0);
else
- W_REG(&regs->txe_aux, ((1 << 5) | (1 << 4)));
+ bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4)));
- (void)R_REG(&regs->txe_aux);
+ (void)bcma_read16(core, D11REGOFFS(txe_aux));
i = 0;
count = ofdm ? 30 : 250;
while ((i++ < count)
- && (R_REG(&regs->txe_status) & (1 << 7)))
+ && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7)))
udelay(10);
i = 0;
- while ((i++ < 10)
- && ((R_REG(&regs->txe_status) & (1 << 10)) == 0))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0))
udelay(10);
i = 0;
- while ((i++ < 10) && ((R_REG(&regs->ifsstat) & (1 << 8))))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8))))
udelay(10);
if (!pa_on) {
@@ -1137,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (ISNPHY(pi)) {
wlc_phy_switch_radio_nphy(pi, on);
@@ -1377,7 +1355,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
&txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
- if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)
+ if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
mac_enabled = true;
if (mac_enabled)
@@ -1407,7 +1385,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
if (!SCAN_INPROG_PHY(pi)) {
bool suspend;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) &
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
if (!suspend)
@@ -1860,18 +1839,17 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- rxc = R_REG(&pi->regs->phyregdata);
- W_REG(&pi->regs->phyregdata,
- (0x1 << 15) | rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
+ 0x1 << 15);
}
} else {
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- W_REG(&pi->regs->phyregdata, rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
}
wlc_phy_por_inform(ppi);
@@ -1991,7 +1969,9 @@ void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
pi->txpwrctrl = hwpwrctrl;
if (ISNPHY(pi)) {
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2193,7 +2173,8 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2411,8 +2392,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, (bool) 0);
@@ -2430,8 +2411,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
struct phy_iq_est est[PHY_CORE_MAX];
u32 cmplx_pwr[PHY_CORE_MAX];
@@ -2924,29 +2905,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
}
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x40);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x40);
} else {
mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x00);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x00);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x40);
}
}
}
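In the phy_cmn.c hunks above, the PHY register helpers now go through the d11 core's phyregaddr/phyregdata indirect window: one write selects the target register, and bcma_mask16/bcma_set16/bcma_maskset16 then perform the read-modify-write on the data port. The sketch below is a small user-space model of mod_phy_reg()'s shape; the backing array and helpers are illustrative stand-ins for the bcma accessors.

#include <stdint.h>
#include <stdio.h>

/* Model of the phyregaddr/phyregdata window: writing the address register
 * selects which PHY register the data port reads and writes. */
static uint16_t phy_regs[0x800];
static uint16_t phyregaddr;

static void write_phyregaddr(uint16_t addr) { phyregaddr = addr; }
static uint16_t read_phyregdata(void)       { return phy_regs[phyregaddr]; }
static void write_phyregdata(uint16_t v)    { phy_regs[phyregaddr] = v; }

/* Equivalent of bcma_maskset16() on the data port: new = (old & mask) | set. */
static void phyregdata_maskset(uint16_t mask, uint16_t set)
{
	write_phyregdata((uint16_t)((read_phyregdata() & mask) | set));
}

/* Same shape as the rewritten mod_phy_reg(): point the window at "addr",
 * then do a single read-modify-write through the data port. */
static void mod_phy_reg_model(uint16_t addr, uint16_t mask, uint16_t val)
{
	val &= mask;
	write_phyregaddr(addr);
	phyregdata_maskset((uint16_t)~mask, val);
}

int main(void)
{
	phy_regs[0x1ce] = 0xabcd;
	mod_phy_reg_model(0x1ce, 0x00f0, 0x0050);
	printf("0x%04x\n", phy_regs[0x1ce]); /* prints 0xab5d */
	return 0;
}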
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index 96e15163222..e34a71e7d24 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -166,7 +166,6 @@ struct shared_phy_params {
struct phy_shim_info *physhim;
uint unit;
uint corerev;
- uint buscorerev;
u16 vid;
u16 did;
uint chip;
@@ -175,7 +174,6 @@ struct shared_phy_params {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
};
@@ -183,7 +181,7 @@ struct shared_phy_params {
extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
- struct d11regs __iomem *regs,
+ struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy);
extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index 5f9478b1c99..af00e2c2b26 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -503,10 +503,8 @@ struct shared_phy {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
- uint buscorerev;
uint fast_timer;
uint slow_timer;
uint glacial_timer;
@@ -559,7 +557,7 @@ struct brcms_phy {
} u;
bool user_txpwr_at_rfport;
- struct d11regs __iomem *regs;
+ struct bcma_device *d11core;
struct brcms_phy *next;
struct brcms_phy_pub pubpi;
@@ -1090,7 +1088,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
#define BRCMS_PHY_WAR_PR51571(pi) \
if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
- (void)R_REG(&(pi)->regs->maccontrol)
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index a63aa99d981..efa0142bdad 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -2813,10 +2813,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
idleTssi = read_phy_reg(pi, 0x4ab);
- suspend =
- (0 ==
- (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
- MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2890,7 +2888,8 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
for (i = 0; i < 14; i++)
values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -3016,8 +3015,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
bool suspend;
struct brcms_phy *pi = (struct brcms_phy *) ppi;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -3535,15 +3534,17 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
timer = 0;
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- curval1 = R_REG(&pi->regs->psm_corectlsts);
+ curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
ptr[130] = 0;
- W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
+ ((1 << 6) | curval1));
- W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
- W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
udelay(20);
- curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
+ curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
+ curval2 | 0x30);
write_phy_reg(pi, 0x555, 0x0);
write_phy_reg(pi, 0x5a6, 0x5);
@@ -3560,19 +3561,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
- stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
do {
udelay(10);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
timer++;
} while ((curptr != stpptr) && (timer < 500));
- W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
strptr = 0x7E00;
- W_REG(&pi->regs->tplatewrptr, strptr);
+ bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
while (strptr < 0x8000) {
- val = R_REG(&pi->regs->tplatewrdata);
+ val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
imag = ((val >> 16) & 0x3ff);
real = ((val) & 0x3ff);
if (imag > 511)
@@ -3597,8 +3598,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
}
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2);
- W_REG(&pi->regs->psm_corectlsts, curval1);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
}
static void
@@ -3968,9 +3969,9 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
bool suspend = 0;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4012,9 +4013,9 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4078,9 +4079,9 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
bool suspend = 0;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -4127,8 +4128,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
s8 index;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, true);
@@ -4166,8 +4167,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
index = pi_lcn->lcnphy_current_index;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend) {
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
wlapi_suspend_mac_and_wait(pi->sh->physhim);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index ec9b56639d5..a16f1ab292f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -17802,7 +17802,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -17953,7 +17953,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -19447,8 +19447,6 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
u8 tx_pwr_ctrl_state;
bool do_nphy_cal = false;
uint core;
- uint origidx, intr_val;
- struct d11regs __iomem *regs;
u32 d11_clk_ctl_st;
bool do_rssi_cal = false;
@@ -19462,25 +19460,21 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
(pi->sh->chippkg == BCM4718_PKG_ID))) {
if ((pi->sh->boardflags & BFL_EXTLNA) &&
(CHSPEC_IS2G(pi->radio_chanspec)))
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, chipcontrol),
+ 0x40, 0x40);
}
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
- regs = (struct d11regs __iomem *)
- ai_switch_core(pi->sh->sih,
- D11_CORE_ID, &origidx,
- &intr_val);
- d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
- AND_REG(&regs->clk_ctl_st,
- ~(CCS_FORCEHT | CCS_HTAREQ));
+ d11_clk_ctl_st = bcma_read32(pi->d11core,
+ D11REGOFFS(clk_ctl_st));
+ bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ ~(CCS_FORCEHT | CCS_HTAREQ));
- W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
-
- ai_restore_core(pi->sh->sih, origidx, intr_val);
+ bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ d11_clk_ctl_st);
}
pi->use_int_tx_iqlo_cal_nphy =
@@ -19885,7 +19879,8 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -21263,28 +21258,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
if (CHSPEC_IS5G(chanspec) && !val) {
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(BBCFG_RESETCCA | BBCFG_RESETRX));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand);
} else if (!CHSPEC_IS5G(chanspec) && val) {
and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand);
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
}
write_phy_reg(pi, 0x1ce, ci->PHY_BW1a);
@@ -21342,24 +21337,23 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
spuravoid = 1;
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
- si_pmu_spuravoid(pi->sh->sih, spuravoid);
+ si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
(pi->sh->chip == BCM43225_CHIP_ID)) {
-
if (spuravoid == 1) {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x5341);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x5341);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
} else {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x8889);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x8889);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
}
}
@@ -21499,13 +21493,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
mc &= ~MCTL_GPOUT_SEL_MASK;
- W_REG(&pi->regs->maccontrol, mc);
+ bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc);
- OR_REG(&pi->regs->psm_gpio_oe, mask);
+ bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
- AND_REG(&pi->regs->psm_gpio_out, ~mask);
+ bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
if (lut_init) {
write_phy_reg(pi, 0xf8, 0x02d8);
@@ -21522,9 +21516,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
bool suspended = false;
if (D11REV_IS(pi->sh->corerev, 16)) {
- suspended =
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
- false : true;
+ suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) ? false : true;
if (!suspended)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
}
@@ -25383,7 +25376,8 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
if (pi->nphy_papd_skip == 1)
return;
- phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!phy_b3)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -28357,7 +28351,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 12ba575f578..4931d29d077 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -115,10 +115,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
uint rsrcs;
/* # resources */
- rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+ rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
/* determine min/max rsrc masks */
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
/* ??? */
@@ -139,75 +139,84 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
*pmax = max_mask;
}
-static void
-si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
- u8 spuravoid)
+void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
{
u32 tmp = 0;
+ struct bcma_device *core;
- switch (sih->chip) {
+ /* switch to chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
if (spuravoid == 1) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000C0C06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0F600a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x2001E920);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11500010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000C0C06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x0F600a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x2001E920);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
} else {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000c0c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11100010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000c0c06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x03000a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x200005c0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
}
tmp = 1 << 10;
break;
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100008);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x0c000c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888855);
-
- tmp = 1 << 10;
- break;
-
default:
/* bail out */
return;
}
- tmp |= R_REG(&cc->pmucontrol);
- W_REG(&cc->pmucontrol, tmp);
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
}
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -220,54 +229,35 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
return (u16) delay;
}
-void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
/* Read/write a chipcontrol reg */
u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
+ mask, val);
}
/* Read/write a regcontrol reg */
u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, regcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
+ mask, val);
}
/* Read/write a pllcontrol reg */
u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, pllcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
+ mask, val);
}
/* PMU PLL update */
void si_pmu_pllupd(struct si_pub *sih)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol),
- PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
+ PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
}
/* query alp/xtal clock frequency */
@@ -276,10 +266,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
u32 clock = ALP_CLOCK;
/* bail out with default */
- if (!(sih->cccaps & CC_CAP_PMU))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
return clock;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -293,95 +283,29 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
}
-void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
-{
- struct chipcregs __iomem *cc;
- uint origidx, intr_val;
-
- /* Remember original core before switch to chipc */
- cc = (struct chipcregs __iomem *)
- ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
-
- /* update the pll changes */
- si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
-
- /* Return to original core */
- ai_restore_core(sih, origidx, intr_val);
-}
-
/* initialize PMU */
void si_pmu_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- if (sih->pmurev == 1)
- AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
- else if (sih->pmurev >= 2)
- OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
+ /* select chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(struct si_pub *sih)
-{
- uint origidx;
-
- /* Gate off SPROM clock and chip select signals */
- si_pmu_sprom_enable(sih, false);
-
- /* Remember original core */
- origidx = ai_coreidx(sih);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(struct si_pub *sih)
-{
-}
-
-/* initialize PLL */
-void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4313_CHIP_ID:
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- /* ??? */
- break;
- default:
- break;
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
+ if (ai_get_pmurev(sih) == 1)
+ bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
+ ~PCTL_NOILP_ON_WAIT);
+ else if (ai_get_pmurev(sih) >= 2)
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
}
/* initialize PMU resources */
void si_pmu_res_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 min_mask = 0, max_mask = 0;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+ /* select the chipc core */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
/* Determine min/max rsrc masks */
si_pmu_res_masks(sih, &min_mask, &max_mask);
@@ -391,55 +315,50 @@ void si_pmu_res_init(struct si_pub *sih)
/* Program max resource mask */
if (max_mask)
- W_REG(&cc->max_res_mask, max_mask);
+ bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
/* Program min resource mask */
if (min_mask)
- W_REG(&cc->min_res_mask, min_mask);
+ bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
/* Add some delay; allow resources to come up and settle. */
mdelay(2);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
}
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 alp_khz;
- if (sih->pmurev < 10)
+ if (ai_get_pmurev(sih) < 10)
return 0;
/* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
+ if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
u32 ilp_ctr, alp_hz;
/*
* Enable the reg to measure the freq,
* in case it was disabled before
*/
- W_REG(&cc->pmu_xtalfreq,
- 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq),
+ 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
/* Delay for well over 4 ILP clocks */
udelay(1000);
/* Read the latched number of ALP ticks per 4 ILP ticks */
- ilp_ctr =
- R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+ ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) &
+ PMU_XTALFREQ_REG_ILPCTR_MASK;
/*
* Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
* bit to save power
*/
- W_REG(&cc->pmu_xtalfreq, 0);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0);
/* Calculate ALP frequency */
alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
@@ -452,8 +371,5 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
} else
alp_khz = 0;
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-
return alp_khz;
}
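si_pmu_spuravoid_pllupdate() above programs the PLL through chipcommon's pllcontrol_addr/pllcontrol_data pair: writing an index to the address register selects which PLL control word the following data write lands in, and the update repeats that pair for PLLCTL0 through PLLCTL5. The sketch below is a self-contained model of that pair-write pattern; the stubs stand in for the bcma writes, and the example values come from the spuravoid == 1 table in the hunk.

#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for bcma_write32() on CHIPCREGOFFS(pllcontrol_addr)
 * and CHIPCREGOFFS(pllcontrol_data); the array models the PLL word bank. */
static uint32_t pll_words[8];
static uint32_t pll_addr;

static void write_pllcontrol_addr(uint32_t idx) { pll_addr = idx; }
static void write_pllcontrol_data(uint32_t val) { pll_words[pll_addr] = val; }

/* One address/data pair-write, the unit the spuravoid update repeats. */
static void pll_write(uint32_t idx, uint32_t val)
{
	write_pllcontrol_addr(idx);
	write_pllcontrol_data(val);
}

int main(void)
{
	/* First three words of the spuravoid == 1 sequence from the hunk;
	 * indices 0..2 stand in for the PMU1_PLL0_PLLCTLn selectors. */
	static const uint32_t seq[][2] = {
		{ 0, 0x11500010 },
		{ 1, 0x000C0C06 },
		{ 2, 0x0F600a08 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		pll_write(seq[i][0], seq[i][1]);

	printf("PLLCTL0 = 0x%08x\n", pll_words[0]);
	return 0;
}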
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3a08c620640..3e39c5e0f9f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,13 +26,10 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
+extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_chip_init(struct si_pub *sih);
-extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
extern void si_pmu_res_init(struct si_pub *sih);
-extern void si_pmu_swreg_init(struct si_pub *sih);
extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 21ccf3a0398..f0038ad7d7b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -17,6 +17,7 @@
#ifndef _BRCM_PUB_H_
#define _BRCM_PUB_H_
+#include <linux/bcma/bcma.h>
#include <brcmu_wifi.h>
#include "types.h"
#include "defs.h"
@@ -530,9 +531,8 @@ struct brcms_antselcfg {
/* common functions for every port */
extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr);
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr);
extern uint brcms_c_detach(struct brcms_c_info *wlc);
extern int brcms_c_up(struct brcms_c_info *wlc);
extern uint brcms_c_down(struct brcms_c_info *wlc);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
index b6987ea9fc6..61092156755 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -586,17 +586,6 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = {
* shared between devices. */
static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
-static u8 __iomem *
-srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
-{
- if (sih->ccrev < 32)
- return curmap + PCI_BAR0_SPROM_OFFSET;
- if (sih->cccaps & CC_CAP_SROM)
- return curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP;
-
- return NULL;
-}
-
static uint mask_shift(u16 mask)
{
uint i;
@@ -779,17 +768,27 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
* Return 0 on success, nonzero on error.
*/
static int
-sprom_read_pci(struct si_pub *sih, u8 __iomem *sprom, uint wordoff,
- u16 *buf, uint nwords, bool check_crc)
+sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
{
int err = 0;
uint i;
u8 *bbuf = (u8 *)buf; /* byte buffer */
uint nbytes = nwords << 1;
+ struct bcma_device *core;
+ uint sprom_offset;
+
+ /* determine core to read */
+ if (ai_get_ccrev(sih) < 32) {
+ core = ai_findcore(sih, BCMA_CORE_80211, 0);
+ sprom_offset = PCI_BAR0_SPROM_OFFSET;
+ } else {
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sprom_offset = CHIPCREGOFFS(sromotp);
+ }
/* read the sprom in bytes */
for (i = 0; i < nbytes; i++)
- bbuf[i] = readb(sprom+i);
+ bbuf[i] = bcma_read8(core, sprom_offset+i);
if (buf[0] == 0xffff)
/*
@@ -851,10 +850,9 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
* Initialize nonvolatile variable table from sprom.
* Return 0 on success, nonzero on error.
*/
-static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
+int srom_var_init(struct si_pub *sih)
{
u16 *srom;
- u8 __iomem *sromwindow;
u8 sromrev = 0;
u32 sr;
int err = 0;
@@ -866,12 +864,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
if (!srom)
return -ENOMEM;
- sromwindow = srom_window_address(sih, curmap);
-
crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
if (ai_is_sprom_available(sih)) {
- err = sprom_read_pci(sih, sromwindow, 0, srom,
- SROM4_WORDS, true);
+ err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
if (err == 0)
/* srom read and passed crc */
@@ -921,21 +916,6 @@ void srom_free_vars(struct si_pub *sih)
kfree(entry);
}
}
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih, void __iomem *curmap)
-{
- uint len;
-
- len = 0;
-
- if (curmap != NULL)
- return initvars_srom_pci(sih, curmap);
-
- return -EINVAL;
-}
/*
* Search the name=value vars for a specific one and return its value.
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
index c81df9798e5..f2a58f262c9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
@@ -20,7 +20,7 @@
#include "types.h"
/* Prototypes */
-extern int srom_var_init(struct si_pub *sih, void __iomem *curmap);
+extern int srom_var_init(struct si_pub *sih);
extern void srom_free_vars(struct si_pub *sih);
extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index 27a814b0746..e11ae83111e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -250,66 +250,18 @@ do { \
wiphy_err(dev, "%s: " fmt, __func__, ##args); \
} while (0)
-/*
- * Register access macros.
- *
- * These macro's take a pointer to the address to read as one of their
- * arguments. The macro itself deduces the size of the IO transaction (u8, u16
- * or u32). Advantage of this approach in combination with using a struct to
- * define the registers in a register block, is that access size and access
- * location are defined in only one spot. This reduces the risk of the
- * programmer trying to use an unsupported transaction size on a register.
- *
- */
-
-#define R_REG(r) \
- ({ \
- __typeof(*(r)) __osl_v; \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = readb((u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- __osl_v = readw((u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- __osl_v = readl((u32 __iomem *)(r)); \
- break; \
- } \
- __osl_v; \
- })
-
-#define W_REG(r, v) do { \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- writel((u32)(v), (u32 __iomem *)(r)); \
- break; \
- } \
- } while (0)
-
#ifdef CONFIG_BCM47XX
/*
* bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
* transactions. As a fix, a read after write is performed on certain places
* in the code. Older chips and the newer 5357 family don't require this fix.
*/
-#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); })
+#define bcma_wflush16(c, o, v) \
+ ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); })
#else
-#define W_REG_FLUSH(r, v) W_REG((r), (v))
+#define bcma_wflush16(c, o, v) bcma_write16(c, o, v)
#endif /* CONFIG_BCM47XX */
-#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
- W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
/* multi-bool data type: set of bools, mbool is true if any is set */
/* set one bool */
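The types.h hunk above drops the generic R_REG/W_REG machinery and keeps one piece of it: bcma_wflush16(), a write followed by a throwaway read of the same register, needed on the BCM47XX-family hosts that can reorder (post) writes, and a plain write everywhere else. The user-space model below shows that shape; the device stub and the NEEDS_WRITE_FLUSH switch are illustrative, standing in for bcma_write16/bcma_read16 and CONFIG_BCM47XX.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for bcma_write16()/bcma_read16() against one device register;
 * on real hardware the register sits behind a bridge that posts writes,
 * which is what the read-back exists to drain. */
static uint16_t model_reg;
static void dev_write16(uint16_t v) { model_reg = v; }
static uint16_t dev_read16(void)    { return model_reg; }

/* Shape of bcma_wflush16() from the hunk: with the flush switch on, follow
 * the write with a discarded read of the same register so the write reaches
 * the device before the caller continues; otherwise it is a plain write. */
#ifdef NEEDS_WRITE_FLUSH
#define wflush16(v) ({ dev_write16(v); (void)dev_read16(); })
#else
#define wflush16(v) dev_write16(v)
#endif

int main(void)
{
	wflush16(0x1a02);
	printf("reg = 0x%04x\n", dev_read16());
	return 0;
}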
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index fefabc39e64..f96834a7c05 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -19,6 +19,8 @@
#include "defs.h" /* for PAD macro */
+#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field)
+
struct chipcregs {
u32 chipid; /* 0x0 */
u32 capabilities;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index e0b3e8d406b..df7050abe71 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,5 +1,6 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/lib80211.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index c34a3b7f129..344a981a052 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,4 +1,5 @@
#include <linux/slab.h>
+#include <linux/export.h>
#include "hostap_80211.h"
#include "hostap_common.h"
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 3d05dc15c6b..e1f41027724 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -21,6 +21,8 @@
#include <linux/random.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
#include "hostap_wlan.h"
#include "hostap.h"
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 5441ad19511..89e9d3a78c3 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -656,6 +656,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
0xe6ec52ce, 0x08649af2, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
+ "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
+ 0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
+ PCMCIA_DEVICE_PROD_ID123(
"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index d737091cf6a..47932b28aac 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -3,6 +3,7 @@
#include <linux/if_arp.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 46a8c291c08..18054d9c668 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
+#include <linux/module.h>
#include <net/lib80211.h>
#include "hostap_wlan.h"
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index 005ff25a405..75ef8f04aab 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -2,6 +2,7 @@
#include <linux/types.h>
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include <net/lib80211.h>
#include "hostap_wlan.h"
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 00000000000..5e1a19fd354
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "common.h"
+#include "3945.h"
+
+static int
+il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(il->_3945.stats.flag));
+ if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
+ return p;
+}
+
+ssize_t
+il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct iwl39_stats_rx_phy) * 40 +
+ sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
+ ssize_t ret;
+ struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct iwl39_stats_rx_non_phy *general, *accum_general;
+ struct iwl39_stats_rx_non_phy *delta_general, *max_general;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last stats notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ ofdm = &il->_3945.stats.rx.ofdm;
+ cck = &il->_3945.stats.rx.cck;
+ general = &il->_3945.stats.rx.general;
+ accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
+ accum_cck = &il->_3945.accum_stats.rx.cck;
+ accum_general = &il->_3945.accum_stats.rx.general;
+ delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
+ delta_cck = &il->_3945.delta_stats.rx.cck;
+ delta_general = &il->_3945.delta_stats.rx.general;
+ max_ofdm = &il->_3945.max_delta.rx.ofdm;
+ max_cck = &il->_3945.max_delta.rx.cck;
+ max_general = &il->_3945.max_delta.rx.general;
+
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last stats notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ tx = &il->_3945.stats.tx;
+ accum_tx = &il->_3945.accum_stats.tx;
+ delta_tx = &il->_3945.delta_stats.tx;
+ max_tx = &il->_3945.max_delta.tx;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
+ ssize_t ret;
+ struct iwl39_stats_general *general, *accum_general;
+ struct iwl39_stats_general *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last stats notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ general = &il->_3945.stats.general;
+ dbg = &il->_3945.stats.general.dbg;
+ div = &il->_3945.stats.general.div;
+ accum_general = &il->_3945.accum_stats.general;
+ delta_general = &il->_3945.delta_stats.general;
+ max_general = &il->_3945.max_delta.general;
+ accum_dbg = &il->_3945.accum_stats.general.dbg;
+ delta_dbg = &il->_3945.delta_stats.general.dbg;
+ max_dbg = &il->_3945.max_delta.general.dbg;
+ accum_div = &il->_3945.accum_stats.general.div;
+ delta_div = &il->_3945.delta_stats.general.div;
+ max_div = &il->_3945.max_delta.general.div;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 00000000000..daef6b58f6c
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3977 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl3945"
+
+#include "commands.h"
+#include "common.h"
+#include "3945.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION \
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+ /* module parameters */
+struct il_mod_params il3945_mod_params = {
+ .sw_crypto = 1,
+ .restart_fw = 1,
+ .disable_hw_scan = 1,
+ /* the rest are 0 by default */
+};
+
+/**
+ * il3945_get_antenna_flags - Get antenna flags for RXON command
+ * @il: eeprom and antenna fields are used to determine antenna flags
+ *
+ * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
+ * il3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IL_ANTENNA_MAIN - Force MAIN antenna
+ * IL_ANTENNA_AUX - Force AUX antenna
+ */
+__le32
+il3945_get_antenna_flags(const struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ switch (il3945_mod_params.antenna) {
+ case IL_ANTENNA_DIVERSITY:
+ return 0;
+
+ case IL_ANTENNA_MAIN:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+ case IL_ANTENNA_AUX:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ }
+
+ /* bad antenna selector value */
+ IL_ERR("Bad antenna selector value (0x%x)\n",
+ il3945_mod_params.antenna);
+
+ return 0; /* "diversity" is default if error */
+}
+
+static int
+il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ int ret;
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+ if (sta_id == il->ctx.bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = keyconf->keyidx;
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ D_INFO("hwcrypto: modify ucode station key info\n");
+
+ ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static int
+il3945_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_set_wep_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ D_INFO("hwcrypto: clear ucode station key info\n");
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ int ret = 0;
+
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+static int
+il3945_remove_static_key(struct il_priv *il)
+{
+ int ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static int
+il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
+{
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return -EOPNOTSUPP;
+
+ IL_ERR("Static key invalid: cipher %x\n", key->cipher);
+ return -EINVAL;
+}
+
+static void
+il3945_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il3945_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il3945_frame *
+il3945_get_free_frame(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il3945_frame, list);
+}
+
+static void
+il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+unsigned int
+il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+
+ if (!il_is_associated(il) || !il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+static int
+il3945_send_beacon_cmd(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ unsigned int frame_size;
+ int rc;
+ u8 rate;
+
+ frame = il3945_get_free_frame(il);
+
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ rate = il_get_lowest_plcp(il, &il->ctx);
+
+ frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il3945_free_frame(il, frame);
+
+ return rc;
+}
+
+static void
+il3945_unset_hw_params(struct il_priv *il)
+{
+ if (il->_3945.shared_virt)
+ dma_free_coherent(&il->pci_dev->dev,
+ sizeof(struct il3945_shared),
+ il->_3945.shared_virt, il->_3945.shared_phys);
+}
+
+static void
+il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_device_cmd *cmd,
+ struct sk_buff *skb_frag, int sta_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
+
+ tx_cmd->sec_ctl = 0;
+
+ switch (keyinfo->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |=
+ TX_CMD_SEC_WEP | (info->control.hw_key->
+ hw_key_idx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT;
+
+ memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ info->control.hw_key->hw_key_idx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
+ break;
+ }
+}
+
+/*
+ * Build the basic fields of a C_TX command (rate and crypto are filled
+ * in separately).
+ */
+static void
+il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ __le32 tx_flags = tx_cmd->tx_flags;
+ __le16 fc = hdr->frame_control;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start C_TX command process
+ */
+static int
+il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il3945_tx_cmd *tx_cmd;
+ struct il_tx_queue *txq = NULL;
+ struct il_queue *q = NULL;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ int txq_id = skb_get_queue_mapping(skb);
+ u16 len, idx, hdr_len;
+ u8 id;
+ u8 unicast;
+ u8 sta_id;
+ u8 tid = 0;
+ __le16 fc;
+ u8 wait_write_ptr = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
+ IL_INVALID_RATE) {
+ IL_ERR("ERROR: No TX rate available.\n");
+ goto drop_unlock;
+ }
+
+ unicast = !is_multicast_ether_addr(hdr->addr1);
+ id = 0;
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop;
+ }
+
+ D_RATE("station Id %d\n", sta_id);
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop;
+ }
+
+ /* Descriptor for chosen Tx queue */
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if ((il_queue_space(q) < q->high_mark))
+ goto drop;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ idx = il_get_cmd_idx(q, q->write_ptr, 0);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = &il->ctx;
+
+ /* Init first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+ tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ if (info->control.hw_key)
+ il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
+
+ /* set is_hcca to 0; it probably will never be implemented */
+ il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id, 0);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ il_dbg_log_tx_data_frame(il, len, hdr);
+ il_update_stats(il, true, fc, len);
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+ ieee80211_hdrlen(fc));
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len =
+ sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
+ hdr_len;
+ len = (len + 3) & ~3;
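+ /*
+ * Worked example of the rounding above (illustrative numbers only):
+ * (len + 3) & ~3 rounds len up to the next multiple of 4, so a
+ * combined command + MAC header length of 93 bytes becomes 96,
+ * while 96 stays 96.
+ */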
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+ /* We do not map the meta data ... so we can safely access the address
+ * to provide to the unmap command. */
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
+ 0);
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ len = skb->len - hdr_len;
+ if (len) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ len, 0, U32_PAD(len));
+ }
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ il_stop_queue(il, txq);
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+drop:
+ return -1;
+}
+
+static int
+il3945_get_measurement(struct il_priv *il,
+ struct ieee80211_measurement_params *params, u8 type)
+{
+ struct il_spectrum_cmd spectrum;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SPECTRUM_MEASUREMENT,
+ .data = (void *)&spectrum,
+ .flags = CMD_WANT_SKB,
+ };
+ u32 add_time = le64_to_cpu(params->start_time);
+ int rc;
+ int spectrum_resp_status;
+ int duration = le16_to_cpu(params->duration);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (il_is_associated(il))
+ add_time =
+ il_usecs_to_beacons(il,
+ le64_to_cpu(params->start_time) -
+ il->_3945.last_tsf,
+ le16_to_cpu(ctx->timing.
+ beacon_interval));
+
+ memset(&spectrum, 0, sizeof(spectrum));
+
+ spectrum.channel_count = cpu_to_le16(1);
+ spectrum.flags =
+ RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+ spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+ cmd.len = sizeof(spectrum);
+ spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+ if (il_is_associated(il))
+ spectrum.start_time =
+ il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
+ le16_to_cpu(ctx->timing.
+ beacon_interval));
+ else
+ spectrum.start_time = 0;
+
+ spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+ spectrum.channels[0].channel = params->channel;
+ spectrum.channels[0].type = type;
+ if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+ spectrum.flags |=
+ RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_TGG_PROTECT_MSK;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+ switch (spectrum_resp_status) {
+ case 0: /* Command will be handled */
+ if (pkt->u.spectrum.id != 0xff) {
+ D_INFO("Replaced existing measurement: %d\n",
+ pkt->u.spectrum.id);
+ il->measurement_status &= ~MEASUREMENT_READY;
+ }
+ il->measurement_status |= MEASUREMENT_ACTIVE;
+ rc = 0;
+ break;
+
+ case 1: /* Command will not be handled */
+ rc = -EAGAIN;
+ break;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+static void
+il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ il3945_disable_events(il);
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+static void
+il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+#endif
+
+ D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
+static void
+il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = beacon->beacon_notify_hdr.rate;
+
+ D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ IL_WARN("Card state received: HW:%s SW:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ il_scan_cancel(il);
+
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il3945_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il3945_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il3945_hdl_alive;
+ il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il3945_hdl_beacon;
+
+ /*
+ * The same handler is used for both the reply to a discrete
+ * stats request from the host and the periodic stats
+ * notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il3945_hdl_c_stats;
+ il->handlers[N_STATS] = il3945_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+ il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
+
+ /* Set up hardware specific Rx handlers */
+ il3945_hw_handler_setup(il);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ IDX is updated (updating the
+ * 'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il3945_rx_queue_restock
+ * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il3945_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
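+
+/*
+ * Illustrative sketch (not driver code): the empty/full rules described
+ * above for the circular RBD ring, using the driver's RX_QUEUE_MASK for
+ * the modulo arithmetic, with "read"/"write" standing in for
+ * rxq->read/rxq->write. The helper names are hypothetical.
+ */
+static inline int il3945_rxq_empty_sketch(u32 read, u32 write)
+{
+ /* empty (no good data): WRITE == READ - 1, modulo the queue size */
+ return (write & RX_QUEUE_MASK) == ((read - 1) & RX_QUEUE_MASK);
+}
+
+static inline int il3945_rxq_full_sketch(u32 read, u32 write)
+{
+ /* full: WRITE == READ */
+ return (write & RX_QUEUE_MASK) == (read & RX_QUEUE_MASK);
+}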
+
+/**
+ * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32) dma_addr);
+}
+
+/**
+ * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void
+il3945_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+ int write;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ write = rxq->write & ~0x7;
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7) ||
+ abs(rxq->write - rxq->read) > 7) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
+
+/**
+ * il3945_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When a buffer is moved to rx_free, an SKB is allocated for the slot.
+ *
+ * Also restocks the Rx queue via il3945_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void
+il3945_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ D_INFO("Failed to allocate SKB buffer.\n");
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to allocate SKB buffer with %0x."
+ "Only %u free buffers remaining.\n",
+ priority, rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ break;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ rxb->page = page;
+ /* Get physical address of RB/SKB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void
+il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+void
+il3945_rx_replenish(void *data)
+{
+ struct il_priv *il = data;
+ unsigned long flags;
+
+ il3945_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il3945_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+static void
+il3945_rx_replenish_now(struct il_priv *il)
+{
+ il3945_rx_allocate(il, GFP_ATOMIC);
+
+ il3945_rx_queue_restock(il);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the pool entry must have its SKB set to NULL.
+ * This free routine walks the list of pool entries; any SKB that is still
+ * non-NULL is unmapped and freed.
+ */
+static void
+il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/* 0 1 2 3 4 5 6 7 8 9 */
+ 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
+ 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
+ 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
+ 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
+ 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
+ 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
+ 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
+ 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
+ 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
+ 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ * (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
+int
+il3945_calc_db_from_ratio(int sig_ratio)
+{
+ /* 1000:1 or higher just report as 60 dB */
+ if (sig_ratio >= 1000)
+ return 60;
+
+ /* 100:1 or higher, divide by 10 and use table,
+ * add 20 dB to make up for divide by 10 */
+ if (sig_ratio >= 100)
+ return 20 + (int)ratio2dB[sig_ratio / 10];
+
+ /* We shouldn't see this */
+ if (sig_ratio < 1)
+ return 0;
+
+ /* Use table for ratios 1:1 - 99:1 */
+ return (int)ratio2dB[sig_ratio];
+}
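+
+/*
+ * Worked examples for the conversion above (voltage ratios, i.e. 20*log10):
+ * sig_ratio 2    -> ratio2dB[2]       =  6 dB (20*log10(2)  ~=  6)
+ * sig_ratio 10   -> ratio2dB[10]      = 20 dB (20*log10(10)  = 20)
+ * sig_ratio 100  -> 20 + ratio2dB[10] = 40 dB
+ * sig_ratio 1000 and above is clamped to 60 dB.
+ */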
+
+/**
+ * il3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void
+il3945_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty = 0;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* calculate the total number of frames that need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il3945_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: From here on, always check rxb->page against NULL before
+ * touching it or its virtual memory (pkt), because a handler may
+ * already have taken or freed the page.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, rxb->page, 0,
+ PAGE_SIZE << il->hw_params.
+ rx_page_order, PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il3945_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ il3945_rx_replenish_now(il);
+ else
+ il3945_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il3945_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
+static const char *
+il3945_desc_lookup(int i)
+{
+ switch (i) {
+ case 1:
+ return "FAIL";
+ case 2:
+ return "BAD_PARAM";
+ case 3:
+ return "BAD_CHECKSUM";
+ case 4:
+ return "NMI_INTERRUPT";
+ case 5:
+ return "SYSASSERT";
+ case 6:
+ return "FATAL_ERROR";
+ }
+
+ return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
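+
+/* Layout of the on-card error log decoded by il3945_dump_nic_error_log()
+ * below: word 0 holds the record count, followed by records of seven
+ * 32-bit words each: { desc, time, blink1, blink2, ilink1, ilink2, data1 },
+ * hence the one-word start offset and seven-word element size above. */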
+
+void
+il3945_dump_nic_error_log(struct il_priv *il)
+{
+ u32 i;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X\n", base);
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ IL_ERR("Desc Time asrtPC blink2 "
+ "ilink1 nmiPC Line\n");
+ for (i = ERROR_START_OFFSET;
+ i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+ i += ERROR_ELEM_SIZE) {
+ desc = il_read_targ_mem(il, base + i);
+ time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
+
+ IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+ il3945_desc_lookup(desc), desc, time, blink1, blink2,
+ ilink1, ilink2, data1);
+ }
+}
+
+static void
+il3945_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR39_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR39_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished to transmit "
+ "the frame/frames.\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ il_txq_update_write_ptr(il, &il->txq[0]);
+ il_txq_update_write_ptr(il, &il->txq[1]);
+ il_txq_update_write_ptr(il, &il->txq[2]);
+ il_txq_update_write_ptr(il, &il->txq[3]);
+ il_txq_update_write_ptr(il, &il->txq[4]);
+ il_txq_update_write_ptr(il, &il->txq[5]);
+
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il3945_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("Tx interrupt\n");
+ il->isr_stats.tx++;
+
+ _il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
+ il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
+ handled |= CSR_INT_BIT_FH_TX;
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~il->inta_mask) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with inta_fh = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+static int
+il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct il3945_scan_channel *scan_ch,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ scan_ch->channel = chan->hw_value;
+
+ ch_info = il_get_channel_info(il, band, scan_ch->channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ scan_ch->channel);
+ continue;
+ }
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* If passive, set up for auto-switch
+ * and use long active_dwell time.
+ */
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+ scan_ch->type = 0; /* passive */
+ if (IL_UCODE_API(il->ucode_ver) == 1)
+ scan_ch->active_dwell =
+ cpu_to_le16(passive_dwell - 1);
+ } else {
+ scan_ch->type = 1; /* active */
+ }
+
+ /* Set direct probe bits. These may be used both for active
+ * scan channels (probes get sent right away) and for passive
+ * channels (probes get sent only after hearing a clear Rx
+ * packet). */
+ if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ if (n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ } else {
+ /* uCode v1 does not allow setting direct probe bits on
+ * passive channel. */
+ if ((scan_ch->type & 1) && n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ }
+
+ /* Set txpower levels to defaults */
+ scan_ch->tpc.dsp_atten = 110;
+ /* scan_pwr_info->tpc.dsp_atten; */
+
+ /*scan_pwr_info->tpc.tx_gain; */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else {
+ scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ }
+
+ D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
+ (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->type & 1) ? active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
+static void
+il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il3945_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il3945_rates[i].plcp ==
+ 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
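+
+/* Note on the conversion above: il3945_rates[].ieee holds the 802.11 rate
+ * value in 500 kbps units, while mac80211's ieee80211_rate.bitrate is in
+ * 100 kbps units, hence the multiply by 5.  For example, 1 Mbps CCK has
+ * an ieee value of 2 and becomes bitrate = 10. */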
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il3945_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+/**
+ * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int rc = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return rc;
+}
+
+/**
+ * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int rc = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+#if 0 /* Enable this if you want to see details */
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
+ *image);
+#endif
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * il3945_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+static int
+il3945_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int rc = 0;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_full(il, image, len);
+
+ return rc;
+}
+
+static void
+il3945_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+#define IL3945_UCODE_GET(item) \
+static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
+{ \
+ return le32_to_cpu(ucode->v1.item); \
+}
+
+static u32
+il3945_ucode_get_header_size(u32 api_ver)
+{
+ return 24;
+}
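+
+/* The fixed 24 above corresponds to the v1 header layout: one __le32
+ * version word plus the five __le32 size fields accessed through
+ * IL3945_UCODE_GET() below, i.e. 6 * sizeof(__le32). */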
+
+static u8 *
+il3945_ucode_get_data(const struct il_ucode_header *ucode)
+{
+ return (u8 *) ucode->v1.data;
+}
+
+IL3945_UCODE_GET(inst_size);
+IL3945_UCODE_GET(data_size);
+IL3945_UCODE_GET(init_size);
+IL3945_UCODE_GET(init_data_size);
+IL3945_UCODE_GET(boot_size);
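+
+/* Each IL3945_UCODE_GET() line above expands to a small accessor, e.g.
+ * IL3945_UCODE_GET(inst_size) defines
+ *   static u32 il3945_ucode_get_inst_size(const struct il_ucode_header *ucode)
+ *   { return le32_to_cpu(ucode->v1.inst_size); }
+ * which il3945_read_ucode() uses below to parse the firmware header. */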
+
+/**
+ * il3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int
+il3945_read_ucode(struct il_priv *il)
+{
+ const struct il_ucode_header *ucode;
+ int ret = -EINVAL, idx;
+ const struct firmware *ucode_raw;
+ /* firmware file name contains uCode/driver compatibility version */
+ const char *name_pre = il->cfg->fw_name_pre;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ char buf[25];
+ u8 *src;
+ size_t len;
+ u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+ /* Ask kernel firmware_class module to get the boot firmware off disk.
+ * request_firmware() is synchronous, file is in memory on return. */
+ for (idx = api_max; idx >= api_min; idx--) {
+ sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
+ ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
+ if (ret < 0) {
+ IL_ERR("%s firmware file req failed: %d\n", buf, ret);
+ if (ret == -ENOENT)
+ continue;
+ else
+ goto error;
+ } else {
+ if (idx < api_max)
+ IL_ERR("Loaded firmware %s, "
+ "which is deprecated. "
+ " Please use API v%u instead.\n", buf,
+ api_max);
+ D_INFO("Got firmware '%s' file "
+ "(%zd bytes) from disk\n", buf, ucode_raw->size);
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Make sure that we got at least our header! */
+ if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
+ IL_ERR("File size way too small!\n");
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+ inst_size = il3945_ucode_get_inst_size(ucode);
+ data_size = il3945_ucode_get_data_size(ucode);
+ init_size = il3945_ucode_get_init_size(ucode);
+ init_data_size = il3945_ucode_get_init_data_size(ucode);
+ boot_size = il3945_ucode_get_boot_size(ucode);
+ src = il3945_ucode_get_data(ucode);
+
+ /* api_ver should match the API version that forms part of the
+ * firmware filename; we don't check for that, and from here on we
+ * rely only on the API version read from the firmware header. */
+
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ il->ucode_ver = 0;
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected %u, "
+ "got %u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
+ D_INFO("f/w package hdr runtime data size = %u\n", data_size);
+ D_INFO("f/w package hdr init inst size = %u\n", init_size);
+ D_INFO("f/w package hdr init data size = %u\n", init_data_size);
+ D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
+ init_size + init_data_size + boot_size) {
+
+ D_INFO("uCode file size %zd does not match expected size\n",
+ ucode_raw->size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (inst_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode instr len %d too large to fit in\n", inst_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ if (data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode data len %d too large to fit in\n", data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode init instr len %d too large to fit in\n",
+ init_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode init data len %d too large to fit in\n",
+ init_data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (boot_size > IL39_MAX_BSM_SIZE) {
+ D_INFO("uCode boot instr len %d too large to fit in\n",
+ boot_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (init_size && init_data_size) {
+ il->ucode_init.len = init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (boot_size) {
+ il->ucode_boot.len = boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ len = inst_size;
+ D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
+ memcpy(il->ucode_code.v_addr, src, len);
+ src += len;
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /* Runtime data (2nd block)
+ * NOTE: Copy into backup buffer will be done in il3945_up() */
+ len = data_size;
+ D_INFO("Copying (but not loading) uCode data len %zd\n", len);
+ memcpy(il->ucode_data.v_addr, src, len);
+ memcpy(il->ucode_data_backup.v_addr, src, len);
+ src += len;
+
+ /* Initialization instructions (3rd block) */
+ if (init_size) {
+ len = init_size;
+ D_INFO("Copying (but not loading) init instr len %zd\n", len);
+ memcpy(il->ucode_init.v_addr, src, len);
+ src += len;
+ }
+
+ /* Initialization data (4th block) */
+ if (init_data_size) {
+ len = init_data_size;
+ D_INFO("Copying (but not loading) init data len %zd\n", len);
+ memcpy(il->ucode_init_data.v_addr, src, len);
+ src += len;
+ }
+
+ /* Bootstrap instructions (5th block) */
+ len = boot_size;
+ D_INFO("Copying (but not loading) boot instr len %zd\n", len);
+ memcpy(il->ucode_boot.v_addr, src, len);
+
+ /* We have our copies now; allow the OS to release its copy */
+ release_firmware(ucode_raw);
+ return 0;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ ret = -ENOMEM;
+ il3945_dealloc_ucode_pci(il);
+
+err_release:
+ release_firmware(ucode_raw);
+
+error:
+ return ret;
+}
+
+/**
+ * il3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il3945_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+
+ /* bits 31:0 for 3945 */
+ pinst = il->ucode_code.p_addr;
+ pdata = il->ucode_data_backup.p_addr;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be set up last; bit 31 signals uCode
+ * that all new ptr/size info is in place. */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return 0;
+}
+
+/**
+ * il3945_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il3945_init_alive_start(struct il_priv *il)
+{
+ /* Check alive response for "valid" sign from uCode */
+ if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Initialize Alive failed.\n");
+ goto restart;
+ }
+
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il3945_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+/**
+ * il3945_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il3945_init_alive_start()).
+ */
+static void
+il3945_alive_start(struct il_priv *il)
+{
+ int thermal_spin = 0;
+ u32 rfkill;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* Initialize uCode has loaded Runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "runtime" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ rfkill = il_rd_prph(il, APMG_RFKILL_REG);
+ D_INFO("RFKILL status: 0x%x\n", rfkill);
+
+ if (rfkill & 0x1) {
+ clear_bit(S_RF_KILL_HW, &il->status);
+ /* if RFKILL is not on, then wait for thermal
+ * sensor in adapter to kick in */
+ while (il3945_hw_get_temperature(il) == 0) {
+ thermal_spin++;
+ udelay(10);
+ }
+
+ if (thermal_spin)
+ D_INFO("Thermal calibration took %dus\n",
+ thermal_spin * 10);
+ } else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ /* After the ALIVE response, we can send commands to 3945 uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK_3945;
+
+ il_power_update_mode(il, true);
+
+ if (il_is_associated(il)) {
+ struct il3945_rxon_cmd *active_rxon =
+ (struct il3945_rxon_cmd *)(&ctx->active);
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, ctx);
+ }
+
+ /* Configure Bluetooth device coexistence support */
+ il_send_bt_config(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il3945_commit_rxon(il, ctx);
+
+ il3945_reg_txpower_periodic(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il3945_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il3945_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop the TX queue watchdog. S_EXIT_PENDING must be set here
+ * to prevent the timer from being rearmed. */
+ del_timer_sync(&il->watchdog);
+
+ /* Station information will now be cleared in device */
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il3945_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il3945_init() then
+ * clear all bits but the RF Kill bits and return */
+ if (!il_is_init(il)) {
+ il->status =
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ il->status &=
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il3945_hw_txq_ctx_stop(il);
+ il3945_hw_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il3945_clear_free_frames(il);
+}
+
+static void
+il3945_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il3945_down(il);
+ mutex_unlock(&il->mutex);
+
+ il3945_cancel_deferred_work(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+il3945_alloc_bcast_station(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+__il3945_up(struct il_priv *il)
+{
+ int rc, i;
+
+ rc = il3945_alloc_bcast_station(il);
+ if (rc)
+ return rc;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bring up\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else {
+ set_bit(S_RF_KILL_HW, &il->status);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return -ENODEV;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ rc = il3945_hw_nic_init(il);
+ if (rc) {
+ IL_ERR("Unable to int nic\n");
+ return rc;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ /* We return success when we resume from suspend and rf_kill is on. */
+ if (test_bit(S_RF_KILL_HW, &il->status))
+ return 0;
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ rc = il->cfg->ops->lib->load_ucode(il);
+
+ if (rc) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il3945_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il3945_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* tried to restart and config the device for as long as our
+ * patience could withstand */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il3945_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il3945_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change. This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
+static void
+il3945_rfkill_poll(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, _3945.rfkill_poll.work);
+ bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
+ bool new_rfkill =
+ !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+ if (new_rfkill != old_rfkill) {
+ if (new_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
+
+ D_RF_KILL("RF_KILL bit toggled to %s.\n",
+ new_rfkill ? "disable radio" : "enable radio");
+ }
+
+ /* Keep this running, even if radio now enabled. This will be
+ * cancelled in mac_start() if system decides to start again */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+}
+
+int
+il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il3945_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il3945_scan_cmd *scan;
+ u8 n_probes = 0;
+ enum ieee80211_band band;
+ bool is_active = false;
+ int ret;
+ u16 len;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("Fail to allocate scan memory\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+ /*
+ * suspend time format:
+ * 0-19: beacon interval in usec (time before exec.)
+ * 20-23: 0
+ * 24-31: number of beacons (suspend between channels)
+ */
+
+ extra = (suspend_time / interval) << 24;
+ scan_suspend_time =
+ 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
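+ /* For example, with suspend_time = 100 and a beacon interval of 100,
+ * extra = 1 << 24 and the remainder term is 0, giving
+ * scan_suspend_time = 0x01000000 (one beacon, no residual time). */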
+
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Kicking off passive scan.\n");
+
+ /* We don't build a direct scan probe request; the uCode will do
+ * that based on the direct_mask added to each channel entry */
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ /* flags + rate selection */
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ scan->tx_cmd.rate = RATE_1M_PLCP;
+ band = IEEE80211_BAND_2GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ scan->tx_cmd.rate = RATE_6M_PLCP;
+ band = IEEE80211_BAND_5GHZ;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel
+ * is marked passive, we can do active scanning if we
+ * detect transmissions.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_DISABLED;
+
+ len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(len);
+
+ /* select Rx antennas */
+ scan->flags |= il3945_get_antenna_flags(il);
+
+ scan->channel_count =
+ il3945_get_channels_for_scan(il, band, is_active, n_probes,
+ (void *)&scan->data[len], vif);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il3945_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+ return ret;
+}
+
+void
+il3945_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il3945_commit_rxon(il, ctx);
+}
+
+static void
+il3945_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+ mutex_unlock(&il->mutex);
+ il3945_down(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il3945_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il3945_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il3945_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_rx_replenish(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il3945_post_associate(struct il_priv *il)
+{
+ int rc = 0;
+ struct ieee80211_conf *conf = NULL;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (!ctx->vif || !il->is_open)
+ return;
+
+ D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
+ ctx->vif->bss_conf.beacon_int);
+
+ if (ctx->vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (ctx->vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il3945_commit_rxon(il, ctx);
+
+ switch (ctx->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ il3945_rate_scale_init(il->hw, IL_AP_ID);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il3945_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ ctx->vif->type);
+ break;
+ }
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (2 * HZ)
+
+static int
+il3945_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+
+ /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+ * ucode filename and max sizes are card-specific. */
+
+ if (!il->ucode_code.len) {
+ ret = il3945_read_ucode(il);
+ if (ret) {
+ IL_ERR("Could not read microcode: %d\n", ret);
+ mutex_unlock(&il->mutex);
+ goto out_release_irq;
+ }
+ }
+
+ ret = __il3945_up(il);
+
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ goto out_release_irq;
+
+ D_INFO("Start UP work.\n");
+
+ /* Wait for START_ALIVE from ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ ret = -ETIMEDOUT;
+ goto out_release_irq;
+ }
+ }
+
+ /* ucode is running and will send rfkill notifications,
+ * no need to poll the killswitch state anymore */
+ cancel_delayed_work(&il->_3945.rfkill_poll);
+
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+
+out_release_irq:
+ il->is_open = 0;
+ D_MAC80211("leave - failed\n");
+ return ret;
+}
+
+static void
+il3945_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open) {
+ D_MAC80211("leave - skip\n");
+ return;
+ }
+
+ il->is_open = 0;
+
+ il3945_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* start polling the killswitch state again */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+ D_MAC80211("leave\n");
+}
+
+static void
+il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il3945_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il3945_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int rc = 0;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!(il_is_associated(il))) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - "
+ "Attempting to continue.\n");
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+ }
+ il3945_send_beacon_cmd(il);
+}
+
+static int
+il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ int ret = 0;
+ u8 sta_id = IL_INVALID_STATION;
+ u8 static_key;
+
+ D_MAC80211("enter\n");
+
+ if (il3945_mod_params.sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS; the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ static_key = !il_is_associated(il);
+
+ if (!static_key) {
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+ }
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ switch (cmd) {
+ case SET_KEY:
+ if (static_key)
+ ret = il3945_set_static_key(il, key);
+ else
+ ret = il3945_set_dynamic_key(il, key, sta_id);
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (static_key)
+ ret = il3945_remove_static_key(il);
+ else
+ ret = il3945_clear_sta_key_info(il, sta_id);
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+static int
+il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ ret =
+ il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il3945_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+static void
+il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
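+
+/* For example, CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK) below adds
+ * RXON_FILTER_CTL2HOST_MSK to filter_or when mac80211 requests control
+ * frames, and to filter_nand (the bits cleared from the staging RXON)
+ * otherwise. */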
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because the hardware may be performing
+ * a scan; even when the hw is ready, committing here breaks for some
+ * reason. We'll eventually commit the filter flags change anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed through sysfs below is a per-device debug
+ * level that, when set, is used instead of the global debug level.
+ */
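+/*
+ * Typical usage from userspace (the PCI path below is only illustrative):
+ *   cat /sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level
+ *   echo 0x43fff > /sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level
+ */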
+static ssize_t
+il3945_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il3945_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_INFO("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
+ il3945_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il3945_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
+
+static ssize_t
+il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il3945_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+ u32 val;
+
+ val = simple_strtoul(p, &p, 10);
+ if (p == buf)
+ IL_INFO(": %s is not in decimal form.\n", buf);
+ else
+ il3945_hw_reg_set_txpower(il, val);
+
+ return count;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
+ il3945_store_tx_power);
+
+static ssize_t
+il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ return sprintf(buf, "0x%04X\n", ctx->active.flags);
+}
+
+static ssize_t
+il3945_store_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ u32 flags = simple_strtoul(buf, NULL, 0);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.flags) != flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.flags = 0x%04X\n", flags);
+ ctx->staging.flags = cpu_to_le32(flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
+ il3945_store_flags);
+
+static ssize_t
+il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
+}
+
+static ssize_t
+il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
+ filter_flags);
+ ctx->staging.filter_flags = cpu_to_le32(filter_flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
+ il3945_store_filter_flags);
+
+static ssize_t
+il3945_show_measurement(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_spectrum_notification measure_report;
+ u32 size = sizeof(measure_report), len = 0, ofs = 0;
+ u8 *data = (u8 *) &measure_report;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (!(il->measurement_status & MEASUREMENT_READY)) {
+ spin_unlock_irqrestore(&il->lock, flags);
+ return 0;
+ }
+ memcpy(&measure_report, &il->measure_report, size);
+ il->measurement_status = 0;
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ while (size && PAGE_SIZE - len) {
+ hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+ PAGE_SIZE - len, 1);
+ len = strlen(buf);
+ if (PAGE_SIZE - len)
+ buf[len++] = '\n';
+
+ ofs += 16;
+ size -= min(size, 16U);
+ }
+
+ return len;
+}
+
+static ssize_t
+il3945_store_measurement(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_measurement_params params = {
+ .channel = le16_to_cpu(ctx->active.channel),
+ .start_time = cpu_to_le64(il->_3945.last_tsf),
+ .duration = cpu_to_le16(1),
+ };
+ u8 type = IL_MEASURE_BASIC;
+ u8 buffer[32];
+ u8 channel;
+
+ if (count) {
+ char *p = buffer;
+ strncpy(buffer, buf, min(sizeof(buffer), count));
+ channel = simple_strtoul(p, NULL, 0);
+ if (channel)
+ params.channel = channel;
+
+ p = buffer;
+ while (*p && *p != ' ')
+ p++;
+ if (*p)
+ type = simple_strtoul(p + 1, NULL, 0);
+ }
+
+ D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
+ type, params.channel, buf);
+ il3945_get_measurement(il, &params, type);
+
+ return count;
+}
+
+static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
+ il3945_store_measurement);
+
+static ssize_t
+il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ il->retry_rate = simple_strtoul(buf, NULL, 0);
+ if (il->retry_rate <= 0)
+ il->retry_rate = 1;
+
+ return count;
+}
+
+static ssize_t
+il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d", il->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
+ il3945_store_retry_rate);
+
+static ssize_t
+il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
+{
+ /* none of this really belongs in sysfs anyway */
+ return 0;
+}
+
+static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
+
+static ssize_t
+il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_mod_params.antenna);
+}
+
+static ssize_t
+il3945_store_antenna(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il __maybe_unused = dev_get_drvdata(d);
+ int ant;
+
+ if (count == 0)
+ return 0;
+
+ if (sscanf(buf, "%1i", &ant) != 1) {
+ D_INFO("not in hex or decimal form.\n");
+ return count;
+ }
+
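+ /* Valid values mirror the "antenna" module parameter:
+ * 0 = both (diversity), 1 = main, 2 = aux. */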
+ if (ant >= 0 && ant <= 2) {
+ D_INFO("Setting antenna select to %d.\n", ant);
+ il3945_mod_params.antenna = (enum il3945_antenna)ant;
+ } else
+ D_INFO("Bad antenna select value %d.\n", ant);
+
+ return count;
+}
+
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
+ il3945_store_antenna);
+
+static ssize_t
+il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ if (!il_is_alive(il))
+ return -EAGAIN;
+ return sprintf(buf, "0x%08x\n", (int)il->status);
+}
+
+static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
+
+static ssize_t
+il3945_dump_error_log(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+
+ if (p[0] == '1')
+ il3945_dump_nic_error_log(il);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void
+il3945_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il3945_bg_restart);
+ INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
+ INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
+ INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
+
+ il_setup_scan_deferred_work(il);
+
+ il3945_hw_setup_deferred_work(il);
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il3945_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il3945_cancel_deferred_work(struct il_priv *il)
+{
+ il3945_hw_cancel_deferred_work(il);
+
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+
+ il_cancel_scan_deferred_work(il);
+}
+
+static struct attribute *il3945_sysfs_entries[] = {
+ &dev_attr_antenna.attr,
+ &dev_attr_channels.attr,
+ &dev_attr_dump_errors.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_filter_flags.attr,
+ &dev_attr_measurement.attr,
+ &dev_attr_retry_rate.attr,
+ &dev_attr_status.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il3945_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il3945_sysfs_entries,
+};
+
+struct ieee80211_ops il3945_hw_ops = {
+ .tx = il3945_mac_tx,
+ .start = il3945_mac_start,
+ .stop = il3945_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il3945_configure_filter,
+ .set_key = il3945_mac_set_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il3945_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static int
+il3945_init_drv(struct il_priv *il)
+{
+ int ret;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ il->retry_rate = 1;
+ il->beacon_skb = NULL;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+ IL_WARN("Unsupported EEPROM version: 0x%04X\n",
+ eeprom->version);
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ /* Set up txpower settings in driver for all channels */
+ if (il3945_txpower_set_from_eeprom(il)) {
+ ret = -EIO;
+ goto err_free_channel_map;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il3945_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+#define IL3945_MAX_PROBE_REQUEST 200
+
+static int
+il3945_setup_mac(struct il_priv *il)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-3945-rs";
+ hw->sta_data_size = sizeof(struct il3945_sta_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+
+ hw->wiphy->interface_modes = il->ctx.interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+static int
+il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ struct il3945_eeprom *eeprom;
+ unsigned long flags;
+
+ /***********************
+ * 1. Allocating HW data
+ * ********************/
+
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ hw = il_alloc_all(cfg);
+ if (hw == NULL) {
+ pr_err("Can not allocate network device\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ il->cmd_queue = IL39_CMD_QUEUE_NUM;
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ /*
+ * Disabling hardware scan means that mac80211 will perform scans
+ * "the hard way", rather than using device's scan.
+ */
+ if (il3945_mod_params.disable_hw_scan) {
+ D_INFO("Disabling hw_scan\n");
+ il3945_hw_ops.hw_scan = NULL;
+ }
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /***************************
+ * 2. Initializing PCI bus
+ * *************************/
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+
+ pci_set_drvdata(pdev, il);
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ /***********************
+ * 3. Read REV Register
+ * ********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, 0x41, 0x00);
+
+ /* these spin locks will be used in apm_ops.init and EEPROM access
+ * we should init now
+ */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /***********************
+ * 4. Read EEPROM
+ * ********************/
+
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ /* MAC Address location in EEPROM same for 3945/4965 */
+ eeprom = (struct il3945_eeprom *)il->eeprom;
+ D_INFO("MAC address: %pM\n", eeprom->mac_address);
+ SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
+
+ /***********************
+ * 5. Setup HW Constants
+ * ********************/
+ /* Device-specific setup */
+ if (il3945_hw_set_hw_params(il)) {
+ IL_ERR("failed to set hw settings\n");
+ goto out_eeprom_free;
+ }
+
+ /***********************
+ * 6. Setup il
+ * ********************/
+
+ err = il3945_init_drv(il);
+ if (err) {
+ IL_ERR("initializing driver failed\n");
+ goto out_unset_hw_params;
+ }
+
+ IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
+
+ /***********************
+ * 7. Setup Services
+ * ********************/
+
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_release_irq;
+ }
+
+ il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
+ &il->ctx);
+ il3945_setup_deferred_work(il);
+ il3945_setup_handlers(il);
+ il_power_initialize(il);
+
+ /*********************************
+ * 8. Setup and Register mac80211
+ * *******************************/
+
+ il_enable_interrupts(il);
+
+ err = il3945_setup_mac(il);
+ if (err)
+ goto out_remove_sysfs;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ /* Start monitoring the killswitch */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
+
+ return 0;
+
+out_remove_sysfs:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+out_release_irq:
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il_free_geos(il);
+ il_free_channel_map(il);
+out_unset_hw_params:
+ il3945_unset_hw_params(il);
+out_eeprom_free:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il3945_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il3945_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il_down(), but there are paths to
+ * run il_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_synchronize_irq(il);
+
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+
+ cancel_delayed_work_sync(&il->_3945.rfkill_poll);
+
+ il3945_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il3945_rx_queue_free(il, &il->rxq);
+ il3945_hw_txq_ctx_free(il);
+
+ il3945_unset_hw_params(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(pdev->irq, il);
+ pci_disable_msi(pdev);
+
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il_free_channel_map(il);
+ il_free_geos(il);
+ kfree(il->scan_cmd);
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver il3945_driver = {
+ .name = DRV_NAME,
+ .id_table = il3945_hw_card_ids,
+ .probe = il3945_pci_probe,
+ .remove = __devexit_p(il3945_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il3945_init(void)
+{
+
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il3945_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il3945_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il3945_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il3945_exit(void)
+{
+ pci_unregister_driver(&il3945_driver);
+ il3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
+
+module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(il3945_exit);
+module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 00000000000..30ad404f8df
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,995 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "commands.h"
+#include "3945.h"
+
+#define RS_NAME "iwl-3945-rs"
+
+static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
+ 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct il3945_tpt_entry {
+ s8 min_rssi;
+ u8 idx;
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_a[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-72, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-87, RATE_9M_IDX},
+ {-89, RATE_6M_IDX}
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_g[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-68, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-86, RATE_11M_IDX},
+ {-88, RATE_5M_IDX},
+ {-90, RATE_2M_IDX},
+ {-92, RATE_1M_IDX}
+};
+
+#define RATE_MAX_WINDOW 62
+#define RATE_FLUSH (3*HZ)
+#define RATE_WIN_FLUSH (HZ/2)
+#define IL39_RATE_HIGH_TH 11520
+#define IL_SUCCESS_UP_TH 8960
+#define IL_SUCCESS_DOWN_TH 10880
+#define RATE_MIN_FAILURE_TH 6
+#define RATE_MIN_SUCCESS_TH 8
+#define RATE_DECREASE_TH 1920
+#define RATE_RETRY_TH 15
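+/* Success-ratio thresholds such as RATE_DECREASE_TH compare against
+ * win->success_ratio, which il3945_collect_tx_data() stores as a
+ * percentage scaled by 128 (so 1920 is roughly a 15% success rate). */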
+
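+/* Pick a starting rate from the RSSI tables above: scan from the
+ * highest rate down and take the first entry whose min_rssi the
+ * measured RSSI meets; e.g. -70 dBm on 2.4 GHz selects RATE_24M_IDX. */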
+static u8
+il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+{
+ u32 idx = 0;
+ u32 table_size = 0;
+ struct il3945_tpt_entry *tpt_table = NULL;
+
+ if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
+ rssi = IL_MIN_RSSI_VAL;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ tpt_table = il3945_tpt_table_g;
+ table_size = ARRAY_SIZE(il3945_tpt_table_g);
+ break;
+
+ case IEEE80211_BAND_5GHZ:
+ tpt_table = il3945_tpt_table_a;
+ table_size = ARRAY_SIZE(il3945_tpt_table_a);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ while (idx < table_size && rssi < tpt_table[idx].min_rssi)
+ idx++;
+
+ idx = min(idx, (table_size - 1));
+
+ return tpt_table[idx].idx;
+}
+
+static void
+il3945_clear_win(struct il3945_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = -1;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
+/**
+ * il3945_rate_scale_flush_wins - flush out the rate scale wins
+ *
+ * Returns the number of wins that have gathered data but were
+ * not flushed. If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int
+il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
+{
+ int unflushed = 0;
+ int i;
+ unsigned long flags;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /*
+ * For each rate, if we have collected data on that rate
+ * and it has been more than RATE_WIN_FLUSH
+ * since we flushed, clear out the gathered stats
+ */
+ for (i = 0; i < RATE_COUNT_3945; i++) {
+ if (!rs_sta->win[i].counter)
+ continue;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+ if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
+ D_RATE("flushing %d samples of rate " "idx %d\n",
+ rs_sta->win[i].counter, i);
+ il3945_clear_win(&rs_sta->win[i]);
+ } else
+ unflushed++;
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+ }
+
+ return unflushed;
+}
+
+#define RATE_FLUSH_MAX 5000 /* msec */
+#define RATE_FLUSH_MIN 50 /* msec */
+#define IL_AVERAGE_PACKETS 1500
+
+static void
+il3945_bg_rate_scale_flush(unsigned long data)
+{
+ struct il3945_rs_sta *rs_sta = (void *)data;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+ int unflushed = 0;
+ unsigned long flags;
+ u32 packet_count, duration, pps;
+
+ D_RATE("enter\n");
+
+ unflushed = il3945_rate_scale_flush_wins(rs_sta);
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /* Number of packets Tx'd since last time this timer ran */
+ packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+ rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+ if (unflushed) {
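+ /* Scale the next flush interval so that roughly IL_AVERAGE_PACKETS
+ * frames fit into it at the observed packet rate, clamped to
+ * [RATE_FLUSH_MIN, RATE_FLUSH_MAX] msec. */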
+ duration =
+ jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+ D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
+
+ /* Determine packets per second */
+ if (duration)
+ pps = (packet_count * 1000) / duration;
+ else
+ pps = 0;
+
+ if (pps) {
+ duration = (IL_AVERAGE_PACKETS * 1000) / pps;
+ if (duration < RATE_FLUSH_MIN)
+ duration = RATE_FLUSH_MIN;
+ else if (duration > RATE_FLUSH_MAX)
+ duration = RATE_FLUSH_MAX;
+ } else
+ duration = RATE_FLUSH_MAX;
+
+ rs_sta->flush_time = msecs_to_jiffies(duration);
+
+ D_RATE("new flush period: %d msec ave %d\n", duration,
+ packet_count);
+
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+
+ rs_sta->last_partial_flush = jiffies;
+ } else {
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->flush_pending = 0;
+ }
+ /* If there weren't any unflushed entries, we don't schedule the timer
+ * to run again */
+
+ rs_sta->last_flush = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
+
+/**
+ * il3945_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 64 packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static void
+il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
+ struct il3945_rate_scale_data *win, int success,
+ int retries, int idx)
+{
+ unsigned long flags;
+ s32 fail_count;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ if (!retries) {
+ D_RATE("leave: retries == 0 -- should be at least 1\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (retries > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
+ win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame (throw away oldest history),
+ * OR in "1", and increment "success" if this
+ * frame was successful. */
+ win->data <<= 1;
+ if (success > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ success--;
+ }
+
+ retries--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
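+ /* (a percentage scaled by 128: e.g. 31 successes out of 62 -> 6400) */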
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
+
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt =
+ ((win->success_ratio * rs_sta->expected_tpt[idx] +
+ 64) / 128);
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct il3945_sta_priv *psta;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_supported_band *sband;
+ int i;
+
+ D_INFO("enter\n");
+ if (sta_id == il->ctx.bcast_sta_id)
+ goto out;
+
+ psta = (struct il3945_sta_priv *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ rs_sta->il = il;
+
+ rs_sta->start_rate = RATE_INVALID;
+
+ /* default to just 802.11b */
+ rs_sta->expected_tpt = il3945_expected_tpt_b;
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->last_flush = jiffies;
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->last_tx_packets = 0;
+
+ rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+ rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
+
+ for (i = 0; i < RATE_COUNT_3945; i++)
+ il3945_clear_win(&rs_sta->win[i]);
+
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
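+ /* Start from the highest rate the station supports; the scaling
+ * algorithm steps down from there as needed. */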
+ for (i = sband->n_bitrates - 1; i >= 0; i--) {
+ if (sta->supp_rates[sband->band] & (1 << i)) {
+ rs_sta->last_txrate_idx = i;
+ break;
+ }
+ }
+
+ il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+ /* For the 5 GHz band, rates start at IL_FIRST_OFDM_RATE */
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ il->_3945.sta_supp_rates =
+ il->_3945.sta_supp_rates << IL_FIRST_OFDM_RATE;
+ }
+
+out:
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+
+ D_INFO("leave\n");
+}
+
+static void *
+il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il3945_rs_free(void *il)
+{
+ return;
+}
+
+static void *
+il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il3945_rs_sta *rs_sta;
+ struct il3945_sta_priv *psta = (void *)sta->drv_priv;
+ struct il_priv *il __maybe_unused = il_priv;
+
+ D_RATE("enter\n");
+
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_init(&rs_sta->lock);
+ init_timer(&rs_sta->rate_scale_flush);
+
+ D_RATE("leave\n");
+
+ return rs_sta;
+}
+
+static void
+il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il3945_rs_sta *rs_sta = il_sta;
+
+ /*
+ * Be careful not to use any members of il3945_rs_sta (like trying
+ * to use il_priv to print out debugging) since it may not be fully
+ * initialized at this point.
+ */
+ del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+/**
+ * il3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void
+il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ s8 retries = 0, current_count;
+ int scale_rate_idx, first_idx, last_idx;
+ unsigned long flags;
+ struct il_priv *il = (struct il_priv *)il_rate;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ retries = info->status.rates[0].count;
+ /* Sanity Check for retries */
+ if (retries > RATE_RETRY_TH)
+ retries = RATE_RETRY_TH;
+
+ first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
+ if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
+ D_RATE("leave: Rate out of bounds: %d\n", first_idx);
+ return;
+ }
+
+ if (!il_sta) {
+ D_RATE("leave: No STA il data to update!\n");
+ return;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!rs_sta->il) {
+ D_RATE("leave: STA il data uninitialized!\n");
+ return;
+ }
+
+ rs_sta->tx_packets++;
+
+ scale_rate_idx = first_idx;
+ last_idx = first_idx;
+
+ /*
+ * Update the win for each rate. We determine which rates
+ * were Tx'd based on the total number of retries vs. the number
+ * of retries configured for each rate -- currently the per-device
+ * 'retry_rate' value rather than a rate-specific count.
+ *
+ * On exit from this while loop last_idx indicates the rate
+ * at which the frame was finally transmitted (or failed if no
+ * ACK)
+ */
+ while (retries > 1) {
+ if ((retries - 1) < il->retry_rate) {
+ current_count = (retries - 1);
+ last_idx = scale_rate_idx;
+ } else {
+ current_count = il->retry_rate;
+ last_idx = il3945_rs_next_rate(il, scale_rate_idx);
+ }
+
+ /* Update this rate accounting for as many retries
+ * as was used for it (per current_count) */
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
+ current_count, scale_rate_idx);
+ D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
+ current_count);
+
+ retries -= current_count;
+
+ scale_rate_idx = last_idx;
+ }
+
+ /* Update the last idx win with success/failure based on ACK */
+ D_RATE("Update rate %d with %s.\n", last_idx,
+ (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
+ info->flags & IEEE80211_TX_STAT_ACK, 1,
+ last_idx);
+
+ /* We updated the rate scale win -- if its been more than
+ * flush_time since the last run, schedule the flush
+ * again */
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ if (!rs_sta->flush_pending &&
+ time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->flush_pending = 1;
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
+
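+/* Find the closest usable rates below and above idx within rate_mask.
+ * The result is packed as (high << 8) | low, with RATE_INVALID marking
+ * a missing neighbour. */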
+static u16
+il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
+ enum ieee80211_band band)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /* 802.11A walks to the next literal adjacent rate in
+ * the rate table */
+ if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ if (rs_sta->tgg)
+ low = il3945_rates[low].prev_rs_tgg;
+ else
+ low = il3945_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ if (rs_sta->tgg)
+ high = il3945_rates[high].next_rs_tgg;
+ else
+ high = il3945_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+/**
+ * il3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Returns the ieee80211_rate structure allocated by the driver.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the idx obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void
+il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct sk_buff *skb = txrc->skb;
+ u8 low = RATE_INVALID;
+ u8 high = RATE_INVALID;
+ u16 high_low;
+ int idx;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct il3945_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ unsigned long flags;
+ u16 rate_mask;
+ s8 max_rate_idx = -1;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (rs_sta && !rs_sta->il) {
+ D_RATE("Rate scaling information not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ rate_mask = sta->supp_rates[sband->band];
+
+ /* get user max rate if set */
+ max_rate_idx = txrc->max_rate_idx;
+ if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+ max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
+ max_rate_idx = -1;
+
+ idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
+
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /* for a recent association, choose the best rate based on
+ * the RSSI value
+ */
+ if (rs_sta->start_rate != RATE_INVALID) {
+ if (rs_sta->start_rate < idx &&
+ (rate_mask & (1 << rs_sta->start_rate)))
+ idx = rs_sta->start_rate;
+ rs_sta->start_rate = RATE_INVALID;
+ }
+
+ /* force user max rate if set by user */
+ if (max_rate_idx != -1 && max_rate_idx < idx) {
+ if (rate_mask & (1 << max_rate_idx))
+ idx = max_rate_idx;
+ }
+
+ win = &(rs_sta->win[idx]);
+
+ fail_count = win->counter - win->success_counter;
+
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("Invalid average_tpt on rate %d: "
+ "counter: %d, success_counter: %d, "
+ "expected_tpt is %sNULL\n", idx, win->counter,
+ win->success_counter,
+ rs_sta->expected_tpt ? "not " : "");
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+ goto out;
+
+ }
+
+ current_tpt = win->average_tpt;
+
+ high_low =
+ il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /* If the user set a max rate, don't allow rates above that constraint */
+ if (max_rate_idx != -1 && max_rate_idx < high)
+ high = RATE_INVALID;
+
+ /* Collect Measured throughputs of adjacent rates */
+ if (low != RATE_INVALID)
+ low_tpt = rs_sta->win[low].average_tpt;
+
+ if (high != RATE_INVALID)
+ high_tpt = rs_sta->win[high].average_tpt;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
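+ /* scale_action: -1 = step down to the next lower rate,
+ * 0 = keep the current rate, +1 = step up to the next higher rate */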
+ scale_action = 0;
+
+ /* Low success ratio, need to drop the rate */
+ if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+ /* No throughput measured yet for adjacent rates,
+ * try increase */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+
+ /* Both adjacent throughputs are measured, but neither one has
+ * better throughput; we're using the best rate, don't change
+ * it! */
+ } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
+ && low_tpt < current_tpt && high_tpt < current_tpt) {
+
+ D_RATE("No action -- low [%d] & high [%d] < "
+ "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
+ scale_action = 0;
+
+ /* At least one of the rates has better throughput */
+ } else {
+ if (high_tpt != IL_INVALID_VALUE) {
+
+ /* High rate has better throughput, Increase
+ * rate */
+ if (high_tpt > current_tpt &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else {
+ D_RATE("decrease rate because of high tpt\n");
+ scale_action = 0;
+ }
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (win->success_ratio >= RATE_INCREASE_TH) {
+ /* Lower rate is not better but our success
+ * ratio is good; try a higher rate */
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (win->success_ratio > RATE_HIGH_TH ||
+ current_tpt > 100 * rs_sta->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+
+ /* Decrease rate */
+ if (low != RATE_INVALID)
+ idx = low;
+ break;
+
+ case 1:
+ /* Increase rate */
+ if (high != RATE_INVALID)
+ idx = high;
+
+ break;
+
+ case 0:
+ default:
+ /* No change */
+ break;
+ }
+
+ D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
+ low, high);
+
+out:
+
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
+ idx = IL_FIRST_OFDM_RATE;
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
+ } else {
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = rs_sta->last_txrate_idx;
+ }
+
+ D_RATE("leave: %d\n", idx);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il3945_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int j;
+ ssize_t ret;
+ struct il3945_rs_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc +=
+ sprintf(buff + desc,
+ "tx packets=%d last rate idx=%d\n"
+ "rate=0x%X flush time %d\n", lq_sta->tx_packets,
+ lq_sta->last_txrate_idx, lq_sta->start_rate,
+ jiffies_to_msecs(lq_sta->flush_time));
+ for (j = 0; j < RATE_COUNT_3945; j++) {
+ desc +=
+ sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
+ lq_sta->win[j].counter,
+ lq_sta->win[j].success_counter,
+ lq_sta->win[j].success_ratio);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il3945_sta_dbgfs_stats_table_read,
+ .open = il3945_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void
+il3945_remove_debugfs(void *il, void *il_sta)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_ops = {
+ .module = NULL,
+ .name = RS_NAME,
+ .tx_status = il3945_rs_tx_status,
+ .get_rate = il3945_rs_get_rate,
+ .rate_init = il3945_rs_rate_init_stub,
+ .alloc = il3945_rs_alloc,
+ .free = il3945_rs_free,
+ .alloc_sta = il3945_rs_alloc_sta,
+ .free_sta = il3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il3945_add_debugfs,
+ .remove_sta_debugfs = il3945_remove_debugfs,
+#endif
+
+};
+
+void
+il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+ struct il_priv *il = hw->priv;
+ s32 rssi = 0;
+ unsigned long flags;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_sta *sta;
+ struct il3945_sta_priv *psta;
+
+ D_RATE("enter\n");
+
+ rcu_read_lock();
+
+ sta =
+ ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
+ if (!sta) {
+ D_RATE("Unable to find station to initialize rate scaling.\n");
+ rcu_read_unlock();
+ return;
+ }
+
+ psta = (void *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ rs_sta->tgg = 0;
+ switch (il->band) {
+ case IEEE80211_BAND_2GHZ:
+ /* TODO: this always does G, not a regression */
+ if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+ rs_sta->tgg = 1;
+ rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
+ } else
+ rs_sta->expected_tpt = il3945_expected_tpt_g;
+ break;
+
+ case IEEE80211_BAND_5GHZ:
+ rs_sta->expected_tpt = il3945_expected_tpt_a;
+ break;
+ case IEEE80211_NUM_BANDS:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ rssi = il->_3945.last_rx_rssi;
+ if (rssi == 0)
+ rssi = IL_MIN_RSSI_VAL;
+
+ D_RATE("Network RSSI: %d\n", rssi);
+
+ rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
+
+ D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
+ rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
+ rcu_read_unlock();
+}
+
+int
+il3945_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_ops);
+}
+
+void
+il3945_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 00000000000..863664f9ba8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2751 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "3945.h"
+
+/* Send led command */
+static int
+il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+
+ return il_send_cmd(il, &cmd);
+}
+
+const struct il_led_ops il3945_led_ops = {
+ .cmd = il3945_send_led_cmd,
+};
+
+#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX, \
+ RATE_##r##M_IDX_TBL, \
+ RATE_##ip##M_IDX_TBL }
+
+/*
+ * Parameter order:
+ * rate, prev ieee rate, next ieee rate,
+ * prev rate-scale rate, next rate-scale rate,
+ * prev TGG rate-scale rate, next TGG rate-scale rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
+ IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */
+};
+
+static inline u8
+il3945_get_prev_ieee_rate(u8 rate_idx)
+{
+ u8 rate = il3945_rates[rate_idx].prev_ieee;
+
+ if (rate == RATE_INVALID)
+ rate = rate_idx;
+ return rate;
+}
+
+/* 1 = enable the il3945_disable_events() function */
+#define IL_EVT_DISABLE (0)
+#define IL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * il3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ * bitmap in SRAM. Bit position corresponds to Event # (id/type).
+ * Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging. As-is, this function is just a placeholder;
+ * you'll need to provide the special bits! ...
+ * ... and set IL_EVT_DISABLE to 1. */
+void
+il3945_disable_events(struct il_priv *il)
+{
+ int i;
+ u32 base; /* SRAM address of event log header */
+ u32 disable_ptr; /* SRAM address of event-disable bitmap array */
+ u32 array_size; /* # of u32 entries in array */
+ static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
+ 0x00000000, /* 31 - 0 Event id numbers */
+ 0x00000000, /* 63 - 32 */
+ 0x00000000, /* 95 - 64 */
+ 0x00000000, /* 127 - 96 */
+ 0x00000000, /* 159 - 128 */
+ 0x00000000, /* 191 - 160 */
+ 0x00000000, /* 223 - 192 */
+ 0x00000000, /* 255 - 224 */
+ 0x00000000, /* 287 - 256 */
+ 0x00000000, /* 319 - 288 */
+ 0x00000000, /* 351 - 320 */
+ 0x00000000, /* 383 - 352 */
+ 0x00000000, /* 415 - 384 */
+ 0x00000000, /* 447 - 416 */
+ 0x00000000, /* 479 - 448 */
+ 0x00000000, /* 511 - 480 */
+ 0x00000000, /* 543 - 512 */
+ 0x00000000, /* 575 - 544 */
+ 0x00000000, /* 607 - 576 */
+ 0x00000000, /* 639 - 608 */
+ 0x00000000, /* 671 - 640 */
+ 0x00000000, /* 703 - 672 */
+ 0x00000000, /* 735 - 704 */
+ 0x00000000, /* 767 - 736 */
+ 0x00000000, /* 799 - 768 */
+ 0x00000000, /* 831 - 800 */
+ 0x00000000, /* 863 - 832 */
+ 0x00000000, /* 895 - 864 */
+ 0x00000000, /* 927 - 896 */
+ 0x00000000, /* 959 - 928 */
+ 0x00000000, /* 991 - 960 */
+ 0x00000000, /* 1023 - 992 */
+ 0x00000000, /* 1055 - 1024 */
+ 0x00000000, /* 1087 - 1056 */
+ 0x00000000, /* 1119 - 1088 */
+ 0x00000000, /* 1151 - 1120 */
+ 0x00000000, /* 1183 - 1152 */
+ 0x00000000, /* 1215 - 1184 */
+ 0x00000000, /* 1247 - 1216 */
+ 0x00000000, /* 1279 - 1248 */
+ 0x00000000, /* 1311 - 1280 */
+ 0x00000000, /* 1343 - 1312 */
+ 0x00000000, /* 1375 - 1344 */
+ 0x00000000, /* 1407 - 1376 */
+ 0x00000000, /* 1439 - 1408 */
+ 0x00000000, /* 1471 - 1440 */
+ 0x00000000, /* 1503 - 1472 */
+ };
+
+ base = le32_to_cpu(il->card_alive.log_event_table_ptr);
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Invalid event log pointer 0x%08X\n", base);
+ return;
+ }
+
+ disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
+ array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
+
+ if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
+ D_INFO("Disabling selected uCode log events at 0x%x\n",
+ disable_ptr);
+ for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
+ il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
+ evt_disable[i]);
+
+ } else {
+ D_INFO("Selected uCode log events may be disabled\n");
+ D_INFO(" by writing \"1\"s into disable bitmap\n");
+ D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr,
+ array_size);
+ }
+
+}
+
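+/* Map a PLCP rate value reported by the hardware to a RATE_*_IDX,
+ * or -1 if the value is not found in il3945_rates[]. */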
+static int
+il3945_hwrate_to_plcp_idx(u8 plcp)
+{
+ int idx;
+
+ for (idx = 0; idx < RATE_COUNT_3945; idx++)
+ if (il3945_rates[idx].plcp == plcp)
+ return idx;
+ return -1;
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ switch (status & TX_STATUS_MSK) {
+ case TX_3945_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_ENTRY(SHORT_LIMIT);
+ TX_STATUS_ENTRY(LONG_LIMIT);
+ TX_STATUS_ENTRY(FIFO_UNDERRUN);
+ TX_STATUS_ENTRY(MGMNT_ABORT);
+ TX_STATUS_ENTRY(NEXT_FRAG);
+ TX_STATUS_ENTRY(LIFE_EXPIRE);
+ TX_STATUS_ENTRY(DEST_PS);
+ TX_STATUS_ENTRY(ABORTED);
+ TX_STATUS_ENTRY(BT_RETRY);
+ TX_STATUS_ENTRY(STA_INVALID);
+ TX_STATUS_ENTRY(FRAG_DROPPED);
+ TX_STATUS_ENTRY(TID_DISABLE);
+ TX_STATUS_ENTRY(FRAME_FLUSHED);
+ TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+ TX_STATUS_ENTRY(TX_LOCKED);
+ TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+}
+#else
+static inline const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/*
+ * Get the previous ieee rate from the rate-scale table.
+ * For A and B modes we need to override the previous
+ * value.
+ */
+int
+il3945_rs_next_rate(struct il_priv *il, int rate)
+{
+ int next_rate = il3945_get_prev_ieee_rate(rate);
+
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ if (rate == RATE_12M_IDX)
+ next_rate = RATE_9M_IDX;
+ else if (rate == RATE_6M_IDX)
+ next_rate = RATE_6M_IDX;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+ if (rate == RATE_11M_IDX)
+ next_rate = RATE_5M_IDX;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return next_rate;
+}
+
+/**
+ * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+
+ BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+ tx_info->skb = NULL;
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+
+ if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
+ txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
+ il_wake_queue(il, txq);
+}
+
+/**
+ * il3945_hdl_tx - Handle Tx response
+ */
+static void
+il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_tx_info *info;
+ struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->status);
+ int rate_idx;
+ int fail;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ ieee80211_tx_info_clear_status(info);
+
+ /* Fill the MRR chain with some info about on-chip retransmissions */
+ rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+
+ fail = tx_resp->failure_frame;
+
+ info->status.rates[0].idx = rate_idx;
+ info->status.rates[0].count = fail + 1; /* add final attempt */
+
+ /* tx_status->rts_retry_count = tx_resp->failure_rts; */
+ info->flags |=
+ ((status & TX_STATUS_MSK) ==
+ TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
+
+ D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
+ il3945_get_tx_fail_reason(status), status, tx_resp->rate,
+ tx_resp->failure_frame);
+
+ D_TX_REPLY("Tx queue reclaim %d\n", idx);
+ il3945_tx_queue_reclaim(il, txq_id, idx);
+
+ if (status & TX_ABORT_REQUIRED_MSK)
+ IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n");
+}
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ * RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+static void
+il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *) &il->_3945.stats;
+ accum_stats = (u32 *) &il->_3945.accum_stats;
+ delta = (u32 *) &il->_3945.delta_stats;
+ max_delta = (u32 *) &il->_3945.max_delta;
+
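+ /* Walk every 32-bit counter in the notification (skipping the leading
+ * flag word), accumulate the per-interval increase of each counter and
+ * remember the largest increase seen. */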
+ for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
+ i +=
+ sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+ accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ il->_3945.accum_stats.general.temperature =
+ il->_3945.stats.general.temperature;
+ il->_3945.accum_stats.general.ttl_timestamp =
+ il->_3945.stats.general.ttl_timestamp;
+}
+#endif
+
+void
+il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il3945_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
+#endif
+
+ memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
+}
+
+void
+il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ __le32 *flag = (__le32 *) &pkt->u.raw;
+
+ if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_3945.accum_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.delta_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.max_delta, 0,
+ sizeof(struct il3945_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il3945_hdl_stats(il, rxb);
+}
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of stats, see the caller. */
+static int
+il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
+{
+ /* Filter incoming packets to determine if they are targeted toward
+ * this network, discarding packets coming from ourselves */
+ switch (il->iw_mode) {
+ case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
+ /* packets to our IBSS update information */
+ return !compare_ether_addr(header->addr3, il->bssid);
+ case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
+ /* packets from our AP update information */
+ return !compare_ether_addr(header->addr2, il->bssid);
+ default:
+ return 1;
+ }
+}
+
+static void
+il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 len = le16_to_cpu(rx_hdr->len);
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* Sanity check: reported frame must fit in the RX buffer page */
+ if (unlikely
+ (len + IL39_RX_FRAME_SIZE >
+ PAGE_SIZE << il->hw_params.rx_page_order)) {
+ D_DROP("Corruption detected!\n");
+ return;
+ }
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ if (!il3945_mod_params.sw_crypto)
+ il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+ le32_to_cpu(rx_end->status), stats);
+
+ skb_add_rx_frag(skb, 0, rxb->page,
+ (void *)rx_hdr->payload - (void *)pkt, len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void
+il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+ u16 rx_stats_noise_diff __maybe_unused =
+ le16_to_cpu(rx_stats->noise_diff);
+ u8 network_packet;
+
+ rx_status.flag = 0;
+ rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+ rx_status.band =
+ (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+ rx_status.band);
+
+ rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
+ if (rx_status.band == IEEE80211_BAND_5GHZ)
+ rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
+
+ rx_status.antenna =
+ (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ 4;
+
+ /* set the preamble flag if appropriate */
+ if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ if ((unlikely(rx_stats->phy_count > 20))) {
+ D_DROP("dsp size out of range [0,20]: %d/n",
+ rx_stats->phy_count);
+ return;
+ }
+
+ if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+ return;
+ }
+
+ /* Convert 3945's rssi indicator to dBm */
+ rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
+
+ D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
+ rx_stats_sig_avg, rx_stats_noise_diff);
+
+ header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+
+ network_packet = il3945_is_network_packet(il, header);
+
+ D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+ network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
+ rx_status.signal, rx_status.signal, rx_status.rate_idx);
+
+ il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
+
+ if (network_packet) {
+ il->_3945.last_beacon_time =
+ le32_to_cpu(rx_end->beacon_timestamp);
+ il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+ il->_3945.last_rx_rssi = rx_status.signal;
+ }
+
+ il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
+}
+
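+/**
+ * il3945_hw_txq_attach_buf_to_tfd - Add a DMA chunk to a TFD
+ *
+ * Appends one {addr, len} buffer descriptor to the TFD at the queue's write
+ * pointer and updates the chunk count (and pad bits) in control_flags.
+ */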
+int
+il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ int count;
+ struct il_queue *q;
+ struct il3945_tfd *tfd, *tfd_tmp;
+
+ q = &txq->q;
+ tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+ if (count >= NUM_TFD_CHUNKS || count < 0) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ NUM_TFD_CHUNKS);
+ return -EINVAL;
+ }
+
+ tfd->tbs[count].addr = cpu_to_le32(addr);
+ tfd->tbs[count].len = cpu_to_le32(len);
+
+ count++;
+
+ tfd->control_flags =
+ cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_free_tfd - Free one TFD, the one at idx [txq->q.read_ptr]
+ *
+ * Does NOT advance any indexes
+ */
+void
+il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ int idx = txq->q.read_ptr;
+ struct il3945_tfd *tfd = &tfd_tmp[idx];
+ struct pci_dev *dev = il->pci_dev;
+ int i;
+ int counter;
+
+ /* sanity check */
+ counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+ if (counter > NUM_TFD_CHUNKS) {
+ IL_ERR("Too many chunks: %i\n", counter);
+ /* @todo issue fatal error, it is quite serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (counter)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_TODEVICE);
+
+ /* unmap chunks if any */
+
+ for (i = 1; i < counter; i++)
+ pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+ le32_to_cpu(tfd->tbs[i].len),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+/**
+ * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
+ */
+void
+il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id, int tx_id)
+{
+ u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
+ /* clamp to the last valid entry of il3945_rates[] */
+ u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
+ u16 rate_mask;
+ int rate;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ __le32 tx_flags;
+ __le16 fc = hdr->frame_control;
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+
+ rate = il3945_rates[rate_idx].plcp;
+ tx_flags = tx_cmd->tx_flags;
+
+ /* We need to figure out how to get the sta->supp_rates while
+ * in this running context */
+ rate_mask = RATES_MASK_3945;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+
+ if (tx_id >= IL39_CMD_QUEUE_NUM)
+ rts_retry_limit = 3;
+ else
+ rts_retry_limit = 7;
+
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+ tx_cmd->rate = rate;
+ tx_cmd->tx_flags = tx_flags;
+
+ /* OFDM */
+ tx_cmd->supp_rates[0] =
+ ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ /* CCK */
+ tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+ D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+ "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
+ le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
+ tx_cmd->supp_rates[0]);
+}
+
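+/*
+ * Ask the uCode (via an asynchronous ADD_STA modify) to use a fixed tx rate
+ * for this station.
+ */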
+static u8
+il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
+{
+ unsigned long flags_spin;
+ struct il_station_entry *station;
+
+ if (sta_id == IL_INVALID_STATION)
+ return IL_INVALID_STATION;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ station = &il->stations[sta_id];
+
+ station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+ station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+ station->sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &station->sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
+ return sta_id;
+}
+
+static void
+il3945_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN,
+ CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+ }
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+}
+
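+/*
+ * Program Rx DMA channel 0: the RBD ring base, the address where the HW
+ * reports its read pointer (rb_stts), the write pointer, and the config
+ * register (DMA enable, frame size, ring size, IRQ threshold, message mode).
+ */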
+static int
+il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+ il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
+ il_wr(il, FH39_RCSR_WPTR(0), 0);
+ il_wr(il, FH39_RCSR_CONFIG(0),
+ FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
+ (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
+ (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+ /* fake read to flush all prev I/O */
+ il_rd(il, FH39_RSSR_CTRL);
+
+ return 0;
+}
+
+static int
+il3945_tx_reset(struct il_priv *il)
+{
+
+ /* bypass mode */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
+
+ /* RA 0 is active */
+ il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
+
+ /* all 6 fifo are active */
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
+
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+ il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
+ il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
+
+ il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
+
+ il_wr(il, FH39_TSSR_MSG_CONFIG,
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+ return 0;
+}
+
+/**
+ * il3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again
+ */
+static int
+il3945_txq_ctx_reset(struct il_priv *il)
+{
+ int rc;
+ int txq_id, slots_num;
+
+ il3945_hw_txq_ctx_free(il);
+
+ /* allocate tx queue structure */
+ rc = il_alloc_txq_mem(il);
+ if (rc)
+ return rc;
+
+ /* Tx CMD queue */
+ rc = il3945_tx_reset(il);
+ if (rc)
+ goto error;
+
+ /* Tx queue(s) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == IL39_CMD_QUEUE_NUM) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (rc) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ il3945_hw_txq_ctx_free(il);
+ return rc;
+}
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int
+il3945_apm_init(struct il_priv *il)
+{
+ int ret = il_apm_init(il);
+
+ /* Clear APMG (NIC's internal power management) interrupts */
+ il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
+ il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+ /* Reset radio chip */
+ il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+
+ return ret;
+}
+
+static void
+il3945_nic_config(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ unsigned long flags;
+ u8 rev_id = il->pci_dev->revision;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Determine HW type */
+ D_INFO("HW Revision ID = 0x%X\n", rev_id);
+
+ if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+ D_INFO("RTP type\n");
+ else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+ D_INFO("3945 RADIO-MB type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+ } else {
+ D_INFO("3945 RADIO-MM type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+ }
+
+ if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+ D_INFO("SKU OP mode is mrc\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+ } else
+ D_INFO("SKU OP mode is basic\n");
+
+ if ((eeprom->board_revision & 0xF0) == 0xD0) {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ } else {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ }
+
+ if (eeprom->almgor_m_version <= 1) {
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+ D_INFO("Card M type A version is 0x%X\n",
+ eeprom->almgor_m_version);
+ } else {
+ D_INFO("Card M type B version is 0x%X\n",
+ eeprom->almgor_m_version);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+ D_RF_KILL("SW RF KILL supported in EEPROM.\n");
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+ D_RF_KILL("HW RF KILL supported in EEPROM.\n");
+}
+
+int
+il3945_hw_nic_init(struct il_priv *il)
+{
+ int rc;
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ rc = il_rx_queue_alloc(il);
+ if (rc) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il3945_rx_queue_reset(il, rxq);
+
+ il3945_rx_replenish(il);
+
+ il3945_rx_init(il, rxq);
+
+ /* Look at using this instead:
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+ */
+
+ il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+ rc = il3945_txq_ctx_reset(il);
+ if (rc)
+ return rc;
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il3945_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq)
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IL39_CMD_QUEUE_NUM)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+}
+
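+/* Stop the scheduler, wait for each Tx DMA channel to idle, then free TXQs. */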
+void
+il3945_hw_txq_ctx_stop(struct il_priv *il)
+{
+ int txq_id;
+
+ /* stop SCD */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+
+ /* reset TFD queues */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+ il_poll_bit(il, FH39_TSSR_TX_STATUS,
+ FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+ 1000);
+ }
+
+ il3945_hw_txq_ctx_free(il);
+}
+
+/**
+ * il3945_hw_reg_adjust_power_by_temp - return idx delta into power gain
+ * settings table
+ */
+static int
+il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
+ return (new_reading - old_reading) * (-11) / 100;
+}
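+
+/*
+ * With the -11/100 slope above, roughly 9 degrees of raw temperature change
+ * map to one gain-table step (each step is 1/2 dB, see power_gain_table);
+ * e.g. a reading 18 above the reference gives 18 * (-11) / 100 = -1.
+ */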
+
+/**
+ * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int
+il3945_hw_reg_temp_out_of_range(int temperature)
+{
+ return (temperature < -260 || temperature > 25) ? 1 : 0;
+}
+
+int
+il3945_hw_get_temperature(struct il_priv *il)
+{
+ return _il_rd(il, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * il3945_hw_reg_txpower_get_temperature - get current temperature by reading
+ * from NIC
+ */
+static int
+il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int temperature;
+
+ temperature = il3945_hw_get_temperature(il);
+
+ /* driver's okay range is -260 to +25.
+ * human readable okay range is 0 to +285 */
+ D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
+
+ /* handle insane temp reading */
+ if (il3945_hw_reg_temp_out_of_range(temperature)) {
+ IL_ERR("Error bad temperature value %d\n", temperature);
+
+ /* if really really hot(?),
+ * substitute the 3rd band/group's temp measured at factory */
+ if (il->last_temperature > 100)
+ temperature = eeprom->groups[2].temperature;
+ else /* else use most recent "sane" value from driver */
+ temperature = il->last_temperature;
+ }
+
+ return temperature; /* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * This threshold is lower than older versions' 9 degrees */
+#define IL_TEMPERATURE_LIMIT_TIMER 6
+
+/**
+ * il3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * Records new temperature in il->temperature.
+ * Replaces il->last_temperature *only* if calib is needed
+ * (assumes caller will actually do the calibration!).
+ */
+static int
+il3945_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ il->temperature = il3945_hw_reg_txpower_get_temperature(il);
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d,\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Same temp,\n");
+ else
+ D_POWER("Getting warmer, delta %d,\n", temp_diff);
+
+ /* if we don't need calibration, *don't* update last_temperature */
+ if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
+ D_POWER("Timed thermal calib not needed\n");
+ return 0;
+ }
+
+ D_POWER("Timed thermal calib needed\n");
+
+ /* assume that caller will actually do calib ...
+ * update the "last temperature" value */
+ il->last_temperature = il->temperature;
+ return 1;
+}
+
+#define IL_MAX_GAIN_ENTRIES 78
+#define IL_CCK_FROM_OFDM_POWER_DIFF -5
+#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
+ {
+ {251, 127}, /* 2.4 GHz, highest power */
+ {251, 127},
+ {251, 127},
+ {251, 127},
+ {251, 125},
+ {251, 110},
+ {251, 105},
+ {251, 98},
+ {187, 125},
+ {187, 115},
+ {187, 108},
+ {187, 99},
+ {243, 119},
+ {243, 111},
+ {243, 105},
+ {243, 97},
+ {243, 92},
+ {211, 106},
+ {211, 100},
+ {179, 120},
+ {179, 113},
+ {179, 107},
+ {147, 125},
+ {147, 119},
+ {147, 112},
+ {147, 106},
+ {147, 101},
+ {147, 97},
+ {147, 91},
+ {115, 107},
+ {235, 121},
+ {235, 115},
+ {235, 109},
+ {203, 127},
+ {203, 121},
+ {203, 115},
+ {203, 108},
+ {203, 102},
+ {203, 96},
+ {203, 92},
+ {171, 110},
+ {171, 104},
+ {171, 98},
+ {139, 116},
+ {227, 125},
+ {227, 119},
+ {227, 113},
+ {227, 107},
+ {227, 101},
+ {227, 96},
+ {195, 113},
+ {195, 106},
+ {195, 102},
+ {195, 95},
+ {163, 113},
+ {163, 106},
+ {163, 102},
+ {163, 95},
+ {131, 113},
+ {131, 106},
+ {131, 102},
+ {131, 95},
+ {99, 113},
+ {99, 106},
+ {99, 102},
+ {99, 95},
+ {67, 113},
+ {67, 106},
+ {67, 102},
+ {67, 95},
+ {35, 113},
+ {35, 106},
+ {35, 102},
+ {35, 95},
+ {3, 113},
+ {3, 106},
+ {3, 102},
+ {3, 95} /* 2.4 GHz, lowest power */
+ },
+ {
+ {251, 127}, /* 5.x GHz, highest power */
+ {251, 120},
+ {251, 114},
+ {219, 119},
+ {219, 101},
+ {187, 113},
+ {187, 102},
+ {155, 114},
+ {155, 103},
+ {123, 117},
+ {123, 107},
+ {123, 99},
+ {123, 92},
+ {91, 108},
+ {59, 125},
+ {59, 118},
+ {59, 109},
+ {59, 102},
+ {59, 96},
+ {59, 90},
+ {27, 104},
+ {27, 98},
+ {27, 92},
+ {115, 118},
+ {115, 111},
+ {115, 104},
+ {83, 126},
+ {83, 121},
+ {83, 113},
+ {83, 105},
+ {83, 99},
+ {51, 118},
+ {51, 111},
+ {51, 104},
+ {51, 98},
+ {19, 116},
+ {19, 109},
+ {19, 102},
+ {19, 98},
+ {19, 93},
+ {171, 113},
+ {171, 107},
+ {171, 99},
+ {139, 120},
+ {139, 113},
+ {139, 107},
+ {139, 99},
+ {107, 120},
+ {107, 113},
+ {107, 107},
+ {107, 99},
+ {75, 120},
+ {75, 113},
+ {75, 107},
+ {75, 99},
+ {43, 120},
+ {43, 113},
+ {43, 107},
+ {43, 99},
+ {11, 120},
+ {11, 113},
+ {11, 107},
+ {11, 99},
+ {131, 107},
+ {131, 99},
+ {99, 120},
+ {99, 113},
+ {99, 107},
+ {99, 99},
+ {67, 120},
+ {67, 113},
+ {67, 107},
+ {67, 99},
+ {35, 120},
+ {35, 113},
+ {35, 107},
+ {35, 99},
+ {3, 120} /* 5.x GHz, lowest power */
+ }
+};
+
+static inline u8
+il3945_hw_reg_fix_power_idx(int idx)
+{
+ if (idx < 0)
+ return 0;
+ if (idx >= IL_MAX_GAIN_ENTRIES)
+ return IL_MAX_GAIN_ENTRIES - 1;
+ return (u8) idx;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void
+il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
+ const s8 *clip_pwrs,
+ struct il_channel_info *ch_info, int band_idx)
+{
+ struct il3945_scan_power_info *scan_power_info;
+ s8 power;
+ u8 power_idx;
+
+ scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
+
+ /* use this channel group's 6Mbit clipping/saturation pwr,
+ * but cap at regulatory scan power restriction (set during init
+ * based on eeprom channel data) for this channel. */
+ power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
+
+ power = min(power, il->tx_power_user_lmt);
+ scan_power_info->requested_power = power;
+
+ /* find difference between new scan *power* and current "normal"
+ * Tx *power* for 6Mb. Use this difference (x2) to adjust the
+ * current "normal" temperature-compensated Tx power *idx* for
+ * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+ * *idx*. */
+ power_idx =
+ ch_info->power_info[rate_idx].power_table_idx -
+ (power - ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
+
+ /* store reference idx that we use when adjusting *all* scan
+ * powers. So we can accommodate user (all channel) or spectrum
+ * management (single channel) power changes "between" temperature
+ * feedback compensation procedures.
+ * don't force fit this reference idx into gain table; it may be a
+ * negative number. This will help avoid errors when we're at
+ * the lower bounds (highest gains, for warmest temperatures)
+ * of the table. */
+
+ /* don't exceed table bounds for "real" setting */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ scan_power_info->power_table_idx = power_idx;
+ scan_power_info->tpc.tx_gain =
+ power_gain_table[band_idx][power_idx].tx_gain;
+ scan_power_info->tpc.dsp_atten =
+ power_gain_table[band_idx][power_idx].dsp_atten;
+}
+
+/**
+ * il3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from channel info struct, and send to NIC
+ */
+static int
+il3945_send_tx_power(struct il_priv *il)
+{
+ int rate_idx, i;
+ const struct il_channel_info *ch_info = NULL;
+ struct il3945_txpowertable_cmd txpower = {
+ .channel = il->ctx.active.channel,
+ };
+ u16 chan;
+
+ if (WARN_ONCE(test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ chan = le16_to_cpu(il->ctx.active.channel);
+
+ txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+ ch_info = il_get_channel_info(il, il->band, chan);
+ if (!ch_info) {
+ IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
+ il->band);
+ return -EINVAL;
+ }
+
+ if (!il_is_channel_valid(ch_info)) {
+ D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
+ return 0;
+ }
+
+ /* fill cmd with power settings for all rates for current channel */
+ /* Fill OFDM rate */
+ for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
+ rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+ /* Fill CCK rates */
+ for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
+ rate_idx++, i++) {
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+
+ return il_send_cmd_pdu(il, C_TX_PWR_TBL,
+ sizeof(struct il3945_txpowertable_cmd),
+ &txpower);
+
+}
+
+/**
+ * il3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update. Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_idx ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
+ * properly fill out the scan powers, and actual h/w gain settings,
+ * and send changes to NIC
+ */
+static int
+il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
+{
+ struct il3945_channel_power_info *power_info;
+ int power_changed = 0;
+ int i;
+ const s8 *clip_pwrs;
+ int power;
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* Get this channel's rate-to-current-power settings table */
+ power_info = ch_info->power_info;
+
+ /* update OFDM Txpower settings */
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
+ int delta_idx;
+
+ /* limit new power to be no more than h/w capability */
+ power = min(ch_info->curr_txpow, clip_pwrs[i]);
+ if (power == power_info->requested_power)
+ continue;
+
+ /* find difference between old and new requested powers,
+ * update base (non-temp-compensated) power idx */
+ delta_idx = (power - power_info->requested_power) * 2;
+ power_info->base_power_idx -= delta_idx;
+
+ /* save new requested power value */
+ power_info->requested_power = power;
+
+ power_changed = 1;
+ }
+
+ /* update CCK Txpower settings, based on OFDM 12M setting ...
+ * ... all CCK power settings for a given channel are the *same*. */
+ if (power_changed) {
+ power =
+ ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
+ IL_CCK_FROM_OFDM_POWER_DIFF;
+
+ /* do all CCK rates' il3945_channel_power_info structures */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
+ power_info->requested_power = power;
+ power_info->base_power_idx =
+ ch_info->power_info[RATE_12M_IDX_TBL].
+ base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+ ++power_info;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ * based strictly on regulatory (eeprom and spectrum mgt) limitations
+ * (no consideration for h/w clipping limitations).
+ */
+static int
+il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
+{
+ s8 max_power;
+
+#if 0
+ /* if we're using TGd limits, use lower of TGd or EEPROM */
+ if (ch_info->tgd_data.max_power != 0)
+ max_power =
+ min(ch_info->tgd_data.max_power,
+ ch_info->eeprom.max_power_avg);
+
+ /* else just use EEPROM limits */
+ else
+#endif
+ max_power = ch_info->eeprom.max_power_avg;
+
+ return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ * and the factory calibration temperatures, and bases the new settings
+ * on the channel's base_power_idx.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int
+il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
+ u8 a_band;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ u8 i;
+ int ref_temp;
+ int temperature = il->temperature;
+
+ if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
+ /* do not perform tx power calibration */
+ return 0;
+ }
+ /* set up new Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* Get this chnlgrp's factory calibration temperature */
+ ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
+
+ /* get power idx adjustment based on current and factory
+ * temps */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
+
+ /* set tx power value for all rates, OFDM and CCK */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
+ int power_idx =
+ ch_info->power_info[rate_idx].base_power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within table range */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+ ch_info->power_info[rate_idx].power_table_idx =
+ (u8) power_idx;
+ ch_info->power_info[rate_idx].tpc =
+ power_gain_table[a_band][power_idx];
+ }
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx = (scan_tbl_idx == 0) ?
+ RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ /* send Txpower command for current channel to ucode */
+ return il->cfg->ops->lib->send_tx_power(il);
+}
+
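+/**
+ * il3945_hw_reg_set_txpower - Apply a new user tx power limit
+ *
+ * Clamps every channel's current tx power to the new limit (and to its
+ * regulatory limit), then re-runs temperature compensation so the updated
+ * gain settings are sent to the uCode.
+ */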
+int
+il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
+{
+ struct il_channel_info *ch_info;
+ s8 max_power;
+ u8 a_band;
+ u8 i;
+
+ if (il->tx_power_user_lmt == power) {
+ D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
+ power);
+ return 0;
+ }
+
+ D_POWER("Setting upper limit clamp to %ddBm.\n", power);
+ il->tx_power_user_lmt = power;
+
+ /* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* find minimum power of all user and regulatory constraints
+ * (does not consider h/w clipping limitations) */
+ max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
+ max_power = min(power, max_power);
+ if (max_power != ch_info->curr_txpow) {
+ ch_info->curr_txpow = max_power;
+
+ /* this considers the h/w clipping limitations */
+ il3945_hw_reg_set_new_power(il, ch_info);
+ }
+ }
+
+ /* update txpower settings for all channels,
+ * send to NIC if associated. */
+ il3945_is_temp_calib_needed(il);
+ il3945_hw_reg_comp_txpower_temp(il);
+
+ return 0;
+}
+
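+/*
+ * Send only the "association" subset of RXON (flags, filter flags, basic
+ * rates), used when nothing that would require a full tune has changed.
+ */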
+static int
+il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int rc = 0;
+ struct il_rx_pkt *pkt;
+ struct il3945_rxon_assoc_cmd rxon_assoc;
+ struct il_host_cmd cmd = {
+ .id = C_RXON_ASSOC,
+ .len = sizeof(rxon_assoc),
+ .flags = CMD_WANT_SKB,
+ .data = &rxon_assoc,
+ };
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_RXON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+/**
+ * il3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data. This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int
+il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
+ struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+ int rc = 0;
+ bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ if (!il_is_alive(il))
+ return -1;
+
+ /* always get timestamp with Rx frame */
+ staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+ /* select antenna */
+ staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+ staging_rxon->flags |= il3945_get_antenna_flags(il);
+
+ rc = il_check_rxon_cmd(il, ctx);
+ if (rc) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il3945_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, &il->ctx)) {
+ rc = il_send_rxon_assoc(il, &il->ctx);
+ if (rc) {
+ IL_ERR("Error setting RXON_ASSOC "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config requires
+ * an RXON_ASSOC and the new config wants the associated mask enabled,
+ * we must clear the associated from the active configuration
+ * before we apply the new config */
+ if (il_is_associated(il) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ active_rxon->reserved4 = 0;
+ active_rxon->reserved5 = 0;
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ &il->ctx.active);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (rc) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK on current "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ staging_rxon->reserved4 = 0;
+ staging_rxon->reserved5 = 0;
+
+ il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
+
+ /* Apply the new configuration */
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ staging_rxon);
+ if (rc) {
+ IL_ERR("Error setting new configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+ if (!new_assoc) {
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ rc = il_set_tx_power(il, il->tx_power_next, true);
+ if (rc) {
+ IL_ERR("Error setting Tx power (%d).\n", rc);
+ return rc;
+ }
+
+ /* Init the hardware's rate fallback order based on the band */
+ rc = il3945_init_hw_rate_table(il);
+ if (rc) {
+ IL_ERR("Error setting HW rate table: %02X\n", rc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * il3945_reg_txpower_periodic - called when it is time to check our temperature
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ * -- correct coeffs for temp (can reset temp timer)
+ * -- save this temp as "last",
+ * -- send new set of gain settings to NIC
+ * NOTE: This should continue working, even when we're not associated,
+ * so we can keep our internal table of scan powers current. */
+void
+il3945_reg_txpower_periodic(struct il_priv *il)
+{
+ /* This will kick in the "brute force"
+ * il3945_hw_reg_comp_txpower_temp() below */
+ if (!il3945_is_temp_calib_needed(il))
+ goto reschedule;
+
+ /* Set up a new set of temp-adjusted TxPowers, send to NIC.
+ * This is based *only* on current temperature,
+ * ignoring any previous power measurements */
+ il3945_hw_reg_comp_txpower_temp(il);
+
+reschedule:
+ queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
+ REG_RECALIB_PERIOD * HZ);
+}
+
+static void
+il3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ _3945.thermal_periodic.work);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ mutex_lock(&il->mutex);
+ il3945_reg_txpower_periodic(il);
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ * These channel groups are based on factory-tested channels;
+ * on A-band, EEPROM's "group frequency" entries represent the top
+ * channel in each group 1-4. All B/G channels are in group 0.
+ */
+static u16
+il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
+ const struct il_channel_info *ch_info)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+ u8 group;
+ u16 group_idx = 0; /* based on factory calib frequencies */
+ u8 grp_channel;
+
+ /* Find the group idx for the channel ... don't use idx 1(?) */
+ if (il_is_channel_a_band(ch_info)) {
+ for (group = 1; group < 5; group++) {
+ grp_channel = ch_grp[group].group_channel;
+ if (ch_info->channel <= grp_channel) {
+ group_idx = group;
+ break;
+ }
+ }
+ /* group 4 has a few channels *above* its factory cal freq */
+ if (group == 5)
+ group_idx = 4;
+ } else
+ group_idx = 0; /* 2.4 GHz, group 0 */
+
+ D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
+ return group_idx;
+}
+
+/**
+ * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) idx
+ * into radio/DSP gain settings table for requested power.
+ */
+static int
+il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
+ s32 setting_idx, s32 *new_idx)
+{
+ const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ s32 idx0, idx1;
+ s32 power = 2 * requested_power;
+ s32 i;
+ const struct il3945_eeprom_txpower_sample *samples;
+ s32 gains0, gains1;
+ s32 res;
+ s32 denominator;
+
+ chnl_grp = &eeprom->groups[setting_idx];
+ samples = chnl_grp->samples;
+ for (i = 0; i < 5; i++) {
+ if (power == samples[i].power) {
+ *new_idx = samples[i].gain_idx;
+ return 0;
+ }
+ }
+
+ if (power > samples[1].power) {
+ idx0 = 0;
+ idx1 = 1;
+ } else if (power > samples[2].power) {
+ idx0 = 1;
+ idx1 = 2;
+ } else if (power > samples[3].power) {
+ idx0 = 2;
+ idx1 = 3;
+ } else {
+ idx0 = 3;
+ idx1 = 4;
+ }
+
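+ /*
+ * Linearly interpolate between the two bracketing factory samples in
+ * 19-bit fixed point: scale the gain indexes by 2^19, interpolate,
+ * add 2^18 to round, then shift back down.
+ */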
+ denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
+ if (denominator == 0)
+ return -EINVAL;
+ gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
+ gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
+ res =
+ gains0 + (gains1 - gains0) * ((s32) power -
+ (s32) samples[idx0].power) /
+ denominator + (1 << 18);
+ *new_idx = res >> 19;
+ return 0;
+}
+
+static void
+il3945_hw_reg_init_channel_groups(struct il_priv *il)
+{
+ u32 i;
+ s32 rate_idx;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ const struct il3945_eeprom_txpower_group *group;
+
+ D_POWER("Initializing factory calib info from EEPROM\n");
+
+ for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
+ s8 *clip_pwrs; /* table of power levels for each rate */
+ s8 satur_pwr; /* saturation power for each chnl group */
+ group = &eeprom->groups[i];
+
+ /* sanity check on factory saturation power value */
+ if (group->saturation_power < 40) {
+ IL_WARN("Error: saturation power is %d, "
+ "less than minimum expected 40\n",
+ group->saturation_power);
+ return;
+ }
+
+ /*
+ * Derive requested power levels for each rate, based on
+ * hardware capabilities (saturation power for band).
+ * Basic value is 3dB down from saturation, with further
+ * power reductions for highest 3 data rates. These
+ * backoffs provide headroom for high rate modulation
+ * power peaks, without too much distortion (clipping).
+ */
+ /* we'll fill in this array with h/w max power levels */
+ clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
+
+ /* divide factory saturation power by 2 to find -3dB level */
+ satur_pwr = (s8) (group->saturation_power >> 1);
+
+ /* fill in channel group's nominal powers for each rate */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
+ rate_idx++, clip_pwrs++) {
+ switch (rate_idx) {
+ case RATE_36M_IDX_TBL:
+ if (i == 0) /* B/G */
+ *clip_pwrs = satur_pwr;
+ else /* A */
+ *clip_pwrs = satur_pwr - 5;
+ break;
+ case RATE_48M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 7;
+ else
+ *clip_pwrs = satur_pwr - 10;
+ break;
+ case RATE_54M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 9;
+ else
+ *clip_pwrs = satur_pwr - 12;
+ break;
+ default:
+ *clip_pwrs = satur_pwr;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up il->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to do an active (i.e. using Tx) scan.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
+int
+il3945_txpower_set_from_eeprom(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_channel_power_info *pwr_info;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ const s8 *clip_pwrs; /* array of power levels for each rate */
+ u8 gain, dsp_atten;
+ s8 power;
+ u8 pwr_idx, base_pwr_idx, a_band;
+ u8 i;
+ int temperature;
+
+ /* save temperature reference,
+ * so we can determine next time to calibrate */
+ temperature = il3945_hw_reg_txpower_get_temperature(il);
+ il->last_temperature = temperature;
+
+ il3945_hw_reg_init_channel_groups(il);
+
+ /* initialize Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0, ch_info = il->channel_info; i < il->channel_count;
+ i++, ch_info++) {
+ a_band = il_is_channel_a_band(ch_info);
+ if (!il_is_channel_valid(ch_info))
+ continue;
+
+ /* find this channel's channel group (*not* "band") idx */
+ ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
+
+ /* Get this chnlgrp's rate->max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* calculate power idx *adjustment* value according to
+ * diff between current temperature and factory temperature */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature,
+ eeprom->groups[ch_info->group_idx].temperature);
+
+ D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
+ delta_idx, temperature + IL_TEMP_CONVERT);
+
+ /* set tx power value for all OFDM rates */
+ for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
+ s32 uninitialized_var(power_idx);
+ int rc;
+
+ /* use channel group's clip-power table,
+ * but don't exceed channel's max power */
+ s8 pwr = min(ch_info->max_power_avg,
+ clip_pwrs[rate_idx]);
+
+ pwr_info = &ch_info->power_info[rate_idx];
+
+ /* get base (i.e. at factory-measured temperature)
+ * power table idx for this rate's power */
+ rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
+ ch_info->group_idx,
+ &power_idx);
+ if (rc) {
+ IL_ERR("Invalid power idx\n");
+ return rc;
+ }
+ pwr_info->base_power_idx = (u8) power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within range of gain table */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ /* fill 1 OFDM rate's il3945_channel_power_info struct */
+ pwr_info->requested_power = pwr;
+ pwr_info->power_table_idx = (u8) power_idx;
+ pwr_info->tpc.tx_gain =
+ power_gain_table[a_band][power_idx].tx_gain;
+ pwr_info->tpc.dsp_atten =
+ power_gain_table[a_band][power_idx].dsp_atten;
+ }
+
+ /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
+ pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
+ power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
+ pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+ base_pwr_idx =
+ pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+
+ /* stay within table range */
+ pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
+ gain = power_gain_table[a_band][pwr_idx].tx_gain;
+ dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
+
+ /* fill each CCK rate's il3945_channel_power_info structure
+ * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
+ * NOTE: CCK rates start at end of OFDM rates! */
+ for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
+ pwr_info =
+ &ch_info->power_info[rate_idx + IL_OFDM_RATES];
+ pwr_info->requested_power = power;
+ pwr_info->power_table_idx = pwr_idx;
+ pwr_info->base_power_idx = base_pwr_idx;
+ pwr_info->tpc.tx_gain = gain;
+ pwr_info->tpc.dsp_atten = dsp_atten;
+ }
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx = (scan_tbl_idx == 0) ?
+ RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ return 0;
+}
+
+int
+il3945_hw_rxq_stop(struct il_priv *il)
+{
+ int rc;
+
+ il_wr(il, FH39_RCSR_CONFIG(0), 0);
+ rc = il_poll_bit(il, FH39_RSSR_STATUS,
+ FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ if (rc < 0)
+ IL_ERR("Can't stop Rx DMA.\n");
+
+ return 0;
+}
+
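+/*
+ * Point this queue's entry in the shared area at its TFD ring and enable the
+ * corresponding Tx DMA channel.
+ */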
+int
+il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ struct il3945_shared *shared_data = il->_3945.shared_virt;
+
+ shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
+
+ il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
+ il_wr(il, FH39_CBCC_BASE(txq_id), 0);
+
+ il_wr(il, FH39_TCSR_CONFIG(txq_id),
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+ FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+ /* fake read to flush all prev. writes */
+ _il_rd(il, FH39_TSSR_CBB_BASE);
+
+ return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16
+il3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return sizeof(struct il3945_rxon_cmd);
+ case C_POWER_TBL:
+ return sizeof(struct il3945_powertable_cmd);
+ default:
+ return len;
+ }
+}
+
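+/* Repack the generic ADD_STA command into the smaller 3945 layout. */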
+static u16
+il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 *data)
+{
+ struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cpu_to_le16(0);
+ addsta->rate_n_flags = cmd->rate_n_flags;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+ return (u16) sizeof(struct il3945_addsta_cmd);
+}
+
+static int
+il3945_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int ret;
+ u8 sta_id;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
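+/*
+ * Add or remove the IBSS BSSID station; on add, pin its initial tx rate to
+ * the band's lowest rate and kick off rate scaling.
+ */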
+static int
+il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int ret;
+
+ if (add) {
+ ret =
+ il3945_add_bssid_station(il, vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ if (ret)
+ return ret;
+
+ il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
+ (il->band ==
+ IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+ RATE_1M_PLCP);
+ il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
+
+ return 0;
+ }
+
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+/**
+ * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int
+il3945_init_hw_rate_table(struct il_priv *il)
+{
+ int rc, i, idx, prev_idx;
+ struct il3945_rate_scaling_cmd rate_cmd = {
+ .reserved = {0, 0, 0},
+ };
+ struct il3945_rate_scaling_info *table = rate_cmd.table;
+
+ for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
+ idx = il3945_rates[i].table_rs_idx;
+
+ table[idx].rate_n_flags =
+ il3945_hw_set_rate_n_flags(il3945_rates[i].plcp, 0);
+ table[idx].try_cnt = il->retry_rate;
+ prev_idx = il3945_get_prev_ieee_rate(i);
+ table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
+ }
+
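+ /*
+ * At this point every entry falls back to the previous (i.e. next
+ * slower) IEEE rate; the band-specific fixups below keep the CCK/OFDM
+ * fallback chains sensible for the current band.
+ */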
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ D_RATE("Select A mode rate scale\n");
+ /* If one of the following CCK rates is used,
+ * have it fall back to the 6M OFDM rate */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+
+ /* Don't fall back to CCK rates */
+ table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
+
+ /* Don't drop out of OFDM rates */
+ table[RATE_6M_IDX_TBL].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+ break;
+
+ case IEEE80211_BAND_2GHZ:
+ D_RATE("Select B/G mode rate scale\n");
+ /* If an OFDM rate is used, have it fall back to the
+ * 1M CCK rates */
+
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+
+ idx = IL_FIRST_CCK_RATE;
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[idx].table_rs_idx;
+
+ idx = RATE_11M_IDX_TBL;
+ /* CCK shouldn't fall back to OFDM... */
+ table[idx].next_rate_idx = RATE_5M_IDX_TBL;
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* Update the rate scaling for control frame Tx */
+ rate_cmd.table_id = 0;
+ rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+ if (rc)
+ return rc;
+
+ /* Update the rate scaling for data frame Tx */
+ rate_cmd.table_id = 1;
+ return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+}
+
+/* Called when initializing driver */
+int
+il3945_hw_set_hw_params(struct il_priv *il)
+{
+ memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
+
+ il->_3945.shared_virt =
+ dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
+ &il->_3945.shared_phys, GFP_KERNEL);
+ if (!il->_3945.shared_virt) {
+ IL_ERR("failed to allocate pci memory\n");
+ return -ENOMEM;
+ }
+
+ /* Assign number of Usable TX queues */
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+
+ il->hw_params.tfd_size = sizeof(struct il3945_tfd);
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ il->hw_params.max_stations = IL3945_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+ il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
+ il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+unsigned int
+il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
+ u8 rate)
+{
+ struct il3945_tx_beacon_cmd *tx_beacon_cmd;
+ unsigned int frame_size;
+
+ tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ frame_size =
+ il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+ BUG_ON(frame_size > MAX_MPDU_SIZE);
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+
+ tx_beacon_cmd->tx.rate = rate;
+ tx_beacon_cmd->tx.tx_flags =
+ (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
+
+ /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
+ tx_beacon_cmd->tx.supp_rates[0] =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
+
+ return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
+}
+
+void
+il3945_hw_handler_setup(struct il_priv *il)
+{
+ il->handlers[C_TX] = il3945_hdl_tx;
+ il->handlers[N_3945_RX] = il3945_hdl_rx;
+}
+
+void
+il3945_hw_setup_deferred_work(struct il_priv *il)
+{
+ INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
+ il3945_bg_reg_txpower_periodic);
+}
+
+void
+il3945_hw_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_delayed_work(&il->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il3945_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism. Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int
+il3945_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+ return 0;
+}
+
+static void
+il3945_eeprom_release_semaphore(struct il_priv *il)
+{
+ return;
+}
+
+/**
+ * il3945_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il3945_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int rc;
+ int i;
+ u32 done;
+ u32 reg_offset;
+
+ D_INFO("Begin load bsm\n");
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL39_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+ * NOTE: il3945_initialize_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache. */
+ pinst = il->ucode_init.p_addr;
+ pdata = il->ucode_init_data.p_addr;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ rc = il3945_verify_bsm(il);
+ if (rc)
+ return rc;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
+
+static struct il_hcmd_ops il3945_hcmd = {
+ .rxon_assoc = il3945_send_rxon_assoc,
+ .commit_rxon = il3945_commit_rxon,
+};
+
+static struct il_lib_ops il3945_lib = {
+ .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il3945_hw_txq_free_tfd,
+ .txq_init = il3945_hw_tx_queue_init,
+ .load_ucode = il3945_load_bsm,
+ .dump_nic_error_log = il3945_dump_nic_error_log,
+ .apm_ops = {
+ .init = il3945_apm_init,
+ .config = il3945_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
+ .acquire_semaphore = il3945_eeprom_acquire_semaphore,
+ .release_semaphore = il3945_eeprom_release_semaphore,
+ },
+ .send_tx_power = il3945_send_tx_power,
+ .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il3945_ucode_rx_stats_read,
+ .tx_stats_read = il3945_ucode_tx_stats_read,
+ .general_stats_read = il3945_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il3945_legacy_ops = {
+ .post_associate = il3945_post_associate,
+ .config_ap = il3945_config_ap,
+ .manage_ibss_station = il3945_manage_ibss_station,
+};
+
+static struct il_hcmd_utils_ops il3945_hcmd_utils = {
+ .get_hcmd_size = il3945_get_hcmd_size,
+ .build_addsta_hcmd = il3945_build_addsta_hcmd,
+ .request_scan = il3945_request_scan,
+ .post_scan = il3945_post_scan,
+};
+
+static const struct il_ops il3945_ops = {
+ .lib = &il3945_lib,
+ .hcmd = &il3945_hcmd,
+ .utils = &il3945_hcmd_utils,
+ .led = &il3945_led_ops,
+ .legacy = &il3945_legacy_ops,
+ .ieee80211_ops = &il3945_hw_ops,
+};
+
+static struct il_base_params il3945_base_params = {
+ .eeprom_size = IL3945_EEPROM_IMG_SIZE,
+ .num_of_queues = IL39_NUM_QUEUES,
+ .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+ .set_l0s = false,
+ .use_bsm = true,
+ .led_compensation = 64,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+};
+
+static struct il_cfg il3945_bg_cfg = {
+ .name = "3945BG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+static struct il_cfg il3945_abg_cfg = {
+ .name = "3945ABG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
+ {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 00000000000..2b2895c544d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,626 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_3945_h__
+#define __il_3945_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id il3945_hw_card_ids[];
+
+#include "common.h"
+
+/* Highest firmware API version supported */
+#define IL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL3945_UCODE_API_MIN 1
+
+#define IL3945_FW_PRE "iwlwifi-3945-"
+#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
+#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
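/*
 * Worked example of the reasoning above (illustrative only, not from the
 * driver): averaging one real reading of -60 dBm with the -127 sentinel
 * gives (-60 + -127) / 2 = -93, which stays negative and inside the s8
 * range; a 0 sentinel would pull the same average up to -30 and suggest
 * an implausibly strong signal.
 */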
+
+/* Module parameters accessible from iwl-*.c */
+extern struct il_mod_params il3945_mod_params;
+
+struct il3945_rate_scale_data {
+ u64 data;
+ s32 success_counter;
+ s32 success_ratio;
+ s32 counter;
+ s32 average_tpt;
+ unsigned long stamp;
+};
+
+struct il3945_rs_sta {
+ spinlock_t lock;
+ struct il_priv *il;
+ s32 *expected_tpt;
+ unsigned long last_partial_flush;
+ unsigned long last_flush;
+ u32 flush_time;
+ u32 last_tx_packets;
+ u32 tx_packets;
+ u8 tgg;
+ u8 flush_pending;
+ u8 start_rate;
+ struct timer_list rate_scale_flush;
+ struct il3945_rate_scale_data win[RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+};
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il3945_sta_priv {
+ struct il_station_priv_common common;
+ struct il3945_rs_sta rs_sta;
+};
+
+enum il3945_antenna {
+ IL_ANTENNA_DIVERSITY,
+ IL_ANTENNA_MAIN,
+ IL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ *   else RTS for data/management frames where the MPDU is larger
+ *   than the RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
+
+#define IL_TX_FIFO_AC0 0
+#define IL_TX_FIFO_AC1 1
+#define IL_TX_FIFO_AC2 2
+#define IL_TX_FIFO_AC3 3
+#define IL_TX_FIFO_HCCA_1 5
+#define IL_TX_FIFO_HCCA_2 6
+#define IL_TX_FIFO_NONE 7
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il3945_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il3945_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
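/*
 * Illustrative sketch, not from the driver: converting an 802.11
 * sequence-control field into a sequence number with the macros above.
 * The helper name is made up for the example.
 */
static inline u16 example_seq_to_sn(__le16 seq_ctrl)
{
	u16 sc = le16_to_cpu(seq_ctrl);

	/* e.g. sc == 0x01b5 -> fragment 5, SN 0x001b */
	return SEQ_TO_SN(sc);
}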
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+#define STA_PS_STATUS_WAKE 0
+#define STA_PS_STATUS_SLEEP 1
+
+struct il3945_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
+ x->u.rx_frame.stats.payload + \
+ x->u.rx_frame.stats.phy_count))
+#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
+ IL_RX_HDR(x)->payload + \
+ le16_to_cpu(IL_RX_HDR(x)->len)))
+#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
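/*
 * Illustrative layout sketch, not from the driver, of the receive buffer
 * the IL_RX_* accessors above walk, for a packet pointer x:
 *
 *   x->u.rx_frame.stats                  fixed Rx stats header
 *     .payload[.phy_count]               variable-length phy data
 *   IL_RX_HDR(x)                         struct il3945_rx_frame_hdr
 *     ->payload[le16_to_cpu(->len)]      the 802.11 frame itself (IL_RX_DATA(x))
 *   IL_RX_END(x)                         struct il3945_rx_frame_end trailer
 */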
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+extern int il3945_calc_db_from_ratio(int sig_ratio);
+extern void il3945_rx_replenish(void *data);
+extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+ struct ieee80211_hdr *hdr,
+ int left);
+extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
+ char **buf, bool display);
+extern void il3945_dump_nic_error_log(struct il_priv *il);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE: The implementations of these functions are hardware specific,
+ * which is why they are in the hardware-specific files (vs. iwl-base.c)
+ *
+ * Naming convention --
+ * il3945_      <-- It's part of iwlwifi (should be changed to il3945_)
+ * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il3945_bg_ <-- Called from work queue context
+ * il3945_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il3945_hw_handler_setup(struct il_priv *il);
+extern void il3945_hw_setup_deferred_work(struct il_priv *il);
+extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
+extern int il3945_hw_rxq_stop(struct il_priv *il);
+extern int il3945_hw_set_hw_params(struct il_priv *il);
+extern int il3945_hw_nic_init(struct il_priv *il);
+extern int il3945_hw_nic_stop_master(struct il_priv *il);
+extern void il3945_hw_txq_ctx_free(struct il_priv *il);
+extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
+extern int il3945_hw_nic_reset(struct il_priv *il);
+extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
+ struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset,
+ u8 pad);
+extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+extern int il3945_hw_get_temperature(struct il_priv *il);
+extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+ struct il3945_frame *frame,
+ u8 rate);
+void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id,
+ int tx_id);
+extern int il3945_hw_reg_send_txpower(struct il_priv *il);
+extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+extern void il3945_disable_events(struct il_priv *il);
+extern int il4965_get_temperature(const struct il_priv *il);
+extern void il3945_post_associate(struct il_priv *il);
+extern void il3945_config_ap(struct il_priv *il);
+
+extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
+
+/**
+ * il3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of station ID to find
+ *
+ * NOTE: This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+
+extern struct ieee80211_ops il3945_hw_ops;
+
+extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
+extern int il3945_init_hw_rate_table(struct il_priv *il);
+extern void il3945_reg_txpower_periodic(struct il_priv *il);
+extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+
+extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+
+/* scanning */
+int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+void il3945_post_scan(struct il_priv *il);
+
+/* rates */
+extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
+
+/* RSSI to dBm */
+#define IL39_RSSI_OFFSET 95
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ * to a radio/DSP gain table idx.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ * (PA) output voltage detector. This same detector is used during Tx of
+ * long packets in normal operation to provide feedback as to proper output
+ * level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_sample {
+ u8 gain_idx; /* idx into power (gain) setup table ... */
+ s8 power; /* ... for this pwr level for this chnl group */
+ u16 v_det; /* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ * to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_group {
+ struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
+ s32 a, b, c, d, e; /* coefficients for voltage->power
+ * formula (signed) */
+ s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
+ * frequency (signed) */
+ s8 saturation_power; /* highest power possible by h/w in this
+ * band */
+ u8 group_channel; /* "representative" channel # in this band */
+ s16 temperature; /* h/w temperature at factory calib this band
+ * (signed) */
+} __packed;
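/*
 * Minimal sketch of the interpolation described above, assuming simple
 * linear interpolation between two adjacent samples; this is not the
 * driver's exact routine and the helper name is local to the example.
 */
static inline int
example_interp_gain_idx(const struct il3945_eeprom_txpower_sample *lo,
			const struct il3945_eeprom_txpower_sample *hi,
			s8 requested_power)
{
	int num = (hi->gain_idx - lo->gain_idx) * (requested_power - lo->power);
	int den = hi->power - lo->power;

	/* fall back to the low sample if the two samples share one power */
	return lo->gain_idx + (den ? num / den : 0);
}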
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are used to modify the a/b/c/d/e coeffs based on the
+ * difference between the current temperature and the factory calib temperature.
+ * Data copied from EEPROM.
+ */
+struct il3945_eeprom_temperature_corr {
+ u32 Ta;
+ u32 Tb;
+ u32 Tc;
+ u32 Td;
+ u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct il3945_eeprom {
+ u8 reserved0[16];
+ u16 device_id; /* abs.ofs: 16 */
+ u8 reserved1[2];
+ u16 pmc; /* abs.ofs: 20 */
+ u8 reserved2[20];
+ u8 mac_address[6]; /* abs.ofs: 42 */
+ u8 reserved3[58];
+ u16 board_revision; /* abs.ofs: 106 */
+ u8 reserved4[11];
+ u8 board_pba_number[9]; /* abs.ofs: 119 */
+ u8 reserved5[8];
+ u16 version; /* abs.ofs: 136 */
+ u8 sku_cap; /* abs.ofs: 138 */
+ u8 leds_mode; /* abs.ofs: 139 */
+ u16 oem_mode;
+ u16 wowlan_mode; /* abs.ofs: 142 */
+ u16 leds_time_interval; /* abs.ofs: 144 */
+ u8 leds_off_time; /* abs.ofs: 146 */
+ u8 leds_on_time; /* abs.ofs: 147 */
+ u8 almgor_m_version; /* abs.ofs: 148 */
+ u8 antenna_switch_type; /* abs.ofs: 149 */
+ u8 reserved6[42];
+ u8 sku_id[4]; /* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+ u16 band_1_count; /* abs.ofs: 196 */
+ struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+ u16 band_2_count; /* abs.ofs: 226 */
+ struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+ u16 band_3_count; /* abs.ofs: 254 */
+ struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+ u16 band_4_count; /* abs.ofs: 280 */
+ struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+ u16 band_5_count; /* abs.ofs: 304 */
+ struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
+
+ u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+ struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+ struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
+ u8 reserved16[172]; /* fill out to full 1024 byte block */
+} __packed;
+
+#define IL3945_EEPROM_IMG_SIZE 1024
+
+/* End of EEPROM */
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
+#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IL39_NUM_QUEUES 5
+#define IL39_CMD_QUEUE_NUM 4
+
+#define IL_DEFAULT_TX_RETRY 15
+
+/*********************************************/
+
+#define RFD_SIZE 4
+#define NUM_TFD_CHUNKS 4
+
+#define TFD_CTL_COUNT_SET(n) (n << 24)
+#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n) (n << 28)
+#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
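/*
 * Illustrative sketch, not from the driver: building a TFD control word
 * with the macros above. COUNT occupies bits 24..26, PAD bits 28..31, and
 * the *_GET() macros recover the same values from the packed word.
 */
static inline u32 example_tfd_control(u32 num_chunks, u32 pad)
{
	return TFD_CTL_COUNT_SET(num_chunks) | TFD_CTL_PAD_SET(pad);
}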
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL39_RTC_INST_LOWER_BOUND (0x000000)
+#define IL39_RTC_INST_UPPER_BOUND (0x014000)
+
+#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
+
+#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
+ IL39_RTC_INST_LOWER_BOUND)
+#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
+ IL39_RTC_DATA_LOWER_BOUND)
+
+#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
+#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
+
+static inline int
+il3945_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
+ addr < IL39_RTC_DATA_UPPER_BOUND);
+}
+
+/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
+ * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
+struct il3945_shared {
+ __le32 tx_base_ptr[8];
+} __packed;
+
+static inline u8
+il3945_hw_get_rate(__le16 rate_n_flags)
+{
+ return le16_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline u16
+il3945_hw_get_rate_n_flags(__le16 rate_n_flags)
+{
+ return le16_to_cpu(rate_n_flags);
+}
+
+static inline __le16
+il3945_hw_set_rate_n_flags(u8 rate, u16 flags)
+{
+ return cpu_to_le16((u16) rate | flags);
+}
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly read/writable by the driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND (0x0800)
+#define FH39_MEM_UPPER_BOUND (0x1000)
+
+#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
+ ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
+
+/* RSSR */
+#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
+#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
+#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
+#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
+
+/* DBM */
+
+#define FH39_SRVC_CHNL (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+ (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+ FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
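/*
 * Illustrative sketch, not from the driver: a Tx DMA channel is treated as
 * idle once both bits selected by the mask above read back as set. The
 * register accessor is assumed to come from the common layer.
 */
static inline bool example_tx_chnl_idle(struct il_priv *il, int ch)
{
	u32 mask = FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);

	return (_il_rd(il, FH39_TSSR_TX_STATUS) & mask) == mask;
}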
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
+
+struct il3945_tfd_tb {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+struct il3945_tfd {
+ __le32 control_flags;
+ struct il3945_tfd_tb tbs[4];
+ u8 __pad[28];
+} __packed;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877e686..d3248e3ef23 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -63,15 +63,14 @@
#include <linux/slab.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
+#include "common.h"
+#include "4965.h"
/*****************************************************************************
* INIT calibrations framework
*****************************************************************************/
-struct statistics_general_data {
+struct stats_general_data {
u32 beacon_silence_rssi_a;
u32 beacon_silence_rssi_b;
u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-void iwl4965_calib_free_results(struct iwl_priv *priv)
+void
+il4965_calib_free_results(struct il_priv *il)
{
int i;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ for (i = 0; i < IL_CALIB_MAX; i++) {
+ kfree(il->calib_results[i].buf);
+ il->calib_results[i].buf = NULL;
+ il->calib_results[i].buf_len = 0;
}
}
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
* enough to receive all of our own network traffic, but not so
* high that our DSP gets too busy trying to lock onto non-network
* activity/noise. */
-static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time,
- struct statistics_general_data *rx_info)
+static int
+il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
+ struct stats_general_data *rx_info)
{
u32 max_nrg_cck = 0;
int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
data->nrg_auto_corr_silence_diff = 0;
/* Find max silence rssi among all 3 receivers.
* This is background noise, which may include transmissions from other
* networks, measured during silence before our network's beacon */
- silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
- ALL_BAND_FILTER) >> 8);
+ silence_rssi_a =
+ (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
+ silence_rssi_b =
+ (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
+ silence_rssi_c =
+ (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
val = max(silence_rssi_b, silence_rssi_c);
max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
val = data->nrg_silence_rssi[i];
silence_ref = max(silence_ref, val);
}
- IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
- silence_rssi_a, silence_rssi_b, silence_rssi_c,
- silence_ref);
+ D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
+ silence_rssi_b, silence_rssi_c, silence_ref);
/* Find max rx energy (min value!) among all 3 receivers,
* measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
max_nrg_cck += 6;
- IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
- rx_info->beacon_energy_a, rx_info->beacon_energy_b,
- rx_info->beacon_energy_c, max_nrg_cck - 6);
+ D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+ rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+ rx_info->beacon_energy_c, max_nrg_cck - 6);
/* Count number of consecutive beacons with fewer-than-desired
* false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
data->num_in_cck_no_fa++;
else
data->num_in_cck_no_fa = 0;
- IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
- data->num_in_cck_no_fa);
+ D_CALIB("consecutive bcns with few false alarms = %u\n",
+ data->num_in_cck_no_fa);
/* If we got too many false alarms this time, reduce sensitivity */
- if ((false_alarms > max_false_alarms) &&
- (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
- false_alarms, max_false_alarms);
- IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
- data->nrg_curr_state = IWL_FA_TOO_MANY;
+ if (false_alarms > max_false_alarms &&
+ data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
+ D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+ max_false_alarms);
+ D_CALIB("... reducing sensitivity\n");
+ data->nrg_curr_state = IL_FA_TOO_MANY;
/* Store for "fewer than desired" on later beacon */
data->nrg_silence_ref = silence_ref;
/* increase energy threshold (reduce nrg value)
* to decrease sensitivity */
data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
- /* Else if we got fewer than desired, increase sensitivity */
+ /* Else if we got fewer than desired, increase sensitivity */
} else if (false_alarms < min_false_alarms) {
- data->nrg_curr_state = IWL_FA_TOO_FEW;
+ data->nrg_curr_state = IL_FA_TOO_FEW;
/* Compare silence level with silence level for most recent
* healthy number or too many false alarms */
- data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
- (s32)silence_ref;
+ data->nrg_auto_corr_silence_diff =
+ (s32) data->nrg_silence_ref - (s32) silence_ref;
- IWL_DEBUG_CALIB(priv,
- "norm FA %u < min FA %u, silence diff %d\n",
- false_alarms, min_false_alarms,
- data->nrg_auto_corr_silence_diff);
+ D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
+ false_alarms, min_false_alarms,
+ data->nrg_auto_corr_silence_diff);
/* Increase value to increase sensitivity, but only if:
* 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* from a previous beacon with too many, or healthy # FAs
* OR 2) We've seen a lot of beacons (100) with too few
* false alarms */
- if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ if (data->nrg_prev_state != IL_FA_TOO_MANY &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
- IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+ D_CALIB("... increasing sensitivity\n");
/* Increase nrg value to increase sensitivity */
val = data->nrg_th_cck + NRG_STEP_CCK;
- data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+ data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
} else {
- IWL_DEBUG_CALIB(priv,
- "... but not changing sensitivity\n");
+ D_CALIB("... but not changing sensitivity\n");
}
- /* Else we got a healthy number of false alarms, keep status quo */
+ /* Else we got a healthy number of false alarms, keep status quo */
} else {
- IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
- data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+ D_CALIB(" FA in safe zone\n");
+ data->nrg_curr_state = IL_FA_GOOD_RANGE;
/* Store for use in "fewer than desired" with later beacon */
data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
/* If previous beacon had too many false alarms,
* give it some extra margin by reducing sensitivity again
* (but don't go below measured energy of desired Rx) */
- if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
- IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+ if (IL_FA_TOO_MANY == data->nrg_prev_state) {
+ D_CALIB("... increasing margin\n");
if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
data->nrg_th_cck -= NRG_MARGIN;
else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* Lower value is higher energy, so we use max()!
*/
data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
- IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+ D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
data->nrg_prev_state = data->nrg_curr_state;
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
else {
val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
data->auto_corr_cck =
- min((u32)ranges->auto_corr_max_cck, val);
+ min((u32) ranges->auto_corr_max_cck, val);
}
val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- min((u32)ranges->auto_corr_max_cck_mrc, val);
- } else if ((false_alarms < min_false_alarms) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ min((u32) ranges->auto_corr_max_cck_mrc, val);
+ } else if (false_alarms < min_false_alarms &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
/* Decrease auto_corr values to increase sensitivity */
val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
- data->auto_corr_cck =
- max((u32)ranges->auto_corr_min_cck, val);
+ data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- max((u32)ranges->auto_corr_min_cck_mrc, val);
+ max((u32) ranges->auto_corr_min_cck_mrc, val);
}
return 0;
}
-
-static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time)
+static int
+il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
{
u32 val;
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
/* If we got too many false alarms this time, reduce sensitivity */
if (false_alarms > max_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
- false_alarms, max_false_alarms);
+ D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
+ max_false_alarms);
val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- min((u32)ranges->auto_corr_max_ofdm, val);
+ min((u32) ranges->auto_corr_max_ofdm, val);
val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- min((u32)ranges->auto_corr_max_ofdm_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
}
/* Else if we got fewer than desired, increase sensitivity */
else if (false_alarms < min_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
- false_alarms, min_false_alarms);
+ D_CALIB("norm FA %u < min FA %u\n", false_alarms,
+ min_false_alarms);
val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- max((u32)ranges->auto_corr_min_ofdm, val);
+ max((u32) ranges->auto_corr_min_ofdm, val);
val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- max((u32)ranges->auto_corr_min_ofdm_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
} else {
- IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
- min_false_alarms, false_alarms, max_false_alarms);
+ D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
+ min_false_alarms, false_alarms, max_false_alarms);
}
return 0;
}
-static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
- struct iwl_sensitivity_data *data,
- __le16 *tbl)
+static void
+il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
+ struct il_sensitivity_data *data,
+ __le16 *tbl)
{
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm);
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_x1);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
-
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck);
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck_mrc);
-
- tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_cck);
- tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_ofdm);
-
- tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
- cpu_to_le16(data->barker_corr_th_min);
- tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16(data->barker_corr_th_min_mrc);
- tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
- cpu_to_le16(data->nrg_th_cca);
-
- IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
- data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
- data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
- data->nrg_th_ofdm);
-
- IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
- data->auto_corr_cck, data->auto_corr_cck_mrc,
- data->nrg_th_cck);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_x1);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
+
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck);
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck_mrc);
+
+ tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
+ tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
+
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
+ cpu_to_le16(data->barker_corr_th_min);
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16(data->barker_corr_th_min_mrc);
+ tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
+
+ D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+ data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+ data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+ data->nrg_th_ofdm);
+
+ D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
+ data->auto_corr_cck_mrc, data->nrg_th_cck);
}
-/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
-static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
+static int
+il4965_sensitivity_write(struct il_priv *il)
{
- struct iwl_sensitivity_cmd cmd;
- struct iwl_sensitivity_data *data = NULL;
- struct iwl_host_cmd cmd_out = {
- .id = SENSITIVITY_CMD,
- .len = sizeof(struct iwl_sensitivity_cmd),
+ struct il_sensitivity_cmd cmd;
+ struct il_sensitivity_data *data = NULL;
+ struct il_host_cmd cmd_out = {
+ .id = C_SENSITIVITY,
+ .len = sizeof(struct il_sensitivity_cmd),
.flags = CMD_ASYNC,
.data = &cmd,
};
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
memset(&cmd, 0, sizeof(cmd));
- iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+ il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
/* Update uCode's "work" table, and copy it to DSP */
- cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+ cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
/* Don't send command to uCode if nothing has changed */
- if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
- sizeof(u16)*HD_TABLE_SIZE)) {
- IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+ if (!memcmp
+ (&cmd.table[0], &(il->sensitivity_tbl[0]),
+ sizeof(u16) * HD_TBL_SIZE)) {
+ D_CALIB("No change in C_SENSITIVITY\n");
return 0;
}
/* Copy table for comparison next time */
- memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
- sizeof(u16)*HD_TABLE_SIZE);
+ memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
+ sizeof(u16) * HD_TBL_SIZE);
- return iwl_legacy_send_cmd(priv, &cmd_out);
+ return il_send_cmd(il, &cmd_out);
}
-void iwl4965_init_sensitivity(struct iwl_priv *priv)
+void
+il4965_init_sensitivity(struct il_priv *il)
{
int ret = 0;
int i;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+ D_CALIB("Start il4965_init_sensitivity\n");
/* Clear driver's sensitivity algo data */
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
if (ranges == NULL)
return;
- memset(data, 0, sizeof(struct iwl_sensitivity_data));
+ memset(data, 0, sizeof(struct il_sensitivity_data));
data->num_in_cck_no_fa = 0;
- data->nrg_curr_state = IWL_FA_TOO_MANY;
- data->nrg_prev_state = IWL_FA_TOO_MANY;
+ data->nrg_curr_state = IL_FA_TOO_MANY;
+ data->nrg_prev_state = IL_FA_TOO_MANY;
data->nrg_silence_ref = 0;
data->nrg_silence_idx = 0;
data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
data->nrg_silence_rssi[i] = 0;
- data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+ data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
- data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+ data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
data->last_bad_plcp_cnt_cck = 0;
data->last_fa_cnt_cck = 0;
- ret |= iwl4965_sensitivity_write(priv);
- IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+ ret |= il4965_sensitivity_write(il);
+ D_CALIB("<<return 0x%X\n", ret);
}
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+void
+il4965_sensitivity_calibration(struct il_priv *il, void *resp)
{
u32 rx_enable_time;
u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
u32 bad_plcp_ofdm;
u32 norm_fa_ofdm;
u32 norm_fa_cck;
- struct iwl_sensitivity_data *data = NULL;
- struct statistics_rx_non_phy *rx_info;
- struct statistics_rx_phy *ofdm, *cck;
+ struct il_sensitivity_data *data = NULL;
+ struct stats_rx_non_phy *rx_info;
+ struct stats_rx_phy *ofdm, *cck;
unsigned long flags;
- struct statistics_general_data statis;
+ struct stats_general_data statis;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
- if (!iwl_legacy_is_any_associated(priv)) {
- IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+ if (!il_is_any_associated(il)) {
+ D_CALIB("<< - not associated\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
- ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
- cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+ rx_info = &(((struct il_notif_stats *)resp)->rx.general);
+ ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
+ cck = &(((struct il_notif_stats *)resp)->rx.cck);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB("<< invalid data.\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
statis.beacon_silence_rssi_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a);
+ le32_to_cpu(rx_info->beacon_silence_rssi_a);
statis.beacon_silence_rssi_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b);
+ le32_to_cpu(rx_info->beacon_silence_rssi_b);
statis.beacon_silence_rssi_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c);
- statis.beacon_energy_a =
- le32_to_cpu(rx_info->beacon_energy_a);
- statis.beacon_energy_b =
- le32_to_cpu(rx_info->beacon_energy_b);
- statis.beacon_energy_c =
- le32_to_cpu(rx_info->beacon_energy_c);
+ le32_to_cpu(rx_info->beacon_silence_rssi_c);
+ statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
+ statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
+ statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
- IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+ D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
- IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+ D_CALIB("<< RX Enable Time == 0!\n");
return;
}
- /* These statistics increase monotonically, and do not reset
+ /* These stats increase monotonically, and do not reset
* at each beacon. Calculate difference from last value, or just
- * use the new statistics value if it has reset or wrapped around. */
+ * use the new stats value if it has reset or wrapped around. */
if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
data->last_bad_plcp_cnt_cck = bad_plcp_cck;
else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
norm_fa_cck = fa_cck + bad_plcp_cck;
- IWL_DEBUG_CALIB(priv,
- "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
- bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+ D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
+ bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
- iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
- iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+ il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
+ il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
- iwl4965_sensitivity_write(priv);
+ il4965_sensitivity_write(il);
}
-static inline u8 iwl4965_find_first_chain(u8 mask)
+static inline u8
+il4965_find_first_chain(u8 mask)
{
if (mask & ANT_A)
return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
* disconnected.
*/
static void
-iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
- struct iwl_chain_noise_data *data)
+il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
+ struct il_chain_noise_data *data)
{
u32 active_chains = 0;
u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
u8 first_chain;
u16 i = 0;
- average_sig[0] = data->chain_signal_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[1] = data->chain_signal_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[2] = data->chain_signal_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[0] =
+ data->chain_signal_a /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[1] =
+ data->chain_signal_b /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[2] =
+ data->chain_signal_c /
+ il->cfg->base_params->chain_noise_num_beacons;
if (average_sig[0] >= average_sig[1]) {
max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
active_chains = (1 << max_average_sig_antenna_i);
}
- IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
- average_sig[0], average_sig[1], average_sig[2]);
- IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
- max_average_sig, max_average_sig_antenna_i);
+ D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
+ average_sig[2]);
+ D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
+ max_average_sig_antenna_i);
/* Compare signal strengths for all 3 receivers. */
for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
data->disconn_array[i] = 1;
else
active_chains |= (1 << i);
- IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
- "disconn_array[i] = %d\n",
- i, rssi_delta, data->disconn_array[i]);
+ D_CALIB("i = %d rssiDelta = %d "
+ "disconn_array[i] = %d\n", i, rssi_delta,
+ data->disconn_array[i]);
}
}
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= il->hw_params.valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
- * priv->hw_setting.valid_tx_ant */
+ * il->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(il->hw_params.valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
if (data->disconn_array[i] == 0)
/* there is a Tx antenna connected */
break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ if (num_tx_chains == il->hw_params.tx_chains_num &&
data->disconn_array[i]) {
/*
* If all chains are disconnected
* connect the first valid tx chain
*/
first_chain =
- iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+ il4965_find_first_chain(il->cfg->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
- IWL_DEBUG_CALIB(priv,
- "All Tx chains are disconnected W/A - declare %d as connected\n",
- first_chain);
+ D_CALIB("All Tx chains are disconnected"
+ "- declare %d as connected\n", first_chain);
break;
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
- active_chains != priv->chain_noise_data.active_chains)
- IWL_DEBUG_CALIB(priv,
- "Detected that not all antennas are connected! "
- "Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
+ if (active_chains != il->hw_params.valid_rx_ant &&
+ active_chains != il->chain_noise_data.active_chains)
+ D_CALIB("Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n", active_chains,
+ il->hw_params.valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
- IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
- active_chains);
+ D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
}
-static void iwl4965_gain_computation(struct iwl_priv *priv,
- u32 *average_noise,
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
+static void
+il4965_gain_computation(struct il_priv *il, u32 * average_noise,
+ u16 min_average_noise_antenna_i, u32 min_average_noise,
+ u8 default_chain)
{
int i, ret;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ struct il_chain_noise_data *data = &il->chain_noise_data;
data->delta_gain_code[min_average_noise_antenna_i] = 0;
for (i = default_chain; i < NUM_RX_CHAINS; i++) {
s32 delta_g = 0;
- if (!(data->disconn_array[i]) &&
- (data->delta_gain_code[i] ==
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+ if (!data->disconn_array[i] &&
+ data->delta_gain_code[i] ==
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
delta_g = average_noise[i] - min_average_noise;
- data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+ data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
data->delta_gain_code[i] =
- min(data->delta_gain_code[i],
+ min(data->delta_gain_code[i],
(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
data->delta_gain_code[i] =
- (data->delta_gain_code[i] | (1 << 2));
+ (data->delta_gain_code[i] | (1 << 2));
} else {
data->delta_gain_code[i] = 0;
}
}
- IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
- data->delta_gain_code[0],
- data->delta_gain_code[1],
- data->delta_gain_code[2]);
+ D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
+ data->delta_gain_code[1], data->delta_gain_code[2]);
/* Differential gain gets sent to uCode only once */
if (!data->radio_write) {
- struct iwl_calib_diff_gain_cmd cmd;
+ struct il_calib_diff_gain_cmd cmd;
data->radio_write = 1;
memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
cmd.diff_gain_a = data->delta_gain_code[0];
cmd.diff_gain_b = data->delta_gain_code[1];
cmd.diff_gain_c = data->delta_gain_code[2];
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
+ ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
if (ret)
- IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD\n");
+			D_CALIB("fail sending cmd C_PHY_CALIBRATION\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
/* Mark so we run this algo only once! */
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ data->state = IL_CHAIN_NOISE_CALIBRATED;
}
}
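
The delta-gain encoding used just above can be checked with a small standalone sketch (not driver code; MAX_DELTA_GAIN_CODE is a stand-in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE and the noise values are made up): scale the noise difference by 10/15, clamp it, then OR in bit 2 exactly as the loop does.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the driver's CHAIN_NOISE_MAX_DELTA_GAIN_CODE. */
#define MAX_DELTA_GAIN_CODE 15u

static uint8_t encode_delta_gain(uint32_t noise, uint32_t min_noise)
{
	uint32_t delta_g = noise - min_noise;		/* e.g. 90 - 75 = 15 */
	uint8_t code = (uint8_t)((delta_g * 10) / 15);	/* 15 * 10 / 15 = 10 */

	if (code > MAX_DELTA_GAIN_CODE)			/* clamp, as the min() above does */
		code = MAX_DELTA_GAIN_CODE;
	return code | (1 << 2);				/* OR in bit 2: 10 | 4 = 14 */
}

int main(void)
{
	printf("%u\n", (unsigned)encode_delta_gain(90, 75));	/* prints 14 */
	return 0;
}
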
-
-
/*
- * Accumulate 16 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise stats for each of
* 3 receivers/antennas/rx-chains, then figure out:
* 1) Which antennas are connected.
* 2) Differential rx gain settings to balance the 3 receivers.
*/
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+void
+il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
{
- struct iwl_chain_noise_data *data = NULL;
+ struct il_chain_noise_data *data = NULL;
u32 chain_noise_a;
u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u32 chain_sig_a;
u32 chain_sig_b;
u32 chain_sig_c;
- u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
- u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+ u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u8 rxon_band24;
u8 stat_band24;
unsigned long flags;
- struct statistics_rx_non_phy *rx_info;
+ struct stats_rx_non_phy *rx_info;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct il_rxon_context *ctx = &il->ctx;
- if (priv->disable_chain_noise_cal)
+ if (il->disable_chain_noise_cal)
return;
- data = &(priv->chain_noise_data);
+ data = &(il->chain_noise_data);
/*
* Accumulate just the first "chain_noise_num_beacons" after
* the first association, then we're done forever.
*/
- if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
- if (data->state == IWL_CHAIN_NOISE_ALIVE)
- IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+ if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
+ if (data->state == IL_CHAIN_NOISE_ALIVE)
+ D_CALIB("Wait for noise calib reset\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
- rx.general);
+ rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB(" << Interference data unavailable\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
- stat_band24 = !!(((struct iwl_notif_statistics *)
- stat_resp)->flag &
- STATISTICS_REPLY_FLG_BAND_24G_MSK);
- stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
- stat_resp)->flag) >> 16;
+ stat_band24 =
+ !!(((struct il_notif_stats *)stat_resp)->
+ flag & STATS_REPLY_FLG_BAND_24G_MSK);
+ stat_chnum =
+ le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
/* Make sure we accumulate data for just the associated channel
* (even if scanning). */
- if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
- IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
- rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
+ D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
+ rxon_band24);
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
/*
- * Accumulate beacon statistics values across
+ * Accumulate beacon stats values across
* "chain_noise_num_beacons"
*/
- chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
- IN_BAND_FILTER;
- chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
- IN_BAND_FILTER;
- chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
- IN_BAND_FILTER;
+ chain_noise_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ chain_noise_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ chain_noise_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
data->beacon_count++;
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
- IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
- rxon_chnum, rxon_band24, data->beacon_count);
- IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
- chain_sig_a, chain_sig_b, chain_sig_c);
- IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
- chain_noise_a, chain_noise_b, chain_noise_c);
+ D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
+ data->beacon_count);
+ D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
+ chain_sig_c);
+ D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
+ chain_noise_c);
/* If this is the "chain_noise_num_beacons", determine:
* 1) Disconnected antennas (using signal strengths)
* 2) Differential gain (using silence noise) to balance receivers */
- if (data->beacon_count !=
- priv->cfg->base_params->chain_noise_num_beacons)
+ if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
return;
/* Analyze signal for disconnected antenna */
- iwl4965_find_disconn_antenna(priv, average_sig, data);
+ il4965_find_disconn_antenna(il, average_sig, data);
/* Analyze noise for rx balance */
- average_noise[0] = data->chain_noise_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[1] = data->chain_noise_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[2] = data->chain_noise_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[0] =
+ data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[1] =
+ data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[2] =
+ data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
for (i = 0; i < NUM_RX_CHAINS; i++) {
- if (!(data->disconn_array[i]) &&
- (average_noise[i] <= min_average_noise)) {
+ if (!data->disconn_array[i] &&
+ average_noise[i] <= min_average_noise) {
/* This means that chain i is active and has
* lower noise values so far: */
min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
}
}
- IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
- average_noise[0], average_noise[1],
- average_noise[2]);
+ D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
+ average_noise[1], average_noise[2]);
- IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
- min_average_noise, min_average_noise_antenna_i);
+ D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
+ min_average_noise_antenna_i);
- iwl4965_gain_computation(priv, average_noise,
- min_average_noise_antenna_i, min_average_noise,
- iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+ il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
+ min_average_noise,
+ il4965_find_first_chain(il->cfg->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
*/
- if (priv->cfg->ops->lib->update_chain_flags)
- priv->cfg->ops->lib->update_chain_flags(priv);
+ if (il->cfg->ops->lib->update_chain_flags)
+ il->cfg->ops->lib->update_chain_flags(il);
- data->state = IWL_CHAIN_NOISE_DONE;
- iwl_legacy_power_update_mode(priv, false);
+ data->state = IL_CHAIN_NOISE_DONE;
+ il_power_update_mode(il, false);
}
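
As a concrete walk-through of the averaging step: after the 16 beacons mentioned in the comment above, accumulated silence values of chain_noise_a = 1440, chain_noise_b = 1280 and chain_noise_c = 1600 (illustrative numbers) average out to 90, 80 and 100. Chain B wins the min_average_noise comparison, keeps a delta gain code of 0, and chains A and C receive positive codes from il4965_gain_computation() to pull them toward B's noise floor.
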
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+void
+il4965_reset_run_time_calib(struct il_priv *il)
{
int i;
- memset(&(priv->sensitivity_data), 0,
- sizeof(struct iwl_sensitivity_data));
- memset(&(priv->chain_noise_data), 0,
- sizeof(struct iwl_chain_noise_data));
+ memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
+ memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
for (i = 0; i < NUM_RX_CHAINS; i++)
- priv->chain_noise_data.delta_gain_code[i] =
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+ il->chain_noise_data.delta_gain_code[i] =
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
- /* Ask for statistics now, the uCode will send notification
+ /* Ask for stats now, the uCode will send notification
* periodically after association */
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+ il_send_stats_request(il, CMD_ASYNC, true);
}
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 00000000000..98ec39f56ba
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "common.h"
+#include "4965.h"
+
+static const char *fmt_value = " %-30s %10u\n";
+static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
+static const char *fmt_header =
+ "%-32s current cumulative delta max\n";
+
+static int
+il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+ u32 flag;
+
+ flag = le32_to_cpu(il->_4965.stats.flag);
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+ if (flag & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
+ "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
+ "disabled");
+
+ return p;
+}
+
+ssize_t
+il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct stats_rx_phy) * 40 +
+ sizeof(struct stats_rx_non_phy) * 40 +
+ sizeof(struct stats_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct stats_rx_non_phy *general, *accum_general;
+ struct stats_rx_non_phy *delta_general, *max_general;
+ struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * the statistics information displayed here is based on
+	 * the last stats notification from the uCode and
+	 * might not reflect the current uCode activity
+ */
+ ofdm = &il->_4965.stats.rx.ofdm;
+ cck = &il->_4965.stats.rx.cck;
+ general = &il->_4965.stats.rx.general;
+ ht = &il->_4965.stats.rx.ofdm_ht;
+ accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
+ accum_cck = &il->_4965.accum_stats.rx.cck;
+ accum_general = &il->_4965.accum_stats.rx.general;
+ accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
+ delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
+ delta_cck = &il->_4965.delta_stats.rx.cck;
+ delta_general = &il->_4965.delta_stats.rx.general;
+ delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
+ max_ofdm = &il->_4965.max_delta.rx.ofdm;
+ max_cck = &il->_4965.max_delta.rx.cck;
+ max_general = &il->_4965.max_delta.rx.general;
+ max_ht = &il->_4965.max_delta.rx.ofdm_ht;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load, delta_general->channel_load,
+ max_general->channel_load);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err, delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
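
The read handler above leans on the pos += scnprintf(buf + pos, bufsz - pos, ...) idiom: scnprintf() returns the number of characters actually stored, so pos can never run past bufsz even when a line is truncated, and the final pos is handed to simple_read_from_buffer(). A userspace sketch of the same pattern, where scnprintf_like() is a hypothetical stand-in built on vsnprintf() and nothing here is driver code:

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for the kernel's scnprintf(): returns characters actually stored. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	ret = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (ret < 0)
		return 0;
	return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
	char buf[64];
	int pos = 0;

	/* Append line by line; truncation only shortens output, never overflows. */
	pos += scnprintf_like(buf + pos, sizeof(buf) - pos, " %-12s %10u\n", "plcp_err:", 3u);
	pos += scnprintf_like(buf + pos, sizeof(buf) - pos, " %-12s %10u\n", "crc32_err:", 7u);
	fwrite(buf, 1, (size_t)pos, stdout);
	return 0;
}
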
+
+ssize_t
+il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+	/* the statistics information displayed here is based on
+	 * the last stats notification from the uCode and
+	 * might not reflect the current uCode activity
+ */
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta_tx = &il->_4965.delta_stats.tx;
+ max_tx = &il->_4965.max_delta.tx;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
+ le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct stats_general) * 10 + 300;
+ ssize_t ret;
+ struct stats_general_common *general, *accum_general;
+ struct stats_general_common *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+	/* the statistics information displayed here is based on
+	 * the last stats notification from the uCode and
+	 * might not reflect the current uCode activity
+ */
+ general = &il->_4965.stats.general.common;
+ dbg = &il->_4965.stats.general.common.dbg;
+ div = &il->_4965.stats.general.common.div;
+ accum_general = &il->_4965.accum_stats.general.common;
+ accum_dbg = &il->_4965.accum_stats.general.common.dbg;
+ accum_div = &il->_4965.accum_stats.general.common.div;
+ delta_general = &il->_4965.delta_stats.general.common;
+ max_general = &il->_4965.max_delta.general.common;
+ delta_dbg = &il->_4965.delta_stats.general.common.dbg;
+ max_dbg = &il->_4965.max_delta.general.common.dbg;
+ delta_div = &il->_4965.delta_stats.general.common.div;
+ max_div = &il->_4965.max_delta.general.common.div;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
+ le32_to_cpu(general->temperature));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "wait_for_silence_timeout_count:",
+ le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+ accum_dbg->wait_for_silence_timeout_cnt,
+ delta_dbg->wait_for_silence_timeout_cnt,
+ max_dbg->wait_for_silence_timeout_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 00000000000..4aaef413556
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6536 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl4965"
+
+#include "common.h"
+#include "4965.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void
+il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IL_ERR("Tx flush command to flush out all frames\n");
+ if (!test_bit(S_EXIT_PENDING, &il->status))
+ queue_work(il->workqueue, &il->tx_flush);
+ }
+}
+
+/*
+ * EEPROM
+ */
+struct il_mod_params il4965_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void
+il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int
+il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0;
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write idx */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size |
+ (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
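
The Rx config value written above is assembled by OR-ing independent bit fields: the enable and IRQ-destination flags, rb_size (4K or 8K depending on the amsdu_size_8K module parameter), the RB interrupt-timeout field and the RBD-count field. rfdnlog is the log2 of the ring size, so with the 256 RBDs noted in the comment it is 8 (2^8 = 256); and although the block comment mentions an RB timeout of 0x10, rb_timeout is initialized to 0 here, so that field stays cleared and interrupt coalescing is handled by the CSR_INT_COALESCING write that follows.
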
+
+static void
+il4965_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold))
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int
+il4965_hw_nic_init(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = il_rx_queue_alloc(il);
+ if (ret) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il4965_rx_queue_reset(il, rxq);
+
+ il4965_rx_replenish(il);
+
+ il4965_rx_init(il, rxq);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!il->txq) {
+ ret = il4965_txq_ctx_alloc(il);
+ if (ret)
+ return ret;
+ } else
+ il4965_txq_ctx_reset(il);
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32) (dma_addr >> 8));
+}
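
The >> 8 above relies on RX buffers being 256-byte aligned (enforced by a BUG_ON further down in this file), so the low 8 bits of the DMA address are always zero: a 36-bit address such as 0x1_2345_6700 becomes the RBD pointer 0x0123_4567, which fits comfortably in the __le32 the hardware reads.
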
+
+/**
+ * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void
+il4965_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
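
The `rxq->write & ~0x7` comparison above means the device-visible write pointer only advances in steps of 8 RBDs: with write_actual at 16, restocking up to write = 23 changes nothing (23 & ~0x7 is still 16), but once write reaches 24 the masked value differs and il_rx_queue_update_write_ptr() runs, so the register write is batched rather than issued per buffer.
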
+
+/**
+ * il4965_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il4965_rx_queue_restock().
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void
+il4965_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+				D_INFO("alloc_pages failed, order: %d\n",
+ il->hw_params.rx_page_order);
+
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to alloc_pages with %s. "
+ "Only %u free buffers remaining.\n",
+ priority ==
+ GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
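
The two BUG_ON() checks near the end of the allocation loop are pure mask arithmetic: dma & ~DMA_BIT_MASK(36) is non-zero only if some bit above bit 35 is set (the address does not fit in 36 bits), and dma & DMA_BIT_MASK(8) is non-zero only if the address is not 256-byte aligned. A tiny standalone illustration, with BIT_MASK_U64 as a local stand-in for the kernel's DMA_BIT_MASK and a made-up address:

#include <stdint.h>
#include <stdio.h>

#define BIT_MASK_U64(n) ((n) < 64 ? (1ULL << (n)) - 1 : ~0ULL)

int main(void)
{
	uint64_t dma = 0x123456700ULL;	/* 33-bit value, 256-byte aligned */

	printf("fits in 36 bits: %s\n",
	       (dma & ~BIT_MASK_U64(36)) == 0 ? "yes" : "no");	/* yes */
	printf("256-byte aligned: %s\n",
	       (dma & BIT_MASK_U64(8)) == 0 ? "yes" : "no");	/* yes */
	return 0;
}
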
+
+void
+il4965_rx_replenish(struct il_priv *il)
+{
+ unsigned long flags;
+
+ il4965_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il4965_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+void
+il4965_rx_replenish_now(struct il_priv *il)
+{
+ il4965_rx_allocate(il, GFP_ATOMIC);
+
+ il4965_rx_queue_restock(il);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries; if an SKB is set to
+ * non-NULL it is unmapped and freed.
+ */
+void
+il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int
+il4965_rxq_stop(struct il_priv *il)
+{
+
+ /* stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
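
In the legacy branch above, band_offset compensates for mac80211's 5 GHz rate table containing only the OFDM rates: if the first IL_FIRST_OFDM_RATE entries of il_rates[] are the CCK rates (typically the four 1/2/5.5/11 Mb/s rates in this driver family), a 5 GHz frame whose PLCP matches table slot IL_FIRST_OFDM_RATE comes back as rate index 0, i.e. the first OFDM rate. HT frames skip the lookup entirely because the MCS number is simply the low byte of rate_n_flags.
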
+
+static int
+il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host. */
+ struct il4965_rx_non_cfg_phy *ncphy =
+ (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 agc =
+ (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
+ IL49_AGC_DB_POS;
+
+ u32 valid_antennae =
+ (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+ >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+ u8 max_rssi = 0;
+ u32 i;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info. */
+ for (i = 0; i < 3; i++)
+ if (valid_antennae & (1 << i))
+ max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+ D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+ max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IL4965_RSSI_OFFSET;
+}
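
Plugging illustrative numbers into the comment's formula: with max_rssi = 50 dB, agc = 40 dB and an RSSI offset of 44 (the actual constant is IL4965_RSSI_OFFSET), the reported signal is 50 - 40 - 44 = -34 dBm; a larger AGC value, i.e. more radio gain, pushes the result further negative for the same raw RSSI, matching the "higher AGC means lower signal" note above.
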
+
+static u32
+il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |=
+ (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void
+il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!il->cfg->mod_params->sw_crypto &&
+ il_set_decrypted_flag(il, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for N_RX (legacy ABG frames), or
+ * N_RX_MPDU (HT high-throughput N frames). */
+void
+il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct il_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * N_RX and N_RX_MPDU are handled differently.
+ * N_RX: physical layer info is in this buffer
+ * N_RX_MPDU: physical layer info was sent in separate
+ * command and cached in il->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == N_RX) {
+ phy_res = (struct il_rx_phy_res *)pkt->u.raw;
+ header =
+ (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status =
+ *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!il->_4965.last_phy_res_valid) {
+ IL_ERR("MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &il->_4965.last_phy_res;
+ amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status =
+ il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
+ }
+
+ if (unlikely(phy_res->cfg_phy_cnt > 20)) {
+ D_DROP("dsp size out of range [0,20]: %d\n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band =
+ (phy_res->
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
+ rx_status.rate_idx =
+ il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow a smooth user experience,
+ * this workaround doesn't propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+
+ il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = il4965_calc_rssi(il, phy_res);
+
+ il_dbg_log_rx_data_frame(il, len, header);
+ D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
+ (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap;
+ * it wants an actual antenna number, but I always get "7"
+ * for most legacy frames I receive, indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
+ &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
+ * This will be used later in il4965_hdl_rx() for N_RX_MPDU. */
+void
+il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ il->_4965.last_phy_res_valid = true;
+ memcpy(&il->_4965.last_phy_res, pkt->u.raw,
+ sizeof(struct il_rx_phy_res));
+}
+
+static int
+il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
+ enum ieee80211_band band, u8 is_active,
+ u8 n_probes, struct il_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
+ le32_to_cpu(scan_ch->type),
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
+ passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
+static inline u32
+il4965_ant_idx_to_flags(u8 ant_idx)
+{
+ return BIT(ant_idx) << RATE_MCS_ANT_POS;
+}
+
+int
+il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il_scan_cmd *scan;
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = il->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx = il_rxon_ctx_from_vif(vif);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("fail to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_any_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time =
+ (extra | ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
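+ /* For example, with the default suspend_time of 100 and a beacon
+ * interval of 100, extra is 1 << 22 and the remainder is 0, so the
+ * device is programmed with a suspend_time of 0x400000. */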
+ }
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod =
+ le32_to_cpu(il->ctx.active.
+ flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = RATE_6M_PLCP;
+ } else {
+ rate = RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = RATE_6M_PLCP;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
+ * here instead of IL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+ band = il->scan_band;
+
+ if (il->cfg->scan_rx_antennas[band])
+ rx_ant = il->cfg->scan_rx_antennas[band];
+
+ il->scan_tx_ant[band] =
+ il4965_toggle_tx_ant(il, il->scan_tx_ant[band], scan_tx_antennas);
+ rate_flags |= il4965_ant_idx_to_flags(il->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags =
+ il4965_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains =
+ rx_ant & ((u8) (il->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ D_SCAN("chain_noise_data.active_chains: %u\n",
+ il->chain_noise_data.active_chains);
+
+ rx_ant = il4965_first_antenna(active_chains);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+
+ cmd_len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |=
+ (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
+
+ scan->channel_count =
+ il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
+ (void *)&scan->data[cmd_len]);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+
+ return ret;
+}
+
+int
+il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return il4965_add_bssid_station(il, vif_priv->ctx,
+ vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+void
+il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&il->sta_lock);
+
+ if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ D_TX("free more than tfds_in_queue (%u:%d)\n",
+ il->stations[sta_id].tid[tid].tfds_in_queue, freed);
+ il->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+
+#define IL_TX_QUEUE_MSK 0xfffff
+
+static bool
+il4965_is_single_rx_stream(struct il_priv *il)
+{
+ return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+ il->current_ht_config.single_chain_sufficient;
+}
+
+#define IL_NUM_RX_CHAINS_MULTIPLE 3
+#define IL_NUM_RX_CHAINS_SINGLE 2
+#define IL_NUM_IDLE_CHAINS_DUAL 2
+#define IL_NUM_IDLE_CHAINS_SINGLE 1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity. Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int
+il4965_get_active_rx_chain_count(struct il_priv *il)
+{
+ /* # of Rx chains to use when expecting MIMO. */
+ if (il4965_is_single_rx_stream(il))
+ return IL_NUM_RX_CHAINS_SINGLE;
+ else
+ return IL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
+static int
+il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
+{
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (il->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
+ default:
+ WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
+ return active_cnt;
+ }
+}
+
+/* up to 4 chains */
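+ /* e.g. a chain_bitmap of 0x5 (chains A and C) yields 2 -- simply the
+ * population count of the low four bits */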
+static u8
+il4965_count_chain_bitmap(u32 chain_bitmap)
+{
+ u8 res;
+ res = (chain_bitmap & BIT(0)) >> 0;
+ res += (chain_bitmap & BIT(1)) >> 1;
+ res += (chain_bitmap & BIT(2)) >> 2;
+ res += (chain_bitmap & BIT(3)) >> 3;
+ return res;
+}
+
+/**
+ * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command ... it puts data in the wrong place.
+ */
+void
+il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ bool is_single = il4965_is_single_rx_stream(il);
+ bool is_cam = !test_bit(S_POWER_PMI, &il->status);
+ u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+ u32 active_chains;
+ u16 rx_chain;
+
+ /* Tell uCode which antennas are actually connected.
+ * Before first association, we assume all antennas are connected.
+ * Just after first association, il4965_chain_noise_calibration()
+ * checks which antennas actually *are* connected. */
+ if (il->chain_noise_data.active_chains)
+ active_chains = il->chain_noise_data.active_chains;
+ else
+ active_chains = il->hw_params.valid_rx_ant;
+
+ rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+ /* How many receivers should we use? */
+ active_rx_cnt = il4965_get_active_rx_chain_count(il);
+ idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
+
+ /* correct rx chain count according to hw settings
+ * and chain noise calibration
+ */
+ valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
+ if (valid_rx_cnt < active_rx_cnt)
+ active_rx_cnt = valid_rx_cnt;
+
+ if (valid_rx_cnt < idle_rx_cnt)
+ idle_rx_cnt = valid_rx_cnt;
+
+ rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+ rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+ ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+ if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
+ ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ else
+ ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+ D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
+ active_rx_cnt, idle_rx_cnt);
+
+ WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+ active_rx_cnt < idle_rx_cnt);
+}
+
+u8
+il4965_toggle_tx_ant(struct il_priv *il, u8 ant, u8 valid)
+{
+ int i;
+ u8 ind = ant;
+
+ for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+ ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+ if (valid & BIT(ind))
+ return ind;
+ }
+ return ant;
+}
+
+static const char *
+il4965_get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_WPTR);
+ IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
+ IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IL_CMD(FH49_TSSR_TX_STATUS_REG);
+ IL_CMD(FH49_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+int
+il4965_dump_fh(struct il_priv *il, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH49_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH49_RSCSR_CHNL0_WPTR,
+ FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_MEM_RSSR_SHARED_CTRL_REG,
+ FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IL_ERR("FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return 0;
+}
+
+void
+il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ il->missed_beacon_threshold) {
+ D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(S_SCANNING, &il->status))
+ il4965_init_sensitivity(il);
+ }
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before the beacon arrives. This measurement can be done only if we know
+ * exactly when to expect beacons, therefore only when we're associated. */
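+ /* e.g. beacon-silence readings of 25, 30 and 0 average (integer) to
+ * 55 / 2 = 27 over the two active chains, giving a noise floor of
+ * 27 - 107 = -80 dBm; chains reporting 0 are ignored. */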
+static void
+il4965_rx_calc_noise(struct il_priv *il)
+{
+ struct stats_rx_non_phy *rx_info;
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+ int last_rx_noise;
+
+ rx_info = &(il->_4965.stats.rx.general);
+ bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+ D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
+ bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ * Based on the assumption that all stats counters are in DWORDs.
+ * FIXME: This function is for debugging only; it does not deal with
+ * the case of counter roll-over.
+ */
+static void
+il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i, size;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+ struct stats_general_common *general, *accum_general;
+ struct stats_tx *tx, *accum_tx;
+
+ prev_stats = (__le32 *) &il->_4965.stats;
+ accum_stats = (u32 *) &il->_4965.accum_stats;
+ size = sizeof(struct il_notif_stats);
+ general = &il->_4965.stats.general.common;
+ accum_general = &il->_4965.accum_stats.general.common;
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta = (u32 *) &il->_4965.delta_stats;
+ max_delta = (u32 *) &il->_4965.max_delta;
+
+ for (i = sizeof(__le32); i < size;
+ i +=
+ sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+ accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ accum_general->temperature = general->temperature;
+ accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void
+il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ int change;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+
+ change =
+ ((il->_4965.stats.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
+#endif
+
+ /* TODO: reading some of the stats is unneeded */
+ memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+ set_bit(S_STATS, &il->status);
+
+ /* Reschedule the stats timer to occur in
+ * REG_RECALIB_PERIOD seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&il->stats_periodic,
+ jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+ if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ (pkt->hdr.cmd == N_STATS)) {
+ il4965_rx_calc_noise(il);
+ queue_work(il->workqueue, &il->run_time_calib_work);
+ }
+ if (il->cfg->ops->lib->temp_ops.temperature && change)
+ il->cfg->ops->lib->temp_ops.temperature(il);
+}
+
+void
+il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_4965.accum_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.delta_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il4965_hdl_stats(il, rxb);
+}
+
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * 4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
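+ /* For example, a QoS data frame with TID 6 maps to IEEE80211_AC_VO via
+ * tid_to_ac[] below and, with the identity AC->hw queue mapping, is
+ * transmitted on hw queue 0. */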
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int
+il4965_get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+/*
+ * Build the basic fields of the C_TX command.
+ */
+static void
+il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
+ struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void
+il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_idx = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /*
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * idx is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
+ || rate_idx > RATE_COUNT_LEGACY)
+ rate_idx =
+ rate_lowest_index(&il->bands[info->band],
+ info->control.sta);
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = il_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up antennas */
+ il->mgmt_tx_ant =
+ il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
+ il->hw_params.valid_tx_ant);
+
+ rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags =
+ il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void
+il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ D_TX("tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |=
+ (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ keyconf->keyidx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * start C_TX command process
+ */
+int
+il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct il_station_priv *sta_priv = NULL;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ struct il_tx_cmd *tx_cmd;
+ struct il_rxon_context *ctx = &il->ctx;
+ int txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+ bool is_agg = false;
+
+ if (info->control.vif)
+ ctx = il_rxon_ctx_from_vif(info->control.vif);
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* For management frames use broadcast id so as not to break aggregation */
+ if (!ieee80211_is_data(fc))
+ sta_id = ctx->bcast_sta_id;
+ else {
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop_unlock;
+ }
+ }
+
+ D_TX("station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_priv->asleep &&
+ (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+ }
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = ctx->mcast_queue;
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else
+ txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+ /* irqs already disabled/saved above when locking il->lock */
+ spin_lock(&il->sta_lock);
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+ seq_number = il->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl =
+ hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+ txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+ is_agg = true;
+ }
+ }
+
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if (unlikely(il_queue_space(q) < q->high_mark)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ il->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ il->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&il->sta_lock);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = ctx;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+ il_dbg_log_tx_data_frame(il, len, hdr);
+
+ il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+
+ il_update_stats(il, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
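+ /* e.g. if len above works out to 62, firstlen is rounded up to
+ * (62 + 3) & ~3 == 64, so two padding bytes follow the MAC header and
+ * TX_CMD_FLG_MH_PAD_MSK is set. */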
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
+ 1, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ secondlen, 0, 0);
+ }
+
+ scratch_phys =
+ txcmd_phys + sizeof(struct il_cmd_header) +
+ offsetof(struct il_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
+ le16_to_cpu(tx_cmd->
+ len));
+
+ pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of the value of ret. "ret" only indicates
+ * whether or not we should update the write pointer.
+ */
+
+ /*
+ * Avoid atomic ops if it isn't an associated client.
+ * Also, if this is a packet for aggregation, don't
+ * increase the counter because the ucode will stop
+ * aggregation queues when their respective station
+ * goes to sleep.
+ */
+ if (sta_priv && sta_priv->client && !is_agg)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ } else {
+ il_stop_queue(il, txq);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return -1;
+}
+
+static inline int
+il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
+{
+ ptr->addr =
+ dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void
+il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq) {
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+ }
+ il4965_free_dma_ptr(il, &il->kw);
+
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+ /* free tx queue structure */
+ il_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param il
+ * @return error code
+ */
+int
+il4965_txq_ctx_alloc(struct il_priv *il)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ il4965_hw_txq_ctx_free(il);
+
+ ret =
+ il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+ il->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IL_ERR("Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+ if (ret) {
+ IL_ERR("Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = il_alloc_txq_mem(il);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ (txq_id ==
+ il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (ret) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+error:
+ il4965_hw_txq_ctx_free(il);
+ il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+ return ret;
+}
+
+void
+il4965_txq_ctx_reset(struct il_priv *il)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
+ }
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+ int ch, txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&il->lock, flags);
+
+ il4965_txq_set_sched(il, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (il_poll_bit
+ (il, FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
+ IL_ERR("Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ il_rd(il, FH49_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_unmap(il);
+ else
+ il_tx_queue_unmap(il, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int
+il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void
+il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int
+il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr =
+ il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+/**
+ * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE: txq_id must be at least IL49_FIRST_AMPDU_QUEUE,
+ * i.e. it must be one of the higher queues used for aggregation
+ */
+static int
+il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
+ int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+ int ret;
+
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ /* Place first TFD at idx corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ /* Set up Tx win size and frame limit for this queue */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+ (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
+ & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct il_tid_data *tid_data;
+
+ tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+ IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = il4965_txq_ctx_activate_free(il);
+ if (txq_id == -1) {
+ IL_ERR("No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue is empty\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+/**
+ * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
+ * il->lock must be held by the caller
+ */
+static int
+il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+ il_txq_ctx_deactivate(il, txq_id);
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn;
+ struct il_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = il_sta_id(sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ tid_data = &il->stations[sta_id].tid[tid];
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ D_HT("AGG stop before setup done\n");
+ goto turn_off;
+ case IL_AGG_ON:
+ break;
+ default:
+ IL_WARN("Stopping AGG while state not ON or starting\n");
+ }
+
+ write_ptr = il->txq[txq_id].q.write_ptr;
+ read_ptr = il->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ D_HT("Stopping a non empty AGG HW QUEUE\n");
+ il->stations[sta_id].tid[tid].agg.state =
+ IL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ D_HT("HW queue is empty\n");
+turn_off:
+ il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&il->sta_lock);
+ spin_lock(&il->lock);
+
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if the uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int
+il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
+{
+ struct il_queue *q = &il->txq[txq_id].q;
+ u8 *addr = il->stations[sta_id].sta.sta.addr;
+ struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+ struct il_rxon_context *ctx;
+
+ ctx = &il->ctx;
+
+ lockdep_assert_held(&il->sta_lock);
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the */
+ /* aggregated HW queue */
+ if (txq_id == tid_data->agg.txq_id &&
+ q->read_ptr == q->write_ptr) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+ D_HT("HW queue empty: continue DELBA flow\n");
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+ tid_data->agg.state = IL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void
+il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr1)
+{
+ struct ieee80211_sta *sta;
+ struct il_station_priv *sta_priv;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(ctx->vif, addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(il->hw, sta, false);
+ }
+ rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+
+ if (!is_agg)
+ il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+}
+
+int
+il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+
+ if (WARN_ON_ONCE(tx_info->skb == NULL))
+ continue;
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+
+ il4965_tx_status(il, tx_info,
+ txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+ tx_info->skb = NULL;
+
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int
+il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
+ struct il_compressed_ba_resp *ba_resp)
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+ u64 bitmap, sent_bitmap;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IL_ERR("Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx win bits */
+ sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+ if (sh < 0) /* tbw something is wrong with indices */
+ sh += 0x100;
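+ /* e.g. with agg->start_idx == 5 and a block-ack starting at idx 253,
+ * sh is -248 and wraps to 8, so the BA bitmap is shifted right by 8
+ * bits before being compared with the driver's Tx bitmap below. */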
+
+ if (agg->frame_count > (64 - sh)) {
+ D_TX_REPLY("more frames than bitmap size");
+ return -1;
+ }
+
+ /* don't use 64-bit values for now */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ sent_bitmap = bitmap & agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
+ D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
+ i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
+ }
+
+ D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+ info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_len = agg->frame_count;
+ il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+ return 0;
+}
+
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+void
+il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void
+il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct il_tx_queue *txq = NULL;
+ struct il_ht_agg *agg;
+ int idx;
+ int sta_id;
+ int tid;
+ unsigned long flags;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx win, corresponds to idx
+ * (in Tx queue's circular buffer) of first TFD/frame in win */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= il->hw_params.max_txq_num) {
+ IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &il->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &il->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed;
+ * log the information and return for now.
+ * Since it can happen very often, and in order not to fill
+ * the syslog, the logging is not enabled by default.
+ */
+ D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
+
+ /* Find idx just before block-ack win */
+ idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
+ agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
+ "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow, ba_resp->scd_ssn);
+ D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in win */
+ il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack win (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+ if (il_queue_space(&txq->q) > txq->q.low_mark &&
+ il->mac80211_registered &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+
+ il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static struct il_link_quality_cmd *
+il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
+{
+ int i, r;
+ struct il_link_quality_cmd *link_cmd;
+ u32 rate_flags = 0;
+ __le32 rate_n_flags;
+
+ link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IL_ERR("Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (il->band == IEEE80211_BAND_5GHZ)
+ r = RATE_6M_IDX;
+ else
+ r = RATE_1M_IDX;
+
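+ /* CCK rates need the CCK flag set so uCode uses the right modulation */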
+ if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |=
+ il4965_first_antenna(il->hw_params.
+ valid_tx_ant) << RATE_MCS_ANT_POS;
+ rate_n_flags = il4965_hw_set_rate_n_flags(il_rates[r].plcp, rate_flags);
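+ /* Start every retry step at the same initial rate; rate scaling refines this later */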
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+ link_cmd->general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+ valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
+}
+
+/*
+ * il4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r)
+{
+ int ret;
+ u8 sta_id;
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
+
+ ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IL_ERR("Link quality command failed (%d)\n", ret);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ bool send_if_empty)
+{
+ int i, not_empty = 0;
+ u8 buff[sizeof(struct il_wep_cmd) +
+ sizeof(struct il_wep_key) * WEP_KEYS_MAX];
+ struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
+ size_t cmd_size = sizeof(struct il_wep_cmd);
+ struct il_host_cmd cmd = {
+ .id = ctx->wep_key_cmd,
+ .data = wep_cmd,
+ .flags = CMD_SYNC,
+ };
+
+ might_sleep();
+
+ memset(wep_cmd, 0,
+ cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
+
+ for (i = 0; i < WEP_KEYS_MAX; i++) {
+ wep_cmd->key[i].key_idx = i;
+ if (ctx->wep_keys[i].key_size) {
+ wep_cmd->key[i].key_offset = i;
+ not_empty = 1;
+ } else {
+ wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+ }
+
+ wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
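+ /* key material lives at offset 3 within the command's key field */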
+ memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+ ctx->wep_keys[i].key_size);
+ }
+
+ wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+ wep_cmd->num_keys = WEP_KEYS_MAX;
+
+ cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
+
+ cmd.len = cmd_size;
+
+ if (not_empty || send_if_empty)
+ return il_send_cmd(il, &cmd);
+ else
+ return 0;
+}
+
+int
+il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ lockdep_assert_held(&il->mutex);
+
+ return il4965_static_wepkey_cmd(il, ctx, false);
+}
+
+int
+il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
+
+ memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ if (il_is_rfkill(il)) {
+ D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
+ /* but keys in the device are cleared anyway, so return success */
+ return 0;
+ }
+ ret = il4965_static_wepkey_cmd(il, ctx, true);
+ D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
+
+ return ret;
+}
+
+int
+il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (keyconf->keylen != WEP_KEY_LEN_128 &&
+ keyconf->keylen != WEP_KEY_LEN_64) {
+ D_WEP("Bad WEP key length %d\n", keyconf->keylen);
+ return -EINVAL;
+ }
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = HW_KEY_DEFAULT;
+ il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+ ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+ memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+ keyconf->keylen);
+
+ ret = il4965_static_wepkey_cmd(il, ctx, false);
+ D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
+ keyconf->keyidx, ret);
+
+ return ret;
+}
+
+static int
+il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
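+ /* Keys installed for the broadcast station are used for multicast frames */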
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
+ keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ int ret = 0;
+ __le16 key_flags = 0;
+
+ key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = 16;
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+
+ /* This copy is actually not needed: we get the key with each TX */
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+void
+il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ u8 sta_id;
+ unsigned long flags;
+ int i;
+
+ if (il_scan_cancel(il)) {
+ /* cancel scan failed; just live with the bad key and rely
+ briefly on SW decryption */
+ return;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
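+ /* Copy the 5-word TKIP phase-1 key (TTAK) computed by mac80211 */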
+ for (i = 0; i < 5; i++)
+ il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+ cpu_to_le16(phase1key[i]);
+
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+int
+il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ u16 key_flags;
+ u8 keyidx;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys--;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
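+ /* Extract the key index currently installed in uCode from key_flags */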
+ keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+ D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+ if (keyconf->keyidx != keyidx) {
+ /* We need to remove a key whose idx differs from the one
+ * in the uCode. This means that the key we need to remove has
+ * already been replaced by another one with a different idx.
+ * Do nothing and return success.
+ */
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
+ key_flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (!test_and_clear_bit
+ (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
+ IL_ERR("idx %d not used in uCode key table.\n",
+ il->stations[sta_id].sta.key.key_offset);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ if (il_is_rfkill(il)) {
+ D_WEP
+ ("Not sending C_ADD_STA command because RFKILL enabled.\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys++;
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret =
+ il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret =
+ il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
+ keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+/**
+ * il4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int
+il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR
+ ("Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int
+il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ unsigned long flags;
+ struct il_link_quality_cmd *link_cmd;
+ u8 sta_id = ctx->bcast_sta_id;
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (il->stations[sta_id].lq)
+ kfree(il->stations[sta_id].lq);
+ else
+ D_INFO("Bcast sta rate scaling has not been initialized.\n");
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+int
+il4965_update_bcast_stations(struct il_priv *il)
+{
+ return il4965_update_bcast_station(il, &il->ctx);
+}
+
+/**
+ * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int
+il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Remove "disable" flag, to enable Tx for this TID */
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
+ u16 ssn)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -ENXIO;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+ il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+ il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+void
+il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.sta.modify_mask =
+ STA_MODIFY_SLEEP_TX_COUNT_MSK;
+ il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+void
+il4965_update_chain_flags(struct il_priv *il)
+{
+ if (il->cfg->ops->hcmd->set_rxon_chain) {
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
+ il_commit_rxon(il, &il->ctx);
+ }
+}
+
+static void
+il4965_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il_frame *
+il4965_get_free_frame(struct il_priv *il)
+{
+ struct il_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il_frame, list);
+}
+
+static void
+il4965_free_frame(struct il_priv *il, struct il_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+static u32
+il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void
+il4965_set_beacon_tim(struct il_priv *il,
+ struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
+ u32 frame_size)
+{
+ u16 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /*
+ * The idx is relative to the frame start, but we begin looking
+ * from the variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx + 1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+ tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
+ } else
+ IL_WARN("Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int
+il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
+{
+ struct il_tx_beacon_cmd *tx_beacon_cmd;
+ u32 frame_size;
+ u32 rate_flags;
+ u32 rate;
+ /*
+ * We have to set up the TX command, the TX Beacon command, and the
+ * beacon contents.
+ */
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("trying to build beacon w/o beacon context!\n");
+ return 0;
+ }
+
+ /* Initialize memory */
+ tx_beacon_cmd = &frame->u.beacon;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ /* Set up TX beacon contents */
+ frame_size =
+ il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+ if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+ return 0;
+ if (!frame_size)
+ return 0;
+
+ /* Set up TX command fields */
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+ tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ tx_beacon_cmd->tx.tx_flags =
+ TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
+ TX_CMD_FLG_STA_RATE_MSK;
+
+ /* Set up TX beacon command fields */
+ il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
+ frame_size);
+
+ /* Set up packet rate and flags */
+ rate = il_get_lowest_plcp(il, il->beacon_ctx);
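+ /* Rotate the management TX antenna among the valid antennas */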
+ il->mgmt_tx_ant =
+ il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
+ il->hw_params.valid_tx_ant);
+ rate_flags = il4965_ant_idx_to_flags(il->mgmt_tx_ant);
+ if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+ tx_beacon_cmd->tx.rate_n_flags =
+ il4965_hw_set_rate_n_flags(rate, rate_flags);
+
+ return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int
+il4965_send_beacon_cmd(struct il_priv *il)
+{
+ struct il_frame *frame;
+ unsigned int frame_size;
+ int rc;
+
+ frame = il4965_get_free_frame(il);
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ frame_size = il4965_hw_get_beacon_cmd(il, frame);
+ if (!frame_size) {
+ IL_ERR("Error configuring the beacon command\n");
+ il4965_free_frame(il, frame);
+ return -EINVAL;
+ }
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il4965_free_frame(il, frame);
+
+ return rc;
+}
+
+static inline dma_addr_t
+il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
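+ /* TB address is up to 36 bits: low 32 bits in tb->lo, bits 32-35 in the low nibble of hi_n_len */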
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
+ 16;
+
+ return addr;
+}
+
+static inline u16
+il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void
+il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
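+ /* hi_n_len packs the length in bits 4-15; bits 0-3 hold address bits 32-35 */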
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8
+il4965_tfd_get_num_tbs(struct il_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @il - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void
+il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
+ struct il_tfd *tfd;
+ struct pci_dev *dev = il->pci_dev;
+ int idx = txq->q.read_ptr;
+ int i;
+ int num_tbs;
+
+ tfd = &tfd_tmp[idx];
+
+ /* Sanity check on number of chunks */
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++)
+ pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
+ il4965_tfd_tb_get_len(tfd, i),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+int
+il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ struct il_queue *q;
+ struct il_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = (struct il_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ IL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ BUG_ON(addr & ~DMA_BIT_MASK(36));
+ if (unlikely(addr & ~IL_TX_DMA_MASK))
+ IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
+
+ il4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int
+il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ /* Circular buffer (TFD queue in DRAM) physical base address */
+ il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void
+il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_init_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+/**
+ * il4965_bg_stats_periodic - Timer callback to queue stats
+ *
+ * This callback is provided in order to send a stats request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last N_STATS
+ * was received. We need to ensure we receive the stats in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void
+il4965_bg_stats_periodic(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* don't send host commands if RF-kill is on */
+ if (!il_is_ready_rf(il))
+ return;
+
+ il_send_stats_request(il, CMD_ASYNC, false);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il4965_beacon_notif *beacon =
+ (struct il4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void
+il4965_perform_ct_kill_task(struct il_priv *il)
+{
+ unsigned long flags;
+
+ D_POWER("Stop all queues\n");
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ spin_lock_irqsave(&il->reg_lock, flags);
+ if (!_il_grab_nic_access(il))
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ il_wr(il, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+ }
+ }
+
+ if (flags & CT_CARD_DISABLED)
+ il4965_perform_ct_kill_task(il);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ if (!(flags & RXON_CARD_DISABLED))
+ il_scan_cancel(il);
+
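+ /* Notify mac80211 only if the HW rfkill state actually changed; otherwise wake command waiters */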
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il4965_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il4965_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il4965_hdl_alive;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il4965_hdl_beacon;
+
+ /*
+ * The same handler is used for both the REPLY to a discrete
+ * stats request from the host as well as for the periodic
+ * stats notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il4965_hdl_c_stats;
+ il->handlers[N_STATS] = il4965_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+
+ /* status change handler */
+ il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
+
+ il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
+ /* Rx handlers */
+ il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
+ il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+ /* block ack */
+ il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
+ /* Set up hardware specific Rx handlers */
+ il->cfg->ops->lib->handler_setup(il);
+}
+
+/**
+ * il4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void
+il4965_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ /* calculate how many frames need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
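+ /* Walk all filled RBs from the driver's read index up to the uCode's write index */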
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
+ (pkt->hdr.cmd != N_RX_MPDU) &&
+ (pkt->hdr.cmd != N_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il4965_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. RBs whose page is still owned by
+ * the driver go back on the rx_free list; RBs whose page was
+ * taken by a handler go on rx_used to be re-allocated later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, rxb->page, 0,
+ PAGE_SIZE << il->hw_params.
+ rx_page_order, PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il4965_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ il4965_rx_replenish_now(il);
+ else
+ il4965_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il4965_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush any pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
+static void
+il4965_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR49_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR49_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished to transmit "
+ "the frame/frames.\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!
+ (_il_rd(il, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IL_WARN("RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ il->isr_stats.rfkill++;
+
+ /* The driver only loads the ucode once, when the interface
+ * is set up, and allows loading it even if the radio is
+ * killed. Hence update the killswitch state here; the
+ * rfkill handler will take care of restarting if needed.
+ */
+ if (!test_bit(S_ALIVE, &il->status)) {
+ if (hw_rf_kill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IL_ERR("Microcode CT kill error detected.\n");
+ il->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /*
+ * uCode wakes up after power-down sleep.
+ * Tell device about any new tx or host commands enqueued,
+ * and about any Rx buffers made available while asleep.
+ */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ for (i = 0; i < il->hw_params.max_txq_num; i++)
+ il_txq_update_write_ptr(il, &il->txq[i]);
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il4965_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("uCode load interrupt\n");
+ il->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ il->ucode_write_complete = 1;
+ wake_up(&il->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(il->inta_mask)) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ il_enable_rfkill_int(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed via sysfs below is a per-device debug level
+ * that is used instead of the global debug level when it is set.
+ */
+static ssize_t
+il4965_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il4965_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
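+ /* base 0: accept hex (0x...) or decimal input */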
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_ERR("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
+ il4965_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il4965_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
+
+static ssize_t
+il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_ready_rf(il))
+ return sprintf(buf, "off\n");
+ else
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il4965_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ IL_INFO("%s is not in decimal form.\n", buf);
+ else {
+ ret = il_set_tx_power(il, val, false);
+ if (ret)
+ IL_ERR("failed setting tx power (0x%d).\n", ret);
+ else
+ ret = count;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
+ il4965_store_tx_power);
+
+static struct attribute *il_sysfs_entries[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il4965_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+static void
+il4965_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+static void il4965_ucode_callback(const struct firmware *ucode_raw,
+ void *context);
+static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
+
+static int __must_check
+il4965_request_firmware(struct il_priv *il, bool first)
+{
+ const char *name_pre = il->cfg->fw_name_pre;
+ char tag[8];
+
+ if (first) {
+ il->fw_idx = il->cfg->ucode_api_max;
+ sprintf(tag, "%d", il->fw_idx);
+ } else {
+ il->fw_idx--;
+ sprintf(tag, "%d", il->fw_idx);
+ }
+
+ if (il->fw_idx < il->cfg->ucode_api_min) {
+ IL_ERR("no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
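+ /* Compose the firmware filename: <prefix><api-version>.ucode */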
+ sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
+ &il->pci_dev->dev, GFP_KERNEL, il,
+ il4965_ucode_callback);
+}
+
+struct il4965_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int
+il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
+ struct il4965_firmware_pieces *pieces)
+{
+ struct il_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IL_ERR("File size too small!\n");
+ return -EINVAL;
+ }
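+ /* Legacy (v0-v2) header: version word followed by five LE32 image sizes, 24 bytes total */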
+ pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+ src = ucode->v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ hdr_size + pieces->inst_size + pieces->data_size +
+ pieces->init_size + pieces->init_data_size + pieces->boot_size) {
+
+ IL_ERR("uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
+
+/**
+ * il4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct il_priv *il = context;
+ struct il_ucode_header *ucode;
+ int err;
+ struct il4965_firmware_pieces pieces;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ u32 api_ver;
+
+ u32 max_probe_length = 200;
+ u32 standard_phy_calibration_size =
+ IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (il->fw_idx <= il->cfg->ucode_api_max)
+ IL_ERR("request for firmware file '%s' failed.\n",
+ il->firmware_name);
+ goto try_again;
+ }
+
+ D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
+ ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IL_ERR("File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ err = il4965_load_firmware(il, ucode_raw, &pieces);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from the firmware header from here on.
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ goto try_again;
+ }
+
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected v%u, "
+ "got v%u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
+ D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
+ D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
+ D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
+ D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (pieces.inst_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
+ goto try_again;
+ }
+
+ if (pieces.data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
+ goto try_again;
+ }
+
+ if (pieces.init_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
+ goto try_again;
+ }
+
+ if (pieces.init_data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
+ goto try_again;
+ }
+
+ if (pieces.boot_size > il->hw_params.max_bsm_size) {
+ IL_ERR("uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
+ goto try_again;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = pieces.inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (pieces.init_size && pieces.init_data_size) {
+ il->ucode_init.len = pieces.init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = pieces.init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (pieces.boot_size) {
+ il->ucode_boot.len = pieces.boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Now that we can no longer fail, copy information */
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ D_INFO("Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in il_up()
+ */
+ D_INFO("Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
+ D_INFO("Copying (but not loading) init instr len %Zd\n",
+ pieces.init_size);
+ memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
+ }
+
+ /* Initialization data */
+ if (pieces.init_data_size) {
+ D_INFO("Copying (but not loading) init data len %Zd\n",
+ pieces.init_data_size);
+ memcpy(il->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
+ }
+
+ /* Bootstrap instructions */
+ D_INFO("Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+ /*
+ * Figure out the offsets of the chain noise reset and gain commands
+ * based on the size of the standard PHY calibration command table.
+ */
+ il->_4965.phy_calib_chain_noise_reset_cmd =
+ standard_phy_calibration_size;
+ il->_4965.phy_calib_chain_noise_gain_cmd =
+ standard_phy_calibration_size + 1;
+
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = il4965_mac_setup_register(il, max_probe_length);
+ if (err)
+ goto out_unbind;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_unbind;
+ }
+
+ /* We have our copies now, allow the OS to release its copy */
+ release_firmware(ucode_raw);
+ complete(&il->_4965.firmware_loading_complete);
+ return;
+
+try_again:
+ /* try next, if any */
+ if (il4965_request_firmware(il, false))
+ goto out_unbind;
+ release_firmware(ucode_raw);
+ return;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ il4965_dealloc_ucode_pci(il);
+out_unbind:
+ complete(&il->_4965.firmware_loading_complete);
+ device_release_driver(&il->pci_dev->dev);
+ release_firmware(ucode_raw);
+}
+
+static const char *const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STBL",
+ "FH49_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT",
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
+static struct {
+ char *name;
+ u8 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *
+il4965_desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
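+
+/*
+ * Word layout of the on-device error event table, as read back below:
+ * word 0 = count, 1 = desc, 2 = pc, 3 = blink1, 4 = blink2, 5 = ilink1,
+ * 6 = ilink2, 7 = data1, 8 = data2, 9 = line, 11 = time, 22 = hcmd
+ * (inferred from the il_read_targ_mem() offsets used below).
+ */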
+
+void
+il4965_dump_nic_error_log(struct il_priv *il)
+{
+ u32 data2, line;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
+
+ if (il->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
+ base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
+ il->isr_stats.err_code = desc;
+ pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
+ data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
+ line = il_read_targ_mem(il, base + 9 * sizeof(u32));
+ time = il_read_targ_mem(il, base + 11 * sizeof(u32));
+ hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
+
+ IL_ERR("Desc Time "
+ "data1 data2 line\n");
+ IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+ il4965_desc_lookup(desc), desc, time, data1, data2, line);
+ IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
+ blink2, ilink1, ilink2, hcmd);
+}
+
+static void
+il4965_rf_kill_ct_config(struct il_priv *il)
+{
+ struct il_ct_kill_config cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&il->lock, flags);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ cmd.critical_temperature_R =
+ cpu_to_le32(il->hw_params.ct_kill_threshold);
+
+ ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
+ if (ret)
+ IL_ERR("C_CT_KILL_CONFIG failed\n");
+ else
+ D_INFO("C_CT_KILL_CONFIG " "succeeded, "
+ "critical temperature is %d\n",
+ il->hw_params.ct_kill_threshold);
+}
+
+static const s8 default_queue_to_tx_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+ IL49_CMD_FIFO_NUM,
+ IL_TX_FIFO_UNUSED,
+ IL_TX_FIFO_UNUSED,
+};
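+
+/*
+ * The table above maps Tx queues 0-3 to the four EDCA FIFOs (VO, VI, BE,
+ * BK), queue 4 to the command FIFO, and leaves queues 5-6 unused; the
+ * BUILD_BUG_ON() in il4965_alive_notify() relies on it having 7 entries.
+ */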
+
+#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
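+/* e.g. IL_MASK(0, 6) = (1 << 6) | ((1 << 6) - (1 << 0)) = 0x7f, i.e. bits
+ * 0..6 set -- used below to enable all seven Tx DMA/FIFO channels */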
+
+static int
+il4965_alive_notify(struct il_priv *il)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Clear 4965's internal Tx Scheduler database */
+ il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
+ a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr +
+        IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
+      a += 4)
+     il_write_targ_mem(il, a, 0);
+
+ /* Tell 4965 where to find Tx byte count tables */
+ il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
+ il_wr(il, FH49_TX_CHICKEN_BITS_REG,
+ reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Disable chain mode for all queues */
+ il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
+
+ /* Initialize each Tx queue (including the command queue) */
+ for (i = 0; i < il->hw_params.max_txq_num; i++) {
+
+ /* TFD circular buffer read/write idxes */
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
+ il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+ /* Max Tx Window size for Scheduler-ACK mode */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+ (SCD_WIN_SIZE <<
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ /* Frame limit */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ }
+ il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
+ (1 << il->hw_params.max_txq_num) - 1);
+
+ /* Activate all Tx DMA/FIFO channels */
+ il4965_txq_set_sched(il, IL_MASK(0, 6));
+
+ il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&il->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ il->txq_ctx_active_msk = 0;
+ /* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+ for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+ int ac = default_queue_to_tx_fifo[i];
+
+ il_txq_ctx_activate(il, i);
+
+ if (ac == IL_TX_FIFO_UNUSED)
+ continue;
+
+ il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il_init_alive_start()).
+ */
+static void
+il4965_alive_start(struct il_priv *il)
+{
+ int ret = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* The "initialize" uCode has loaded the runtime uCode ... verify its
+ * instruction image. This is a paranoid check, because we would not
+ * have gotten the "runtime" alive if the code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ ret = il4965_alive_notify(il);
+ if (ret) {
+ IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
+ goto restart;
+ }
+
+ /* After the ALIVE response, we can send host commands to the uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK;
+
+ if (il_is_associated_ctx(ctx)) {
+ struct il_rxon_cmd *active_rxon =
+ (struct il_rxon_cmd *)&ctx->active;
+ /* apply any changes in staging */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, &il->ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ /* Configure bluetooth coexistence if enabled */
+ il_send_bt_config(il);
+
+ il4965_reset_run_time_calib(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il_commit_rxon(il, ctx);
+
+ /* At this point, the NIC is initialized and operational */
+ il4965_rf_kill_ct_config(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ il_power_update_mode(il, true);
+ D_INFO("Updated power mode\n");
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il4965_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il4965_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop the TX queue watchdog. S_EXIT_PENDING must be set first
+ * to prevent the timer from being rearmed. */
+ del_timer_sync(&il->watchdog);
+
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il4965_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il_init() then
+ * clear all bits but the RF Kill bit and return */
+ if (!il_is_init(il)) {
+     il->status =
+         test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+         test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+         test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+     goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+  * bit and continue taking the NIC down. */
+ il->status &=
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il4965_txq_ctx_stop(il);
+ il4965_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il4965_clear_free_frames(il);
+}
+
+static void
+il4965_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il4965_down(il);
+ mutex_unlock(&il->mutex);
+
+ il4965_cancel_deferred_work(il);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int
+il4965_set_hw_ready(struct il_priv *il)
+{
+ int ret = 0;
+
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
+ if (ret != -ETIMEDOUT)
+ il->hw_ready = true;
+ else
+ il->hw_ready = false;
+
+ D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
+ return ret;
+}
+
+static int
+il4965_prepare_card_hw(struct il_priv *il)
+{
+ int ret = 0;
+
+ D_INFO("il4965_prepare_card_hw enter\n");
+
+ ret = il4965_set_hw_ready(il);
+ if (il->hw_ready)
+ return ret;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ /* HW should be ready by now, check again. */
+ if (ret != -ETIMEDOUT)
+ il4965_set_hw_ready(il);
+
+ return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+__il4965_up(struct il_priv *il)
+{
+ int i;
+ int ret;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
+ ret = il4965_alloc_bcast_station(il, &il->ctx);
+ if (ret) {
+ il_dealloc_bcast_stations(il);
+ return ret;
+ }
+
+ il4965_prepare_card_hw(il);
+
+ if (!il->hw_ready) {
+ IL_WARN("Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ if (il_is_rfkill(il)) {
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+
+ il_enable_interrupts(il);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return 0;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ /* must be initialised before il_hw_nic_init */
+ il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
+
+ ret = il4965_hw_nic_init(il);
+ if (ret) {
+ IL_ERR("Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ ret = il->cfg->ops->lib->load_ucode(il);
+
+ if (ret) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il4965_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il4965_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* we tried to restart and configure the device for as long as
+ * our patience could withstand */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il->cfg->ops->lib->init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il4965_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_run_time_calib_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ run_time_calib_work);
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (il->start_calib) {
+ il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
+ il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
+ }
+
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+
+ __il4965_down(il);
+
+ mutex_unlock(&il->mutex);
+ il4965_cancel_deferred_work(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il4965_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il4965_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il4965_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ mutex_lock(&il->mutex);
+ il4965_rx_replenish(il);
+ mutex_unlock(&il->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (4 * HZ)
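+/* i.e. wait up to four seconds (in jiffies) for the runtime uCode ALIVE */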
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int
+il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-4965-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags =
+ IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (il->cfg->sku & IL_SKU_N)
+ hw->flags |=
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ hw->sta_data_size = sizeof(struct il_station_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ hw->wiphy->interface_modes |= il->ctx.interface_modes;
+ hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ /*
+ * For now, disable PS by default because it affects
+ * RX performance significantly.
+ */
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+int
+il4965_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+ ret = __il4965_up(il);
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ return ret;
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ D_INFO("Start UP work done.\n");
+
+ /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ return -ETIMEDOUT;
+ }
+ }
+
+ il4965_led_enable(il);
+
+out:
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+}
+
+void
+il4965_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open)
+ return;
+
+ il->is_open = 0;
+
+ il4965_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* User space software may expect to get rfkill change events
+ * even if the interface is down */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_rfkill_int(il);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MACDUMP("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il4965_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MACDUMP("leave\n");
+}
+
+void
+il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ D_MAC80211("enter\n");
+
+ il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
+ phase1key);
+
+ D_MAC80211("leave\n");
+}
+
+int
+il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct il_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+ bool is_default_wep_key = false;
+
+ D_MAC80211("enter\n");
+
+ if (il->cfg->mod_params->sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ /*
+ * If we are setting a WEP group key and haven't received any
+ * key-mapping keys so far, we are in legacy WEP mode (group keys
+ * only); otherwise we are in 802.1X mode.
+ * In legacy WEP mode we use a different host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ (key->hw_key_idx == HW_KEY_DEFAULT);
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key)
+ ret =
+ il4965_set_default_wep_key(il, vif_priv->ctx, key);
+ else
+ ret =
+ il4965_set_dynamic_key(il, vif_priv->ctx, key,
+ sta_id);
+
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = il4965_remove_default_wep_key(il, ctx, key);
+ else
+ ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);
+
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+int
+il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size)
+{
+ struct il_priv *il = hw->priv;
+ int ret = -EINVAL;
+
+ D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
+
+ if (!(il->cfg->sku & IL_SKU_N))
+ return -EACCES;
+
+ mutex_lock(&il->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ D_HT("start Rx\n");
+ ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ D_HT("stop Rx\n");
+ ret = il4965_sta_rx_agg_stop(il, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ D_HT("start Tx\n");
+ ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ D_HT("stop Tx\n");
+ ret = il4965_tx_agg_stop(il, vif, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+
+int
+il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
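+ /* note: is_ap describes the peer being added, not this interface;
+ * when we operate as a station the peer is our AP (presumably why
+ * il_add_station_common() is passed "is_ap" below) */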
+ int ret;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+
+ ret =
+ il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
+ &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il4965_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 ch;
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status) ||
+ test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ goto out;
+
+ if (!il_is_associated_ctx(ctx))
+ goto out;
+
+ if (!il->cfg->ops->lib->set_channel_switch)
+ goto out;
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&il->lock);
+
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&il->lock);
+
+ il_set_rate(il);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = cpu_to_le16(ch);
+ if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
+ clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
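+
+ /* CHK() sorts each mac80211 filter flag into filter_or (RXON filter
+ * bits to set) or filter_nand (bits to clear); both are applied to
+ * staging.filter_flags under il->mutex below */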
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ il->ctx.staging.filter_flags &= ~filter_nand;
+ il->ctx.staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because the hardware may be in the middle
+ * of a scan; the filter flags change will be committed eventually
+ * anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_txpower_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ txpower_work);
+
+ mutex_lock(&il->mutex);
+
+ /* If a scan happened to start before we got here
+ * then just return; the stats notification will
+ * kick off another scheduled work to compensate for
+ * any temperature delta we missed here. */
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status))
+ goto out;
+
+ /* Regardless of whether we are associated, we must reconfigure the
+ * TX power, since frames can be sent on non-radar channels while
+ * not associated */
+ il->cfg->ops->lib->send_tx_power(il);
+
+ /* Update last_temperature to keep is_calib_needed from running
+ * when it isn't needed... */
+ il->last_temperature = il->temperature;
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il4965_bg_restart);
+ INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
+ INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
+ INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
+
+ il_setup_scan_deferred_work(il);
+
+ INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
+
+ init_timer(&il->stats_periodic);
+ il->stats_periodic.data = (unsigned long)il;
+ il->stats_periodic.function = il4965_bg_stats_periodic;
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il4965_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il4965_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->txpower_work);
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+ cancel_work_sync(&il->run_time_calib_work);
+
+ il_cancel_scan_deferred_work(il);
+
+ del_timer_sync(&il->stats_periodic);
+}
+
+static void
+il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il_rates[i].plcp ==
+ RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+
+/*
+ * Acquire il->lock before calling this function!
+ */
+void
+il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
+{
+ il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
+}
+
+void
+il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+
+ /* Find out whether to activate Tx queue */
+ int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
+
+ /* Set up and activate */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ IL49_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+static int
+il4965_init_drv(struct il_priv *il)
+{
+ int ret;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ /* Choose which receivers/antennas to use */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+
+ il_init_scan_params(il);
+
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il4965_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+static void
+il4965_uninit_drv(struct il_priv *il)
+{
+ il4965_calib_free_results(il);
+ il_free_geos(il);
+ il_free_channel_map(il);
+ kfree(il->scan_cmd);
+}
+
+static void
+il4965_hw_detect(struct il_priv *il)
+{
+ il->hw_rev = _il_rd(il, CSR_HW_REV);
+ il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
+ il->rev_id = il->pci_dev->revision;
+ D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
+}
+
+static int
+il4965_set_hw_params(struct il_priv *il)
+{
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ if (il->cfg->mod_params->amsdu_size_8K)
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
+ else
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
+
+ il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
+
+ if (il->cfg->mod_params->disable_11n)
+ il->cfg->sku &= ~IL_SKU_N;
+
+ /* Device-specific setup */
+ return il->cfg->ops->lib->set_hw_params(il);
+}
+
+static const u8 il4965_bss_ac_to_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+};
+
+static const u8 il4965_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
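+
+/*
+ * The two tables above map ACs 0-3 (VO, VI, BE, BK in mac80211's order)
+ * to the device's Tx FIFOs and, one-to-one, to Tx queues 0-3; they are
+ * hooked up as ctx.ac_to_fifo/ac_to_queue in il4965_pci_probe() below.
+ */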
+
+static int
+il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ unsigned long flags;
+ u16 pci_cmd;
+
+ /************************
+ * 1. Allocating HW data
+ ************************/
+
+ hw = il_alloc_all(cfg);
+ if (!hw) {
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ /* At this point both hw and il are allocated. */
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.always_active = true;
+ il->ctx.is_active = true;
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
+ il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
+ il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
+ il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /**************************
+ * 2. Initializing PCI bus
+ **************************/
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err =
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ pci_set_drvdata(pdev, il);
+
+ /***********************
+ * 3. Read REV register
+ ***********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* these spin locks are used in apm_ops.init and EEPROM access,
+ * so initialize them now
+ */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ il4965_hw_detect(il);
+ IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il4965_prepare_card_hw(il);
+ if (!il->hw_ready) {
+ IL_WARN("Failed, HW not ready\n");
+ goto out_iounmap;
+ }
+
+ /*****************
+ * 4. Read EEPROM
+ *****************/
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ err = il4965_eeprom_check_version(il);
+ if (err)
+ goto out_free_eeprom;
+
+ /* extract MAC Address */
+ il4965_eeprom_get_mac(il, il->addresses[0].addr);
+ D_INFO("MAC address: %pM\n", il->addresses[0].addr);
+ il->hw->wiphy->addresses = il->addresses;
+ il->hw->wiphy->n_addresses = 1;
+
+ /************************
+ * 5. Setup HW constants
+ ************************/
+ if (il4965_set_hw_params(il)) {
+ IL_ERR("failed to set hw parameters\n");
+ goto out_free_eeprom;
+ }
+
+ /*******************
+ * 6. Setup il
+ *******************/
+
+ err = il4965_init_drv(il);
+ if (err)
+ goto out_free_eeprom;
+ /* At this point both hw and il are initialized. */
+
+ /********************
+ * 7. Setup services
+ ********************/
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ il4965_setup_deferred_work(il);
+ il4965_setup_handlers(il);
+
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
+
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
+ }
+
+ il_enable_rfkill_int(il);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+
+ il_power_initialize(il);
+
+ init_completion(&il->_4965.firmware_loading_complete);
+
+ err = il4965_request_firmware(il, true);
+ if (err)
+ goto out_destroy_workqueue;
+
+ return 0;
+
+out_destroy_workqueue:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il4965_uninit_drv(il);
+out_free_eeprom:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il4965_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ wait_for_completion(&il->_4965.firmware_loading_complete);
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+ sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
+
+ /* The ieee80211_unregister_hw() call will cause il_mac_stop() and
+ * il4965_down() to be called; since we are removing the device we
+ * need to set the S_EXIT_PENDING bit.
+ */
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il4965_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il4965_down(), but there are paths to
+ * run il4965_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il4965_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_synchronize_irq(il);
+
+ il4965_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il4965_rx_queue_free(il, &il->rxq);
+ il4965_hw_txq_ctx_free(il);
+
+ il_eeprom_free(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(il->pci_dev->irq, il);
+ pci_disable_msi(il->pci_dev);
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il4965_uninit_drv(il);
+
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*
+ * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
+ * Must be called under il->lock and with MAC access held.
+ */
+void
+il4965_txq_set_sched(struct il_priv *il, u32 mask)
+{
+ il_wr_prph(il, IL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+ {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
+
+static struct pci_driver il4965_driver = {
+ .name = DRV_NAME,
+ .id_table = il4965_hw_card_ids,
+ .probe = il4965_pci_probe,
+ .remove = __devexit_p(il4965_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il4965_init(void)
+{
+
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il4965_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il4965_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il4965_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il4965_exit(void)
+{
+ pci_unregister_driver(&il4965_driver);
+ il4965_rate_control_unregister();
+}
+
+module_exit(il4965_exit);
+module_init(il4965_init);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
+ S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 00000000000..467d0cb14ec
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "4965.h"
+
+#define IL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IL_NUMBER_TRY 1
+#define IL_HT_NUMBER_TRY 3
+
+#define RATE_MAX_WINDOW 62 /* # tx in history win */
+#define RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IL_MISSED_RATE_MAX 15
+/* max time to accumulate history */
+#define RATE_SCALE_FLUSH_INTVL (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX,
+ RATE_6M_IDX, RATE_9M_IDX,
+ RATE_12M_IDX, RATE_18M_IDX,
+ RATE_24M_IDX, RATE_36M_IDX,
+ RATE_48M_IDX, RATE_54M_IDX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
+
+#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_SISO_##s##M_PLCP, \
+ RATE_MIMO2_##s##M_PLCP,\
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX }
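+
+/*
+ * For example, IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11) expands
+ * mechanically to:
+ * [RATE_6M_IDX] = { RATE_6M_PLCP, RATE_SISO_6M_PLCP, RATE_MIMO2_6M_PLCP,
+ *                   RATE_6M_IEEE, RATE_5M_IDX, RATE_9M_IDX, RATE_5M_IDX,
+ *                   RATE_11M_IDX, RATE_5M_IDX, RATE_11M_IDX }
+ */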
+
+/*
+ * Parameter order:
+ * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il_rate_info il_rates[RATE_COUNT] = {
+ IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
+static int
+il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+
+ if (idx >= RATE_MIMO2_6M_PLCP)
+ idx = idx - RATE_MIMO2_6M_PLCP;
+
+ idx += IL_FIRST_OFDM_RATE;
+ /* skip 9M not supported in ht */
+ if (idx >= RATE_9M_IDX)
+ idx += 1;
+ if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx;
+ }
+
+ return -1;
+}
+
+static void il4965_rs_rate_scale_perform(struct il_priv *il,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta);
+static void il4965_rs_fill_link_cmd(struct il_priv *il,
+ struct il_lq_sta *lq_sta, u32 rate_n_flags);
+static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
+ bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
+ u32 *rate_n_flags, int idx);
+#else
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used
+ * in the 2.4 GHz (802.11g) band.
+ */
+
+static s32 expected_tpt_legacy[RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
+ {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+ {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+ {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+ {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+ {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
+ {"1", "BPSK DSSS"},
+ {"2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ {"11", "QPSK CCK"},
+ {"6", "BPSK 1/2"},
+ {"9", "BPSK 1/2"},
+ {"12", "QPSK 1/2"},
+ {"18", "QPSK 3/4"},
+ {"24", "16QAM 1/2"},
+ {"36", "16QAM 3/4"},
+ {"48", "64QAM 2/3"},
+ {"54", "64QAM 3/4"},
+ {"60", "64QAM 5/6"},
+};
+
+#define MCS_IDX_PER_STREAM (8)
+
+static inline u8
+il4965_rs_extract_rate(u32 rate_n_flags)
+{
+ return (u8) (rate_n_flags & 0xFF);
+}
+
+static void
+il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = IL_INVALID_VALUE;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
+static inline u8
+il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Remove old data from the stats. All data older than
+ * TID_MAX_TIME_DIFF is deleted.
+ */
+static void
+il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+ while (tl->queue_count && tl->time_stamp < oldest_time) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
+/*
+ * Increment the traffic load value for this TID and remove
+ * any old values that have aged past the time window.
+ */
+static u8
+il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else
+ return MAX_TID_COUNT;
+
+ if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ return MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return MAX_TID_COUNT;
+ }
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[idx] = tl->packet_count[idx] + 1;
+ tl->total = tl->total + 1;
+
+ if ((idx + 1) > tl->queue_count)
+ tl->queue_count = idx + 1;
+
+ return tid;
+}
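
The two helpers above keep a per-TID traffic-load history as a ring of time buckets: each bucket spans TID_QUEUE_CELL_SPACING ms, expired buckets are dropped from the head, and the running total later feeds the aggregation-threshold check in il4965_rs_tl_turn_on_agg_for_tid(). A stand-alone sketch of the same bucketed-counter idea follows, not part of the patch; CELL_MS and MAX_CELLS are made-up stand-ins for the driver's constants, and a monotonic millisecond clock is assumed.

#include <stdint.h>
#include <stdio.h>

#define CELL_MS   1000	/* stand-in for TID_QUEUE_CELL_SPACING */
#define MAX_CELLS 8	/* stand-in for TID_QUEUE_MAX_SIZE */

struct load_ring {
	uint32_t packets[MAX_CELLS];	/* per-bucket packet counts */
	uint32_t total;			/* sum of all live buckets */
	uint32_t queue_count;		/* buckets currently in use */
	uint32_t head;			/* idx of the oldest bucket */
	uint32_t head_time;		/* timestamp of the oldest bucket (ms) */
};

/* Account one packet at time 'now' (ms, monotonic). */
static void ring_add(struct load_ring *r, uint32_t now)
{
	uint32_t idx;

	now -= now % CELL_MS;		/* round down to a bucket boundary */

	/* Drop buckets that no longer fit in the window ending at 'now'. */
	while (r->queue_count &&
	       r->head_time + (MAX_CELLS - 1) * CELL_MS < now) {
		r->total -= r->packets[r->head];
		r->packets[r->head] = 0;
		r->head = (r->head + 1) % MAX_CELLS;
		r->head_time += CELL_MS;
		r->queue_count--;
	}

	if (!r->queue_count) {		/* empty ring: restart at 'now' */
		r->head = 0;
		r->head_time = now;
	}

	idx = (now - r->head_time) / CELL_MS;	/* 0 <= idx < MAX_CELLS */
	r->packets[(r->head + idx) % MAX_CELLS]++;
	r->total++;
	if (idx + 1 > r->queue_count)
		r->queue_count = idx + 1;
}

int main(void)
{
	struct load_ring r = { { 0 } };

	for (uint32_t t = 0; t < 5000; t += 700)
		ring_add(&r, t);
	printf("load over the window: %u packets\n", r.total);
	return 0;
}
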
+
+/*
+ * Get the traffic load value for this TID.
+ */
+static u32
+il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+
+ if (tid >= TID_MAX_LOAD_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int
+il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
+ u8 tid, struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = il4965_rs_tl_get_load(lq_data, tid);
+
+ if (load > IL_AGG_LOAD_THRESHOLD) {
+ D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * driver and mac80211 are out of sync;
+ * this might be caused by reloading firmware.
+ * Stop the Tx BA session here.
+ */
+ IL_ERR("Fail start Tx agg on tid: %d\n", tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else
+ D_HT("Aggregation not enabled for tid %d because load = %u\n",
+ tid, load);
+
+ return ret;
+}
+
+static void
+il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < TID_MAX_LOAD_COUNT)
+ il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
+ else
+ IL_ERR("tid exceeds max load count: %d/%d\n", tid,
+ TID_MAX_LOAD_COUNT);
+}
+
+static inline int
+il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an il_scale_tbl_info entry,
+ * with a NULL pointer check on the table
+ */
+static s32
+il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_idx];
+ return 0;
+}
+
+/**
+ * il4965_rs_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static int
+il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
+ int attempts, int successes)
+{
+ struct il_rate_scale_data *win = NULL;
+ static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_idx < 0 || scale_idx >= RATE_COUNT)
+ return -EINVAL;
+
+ /* Select win for current tx bit rate */
+ win = &(tbl->win[scale_idx]);
+
+ /* Get expected throughput */
+ tpt = il4965_get_expected_tpt(tbl, scale_idx);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & mask) {
+ win->data &= ~mask;
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ win->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
+
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt = (win->success_ratio * tpt + 64) / 128;
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ return 0;
+}
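
il4965_rs_collect_tx_data() above keeps a 62-deep bitmap of attempt outcomes per rate; the success ratio is stored scaled by 128, so the average throughput can be recovered as (ratio * expected_tpt + 64) / 128 with integer rounding. The stand-alone sketch below restates that window arithmetic outside the driver; WINDOW and the expected-throughput value are stand-ins, not the driver's constants.

#include <stdint.h>
#include <stdio.h>

#define WINDOW 62	/* stand-in for RATE_MAX_WINDOW */

struct win {
	uint64_t bitmap;	/* bit i set => attempt i succeeded */
	int counter;		/* attempts currently in the window */
	int success;		/* successes currently in the window */
};

/* Push one attempt outcome, dropping the oldest if the window is full. */
static void win_push(struct win *w, int ok)
{
	const uint64_t oldest = 1ULL << (WINDOW - 1);

	if (w->counter >= WINDOW) {
		w->counter = WINDOW - 1;
		if (w->bitmap & oldest) {
			w->bitmap &= ~oldest;
			w->success--;
		}
	}
	w->counter++;
	w->bitmap <<= 1;
	if (ok) {
		w->bitmap |= 1;
		w->success++;
	}
}

/* Success ratio scaled by 128, the same scaling the driver stores. */
static int win_ratio128(const struct win *w)
{
	return w->counter ? 128 * (100 * w->success) / w->counter : -1;
}

int main(void)
{
	struct win w = { 0, 0, 0 };
	int expected_tpt = 100;	/* arbitrary stand-in expected throughput */
	int sr;

	for (int i = 0; i < 100; i++)
		win_push(&w, i % 4 != 0);	/* 3 of 4 attempts succeed */

	sr = win_ratio128(&w);
	/* Same rounding as the driver: (ratio * tpt + 64) / 128. */
	printf("ratio=%d avg_tpt=%d\n", sr, (sr * expected_tpt + 64) / 128);
	return 0;
}
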
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32
+il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
+ int idx, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = il_rates[idx].plcp;
+ if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+
+ } else if (is_Ht(tbl->lq_type)) {
+ if (idx > IL_LAST_OFDM_RATE) {
+ IL_ERR("Invalid HT rate idx %d\n", idx);
+ idx = IL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= il_rates[idx].plcp_siso;
+ else
+ rate_n_flags |= il_rates[idx].plcp_mimo2;
+ } else {
+ IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |=
+ ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40) {
+ if (tbl->is_dup)
+ rate_n_flags |= RATE_MCS_DUP_MSK;
+ else
+ rate_n_flags |= RATE_MCS_HT40_MSK;
+ }
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IL_ERR("GF was set with SGI:SISO\n");
+ }
+ }
+ }
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int
+il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct il_scale_tbl_info *tbl, int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 il4965_num_of_ant =
+ il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct il_scale_tbl_info));
+ *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->is_dup = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (il4965_num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+ (rate_n_flags & RATE_MCS_DUP_MSK))
+ tbl->is_ht40 = 1;
+
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ tbl->is_dup = 1;
+
+ mcs = il4965_rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= RATE_SISO_60M_PLCP) {
+ if (il4965_num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE */
+ /* MIMO2 */
+ } else {
+ if (il4965_num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ }
+ }
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int
+il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct il_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while (new_ant_type != tbl->ant_type &&
+ !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool
+il4965_rs_use_green(struct ieee80211_sta *sta)
+{
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * il4965_rs_get_supported_rates - get the available rates
+ *
+ * If this is a management or broadcast frame, only return the
+ * basic available rates.
+ *
+ */
+static u16
+il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum il_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else
+ return lq_sta->active_mimo2_rate;
+ }
+}
+
+static u16
+il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+
+ /* For 802.11a or HT, walk to the literally adjacent rate in
+ * the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ low = il_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ high = il_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
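
For the non-legacy path, the function above scans the rate mask downward for the nearest enabled lower rate and upward for the nearest enabled higher one, then returns both neighbours packed into a single u16: the higher idx in the top byte, the lower idx in the bottom byte, with RATE_INVALID marking a missing neighbour; callers unpack with "& 0xff" and ">> 8". A stand-alone sketch of that scan-and-pack convention, with 0xff standing in for RATE_INVALID:

#include <stdint.h>
#include <stdio.h>

#define INVALID 0xff	/* stand-in for RATE_INVALID */

/*
 * Find the nearest enabled rates below and above 'idx' in 'mask' and pack
 * them as (high << 8) | low, the same convention the driver uses.
 */
static uint16_t adjacent_in_mask(uint16_t mask, int idx, int count)
{
	uint8_t low = INVALID, high = INVALID;

	for (int i = idx - 1; i >= 0; i--)
		if (mask & (1 << i)) {
			low = i;
			break;
		}
	for (int i = idx + 1; i < count; i++)
		if (mask & (1 << i)) {
			high = i;
			break;
		}
	return (uint16_t)(high << 8) | low;
}

int main(void)
{
	/* Rates 4..12 enabled (an OFDM-only association, say). */
	uint16_t mask = 0x1ff0;
	uint16_t hl = adjacent_in_mask(mask, 4, 13);

	/* Prints low=255 (no slower rate enabled) and high=5. */
	printf("low=%u high=%u\n", hl & 0xff, (hl >> 8) & 0xff);
	return 0;
}
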
+
+static u32
+il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, u8 scale_idx,
+ u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct il_priv *il = lq_sta->drv;
+
+ /* check if we need to switch from HT to legacy rates.
+ * assumption is that mandatory rates (1Mbps or 6Mbps)
+ * are always supported (spec demand) */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
+ switch_to_legacy = 1;
+ scale_idx = rs_ht_to_legacy[scale_idx];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (il4965_num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ }
+
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
+ }
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
+ low = scale_idx;
+ goto out;
+ }
+
+ high_low =
+ il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == RATE_INVALID)
+ low = scale_idx;
+
+out:
+ return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool
+il4965_table_type_matches(struct il_scale_tbl_info *a,
+ struct il_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
+ a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_idx, mac_idx, i;
+ struct il_lq_sta *lq_sta = il_sta;
+ struct il_link_quality_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct il_priv *il = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct il_scale_tbl_info tbl_type;
+ struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("get frame ack response, update rate scale win\n");
+
+ /* Treat uninitialized rate scaling data the same as non-existent. */
+ if (!lq_sta) {
+ D_RATE("Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
+ if (il->band == IEEE80211_BAND_5GHZ)
+ rs_idx -= IL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_idx = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+ if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
+ mac_idx++;
+ /*
+ * mac80211 HT idx is always zero-idxed; we need to move
+ * HT OFDM rates after CCK rates in 2.4 GHz band
+ */
+ if (il->band == IEEE80211_BAND_2GHZ)
+ mac_idx += IL_FIRST_OFDM_RATE;
+ }
+ /* Here we actually compare this rate to the latest LQ command */
+ if (mac_idx < 0 ||
+ tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
+ tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
+ tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
+ tbl_type.ant_type != info->antenna_sel_tx ||
+ !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
+ || !!(tx_rate & RATE_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
+ D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
+ rs_idx, tx_rate);
+ /*
+ * Since rates mis-match, the last LQ command may have failed.
+ * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
+ * the driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (il4965_table_type_matches
+ (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else
+ if (il4965_table_type_matches
+ (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ D_RATE("Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
+ tbl_type.ant_type, tbl_type.is_SGI);
+ /*
+ * no matching table found; bypass the data collection and
+ * continue performing rate scaling to find a matching table
+ */
+ il4965_rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first idx into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
+ &rs_idx);
+ il4965_rs_collect_tx_data(curr_tbl, rs_idx,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed +=
+ (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /*
+ * For legacy, update the frame history for each Tx retry.
+ */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
+ &tbl_type, &rs_idx);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (il4965_table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (il4965_table_type_matches
+ (&tbl_type, other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+ il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta->supp_rates[sband->band])
+ il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history stats.
+ * These control how long we stay using the same modulation mode before
+ * searching for a new mode.
+ */
+static void
+il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
+ struct il_lq_sta *lq_sta)
+{
+ D_RATE("we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void
+il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32(*ht_tbl_pointer)[RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
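
The chained if/else at the end of il4965_rs_set_expected_tpt_table() picks one of the four rows (Norm, SGI, AGG, AGG+SGI) of the chosen HT table. Since the rows are laid out in that order, the choice is equivalent to indexing with two flag bits; the sketch below is just a stand-alone restatement of that mapping, not part of the patch.

#include <stdio.h>

/* Rows are ordered Norm, SGI, AGG, AGG+SGI, so two flag bits select one. */
static int tpt_row(int is_sgi, int is_agg)
{
	return (is_agg ? 2 : 0) + (is_sgi ? 1 : 0);
}

int main(void)
{
	static const char *name[] = { "Norm", "SGI", "AGG", "AGG+SGI" };

	for (int agg = 0; agg <= 1; agg++)
		for (int sgi = 0; sgi <= 1; sgi++)
			printf("SGI=%d AGG=%d -> row %d (%s)\n",
			       sgi, agg, tpt_row(sgi, agg),
			       name[tpt_row(sgi, agg)]);
	return 0;
}
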
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32
+il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 idx)
+{
+ /* "active" values */
+ struct il_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[idx].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[idx];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = idx;
+
+ new_rate = high = low = start_hi = RATE_INVALID;
+
+ for (;;) {
+ high_low =
+ il4965_rs_get_adjacent_rate(il, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
+ (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
+ && tpt_tbl[rate] <= active_tpt)) ||
+ (active_sr >= RATE_SCALE_SWITCH &&
+ tpt_tbl[rate] > active_tpt)) {
+
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
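
The long comment inside the loop above describes when the candidate "search" rate should be lowered. Stripped of the loop bookkeeping, that decision is a single predicate; the stand-alone sketch below restates it. The threshold values are stand-ins chosen to be on the same x128 scale as the driver's success ratios; the real RATE_DECREASE_TH / RATE_HIGH_TH / RATE_SCALE_SWITCH constants are defined elsewhere in the driver.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in thresholds, scaled by 128 like the driver's success ratios. */
#define DECREASE_TH	 1920	/* roughly 15% */
#define HIGH_TH		10880	/* roughly 85% */
#define SCALE_SWITCH	10880

/*
 * Lower the candidate "search" rate if either:
 *  1) "active" is working modestly well and the candidate's ideal throughput
 *     beats the measured "active" throughput without exceeding "active"'s
 *     own ideal throughput, or
 *  2) "active" is working near-perfectly and the candidate's ideal throughput
 *     exceeds "active"'s ideal throughput.
 */
static bool try_lower_rate(int cand_tpt, int active_tpt,
			   int active_sr, int measured_tpt)
{
	return (100 * cand_tpt > measured_tpt &&
		active_sr > DECREASE_TH && active_sr <= HIGH_TH &&
		cand_tpt <= active_tpt) ||
	       (active_sr >= SCALE_SWITCH && cand_tpt > active_tpt);
}

int main(void)
{
	/* Active link measuring 90.00 (x100) at an 80% success ratio;
	 * the candidate rate promises 123 under ideal conditions. */
	printf("lower candidate? %s\n",
	       try_lower_rate(123, 124, 80 * 128, 9000) ? "yes" : "no");
	return 0;
}
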
+
+/*
+ * Set up search table for MIMO2
+ */
+static int
+il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
+ WLAN_HT_CAP_SM_PS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (il->hw_params.tx_chains_num < 2)
+ return -1;
+
+ D_RATE("LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("Can't switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int
+il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf, struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ D_RATE("LQ: try to switch to SISO\n");
+
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+ tbl->is_SGI = 0; /* 11n spec: no SGI in SISO+Greenfield */
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("can not switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int
+il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ int ret = 0;
+ u8 update_search_tbl_counter = 0;
+
+ tbl->action = IL_LEGACY_SWITCH_SISO;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_LEGACY_SWITCH_ANTENNA1:
+ case IL_LEGACY_SWITCH_ANTENNA2:
+ D_RATE("LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ il4965_rs_set_expected_tpt_table(lq_sta,
+ search_tbl);
+ goto out;
+ }
+ break;
+ case IL_LEGACY_SWITCH_SISO:
+ D_RATE("LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IL_LEGACY_SWITCH_MIMO2_AB:
+ case IL_LEGACY_SWITCH_MIMO2_AC:
+ case IL_LEGACY_SWITCH_MIMO2_BC:
+ D_RATE("LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int
+il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ u8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_SISO_SWITCH_ANTENNA1:
+ case IL_SISO_SWITCH_ANTENNA2:
+ D_RATE("LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_SISO_SWITCH_MIMO2_AB:
+ case IL_SISO_SWITCH_MIMO2_AC:
+ case IL_SISO_SWITCH_MIMO2_BC:
+ D_RATE("LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+ break;
+ case IL_SISO_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IL_ERR("SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int
+il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ s8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_MIMO2_SWITCH_ANTENNA1:
+ case IL_MIMO2_SWITCH_ANTENNA2:
+ D_RATE("LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_MIMO2_SWITCH_SISO_A:
+ case IL_MIMO2_SWITCH_SISO_B:
+ case IL_MIMO2_SWITCH_SISO_C:
+ D_RATE("LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ }
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
+{
+ struct il_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct il_priv *il;
+
+ il = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ RATE_SCALE_FLUSH_INTVL));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ lq_sta->total_failed > lq_sta->max_failure_limit ||
+ lq_sta->total_success > lq_sta->max_success_limit ||
+ (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
+ flush_interval_passed)) {
+ D_RATE("LQ: stay is expired %d %d %d\n:",
+ lq_sta->total_failed, lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+
+ /*
+ * Else if we've used this modulation mode enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >= lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ D_RATE("LQ: stay in table clear win\n");
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, int idx, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ il4965_rs_fill_link_cmd(il, lq_sta, rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void
+il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = RATE_INVALID;
+ int high = RATE_INVALID;
+ int idx;
+ int i;
+ struct il_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct il_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_idx_msk = 0;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = MAX_TID_COUNT;
+ struct il_tid_data *tid_data;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ /* TODO: this could probably be improved... */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = il4965_rs_tl_add_packet(lq_sta, hdr);
+ if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else
+ lq_sta->is_agg = 0;
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ idx = lq_sta->last_txrate_idx;
+
+ D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ D_RATE("mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_idx_msk =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_scale_idx_msk =
+ (u16) (rate_mask & lq_sta->supp_rates);
+
+ } else
+ rate_scale_idx_msk = rate_mask;
+
+ if (!rate_scale_idx_msk)
+ rate_scale_idx_msk = rate_mask;
+
+ if (!((1 << idx) & rate_scale_idx_msk)) {
+ IL_ERR("Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+ /* revert to active table if search table is not valid */
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history win for current rate */
+ if (!tbl->expected_tpt) {
+ IL_ERR("tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
+ idx = lq_sta->max_rate_idx;
+ update_lq = 1;
+ win = &(tbl->win[idx]);
+ goto lq_update;
+ }
+
+ win = &(tbl->win[idx]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = win->counter - win->success_counter;
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
+ win->success_counter, win->counter, idx);
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
+ if (win->average_tpt !=
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
+ IL_ERR("expected_tpt should have been calculated by now\n");
+ win->average_tpt =
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (win->average_tpt > lq_sta->last_tpt) {
+
+ D_RATE("LQ: SWITCHING TO NEW TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = win->average_tpt;
+
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+
+ D_RATE("LQ: GOING BACK TO THE OLD TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low =
+ il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
+ tbl->lq_type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /* If the user set a max rate, don't allow rates above that constraint */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
+ high = RATE_INVALID;
+
+ sr = win->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = win->average_tpt;
+ if (low != RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
+ low_tpt < current_tpt && high_tpt < current_tpt)
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else
+ scale_action = 0;
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != RATE_INVALID) {
+ update_lq = 1;
+ idx = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != RATE_INVALID) {
+ update_lq = 1;
+ idx = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
+ idx, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
+ */
+ if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
+ /* Save current throughput to compare with "search" throughput */
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
+ else if (is_siso(tbl->lq_type))
+ il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
+ idx);
+ else /* (is_mimo2(tbl->lq_type)) */
+ il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
+ idx);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+ D_RATE("Switch current mcs: %X idx: %d\n",
+ tbl->current_rate, idx);
+ il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ } else
+ done_search = 1;
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+ lq_sta->action_counter > tbl1->max_search) {
+ D_RATE("LQ: STAY in legacy table\n");
+ il4965_rs_set_stay_in_table(il, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ lq_sta->action_counter >= tbl1->max_search) {
+ if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ tid != MAX_TID_COUNT) {
+ tid_data =
+ &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF) {
+ D_RATE("try to aggregate tid %d\n",
+ tid);
+ il4965_rs_tl_turn_on_agg(il, tid,
+ lq_sta, sta);
+ }
+ }
+ il4965_rs_set_stay_in_table(il, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ i = idx;
+ lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * il4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run C_ADD_STA command to set up station table entry, before
+ * calling this function (which runs C_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void
+il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
+{
+ struct il_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = il4965_rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+ struct il_station_priv *sta_priv;
+ struct il_rxon_context *ctx;
+
+ if (!sta || !lq_sta)
+ return;
+
+ sta_priv = (void *)sta->drv_priv;
+ ctx = sta_priv->common.ctx;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ if (i < 0 || i >= RATE_COUNT)
+ i = 0;
+
+ rate = il_rates[i].plcp;
+ tbl->ant_type = il4965_first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
+ if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+ il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
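
The NOTE above describes an ordering contract rather than something the code enforces: the station entry created by C_ADD_STA must already exist before the link-quality command sent here can refer to its sta_id. A minimal, self-contained sketch of that dependency, using made-up helpers (add_sta, send_lq_cmd) rather than driver code:

#include <stdbool.h>
#include <stdio.h>

#define MAX_STA 32

static bool sta_present[MAX_STA];

/* stands in for C_ADD_STA: create the station table entry */
static void add_sta(int sta_id)
{
	sta_present[sta_id] = true;
}

/* stands in for C_TX_LINK_QUALITY_CMD: needs the entry to exist */
static int send_lq_cmd(int sta_id)
{
	if (!sta_present[sta_id]) {
		fprintf(stderr, "LQ cmd for unknown station %d rejected\n", sta_id);
		return -1;
	}
	printf("LQ cmd accepted for station %d\n", sta_id);
	return 0;
}

int main(void)
{
	send_lq_cmd(3);		/* wrong order: rejected */
	add_sta(3);		/* C_ADD_STA first ...   */
	send_lq_cmd(3);		/* ... then the LQ command succeeds */
	return 0;
}
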
+
+static void
+il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il_lq_sta *lq_sta = il_sta;
+ int rate_idx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
+ if (sband->band == IEEE80211_BAND_5GHZ &&
+ lq_sta->max_rate_idx != -1)
+ lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (lq_sta->max_rate_idx < 0 ||
+ lq_sta->max_rate_idx >= RATE_COUNT)
+ lq_sta->max_rate_idx = -1;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ if (!lq_sta)
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+ rate_idx -= IL_FIRST_OFDM_RATE;
+ /* 6M and 9M shared same MCS idx */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_IDX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_DUP_DATA;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
+ (sband->band == IEEE80211_BAND_5GHZ &&
+ rate_idx < IL_FIRST_OFDM_RATE))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust idx */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *
+il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il_station_priv *sta_priv =
+ (struct il_station_priv *)sta->drv_priv;
+ struct il_priv *il;
+
+ il = (struct il_priv *)il_rate;
+ D_RATE("create station rate scale win\n");
+
+ return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ int i, j;
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct il_station_priv *sta_priv;
+ struct il_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct il_station_priv *)sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ lq_sta->lq.sta_id = sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+ win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+ D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
+ sta_id);
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ lq_sta->is_dup = 0;
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
+ lq_sta->band = il->band;
+ /*
+ * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+ * supp_rates[] does not; shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16) 0x2);
+ lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16) 0x2);
+ lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* These values will be overridden later */
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+ valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+ /* as default allow aggregation for all tids */
+ lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
+ lq_sta->drv = il;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ il4965_rs_initialize_lq(il, conf, sta, lq_sta);
+}
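
The rx_mask conversion in il4965_rs_rate_init() above is easiest to see with concrete bits. Below is a standalone sketch of the same shift/mask sequence, assuming IL_FIRST_OFDM_RATE == 4 (the four CCK rates occupy indices 0-3); with all eight MCS bits set it yields 0x1FD0, the same "6 - 60 MBits, no 9, no CCK" mask used as the debugfs default further down.

#include <stdint.h>
#include <stdio.h>

#define FIRST_OFDM_RATE 4	/* assumed value of IL_FIRST_OFDM_RATE */

static uint16_t ht_mcs_to_rate_mask(uint8_t rx_mask)
{
	uint16_t m = (uint16_t)(rx_mask << 1);	/* make room for the 9 Mbit bit */

	m |= rx_mask & 0x1;			/* keep MCS 0 in bit 0 */
	m &= ~(uint16_t)0x2;			/* force 9 Mbit (bit 1) off */
	return (uint16_t)(m << FIRST_OFDM_RATE);/* skip over the CCK slots */
}

int main(void)
{
	/* MCS 0-7 supported -> 0x1FD0: 6-60 Mbit, 9 Mbit and CCK cleared */
	printf("0x%04X\n", ht_mcs_to_rate_mask(0xFF));
	return 0;
}
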
+
+static void
+il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+ u32 new_rate)
+{
+ struct il_scale_tbl_info tbl_type;
+ int idx = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (idx 0) if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Interpret new_rate (rate_n_flags) */
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+ &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
+
+ lq_cmd->general_params.mimo_delimiter =
+ is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (idx 0) */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
+ lq_cmd->general_params.single_stream_ant_msk =
+ tbl_type.ant_type;
+ } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
+ lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
+ }
+ /* otherwise we don't modify the existing value */
+ idx++;
+ repeat_rate--;
+ if (il)
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (idx < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate,
+ &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ idx++;
+ }
+
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->general_params.mimo_delimiter = idx;
+
+ /* Get next rate */
+ new_rate =
+ il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ idx++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
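
A simplified model of how il4965_rs_fill_link_cmd() above fills the retry table: each rate is repeated a fixed number of times (once for legacy, three times for HT in the real code) and then stepped down until all entries are used. The rate values and the step-down rule here are illustrative only; antenna toggling and the MIMO delimiter are omitted.

#include <stdio.h>

#define MAX_RETRY 16	/* mirrors LINK_QUAL_MAX_RETRY_NUM */

int main(void)
{
	int table[MAX_RETRY];
	int rate = 7;		/* start at the highest of 8 made-up rate indices */
	int repeat = 3;		/* HT-style: try each rate three times */
	int idx = 0;

	while (idx < MAX_RETRY) {
		int r;

		for (r = 0; r < repeat && idx < MAX_RETRY; r++)
			table[idx++] = rate;
		if (rate > 0)
			rate--;	/* step down, like il4965_rs_get_lower_rate() */
	}
	for (idx = 0; idx < MAX_RETRY; idx++)
		printf("rs_table[%2d] = rate %d\n", idx, table[idx]);
	return 0;
}
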
+
+static void *
+il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il4965_rs_free(void *il_rate)
+{
+ return;
+}
+
+static void
+il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il_priv *il __maybe_unused = il_r;
+
+ D_RATE("enter\n");
+ D_RATE("leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il4965_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+ struct il_priv *il;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ il = lq_sta->drv;
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+ ((lq_sta->
+ dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+ RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ D_RATE("Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IL_ERR
+ ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ D_RATE("Fixed rate OFF\n");
+ }
+ } else {
+ D_RATE("Fixed rate OFF\n");
+ }
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ char buf[64];
+ size_t buf_size;
+ u32 parsed_rate;
+ struct il_station_priv *sta_priv =
+ container_of(lq_sta, struct il_station_priv, lq_sta);
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ il = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
+ lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+
+ return count;
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int idx = 0;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ il = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
+ desc +=
+	    sprintf(buff + desc, "failed=%d success=%d rate=0x%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc +=
+ sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
+ desc +=
+ sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
+ (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc +=
+ sprintf(buff + desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ desc +=
+ sprintf(buff + desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc +=
+ sprintf(buff + desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc +=
+ sprintf(buff + desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc +=
+ sprintf(buff + desc,
+		    "general:" "flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+ lq_sta->lq.general_params.flags,
+ lq_sta->lq.general_params.mimo_delimiter,
+ lq_sta->lq.general_params.single_stream_ant_msk,
+ lq_sta->lq.general_params.dual_stream_ant_msk);
+
+ desc +=
+ sprintf(buff + desc,
+ "agg:"
+ "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+ lq_sta->lq.agg_params.agg_dis_start_th,
+ lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+ desc +=
+ sprintf(buff + desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.general_params.start_rate_idx[0],
+ lq_sta->lq.general_params.start_rate_idx[1],
+ lq_sta->lq.general_params.start_rate_idx[2],
+ lq_sta->lq.general_params.start_rate_idx[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ idx =
+ il4965_hwrate_to_plcp_idx(le32_to_cpu
+ (lq_sta->lq.rs_table[i].
+ rate_n_flags));
+ if (is_legacy(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps);
+ } else {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps,
+ il_rate_mcs[idx].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = il4965_rs_sta_dbgfs_scale_table_write,
+ .read = il4965_rs_sta_dbgfs_scale_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc +=
+ sprintf(buff + desc,
+ "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+ "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->lq_info[i].is_dup, lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < RATE_COUNT; j++) {
+ desc +=
+ sprintf(buff + desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il4965_rs_sta_dbgfs_stats_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ char buff[120];
+ int desc = 0;
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+ if (is_Ht(tbl->lq_type))
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ il_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = il4965_rs_sta_dbgfs_rate_scale_data_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+
+}
+
+static void
+il4965_rs_remove_debugfs(void *il, void *il_sta)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_4965_ops = {
+ .module = NULL,
+ .name = IL4965_RS_NAME,
+ .tx_status = il4965_rs_tx_status,
+ .get_rate = il4965_rs_get_rate,
+ .rate_init = il4965_rs_rate_init_stub,
+ .alloc = il4965_rs_alloc,
+ .free = il4965_rs_free,
+ .alloc_sta = il4965_rs_alloc_sta,
+ .free_sta = il4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il4965_rs_add_debugfs,
+ .remove_sta_debugfs = il4965_rs_remove_debugfs,
+#endif
+};
+
+int
+il4965_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void
+il4965_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 00000000000..84c54dccf19
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2421 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "common.h"
+#include "4965.h"
+
+/**
+ * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int ret = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return ret;
+}
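
A userspace analogue of the sparse check above: compare a host copy and a "device" copy of the image at 100-byte strides and give up after a few mismatches. There is no register I/O here; the device side is just a second array.

#include <stdint.h>
#include <stdio.h>

static int verify_sparse(const uint32_t *host, const uint32_t *dev, size_t len_bytes)
{
	size_t i;
	unsigned errcnt = 0;

	for (i = 0; i < len_bytes; i += 100) {
		size_t w = i / sizeof(uint32_t);	/* sample one word per 100 bytes */

		if (dev[w] != host[w] && ++errcnt >= 3)
			return -1;			/* too many bad samples */
	}
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint32_t host[256], dev[256];
	size_t n;

	for (n = 0; n < 256; n++)
		host[n] = dev[n] = (uint32_t)n;
	printf("clean image: %d\n", verify_sparse(host, dev, sizeof(host)));
	dev[25] ^= 0xdeadbeef;	/* corrupt a word that lies on a sample point */
	printf("corrupted:   %d\n", verify_sparse(host, dev, sizeof(dev)));
	return 0;
}
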
+
+/**
+ * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int ret = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return ret;
+}
+
+/**
+ * il4965_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+int
+il4965_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int ret;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_full(il, image, len);
+
+ return ret;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int
+il4965_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+void
+il4965_eeprom_release_semaphore(struct il_priv *il)
+{
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
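
The semaphore comment above boils down to a request-then-poll pattern with a bounded number of retries. A minimal mock of that pattern follows; the "hardware" simply grants ownership after a few status reads, and no real CSR access is involved.

#include <stdbool.h>
#include <stdio.h>

#define RETRY_LIMIT 3
#define POLL_LIMIT  10

static int reads_left = 4;	/* mock: ownership shows up after a few polls */

/* driver equivalent: il_set_bit(..., ..._EEPROM_OWN_SEM) -- the request */
static void request_sem(void)
{
	/* nothing to do in the mock */
}

/* driver equivalent: _il_poll_bit(...) -- read the bit back to see if we own it */
static bool sem_granted(void)
{
	return --reads_left <= 0;
}

int main(void)
{
	int try, poll;
	bool got = false;

	for (try = 0; try < RETRY_LIMIT && !got; try++) {
		request_sem();			/* request semaphore */
		for (poll = 0; poll < POLL_LIMIT && !got; poll++)
			got = sem_granted();	/* see if we got it */
	}
	printf("EEPROM semaphore %s\n", got ? "acquired" : "NOT acquired");
	return 0;
}
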
+
+int
+il4965_eeprom_check_version(struct il_priv *il)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
+
+ if (eeprom_ver < il->cfg->eeprom_ver ||
+ calib_ver < il->cfg->eeprom_calib_ver)
+ goto err;
+
+ IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
+ calib_ver, il->cfg->eeprom_calib_ver);
+ return -EINVAL;
+
+}
+
+void
+il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
+{
+ const u8 *addr = il_eeprom_query_addr(il,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
+
+/* Send led command */
+static int
+il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+ u32 reg;
+
+ reg = _il_rd(il, CSR_LED_REG);
+ if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+ _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+ return il_send_cmd(il, &cmd);
+}
+
+/* Set led register off */
+void
+il4965_led_enable(struct il_priv *il)
+{
+ _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct il_led_ops il4965_led_ops = {
+ .cmd = il4965_send_led_cmd,
+};
+
+static int il4965_send_tx_power(struct il_priv *il);
+static int il4965_hw_get_temperature(struct il_priv *il);
+
+/* Highest firmware API version supported */
+#define IL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL4965_UCODE_API_MIN 2
+
+#define IL4965_FW_PRE "iwlwifi-4965-"
+#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
+#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il4965_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
+
+/**
+ * il4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il4965_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int i;
+ u32 done;
+ u32 reg_offset;
+ int ret;
+
+ D_INFO("Begin load bsm\n");
+
+ il->ucode_type = UCODE_RT;
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL49_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+ * NOTE: il_init_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache.
+ */
+ pinst = il->ucode_init.p_addr >> 4;
+ pdata = il->ucode_init_data.p_addr >> 4;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ ret = il4965_verify_bsm(il);
+ if (ret)
+ return ret;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
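
The tail of il4965_load_bsm() is a bounded busy-wait on the BSM start bit. A self-contained model of that pattern, with the hardware replaced by a counter that clears the bit after a few reads:

#include <stdio.h>

#define START_BIT 0x80000000u	/* plays the role of BSM_WR_CTRL_REG_BIT_START */
#define MAX_POLLS 100

static unsigned bsm_ctrl;
static int busy_reads = 7;	/* mock: the copy "finishes" after a few reads */

static unsigned read_ctrl(void)
{
	if (busy_reads-- <= 0)
		bsm_ctrl &= ~START_BIT;	/* hardware clears the bit when done */
	return bsm_ctrl;
}

int main(void)
{
	int i;

	bsm_ctrl |= START_BIT;			/* trigger the bootstrap copy */
	for (i = 0; i < MAX_POLLS; i++)
		if (!(read_ctrl() & START_BIT))
			break;
	if (i < MAX_POLLS)
		printf("BSM-style write complete, poll %d iterations\n", i);
	else
		printf("write did not complete\n");
	return 0;
}
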
+
+/**
+ * il4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il4965_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ int ret = 0;
+
+ /* bits 35:4 for 4965 */
+ pinst = il->ucode_code.p_addr >> 4;
+ pdata = il->ucode_data_backup.p_addr >> 4;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be last to set up, bit 31 signals uCode
+ * that all new ptr/size info is in place */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return ret;
+}
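
The >> 4 above works because the registers hold DRAM address bits 35:4, so any 16-byte-aligned address of up to 36 bits fits in a 32-bit register, and the instruction byte count carries an extra bit that triggers the load. A worked example with made-up values; the exact BSM_DRAM_INST_LOAD position is assumed here to be bit 31, as the comment above suggests.

#include <stdint.h>
#include <stdio.h>

#define DRAM_INST_LOAD 0x80000000u	/* assumed stand-in for BSM_DRAM_INST_LOAD */

int main(void)
{
	uint64_t inst_dma = 0x2abcd1230ULL;	/* 36-bit, 16-byte aligned */
	uint32_t len = 0x17a50;

	uint32_t pinst = (uint32_t)(inst_dma >> 4);	/* keep bits 35:4 */
	uint32_t bytecount = len | DRAM_INST_LOAD;	/* bit 31 kicks the load */

	printf("INST_PTR_REG       = 0x%08x\n", pinst);
	printf("INST_BYTECOUNT_REG = 0x%08x\n", bytecount);
	printf("address restored   = 0x%09llx\n",
	       (unsigned long long)pinst << 4);
	return 0;
}
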
+
+/**
+ * il4965_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ * Voltage, temperature, and MIMO tx gain correction, now stored in il
+ * (3945 does not contain this data).
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+*/
+static void
+il4965_init_alive_start(struct il_priv *il)
+{
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Calculate temperature */
+ il->temperature = il4965_hw_get_temperature(il);
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il4965_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static bool
+iw4965_is_ht40_channel(__le32 rxon_flags)
+{
+ int chan_mod =
+ le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ return (chan_mod == CHANNEL_MODE_PURE_40 ||
+ chan_mod == CHANNEL_MODE_MIXED);
+}
+
+static void
+il4965_nic_config(struct il_priv *il)
+{
+ unsigned long flags;
+ u16 radio_cfg;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ il->calib_info =
+ (struct il_eeprom_calib_info *)
+ il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ * ... once chain noise is calibrated the first time, it's good forever. */
+static void
+il4965_chain_noise_reset(struct il_priv *il)
+{
+ struct il_chain_noise_data *data = &(il->chain_noise_data);
+
+ if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
+ struct il_calib_diff_gain_cmd cmd;
+
+ /* clear data for chain noise calibration algorithm */
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.diff_gain_a = 0;
+ cmd.diff_gain_b = 0;
+ cmd.diff_gain_c = 0;
+ if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
+ IL_ERR("Could not send C_PHY_CALIBRATION\n");
+ data->state = IL_CHAIN_NOISE_ACCUMULATE;
+ D_CALIB("Run chain_noise_calibrate\n");
+ }
+}
+
+static struct il_sensitivity_ranges il4965_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+
+ .auto_corr_min_ofdm = 85,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 220,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 140,
+ .auto_corr_max_ofdm_mrc_x1 = 270,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 200,
+ .auto_corr_max_cck_mrc = 400,
+
+ .nrg_th_cck = 100,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static void
+il4965_set_ct_threshold(struct il_priv *il)
+{
+ /* want Kelvin */
+ il->hw_params.ct_kill_threshold =
+ CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+}
+
+/**
+ * il4965_hw_set_hw_params
+ *
+ * Called when initializing driver
+ */
+static int
+il4965_hw_set_hw_params(struct il_priv *il)
+{
+ if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+ il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+ il->cfg->base_params->num_of_queues =
+ il->cfg->mod_params->num_of_queues;
+
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+ il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+ il->hw_params.scd_bc_tbls_size =
+ il->cfg->base_params->num_of_queues *
+ sizeof(struct il4965_scd_bc_tbl);
+ il->hw_params.tfd_size = sizeof(struct il_tfd);
+ il->hw_params.max_stations = IL4965_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
+ il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+ il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+ il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+ il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+ il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+ il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+ il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+ il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+ il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+ il4965_set_ct_threshold(il);
+
+ il->hw_params.sens = &il4965_sensitivity;
+ il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+static s32
+il4965_math_div_round(s32 num, s32 denom, s32 * res)
+{
+ s32 sign = 1;
+
+ if (num < 0) {
+ sign = -sign;
+ num = -num;
+ }
+ if (denom < 0) {
+ sign = -sign;
+ denom = -denom;
+ }
+ *res = 1;
+ *res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+ return 1;
+}
+
+/**
+ * il4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table idx,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table idx).
+ */
+static s32
+il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
+{
+ s32 comp = 0;
+
+ if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
+ TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
+ return 0;
+
+ il4965_math_div_round(current_voltage - eeprom_voltage,
+ TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
+
+ if (current_voltage > eeprom_voltage)
+ comp *= 2;
+ if ((comp < -2) || (comp > 2))
+ comp = 0;
+
+ return comp;
+}
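
The voltage compensation above is easier to follow with numbers. Here is a standalone version of the same arithmetic, with the rounding division inlined and TX_POWER_IL_VOLTAGE_CODES_PER_03V taken as 7 purely for illustration (the real value lives in the driver headers).

#include <stdio.h>
#include <stdlib.h>

#define CODES_PER_03V 7		/* assumption for this example only */

static int div_round(int num, int denom)
{
	int sign = ((num < 0) ^ (denom < 0)) ? -1 : 1;

	num = abs(num);
	denom = abs(denom);
	return sign * ((num * 2 + denom) / (denom * 2));	/* round to nearest */
}

static int voltage_comp(int eeprom_voltage, int current_voltage)
{
	int comp = div_round(current_voltage - eeprom_voltage, CODES_PER_03V);

	if (current_voltage > eeprom_voltage)
		comp *= 2;	/* indication is inverted: higher reading = lower voltage */
	if (comp < -2 || comp > 2)
		comp = 0;	/* distrust large corrections */
	return comp;
}

int main(void)
{
	printf("comp = %d half-dB steps\n", voltage_comp(330, 320));	/* -1 */
	printf("comp = %d half-dB steps\n", voltage_comp(300, 330));	/*  0, clamped */
	return 0;
}
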
+
+static s32
+il4965_get_tx_atten_grp(u16 channel)
+{
+ if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+ return CALIB_CH_GROUP_5;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+ return CALIB_CH_GROUP_1;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+ return CALIB_CH_GROUP_2;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+ return CALIB_CH_GROUP_3;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+ return CALIB_CH_GROUP_4;
+
+ return -EINVAL;
+}
+
+static u32
+il4965_get_sub_band(const struct il_priv *il, u32 channel)
+{
+ s32 b = -1;
+
+ for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+ if (il->calib_info->band_info[b].ch_from == 0)
+ continue;
+
+ if (channel >= il->calib_info->band_info[b].ch_from &&
+ channel <= il->calib_info->band_info[b].ch_to)
+ break;
+ }
+
+ return b;
+}
+
+static s32
+il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+ s32 val;
+
+ if (x2 == x1)
+ return y1;
+ else {
+ il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+ return val + y2;
+ }
+}
+
+/**
+ * il4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest. Interpolation is proportional to
+ * differences in channel frequencies, which is proportional to differences
+ * in channel number.
+ */
+static int
+il4965_interpolate_chan(struct il_priv *il, u32 channel,
+ struct il_eeprom_calib_ch_info *chan_info)
+{
+ s32 s = -1;
+ u32 c;
+ u32 m;
+ const struct il_eeprom_calib_measure *m1;
+ const struct il_eeprom_calib_measure *m2;
+ struct il_eeprom_calib_measure *omeas;
+ u32 ch_i1;
+ u32 ch_i2;
+
+ s = il4965_get_sub_band(il, channel);
+ if (s >= EEPROM_TX_POWER_BANDS) {
+ IL_ERR("Tx Power can not find channel %d\n", channel);
+ return -1;
+ }
+
+ ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
+ ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
+ chan_info->ch_num = (u8) channel;
+
+ D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
+ ch_i1, ch_i2);
+
+ for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+ for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+ m1 = &(il->calib_info->band_info[s].ch1.
+ measurements[c][m]);
+ m2 = &(il->calib_info->band_info[s].ch2.
+ measurements[c][m]);
+ omeas = &(chan_info->measurements[c][m]);
+
+ omeas->actual_pow =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->actual_pow, ch_i2,
+ m2->actual_pow);
+ omeas->gain_idx =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->gain_idx, ch_i2,
+ m2->gain_idx);
+ omeas->temperature =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->temperature,
+ ch_i2,
+ m2->temperature);
+ omeas->pa_det =
+ (s8) il4965_interpolate_value(channel, ch_i1,
+ m1->pa_det, ch_i2,
+ m2->pa_det);
+
+ D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
+ m, m1->actual_pow, m2->actual_pow,
+ omeas->actual_pow);
+ D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
+ m, m1->gain_idx, m2->gain_idx,
+ omeas->gain_idx);
+ D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
+ m, m1->pa_det, m2->pa_det, omeas->pa_det);
+ D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c,
+ m, m1->temperature, m2->temperature,
+ omeas->temperature);
+ }
+ }
+
+ return 0;
+}
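
The interpolation itself is ordinary linear interpolation between the two factory-calibrated channels of a sub-band, using the same round-to-nearest division as above. A standalone version with made-up sample numbers:

#include <stdio.h>
#include <stdlib.h>

static int div_round(int num, int denom)
{
	int sign = ((num < 0) ^ (denom < 0)) ? -1 : 1;

	num = abs(num);
	denom = abs(denom);
	return sign * ((num * 2 + denom) / (denom * 2));
}

static int interpolate(int x, int x1, int y1, int x2, int y2)
{
	if (x2 == x1)
		return y1;	/* degenerate sub-band: both samples on one channel */
	return y2 + div_round((x2 - x) * (y1 - y2), x2 - x1);
}

int main(void)
{
	/* factory gain index 88 at ch 36, 100 at ch 48; estimate ch 40 -> 92 */
	printf("ch 40 -> %d\n", interpolate(40, 36, 88, 48, 100));
	return 0;
}
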
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
+ 10 /* CCK */
+};
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct il4965_txpower_comp_entry {
+ s32 degrees_per_05db_a;
+ s32 degrees_per_05db_a_denom;
+} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
+	{9, 2},			/* group 0 5.2, ch 34-43 */
+	{4, 1},			/* group 1 5.2, ch 44-70 */
+	{4, 1},			/* group 2 5.2, ch 71-124 */
+	{4, 1},			/* group 3 5.2, ch 125-200 */
+	{3, 1}			/* group 4 2.4, ch all */
+};
+
+static s32
+get_min_power_idx(s32 rate_power_idx, u32 band)
+{
+ if (!band) {
+ if ((rate_power_idx & 7) <= 4)
+ return MIN_TX_GAIN_IDX_52GHZ_EXT;
+ }
+ return MIN_TX_GAIN_IDX;
+}
+
+struct gain_entry {
+ u8 dsp;
+ u8 radio;
+};
+
+static const struct gain_entry gain_table[2][108] = {
+ /* 5.2GHz power gain idx table */
+ {
+ {123, 0x3F}, /* highest txpower */
+ {117, 0x3F},
+ {110, 0x3F},
+ {104, 0x3F},
+ {98, 0x3F},
+ {110, 0x3E},
+ {104, 0x3E},
+ {98, 0x3E},
+ {110, 0x3D},
+ {104, 0x3D},
+ {98, 0x3D},
+ {110, 0x3C},
+ {104, 0x3C},
+ {98, 0x3C},
+ {110, 0x3B},
+ {104, 0x3B},
+ {98, 0x3B},
+ {110, 0x3A},
+ {104, 0x3A},
+ {98, 0x3A},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x25},
+ {104, 0x25},
+ {98, 0x25},
+ {110, 0x24},
+ {104, 0x24},
+ {98, 0x24},
+ {110, 0x23},
+ {104, 0x23},
+ {98, 0x23},
+ {110, 0x22},
+ {104, 0x18},
+ {98, 0x18},
+ {110, 0x17},
+ {104, 0x17},
+ {98, 0x17},
+ {110, 0x16},
+ {104, 0x16},
+ {98, 0x16},
+ {110, 0x15},
+ {104, 0x15},
+ {98, 0x15},
+ {110, 0x14},
+ {104, 0x14},
+ {98, 0x14},
+ {110, 0x13},
+ {104, 0x13},
+ {98, 0x13},
+ {110, 0x12},
+ {104, 0x08},
+ {98, 0x08},
+ {110, 0x07},
+ {104, 0x07},
+ {98, 0x07},
+ {110, 0x06},
+ {104, 0x06},
+ {98, 0x06},
+ {110, 0x05},
+ {104, 0x05},
+ {98, 0x05},
+ {110, 0x04},
+ {104, 0x04},
+ {98, 0x04},
+ {110, 0x03},
+ {104, 0x03},
+ {98, 0x03},
+ {110, 0x02},
+ {104, 0x02},
+ {98, 0x02},
+ {110, 0x01},
+ {104, 0x01},
+ {98, 0x01},
+ {110, 0x00},
+ {104, 0x00},
+ {98, 0x00},
+ {93, 0x00},
+ {88, 0x00},
+ {83, 0x00},
+ {78, 0x00},
+ },
+ /* 2.4GHz power gain idx table */
+ {
+ {110, 0x3f}, /* highest txpower */
+ {104, 0x3f},
+ {98, 0x3f},
+ {110, 0x3e},
+ {104, 0x3e},
+ {98, 0x3e},
+ {110, 0x3d},
+ {104, 0x3d},
+ {98, 0x3d},
+ {110, 0x3c},
+ {104, 0x3c},
+ {98, 0x3c},
+ {110, 0x3b},
+ {104, 0x3b},
+ {98, 0x3b},
+ {110, 0x3a},
+ {104, 0x3a},
+ {98, 0x3a},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x6},
+ {104, 0x6},
+ {98, 0x6},
+ {110, 0x5},
+ {104, 0x5},
+ {98, 0x5},
+ {110, 0x4},
+ {104, 0x4},
+ {98, 0x4},
+ {110, 0x3},
+ {104, 0x3},
+ {98, 0x3},
+ {110, 0x2},
+ {104, 0x2},
+ {98, 0x2},
+ {110, 0x1},
+ {104, 0x1},
+ {98, 0x1},
+ {110, 0x0},
+ {104, 0x0},
+ {98, 0x0},
+ {97, 0},
+ {96, 0},
+ {95, 0},
+ {94, 0},
+ {93, 0},
+ {92, 0},
+ {91, 0},
+ {90, 0},
+ {89, 0},
+ {88, 0},
+ {87, 0},
+ {86, 0},
+ {85, 0},
+ {84, 0},
+ {83, 0},
+ {82, 0},
+ {81, 0},
+ {80, 0},
+ {79, 0},
+ {78, 0},
+ {77, 0},
+ {76, 0},
+ {75, 0},
+ {74, 0},
+ {73, 0},
+ {72, 0},
+ {71, 0},
+ {70, 0},
+ {69, 0},
+ {68, 0},
+ {67, 0},
+ {66, 0},
+ {65, 0},
+ {64, 0},
+ {63, 0},
+ {62, 0},
+ {61, 0},
+ {60, 0},
+ {59, 0},
+ }
+};
+
+static int
+il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
+ u8 ctrl_chan_high,
+ struct il4965_tx_power_db *tx_power_tbl)
+{
+ u8 saturation_power;
+ s32 target_power;
+ s32 user_target_power;
+ s32 power_limit;
+ s32 current_temp;
+ s32 reg_limit;
+ s32 current_regulatory;
+ s32 txatten_grp = CALIB_CH_GROUP_MAX;
+ int i;
+ int c;
+ const struct il_channel_info *ch_info = NULL;
+ struct il_eeprom_calib_ch_info ch_eeprom_info;
+ const struct il_eeprom_calib_measure *measurement;
+ s16 voltage;
+ s32 init_voltage;
+ s32 voltage_compensation;
+ s32 degrees_per_05db_num;
+ s32 degrees_per_05db_denom;
+ s32 factory_temp;
+ s32 temperature_comp[2];
+ s32 factory_gain_idx[2];
+ s32 factory_actual_pwr[2];
+ s32 power_idx;
+
+ /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+ * are used for idxing into txpower table) */
+ user_target_power = 2 * il->tx_power_user_lmt;
+
+ /* Get current (RXON) channel, band, width */
+ D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
+
+ ch_info = il_get_channel_info(il, il->band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -EINVAL;
+
+ /* get txatten group, used to select 1) thermal txpower adjustment
+ * and 2) mimo txpower balance between Tx chains. */
+ txatten_grp = il4965_get_tx_atten_grp(channel);
+ if (txatten_grp < 0) {
+ IL_ERR("Can't find txatten group for channel %d.\n", channel);
+ return txatten_grp;
+ }
+
+ D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
+ txatten_grp);
+
+ if (is_ht40) {
+ if (ctrl_chan_high)
+ channel -= 2;
+ else
+ channel += 2;
+ }
+
+ /* hardware txpower limits ...
+ * saturation (clipping distortion) txpowers are in half-dBm */
+ if (band)
+ saturation_power = il->calib_info->saturation_power24;
+ else
+ saturation_power = il->calib_info->saturation_power52;
+
+ if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
+ saturation_power > IL_TX_POWER_SATURATION_MAX) {
+ if (band)
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
+ else
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
+ }
+
+ /* regulatory txpower limits ... reg_limit values are in half-dBm,
+ * max_power_avg values are in dBm, convert * 2 */
+ if (is_ht40)
+ reg_limit = ch_info->ht40_max_power_avg * 2;
+ else
+ reg_limit = ch_info->max_power_avg * 2;
+
+ if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
+ (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
+ if (band)
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
+ else
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
+ }
+
+ /* Interpolate txpower calibration values for this channel,
+ * based on factory calibration tests on spaced channels. */
+ il4965_interpolate_chan(il, channel, &ch_eeprom_info);
+
+ /* calculate tx gain adjustment based on power supply voltage */
+ voltage = le16_to_cpu(il->calib_info->voltage);
+ init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
+ voltage_compensation =
+ il4965_get_voltage_compensation(voltage, init_voltage);
+
+ D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
+ voltage, voltage_compensation);
+
+ /* get current temperature (Celsius) */
+ current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
+ current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX);
+ current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+ /* select thermal txpower adjustment params, based on channel group
+ * (same frequency group used for mimo txatten adjustment) */
+ degrees_per_05db_num =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+ degrees_per_05db_denom =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+ /* get per-chain txpower values from factory measurements */
+ for (c = 0; c < 2; c++) {
+ measurement = &ch_eeprom_info.measurements[c][1];
+
+ /* txgain adjustment (in half-dB steps) based on difference
+ * between factory and current temperature */
+ factory_temp = measurement->temperature;
+ il4965_math_div_round((current_temp -
+ factory_temp) * degrees_per_05db_denom,
+ degrees_per_05db_num,
+ &temperature_comp[c]);
+
+ factory_gain_idx[c] = measurement->gain_idx;
+ factory_actual_pwr[c] = measurement->actual_pow;
+
+ D_TXPOWER("chain = %d\n", c);
+ D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
+ factory_temp, current_temp, temperature_comp[c]);
+
+ D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
+ factory_actual_pwr[c]);
+ }
+
+ /* for each of 33 bit-rates (including 1 for CCK) */
+ for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
+ u8 is_mimo_rate;
+ union il4965_tx_power_dual_stream tx_power;
+
+ /* for mimo, reduce each chain's txpower by half
+ * (3dB, 6 steps), so total output power is regulatory
+ * compliant. */
+ if (i & 0x8) {
+ current_regulatory =
+ reg_limit -
+ IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+ is_mimo_rate = 1;
+ } else {
+ current_regulatory = reg_limit;
+ is_mimo_rate = 0;
+ }
+
+ /* find txpower limit, either hardware or regulatory */
+ power_limit = saturation_power - back_off_table[i];
+ if (power_limit > current_regulatory)
+ power_limit = current_regulatory;
+
+ /* reduce user's txpower request if necessary
+ * for this rate on this channel */
+ target_power = user_target_power;
+ if (target_power > power_limit)
+ target_power = power_limit;
+
+ D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
+ saturation_power - back_off_table[i],
+ current_regulatory, user_target_power, target_power);
+
+ /* for each of 2 Tx chains (radio transmitters) */
+ for (c = 0; c < 2; c++) {
+ s32 atten_value;
+
+ if (is_mimo_rate)
+ atten_value =
+ (s32) le32_to_cpu(il->card_alive_init.
+ tx_atten[txatten_grp][c]);
+ else
+ atten_value = 0;
+
+ /* calculate idx; higher idx means lower txpower */
+ power_idx =
+ (u8) (factory_gain_idx[c] -
+ (target_power - factory_actual_pwr[c]) -
+ temperature_comp[c] - voltage_compensation +
+ atten_value);
+
+/* D_TXPOWER("calculated txpower idx %d\n",
+ power_idx); */
+
+ if (power_idx < get_min_power_idx(i, band))
+ power_idx = get_min_power_idx(i, band);
+
+ /* adjust 5 GHz idx to support negative idxes */
+ if (!band)
+ power_idx += 9;
+
+ /* CCK, rate 32, reduce txpower for CCK */
+ if (i == POWER_TBL_CCK_ENTRY)
+ power_idx +=
+ IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+ /* stay within the table! */
+ if (power_idx > 107) {
+ IL_WARN("txpower idx %d > 107\n", power_idx);
+ power_idx = 107;
+ }
+ if (power_idx < 0) {
+ IL_WARN("txpower idx %d < 0\n", power_idx);
+ power_idx = 0;
+ }
+
+ /* fill txpower command for this rate/chain */
+ tx_power.s.radio_tx_gain[c] =
+ gain_table[band][power_idx].radio;
+ tx_power.s.dsp_predis_atten[c] =
+ gain_table[band][power_idx].dsp;
+
+ D_TXPOWER("chain %d mimo %d idx %d "
+ "gain 0x%02x dsp %d\n", c, atten_value,
+ power_idx, tx_power.s.radio_tx_gain[c],
+ tx_power.s.dsp_predis_atten[c]);
+ } /* for each chain */
+
+ tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+ } /* for each rate */
+
+ return 0;
+}
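
The heart of the loop above is the per-chain index computation, all in half-dB steps: start from the factory gain index, move it down for extra requested power, apply the temperature, voltage and (for MIMO) attenuation corrections, and clamp the result to the 108-entry gain table. A worked example with invented but plausible inputs; the 5 GHz +9 offset and the CCK adjustment are omitted.

#include <stdio.h>

int main(void)
{
	int factory_gain_idx = 70;	/* factory-measured table index */
	int factory_actual_pwr = 28;	/* 14 dBm, in half-dBm */
	int target_power = 32;		/* 16 dBm requested, in half-dBm */
	int temperature_comp = -3;	/* colder than at factory calibration (made up) */
	int voltage_comp = 1;
	int atten_value = 0;		/* non-MIMO rate */

	/* higher index means lower txpower, so wanting 2 dB more power
	 * (4 half-dB steps) moves the index down */
	int power_idx = factory_gain_idx
			- (target_power - factory_actual_pwr)
			- temperature_comp
			- voltage_comp
			+ atten_value;

	if (power_idx > 107)
		power_idx = 107;	/* stay within the 108-entry gain table */
	if (power_idx < 0)
		power_idx = 0;

	printf("power_idx = %d\n", power_idx);	/* 70 - 4 + 3 - 1 = 68 */
	return 0;
}
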
+
+/**
+ * il4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from il->tx_power_user_lmt.
+ */
+static int
+il4965_send_tx_power(struct il_priv *il)
+{
+ struct il4965_txpowertable_cmd cmd = { 0 };
+ int ret;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ONCE
+ (test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+
+ if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.channel = ctx->active.channel;
+
+ ret =
+ il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
+ is_ht40, ctrl_chan_high, &cmd.tx_power);
+ if (ret)
+ goto out;
+
+ ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
+
+out:
+ return ret;
+}
+
+static int
+il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int ret = 0;
+ struct il4965_rxon_assoc_cmd rxon_assoc;
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_ht_single_stream_basic_rates ==
+ rxon2->ofdm_ht_single_stream_basic_rates &&
+ rxon1->ofdm_ht_dual_stream_basic_rates ==
+ rxon2->ofdm_ht_dual_stream_basic_rates &&
+ rxon1->rx_chain == rxon2->rx_chain &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+ rxon_assoc.ofdm_ht_single_stream_basic_rates =
+ ctx->staging.ofdm_ht_single_stream_basic_rates;
+ rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+ ctx->staging.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+
+ ret =
+ il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
+ &rxon_assoc, NULL);
+
+ return ret;
+}
+
+static int
+il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
+ int ret;
+ bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (!il_is_alive(il))
+ return -EBUSY;
+
+ if (!ctx->is_active)
+ return 0;
+
+ /* always get timestamp with Rx frame */
+ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+ ret = il_check_rxon_cmd(il, ctx);
+ if (ret) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * A new commit_rxon request has arrived: abort any previous channel
+ * switch that is still in progress for a different channel.
+ */
+ if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
+ il->switch_channel != ctx->staging.channel) {
+ D_11H("abort channel switch on %d\n",
+ le16_to_cpu(il->switch_channel));
+ il_chswitch_done(il, false);
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, ctx)) {
+ ret = il_send_rxon_assoc(il, ctx);
+ if (ret) {
+ IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_print_rx_config_cmd(il, ctx);
+ /*
+ * We do not commit tx power settings while the channel is changing;
+ * do it now if the tx power has changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config requires
+ * an RXON_ASSOC and the new config wants the associated mask enabled,
+ * we must clear the associated bit from the active configuration
+ * before we apply the new config */
+ if (il_is_associated_ctx(ctx) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), active_rxon);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (ret) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
+ return ret;
+ }
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
+
+ il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
+
+ /* Apply the new configuration.
+ * RXON unassoc clears the station table in uCode, so stations need
+ * to be restored after the RXON command completes.
+ */
+ if (!new_assoc) {
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ D_INFO("Return from !new_assoc RXON.\n");
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+ if (new_assoc) {
+ il->start_calib = 0;
+ /* Apply the new configuration.
+ * RXON assoc doesn't clear the station table in uCode, so there is
+ * no need to restore stations here.
+ */
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ }
+ il_print_rx_config_cmd(il, ctx);
+
+ il4965_init_sensitivity(il);
+
+ /* If we issue a new RXON command that requires a tune, we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ ret = il_set_tx_power(il, il->tx_power_next, true);
+ if (ret) {
+ IL_ERR("Error sending TX power (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+il4965_hw_channel_switch(struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int rc;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il4965_channel_switch_cmd cmd;
+ const struct il_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+ struct ieee80211_vif *vif = ctx->vif;
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+
+ if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.expect_beacon = 0;
+ ch = ch_switch->channel->hw_value;
+ cmd.channel = cpu_to_le16(ch);
+ cmd.rxon_flags = ctx->staging.flags;
+ cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the uCode channel switch time, using the TSF as one of
+ * the factors in deciding when to switch
+ */
+ if (il->ucode_beacon_time > tsf_low && beacon_interval) {
+ if (switch_count >
+ ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
+ switch_count -=
+ (il->ucode_beacon_time - tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time =
+ il_usecs_to_beacons(il, switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time =
+ il_add_beacon_time(il, il->ucode_beacon_time,
+ ucode_switch_time, beacon_interval);
+ }
+ D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
+ ch_info = il_get_channel_info(il, il->band, ch);
+ if (ch_info)
+ cmd.expect_beacon = il_is_channel_radar(ch_info);
+ else {
+ IL_ERR("invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+
+ rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
+ &cmd.tx_power);
+ if (rc) {
+ D_11H("error:%d fill txpower_tbl\n", rc);
+ return rc;
+ }
+
+ return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void
+il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int write_ptr = txq->q.write_ptr;
+ int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ bc_ent = cpu_to_le16(len & 0xFFF);
+ /* Set up byte count within first 256 entries */
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ /* If within first 64 entries, duplicate at end */
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
+/**
+ * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ *
+ * Uses the temperature reading reported by the uCode in the latest stats
+ * notification (or the "initialize" ALIVE response if none has arrived yet).
+ *
+ * A return of <0 indicates bogus data in the stats
+ */
+static int
+il4965_hw_get_temperature(struct il_priv *il)
+{
+ s32 temperature;
+ s32 vt;
+ s32 R1, R2, R3;
+ u32 R4;
+
+ if (test_bit(S_TEMPERATURE, &il->status) &&
+ (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
+ D_TEMP("Running HT40 temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
+ } else {
+ D_TEMP("Running temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
+ }
+
+ /*
+ * Temperature is only 23 bits, so sign extend out to 32.
+ *
+ * NOTE: If we haven't received a stats notification yet
+ * with an updated temperature, use R4 provided to us in the
+ * "initialize" ALIVE response.
+ */
+ if (!test_bit(S_TEMPERATURE, &il->status))
+ vt = sign_extend32(R4, 23);
+ else
+ vt = sign_extend32(le32_to_cpu
+ (il->_4965.stats.general.common.temperature),
+ 23);
+
+ D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+ if (R3 == R1) {
+ IL_ERR("Calibration conflict R1 == R3\n");
+ return -1;
+ }
+
+ /* Calculate temperature in degrees Kelvin, adjust by 97%.
+ * Add offset to center the adjustment around 0 degrees Centigrade. */
+ temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+ temperature /= (R3 - R1);
+ temperature =
+ (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+ D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
+ KELVIN_TO_CELSIUS(temperature));
+
+ return temperature;
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold. */
+#define IL_TEMPERATURE_THRESHOLD 3
+
+/**
+ * il4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, then a recalibration
+ * is needed.
+ *
+ * Assumes the caller will replace il->last_temperature once the
+ * calibration has been executed.
+ */
+static int
+il4965_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ if (!test_bit(S_STATS, &il->status)) {
+ D_TEMP("Temperature not updated -- no stats.\n");
+ return 0;
+ }
+
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Temperature unchanged\n");
+ else
+ D_POWER("Getting warmer, delta %d\n", temp_diff);
+
+ if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
+ D_POWER(" => thermal txpower calib not needed\n");
+ return 0;
+ }
+
+ D_POWER(" => thermal txpower calib needed\n");
+
+ return 1;
+}
+
+static void
+il4965_temperature_calib(struct il_priv *il)
+{
+ s32 temp;
+
+ temp = il4965_hw_get_temperature(il);
+ if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+ return;
+
+ if (il->temperature != temp) {
+ if (il->temperature)
+ D_TEMP("Temperature changed " "from %dC to %dC\n",
+ KELVIN_TO_CELSIUS(il->temperature),
+ KELVIN_TO_CELSIUS(temp));
+ else
+ D_TEMP("Temperature " "initialized to %dC\n",
+ KELVIN_TO_CELSIUS(temp));
+ }
+
+ il->temperature = temp;
+ set_bit(S_TEMPERATURE, &il->status);
+
+ if (!il->disable_tx_power_cal &&
+ unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ il4965_is_temp_calib_needed(il))
+ queue_work(il->workqueue, &il->txpower_work);
+}
+
+static u16
+il4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return (u16) sizeof(struct il4965_rxon_cmd);
+ default:
+ return len;
+ }
+}
+
+static u16
+il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+ struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cmd->tid_disable_tx;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+ addsta->sleep_tx_count = cmd->sleep_tx_count;
+ addsta->reserved1 = cpu_to_le16(0);
+ addsta->reserved2 = cpu_to_le16(0);
+
+ return (u16) sizeof(struct il4965_addsta_cmd);
+}
+
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
+ return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+ struct il4965_tx_resp *tx_resp, int txq_id,
+ u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+ if (agg->wait_for_ba)
+ D_TX_REPLY("got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* num frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+ D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+ tx_resp->failure_frame);
+ D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+
+ /* Construct bit-map of pending frames within Tx win */
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_IDX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status &
+ (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (!hdr) {
+ IL_ERR("BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IL_ERR("BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n", idx,
+ SEQ_TO_SN(sc), hdr->seq_ctrl);
+ return -1;
+ }
+
+ D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+ SEQ_TO_SN(sc));
+
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
+ bitmap |= 1ULL << sh;
+ D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+ (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 * addr)
+{
+ int i;
+ int start = 0;
+ int ret = IL_INVALID_STATION;
+ unsigned long flags;
+
+ if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
+ start = IL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return il->ctx.bcast_sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = start; i < il->hw_params.max_stations; i++)
+ if (il->stations[i].used &&
+ (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+ ret = i;
+ goto out;
+ }
+
+ D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+ /*
+ * It is possible that more commands interacting with stations
+ * arrive before we have completed processing the addition of
+ * this station.
+ */
+ if (ret != IL_INVALID_STATION &&
+ (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+ ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+ (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+ IL_ERR("Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+ if (il->iw_mode == NL80211_IFTYPE_STATION) {
+ return IL_AP_ID;
+ } else {
+ u8 *da = ieee80211_get_DA(hdr);
+ return il4965_find_station(il, da);
+ }
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *info;
+ struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->u.status);
+ int uninitialized_var(tid);
+ int sta_id;
+ int freed;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ memset(&info->status, 0, sizeof(info->status));
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ sta_id = il4965_get_ra_sta_id(il, hdr);
+ if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+ IL_ERR("Station not known\n");
+ return;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (txq->sched_retry) {
+ const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+ struct il_ht_agg *agg = NULL;
+ WARN_ON(!qc);
+
+ agg = &il->stations[sta_id].tid[tid].agg;
+
+ il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+ /* check if BAR is needed */
+ if ((tx_resp->frame_count == 1) &&
+ !il4965_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+ "%d idx %d\n", scd_ssn, idx);
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc)
+ il4965_free_tfds_in_queue(il, sta_id, tid,
+ freed);
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+ }
+ } else {
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ D_TX_REPLY("TXQ %d status %s (0x%08x) "
+ "rate_n_flags 0x%x retries %d\n", txq_id,
+ il4965_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+ else if (sta_id == IL_INVALID_STATION)
+ D_TX_REPLY("Station not known\n");
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark)
+ il_wake_queue(il, txq);
+ }
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+ il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il4965_beacon_notif *beacon = (void *)pkt->u.raw;
+ u8 rate __maybe_unused =
+ il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ D_RX("beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+/* Set up 4965-specific Rx frame reply handlers */
+static void
+il4965_handler_setup(struct il_priv *il)
+{
+ /* Legacy Rx frames */
+ il->handlers[N_RX] = il4965_hdl_rx;
+ /* Tx response */
+ il->handlers[C_TX] = il4965_hdl_tx;
+ il->handlers[N_BEACON] = il4965_hdl_beacon;
+}
+
+static struct il_hcmd_ops il4965_hcmd = {
+ .rxon_assoc = il4965_send_rxon_assoc,
+ .commit_rxon = il4965_commit_rxon,
+ .set_rxon_chain = il4965_set_rxon_chain,
+};
+
+static void
+il4965_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il4965_post_associate(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_conf *conf = NULL;
+ int ret = 0;
+
+ if (!vif || !il->is_open)
+ return;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ il_set_rxon_ht(il, &il->current_ht_config);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+ vif->bss_conf.beacon_int);
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il_commit_rxon(il, ctx);
+
+ D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il4965_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ vif->type);
+ break;
+ }
+
+ /* The chain noise calibration will enable PM upon completion.
+ * If chain noise has already been run, then we need to enable
+ * power management here */
+ if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
+ il_power_update_mode(il, false);
+
+ /* Enable Rx differential gain and sensitivity calibrations */
+ il4965_chain_noise_reset(il);
+ il->start_calib = 1;
+}
+
+static void
+il4965_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!il_is_associated_ctx(ctx)) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing failed - "
+ "Attempting to continue.\n");
+
+ /* AP has all antennas */
+ il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
+ il_set_rxon_ht(il, &il->current_ht_config);
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* need to send beacon cmd before committing assoc RXON! */
+ il4965_send_beacon_cmd(il);
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+ }
+ il4965_send_beacon_cmd(il);
+}
+
+static struct il_hcmd_utils_ops il4965_hcmd_utils = {
+ .get_hcmd_size = il4965_get_hcmd_size,
+ .build_addsta_hcmd = il4965_build_addsta_hcmd,
+ .request_scan = il4965_request_scan,
+ .post_scan = il4965_post_scan,
+};
+
+static struct il_lib_ops il4965_lib = {
+ .set_hw_params = il4965_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
+ .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il4965_hw_txq_free_tfd,
+ .txq_init = il4965_hw_tx_queue_init,
+ .handler_setup = il4965_handler_setup,
+ .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
+ .init_alive_start = il4965_init_alive_start,
+ .load_ucode = il4965_load_bsm,
+ .dump_nic_error_log = il4965_dump_nic_error_log,
+ .dump_fh = il4965_dump_fh,
+ .set_channel_switch = il4965_hw_channel_switch,
+ .apm_ops = {
+ .init = il_apm_init,
+ .config = il4965_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
+ .acquire_semaphore = il4965_eeprom_acquire_semaphore,
+ .release_semaphore = il4965_eeprom_release_semaphore,
+ },
+ .send_tx_power = il4965_send_tx_power,
+ .update_chain_flags = il4965_update_chain_flags,
+ .temp_ops = {
+ .temperature = il4965_temperature_calib,
+ },
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il4965_ucode_rx_stats_read,
+ .tx_stats_read = il4965_ucode_tx_stats_read,
+ .general_stats_read = il4965_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il4965_legacy_ops = {
+ .post_associate = il4965_post_associate,
+ .config_ap = il4965_config_ap,
+ .manage_ibss_station = il4965_manage_ibss_station,
+ .update_bcast_stations = il4965_update_bcast_stations,
+};
+
+struct ieee80211_ops il4965_hw_ops = {
+ .tx = il4965_mac_tx,
+ .start = il4965_mac_start,
+ .stop = il4965_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il4965_configure_filter,
+ .set_key = il4965_mac_set_key,
+ .update_tkip_key = il4965_mac_update_tkip_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .ampdu_action = il4965_mac_ampdu_action,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il4965_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .channel_switch = il4965_mac_channel_switch,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static const struct il_ops il4965_ops = {
+ .lib = &il4965_lib,
+ .hcmd = &il4965_hcmd,
+ .utils = &il4965_hcmd_utils,
+ .led = &il4965_led_ops,
+ .legacy = &il4965_legacy_ops,
+ .ieee80211_ops = &il4965_hw_ops,
+};
+
+static struct il_base_params il4965_base_params = {
+ .eeprom_size = IL4965_EEPROM_IMG_SIZE,
+ .num_of_queues = IL49_NUM_QUEUES,
+ .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = true,
+ .led_compensation = 61,
+ .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+ .temperature_kelvin = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+};
+
+struct il_cfg il4965_cfg = {
+ .name = "Intel(R) Wireless WiFi Link 4965AGN",
+ .fw_name_pre = IL4965_FW_PRE,
+ .ucode_api_max = IL4965_UCODE_API_MAX,
+ .ucode_api_min = IL4965_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_ABC,
+ .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+ .ops = &il4965_ops,
+ .mod_params = &il4965_mod_params,
+ .base_params = &il4965_base_params,
+ .led_mode = IL_LED_BLINK,
+ /*
+ * Force use of chains B and C for scan RX on 5 GHz band
+ * because the device has off-channel reception on chain A.
+ */
+ .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 00000000000..74472314bc3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1309 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_4965_h__
+#define __il_4965_h__
+
+struct il_rx_queue;
+struct il_rx_buf;
+struct il_rx_pkt;
+struct il_tx_queue;
+struct il_rxon_context;
+
+/* configuration for the _4965 devices */
+extern struct il_cfg il4965_cfg;
+
+extern struct il_mod_params il4965_mod_params;
+
+extern struct ieee80211_ops il4965_hw_ops;
+
+/* tx queue */
+void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
+ int freed);
+
+/* RXON */
+void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
+
+/* uCode */
+int il4965_verify_ucode(struct il_priv *il);
+
+/* lib */
+void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
+
+void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_hw_nic_init(struct il_priv *il);
+int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+
+/* rx */
+void il4965_rx_queue_restock(struct il_priv *il);
+void il4965_rx_replenish(struct il_priv *il);
+void il4965_rx_replenish_now(struct il_priv *il);
+void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rxq_stop(struct il_priv *il);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_rx_handle(struct il_priv *il);
+
+/* tx */
+void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn);
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
+void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
+void il4965_hw_txq_ctx_free(struct il_priv *il);
+int il4965_txq_ctx_alloc(struct il_priv *il);
+void il4965_txq_ctx_reset(struct il_priv *il);
+void il4965_txq_ctx_stop(struct il_priv *il);
+void il4965_txq_set_sched(struct il_priv *il, u32 mask);
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
+/**
+ * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE: Acquire il->lock before calling this function !
+ */
+void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+
+u8 il4965_toggle_tx_ant(struct il_priv *il, u8 ant_idx, u8 valid);
+
+/* rx */
+void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
+bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
+void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* scan */
+int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add);
+
+/* hcmd */
+int il4965_send_beacon_cmd(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/* station management */
+int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
+int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r);
+int il4965_remove_default_wep_key(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_restore_default_wep_keys(struct il_priv *il,
+ struct il_rxon_context *ctx);
+int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
+int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid);
+void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
+int il4965_update_bcast_stations(struct il_priv *il);
+
+/* rate */
+static inline u8
+il4965_hw_get_rate(__le32 rate_n_flags)
+{
+ return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline __le32
+il4965_hw_set_rate_n_flags(u8 rate, u32 flags)
+{
+ return cpu_to_le32(flags | (u32) rate);
+}
+
+/* eeprom */
+void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
+int il4965_eeprom_acquire_semaphore(struct il_priv *il);
+void il4965_eeprom_release_semaphore(struct il_priv *il);
+int il4965_eeprom_check_version(struct il_priv *il);
+
+/* mac80211 handlers (for 4965) */
+void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int il4965_mac_start(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size);
+int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+
+void il4965_led_enable(struct il_priv *il);
+
+/* EEPROM */
+#define IL4965_EEPROM_IMG_SIZE 1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IL49_FIRST_AMPDU_QUEUE 7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL49_RTC_INST_LOWER_BOUND (0x000000)
+#define IL49_RTC_INST_UPPER_BOUND (0x018000)
+
+#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
+
+#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
+ IL49_RTC_INST_LOWER_BOUND)
+#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
+ IL49_RTC_DATA_LOWER_BOUND)
+
+#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
+#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int
+il4965_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
+ addr < IL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent). The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct il4965_init_alive_resp). After the runtime uCode
+ * image loads, uCode updates the R4 value via stats notifications
+ * (see N_STATS), which occur after each received beacon
+ * when associated, or can be requested via C_STATS.
+ *
+ * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
+ * must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) re-centers
+ * the 97/100 correction around 0 degrees C (273 K) rather than 0 K.
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
+ * the current temperature with factory-measured temperatures when calculating
+ * txpower settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IL_TX_POWER_TEMPERATURE_MIN (263)
+#define IL_TX_POWER_TEMPERATURE_MAX (410)
+
+#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+ ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
+ (t) > IL_TX_POWER_TEMPERATURE_MAX)
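+
+/*
+ * Minimal sketch of the formula above; it mirrors the arithmetic in
+ * il4965_hw_get_temperature() and is included here only as an illustration
+ * (R1-R3 are the calibration constants, R4 the current raw reading).
+ */
+static inline s32
+il4965_temperature_formula(s32 R1, s32 R2, s32 R3, u32 R4)
+{
+ /* R4 is a 23-bit signed value; sign-extend it before use */
+ s32 vt = sign_extend32(R4, 23);
+ s32 temp;
+
+ if (R3 == R1) /* avoid divide-by-zero on bad calibration data */
+ return -1;
+
+ temp = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+ temp /= (R3 - R1);
+ /* 97% correction centered at 273 K, plus the 8 K offset */
+ return (temp * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+}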
+
+/********************* END TEMPERATURE ***************************************/
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ * 1) EEPROM
+ * 2) "initialize" alive notification
+ * 3) stats notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1) Regulatory information (max txpower and channel usage flags) is provided
+ * separately for each channel that can possibly be supported by 4965.
+ * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ * (legacy) channels.
+ *
+ * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
+ * for locations in EEPROM.
+ *
+ * 2) Factory txpower calibration information is provided separately for
+ * sub-bands of contiguous channels. 2.4 GHz has just one sub-band,
+ * but 5 GHz has several sub-bands.
+ *
+ * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ * See struct il4965_eeprom_calib_info (and the tree of structures
+ * contained within it) for format, and struct il4965_eeprom for
+ * locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct il4965_init_alive_resp)
+ * consists of:
+ *
+ * 1) Temperature calculation parameters.
+ *
+ * 2) Power supply voltage measurement.
+ *
+ * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1) Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ * Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * 2 transmitters will be used simultaneously; driver must reduce the
+ * regulatory limit by 3 dB (half-power) for each transmitter, so the
+ * combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
+ * reduce target txpower if necessary.
+ *
+ * Backoff values below are in 1/2 dB units (equivalent to steps in
+ * txpower gain tables):
+ *
+ * OFDM 6 - 36 MBit: 10 steps (5 dB)
+ * OFDM 48 MBit: 15 steps (7.5 dB)
+ * OFDM 54 MBit: 17 steps (8.5 dB)
+ * OFDM 60 MBit: 20 steps (10 dB)
+ * CCK all rates: 10 steps (5 dB)
+ *
+ * Backoff values apply to saturation txpower on a per-transmitter basis;
+ * when using MIMO (2 transmitters), each transmitter uses the same
+ * saturation level provided in EEPROM, and the same backoff values;
+ * no reduction (such as with regulatory txpower limits) is required.
+ *
+ * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
+ * widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ * factory measurement for HT40 channels.
+ *
+ * The result of this step is the final target txpower. The rest of
+ * the steps figure out the proper settings for the device to achieve
+ * that target txpower.
+ *
+ *
+ * 3) Determine (EEPROM) calibration sub band for the target channel, by
+ * comparing against first and last channels in each sub band
+ * (see struct il4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
+ * referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ * Interpolation is based on difference between target channel's frequency
+ * and the sample channels' frequencies. Since channel numbers are based
+ * on frequency (5 MHz between each channel number), this is equivalent
+ * to interpolating based on channel number differences.
+ *
+ * Note that the sample channels may or may not be the channels at the
+ * edges of the sub band. The target channel may be "outside" of the
+ * span of the sampled channels.
+ *
+ * Driver may choose the pair (for 2 Tx chains) of measurements (see
+ * struct il4965_eeprom_calib_ch_info) for which the actual measured
+ * txpower comes closest to the desired txpower. Usually, though,
+ * the middle set of measurements is closest to the regulatory limits,
+ * and is therefore a good choice for all txpower calculations (this
+ * assumes that high accuracy is needed for maximizing legal txpower,
+ * while lower txpower configurations do not need as much accuracy).
+ *
+ * Driver should interpolate both members of the chosen measurement pair,
+ * i.e. for both Tx chains (radio transmitters), unless the driver knows
+ * that only one of the chains will be used (e.g. only one tx antenna
+ * connected, but this should be unusual). The rate scaling algorithm
+ * switches antennas to find best performance, so both Tx chains will
+ * be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ * Driver should interpolate factory values for temperature, gain table
+ * idx, and actual power. The power amplifier detector values are
+ * not used by the driver.
+ *
+ * Sanity check: If the target channel happens to be one of the sample
+ * channels, the results should agree with the sample channel's
+ * measurements!
+ *
+ *
+ * 5) Find difference between desired txpower and (interpolated)
+ * factory-measured txpower. Using (interpolated) factory gain table idx
+ * (shown elsewhere) as a starting point, adjust this idx lower to
+ * increase txpower, or higher to decrease txpower, until the target
+ * txpower is reached. Each step in the gain table is 1/2 dB.
+ *
+ * For example, if factory measured txpower is 16 dBm, and target txpower
+ * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
+ * by 3 dB.
+ *
+ *
+ * 6) Find difference between current device temperature and (interpolated)
+ * factory-measured temperature for sub-band. Factory values are in
+ * degrees Celsius. To calculate current temperature, see comments for
+ * "4965 temperature calculation".
+ *
+ * If current temperature is higher than factory temperature, driver must
+ * increase gain (lower gain table idx), and vice versa.
+ *
+ * Temperature affects gain differently for different channels:
+ *
+ * 2.4 GHz all channels: 3.5 degrees per half-dB step
+ * 5 GHz channels 34-43: 4.5 degrees per half-dB step
+ * 5 GHz channels >= 44: 4.0 degrees per half-dB step
+ *
+ * NOTE: Temperature can increase rapidly when transmitting, especially
+ * with heavy traffic at high txpowers. Driver should update
+ * temperature calculations often under these conditions to
+ * maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7) Find difference between current power supply voltage indicator
+ * (from "initialize alive") and factory-measured power supply voltage
+ * indicator (EEPROM).
+ *
+ * If the current voltage is higher (indicator is lower) than factory
+ * voltage, gain should be reduced (gain table idx increased) by:
+ *
+ * (eeprom - current) / 7
+ *
+ * If the current voltage is lower (indicator is higher) than factory
+ * voltage, gain should be increased (gain table idx decreased) by:
+ *
+ * 2 * (current - eeprom) / 7
+ *
+ * If number of idx steps in either direction turns out to be > 2,
+ * something is wrong ... just use 0.
+ *
+ * NOTE: Voltage compensation is independent of band/channel.
+ *
+ * NOTE: "Initialize" uCode measures current voltage, which is assumed
+ * to be constant after this initial measurement. Voltage
+ * compensation for txpower (number of steps in gain table)
+ * may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * adjust txpower for each transmitter chain, so txpower is balanced
+ * between the two chains. There are 5 pairs of tx_atten[group][chain]
+ * values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ * Group 0: 5 GHz channel 34-43
+ * Group 1: 5 GHz channel 44-70
+ * Group 2: 5 GHz channel 71-124
+ * Group 3: 5 GHz channel 125-200
+ * Group 4: 2.4 GHz all channels
+ *
+ * Add the tx_atten[group][chain] value to the idx for the target chain.
+ * The values are signed, but are in pairs of 0 and a non-negative number,
+ * so as to reduce gain (if necessary) of the "hotter" chain. This
+ * avoids any need to double-check for regulatory compliance after
+ * this step.
+ *
+ *
+ * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ * value to the idx:
+ *
+ * Hardware rev B: 9 steps (4.5 dB)
+ * Hardware rev C: 5 steps (2.5 dB)
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ *
+ * NOTE: This compensation is in addition to any saturation backoff that
+ * might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ * Limit the adjusted idx to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ * location(s) in command (struct il4965_txpowertable_cmd).
+ */
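+
+/*
+ * Condensed sketch of steps 5) - 11) above. The helpers named here
+ * (interpolated_*(), temperature_comp(), voltage_comp()) are placeholders
+ * for the interpolation/compensation steps, not real driver functions;
+ * the idx arithmetic and clamping follow il4965_fill_txpower_tbl().
+ * Powers are in half-dBm, so a difference of one equals one 1/2-dB step.
+ */
+#if 0
+ s32 idx = interpolated_factory_gain_idx(ch, chain);
+
+ /* 5) move toward the target power: higher idx = lower txpower */
+ idx += interpolated_factory_power(ch, chain) - target_power;
+
+ /* 6) thermal compensation: hotter than factory => more gain (lower idx) */
+ idx -= temperature_comp(current_temp, factory_temp, ch);
+
+ /* 7) voltage compensation, limited to 2 steps in either direction */
+ idx += voltage_comp(init_voltage, eeprom_voltage);
+
+ /* 8) per-chain MIMO balance from the "initialize alive" notification */
+ idx += tx_atten[group][chain];
+
+ /* 9) CCK backoff: 9 steps on hardware rev B, 5 steps on rev C */
+ if (is_cck_rate)
+ idx += cck_comp_steps;
+
+ /* 10) clamp to the chosen band's gain table, then 11) read the DSP and
+ * radio gain values for this entry into the txpower command */
+#endif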
+
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device. That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" represents number of steps in gain table to reduce power 3 dB.
+ * Each step is 1/2 dB.
+ */
+#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
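+
+/*
+ * Sketch of how this is applied; "power_idx" is the working gain-table
+ * idx as in il4965_fill_txpower_tbl(), "is_mimo_rate" is illustrative:
+ */
+#if 0
+ if (is_mimo_rate)
+ power_idx += IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+#endif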
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table idx.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
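+
+/*
+ * Sketch of selecting the step count from the hardware rev described
+ * above; "hw_rev_wa" stands for the value read from CSR_HW_REV_WA_REG
+ * (the register accessor itself is omitted here):
+ */
+#if 0
+ u8 cck_comp_steps = (((hw_rev_wa >> 2) & 0x3) == 1) ?
+ IL_TX_POWER_CCK_COMPENSATION_B_STEP :
+ IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+#endif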
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
+
+/**
+ * Gain tables.
+ *
+ * The following tables contain pairs of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB. Note that these
+ * are *relative* steps, not indications of absolute output power. Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
+ * linear value that multiplies the output of the digital signal processor,
+ * before being sent to the analog radio.
+ * 2) Radio gain. This sets the analog gain of the radio Tx path.
+ * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower. This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
+ * has an extension (into negative idxes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration). A 5 GHz EEPROM idx of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
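+
+/*
+ * Sketch of the resulting idx mapping; this is the "+ 9" adjustment
+ * visible in il4965_fill_txpower_tbl() ("is_2ghz" is illustrative):
+ */
+#if 0
+ if (!is_2ghz) /* 5 GHz table extends 9 entries below idx 0 */
+ table_entry = calculated_idx + 9; /* idx -9 -> entry 0, idx 40 -> entry 49 */
+#endif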
+
+/**
+ * 2.4 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * 0 110 0x3f (highest gain)
+ * 1 104 0x3f
+ * 2 98 0x3f
+ * 3 110 0x3e
+ * 4 104 0x3e
+ * 5 98 0x3e
+ * 6 110 0x3d
+ * 7 104 0x3d
+ * 8 98 0x3d
+ * 9 110 0x3c
+ * 10 104 0x3c
+ * 11 98 0x3c
+ * 12 110 0x3b
+ * 13 104 0x3b
+ * 14 98 0x3b
+ * 15 110 0x3a
+ * 16 104 0x3a
+ * 17 98 0x3a
+ * 18 110 0x39
+ * 19 104 0x39
+ * 20 98 0x39
+ * 21 110 0x38
+ * 22 104 0x38
+ * 23 98 0x38
+ * 24 110 0x37
+ * 25 104 0x37
+ * 26 98 0x37
+ * 27 110 0x36
+ * 28 104 0x36
+ * 29 98 0x36
+ * 30 110 0x35
+ * 31 104 0x35
+ * 32 98 0x35
+ * 33 110 0x34
+ * 34 104 0x34
+ * 35 98 0x34
+ * 36 110 0x33
+ * 37 104 0x33
+ * 38 98 0x33
+ * 39 110 0x32
+ * 40 104 0x32
+ * 41 98 0x32
+ * 42 110 0x31
+ * 43 104 0x31
+ * 44 98 0x31
+ * 45 110 0x30
+ * 46 104 0x30
+ * 47 98 0x30
+ * 48 110 0x6
+ * 49 104 0x6
+ * 50 98 0x6
+ * 51 110 0x5
+ * 52 104 0x5
+ * 53 98 0x5
+ * 54 110 0x4
+ * 55 104 0x4
+ * 56 98 0x4
+ * 57 110 0x3
+ * 58 104 0x3
+ * 59 98 0x3
+ * 60 110 0x2
+ * 61 104 0x2
+ * 62 98 0x2
+ * 63 110 0x1
+ * 64 104 0x1
+ * 65 98 0x1
+ * 66 110 0x0
+ * 67 104 0x0
+ * 68 98 0x0
+ * 69 97 0
+ * 70 96 0
+ * 71 95 0
+ * 72 94 0
+ * 73 93 0
+ * 74 92 0
+ * 75 91 0
+ * 76 90 0
+ * 77 89 0
+ * 78 88 0
+ * 79 87 0
+ * 80 86 0
+ * 81 85 0
+ * 82 84 0
+ * 83 83 0
+ * 84 82 0
+ * 85 81 0
+ * 86 80 0
+ * 87 79 0
+ * 88 78 0
+ * 89 77 0
+ * 90 76 0
+ * 91 75 0
+ * 92 74 0
+ * 93 73 0
+ * 94 72 0
+ * 95 71 0
+ * 96 70 0
+ * 97 69 0
+ * 98 68 0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * -9 123 0x3F (highest gain)
+ * -8 117 0x3F
+ * -7 110 0x3F
+ * -6 104 0x3F
+ * -5 98 0x3F
+ * -4 110 0x3E
+ * -3 104 0x3E
+ * -2 98 0x3E
+ * -1 110 0x3D
+ * 0 104 0x3D
+ * 1 98 0x3D
+ * 2 110 0x3C
+ * 3 104 0x3C
+ * 4 98 0x3C
+ * 5 110 0x3B
+ * 6 104 0x3B
+ * 7 98 0x3B
+ * 8 110 0x3A
+ * 9 104 0x3A
+ * 10 98 0x3A
+ * 11 110 0x39
+ * 12 104 0x39
+ * 13 98 0x39
+ * 14 110 0x38
+ * 15 104 0x38
+ * 16 98 0x38
+ * 17 110 0x37
+ * 18 104 0x37
+ * 19 98 0x37
+ * 20 110 0x36
+ * 21 104 0x36
+ * 22 98 0x36
+ * 23 110 0x35
+ * 24 104 0x35
+ * 25 98 0x35
+ * 26 110 0x34
+ * 27 104 0x34
+ * 28 98 0x34
+ * 29 110 0x33
+ * 30 104 0x33
+ * 31 98 0x33
+ * 32 110 0x32
+ * 33 104 0x32
+ * 34 98 0x32
+ * 35 110 0x31
+ * 36 104 0x31
+ * 37 98 0x31
+ * 38 110 0x30
+ * 39 104 0x30
+ * 40 98 0x30
+ * 41 110 0x25
+ * 42 104 0x25
+ * 43 98 0x25
+ * 44 110 0x24
+ * 45 104 0x24
+ * 46 98 0x24
+ * 47 110 0x23
+ * 48 104 0x23
+ * 49 98 0x23
+ * 50 110 0x22
+ * 51 104 0x18
+ * 52 98 0x18
+ * 53 110 0x17
+ * 54 104 0x17
+ * 55 98 0x17
+ * 56 110 0x16
+ * 57 104 0x16
+ * 58 98 0x16
+ * 59 110 0x15
+ * 60 104 0x15
+ * 61 98 0x15
+ * 62 110 0x14
+ * 63 104 0x14
+ * 64 98 0x14
+ * 65 110 0x13
+ * 66 104 0x13
+ * 67 98 0x13
+ * 68 110 0x12
+ * 69 104 0x08
+ * 70 98 0x08
+ * 71 110 0x07
+ * 72 104 0x07
+ * 73 98 0x07
+ * 74 110 0x06
+ * 75 104 0x06
+ * 76 98 0x06
+ * 77 110 0x05
+ * 78 104 0x05
+ * 79 98 0x05
+ * 80 110 0x04
+ * 81 104 0x04
+ * 82 98 0x04
+ * 83 110 0x03
+ * 84 104 0x03
+ * 85 98 0x03
+ * 86 110 0x02
+ * 87 104 0x02
+ * 88 98 0x02
+ * 89 110 0x01
+ * 90 104 0x01
+ * 91 98 0x01
+ * 92 110 0x00
+ * 93 104 0x00
+ * 94 98 0x00
+ * 95 93 0x00
+ * 96 88 0x00
+ * 97 83 0x00
+ * 98 78 0x00
+ */
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated. These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (i.e. "34" means 17 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
+#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
+#define IL_TX_POWER_REGULATORY_MIN (0)
+#define IL_TX_POWER_REGULATORY_MAX (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion. This is a "peak" power level.
+ * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it violates neither the saturation limit nor
+ * the regulatory limit when calculating Tx power settings for the various
+ * rates.
+ *
+ * Units are in half-dBm (i.e. "38" means 19 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
+#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
+#define IL_TX_POWER_SATURATION_MIN (20)
+#define IL_TX_POWER_SATURATION_MAX (50)
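+
+/*
+ * Illustrative sketch (hypothetical helper, not declared by this header):
+ * sanity-checking a half-dBm saturation value read from EEPROM against the
+ * limits above, falling back to the band's default when it is out of range.
+ *
+ *	static s32 il4965_sat_or_default(s32 eeprom_val, s32 band_default)
+ *	{
+ *		if (eeprom_val < IL_TX_POWER_SATURATION_MIN ||
+ *		    eeprom_val > IL_TX_POWER_SATURATION_MAX)
+ *			return band_default;
+ *		return eeprom_val;
+ *	}
+ *
+ * band_default would be IL_TX_POWER_DEFAULT_SATURATION_24 or _52; e.g. the
+ * default of 38 half-dBm corresponds to 19 dBm.
+ */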
+
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for current device
+ * temperature; higher temperature requires higher gain. Driver must calculate
+ * current temperature (see "4965 temperature calculation"), then compare vs.
+ * factory calibration temperature in EEPROM; if current temperature is higher
+ * than factory temperature, driver must *increase* gain by proportions shown
+ * in table below. If current temperature is lower than factory, driver must
+ * *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IL_TX_ATTEN_GR5_LCH 20
+
+enum {
+ CALIB_CH_GROUP_1 = 0,
+ CALIB_CH_GROUP_2 = 1,
+ CALIB_CH_GROUP_3 = 2,
+ CALIB_CH_GROUP_4 = 3,
+ CALIB_CH_GROUP_5 = 4,
+ CALIB_CH_GROUP_MAX
+};
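+
+/*
+ * Illustrative sketch (hypothetical helper): mapping a channel number to its
+ * calibration group using the boundaries above. Only two groups are shown;
+ * the remaining groups follow the same pattern.
+ *
+ *	static int il4965_tx_atten_group(u16 channel)
+ *	{
+ *		if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+ *		    channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+ *			return CALIB_CH_GROUP_5;
+ *		if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+ *		    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+ *			return CALIB_CH_GROUP_1;
+ *		return -1;
+ *	}
+ */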
+
+/********************* END TXPOWER *****************************************/
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues). All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device. When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IL49_NUM_FIFOS 7
+#define IL49_CMD_FIFO_NUM 4
+#define IL49_NUM_QUEUES 16
+#define IL49_NUM_AMPDU_QUEUES 8
+
+/**
+ * struct il4965_schedq_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
+ * max Tx win is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
+ * must duplicate the byte count entry in corresponding idx 256-319.
+ *
+ * Padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct il4965_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+ u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
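+
+/*
+ * Illustrative sketch (hypothetical helper): entering a frame's byte count
+ * into the table above, duplicating entries 0-63 at idx 256-319 as required.
+ *
+ *	static void il4965_bc_tbl_set(struct il4965_scd_bc_tbl *bc_tbl,
+ *				      int idx, u16 byte_cnt)
+ *	{
+ *		bc_tbl->tfd_offset[idx] = cpu_to_le16(byte_cnt);
+ *		if (idx < 64)
+ *			bc_tbl->tfd_offset[idx + 256] = cpu_to_le16(byte_cnt);
+ *	}
+ */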
+
+#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
+
+/* RSSI to dBm */
+#define IL4965_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IL4965_DEFAULT_TX_RETRY 15
+
+/* EEPROM */
+#define IL4965_FIRST_AMPDU_QUEUE 10
+
+/* Calibration */
+void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
+void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
+void il4965_init_sensitivity(struct il_priv *il);
+void il4965_reset_run_time_calib(struct il_priv *il);
+void il4965_calib_free_results(struct il_priv *il);
+
+/* Debug */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH49_MEM_LOWER_BOUND (0x1000)
+#define FH49_MEM_UPPER_BOUND (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned. Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
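+
+/*
+ * Illustrative sketch (assumes a register-write helper with the shape
+ * il_wr(il, reg, val) and a 4 KByte, 4K-aligned DMA-coherent buffer at
+ * physical address kw_dma): programming the keep-warm base address.
+ *
+ *	il_wr(il, FH49_KW_MEM_ADDR_REG, (u32)(kw_dma >> 4));
+ *
+ * Shifting right by 4 yields address bits [35:4], per the layout above.
+ */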
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
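+
+/*
+ * Illustrative sketch (assumes il_wr() as above and a 256-byte-aligned TFD
+ * circular buffer at physical address tfd_dma for Tx queue txq_id):
+ *
+ *	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), (u32)(tfd_dma >> 8));
+ *
+ * Shifting right by 8 yields address bits [35:8], per the layout above.
+ */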
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ * entries (although any power of 2, up to 4096, is selectable by driver).
+ * Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ * (typically 4K, although 8K or 16K are also selectable by driver).
+ * Driver sets up RB size and number of RBDs in the CB via Rx config
+ * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ * Bit fields within one RBD:
+ * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ * Driver sets physical address [35:8] of base of RBD circular buffer
+ * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ * (RBs) have been filled, via a "write pointer", actually the idx of
+ * the RB's corresponding RBD within the circular buffer. Driver sets
+ * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ * Bit fields in lower dword of Rx status buffer (upper dword not used
+ * by driver; see struct il4965_shared, val0):
+ * 31-12: Not used by driver
+ * 11- 0: Index of last filled Rx buffer descriptor
+ * (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" idx register,
+ * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" idx corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer. This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" idx has advanced past 1! See below).
+ * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the idx of the latest filled RBD. The driver must
+ * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third idx, which is the
+ * next RBD to process. When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" idx. For example, if "read" idx becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read idx == write idx, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" idxes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
+#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (idx, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
+#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
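+
+/*
+ * Illustrative sketch (assumes il_wr(); 'write' is the driver's count of
+ * prepared RBDs): the device expects the write idx in multiples of 8, so
+ * round down before handing it over.
+ *
+ *	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, write & ~0x7);
+ */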
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
+ * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ * typical value 0x10 (about 1/2 msec)
+ * 3- 0: reserved
+ */
+#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
+#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
+
+#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
+
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
+
+#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
+#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
+#define RX_RB_TIMEOUT (0x10)
+
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
+
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
+
+#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
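+
+/*
+ * Illustrative sketch (assumes il_wr()): a typical channel 0 Rx config per
+ * the bit-field description above: DMA enabled, 4K RBs, 256 RBDs (2^8),
+ * interrupt to host, and the usual RB timeout.
+ *
+ *	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ *	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ *	      FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ *	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ *	      (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ *	      (8 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+ */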
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ * 24: 1 = Channel 0 is idle
+ *
+ * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
+#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+
+#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
+#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+ (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
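+
+/*
+ * Illustrative sketch (assumes il_wr() and a poll helper with the shape
+ * il_poll_bit(il, reg, bits, mask, timeout_usec)): stopping Rx DMA and
+ * waiting for the channel to report idle, as described above.
+ *
+ *	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ *	il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+ *		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ *		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ */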
+
+#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+
+/* TFDB Area - TFDs buffer table */
+#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
+#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
+#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ * 3: Enable internal DMA requests (1, normal operation), disable (0)
+ * 2- 0: Reserved, set to "0"
+ */
+#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM (7)
+#define FH50_TCSR_CHNL_NUM (8)
+
+/* TCSR: tx_config register values */
+#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
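+
+/*
+ * Illustrative sketch (assumes il_wr()): enabling Tx DMA channel 'chnl' with
+ * the two bits called out in the TCSR description above.
+ *
+ *	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE |
+ *	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+ */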
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24: 1 = Channel buffers empty (channel 7:0)
+ * 23-16: 1 = No pending requests (channel 7:0)
+ */
+#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
+#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR (Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with the previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
+
+#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
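+
+/*
+ * Illustrative sketch (assumes il_wr() and il_poll_bit() as above): stopping
+ * a Tx DMA channel and waiting until it reports idle.
+ *
+ *	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
+ *	il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
+ *		    FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
+ *		    FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
+ */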
+
+/* Tx service channels */
+#define FH49_SRVC_CHNL (9)
+#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
+#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from memory to the TX FIFO
+ */
+#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+/* Keep Warm Size */
+#define IL_KW_SIZE 0x1000 /* 4k */
+
+#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd4766..05bd375cb84 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
-config IWLWIFI_LEGACY
+config IWLEGACY
tristate
select FW_LOADER
select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
select MAC80211_LEDS
menu "Debugging Options"
- depends on IWLWIFI_LEGACY
+ depends on IWLEGACY
-config IWLWIFI_LEGACY_DEBUG
- bool "Enable full debugging output in 4965 and 3945 drivers"
- depends on IWLWIFI_LEGACY
+config IWLEGACY_DEBUG
+ bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+ depends on IWLEGACY
---help---
- This option will enable debug tracing output for the iwlwifilegacy
+ This option will enable debug tracing output for the iwlegacy
drivers.
This will result in the kernel module being ~100k larger. You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
% echo 0x43fff > /sys/class/net/wlan0/device/debug_level
You can find the list of debug mask values in:
- drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+ drivers/net/wireless/iwlegacy/common.h
If this is your first time using this driver, you should say Y here
as the debug information can assist others in helping you resolve
any problems you may encounter.
-config IWLWIFI_LEGACY_DEBUGFS
- bool "4965 and 3945 debugfs support"
- depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+config IWLEGACY_DEBUGFS
+ bool "iwlegacy (iwl 3945/4965) debugfs support"
+ depends on IWLEGACY && MAC80211_DEBUGFS
---help---
- Enable creation of debugfs files for the iwlwifilegacy drivers. This
+ Enable creation of debugfs files for the iwlegacy drivers. This
is a low-impact option that allows getting insight into the
driver's state at runtime.
-config IWLWIFI_LEGACY_DEVICE_TRACING
- bool "iwlwifilegacy legacy device access tracing"
- depends on IWLWIFI_LEGACY
- depends on EVENT_TRACING
- help
- Say Y here to trace all commands, including TX frames and IO
- accesses, sent to the device. If you say yes, iwlwifilegacy will
- register with the ftrace framework for event tracing and dump
- all this information to the ringbuffer, you may need to
- increase the ringbuffer size. See the ftrace documentation
- for more information.
-
- When tracing is not enabled, this option still has some
- (though rather small) overhead.
-
- If unsure, say Y so we can help you better when problems
- occur.
endmenu
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
This option enables support for
@@ -93,7 +76,7 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb38c21..c985a01a073 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
-iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
-iwl-legacy-objs += iwl-scan.o iwl-led.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+obj-$(CONFIG_IWLEGACY) += iwlegacy.o
+iwlegacy-objs := common.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
-iwl-legacy-objs += $(iwl-legacy-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
+iwlegacy-objs += $(iwlegacy-m)
# 4965
obj-$(CONFIG_IWL4965) += iwl4965.o
-iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
-iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
-iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
-iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
-iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 89904054473..25dd7d28d02 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
- * Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
- */
-#ifndef __iwl_legacy_commands_h__
-#define __iwl_legacy_commands_h__
+#ifndef __il_commands_h__
+#define __il_commands_h__
-struct iwl_priv;
+#include <linux/ieee80211.h>
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+struct il_priv;
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
/* Tx rates */
-#define IWL_CCK_RATES 4
-#define IWL_OFDM_RATES 8
-#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
+#define IL_CCK_RATES 4
+#define IL_OFDM_RATES 8
+#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
enum {
- REPLY_ALIVE = 0x1,
- REPLY_ERROR = 0x2,
+ N_ALIVE = 0x1,
+ N_ERROR = 0x2,
/* RXON and QOS commands */
- REPLY_RXON = 0x10,
- REPLY_RXON_ASSOC = 0x11,
- REPLY_QOS_PARAM = 0x13,
- REPLY_RXON_TIMING = 0x14,
+ C_RXON = 0x10,
+ C_RXON_ASSOC = 0x11,
+ C_QOS_PARAM = 0x13,
+ C_RXON_TIMING = 0x14,
/* Multi-Station support */
- REPLY_ADD_STA = 0x18,
- REPLY_REMOVE_STA = 0x19,
+ C_ADD_STA = 0x18,
+ C_REM_STA = 0x19,
/* Security */
- REPLY_WEPKEY = 0x20,
+ C_WEPKEY = 0x20,
/* RX, TX, LEDs */
- REPLY_3945_RX = 0x1b, /* 3945 only */
- REPLY_TX = 0x1c,
- REPLY_RATE_SCALE = 0x47, /* 3945 only */
- REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ N_3945_RX = 0x1b, /* 3945 only */
+ C_TX = 0x1c,
+ C_RATE_SCALE = 0x47, /* 3945 only */
+ C_LEDS = 0x48,
+ C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
/* 802.11h related */
- REPLY_CHANNEL_SWITCH = 0x72,
- CHANNEL_SWITCH_NOTIFICATION = 0x73,
- REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
- SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+ C_CHANNEL_SWITCH = 0x72,
+ N_CHANNEL_SWITCH = 0x73,
+ C_SPECTRUM_MEASUREMENT = 0x74,
+ N_SPECTRUM_MEASUREMENT = 0x75,
/* Power Management */
- POWER_TABLE_CMD = 0x77,
- PM_SLEEP_NOTIFICATION = 0x7A,
- PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+ C_POWER_TBL = 0x77,
+ N_PM_SLEEP = 0x7A,
+ N_PM_DEBUG_STATS = 0x7B,
/* Scan commands and notifications */
- REPLY_SCAN_CMD = 0x80,
- REPLY_SCAN_ABORT_CMD = 0x81,
- SCAN_START_NOTIFICATION = 0x82,
- SCAN_RESULTS_NOTIFICATION = 0x83,
- SCAN_COMPLETE_NOTIFICATION = 0x84,
+ C_SCAN = 0x80,
+ C_SCAN_ABORT = 0x81,
+ N_SCAN_START = 0x82,
+ N_SCAN_RESULTS = 0x83,
+ N_SCAN_COMPLETE = 0x84,
/* IBSS/AP commands */
- BEACON_NOTIFICATION = 0x90,
- REPLY_TX_BEACON = 0x91,
+ N_BEACON = 0x90,
+ C_TX_BEACON = 0x91,
/* Miscellaneous commands */
- REPLY_TX_PWR_TABLE_CMD = 0x97,
+ C_TX_PWR_TBL = 0x97,
/* Bluetooth device coexistence config command */
- REPLY_BT_CONFIG = 0x9b,
+ C_BT_CONFIG = 0x9b,
/* Statistics */
- REPLY_STATISTICS_CMD = 0x9c,
- STATISTICS_NOTIFICATION = 0x9d,
+ C_STATS = 0x9c,
+ N_STATS = 0x9d,
/* RF-KILL commands and notifications */
- CARD_STATE_NOTIFICATION = 0xa1,
+ N_CARD_STATE = 0xa1,
/* Missed beacons notification */
- MISSED_BEACONS_NOTIFICATION = 0xa2,
+ N_MISSED_BEACONS = 0xa2,
- REPLY_CT_KILL_CONFIG_CMD = 0xa4,
- SENSITIVITY_CMD = 0xa8,
- REPLY_PHY_CALIBRATION_CMD = 0xb0,
- REPLY_RX_PHY_CMD = 0xc0,
- REPLY_RX_MPDU_CMD = 0xc1,
- REPLY_RX = 0xc3,
- REPLY_COMPRESSED_BA = 0xc5,
+ C_CT_KILL_CONFIG = 0xa4,
+ C_SENSITIVITY = 0xa8,
+ C_PHY_CALIBRATION = 0xb0,
+ N_RX_PHY = 0xc0,
+ N_RX_MPDU = 0xc1,
+ N_RX = 0xc3,
+ N_COMPRESSED_BA = 0xc5,
- REPLY_MAX = 0xff
+ IL_CN_MAX = 0xff
};
/******************************************************************************
@@ -163,25 +159,25 @@ enum {
*
*****************************************************************************/
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
+/* il_cmd_header flags value */
+#define IL_CMD_FAILED_MSK 0x40
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s) ((s) & 0xff)
-#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_TO_IDX(s) ((s) & 0xff)
+#define IDX_TO_SEQ(i) ((i) & 0xff)
#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
/**
- * struct iwl_cmd_header
+ * struct il_cmd_header
*
* This header format appears in the beginning of each command sent from the
* driver, and each response/notification received from uCode.
*/
-struct iwl_cmd_header {
- u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+struct il_cmd_header {
+ u8 cmd; /* Command ID: C_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
/*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
* There is one exception: uCode sets bit 15 when it originates
* the response/notification, i.e. when the response/notification
* is not a direct response to a command sent by the driver. For
- * example, uCode issues REPLY_3945_RX when it sends a received frame
+ * example, uCode issues N_3945_RX when it sends a received frame
* to the driver; it is not a direct response to any driver command.
*
* The Linux driver uses the following format:
*
- * 0:7 tfd index - position within TX queue
- * 8:12 TX queue id
- * 13 reserved
- * 14 huge - driver sets this to indicate command is in the
- * 'huge' storage at the end of the command buffers
- * 15 unsolicited RX or uCode-originated notification
- */
+ * 0:7 tfd idx - position within TX queue
+ * 8:12 TX queue id
+ * 13 reserved
+ * 14 huge - driver sets this to indicate command is in the
+ * 'huge' storage at the end of the command buffers
+ * 15 unsolicited RX or uCode-originated notification
+ */
__le16 sequence;
/* command or response/notification data follows immediately */
u8 data[0];
} __packed;
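+
+/*
+ * Illustrative sketch (hypothetical variable names): filling the sequence
+ * field when queueing a command, using the SEQ/IDX macros above.
+ *
+ *	out_cmd->hdr.sequence =
+ *		cpu_to_le16(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(write_ptr));
+ */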
-
/**
- * struct iwl3945_tx_power
+ * struct il3945_tx_power
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
*
* Each entry contains two values:
* 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
* 2) Radio gain. This sets the analog gain of the radio Tx path.
* It is a coarser setting, and behaves in a logarithmic (dB) fashion.
*
- * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ * Driver obtains values from struct il3945_tx_power power_gain_table[][].
*/
-struct iwl3945_tx_power {
+struct il3945_tx_power {
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
} __packed;
/**
- * struct iwl3945_power_per_rate
+ * struct il3945_power_per_rate
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl3945_power_per_rate {
+struct il3945_power_per_rate {
u8 rate; /* plcp */
- struct iwl3945_tx_power tpc;
+ struct il3945_tx_power tpc;
u8 reserved;
} __packed;
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
* iwl4965 rate_n_flags bit fields
*
* rate_n_flags format is used in following iwl4965 commands:
- * REPLY_RX (response only)
- * REPLY_RX_MPDU (response only)
- * REPLY_TX (both command and response)
- * REPLY_TX_LINK_QUALITY_CMD
+ * N_RX (response only)
+ * N_RX_MPDU (response only)
+ * C_TX (both command and response)
+ * C_TX_LINK_QUALITY_CMD
*
* High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
* 2-0: 0) 6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
#define RATE_ANT_NUM 3
-#define POWER_TABLE_NUM_ENTRIES 33
-#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
-#define POWER_TABLE_CCK_ENTRY 32
+#define POWER_TBL_NUM_ENTRIES 33
+#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
+#define POWER_TBL_CCK_ENTRY 32
-#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
-#define IWL_PWR_CCK_ENTRIES 2
+#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
+#define IL_PWR_CCK_ENTRIES 2
/**
- * union iwl4965_tx_power_dual_stream
+ * union il4965_tx_power_dual_stream
*
- * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
* Use __le32 version (struct tx_power_dual_stream) when building command.
*
* Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
* For MIMO rates, one value may be different from the other,
* in order to balance the Tx output between the two transmitters.
*
- * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ * See more details in doc for TXPOWER in 4965.h.
*/
-union iwl4965_tx_power_dual_stream {
+union il4965_tx_power_dual_stream {
struct {
u8 radio_tx_gain[2];
u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
/**
* struct tx_power_dual_stream
*
- * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*
- * Same format as iwl_tx_power_dual_stream, but __le32
+ * Same format as il_tx_power_dual_stream, but __le32
*/
struct tx_power_dual_stream {
__le32 dw;
} __packed;
/**
- * struct iwl4965_tx_power_db
+ * struct il4965_tx_power_db
*
- * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl4965_tx_power_db {
- struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+struct il4965_tx_power_db {
+ struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
} __packed;
/******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
#define INITIALIZE_SUBTYPE (9)
/*
- * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "initialize alive" notification once the initialization
* uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
* 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
* for each of 5 frequency ranges.
*/
-struct iwl_init_alive_resp {
+struct il_init_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
* 2 Tx chains */
} __packed;
-
/**
- * REPLY_ALIVE = 0x1 (response only, not a command)
+ * N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "alive" notification once the runtime image is ready
* to receive commands from the driver. This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
* __le32 log_size; log capacity (in number of entries)
* __le32 type; (1) timestamp with each entry, (0) no timestamp
* __le32 wraps; # times uCode has wrapped to top of circular buffer
- * __le32 write_index; next circular buffer entry that uCode would fill
+ * __le32 write_idx; next circular buffer entry that uCode would fill
*
* The header is followed by the circular buffer of log entries. Entries
* with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
*/
-struct iwl_alive_resp {
+struct il_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
u8 sw_rev[8];
u8 ver_type;
- u8 ver_subtype; /* not "9" for runtime alive */
+ u8 ver_subtype; /* not "9" for runtime alive */
__le16 reserved2;
__le32 log_event_table_ptr; /* SRAM address for event log */
__le32 error_event_table_ptr; /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
} __packed;
/*
- * REPLY_ERROR = 0x2 (response only, not a command)
+ * N_ERROR = 0x2 (response only, not a command)
*/
-struct iwl_error_resp {
+struct il_error_resp {
__le32 error_type;
u8 cmd_id;
u8 reserved1;
@@ -554,7 +548,6 @@ enum {
RXON_DEV_TYPE_SNIFFER = 6,
};
-
#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
* (according to ON_AIR deassertion) */
#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
-
/* HT flags */
#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
/**
- * REPLY_RXON = 0x10 (command, has simple generic response)
+ * C_RXON = 0x10 (command, has simple generic response)
*
* RXON tunes the radio tuner to a service channel, and sets up a number
* of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
* channel.
*
* NOTE: All RXONs wipe clean the internal txpower table. Driver must
- * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
* regardless of whether RXON_FILTER_ASSOC_MSK is set.
*/
-struct iwl3945_rxon_cmd {
+struct il3945_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
__le16 reserved5;
} __packed;
-struct iwl4965_rxon_cmd {
+struct il4965_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
/* Create a common rxon cmd which will be typecast into the 3945 or 4965
* specific rxon cmd, depending on where it is called from.
*/
-struct iwl_legacy_rxon_cmd {
+struct il_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
u8 reserved5;
} __packed;
-
/*
- * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ * C_RXON_ASSOC = 0x11 (command, has simple generic response)
*/
-struct iwl3945_rxon_assoc_cmd {
+struct il3945_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-struct iwl4965_rxon_assoc_cmd {
+struct il4965_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-#define IWL_CONN_MAX_LISTEN_INTERVAL 10
-#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
+#define IL_CONN_MAX_LISTEN_INTERVAL 10
+#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
+#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
/*
- * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ * C_RXON_TIMING = 0x14 (command, has simple generic response)
*/
-struct iwl_rxon_time_cmd {
+struct il_rxon_time_cmd {
__le64 timestamp;
__le16 beacon_interval;
- __le16 atim_window;
+ __le16 atim_win;
__le32 beacon_init_val;
__le16 listen_interval;
u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
} __packed;
/*
- * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
*/
-struct iwl3945_channel_switch_cmd {
+struct il3945_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_channel_switch_cmd {
+struct il4965_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
/*
- * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
*/
-struct iwl_csa_notification {
+struct il_csa_notification {
__le16 band;
__le16 channel;
__le32 status; /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
*****************************************************************************/
/**
- * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
- * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct il_qosparam_cmd
*
- * @cw_min: Contention window, start value in numbers of slots.
+ * @cw_min: Contention win, start value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x0f.
- * @cw_max: Contention window, max value in numbers of slots.
+ * @cw_max: Contention win, max value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x3f.
* @aifsn: Number of slots in Arbitration Interframe Space (before
* performing random backoff timing prior to Tx). Device default 1.
* @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
*
- * Device will automatically increase contention window by (2*CW) + 1 for each
+ * Device will automatically increase contention win by (2*CW) + 1 for each
* transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
* value, to cap the CW value.
*/
-struct iwl_ac_qos {
+struct il_ac_qos {
__le16 cw_min;
__le16 cw_max;
u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
#define AC_NUM 4
/*
- * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ * C_QOS_PARAM = 0x13 (command, has simple generic response)
*
* This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
* 0: Background, 1: Best Effort, 2: Video, 3: Voice.
*/
-struct iwl_qosparam_cmd {
+struct il_qosparam_cmd {
__le32 qos_flags;
- struct iwl_ac_qos ac[AC_NUM];
+ struct il_ac_qos ac[AC_NUM];
} __packed;
/******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
*/
/* Special, dedicated locations within device's station table */
-#define IWL_AP_ID 0
-#define IWL_STA_ID 2
-#define IWL3945_BROADCAST_ID 24
-#define IWL3945_STATION_COUNT 25
-#define IWL4965_BROADCAST_ID 31
-#define IWL4965_STATION_COUNT 32
+#define IL_AP_ID 0
+#define IL_STA_ID 2
+#define IL3945_BROADCAST_ID 24
+#define IL3945_STATION_COUNT 25
+#define IL4965_BROADCAST_ID 31
+#define IL4965_STATION_COUNT 32
-#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
-#define IWL_INVALID_STATION 255
+#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
+#define IL_INVALID_STATION 255
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
#define STA_MODIFY_DELBA_TID_MSK 0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
-/* Receiver address (actually, Rx station's index into station table),
+/* Receiver address (actually, Rx station's idx into station table),
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-struct iwl4965_keyinfo {
+struct il4965_keyinfo {
__le16 key_flags;
u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
/**
* struct sta_id_modify
* @addr[ETH_ALEN]: station's MAC address
- * @sta_id: index of station in uCode's station table
+ * @sta_id: idx of station in uCode's station table
* @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
*
- * Driver selects unused table index when adding new station,
- * or the index to a pre-existing station entry when modifying that station.
- * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ * Driver selects unused table idx when adding new station,
+ * or the idx to a pre-existing station entry when modifying that station.
+ * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
*
* modify_mask flags select which parameters to modify vs. leave alone.
*/
@@ -936,15 +927,15 @@ struct sta_id_modify {
} __packed;
/*
- * REPLY_ADD_STA = 0x18 (command)
+ * C_ADD_STA = 0x18 (command)
*
* The device contains an internal table of per-station information,
* with info on security keys, aggregation parameters, and Tx rates for
* initial Tx attempt and any retries (4965 devices uses
- * REPLY_TX_LINK_QUALITY_CMD,
- * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ * C_TX_LINK_QUALITY_CMD,
+ * 3945 uses C_RATE_SCALE to set up rate tables).
*
- * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * C_ADD_STA sets up the table entry for one station, either creating
* a new entry, or modifying a pre-existing one.
*
* NOTE: RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
* their own txpower/rate setup data).
*
* When getting started on a new channel, driver must set up the
- * IWL_BROADCAST_ID entry (last entry in the table). For a client
+ * IL_BROADCAST_ID entry (last entry in the table). For a client
* station in a BSS, once an AP is selected, driver sets up the AP STA
- * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
+ * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
* are all that are needed for a BSS client station. If the device is
* used as AP, or in an IBSS network, driver must set up station table
- * entries for all STAs in network, starting with index IWL_STA_ID.
+ * entries for all STAs in network, starting with idx IL_STA_ID.
*/
-struct iwl3945_addsta_cmd {
+struct il3945_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
__le16 add_immediate_ba_ssn;
} __packed;
-struct iwl4965_addsta_cmd {
+struct il4965_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 reserved1;
+ __le16 reserved1;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
} __packed;
/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
-struct iwl_legacy_addsta_cmd {
+struct il_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 rate_n_flags; /* 3945 only */
+ __le16 rate_n_flags; /* 3945 only */
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
__le16 reserved2;
} __packed;
-
#define ADD_STA_SUCCESS_MSK 0x1
-#define ADD_STA_NO_ROOM_IN_TABLE 0x2
+#define ADD_STA_NO_ROOM_IN_TBL 0x2
#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
/*
- * REPLY_ADD_STA = 0x18 (response)
+ * C_ADD_STA = 0x18 (response)
*/
-struct iwl_add_sta_resp {
- u8 status; /* ADD_STA_* */
+struct il_add_sta_resp {
+ u8 status; /* ADD_STA_* */
} __packed;
#define REM_STA_SUCCESS_MSK 0x1
/*
- * REPLY_REM_STA = 0x19 (response)
+ * C_REM_STA = 0x19 (response)
*/
-struct iwl_rem_sta_resp {
+struct il_rem_sta_resp {
u8 status;
} __packed;
/*
- * REPLY_REM_STA = 0x19 (command)
+ * C_REM_STA = 0x19 (command)
*/
-struct iwl_rem_sta_cmd {
- u8 num_sta; /* number of removed stations */
+struct il_rem_sta_cmd {
+ u8 num_sta; /* number of removed stations */
u8 reserved[3];
- u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+ u8 addr[ETH_ALEN]; /* MAC addr of the first station */
u8 reserved2[2];
} __packed;
-#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
-#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
-#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
-#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
-#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
+#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
+#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
+#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
+#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
-#define IWL_DROP_SINGLE 0
-#define IWL_DROP_SELECTED 1
-#define IWL_DROP_ALL 2
+#define IL_DROP_SINGLE 0
+#define IL_DROP_SELECTED 1
+#define IL_DROP_ALL 2
/*
* REPLY_WEP_KEY = 0x20
*/
-struct iwl_wep_key {
- u8 key_index;
+struct il_wep_key {
+ u8 key_idx;
u8 key_offset;
u8 reserved1[2];
u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
u8 key[16];
} __packed;
-struct iwl_wep_cmd {
+struct il_wep_cmd {
u8 num_keys;
u8 global_key_type;
u8 flags;
u8 reserved;
- struct iwl_wep_key key[0];
+ struct il_wep_key key[0];
} __packed;
#define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
-
-struct iwl3945_rx_frame_stats {
+struct il3945_rx_frame_stats {
u8 phy_count;
u8 id;
u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_hdr {
+struct il3945_rx_frame_hdr {
__le16 channel;
__le16 phy_flags;
u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_end {
+struct il3945_rx_frame_end {
__le32 status;
__le64 timestamp;
__le32 beacon_timestamp;
} __packed;
/*
- * REPLY_3945_RX = 0x1b (response only, not a command)
+ * N_3945_RX = 0x1b (response only, not a command)
*
* NOTE: DO NOT dereference from casts to this structure
* It is provided only for calculating minimum data set size.
* The actual offsets of the hdr and end are dynamic based on
* stats.phy_count
*/
-struct iwl3945_rx_frame {
- struct iwl3945_rx_frame_stats stats;
- struct iwl3945_rx_frame_hdr hdr;
- struct iwl3945_rx_frame_end end;
+struct il3945_rx_frame {
+ struct il3945_rx_frame_stats stats;
+ struct il3945_rx_frame_hdr hdr;
+ struct il3945_rx_frame_end end;
} __packed;
-#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
+#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
/* Fixed (non-configurable) rx data from phy */
-#define IWL49_RX_RES_PHY_CNT 14
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
-#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
-#define IWL49_AGC_DB_POS (7)
-struct iwl4965_rx_non_cfg_phy {
+#define IL49_RX_RES_PHY_CNT 14
+#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
+#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
+#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
+#define IL49_AGC_DB_POS (7)
+struct il4965_rx_non_cfg_phy {
__le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
u8 pad[0];
} __packed;
-
/*
- * REPLY_RX = 0xc3 (response only, not a command)
+ * N_RX = 0xc3 (response only, not a command)
* Used only for legacy (non 11n) frames.
*/
-struct iwl_rx_phy_res {
- u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
+struct il_rx_phy_res {
+ u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
u8 stat_id; /* configurable DSP phy data set ID */
u8 reserved1;
__le64 timestamp; /* TSF at on air rise */
- __le32 beacon_time_stamp; /* beacon at on-air rise */
+ __le32 beacon_time_stamp; /* beacon at on-air rise */
__le16 phy_flags; /* general phy flags: band, modulation, ... */
__le16 channel; /* channel number */
- u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+ u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
__le32 rate_n_flags; /* RATE_MCS_* */
__le16 byte_count; /* frame's byte-count */
__le16 frame_time; /* frame's time on the air */
} __packed;
-struct iwl_rx_mpdu_res_start {
+struct il_rx_mpdu_res_start {
__le16 byte_count;
__le16 reserved;
} __packed;
-
/******************************************************************************
* (5)
* Tx Commands & Responses:
*
- * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * Driver must place each C_TX command into one of the prioritized Tx
* queues in host DRAM, shared between driver and device (see comments for
* SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
* are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
* uCode handles all timing and protocol related to control frames
* (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
* handle reception of block-acks; uCode updates the host driver via
- * REPLY_COMPRESSED_BA.
+ * N_COMPRESSED_BA.
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (4965).
+ * command, as set up by the C_RATE_SCALE (for 3945) or
+ * C_TX_LINK_QUALITY_CMD (4965).
*
- * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
* This command must be executed after every RXON command, before Tx can occur.
*****************************************************************************/
-/* REPLY_TX Tx flags field */
+/* C_TX Tx flags field */
/*
* 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
/* For 4965 devices:
- * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
- * Tx command's initial_rate_index indicates first rate to try;
+ * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
+ * Tx command's initial_rate_idx indicates first rate to try;
* uCode walks through table for additional Tx attempts.
* 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
* This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
/* 1: uCode overrides sequence control field in MAC header.
* 0: Driver provides sequence control field in MAC header.
* Set this for management frames, non-QOS data frames, non-unicast frames,
- * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+ * and also in Tx command embedded in C_SCAN for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
/* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
-
/*
* TX command security control
*/
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
#define TKIP_ICV_LEN 4
/*
- * REPLY_TX = 0x1c (command)
+ * C_TX = 0x1c (command)
*/
-struct iwl3945_tx_cmd {
+struct il3945_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
} __packed;
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*/
-struct iwl3945_tx_resp {
+struct il3945_tx_resp {
u8 failure_rts;
u8 failure_frame;
u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
__le32 status; /* TX status */
} __packed;
-
/*
* 4965 uCode updates these Tx attempt count values in host DRAM.
* Used for managing Tx retries when expecting block-acks.
* Driver should set these fields to 0.
*/
-struct iwl_dram_scratch {
+struct il_dram_scratch {
u8 try_cnt; /* Tx attempts */
u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
__le16 reserved;
} __packed;
-struct iwl_tx_cmd {
+struct il_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
/* uCode may modify this field of the Tx command (in host DRAM!).
* Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct iwl_dram_scratch scratch;
+ struct il_dram_scratch scratch;
/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
__le32 rate_n_flags; /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
u8 sec_ctl; /* TX_CMD_SEC_* */
/*
- * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
* Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
* data frames, this field may be used to selectively reduce initial
* rate (via non-0 value) for special frames (e.g. management), while
* still supporting rate scaling for all frames.
*/
- u8 initial_rate_index;
+ u8 initial_rate_idx;
u8 reserved;
u8 key[16];
__le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
};
enum {
- TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
+ TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
TX_STATUS_DELAY_MSK = 0x00000040,
TX_STATUS_ABORT_MSK = 0x00000080,
TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
- TX_RESERVED = 0x00780000, /* bits 19:22 */
+ TX_RESERVED = 0x00780000, /* bits 19:22 */
TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
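
The status word returned in a Tx response packs several sub-fields behind the masks above. A minimal sketch of unpacking it, assuming only the masks shown here (the helper name and debug prints are illustrative):

static void il_example_decode_tx_status(__le32 status_le)
{
	u32 status = le32_to_cpu(status_le);
	u8 tx_status = status & TX_STATUS_MSK;			/* bits 0:7 */
	u8 pkt_mode  = (status & TX_PACKET_MODE_MSK) >> 8;	/* bits 8:15 */
	u8 fifo      = (status & TX_FIFO_NUMBER_MSK) >> 16;	/* bits 16:18 */
	u8 pa_detect = (status & TX_POWER_PA_DETECT_MSK) >> 23;	/* bits 23:30 */

	pr_debug("tx status 0x%02x mode 0x%02x fifo %u pa 0x%02x abort %d\n",
		 tx_status, pkt_mode, fifo, pa_detect,
		 !!(status & TX_ABORT_REQUIRED_MSK));
}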
@@ -1671,7 +1656,7 @@ enum {
#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*
* This response may be in one of two slightly different formats, indicated
* by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
__le16 sequence;
} __packed;
-struct iwl4965_tx_resp {
+struct il4965_tx_resp {
u8 frame_count; /* 1 no aggregation, >1 aggregation */
u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
*/
union {
__le32 status;
- struct agg_tx_status agg_status[0]; /* for each agg frame */
+ struct agg_tx_status agg_status[0]; /* for each agg frame */
} u;
} __packed;
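
A sketch of walking this response in both layouts, keyed off frame_count as described above; only fields visible in this header are touched, and the per-frame aggregation status decoding is left out:

static void il_example_walk_4965_tx_resp(struct il4965_tx_resp *resp)
{
	int i;

	if (resp->frame_count == 1) {
		/* Non-aggregated frame: a single 32-bit status word. */
		pr_debug("tx status 0x%08x\n", le32_to_cpu(resp->u.status));
		return;
	}

	/* Aggregated: one agg_tx_status entry per frame in the block. */
	for (i = 0; i < resp->frame_count; i++)
		pr_debug("agg frame %d seq 0x%04x\n", i,
			 le16_to_cpu(resp->u.agg_status[i].sequence));
}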
/*
- * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ * N_COMPRESSED_BA = 0xc5 (response only, not a command)
*
* Reports Block-Acknowledge from recipient station
*/
-struct iwl_compressed_ba_resp {
+struct il_compressed_ba_resp {
__le32 sta_addr_lo32;
__le16 sta_addr_hi16;
__le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
} __packed;
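
For illustration, one way the recipient address could be reassembled from the split lo32/hi16 fields; the byte ordering used here is an assumption, not something this header specifies:

static void il_example_ba_sta_addr(struct il_compressed_ba_resp *ba,
				   u8 addr[6])
{
	u32 lo = le32_to_cpu(ba->sta_addr_lo32);
	u16 hi = le16_to_cpu(ba->sta_addr_hi16);

	addr[0] = lo & 0xff;		/* assumed least-significant-first */
	addr[1] = (lo >> 8) & 0xff;
	addr[2] = (lo >> 16) & 0xff;
	addr[3] = (lo >> 24) & 0xff;
	addr[4] = hi & 0xff;
	addr[5] = (hi >> 8) & 0xff;
}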
/*
- * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
*
- * See details under "TXPOWER" in iwl-4965-hw.h.
+ * See details under "TXPOWER" in 4965.h.
*/
-struct iwl3945_txpowertable_cmd {
+struct il3945_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_txpowertable_cmd {
+struct il4965_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
-
/**
- * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
*
- * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ * C_RATE_SCALE = 0x47 (command, has simple generic response)
*
* NOTE: The table of rates passed to the uCode via the
* RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
*
* For example, if you set 9MB (PLCP 0x0f) as the first
* rate in the rate table, the bit mask for that rate
- * when passed through ofdm_basic_rates on the REPLY_RXON
+ * when passed through ofdm_basic_rates on the C_RXON
* command would be bit 0 (1 << 0)
*/
-struct iwl3945_rate_scaling_info {
+struct il3945_rate_scaling_info {
__le16 rate_n_flags;
u8 try_cnt;
- u8 next_rate_index;
+ u8 next_rate_idx;
} __packed;
-struct iwl3945_rate_scaling_cmd {
+struct il3945_rate_scaling_cmd {
u8 table_id;
u8 reserved[3];
- struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+ struct il3945_rate_scaling_info table[IL_MAX_RATES];
} __packed;
-
/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
#define LINK_QUAL_ANT_B_MSK (1 << 1)
#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
-
/**
- * struct iwl_link_qual_general_params
+ * struct il_link_qual_general_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_general_params {
+struct il_link_qual_general_params {
u8 flags;
- /* No entries at or above this (driver chosen) index contain MIMO */
+ /* No entries at or above this (driver chosen) idx contain MIMO */
u8 mimo_delimiter;
/* Best single antenna to use for single stream (legacy, SISO). */
u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
/* Best antennas to use for MIMO (unused for 4965, assumes both). */
- u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
+ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
/*
* If driver needs to use different initial rates for different
* EDCA QOS access categories (as implemented by tx fifos 0-3),
- * this table will set that up, by indicating the indexes in the
+ * this table will set that up, by indicating the idxes in the
* rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
* Otherwise, driver should set all entries to 0.
*
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
* 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
* TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
*/
- u8 start_rate_index[LINK_QUAL_AC_NUM];
+ u8 start_rate_idx[LINK_QUAL_AC_NUM];
} __packed;
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
/**
- * struct iwl_link_qual_agg_params
+ * struct il_link_qual_agg_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_agg_params {
+struct il_link_qual_agg_params {
/*
	 * Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
} __packed;
/*
- * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ * For 4965 devices only; 3945 uses C_RATE_SCALE.
*
* Each station in the 4965 device's internal station table has its own table
* of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
* one station.
*
* NOTE: Station must already be in 4965 device's station table.
- * Use REPLY_ADD_STA.
+ * Use C_ADD_STA.
*
* The rate scaling procedures described below work well. Of course, other
* procedures are possible, and may work better for particular environments.
*
*
- * FILLING THE RATE TABLE
+ * FILLING THE RATE TBL
*
* Given a particular initial rate and mode, as determined by the rate
* scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
* speculative mode as the new current active mode.
*
* Each history set contains, separately for each possible rate, data for a
- * sliding window of the 62 most recent tx attempts at that rate. The data
+ * sliding win of the 62 most recent tx attempts at that rate. The data
* includes a shifting bitmap of success(1)/failure(0), and sums of successful
* and attempted frames, from which the driver can additionally calculate a
* success ratio (success / attempted) and number of failures
- * (attempted - success), and control the size of the window (attempted).
+ * (attempted - success), and control the size of the win (attempted).
* The driver uses the bit map to remove successes from the success sum, as
- * the oldest tx attempts fall out of the window.
+ * the oldest tx attempts fall out of the win.
*
* When the 4965 device makes multiple tx attempts for a given frame, each
* attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
*
* When using block-ack (aggregation), all frames are transmitted at the same
* rate, since there is no per-attempt acknowledgment from the destination
- * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * station. The Tx response struct il_tx_resp indicates the Tx rate in
* rate_n_flags field. After receiving a block-ack, the driver can update
* history for the entire block all at once.
*
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
* good performance; higher rate is sure to have poorer success.
*
* 6) Re-evaluate the rate after each tx frame. If working with block-
- * acknowledge, history and statistics may be calculated for the entire
- * block (including prior history that fits within the history windows),
+ * acknowledge, history and stats may be calculated for the entire
+ * block (including prior history that fits within the history wins),
* before re-evaluation.
*
* FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
* legacy), and then repeat the search process.
*
*/
-struct iwl_link_quality_cmd {
+struct il_link_quality_cmd {
/* Index of destination/recipient station in uCode's station table */
u8 sta_id;
u8 reserved1;
__le16 control; /* not used */
- struct iwl_link_qual_general_params general_params;
- struct iwl_link_qual_agg_params agg_params;
+ struct il_link_qual_general_params general_params;
+ struct il_link_qual_agg_params agg_params;
/*
- * Rate info; when using rate-scaling, Tx command's initial_rate_index
- * specifies 1st Tx rate attempted, via index into this table.
+ * Rate info; when using rate-scaling, Tx command's initial_rate_idx
+ * specifies 1st Tx rate attempted, via idx into this table.
	 * 4965 devices work their way through the table when retrying Tx.
*/
struct {
- __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
+ __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
} rs_table[LINK_QUAL_MAX_RETRY_NUM];
__le32 reserved2;
} __packed;
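
A minimal sketch of filling this command for the simplest case, a fixed legacy rate repeated for every retry with no MIMO entries; a real driver derives the table from its rate-scaling state as described in the comment above:

static void il_example_fill_lq(struct il_link_quality_cmd *lq, u8 sta_id,
			       __le32 rate_n_flags)
{
	int i;

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = sta_id;
	/* mimo_delimiter stays 0: no MIMO entries in this table. */
	lq->general_params.single_stream_ant_msk = LINK_QUAL_ANT_A_MSK;
	lq->general_params.dual_stream_ant_msk = LINK_QUAL_ANT_MSK;

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		lq->rs_table[i].rate_n_flags = rate_n_flags;
}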
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
#define BT_MAX_KILL_DEF (0x5)
/*
- * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ * C_BT_CONFIG = 0x9b (command, has simple generic response)
*
* 3945 and 4965 devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
-struct iwl_bt_cmd {
+struct il_bt_cmd {
u8 flags;
u8 lead_time;
u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
__le32 kill_cts_mask;
} __packed;
-
/******************************************************************************
* (6)
* Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
RXON_FILTER_ASSOC_MSK | \
RXON_FILTER_BCON_AWARE_MSK)
-struct iwl_measure_channel {
+struct il_measure_channel {
__le32 duration; /* measurement duration in extended beacon
* format */
u8 channel; /* channel to measure */
- u8 type; /* see enum iwl_measure_type */
+ u8 type; /* see enum il_measure_type */
__le16 reserved;
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (command)
*/
-struct iwl_spectrum_cmd {
+struct il_spectrum_cmd {
__le16 len; /* number of bytes starting from token */
u8 token; /* token id */
u8 id; /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
__le32 filter_flags; /* rxon filter flags */
__le16 channel_count; /* minimum 1, maximum 10 */
__le16 reserved3;
- struct iwl_measure_channel channels[10];
+ struct il_measure_channel channels[10];
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (response)
*/
-struct iwl_spectrum_resp {
+struct il_spectrum_resp {
u8 token;
u8 id; /* id of the prior command replaced, or 0xff */
__le16 status; /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
* measurement) */
} __packed;
-enum iwl_measurement_state {
- IWL_MEASUREMENT_START = 0,
- IWL_MEASUREMENT_STOP = 1,
+enum il_measurement_state {
+ IL_MEASUREMENT_START = 0,
+ IL_MEASUREMENT_STOP = 1,
};
-enum iwl_measurement_status {
- IWL_MEASUREMENT_OK = 0,
- IWL_MEASUREMENT_CONCURRENT = 1,
- IWL_MEASUREMENT_CSA_CONFLICT = 2,
- IWL_MEASUREMENT_TGH_CONFLICT = 3,
+enum il_measurement_status {
+ IL_MEASUREMENT_OK = 0,
+ IL_MEASUREMENT_CONCURRENT = 1,
+ IL_MEASUREMENT_CSA_CONFLICT = 2,
+ IL_MEASUREMENT_TGH_CONFLICT = 3,
/* 4-5 reserved */
- IWL_MEASUREMENT_STOPPED = 6,
- IWL_MEASUREMENT_TIMEOUT = 7,
- IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+ IL_MEASUREMENT_STOPPED = 6,
+ IL_MEASUREMENT_TIMEOUT = 7,
+ IL_MEASUREMENT_PERIODIC_FAILED = 8,
};
#define NUM_ELEMENTS_IN_HISTOGRAM 8
-struct iwl_measurement_histogram {
+struct il_measurement_histogram {
__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
} __packed;
/* clear channel availability counters */
-struct iwl_measurement_cca_counters {
+struct il_measurement_cca_counters {
__le32 ofdm;
__le32 cck;
} __packed;
-enum iwl_measure_type {
- IWL_MEASURE_BASIC = (1 << 0),
- IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
- IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
- IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
- IWL_MEASURE_FRAME = (1 << 4),
+enum il_measure_type {
+ IL_MEASURE_BASIC = (1 << 0),
+ IL_MEASURE_CHANNEL_LOAD = (1 << 1),
+ IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+ IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+ IL_MEASURE_FRAME = (1 << 4),
/* bits 5:6 are reserved */
- IWL_MEASURE_IDLE = (1 << 7),
+ IL_MEASURE_IDLE = (1 << 7),
};
/*
- * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
*/
-struct iwl_spectrum_notification {
+struct il_spectrum_notification {
u8 id; /* measurement id -- 0 or 1 */
u8 token;
- u8 channel_index; /* index in measurement channel list */
+ u8 channel_idx; /* idx in measurement channel list */
u8 state; /* 0 - start, 1 - stop */
__le32 start_time; /* lower 32-bits of TSF */
u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
u8 channel;
- u8 type; /* see enum iwl_measurement_type */
+ u8 type; /* see enum il_measurement_type */
u8 reserved1;
	/* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
* valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
* unidentified */
u8 reserved2[3];
- struct iwl_measurement_histogram histogram;
+ struct il_measurement_histogram histogram;
__le32 stop_time; /* lower 32-bits of TSF */
- __le32 status; /* see iwl_measurement_status */
+ __le32 status; /* see il_measurement_status */
} __packed;
/******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
*****************************************************************************/
/**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct il_powertable_cmd - Power Table Command
* @flags: See below:
*
- * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ * C_POWER_TBL = 0x77 (command, has simple generic response)
*
* PM allow:
* bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
* '10' force xtal sleep
* '11' Illegal set
*
- * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
 * ucode assumes sleep over DTIM is allowed and we don't need to wake up
* for every DTIM.
*/
-#define IWL_POWER_VEC_SIZE 5
+#define IL_POWER_VEC_SIZE 5
-#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
-#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
-struct iwl3945_powertable_cmd {
+struct il3945_powertable_cmd {
__le16 flags;
u8 reserved[2];
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
} __packed;
-struct iwl_powertable_cmd {
+struct il_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds; /* 3945 reserved */
+ u8 debug_flags; /* 3945 reserved */
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
__le32 keep_alive_beacons;
} __packed;
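
A sketch of a power table command that allows driver-managed sleep; the timeout and interval numbers are illustrative placeholders, not recommended defaults:

static void il_example_fill_powertable(struct il_powertable_cmd *cmd)
{
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->rx_data_timeout = cpu_to_le32(1000);	/* illustrative value */
	cmd->tx_data_timeout = cpu_to_le32(1000);	/* illustrative value */

	/* Keep every interval short so sleep never spans a DTIM period. */
	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] = cpu_to_le32(1);
}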
/*
- * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * N_PM_SLEEP = 0x7A (notification only, not a command)
* all devices identical.
*/
-struct iwl_sleep_notification {
+struct il_sleep_notification {
u8 pm_sleep_mode;
u8 pm_wakeup_src;
__le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
/* Sleep states. all devices identical. */
enum {
- IWL_PM_NO_SLEEP = 0,
- IWL_PM_SLP_MAC = 1,
- IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
- IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
- IWL_PM_SLP_PHY = 4,
- IWL_PM_SLP_REPENT = 5,
- IWL_PM_WAKEUP_BY_TIMER = 6,
- IWL_PM_WAKEUP_BY_DRIVER = 7,
- IWL_PM_WAKEUP_BY_RFKILL = 8,
+ IL_PM_NO_SLEEP = 0,
+ IL_PM_SLP_MAC = 1,
+ IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+ IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+ IL_PM_SLP_PHY = 4,
+ IL_PM_SLP_REPENT = 5,
+ IL_PM_WAKEUP_BY_TIMER = 6,
+ IL_PM_WAKEUP_BY_DRIVER = 7,
+ IL_PM_WAKEUP_BY_RFKILL = 8,
/* 3 reserved */
- IWL_PM_NUM_OF_MODES = 12,
+ IL_PM_NUM_OF_MODES = 12,
};
/*
- * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ * N_CARD_STATE = 0xa1 (notification only, not a command)
*/
-struct iwl_card_state_notif {
+struct il_card_state_notif {
__le32 flags;
} __packed;
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
-struct iwl_ct_kill_config {
- __le32 reserved;
- __le32 critical_temperature_M;
- __le32 critical_temperature_R;
-} __packed;
+struct il_ct_kill_config {
+ __le32 reserved;
+ __le32 critical_temperature_M;
+ __le32 critical_temperature_R;
+} __packed;
/******************************************************************************
* (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
/**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * struct il_scan_channel - entry in C_SCAN channel table
*
* One for each channel in the scan list.
* Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
* quiet_plcp_th, good_CRC_th)
*
* To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * under struct il_scan_cmd about max_out_time and quiet_time):
* 1) If using passive_dwell (i.e. passive_dwell != 0):
* active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
* 2) quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
* passive_dwell < max_out_time
* active_dwell < max_out_time
*/
-struct iwl3945_scan_channel {
+struct il3945_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
* 5:7 reserved
*/
u8 type;
- u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
- struct iwl3945_tx_power tpc;
+ u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
+ struct il3945_tx_power tpc;
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
__le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
} __packed;
/* set number of direct probes u8 type */
-#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
-struct iwl_scan_channel {
+struct il_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
* 21:31 reserved
*/
__le32 type;
- __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+ __le16 channel; /* band is selected by il_scan_cmd "flags" field */
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
} __packed;
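
The timing rules listed above struct il3945_scan_channel reduce to a few comparisons; a sketch, taking host-order values for clarity:

static bool il_example_dwell_times_ok(u16 active, u16 passive, u16 quiet,
				      u16 max_out_time)
{
	/* 1) active_dwell must not exceed a non-zero passive_dwell */
	if (passive && active > passive)
		return false;
	/* 2) quiet_time must not exceed active_dwell */
	if (quiet > active)
		return false;
	/* 3) both dwells must stay below a non-zero max_out_time */
	if (max_out_time && (active >= max_out_time || passive >= max_out_time))
		return false;
	return true;
}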
/* set number of direct probes __le32 type */
-#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
/**
- * struct iwl_ssid_ie - directed scan network information element
+ * struct il_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
* each channel may select different ssids from among the 20 (4) entries.
* SSID IEs get transmitted in reverse order of entry.
*/
-struct iwl_ssid_ie {
+struct il_ssid_ie {
u8 id;
u8 len;
u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
-#define IWL_GOOD_CRC_TH_DISABLED 0
-#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
-#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
-#define IWL_MAX_CMD_SIZE 4096
+#define IL_GOOD_CRC_TH_DISABLED 0
+#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
+#define IL_MAX_SCAN_SIZE 1024
+#define IL_MAX_CMD_SIZE 4096
/*
- * REPLY_SCAN_CMD = 0x80 (command)
+ * C_SCAN = 0x80 (command)
*
* The hardware scan command is very powerful; the driver can set it up to
* maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
* Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
*
* To avoid uCode errors, see timing restrictions described under
- * struct iwl_scan_channel.
+ * struct il_scan_channel.
*/
-struct iwl3945_scan_cmd {
+struct il3945_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl3945_tx_cmd tx_cmd;
+ struct il3945_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
/*
* Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl3945_scan_channel channels[0];
+ * struct il3945_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
} __packed;
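
The variable tail at data[] is a probe request frame followed by the channel entries, so the overall command size can be computed as below; whether the len field counts the fixed header as well is not spelled out here, so treat this as an illustrative sketch:

static u16 il_example_3945_scan_size(struct il3945_scan_cmd *scan,
				     u16 probe_req_len)
{
	/* fixed header + probe request + one entry per channel */
	return sizeof(*scan) + probe_req_len +
	       scan->channel_count * sizeof(struct il3945_scan_channel);
}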
-struct iwl_scan_cmd {
+struct il_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl_tx_cmd tx_cmd;
+ struct il_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
/*
* Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl_scan_channel channels[0];
+ * struct il_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
#define ABORT_STATUS 0x2
/*
- * REPLY_SCAN_CMD = 0x80 (response)
+ * C_SCAN = 0x80 (response)
*/
-struct iwl_scanreq_notification {
+struct il_scanreq_notification {
__le32 status; /* 1: okay, 2: cannot fulfill request */
} __packed;
/*
- * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ * N_SCAN_START = 0x82 (notification only, not a command)
*/
-struct iwl_scanstart_notification {
+struct il_scanstart_notification {
__le32 tsf_low;
__le32 tsf_high;
__le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
#define SCAN_OWNER_STATUS 0x1
#define MEASURE_OWNER_STATUS 0x2
-#define IWL_PROBE_STATUS_OK 0
-#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
+#define IL_PROBE_STATUS_OK 0
+#define IL_PROBE_STATUS_TX_FAILED BIT(0)
/* error statuses combined with TX_FAILED */
-#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
-#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
+#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
+#define IL_PROBE_STATUS_FAIL_BT BIT(2)
-#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
/*
- * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ * N_SCAN_RESULTS = 0x83 (notification only, not a command)
*/
-struct iwl_scanresults_notification {
+struct il_scanresults_notification {
u8 channel;
u8 band;
u8 probe_status;
- u8 num_probe_not_sent; /* not enough time to send */
+ u8 num_probe_not_sent; /* not enough time to send */
__le32 tsf_low;
__le32 tsf_high;
- __le32 statistics[NUMBER_OF_STATISTICS];
+ __le32 stats[NUMBER_OF_STATS];
} __packed;
/*
- * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
*/
-struct iwl_scancomplete_notification {
+struct il_scancomplete_notification {
u8 scanned_channels;
u8 status;
u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
__le32 tsf_high;
} __packed;
-
/******************************************************************************
* (9)
* IBSS/AP Commands and Notifications:
*
*****************************************************************************/
-enum iwl_ibss_manager {
- IWL_NOT_IBSS_MANAGER = 0,
- IWL_IBSS_MANAGER = 1,
+enum il_ibss_manager {
+ IL_NOT_IBSS_MANAGER = 0,
+ IL_IBSS_MANAGER = 1,
};
/*
- * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ * N_BEACON = 0x90 (notification only, not a command)
*/
-struct iwl3945_beacon_notif {
- struct iwl3945_tx_resp beacon_notify_hdr;
+struct il3945_beacon_notif {
+ struct il3945_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
-struct iwl4965_beacon_notif {
- struct iwl4965_tx_resp beacon_notify_hdr;
+struct il4965_beacon_notif {
+ struct il4965_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
/*
- * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ * C_TX_BEACON = 0x91 (command, has simple generic response)
*/
-struct iwl3945_tx_beacon_cmd {
- struct iwl3945_tx_cmd tx;
+struct il3945_tx_beacon_cmd {
+ struct il3945_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
} __packed;
-struct iwl_tx_beacon_cmd {
- struct iwl_tx_cmd tx;
+struct il_tx_beacon_cmd {
+ struct il_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
*
*****************************************************************************/
-#define IWL_TEMP_CONVERT 260
+#define IL_TEMP_CONVERT 260
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
} failed;
} __packed;
-/* statistics command response */
+/* stats command response */
-struct iwl39_statistics_rx_phy {
+struct iwl39_stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
__le32 sent_cts_cnt;
} __packed;
-struct iwl39_statistics_rx_non_phy {
+struct iwl39_stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
* our serving channel */
} __packed;
-struct iwl39_statistics_rx {
- struct iwl39_statistics_rx_phy ofdm;
- struct iwl39_statistics_rx_phy cck;
- struct iwl39_statistics_rx_non_phy general;
+struct iwl39_stats_rx {
+ struct iwl39_stats_rx_phy ofdm;
+ struct iwl39_stats_rx_phy cck;
+ struct iwl39_stats_rx_non_phy general;
} __packed;
-struct iwl39_statistics_tx {
+struct iwl39_stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
__le32 actual_ack_cnt;
} __packed;
-struct statistics_dbg {
+struct stats_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 wait_for_silence_timeout_cnt;
__le32 reserved[3];
} __packed;
-struct iwl39_statistics_div {
+struct iwl39_stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
__le32 probe_time;
} __packed;
-struct iwl39_statistics_general {
+struct iwl39_stats_general {
__le32 temperature;
- struct statistics_dbg dbg;
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct iwl39_statistics_div div;
+ struct iwl39_stats_div div;
} __packed;
-struct statistics_rx_phy {
+struct stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
__le32 reserved3;
} __packed;
-struct statistics_rx_ht_phy {
+struct stats_rx_ht_phy {
__le32 plcp_err;
__le32 overrun_err;
__le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
-struct statistics_rx_non_phy {
+struct stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
__le32 num_missed_bcon; /* number of missed beacons */
__le32 adc_rx_saturation_time; /* count in 0.8us units the time the
* ADC was in saturation */
- __le32 ina_detection_search_time;/* total time (in 0.8us) searched
- * for INA */
+ __le32 ina_detection_search_time; /* total time (in 0.8us) searched
+ * for INA */
__le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
__le32 interference_data_flag; /* flag for interference data
* availability. 1 when data is
* available. */
- __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 channel_load; /* counts RX Enable time in uSec */
__le32 dsp_false_alarms; /* DSP false alarm (both OFDM
* and CCK) counter */
__le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
__le32 beacon_energy_c;
} __packed;
-struct statistics_rx {
- struct statistics_rx_phy ofdm;
- struct statistics_rx_phy cck;
- struct statistics_rx_non_phy general;
- struct statistics_rx_ht_phy ofdm_ht;
+struct stats_rx {
+ struct stats_rx_phy ofdm;
+ struct stats_rx_phy cck;
+ struct stats_rx_non_phy general;
+ struct stats_rx_ht_phy ofdm_ht;
} __packed;
/**
- * struct statistics_tx_power - current tx power
+ * struct stats_tx_power - current tx power
*
* @ant_a: current tx power on chain a in 1/2 dB step
* @ant_b: current tx power on chain b in 1/2 dB step
* @ant_c: current tx power on chain c in 1/2 dB step
*/
-struct statistics_tx_power {
+struct stats_tx_power {
u8 ant_a;
u8 ant_b;
u8 ant_c;
u8 reserved;
} __packed;
-struct statistics_tx_non_phy_agg {
+struct stats_tx_non_phy_agg {
__le32 ba_timeout;
__le32 ba_reschedule_frames;
__le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
__le32 rx_ba_rsp_cnt;
} __packed;
-struct statistics_tx {
+struct stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
__le32 burst_abort_missing_next_frame_cnt;
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
- struct statistics_tx_non_phy_agg agg;
+ struct stats_tx_non_phy_agg agg;
__le32 reserved1;
} __packed;
-
-struct statistics_div {
+struct stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
__le32 reserved2;
} __packed;
-struct statistics_general_common {
- __le32 temperature; /* radio temperature */
- struct statistics_dbg dbg;
+struct stats_general_common {
+ __le32 temperature; /* radio temperature */
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct statistics_div div;
+ struct stats_div div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
__le32 num_of_sos_states;
} __packed;
-struct statistics_general {
- struct statistics_general_common common;
+struct stats_general {
+ struct stats_general_common common;
__le32 reserved2;
__le32 reserved3;
} __packed;
-#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
-#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
-#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
+#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
+#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
+#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
/*
- * REPLY_STATISTICS_CMD = 0x9c,
+ * C_STATS = 0x9c,
* all devices identical.
*
- * This command triggers an immediate response containing uCode statistics.
- * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ * This command triggers an immediate response containing uCode stats.
+ * The response is in the same format as N_STATS 0x9d, below.
*
* If the CLEAR_STATS configuration flag is set, uCode will clear its
- * internal copy of the statistics (counters) after issuing the response.
- * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ * internal copy of the stats (counters) after issuing the response.
+ * This flag does not affect N_STATSs after beacons (see below).
*
* If the DISABLE_NOTIF configuration flag is set, uCode will not issue
- * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
- * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ * N_STATSs after received beacons (see below). This flag
+ * does not affect the response to the C_STATS 0x9c itself.
*/
-#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
-#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
-struct iwl_statistics_cmd {
- __le32 configuration_flags; /* IWL_STATS_CONF_* */
+#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
+#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
+struct il_stats_cmd {
+ __le32 configuration_flags; /* IL_STATS_CONF_* */
} __packed;
/*
- * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ * N_STATS = 0x9d (notification only, not a command)
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
+ * C_STATS 0x9c, above.
*
* Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * cleared when changing channels or when driver issues C_STATS
* 0x9c with CLEAR_STATS bit set (see above).
*
- * uCode also issues this notification during scans. uCode clears statistics
- * appropriately so that each notification contains statistics for only the
+ * uCode also issues this notification during scans. uCode clears stats
+ * appropriately so that each notification contains stats for only the
* one channel that has just been scanned.
*/
-#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
-#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
-struct iwl3945_notif_statistics {
+struct il3945_notif_stats {
__le32 flag;
- struct iwl39_statistics_rx rx;
- struct iwl39_statistics_tx tx;
- struct iwl39_statistics_general general;
+ struct iwl39_stats_rx rx;
+ struct iwl39_stats_tx tx;
+ struct iwl39_stats_general general;
} __packed;
-struct iwl_notif_statistics {
+struct il_notif_stats {
__le32 flag;
- struct statistics_rx rx;
- struct statistics_tx tx;
- struct statistics_general general;
+ struct stats_rx rx;
+ struct stats_tx tx;
+ struct stats_general general;
} __packed;
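
A sketch of pulling a couple of counters out of such a notification, using only fields and flag masks visible above:

static void il_example_read_stats(struct il_notif_stats *stats)
{
	bool band_24g = !!(stats->flag & STATS_REPLY_FLG_BAND_24G_MSK);
	u32 bogus_cts = le32_to_cpu(stats->rx.general.bogus_cts);
	u32 missed = le32_to_cpu(stats->rx.general.num_missed_bcon);

	pr_debug("stats: 2.4GHz=%d bogus_cts=%u missed_beacons=%u\n",
		 band_24g, bogus_cts, missed);
}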
/*
- * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
*
- * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed
+ * uCode sends N_MISSED_BEACONS to the driver when it detects that beacons were
 * missed, regardless of how many. This means that when the driver receives the
 * notification, it can find inside the command all the beacon information,
 * which includes the number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
*
*/
-#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
-#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
-#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
-struct iwl_missed_beacon_notif {
+struct il_missed_beacon_notif {
__le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
} __packed;
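
A sketch of the kind of check a driver might make on this notification, using the default threshold above:

static bool il_example_beacon_loss_critical(struct il_missed_beacon_notif *mb)
{
	return le32_to_cpu(mb->consecutive_missed_beacons) >
	       IL_MISSED_BEACON_THRESHOLD_DEF;
}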
-
/******************************************************************************
* (11)
* Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
*****************************************************************************/
/**
- * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ * C_SENSITIVITY = 0xa8 (command, has simple generic response)
*
* This command sets up the Rx signal detector for a sensitivity level that
* is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
* time listening, not transmitting). Driver must adjust sensitivity so that
* the ratio of actual false alarms to actual Rx time falls within this range.
*
- * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * While associated, uCode delivers N_STATSs after each
* received beacon. These provide information to the driver to analyze the
- * sensitivity. Don't analyze statistics that come in from scanning, or any
- * other non-associated-network source. Pertinent statistics include:
+ * sensitivity. Don't analyze stats that come in from scanning, or any
+ * other non-associated-network source. Pertinent stats include:
*
- * From "general" statistics (struct statistics_rx_non_phy):
+ * From "general" stats (struct stats_rx_non_phy):
*
* (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
* Measure of energy of desired signal. Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
* uSecs of actual Rx time during beacon period (varies according to
* how much time was spent transmitting).
*
- * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
*
* false_alarm_cnt
* Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
*
* Total number of false alarms = false_alarms + plcp_errs
*
- * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
* (notice that the start points for OFDM are at or close to settings for
* maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
*
* If actual rate of OFDM false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
* Reset this to 0 at the first beacon period that falls within the
* "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
*
- * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
* (notice that the start points for CCK are at maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
- * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
+ * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
*
* If actual rate of CCK false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), method for reducing
* sensitivity is:
*
- * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* up to max 400.
*
- * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
* sensitivity has been reduced a significant amount; bring it up to
* a moderate 161. Otherwise, *add* 3, up to max 200.
*
- * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
* sensitivity has been reduced only a moderate or small amount;
- * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
* down to min 0. Otherwise (if gain has been significantly reduced),
- * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
*
* b) Save a snapshot of the "silence reference".
*
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
*
* Method for increasing sensitivity:
*
- * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
* down to min 125.
*
- * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* down to min 200.
*
- * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
*
* If actual rate of CCK false alarms (+ plcp_errors) is within good range
* (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
*
* 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
* give some extra margin to energy threshold by *subtracting* 8
- * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ * from value in HD_MIN_ENERGY_CCK_DET_IDX.
*
* For all cases (too few, too many, good range), make sure that the CCK
* detection threshold (energy) is below the energy level for robust
* detection over the past 10 beacon periods, the "Max cck energy".
* Lower values mean higher energy; this means making sure that the value
- * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
*
*/
/*
- * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
- */
-#define HD_TABLE_SIZE (11) /* number of entries */
-#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
-#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
-#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
-#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
-#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
-
-/* Control field in struct iwl_sensitivity_cmd */
-#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
-#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
+ * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
+ */
+#define HD_TBL_SIZE (11) /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
+#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
+#define HD_OFDM_ENERGY_TH_IN_IDX (10)
+
+/* Control field in struct il_sensitivity_cmd */
+#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
+#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
/**
- * struct iwl_sensitivity_cmd
+ * struct il_sensitivity_cmd
* @control: (1) updates working table, (0) updates default table
- * @table: energy threshold values, use HD_* as index into table
+ * @table: energy threshold values, use HD_* as idx into table
*
* Always use "1" in "control" to update uCode's working table and DSP.
*/
-struct iwl_sensitivity_cmd {
- __le16 control; /* always use "1" */
- __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
+struct il_sensitivity_cmd {
+ __le16 control; /* always use "1" */
+ __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
} __packed;
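
A sketch of the CCK "too many false alarms" adjustment spelled out above, operating on a host-order working copy of the table before it is packed into the command; the clamping helper is the usual kernel min_t:

static void il_example_cck_desense(u16 *tbl)
{
	u16 v;

	/* 1) raise the MRC auto-correlation threshold, up to max 400 */
	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
	    min_t(u16, tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] + 3, 400);

	/* 2) if far below 160 jump to 161, otherwise add 3 up to max 200 */
	if (tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] < 160)
		tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 161;
	else
		tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
		    min_t(u16, tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] + 3, 200);

	/* 3a) only a moderate reduction so far: give back some CCK energy */
	if (tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] > 160) {
		v = tbl[HD_MIN_ENERGY_CCK_DET_IDX];
		tbl[HD_MIN_ENERGY_CCK_DET_IDX] = (v > 2) ? v - 2 : 0;
	}
}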
-
/**
- * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
*
 * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
*
* After the first association, driver should accumulate signal and noise
- * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
- * beacons from the associated network (don't collect statistics that come
+ * stats from the N_STATSs that follow the first 20
+ * beacons from the associated network (don't collect stats that come
* in from scanning, or any other non-network source).
*
* DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
* Driver should determine which antennas are actually connected, by comparing
* average beacon signal levels for the 3 Rx chains. Accumulate (add) the
* following values over 20 beacons, one accumulator for each of the chains
- * a/b/c, from struct statistics_rx_non_phy:
+ * a/b/c, from struct stats_rx_non_phy:
*
* beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
* to antennas, see above) for gain, by comparing the average signal levels
* detected during the silence after each beacon (background noise).
* Accumulate (add) the following values over 20 beacons, one accumulator for
- * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ * each of the chains a/b/c, from struct stats_rx_non_phy:
*
* beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
* (accum_noise[i] - accum_noise[reference]) / 30
*
* The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
- * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
* driver should limit the difference results to a range of 0-3 (0-4.5 dB),
* and set bit 2 to indicate "reduce gain". The value for the reference
* (weakest) chain should be "0".
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
/* Phy calibration command for series */
/* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
enum {
- IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+ IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+ IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
};
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
+#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
-struct iwl_calib_hdr {
+struct il_calib_hdr {
u8 op_code;
u8 first_group;
u8 groups_num;
u8 data_valid;
} __packed;
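
A sketch of the differential-gain arithmetic described above (20-beacon noise accumulators, divided by 30, clamped to 0-3 steps, with bit 2 meaning "reduce gain"); the clamping and bit placement follow the comment, everything else is illustrative:

static s8 il_example_diff_gain(u32 accum_noise_chain, u32 accum_noise_ref)
{
	u32 delta = (accum_noise_chain - accum_noise_ref) / 30;

	if (!delta)
		return 0;		/* reference (weakest) chain */
	if (delta > 3)
		delta = 3;		/* limit to 0-3 (0-4.5 dB) */
	return (s8)(delta | (1 << 2));	/* bit 2: reduce gain */
}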
-/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
-struct iwl_calib_diff_gain_cmd {
- struct iwl_calib_hdr hdr;
+/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct il_calib_diff_gain_cmd {
+ struct il_calib_hdr hdr;
s8 diff_gain_a; /* see above */
s8 diff_gain_b;
s8 diff_gain_c;
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
/*
* LEDs Command & Response
- * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ * C_LEDS = 0x48 (command, has simple generic response)
*
* For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
* this command turns it on or off, or sets up a periodic blinking cycle.
*/
-struct iwl_led_cmd {
+struct il_led_cmd {
__le32 interval; /* "interval" in uSec */
u8 id; /* 1: Activity, 2: Link, 3: Tech */
u8 off; /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
u8 reserved;
} __packed;
-
/******************************************************************************
* (13)
* Union of all expected notifications/responses:
*
*****************************************************************************/
-struct iwl_rx_packet {
+#define IL_RX_FRAME_SIZE_MSK 0x00003fff
+
+struct il_rx_pkt {
/*
* The first 4 bytes of the RX frame header contain both the RX frame
* size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
* 13-00: RX frame size
*/
__le32 len_n_flags;
- struct iwl_cmd_header hdr;
+ struct il_cmd_header hdr;
union {
- struct iwl3945_rx_frame rx_frame;
- struct iwl3945_tx_resp tx_resp;
- struct iwl3945_beacon_notif beacon_status;
-
- struct iwl_alive_resp alive_frame;
- struct iwl_spectrum_notification spectrum_notif;
- struct iwl_csa_notification csa_notif;
- struct iwl_error_resp err_resp;
- struct iwl_card_state_notif card_state_notif;
- struct iwl_add_sta_resp add_sta;
- struct iwl_rem_sta_resp rem_sta;
- struct iwl_sleep_notification sleep_notif;
- struct iwl_spectrum_resp spectrum;
- struct iwl_notif_statistics stats;
- struct iwl_compressed_ba_resp compressed_ba;
- struct iwl_missed_beacon_notif missed_beacon;
+ struct il3945_rx_frame rx_frame;
+ struct il3945_tx_resp tx_resp;
+ struct il3945_beacon_notif beacon_status;
+
+ struct il_alive_resp alive_frame;
+ struct il_spectrum_notification spectrum_notif;
+ struct il_csa_notification csa_notif;
+ struct il_error_resp err_resp;
+ struct il_card_state_notif card_state_notif;
+ struct il_add_sta_resp add_sta;
+ struct il_rem_sta_resp rem_sta;
+ struct il_sleep_notification sleep_notif;
+ struct il_spectrum_resp spectrum;
+ struct il_notif_stats stats;
+ struct il_compressed_ba_resp compressed_ba;
+ struct il_missed_beacon_notif missed_beacon;
__le32 status;
u8 raw[0];
} u;
} __packed;
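
A sketch of how a receive handler could use this layout: recover the payload length from len_n_flags and dispatch on the id byte in the header (the hdr.cmd member is assumed here, since struct il_cmd_header is defined elsewhere):

static void il_example_handle_rx_pkt(struct il_rx_pkt *pkt)
{
	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;

	switch (pkt->hdr.cmd) {	/* assumed id field of il_cmd_header */
	case N_STATS:
		pr_debug("stats notification, %u bytes\n", len);
		break;
	default:
		pr_debug("rx packet 0x%02x, %u bytes\n", pkt->hdr.cmd, len);
	}
}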
-#endif /* __iwl_legacy_commands_h__ */
+#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 00000000000..881ba043770
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5706 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/lockdep.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+const char *
+il_get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IL_CMD(N_ALIVE);
+ IL_CMD(N_ERROR);
+ IL_CMD(C_RXON);
+ IL_CMD(C_RXON_ASSOC);
+ IL_CMD(C_QOS_PARAM);
+ IL_CMD(C_RXON_TIMING);
+ IL_CMD(C_ADD_STA);
+ IL_CMD(C_REM_STA);
+ IL_CMD(C_WEPKEY);
+ IL_CMD(N_3945_RX);
+ IL_CMD(C_TX);
+ IL_CMD(C_RATE_SCALE);
+ IL_CMD(C_LEDS);
+ IL_CMD(C_TX_LINK_QUALITY_CMD);
+ IL_CMD(C_CHANNEL_SWITCH);
+ IL_CMD(N_CHANNEL_SWITCH);
+ IL_CMD(C_SPECTRUM_MEASUREMENT);
+ IL_CMD(N_SPECTRUM_MEASUREMENT);
+ IL_CMD(C_POWER_TBL);
+ IL_CMD(N_PM_SLEEP);
+ IL_CMD(N_PM_DEBUG_STATS);
+ IL_CMD(C_SCAN);
+ IL_CMD(C_SCAN_ABORT);
+ IL_CMD(N_SCAN_START);
+ IL_CMD(N_SCAN_RESULTS);
+ IL_CMD(N_SCAN_COMPLETE);
+ IL_CMD(N_BEACON);
+ IL_CMD(C_TX_BEACON);
+ IL_CMD(C_TX_PWR_TBL);
+ IL_CMD(C_BT_CONFIG);
+ IL_CMD(C_STATS);
+ IL_CMD(N_STATS);
+ IL_CMD(N_CARD_STATE);
+ IL_CMD(N_MISSED_BEACONS);
+ IL_CMD(C_CT_KILL_CONFIG);
+ IL_CMD(C_SENSITIVITY);
+ IL_CMD(C_PHY_CALIBRATION);
+ IL_CMD(N_RX_PHY);
+ IL_CMD(N_RX_MPDU);
+ IL_CMD(N_RX);
+ IL_CMD(N_COMPRESSED_BA);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+EXPORT_SYMBOL(il_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void
+il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("back from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ break;
+ default:
+ D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
+ pkt->hdr.flags);
+ }
+#endif
+}
+
+static int
+il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int ret;
+
+ BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+ /* Assign a generic callback if one is not provided */
+ if (!cmd->callback)
+ cmd->callback = il_generic_cmd_callback;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EBUSY;
+
+ ret = il_enqueue_hcmd(il, cmd);
+ if (ret < 0) {
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+int
+il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int cmd_idx;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ BUG_ON(cmd->flags & CMD_ASYNC);
+
+ /* A synchronous command can not have a callback set. */
+ BUG_ON(cmd->callback);
+
+ D_INFO("Attempting to send sync command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ set_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Setting HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ cmd_idx = il_enqueue_hcmd(il, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ goto out;
+ }
+
+ ret = wait_event_timeout(il->wait_command_queue,
+ !test_bit(S_HCMD_ACTIVE, &il->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(S_HCMD_ACTIVE, &il->status)) {
+ IL_ERR("Error sending %s: time out after %dms.\n",
+ il_get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(S_RF_KILL_HW, &il->status)) {
+ IL_ERR("Command %s aborted: RF KILL Switch\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(S_FW_ERROR, &il->status)) {
+ IL_ERR("Command %s failed: FW Error\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IL_ERR("Error: Response NULL in '%s'\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ ret = 0;
+ goto out;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise, if the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ il_free_pages(il, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_send_cmd_sync);
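+
+/*
+ * Callers that set CMD_WANT_SKB (see il_send_scan_abort() later in this file)
+ * get the response in cmd->reply_page and must release it with
+ * il_free_pages() once they are done with it.
+ */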
+
+int
+il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return il_send_cmd_async(il, cmd);
+
+ return il_send_cmd_sync(il, cmd);
+}
+EXPORT_SYMBOL(il_send_cmd);
+
+int
+il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ return il_send_cmd_sync(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu);
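+
+/*
+ * Typical usage (see il_set_power() later in this file): wrap a fixed-size
+ * command structure and send it synchronously, e.g.
+ * il_send_cmd_pdu(il, C_POWER_TBL, sizeof(struct il_powertable_cmd), cmd);
+ */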
+
+int
+il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt))
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ cmd.flags |= CMD_ASYNC;
+ cmd.callback = callback;
+
+ return il_send_cmd_async(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu_async);
+
+/* default: IL_LED_BLINK(0) using blinking idx table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode,
+ "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
+
+/* Throughput OFF time(ms) ON time (ms)
+ * >300 25 25
+ * >200 to 300 40 40
+ * >100 to 200 55 55
+ * >70 to 100 65 65
+ * >50 to 70 75 75
+ * >20 to 50 85 85
+ * >10 to 20 95 95
+ * >5 to 10 110 110
+ * >1 to 5 130 130
+ * >0 to 1 167 167
+ * <=0 SOLID ON
+ */
+static const struct ieee80211_tpt_blink il_blink[] = {
+ {.throughput = 0, .blink_time = 334},
+ {.throughput = 1 * 1024 - 1, .blink_time = 260},
+ {.throughput = 5 * 1024 - 1, .blink_time = 220},
+ {.throughput = 10 * 1024 - 1, .blink_time = 190},
+ {.throughput = 20 * 1024 - 1, .blink_time = 170},
+ {.throughput = 50 * 1024 - 1, .blink_time = 150},
+ {.throughput = 70 * 1024 - 1, .blink_time = 130},
+ {.throughput = 100 * 1024 - 1, .blink_time = 110},
+ {.throughput = 200 * 1024 - 1, .blink_time = 80},
+ {.throughput = 300 * 1024 - 1, .blink_time = 50},
+};
+
+/*
+ * Adjust led blink rate to compensate for the MAC clock difference on each HW.
+ * Led blink rate analysis showed an average deviation of 0% on 3945 and
+ * 5% on 4965 HW.
+ * We need to compensate the led on/off time per HW according to the deviation
+ * to achieve the desired led frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ * compensation = (100 - averageDeviation) * 64 / 100
+ * NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8
+il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
+{
+ if (!compensation) {
+ IL_ERR("undefined blink compensation: "
+ "use pre-defined blinking time\n");
+ return time;
+ }
+
+ return (u8) ((time * compensation) >> 6);
+}
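+
+/*
+ * Worked example of the compensation above, using the 4965 deviation quoted
+ * in the comment (5%): compensation = (100 - 5) * 64 / 100 = 60, so a
+ * requested blink time of 110 ms becomes (110 * 60) >> 6 = 103 ms.
+ */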
+
+/* Set led pattern command */
+static int
+il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
+{
+ struct il_led_cmd led_cmd = {
+ .id = IL_LED_LINK,
+ .interval = IL_DEF_LED_INTRVL
+ };
+ int ret;
+
+ if (!test_bit(S_READY, &il->status))
+ return -EBUSY;
+
+ if (il->blink_on == on && il->blink_off == off)
+ return 0;
+
+ if (off == 0) {
+ /* led is SOLID_ON */
+ on = IL_LED_SOLID;
+ }
+
+ D_LED("Led blink time compensation=%u\n",
+ il->cfg->base_params->led_compensation);
+ led_cmd.on =
+ il_blink_compensation(il, on,
+ il->cfg->base_params->led_compensation);
+ led_cmd.off =
+ il_blink_compensation(il, off,
+ il->cfg->base_params->led_compensation);
+
+ ret = il->cfg->ops->led->cmd(il, &led_cmd);
+ if (!ret) {
+ il->blink_on = on;
+ il->blink_off = off;
+ }
+ return ret;
+}
+
+static void
+il_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+ unsigned long on = 0;
+
+ if (brightness > 0)
+ on = IL_LED_SOLID;
+
+ il_led_cmd(il, on, 0);
+}
+
+static int
+il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+
+ return il_led_cmd(il, *delay_on, *delay_off);
+}
+
+void
+il_leds_init(struct il_priv *il)
+{
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IL_LED_DEFAULT)
+ mode = il->cfg->led_mode;
+
+ il->led.name =
+ kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+ il->led.brightness_set = il_led_brightness_set;
+ il->led.blink_set = il_led_blink_set;
+ il->led.max_brightness = 1;
+
+ switch (mode) {
+ case IL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IL_LED_BLINK:
+ il->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(il->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ il_blink,
+ ARRAY_SIZE(il_blink));
+ break;
+ case IL_LED_RF_STATE:
+ il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
+ break;
+ }
+
+ ret = led_classdev_register(&il->pci_dev->dev, &il->led);
+ if (ret) {
+ kfree(il->led.name);
+ return;
+ }
+
+ il->led_registered = true;
+}
+EXPORT_SYMBOL(il_leds_init);
+
+void
+il_leds_exit(struct il_priv *il)
+{
+ if (!il->led_registered)
+ return;
+
+ led_classdev_unregister(&il->led);
+ kfree(il->led.name);
+}
+EXPORT_SYMBOL(il_leds_exit);
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The il_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, il_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into il->channel_info_24/52 and il->channel_map_24/52
+ *
+ * channel_map_24/52 provides the idx in the channel_info array for a
+ * given channel. We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware. This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel. There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 il_eeprom_band_1[14] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int
+il_eeprom_verify_signature(struct il_priv *il)
+{
+ u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ int ret = 0;
+
+ D_EEPROM("EEPROM signature=0x%08x\n", gp);
+ switch (gp) {
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ break;
+ default:
+ IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ break;
+ }
+ return ret;
+}
+
+const u8 *
+il_eeprom_query_addr(const struct il_priv *il, size_t offset)
+{
+ BUG_ON(offset >= il->cfg->base_params->eeprom_size);
+ return &il->eeprom[offset];
+}
+EXPORT_SYMBOL(il_eeprom_query_addr);
+
+u16
+il_eeprom_query16(const struct il_priv *il, size_t offset)
+{
+ if (!il->eeprom)
+ return 0;
+ return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(il_eeprom_query16);
+
+/**
+ * il_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into il->eeprom
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int
+il_eeprom_init(struct il_priv *il)
+{
+ __le16 *e;
+ u32 gp = _il_rd(il, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+
+ /* allocate eeprom */
+ sz = il->cfg->base_params->eeprom_size;
+ D_EEPROM("NVM size = %d\n", sz);
+ il->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!il->eeprom) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+ e = (__le16 *) il->eeprom;
+
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ ret = il_eeprom_verify_signature(il);
+ if (ret < 0) {
+ IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
+ if (ret < 0) {
+ IL_ERR("Failed to acquire EEPROM semaphore.\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _il_wr(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret =
+ _il_poll_bit(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IL_ERR("Time out reading EEPROM[%d]\n", addr);
+ goto done;
+ }
+ r = _il_rd(il, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+
+ D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
+ il_eeprom_query16(il, EEPROM_VERSION));
+
+ ret = 0;
+done:
+ il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
+
+err:
+ if (ret)
+ il_eeprom_free(il);
+ /* Reset chip to save power until we load uCode during "up". */
+ il_apm_stop(il);
+alloc_err:
+ return ret;
+}
+EXPORT_SYMBOL(il_eeprom_init);
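+
+/*
+ * Note on the read loop above: each CSR_EEPROM_REG read returns the requested
+ * 16-bit word in the upper half of the 32-bit register, so the word at byte
+ * offset 'addr' is stored as e[addr / 2] = cpu_to_le16(r >> 16), and
+ * il_eeprom_query16() later reassembles it byte-wise (little endian).
+ */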
+
+void
+il_eeprom_free(struct il_priv *il)
+{
+ kfree(il->eeprom);
+ il->eeprom = NULL;
+}
+EXPORT_SYMBOL(il_eeprom_free);
+
+static void
+il_init_band_reference(const struct il_priv *il, int eep_band,
+ int *eeprom_ch_count,
+ const struct il_eeprom_channel **eeprom_ch_info,
+ const u8 **eeprom_ch_idx)
+{
+ u32 offset =
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
+ switch (eep_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_7;
+ break;
+ default:
+ BUG();
+ }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
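+/*
+ * For example, CHECK_AND_PRINT(IBSS) evaluates to "IBSS " when
+ * EEPROM_CHANNEL_IBSS is set in eeprom_ch->flags and to "" otherwise, so the
+ * debug printout below lists only the flags that are actually set.
+ */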
+/**
+ * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il_priv.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int
+il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+ const struct il_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct il_channel_info *ch_info;
+
+ ch_info =
+ (struct il_channel_info *)il_get_channel_info(il, band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -1;
+
+ D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
+
+ ch_info->ht40_eeprom = *eeprom_ch;
+ ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+ ch_info->ht40_flags = eeprom_ch->flags;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &=
+ ~clear_ht40_extension_channel;
+
+ return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+
+/**
+ * il_init_channel_map - Set up driver's info for all possible channels
+ */
+int
+il_init_channel_map(struct il_priv *il)
+{
+ int eeprom_ch_count = 0;
+ const u8 *eeprom_ch_idx = NULL;
+ const struct il_eeprom_channel *eeprom_ch_info = NULL;
+ int band, ch;
+ struct il_channel_info *ch_info;
+
+ if (il->channel_count) {
+ D_EEPROM("Channel map already initialized.\n");
+ return 0;
+ }
+
+ D_EEPROM("Initializing regulatory info from EEPROM\n");
+
+ il->channel_count =
+ ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
+ ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
+ ARRAY_SIZE(il_eeprom_band_5);
+
+ D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
+
+ il->channel_info =
+ kzalloc(sizeof(struct il_channel_info) * il->channel_count,
+ GFP_KERNEL);
+ if (!il->channel_info) {
+ IL_ERR("Could not allocate channel_info\n");
+ il->channel_count = 0;
+ return -ENOMEM;
+ }
+
+ ch_info = il->channel_info;
+
+ /* Loop through the 5 EEPROM bands and add them in order to the
+ * channel map we maintain (which contains additional information
+ * beyond what is just in the EEPROM) */
+ for (band = 1; band <= 5; band++) {
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ ch_info->channel = eeprom_ch_idx[ch];
+ ch_info->band =
+ (band ==
+ 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* permanently store EEPROM's channel regulatory flags
+ * and max power in channel info database. */
+ ch_info->eeprom = eeprom_ch_info[ch];
+
+ /* Copy the run-time flags so they are there even on
+ * invalid channels */
+ ch_info->flags = eeprom_ch_info[ch].flags;
+ /* First write that ht40 is not enabled, and then enable
+ * one by one */
+ ch_info->ht40_extension_channel =
+ IEEE80211_CHAN_NO_HT40;
+
+ if (!(il_is_channel_valid(ch_info))) {
+ D_EEPROM("Ch. %d Flags %x [%sGHz] - "
+ "No traffic\n", ch_info->channel,
+ ch_info->flags,
+ il_is_channel_a_band(ch_info) ? "5.2" :
+ "2.4");
+ ch_info++;
+ continue;
+ }
+
+ /* Initialize regulatory-based run-time data */
+ ch_info->max_power_avg = ch_info->curr_txpow =
+ eeprom_ch_info[ch].max_power_avg;
+ ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+ ch_info->min_power = 0;
+
+ D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch].flags,
+ eeprom_ch_info[ch].max_power_avg,
+ ((eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_RADAR)) ? "" :
+ "not ");
+
+ ch_info++;
+ }
+ }
+
+ /* Check if we do have HT40 channels */
+ if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return 0;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband =
+ (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ /* Set up driver's info for lower half */
+ il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ il_mod_ht40_chan_info(il, ieeeband,
+ eeprom_ch_idx[ch] + 4,
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_channel_map);
+
+/*
+ * il_free_channel_map - undo allocations in il_init_channel_map
+ */
+void
+il_free_channel_map(struct il_priv *il)
+{
+ kfree(il->channel_info);
+ il->channel_count = 0;
+}
+EXPORT_SYMBOL(il_free_channel_map);
+
+/**
+ * il_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct il_channel_info *
+il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+ u16 channel)
+{
+ int i;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ for (i = 14; i < il->channel_count; i++) {
+ if (il->channel_info[i].channel == channel)
+ return &il->channel_info[i];
+ }
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (channel >= 1 && channel <= 14)
+ return &il->channel_info[channel - 1];
+ break;
+ default:
+ BUG();
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(il_get_channel_info);
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct il_power_vec_entry {
+ struct il_powertable_cmd cmd;
+ u8 no_dtim; /* number of skip dtim */
+};
+
+static void
+il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (il->power_data.pci_pm)
+ cmd->flags |= IL_POWER_PCI_PM_MSK;
+
+ D_POWER("Sleep command for CAM\n");
+}
+
+static int
+il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ D_POWER("Sending power/sleep command\n");
+ D_POWER("Flags value = 0x%08X\n", cmd->flags);
+ D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+ D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+ D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+ le32_to_cpu(cmd->sleep_interval[0]),
+ le32_to_cpu(cmd->sleep_interval[1]),
+ le32_to_cpu(cmd->sleep_interval[2]),
+ le32_to_cpu(cmd->sleep_interval[3]),
+ le32_to_cpu(cmd->sleep_interval[4]));
+
+ return il_send_cmd_pdu(il, C_POWER_TBL,
+ sizeof(struct il_powertable_cmd), cmd);
+}
+
+int
+il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
+{
+ int ret;
+ bool update_chains;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Don't update the RX chain when chain noise calibration is running */
+ update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
+ il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
+
+ if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+ return 0;
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+ /* scan complete uses sleep_cmd_next, so it needs to be updated */
+ memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+ if (test_bit(S_SCANNING, &il->status) && !force) {
+ D_INFO("Defer power set mode while scanning\n");
+ return 0;
+ }
+
+ if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+ set_bit(S_POWER_PMI, &il->status);
+
+ ret = il_set_power(il, cmd);
+ if (!ret) {
+ if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+ clear_bit(S_POWER_PMI, &il->status);
+
+ if (il->cfg->ops->lib->update_chain_flags && update_chains)
+ il->cfg->ops->lib->update_chain_flags(il);
+ else if (il->cfg->ops->lib->update_chain_flags)
+ D_POWER("Cannot update the power, chain noise "
+ "calibration running: %d\n",
+ il->chain_noise_data.state);
+
+ memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
+ } else
+ IL_ERR("set power fail, ret = %d", ret);
+
+ return ret;
+}
+
+int
+il_power_update_mode(struct il_priv *il, bool force)
+{
+ struct il_powertable_cmd cmd;
+
+ il_power_sleep_cam_cmd(il, &cmd);
+ return il_power_set_mode(il, &cmd, force);
+}
+EXPORT_SYMBOL(il_power_update_mode);
+
+/* initialize to default */
+void
+il_power_initialize(struct il_priv *il)
+{
+ u16 lctl = il_pcie_link_ctl(il);
+
+ il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+ il->power_data.debug_sleep_level_override = -1;
+
+ memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(il_power_initialize);
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req. This should be set long enough to hear probe responses
+ * from more than one AP. */
+#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
+#define IL_ACTIVE_DWELL_TIME_52 (20)
+
+#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
+#define IL_PASSIVE_DWELL_TIME_52 (10)
+#define IL_PASSIVE_DWELL_BASE (100)
+#define IL_CHANNEL_TUNE_TIME 5
+
+static int
+il_send_scan_abort(struct il_priv *il)
+{
+ int ret;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SCAN_ABORT,
+ .flags = CMD_WANT_SKB,
+ };
+
+ /* Exit instantly with error when device is not ready
+ * to receive scan abort command or is not currently
+ * performing a hardware scan */
+ if (!test_bit(S_READY, &il->status) ||
+ !test_bit(S_GEO_CONFIGURED, &il->status) ||
+ !test_bit(S_SCAN_HW, &il->status) ||
+ test_bit(S_FW_ERROR, &il->status) ||
+ test_bit(S_EXIT_PENDING, &il->status))
+ return -EIO;
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->u.status != CAN_ABORT_STATUS) {
+ /* The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan is
+ * completed. */
+ D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
+ ret = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+ return ret;
+}
+
+static void
+il_complete_scan(struct il_priv *il, bool aborted)
+{
+ /* check if scan was requested from mac80211 */
+ if (il->scan_request) {
+ D_SCAN("Complete scan in mac80211\n");
+ ieee80211_scan_completed(il->hw, aborted);
+ }
+
+ il->scan_vif = NULL;
+ il->scan_request = NULL;
+}
+
+void
+il_force_scan_end(struct il_priv *il)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Forcing scan end while not scanning\n");
+ return;
+ }
+
+ D_SCAN("Forcing scan end\n");
+ clear_bit(S_SCANNING, &il->status);
+ clear_bit(S_SCAN_HW, &il->status);
+ clear_bit(S_SCAN_ABORTING, &il->status);
+ il_complete_scan(il, true);
+}
+
+static void
+il_do_scan_abort(struct il_priv *il)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Not performing scan to abort\n");
+ return;
+ }
+
+ if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan abort in progress\n");
+ return;
+ }
+
+ ret = il_send_scan_abort(il);
+ if (ret) {
+ D_SCAN("Send scan abort failed %d\n", ret);
+ il_force_scan_end(il);
+ } else
+ D_SCAN("Successfully send scan abort\n");
+}
+
+/**
+ * il_scan_cancel - Cancel any currently executing HW scan
+ */
+int
+il_scan_cancel(struct il_priv *il)
+{
+ D_SCAN("Queuing abort scan\n");
+ queue_work(il->workqueue, &il->abort_scan);
+ return 0;
+}
+EXPORT_SYMBOL(il_scan_cancel);
+
+/**
+ * il_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int
+il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+ lockdep_assert_held(&il->mutex);
+
+ D_SCAN("Scan cancel timeout\n");
+
+ il_do_scan_abort(il);
+
+ while (time_before_eq(jiffies, timeout)) {
+ if (!test_bit(S_SCAN_HW, &il->status))
+ break;
+ msleep(20);
+ }
+
+ return test_bit(S_SCAN_HW, &il->status);
+}
+EXPORT_SYMBOL(il_scan_cancel_timeout);
+
+/* Service response to C_SCAN (0x80) */
+static void
+il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanreq_notification *notif =
+ (struct il_scanreq_notification *)pkt->u.raw;
+
+ D_SCAN("Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service N_SCAN_START (0x82) */
+static void
+il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanstart_notification *notif =
+ (struct il_scanstart_notification *)pkt->u.raw;
+ il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+ D_SCAN("Scan start: " "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
+ notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
+}
+
+/* Service N_SCAN_RESULTS (0x83) */
+static void
+il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanresults_notification *notif =
+ (struct il_scanresults_notification *)pkt->u.raw;
+
+ D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
+ "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
+ le32_to_cpu(notif->stats[0]),
+ le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
+#endif
+}
+
+/* Service N_SCAN_COMPLETE (0x84) */
+static void
+il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+ D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+ scan_notif->scanned_channels, scan_notif->tsf_low,
+ scan_notif->tsf_high, scan_notif->status);
+
+ /* The HW is no longer scanning */
+ clear_bit(S_SCAN_HW, &il->status);
+
+ D_SCAN("Scan on %sGHz took %dms\n",
+ (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ jiffies_to_msecs(jiffies - il->scan_start));
+
+ queue_work(il->workqueue, &il->scan_completed);
+}
+
+void
+il_setup_rx_scan_handlers(struct il_priv *il)
+{
+ /* scan handlers */
+ il->handlers[C_SCAN] = il_hdl_scan;
+ il->handlers[N_SCAN_START] = il_hdl_scan_start;
+ il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
+ il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
+}
+EXPORT_SYMBOL(il_setup_rx_scan_handlers);
+
+inline u16
+il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes)
+{
+ if (band == IEEE80211_BAND_5GHZ)
+ return IL_ACTIVE_DWELL_TIME_52 +
+ IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+ else
+ return IL_ACTIVE_DWELL_TIME_24 +
+ IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(il_get_active_dwell_time);
+
+u16
+il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 value;
+
+ u16 passive =
+ (band ==
+ IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_52;
+
+ if (il_is_any_associated(il)) {
+ /*
+ * If we're associated, we clamp the maximum passive
+ * dwell time to be 98% of the smallest beacon interval
+ * (minus 2 * channel tune time)
+ */
+ value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+ if (value > IL_PASSIVE_DWELL_BASE || !value)
+ value = IL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
+ passive = min(value, passive);
+ }
+
+ return passive;
+}
+EXPORT_SYMBOL(il_get_passive_dwell_time);
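+
+/*
+ * Worked example of the clamping above: on 2.4 GHz the default passive dwell
+ * is 100 + 20 = 120 ms. If we are associated and the beacon interval is 100,
+ * value stays at 100 (it is only reset to IL_PASSIVE_DWELL_BASE when larger
+ * or zero), so passive becomes min(100 * 98 / 100 - 2 * 5, 120) = 88.
+ */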
+
+void
+il_init_scan_params(struct il_priv *il)
+{
+ u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
+ if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+ if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(il_init_scan_params);
+
+static int
+il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (WARN_ON(!il->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work(&il->scan_check);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Request scan called when driver not ready.\n");
+ return -EIO;
+ }
+
+ if (test_bit(S_SCAN_HW, &il->status)) {
+ D_SCAN("Multiple concurrent scan requests in parallel.\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan request while abort pending.\n");
+ return -EBUSY;
+ }
+
+ D_SCAN("Starting scan...\n");
+
+ set_bit(S_SCANNING, &il->status);
+ il->scan_start = jiffies;
+
+ ret = il->cfg->ops->utils->request_scan(il, vif);
+ if (ret) {
+ clear_bit(S_SCANNING, &il->status);
+ return ret;
+ }
+
+ queue_delayed_work(il->workqueue, &il->scan_check,
+ IL_SCAN_CHECK_WATCHDOG);
+
+ return 0;
+}
+
+int
+il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ /* mac80211 will only ask for one band at a time */
+ il->scan_request = req;
+ il->scan_vif = vif;
+ il->scan_band = req->channels[0]->band;
+
+ ret = il_scan_initiate(il, vif);
+
+ D_MAC80211("leave\n");
+
+out_unlock:
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_hw_scan);
+
+static void
+il_bg_scan_check(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, scan_check.work);
+
+ D_SCAN("Scan check work\n");
+
+ /* Since we are here, the firmware has not finished the scan and
+ * is most likely in a bad shape, so we don't bother to
+ * send an abort command; just force scan complete to mac80211 */
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+u16
+il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our indirect SSID IE */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16) len;
+}
+EXPORT_SYMBOL(il_fill_probe_req);
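+
+/*
+ * The frame built by il_fill_probe_req() is laid out as:
+ * [24-byte mgmt header, broadcast DA/BSSID][2-byte wildcard SSID IE (len 0)]
+ * [ie_len bytes of caller-supplied IEs], so the returned length is
+ * 24 + 2 + ie_len when everything fits in 'left', and 0 when even the header
+ * or the SSID IE does not fit.
+ */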
+
+static void
+il_bg_abort_scan(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, abort_scan);
+
+ D_SCAN("Abort scan work\n");
+
+ /* We keep scan_check work queued in case the firmware does not
+ * report back a scan completed notification */
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 200);
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il_bg_scan_completed(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, scan_completed);
+ bool aborted;
+
+ D_SCAN("Completed scan.\n");
+
+ cancel_delayed_work(&il->scan_check);
+
+ mutex_lock(&il->mutex);
+
+ aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
+ if (aborted)
+ D_SCAN("Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already completed.\n");
+ goto out_settings;
+ }
+
+ il_complete_scan(il, aborted);
+
+out_settings:
+ /* Can we still talk to firmware ? */
+ if (!il_is_ready_rf(il))
+ goto out;
+
+ /*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
+ il_set_tx_power(il, il->tx_power_next, false);
+
+ il->cfg->ops->utils->post_scan(il);
+
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il_setup_scan_deferred_work(struct il_priv *il)
+{
+ INIT_WORK(&il->scan_completed, il_bg_scan_completed);
+ INIT_WORK(&il->abort_scan, il_bg_abort_scan);
+ INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
+}
+EXPORT_SYMBOL(il_setup_scan_deferred_work);
+
+void
+il_cancel_scan_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->abort_scan);
+ cancel_work_sync(&il->scan_completed);
+
+ if (cancel_delayed_work_sync(&il->scan_check)) {
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+EXPORT_SYMBOL(il_cancel_scan_deferred_work);
+
+/* il->sta_lock must be held */
+static void
+il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
+{
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+ IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, il->stations[sta_id].sta.sta.addr);
+
+ if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+ D_ASSOC("STA id %u addr %pM already present"
+ " in uCode (according to driver)\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ } else {
+ il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+ D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int
+il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
+ struct il_rx_pkt *pkt, bool sync)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
+ return ret;
+ }
+
+ D_INFO("Processing response for adding station %u\n", sta_id);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ D_INFO("C_ADD_STA PASSED\n");
+ il_sta_ucode_activate(il, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TBL:
+ IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IL_ERR("Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IL_ERR("Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
+ break;
+ }
+
+ D_INFO("%s station id %u addr %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ D_INFO("%s station according to cmd buffer %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static void
+il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
+
+ il_process_add_sta_resp(il, addsta, pkt, false);
+
+}
+
+int
+il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
+{
+ struct il_rx_pkt *pkt = NULL;
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct il_host_cmd cmd = {
+ .id = C_ADD_STA,
+ .flags = flags,
+ .data = data,
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
+ flags & CMD_ASYNC ? "a" : "");
+
+ if (flags & CMD_ASYNC)
+ cmd.callback = il_add_sta_callback;
+ else {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+
+ if (ret == 0) {
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ ret = il_process_add_sta_resp(il, sta, pkt, true);
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_send_add_sta);
+
+static void
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
+ struct il_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ D_ASSOC("spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+ "disabled");
+
+ sta_flags = il->stations[idx].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ il->stations[idx].sta.station_flags = sta_flags;
+done:
+ return;
+}
+
+/**
+ * il_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8
+il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct il_station_entry *station;
+ int i;
+ u8 sta_id = IL_INVALID_STATION;
+ u16 rate;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+ if (!compare_ether_addr
+ (il->stations[i].sta.sta.addr, addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!il->stations[i].used &&
+ sta_id == IL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ return sta_id;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ return sta_id;
+ }
+
+ station = &il->stations[sta_id];
+ station->used = IL_STA_DRIVER_ACTIVE;
+ D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
+ il->num_stations++;
+
+ /* Set up the C_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct il_station_priv_common *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ il_set_ht_add_station(il, sta_id, sta, ctx);
+
+ /* 3945 only */
+ rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+ /* Turn on both antennas for the station... */
+ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+ return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(il_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * il_add_station_common - Add a station to the driver's and device's station tables
+ */
+int
+il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
+ u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare station %pM for addition\n", addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[sta_id].sta.sta.addr);
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(il_add_station_common);
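+
+/*
+ * For stations using rate scaling, the IL_STA_UCODE_INPROGRESS bit set above
+ * is cleared once the initial LQ command completes; see il_send_lq_cmd()
+ * with init == true later in this file.
+ */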
+
+/**
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * il->sta_lock must be held
+ */
+static void
+il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
+{
+ /* uCode must be active and driver must be inactive */
+ if ((il->stations[sta_id].
+ used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+ IL_STA_UCODE_ACTIVE)
+ IL_ERR("removed non active STA %u\n", sta_id);
+
+ il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
+
+ memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+ D_ASSOC("Removed STA %u\n", sta_id);
+}
+
+static int
+il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
+ bool temporary)
+{
+ struct il_rx_pkt *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct il_rem_sta_cmd rm_sta_cmd;
+
+ struct il_host_cmd cmd = {
+ .id = C_REM_STA,
+ .len = sizeof(struct il_rem_sta_cmd),
+ .flags = CMD_SYNC,
+ .data = &rm_sta_cmd,
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il_sta_ucode_deactivate(il, sta_id);
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ D_ASSOC("C_REM_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IL_ERR("C_REM_STA failed\n");
+ break;
+ }
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * il_remove_station - Remove driver's knowledge of station.
+ */
+int
+il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
+{
+ unsigned long flags;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
+
+ if (WARN_ON(sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ D_INFO("Removing %pM but non DRIVER active\n", addr);
+ goto out_err;
+ }
+
+ if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_INFO("Removing %pM but non UCODE active\n", addr);
+ goto out_err;
+ }
+
+ if (il->stations[sta_id].used & IL_STA_LOCAL) {
+ kfree(il->stations[sta_id].lq);
+ il->stations[sta_id].lq = NULL;
+ }
+
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+
+ il->num_stations--;
+
+ BUG_ON(il->num_stations < 0);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_remove_station(il, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(il_remove_station);
+
+/**
+ * il_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void
+il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ D_INFO("Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx && ctx->ctxid != il->stations[i].ctxid)
+ continue;
+
+ if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+ D_INFO("Clearing ucode active for station %d\n", i);
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ if (!cleared)
+ D_INFO("No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(il_clear_ucode_stations);
+
+/**
+ * il_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_addsta_cmd sta_cmd;
+ struct il_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ D_ASSOC("Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx->ctxid != il->stations[i].ctxid)
+ continue;
+ if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+ !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("Restoring sta %pM\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].sta.mode = 0;
+ il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &il->stations[i].sta,
+ sizeof(struct il_addsta_cmd));
+ send_lq = false;
+ if (il->stations[i].lq) {
+ memcpy(&lq, il->stations[i].lq,
+ sizeof(struct il_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[i].used &=
+ ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ if (!found)
+ D_INFO("Restoring all known stations"
+ " .... no stations to be restored.\n");
+ else
+ D_INFO("Restoring all known stations" " .... complete.\n");
+}
+EXPORT_SYMBOL(il_restore_stations);
+
+int
+il_get_free_ucode_key_idx(struct il_priv *il)
+{
+ int i;
+
+ for (i = 0; i < il->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &il->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
+
+void
+il_dealloc_bcast_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (!(il->stations[i].used & IL_STA_BCAST))
+ continue;
+
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ il->num_stations--;
+ BUG_ON(il->num_stations < 0);
+ kfree(il->stations[i].lq);
+ il->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+ int i;
+ D_RATE("lq station id 0x%x\n", lq->sta_id);
+ D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending the LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool
+il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+ D_INFO("idx %d of LQ expects HT channel\n", i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * il_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set and we call a callback in
+ * this case to clear the state indicating that station creation is in
+ * progress.
+ */
+int
+il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct il_host_cmd cmd = {
+ .id = C_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct il_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
+
+ if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ il_dump_lq_cmd(il, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+ if (il_is_lq_table_valid(il, ctx, lq))
+ ret = il_send_cmd(il, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ D_INFO("init LQ command complete,"
+ " clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_send_lq_cmd);
+
+int
+il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ D_INFO("received request to remove station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to remove station %pM\n", sta->addr);
+ ret = il_remove_station(il, sta_common->sta_id, sta->addr);
+ if (ret)
+ IL_ERR("Error removing station %pM\n", sta->addr);
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_sta_remove);
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which points to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of idxes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot in which the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1, wrapped).
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in il->rxq->rx_free. When
+ * il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the il->rxq->rx_free.
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * il->rxq is replenished and the READ IDX is updated (updating the
+ * 'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the il->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in il->rxq->rx_free, the READ
+ * IDX is not incremented and il->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set, it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il_rx_queue_alloc() Allocates rx_free
+ * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il_rx_queue_restock
+ * il_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
+/**
+ * il_rx_queue_space - Return number of free slots available in queue.
+ */
+int
+il_rx_queue_space(const struct il_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_rx_queue_space);
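+
+/*
+ * Worked example for the arithmetic above (illustrative only, assuming
+ * RX_QUEUE_SIZE is 256): with q->read = 10 and q->write = 250,
+ * s = 10 - 250 = -240, wrapping gives -240 + 256 = 16, and after the
+ * two-slot reserve il_rx_queue_space() reports 14 restockable slots.
+ */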
+
+/**
+ * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
+{
+ unsigned long flags;
+ u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
+ reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+ }
+
+ q->need_update = 0;
+
+exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
+
+int
+il_rx_queue_alloc(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ spin_lock_init(&rxq->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd =
+ dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ rxq->rb_stts =
+ dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ rxq->need_update = 0;
+ return 0;
+
+err_rb:
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+err_bd:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_rx_queue_alloc);
+
+void
+il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ D_11H("Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&il->measure_report, report, sizeof(*report));
+ il->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(il_hdl_spectrum_measurement);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int
+il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats)
+{
+ u16 fc = le16_to_cpu(hdr->frame_control);
+
+ /*
+ * All contexts have the same setting here due to it being
+ * a module parameter, so OK to check any context.
+ */
+ if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+ return 0;
+
+ if (!(fc & IEEE80211_FCTL_PROTECTED))
+ return 0;
+
+ D_RX("decrypt_res:0x%x\n", decrypt_res);
+ switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ /* The uCode has got a bad phase 1 key, so it pushes the packet up.
+ * Decryption will be done in SW. */
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_KEY_TTAK)
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_WEP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_ICV_MIC) {
+ /* bad ICV, the packet is destroyed since the
+ * decryption is done in place, drop it */
+ D_RX("Packet destroyed\n");
+ return -1;
+ }
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_DECRYPT_OK) {
+ D_RX("hw decrypt successfully!!!\n");
+ stats->flag |= RX_FLAG_DECRYPTED;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_set_decrypted_flag);
+
+/**
+ * il_txq_update_write_ptr - Send new write idx to hardware
+ */
+void
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ if (txq->need_update == 0)
+ return;
+
+ /* if we're trying to save power */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
+ txq_id, reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+ txq->need_update = 0;
+}
+EXPORT_SYMBOL(il_txq_update_write_ptr);
+
+/**
+ * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void
+il_tx_queue_unmap(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
+EXPORT_SYMBOL(il_tx_queue_unmap);
+
+/**
+ * il_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_tx_queue_free(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_tx_queue_unmap(il, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_tx_queue_free);
+
+/**
+ * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void
+il_cmd_queue_unmap(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ int i;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->read_ptr != q->write_ptr) {
+ i = il_get_cmd_idx(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+
+ i = q->n_win;
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+}
+EXPORT_SYMBOL(il_cmd_queue_unmap);
+
+/**
+ * il_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_cmd_queue_free(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_cmd_queue_unmap(il);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits. If, after
+ * queuing a packet for Tx, free space becomes < low mark, the Tx queue is
+ * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
+ * becomes > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in 4965.h.
+ ***************************************************/
+
+int
+il_queue_space(const struct il_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_win;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_queue_space);
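+
+/*
+ * Worked example (illustrative only): with n_bd = 256, n_win = 64,
+ * read_ptr = 10 and write_ptr = 20, il_queue_space() computes
+ * s = 10 - 20 = -10, wraps within the window to -10 + 64 = 54, and after
+ * the two-entry reserve reports 52 usable slots.
+ */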
+
+
+/**
+ * il_queue_init - Initialize queue's high/low-water and read/write idxes
+ */
+static int
+il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
+ u32 id)
+{
+ q->n_bd = count;
+ q->n_win = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise il_queue_inc_wrap
+ * and il_queue_dec_wrap are broken. */
+ BUG_ON(!is_power_of_2(count));
+
+ /* slots_num must be power-of-two size, otherwise
+ * il_get_cmd_idx is broken. */
+ BUG_ON(!is_power_of_2(slots_num));
+
+ q->low_mark = q->n_win / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_win / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = q->read_ptr = 0;
+
+ return 0;
+}
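+
+/*
+ * For example (illustrative only): a queue initialized with slots_num = 64
+ * gets low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8, while with
+ * slots_num = 8 the clamps apply and low_mark = 4, high_mark = 2.
+ */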
+
+/**
+ * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int
+il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
+{
+ struct device *dev = &il->pci_dev->dev;
+ size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (id != il->cmd_queue) {
+ txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
+ GFP_KERNEL);
+ if (!txq->txb) {
+ IL_ERR("kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->txb = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds =
+ dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = id;
+
+ return 0;
+
+error:
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * il_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int
+il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int i, len;
+ int ret;
+ int actual_slots = slots_num;
+
+ /*
+ * Alloc buffer array for commands (Tx or other types of commands).
+ * For the command queue (#4/#9), allocate command space + one big
+ * command for scan, since the scan command is very large; the system will
+ * not have two scans at the same time, so only one is needed.
+ * For normal Tx queues (all other queues), no super-size command
+ * space is needed.
+ */
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ txq->meta =
+ kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
+ txq->cmd =
+ kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct il_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len = IL_MAX_CMD_SIZE;
+
+ txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto err;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ ret = il_tx_queue_alloc(il, txq, txq_id);
+ if (ret)
+ goto err;
+
+ txq->need_update = 0;
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ il_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+
+ return 0;
+err:
+ for (i = 0; i < actual_slots; i++)
+ kfree(txq->cmd[i]);
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_tx_queue_init);
+
+void
+il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+}
+EXPORT_SYMBOL(il_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * il_enqueue_hcmd - enqueue a uCode command
+ * @il: device private data pointer
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the non-negative idx of the command in the
+ * command queue.
+ */
+int
+il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ int len;
+ u32 idx;
+ u16 fix_size;
+
+ cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+ fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
+
+ /* If any of the command structures end up being larger than
+ * the TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
+ * we will need to increase the size of the TFD entries.
+ * Also check that the command buffer does not exceed the size
+ * of device_cmd and IL_MAX_CMD_SIZE. */
+ BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+ !(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IL_MAX_CMD_SIZE);
+
+ if (il_is_rfkill(il) || il_is_ctkill(il)) {
+ IL_WARN("Not sending command - %s KILL\n",
+ il_is_rfkill(il) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+
+ IL_ERR("Restarting adapter due to command queue full\n");
+ queue_work(il->workqueue, &il->restart);
+ return -ENOSPC;
+ }
+
+ idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return -ENOSPC;
+ }
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ out_meta->flags = cmd->flags | CMD_MAPPED;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
+
+ out_cmd->hdr.cmd = cmd->id;
+ memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+ /* At this point, the out_cmd now has all of the incoming cmd
+ * information */
+
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
+ if (cmd->flags & CMD_SIZE_HUGE)
+ out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+ len = sizeof(struct il_device_cmd);
+ if (idx == TFD_CMD_SLOTS)
+ len = IL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (out_cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, il->cmd_queue);
+ break;
+ default:
+ D_HC("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
+ idx, il->cmd_queue);
+ }
+#endif
+ txq->need_update = 1;
+
+ if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
+
+ phys_addr =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, fix_size);
+
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
+ 1, U32_PAD(cmd->len));
+
+ /* Increment and update queue's write idx */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return idx;
+}
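+
+/*
+ * Hypothetical caller sketch for the return convention documented above
+ * (the variable names are illustrative): a caller does
+ *
+ *	idx = il_enqueue_hcmd(il, &cmd);
+ *	if (idx < 0)
+ *		return idx;
+ *
+ * and on success treats the non-negative idx as the command's slot in the
+ * command queue.
+ */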
+
+/**
+ * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ int nfreed = 0;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
+ q->write_ptr, q->read_ptr);
+ queue_work(il->workqueue, &il->restart);
+ }
+
+ }
+}
+
+/**
+ * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ int cmd_idx;
+ bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+ struct il_device_cmd *cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ unsigned long flags;
+
+ /* If a Tx command is being handled and it isn't in the actual
+ * command queue, then a command routing bug has been introduced
+ * in the queue management code. */
+ if (WARN
+ (txq_id != il->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
+ il->txq[il->cmd_queue].q.write_ptr)) {
+ il_print_hex_error(il, pkt, 32);
+ return;
+ }
+
+ cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
+ cmd = txq->cmd[cmd_idx];
+ meta = &txq->meta[cmd_idx];
+
+ txq->time_stamp = jiffies;
+
+ pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ rxb->page = NULL;
+ } else if (meta->callback)
+ meta->callback(il, cmd, pkt);
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->hdr.cmd));
+ wake_up(&il->wait_command_queue);
+ }
+
+ /* Mark as unmapped */
+ meta->flags = 0;
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+}
+EXPORT_SYMBOL(il_tx_cmd_complete);
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is set to true, the uCode will do kill/defer
+ * every time the priority line is asserted (BT is sending signals on the
+ * priority line in the PCIx).
+ * If bt_coex_active is set to false, the uCode will ignore the BT activity
+ * and perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to a WiFi/BT
+ * co-existence problem. The possible symptoms are:
+ * able to scan and find all the available APs, but
+ * not able to associate with any AP.
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
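+
+/*
+ * Example usage (illustrative; substitute the name of the module that
+ * actually carries this parameter on your system):
+ *
+ *	modprobe iwlegacy bt_coex_active=0
+ *
+ * or an equivalent "options" line in /etc/modprobe.d/, to disable the
+ * WiFi/BT co-existence handling described above.
+ */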
+
+u32 il_debug_level;
+EXPORT_SYMBOL(il_debug_level);
+
+const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(il_bcast_addr);
+
+/* This function both allocates and initializes hw and il. */
+struct ieee80211_hw *
+il_alloc_all(struct il_cfg *cfg)
+{
+ struct il_priv *il;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct il_priv),
+ cfg->ops->ieee80211_ops);
+ if (hw == NULL) {
+ pr_err("%s: Can not allocate network device\n", cfg->name);
+ goto out;
+ }
+
+ il = hw->priv;
+ il->hw = hw;
+
+out:
+ return hw;
+}
+EXPORT_SYMBOL(il_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void
+il_init_ht_hw_capab(const struct il_priv *il,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ u16 max_bit_rate = 0;
+ u8 rx_chains_num = il->hw_params.rx_chains_num;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+
+ ht_info->cap = 0;
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+ ht_info->ht_supported = true;
+
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ if (il->hw_params.ht40_channel & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+ ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains_num >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains_num >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains_num;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains_num != rx_chains_num) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |=
+ ((tx_chains_num -
+ 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
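+
+/*
+ * For example (illustrative only): a device with two Rx chains on a band
+ * where HT40 is allowed advertises rx_highest = 2 * MAX_BIT_RATE_40_MHZ =
+ * 300 Mbps; with a single chain on a 20 MHz-only band it advertises
+ * 72 Mbps.
+ */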
+
+/**
+ * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
+ */
+int
+il_init_geos(struct il_priv *il)
+{
+ struct il_channel_info *ch;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *channels;
+ struct ieee80211_channel *geo_ch;
+ struct ieee80211_rate *rates;
+ int i = 0;
+ s8 max_tx_power = 0;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+ il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ D_INFO("Geography modes already initialized.\n");
+ set_bit(S_GEO_CONFIGURED, &il->status);
+ return 0;
+ }
+
+ channels =
+ kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ rates =
+ kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
+ GFP_KERNEL);
+ if (!rates) {
+ kfree(channels);
+ return -ENOMEM;
+ }
+
+ /* 5.2GHz channels start after the 2.4GHz channels */
+ sband = &il->bands[IEEE80211_BAND_5GHZ];
+ sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
+ /* just OFDM */
+ sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
+ sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ sband = &il->bands[IEEE80211_BAND_2GHZ];
+ sband->channels = channels;
+ /* OFDM & CCK */
+ sband->bitrates = rates;
+ sband->n_bitrates = RATE_COUNT_LEGACY;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ il->ieee_channels = channels;
+ il->ieee_rates = rates;
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch = &il->channel_info[i];
+
+ if (!il_is_channel_valid(ch))
+ continue;
+
+ sband = &il->bands[ch->band];
+
+ geo_ch = &sband->channels[sband->n_channels++];
+
+ geo_ch->center_freq =
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
+ geo_ch->max_power = ch->max_power_avg;
+ geo_ch->max_antenna_gain = 0xff;
+ geo_ch->hw_value = ch->channel;
+
+ if (il_is_channel_valid(ch)) {
+ if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+ geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+ geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch->flags & EEPROM_CHANNEL_RADAR)
+ geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+ geo_ch->flags |= ch->ht40_extension_channel;
+
+ if (ch->max_power_avg > max_tx_power)
+ max_tx_power = ch->max_power_avg;
+ } else {
+ geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
+ geo_ch->center_freq,
+ il_is_channel_a_band(ch) ? "5.2" : "2.4",
+ geo_ch->
+ flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
+ geo_ch->flags);
+ }
+
+ il->tx_power_device_lmt = max_tx_power;
+ il->tx_power_user_lmt = max_tx_power;
+ il->tx_power_next = max_tx_power;
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+ (il->cfg->sku & IL_SKU_A)) {
+ IL_INFO("Incorrectly detected BG card as ABG. "
+ "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+ il->pci_dev->device, il->pci_dev->subsystem_device);
+ il->cfg->sku &= ~IL_SKU_A;
+ }
+
+ IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+ il->bands[IEEE80211_BAND_2GHZ].n_channels,
+ il->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+ set_bit(S_GEO_CONFIGURED, &il->status);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_geos);
+
+/*
+ * il_free_geos - undo allocations in il_init_geos
+ */
+void
+il_free_geos(struct il_priv *il)
+{
+ kfree(il->ieee_channels);
+ kfree(il->ieee_rates);
+ clear_bit(S_GEO_CONFIGURED, &il->status);
+}
+EXPORT_SYMBOL(il_free_geos);
+
+static bool
+il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+ u16 channel, u8 extension_chan_offset)
+{
+ const struct il_channel_info *ch_info;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info))
+ return false;
+
+ if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
+ else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
+
+ return false;
+}
+
+bool
+il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ return false;
+
+ /*
+ * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ * the bit will not be set if it is a pure 40 MHz case.
+ */
+ if (ht_cap && !ht_cap->ht_supported)
+ return false;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ if (il->disable_ht40)
+ return false;
+#endif
+
+ return il_is_channel_extension(il, il->band,
+ le16_to_cpu(ctx->staging.channel),
+ ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(il_is_ht40_tx_allowed);
+
+static u16
+il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+ u16 new_val;
+ u16 beacon_factor;
+
+ /*
+ * If mac80211 hasn't given us a beacon interval, program
+ * the default into the device.
+ */
+ if (!beacon_val)
+ return DEFAULT_BEACON_INTERVAL;
+
+ /*
+ * If the beacon interval we obtained from the peer
+ * is too large, we'll have to wake up more often
+ * (and in IBSS case, we'll beacon too much)
+ *
+ * For example, if max_beacon_val is 4096, and the
+ * requested beacon interval is 7000, we'll have to
+ * use 3500 to be able to wake up on the beacons.
+ *
+ * This could badly influence beacon detection stats.
+ */
+
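+ /*
+ * Following the example in the comment above: beacon_factor =
+ * (7000 + 4096) / 4096 = 2, so new_val = 7000 / 2 = 3500.
+ */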
+ beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+ new_val = beacon_val / beacon_factor;
+
+ if (!new_val)
+ new_val = max_beacon_val;
+
+ return new_val;
+}
+
+int
+il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ u64 tsf;
+ s32 interval_tm, rem;
+ struct ieee80211_conf *conf = NULL;
+ u16 beacon_int;
+ struct ieee80211_vif *vif = ctx->vif;
+
+ conf = &il->hw->conf;
+
+ lockdep_assert_held(&il->mutex);
+
+ memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
+
+ ctx->timing.timestamp = cpu_to_le64(il->timestamp);
+ ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+ beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+ /*
+ * TODO: For IBSS we need to get atim_win from mac80211,
+ * for now just always use 0
+ */
+ ctx->timing.atim_win = 0;
+
+ beacon_int =
+ il_adjust_beacon_interval(beacon_int,
+ il->hw_params.max_beacon_itrvl *
+ TIME_UNIT);
+ ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+ tsf = il->timestamp; /* tsf is modified by do_div: copy it */
+ interval_tm = beacon_int * TIME_UNIT;
+ rem = do_div(tsf, interval_tm);
+ ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+ ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+
+ D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
+ le16_to_cpu(ctx->timing.beacon_interval),
+ le32_to_cpu(ctx->timing.beacon_init_val),
+ le16_to_cpu(ctx->timing.atim_win));
+
+ return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
+ &ctx->timing);
+}
+EXPORT_SYMBOL(il_send_rxon_timing);
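+
+/*
+ * Worked example for the timing arithmetic above (illustrative, assuming
+ * TIME_UNIT is 1024 usec): with beacon_int = 100, interval_tm = 102400;
+ * if the current timestamp is 1000000 usec, then rem = 1000000 % 102400 =
+ * 78400 and beacon_init_val = 102400 - 78400 = 24000 usec until the next
+ * beacon boundary.
+ */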
+
+void
+il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (hw_decrypt)
+ rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+ else
+ rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(il_set_rxon_hwcrypto);
+
+/* check that the RXON structure is valid */
+int
+il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+ bool error = false;
+
+ if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+ if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+ IL_WARN("check 2.4G: wrong narrow\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+ IL_WARN("check 2.4G: wrong radar\n");
+ error = true;
+ }
+ } else {
+ if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("check 5.2G: not short slot!\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_CCK_MSK) {
+ IL_WARN("check 5.2G: CCK!\n");
+ error = true;
+ }
+ }
+ if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+ IL_WARN("mac/bssid mcast!\n");
+ error = true;
+ }
+
+ /* make sure basic rates 6Mbps and 1Mbps are supported */
+ if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
+ (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
+ IL_WARN("neither 1 nor 6 are basic\n");
+ error = true;
+ }
+
+ if (le16_to_cpu(rxon->assoc_id) > 2007) {
+ IL_WARN("aid > 2007\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("CCK and short slot\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+ IL_WARN("CCK and auto detect");
+ error = true;
+ }
+
+ if ((rxon->
+ flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
+ RXON_FLG_TGG_PROTECT_MSK) {
+ IL_WARN("TGg but no auto-detect\n");
+ error = true;
+ }
+
+ if (error) {
+ IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
+ IL_ERR("Invalid RXON\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_check_rxon_cmd);
+
+/**
+ * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @il: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int
+il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_rxon_cmd *staging = &ctx->staging;
+ const struct il_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond) \
+ if ((cond)) { \
+ D_INFO("need full RXON - " #cond "\n"); \
+ return 1; \
+ }
+
+#define CHK_NEQ(c1, c2) \
+ if ((c1) != (c2)) { \
+ D_INFO("need full RXON - " \
+ #c1 " != " #c2 " - %d != %d\n", \
+ (c1), (c2)); \
+ return 1; \
+ }
+
+ /* These items are only settable from the full RXON command */
+ CHK(!il_is_associated_ctx(ctx));
+ CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+ CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+ CHK(compare_ether_addr
+ (staging->wlap_bssid_addr, active->wlap_bssid_addr));
+ CHK_NEQ(staging->dev_type, active->dev_type);
+ CHK_NEQ(staging->channel, active->channel);
+ CHK_NEQ(staging->air_propagation, active->air_propagation);
+ CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+ active->ofdm_ht_single_stream_basic_rates);
+ CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+ active->ofdm_ht_dual_stream_basic_rates);
+ CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+ /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+ * be updated with the RXON_ASSOC command -- however only some
+ * flag transitions are allowed using RXON_ASSOC */
+
+ /* Check if we are not switching bands */
+ CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+ active->flags & RXON_FLG_BAND_24G_MSK);
+
+ /* Check if we are switching association toggle */
+ CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+ active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+ return 0;
+}
+EXPORT_SYMBOL(il_full_rxon_required);
+
+u8
+il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+ return RATE_1M_PLCP;
+ else
+ return RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(il_get_lowest_plcp);
+
+static void
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
+ struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (!ctx->ht.enabled) {
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
+ | RXON_FLG_HT_PROT_MSK);
+ return;
+ }
+
+ rxon->flags |=
+ cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+ /* Set up channel bandwidth:
+ * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+ /* clear the HT channel mode before setting the mode */
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
+ /* pure ht40 */
+ if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ }
+ } else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ default:
+ /* channel location only valid if in Mixed mode */
+ IL_ERR("invalid extension channel offset\n");
+ break;
+ }
+ }
+ } else {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+ }
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ D_ASSOC("rxon flags 0x%X operation mode :0x%X "
+ "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
+ ctx->ht.protection, ctx->ht.extension_chan_offset);
+}
+
+void
+il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+ _il_set_rxon_ht(il, ht_conf, &il->ctx);
+}
+EXPORT_SYMBOL(il_set_rxon_ht);
+
+/* Return valid, unused, channel for a passive scan to reset the RF */
+u8
+il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+{
+ const struct il_channel_info *ch_info;
+ int i;
+ u8 channel = 0;
+ u8 min, max;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ min = 14;
+ max = il->channel_count;
+ } else {
+ min = 0;
+ max = 14;
+ }
+
+ for (i = min; i < max; i++) {
+ channel = il->channel_info[i].channel;
+ if (channel == le16_to_cpu(il->ctx.staging.channel))
+ continue;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (il_is_channel_valid(ch_info))
+ break;
+ }
+
+ return channel;
+}
+EXPORT_SYMBOL(il_get_single_channel_number);
+
+/**
+ * il_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE: Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx)
+{
+ enum ieee80211_band band = ch->band;
+ u16 channel = ch->hw_value;
+
+ if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
+ return 0;
+
+ ctx->staging.channel = cpu_to_le16(channel);
+ if (band == IEEE80211_BAND_5GHZ)
+ ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+ il->band = band;
+
+ D_INFO("Staging channel set to %d [%d]\n", channel, band);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_set_rxon_channel);
+
+void
+il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif)
+{
+ if (band == IEEE80211_BAND_5GHZ) {
+ ctx->staging.flags &=
+ ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_CCK_MSK);
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ } else {
+ /* Copied from il_post_associate() */
+ if (vif && vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+ ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+ }
+}
+EXPORT_SYMBOL(il_set_flags_for_band);
+
+/*
+ * initialize the RXON structure with default values from the EEPROM
+ */
+void
+il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_channel_info *ch_info;
+
+ memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+ if (!ctx->vif) {
+ ctx->staging.dev_type = ctx->unused_devtype;
+ } else
+ switch (ctx->vif->type) {
+
+ case NL80211_IFTYPE_STATION:
+ ctx->staging.dev_type = ctx->station_devtype;
+ ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ case NL80211_IFTYPE_ADHOC:
+ ctx->staging.dev_type = ctx->ibss_devtype;
+ ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ ctx->staging.filter_flags =
+ RXON_FILTER_BCON_AWARE_MSK |
+ RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ default:
+ IL_ERR("Unsupported interface type %d\n",
+ ctx->vif->type);
+ break;
+ }
+
+#if 0
+ /* TODO: Figure out when short_preamble would be set and cache from
+ * that */
+ if (!hw_to_local(il->hw)->short_preamble)
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+ ch_info =
+ il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
+
+ if (!ch_info)
+ ch_info = &il->channel_info[0];
+
+ ctx->staging.channel = cpu_to_le16(ch_info->channel);
+ il->band = ch_info->band;
+
+ il_set_flags_for_band(il, ctx, il->band, ctx->vif);
+
+ ctx->staging.ofdm_basic_rates =
+ (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+ ctx->staging.cck_basic_rates =
+ (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ /* clear both MIX and PURE40 mode flag */
+ ctx->staging.flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
+ if (ctx->vif)
+ memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+ ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+ ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(il_connection_init_rx_config);
+
+void
+il_set_rate(struct il_priv *il)
+{
+ const struct ieee80211_supported_band *hw = NULL;
+ struct ieee80211_rate *rate;
+ int i;
+
+ hw = il_get_hw_mode(il, il->band);
+ if (!hw) {
+ IL_ERR("Failed to set rate: unable to get hw mode\n");
+ return;
+ }
+
+ il->active_rate = 0;
+
+ for (i = 0; i < hw->n_bitrates; i++) {
+ rate = &(hw->bitrates[i]);
+ if (rate->hw_value < RATE_COUNT_LEGACY)
+ il->active_rate |= (1 << rate->hw_value);
+ }
+
+ D_RATE("Set active_rate = %0x\n", il->active_rate);
+
+ il->ctx.staging.cck_basic_rates =
+ (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ il->ctx.staging.ofdm_basic_rates =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+}
+EXPORT_SYMBOL(il_set_rate);
+
+void
+il_chswitch_done(struct il_priv *il, bool is_success)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ ieee80211_chswitch_done(ctx->vif, is_success);
+}
+EXPORT_SYMBOL(il_chswitch_done);
+
+void
+il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_csa_notification *csa = &(pkt->u.csa_notif);
+
+ struct il_rxon_context *ctx = &il->ctx;
+ struct il_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
+ il_chswitch_done(il, true);
+ } else {
+ IL_ERR("CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ il_chswitch_done(il, false);
+ }
+}
+EXPORT_SYMBOL(il_hdl_csa);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ D_RADIO("RX CONFIG:\n");
+ il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+ D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
+ D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+ D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
+ D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
+ D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
+ D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
+ D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
+ D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+ D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(il_print_rx_config_cmd);
+#endif
+/**
+ * il_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void
+il_irq_handle_error(struct il_priv *il)
+{
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+
+ /* Cancel currently queued command. */
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+
+ IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
+
+ il->cfg->ops->lib->dump_nic_error_log(il);
+ if (il->cfg->ops->lib->dump_fh)
+ il->cfg->ops->lib->dump_fh(il, NULL, false);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
+ il_print_rx_config_cmd(il, &il->ctx);
+#endif
+
+ wake_up(&il->wait_command_queue);
+
+ /* Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit */
+ clear_bit(S_READY, &il->status);
+
+ if (!test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_DBG(IL_DL_FW_ERRORS,
+ "Restarting adapter due to uCode error.\n");
+
+ if (il->cfg->mod_params->restart_fw)
+ queue_work(il->workqueue, &il->restart);
+ }
+}
+EXPORT_SYMBOL(il_irq_handle_error);
+
+static int
+il_apm_stop_master(struct il_priv *il)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret =
+ _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IL_WARN("Master Disable Timed Out, 100 usec\n");
+
+ D_INFO("stop master\n");
+
+ return ret;
+}
+
+void
+il_apm_stop(struct il_priv *il)
+{
+ D_INFO("Stop card, put in low power state\n");
+
+ /* Stop device's DMA activity */
+ il_apm_stop_master(il);
+
+ /* Reset the entire device */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(il_apm_stop);
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int
+il_apm_init(struct il_priv *il)
+{
+ int ret = 0;
+ u16 lctl;
+
+ D_INFO("Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ * NOTE: This is no-op for 3945 (non-existent bit)
+ */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ if (il->cfg->base_params->set_l0s) {
+ lctl = il_pcie_link_ctl(il);
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ il_set_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ il_clear_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Disabled; Enabling L0S\n");
+ }
+ }
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (il->cfg->base_params->pll_cfg_val)
+ il_set_bit(il, CSR_ANA_PLL_CFG,
+ il->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. il_wr_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ D_INFO("Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+ * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (il->cfg->base_params->use_bsm)
+ il_wr_prph(il, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+ else
+ il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_apm_init);
+
+int
+il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
+{
+ int ret;
+ s8 prev_tx_power;
+ bool defer;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->tx_power_user_lmt == tx_power && !force)
+ return 0;
+
+ if (!il->cfg->ops->lib->send_tx_power)
+ return -EOPNOTSUPP;
+
+ /* 0 dBm means 1 milliwatt */
+ if (tx_power < 0) {
+ IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
+ return -EINVAL;
+ }
+
+ if (tx_power > il->tx_power_device_lmt) {
+ IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
+ tx_power, il->tx_power_device_lmt);
+ return -EINVAL;
+ }
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+ /* scan complete and commit_rxon use the tx_power_next value;
+ * it always needs to be updated with the newest request */
+ il->tx_power_next = tx_power;
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(S_SCANNING, &il->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ D_INFO("Deferring tx power set\n");
+ return 0;
+ }
+
+ prev_tx_power = il->tx_power_user_lmt;
+ il->tx_power_user_lmt = tx_power;
+
+ ret = il->cfg->ops->lib->send_tx_power(il);
+
+ /* if setting tx_power fails, restore the original tx power */
+ if (ret) {
+ il->tx_power_user_lmt = prev_tx_power;
+ il->tx_power_next = prev_tx_power;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_set_tx_power);
+
+void
+il_send_bt_config(struct il_priv *il)
+{
+ struct il_bt_cmd bt_cmd = {
+ .lead_time = BT_LEAD_TIME_DEF,
+ .max_kill = BT_MAX_KILL_DEF,
+ .kill_ack_mask = 0,
+ .kill_cts_mask = 0,
+ };
+
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ D_INFO("BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+ if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
+ IL_ERR("failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(il_send_bt_config);
+
+int
+il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
+{
+ struct il_stats_cmd stats_cmd = {
+ .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
+ };
+
+ if (flags & CMD_ASYNC)
+ return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd, NULL);
+ else
+ return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd);
+}
+EXPORT_SYMBOL(il_send_stats_request);
+
+void
+il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ D_RX("sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(il_hdl_pm_sleep);
+
+void
+il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
+ il_get_cmd_string(pkt->hdr.cmd));
+ il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(il_hdl_pm_debug_stats);
+
+void
+il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ il_get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(il_hdl_error);
+
+void
+il_clear_isr_stats(struct il_priv *il)
+{
+ memset(&il->isr_stats, 0, sizeof(il->isr_stats));
+}
+
+int
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ int q;
+
+ D_MAC80211("enter\n");
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
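+ /* Note: mac80211 numbers its queues with 0 as the highest-priority
+ * AC (VO), while the uCode QoS parameter array appears to be ordered
+ * the other way (cf. IL_TX_FIFO_* in common.h), hence the reversed index. */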
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
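+ /* mac80211 reports txop in units of 32 usec; the multiplication by 32
+ * presumably converts it to the usec value the uCode expects. */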
+
+ il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ D_MAC80211("leave\n");
+ return 0;
+}
+EXPORT_SYMBOL(il_mac_conf_tx);
+
+int
+il_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ return il->ibss_manager == IL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
+
+static int
+il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ il_connection_init_rx_config(il, ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ return il_commit_rxon(il, ctx);
+}
+
+static int
+il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&il->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ il->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = il_set_mode(il, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ return 0;
+}
+
+int
+il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int err;
+ u32 modes;
+
+ D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* check if busy context is exclusive */
+ if (il->ctx.vif &&
+ (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
+ if (!(modes & BIT(vif->type))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = &il->ctx;
+ il->ctx.vif = vif;
+
+ err = il_setup_interface(il, &il->ctx);
+ if (err) {
+ il->ctx.vif = NULL;
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+ return err;
+}
+EXPORT_SYMBOL(il_mac_add_interface);
+
+static void
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->scan_vif == vif) {
+ il_scan_cancel_timeout(il, 200);
+ il_force_scan_end(il);
+ }
+
+ if (!mode_change) {
+ il_set_mode(il, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+}
+
+void
+il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ WARN_ON(ctx->vif != vif);
+ ctx->vif = NULL;
+
+ il_teardown_interface(il, vif, false);
+
+ memset(il->bssid, 0, ETH_ALEN);
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+
+}
+EXPORT_SYMBOL(il_mac_remove_interface);
+
+int
+il_alloc_txq_mem(struct il_priv *il)
+{
+ if (!il->txq)
+ il->txq =
+ kzalloc(sizeof(struct il_tx_queue) *
+ il->cfg->base_params->num_of_queues, GFP_KERNEL);
+ if (!il->txq) {
+ IL_ERR("Not enough memory for txq\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_txq_mem);
+
+void
+il_free_txq_mem(struct il_priv *il)
+{
+ kfree(il->txq);
+ il->txq = NULL;
+}
+EXPORT_SYMBOL(il_free_txq_mem);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+
+#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
+
+void
+il_reset_traffic_log(struct il_priv *il)
+{
+ il->tx_traffic_idx = 0;
+ il->rx_traffic_idx = 0;
+ if (il->tx_traffic)
+ memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+ if (il->rx_traffic)
+ memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+}
+
+int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
+
+ if (il_debug_level & IL_DL_TX) {
+ if (!il->tx_traffic) {
+ il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->tx_traffic)
+ return -ENOMEM;
+ }
+ }
+ if (il_debug_level & IL_DL_RX) {
+ if (!il->rx_traffic) {
+ il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->rx_traffic)
+ return -ENOMEM;
+ }
+ }
+ il_reset_traffic_log(il);
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_traffic_mem);
+
+void
+il_free_traffic_mem(struct il_priv *il)
+{
+ kfree(il->tx_traffic);
+ il->tx_traffic = NULL;
+
+ kfree(il->rx_traffic);
+ il->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(il_free_traffic_mem);
+
+void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_TX)))
+ return;
+
+ if (!il->tx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->tx_traffic +
+ (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->tx_traffic_idx =
+ (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
+
+void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_RX)))
+ return;
+
+ if (!il->rx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->rx_traffic +
+ (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->rx_traffic_idx =
+ (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
+
+const char *
+il_get_mgmt_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(MANAGEMENT_ASSOC_REQ);
+ IL_CMD(MANAGEMENT_ASSOC_RESP);
+ IL_CMD(MANAGEMENT_REASSOC_REQ);
+ IL_CMD(MANAGEMENT_REASSOC_RESP);
+ IL_CMD(MANAGEMENT_PROBE_REQ);
+ IL_CMD(MANAGEMENT_PROBE_RESP);
+ IL_CMD(MANAGEMENT_BEACON);
+ IL_CMD(MANAGEMENT_ATIM);
+ IL_CMD(MANAGEMENT_DISASSOC);
+ IL_CMD(MANAGEMENT_AUTH);
+ IL_CMD(MANAGEMENT_DEAUTH);
+ IL_CMD(MANAGEMENT_ACTION);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+const char *
+il_get_ctrl_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(CONTROL_BACK_REQ);
+ IL_CMD(CONTROL_BACK);
+ IL_CMD(CONTROL_PSPOLL);
+ IL_CMD(CONTROL_RTS);
+ IL_CMD(CONTROL_CTS);
+ IL_CMD(CONTROL_ACK);
+ IL_CMD(CONTROL_CFEND);
+ IL_CMD(CONTROL_CFENDACK);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void
+il_clear_traffic_stats(struct il_priv *il)
+{
+ memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+ memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLEGACY_DEBUGFS is defined, il_update_stats() records all
+ * MGMT, CTRL and DATA packets for both the TX and RX paths; use debugfs
+ * to display the tx/rx_stats.
+ * If CONFIG_IWLEGACY_DEBUGFS is not defined, no MGMT and CTRL information
+ * is recorded, but DATA packets are still counted, because il_led.c needs
+ * to control the LED blinking based on the number of tx and rx data
+ * frames.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+ struct traffic_stats *stats;
+
+ if (is_tx)
+ stats = &il->tx_stats;
+ else
+ stats = &il->rx_stats;
+
+ if (ieee80211_is_mgmt(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+ stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ stats->mgmt[MANAGEMENT_BEACON]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ATIM):
+ stats->mgmt[MANAGEMENT_ATIM]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ stats->mgmt[MANAGEMENT_DISASSOC]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ stats->mgmt[MANAGEMENT_AUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ stats->mgmt[MANAGEMENT_DEAUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACTION):
+ stats->mgmt[MANAGEMENT_ACTION]++;
+ break;
+ }
+ } else if (ieee80211_is_ctl(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+ stats->ctrl[CONTROL_BACK_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BACK):
+ stats->ctrl[CONTROL_BACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+ stats->ctrl[CONTROL_PSPOLL]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_RTS):
+ stats->ctrl[CONTROL_RTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CTS):
+ stats->ctrl[CONTROL_CTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACK):
+ stats->ctrl[CONTROL_ACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFEND):
+ stats->ctrl[CONTROL_CFEND]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+ stats->ctrl[CONTROL_CFENDACK]++;
+ break;
+ }
+ } else {
+ /* data */
+ stats->data_cnt++;
+ stats->data_bytes += len;
+ }
+}
+EXPORT_SYMBOL(il_update_stats);
+#endif
+
+int
+il_force_reset(struct il_priv *il, bool external)
+{
+ struct il_force_reset *force_reset;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ force_reset = &il->force_reset;
+ force_reset->reset_request_count++;
+ if (!external) {
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ D_INFO("force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+
+ /*
+ * If the request comes from an external source (e.g. debugfs),
+ * always perform it, regardless of the module parameter setting.
+ * If the request comes from inside the driver (uCode error or a
+ * failure detected by the driver), the fw_restart module parameter
+ * must be checked before reloading the firmware.
+ */
+
+ if (!external && !il->cfg->mod_params->restart_fw) {
+ D_INFO("Cancel firmware reload based on "
+ "module parameter setting\n");
+ return 0;
+ }
+
+ IL_ERR("On demand firmware reload\n");
+
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+ wake_up(&il->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(S_READY, &il->status);
+ queue_work(il->workqueue, &il->restart);
+
+ return 0;
+}
+
+int
+il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ u32 modes;
+ int err;
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&il->mutex);
+
+ if (!ctx->vif || !il_is_ready_rf(il)) {
+ /*
+ * Huh? But wait ... this can maybe happen when
+ * we're in the middle of a firmware restart!
+ */
+ err = -EBUSY;
+ goto out;
+ }
+
+ modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+ if (!(modes & BIT(newtype))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
+ (il->ctx.exclusive_interface_modes & BIT(newtype))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* success */
+ il_teardown_interface(il, vif, true);
+ vif->type = newtype;
+ vif->p2p = newp2p;
+ err = il_setup_interface(il, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+out:
+ mutex_unlock(&il->mutex);
+ return err;
+}
+EXPORT_SYMBOL(il_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it does not
+ * change during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int
+il_check_stuck_queue(struct il_priv *il, int cnt)
+{
+ struct il_tx_queue *txq = &il->txq[cnt];
+ struct il_queue *q = &txq->q;
+ unsigned long timeout;
+ int ret;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout =
+ txq->time_stamp +
+ msecs_to_jiffies(il->cfg->base_params->wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IL_ERR("Queue %d stuck for %u ms.\n", q->id,
+ il->cfg->base_params->wd_timeout);
+ ret = il_force_reset(il, false);
+ return (ret == -EAGAIN) ? 0 : 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we will
+ * discover a hung queue between one timeout and 1.25 * timeout
+ */
+#define IL_WD_TICK(timeout) ((timeout) / 4)
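+/*
+ * Illustration (hypothetical numbers): if wd_timeout were 2000 ms, the
+ * watchdog would run every 500 ms, so a queue whose time stamp stops
+ * changing is detected between 2000 ms and 2500 ms (1.25 * timeout)
+ * after its last activity.
+ */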
+
+/*
+ * Watchdog timer callback: we check each tx queue for a stuck condition;
+ * if a queue is hung we reset the firmware. If everything is fine we just
+ * rearm the timer.
+ */
+void
+il_bg_watchdog(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+ int cnt;
+ unsigned long timeout;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ timeout = il->cfg->base_params->wd_timeout;
+ if (timeout == 0)
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (il_check_stuck_queue(il, il->cmd_queue))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (il_is_any_associated(il)) {
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
+ }
+ }
+
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(il_bg_watchdog);
+
+void
+il_setup_watchdog(struct il_priv *il)
+{
+ unsigned int timeout = il->cfg->base_params->wd_timeout;
+
+ if (timeout)
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+ else
+ del_timer(&il->watchdog);
+}
+EXPORT_SYMBOL(il_setup_watchdog);
+
+/*
+ * Extended beacon time format:
+ * a time in usec is converted into a 32-bit value in extended:internal
+ * format, where the extended part is the beacon count and the internal
+ * part is the time in usec within one beacon interval.
+ */
+u32
+il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
+{
+ u32 quot;
+ u32 rem;
+ u32 interval = beacon_interval * TIME_UNIT;
+
+ if (!interval || !usec)
+ return 0;
+
+ quot =
+ (usec /
+ interval) & (il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits) >> il->
+ hw_params.beacon_time_tsf_bits);
+ rem =
+ (usec % interval) & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+
+ return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(il_usecs_to_beacons);
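+/*
+ * Worked example (illustrative only, assuming TIME_UNIT is 1024 usec,
+ * i.e. one TU): with beacon_interval = 100 TU the interval is 102400 usec.
+ * For usec = 250000 the quotient is 2 beacons and the remainder is
+ * 45200 usec, so the packed value is (2 << beacon_time_tsf_bits) + 45200,
+ * subject to the masking above.
+ */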
+
+/* base is usually what we get from ucode with each received frame,
+ * the same as HW timer counter counting down
+ */
+__le32
+il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval)
+{
+ u32 base_low = base & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 addon_low = addon & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 interval = beacon_interval * TIME_UNIT;
+ u32 res = (base & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits)) +
+ (addon & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits));
+
+ if (base_low > addon_low)
+ res += base_low - addon_low;
+ else if (base_low < addon_low) {
+ res += interval + base_low - addon_low;
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+ } else
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+
+ return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(il_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int
+il_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+
+ /*
+ * This function is called when the system goes into suspend state.
+ * mac80211 will call il_mac_stop() from its suspend function first,
+ * but since il_mac_stop() has no knowledge of who the caller is,
+ * it will not call apm_ops.stop() to stop the DMA operation.
+ * Call apm_ops.stop() here to make sure we stop the DMA.
+ */
+ il_apm_stop(il);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_suspend);
+
+int
+il_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+ bool hw_rfkill = false;
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il_enable_interrupts(il);
+
+ if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_resume);
+
+const struct dev_pm_ops il_pm_ops = {
+ .suspend = il_pci_suspend,
+ .resume = il_pci_resume,
+ .freeze = il_pci_suspend,
+ .thaw = il_pci_resume,
+ .poweroff = il_pci_suspend,
+ .restore = il_pci_resume,
+};
+EXPORT_SYMBOL(il_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+ if (ctx->qos_data.qos_active)
+ ctx->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+ if (ctx->ht.enabled)
+ ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+ D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
+
+ il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
+ &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * il_mac_config - mac80211 config callback
+ */
+int
+il_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags = 0;
+ int ret = 0;
+ u16 ch;
+ int scan_active = 0;
+ bool ht_changed = false;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&il->mutex);
+
+ D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
+ changed);
+
+ if (unlikely(test_bit(S_SCANNING, &il->status))) {
+ scan_active = 1;
+ D_MAC80211("scan active\n");
+ }
+
+ if (changed &
+ (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ }
+
+ /* during scanning mac80211 will delay channel setting until
+ * the scan finishes, with changed = 0
+ */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = channel->hw_value;
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
+ !il_is_channel_ibss(ch_info)) {
+ D_MAC80211("leave - not IBSS channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Configure HT40 channels */
+ if (ctx->ht.enabled != conf_is_ht(conf)) {
+ ctx->ht.enabled = conf_is_ht(conf);
+ ht_changed = true;
+ }
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ /*
+ * Default to no protection. Protection mode will
+ * later be set from BSS config in il_ht_conf
+ */
+ ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+ /* if we are switching from HT to 2.4 GHz, clear flags
+ * from any HT-related info since 2.4 GHz does not
+ * support HT */
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il->cfg->ops->legacy->update_bcast_stations)
+ ret = il->cfg->ops->legacy->update_bcast_stations(il);
+
+set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ il_set_rate(il);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+ ret = il_power_update_mode(il, false);
+ if (ret)
+ D_MAC80211("Error setting sleep level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
+ conf->power_level);
+
+ il_set_tx_power(il, conf->power_level, false);
+ }
+
+ if (!il_is_ready(il)) {
+ D_MAC80211("leave - not ready\n");
+ goto out;
+ }
+
+ if (scan_active)
+ goto out;
+
+ if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+ else
+ D_INFO("Not re-sending same RXON configuration.\n");
+ if (ht_changed)
+ il_update_qos(il, ctx);
+
+out:
+ D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_config);
+
+void
+il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
+
+ spin_lock_irqsave(&il->lock, flags);
+ memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* on a new association, get rid of the IBSS beacon skb */
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = NULL;
+
+ il->timestamp = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il_scan_cancel_timeout(il, 100);
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - not ready\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ /* we are restarting the association process,
+ * so clear the RXON_FILTER_ASSOC_MSK bit
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ il_set_rate(il);
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_reset_tsf);
+
+static void
+il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_ASSOC("enter:\n");
+
+ if (!ctx->ht.enabled)
+ return;
+
+ ctx->ht.protection =
+ bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ ctx->ht.non_gf_sta_present =
+ !!(bss_conf->
+ ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ ht_conf->single_chain_sufficient = false;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int maxstreams;
+
+ maxstreams =
+ (ht_cap->mcs.
+ tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+ >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if (ht_cap->mcs.rx_mask[1] == 0 &&
+ ht_cap->mcs.rx_mask[2] == 0)
+ ht_conf->single_chain_sufficient = true;
+ if (maxstreams <= 1)
+ ht_conf->single_chain_sufficient = true;
+ } else {
+ /*
+ * If at all, this can only happen through a race
+ * when the AP disconnects us while we're still
+ * setting up the connection, in that case mac80211
+ * will soon tell us about that.
+ */
+ ht_conf->single_chain_sufficient = true;
+ }
+ rcu_read_unlock();
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_conf->single_chain_sufficient = true;
+ break;
+ default:
+ break;
+ }
+
+ D_ASSOC("leave\n");
+}
+
+static inline void
+il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ctx->staging.assoc_id = 0;
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ __le64 timestamp;
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb)
+ return;
+
+ D_MAC80211("enter\n");
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("update beacon but no beacon context!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = skb;
+
+ timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+ il->timestamp = le64_to_cpu(timestamp);
+
+ D_MAC80211("leave\n");
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return;
+ }
+
+ il->cfg->ops->legacy->post_associate(il);
+}
+
+void
+il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ int ret;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ D_MAC80211("changes = 0x%X\n", changes);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_alive(il)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (changes & BSS_CHANGED_QOS) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ ctx->qos_data.qos_active = bss_conf->qos;
+ il_update_qos(il, ctx);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ /*
+ * the add_interface code must make sure we only ever
+ * have a single interface that could be beaconing at
+ * any time.
+ */
+ if (vif->bss_conf.enable_beacon)
+ il->beacon_ctx = ctx;
+ else
+ il->beacon_ctx = NULL;
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ D_MAC80211("BSSID %pM\n", bss_conf->bssid);
+
+ /*
+ * If there is currently a HW scan going on in the
+ * background then we need to cancel it else the RXON
+ * below/in post_associate will fail.
+ */
+ if (il_scan_cancel_timeout(il, 100)) {
+ IL_WARN("Aborted scan still in progress after 100ms\n");
+ D_MAC80211("leaving - scan abort failed.\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ /* mac80211 only sets assoc when in STATION mode */
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+
+ /* currently needed in a few places */
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ } else {
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ }
+
+ }
+
+ /*
+ * This needs to be after setting the BSSID in case
+ * mac80211 decides to do both changes at once because
+ * it will invoke post_associate.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
+ il_beacon_update(hw, vif);
+
+ if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+ D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ }
+
+ if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+ D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+ ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+ }
+
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+ /* XXX use this information
+ *
+ * To do that, remove code from il_set_rate() and put something
+ * like this here:
+ *
+ if (A-band)
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates;
+ else
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates >> 4;
+ ctx->staging.cck_basic_rates =
+ bss_conf->basic_rates & 0xF;
+ */
+ }
+
+ if (changes & BSS_CHANGED_HT) {
+ il_ht_conf(il, vif);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ D_MAC80211("ASSOC %d\n", bss_conf->assoc);
+ if (bss_conf->assoc) {
+ il->timestamp = bss_conf->timestamp;
+
+ if (!il_is_rfkill(il))
+ il->cfg->ops->legacy->post_associate(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
+ D_MAC80211("Changes (%#x) while associated\n", changes);
+ ret = il_send_rxon_assoc(il, ctx);
+ if (!ret) {
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&ctx->active, &ctx->staging,
+ sizeof(struct il_rxon_cmd));
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ il->cfg->ops->legacy->config_ap(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes & BSS_CHANGED_IBSS) {
+ ret =
+ il->cfg->ops->legacy->manage_ibss_station(il, vif,
+ bss_conf->
+ ibss_joined);
+ if (ret)
+ IL_ERR("failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_bss_info_changed);
+
+irqreturn_t
+il_isr(int irq, void *data)
+{
+ struct il_priv *il = data;
+ u32 inta, inta_mask;
+ u32 inta_fh;
+ unsigned long flags;
+ if (!il)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = _il_rd(il, CSR_INT);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta && !inta_fh) {
+ D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
+ goto none;
+ }
+
+ if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+ D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
+ inta_fh);
+
+ inta &= ~CSR_INT_BIT_SCD;
+
+ /* il_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta || inta_fh))
+ tasklet_schedule(&il->irq_tasklet);
+
+unplugged:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_HANDLED;
+
+none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL(il_isr);
+
+/*
+ * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
+ * function.
+ */
+void
+il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
+{
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ *tx_flags |= TX_CMD_FLG_RTS_MSK;
+ *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ if (!ieee80211_is_mgmt(fc))
+ return;
+
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ break;
+ }
+ } else if (info->control.rates[0].
+ flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ }
+}
+EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 00000000000..1bc0b02f559
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3424 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __il_core_h__
+#define __il_core_h__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "commands.h"
+#include "csr.h"
+#include "prph.h"
+
+struct il_host_cmd;
+struct il_cmd;
+struct il_tx_queue;
+
+#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
+#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
+
+#define RX_QUEUE_SIZE 256
+#define RX_QUEUE_MASK 255
+#define RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+#define U32_PAD(n) ((4-(n))&0x3)
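+/* e.g. U32_PAD(5) == 3 and U32_PAD(8) == 0: the number of bytes needed to
+ * pad a length n up to the next 4-byte boundary */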
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
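+/* Note that the default RTS threshold (2347) is above MAX_MSDU_SIZE, so per
+ * the rules above RTS is effectively disabled by default. */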
+
+struct il_rx_buf {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct il_device_cmd;
+
+struct il_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct il_host_cmd *source;
+ /*
+ * only for ASYNC commands
+ * (which is somewhat stupid -- look at common.c for instance
+ * which duplicates a bunch of code because the callback isn't
+ * invoked for SYNC commands, if it were and its result passed
+ * through it would be simpler...)
+ */
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+
+ /* The CMD_SIZE_HUGE flag bit indicates that the command
+ * structure is stored at the end of the shared queue memory. */
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct il_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* 1-st empty entry (idx) host_w */
+ int read_ptr; /* last used entry (idx) host_r */
+ /* use for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_win; /* safe queue win */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+};
+
+/* One for each TFD */
+struct il_tx_info {
+ struct sk_buff *skb;
+ struct il_rxon_context *ctx;
+};
+
+/**
+ * struct il_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write idx
+ * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct il_tx_queue {
+ struct il_queue q;
+ void *tfds;
+ struct il_device_cmd **cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_info *txb;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+};
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
+
+#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
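+/*
+ * Minimal sketch of the read sequence described above (illustrative only,
+ * not part of this patch; the driver's real EEPROM reader may differ):
+ *
+ *   _il_wr(il, CSR_EEPROM_REG, (u32)addr << 1);
+ *   for (t = 0; t < IL_EEPROM_ACCESS_TIMEOUT; t += 10) {
+ *           u32 r = _il_rd(il, CSR_EEPROM_REG);
+ *           if (r & CSR_EEPROM_REG_READ_VALID_MSK) {
+ *                   val = (u16)(r >> 16);   (data is in bits 31-16)
+ *                   break;
+ *           }
+ *           udelay(10);
+ *   }
+ */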
+
+/*
+ * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ * It only indicates that 20 MHz channel use is supported; HT40 channel
+ * usage is indicated by a separate set of regulatory flags for each
+ * HT40 channel pair.
+ *
+ * NOTE: Using a channel inappropriately will result in a uCode error!
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+enum {
+ EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
+ EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
+ /* Bit 2 Reserved */
+ EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
+ EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
+ EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
+ /* Bit 6 Reserved (was Narrow Channel) */
+ EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct il_eeprom_channel {
+ u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
+ s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION (5)
+#define EEPROM_4965_EEPROM_VERSION (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
+#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 il_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna). EEPROM contains:
+ *
+ * 1) Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2) Gain table idx used to achieve the target measurement power.
+ * This refers to the "well-known" gain tables (see 4965.h).
+ *
+ * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4) RF power amplifier detector level measurement (not used).
+ */
+struct il_eeprom_calib_measure {
+ u8 temperature; /* Device temperature (Celsius) */
+ u8 gain_idx; /* Index into gain table */
+ u8 actual_pow; /* Measured RF output power, half-dBm */
+ s8 pa_det; /* Power amp detector level (not used) */
+} __packed;
+
+/*
+ * measurement set for one channel. EEPROM contains:
+ *
+ * 1) Channel number measured
+ *
+ * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
+ * (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct il_eeprom_calib_ch_info {
+ u8 ch_num;
+ struct il_eeprom_calib_measure
+ measurements[EEPROM_TX_POWER_TX_CHAINS]
+ [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1) First and last channels within range of the subband. "0" values
+ * indicate that this sample set is not being used.
+ *
+ * 2) Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct il_eeprom_calib_subband_info {
+ u8 ch_from; /* channel number of lowest channel in subband */
+ u8 ch_to; /* channel number of highest channel in subband */
+ struct il_eeprom_calib_ch_info ch1;
+ struct il_eeprom_calib_ch_info ch2;
+} __packed;
+
+/*
+ * txpower calibration info. EEPROM contains:
+ *
+ * 1) Factory-measured saturation power levels (maximum levels at which
+ * tx power amplifier can output a signal without too much distortion).
+ * There is one level for 2.4 GHz band and one for 5 GHz band. These
+ * values apply to all channels within each of the bands.
+ *
+ * 2) Factory-measured power supply voltage level. This is assumed to be
+ * constant (i.e. same value applies to all channels/bands) while the
+ * factory measurements are being made.
+ *
+ * 3) Up to 8 sets of factory-measured txpower calibration values.
+ * These are for different frequency ranges, since txpower gain
+ * characteristics of the analog radio circuitry vary with frequency.
+ *
+ * Not all sets need to be filled with data;
+ * struct il_eeprom_calib_subband_info contains range of channels
+ * (0 if unused) for each set of data.
+ */
+struct il_eeprom_calib_info {
+ u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
+ u8 saturation_power52; /* half-dBm */
+ __le16 voltage; /* signed */
+ struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
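+/* Example (hypothetical value): for an EEPROM_RADIO_CONFIG word of 0x1234
+ * the masks above give TYPE 0, STEP 1, DASH 3, PNUM 0, TX_ANT 0x2 and
+ * RX_ANT 0x1. */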
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
+#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE: The RXON command uses 20 MHz channel numbers to specify the
+ * control channel to which to tune. RXON also specifies whether the
+ * control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
+
+struct il_eeprom_ops {
+ const u32 regulatory_bands[7];
+ int (*acquire_semaphore) (struct il_priv *il);
+ void (*release_semaphore) (struct il_priv *il);
+};
+
+int il_eeprom_init(struct il_priv *il);
+void il_eeprom_free(struct il_priv *il);
+const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
+u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
+int il_init_channel_map(struct il_priv *il);
+void il_free_channel_map(struct il_priv *il);
+const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
+ enum ieee80211_band band,
+ u16 channel);
+
+#define IL_NUM_SCAN_RATES (2)
+
+struct il4965_channel_tgd_info {
+ u8 type;
+ s8 max_power;
+};
+
+struct il4965_channel_tgh_info {
+ s64 last_radar_time;
+};
+
+#define IL4965_MAX_RATE (33)
+
+struct il3945_clip_group {
+ /* maximum power level to prevent clipping for each rate, derived by
+ * us from this band's saturation power in EEPROM */
+ const s8 clip_powers[IL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power idx must also be set. */
+struct il3945_channel_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 base_power_idx; /* gain idx for power at factory temp. */
+ s8 requested_power; /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct il3945_scan_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ * with one another!
+ */
+struct il_channel_info {
+ struct il4965_channel_tgd_info tgd;
+ struct il4965_channel_tgh_info tgh;
+ struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */
+ struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
+ * HT40 channel */
+
+ u8 channel; /* channel number */
+ u8 flags; /* flags copied from EEPROM */
+ s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
+ s8 min_power; /* always 0 */
+ s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
+
+ u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
+ u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
+ enum ieee80211_band band;
+
+ /* HT40 channel info */
+ s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ u8 ht40_flags; /* flags copied from EEPROM */
+ u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+ /* Radio/DSP gain settings for each "normal" data Tx rate.
+ * These include, in addition to RF and DSP gain, a few fields for
+ * remembering/modifying gain settings (idxes). */
+ struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
+
+ /* Radio/DSP gain settings for each scan rate, for directed scans. */
+ struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
+};
+
+#define IL_TX_FIFO_BK 0 /* shared */
+#define IL_TX_FIFO_BE 1
+#define IL_TX_FIFO_VI 2 /* shared */
+#define IL_TX_FIFO_VO 3
+#define IL_TX_FIFO_UNUSED -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IL_MIN_NUM_QUEUES 10
+
+#define IL_DEFAULT_CMD_QUEUE_NUM 4
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
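+/* The sequence-control field carries the fragment number in its low 4 bits
+ * and the 12-bit sequence number in bits 4-15, so e.g.
+ * SEQ_TO_SN(0x0345) == 0x034 and SN_TO_SEQ(0x034) == 0x0340. */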
+
+enum {
+ CMD_SYNC = 0,
+ CMD_SIZE_NORMAL = 0,
+ CMD_NO_SKB = 0,
+ CMD_SIZE_HUGE = (1 << 0),
+ CMD_ASYNC = (1 << 1),
+ CMD_WANT_SKB = (1 << 2),
+ CMD_MAPPED = (1 << 3),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct il_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct il_device_cmd {
+ struct il_cmd_header hdr; /* uCode API */
+ union {
+ u32 flags;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ struct il_tx_cmd tx;
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+
+struct il_host_cmd {
+ const void *data;
+ unsigned long reply_page;
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+ u32 flags;
+ u16 len;
+ u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/**
+ * struct il_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared idx to newest available Rx buffer
+ * @write: Shared idx to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write idx
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
+ */
+struct il_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct il_rx_buf *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct il_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+/**
+ * struct il_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
+ * @bitmap: bitmap of frames pending ACK in the Tx win, one bit per frame
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If C_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct il_ht_agg {
+ u16 txq_id;
+ u16 frame_count;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+#define IL_AGG_OFF 0
+#define IL_AGG_ON 1
+#define IL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IL_EMPTYING_HW_QUEUE_DELBA 3
+ u8 state;
+};
+
+struct il_tid_data {
+ u16 seq_number; /* 4965 only */
+ u16 tfds_in_queue;
+ struct il_ht_agg agg;
+};
+
+struct il_hw_key {
+ u32 cipher;
+ int keylen;
+ u8 keyidx;
+ u8 key[32];
+};
+
+union il_ht_rate_supp {
+ u16 rates;
+ struct {
+ u8 siso_rate;
+ u8 mimo_rate;
+ };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN (0x1)
+
+struct il_ht_config {
+ bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct il_qos_info {
+ int qos_active;
+ struct il_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (il_addsta_cmd and il_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct il_station_entry {
+ struct il_addsta_cmd sta;
+ struct il_tid_data tid[MAX_TID_COUNT];
+ u8 used, ctxid;
+ struct il_hw_key keyinfo;
+ struct il_link_quality_cmd *lq;
+};
+
+struct il_station_priv_common {
+ struct il_rxon_context *ctx;
+ u8 sta_id;
+};
+
+/**
+ * struct il_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct il_vif_priv {
+ struct il_rxon_context *ctx;
+ u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ void *v_addr; /* access by driver */
+ dma_addr_t p_addr; /* access by card's busmaster DMA */
+ u32 len; /* bytes */
+};
+
+/* uCode file layout */
+struct il_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+};
+
+struct il4965_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct il_sensitivity_ranges {
+ u16 min_nrg_cck;
+ u16 max_nrg_cck;
+
+ u16 nrg_th_cck;
+ u16 nrg_th_ofdm;
+
+ u16 auto_corr_min_ofdm;
+ u16 auto_corr_min_ofdm_mrc;
+ u16 auto_corr_min_ofdm_x1;
+ u16 auto_corr_min_ofdm_mrc_x1;
+
+ u16 auto_corr_max_ofdm;
+ u16 auto_corr_max_ofdm_mrc;
+ u16 auto_corr_max_ofdm_x1;
+ u16 auto_corr_max_ofdm_mrc_x1;
+
+ u16 auto_corr_max_cck;
+ u16 auto_corr_max_cck_mrc;
+ u16 auto_corr_min_cck;
+ u16 auto_corr_min_cck_mrc;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
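+
+/*
+ * Worked example (editor's note): KELVIN_TO_CELSIUS(293) == 20. The macros
+ * use an integer offset of 273 rather than 273.15, which is sufficient for
+ * the coarse thermal thresholds used here.
+ */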
+
+/**
+ * struct il_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations:
+ * @ht40_channel: bitmask of bands where 40MHz channels are possible:
+ *	BIT(IEEE80211_BAND_2GHZ), BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @struct il_sensitivity_ranges: range of sensitivity values
+ */
+struct il_hw_params {
+ u8 max_txq_num;
+ u8 dma_chnl_num;
+ u16 scd_bc_tbls_size;
+ u32 tfd_size;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 max_rxq_size;
+ u16 max_rxq_log;
+ u32 rx_page_order;
+ u32 rx_wrt_ptr_reg;
+ u8 max_stations;
+ u8 ht40_channel;
+ u8 max_beacon_itrvl; /* in 1024 ms */
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 max_bsm_size;
+ u32 ct_kill_threshold; /* value in hw-dependent units */
+ u16 beacon_time_tsf_bits;
+ const struct il_sensitivity_ranges *sens;
+};
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE: The implementations of these functions are not hardware specific,
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ *	il_         <-- Is part of the iwlegacy core
+ *	ilXXXX_     <-- Hardware specific (implemented in the per-device files)
+ * il4965_bg_ <-- Called from work queue context
+ * il4965_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il4965_update_chain_flags(struct il_priv *il);
+extern const u8 il_bcast_addr[ETH_ALEN];
+extern int il_queue_space(const struct il_queue *q);
+static inline int
+il_queue_used(const struct il_queue *q, int i)
+{
+	return q->write_ptr >= q->read_ptr ?
+		(i >= q->read_ptr && i < q->write_ptr) :
+		!(i < q->read_ptr && i >= q->write_ptr);
+}
+
+static inline u8
+il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
+{
+	/*
+	 * This is for the init calibration result and the scan command,
+	 * which require a buffer larger than TFD_MAX_PAYLOAD_SIZE and
+	 * therefore use the big buffer at the end of the command array.
+	 */
+	if (is_huge)
+		return q->n_win;	/* n_win must be a power of 2 */
+
+ /* Otherwise, use normal size buffers */
+ return idx & (q->n_win - 1);
+}
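+
+/*
+ * Worked example (illustrative only, assuming a command queue with
+ * n_win == 32): il_get_cmd_idx(q, 35, 0) == 35 & 31 == 3, while a huge
+ * command always maps to the extra slot at idx 32, just past the normal
+ * window.
+ */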
+
+struct il_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+#define IL_OPERATION_MODE_AUTO 0
+#define IL_OPERATION_MODE_HT_ONLY 1
+#define IL_OPERATION_MODE_MIXED 2
+#define IL_OPERATION_MODE_20MHZ 3
+
+#define IL_TX_CRC_SIZE 4
+#define IL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE 0xFFFF
+#define IL4965_CAL_NUM_BEACONS 20
+#define IL_CAL_NUM_BEACONS 16
+#define MAXIMUM_ALLOWED_PATHLOSS 15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM 50
+#define MIN_FA_OFDM 5
+#define MAX_FA_CCK 50
+#define MIN_FA_CCK 5
+
+#define AUTO_CORR_STEP_OFDM 1
+
+#define AUTO_CORR_STEP_CCK 3
+#define AUTO_CORR_MAX_TH_CCK 160
+
+#define NRG_DIFF 2
+#define NRG_STEP_CCK 2
+#define NRG_MARGIN 8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
+
+#define CHAIN_A 0
+#define CHAIN_B 1
+#define CHAIN_C 2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER 0xFF00
+#define IN_BAND_FILTER 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L 20
+#define NUM_RX_CHAINS 3
+
+enum il4965_false_alarm_state {
+ IL_FA_TOO_MANY = 0,
+ IL_FA_TOO_FEW = 1,
+ IL_FA_GOOD_RANGE = 2,
+};
+
+enum il4965_chain_noise_state {
+ IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
+ IL_CHAIN_NOISE_ACCUMULATE,
+ IL_CHAIN_NOISE_CALIBRATED,
+ IL_CHAIN_NOISE_DONE,
+};
+
+enum il4965_calib_enabled_state {
+ IL_CALIB_DISABLED = 0, /* must be 0 */
+ IL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum il_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum il_calib {
+ IL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct il_calib_result {
+ void *buf;
+ size_t buf_len;
+};
+
+enum ucode_type {
+ UCODE_NONE = 0,
+ UCODE_INIT,
+ UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct il_sensitivity_data {
+ u32 auto_corr_ofdm;
+ u32 auto_corr_ofdm_mrc;
+ u32 auto_corr_ofdm_x1;
+ u32 auto_corr_ofdm_mrc_x1;
+ u32 auto_corr_cck;
+ u32 auto_corr_cck_mrc;
+
+ u32 last_bad_plcp_cnt_ofdm;
+ u32 last_fa_cnt_ofdm;
+ u32 last_bad_plcp_cnt_cck;
+ u32 last_fa_cnt_cck;
+
+ u32 nrg_curr_state;
+ u32 nrg_prev_state;
+ u32 nrg_value[10];
+ u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+ u32 nrg_silence_ref;
+ u32 nrg_energy_idx;
+ u32 nrg_silence_idx;
+ u32 nrg_th_cck;
+ s32 nrg_auto_corr_silence_diff;
+ u32 num_in_cck_no_fa;
+ u32 nrg_th_ofdm;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct il_chain_noise_data {
+ u32 active_chains;
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_signal_a;
+ u32 chain_signal_b;
+ u32 chain_signal_c;
+ u16 beacon_count;
+ u8 disconn_array[NUM_RX_CHAINS];
+ u8 delta_gain_code[NUM_RX_CHAINS];
+ u8 radio_write;
+ u8 state;
+};
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+#define IL_TRAFFIC_ENTRIES (256)
+#define IL_TRAFFIC_ENTRY_SIZE (64)
+
+enum {
+ MEASUREMENT_READY = (1 << 0),
+ MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt stats */
+struct isr_stats {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 handlers[IL_CN_MAX];
+ u32 tx;
+ u32 unhandled;
+};
+
+/* management stats */
+enum il_mgmt_stats {
+ MANAGEMENT_ASSOC_REQ = 0,
+ MANAGEMENT_ASSOC_RESP,
+ MANAGEMENT_REASSOC_REQ,
+ MANAGEMENT_REASSOC_RESP,
+ MANAGEMENT_PROBE_REQ,
+ MANAGEMENT_PROBE_RESP,
+ MANAGEMENT_BEACON,
+ MANAGEMENT_ATIM,
+ MANAGEMENT_DISASSOC,
+ MANAGEMENT_AUTH,
+ MANAGEMENT_DEAUTH,
+ MANAGEMENT_ACTION,
+ MANAGEMENT_MAX,
+};
+/* control stats */
+enum il_ctrl_stats {
+ CONTROL_BACK_REQ = 0,
+ CONTROL_BACK,
+ CONTROL_PSPOLL,
+ CONTROL_RTS,
+ CONTROL_CTS,
+ CONTROL_ACK,
+ CONTROL_CFEND,
+ CONTROL_CFENDACK,
+ CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ u32 mgmt[MANAGEMENT_MAX];
+ u32 ctrl[CONTROL_MAX];
+ u32 data_cnt;
+ u64 data_bytes;
+#endif
+};
+
+/*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
+#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IL_DEF_WD_TIMEOUT (2000)
+#define IL_LONG_WD_TIMEOUT (10000)
+#define IL_MAX_WD_TIMEOUT (120000)
+
+struct il_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0 - interval
+ */
+#define IL3945_EXT_BEACON_TIME_POS 24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0 - interval
+ */
+#define IL4965_EXT_BEACON_TIME_POS 22
+
+struct il_rxon_context {
+ struct ieee80211_vif *vif;
+
+ const u8 *ac_to_fifo;
+ const u8 *ac_to_queue;
+ u8 mcast_queue;
+
+ /*
+ * We could use the vif to indicate active, but we
+ * also need it to be active during disabling when
+ * we already removed the vif for type setting.
+ */
+ bool always_active, is_active;
+
+ bool ht_need_multiple_chains;
+
+ int ctxid;
+
+ u32 interface_modes, exclusive_interface_modes;
+ u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+ /*
+ * We declare this const so it can only be
+ * changed via explicit cast within the
+ * routines that actually update the physical
+ * hardware.
+ */
+ const struct il_rxon_cmd active;
+ struct il_rxon_cmd staging;
+
+ struct il_rxon_time_cmd timing;
+
+ struct il_qos_info qos_data;
+
+ u8 bcast_sta_id, ap_sta_id;
+
+ u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+ u8 qos_cmd;
+ u8 wep_key_cmd;
+
+ struct il_wep_key wep_keys[WEP_KEYS_MAX];
+ u8 key_mapping_keys;
+
+ __le32 station_flags;
+
+ struct {
+ bool non_gf_sta_present;
+ u8 protection;
+ bool enabled, is_40mhz;
+ u8 extension_chan_offset;
+ } ht;
+};
+
+struct il_power_mgr {
+ struct il_powertable_cmd sleep_cmd;
+ struct il_powertable_cmd sleep_cmd_next;
+ int debug_sleep_level_override;
+ bool pci_pm;
+};
+
+struct il_priv {
+
+ /* ieee device used by generic ieee processing code */
+ struct ieee80211_hw *hw;
+ struct ieee80211_channel *ieee_channels;
+ struct ieee80211_rate *ieee_rates;
+ struct il_cfg *cfg;
+
+ /* temporary frame storage list */
+ struct list_head free_frames;
+ int frames_count;
+
+ enum ieee80211_band band;
+ int alloc_rxb_page;
+
+ void (*handlers[IL_CN_MAX]) (struct il_priv *il,
+ struct il_rx_buf *rxb);
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* spectrum measurement report caching */
+ struct il_spectrum_notification measure_report;
+ u8 measurement_status;
+
+ /* ucode beacon time */
+ u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* track IBSS manager (last beacon) status */
+ u32 ibss_manager;
+
+ /* force reset */
+ struct il_force_reset force_reset;
+
+ /* we allocate array of il_channel_info for NIC's valid channels.
+ * Access via channel # using indirect idx array */
+ struct il_channel_info *channel_info; /* channel info array */
+ u8 channel_count; /* # of channels */
+
+ /* thermal calibration */
+ s32 temperature; /* degrees Kelvin */
+ s32 last_temperature;
+
+ /* init calibration results */
+ struct il_calib_result calib_results[IL_CALIB_MAX];
+
+ /* Scan related variables */
+ unsigned long scan_start;
+ unsigned long scan_start_tsf;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
+ struct cfg80211_scan_request *scan_request;
+ struct ieee80211_vif *scan_vif;
+ u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 mgmt_tx_ant;
+
+ /* spinlock */
+ spinlock_t lock; /* protect general shared data */
+ spinlock_t hcmd_lock; /* protect hcmd */
+ spinlock_t reg_lock; /* protect hw register access */
+ struct mutex mutex;
+
+ /* basic pci-network driver stuff */
+ struct pci_dev *pci_dev;
+
+ /* pci hardware address support */
+ void __iomem *hw_base;
+ u32 hw_rev;
+ u32 hw_wa_rev;
+ u8 rev_id;
+
+ /* command queue number */
+ u8 cmd_queue;
+
+ /* max number of station keys */
+ u8 sta_key_max_num;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[1];
+
+ /* uCode images, save to reload in case of failure */
+ int fw_idx; /* firmware we're trying to load */
+ u32 ucode_ver; /* version of ucode, copy of
+ il_ucode.ver */
+ struct fw_desc ucode_code; /* runtime inst */
+ struct fw_desc ucode_data; /* runtime data original */
+ struct fw_desc ucode_data_backup; /* runtime data save/restore */
+ struct fw_desc ucode_init; /* initialization inst */
+ struct fw_desc ucode_init_data; /* initialization data */
+ struct fw_desc ucode_boot; /* bootstrap inst */
+ enum ucode_type ucode_type;
+ u8 ucode_write_complete; /* the image write is complete */
+ char firmware_name[25];
+
+ struct il_rxon_context ctx;
+
+ __le16 switch_channel;
+
+ /* 1st responses from initialize and runtime uCode images.
+ * _4965's initialize alive response contains some calibration data. */
+ struct il_init_alive_resp card_alive_init;
+ struct il_alive_resp card_alive;
+
+ u16 active_rate;
+
+ u8 start_calib;
+ struct il_sensitivity_data sensitivity_data;
+ struct il_chain_noise_data chain_noise_data;
+ __le16 sensitivity_tbl[HD_TBL_SIZE];
+
+ struct il_ht_config current_ht_config;
+
+ /* Rate scaling data */
+ u8 retry_rate;
+
+ wait_queue_head_t wait_command_queue;
+
+ int activity_timer_active;
+
+ /* Rx and Tx DMA processing queues */
+ struct il_rx_queue rxq;
+ struct il_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+ struct il_dma_ptr kw; /* keep warm address */
+ struct il_dma_ptr scd_bc_tbls;
+
+ u32 scd_base_addr; /* scheduler sram base address */
+
+ unsigned long status;
+
+ /* counts mgmt, ctl, and data packets */
+ struct traffic_stats tx_stats;
+ struct traffic_stats rx_stats;
+
+ /* counts interrupts */
+ struct isr_stats isr_stats;
+
+ struct il_power_mgr power_data;
+
+ /* context information */
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+ /* station table variables */
+
+ /* Note: if lock and sta_lock are needed, lock must be acquired first */
+ spinlock_t sta_lock;
+ int num_stations;
+ struct il_station_entry stations[IL_STATION_COUNT];
+ unsigned long ucode_key_table;
+
+ /* queue refcounts */
+#define IL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+ /* for each AC */
+ atomic_t queue_stop_count[4];
+
+ /* Indication if ieee80211_ops->open has been called */
+ u8 is_open;
+
+ u8 mac80211_registered;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+ struct il_eeprom_calib_info *calib_info;
+
+ enum nl80211_iftype iw_mode;
+
+ /* Last Rx'd beacon timestamp */
+ u64 timestamp;
+
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
+
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct il3945_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il3945_notif_stats accum_stats;
+ struct il3945_notif_stats delta_stats;
+ struct il3945_notif_stats max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet stats */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct il3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+ struct {
+ struct il_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+
+			/*
+			 * The chain noise reset and gain commands are the
+			 * two extra calibration commands that follow the
+			 * standard phy calibration commands.
+			 */
+ u8 phy_calib_chain_noise_reset_cmd;
+ u8 phy_calib_chain_noise_gain_cmd;
+
+ struct il_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_notif_stats accum_stats;
+ struct il_notif_stats delta_stats;
+ struct il_notif_stats max_delta;
+#endif
+
+ } _4965;
+#endif
+ };
+
+ struct il_hw_params hw_params;
+
+ u32 inta_mask;
+
+ struct workqueue_struct *workqueue;
+
+ struct work_struct restart;
+ struct work_struct scan_completed;
+ struct work_struct rx_replenish;
+ struct work_struct abort_scan;
+
+ struct il_rxon_context *beacon_ctx;
+ struct sk_buff *beacon_skb;
+
+ struct work_struct tx_flush;
+
+ struct tasklet_struct irq_tasklet;
+
+ struct delayed_work init_alive_start;
+ struct delayed_work alive_start;
+ struct delayed_work scan_check;
+
+ /* TX Power */
+ s8 tx_power_user_lmt;
+ s8 tx_power_device_lmt;
+ s8 tx_power_next;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ /* debugging info */
+ u32 debug_level; /* per device debugging will override global
+ il_debug_level if set */
+#endif /* CONFIG_IWLEGACY_DEBUG */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ /* debugfs */
+ u16 tx_traffic_idx;
+ u16 rx_traffic_idx;
+ u8 *tx_traffic;
+ u8 *rx_traffic;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool disable_ht40;
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+ struct work_struct txpower_work;
+ u32 disable_sens_cal;
+ u32 disable_chain_noise_cal;
+ u32 disable_tx_power_cal;
+ struct work_struct run_time_calib_work;
+ struct timer_list stats_periodic;
+ struct timer_list watchdog;
+ bool hw_ready;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
+}; /*il_priv */
+
+static inline void
+il_txq_ctx_activate(struct il_priv *il, int txq_id)
+{
+ set_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline void
+il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
+{
+ clear_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline struct ieee80211_hdr *
+il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
+{
+ if (il->txq[txq_id].txb[idx].skb)
+ return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
+ data;
+ return NULL;
+}
+
+static inline struct il_rxon_context *
+il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ return vif_priv->ctx;
+}
+
+#define for_each_context(il, _ctx) \
+ for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
+
+static inline int
+il_is_associated(struct il_priv *il)
+{
+ return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_any_associated(struct il_priv *il)
+{
+ return il_is_associated(il);
+}
+
+static inline int
+il_is_associated_ctx(struct il_rxon_context *ctx)
+{
+ return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_valid(const struct il_channel_info *ch_info)
+{
+ if (ch_info == NULL)
+ return 0;
+ return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_radar(const struct il_channel_info *ch_info)
+{
+ return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8
+il_is_channel_a_band(const struct il_channel_info *ch_info)
+{
+ return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+il_is_channel_passive(const struct il_channel_info *ch)
+{
+ return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_ibss(const struct il_channel_info *ch)
+{
+ return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
+static inline void
+__il_free_pages(struct il_priv *il, struct page *page)
+{
+ __free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+static inline void
+il_free_pages(struct il_priv *il, unsigned long page)
+{
+ free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+#define IL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
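+
+/*
+ * Usage sketch (editor's example; the device id and cfg symbol below are
+ * hypothetical):
+ *
+ *	static const struct pci_device_id il_hw_card_ids[] = {
+ *		{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+ *		{0}
+ *	};
+ *
+ * The cfg pointer lands in pci_device_id.driver_data and is retrieved by
+ * the probe routine.
+ */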
+
+#define TIME_UNIT 1024
+
+#define IL_SKU_G 0x1
+#define IL_SKU_A 0x2
+#define IL_SKU_N 0x8
+
+#define IL_CMD(x) case x: return #x
+
+/* Size of one Rx buffer in host DRAM */
+#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IL_RX_BUF_SIZE_4K (4 * 1024)
+#define IL_RX_BUF_SIZE_8K (8 * 1024)
+
+struct il_hcmd_ops {
+ int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
+ int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
+ void (*set_rxon_chain) (struct il_priv *il,
+ struct il_rxon_context *ctx);
+};
+
+struct il_hcmd_utils_ops {
+ u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+ u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
+ int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+ void (*post_scan) (struct il_priv *il);
+};
+
+struct il_apm_ops {
+ int (*init) (struct il_priv *il);
+ void (*config) (struct il_priv *il);
+};
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+struct il_debugfs_ops {
+ ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*general_stats_read) (struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+};
+#endif
+
+struct il_temp_ops {
+ void (*temperature) (struct il_priv *il);
+};
+
+struct il_lib_ops {
+ /* set hw dependent parameters */
+ int (*set_hw_params) (struct il_priv *il);
+ /* Handling TX */
+ void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
+ struct il_tx_queue *txq,
+ u16 byte_cnt);
+ int (*txq_attach_buf_to_tfd) (struct il_priv *il,
+ struct il_tx_queue *txq, dma_addr_t addr,
+ u16 len, u8 reset, u8 pad);
+ void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
+ int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
+ /* setup Rx handler */
+ void (*handler_setup) (struct il_priv *il);
+ /* alive notification after init uCode load */
+ void (*init_alive_start) (struct il_priv *il);
+ /* check validity of rtc data address */
+ int (*is_valid_rtc_data_addr) (u32 addr);
+ /* 1st ucode load */
+ int (*load_ucode) (struct il_priv *il);
+
+ void (*dump_nic_error_log) (struct il_priv *il);
+ int (*dump_fh) (struct il_priv *il, char **buf, bool display);
+ int (*set_channel_switch) (struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+ struct il_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct il_priv *il);
+ void (*update_chain_flags) (struct il_priv *il);
+
+ /* eeprom operations */
+ struct il_eeprom_ops eeprom_ops;
+
+ /* temperature */
+ struct il_temp_ops temp_ops;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_debugfs_ops debugfs_ops;
+#endif
+
+};
+
+struct il_led_ops {
+ int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
+};
+
+struct il_legacy_ops {
+ void (*post_associate) (struct il_priv *il);
+ void (*config_ap) (struct il_priv *il);
+ /* station management */
+ int (*update_bcast_stations) (struct il_priv *il);
+ int (*manage_ibss_station) (struct il_priv *il,
+ struct ieee80211_vif *vif, bool add);
+};
+
+struct il_ops {
+ const struct il_lib_ops *lib;
+ const struct il_hcmd_ops *hcmd;
+ const struct il_hcmd_utils_ops *utils;
+ const struct il_led_ops *led;
+ const struct il_nic_ops *nic;
+ const struct il_legacy_ops *legacy;
+ const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct il_mod_params {
+ int sw_crypto; /* def: 0 = using hardware encryption */
+ int disable_hw_scan; /* def: 0 = use h/w scan */
+ int num_of_queues; /* def: HW dependent */
+ int disable_11n; /* def: 0 = 11n capabilities enabled */
+ int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int antenna; /* def: 0 = both antennas (use diversity) */
+ int restart_fw; /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate the LED on/off time per HW according
+ *	to the deviation, to achieve the desired LED blink frequency.
+ *	The detailed algorithm is described in common.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode is in kelvin
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ * chain noise calibration operation
+ */
+struct il_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues; /* def: HW dependent */
+ /* for il_apm_init() */
+ u32 pll_cfg_val;
+ bool set_l0s;
+ bool use_bsm;
+
+ u16 led_compensation;
+ int chain_noise_num_beacons;
+ unsigned int wd_timeout;
+ bool temperature_kelvin;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+};
+
+#define IL_LED_SOLID 11
+#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IL_LED_ACTIVITY (0<<1)
+#define IL_LED_LINK (1<<1)
+
+/*
+ * LED mode
+ * IL_LED_DEFAULT: use device default
+ * IL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum il_led_mode {
+ IL_LED_DEFAULT,
+ IL_LED_RF_STATE,
+ IL_LED_BLINK,
+};
+
+void il_leds_init(struct il_priv *il);
+void il_leds_exit(struct il_priv *il);
+
+/**
+ * struct il_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ * (.ucode) will be added to filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @il_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ * Driver interacts with Firmware API version >= 2.
+ * } else {
+ * Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * il_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct il_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ unsigned int sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct il_ops *ops;
+ /* module based parameters which can be set from modprobe cmd */
+ const struct il_mod_params *mod_params;
+ /* params not likely to change within a device family */
+ struct il_base_params *base_params;
+ /* params likely to change within a device family */
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ enum il_led_mode led_mode;
+};
+
+/***************************
+ * L i b *
+ ***************************/
+
+struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
+int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
+
+void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx);
+void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif);
+u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
+bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap);
+void il_connection_init_rx_config(struct il_priv *il,
+ struct il_rxon_context *ctx);
+void il_set_rate(struct il_priv *il);
+int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats);
+void il_irq_handle_error(struct il_priv *il);
+int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+int il_alloc_txq_mem(struct il_priv *il);
+void il_txq_mem(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_alloc_traffic_mem(struct il_priv *il);
+void il_free_traffic_mem(struct il_priv *il);
+void il_reset_traffic_log(struct il_priv *il);
+void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+const char *il_get_mgmt_string(int cmd);
+const char *il_get_ctrl_string(int cmd);
+void il_clear_traffic_stats(struct il_priv *il);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+#else
+static inline int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ return 0;
+}
+
+static inline void
+il_free_traffic_mem(struct il_priv *il)
+{
+}
+
+static inline void
+il_reset_traffic_log(struct il_priv *il)
+{
+}
+
+static inline void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void il_cmd_queue_unmap(struct il_priv *il);
+void il_cmd_queue_free(struct il_priv *il);
+int il_rx_queue_alloc(struct il_priv *il);
+void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
+int il_rx_queue_space(const struct il_rx_queue *q);
+void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
+/* Handlers */
+void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
+void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
+void il_chswitch_done(struct il_priv *il, bool is_success);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void il_init_scan_params(struct il_priv *il);
+int il_scan_cancel(struct il_priv *il);
+int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
+void il_force_scan_end(struct il_priv *il);
+int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void il_internal_short_hw_scan(struct il_priv *il);
+int il_force_reset(struct il_priv *il, bool external);
+u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ie, int ie_len, int left);
+void il_setup_rx_scan_handlers(struct il_priv *il);
+u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes);
+u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+void il_setup_scan_deferred_work(struct il_priv *il);
+void il_cancel_scan_deferred_work(struct il_priv *il);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
+#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+
+#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)
+
+/*****************************************************
+ * S e n d i n g H o s t C o m m a n d s *
+ *****************************************************/
+
+const char *il_get_cmd_string(u8 cmd);
+int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
+int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
+int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
+ const void *data);
+int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt));
+
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
+
+/*****************************************************
+ * PCI *
+ *****************************************************/
+
+static inline u16
+il_pcie_link_ctl(struct il_priv *il)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ pos = pci_pcie_cap(il->pci_dev);
+ pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+void il_bg_watchdog(unsigned long data);
+u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
+__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int il_pci_suspend(struct device *device);
+int il_pci_resume(struct device *device);
+extern const struct dev_pm_ops il_pm_ops;
+
+#define IL_LEGACY_PM_OPS (&il_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IL_LEGACY_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+/*****************************************************
+* Error Handling Debugging
+******************************************************/
+void il4965_dump_nic_error_log(struct il_priv *il);
+#ifdef CONFIG_IWLEGACY_DEBUG
+void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+#else
+static inline void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+}
+#endif
+
+void il_clear_isr_stats(struct il_priv *il);
+
+/*****************************************************
+* GEOS
+******************************************************/
+int il_init_geos(struct il_priv *il);
+void il_free_geos(struct il_priv *il);
+
+/*************** DRIVER STATUS FUNCTIONS *****/
+
+#define S_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
+#define S_INT_ENABLED 2
+#define S_RF_KILL_HW 3
+#define S_CT_KILL 4
+#define S_INIT 5
+#define S_ALIVE 6
+#define S_READY 7
+#define S_TEMPERATURE 8
+#define S_GEO_CONFIGURED 9
+#define S_EXIT_PENDING 10
+#define S_STATS 12
+#define S_SCANNING 13
+#define S_SCAN_ABORTING 14
+#define S_SCAN_HW 15
+#define S_POWER_PMI 16
+#define S_FW_ERROR 17
+#define S_CHANNEL_SWITCH_PENDING 18
+
+static inline int
+il_is_ready(struct il_priv *il)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(S_READY, &il->status) &&
+ test_bit(S_GEO_CONFIGURED, &il->status) &&
+ !test_bit(S_EXIT_PENDING, &il->status);
+}
+
+static inline int
+il_is_alive(struct il_priv *il)
+{
+ return test_bit(S_ALIVE, &il->status);
+}
+
+static inline int
+il_is_init(struct il_priv *il)
+{
+ return test_bit(S_INIT, &il->status);
+}
+
+static inline int
+il_is_rfkill_hw(struct il_priv *il)
+{
+ return test_bit(S_RF_KILL_HW, &il->status);
+}
+
+static inline int
+il_is_rfkill(struct il_priv *il)
+{
+ return il_is_rfkill_hw(il);
+}
+
+static inline int
+il_is_ctkill(struct il_priv *il)
+{
+ return test_bit(S_CT_KILL, &il->status);
+}
+
+static inline int
+il_is_ready_rf(struct il_priv *il)
+{
+
+ if (il_is_rfkill(il))
+ return 0;
+
+ return il_is_ready(il);
+}
+
+extern void il_send_bt_config(struct il_priv *il);
+extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+int il_apm_init(struct il_priv *il);
+
+int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
+static inline int
+il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
+}
+
+static inline int
+il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->commit_rxon(il, ctx);
+}
+
+static inline const struct ieee80211_supported_band *
+il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+{
+ return il->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes);
+void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
+
+irqreturn_t il_isr(int irq, void *data);
+
+#include <linux/io.h>
+
+static inline void
+_il_write8(struct il_priv *il, u32 ofs, u8 val)
+{
+ iowrite8(val, il->hw_base + ofs);
+}
+#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
+
+static inline void
+_il_wr(struct il_priv *il, u32 ofs, u32 val)
+{
+ iowrite32(val, il->hw_base + ofs);
+}
+
+static inline u32
+_il_rd(struct il_priv *il, u32 ofs)
+{
+ return ioread32(il->hw_base + ofs);
+}
+
+#define IL_POLL_INTERVAL 10 /* microseconds */
+static inline int
+_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((_il_rd(il, addr) & mask) == (bits & mask))
+ return t;
+ udelay(IL_POLL_INTERVAL);
+ t += IL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static inline void
+_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) | mask);
+}
+
+static inline void
+il_set_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_set_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+
+static inline void
+_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) & ~mask);
+}
+
+static inline void
+il_clear_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_clear_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+
+static inline int
+_il_grab_nic_access(struct il_priv *il)
+{
+ int ret;
+ u32 val;
+
+ /* this bit wakes up the NIC */
+ _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (ret < 0) {
+ val = _il_rd(il, CSR_GP_CNTRL);
+		IL_ERR("MAC is in deep sleep! CSR_GP_CNTRL = 0x%08X\n", val);
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void
+_il_release_nic_access(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+static inline u32
+il_rd(struct il_priv *il, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ value = _il_rd(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+
+}
+
+static inline void
+il_wr(struct il_priv *il, u32 reg, u32 value)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, reg, value);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_write_reg_buf(struct il_priv *il, u32 reg, u32 len, u32 * values)
+{
+ u32 count = sizeof(u32);
+
+ if (il != NULL && values != NULL) {
+ for (; 0 < len; len -= count, reg += count, values++)
+ il_wr(il, reg, *values);
+ }
+}
+
+static inline int
+il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((il_rd(il, addr) & mask) == mask)
+ return t;
+ udelay(IL_POLL_INTERVAL);
+ t += IL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static inline u32
+_il_rd_prph(struct il_priv *il, u32 reg)
+{
+ _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ rmb();
+ return _il_rd(il, HBUS_TARG_PRPH_RDAT);
+}
+
+static inline u32
+il_rd_prph(struct il_priv *il, u32 reg)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return val;
+}
+
+static inline void
+_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
+ wmb();
+ _il_wr(il, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr_prph(il, addr, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define _il_set_bits_prph(il, reg, mask) \
+_il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask))
+
+static inline void
+il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_set_bits_prph(il, reg, mask);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define _il_set_bits_mask_prph(il, reg, bits, mask) \
+_il_wr_prph(il, reg, \
+ ((_il_rd_prph(il, reg) & mask) | bits))
+
+static inline void
+il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_set_bits_mask_prph(il, reg, bits, mask);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_wr_prph(il, reg, (val & ~mask));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline u32
+il_read_targ_mem(struct il_priv *il, u32 addr)
+{
+ unsigned long reg_flags;
+ u32 value;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+
+ _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
+ rmb();
+ value = _il_rd(il, HBUS_TARG_MEM_RDAT);
+
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+}
+
+static inline void
+il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ _il_wr(il, HBUS_TARG_MEM_WDAT, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_write_targ_mem_buf(struct il_priv *il, u32 addr, u32 len, u32 * values)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ for (; 0 < len; len -= sizeof(u32), values++)
+ _il_wr(il, HBUS_TARG_MEM_WDAT, *values);
+
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_dealloc_bcast_stations(struct il_priv *il);
+int il_get_free_ucode_key_idx(struct il_priv *il);
+int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
+int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
+int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init);
+
+/**
+ * il_clear_driver_stations - clear knowledge of all stations from driver
+ * @il: il_priv data structure
+ *
+ * This is called during il_down() to make sure that in the case
+ * we're coming there from a hardware restart mac80211 will be
+ * able to reconfigure stations -- if we're getting there in the
+ * normal down flow then the stations will already be cleared.
+ */
+static inline void
+il_clear_driver_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(il->stations, 0, sizeof(il->stations));
+ il->num_stations = 0;
+
+ il->ucode_key_table = 0;
+
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static inline int
+il_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IL_INVALID_STATION;
+
+ return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * il_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @il: il_priv data structure
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int
+il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = il_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IL_INVALID_STATION);
+
+ return sta_id;
+}
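+
+/*
+ * Usage sketch (editor's example): in the TX path, where mac80211 may hand
+ * us a NULL station pointer, the destination is resolved as
+ *
+ *	sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
+ *
+ * so frames without an associated station still go out via the broadcast
+ * station entry.
+ */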
+
+/**
+ * il_queue_inc_wrap - increment queue idx, wrap back to beginning
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_inc_wrap(int idx, int n_bd)
+{
+ return ++idx & (n_bd - 1);
+}
+
+/**
+ * il_queue_dec_wrap - decrement queue idx, wrap back to end
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_dec_wrap(int idx, int n_bd)
+{
+ return --idx & (n_bd - 1);
+}
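+
+/*
+ * Worked example (illustrative only, n_bd == 256):
+ *	il_queue_inc_wrap(255, 256) == 0
+ *	il_queue_dec_wrap(0, 256)   == 255
+ * The bitwise AND only works because n_bd is a power of 2.
+ */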
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void
+il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
+ desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static inline int
+il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (!desc->len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr =
+ dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
+ GFP_KERNEL);
+ return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
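+
+/*
+ * Worked example (illustrative only): il_set_swq_id(txq, 2, 5) encodes
+ * swq_id = (5 << 2) | 2 = 0x16; il_wake_queue()/il_stop_queue() below
+ * decode that back to hwq 5 and ac 2.
+ */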
+
+static inline void
+il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (test_and_clear_bit(hwq, il->queue_stopped))
+ if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (!test_and_set_bit(hwq, il->queue_stopped))
+ if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(il->hw, ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void
+il_disable_interrupts(struct il_priv *il)
+{
+ clear_bit(S_INT_ENABLED, &il->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ _il_wr(il, CSR_INT, 0xffffffff);
+ _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
+}
+
+static inline void
+il_enable_rfkill_int(struct il_priv *il)
+{
+ _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void
+il_enable_interrupts(struct il_priv *il)
+{
+ set_bit(S_INT_ENABLED, &il->status);
+ _il_wr(il, CSR_INT_MASK, il->inta_mask);
+}
+
+/**
+ * il_beacon_time_mask_low - mask of lower 32 bits of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * il_beacon_time_mask_high - mask of higher 32 bits of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
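+
+/*
+ * For example, a tsf_bits value of 22 (illustrative only) would split a
+ * 32-bit beacon time as:
+ *
+ *   il_beacon_time_mask_low()  -> (1 << 22) - 1         = 0x003FFFFF
+ *   il_beacon_time_mask_high() -> ((1 << 10) - 1) << 22 = 0xFFC00000
+ */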
+
+/**
+ * struct il_rb_status - receive buffer status host memory mapped FH registers
+ *
+ * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the idx of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
+ * which was transferred
+ */
+struct il_rb_status {
+ __le16 closed_rb_num;
+ __le16 closed_fr_num;
+ __le16 finished_rb_num;
+ __le16 finished_fr_nam;
+ __le32 __unused; /* 3945 only */
+} __packed;
+
+#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IL_NUM_OF_TBS 20
+
+static inline u8
+il_get_dma_hi_addr(dma_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
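+
+/*
+ * Example: with a 64-bit dma_addr_t, a 36-bit address such as 0x9ABCD0123
+ * yields ((addr >> 16) >> 16) & 0xF == 0x9, i.e. address bits [35:32],
+ * which is all that can sit above the 32-bit low word under IL_TX_DMA_MASK
+ * (DMA_BIT_MASK(36)).
+ */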
+
+/**
+ * struct il_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the DMA address and length of one transmit buffer.
+ *
+ * @lo: low [31:0] portion of the DMA address of the TX buffer; every even
+ * entry is unaligned on a 16-bit boundary
+ * @hi_n_len: bits 0-3: [35:32] portion of the DMA address
+ * bits 4-15: length of the tx buffer
+ */
+struct il_tfd_tb {
+ __le32 lo;
+ __le16 hi_n_len;
+} __packed;
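+
+/*
+ * Packing sketch (derived from the field description above; no helper in
+ * this header actually does this): a buffer at DMA address 0x9ABCD0123
+ * with length 0x100 would be described as
+ *
+ *   tb->lo       = cpu_to_le32(0xABCD0123);
+ *   tb->hi_n_len = cpu_to_le16((0x100 << 4) | 0x9);   (== 0x1009)
+ */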
+
+/**
+ * struct il_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct il_tfd {
+ u8 __reserved1[3];
+ u8 num_tbs;
+ struct il_tfd_tb tbs[IL_NUM_OF_TBS];
+ __le32 __pad;
+} __packed;
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+struct il_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: RATE_MIMO2_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+struct il3945_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+ u8 table_rs_idx; /* idx in rate scale table cmd */
+ u8 prev_table_rs; /* prev in rate table cmd */
+};
+
+/*
+ * These serve as idxes into
+ * struct il_rate_info il_rates[RATE_COUNT];
+ */
+enum {
+ RATE_1M_IDX = 0,
+ RATE_2M_IDX,
+ RATE_5M_IDX,
+ RATE_11M_IDX,
+ RATE_6M_IDX,
+ RATE_9M_IDX,
+ RATE_12M_IDX,
+ RATE_18M_IDX,
+ RATE_24M_IDX,
+ RATE_36M_IDX,
+ RATE_48M_IDX,
+ RATE_54M_IDX,
+ RATE_60M_IDX,
+ RATE_COUNT,
+ RATE_COUNT_LEGACY = RATE_COUNT - 1, /* Excluding 60M */
+ RATE_COUNT_3945 = RATE_COUNT - 1,
+ RATE_INVM_IDX = RATE_COUNT,
+ RATE_INVALID = RATE_COUNT,
+};
+
+enum {
+ RATE_6M_IDX_TBL = 0,
+ RATE_9M_IDX_TBL,
+ RATE_12M_IDX_TBL,
+ RATE_18M_IDX_TBL,
+ RATE_24M_IDX_TBL,
+ RATE_36M_IDX_TBL,
+ RATE_48M_IDX_TBL,
+ RATE_54M_IDX_TBL,
+ RATE_1M_IDX_TBL,
+ RATE_2M_IDX_TBL,
+ RATE_5M_IDX_TBL,
+ RATE_11M_IDX_TBL,
+ RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
+};
+
+enum {
+ IL_FIRST_OFDM_RATE = RATE_6M_IDX,
+ IL39_LAST_OFDM_RATE = RATE_54M_IDX,
+ IL_LAST_OFDM_RATE = RATE_60M_IDX,
+ IL_FIRST_CCK_RATE = RATE_1M_IDX,
+ IL_LAST_CCK_RATE = RATE_11M_IDX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define RATE_6M_MASK (1 << RATE_6M_IDX)
+#define RATE_9M_MASK (1 << RATE_9M_IDX)
+#define RATE_12M_MASK (1 << RATE_12M_IDX)
+#define RATE_18M_MASK (1 << RATE_18M_IDX)
+#define RATE_24M_MASK (1 << RATE_24M_IDX)
+#define RATE_36M_MASK (1 << RATE_36M_IDX)
+#define RATE_48M_MASK (1 << RATE_48M_IDX)
+#define RATE_54M_MASK (1 << RATE_54M_IDX)
+#define RATE_60M_MASK (1 << RATE_60M_IDX)
+#define RATE_1M_MASK (1 << RATE_1M_IDX)
+#define RATE_2M_MASK (1 << RATE_2M_IDX)
+#define RATE_5M_MASK (1 << RATE_5M_IDX)
+#define RATE_11M_MASK (1 << RATE_11M_IDX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+ RATE_6M_PLCP = 13,
+ RATE_9M_PLCP = 15,
+ RATE_12M_PLCP = 5,
+ RATE_18M_PLCP = 7,
+ RATE_24M_PLCP = 9,
+ RATE_36M_PLCP = 11,
+ RATE_48M_PLCP = 1,
+ RATE_54M_PLCP = 3,
+ RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */
+ RATE_1M_PLCP = 10,
+ RATE_2M_PLCP = 20,
+ RATE_5M_PLCP = 55,
+ RATE_11M_PLCP = 110,
+ /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ RATE_SISO_6M_PLCP = 0,
+ RATE_SISO_12M_PLCP = 1,
+ RATE_SISO_18M_PLCP = 2,
+ RATE_SISO_24M_PLCP = 3,
+ RATE_SISO_36M_PLCP = 4,
+ RATE_SISO_48M_PLCP = 5,
+ RATE_SISO_54M_PLCP = 6,
+ RATE_SISO_60M_PLCP = 7,
+ RATE_MIMO2_6M_PLCP = 0x8,
+ RATE_MIMO2_12M_PLCP = 0x9,
+ RATE_MIMO2_18M_PLCP = 0xa,
+ RATE_MIMO2_24M_PLCP = 0xb,
+ RATE_MIMO2_36M_PLCP = 0xc,
+ RATE_MIMO2_48M_PLCP = 0xd,
+ RATE_MIMO2_54M_PLCP = 0xe,
+ RATE_MIMO2_60M_PLCP = 0xf,
+ RATE_SISO_INVM_PLCP,
+ RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ RATE_6M_IEEE = 12,
+ RATE_9M_IEEE = 18,
+ RATE_12M_IEEE = 24,
+ RATE_18M_IEEE = 36,
+ RATE_24M_IEEE = 48,
+ RATE_36M_IEEE = 72,
+ RATE_48M_IEEE = 96,
+ RATE_54M_IEEE = 108,
+ RATE_60M_IEEE = 120,
+ RATE_1M_IEEE = 2,
+ RATE_2M_IEEE = 4,
+ RATE_5M_IEEE = 11,
+ RATE_11M_IEEE = 22,
+};
+
+#define IL_CCK_BASIC_RATES_MASK \
+ (RATE_1M_MASK | \
+ RATE_2M_MASK)
+
+#define IL_CCK_RATES_MASK \
+ (IL_CCK_BASIC_RATES_MASK | \
+ RATE_5M_MASK | \
+ RATE_11M_MASK)
+
+#define IL_OFDM_BASIC_RATES_MASK \
+ (RATE_6M_MASK | \
+ RATE_12M_MASK | \
+ RATE_24M_MASK)
+
+#define IL_OFDM_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ RATE_9M_MASK | \
+ RATE_18M_MASK | \
+ RATE_36M_MASK | \
+ RATE_48M_MASK | \
+ RATE_54M_MASK)
+
+#define IL_BASIC_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ IL_CCK_BASIC_RATES_MASK)
+
+#define RATES_MASK ((1 << RATE_COUNT) - 1)
+#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
+
+#define IL_INVALID_VALUE -1
+
+#define IL_MIN_RSSI_VAL -100
+#define IL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IL_LEGACY_FAILURE_LIMIT 160
+#define IL_LEGACY_SUCCESS_LIMIT 480
+#define IL_LEGACY_TBL_COUNT 160
+
+#define IL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IL_NONE_LEGACY_TBL_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IL_RS_GOOD_RATIO 12800 /* 100% */
+#define RATE_SCALE_SWITCH 10880 /* 85% */
+#define RATE_HIGH_TH 10880 /* 85% */
+#define RATE_INCREASE_TH 6400 /* 50% */
+#define RATE_DECREASE_TH 1920 /* 15% */
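+
+/*
+ * The ratio is the per-cent success scaled by 128 (see success_ratio in
+ * struct il_rate_scale_data below); for example, 102 ACKed frames out of
+ * 120 attempts gives 128 * 100 * 102 / 120 = 10880, i.e. RATE_HIGH_TH.
+ */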
+
+/* possible actions when in legacy mode */
+#define IL_LEGACY_SWITCH_ANTENNA1 0
+#define IL_LEGACY_SWITCH_ANTENNA2 1
+#define IL_LEGACY_SWITCH_SISO 2
+#define IL_LEGACY_SWITCH_MIMO2_AB 3
+#define IL_LEGACY_SWITCH_MIMO2_AC 4
+#define IL_LEGACY_SWITCH_MIMO2_BC 5
+
+/* possible actions when in siso mode */
+#define IL_SISO_SWITCH_ANTENNA1 0
+#define IL_SISO_SWITCH_ANTENNA2 1
+#define IL_SISO_SWITCH_MIMO2_AB 2
+#define IL_SISO_SWITCH_MIMO2_AC 3
+#define IL_SISO_SWITCH_MIMO2_BC 4
+#define IL_SISO_SWITCH_GI 5
+
+/* possible actions when in mimo mode */
+#define IL_MIMO2_SWITCH_ANTENNA1 0
+#define IL_MIMO2_SWITCH_ANTENNA2 1
+#define IL_MIMO2_SWITCH_SISO_A 2
+#define IL_MIMO2_SWITCH_SISO_B 3
+#define IL_MIMO2_SWITCH_SISO_C 4
+#define IL_MIMO2_SWITCH_GI 5
+
+#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
+
+#define IL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IL_AGG_TPT_THREHOLD 0
+#define IL_AGG_LOAD_THRESHOLD 10
+#define IL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50 /*mS */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* mS */
+#define TID_MAX_LOAD_COUNT 8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
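+
+/*
+ * TIME_WRAP_AROUND() gives the elapsed ticks even across a counter wrap,
+ * e.g. on a 32-bit counter with x = 0xFFFFFFF0 and y = 0x10 it evaluates
+ * (0 - x) + y = 0x10 + 0x10 = 0x20 rather than a huge bogus delta.
+ */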
+
+extern const struct il_rate_info il_rates[RATE_COUNT];
+
+enum il_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define ANT_NONE 0x0
+#define ANT_A BIT(0)
+#define ANT_B BIT(1)
+#define ANT_AB (ANT_A | ANT_B)
+#define ANT_C BIT(2)
+#define ANT_AC (ANT_A | ANT_C)
+#define ANT_BC (ANT_B | ANT_C)
+#define ANT_ABC (ANT_AB | ANT_C)
+
+#define IL_MAX_MCS_DISPLAY_SIZE 12
+
+struct il_rate_mcs_info {
+ char mbps[IL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct il_rate_scale_data -- tx success history for one rate
+ */
+struct il_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct il_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct il_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct il_scale_tbl_info {
+ enum il_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 is_dup; /* 1 = duplicated data streams */
+ u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
+ u8 max_search; /* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */
+};
+
+struct il_traffic_load {
+ unsigned long time_stamp; /* age of the oldest stats */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+ u8 queue_count; /* number of queues that have
+ * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct il_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct il_lq_sta {
+ u8 active_tbl; /* idx of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ u8 is_dup;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct il_link_quality_cmd lq;
+ struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct il_traffic_load load[TID_MAX_LOAD_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct il_priv *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+};
+
+/*
+ * il_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il_station_priv {
+ struct il_station_priv_common common;
+ struct il_lq_sta lq_sta;
+ atomic_t pending_frames;
+ bool client;
+ bool asleep;
+};
+
+static inline u8
+il4965_num_of_ant(u8 m)
+{
+ return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
+}
+
+static inline u8
+il4965_first_antenna(u8 mask)
+{
+ if (mask & ANT_A)
+ return ANT_A;
+ if (mask & ANT_B)
+ return ANT_B;
+ return ANT_C;
+}
+
+/**
+ * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+
+/**
+ * il_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand alone module. The driver can call
+ * il_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem. This should be performed prior to calling
+ * ieee80211_register_hw
+ *
+ */
+extern int il4965_rate_control_register(void);
+extern int il3945_rate_control_register(void);
+
+/**
+ * il_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void il4965_rate_control_unregister(void);
+extern void il3945_rate_control_unregister(void);
+
+extern int il_power_update_mode(struct il_priv *il, bool force);
+extern void il_power_initialize(struct il_priv *il);
+
+extern u32 il_debug_level;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+/*
+ * il_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. This debug
+ * level will be used if set, otherwise the global debug level, which can be
+ * set via a module parameter, is used.
+ */
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ if (il->debug_level)
+ return il->debug_level;
+ else
+ return il_debug_level;
+}
+#else
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ return il_debug_level;
+}
+#endif
+
+#define il_print_hex_error(il, p, len) \
+do { \
+ print_hex_dump(KERN_ERR, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define IL_DBG(level, fmt, args...) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ dev_printk(KERN_ERR, &il->hw->wiphy->dev, \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define il_print_hex_dump(il, level, p, len) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ print_hex_dump(KERN_DEBUG, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#else
+#define IL_DBG(level, fmt, args...)
+static inline void
+il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_unregister(struct il_priv *il);
+#else
+static inline int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ return 0;
+}
+
+static inline void
+il_dbgfs_unregister(struct il_priv *il)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add a D_xxxx() macro definition for your
+ * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ * /sys/module/iwl4965/parameters/debug
+ * /sys/module/iwl3945/parameters/debug
+ * /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLEGACY_DEBUG=y.
+ */
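+
+/*
+ * For example, a hypothetical "WEP2" classification (purely illustrative,
+ * not defined by this driver) would be added as
+ *
+ *   #define IL_DL_WEP2      (1 << 27)
+ *   #define D_WEP2(f, a...) IL_DBG(IL_DL_WEP2, f, ## a)
+ *
+ * using bit 27, which is currently unused in the list below.
+ */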
+
+/* 0x0000000F - 0x00000001 */
+#define IL_DL_INFO (1 << 0)
+#define IL_DL_MAC80211 (1 << 1)
+#define IL_DL_HCMD (1 << 2)
+#define IL_DL_STATE (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IL_DL_MACDUMP (1 << 4)
+#define IL_DL_HCMD_DUMP (1 << 5)
+#define IL_DL_EEPROM (1 << 6)
+#define IL_DL_RADIO (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IL_DL_POWER (1 << 8)
+#define IL_DL_TEMP (1 << 9)
+#define IL_DL_NOTIF (1 << 10)
+#define IL_DL_SCAN (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IL_DL_ASSOC (1 << 12)
+#define IL_DL_DROP (1 << 13)
+#define IL_DL_TXPOWER (1 << 14)
+#define IL_DL_AP (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IL_DL_FW (1 << 16)
+#define IL_DL_RF_KILL (1 << 17)
+#define IL_DL_FW_ERRORS (1 << 18)
+#define IL_DL_LED (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IL_DL_RATE (1 << 20)
+#define IL_DL_CALIB (1 << 21)
+#define IL_DL_WEP (1 << 22)
+#define IL_DL_TX (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IL_DL_RX (1 << 24)
+#define IL_DL_ISR (1 << 25)
+#define IL_DL_HT (1 << 26)
+/* 0xF0000000 - 0x10000000 */
+#define IL_DL_11H (1 << 28)
+#define IL_DL_STATS (1 << 29)
+#define IL_DL_TX_REPLY (1 << 30)
+#define IL_DL_QOS (1 << 31)
+
+#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a)
+#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a)
+#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a)
+#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a)
+#define D_SCAN(f, a...) IL_DBG(IL_DL_SCAN, f, ## a)
+#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a)
+#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a)
+#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a)
+#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a)
+#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a)
+#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a)
+#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
+#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a)
+#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a)
+#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a)
+#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a)
+#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a)
+#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a)
+#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a)
+#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a)
+#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a)
+#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a)
+#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a)
+#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a)
+#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a)
+#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a)
+#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a)
+#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a)
+#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a)
+
+#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
index 668a9616c26..9138e15004f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#ifndef __iwl_legacy_csr_h__
-#define __iwl_legacy_csr_h__
+#ifndef __il_csr_h__
+#define __il_csr_h__
/*
* CSR (control and status registers)
*
@@ -70,9 +70,9 @@
* low power states due to driver-invoked device resets
* (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
*
- * Use iwl_write32() and iwl_read32() family to access these registers;
+ * Use _il_wr() and _il_rd() family to access these registers;
* these provide simple PCI bus access, without waking up the MAC.
- * Do not use iwl_legacy_write_direct32() family for these registers;
+ * Do not use il_wr() family for these registers;
* no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
@@ -82,16 +82,16 @@
*/
#define CSR_BASE (0x000)
-#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
-#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
-#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
-#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
-#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
-#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
+#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
#define CSR_GP_CNTRL (CSR_BASE+0x024)
-/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
/*
@@ -166,26 +166,26 @@
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
-#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
-#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
+#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
* acknowledged (reset) by host writing "1" to flagged bits. */
-#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
-#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
-#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
-#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
-#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
-#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
-#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
-#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
-#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
-#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
-#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
CSR_INT_BIT_HW_ERR | \
@@ -197,21 +197,20 @@
CSR_INT_BIT_ALIVE)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
-#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
-#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
-#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
-#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
-#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
-#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
-#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
-#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
CSR39_FH_INT_BIT_RX_CHNL2 | \
CSR_FH_INT_BIT_RX_CHNL1 | \
CSR_FH_INT_BIT_RX_CHNL0)
-
#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
CSR_FH_INT_BIT_TX_CHNL1 | \
CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
-
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -293,19 +291,18 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
+#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
/* GP REG */
-#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
-
/* CSR GIO */
#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
@@ -357,7 +354,7 @@
/* HPET MEM debug */
#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
-/* DRAM INT TABLE */
+/* DRAM INT TBL */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
@@ -368,13 +365,13 @@
* to indirectly access device's internal memory or registers that
* may be powered-down.
*
- * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * Use il_wr()/il_rd() family
* for these registers;
* host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
* to make sure the MAC (uCode processor, etc.) is powered up for accessing
* internal resources.
*
- * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * Do not use _il_wr()/_il_rd() family to access these registers;
* these provide only simple PCI bus access, without waking up the MAC.
*/
#define HBUS_BASE (0x400)
@@ -411,12 +408,12 @@
#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
/*
- * Per-Tx-queue write pointer (index, really!)
- * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Per-Tx-queue write pointer (idx, really!)
+ * Indicates idx to next TFD that driver will fill (1 past latest filled).
* Bit usage:
- * 0-7: queue write index
+ * 0-7: queue write idx
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
-#endif /* !__iwl_legacy_csr_h__ */
+#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 00000000000..b1b8926a9c7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1411 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, il, \
+ &il_dbgfs_##name##_ops)) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t il_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t il_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+static int
+il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
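+
+/*
+ * As an example of what the helpers above generate, DEBUGFS_READ_FILE_OPS(nvm)
+ * expands to a forward declaration of il_dbgfs_nvm_read() plus a
+ * "struct file_operations il_dbgfs_nvm_ops" whose .read, .open and .llseek
+ * members point at il_dbgfs_nvm_read, il_dbgfs_open_file_generic and
+ * generic_file_llseek respectively; DEBUGFS_ADD_FILE(nvm, parent, mode)
+ * then creates the "nvm" debugfs entry bound to those ops.
+ */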
+
+static ssize_t
+il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->tx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->tx_stats.data_bytes);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_clear_traffic_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ u32 clear_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &clear_flag) != 1)
+ return -EFAULT;
+ il_clear_traffic_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->rx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->rx_stats.data_bytes);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t
+il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ u32 val;
+ char *buf;
+ ssize_t ret;
+ int i;
+ int pos = 0;
+ struct il_priv *il = file->private_data;
+ size_t bufsz;
+
+ /* default is to dump the entire data segment */
+ if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
+ il->dbgfs_sram_offset = 0x800000;
+ if (il->ucode_type == UCODE_INIT)
+ il->dbgfs_sram_len = il->ucode_init_data.len;
+ else
+ il->dbgfs_sram_len = il->ucode_data.len;
+ }
+ bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+ il->dbgfs_sram_len);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ il->dbgfs_sram_offset);
+ for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
+ val =
+ il_read_targ_mem(il,
+ il->dbgfs_sram_offset +
+ il->dbgfs_sram_len - i);
+ if (i < 4) {
+ switch (i) {
+ case 1:
+ val &= BYTE1_MASK;
+ break;
+ case 2:
+ val &= BYTE2_MASK;
+ break;
+ case 3:
+ val &= BYTE3_MASK;
+ break;
+ }
+ }
+ if (!(i % 16))
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ il->dbgfs_sram_offset = offset;
+ il->dbgfs_sram_len = len;
+ } else {
+ il->dbgfs_sram_offset = 0;
+ il->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_station_entry *station;
+ int max_sta = il->hw_params.max_stations;
+ char *buf;
+ int i, j, pos = 0;
+ ssize_t ret;
+ /* Add 30 for initial string */
+ const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+ il->num_stations);
+
+ for (i = 0; i < max_sta; i++) {
+ station = &il->stations[i];
+ if (!station->used)
+ continue;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "station %d - addr: %pM, flags: %#x\n", i,
+ station->sta.sta.addr,
+ station->sta.station_flags_msk);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+ for (j = 0; j < MAX_TID_COUNT; j++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+ j, station->tid[j].seq_number,
+ station->tid[j].agg.txq_id,
+ station->tid[j].agg.frame_count,
+ station->tid[j].tfds_in_queue,
+ station->tid[j].agg.start_idx,
+ station->tid[j].agg.bitmap,
+ station->tid[j].agg.rate_n_flags);
+
+ if (station->tid[j].agg.wait_for_ba)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " - waitforba");
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t ret;
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0, buf_size = 0;
+ const u8 *ptr;
+ char *buf;
+ u16 eeprom_ver;
+ size_t eeprom_len = il->cfg->base_params->eeprom_size;
+ buf_size = 4 * eeprom_len + 256;
+
+ if (eeprom_len % 16) {
+ IL_ERR("NVM size is not multiple of 16.\n");
+ return -ENODATA;
+ }
+
+ ptr = il->eeprom;
+ if (!ptr) {
+ IL_ERR("Invalid EEPROM memory\n");
+ return -ENOMEM;
+ }
+
+ /* 4 characters for byte 0xYY */
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ pos +=
+ scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
+ eeprom_ver);
+ for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+ buf_size - pos, 0);
+ pos += strlen(buf + pos);
+ if (buf_size - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct ieee80211_channel *channels = NULL;
+ const struct ieee80211_supported_band *supp_band = NULL;
+ int pos = 0, i, bufsz = PAGE_SIZE;
+ char *buf;
+ ssize_t ret;
+
+ if (!test_bit(S_GEO_CONFIGURED, &il->status))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 5.2GHz band (802.11a)\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
+ test_bit(S_HCMD_ACTIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
+ test_bit(S_INT_ENABLED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
+ test_bit(S_RF_KILL_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
+ test_bit(S_CT_KILL, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
+ test_bit(S_INIT, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
+ test_bit(S_ALIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
+ test_bit(S_READY, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
+ test_bit(S_TEMPERATURE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
+ test_bit(S_GEO_CONFIGURED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
+ test_bit(S_EXIT_PENDING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
+ test_bit(S_STATS, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
+ test_bit(S_SCANNING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
+ test_bit(S_SCAN_ABORTING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
+ test_bit(S_SCAN_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
+ test_bit(S_POWER_PMI, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
+ test_bit(S_FW_ERROR, &il->status));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ il->isr_stats.hw);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ il->isr_stats.sw);
+ if (il->isr_stats.sw || il->isr_stats.hw) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ il->isr_stats.err_code);
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ il->isr_stats.sch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ il->isr_stats.alive);
+#endif
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n",
+ il->isr_stats.rfkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ il->isr_stats.ctkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ il->isr_stats.wakeup);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
+ il->isr_stats.rx);
+ for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
+ if (il->isr_stats.handlers[cnt] > 0)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tRx handler[%36s]:\t\t %u\n",
+ il_get_cmd_string(cnt),
+ il->isr_stats.handlers[cnt]);
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ il->isr_stats.tx);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ il->isr_stats.unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ il_clear_isr_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_rxon_context *ctx = &il->ctx;
+ int pos = 0, i;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
+ for (i = 0; i < AC_NUM; i++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tcw_min\tcw_max\taifsn\ttxop\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+ ctx->qos_data.def_qos_parm.ac[i].cw_min,
+ ctx->qos_data.def_qos_parm.ac[i].cw_max,
+ ctx->qos_data.def_qos_parm.ac[i].aifsn,
+ ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int ht40;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &ht40) != 1)
+ return -EFAULT;
+ if (!il_is_any_associated(il))
+ il->disable_ht40 = ht40 ? true : false;
+ else {
+ IL_ERR("Sta associated with AP - "
+ "Change to 40MHz channel support is not allowed\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[100];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
+ il->disable_ht40 ? "Disabled" : "Enabled");
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t
+il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0;
+ int cnt = 0, entry;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_rx_queue *rxq = &il->rxq;
+ char *buf;
+ int bufsz =
+ ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+ (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ const u8 *ptr;
+ ssize_t ret;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate buffer\n");
+ return -ENOMEM;
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
+ q->read_ptr, q->write_ptr);
+ }
+ if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
+ ptr = il->tx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
+ il->tx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
+ rxq->read, rxq->write);
+
+ if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
+ ptr = il->rx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
+ il->rx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int traffic_log;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &traffic_log) != 1)
+ return -EFAULT;
+ if (traffic_log == 0)
+ il_reset_traffic_log(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz =
+ sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
+ q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, il->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&il->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ struct il_rx_queue *rxq = &il->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->
+ closed_rb_num) & 0x0FFF);
+ } else {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
+ ssize_t ret;
+ struct il_sensitivity_data *data;
+
+ data = &il->sensitivity_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+ data->auto_corr_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
+ data->auto_corr_ofdm_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+ data->auto_corr_ofdm_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+ data->auto_corr_ofdm_mrc_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+ data->auto_corr_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+ data->auto_corr_cck_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+ data->last_bad_plcp_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+ data->last_fa_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
+ data->last_bad_plcp_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+ data->last_fa_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+ data->nrg_curr_state);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+ data->nrg_prev_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+ for (cnt = 0; cnt < 10; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_value[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+ for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_silence_rssi[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+ data->nrg_silence_ref);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+ data->nrg_energy_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+ data->nrg_silence_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+ data->nrg_th_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "nrg_auto_corr_silence_diff:\t %u\n",
+ data->nrg_auto_corr_silence_diff);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+ data->num_in_cck_no_fa);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+ data->nrg_th_ofdm);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
+ ssize_t ret;
+ struct il_chain_noise_data *data;
+
+ data = &il->chain_noise_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+ data->active_chains);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+ data->chain_noise_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+ data->chain_noise_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+ data->chain_noise_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+ data->chain_signal_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+ data->chain_signal_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+ data->chain_signal_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+ data->beacon_count);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->disconn_array[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->delta_gain_code[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+ data->radio_write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+ data->state);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[60];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ u32 pwrsave_status;
+
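+ /* the power-save state is reported in the general-purpose control register */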
+ pwrsave_status =
+ _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%s\n",
+ (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+ (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+ (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+ "error");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_clear_ucode_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int clear;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &clear) != 1)
+ return -EFAULT;
+
+ /* make request to uCode to retrieve stats information and clear the accumulated counters */
+ mutex_lock(&il->mutex);
+ il_send_stats_request(il, CMD_SYNC, true);
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len =
+ sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
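+ /* dump_fh allocates the output buffer and returns the number of bytes written */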
+ if (il->cfg->ops->lib->dump_fh) {
+ ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
+ if (buf) {
+ ret =
+ simple_read_from_buffer(user_buf, count, ppos, buf,
+ pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%d\n",
+ il->missed_beacon_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
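+ /* values outside the allowed range restore the default threshold */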
+ if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IL_MISSED_BEACON_THRESHOLD_MAX)
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ il->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct il_force_reset *force_reset;
+
+ force_reset = &il->force_reset;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
+ force_reset->reset_duration);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ int ret;
+ struct il_priv *il = file->private_data;
+
+ ret = il_force_reset(il, true);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int timeout;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &timeout) != 1)
+ return -EINVAL;
+ if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
+ timeout = IL_DEF_WD_TIMEOUT;
+
+ il->cfg->base_params->wd_timeout = timeout;
+ il_setup_watchdog(il);
+ return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_stats);
+DEBUGFS_READ_FILE_OPS(tx_stats);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ struct dentry *phyd = il->hw->wiphy->debugfsdir;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
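+ /* build <name>/{data,rf,debug} under the wiphy's debugfs directory */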
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ il->debugfs_dir = dir_drv;
+
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
+ goto err;
+
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &il->disable_sens_cal);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &il->disable_chain_noise_cal);
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
+ return 0;
+
+err:
+ IL_ERR("Can't create the debugfs directory\n");
+ il_dbgfs_unregister(il);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void
+il_dbgfs_unregister(struct il_priv *il)
+{
+ if (!il->debugfs_dir)
+ return;
+
+ debugfs_remove_recursive(il->debugfs_dir);
+ il->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38793a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-3945-debugfs.h"
-
-
-static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->_3945.statistics.flag));
- if (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
- sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
- ssize_t ret;
- struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
- *max_ofdm;
- struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct iwl39_statistics_rx_non_phy *general, *accum_general;
- struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- ofdm = &priv->_3945.statistics.rx.ofdm;
- cck = &priv->_3945.statistics.rx.cck;
- general = &priv->_3945.statistics.rx.general;
- accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
- accum_cck = &priv->_3945.accum_statistics.rx.cck;
- accum_general = &priv->_3945.accum_statistics.rx.general;
- delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
- delta_cck = &priv->_3945.delta_statistics.rx.cck;
- delta_general = &priv->_3945.delta_statistics.rx.general;
- max_ofdm = &priv->_3945.max_delta.rx.ofdm;
- max_cck = &priv->_3945.max_delta.rx.cck;
- max_general = &priv->_3945.max_delta.rx.general;
-
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:", le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout,
- delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout,
- delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt,
- delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt,
- delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err,
- delta_cck->overrun_err, max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good,
- max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout,
- delta_cck->sfd_timeout, max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout,
- delta_cck->fina_timeout, max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts,
- delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt,
- delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt,
- delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts,
- delta_general->bogus_cts, max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack,
- delta_general->bogus_ack, max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
- ssize_t ret;
- struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- tx = &priv->_3945.statistics.tx;
- accum_tx = &priv->_3945.accum_statistics.tx;
- delta_tx = &priv->_3945.delta_statistics.tx;
- max_tx = &priv->_3945.max_delta.tx;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
- ssize_t ret;
- struct iwl39_statistics_general *general, *accum_general;
- struct iwl39_statistics_general *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- general = &priv->_3945.statistics.general;
- dbg = &priv->_3945.statistics.general.dbg;
- div = &priv->_3945.statistics.general.div;
- accum_general = &priv->_3945.accum_statistics.general;
- delta_general = &priv->_3945.delta_statistics.general;
- max_general = &priv->_3945.max_delta.general;
- accum_dbg = &priv->_3945.accum_statistics.general.dbg;
- delta_dbg = &priv->_3945.delta_statistics.general.dbg;
- max_dbg = &priv->_3945.max_delta.general.dbg;
- accum_div = &priv->_3945.accum_statistics.general.div;
- delta_div = &priv->_3945.delta_statistics.general.div;
- max_div = &priv->_3945.max_delta.general.div;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b32b44..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos);
-#else
-static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c9919f82..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_3945_fh_h__
-#define __iwl_3945_fh_h__
-
-/************************************/
-/* iwl3945 Flow Handler Definitions */
-/************************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH39_MEM_LOWER_BOUND (0x0800)
-#define FH39_MEM_UPPER_BOUND (0x1000)
-
-#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
-#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
-#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
-#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
-#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
-#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
-
-/* TFDB (Transmit Frame Buffer Descriptor) */
-#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
- ((_ch) * 2 + (buf)) * 0x28)
-#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
-
-/* CBCC channel is [0,2] */
-#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
-#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
-#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
-
-/* RCSR channel is [0,2] */
-#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
-#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
-#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
-#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
-#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
-
-#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
-
-/* RSSR */
-#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
-#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
-
-/* TCSR */
-#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
-#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
-#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
-#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
-
-/* TSSR */
-#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
-#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
-#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
-
-
-/* DBM */
-
-#define FH39_SRVC_CHNL (6)
-
-#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
-#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
-
-#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
-
-#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
-#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
-
-#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
- (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
- FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
-
-#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
-
-struct iwl3945_tfd_tb {
- __le32 addr;
- __le32 len;
-} __packed;
-
-struct iwl3945_tfd {
- __le32 control_flags;
- struct iwl3945_tfd_tb tbs[4];
- u8 __pad[28];
-} __packed;
-
-
-#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d3af1..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-3945.h for driver implementation definitions.
- */
-
-#ifndef __iwl_3945_hw__
-#define __iwl_3945_hw__
-
-#include "iwl-eeprom.h"
-
-/* RSSI to dBm */
-#define IWL39_RSSI_OFFSET 95
-
-/*
- * EEPROM related constants, enums, and structures.
- */
-#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
-
-/*
- * Mapping of a Tx power level, at factory calibration temperature,
- * to a radio/DSP gain table index.
- * One for each of 5 "sample" power levels in each band.
- * v_det is measured at the factory, using the 3945's built-in power amplifier
- * (PA) output voltage detector. This same detector is used during Tx of
- * long packets in normal operation to provide feedback as to proper output
- * level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_sample {
- u8 gain_index; /* index into power (gain) setup table ... */
- s8 power; /* ... for this pwr level for this chnl group */
- u16 v_det; /* PA output voltage */
-} __packed;
-
-/*
- * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
- * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
- * Tx power setup code interpolates between the 5 "sample" power levels
- * to determine the nominal setup for a requested power level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_group {
- struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
- s32 a, b, c, d, e; /* coefficients for voltage->power
- * formula (signed) */
- s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
- * frequency (signed) */
- s8 saturation_power; /* highest power possible by h/w in this
- * band */
- u8 group_channel; /* "representative" channel # in this band */
- s16 temperature; /* h/w temperature at factory calib this band
- * (signed) */
-} __packed;
-
-/*
- * Temperature-based Tx-power compensation data, not band-specific.
- * These coefficients are use to modify a/b/c/d/e coeffs based on
- * difference between current temperature and factory calib temperature.
- * Data copied from EEPROM.
- */
-struct iwl3945_eeprom_temperature_corr {
- u32 Ta;
- u32 Tb;
- u32 Tc;
- u32 Td;
- u32 Te;
-} __packed;
-
-/*
- * EEPROM map
- */
-struct iwl3945_eeprom {
- u8 reserved0[16];
- u16 device_id; /* abs.ofs: 16 */
- u8 reserved1[2];
- u16 pmc; /* abs.ofs: 20 */
- u8 reserved2[20];
- u8 mac_address[6]; /* abs.ofs: 42 */
- u8 reserved3[58];
- u16 board_revision; /* abs.ofs: 106 */
- u8 reserved4[11];
- u8 board_pba_number[9]; /* abs.ofs: 119 */
- u8 reserved5[8];
- u16 version; /* abs.ofs: 136 */
- u8 sku_cap; /* abs.ofs: 138 */
- u8 leds_mode; /* abs.ofs: 139 */
- u16 oem_mode;
- u16 wowlan_mode; /* abs.ofs: 142 */
- u16 leds_time_interval; /* abs.ofs: 144 */
- u8 leds_off_time; /* abs.ofs: 146 */
- u8 leds_on_time; /* abs.ofs: 147 */
- u8 almgor_m_version; /* abs.ofs: 148 */
- u8 antenna_switch_type; /* abs.ofs: 149 */
- u8 reserved6[42];
- u8 sku_id[4]; /* abs.ofs: 192 */
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by 3945 has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
- u16 band_1_count; /* abs.ofs: 196 */
- struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
- u16 band_2_count; /* abs.ofs: 226 */
- struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
- u16 band_3_count; /* abs.ofs: 254 */
- struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
- u16 band_4_count; /* abs.ofs: 280 */
- struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
- u16 band_5_count; /* abs.ofs: 304 */
- struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
-
- u8 reserved9[194];
-
-/*
- * 3945 Txpower calibration data.
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
- struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
-/* abs.ofs: 512 */
- struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
- u8 reserved16[172]; /* fill out to full 1024 byte block */
-} __packed;
-
-#define IWL3945_EEPROM_IMG_SIZE 1024
-
-/* End of EEPROM */
-
-#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
-#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
-
-/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
-#define IWL39_NUM_QUEUES 5
-#define IWL39_CMD_QUEUE_NUM 4
-
-#define IWL_DEFAULT_TX_RETRY 15
-
-/*********************************************/
-
-#define RFD_SIZE 4
-#define NUM_TFD_CHUNKS 4
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-#define U32_PAD(n) ((4-(n))&0x3)
-
-#define TFD_CTL_COUNT_SET(n) (n << 24)
-#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
-#define TFD_CTL_PAD_SET(n) (n << 28)
-#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
-
-#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
-
-#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
- IWL39_RTC_INST_LOWER_BOUND)
-#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
- IWL39_RTC_DATA_LOWER_BOUND)
-
-#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
-#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
-
-static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL39_RTC_DATA_UPPER_BOUND);
-}
-
-/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
- * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
-struct iwl3945_shared {
- __le32 tx_base_ptr[8];
-} __packed;
-
-static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags);
-}
-
-static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
-{
- return cpu_to_le16((u16)rate|flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f38c8a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-dev.h"
-#include "iwl-3945-led.h"
-
-
-/* Send led command */
-static int iwl3945_send_led_cmd(struct iwl_priv *priv,
- struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-const struct iwl_led_ops iwl3945_led_ops = {
- .cmd = iwl3945_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 96716276eb0..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_3945_led_h__
-#define __iwl_3945_led_h__
-
-extern const struct iwl_led_ops iwl3945_led_ops;
-
-#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2ddde..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-sta.h"
-
-#define RS_NAME "iwl-3945-rs"
-
-static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
-};
-
-static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
-};
-
-static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
- 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
-};
-
-static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-struct iwl3945_tpt_entry {
- s8 min_rssi;
- u8 index;
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-72, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-87, IWL_RATE_9M_INDEX},
- {-89, IWL_RATE_6M_INDEX}
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-68, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-86, IWL_RATE_11M_INDEX},
- {-88, IWL_RATE_5M_INDEX},
- {-90, IWL_RATE_2M_INDEX},
- {-92, IWL_RATE_1M_INDEX}
-};
-
-#define IWL_RATE_MAX_WINDOW 62
-#define IWL_RATE_FLUSH (3*HZ)
-#define IWL_RATE_WIN_FLUSH (HZ/2)
-#define IWL39_RATE_HIGH_TH 11520
-#define IWL_SUCCESS_UP_TH 8960
-#define IWL_SUCCESS_DOWN_TH 10880
-#define IWL_RATE_MIN_FAILURE_TH 6
-#define IWL_RATE_MIN_SUCCESS_TH 8
-#define IWL_RATE_DECREASE_TH 1920
-#define IWL_RATE_RETRY_TH 15
-
-static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
-{
- u32 index = 0;
- u32 table_size = 0;
- struct iwl3945_tpt_entry *tpt_table = NULL;
-
- if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
- rssi = IWL_MIN_RSSI_VAL;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- tpt_table = iwl3945_tpt_table_g;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
- break;
-
- case IEEE80211_BAND_5GHZ:
- tpt_table = iwl3945_tpt_table_a;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
- break;
-
- default:
- BUG();
- break;
- }
-
- while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
- index++;
-
- index = min(index, (table_size - 1));
-
- return tpt_table[index].index;
-}
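
To make the table walk above concrete, here is a small standalone sketch. The values are copied from iwl3945_tpt_table_g above, but the struct and rate labels are illustrative placeholders, not the driver's types: an RSSI of -70 dBm skips the -60/-64/-68 entries and stops at the -80 entry, i.e. the 24 Mb/s starting rate.

/* Minimal standalone sketch of the RSSI -> starting-rate lookup above. */
#include <stdio.h>

struct tpt_entry { int min_rssi; const char *rate; };

static const struct tpt_entry tbl_g[] = {
	{ -60, "54M" }, { -64, "48M" }, { -68, "36M" }, { -80, "24M" },
	{ -84, "18M" }, { -85, "12M" }, { -86, "11M" }, { -88, "5.5M" },
	{ -90, "2M" },  { -92, "1M" },
};

int main(void)
{
	int rssi = -70;		/* example reading */
	unsigned int i = 0, n = sizeof(tbl_g) / sizeof(tbl_g[0]);

	/* same walk as the driver: advance while the RSSI is below the
	 * entry's threshold, then clamp to the last (lowest) rate */
	while (i < n && rssi < tbl_g[i].min_rssi)
		i++;
	if (i == n)
		i = n - 1;

	printf("start at %s\n", tbl_g[i].rate);	/* prints "start at 24M" */
	return 0;
}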
-
-static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = -1;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-/**
- * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
- *
- * Returns the number of windows that have gathered data but were
- * not flushed. If there were any that were not flushed, then
- * reschedule the rate flushing routine.
- */
-static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
-{
- int unflushed = 0;
- int i;
- unsigned long flags;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /*
- * For each rate, if we have collected data on that rate
- * and it has been more than IWL_RATE_WIN_FLUSH
- * since we flushed, clear out the gathered statistics
- */
- for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
- if (!rs_sta->win[i].counter)
- continue;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
- if (time_after(jiffies, rs_sta->win[i].stamp +
- IWL_RATE_WIN_FLUSH)) {
- IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
- "index %d\n",
- rs_sta->win[i].counter, i);
- iwl3945_clear_window(&rs_sta->win[i]);
- } else
- unflushed++;
- spin_unlock_irqrestore(&rs_sta->lock, flags);
- }
-
- return unflushed;
-}
-
-#define IWL_RATE_FLUSH_MAX 5000 /* msec */
-#define IWL_RATE_FLUSH_MIN 50 /* msec */
-#define IWL_AVERAGE_PACKETS 1500
-
-static void iwl3945_bg_rate_scale_flush(unsigned long data)
-{
- struct iwl3945_rs_sta *rs_sta = (void *)data;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
- int unflushed = 0;
- unsigned long flags;
- u32 packet_count, duration, pps;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /* Number of packets Tx'd since last time this timer ran */
- packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
-
- rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
-
- if (unflushed) {
- duration =
- jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
-
- IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
- packet_count, duration);
-
- /* Determine packets per second */
- if (duration)
- pps = (packet_count * 1000) / duration;
- else
- pps = 0;
-
- if (pps) {
- duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
- if (duration < IWL_RATE_FLUSH_MIN)
- duration = IWL_RATE_FLUSH_MIN;
- else if (duration > IWL_RATE_FLUSH_MAX)
- duration = IWL_RATE_FLUSH_MAX;
- } else
- duration = IWL_RATE_FLUSH_MAX;
-
- rs_sta->flush_time = msecs_to_jiffies(duration);
-
- IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
- duration, packet_count);
-
- mod_timer(&rs_sta->rate_scale_flush, jiffies +
- rs_sta->flush_time);
-
- rs_sta->last_partial_flush = jiffies;
- } else {
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->flush_pending = 0;
- }
- /* If there weren't any unflushed entries, we don't schedule the timer
- * to run again */
-
- rs_sta->last_flush = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
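
A small worked example of the flush-period throttling above (sample values invented): 300 packets in 2000 ms gives 150 packets/s, so the period that would cover IWL_AVERAGE_PACKETS is 10 s, which the clamp reduces to IWL_RATE_FLUSH_MAX (5 s).

/* Standalone sketch of the flush-period computation in
 * iwl3945_bg_rate_scale_flush(); packet count and elapsed time are
 * example numbers only. */
#include <stdio.h>

#define AVERAGE_PACKETS 1500	/* IWL_AVERAGE_PACKETS */
#define FLUSH_MIN 50		/* IWL_RATE_FLUSH_MIN, msec */
#define FLUSH_MAX 5000		/* IWL_RATE_FLUSH_MAX, msec */

int main(void)
{
	unsigned int packet_count = 300, elapsed_ms = 2000;
	unsigned int pps = (packet_count * 1000) / elapsed_ms;	/* 150 */

	/* aim for roughly AVERAGE_PACKETS per flush period, clamped */
	unsigned int period = (AVERAGE_PACKETS * 1000) / pps;	/* 10000 */
	if (period < FLUSH_MIN)
		period = FLUSH_MIN;
	else if (period > FLUSH_MAX)
		period = FLUSH_MAX;

	printf("next flush in %u ms\n", period);	/* prints 5000 */
	return 0;
}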
-
-/**
- * iwl3945_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last IWL_RATE_MAX_WINDOW (62) packets
- * transmitted at this rate. window->data contains the bitmask of
- * successful packets.
- */
-static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
- struct iwl3945_rate_scale_data *window,
- int success, int retries, int index)
-{
- unsigned long flags;
- s32 fail_count;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- if (!retries) {
- IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
- return;
- }
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
- */
- while (retries > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
- window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame (throw away oldest history),
- * OR in "1", and increment "success" if this
- * frame was successful. */
- window->data <<= 1;
- if (success > 0) {
- window->success_counter++;
- window->data |= 0x1;
- success--;
- }
-
- retries--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = ((window->success_ratio *
- rs_sta->expected_tpt[index] + 64) / 128);
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-}
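
A quick worked example of the fixed-point math above (numbers invented for illustration): success_ratio is the success percentage scaled by 128, and average_tpt rescales the expected throughput by that ratio, so thresholds such as IWL_RATE_DECREASE_TH (1920 = 15 * 128) and IWL39_RATE_HIGH_TH (11520 = 90 * 128) are simply percentages on the same scale.

/* Standalone sketch of the success_ratio / average_tpt arithmetic used
 * in iwl3945_collect_tx_data(); the sample numbers are made up. */
#include <stdio.h>

int main(void)
{
	int counter = 40, success = 30;	/* 30 of the last 40 frames ACKed */
	int expected_tpt = 130;		/* expected-throughput entry for the rate */

	/* percentage scaled by 128: 75% -> 9600 */
	int success_ratio = 128 * (100 * success) / counter;

	/* scale expected throughput by the ratio, with rounding */
	int average_tpt = (success_ratio * expected_tpt + 64) / 128;

	printf("success_ratio=%d average_tpt=%d\n", success_ratio, average_tpt);
	/* prints: success_ratio=9600 average_tpt=9750 */
	return 0;
}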
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct iwl3945_sta_priv *psta;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_supported_band *sband;
- int i;
-
- IWL_DEBUG_INFO(priv, "enter\n");
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- goto out;
-
- psta = (struct iwl3945_sta_priv *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
- rs_sta->priv = priv;
-
- rs_sta->start_rate = IWL_RATE_INVALID;
-
- /* default to just 802.11b */
- rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->last_flush = jiffies;
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->last_tx_packets = 0;
-
- rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
- rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
-
- for (i = 0; i < IWL_RATE_COUNT_3945; i++)
- iwl3945_clear_window(&rs_sta->win[i]);
-
- /* TODO: what is a good starting rate for STA? About middle? Maybe not
- * the lowest or the highest rate.. Could consider using RSSI from
- * previous packets? Need to have IEEE 802.1X auth succeed immediately
- * after assoc.. */
-
- for (i = sband->n_bitrates - 1; i >= 0; i--) {
- if (sta->supp_rates[sband->band] & (1 << i)) {
- rs_sta->last_txrate_idx = i;
- break;
- }
- }
-
- priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
- /* For the 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
- if (sband->band == IEEE80211_BAND_5GHZ) {
- rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
- IWL_FIRST_OFDM_RATE;
- }
-
-out:
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-
- IWL_DEBUG_INFO(priv, "leave\n");
-}
-
-static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-/* rate scale requires free function to be implemented */
-static void iwl3945_rs_free(void *priv)
-{
- return;
-}
-
-static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- struct iwl3945_rs_sta *rs_sta;
- struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
- struct iwl_priv *priv __maybe_unused = iwl_priv;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rs_sta = &psta->rs_sta;
-
- spin_lock_init(&rs_sta->lock);
- init_timer(&rs_sta->rate_scale_flush);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-
- return rs_sta;
-}
-
-static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl3945_rs_sta *rs_sta = priv_sta;
-
- /*
- * Be careful not to use any members of iwl3945_rs_sta (like trying
- * to use iwl_priv to print out debugging) since it may not be fully
- * initialized at this point.
- */
- del_timer_sync(&rs_sta->rate_scale_flush);
-}
-
-
-/**
- * iwl3945_rs_tx_status - Update rate control values based on Tx results
- *
- * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
- * the hardware for each rate.
- */
-static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- s8 retries = 0, current_count;
- int scale_rate_index, first_index, last_index;
- unsigned long flags;
- struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- retries = info->status.rates[0].count;
- /* Sanity Check for retries */
- if (retries > IWL_RATE_RETRY_TH)
- retries = IWL_RATE_RETRY_TH;
-
- first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
- if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
- IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
- return;
- }
-
- if (!priv_sta) {
- IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
- return;
- }
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
- return;
- }
-
-
- rs_sta->tx_packets++;
-
- scale_rate_index = first_index;
- last_index = first_index;
-
- /*
- * Update the window for each rate. We determine which rates
- * were Tx'd based on the total number of retries vs. the number
- * of retries configured for each rate -- currently the single
- * priv value 'retry_rate' rather than a rate-specific limit.
- *
- * On exit from this while loop last_index indicates the rate
- * at which the frame was finally transmitted (or failed if no
- * ACK)
- */
- while (retries > 1) {
- if ((retries - 1) < priv->retry_rate) {
- current_count = (retries - 1);
- last_index = scale_rate_index;
- } else {
- current_count = priv->retry_rate;
- last_index = iwl3945_rs_next_rate(priv,
- scale_rate_index);
- }
-
- /* Update this rate accounting for as many retries
- * as was used for it (per current_count) */
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[scale_rate_index],
- 0, current_count, scale_rate_index);
- IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
- scale_rate_index, current_count);
-
- retries -= current_count;
-
- scale_rate_index = last_index;
- }
-
-
- /* Update the last index window with success/failure based on ACK */
- IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
- last_index,
- (info->flags & IEEE80211_TX_STAT_ACK) ?
- "success" : "failure");
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[last_index],
- info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
-
- /* We updated the rate scale window -- if it's been more than
- * flush_time since the last run, schedule the flush
- * again */
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- if (!rs_sta->flush_pending &&
- time_after(jiffies, rs_sta->last_flush +
- rs_sta->flush_time)) {
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->flush_pending = 1;
- mod_timer(&rs_sta->rate_scale_flush,
- jiffies + rs_sta->flush_time);
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
- u8 index, u16 rate_mask, enum ieee80211_band band)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /* 802.11A walks to the next literal adjacent rate in
- * the rate table */
- if (unlikely(band == IEEE80211_BAND_5GHZ)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
- i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- low = iwl3945_rates[low].prev_rs_tgg;
- else
- low = iwl3945_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- high = iwl3945_rates[high].next_rs_tgg;
- else
- high = iwl3945_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
-
-/**
- * iwl3945_rs_get_rate - find the rate for the requested packet
- *
- * Selects a rate index for the requested packet and stores it in
- * info->control.rates[0]; the function itself returns nothing.
- *
- * The rate control algorithm has no internal mapping between hw_mode's
- * rate ordering and the rate ordering used by the rate control algorithm.
- *
- * The rate control algorithm uses a single table of rates that goes across
- * the entire A/B/G spectrum vs. being limited to just one particular
- * hw_mode.
- *
- * As such, we can't convert the index obtained below into the hw_mode's
- * rate table and must reference the driver allocated rate table
- *
- */
-static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta, struct ieee80211_tx_rate_control *txrc)
-{
- struct ieee80211_supported_band *sband = txrc->sband;
- struct sk_buff *skb = txrc->skb;
- u8 low = IWL_RATE_INVALID;
- u8 high = IWL_RATE_INVALID;
- u16 high_low;
- int index;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct iwl3945_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- unsigned long flags;
- u16 rate_mask;
- s8 max_rate_idx = -1;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (rs_sta && !rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
- priv_sta = NULL;
- }
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- rate_mask = sta->supp_rates[sband->band];
-
- /* get user max rate if set */
- max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
- max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
- max_rate_idx = -1;
-
- index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
-
- if (sband->band == IEEE80211_BAND_5GHZ)
- rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /* for a recent association, choose the best rate with
- * regard to the RSSI value
- */
- if (rs_sta->start_rate != IWL_RATE_INVALID) {
- if (rs_sta->start_rate < index &&
- (rate_mask & (1 << rs_sta->start_rate)))
- index = rs_sta->start_rate;
- rs_sta->start_rate = IWL_RATE_INVALID;
- }
-
- /* force user max rate if set by user */
- if ((max_rate_idx != -1) && (max_rate_idx < index)) {
- if (rate_mask & (1 << max_rate_idx))
- index = max_rate_idx;
- }
-
- window = &(rs_sta->win[index]);
-
- fail_count = window->counter - window->success_counter;
-
- if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
- "counter: %d, success_counter: %d, "
- "expected_tpt is %sNULL\n",
- index,
- window->counter,
- window->success_counter,
- rs_sta->expected_tpt ? "not " : "");
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
- goto out;
-
- }
-
- current_tpt = window->average_tpt;
-
- high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
- sband->band);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
- /* If the user set a max rate, don't allow a rate higher than that constraint */
- if ((max_rate_idx != -1) && (max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- /* Collect Measured throughputs of adjacent rates */
- if (low != IWL_RATE_INVALID)
- low_tpt = rs_sta->win[low].average_tpt;
-
- if (high != IWL_RATE_INVALID)
- high_tpt = rs_sta->win[high].average_tpt;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- scale_action = 0;
-
- /* Low success ratio, need to drop the rate */
- if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
- IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
- scale_action = -1;
- /* No throughput measured yet for adjacent rates,
- * try to increase */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
-
- /* Both adjacent throughputs are measured, but neither one has
- * better throughput; we're using the best rate, don't change
- * it! */
- } else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
-
- IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
- "current_tpt [%d]\n",
- low_tpt, high_tpt, current_tpt);
- scale_action = 0;
-
- /* At least one of the rates has better throughput */
- } else {
- if (high_tpt != IWL_INVALID_VALUE) {
-
- /* High rate has better throughput;
- * increase rate */
- if (high_tpt > current_tpt &&
- window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else {
- IWL_DEBUG_RATE(priv,
- "not increasing rate: high tpt no better\n");
- scale_action = 0;
- }
- } else if (low_tpt != IWL_INVALID_VALUE) {
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
- /* Lower rate is no better, but success
- * ratio is good -- increase rate */
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((window->success_ratio > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * rs_sta->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
-
- /* Decrease rate */
- if (low != IWL_RATE_INVALID)
- index = low;
- break;
-
- case 1:
- /* Increase rate */
- if (high != IWL_RATE_INVALID)
- index = high;
-
- break;
-
- case 0:
- default:
- /* No change */
- break;
- }
-
- IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
- index, scale_action, low, high);
-
- out:
-
- if (sband->band == IEEE80211_BAND_5GHZ) {
- if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
- index = IWL_FIRST_OFDM_RATE;
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
- } else {
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = rs_sta->last_txrate_idx;
- }
-
- IWL_DEBUG_RATE(priv, "leave: %d\n", index);
-}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int j;
- ssize_t ret;
- struct iwl3945_rs_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
- "rate=0x%X flush time %d\n",
- lq_sta->tx_packets,
- lq_sta->last_txrate_idx,
- lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
- for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->win[j].counter,
- lq_sta->win[j].success_counter,
- lq_sta->win[j].success_ratio);
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl3945_sta_dbgfs_stats_table_read,
- .open = iwl3945_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl3945_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
-
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0600, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void iwl3945_rs_rate_init_stub(void *priv_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static struct rate_control_ops rs_ops = {
- .module = NULL,
- .name = RS_NAME,
- .tx_status = iwl3945_rs_tx_status,
- .get_rate = iwl3945_rs_get_rate,
- .rate_init = iwl3945_rs_rate_init_stub,
- .alloc = iwl3945_rs_alloc,
- .free = iwl3945_rs_free,
- .alloc_sta = iwl3945_rs_alloc_sta,
- .free_sta = iwl3945_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl3945_add_debugfs,
- .remove_sta_debugfs = iwl3945_remove_debugfs,
-#endif
-
-};
-void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
-{
- struct iwl_priv *priv = hw->priv;
- s32 rssi = 0;
- unsigned long flags;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_sta *sta;
- struct iwl3945_sta_priv *psta;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rcu_read_lock();
-
- sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
- priv->stations[sta_id].sta.sta.addr);
- if (!sta) {
- IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
- rcu_read_unlock();
- return;
- }
-
- psta = (void *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- rs_sta->tgg = 0;
- switch (priv->band) {
- case IEEE80211_BAND_2GHZ:
- /* TODO: this always does G, not a regression */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_TGG_PROTECT_MSK) {
- rs_sta->tgg = 1;
- rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
- } else
- rs_sta->expected_tpt = iwl3945_expected_tpt_g;
- break;
-
- case IEEE80211_BAND_5GHZ:
- rs_sta->expected_tpt = iwl3945_expected_tpt_a;
- break;
- case IEEE80211_NUM_BANDS:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- rssi = priv->_3945.last_rx_rssi;
- if (rssi == 0)
- rssi = IWL_MIN_RSSI_VAL;
-
- IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
-
- rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
-
- IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
- "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
- iwl3945_rates[rs_sta->start_rate].plcp);
- rcu_read_unlock();
-}
-
-int iwl3945_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_ops);
-}
-
-void iwl3945_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_ops);
-}
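
For context, a minimal sketch of how the two exported wrappers above are typically wired up from a module's init/exit hooks. This is not the driver's actual init path; the example names and the ordering comments are assumptions.

/* Illustrative module glue for the rate-control register/unregister
 * wrappers above (builds only in a kernel tree). */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

/* normally picked up from iwl-3945.h */
int iwl3945_rate_control_register(void);
void iwl3945_rate_control_unregister(void);

static int __init example_init(void)
{
	int ret = iwl3945_rate_control_register();
	if (ret) {
		pr_err("rate control registration failed: %d\n", ret);
		return ret;
	}
	/* ... the real driver would register its PCI driver here ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* ... unregister the PCI driver first in the real driver ... */
	iwl3945_rate_control_unregister();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");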
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a743847..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-#include <net/mac80211.h>
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-led.h"
-#include "iwl-3945-led.h"
-#include "iwl-3945-debugfs.h"
-
-#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX, \
- IWL_RATE_##r##M_INDEX_TABLE, \
- IWL_RATE_##ip##M_INDEX_TABLE }
-
-/*
- * Parameter order:
- * rate, prev ieee rate, next ieee rate, prev rs rate, next rs rate,
- * prev rs tgg rate, next rs tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
- IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /* 5.5mbps */
- IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-};
-
-static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
-{
- u8 rate = iwl3945_rates[rate_index].prev_ieee;
-
- if (rate == IWL_RATE_INVALID)
- rate = rate_index;
- return rate;
-}
-
-/* 1 = enable the iwl3945_disable_events() function */
-#define IWL_EVT_DISABLE (0)
-#define IWL_EVT_DISABLE_SIZE (1532/32)
-
-/**
- * iwl3945_disable_events - Disable selected events in uCode event log
- *
- * Disable an event by writing "1"s into "disable"
- * bitmap in SRAM. Bit position corresponds to Event # (id/type).
- * Default values of 0 enable uCode events to be logged.
- * Use only for special debugging. This function is just a placeholder as-is,
- * you'll need to provide the special bits! ...
- * ... and set IWL_EVT_DISABLE to 1. */
-void iwl3945_disable_events(struct iwl_priv *priv)
-{
- int i;
- u32 base; /* SRAM address of event log header */
- u32 disable_ptr; /* SRAM address of event-disable bitmap array */
- u32 array_size; /* # of u32 entries in array */
- static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
- 0x00000000, /* 31 - 0 Event id numbers */
- 0x00000000, /* 63 - 32 */
- 0x00000000, /* 95 - 64 */
- 0x00000000, /* 127 - 96 */
- 0x00000000, /* 159 - 128 */
- 0x00000000, /* 191 - 160 */
- 0x00000000, /* 223 - 192 */
- 0x00000000, /* 255 - 224 */
- 0x00000000, /* 287 - 256 */
- 0x00000000, /* 319 - 288 */
- 0x00000000, /* 351 - 320 */
- 0x00000000, /* 383 - 352 */
- 0x00000000, /* 415 - 384 */
- 0x00000000, /* 447 - 416 */
- 0x00000000, /* 479 - 448 */
- 0x00000000, /* 511 - 480 */
- 0x00000000, /* 543 - 512 */
- 0x00000000, /* 575 - 544 */
- 0x00000000, /* 607 - 576 */
- 0x00000000, /* 639 - 608 */
- 0x00000000, /* 671 - 640 */
- 0x00000000, /* 703 - 672 */
- 0x00000000, /* 735 - 704 */
- 0x00000000, /* 767 - 736 */
- 0x00000000, /* 799 - 768 */
- 0x00000000, /* 831 - 800 */
- 0x00000000, /* 863 - 832 */
- 0x00000000, /* 895 - 864 */
- 0x00000000, /* 927 - 896 */
- 0x00000000, /* 959 - 928 */
- 0x00000000, /* 991 - 960 */
- 0x00000000, /* 1023 - 992 */
- 0x00000000, /* 1055 - 1024 */
- 0x00000000, /* 1087 - 1056 */
- 0x00000000, /* 1119 - 1088 */
- 0x00000000, /* 1151 - 1120 */
- 0x00000000, /* 1183 - 1152 */
- 0x00000000, /* 1215 - 1184 */
- 0x00000000, /* 1247 - 1216 */
- 0x00000000, /* 1279 - 1248 */
- 0x00000000, /* 1311 - 1280 */
- 0x00000000, /* 1343 - 1312 */
- 0x00000000, /* 1375 - 1344 */
- 0x00000000, /* 1407 - 1376 */
- 0x00000000, /* 1439 - 1408 */
- 0x00000000, /* 1471 - 1440 */
- 0x00000000, /* 1503 - 1472 */
- };
-
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
- }
-
- disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
- array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
-
- if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
- IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
- disable_ptr);
- for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
- iwl_legacy_write_targ_mem(priv,
- disable_ptr + (i * sizeof(u32)),
- evt_disable[i]);
-
- } else {
- IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
- IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
- IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
- disable_ptr, array_size);
- }
-
-}
-
-static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
-{
- int idx;
-
- for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
- if (iwl3945_rates[idx].plcp == plcp)
- return idx;
- return -1;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
-
-static const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- switch (status & TX_STATUS_MSK) {
- case TX_3945_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_ENTRY(SHORT_LIMIT);
- TX_STATUS_ENTRY(LONG_LIMIT);
- TX_STATUS_ENTRY(FIFO_UNDERRUN);
- TX_STATUS_ENTRY(MGMNT_ABORT);
- TX_STATUS_ENTRY(NEXT_FRAG);
- TX_STATUS_ENTRY(LIFE_EXPIRE);
- TX_STATUS_ENTRY(DEST_PS);
- TX_STATUS_ENTRY(ABORTED);
- TX_STATUS_ENTRY(BT_RETRY);
- TX_STATUS_ENTRY(STA_INVALID);
- TX_STATUS_ENTRY(FRAG_DROPPED);
- TX_STATUS_ENTRY(TID_DISABLE);
- TX_STATUS_ENTRY(FRAME_FLUSHED);
- TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
- TX_STATUS_ENTRY(TX_LOCKED);
- TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-}
-#else
-static inline const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- return "";
-}
-#endif
-
-/*
- * Get the previous IEEE rate from the rate scale table.
- * For A and B mode we need to override the previous
- * value.
- */
-int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
-{
- int next_rate = iwl3945_get_prev_ieee_rate(rate);
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- if (rate == IWL_RATE_12M_INDEX)
- next_rate = IWL_RATE_9M_INDEX;
- else if (rate == IWL_RATE_6M_INDEX)
- next_rate = IWL_RATE_6M_INDEX;
- break;
- case IEEE80211_BAND_2GHZ:
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- if (rate == IWL_RATE_11M_INDEX)
- next_rate = IWL_RATE_5M_INDEX;
- }
- break;
-
- default:
- break;
- }
-
- return next_rate;
-}
-
-
-/**
- * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
- int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
-
- BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
- tx_info->skb = NULL;
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
-
- if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
- (txq_id != IWL39_CMD_QUEUE_NUM) &&
- priv->mac80211_registered)
- iwl_legacy_wake_queue(priv, txq);
-}
-
-/**
- * iwl3945_rx_reply_tx - Handle Tx response
- */
-static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->status);
- int rate_idx;
- int fail;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- ieee80211_tx_info_clear_status(info);
-
- /* Fill the MRR chain with some info about on-chip retransmissions */
- rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
-
- fail = tx_resp->failure_frame;
-
- info->status.rates[0].idx = rate_idx;
- info->status.rates[0].count = fail + 1; /* add final attempt */
-
- /* tx_status->rts_retry_count = tx_resp->failure_rts; */
- info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
- IEEE80211_TX_STAT_ACK : 0;
-
- IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
- txq_id, iwl3945_get_tx_fail_reason(status), status,
- tx_resp->rate, tx_resp->failure_frame);
-
- IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
- iwl3945_tx_queue_reclaim(priv, txq_id, index);
-
- if (status & TX_ABORT_REQUIRED_MSK)
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
-}
-
-
-
-/*****************************************************************************
- *
- * Intel PRO/Wireless 3945ABG/BG Network Connection
- *
- * RX handler implementations
- *
- *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
-
- prev_stats = (__le32 *)&priv->_3945.statistics;
- accum_stats = (u32 *)&priv->_3945.accum_statistics;
- delta = (u32 *)&priv->_3945.delta_statistics;
- max_delta = (u32 *)&priv->_3945.max_delta;
-
- for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- priv->_3945.accum_statistics.general.temperature =
- priv->_3945.statistics.general.temperature;
- priv->_3945.accum_statistics.general.ttl_timestamp =
- priv->_3945.statistics.general.ttl_timestamp;
-}
-#endif
-
-void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl3945_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
-#endif
-
- memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
-}
-
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- __le32 *flag = (__le32 *)&pkt->u.raw;
-
- if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_3945.accum_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.delta_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.max_delta, 0,
- sizeof(struct iwl3945_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl3945_hw_rx_statistics(priv, rxb);
-}
-
-
-/******************************************************************************
- *
- * Misc. internal state and helper functions
- *
- ******************************************************************************/
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl3945_is_network_packet(struct iwl_priv *priv,
- struct ieee80211_hdr *header)
-{
- /* Filter incoming packets to determine if they are targeted toward
- * this network, discarding packets coming from ourselves */
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr3, priv->bssid);
- case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
- /* packets from our AP update information */
- return !compare_ether_addr(header->addr2, priv->bssid);
- default:
- return 1;
- }
-}
-
-static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 len = le16_to_cpu(rx_hdr->len);
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* Sanity-check the frame length against the RX buffer size */
- if (unlikely(len + IWL39_RX_FRAME_SIZE >
- PAGE_SIZE << priv->hw_params.rx_page_order)) {
- IWL_DEBUG_DROP(priv, "Corruption detected!\n");
- return;
- }
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- if (!iwl3945_mod_params.sw_crypto)
- iwl_legacy_set_decrypted_flag(priv,
- (struct ieee80211_hdr *)rxb_addr(rxb),
- le32_to_cpu(rx_end->status), stats);
-
- skb_add_rx_frag(skb, 0, rxb->page,
- (void *)rx_hdr->payload - (void *)pkt, len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
-static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
- u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
- u8 network_packet;
-
- rx_status.flag = 0;
- rx_status.mactime = le64_to_cpu(rx_end->timestamp);
- rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
- rx_status.band);
-
- rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
- if (rx_status.band == IEEE80211_BAND_5GHZ)
- rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
-
- rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
-
- /* set the preamble flag if appropriate */
- if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- if (unlikely(rx_stats->phy_count > 20)) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
- rx_stats->phy_count);
- return;
- }
-
- if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
- || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
- return;
- }
-
-
-
- /* Convert 3945's rssi indicator to dBm */
- rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
-
- IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_stats_sig_avg,
- rx_stats_noise_diff);
-
- header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-
- network_packet = iwl3945_is_network_packet(priv, header);
-
- IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
- network_packet ? '*' : ' ',
- le16_to_cpu(rx_hdr->channel),
- rx_status.signal, rx_status.signal,
- rx_status.rate_idx);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
- header);
-
- if (network_packet) {
- priv->_3945.last_beacon_time =
- le32_to_cpu(rx_end->beacon_timestamp);
- priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
- priv->_3945.last_rx_rssi = rx_status.signal;
- }
-
- iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-}
-
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad)
-{
- int count;
- struct iwl_queue *q;
- struct iwl3945_tfd *tfd, *tfd_tmp;
-
- q = &txq->q;
- tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-
- if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- NUM_TFD_CHUNKS);
- return -EINVAL;
- }
-
- tfd->tbs[count].addr = cpu_to_le32(addr);
- tfd->tbs[count].len = cpu_to_le32(len);
-
- count++;
-
- tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
- TFD_CTL_PAD_SET(pad));
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
- *
- * Does NOT advance any indexes
- */
-void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- int index = txq->q.read_ptr;
- struct iwl3945_tfd *tfd = &tfd_tmp[index];
- struct pci_dev *dev = priv->pci_dev;
- int i;
- int counter;
-
- /* sanity check */
- counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
- if (counter > NUM_TFD_CHUNKS) {
- IWL_ERR(priv, "Too many chunks: %i\n", counter);
- /* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (counter)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_TODEVICE);
-
- /* unmap chunks if any */
-
- for (i = 1; i < counter; i++)
- pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
- le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-/**
- * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
- *
- */
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id)
-{
- u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
- u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
- u16 rate_mask;
- int rate;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- __le32 tx_flags;
- __le16 fc = hdr->frame_control;
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-
- rate = iwl3945_rates[rate_index].plcp;
- tx_flags = tx_cmd->tx_flags;
-
- /* We need to figure out how to get the sta->supp_rates while
- * in this running context */
- rate_mask = IWL_RATES_MASK_3945;
-
- /* Set retry limit on DATA packets and Probe Responses */
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- if (tx_id >= IWL39_CMD_QUEUE_NUM)
- rts_retry_limit = 3;
- else
- rts_retry_limit = 7;
-
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- tx_cmd->rate = rate;
- tx_cmd->tx_flags = tx_flags;
-
- /* OFDM */
- tx_cmd->supp_rates[0] =
- ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- /* CCK */
- tx_cmd->supp_rates[1] = (rate_mask & 0xF);
-
- IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
- "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
- tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
- tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
-}
-
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
-{
- unsigned long flags_spin;
- struct iwl_station_entry *station;
-
- if (sta_id == IWL_INVALID_STATION)
- return IWL_INVALID_STATION;
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- station = &priv->stations[sta_id];
-
- station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
- station->sta.rate_n_flags = cpu_to_le16(tx_rate);
- station->sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
- sta_id, tx_rate);
- return sta_id;
-}
-
-static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN,
- CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000);
- }
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
-}
-
-static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
- rxq->rb_stts_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
- FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
- FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
- (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
- FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
- (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
- FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
-
- /* fake read to flush all prev I/O */
- iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
-
- return 0;
-}
-
-static int iwl3945_tx_reset(struct iwl_priv *priv)
-{
-
- /* bypass mode */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
-
- /* RA 0 is active */
- iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
-
- /* all 6 fifo are active */
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
-
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
- priv->_3945.shared_phys);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
-
-
- return 0;
-}
-
-/**
- * iwl3945_txq_ctx_reset - Reset TX queue context
- *
- * Destroys all DMA structures and initialize them again
- */
-static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
-{
- int rc;
- int txq_id, slots_num;
-
- iwl3945_hw_txq_ctx_free(priv);
-
- /* allocate tx queue structure */
- rc = iwl_legacy_alloc_txq_mem(priv);
- if (rc)
- return rc;
-
- /* Tx CMD queue */
- rc = iwl3945_tx_reset(priv);
- if (rc)
- goto error;
-
- /* Tx queue(s) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- if (rc) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return rc;
-
- error:
- iwl3945_hw_txq_ctx_free(priv);
- return rc;
-}
-
-
-/*
- * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-static int iwl3945_apm_init(struct iwl_priv *priv)
-{
- int ret = iwl_legacy_apm_init(priv);
-
- /* Clear APMG (NIC's internal power management) interrupts */
- iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
- iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
-
- /* Reset radio chip */
- iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- return ret;
-}
-
-static void iwl3945_nic_config(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- unsigned long flags;
- u8 rev_id = priv->pci_dev->revision;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Determine HW type */
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
- if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
- IWL_DEBUG_INFO(priv, "RTP type\n");
- else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
- } else {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
- }
-
- if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
- IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
- } else
- IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
-
- if ((eeprom->board_revision & 0xF0) == 0xD0) {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- } else {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- }
-
- if (eeprom->almgor_m_version <= 1) {
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
- IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
- eeprom->almgor_m_version);
- } else {
- IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
- eeprom->almgor_m_version);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
-}
-
-int iwl3945_hw_nic_init(struct iwl_priv *priv)
-{
- int rc;
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- rc = iwl_legacy_rx_queue_alloc(priv);
- if (rc) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl3945_rx_queue_reset(priv, rxq);
-
- iwl3945_rx_replenish(priv);
-
- iwl3945_rx_init(priv, rxq);
-
-
- /* Look at using this instead:
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- */
-
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-
- rc = iwl3945_txq_ctx_reset(priv);
- if (rc)
- return rc;
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq)
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
- txq_id++)
- if (txq_id == IWL39_CMD_QUEUE_NUM)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* stop SCD */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
-
- /* reset TFD queues */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
- iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
- FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
- 1000);
- }
-
- iwl3945_hw_txq_ctx_free(priv);
-}
-
-/**
- * iwl3945_hw_reg_adjust_power_by_temp
- * return index delta into power gain settings table
- */
-static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
-{
- return (new_reading - old_reading) * (-11) / 100;
-}
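A minimal standalone sketch of the slope used above, with hypothetical readings (not part of the deleted file): the -11/100 factor gives roughly one half-dB table step per 9 raw temperature units, negated so that a reading above the factory reference moves the index toward the high-power end (index 0) of the gain table.

#include <stdio.h>

/* Same integer arithmetic as iwl3945_hw_reg_adjust_power_by_temp() */
static int adjust_power_by_temp(int new_reading, int old_reading)
{
	return (new_reading - old_reading) * (-11) / 100;
}

int main(void)
{
	/* hypothetical readings: radio 50 raw units hotter than factory */
	printf("%d\n", adjust_power_by_temp(75, 25));	/* prints -5 */
	/* 100 raw units cooler than factory */
	printf("%d\n", adjust_power_by_temp(-75, 25));	/* prints 11 */
	return 0;
}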
-
-/**
- * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
- */
-static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
-{
- return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
-}
-
-int iwl3945_hw_get_temperature(struct iwl_priv *priv)
-{
- return iwl_read32(priv, CSR_UCODE_DRV_GP2);
-}
-
-/**
- * iwl3945_hw_reg_txpower_get_temperature
- * get the current temperature by reading from NIC
- */
-static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int temperature;
-
- temperature = iwl3945_hw_get_temperature(priv);
-
- /* driver's okay range is -260 to +25.
- * human readable okay range is 0 to +285 */
- IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
-
- /* handle insane temp reading */
- if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
- IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
-
- /* if the previous reading was very hot (> 100),
- * substitute the 3rd band/group's temp measured at factory */
- if (priv->last_temperature > 100)
- temperature = eeprom->groups[2].temperature;
- else /* else use most recent "sane" value from driver */
- temperature = priv->last_temperature;
- }
-
- return temperature; /* raw, not "human readable" */
-}
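The quoted ranges (-260..+25 raw versus 0..+285 "human readable") imply a fixed display offset of 260, presumably what the IWL_TEMP_CONVERT addition in the debug print provides. A tiny sketch with a stand-in constant and a hypothetical reading (not part of the deleted file):

#include <stdio.h>

#define TEMP_CONVERT 260	/* stand-in; offset implied by the ranges above */

static int temp_out_of_range(int temperature)
{
	return (temperature < -260) || (temperature > 25);
}

int main(void)
{
	int raw = -120;		/* hypothetical raw reading */

	printf("raw %d -> human-readable %d, out of range: %d\n",
	       raw, raw + TEMP_CONVERT, temp_out_of_range(raw));
	return 0;
}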
-
-/* Adjust Txpower only if temperature variance is greater than threshold.
- *
- * This is lower than older versions' 9 degrees */
-#define IWL_TEMPERATURE_LIMIT_TIMER 6
-
-/**
- * iwl3945_is_temp_calib_needed - determines if new calibration is needed
- *
- * records new temperature in tx_mgr->temperature.
- * replaces tx_mgr->last_temperature *only* if calib needed
- * (assumes caller will actually do the calibration!). */
-static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Same temp,\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
-
- /* if we don't need calibration, *don't* update last_temperature */
- if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
- IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
-
- /* assume that caller will actually do calib ...
- * update the "last temperature" value */
- priv->last_temperature = priv->temperature;
- return 1;
-}
-
-#define IWL_MAX_GAIN_ENTRIES 78
-#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
-#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
-
-/* radio and DSP power table, each step is 1/2 dB.
- * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
-static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
- {
- {251, 127}, /* 2.4 GHz, highest power */
- {251, 127},
- {251, 127},
- {251, 127},
- {251, 125},
- {251, 110},
- {251, 105},
- {251, 98},
- {187, 125},
- {187, 115},
- {187, 108},
- {187, 99},
- {243, 119},
- {243, 111},
- {243, 105},
- {243, 97},
- {243, 92},
- {211, 106},
- {211, 100},
- {179, 120},
- {179, 113},
- {179, 107},
- {147, 125},
- {147, 119},
- {147, 112},
- {147, 106},
- {147, 101},
- {147, 97},
- {147, 91},
- {115, 107},
- {235, 121},
- {235, 115},
- {235, 109},
- {203, 127},
- {203, 121},
- {203, 115},
- {203, 108},
- {203, 102},
- {203, 96},
- {203, 92},
- {171, 110},
- {171, 104},
- {171, 98},
- {139, 116},
- {227, 125},
- {227, 119},
- {227, 113},
- {227, 107},
- {227, 101},
- {227, 96},
- {195, 113},
- {195, 106},
- {195, 102},
- {195, 95},
- {163, 113},
- {163, 106},
- {163, 102},
- {163, 95},
- {131, 113},
- {131, 106},
- {131, 102},
- {131, 95},
- {99, 113},
- {99, 106},
- {99, 102},
- {99, 95},
- {67, 113},
- {67, 106},
- {67, 102},
- {67, 95},
- {35, 113},
- {35, 106},
- {35, 102},
- {35, 95},
- {3, 113},
- {3, 106},
- {3, 102},
- {3, 95} }, /* 2.4 GHz, lowest power */
- {
- {251, 127}, /* 5.x GHz, highest power */
- {251, 120},
- {251, 114},
- {219, 119},
- {219, 101},
- {187, 113},
- {187, 102},
- {155, 114},
- {155, 103},
- {123, 117},
- {123, 107},
- {123, 99},
- {123, 92},
- {91, 108},
- {59, 125},
- {59, 118},
- {59, 109},
- {59, 102},
- {59, 96},
- {59, 90},
- {27, 104},
- {27, 98},
- {27, 92},
- {115, 118},
- {115, 111},
- {115, 104},
- {83, 126},
- {83, 121},
- {83, 113},
- {83, 105},
- {83, 99},
- {51, 118},
- {51, 111},
- {51, 104},
- {51, 98},
- {19, 116},
- {19, 109},
- {19, 102},
- {19, 98},
- {19, 93},
- {171, 113},
- {171, 107},
- {171, 99},
- {139, 120},
- {139, 113},
- {139, 107},
- {139, 99},
- {107, 120},
- {107, 113},
- {107, 107},
- {107, 99},
- {75, 120},
- {75, 113},
- {75, 107},
- {75, 99},
- {43, 120},
- {43, 113},
- {43, 107},
- {43, 99},
- {11, 120},
- {11, 113},
- {11, 107},
- {11, 99},
- {131, 107},
- {131, 99},
- {99, 120},
- {99, 113},
- {99, 107},
- {99, 99},
- {67, 120},
- {67, 113},
- {67, 107},
- {67, 99},
- {35, 120},
- {35, 113},
- {35, 107},
- {35, 99},
- {3, 120} } /* 5.x GHz, lowest power */
-};
-
-static inline u8 iwl3945_hw_reg_fix_power_index(int index)
-{
- if (index < 0)
- return 0;
- if (index >= IWL_MAX_GAIN_ENTRIES)
- return IWL_MAX_GAIN_ENTRIES - 1;
- return (u8) index;
-}
-
-/* Kick off thermal recalibration check every 60 seconds */
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
- *
- * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
- * or 6 Mbit (OFDM) rates.
- */
-static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
- s32 rate_index, const s8 *clip_pwrs,
- struct iwl_channel_info *ch_info,
- int band_index)
-{
- struct iwl3945_scan_power_info *scan_power_info;
- s8 power;
- u8 power_index;
-
- scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
-
- /* use this channel group's 6Mbit clipping/saturation pwr,
- * but cap at regulatory scan power restriction (set during init
- * based on eeprom channel data) for this channel. */
- power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
-
- power = min(power, priv->tx_power_user_lmt);
- scan_power_info->requested_power = power;
-
- /* find difference between new scan *power* and current "normal"
- * Tx *power* for 6Mb. Use this difference (x2) to adjust the
- * current "normal" temperature-compensated Tx power *index* for
- * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
- * *index*. */
- power_index = ch_info->power_info[rate_index].power_table_index
- - (power - ch_info->power_info
- [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
-
- /* store reference index that we use when adjusting *all* scan
- * powers. So we can accommodate user (all channel) or spectrum
- * management (single channel) power changes "between" temperature
- * feedback compensation procedures.
- * don't force fit this reference index into gain table; it may be a
- * negative number. This will help avoid errors when we're at
- * the lower bounds (highest gains, for warmest temperatures)
- * of the table. */
-
- /* don't exceed table bounds for "real" setting */
- power_index = iwl3945_hw_reg_fix_power_index(power_index);
-
- scan_power_info->power_table_index = power_index;
- scan_power_info->tpc.tx_gain =
- power_gain_table[band_index][power_index].tx_gain;
- scan_power_info->tpc.dsp_atten =
- power_gain_table[band_index][power_index].dsp_atten;
-}
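Because each gain-table entry is one half-dB step, the routine above converts the dB gap between the scan power and the normal 6 Mbit power into an index offset by doubling it. A short sketch of that arithmetic with hypothetical values (variable names below are illustrative, not driver symbols):

#include <stdio.h>

int main(void)
{
	int base_index_6m   = 30;	/* temp-compensated index for 6 Mbit  */
	int normal_power_6m = 14;	/* dBm currently requested for 6 Mbit */
	int scan_power      = 12;	/* dBm allowed for scanning here      */

	/* scanning 2 dB lower -> move 4 half-dB steps toward low power */
	int scan_index = base_index_6m - (scan_power - normal_power_6m) * 2;

	printf("scan power index = %d\n", scan_index);	/* prints 34 */
	return 0;
}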
-
-/**
- * iwl3945_send_tx_power - fill in Tx Power command with gain settings
- *
- * Configures power settings for all rates for the current channel,
- * using values from channel info struct, and send to NIC
- */
-static int iwl3945_send_tx_power(struct iwl_priv *priv)
-{
- int rate_idx, i;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_txpowertable_cmd txpower = {
- .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
- };
- u16 chan;
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
-
- txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
- if (!ch_info) {
- IWL_ERR(priv,
- "Failed to get channel info for channel %d [%d]\n",
- chan, priv->band);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
- "non-Tx channel.\n");
- return 0;
- }
-
- /* fill cmd with power settings for all rates for current channel */
- /* Fill OFDM rate */
- for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
- rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
-
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
- /* Fill CCK rates */
- for (rate_idx = IWL_FIRST_CCK_RATE;
- rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
-
- return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
- sizeof(struct iwl3945_txpowertable_cmd),
- &txpower);
-
-}
-
-/**
- * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
- * @ch_info: Channel to update. Uses power_info.requested_power.
- *
- * Replace requested_power and base_power_index ch_info fields for
- * one channel.
- *
- * Called if user or spectrum management changes power preferences.
- * Takes into account h/w and modulation limitations (clip power).
- *
- * This does *not* send anything to NIC, just sets up ch_info for one channel.
- *
- * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
- * properly fill out the scan powers, and actual h/w gain settings,
- * and send changes to NIC
- */
-static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
- struct iwl_channel_info *ch_info)
-{
- struct iwl3945_channel_power_info *power_info;
- int power_changed = 0;
- int i;
- const s8 *clip_pwrs;
- int power;
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* Get this channel's rate-to-current-power settings table */
- power_info = ch_info->power_info;
-
- /* update OFDM Txpower settings */
- for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
- i++, ++power_info) {
- int delta_idx;
-
- /* limit new power to be no more than h/w capability */
- power = min(ch_info->curr_txpow, clip_pwrs[i]);
- if (power == power_info->requested_power)
- continue;
-
- /* find difference between old and new requested powers,
- * update base (non-temp-compensated) power index */
- delta_idx = (power - power_info->requested_power) * 2;
- power_info->base_power_index -= delta_idx;
-
- /* save new requested power value */
- power_info->requested_power = power;
-
- power_changed = 1;
- }
-
- /* update CCK Txpower settings, based on OFDM 12M setting ...
- * ... all CCK power settings for a given channel are the *same*. */
- if (power_changed) {
- power =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
-
- /* do all CCK rates' iwl3945_channel_power_info structures */
- for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
- power_info->requested_power = power;
- power_info->base_power_index =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
- ++power_info;
- }
- }
-
- return 0;
-}
-
-/**
- * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
- *
- * NOTE: Returned power limit may be less (but not more) than requested,
- * based strictly on regulatory (eeprom and spectrum mgt) limitations
- * (no consideration for h/w clipping limitations).
- */
-static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
-{
- s8 max_power;
-
-#if 0
- /* if we're using TGd limits, use lower of TGd or EEPROM */
- if (ch_info->tgd_data.max_power != 0)
- max_power = min(ch_info->tgd_data.max_power,
- ch_info->eeprom.max_power_avg);
-
- /* else just use EEPROM limits */
- else
-#endif
- max_power = ch_info->eeprom.max_power_avg;
-
- return min(max_power, ch_info->max_power_avg);
-}
-
-/**
- * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
- *
- * Compensate txpower settings of *all* channels for temperature.
- * This only accounts for the difference between current temperature
- * and the factory calibration temperatures, and bases the new settings
- * on the channel's base_power_index.
- *
- * If RxOn is "associated", this sends the new Txpower to NIC!
- */
-static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
- u8 a_band;
- u8 rate_index;
- u8 scan_tbl_index;
- u8 i;
- int ref_temp;
- int temperature = priv->temperature;
-
- if (priv->disable_tx_power_cal ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- /* do not perform tx power calibration */
- return 0;
- }
- /* set up new Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* Get this chnlgrp's factory calibration temperature */
- ref_temp = (s16)eeprom->groups[ch_info->group_index].
- temperature;
-
- /* get power index adjustment based on current and factory
- * temps */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- ref_temp);
-
- /* set tx power value for all rates, OFDM and CCK */
- for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
- rate_index++) {
- int power_idx =
- ch_info->power_info[rate_index].base_power_index;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within table range */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
- ch_info->power_info[rate_index].
- power_table_index = (u8) power_idx;
- ch_info->power_info[rate_index].tpc =
- power_gain_table[a_band][power_idx];
- }
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs,
- ch_info, a_band);
- }
- }
-
- /* send Txpower command for current channel to ucode */
- return priv->cfg->ops->lib->send_tx_power(priv);
-}
-
-int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
-{
- struct iwl_channel_info *ch_info;
- s8 max_power;
- u8 a_band;
- u8 i;
-
- if (priv->tx_power_user_lmt == power) {
- IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
- "limit: %ddBm.\n", power);
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
- priv->tx_power_user_lmt = power;
-
- /* set up new Tx powers for each and every channel, 2.4 and 5.x */
-
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* find minimum power of all user and regulatory constraints
- * (does not consider h/w clipping limitations) */
- max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
- max_power = min(power, max_power);
- if (max_power != ch_info->curr_txpow) {
- ch_info->curr_txpow = max_power;
-
- /* this considers the h/w clipping limitations */
- iwl3945_hw_reg_set_new_power(priv, ch_info);
- }
- }
-
- /* update txpower settings for all channels,
- * send to NIC if associated. */
- iwl3945_is_temp_calib_needed(priv);
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- return 0;
-}
-
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int rc = 0;
- struct iwl_rx_packet *pkt;
- struct iwl3945_rxon_assoc_cmd rxon_assoc;
- struct iwl_host_cmd cmd = {
- .id = REPLY_RXON_ASSOC,
- .len = sizeof(rxon_assoc),
- .flags = CMD_WANT_SKB,
- .data = &rxon_assoc,
- };
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
- rc = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data. This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
- struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
- int rc = 0;
- bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- if (!iwl_legacy_is_alive(priv))
- return -1;
-
- /* always get timestamp with Rx frame */
- staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
- /* select antenna */
- staging_rxon->flags &=
- ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
- staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
- rc = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (rc) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv,
- &priv->contexts[IWL_RXON_CTX_BSS])) {
- rc = iwl_legacy_send_rxon_assoc(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- if (rc) {
- IWL_ERR(priv, "Error setting RXON_ASSOC "
- "configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the associated mask enabled,
- * we must clear the associated mask from the active configuration
- * before we apply the new config */
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- active_rxon->reserved4 = 0;
- active_rxon->reserved5 = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- &priv->contexts[IWL_RXON_CTX_BSS].active);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (rc) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
- "configuration (%d).\n", rc);
- return rc;
- }
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(staging_rxon->channel),
- staging_rxon->bssid_addr);
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- staging_rxon->reserved4 = 0;
- staging_rxon->reserved5 = 0;
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
-
- /* Apply the new configuration */
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- staging_rxon);
- if (rc) {
- IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
- if (!new_assoc) {
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- /* If we issue a new RXON command which required a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (rc) {
- IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
- return rc;
- }
-
- /* Init the hardware's rate fallback order based on the band */
- rc = iwl3945_init_hw_rate_table(priv);
- if (rc) {
- IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * iwl3945_reg_txpower_periodic - called when time to check our temperature.
- *
- * -- reset periodic timer
- * -- see if temp has changed enough to warrant re-calibration ... if so:
- * -- correct coeffs for temp (can reset temp timer)
- * -- save this temp as "last",
- * -- send new set of gain settings to NIC
- * NOTE: This should continue working, even when we're not associated,
- * so we can keep our internal table of scan powers current. */
-void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
-{
- /* This will kick in the "brute force"
- * iwl3945_hw_reg_comp_txpower_temp() below */
- if (!iwl3945_is_temp_calib_needed(priv))
- goto reschedule;
-
- /* Set up a new set of temp-adjusted TxPowers, send to NIC.
- * This is based *only* on current temperature,
- * ignoring any previous power measurements */
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- reschedule:
- queue_delayed_work(priv->workqueue,
- &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
-}
-
-static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- _3945.thermal_periodic.work);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl3945_reg_txpower_periodic(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
- * for the channel.
- *
- * This function is used when initializing channel-info structs.
- *
- * NOTE: These channel groups do *NOT* match the bands above!
- * These channel groups are based on factory-tested channels;
- * on A-band, EEPROM's "group frequency" entries represent the top
- * channel in each group 1-4. All B/G channels are in group 0.
- */
-static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
- const struct iwl_channel_info *ch_info)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
- u8 group;
- u16 group_index = 0; /* based on factory calib frequencies */
- u8 grp_channel;
-
- /* Find the group index for the channel ... don't use index 1(?) */
- if (iwl_legacy_is_channel_a_band(ch_info)) {
- for (group = 1; group < 5; group++) {
- grp_channel = ch_grp[group].group_channel;
- if (ch_info->channel <= grp_channel) {
- group_index = group;
- break;
- }
- }
- /* group 4 has a few channels *above* its factory cal freq */
- if (group == 5)
- group_index = 4;
- } else
- group_index = 0; /* 2.4 GHz, group 0 */
-
- IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
- group_index);
- return group_index;
-}
-
-/**
- * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
- *
- * Interpolate to get nominal (i.e. at factory calibration temperature) index
- * into radio/DSP gain settings table for requested power.
- */
-static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
- s8 requested_power,
- s32 setting_index, s32 *new_index)
-{
- const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- s32 index0, index1;
- s32 power = 2 * requested_power;
- s32 i;
- const struct iwl3945_eeprom_txpower_sample *samples;
- s32 gains0, gains1;
- s32 res;
- s32 denominator;
-
- chnl_grp = &eeprom->groups[setting_index];
- samples = chnl_grp->samples;
- for (i = 0; i < 5; i++) {
- if (power == samples[i].power) {
- *new_index = samples[i].gain_index;
- return 0;
- }
- }
-
- if (power > samples[1].power) {
- index0 = 0;
- index1 = 1;
- } else if (power > samples[2].power) {
- index0 = 1;
- index1 = 2;
- } else if (power > samples[3].power) {
- index0 = 2;
- index1 = 3;
- } else {
- index0 = 3;
- index1 = 4;
- }
-
- denominator = (s32) samples[index1].power - (s32) samples[index0].power;
- if (denominator == 0)
- return -EINVAL;
- gains0 = (s32) samples[index0].gain_index * (1 << 19);
- gains1 = (s32) samples[index1].gain_index * (1 << 19);
- res = gains0 + (gains1 - gains0) *
- ((s32) power - (s32) samples[index0].power) / denominator +
- (1 << 18);
- *new_index = res >> 19;
- return 0;
-}
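The interpolation above is done in Q19 fixed point: gain indices are scaled by 1 << 19, interpolated linearly between the two bracketing factory samples, and 1 << 18 is added before the final shift so the result rounds to nearest. A standalone sketch with hypothetical sample points:

#include <stdio.h>

/* Round-to-nearest linear interpolation in Q19, mirroring the lookup above. */
static int interp_gain_index(int p0, int g0, int p1, int g1, int power)
{
	int gains0 = g0 * (1 << 19);
	int gains1 = g1 * (1 << 19);

	if (p1 == p0)
		return g0;	/* driver returns -EINVAL here; sketch falls back to g0 */

	return (gains0 + (gains1 - gains0) * (power - p0) / (p1 - p0)
		+ (1 << 18)) >> 19;
}

int main(void)
{
	/* hypothetical samples: 24 half-dBm -> index 20, 19 half-dBm -> index 32 */
	printf("%d\n", interp_gain_index(24, 20, 19, 32, 22));	/* 24.8 -> prints 25 */
	return 0;
}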
-
-static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
-{
- u32 i;
- s32 rate_index;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- const struct iwl3945_eeprom_txpower_group *group;
-
- IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
-
- for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
- s8 *clip_pwrs; /* table of power levels for each rate */
- s8 satur_pwr; /* saturation power for each chnl group */
- group = &eeprom->groups[i];
-
- /* sanity check on factory saturation power value */
- if (group->saturation_power < 40) {
- IWL_WARN(priv, "Error: saturation power is %d, "
- "less than minimum expected 40\n",
- group->saturation_power);
- return;
- }
-
- /*
- * Derive requested power levels for each rate, based on
- * hardware capabilities (saturation power for band).
- * Basic value is 3dB down from saturation, with further
- * power reductions for highest 3 data rates. These
- * backoffs provide headroom for high rate modulation
- * power peaks, without too much distortion (clipping).
- */
- /* we'll fill in this array with h/w max power levels */
- clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
-
- /* divide factory saturation power by 2 to find -3dB level */
- satur_pwr = (s8) (group->saturation_power >> 1);
-
- /* fill in channel group's nominal powers for each rate */
- for (rate_index = 0;
- rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
- switch (rate_index) {
- case IWL_RATE_36M_INDEX_TABLE:
- if (i == 0) /* B/G */
- *clip_pwrs = satur_pwr;
- else /* A */
- *clip_pwrs = satur_pwr - 5;
- break;
- case IWL_RATE_48M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 7;
- else
- *clip_pwrs = satur_pwr - 10;
- break;
- case IWL_RATE_54M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 9;
- else
- *clip_pwrs = satur_pwr - 12;
- break;
- default:
- *clip_pwrs = satur_pwr;
- break;
- }
- }
- }
-}
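The clip powers are derived only from the group's factory saturation power: half of that value is the baseline, and the three highest OFDM rates get additional backoff (larger on the A band). A short sketch with a hypothetical EEPROM value (not part of the deleted file):

#include <stdio.h>

int main(void)
{
	int saturation_power = 56;		/* hypothetical EEPROM value (>= 40) */
	int satur_pwr = saturation_power >> 1;	/* baseline "3 dB down" = 28 */
	int is_bg = 1;				/* group 0 is the B/G group */

	printf("36M clip: %d\n", is_bg ? satur_pwr : satur_pwr - 5);
	printf("48M clip: %d\n", is_bg ? satur_pwr - 7 : satur_pwr - 10);
	printf("54M clip: %d\n", is_bg ? satur_pwr - 9 : satur_pwr - 12);
	printf("all other rates: %d\n", satur_pwr);
	return 0;
}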
-
-/**
- * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
- *
- * Second pass (during init) to set up priv->channel_info
- *
- * Set up Tx-power settings in our channel info database for each VALID
- * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
- * and current temperature.
- *
- * Since this is based on current temperature (at init time), these values may
- * not be valid for very long, but it gives us a starting/default point,
- * and allows us to perform active (i.e. Tx) scans.
- *
- * This does *not* write values to NIC, just sets up our internal table.
- */
-int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_channel_power_info *pwr_info;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- u8 rate_index;
- u8 scan_tbl_index;
- const s8 *clip_pwrs; /* array of power levels for each rate */
- u8 gain, dsp_atten;
- s8 power;
- u8 pwr_index, base_pwr_index, a_band;
- u8 i;
- int temperature;
-
- /* save temperature reference,
- * so we can determine next time to calibrate */
- temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- priv->last_temperature = temperature;
-
- iwl3945_hw_reg_init_channel_groups(priv);
-
- /* initialize Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
- i++, ch_info++) {
- a_band = iwl_legacy_is_channel_a_band(ch_info);
- if (!iwl_legacy_is_channel_valid(ch_info))
- continue;
-
- /* find this channel's channel group (*not* "band") index */
- ch_info->group_index =
- iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
-
- /* Get this chnlgrp's rate->max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* calculate power index *adjustment* value according to
- * diff between current temperature and factory temperature */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- eeprom->groups[ch_info->group_index].
- temperature);
-
- IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
- ch_info->channel, delta_index, temperature +
- IWL_TEMP_CONVERT);
-
- /* set tx power value for all OFDM rates */
- for (rate_index = 0; rate_index < IWL_OFDM_RATES;
- rate_index++) {
- s32 uninitialized_var(power_idx);
- int rc;
-
- /* use channel group's clip-power table,
- * but don't exceed channel's max power */
- s8 pwr = min(ch_info->max_power_avg,
- clip_pwrs[rate_index]);
-
- pwr_info = &ch_info->power_info[rate_index];
-
- /* get base (i.e. at factory-measured temperature)
- * power table index for this rate's power */
- rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
- ch_info->group_index,
- &power_idx);
- if (rc) {
- IWL_ERR(priv, "Invalid power index\n");
- return rc;
- }
- pwr_info->base_power_index = (u8) power_idx;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within range of gain table */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-
- /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
- pwr_info->requested_power = pwr;
- pwr_info->power_table_index = (u8) power_idx;
- pwr_info->tpc.tx_gain =
- power_gain_table[a_band][power_idx].tx_gain;
- pwr_info->tpc.dsp_atten =
- power_gain_table[a_band][power_idx].dsp_atten;
- }
-
- /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
- pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
- power = pwr_info->requested_power +
- IWL_CCK_FROM_OFDM_POWER_DIFF;
- pwr_index = pwr_info->power_table_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
- base_pwr_index = pwr_info->base_power_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
-
- /* stay within table range */
- pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
- gain = power_gain_table[a_band][pwr_index].tx_gain;
- dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
-
- /* fill each CCK rate's iwl3945_channel_power_info structure
- * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
- * NOTE: CCK rates start at end of OFDM rates! */
- for (rate_index = 0;
- rate_index < IWL_CCK_RATES; rate_index++) {
- pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
- pwr_info->requested_power = power;
- pwr_info->power_table_index = pwr_index;
- pwr_info->base_power_index = base_pwr_index;
- pwr_info->tpc.tx_gain = gain;
- pwr_info->tpc.dsp_atten = dsp_atten;
- }
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs, ch_info, a_band);
- }
- }
-
- return 0;
-}
-
-int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
-{
- int rc;
-
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
- rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
- FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
- if (rc < 0)
- IWL_ERR(priv, "Can't stop Rx DMA.\n");
-
- return 0;
-}
-
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
-
- shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
-
- iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
- iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
-
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
- FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-
- /* fake read to flush all prev. writes */
- iwl_read32(priv, FH39_TSSR_CBB_BASE);
-
- return 0;
-}
-
-/*
- * HCMD utils
- */
-static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return sizeof(struct iwl3945_rxon_cmd);
- case POWER_TABLE_CMD:
- return sizeof(struct iwl3945_powertable_cmd);
- default:
- return len;
- }
-}
-
-
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cpu_to_le16(0);
- addsta->rate_n_flags = cmd->rate_n_flags;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-
- return (u16)sizeof(struct iwl3945_addsta_cmd);
-}
-
-static int iwl3945_add_bssid_station(struct iwl_priv *priv,
- const u8 *addr, u8 *sta_id_r)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int ret;
- u8 sta_id;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- int ret;
-
- if (add) {
- ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- if (ret)
- return ret;
-
- iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
- (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
- iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
-
- return 0;
- }
-
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-/**
- * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
- */
-int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
-{
- int rc, i, index, prev_index;
- struct iwl3945_rate_scaling_cmd rate_cmd = {
- .reserved = {0, 0, 0},
- };
- struct iwl3945_rate_scaling_info *table = rate_cmd.table;
-
- for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
- index = iwl3945_rates[i].table_rs_index;
-
- table[index].rate_n_flags =
- iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
- table[index].try_cnt = priv->retry_rate;
- prev_index = iwl3945_get_prev_ieee_rate(i);
- table[index].next_rate_index =
- iwl3945_rates[prev_index].table_rs_index;
- }
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
- /* If one of the following CCK rates is used,
- * have it fall back to the 6M OFDM rate */
- for (i = IWL_RATE_1M_INDEX_TABLE;
- i <= IWL_RATE_11M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-
- /* Don't fall back to CCK rates */
- table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
- IWL_RATE_9M_INDEX_TABLE;
-
- /* Don't drop out of OFDM rates */
- table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
- break;
-
- case IEEE80211_BAND_2GHZ:
- IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
- /* If an OFDM rate is used, have it fall back to the
- * 1M CCK rates */
-
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-
- index = IWL_FIRST_CCK_RATE;
- for (i = IWL_RATE_6M_INDEX_TABLE;
- i <= IWL_RATE_54M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[index].table_rs_index;
-
- index = IWL_RATE_11M_INDEX_TABLE;
- /* CCK shouldn't fall back to OFDM... */
- table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
- }
- break;
-
- default:
- WARN_ON(1);
- break;
- }
-
- /* Update the rate scaling for control frame Tx */
- rate_cmd.table_id = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
- if (rc)
- return rc;
-
- /* Update the rate scaling for data frame Tx */
- rate_cmd.table_id = 1;
- return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
-}
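Each REPLY_RATE_SCALE entry names the next (lower) rate to try after repeated Tx failures, so the table encodes per-band fallback chains; the switch above then pins the chain endpoints per band. A toy sketch of walking such a chain, using hypothetical rates rather than the driver's tables:

#include <stdio.h>

struct rs_entry {
	const char *name;
	int next;	/* index of the rate to fall back to */
};

int main(void)
{
	/* hypothetical 4-entry table; the last entry falls back to itself */
	struct rs_entry table[] = {
		{ "54M", 1 }, { "48M", 2 }, { "36M", 3 }, { "24M", 3 },
	};
	int i = 0;

	while (i != table[i].next) {
		printf("%s -> ", table[i].name);
		i = table[i].next;
	}
	printf("%s (floor)\n", table[i].name);	/* 54M -> 48M -> 36M -> 24M (floor) */
	return 0;
}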
-
-/* Called when initializing driver */
-int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
-{
- memset((void *)&priv->hw_params, 0,
- sizeof(struct iwl_hw_params));
-
- priv->_3945.shared_virt =
- dma_alloc_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- &priv->_3945.shared_phys, GFP_KERNEL);
- if (!priv->_3945.shared_virt) {
- IWL_ERR(priv, "failed to allocate pci memory\n");
- return -ENOMEM;
- }
-
- /* Assign number of Usable TX queues */
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-
- priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- priv->hw_params.max_stations = IWL3945_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
- priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
- priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate)
-{
- struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
- unsigned int frame_size;
-
- tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- tx_beacon_cmd->tx.sta_id =
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- frame_size = iwl3945_fill_beacon_frame(priv,
- tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
- BUG_ON(frame_size > MAX_MPDU_SIZE);
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
- tx_beacon_cmd->tx.rate = rate;
- tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK);
-
- /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
- tx_beacon_cmd->tx.supp_rates[0] =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- tx_beacon_cmd->tx.supp_rates[1] =
- (IWL_CCK_BASIC_RATES_MASK & 0xF);
-
- return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
-}
-
-void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
- priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
-}
-
-void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
-{
- INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
- iwl3945_bg_reg_txpower_periodic);
-}
-
-void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_delayed_work(&priv->_3945.thermal_periodic);
-}
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl3945_verify_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
- ******************************************************************************/
-
-/*
- * Clear the OWNER_MSK, to establish driver (instead of uCode running on
- * embedded controller) as EEPROM reader; each read is a series of pulses
- * to/from the EEPROM chip, not a single event, so even reads could conflict
- * if they weren't arbitrated by some ownership mechanism. Here, the driver
- * simply claims ownership, which should be safe when this function is called
- * (i.e. before loading uCode!).
- */
-static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
- return 0;
-}
-
-
-static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- return;
-}
-
-/**
- * iwl3945_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl3945_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int rc;
- int i;
- u32 done;
- u32 reg_offset;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL39_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
- * NOTE: iwl3945_initialize_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache. */
- pinst = priv->ucode_init.p_addr;
- pdata = priv->ucode_init_data.p_addr;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset,
- le32_to_cpu(*image));
-
- rc = iwl3945_verify_bsm(priv);
- if (rc)
- return rc;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
- IWL39_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100)
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START_EN);
-
- return 0;
-}
-
-static struct iwl_hcmd_ops iwl3945_hcmd = {
- .rxon_assoc = iwl3945_send_rxon_assoc,
- .commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_lib_ops iwl3945_lib = {
- .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl3945_hw_txq_free_tfd,
- .txq_init = iwl3945_hw_tx_queue_init,
- .load_ucode = iwl3945_load_bsm,
- .dump_nic_error_log = iwl3945_dump_nic_error_log,
- .apm_ops = {
- .init = iwl3945_apm_init,
- .config = iwl3945_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
- .release_semaphore = iwl3945_eeprom_release_semaphore,
- },
- .send_tx_power = iwl3945_send_tx_power,
- .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-
- .debugfs_ops = {
- .rx_stats_read = iwl3945_ucode_rx_stats_read,
- .tx_stats_read = iwl3945_ucode_tx_stats_read,
- .general_stats_read = iwl3945_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl3945_legacy_ops = {
- .post_associate = iwl3945_post_associate,
- .config_ap = iwl3945_config_ap,
- .manage_ibss_station = iwl3945_manage_ibss_station,
-};
-
-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
- .get_hcmd_size = iwl3945_get_hcmd_size,
- .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
- .request_scan = iwl3945_request_scan,
- .post_scan = iwl3945_post_scan,
-};
-
-static const struct iwl_ops iwl3945_ops = {
- .lib = &iwl3945_lib,
- .hcmd = &iwl3945_hcmd,
- .utils = &iwl3945_hcmd_utils,
- .led = &iwl3945_led_ops,
- .legacy = &iwl3945_legacy_ops,
- .ieee80211_ops = &iwl3945_hw_ops,
-};
-
-static struct iwl_base_params iwl3945_base_params = {
- .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
- .num_of_queues = IWL39_NUM_QUEUES,
- .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
- .set_l0s = false,
- .use_bsm = true,
- .led_compensation = 64,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
-};
-
-static struct iwl_cfg iwl3945_bg_cfg = {
- .name = "3945BG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-static struct iwl_cfg iwl3945_abg_cfg = {
- .name = "3945ABG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
- {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
- {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59b71d..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945.h) for driver implementation definitions.
- * Please use iwl-3945-commands.h for uCode API definitions.
- * Please use iwl-3945-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_3945_h__
-#define __iwl_3945_h__
-
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <net/ieee80211_radiotap.h>
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern const struct pci_device_id iwl3945_hw_card_ids[];
-
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-3945-hw.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-#include "iwl-dev.h"
-#include "iwl-led.h"
-
-/* Highest firmware API version supported */
-#define IWL3945_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL3945_UCODE_API_MIN 1
-
-#define IWL3945_FW_PRE "iwlwifi-3945-"
-#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
-#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/* Module parameters accessible from iwl-*.c */
-extern struct iwl_mod_params iwl3945_mod_params;
-
-struct iwl3945_rate_scale_data {
- u64 data;
- s32 success_counter;
- s32 success_ratio;
- s32 counter;
- s32 average_tpt;
- unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
- spinlock_t lock;
- struct iwl_priv *priv;
- s32 *expected_tpt;
- unsigned long last_partial_flush;
- unsigned long last_flush;
- u32 flush_time;
- u32 last_tx_packets;
- u32 tx_packets;
- u8 tgg;
- u8 flush_pending;
- u8 start_rate;
- struct timer_list rate_scale_flush;
- struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
- /* used to be in sta_info */
- int last_txrate_idx;
-};
-
-
-/*
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl3945_sta_priv {
- struct iwl_station_priv_common common;
- struct iwl3945_rs_sta rs_sta;
-};
-
-enum iwl3945_antenna {
- IWL_ANTENNA_DIVERSITY,
- IWL_ANTENNA_MAIN,
- IWL_ANTENNA_AUX
-};
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
-
-#define IWL_TX_FIFO_AC0 0
-#define IWL_TX_FIFO_AC1 1
-#define IWL_TX_FIFO_AC2 2
-#define IWL_TX_FIFO_AC3 3
-#define IWL_TX_FIFO_HCCA_1 5
-#define IWL_TX_FIFO_HCCA_2 6
-#define IWL_TX_FIFO_NONE 7
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl3945_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl3945_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define SCAN_INTERVAL 100
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-#define STA_PS_STATUS_WAKE 0
-#define STA_PS_STATUS_SLEEP 1
-
-struct iwl3945_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
- x->u.rx_frame.stats.payload + \
- x->u.rx_frame.stats.phy_count))
-#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
- IWL_RX_HDR(x)->payload + \
- le16_to_cpu(IWL_RX_HDR(x)->len)))
-#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
-#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
-
-
-/******************************************************************************
- *
- * Functions implemented in iwl3945-base.c which are forward declared here
- * for use by iwl-*.c
- *
- *****************************************************************************/
-extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern void iwl3945_rx_replenish(void *data);
-extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr, int left);
-extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display);
-extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl3945-base.c
- *
- * NOTE: The implementation of these functions is hardware specific
- * which is why they are in the hardware specific files (vs. iwl-base.c)
- *
- * Naming convention --
- * iwl3945_ <-- It's part of iwlwifi
- * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl3945_bg_ <-- Called from work queue context
- * iwl3945_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
-extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
-extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad);
-extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
-extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate);
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id);
-extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
-extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
-extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-extern void iwl3945_disable_events(struct iwl_priv *priv);
-extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
-
-extern int iwl3945_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/**
- * iwl3945_hw_find_station - Find station id for a given BSSID
- * @bssid: MAC address of station ID to find
- *
- * NOTE: This should not be hardware specific but the code has
- * not yet been merged into a single common layer for managing the
- * station tables.
- */
-extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
-
-extern struct ieee80211_ops iwl3945_hw_ops;
-
-/*
- * Forward declare iwl-3945.c functions for iwl3945-base.c
- */
-extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
-extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
-extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
-extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-
-extern const struct iwl_channel_info *iwl3945_get_channel_info(
- const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
-
-extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
-
-/* scanning */
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl3945_post_scan(struct iwl_priv *priv);
-
-/* rates */
-extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
-
-/* Requires full declaration of iwl_priv before including */
-#include "iwl-io.h"
-
-#endif
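
The RTS rule spelled out in the comment of the deleted header above (a threshold of 0 forces RTS for every data/management frame, a threshold above the maximum MSDU size disables RTS, otherwise RTS is used only when the MPDU exceeds the threshold) condenses to a few lines of C. This is an editorial sketch only, not driver code; the helper name and the local copy of the MSDU limit are invented so the snippet stands alone.

#include <stdbool.h>

/* Local stand-in for the MAX_MSDU_SIZE define removed above. */
#define EX_MAX_MSDU_SIZE 2304U

/*
 * Decide whether an RTS exchange should precede an MPDU of mpdu_len bytes,
 * given the configured RTS threshold, following the rule quoted above.
 */
static bool ex_use_rts(unsigned int rts_threshold, unsigned int mpdu_len)
{
	if (rts_threshold == 0)
		return true;			/* RTS for everything */
	if (rts_threshold > EX_MAX_MSDU_SIZE)
		return false;			/* RTS effectively disabled */
	return mpdu_len > rts_threshold;	/* RTS only for large MPDUs */
}
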
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
deleted file mode 100644
index f46c80e6e00..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-#ifndef __iwl_4965_calib_h__
-#define __iwl_4965_calib_h__
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-commands.h"
-
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
-void iwl4965_init_sensitivity(struct iwl_priv *priv);
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
-void iwl4965_calib_free_results(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_calib_h__ */
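
The calibration hooks declared above are all driven from the periodic statistics notification; this header does not show the call site. A plausible dispatcher, sketched with an invented function name (in the real driver the calls live in the 4965 statistics-notification handling, which is outside this hunk), might look like:

/* Editorial sketch only; "ex_handle_statistics" is not a real symbol. */
static void ex_handle_statistics(struct iwl_priv *priv, void *stats)
{
	/* Run-time sensitivity tuning first, then chain-noise balancing;
	 * both consume the same raw statistics payload from the uCode. */
	iwl4965_sensitivity_calibration(priv, stats);
	iwl4965_chain_noise_calibration(priv, stats);
}
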
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665766e..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/******************************************************************************
-*
-* GPL LICENSE SUMMARY
-*
-* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
-* USA
-*
-* The full GNU General Public License is included in this distribution
-* in the file called LICENSE.GPL.
-*
-* Contact Information:
-* Intel Linux Wireless <ilw@linux.intel.com>
-* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-*****************************************************************************/
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static const char *fmt_value = " %-30s %10u\n";
-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
-static const char *fmt_header =
- "%-32s current cumulative delta max\n";
-
-static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
- u32 flag;
-
- flag = le32_to_cpu(priv->_4965.statistics.flag);
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
- if (flag & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (flag & UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
-
- return p;
-}
-
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 40 +
- sizeof(struct statistics_rx_non_phy) * 40 +
- sizeof(struct statistics_rx_ht_phy) * 40 + 400;
- ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
- struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_non_phy *delta_general, *max_general;
- struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * the statistics shown here are based on
- * the last statistics notification from uCode and
- * might not reflect the current uCode activity
- */
- ofdm = &priv->_4965.statistics.rx.ofdm;
- cck = &priv->_4965.statistics.rx.cck;
- general = &priv->_4965.statistics.rx.general;
- ht = &priv->_4965.statistics.rx.ofdm_ht;
- accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
- accum_cck = &priv->_4965.accum_statistics.rx.cck;
- accum_general = &priv->_4965.accum_statistics.rx.general;
- accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
- delta_cck = &priv->_4965.delta_statistics.rx.cck;
- delta_general = &priv->_4965.delta_statistics.rx.general;
- delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->_4965.max_delta.rx.ofdm;
- max_cck = &priv->_4965.max_delta.rx.cck;
- max_general = &priv->_4965.max_delta.rx.general;
- max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
- max_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt,
- delta_ofdm->sent_ba_rsp_cnt,
- max_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill,
- delta_ofdm->dsp_self_kill,
- max_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err,
- delta_ofdm->mh_format_err,
- max_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum,
- delta_ofdm->re_acq_main_rssi_sum,
- max_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err, delta_cck->overrun_err,
- max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good, max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout, delta_cck->sfd_timeout,
- max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout, delta_cck->fina_timeout,
- max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts, delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt,
- delta_cck->sent_ba_rsp_cnt,
- max_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
- max_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err, delta_cck->mh_format_err,
- max_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum,
- delta_cck->re_acq_main_rssi_sum,
- max_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts, delta_general->bogus_cts,
- max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack, delta_general->bogus_ack,
- max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_beacons:",
- le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons,
- delta_general->channel_beacons,
- max_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_missed_bcon:",
- le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon,
- delta_general->num_missed_bcon,
- max_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "adc_rx_saturation_time:",
- le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time,
- delta_general->adc_rx_saturation_time,
- max_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_detect_search_tm:",
- le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time,
- delta_general->ina_detection_search_time,
- max_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_a:",
- le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a,
- delta_general->beacon_silence_rssi_a,
- max_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_b:",
- le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b,
- delta_general->beacon_silence_rssi_b,
- max_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_c:",
- le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c,
- delta_general->beacon_silence_rssi_c,
- max_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "interference_data_flag:",
- le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag,
- delta_general->interference_data_flag,
- max_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_load:",
- le32_to_cpu(general->channel_load),
- accum_general->channel_load,
- delta_general->channel_load,
- max_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_false_alarms:",
- le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms,
- delta_general->dsp_false_alarms,
- max_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_a:",
- le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a,
- delta_general->beacon_rssi_a,
- max_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_b:",
- le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b,
- delta_general->beacon_rssi_b,
- max_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_c:",
- le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c,
- delta_general->beacon_rssi_c,
- max_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_a:",
- le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a,
- delta_general->beacon_energy_a,
- max_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_b:",
- le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b,
- delta_general->beacon_energy_b,
- max_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_c:",
- le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c,
- delta_general->beacon_energy_c,
- max_general->beacon_energy_c);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM_HT:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
- delta_ht->plcp_err, max_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
- delta_ht->overrun_err, max_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err,
- delta_ht->early_overrun_err,
- max_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
- delta_ht->crc32_good, max_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
- delta_ht->crc32_err, max_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err,
- delta_ht->mh_format_err, max_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_crc32_good:",
- le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good,
- delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_mpdu_cnt:",
- le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt,
- delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_cnt:",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
- delta_ht->agg_cnt, max_ht->agg_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unsupport_mcs:",
- le32_to_cpu(ht->unsupport_mcs),
- accum_ht->unsupport_mcs,
- delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
- ssize_t ret;
- struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistics shown here are based on
- * the last statistics notification from uCode and
- * might not reflect the current uCode activity
- */
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta_tx = &priv->_4965.delta_statistics.tx;
- max_tx = &priv->_4965.max_delta.tx;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dump_msdu_cnt:",
- le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt,
- delta_tx->dump_msdu_cnt,
- max_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_nxt_frame_mismatch:",
- le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt,
- delta_tx->burst_abort_next_frame_mismatch_cnt,
- max_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_missing_nxt_frame:",
- le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt,
- delta_tx->burst_abort_missing_next_frame_cnt,
- max_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout_collision:",
- le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision,
- delta_tx->cts_timeout_collision,
- max_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_ba_timeout_collision:",
- le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision,
- delta_tx->ack_or_ba_timeout_collision,
- max_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_timeout:",
- le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout,
- delta_tx->agg.ba_timeout,
- max_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_resched_frames:",
- le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames,
- delta_tx->agg.ba_reschedule_frames,
- max_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg_frame:",
- le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt,
- delta_tx->agg.scd_query_agg_frame_cnt,
- max_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_no_agg:",
- le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg,
- delta_tx->agg.scd_query_no_agg,
- max_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg:",
- le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg,
- delta_tx->agg.scd_query_agg,
- max_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_mismatch:",
- le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch,
- delta_tx->agg.scd_query_mismatch,
- max_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg frame_not_ready:",
- le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready,
- delta_tx->agg.frame_not_ready,
- max_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg underrun:",
- le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun,
- delta_tx->agg.underrun, max_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg bt_prio_kill:",
- le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill,
- delta_tx->agg.bt_prio_kill,
- max_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg rx_ba_rsp_cnt:",
- le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt,
- delta_tx->agg.rx_ba_rsp_cnt,
- max_tx->agg.rx_ba_rsp_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_general) * 10 + 300;
- ssize_t ret;
- struct statistics_general_common *general, *accum_general;
- struct statistics_general_common *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistics shown here are based on
- * the last statistics notification from uCode and
- * might not reflect the current uCode activity
- */
- general = &priv->_4965.statistics.general.common;
- dbg = &priv->_4965.statistics.general.common.dbg;
- div = &priv->_4965.statistics.general.common.div;
- accum_general = &priv->_4965.accum_statistics.general.common;
- accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
- accum_div = &priv->_4965.accum_statistics.general.common.div;
- delta_general = &priv->_4965.delta_statistics.general.common;
- max_general = &priv->_4965.max_delta.general.common;
- delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
- max_dbg = &priv->_4965.max_delta.general.common.dbg;
- delta_div = &priv->_4965.delta_statistics.general.common.div;
- max_div = &priv->_4965.max_delta.general.common.div;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "temperature:",
- le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "ttl_timestamp:",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "wait_for_silence_timeout_count:",
- le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
- accum_dbg->wait_for_silence_timeout_cnt,
- delta_dbg->wait_for_silence_timeout_cnt,
- max_dbg->wait_for_silence_timeout_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_enable_counter:",
- le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter,
- delta_general->rx_enable_counter,
- max_general->rx_enable_counter);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_of_sos_states:",
- le32_to_cpu(general->num_of_sos_states),
- accum_general->num_of_sos_states,
- delta_general->num_of_sos_states,
- max_general->num_of_sos_states);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
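
All three readers deleted above follow the same debugfs pattern: size a generous upper-bound buffer, build the text with scnprintf() so the write position can never overrun, hand the result to userspace with simple_read_from_buffer(), then free the buffer. A stripped-down sketch of that pattern (editorial illustration only; the function name and the single counter it prints are invented, and the usual kernel headers are assumed):

static ssize_t ex_stats_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	u32 value = 0;		/* stand-in for one statistics field */
	const int bufsz = 128;	/* upper bound on the formatted text */
	char *buf;
	int pos = 0;
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* scnprintf() returns the number of bytes actually written, so
	 * pos can never grow past bufsz even if the format expands. */
	pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
			 "example_counter:", value);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
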
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e35361a9..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos);
-#else
-static ssize_t
-iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
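
How one of these readers gets attached to an actual debugfs entry is not part of this diff. A typical wiring, sketched here with invented names (the real file_operations tables and debugfs directory handling live elsewhere in the legacy driver), would be:

/* Editorial sketch only. */
static int ex_dbgfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;	/* the struct iwl_priv */
	return 0;
}

static const struct file_operations ex_ucode_rx_stats_ops = {
	.read	= iwl4965_ucode_rx_stats_read,
	.open	= ex_dbgfs_open,
	.llseek	= default_llseek,
	.owner	= THIS_MODULE,
};

static void ex_create_stats_file(struct iwl_priv *priv, struct dentry *dir)
{
	debugfs_create_file("ucode_rx_stats", S_IRUSR, dir, priv,
			    &ex_ucode_rx_stats_ops);
}
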
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab1ff7..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-4965.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_IO(priv,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
-
-int iwl4965_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwl_legacy_eeprom_query16(priv,
- EEPROM_4965_CALIB_VERSION_OFFSET);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
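
The acquire/release pair above is meant to bracket any burst of EEPROM traffic so driver reads cannot collide with uCode accesses. A caller would look roughly like the sketch below; this is editorial illustration with an invented wrapper name, and in the real driver the bracketing happens around the one-time read of the whole EEPROM image rather than around individual queries.

static int ex_read_eeprom_version(struct iwl_priv *priv, u16 *ver)
{
	int ret;

	ret = iwl4965_eeprom_acquire_semaphore(priv);
	if (ret < 0)
		return ret;	/* uCode kept the semaphore; give up */

	*ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);

	iwl4965_eeprom_release_semaphore(priv);
	return 0;
}
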
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa2886d9..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- * Use iwl-dev.h for driver implementation definitions.
- */
-
-#ifndef __iwl_4965_hw_h__
-#define __iwl_4965_hw_h__
-
-#include "iwl-fh.h"
-
-/* EEPROM */
-#define IWL4965_EEPROM_IMG_SIZE 1024
-
-/*
- * uCode queue management definitions ...
- * The first queue used for block-ack aggregation is #7 (4965 only).
- * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
- */
-#define IWL49_FIRST_AMPDU_QUEUE 7
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
-
-#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
-
-#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
- IWL49_RTC_INST_LOWER_BOUND)
-#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
- IWL49_RTC_DATA_LOWER_BOUND)
-
-#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
-#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
-
-static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL49_RTC_DATA_UPPER_BOUND);
-}
-
-/********************* START TEMPERATURE *************************************/
-
-/**
- * 4965 temperature calculation.
- *
- * The driver must calculate the device temperature before calculating
- * a txpower setting (amplifier gain is temperature dependent). The
- * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
- * values used for the life of the driver, and one of which (R4) is the
- * real-time temperature indicator.
- *
- * uCode provides all 4 values to the driver via the "initialize alive"
- * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
- * image loads, uCode updates the R4 value via statistics notifications
- * (see STATISTICS_NOTIFICATION), which occur after each received beacon
- * when associated, or can be requested via REPLY_STATISTICS_CMD.
- *
- * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
- * must sign-extend to 32 bits before applying formula below.
- *
- * Formula:
- *
- * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
- *
- * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
- * an additional correction, which should be centered around 0 degrees
- * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
- * centering the 97/100 correction around 0 degrees K.
- *
- * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
- * temperature with factory-measured temperatures when calculating txpower
- * settings.
- */
-#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
-#define TEMPERATURE_CALIB_A_VAL 259
-
-/* Limit range of calculated temperature to be between these Kelvin values */
-#define IWL_TX_POWER_TEMPERATURE_MIN (263)
-#define IWL_TX_POWER_TEMPERATURE_MAX (410)
-
-#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
- (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
- ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
-
-/********************* END TEMPERATURE ***************************************/
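
/*
 * Editor's sketch, not part of the original header: the Kelvin formula
 * from the comment above written out as a standalone helper.  R1/R2/R3
 * come from the "initialize alive" response and R4 from statistics
 * notifications; R4 is assumed to be already sign-extended to 32 bits,
 * as the NOTE above requires.  The helper name is invented; the driver's
 * own version, with its additional error handling, lives elsewhere.
 */
static inline s32 ex_iwl4965_temp_kelvin(s32 R1, s32 R2, s32 R3, s32 R4)
{
	s32 temp;

	if (R3 == R1)		/* guard against bogus calibration data */
		return IWL_TX_POWER_TEMPERATURE_MIN;

	/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
	temp = (97 * TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1)) / 100 +
	       TEMPERATURE_CALIB_KELVIN_OFFSET;

	/* keep the result inside the range the txpower code accepts */
	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
		temp = clamp_t(s32, temp, IWL_TX_POWER_TEMPERATURE_MIN,
			       IWL_TX_POWER_TEMPERATURE_MAX);

	return temp;
}
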
-
-/********************* START TXPOWER *****************************************/
-
-/**
- * 4965 txpower calculations rely on information from three sources:
- *
- * 1) EEPROM
- * 2) "initialize" alive notification
- * 3) statistics notifications
- *
- * EEPROM data consists of:
- *
- * 1) Regulatory information (max txpower and channel usage flags) is provided
- * separately for each channel that can possibly be supported by 4965.
- * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
- * (legacy) channels.
- *
- * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
- * for locations in EEPROM.
- *
- * 2) Factory txpower calibration information is provided separately for
- * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
- * but 5 GHz has several sub-bands.
- *
- * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
- *
- * See struct iwl4965_eeprom_calib_info (and the tree of structures
- * contained within it) for format, and struct iwl4965_eeprom for
- * locations in EEPROM.
- *
- * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
- * consists of:
- *
- * 1) Temperature calculation parameters.
- *
- * 2) Power supply voltage measurement.
- *
- * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
- *
- * Statistics notifications deliver:
- *
- * 1) Current values for temperature param R4.
- */
-
-/**
- * To calculate a txpower setting for a given desired target txpower, channel,
- * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
- * support MIMO and transmit diversity), driver must do the following:
- *
- * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
- * Do not exceed regulatory limit; reduce target txpower if necessary.
- *
- * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * 2 transmitters will be used simultaneously; driver must reduce the
- * regulatory limit by 3 dB (half-power) for each transmitter, so the
- * combined total output of the 2 transmitters is within regulatory limits.
- *
- *
- * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
- * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
- * reduce target txpower if necessary.
- *
- * Backoff values below are in 1/2 dB units (equivalent to steps in
- * txpower gain tables):
- *
- * OFDM 6 - 36 MBit: 10 steps (5 dB)
- * OFDM 48 MBit: 15 steps (7.5 dB)
- * OFDM 54 MBit: 17 steps (8.5 dB)
- * OFDM 60 MBit: 20 steps (10 dB)
- * CCK all rates: 10 steps (5 dB)
- *
- * Backoff values apply to saturation txpower on a per-transmitter basis;
- * when using MIMO (2 transmitters), each transmitter uses the same
- * saturation level provided in EEPROM, and the same backoff values;
- * no reduction (such as with regulatory txpower limits) is required.
- *
- * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
- * widths and 40 MHz (.11n HT40) channel widths; there is no separate
- * factory measurement for ht40 channels.
- *
- * The result of this step is the final target txpower. The rest of
- * the steps figure out the proper settings for the device to achieve
- * that target txpower.
- *
- *
- * 3) Determine (EEPROM) calibration sub band for the target channel, by
- * comparing against first and last channels in each sub band
- * (see struct iwl4965_eeprom_calib_subband_info).
- *
- *
- * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
- * referencing the 2 factory-measured (sample) channels within the sub band.
- *
- * Interpolation is based on difference between target channel's frequency
- * and the sample channels' frequencies. Since channel numbers are based
- * on frequency (5 MHz between each channel number), this is equivalent
- * to interpolating based on channel number differences.
- *
- * Note that the sample channels may or may not be the channels at the
- * edges of the sub band. The target channel may be "outside" of the
- * span of the sampled channels.
- *
- * Driver may choose the pair (for 2 Tx chains) of measurements (see
- * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
- * txpower comes closest to the desired txpower. Usually, though,
- * the middle set of measurements is closest to the regulatory limits,
- * and is therefore a good choice for all txpower calculations (this
- * assumes that high accuracy is needed for maximizing legal txpower,
- * while lower txpower configurations do not need as much accuracy).
- *
- * Driver should interpolate both members of the chosen measurement pair,
- * i.e. for both Tx chains (radio transmitters), unless the driver knows
- * that only one of the chains will be used (e.g. only one tx antenna
- * connected, but this should be unusual). The rate scaling algorithm
- * switches antennas to find best performance, so both Tx chains will
- * be used (although only one at a time) even for non-MIMO transmissions.
- *
- * Driver should interpolate factory values for temperature, gain table
- * index, and actual power. The power amplifier detector values are
- * not used by the driver.
- *
- * Sanity check: If the target channel happens to be one of the sample
- * channels, the results should agree with the sample channel's
- * measurements!
- *
- *
- * 5) Find difference between desired txpower and (interpolated)
- * factory-measured txpower. Using (interpolated) factory gain table index
- * (shown elsewhere) as a starting point, adjust this index lower to
- * increase txpower, or higher to decrease txpower, until the target
- * txpower is reached. Each step in the gain table is 1/2 dB.
- *
- * For example, if factory measured txpower is 16 dBm, and target txpower
- * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
- * by 3 dB.
- *
- *
- * 6) Find difference between current device temperature and (interpolated)
- * factory-measured temperature for sub-band. Factory values are in
- * degrees Celsius. To calculate current temperature, see comments for
- * "4965 temperature calculation".
- *
- * If current temperature is higher than factory temperature, driver must
- * increase gain (lower gain table index), and vice versa.
- *
- * Temperature affects gain differently for different channels:
- *
- * 2.4 GHz all channels: 3.5 degrees per half-dB step
- * 5 GHz channels 34-43: 4.5 degrees per half-dB step
- * 5 GHz channels >= 44: 4.0 degrees per half-dB step
- *
- * NOTE: Temperature can increase rapidly when transmitting, especially
- * with heavy traffic at high txpowers. Driver should update
- * temperature calculations often under these conditions to
- * maintain strong txpower in the face of rising temperature.
- *
- *
- * 7) Find difference between current power supply voltage indicator
- * (from "initialize alive") and factory-measured power supply voltage
- * indicator (EEPROM).
- *
- * If the current voltage is higher (indicator is lower) than factory
- * voltage, gain should be reduced (gain table index increased) by:
- *
- * (eeprom - current) / 7
- *
- * If the current voltage is lower (indicator is higher) than factory
- * voltage, gain should be increased (gain table index decreased) by:
- *
- * 2 * (current - eeprom) / 7
- *
- * If number of index steps in either direction turns out to be > 2,
- * something is wrong ... just use 0.
- *
- * NOTE: Voltage compensation is independent of band/channel.
- *
- * NOTE: "Initialize" uCode measures current voltage, which is assumed
- * to be constant after this initial measurement. Voltage
- * compensation for txpower (number of steps in gain table)
- * may be calculated once and used until the next uCode bootload.
- *
- *
- * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * adjust txpower for each transmitter chain, so txpower is balanced
- * between the two chains. There are 5 pairs of tx_atten[group][chain]
- * values in "initialize alive", one pair for each of 5 channel ranges:
- *
- * Group 0: 5 GHz channel 34-43
- * Group 1: 5 GHz channel 44-70
- * Group 2: 5 GHz channel 71-124
- * Group 3: 5 GHz channel 125-200
- * Group 4: 2.4 GHz all channels
- *
- * Add the tx_atten[group][chain] value to the index for the target chain.
- * The values are signed, but are in pairs of 0 and a non-negative number,
- * so as to reduce gain (if necessary) of the "hotter" channel. This
- * avoids any need to double-check for regulatory compliance after
- * this step.
- *
- *
- * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
- * value to the index:
- *
- * Hardware rev B: 9 steps (4.5 dB)
- * Hardware rev C: 5 steps (2.5 dB)
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- *
- * NOTE: This compensation is in addition to any saturation backoff that
- * might have been applied in an earlier step.
- *
- *
- * 10) Select the gain table, based on band (2.4 vs 5 GHz).
- *
- * Limit the adjusted index to stay within the table!
- *
- *
- * 11) Read gain table entries for DSP and radio gain, place into appropriate
- * location(s) in command (struct iwl4965_txpowertable_cmd).
- */
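The index arithmetic in steps 5-7 above is compact enough to sketch in C. The helper below is purely illustrative, not the driver's implementation; every name in it is hypothetical, and it simply restates the rules given in the comment: one gain-table step per 1/2 dB, a hotter-than-factory temperature lowers the index, and the voltage term is discarded if it exceeds two steps in either direction.

/* Illustrative sketch of steps 5-7; all names here are hypothetical.
 * Powers are in half-dBm, temperatures in degrees Celsius, voltages in
 * raw EEPROM indicator units, and the temperature slope is given in
 * tenths of a degree per 1/2 dB step (35, 40 or 45 depending on group).
 */
static int example_adjust_gain_index(int factory_index,
				     int factory_pwr, int target_pwr,
				     int factory_temp, int current_temp,
				     int degrees_per_step_x10,
				     int eeprom_volt, int current_volt)
{
	int idx = factory_index;
	int volt_steps;

	/* Step 5: higher index = less power, 1/2 dB per step */
	idx += factory_pwr - target_pwr;

	/* Step 6: hotter than factory -> need more gain -> lower index */
	idx -= ((current_temp - factory_temp) * 10) / degrees_per_step_x10;

	/* Step 7: voltage compensation, sane only within +/- 2 steps */
	if (eeprom_volt > current_volt)
		volt_steps = (eeprom_volt - current_volt) / 7;
	else
		volt_steps = -(2 * (current_volt - eeprom_volt)) / 7;
	if (volt_steps > 2 || volt_steps < -2)
		volt_steps = 0;
	idx += volt_steps;

	return idx;
}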
-
-/**
- * When MIMO is used (2 transmitters operating simultaneously), driver should
- * limit each transmitter to deliver a max of 3 dB below the regulatory limit
- * for the device. That is, use half power for each transmitter, so total
- * txpower is within regulatory limits.
- *
- * The value "6" represents number of steps in gain table to reduce power 3 dB.
- * Each step is 1/2 dB.
- */
-#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
-
-/**
- * CCK gain compensation.
- *
- * When calculating txpowers for CCK, after making sure that the target power
- * is within regulatory and saturation limits, driver must additionally
- * back off gain by adding these values to the gain table index.
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- */
-#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
-#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
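As a sketch of how the revision-dependent backoff might be selected (the helper name and its raw-register argument are hypothetical; only the register, bit positions, and step counts come from the comment above):

/* Hypothetical helper: pick the CCK backoff from CSR_HW_REV_WA_REG bits [3:2]. */
static int example_cck_backoff(u32 hw_rev_wa_reg)
{
	u32 rev = (hw_rev_wa_reg >> 2) & 0x3;

	if (rev == 1)	/* hardware rev B */
		return IWL_TX_POWER_CCK_COMPENSATION_B_STEP;	/* 9 steps, 4.5 dB */
	return IWL_TX_POWER_CCK_COMPENSATION_C_STEP;		/* 5 steps, 2.5 dB; rev C assumed otherwise */
}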
-
-/*
- * 4965 power supply voltage compensation for txpower
- */
-#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
-
-/**
- * Gain tables.
- *
- * The following tables contain pair of values for setting txpower, i.e.
- * gain settings for the output of the device's digital signal processor (DSP),
- * and for the analog gain structure of the transmitter.
- *
- * Each entry in the gain tables represents a step of 1/2 dB. Note that these
- * are *relative* steps, not indications of absolute output power. Output
- * power varies with temperature, voltage, and channel frequency, and also
- * requires consideration of average power (to satisfy regulatory constraints),
- * and peak power (to avoid distortion of the output signal).
- *
- * Each entry contains two values:
- * 1) DSP gain (sometimes called DSP attenuation). This is a fine-grained
- * linear value that multiplies the output of the digital signal processor,
- * before being sent to the analog radio.
- * 2) Radio gain. This sets the analog gain of the radio Tx path.
- * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
- *
- * EEPROM contains factory calibration data for txpower. This maps actual
- * measured txpower levels to gain settings in the "well known" tables
- * below ("well-known" means here that both factory calibration *and* the
- * driver work with the same table).
- *
- * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
- * has an extension (into negative indexes), in case the driver needs to
- * boost power setting for high device temperatures (higher than would be
- * present during factory calibration). A 5 GHz EEPROM index of "40"
- * corresponds to the 49th entry in the table used by the driver.
- */
-#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
-#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
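A one-line illustration of the extended-table convention just described (the helper is hypothetical): the driver-table position is the EEPROM index minus the negative extension minimum, so EEPROM index 40 lands at position 49.

/* Hypothetical: EEPROM 5 GHz gain index -> 0-based position in the extended table. */
static inline int example_5ghz_table_pos(int eeprom_index)
{
	return eeprom_index - MIN_TX_GAIN_INDEX_52GHZ_EXT;	/* 40 -> 49 */
}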
-
-/**
- * 2.4 GHz gain table
- *
- * Index Dsp gain Radio gain
- * 0 110 0x3f (highest gain)
- * 1 104 0x3f
- * 2 98 0x3f
- * 3 110 0x3e
- * 4 104 0x3e
- * 5 98 0x3e
- * 6 110 0x3d
- * 7 104 0x3d
- * 8 98 0x3d
- * 9 110 0x3c
- * 10 104 0x3c
- * 11 98 0x3c
- * 12 110 0x3b
- * 13 104 0x3b
- * 14 98 0x3b
- * 15 110 0x3a
- * 16 104 0x3a
- * 17 98 0x3a
- * 18 110 0x39
- * 19 104 0x39
- * 20 98 0x39
- * 21 110 0x38
- * 22 104 0x38
- * 23 98 0x38
- * 24 110 0x37
- * 25 104 0x37
- * 26 98 0x37
- * 27 110 0x36
- * 28 104 0x36
- * 29 98 0x36
- * 30 110 0x35
- * 31 104 0x35
- * 32 98 0x35
- * 33 110 0x34
- * 34 104 0x34
- * 35 98 0x34
- * 36 110 0x33
- * 37 104 0x33
- * 38 98 0x33
- * 39 110 0x32
- * 40 104 0x32
- * 41 98 0x32
- * 42 110 0x31
- * 43 104 0x31
- * 44 98 0x31
- * 45 110 0x30
- * 46 104 0x30
- * 47 98 0x30
- * 48 110 0x6
- * 49 104 0x6
- * 50 98 0x6
- * 51 110 0x5
- * 52 104 0x5
- * 53 98 0x5
- * 54 110 0x4
- * 55 104 0x4
- * 56 98 0x4
- * 57 110 0x3
- * 58 104 0x3
- * 59 98 0x3
- * 60 110 0x2
- * 61 104 0x2
- * 62 98 0x2
- * 63 110 0x1
- * 64 104 0x1
- * 65 98 0x1
- * 66 110 0x0
- * 67 104 0x0
- * 68 98 0x0
- * 69 97 0
- * 70 96 0
- * 71 95 0
- * 72 94 0
- * 73 93 0
- * 74 92 0
- * 75 91 0
- * 76 90 0
- * 77 89 0
- * 78 88 0
- * 79 87 0
- * 80 86 0
- * 81 85 0
- * 82 84 0
- * 83 83 0
- * 84 82 0
- * 85 81 0
- * 86 80 0
- * 87 79 0
- * 88 78 0
- * 89 77 0
- * 90 76 0
- * 91 75 0
- * 92 74 0
- * 93 73 0
- * 94 72 0
- * 95 71 0
- * 96 70 0
- * 97 69 0
- * 98 68 0
- */
-
-/**
- * 5 GHz gain table
- *
- * Index Dsp gain Radio gain
- * -9 123 0x3F (highest gain)
- * -8 117 0x3F
- * -7 110 0x3F
- * -6 104 0x3F
- * -5 98 0x3F
- * -4 110 0x3E
- * -3 104 0x3E
- * -2 98 0x3E
- * -1 110 0x3D
- * 0 104 0x3D
- * 1 98 0x3D
- * 2 110 0x3C
- * 3 104 0x3C
- * 4 98 0x3C
- * 5 110 0x3B
- * 6 104 0x3B
- * 7 98 0x3B
- * 8 110 0x3A
- * 9 104 0x3A
- * 10 98 0x3A
- * 11 110 0x39
- * 12 104 0x39
- * 13 98 0x39
- * 14 110 0x38
- * 15 104 0x38
- * 16 98 0x38
- * 17 110 0x37
- * 18 104 0x37
- * 19 98 0x37
- * 20 110 0x36
- * 21 104 0x36
- * 22 98 0x36
- * 23 110 0x35
- * 24 104 0x35
- * 25 98 0x35
- * 26 110 0x34
- * 27 104 0x34
- * 28 98 0x34
- * 29 110 0x33
- * 30 104 0x33
- * 31 98 0x33
- * 32 110 0x32
- * 33 104 0x32
- * 34 98 0x32
- * 35 110 0x31
- * 36 104 0x31
- * 37 98 0x31
- * 38 110 0x30
- * 39 104 0x30
- * 40 98 0x30
- * 41 110 0x25
- * 42 104 0x25
- * 43 98 0x25
- * 44 110 0x24
- * 45 104 0x24
- * 46 98 0x24
- * 47 110 0x23
- * 48 104 0x23
- * 49 98 0x23
- * 50 110 0x22
- * 51 104 0x18
- * 52 98 0x18
- * 53 110 0x17
- * 54 104 0x17
- * 55 98 0x17
- * 56 110 0x16
- * 57 104 0x16
- * 58 98 0x16
- * 59 110 0x15
- * 60 104 0x15
- * 61 98 0x15
- * 62 110 0x14
- * 63 104 0x14
- * 64 98 0x14
- * 65 110 0x13
- * 66 104 0x13
- * 67 98 0x13
- * 68 110 0x12
- * 69 104 0x08
- * 70 98 0x08
- * 71 110 0x07
- * 72 104 0x07
- * 73 98 0x07
- * 74 110 0x06
- * 75 104 0x06
- * 76 98 0x06
- * 77 110 0x05
- * 78 104 0x05
- * 79 98 0x05
- * 80 110 0x04
- * 81 104 0x04
- * 82 98 0x04
- * 83 110 0x03
- * 84 104 0x03
- * 85 98 0x03
- * 86 110 0x02
- * 87 104 0x02
- * 88 98 0x02
- * 89 110 0x01
- * 90 104 0x01
- * 91 98 0x01
- * 92 110 0x00
- * 93 104 0x00
- * 94 98 0x00
- * 95 93 0x00
- * 96 88 0x00
- * 97 83 0x00
- * 98 78 0x00
- */
-
-
-/**
- * Sanity checks and default values for EEPROM regulatory levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Regulatory limits refer to the maximum average txpower allowed by
- * regulatory agencies in the geographies in which the device is meant
- * to be operated. These limits are SKU-specific (i.e. geography-specific),
- * and channel-specific; each channel has an individual regulatory limit
- * listed in the EEPROM.
- *
- * Units are in half-dBm (i.e. "34" means 17 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
-#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
-#define IWL_TX_POWER_REGULATORY_MIN (0)
-#define IWL_TX_POWER_REGULATORY_MAX (34)
-
-/**
- * Sanity checks and default values for EEPROM saturation levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Saturation is the highest level that the output power amplifier can produce
- * without significant clipping distortion. This is a "peak" power level.
- * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
- * require differing amounts of backoff, relative to their average power output,
- * in order to avoid clipping distortion.
- *
- * Driver must make sure that it is violating neither the saturation limit
- * nor the regulatory limit when calculating Tx power settings for various
- * rates.
- *
- * Units are in half-dBm (i.e. "38" means 19 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
-#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
-#define IWL_TX_POWER_SATURATION_MIN (20)
-#define IWL_TX_POWER_SATURATION_MAX (50)
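Both the regulatory and the saturation limits above follow the same "clamp or fall back to default" rule; a generic sketch (illustrative only, not a driver function):

/* Hypothetical: validate an EEPROM txpower limit, falling back to a default. */
static int example_sanitize_limit(int eeprom_val, int min, int max, int def)
{
	return (eeprom_val < min || eeprom_val > max) ? def : eeprom_val;
}

/* e.g. saturation for 2.4 GHz:
 *	sat = example_sanitize_limit(sat, IWL_TX_POWER_SATURATION_MIN,
 *				     IWL_TX_POWER_SATURATION_MAX,
 *				     IWL_TX_POWER_DEFAULT_SATURATION_24);
 */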
-
-/**
- * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
- * and thermal Txpower calibration.
- *
- * When calculating txpower, driver must compensate for current device
- * temperature; higher temperature requires higher gain. Driver must calculate
- * current temperature (see "4965 temperature calculation"), then compare vs.
- * factory calibration temperature in EEPROM; if current temperature is higher
- * than factory temperature, driver must *increase* gain by proportions shown
- * in table below. If current temperature is lower than factory, driver must
- * *decrease* gain.
- *
- * Different frequency ranges require different compensation, as shown below.
- */
-/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
-#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
-
-/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
-#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
-
-/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
-#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
-
-/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
-#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
-
-/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
-#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
-
-enum {
- CALIB_CH_GROUP_1 = 0,
- CALIB_CH_GROUP_2 = 1,
- CALIB_CH_GROUP_3 = 2,
- CALIB_CH_GROUP_4 = 3,
- CALIB_CH_GROUP_5 = 4,
- CALIB_CH_GROUP_MAX
-};
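A sketch of the channel-to-group lookup implied by the FCH/LCH bounds above (the helper is hypothetical; the driver's real lookup may differ):

/* Hypothetical: map a channel number to its CALIB_CH_GROUP_* index, -1 if none. */
static int example_calib_group(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;
	return -1;
}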
-
-/********************* END TXPOWER *****************************************/
-
-
-/**
- * Tx/Rx Queues
- *
- * Most communication between driver and 4965 is via queues of data buffers.
- * For example, all commands that the driver issues to device's embedded
- * controller (uCode) are via the command queue (one of the Tx queues). All
- * uCode command responses/replies/notifications, including Rx frames, are
- * conveyed from uCode to driver via the Rx queue.
- *
- * Most support for these queues, including handshake support, resides in
- * structures in host DRAM, shared between the driver and the device. When
- * allocating this memory, the driver must make sure that data written by
- * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
- * cache memory), so DRAM and cache are consistent, and the device can
- * immediately see changes made by the driver.
- *
- * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
- * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
- * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
- */
-#define IWL49_NUM_FIFOS 7
-#define IWL49_CMD_FIFO_NUM 4
-#define IWL49_NUM_QUEUES 16
-#define IWL49_NUM_AMPDU_QUEUES 8
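The coherency requirement spelled out above is what the kernel's coherent DMA API provides; a minimal sketch of allocating such a shared area (the wrapper name is hypothetical, dma_alloc_coherent() itself is the standard API from <linux/dma-mapping.h>):

/* Sketch: allocate device-visible, cache-coherent memory for shared queue data. */
static void *example_alloc_shared(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* The CPU's writes to this memory are visible to the device without
	 * explicit cache flushes, as the comment above requires. */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}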
-
-
-/**
- * struct iwl4965_schedq_bc_tbl
- *
- * Byte Count table
- *
- * Each Tx queue uses a byte-count table containing 320 entries:
- * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
- * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
- * max Tx window is 64 TFDs).
- *
- * When driver sets up a new TFD, it must also enter the total byte count
- * of the frame to be transmitted into the corresponding entry in the byte
- * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
- * must duplicate the byte count entry in corresponding index 256-319.
- *
- * Padding puts each byte count table on a 1024-byte boundary;
- * 4965 assumes tables are separated by 1024 bytes.
- */
-struct iwl4965_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
- u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __packed;
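A sketch of the duplication rule just described (the helper is illustrative only; the struct layout and the 64-entry mirror come from the comment above):

/* Illustrative only: record a frame's byte count for TFD index 'idx'. */
static void example_set_byte_count(struct iwl4965_scd_bc_tbl *bc_tbl,
				   int idx, u16 byte_cnt)
{
	bc_tbl->tfd_offset[idx] = cpu_to_le16(byte_cnt);
	/* entries 0-63 are mirrored at 256-319 to cover Tx-window wrap-around */
	if (idx < 64)
		bc_tbl->tfd_offset[idx + 256] = cpu_to_le16(byte_cnt);
}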
-
-
-#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
-
-/* RSSI to dBm */
-#define IWL4965_RSSI_OFFSET 44
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-#define IWL4965_DEFAULT_TX_RETRY 15
-
-/* EEPROM */
-#define IWL4965_FIRST_AMPDU_QUEUE 10
-
-
-#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdcaee6..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-4965-led.h"
-
-/* Send led command */
-static int
-iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
- u32 reg;
-
- reg = iwl_read32(priv, CSR_LED_REG);
- if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
- iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-/* Set led register on */
-void iwl4965_led_enable(struct iwl_priv *priv)
-{
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-}
-
-const struct iwl_led_ops iwl4965_led_ops = {
- .cmd = iwl4965_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615fc33..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_4965_led_h__
-#define __iwl_4965_led_h__
-
-extern const struct iwl_led_ops iwl4965_led_ops;
-void iwl4965_led_enable(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e3b01..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-sta.h"
-
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status)
-{
- if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
- IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->tx_flush);
- }
-}
-
-/*
- * EEPROM
- */
-struct iwl_mod_params iwl4965_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set ourselves up so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0;
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwl4965_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_legacy_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl4965_rx_queue_reset(priv, rxq);
-
- iwl4965_rx_replenish(priv);
-
- iwl4965_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwl4965_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwl4965_txq_ctx_reset(priv);
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv,
- "Failed to alloc_pages with %s. "
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl4965_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwl4965_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl4965_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwl4965_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl4965_rx_allocate(priv, GFP_ATOMIC);
-
- iwl4965_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
- * This free routine walks the list of POOL entries and, if SKB is set to
- * non-NULL, it is unmapped and freed.
- */
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-int iwl4965_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
- int idx = 0;
- int band_offset = 0;
-
- /* HT rate format: mac80211 wants an MCS number, which is just LSB */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
- return idx;
- /* Legacy rate format, search for match in table */
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- band_offset = IWL_FIRST_OFDM_RATE;
- for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx - band_offset;
- }
-
- return -1;
-}
-
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host. */
- struct iwl4965_rx_non_cfg_phy *ncphy =
- (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
- >> IWL49_AGC_DB_POS;
-
- u32 valid_antennae =
- (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
- >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
- u8 max_rssi = 0;
- u32 i;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info. */
- for (i = 0; i < 3; i++)
- if (valid_antennae & (1 << i))
- max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
- max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL4965_RSSI_OFFSET;
-}
-
-
-static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- }
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->_4965.last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = &priv->_4965.last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl4965_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
- rx_status.band);
- rx_status.rate_idx =
- iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
- /* TSF isn't reliable. In order to allow a smooth user experience,
- * this W/A doesn't propagate it to mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
- rx_status.signal, (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->_4965.last_phy_res_valid = true;
- memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
- sizeof(struct iwl_rx_phy_res));
-}
-
-static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = chan->hw_value;
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl_scan_cmd *scan;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
- IWL_DEBUG_SCAN(priv,
- "fail to allocate memory for scan\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_any_associated(priv)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = ctx->bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(
- priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- rate = IWL_RATE_6M_PLCP;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- band = priv->scan_band;
-
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
-
- priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
- priv->scan_tx_ant[band],
- scan_tx_antennas);
- rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = iwl4965_first_antenna(active_chains);
- }
-
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
-
- cmd_len = iwl_legacy_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- vif->addr,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
- is_active, n_probes,
- (void *)&scan->data[cmd_len]);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- return ret;
-}
-
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- if (add)
- return iwl4965_add_bssid_station(priv, vif_priv->ctx,
- vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&priv->sta_lock);
-
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-
-#define IWL_TX_QUEUE_MSK 0xfffff
-
-static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
-{
- return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
- priv->current_ht_config.single_chain_sufficient;
-}
-
-#define IWL_NUM_RX_CHAINS_MULTIPLE 3
-#define IWL_NUM_RX_CHAINS_SINGLE 2
-#define IWL_NUM_IDLE_CHAINS_DUAL 2
-#define IWL_NUM_IDLE_CHAINS_SINGLE 1
-
-/*
- * Determine how many receiver/antenna chains to use.
- *
- * More chains provide better reception via diversity. Fewer chains save
- * power at the expense of throughput, but only when we are not already
- * in powersave.
- *
- * MIMO (dual stream) requires at least 2, but works better with 3.
- * This does not determine *which* chains to use, just how many.
- */
-static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
-{
- /* # of Rx chains to use when expecting MIMO. */
- if (iwl4965_is_single_rx_stream(priv))
- return IWL_NUM_RX_CHAINS_SINGLE;
- else
- return IWL_NUM_RX_CHAINS_MULTIPLE;
-}
-
-/*
- * When we are in power saving mode, unless the device supports spatial
- * multiplexing power save, use the active count for the rx chain count.
- */
-static int
-iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
-{
- /* # Rx chains when idling, depending on SMPS mode */
- switch (priv->current_ht_config.smps) {
- case IEEE80211_SMPS_STATIC:
- case IEEE80211_SMPS_DYNAMIC:
- return IWL_NUM_IDLE_CHAINS_SINGLE;
- case IEEE80211_SMPS_OFF:
- return active_cnt;
- default:
- WARN(1, "invalid SMPS mode %d",
- priv->current_ht_config.smps);
- return active_cnt;
- }
-}
-
-/* up to 4 chains */
-static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
-{
- u8 res;
- res = (chain_bitmap & BIT(0)) >> 0;
- res += (chain_bitmap & BIT(1)) >> 1;
- res += (chain_bitmap & BIT(2)) >> 2;
- res += (chain_bitmap & BIT(3)) >> 3;
- return res;
-}
-
-/**
- * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
- *
- * Selects how many and which Rx receivers/antennas/chains to use.
- * This should not be used for the scan command ... it puts data in the wrong place.
- */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- bool is_single = iwl4965_is_single_rx_stream(priv);
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
- u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
- u32 active_chains;
- u16 rx_chain;
-
- /* Tell uCode which antennas are actually connected.
- * Before first association, we assume all antennas are connected.
- * Just after first association, iwl4965_chain_noise_calibration()
- * checks which antennas actually *are* connected. */
- if (priv->chain_noise_data.active_chains)
- active_chains = priv->chain_noise_data.active_chains;
- else
- active_chains = priv->hw_params.valid_rx_ant;
-
- rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
-
- /* How many receivers should we use? */
- active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
- idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
-
-
- /* correct rx chain count according to hw settings
- * and chain noise calibration
- */
- valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
- if (valid_rx_cnt < active_rx_cnt)
- active_rx_cnt = valid_rx_cnt;
-
- if (valid_rx_cnt < idle_rx_cnt)
- idle_rx_cnt = valid_rx_cnt;
-
- rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
- rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
-
- ctx->staging.rx_chain = cpu_to_le16(rx_chain);
-
- if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
- ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
- else
- ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
-
- IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
- ctx->staging.rx_chain,
- active_rx_cnt, idle_rx_cnt);
-
- WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
- active_rx_cnt < idle_rx_cnt);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
-{
- int i;
- u8 ind = ant;
-
- for (i = 0; i < RATE_ANT_NUM - 1; i++) {
- ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
- if (valid & BIT(ind))
- return ind;
- }
- return ant;
-}
-
-static const char *iwl4965_get_fh_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
- int i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return pos;
- }
-#endif
- IWL_ERR(priv, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- IWL_ERR(priv, " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return 0;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe214e68..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-dev.h"
-#include "iwl-sta.h"
-#include "iwl-core.h"
-#include "iwl-4965.h"
-
-#define IWL4965_RS_NAME "iwl-4965-rs"
-
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY 1
-#define IWL_HT_NUMBER_TRY 3
-
-#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
-#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
-
-/* max allowed rate miss before sync LQ cmd */
-#define IWL_MISSED_RATE_MAX 15
-/* max time to accumulate history, 3*HZ (3 seconds) */
-#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
-
-static u8 rs_ht_to_legacy[] = {
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
-};
-
-static const u8 ant_toggle_lookup[] = {
- /*ANT_NONE -> */ ANT_NONE,
- /*ANT_A -> */ ANT_B,
- /*ANT_B -> */ ANT_C,
- /*ANT_AB -> */ ANT_BC,
- /*ANT_C -> */ ANT_A,
- /*ANT_AC -> */ ANT_AB,
- /*ANT_BC -> */ ANT_AC,
- /*ANT_ABC -> */ ANT_ABC,
-};
-
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_SISO_##s##M_PLCP, \
- IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX }
-
-/*
- * Parameter order:
- * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
- IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
-};
-
-static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
-{
- int idx = 0;
-
- /* HT rate format */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
-
- if (idx >= IWL_RATE_MIMO2_6M_PLCP)
- idx = idx - IWL_RATE_MIMO2_6M_PLCP;
-
- idx += IWL_FIRST_OFDM_RATE;
-		/* skip 9M, not supported in HT */
- if (idx >= IWL_RATE_9M_INDEX)
- idx += 1;
- if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
- return idx;
-
- /* legacy rate format, search for match in table */
- } else {
- for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx;
- }
-
- return -1;
-}
-
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta);
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
-static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
- bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index);
-#else
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{}
-#endif
-
-/**
- * The following tables contain the expected throughput metrics for all rates
- *
- * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
- *
- * where invalid entries are zeros.
- *
- * CCK rates are only valid in legacy table and will only be used in G
- * (2.4 GHz) band.
- */
-
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
- 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
-};
-
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
- {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
- {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
- {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
- {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
- {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
-};
-
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
- {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
- {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
-};
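
Each HT table above has four rows selected by the SGI and aggregation flags; the driver does this with an if/else chain in iwl4965_rs_set_expected_tpt_table() further down. A minimal standalone sketch of the equivalent indexing (names invented, not driver code):

#include <stdio.h>

/* Row 0 = Norm, 1 = SGI, 2 = AGG, 3 = AGG+SGI */
static int tpt_row(int is_sgi, int is_agg)
{
	return (is_agg << 1) | is_sgi;
}

int main(void)
{
	printf("Norm=%d SGI=%d AGG=%d AGG+SGI=%d\n",
	       tpt_row(0, 0), tpt_row(1, 0), tpt_row(0, 1), tpt_row(1, 1));
	return 0;
}
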
-
-/* mbps, mcs */
-static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
- { "1", "BPSK DSSS"},
- { "2", "QPSK DSSS"},
- {"5.5", "BPSK CCK"},
- { "11", "QPSK CCK"},
- { "6", "BPSK 1/2"},
- { "9", "BPSK 1/2"},
- { "12", "QPSK 1/2"},
- { "18", "QPSK 3/4"},
- { "24", "16QAM 1/2"},
- { "36", "16QAM 3/4"},
- { "48", "64QAM 2/3"},
- { "54", "64QAM 3/4"},
- { "60", "64QAM 5/6"},
-};
-
-#define MCS_INDEX_PER_STREAM (8)
-
-static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
-{
- return (u8)(rate_n_flags & 0xFF);
-}
-
-static void
-iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = IWL_INVALID_VALUE;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
-{
- return (ant_type & valid_antenna) == ant_type;
-}
-
-/*
- * Remove old data from the statistics: all data older than
- * TID_MAX_TIME_DIFF is deleted.
- */
-static void
-iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
- /* The oldest age we want to keep */
- u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
- while (tl->queue_count &&
- (tl->time_stamp < oldest_time)) {
- tl->total -= tl->packet_count[tl->head];
- tl->packet_count[tl->head] = 0;
- tl->time_stamp += TID_QUEUE_CELL_SPACING;
- tl->queue_count--;
- tl->head++;
- if (tl->head >= TID_QUEUE_MAX_SIZE)
- tl->head = 0;
- }
-}
-
-/*
- * Increment the traffic load value for this tid, and also remove
- * any old values that have aged out of the allowed time period.
- */
-static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
- struct ieee80211_hdr *hdr)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
- u8 tid;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- } else
- return MAX_TID_COUNT;
-
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
- return MAX_TID_COUNT;
-
- tl = &lq_data->load[tid];
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- /* Happens only for the first packet. Initialize the data */
- if (!(tl->queue_count)) {
- tl->total = 1;
- tl->time_stamp = curr_time;
- tl->queue_count = 1;
- tl->head = 0;
- tl->packet_count[0] = 1;
- return MAX_TID_COUNT;
- }
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
- tl->packet_count[index] = tl->packet_count[index] + 1;
- tl->total = tl->total + 1;
-
- if ((index + 1) > tl->queue_count)
- tl->queue_count = index + 1;
-
- return tid;
-}
-
-/*
- * Get the traffic load value for tid.
- */
-static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
-
- if (tid >= TID_MAX_LOAD_COUNT)
- return 0;
-
- tl = &(lq_data->load[tid]);
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- if (!(tl->queue_count))
- return 0;
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- return tl->total;
-}
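
The two helpers above keep a ring of per-time-slot packet counters whose sum is later compared against IWL_AGG_LOAD_THRESHOLD to decide whether starting a block-ack session is worthwhile. A simplified standalone sketch of the same idea (constants and names are invented, and stale buckets are filtered at read time instead of keeping a running total):

#include <stdio.h>

#define SLOT_MS       50	/* width of one time slot                  */
#define NUM_SLOTS     20	/* history window = SLOT_MS * NUM_SLOTS ms */
#define AGG_THRESHOLD 10	/* invented stand-in for the load threshold */

struct traffic_load {
	unsigned int packets[NUM_SLOTS];	/* per-slot packet counts          */
	unsigned long slot_stamp[NUM_SLOTS];	/* absolute slot each bucket holds */
};

static void tl_add_packet(struct traffic_load *tl, unsigned long now_ms)
{
	unsigned long slot = now_ms / SLOT_MS;
	unsigned int idx = slot % NUM_SLOTS;

	if (tl->slot_stamp[idx] != slot) {	/* bucket is stale: recycle it */
		tl->packets[idx] = 0;
		tl->slot_stamp[idx] = slot;
	}
	tl->packets[idx]++;
}

/* Sum only buckets that still fall inside the history window. */
static unsigned int tl_get_load(const struct traffic_load *tl,
				unsigned long now_ms)
{
	unsigned long slot = now_ms / SLOT_MS;
	unsigned int i, total = 0;

	for (i = 0; i < NUM_SLOTS; i++)
		if (slot - tl->slot_stamp[i] < NUM_SLOTS)
			total += tl->packets[i];
	return total;
}

int main(void)
{
	struct traffic_load tl = { { 0 }, { 0 } };
	unsigned long t;

	for (t = 0; t < 1000; t += 25)		/* one packet every 25 ms */
		tl_add_packet(&tl, t);

	if (tl_get_load(&tl, t) > AGG_THRESHOLD)
		printf("load %u: would start a BA session\n", tl_get_load(&tl, t));
	return 0;
}
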
-
-static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_data, u8 tid,
- struct ieee80211_sta *sta)
-{
- int ret = -EAGAIN;
- u32 load;
-
- load = iwl4965_rs_tl_get_load(lq_data, tid);
-
- if (load > IWL_AGG_LOAD_THRESHOLD) {
- IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
- sta->addr, tid);
- ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
- if (ret == -EAGAIN) {
- /*
-			 * driver and mac80211 are out of sync;
-			 * this might be caused by reloading firmware;
-			 * stop the tx BA session here
- */
- IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
- tid);
- ieee80211_stop_tx_ba_session(sta, tid);
- }
- } else {
- IWL_ERR(priv, "Aggregation not enabled for tid %d "
- "because load = %u\n", tid, load);
- }
- return ret;
-}
-
-static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
- struct iwl_lq_sta *lq_data,
- struct ieee80211_sta *sta)
-{
- if (tid < TID_MAX_LOAD_COUNT)
- iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
-}
-
-static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
-{
- return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
-}
-
-/*
- * Helper to get the expected throughput from an iwl_scale_tbl_info,
- * wrapping a NULL pointer check on the table pointer.
- */
-static s32
-iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
- if (tbl->expected_tpt)
- return tbl->expected_tpt[rs_index];
- return 0;
-}
-
-/**
- * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate. window->data contains the bitmask of successful
- * packets.
- */
-static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
-{
- struct iwl_rate_scale_data *window = NULL;
- static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
- s32 fail_count, tpt;
-
- if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
- return -EINVAL;
-
- /* Select window for current tx bit rate */
- window = &(tbl->win[scale_index]);
-
- /* Get expected throughput */
- tpt = iwl4965_get_expected_tpt(tbl, scale_index);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
- */
- while (attempts > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & mask) {
- window->data &= ~mask;
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame to throw away oldest history */
- window->data <<= 1;
-
- /* Mark the most recent #successes attempts as successful */
- if (successes > 0) {
- window->success_counter++;
- window->data |= 0x1;
- successes--;
- }
-
- attempts--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = (window->success_ratio * tpt + 64) / 128;
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- return 0;
-}
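
To make the fixed-point bookkeeping above concrete: the history is a 62-bit mask of recent outcomes, success_ratio is a percentage scaled by 128, and average_tpt divides back by 128 with +64 for rounding. In the standalone sketch below (not driver code; names simplified), 31 successes out of 62 frames against an expected throughput of 100 yields success_ratio 6400 and average_tpt 5000:

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE 62	/* same depth as IWL_RATE_MAX_WINDOW above */

struct tx_window {
	uint64_t data;		/* bit 0 = most recent frame; set bit = ACKed */
	int counter;		/* frames recorded, capped at WINDOW_SIZE     */
	int success_counter;	/* number of set bits in data                 */
};

static void window_add(struct tx_window *w, int acked)
{
	const uint64_t oldest = 1ULL << (WINDOW_SIZE - 1);

	if (w->counter >= WINDOW_SIZE) {	/* window full: drop the oldest frame */
		w->counter = WINDOW_SIZE - 1;
		if (w->data & oldest) {
			w->data &= ~oldest;
			w->success_counter--;
		}
	}
	w->counter++;
	w->data <<= 1;
	if (acked) {
		w->data |= 1;
		w->success_counter++;
	}
}

/* Percentage of ACKed frames, scaled by 128 (fixed point). */
static int success_ratio(const struct tx_window *w)
{
	return w->counter ? 128 * (100 * w->success_counter) / w->counter : -1;
}

int main(void)
{
	struct tx_window w = { 0, 0, 0 };
	int i, expected_tpt = 100;

	for (i = 0; i < 62; i++)
		window_add(&w, i % 2);	/* alternate ACK/no-ACK: 50% success */

	/* average_tpt uses the same rounding as the driver: (... + 64) / 128 */
	printf("success_ratio=%d average_tpt=%d\n", success_ratio(&w),
	       (success_ratio(&w) * expected_tpt + 64) / 128);
	return 0;
}
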
-
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 use_green)
-{
- u32 rate_n_flags = 0;
-
- if (is_legacy(tbl->lq_type)) {
- rate_n_flags = iwlegacy_rates[index].plcp;
- if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- rate_n_flags |= RATE_MCS_CCK_MSK;
-
- } else if (is_Ht(tbl->lq_type)) {
- if (index > IWL_LAST_OFDM_RATE) {
- IWL_ERR(priv, "Invalid HT rate index %d\n", index);
- index = IWL_LAST_OFDM_RATE;
- }
- rate_n_flags = RATE_MCS_HT_MSK;
-
- if (is_siso(tbl->lq_type))
- rate_n_flags |= iwlegacy_rates[index].plcp_siso;
- else
- rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
- } else {
- IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
- }
-
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
-
- if (is_Ht(tbl->lq_type)) {
- if (tbl->is_ht40) {
- if (tbl->is_dup)
- rate_n_flags |= RATE_MCS_DUP_MSK;
- else
- rate_n_flags |= RATE_MCS_HT40_MSK;
- }
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
-
- if (use_green) {
- rate_n_flags |= RATE_MCS_GF_MSK;
- if (is_siso(tbl->lq_type) && tbl->is_SGI) {
- rate_n_flags &= ~RATE_MCS_SGI_MSK;
- IWL_ERR(priv, "GF was set with SGI:SISO\n");
- }
- }
- }
- return rate_n_flags;
-}
-
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
- struct iwl_scale_tbl_info *tbl,
- int *rate_idx)
-{
- u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
- u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
- u8 mcs;
-
- memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
- *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
-
- if (*rate_idx == IWL_RATE_INVALID) {
- *rate_idx = -1;
- return -EINVAL;
- }
- tbl->is_SGI = 0; /* default legacy setup */
- tbl->is_ht40 = 0;
- tbl->is_dup = 0;
- tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
- tbl->lq_type = LQ_NONE;
- tbl->max_search = IWL_MAX_SEARCH;
-
- /* legacy rate format */
- if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
- if (iwl4965_num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
- }
- /* HT rate format */
- } else {
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
-
- if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
- (rate_n_flags & RATE_MCS_DUP_MSK))
- tbl->is_ht40 = 1;
-
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- tbl->is_dup = 1;
-
- mcs = iwl4965_rs_extract_rate(rate_n_flags);
-
- /* SISO */
- if (mcs <= IWL_RATE_SISO_60M_PLCP) {
- if (iwl4965_num_of_ant == 1)
- tbl->lq_type = LQ_SISO; /*else NONE*/
- /* MIMO2 */
- } else {
- if (iwl4965_num_of_ant == 2)
- tbl->lq_type = LQ_MIMO2;
- }
- }
- return 0;
-}
-
-/* switch to another antenna/antennas and return 1 */
-/* if no other valid antenna found, return 0 */
-static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl_scale_tbl_info *tbl)
-{
- u8 new_ant_type;
-
- if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
- return 0;
-
- if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
- return 0;
-
- new_ant_type = ant_toggle_lookup[tbl->ant_type];
-
- while ((new_ant_type != tbl->ant_type) &&
- !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
- new_ant_type = ant_toggle_lookup[new_ant_type];
-
- if (new_ant_type == tbl->ant_type)
- return 0;
-
- tbl->ant_type = new_ant_type;
- *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
- *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
- return 1;
-}
-
-/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
-{
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ctx->ht.non_gf_sta_present);
-}
-
-/**
- * iwl4965_rs_get_supported_rates - get the available rates
- *
- * For a management frame or a broadcast frame, only return the
- * basic available rates.
- *
- */
-static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
-{
- if (is_legacy(rate_type)) {
- return lq_sta->active_legacy_rate;
- } else {
- if (is_siso(rate_type))
- return lq_sta->active_siso_rate;
- else
- return lq_sta->active_mimo2_rate;
- }
-}
-
-static u16
-iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
- int rate_type)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
-
-	/* For 802.11a or HT, walk to the literally adjacent rate in
-	 * the rate table */
- if (is_a_band(rate_type) || !is_legacy(rate_type)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- low = iwlegacy_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- high = iwlegacy_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
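
The helper above packs both neighbours into one u16, the higher index in the upper byte and the lower index in the lower byte; callers such as iwl4965_rs_get_lower_rate() and iwl4965_rs_get_best_rate() unpack it as in this small standalone sketch (RATE_INVALID is a stand-in for IWL_RATE_INVALID):

#include <stdio.h>
#include <stdint.h>

#define RATE_INVALID 0xff	/* stand-in for IWL_RATE_INVALID */

int main(void)
{
	uint16_t high_low = (RATE_INVALID << 8) | 3;	/* no higher rate, lower = 3 */
	unsigned int low  = high_low & 0xff;
	unsigned int high = (high_low >> 8) & 0xff;

	printf("low=%u high=%s\n", low,
	       high == RATE_INVALID ? "invalid" : "valid");
	return 0;
}
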
-
-static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- u8 scale_index, u8 ht_possible)
-{
- s32 low;
- u16 rate_mask;
- u16 high_low;
- u8 switch_to_legacy = 0;
- u8 is_green = lq_sta->is_green;
- struct iwl_priv *priv = lq_sta->drv;
-
- /* check if we need to switch from HT to legacy rates.
-	 * assumption is that mandatory rates (1 Mbps or 6 Mbps)
-	 * are always supported (required by the spec) */
- if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
- switch_to_legacy = 1;
- scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
-
- if (iwl4965_num_of_ant(tbl->ant_type) > 1)
- tbl->ant_type =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- tbl->is_ht40 = 0;
- tbl->is_SGI = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- }
-
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
-
- /* Mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- /* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- rate_mask = (u16)(rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
- }
-
- /* If we switched from HT to legacy, check current rate */
- if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
- low = scale_index;
- goto out;
- }
-
- high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
- scale_index, rate_mask,
- tbl->lq_type);
- low = high_low & 0xff;
-
- if (low == IWL_RATE_INVALID)
- low = scale_index;
-
-out:
- return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
-}
-
-/*
- * Simple function to compare two rate scale table types
- */
-static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
- struct iwl_scale_tbl_info *b)
-{
- return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
- (a->is_SGI == b->is_SGI);
-}
-
-/*
- * mac80211 sends us Tx status
- */
-static void
-iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- int legacy_success;
- int retries;
- int rs_index, mac_index, i;
- struct iwl_lq_sta *lq_sta = priv_sta;
- struct iwl_link_quality_cmd *table;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- enum mac80211_rate_control_flags mac_flags;
- u32 tx_rate;
- struct iwl_scale_tbl_info tbl_type;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE_LIMIT(priv,
- "get frame ack response, update rate scale window\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!lq_sta) {
- IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
- return;
- } else if (!lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- return;
- }
-
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- /*
- * Ignore this Tx frame response if its initial rate doesn't match
- * that of latest Link Quality command. There may be stragglers
- * from a previous Link Quality command, but we're no longer interested
- * in those; they're either from the "active" mode while we're trying
- * to check "search" mode, or a prior "search" mode after we've moved
- * to a new "search" mode (which might become the new "active" mode).
- */
- table = &lq_sta->lq;
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
- priv->band, &tbl_type, &rs_index);
- if (priv->band == IEEE80211_BAND_5GHZ)
- rs_index -= IWL_FIRST_OFDM_RATE;
- mac_flags = info->status.rates[0].flags;
- mac_index = info->status.rates[0].idx;
- /* For HT packets, map MCS to PLCP */
- if (mac_flags & IEEE80211_TX_RC_MCS) {
- mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
- if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
- mac_index++;
- /*
- * mac80211 HT index is always zero-indexed; we need to move
- * HT OFDM rates after CCK rates in 2.4 GHz band
- */
- if (priv->band == IEEE80211_BAND_2GHZ)
- mac_index += IWL_FIRST_OFDM_RATE;
- }
- /* Here we actually compare this rate to the latest LQ command */
- if ((mac_index < 0) ||
- (tbl_type.is_SGI !=
- !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.is_ht40 !=
- !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
- (tbl_type.is_dup !=
- !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
- (tbl_type.ant_type != info->antenna_sel_tx) ||
- (!!(tx_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(tx_rate & RATE_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rs_index != mac_index)) {
- IWL_DEBUG_RATE(priv,
- "initial rate %d does not match %d (0x%x)\n",
- mac_index, rs_index, tx_rate);
- /*
- * Since rates mis-match, the last LQ command may have failed.
-		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
-		 * the driver.
- */
- lq_sta->missed_rate_counter++;
- if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
- lq_sta->missed_rate_counter = 0;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
- CMD_ASYNC, false);
- }
- /* Regardless, ignore this status info for outdated rate */
- return;
- } else
- /* Rate did match, so reset the missed_rate_counter */
- lq_sta->missed_rate_counter = 0;
-
- /* Figure out if rate scale algorithm is in active or search table */
- if (iwl4965_table_type_matches(&tbl_type,
- &(lq_sta->lq_info[lq_sta->active_tbl]))) {
- curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (iwl4965_table_type_matches(&tbl_type,
- &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
- curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- } else {
- IWL_DEBUG_RATE(priv,
- "Neither active nor search matches tx rate\n");
- tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
- tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
- /*
-		 * No matching table found; bypass the data collection
-		 * and continue to perform rate scaling to find the right table
- */
- iwl4965_rs_stay_in_table(lq_sta, true);
- goto done;
- }
-
- /*
- * Updating the frame history depends on whether packets were
- * aggregated.
- *
- * For aggregation, all packets were transmitted at the same rate, the
- * first index into rate scale table.
- */
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
- &rs_index);
- iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len);
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += info->status.ampdu_ack_len;
- lq_sta->total_failed += (info->status.ampdu_len -
- info->status.ampdu_ack_len);
- }
- } else {
- /*
-		 * For legacy, update frame history for each Tx retry.
- */
- retries = info->status.rates[0].count - 1;
- /* HW doesn't send more than 15 retries */
- retries = min(retries, 15);
-
- /* The last transmission may have been successful */
- legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- /* Collect data for each rate used during failed TX attempts */
- for (i = 0; i <= retries; ++i) {
- tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
- &tbl_type, &rs_index);
- /*
- * Only collect stats if retried rate is in the same RS
- * table as active/search.
- */
- if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
- tmp_tbl = curr_tbl;
- else if (iwl4965_table_type_matches(&tbl_type,
- other_tbl))
- tmp_tbl = other_tbl;
- else
- continue;
- iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
- i < retries ? 0 : legacy_success);
- }
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += legacy_success;
- lq_sta->total_failed += retries + (1 - legacy_success);
- }
- }
- /* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = tx_rate;
-done:
- /* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[sband->band])
- iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
-}
-
-/*
- * Begin a period of staying with a selected modulation mode.
- * Set "stay_in_tbl" flag to prevent any mode switches.
- * Set frame tx success limits according to legacy vs. high-throughput,
- * and reset overall (spanning all rates) tx success history statistics.
- * These control how long we stay using same modulation mode before
- * searching for a new mode.
- */
-static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
- struct iwl_lq_sta *lq_sta)
-{
- IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
- lq_sta->stay_in_tbl = 1; /* only place this gets set */
- if (is_legacy) {
- lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
- } else {
- lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
- }
- lq_sta->table_count = 0;
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = jiffies;
- lq_sta->action_counter = 0;
-}
-
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl)
-{
- /* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
-
- /* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Legacy rates have only one table */
- if (is_legacy(tbl->lq_type)) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
- * status */
- if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_siso20MHz;
- else if (is_siso(tbl->lq_type))
- ht_tbl_pointer = expected_tpt_siso40MHz;
- else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
- ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-
- if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
- tbl->expected_tpt = ht_tbl_pointer[0];
- else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
- tbl->expected_tpt = ht_tbl_pointer[1];
- else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
- tbl->expected_tpt = ht_tbl_pointer[2];
- else /* AGG+SGI */
- tbl->expected_tpt = ht_tbl_pointer[3];
-}
-
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput. When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
-static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl, /* "search" */
- u16 rate_mask, s8 index)
-{
- /* "active" values */
- struct iwl_scale_tbl_info *active_tbl =
- &(lq_sta->lq_info[lq_sta->active_tbl]);
- s32 active_sr = active_tbl->win[index].success_ratio;
- s32 active_tpt = active_tbl->expected_tpt[index];
-
- /* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
-
- s32 new_rate, high, low, start_hi;
- u16 high_low;
- s8 rate = index;
-
- new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
- for (; ;) {
- high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
- tbl->lq_type);
-
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
- /*
- * Lower the "search" bit rate, to give new "search" mode
- * approximately the same throughput as "active" if:
- *
- * 1) "Active" mode has been working modestly well (but not
- * great), and expected "search" throughput (under perfect
- * conditions) at candidate rate is above the actual
- * measured "active" throughput (but less than expected
- * "active" throughput under perfect conditions).
- * OR
- * 2) "Active" mode has been working perfectly or very well
- * and expected "search" throughput (under perfect
- * conditions) at candidate rate is above expected
- * "active" throughput (under perfect conditions).
- */
- if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > IWL_RATE_DECREASE_TH) &&
- (active_sr <= IWL_RATE_HIGH_TH) &&
- (tpt_tbl[rate] <= active_tpt))) ||
- ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
- (tpt_tbl[rate] > active_tpt))) {
-
- /* (2nd or later pass)
- * If we've already tried to raise the rate, and are
- * now trying to lower it, use the higher rate. */
- if (start_hi != IWL_RATE_INVALID) {
- new_rate = start_hi;
- break;
- }
-
- new_rate = rate;
-
- /* Loop again with lower rate */
- if (low != IWL_RATE_INVALID)
- rate = low;
-
- /* Lower rate not available, use the original */
- else
- break;
-
- /* Else try to raise the "search" rate to match "active" */
- } else {
- /* (2nd or later pass)
- * If we've already tried to lower the rate, and are
- * now trying to raise it, use the lower rate. */
- if (new_rate != IWL_RATE_INVALID)
- break;
-
- /* Loop again with higher rate */
- else if (high != IWL_RATE_INVALID) {
- start_hi = high;
- rate = high;
-
- /* Higher rate not available, use the original */
- } else {
- new_rate = rate;
- break;
- }
- }
- }
-
- return new_rate;
-}
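
At its core, the loop above looks for the lowest candidate rate whose expected throughput still beats what the "active" mode actually measured, so the new mode gets a fair starting point; the success-ratio thresholds only refine when lowering versus raising is allowed. A much-simplified standalone sketch of that core idea (all numbers invented):

#include <stdio.h>

int main(void)
{
	/* expected throughputs of the candidate mode, lowest rate first */
	static const int expected[] = { 42, 76, 102, 124, 158, 183 };
	const int count = sizeof(expected) / sizeof(expected[0]);
	int measured_active = 90;	/* what "active" mode really achieved */
	int start = count - 1;		/* fall back to the top rate */
	int i;

	for (i = 0; i < count; i++) {
		if (expected[i] > measured_active) {
			start = i;	/* lowest rate that should still beat it */
			break;
		}
	}
	printf("start the search mode at rate index %d (expected %d)\n",
	       start, expected[start]);
	return 0;
}
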
-
-/*
- * Set up search table for MIMO2
- */
-static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
- s8 is_green = lq_sta->is_green;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 2)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
-
- tbl->lq_type = LQ_MIMO2;
- tbl->is_dup = lq_sta->is_dup;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_mimo2_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
-
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- u8 is_green = lq_sta->is_green;
- s32 rate;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
-
- tbl->is_dup = lq_sta->is_dup;
- tbl->lq_type = LQ_SISO;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_siso_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- if (is_green)
- tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "can not switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- int ret = 0;
- u8 update_search_tbl_counter = 0;
-
- tbl->action = IWL_LEGACY_SWITCH_SISO;
-
- start_action = tbl->action;
- for (; ;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA1:
- case IWL_LEGACY_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
-
- if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- /* Don't change antenna if success has been great */
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- /* Set up search table to try other antenna */
- memcpy(search_tbl, tbl, sz);
-
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- iwl4965_rs_set_expected_tpt_table(lq_sta,
- search_tbl);
- goto out;
- }
- break;
- case IWL_LEGACY_SWITCH_SISO:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
-
- /* Set up search table to try SISO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
-
- break;
- case IWL_LEGACY_SWITCH_MIMO2_AB:
- case IWL_LEGACY_SWITCH_MIMO2_AC:
- case IWL_LEGACY_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
-
- /* Set up search table to try MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
- }
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
-
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
-out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
- return 0;
-
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- u8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
-
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA1:
- case IWL_SISO_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
- if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_SISO_SWITCH_MIMO2_AB:
- case IWL_SISO_SWITCH_MIMO2_AC:
- case IWL_SISO_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
- case IWL_SISO_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
-
- memcpy(search_tbl, tbl, sz);
- if (is_green) {
- if (!tbl->is_SGI)
- break;
- else
- IWL_ERR(priv,
- "SGI was set in GF+SISO\n");
- }
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
- }
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- s8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO2_SWITCH_ANTENNA1:
- case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
-
- if (tx_chains_num <= 2)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_MIMO2_SWITCH_SISO_A:
- case IWL_MIMO2_SWITCH_SISO_B:
- case IWL_MIMO2_SWITCH_SISO_C:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
- search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO2_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO2 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
-
- }
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-
-}
-
-/*
- * Check whether we should continue using same modulation mode, or
- * begin search for a new mode, based on:
- * 1) # tx successes or failures while using this mode
- * 2) # times calling this function
- * 3) elapsed time in this mode (not used, for now)
- */
-static void
-iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
-{
- struct iwl_scale_tbl_info *tbl;
- int i;
- int active_tbl;
- int flush_interval_passed = 0;
- struct iwl_priv *priv;
-
- priv = lq_sta->drv;
- active_tbl = lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* If we've been disallowing search, see if we should now allow it */
- if (lq_sta->stay_in_tbl) {
-
- /* Elapsed time using current modulation mode */
- if (lq_sta->flush_timer)
- flush_interval_passed =
- time_after(jiffies,
- (unsigned long)(lq_sta->flush_timer +
- IWL_RATE_SCALE_FLUSH_INTVL));
-
- /*
- * Check if we should allow search for new modulation mode.
- * If many frames have failed or succeeded, or we've used
- * this same modulation for a long time, allow search, and
- * reset history stats that keep track of whether we should
- * allow a new search. Also (below) reset all bitmaps and
- * stats in active history.
- */
- if (force_search ||
- (lq_sta->total_failed > lq_sta->max_failure_limit) ||
- (lq_sta->total_success > lq_sta->max_success_limit) ||
- ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
- && (flush_interval_passed))) {
-			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
- lq_sta->total_failed,
- lq_sta->total_success,
- flush_interval_passed);
-
- /* Allow search for new mode */
- lq_sta->stay_in_tbl = 0; /* only place reset */
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = 0;
-
- /*
-		 * Else if we've used this modulation mode for enough repetitions
- * (regardless of elapsed time or success/failure), reset
- * history bitmaps and rate-specific stats for all rates in
- * active table.
- */
- } else {
- lq_sta->table_count++;
- if (lq_sta->table_count >=
- lq_sta->table_count_limit) {
- lq_sta->table_count = 0;
-
- IWL_DEBUG_RATE(priv,
- "LQ: stay in table clear win\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-
- /* If transitioning to allow "search", reset all history
- * bitmaps and stats in active table (this will become the new
- * "search" table). */
- if (!lq_sta->stay_in_tbl) {
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-}
-
-/*
- * setup rate table in uCode
- * return rate_n_flags as used in the table
- */
-static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 is_green)
-{
- u32 rate;
-
- /* Update uCode's rate table. */
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
- iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
-
- return rate;
-}
-
-/*
- * Do rate scaling and search for new modulation mode.
- */
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int low = IWL_RATE_INVALID;
- int high = IWL_RATE_INVALID;
- int index;
- int i;
- struct iwl_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- u16 rate_mask;
- u8 update_lq = 0;
- struct iwl_scale_tbl_info *tbl, *tbl1;
- u16 rate_scale_index_msk = 0;
- u32 rate;
- u8 is_green = 0;
- u8 active_tbl = 0;
- u8 done_search = 0;
- u16 high_low;
- s32 sr;
- u8 tid = MAX_TID_COUNT;
- struct iwl_tid_data *tid_data;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
-
- /* Send management frames and NO_ACK data using lowest rate. */
- /* TODO: this could probably be improved.. */
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- if (!sta || !lq_sta)
- return;
-
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
- tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
- if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF)
- lq_sta->is_agg = 0;
- else
- lq_sta->is_agg = 1;
- } else
- lq_sta->is_agg = 0;
-
- /*
- * Select rate-scale / modulation-mode table to work with in
- * the rest of this function: "search" if searching for better
- * modulation mode, or "active" if doing rate scaling within a mode.
- */
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
- if (is_legacy(tbl->lq_type))
- lq_sta->is_green = 0;
- else
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- is_green = lq_sta->is_green;
-
- /* current tx rate */
- index = lq_sta->last_txrate_idx;
-
- IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
- tbl->lq_type);
-
- /* rates available for this association, and for modulation mode */
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
-
- IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
-
- /* mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- /* supp_rates has no CCK bits in A mode */
- rate_scale_index_msk = (u16) (rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_scale_index_msk = (u16) (rate_mask &
- lq_sta->supp_rates);
-
- } else
- rate_scale_index_msk = rate_mask;
-
- if (!rate_scale_index_msk)
- rate_scale_index_msk = rate_mask;
-
- if (!((1 << index) & rate_scale_index_msk)) {
- IWL_ERR(priv, "Current Rate is not valid\n");
- if (lq_sta->search_better_tbl) {
- /* revert to active table if search table is not valid*/
- tbl->lq_type = LQ_NONE;
- lq_sta->search_better_tbl = 0;
- tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- /* get "active" rate info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
- }
- return;
- }
-
- /* Get expected throughput table and history window for current rate */
- if (!tbl->expected_tpt) {
- IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
- return;
- }
-
- /* force user max rate if set by user */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < index)) {
- index = lq_sta->max_rate_idx;
- update_lq = 1;
- window = &(tbl->win[index]);
- goto lq_update;
- }
-
- window = &(tbl->win[index]);
-
- /*
- * If there is not enough history to calculate actual average
- * throughput, keep analyzing results of more tx frames, without
- * changing rate or mode (bypass most of the rest of this function).
- * Set up new rate table in uCode only if old rate is not supported
- * in current association (use new rate found above).
- */
- fail_count = window->counter - window->success_counter;
- if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
- IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
- "for index %d\n",
- window->success_counter, window->counter, index);
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- goto out;
- }
- /* Else we have enough samples; calculate estimate of
- * actual average throughput */
- if (window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128)) {
- IWL_ERR(priv,
- "expected_tpt should have been calculated by now\n");
- window->average_tpt = ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128);
- }
-
- /* If we are searching for better modulation mode, check success. */
- if (lq_sta->search_better_tbl) {
- /* If good success, continue using the "search" mode;
- * no need to send new link quality command, since we're
- * continuing to use the setup that we've been trying. */
- if (window->average_tpt > lq_sta->last_tpt) {
-
- IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- if (!is_legacy(tbl->lq_type))
- lq_sta->enable_counter = 1;
-
- /* Swap tables; "search" becomes "active" */
- lq_sta->active_tbl = active_tbl;
- current_tpt = window->average_tpt;
-
- /* Else poor success; go back to mode in "active" table */
- } else {
-
- IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- /* Nullify "search" table */
- tbl->lq_type = LQ_NONE;
-
- /* Revert to "active" table */
- active_tbl = lq_sta->active_tbl;
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* Revert to "active" rate and throughput info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- current_tpt = lq_sta->last_tpt;
-
- /* Need to set up a new rate table in uCode */
- update_lq = 1;
- }
-
- /* Either way, we've made a decision; modulation mode
- * search is done, allow rate adjustment next time. */
- lq_sta->search_better_tbl = 0;
- done_search = 1; /* Don't switch modes below! */
- goto lq_update;
- }
-
- /* (Else) not in search of better modulation mode, try for better
- * starting rate, while staying in this mode. */
- high_low = iwl4965_rs_get_adjacent_rate(priv, index,
- rate_scale_index_msk,
- tbl->lq_type);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
-	/* If user set max rate, don't allow higher than the user constraint */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- sr = window->success_ratio;
-
- /* Collect measured throughputs for current and adjacent rates */
- current_tpt = window->average_tpt;
- if (low != IWL_RATE_INVALID)
- low_tpt = tbl->win[low].average_tpt;
- if (high != IWL_RATE_INVALID)
- high_tpt = tbl->win[high].average_tpt;
-
- scale_action = 0;
-
- /* Too many failures, decrease rate */
- if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low success_ratio\n");
- scale_action = -1;
-
- /* No throughput measured yet for adjacent rates; try increase. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
- }
-
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it! */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt))
- scale_action = 0;
-
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance. */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- } else {
- scale_action = 0;
- }
-
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- }
- }
- }
-
-	/* Sanity check; asked for decrease, but success rate or throughput
-	 * has been good at the old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
- /* Decrease starting rate, update uCode's rate table */
- if (low != IWL_RATE_INVALID) {
- update_lq = 1;
- index = low;
- }
-
- break;
- case 1:
- /* Increase starting rate, update uCode's rate table */
- if (high != IWL_RATE_INVALID) {
- update_lq = 1;
- index = high;
- }
-
- break;
- case 0:
- /* No change */
- default:
- break;
- }
-
- IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
- "high %d type %d\n",
- index, scale_action, low, high, tbl->lq_type);
-
-lq_update:
- /* Replace uCode's rate table for the destination station. */
- if (update_lq)
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- /*
- * Search for new modulation mode if we're:
- * 1) Not changing rates right now
- * 2) Not just finishing up a search
- * 3) Allowing a new search
- */
- if (!update_lq && !done_search &&
- !lq_sta->stay_in_tbl && window->counter) {
-		/* Save current throughput to compare with "search" throughput */
- lq_sta->last_tpt = current_tpt;
-
- /* Select a new "search" modulation mode to try.
- * If one is found, set up the new "search" table. */
- if (is_legacy(tbl->lq_type))
- iwl4965_rs_move_legacy_other(priv, lq_sta,
- conf, sta, index);
- else if (is_siso(tbl->lq_type))
- iwl4965_rs_move_siso_to_other(priv, lq_sta,
- conf, sta, index);
- else /* (is_mimo2(tbl->lq_type)) */
- iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
- conf, sta, index);
-
- /* If new "search" mode was selected, set up in uCode table */
- if (lq_sta->search_better_tbl) {
- /* Access the "search" table, clear its history. */
- tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
-
- /* Use new "search" start rate */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-
- IWL_DEBUG_RATE(priv,
- "Switch current mcs: %X index: %d\n",
- tbl->current_rate, index);
- iwl4965_rs_fill_link_cmd(priv, lq_sta,
- tbl->current_rate);
- iwl_legacy_send_lq_cmd(priv, ctx,
- &lq_sta->lq, CMD_ASYNC, false);
- } else
- done_search = 1;
- }
-
- if (done_search && !lq_sta->stay_in_tbl) {
- /* If the "active" (non-search) mode was legacy,
- * and we've tried switching antennas,
- * but we haven't been able to try HT modes (not available),
- * stay with best antenna legacy modulation for a while
- * before next round of mode comparisons. */
- tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
- lq_sta->action_counter > tbl1->max_search) {
- IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
- iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
- }
-
- /* If we're in an HT mode, and all 3 mode switch actions
- * have been tried and compared, stay in this best modulation
- * mode for a while before next round of mode comparisons. */
- if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= tbl1->max_search)) {
- if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
- (lq_sta->tx_agg_tid_en & (1 << tid)) &&
- (tid != MAX_TID_COUNT)) {
- tid_data =
- &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF) {
- IWL_DEBUG_RATE(priv,
- "try to aggregate tid %d\n",
- tid);
- iwl4965_rs_tl_turn_on_agg(priv, tid,
- lq_sta, sta);
- }
- }
- iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
- }
- }
-
-out:
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
- index, is_green);
- i = index;
- lq_sta->last_txrate_idx = i;
-}
-
-/**
- * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
- * rc80211_simple.
- *
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
- */
-static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct iwl_scale_tbl_info *tbl;
- int rate_idx;
- int i;
- u32 rate;
- u8 use_green = iwl4965_rs_use_green(sta);
- u8 active_tbl = 0;
- u8 valid_tx_ant;
- struct iwl_station_priv *sta_priv;
- struct iwl_rxon_context *ctx;
-
- if (!sta || !lq_sta)
- return;
-
- sta_priv = (void *)sta->drv_priv;
- ctx = sta_priv->common.ctx;
-
- i = lq_sta->last_txrate_idx;
-
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- if ((i < 0) || (i >= IWL_RATE_COUNT))
- i = 0;
-
- rate = iwlegacy_rates[i].plcp;
- tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
- rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
- if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
- rate |= RATE_MCS_CCK_MSK;
-
- iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
- iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
-
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
- tbl->current_rate = rate;
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
- priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
-}
-
-static void
-iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
-
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_lq_sta *lq_sta = priv_sta;
- int rate_idx;
-
- IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
-
- /* Get max rate if user set max rate */
- if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) &&
- (lq_sta->max_rate_idx != -1))
- lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((lq_sta->max_rate_idx < 0) ||
- (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
- lq_sta->max_rate_idx = -1;
- }
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (lq_sta && !lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- priv_sta = NULL;
- }
-
- /* Send management frames and NO_ACK data using lowest rate. */
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- if (!lq_sta)
- return;
-
- rate_idx = lq_sta->last_txrate_idx;
-
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
- rate_idx -= IWL_FIRST_OFDM_RATE;
-		/* 6M and 9M share the same MCS index */
- rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
- if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
- rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
- info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_DUP_DATA;
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_GREEN_FIELD;
- } else {
- /* Check for invalid rates */
- if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
- (rate_idx < IWL_FIRST_OFDM_RATE)))
- rate_idx = rate_lowest_index(sband, sta);
- /* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
- info->control.rates[0].flags = 0;
- }
- info->control.rates[0].idx = rate_idx;
-
-}
-
-static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
- gfp_t gfp)
-{
- struct iwl_lq_sta *lq_sta;
- struct iwl_station_priv *sta_priv =
- (struct iwl_station_priv *) sta->drv_priv;
- struct iwl_priv *priv;
-
- priv = (struct iwl_priv *)priv_rate;
- IWL_DEBUG_RATE(priv, "create station rate scale window\n");
-
- lq_sta = &sta_priv->lq_sta;
-
- return lq_sta;
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void
-iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta,
- u8 sta_id)
-{
- int i, j;
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct iwl_station_priv *sta_priv;
- struct iwl_lq_sta *lq_sta;
- struct ieee80211_supported_band *sband;
-
- sta_priv = (struct iwl_station_priv *) sta->drv_priv;
- lq_sta = &sta_priv->lq_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
-
- lq_sta->lq.sta_id = sta_id;
-
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- IWL_DEBUG_RATE(priv, "LQ:"
- "*** rate scale station global init for station %d ***\n",
- sta_id);
-	/* TODO: what is a good starting rate for STA? About middle? Maybe not
-	 * the lowest or the highest rate. Could consider using RSSI from
-	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
-	 * after assoc. */
-
- lq_sta->is_dup = 0;
- lq_sta->max_rate_idx = -1;
- lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
- lq_sta->band = priv->band;
- /*
- * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
- * supp_rates[] does not; shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* These values will be overridden later */
- lq_sta->lq.general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
- lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
-	/* by default, allow aggregation for all TIDs */
- lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
- lq_sta->drv = priv;
-
- /* Set last_txrate_idx to lowest rate */
- lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
- lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- lq_sta->is_agg = 0;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- lq_sta->dbg_fixed_rate = 0;
-#endif
-
- iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
-}
-
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 new_rate)
-{
- struct iwl_scale_tbl_info tbl_type;
- int index = 0;
- int rate_idx;
- int repeat_rate = 0;
- u8 ant_toggle_cnt = 0;
- u8 use_ht_possible = 1;
- u8 valid_tx_ant = 0;
- struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
-
- /* Override starting rate (index 0) if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Interpret new_rate (rate_n_flags) */
- iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
- &tbl_type, &rate_idx);
-
- /* How many times should we repeat the initial rate? */
- if (is_legacy(tbl_type.lq_type)) {
- ant_toggle_cnt = 1;
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- lq_cmd->general_params.mimo_delimiter =
- is_mimo(tbl_type.lq_type) ? 1 : 0;
-
- /* Fill 1st table entry (index 0) */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
- lq_cmd->general_params.single_stream_ant_msk =
- tbl_type.ant_type;
- } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
- lq_cmd->general_params.dual_stream_ant_msk =
- tbl_type.ant_type;
- } /* otherwise we don't modify the existing value */
-
- index++;
- repeat_rate--;
- if (priv)
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- /* Fill rest of rate table */
- while (index < LINK_QUAL_MAX_RETRY_NUM) {
- /* Repeat initial/next rate.
- * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
- * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
- }
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags =
- cpu_to_le32(new_rate);
- repeat_rate--;
- index++;
- }
-
- iwl4965_rs_get_tbl_info_from_mcs(new_rate,
- lq_sta->band, &tbl_type,
- &rate_idx);
-
- /* Indicate to uCode which entries might be MIMO.
- * If initial rate was MIMO, this will finally end up
- * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
- if (is_mimo(tbl_type.lq_type))
- lq_cmd->general_params.mimo_delimiter = index;
-
- /* Get next rate */
- new_rate = iwl4965_rs_get_lower_rate(lq_sta,
- &tbl_type, rate_idx,
- use_ht_possible);
-
- /* How many times should we repeat the next rate? */
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
-
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- /* Don't allow HT rates after next pass.
- * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
- use_ht_possible = 0;
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- index++;
- repeat_rate--;
- }
-
- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
- lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-
- lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-}
-
-static void
-*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-/* rate scaling requires a free function to be implemented */
-static void iwl4965_rs_free(void *priv_rate)
-{
- return;
-}
-
-static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl_priv *priv __maybe_unused = priv_r;
-
- IWL_DEBUG_RATE(priv, "enter\n");
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{
- struct iwl_priv *priv;
- u8 valid_tx_ant;
- u8 ant_sel_tx;
-
- priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
- if (lq_sta->dbg_fixed_rate) {
- ant_sel_tx =
- ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
- >> RATE_MCS_ANT_POS);
- if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
- *rate_n_flags = lq_sta->dbg_fixed_rate;
- IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
- } else {
- lq_sta->dbg_fixed_rate = 0;
- IWL_ERR(priv,
- "Invalid antenna selection 0x%X, Valid is 0x%X\n",
- ant_sel_tx, valid_tx_ant);
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
- } else {
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- char buf[64];
- size_t buf_size;
- u32 parsed_rate;
- struct iwl_station_priv *sta_priv =
- container_of(lq_sta, struct iwl_station_priv, lq_sta);
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- priv = lq_sta->drv;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x", &parsed_rate) == 1)
- lq_sta->dbg_fixed_rate = parsed_rate;
- else
- lq_sta->dbg_fixed_rate = 0;
-
- lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
- lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-
- IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
- if (lq_sta->dbg_fixed_rate) {
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
- false);
- }
-
- return count;
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i = 0;
- int index = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
- priv = lq_sta->drv;
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
-	desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%X\n",
- lq_sta->total_failed, lq_sta->total_success,
- lq_sta->active_legacy_rate);
- desc += sprintf(buff+desc, "fixed rate 0x%X\n",
- lq_sta->dbg_fixed_rate);
- desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
- desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
- if (is_Ht(tbl->lq_type)) {
- desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
- desc += sprintf(buff+desc, " %s",
- (tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n",
- (tbl->is_SGI) ? "SGI" : "",
- (lq_sta->is_green) ? "GF enabled" : "",
- (lq_sta->is_agg) ? "AGG on" : "");
- }
- desc += sprintf(buff+desc, "last tx rate=0x%X\n",
- lq_sta->last_rate_n_flags);
- desc += sprintf(buff+desc, "general:"
-		"flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
- lq_sta->lq.general_params.flags,
- lq_sta->lq.general_params.mimo_delimiter,
- lq_sta->lq.general_params.single_stream_ant_msk,
- lq_sta->lq.general_params.dual_stream_ant_msk);
-
- desc += sprintf(buff+desc, "agg:"
- "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
- le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
- lq_sta->lq.agg_params.agg_dis_start_th,
- lq_sta->lq.agg_params.agg_frame_cnt_limit);
-
- desc += sprintf(buff+desc,
- "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
- lq_sta->lq.general_params.start_rate_index[0],
- lq_sta->lq.general_params.start_rate_index[1],
- lq_sta->lq.general_params.start_rate_index[2],
- lq_sta->lq.general_params.start_rate_index[3]);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- index = iwl4965_hwrate_to_plcp_idx(
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
- if (is_legacy(tbl->lq_type)) {
- desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps);
- } else {
- desc += sprintf(buff+desc,
- " rate[%d] 0x%X %smbps (%s)\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
- .write = iwl4965_rs_sta_dbgfs_scale_table_write,
- .read = iwl4965_rs_sta_dbgfs_scale_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i, j;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- for (i = 0; i < LQ_SIZE; i++) {
- desc += sprintf(buff+desc,
- "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
- "rate=0x%X\n",
- lq_sta->active_tbl == i ? "*" : "x",
- lq_sta->lq_info[i].lq_type,
- lq_sta->lq_info[i].is_SGI,
- lq_sta->lq_info[i].is_ht40,
- lq_sta->lq_info[i].is_dup,
- lq_sta->is_green,
- lq_sta->lq_info[i].current_rate);
- for (j = 0; j < IWL_RATE_COUNT; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->lq_info[i].win[j].counter,
- lq_sta->lq_info[i].win[j].success_counter,
- lq_sta->lq_info[i].win[j].success_ratio);
- }
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl4965_rs_sta_dbgfs_stats_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char buff[120];
- int desc = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-
- priv = lq_sta->drv;
-
- if (is_Ht(tbl->lq_type))
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- tbl->expected_tpt[lq_sta->last_txrate_idx]);
- else
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
- .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- lq_sta->rs_sta_dbgfs_scale_table_file =
- debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
- lq_sta, &rs_sta_dbgfs_scale_table_ops);
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
- lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
- debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
- &lq_sta->tx_agg_tid_en);
-
-}
-
-static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by the driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added, we ignore it.
- */
-static void
-iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
- .name = IWL4965_RS_NAME,
- .tx_status = iwl4965_rs_tx_status,
- .get_rate = iwl4965_rs_get_rate,
- .rate_init = iwl4965_rs_rate_init_stub,
- .alloc = iwl4965_rs_alloc,
- .free = iwl4965_rs_free,
- .alloc_sta = iwl4965_rs_alloc_sta,
- .free_sta = iwl4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl4965_rs_add_debugfs,
- .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
-#endif
-};
-
-int iwl4965_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_4965_ops);
-}
-
-void iwl4965_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_4965_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bbfc3c..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
-
- missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
- priv->missed_beacon_threshold) {
- IWL_DEBUG_CALIB(priv,
- "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consecutive_missed_beacons),
- le32_to_cpu(missed_beacon->total_missed_becons),
- le32_to_cpu(missed_beacon->num_recvd_beacons),
- le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl4965_init_sensitivity(priv);
- }
-}
-
-/* Calculate noise level, based on measurements during network silence just
- * before a beacon arrives. This measurement can be done only if we know
- * exactly when to expect beacons, therefore only when we're associated. */
-static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
-{
- struct statistics_rx_non_phy *rx_info;
- int num_active_rx = 0;
- int total_silence = 0;
- int bcn_silence_a, bcn_silence_b, bcn_silence_c;
- int last_rx_noise;
-
- rx_info = &(priv->_4965.statistics.rx.general);
- bcn_silence_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
- bcn_silence_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
- bcn_silence_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
- if (bcn_silence_a) {
- total_silence += bcn_silence_a;
- num_active_rx++;
- }
- if (bcn_silence_b) {
- total_silence += bcn_silence_b;
- num_active_rx++;
- }
- if (bcn_silence_c) {
- total_silence += bcn_silence_c;
- num_active_rx++;
- }
-
- /* Average among active antennas */
- if (num_active_rx)
- last_rx_noise = (total_silence / num_active_rx) - 107;
- else
- last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
- bcn_silence_a, bcn_silence_b, bcn_silence_c,
- last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-/*
- * based on the assumption that all statistics counters are DWORDs
- * FIXME: This function is for debugging; it does not deal with
- * the case of counter roll-over.
- */
-static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i, size;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
- struct statistics_general_common *general, *accum_general;
- struct statistics_tx *tx, *accum_tx;
-
- prev_stats = (__le32 *)&priv->_4965.statistics;
- accum_stats = (u32 *)&priv->_4965.accum_statistics;
- size = sizeof(struct iwl_notif_statistics);
- general = &priv->_4965.statistics.general.common;
- accum_general = &priv->_4965.accum_statistics.general.common;
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta = (u32 *)&priv->_4965.delta_statistics;
- max_delta = (u32 *)&priv->_4965.max_delta;
-
- for (i = sizeof(__le32); i < size;
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- accum_general->temperature = general->temperature;
- accum_general->ttl_timestamp = general->ttl_timestamp;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->_4965.statistics.general.common.temperature !=
- pkt->u.stats.general.common.temperature) ||
- ((priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
-	/* TODO: reading some of the statistics is unneeded */
- memcpy(&priv->_4965.statistics, &pkt->u.stats,
- sizeof(priv->_4965.statistics));
-
- set_bit(STATUS_STATISTICS, &priv->status);
-
- /* Reschedule the statistics timer to occur in
- * REG_RECALIB_PERIOD seconds to ensure we get a
- * thermal update even if the uCode doesn't give
- * us one */
- mod_timer(&priv->statistics_periodic, jiffies +
- msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl4965_rx_calc_noise(priv);
- queue_work(priv->workqueue, &priv->run_time_calib_work);
- }
- if (priv->cfg->ops->lib->temp_ops.temperature && change)
- priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_4965.accum_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.delta_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.max_delta, 0,
- sizeof(struct iwl_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl4965_rx_statistics(priv, rxb);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23553d..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-4965.h"
-
-static struct iwl_link_quality_cmd *
-iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
-{
- int i, r;
- struct iwl_link_quality_cmd *link_cmd;
- u32 rate_flags = 0;
- __le32 rate_n_flags;
-
- link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
- if (!link_cmd) {
- IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
- return NULL;
- }
-	/* Set up the rate scaling to start at the selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
-
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
-
- rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
- rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
- rate_flags);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
-
- link_cmd->general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!link_cmd->general_params.dual_stream_ant_msk) {
- link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
- link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- link_cmd->sta_id = sta_id;
-
- return link_cmd;
-}
-
-/*
- * iwl4965_add_bssid_station - Add the special IBSS BSSID station
- *
- * Function sleeps.
- */
-int
-iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r)
-{
- int ret;
- u8 sta_id;
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- /* Set up default rate scaling table in device's station table */
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for station %pM.\n",
- addr);
- return -ENOMEM;
- }
-
- ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
- if (ret)
- IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- bool send_if_empty)
-{
- int i, not_empty = 0;
- u8 buff[sizeof(struct iwl_wep_cmd) +
- sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
- struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
- size_t cmd_size = sizeof(struct iwl_wep_cmd);
- struct iwl_host_cmd cmd = {
- .id = ctx->wep_key_cmd,
- .data = wep_cmd,
- .flags = CMD_SYNC,
- };
-
- might_sleep();
-
- memset(wep_cmd, 0, cmd_size +
- (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
-
- for (i = 0; i < WEP_KEYS_MAX ; i++) {
- wep_cmd->key[i].key_index = i;
- if (ctx->wep_keys[i].key_size) {
- wep_cmd->key[i].key_offset = i;
- not_empty = 1;
- } else {
- wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
- }
-
- wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
- memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
- ctx->wep_keys[i].key_size);
- }
-
- wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
- wep_cmd->num_keys = WEP_KEYS_MAX;
-
- cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
-
- cmd.len = cmd_size;
-
- if (not_empty || send_if_empty)
- return iwl_legacy_send_cmd(priv, &cmd);
- else
- return 0;
-}
-
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- lockdep_assert_held(&priv->mutex);
-
- return iwl4965_static_wepkey_cmd(priv, ctx, false);
-}
-
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
- keyconf->keyidx);
-
- memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_WEPKEY command due to RFKILL.\n");
- /* but keys in device are clear anyway so return success */
- return 0;
- }
- ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
- IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
- keyconf->keyidx, ret);
-
- return ret;
-}
-
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (keyconf->keylen != WEP_KEY_LEN_128 &&
- keyconf->keylen != WEP_KEY_LEN_64) {
- IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
- return -EINVAL;
- }
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = HW_KEY_DEFAULT;
- priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
-
- ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
- memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
- keyconf->keylen);
-
- ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
- IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
- keyconf->keylen, keyconf->keyidx, ret);
-
- return ret;
-}
-
-static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-
- key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
-
- memcpy(priv->stations[sta_id].keyinfo.key,
- keyconf->key, keyconf->keylen);
-
- memcpy(&priv->stations[sta_id].sta.key.key[3],
- keyconf->key, keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- int ret = 0;
- __le16 key_flags = 0;
-
- key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = 16;
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
-
-
-	/* This copy is actually not needed: we get the key with each TX */
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
-{
- u8 sta_id;
- unsigned long flags;
- int i;
-
- if (iwl_legacy_scan_cancel(priv)) {
- /* cancel scan failed, just live w/ bad key and rely
- briefly on SW decryption */
- return;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
-
- for (i = 0; i < 5; i++)
- priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
- cpu_to_le16(phase1key[i]);
-
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
-
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- u16 key_flags;
- u8 keyidx;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys--;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
- keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
-
- IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
- keyconf->keyidx, sta_id);
-
- if (keyconf->keyidx != keyidx) {
-		/* We need to remove a key with an index different from the one
-		 * in the uCode. This means that the key we need to remove has
-		 * been replaced by another one with a different index.
-		 * Don't do anything and return ok.
- */
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
- IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
- keyconf->keyidx, key_flags);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
- &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- priv->stations[sta_id].sta.key.key_offset);
- memset(&priv->stations[sta_id].keyinfo, 0,
- sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags =
- STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys++;
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv,
- "Unknown alg: %s cipher = %x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv,
- "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-/**
- * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
- *
- * This adds the broadcast station into the driver's station table
- * and marks it driver active, so that it will be restored to the
- * device at the next best time.
- */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
- false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-/**
- * iwl4965_update_bcast_station - update broadcast station's LQ command
- *
- * Only used by iwl4965. Placed here to have all bcast station management
- * code together.
- */
-static int iwl4965_update_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- struct iwl_link_quality_cmd *link_cmd;
- u8 sta_id = ctx->bcast_sta_id;
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (priv->stations[sta_id].lq)
- kfree(priv->stations[sta_id].lq);
- else
- IWL_DEBUG_INFO(priv,
- "Bcast station rate scaling has not been initialized yet.\n");
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-int iwl4965_update_bcast_stations(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int ret = 0;
-
- for_each_context(priv, ctx) {
- ret = iwl4965_update_bcast_station(priv, ctx);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-/**
- * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
- */
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
- priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION)
- return -ENXIO;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
- priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
- priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-void
-iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask =
- STA_MODIFY_SLEEP_TX_COUNT_MSK;
- priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e3638ba..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int iwl4965_get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static inline int
-iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
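
As a hedged illustration of the mapping described in the comment above: a TID resolves to an AC through the table, and the AC-to-hardware-queue mapping is the identity, so the same index is reused for the hw queue. The snippet below is a standalone sketch only; the IEEE80211_AC_* values and the table are redefined locally purely for illustration.

#include <stdio.h>

/* Local illustrative copies; the driver uses mac80211's IEEE80211_AC_* enum. */
enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

static const int demo_tid_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

int main(void)
{
	int tid;

	for (tid = 0; tid < 8; tid++) {
		int ac = demo_tid_to_ac[tid];

		/* AC -> hw queue is the identity mapping, so queue == ac. */
		printf("TID %d -> AC %d -> hw queue %d\n", tid, ac, ac);
	}
	return 0;
}
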
-
-/*
- * Handle building the REPLY_TX command.
- */
-static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
-{
- __le16 fc = hdr->frame_control;
- __le32 tx_flags = tx_cmd->tx_flags;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc)
-{
- u32 rate_flags;
- int rate_idx;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- u8 rate_plcp;
-
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- /* Set retry limit on RTS packets */
- rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- /* DATA packets will use the uCode station table for rate/antenna
- * selection */
- if (ieee80211_is_data(fc)) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- return;
- }
-
-	/*
- * If the current TX rate stored in mac80211 has the MCS bit set, it's
- * not really a TX rate. Thus, we use the lowest supported rate for
- * this band. Also use the lowest supported rate if the stored rate
- * index is invalid.
- */
- rate_idx = info->control.rates[0].idx;
- if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
- (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
- info->control.sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwlegacy_rates[rate_idx].plcp;
- /* Zero out flags for this packet */
- rate_flags = 0;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
- /* Set up antennas */
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
-
- rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-
- /* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
-static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
- (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", keyconf->keyidx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
- break;
- }
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
- struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int txq_id;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u16 seq_number = 0;
- __le16 fc;
- u8 hdr_len;
- u8 sta_id;
- u8 wait_write_ptr = 0;
- u8 tid = 0;
- u8 *qc = NULL;
- unsigned long flags;
- bool is_agg = false;
-
- if (info->control.vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- hdr_len = ieee80211_hdrlen(fc);
-
-	/* For management frames use the broadcast id so as not to break aggregation */
- if (!ieee80211_is_data(fc))
- sta_id = ctx->bcast_sta_id;
- else {
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
- }
- }
-
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
- if (sta)
- sta_priv = (void *)sta->drv_priv;
-
- if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
- /*
- * This sends an asynchronous command to the device,
- * but we can rely on it being processed before the
- * next frame is processed -- and the next frame to
- * this station is the one that will consume this
- * counter.
- * For now set the counter to just 1 since we do not
- * support uAPSD yet.
- */
- iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
- }
-
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
-
- /* irqs already disabled/saved above when locking priv->lock */
- spin_lock(&priv->sta_lock);
-
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- is_agg = true;
- }
- }
-
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
-
- spin_unlock(&priv->sta_lock);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- if (info->control.hw_key)
- iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-
- iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
-
- iwl_legacy_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, firstlen,
- PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, firstlen, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- secondlen, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, secondlen,
- 0, 0);
- }
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_legacy_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
-
- /*
- * Avoid atomic ops if it isn't an associated client.
- * Also, if this is a packet for aggregation, don't
- * increase the counter because the ucode will stop
- * aggregation queues when their respective station
- * goes to sleep.
- */
- if (sta_priv && sta_priv->client && !is_agg)
- atomic_inc(&sta_priv->pending_frames);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark) &&
- priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_legacy_stop_queue(priv, txq);
- }
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
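
A standalone sketch (not driver code) of two bookkeeping steps performed in iwl4965_tx_skb above: packing the Tx queue and TFD index into the command header's sequence field, and rounding the Tx-command-plus-MAC-header length up to a dword boundary to decide whether the MH_PAD flag is needed. The bit layout of the DEMO_* macros and the byte counts are assumptions made only for illustration.

#include <stdio.h>

/* Assumed layout: queue id in bits 8..12, TFD index in the low byte. */
#define DEMO_QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define DEMO_INDEX_TO_SEQ(i)	((i) & 0xff)

int main(void)
{
	unsigned int seq = DEMO_QUEUE_TO_SEQ(10) | DEMO_INDEX_TO_SEQ(37);
	unsigned int hdr_len = 26;		/* e.g. a QoS data MAC header */
	unsigned int len = 12 + hdr_len;	/* command header + Tx command stub, illustrative sizes */
	unsigned int firstlen = (len + 3) & ~3u;	/* round up to dword boundary */

	printf("sequence = 0x%04x (queue %u, index %u)\n",
	       seq, (seq >> 8) & 0x1f, seq & 0xff);
	printf("len = %u, padded firstlen = %u, MH_PAD needed: %s\n",
	       len, firstlen, firstlen != len ? "yes" : "no");
	return 0;
}
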
-
-static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-/**
- * iwl4965_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
- }
- iwl4965_free_dma_ptr(priv, &priv->kw);
-
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-/**
- * iwl4965_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwl4965_hw_txq_ctx_free(priv);
-
- ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_legacy_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_legacy_tx_queue_init(priv,
- &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwl4965_hw_txq_ctx_free(priv);
- iwl4965_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == priv->cmd_queue ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- }
-}
-
-/**
- * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- iwl4965_txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
- IWL_ERR(priv, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_legacy_read_direct32(priv,
- FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!priv->txq)
- return;
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_unmap(priv);
- else
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
- u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_legacy_write_prph(priv,
- IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
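
A minimal sketch of the packing step performed by iwl4965_tx_queue_set_q2ratid above: two queues share one 32-bit translate-table word, and the queue's parity selects which 16-bit half holds its RA/TID value. Everything outside the packing logic is made up for the example.

#include <stdio.h>
#include <inttypes.h>

/* Odd queues own the upper 16 bits of the table word, even queues the lower 16. */
static uint32_t demo_pack_q2ratid(uint32_t tbl_dw, uint16_t scd_q2ratid, int txq_id)
{
	if (txq_id & 0x1)
		return ((uint32_t)scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	return scd_q2ratid | (tbl_dw & 0xFFFF0000);
}

int main(void)
{
	uint32_t dw = 0xAAAABBBB;	/* pretend current table contents */

	printf("even queue 10: 0x%08" PRIx32 "\n", demo_pack_q2ratid(dw, 0x1234, 10));
	printf("odd  queue 11: 0x%08" PRIx32 "\n", demo_pack_q2ratid(dw, 0x1234, 11));
	return 0;
}
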
-
-/**
- * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
- * i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
- int ret;
-
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_legacy_write_targ_mem(priv,
- priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
- (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
- & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
- int sta_id;
- int tx_fifo;
- int txq_id;
- int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo < 0))
- return tx_fifo;
-
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, sta->addr, tid);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Start AGG on invalid station\n");
- return -ENXIO;
- }
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
- IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
- return -ENXIO;
- }
-
- txq_id = iwl4965_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- iwl_legacy_set_swq_id(&priv->txq[txq_id],
- iwl4965_get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
- sta_id, tid, *ssn);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv,
- "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-/**
- * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
- * priv->lock must be held by the caller
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
-{
- int tx_fifo_id, txq_id, sta_id, ssn;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo_id < 0))
- return tx_fifo_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
- }
-
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&priv->sta_lock);
- spin_lock(&priv->lock);
-
- /*
-	 * The only reason this call can fail is a queue number out of range,
-	 * which can happen if the uCode is reloaded and all the station
-	 * information is lost. If it is outside the range, there is no need
-	 * to deactivate the uCode queue; just return "success" to allow
-	 * mac80211 to clean up its own data.
- */
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
- return 0;
-}
-
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
- struct iwl_rxon_context *ctx;
-
- ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
- lockdep_assert_held(&priv->sta_lock);
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue DELBA flow\n");
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- }
-
- return 0;
-}
-
-static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr1)
-{
- struct ieee80211_sta *sta;
- struct iwl_station_priv *sta_priv;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(ctx->vif, addr1);
- if (sta) {
- sta_priv = (void *)sta->drv_priv;
- /* avoid atomic ops if this isn't a client */
- if (sta_priv->client &&
- atomic_dec_return(&sta_priv->pending_frames) == 0)
- ieee80211_sta_block_awake(priv->hw, sta, false);
- }
- rcu_read_unlock();
-}
-
-static void
-iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
- bool is_agg)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
-
- if (!is_agg)
- iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
-
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-}
-
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
-
- if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
- }
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
-
- if (WARN_ON_ONCE(tx_info->skb == NULL))
- continue;
-
- hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
-
- iwl4965_tx_status(priv, tx_info,
- txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
- tx_info->skb = NULL;
-
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
- return nfreed;
-}
-
-/**
- * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
-{
- int i, sh, ack;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- int successes = 0;
- struct ieee80211_tx_info *info;
- u64 bitmap, sent_bitmap;
-
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
- }
-
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
- ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
- if (sh < 0) /* tbw something is wrong with indices */
- sh += 0x100;
-
- if (agg->frame_count > (64 - sh)) {
- IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
- return -1;
- }
-
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- sent_bitmap = bitmap & agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- i = 0;
- while (sent_bitmap) {
- ack = sent_bitmap & 1ULL;
- successes += ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i,
- (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- sent_bitmap >>= 1;
- ++i;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
- (unsigned long long)bitmap);
-
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = successes;
- info->status.ampdu_len = agg->frame_count;
- iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
- return 0;
-}
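
A hedged, standalone illustration of the bitmap accounting in iwl4965_tx_status_reply_compressed_ba above: the block-ack bitmap is shifted so it lines up with the driver's Tx window, masked against the frames that were actually transmitted, and the surviving bits are counted as ACKs. The window start, bitmaps, and frame count below are made-up values.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int start_idx = 4;	/* driver's first frame in the Tx window (made up) */
	int ba_start = 2;	/* index derived from the BA's seq_ctl (made up) */
	uint64_t ba_bitmap = 0x3dULL << 2;	/* ACK bits as the device might report them */
	uint64_t agg_bitmap = 0x3f;	/* frames the driver transmitted in this window */
	int sh = start_idx - ba_start;
	uint64_t sent;
	int i = 0, successes = 0;

	if (sh < 0)	/* indices wrapped around the 8-bit sequence space */
		sh += 0x100;

	sent = (ba_bitmap >> sh) & agg_bitmap;
	while (sent) {
		int ack = (int)(sent & 1ULL);

		successes += ack;
		printf("frame %d: %s\n", (start_idx + i) & 0xff, ack ? "ACK" : "NACK");
		sent >>= 1;
		i++;
	}
	printf("%d of 6 frames in the window acknowledged\n", successes);
	return 0;
}
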
-
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
-{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
-
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-
-/**
- * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_ht_agg *agg;
- int index;
- int sta_id;
- int tid;
- unsigned long flags;
-
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
- if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERR(priv,
- "BUG_ON scd_flow is bigger than number of queues\n");
- return;
- }
-
- txq = &priv->txq[scd_flow];
- sta_id = ba_resp->sta_id;
- tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
-		 * FIXME: this is a uCode bug which needs to be addressed;
-		 * log the information and return for now.
-		 * Since it can happen very often, and in order not to fill
-		 * the syslog, don't enable the logging by default.
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
- return;
- }
-
- /* Find index just before block-ack window */
- index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
- agg->wait_for_ba,
- (u8 *) &ba_resp->sta_addr_lo32,
- ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
- "scd_flow = "
- "%d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
- (unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- /* Update driver's record of ACK vs. not for each frame in window */
- iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
-
- iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
-
- switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_POSTPONE(DELAY);
- TX_STATUS_POSTPONE(FEW_BYTES);
- TX_STATUS_POSTPONE(QUIET_PERIOD);
- TX_STATUS_POSTPONE(CALC_TTAK);
- TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
- TX_STATUS_FAIL(SHORT_LIMIT);
- TX_STATUS_FAIL(LONG_LIMIT);
- TX_STATUS_FAIL(FIFO_UNDERRUN);
- TX_STATUS_FAIL(DRAIN_FLOW);
- TX_STATUS_FAIL(RFKILL_FLUSH);
- TX_STATUS_FAIL(LIFE_EXPIRE);
- TX_STATUS_FAIL(DEST_PS);
- TX_STATUS_FAIL(HOST_ABORTED);
- TX_STATUS_FAIL(BT_RETRY);
- TX_STATUS_FAIL(STA_INVALID);
- TX_STATUS_FAIL(FRAG_DROPPED);
- TX_STATUS_FAIL(TID_DISABLE);
- TX_STATUS_FAIL(FIFO_FLUSHED);
- TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
- TX_STATUS_FAIL(PASSIVE_NO_RX);
- TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
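
The status-to-string switch above leans on the preprocessor's token-pasting (##) and stringizing (#) operators so each status is written only once. A self-contained sketch of the same idiom with invented status codes:

#include <stdio.h>

enum {
	STATUS_OK = 0,
	STATUS_FAIL_SHORT_LIMIT,
	STATUS_FAIL_FIFO_UNDERRUN,
};

static const char *demo_status_name(int status)
{
#define STATUS_FAIL(x) case STATUS_FAIL_ ## x: return #x
	switch (status) {
	case STATUS_OK:
		return "OK";
	STATUS_FAIL(SHORT_LIMIT);
	STATUS_FAIL(FIFO_UNDERRUN);
	}
	return "UNKNOWN";
#undef STATUS_FAIL
}

int main(void)
{
	printf("%s\n", demo_status_name(STATUS_FAIL_FIFO_UNDERRUN));	/* FIFO_UNDERRUN */
	return 0;
}
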
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148feb9..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-4965-calib.h"
-
-#define IWL_AC_UNSET -1
-
-/**
- * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int
-iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int ret = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL4965_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- ret = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return ret;
-}
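
A standalone sketch of the sampling strategy used by iwl4965_verify_inst_sparse above: compare one 32-bit word every 100 bytes of the host image against the device copy and bail out after a few mismatches. The device read is faked with a second host buffer here.

#include <stdio.h>
#include <stdint.h>

static int demo_verify_sparse(const uint32_t *host, const uint32_t *dev, size_t len_bytes)
{
	size_t i;
	int errcnt = 0;

	/* Sample one 32-bit word every 100 bytes, as the driver does. */
	for (i = 0; i < len_bytes; i += 100) {
		size_t w = i / sizeof(uint32_t);

		if (host[w] != dev[w]) {
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint32_t host[256], dev[256];
	size_t i;

	for (i = 0; i < 256; i++)
		host[i] = dev[i] = (uint32_t)(i * 2654435761u);
	printf("clean image:   %d\n", demo_verify_sparse(host, dev, sizeof(host)));
	dev[0] ^= 1;	/* corrupt a word that falls on a sample point */
	printf("corrupt image: %d\n", demo_verify_sparse(host, dev, sizeof(dev)));
	return 0;
}
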
-
-/**
- * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
- u32 len)
-{
- u32 val;
- u32 save_len = len;
- int ret = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL4965_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- ret = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return ret;
-}
-
-/**
- * iwl4965_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-int iwl4965_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int ret;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_full(priv, image, len);
-
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce193e..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-4965-led.h"
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static int iwl4965_send_tx_power(struct iwl_priv *priv);
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
-
-/* Highest firmware API version supported */
-#define IWL4965_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL4965_UCODE_API_MIN 2
-
-#define IWL4965_FW_PRE "iwlwifi-4965-"
-#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
-#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl4965_verify_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-/**
- * iwl4965_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl4965_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int i;
- u32 done;
- u32 reg_offset;
- int ret;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- priv->ucode_type = UCODE_RT;
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL49_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
- * NOTE: iwl_init_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache.
- */
- pinst = priv->ucode_init.p_addr >> 4;
- pdata = priv->ucode_init_data.p_addr >> 4;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
-
- ret = iwl4965_verify_bsm(priv);
- if (ret)
- return ret;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv,
- BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100)
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv,
- BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
-
-
- return 0;
-}
-
-/**
- * iwl4965_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
- int ret = 0;
-
- /* bits 35:4 for 4965 */
- pinst = priv->ucode_code.p_addr >> 4;
- pdata = priv->ucode_data_backup.p_addr >> 4;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
- /* Inst byte count must be last to set up, bit 31 signals uCode
- * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return ret;
-}
-
-/**
- * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * The 4965 "initialize" ALIVE reply contains calibration data for:
- * Voltage, temperature, and MIMO tx gain correction, now stored in priv
- * (3945 does not contain this data).
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
-*/
-static void iwl4965_init_alive_start(struct iwl_priv *priv)
-{
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Calculate temperature */
- priv->temperature = iwl4965_hw_get_temperature(priv);
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl4965_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
-restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static bool iw4965_is_ht40_channel(__le32 rxon_flags)
-{
- int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- return ((chan_mod == CHANNEL_MODE_PURE_40) ||
- (chan_mod == CHANNEL_MODE_MIXED));
-}
-
-static void iwl4965_nic_config(struct iwl_priv *priv)
-{
- unsigned long flags;
- u16 radio_cfg;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
- priv->calib_info = (struct iwl_eeprom_calib_info *)
- iwl_legacy_eeprom_query_addr(priv,
- EEPROM_4965_CALIB_TXPOWER_OFFSET);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
- * Called after every association, but this runs only once!
- * ... once chain noise is calibrated the first time, it's good forever. */
-static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
- iwl_legacy_is_any_associated(priv)) {
- struct iwl_calib_diff_gain_cmd cmd;
-
- /* clear data for chain noise calibration algorithm */
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
- cmd.diff_gain_a = 0;
- cmd.diff_gain_b = 0;
- cmd.diff_gain_c = 0;
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd))
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
- .min_nrg_cck = 97,
- .max_nrg_cck = 0, /* not used, set to 0 */
-
- .auto_corr_min_ofdm = 85,
- .auto_corr_min_ofdm_mrc = 170,
- .auto_corr_min_ofdm_x1 = 105,
- .auto_corr_min_ofdm_mrc_x1 = 220,
-
- .auto_corr_max_ofdm = 120,
- .auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 140,
- .auto_corr_max_ofdm_mrc_x1 = 270,
-
- .auto_corr_min_cck = 125,
- .auto_corr_max_cck = 200,
- .auto_corr_min_cck_mrc = 200,
- .auto_corr_max_cck_mrc = 400,
-
- .nrg_th_cck = 100,
- .nrg_th_ofdm = 100,
-
- .barker_corr_th_min = 190,
- .barker_corr_th_min_mrc = 390,
- .nrg_th_cca = 62,
-};
-
-static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
-{
- /* want Kelvin */
- priv->hw_params.ct_kill_threshold =
- CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * iwl4965_hw_set_hw_params
- *
- * Called when initializing driver
- */
-static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
-{
- if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
- priv->cfg->mod_params->num_of_queues;
-
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwl4965_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWL4965_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
- priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
- priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
- iwl4965_set_ct_threshold(priv);
-
- priv->hw_params.sens = &iwl4965_sensitivity;
- priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
-{
- s32 sign = 1;
-
- if (num < 0) {
- sign = -sign;
- num = -num;
- }
- if (denom < 0) {
- sign = -sign;
- denom = -denom;
- }
- *res = 1;
- *res = ((num * 2 + denom) / (denom * 2)) * sign;
-
- return 1;
-}
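/*
 * A minimal standalone sketch of the rounding rule used by the helper
 * above (round-to-nearest signed division), assuming plain 32-bit ints.
 * Illustrative only, not driver code; the driver's helper additionally
 * writes through a result pointer and always returns 1.
 */
#include <stdio.h>

static int div_round_nearest(int num, int denom)
{
	int sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	/* (num + denom/2) / denom, written so denom/2 never truncates */
	return sign * ((num * 2 + denom) / (denom * 2));
}

int main(void)
{
	/* 7/2 rounds to 4, -7/2 rounds to -4, 5/3 rounds to 2 */
	printf("%d %d %d\n", div_round_nearest(7, 2),
	       div_round_nearest(-7, 2), div_round_nearest(5, 3));
	return 0;
}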
-
-/**
- * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
- *
- * Determines power supply voltage compensation for txpower calculations.
- * Returns number of 1/2-dB steps to subtract from gain table index,
- * to compensate for difference between power supply voltage during
- * factory measurements, vs. current power supply voltage.
- *
- * Voltage indication is higher for lower voltage.
- * Lower voltage requires more gain (lower gain table index).
- */
-static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
- s32 current_voltage)
-{
- s32 comp = 0;
-
- if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
- (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
- return 0;
-
- iwl4965_math_div_round(current_voltage - eeprom_voltage,
- TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
-
- if (current_voltage > eeprom_voltage)
- comp *= 2;
- if ((comp < -2) || (comp > 2))
- comp = 0;
-
- return comp;
-}
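/*
 * A minimal standalone sketch of the voltage-compensation rule above.
 * CODES_PER_03V is a hypothetical stand-in for the driver's
 * TX_POWER_IWL_VOLTAGE_CODES_PER_03V constant; its real value lives in
 * the 4965 headers and is not shown here.
 */
#include <stdlib.h>

#define CODES_PER_03V	7	/* hypothetical stand-in value */

static int voltage_comp(int eeprom_voltage, int current_voltage)
{
	int diff = current_voltage - eeprom_voltage;
	/* round-to-nearest division, as in the driver's math helper */
	int comp = (2 * abs(diff) + CODES_PER_03V) / (2 * CODES_PER_03V);

	if (diff < 0)
		comp = -comp;
	if (current_voltage > eeprom_voltage)
		comp *= 2;		/* doubled on this side, per the code above */
	if (comp < -2 || comp > 2)	/* out-of-range deltas are ignored */
		comp = 0;

	return comp;
}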
-
-static s32 iwl4965_get_tx_atten_grp(u16 channel)
-{
- if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
- return CALIB_CH_GROUP_5;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
- return CALIB_CH_GROUP_1;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
- return CALIB_CH_GROUP_2;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
- return CALIB_CH_GROUP_3;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
- return CALIB_CH_GROUP_4;
-
- return -EINVAL;
-}
-
-static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
-{
- s32 b = -1;
-
- for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
- if (priv->calib_info->band_info[b].ch_from == 0)
- continue;
-
- if ((channel >= priv->calib_info->band_info[b].ch_from)
- && (channel <= priv->calib_info->band_info[b].ch_to))
- break;
- }
-
- return b;
-}
-
-static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
-{
- s32 val;
-
- if (x2 == x1)
- return y1;
- else {
- iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
- return val + y2;
- }
-}
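/*
 * A short standalone sketch of what the helper above computes: plain
 * linear interpolation between the two factory sample points (x1, y1)
 * and (x2, y2), rounded to the nearest integer in the driver.  In
 * floating point the same relationship is simply:
 */
double interpolate(double x, double x1, double y1, double x2, double y2)
{
	if (x2 == x1)			/* degenerate case: identical samples */
		return y1;
	/* at x == x1 this yields y1, at x == x2 it yields y2 */
	return y2 + (x2 - x) * (y1 - y2) / (x2 - x1);
}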
-
-/**
- * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
- *
- * Interpolates factory measurements from the two sample channels within a
- * sub-band, to apply to channel of interest. Interpolation is proportional to
- * differences in channel frequencies, which is proportional to differences
- * in channel number.
- */
-static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
- struct iwl_eeprom_calib_ch_info *chan_info)
-{
- s32 s = -1;
- u32 c;
- u32 m;
- const struct iwl_eeprom_calib_measure *m1;
- const struct iwl_eeprom_calib_measure *m2;
- struct iwl_eeprom_calib_measure *omeas;
- u32 ch_i1;
- u32 ch_i2;
-
- s = iwl4965_get_sub_band(priv, channel);
- if (s >= EEPROM_TX_POWER_BANDS) {
- IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
- return -1;
- }
-
- ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
- ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
- chan_info->ch_num = (u8) channel;
-
- IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
- channel, s, ch_i1, ch_i2);
-
- for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
- for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
- m1 = &(priv->calib_info->band_info[s].ch1.
- measurements[c][m]);
- m2 = &(priv->calib_info->band_info[s].ch2.
- measurements[c][m]);
- omeas = &(chan_info->measurements[c][m]);
-
- omeas->actual_pow =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->actual_pow,
- ch_i2,
- m2->actual_pow);
- omeas->gain_idx =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->gain_idx, ch_i2,
- m2->gain_idx);
- omeas->temperature =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->temperature,
- ch_i2,
- m2->temperature);
- omeas->pa_det =
- (s8) iwl4965_interpolate_value(channel, ch_i1,
- m1->pa_det, ch_i2,
- m2->pa_det);
-
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
- m1->actual_pow, m2->actual_pow, omeas->actual_pow);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
- m1->gain_idx, m2->gain_idx, omeas->gain_idx);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
- m1->pa_det, m2->pa_det, omeas->pa_det);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
- m1->temperature, m2->temperature,
- omeas->temperature);
- }
- }
-
- return 0;
-}
-
-/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
- * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
-static s32 back_off_table[] = {
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
- 10 /* CCK */
-};
-
-/* Thermal compensation values for txpower for various frequency ranges ...
- * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
-static struct iwl4965_txpower_comp_entry {
- s32 degrees_per_05db_a;
- s32 degrees_per_05db_a_denom;
-} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
- {9, 2}, /* group 0 5.2, ch 34-43 */
- {4, 1}, /* group 1 5.2, ch 44-70 */
- {4, 1}, /* group 2 5.2, ch 71-124 */
- {4, 1}, /* group 3 5.2, ch 125-200 */
- {3, 1} /* group 4 2.4, ch all */
-};
-
-static s32 get_min_power_index(s32 rate_power_index, u32 band)
-{
- if (!band) {
- if ((rate_power_index & 7) <= 4)
- return MIN_TX_GAIN_INDEX_52GHZ_EXT;
- }
- return MIN_TX_GAIN_INDEX;
-}
-
-struct gain_entry {
- u8 dsp;
- u8 radio;
-};
-
-static const struct gain_entry gain_table[2][108] = {
- /* 5.2GHz power gain index table */
- {
- {123, 0x3F}, /* highest txpower */
- {117, 0x3F},
- {110, 0x3F},
- {104, 0x3F},
- {98, 0x3F},
- {110, 0x3E},
- {104, 0x3E},
- {98, 0x3E},
- {110, 0x3D},
- {104, 0x3D},
- {98, 0x3D},
- {110, 0x3C},
- {104, 0x3C},
- {98, 0x3C},
- {110, 0x3B},
- {104, 0x3B},
- {98, 0x3B},
- {110, 0x3A},
- {104, 0x3A},
- {98, 0x3A},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x25},
- {104, 0x25},
- {98, 0x25},
- {110, 0x24},
- {104, 0x24},
- {98, 0x24},
- {110, 0x23},
- {104, 0x23},
- {98, 0x23},
- {110, 0x22},
- {104, 0x18},
- {98, 0x18},
- {110, 0x17},
- {104, 0x17},
- {98, 0x17},
- {110, 0x16},
- {104, 0x16},
- {98, 0x16},
- {110, 0x15},
- {104, 0x15},
- {98, 0x15},
- {110, 0x14},
- {104, 0x14},
- {98, 0x14},
- {110, 0x13},
- {104, 0x13},
- {98, 0x13},
- {110, 0x12},
- {104, 0x08},
- {98, 0x08},
- {110, 0x07},
- {104, 0x07},
- {98, 0x07},
- {110, 0x06},
- {104, 0x06},
- {98, 0x06},
- {110, 0x05},
- {104, 0x05},
- {98, 0x05},
- {110, 0x04},
- {104, 0x04},
- {98, 0x04},
- {110, 0x03},
- {104, 0x03},
- {98, 0x03},
- {110, 0x02},
- {104, 0x02},
- {98, 0x02},
- {110, 0x01},
- {104, 0x01},
- {98, 0x01},
- {110, 0x00},
- {104, 0x00},
- {98, 0x00},
- {93, 0x00},
- {88, 0x00},
- {83, 0x00},
- {78, 0x00},
- },
- /* 2.4GHz power gain index table */
- {
- {110, 0x3f}, /* highest txpower */
- {104, 0x3f},
- {98, 0x3f},
- {110, 0x3e},
- {104, 0x3e},
- {98, 0x3e},
- {110, 0x3d},
- {104, 0x3d},
- {98, 0x3d},
- {110, 0x3c},
- {104, 0x3c},
- {98, 0x3c},
- {110, 0x3b},
- {104, 0x3b},
- {98, 0x3b},
- {110, 0x3a},
- {104, 0x3a},
- {98, 0x3a},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x6},
- {104, 0x6},
- {98, 0x6},
- {110, 0x5},
- {104, 0x5},
- {98, 0x5},
- {110, 0x4},
- {104, 0x4},
- {98, 0x4},
- {110, 0x3},
- {104, 0x3},
- {98, 0x3},
- {110, 0x2},
- {104, 0x2},
- {98, 0x2},
- {110, 0x1},
- {104, 0x1},
- {98, 0x1},
- {110, 0x0},
- {104, 0x0},
- {98, 0x0},
- {97, 0},
- {96, 0},
- {95, 0},
- {94, 0},
- {93, 0},
- {92, 0},
- {91, 0},
- {90, 0},
- {89, 0},
- {88, 0},
- {87, 0},
- {86, 0},
- {85, 0},
- {84, 0},
- {83, 0},
- {82, 0},
- {81, 0},
- {80, 0},
- {79, 0},
- {78, 0},
- {77, 0},
- {76, 0},
- {75, 0},
- {74, 0},
- {73, 0},
- {72, 0},
- {71, 0},
- {70, 0},
- {69, 0},
- {68, 0},
- {67, 0},
- {66, 0},
- {65, 0},
- {64, 0},
- {63, 0},
- {62, 0},
- {61, 0},
- {60, 0},
- {59, 0},
- }
-};
-
-static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
- u8 is_ht40, u8 ctrl_chan_high,
- struct iwl4965_tx_power_db *tx_power_tbl)
-{
- u8 saturation_power;
- s32 target_power;
- s32 user_target_power;
- s32 power_limit;
- s32 current_temp;
- s32 reg_limit;
- s32 current_regulatory;
- s32 txatten_grp = CALIB_CH_GROUP_MAX;
- int i;
- int c;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl_eeprom_calib_ch_info ch_eeprom_info;
- const struct iwl_eeprom_calib_measure *measurement;
- s16 voltage;
- s32 init_voltage;
- s32 voltage_compensation;
- s32 degrees_per_05db_num;
- s32 degrees_per_05db_denom;
- s32 factory_temp;
- s32 temperature_comp[2];
- s32 factory_gain_index[2];
- s32 factory_actual_pwr[2];
- s32 power_index;
-
- /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
- * are used for indexing into txpower table) */
- user_target_power = 2 * priv->tx_power_user_lmt;
-
- /* Get current (RXON) channel, band, width */
- IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
- is_ht40);
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -EINVAL;
-
- /* get txatten group, used to select 1) thermal txpower adjustment
- * and 2) mimo txpower balance between Tx chains. */
- txatten_grp = iwl4965_get_tx_atten_grp(channel);
- if (txatten_grp < 0) {
- IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
- channel);
- return txatten_grp;
- }
-
- IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
- channel, txatten_grp);
-
- if (is_ht40) {
- if (ctrl_chan_high)
- channel -= 2;
- else
- channel += 2;
- }
-
- /* hardware txpower limits ...
- * saturation (clipping distortion) txpowers are in half-dBm */
- if (band)
- saturation_power = priv->calib_info->saturation_power24;
- else
- saturation_power = priv->calib_info->saturation_power52;
-
- if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
- saturation_power > IWL_TX_POWER_SATURATION_MAX) {
- if (band)
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
- else
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
- }
-
- /* regulatory txpower limits ... reg_limit values are in half-dBm,
- * max_power_avg values are in dBm, convert * 2 */
- if (is_ht40)
- reg_limit = ch_info->ht40_max_power_avg * 2;
- else
- reg_limit = ch_info->max_power_avg * 2;
-
- if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
- (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
- if (band)
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
- else
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
- }
-
- /* Interpolate txpower calibration values for this channel,
- * based on factory calibration tests on spaced channels. */
- iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
-
- /* calculate tx gain adjustment based on power supply voltage */
- voltage = le16_to_cpu(priv->calib_info->voltage);
- init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
- voltage_compensation =
- iwl4965_get_voltage_compensation(voltage, init_voltage);
-
- IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
- init_voltage,
- voltage, voltage_compensation);
-
- /* get current temperature (Celsius) */
- current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
-	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
- current_temp = KELVIN_TO_CELSIUS(current_temp);
-
- /* select thermal txpower adjustment params, based on channel group
- * (same frequency group used for mimo txatten adjustment) */
- degrees_per_05db_num =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
- degrees_per_05db_denom =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
-
- /* get per-chain txpower values from factory measurements */
- for (c = 0; c < 2; c++) {
- measurement = &ch_eeprom_info.measurements[c][1];
-
- /* txgain adjustment (in half-dB steps) based on difference
- * between factory and current temperature */
- factory_temp = measurement->temperature;
- iwl4965_math_div_round((current_temp - factory_temp) *
- degrees_per_05db_denom,
- degrees_per_05db_num,
- &temperature_comp[c]);
-
- factory_gain_index[c] = measurement->gain_idx;
- factory_actual_pwr[c] = measurement->actual_pow;
-
- IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
- IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
- "curr tmp %d, comp %d steps\n",
- factory_temp, current_temp,
- temperature_comp[c]);
-
- IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
- factory_gain_index[c],
- factory_actual_pwr[c]);
- }
-
- /* for each of 33 bit-rates (including 1 for CCK) */
- for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
- u8 is_mimo_rate;
- union iwl4965_tx_power_dual_stream tx_power;
-
- /* for mimo, reduce each chain's txpower by half
- * (3dB, 6 steps), so total output power is regulatory
- * compliant. */
- if (i & 0x8) {
- current_regulatory = reg_limit -
- IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
- is_mimo_rate = 1;
- } else {
- current_regulatory = reg_limit;
- is_mimo_rate = 0;
- }
-
- /* find txpower limit, either hardware or regulatory */
- power_limit = saturation_power - back_off_table[i];
- if (power_limit > current_regulatory)
- power_limit = current_regulatory;
-
- /* reduce user's txpower request if necessary
- * for this rate on this channel */
- target_power = user_target_power;
- if (target_power > power_limit)
- target_power = power_limit;
-
- IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
- i, saturation_power - back_off_table[i],
- current_regulatory, user_target_power,
- target_power);
-
- /* for each of 2 Tx chains (radio transmitters) */
- for (c = 0; c < 2; c++) {
- s32 atten_value;
-
- if (is_mimo_rate)
- atten_value =
- (s32)le32_to_cpu(priv->card_alive_init.
- tx_atten[txatten_grp][c]);
- else
- atten_value = 0;
-
- /* calculate index; higher index means lower txpower */
- power_index = (u8) (factory_gain_index[c] -
- (target_power -
- factory_actual_pwr[c]) -
- temperature_comp[c] -
- voltage_compensation +
- atten_value);
-
-/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
- power_index); */
-
- if (power_index < get_min_power_index(i, band))
- power_index = get_min_power_index(i, band);
-
- /* adjust 5 GHz index to support negative indexes */
- if (!band)
- power_index += 9;
-
- /* CCK, rate 32, reduce txpower for CCK */
- if (i == POWER_TABLE_CCK_ENTRY)
- power_index +=
- IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
-
- /* stay within the table! */
- if (power_index > 107) {
- IWL_WARN(priv, "txpower index %d > 107\n",
- power_index);
- power_index = 107;
- }
- if (power_index < 0) {
- IWL_WARN(priv, "txpower index %d < 0\n",
- power_index);
- power_index = 0;
- }
-
- /* fill txpower command for this rate/chain */
- tx_power.s.radio_tx_gain[c] =
- gain_table[band][power_index].radio;
- tx_power.s.dsp_predis_atten[c] =
- gain_table[band][power_index].dsp;
-
- IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
- "gain 0x%02x dsp %d\n",
- c, atten_value, power_index,
- tx_power.s.radio_tx_gain[c],
- tx_power.s.dsp_predis_atten[c]);
- } /* for each chain */
-
- tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
-
- } /* for each rate */
-
- return 0;
-}
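/*
 * A compact sketch of the per-chain gain-table index computed inside the
 * rate/chain loops above.  All quantities are in half-dB steps; the names
 * are illustrative, not driver symbols.  A higher index selects lower
 * txpower, so everything that calls for more power is subtracted.  The
 * driver then clamps the result into the 0..107 table range and shifts it
 * for 5 GHz and CCK entries, as shown in the code above.
 */
static int txpower_index(int factory_gain_index, int factory_actual_pwr,
			 int target_power, int temperature_comp,
			 int voltage_comp, int mimo_atten)
{
	return factory_gain_index
		- (target_power - factory_actual_pwr)	/* power delta vs. factory */
		- temperature_comp			/* thermal correction */
		- voltage_comp				/* supply-voltage correction */
		+ mimo_atten;				/* per-chain MIMO balance */
}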
-
-/**
- * iwl4965_send_tx_power - Configure the TXPOWER level user limit
- *
- * Uses the active RXON for channel, band, and characteristics (ht40, high)
- * The power limit is taken from priv->tx_power_user_lmt.
- */
-static int iwl4965_send_tx_power(struct iwl_priv *priv)
-{
- struct iwl4965_txpowertable_cmd cmd = { 0 };
- int ret;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
-
- if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.channel = ctx->active.channel;
-
- ret = iwl4965_fill_txpower_tbl(priv, band,
- le16_to_cpu(ctx->active.channel),
- is_ht40, ctrl_chan_high, &cmd.tx_power);
- if (ret)
- goto out;
-
- ret = iwl_legacy_send_cmd_pdu(priv,
- REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
-
-out:
- return ret;
-}
-
-static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int ret = 0;
- struct iwl4965_rxon_assoc_cmd rxon_assoc;
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_ht_single_stream_basic_rates ==
- rxon2->ofdm_ht_single_stream_basic_rates) &&
- (rxon1->ofdm_ht_dual_stream_basic_rates ==
- rxon2->ofdm_ht_dual_stream_basic_rates) &&
- (rxon1->rx_chain == rxon2->rx_chain) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
- rxon_assoc.ofdm_ht_single_stream_basic_rates =
- ctx->staging.ofdm_ht_single_stream_basic_rates;
- rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- ctx->staging.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
-
- ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
-
- return ret;
-}
-
-static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
- int ret;
- bool new_assoc =
- !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (!iwl_legacy_is_alive(priv))
- return -EBUSY;
-
- if (!ctx->is_active)
- return 0;
-
- /* always get timestamp with Rx frame */
- ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
- ret = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /*
- * receive commit_rxon request
-	 * abort any previous channel switch if still in progress
- */
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
- (priv->switch_channel != ctx->staging.channel)) {
- IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv, ctx)) {
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
- return ret;
- }
-
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_print_rx_config_cmd(priv, ctx);
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the associated mask enabled,
- * we must clear the associated from the active configuration
- * before we apply the new config */
- if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd),
- active_rxon);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (ret) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
- return ret;
- }
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(ctx->staging.channel),
- ctx->staging.bssid_addr);
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx,
- !priv->cfg->mod_params->sw_crypto);
-
- /* Apply the new configuration
- * RXON unassoc clears the station table in uCode so restoration of
- * stations is needed after it (the RXON command) completes
- */
- if (!new_assoc) {
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
- if (new_assoc) {
- priv->start_calib = 0;
- /* Apply the new configuration
- * RXON assoc doesn't clear the station table in uCode,
- */
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- }
- iwl_legacy_print_rx_config_cmd(priv, ctx);
-
- iwl4965_init_sensitivity(priv);
-
-	/* If we issue a new RXON command which requires a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (ret) {
- IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int rc;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl4965_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
- u32 switch_time_in_usec, ucode_switch_time;
- u16 ch;
- u32 tsf_low;
- u8 switch_count;
- u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
- struct ieee80211_vif *vif = ctx->vif;
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
-
- if (is_ht40 &&
- (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.expect_beacon = 0;
- ch = ch_switch->channel->hw_value;
- cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
- switch_count = ch_switch->count;
- tsf_low = ch_switch->timestamp & 0x0ffffffff;
- /*
- * calculate the ucode channel switch time
- * adding TSF as one of the factor for when to switch
- */
- if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
- if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
- beacon_interval)) {
- switch_count -= (priv->ucode_beacon_time -
- tsf_low) / beacon_interval;
- } else
- switch_count = 0;
- }
- if (switch_count <= 1)
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- else {
- switch_time_in_usec =
- vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
- switch_time_in_usec,
- beacon_interval);
- cmd.switch_time = iwl_legacy_add_beacon_time(priv,
- priv->ucode_beacon_time,
- ucode_switch_time,
- beacon_interval);
- }
- IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
- cmd.switch_time);
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
-
- rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
- ctrl_chan_high, &cmd.tx_power);
- if (rc) {
- IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
- return rc;
- }
-
- return iwl_legacy_send_cmd_pdu(priv,
- REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-}
-
-/**
- * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int write_ptr = txq->q.write_ptr;
- int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- bc_ent = cpu_to_le16(len & 0xFFF);
- /* Set up byte count within first 256 entries */
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- /* If within first 64 entries, duplicate at end */
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
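/*
 * A small sketch of the byte-count bookkeeping above.  The scheduler reads
 * the table as a ring, so entries for the first TFD_QUEUE_SIZE_BC_DUP
 * slots are mirrored just past TFD_QUEUE_SIZE_MAX; Q_SIZE and Q_DUP below
 * are illustrative stand-ins for those two constants.
 */
#define Q_SIZE	256	/* stand-in for TFD_QUEUE_SIZE_MAX */
#define Q_DUP	64	/* stand-in for TFD_QUEUE_SIZE_BC_DUP */

static void set_byte_count(unsigned short *tfd_offset, int write_ptr,
			   unsigned short len)
{
	tfd_offset[write_ptr] = len;		/* primary entry */
	if (write_ptr < Q_DUP)			/* mirror across the wrap point */
		tfd_offset[Q_SIZE + write_ptr] = len;
}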
-
-/**
- * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @priv: driver private data; the reading comes from the latest uCode statistics
- *
- * A return of <0 indicates bogus data in the statistics
- */
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
-{
- s32 temperature;
- s32 vt;
- s32 R1, R2, R3;
- u32 R4;
-
- if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
- (priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
- IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
- } else {
- IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
- }
-
- /*
- * Temperature is only 23 bits, so sign extend out to 32.
- *
- * NOTE If we haven't received a statistics notification yet
- * with an updated temperature, use R4 provided to us in the
- * "initialize" ALIVE response.
- */
- if (!test_bit(STATUS_TEMPERATURE, &priv->status))
- vt = sign_extend32(R4, 23);
- else
- vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
- general.common.temperature), 23);
-
- IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
-
- if (R3 == R1) {
- IWL_ERR(priv, "Calibration conflict R1 == R3\n");
- return -1;
- }
-
- /* Calculate temperature in degrees Kelvin, adjust by 97%.
- * Add offset to center the adjustment around 0 degrees Centigrade. */
- temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
- temperature /= (R3 - R1);
- temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
-
- IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
- temperature, KELVIN_TO_CELSIUS(temperature));
-
- return temperature;
-}
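/*
 * A standalone sketch of the calibration formula above.  calib_a and
 * kelvin_offset stand in for TEMPERATURE_CALIB_A_VAL and
 * TEMPERATURE_CALIB_KELVIN_OFFSET, whose values live in the 4965 headers
 * and are not shown here.
 */
static int calib_temperature(int vt, int r1, int r2, int r3,
			     int calib_a, int kelvin_offset)
{
	int t;

	if (r3 == r1)		/* the driver rejects this as a calibration conflict */
		return -1;

	t = calib_a * (vt - r2);	/* linear fit through the reference points */
	t /= (r3 - r1);
	/* scale by 97 % and recentre, as in the code above */
	return (t * 97) / 100 + kelvin_offset;
}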
-
-/* Adjust Txpower only if temperature variance is greater than threshold. */
-#define IWL_TEMPERATURE_THRESHOLD 3
-
-/**
- * iwl4965_is_temp_calib_needed - determines if new calibration is needed
- *
- * If the temperature has changed sufficiently, then a recalibration
- * is needed.
- *
- * Assumes caller will replace priv->last_temperature once calibration
- * has run.
- */
-static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- if (!test_bit(STATUS_STATISTICS, &priv->status)) {
- IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
- return 0;
- }
-
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
-
- if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
- IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
-
- return 1;
-}
-
-static void iwl4965_temperature_calib(struct iwl_priv *priv)
-{
- s32 temp;
-
- temp = iwl4965_hw_get_temperature(priv);
- if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
- return;
-
- if (priv->temperature != temp) {
- if (priv->temperature)
- IWL_DEBUG_TEMP(priv, "Temperature changed "
- "from %dC to %dC\n",
- KELVIN_TO_CELSIUS(priv->temperature),
- KELVIN_TO_CELSIUS(temp));
- else
- IWL_DEBUG_TEMP(priv, "Temperature "
- "initialized to %dC\n",
- KELVIN_TO_CELSIUS(temp));
- }
-
- priv->temperature = temp;
- set_bit(STATUS_TEMPERATURE, &priv->status);
-
- if (!priv->disable_tx_power_cal &&
- unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- iwl4965_is_temp_calib_needed(priv))
- queue_work(priv->workqueue, &priv->txpower_work);
-}
-
-static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return (u16) sizeof(struct iwl4965_rxon_cmd);
- default:
- return len;
- }
-}
-
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cmd->tid_disable_tx;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
- addsta->sleep_tx_count = cmd->sleep_tx_count;
- addsta->reserved1 = cpu_to_le16(0);
- addsta->reserved2 = cpu_to_le16(0);
-
- return (u16)sizeof(struct iwl4965_addsta_cmd);
-}
-
-static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
-{
- return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-/**
- * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl4965_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = tx_resp->u.agg_status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* num frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
-
- IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
- status & 0xff, tx_resp->failure_frame);
- IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
- return -1;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
-
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
-
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
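/*
 * A simplified standalone sketch of the block-ack bitmap built above: each
 * attempted frame sets bit (idx - start) in a 64-bit window map.  The
 * driver additionally re-anchors the window and handles sequence-number
 * wraparound (the sh < 0 and sh > 64 branches), which this sketch omits.
 */
#include <stdint.h>

static uint64_t agg_pending_bitmap(const int *frame_idx, int frame_count,
				   int start)
{
	uint64_t bitmap = 0;
	int i;

	for (i = 0; i < frame_count; i++) {
		int sh = frame_idx[i] - start;	/* offset inside the Tx window */

		if (sh >= 0 && sh < 64)
			bitmap |= 1ULL << sh;
	}
	return bitmap;
}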
-
-static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i;
- int start = 0;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
- addr, priv->num_stations);
-
- out:
- /*
-	 * It is possible that more commands interacting with stations
-	 * arrive before we have finished processing the addition of the
-	 * station
- */
- if (ret != IWL_INVALID_STATION &&
- (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
- ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
- (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
- IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
- ret);
- ret = IWL_INVALID_STATION;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- return IWL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return iwl4965_find_station(priv, da);
- }
-}
-
-/**
- * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
- */
-static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *info;
- struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->u.status);
- int uninitialized_var(tid);
- int sta_id;
- int freed;
- u8 *qc = NULL;
- unsigned long flags;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- memset(&info->status, 0, sizeof(info->status));
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- }
-
- sta_id = iwl4965_get_ra_sta_id(priv, hdr);
- if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
- IWL_ERR(priv, "Station not known\n");
- return;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (txq->sched_retry) {
- const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
- WARN_ON(!qc);
-
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
- txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
- "%d index %d\n", scd_ssn , index);
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc)
- iwl4965_free_tfds_in_queue(priv, sta_id,
- tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
- && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
- }
- } else {
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
- "rate_n_flags 0x%x retries %d\n",
- txq_id,
- iwl4965_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
- else if (sta_id == IWL_INVALID_STATION)
- IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
- iwl_legacy_wake_queue(priv, txq);
- }
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
-
- iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
- u8 rate __maybe_unused =
- iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
- "tsf:0x%.8x%.8x rate:%d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
-{
- /* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
- /* Tx response */
- priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-}
-
-static struct iwl_hcmd_ops iwl4965_hcmd = {
- .rxon_assoc = iwl4965_send_rxon_assoc,
- .commit_rxon = iwl4965_commit_rxon,
- .set_rxon_chain = iwl4965_set_rxon_chain,
-};
-
-static void iwl4965_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl4965_post_associate(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
-
- if (!vif || !priv->is_open)
- return;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl_legacy_commit_rxon(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- vif->bss_conf.aid, ctx->active.bssid_addr);
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl4965_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, vif->type);
- break;
- }
-
-	/* the chain noise calibration will enable PM upon completion
- * If chain noise has already been run, then we need to enable
- * power management here */
- if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
- iwl_legacy_power_update_mode(priv, false);
-
- /* Enable Rx differential gain and sensitivity calibrations */
- iwl4965_chain_noise_reset(priv);
- priv->start_calib = 1;
-}
-
-static void iwl4965_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int ret = 0;
-
- lockdep_assert_held(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!iwl_legacy_is_associated_ctx(ctx)) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing failed - "
- "Attempting to continue.\n");
-
- /* AP has all antennas */
- priv->chain_noise_data.active_chains =
- priv->hw_params.valid_rx_ant;
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* need to send beacon cmd before committing assoc RXON! */
- iwl4965_send_beacon_cmd(priv);
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
- }
- iwl4965_send_beacon_cmd(priv);
-}
-
-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
- .get_hcmd_size = iwl4965_get_hcmd_size,
- .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
- .request_scan = iwl4965_request_scan,
- .post_scan = iwl4965_post_scan,
-};
-
-static struct iwl_lib_ops iwl4965_lib = {
- .set_hw_params = iwl4965_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
- .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl4965_hw_txq_free_tfd,
- .txq_init = iwl4965_hw_tx_queue_init,
- .rx_handler_setup = iwl4965_rx_handler_setup,
- .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
- .init_alive_start = iwl4965_init_alive_start,
- .load_ucode = iwl4965_load_bsm,
- .dump_nic_error_log = iwl4965_dump_nic_error_log,
- .dump_fh = iwl4965_dump_fh,
- .set_channel_switch = iwl4965_hw_channel_switch,
- .apm_ops = {
- .init = iwl_legacy_apm_init,
- .config = iwl4965_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
- },
- .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
- .release_semaphore = iwl4965_eeprom_release_semaphore,
- },
- .send_tx_power = iwl4965_send_tx_power,
- .update_chain_flags = iwl4965_update_chain_flags,
- .temp_ops = {
- .temperature = iwl4965_temperature_calib,
- },
- .debugfs_ops = {
- .rx_stats_read = iwl4965_ucode_rx_stats_read,
- .tx_stats_read = iwl4965_ucode_tx_stats_read,
- .general_stats_read = iwl4965_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl4965_legacy_ops = {
- .post_associate = iwl4965_post_associate,
- .config_ap = iwl4965_config_ap,
- .manage_ibss_station = iwl4965_manage_ibss_station,
- .update_bcast_stations = iwl4965_update_bcast_stations,
-};
-
-struct ieee80211_ops iwl4965_hw_ops = {
- .tx = iwl4965_mac_tx,
- .start = iwl4965_mac_start,
- .stop = iwl4965_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl4965_configure_filter,
- .set_key = iwl4965_mac_set_key,
- .update_tkip_key = iwl4965_mac_update_tkip_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .ampdu_action = iwl4965_mac_ampdu_action,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl4965_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .channel_switch = iwl4965_mac_channel_switch,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static const struct iwl_ops iwl4965_ops = {
- .lib = &iwl4965_lib,
- .hcmd = &iwl4965_hcmd,
- .utils = &iwl4965_hcmd_utils,
- .led = &iwl4965_led_ops,
- .legacy = &iwl4965_legacy_ops,
- .ieee80211_ops = &iwl4965_hw_ops,
-};
-
-static struct iwl_base_params iwl4965_base_params = {
- .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
- .num_of_queues = IWL49_NUM_QUEUES,
- .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
- .pll_cfg_val = 0,
- .set_l0s = true,
- .use_bsm = true,
- .led_compensation = 61,
- .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
- .temperature_kelvin = true,
- .ucode_tracing = true,
- .sensitivity_calib_by_driver = true,
- .chain_noise_calib_by_driver = true,
-};
-
-struct iwl_cfg iwl4965_cfg = {
- .name = "Intel(R) Wireless WiFi Link 4965AGN",
- .fw_name_pre = IWL4965_FW_PRE,
- .ucode_api_max = IWL4965_UCODE_API_MAX,
- .ucode_api_min = IWL4965_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_ABC,
- .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
- .ops = &iwl4965_ops,
- .mod_params = &iwl4965_mod_params,
- .base_params = &iwl4965_base_params,
- .led_mode = IWL_LED_BLINK,
- /*
- * Force use of chains B and C for scan RX on 5 GHz band
- * because the device has off-channel reception on chain A.
- */
- .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
-};
-
-/* Module firmware */
-MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163daf1..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_4965_h__
-#define __iwl_4965_h__
-
-#include "iwl-dev.h"
-
-/* configuration for the _4965 devices */
-extern struct iwl_cfg iwl4965_cfg;
-
-extern struct iwl_mod_params iwl4965_mod_params;
-
-extern struct ieee80211_ops iwl4965_hw_ops;
-
-/* tx queue */
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed);
-
-/* RXON */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/* uCode */
-int iwl4965_verify_ucode(struct iwl_priv *priv);
-
-/* lib */
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status);
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_hw_nic_init(struct iwl_priv *priv);
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-
-/* rx */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv);
-void iwl4965_rx_replenish(struct iwl_priv *priv);
-void iwl4965_rx_replenish_now(struct iwl_priv *priv);
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rxq_stop(struct iwl_priv *priv);
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_handle(struct iwl_priv *priv);
-
-/* tx */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid);
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id);
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
-
-/*
- * Acquire priv->lock before calling this function!
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE: Acquire priv->lock before calling this function!
- */
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry);
-
-static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool iwl4965_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS) ||
- (status == TX_STATUS_DIRECT_DONE);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
-
-/* rx */
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-bool iwl4965_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/* scan */
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-
-/* station mgmt */
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-
-/* hcmd */
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status);
-#else
-static inline const char *
-iwl4965_get_tx_fail_reason(u32 status) { return ""; }
-#endif
-
-/* station management */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_add_bssid_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r);
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_set_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
- int sta_id, int tid);
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn);
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid);
-void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
- int sta_id, int cnt);
-int iwl4965_update_bcast_stations(struct iwl_priv *priv);
-
-/* rate */
-static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
-{
- return BIT(ant_idx) << RATE_MCS_ANT_POS;
-}
-
-static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
-{
- return le32_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
-{
- return cpu_to_le32(flags|(u32)rate);
-}
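Editorial note, not part of the original patch: the two inline helpers above show that rate_n_flags keeps the PLCP rate value in the low 8 bits and the flag bits above it (the driver additionally converts to/from little-endian). A minimal host-side sketch of the same packing, with hypothetical names and the endianness conversion left out:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors "flags | rate" and the "& 0xFF" extraction
 * used by the helpers above, ignoring the cpu_to_le32/le32_to_cpu step. */
static uint32_t example_set_rate_n_flags(uint8_t rate, uint32_t flags)
{
	return flags | (uint32_t)rate;
}

static uint8_t example_get_rate(uint32_t rate_n_flags)
{
	return rate_n_flags & 0xFF;
}

int main(void)
{
	uint32_t packed = example_set_rate_n_flags(0x0d, 0x4000);

	printf("rate 0x%02x, flags 0x%x\n",
	       (unsigned int)example_get_rate(packed),
	       (unsigned int)(packed & ~0xFFu));
	return 0;
}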
-
-/* eeprom */
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
-int iwl4965_eeprom_check_version(struct iwl_priv *priv);
-
-/* mac80211 handlers (for 4965) */
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwl4965_mac_start(struct ieee80211_hw *hw);
-void iwl4965_mac_stop(struct ieee80211_hw *hw);
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast);
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key);
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size);
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
-
-#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659310d..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
-
-
-MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
-/*
- * If bt_coex_active is true, uCode will do kill/defer every time the
- * priority line is asserted (BT is sending signals on the priority line
- * in the PCIx).
- * If bt_coex_active is false, uCode will ignore the BT activity and
- * perform normal operation.
- *
- * Users might experience transmit issues on some platforms due to this
- * WiFi/BT co-existence problem. The typical symptom is being able to
- * scan and find all the available APs, but not being able to associate
- * with any of them.
- * On those platforms, WiFi communication can be restored by setting the
- * "bt_coex_active" module parameter to "false".
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-static bool bt_coex_active = true;
-module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
-
-u32 iwlegacy_debug_level;
-EXPORT_SYMBOL(iwlegacy_debug_level);
-
-const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwlegacy_bcast_addr);
-
-
-/* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
-{
- struct iwl_priv *priv;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- struct ieee80211_hw *hw;
-
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
- cfg->ops->ieee80211_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n",
- cfg->name);
- goto out;
- }
-
- priv = hw->priv;
- priv->hw = hw;
-
-out:
- return hw;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_all);
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
-{
- u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
- ht_info->cap = 0;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- ht_info->ht_supported = true;
-
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- ht_info->mcs.rx_mask[4] = 0x01;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
- }
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
- ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
- ht_info->mcs.rx_mask[0] = 0xFF;
- if (rx_chains_num >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains_num >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
-
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains_num;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains_num != rx_chains_num) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-}
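Editorial sketch, not part of the original patch: the rx_highest value advertised above is simply the per-chain maximum (72 Mbps at 20 MHz, 150 Mbps when HT40 is allowed) multiplied by the number of Rx chains, so two Rx chains without HT40 report 144 Mbps and MCS 0-15 (rx_mask[0] and rx_mask[1] set). A small stand-alone illustration; the helper name is hypothetical:

#include <stdio.h>

/* Mirrors the max_bit_rate computation above: 72 Mbps per chain at
 * 20 MHz, 150 Mbps per chain when HT40 is usable. Illustrative only. */
static unsigned int example_ht_rx_highest(unsigned int rx_chains, int ht40_ok)
{
	return (ht40_ok ? 150 : 72) * rx_chains;
}

int main(void)
{
	/* 2 Rx chains, 20 MHz only -> 144 Mbps, matching the code above */
	printf("rx_highest = %u Mbps\n", example_ht_rx_highest(2, 0));
	return 0;
}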
-
-/**
- * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM
- */
-int iwl_legacy_init_geos(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *channels;
- struct ieee80211_channel *geo_ch;
- struct ieee80211_rate *rates;
- int i = 0;
- s8 max_tx_power = 0;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
- IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
- return 0;
- }
-
- channels = kzalloc(sizeof(struct ieee80211_channel) *
- priv->channel_count, GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
- GFP_KERNEL);
- if (!rates) {
- kfree(channels);
- return -ENOMEM;
- }
-
- /* 5.2GHz channels start after the 2.4GHz channels */
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
- /* just OFDM */
- sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_5GHZ);
-
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
- sband->channels = channels;
- /* OFDM & CCK */
- sband->bitrates = rates;
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_2GHZ);
-
- priv->ieee_channels = channels;
- priv->ieee_rates = rates;
-
- for (i = 0; i < priv->channel_count; i++) {
- ch = &priv->channel_info[i];
-
- if (!iwl_legacy_is_channel_valid(ch))
- continue;
-
- sband = &priv->bands[ch->band];
-
- geo_ch = &sband->channels[sband->n_channels++];
-
- geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel, ch->band);
- geo_ch->max_power = ch->max_power_avg;
- geo_ch->max_antenna_gain = 0xff;
- geo_ch->hw_value = ch->channel;
-
- if (iwl_legacy_is_channel_valid(ch)) {
- if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
- if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
- if (ch->flags & EEPROM_CHANNEL_RADAR)
- geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
- geo_ch->flags |= ch->ht40_extension_channel;
-
- if (ch->max_power_avg > max_tx_power)
- max_tx_power = ch->max_power_avg;
- } else {
- geo_ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-
- IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
- ch->channel, geo_ch->center_freq,
- iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
- geo_ch->flags & IEEE80211_CHAN_DISABLED ?
- "restricted" : "valid",
- geo_ch->flags);
- }
-
- priv->tx_power_device_lmt = max_tx_power;
- priv->tx_power_user_lmt = max_tx_power;
- priv->tx_power_next = max_tx_power;
-
- if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & IWL_SKU_A) {
- IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
- priv->pci_dev->device,
- priv->pci_dev->subsystem_device);
- priv->cfg->sku &= ~IWL_SKU_A;
- }
-
- IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- priv->bands[IEEE80211_BAND_2GHZ].n_channels,
- priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_geos);
-
-/*
- * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
- */
-void iwl_legacy_free_geos(struct iwl_priv *priv)
-{
- kfree(priv->ieee_channels);
- kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_free_geos);
-
-static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
- enum ieee80211_band band,
- u16 channel, u8 extension_chan_offset)
-{
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info))
- return false;
-
- if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40PLUS);
- else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40MINUS);
-
- return false;
-}
-
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
-{
- if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
- return false;
-
- /*
-	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
-	 * the bit will not be set in the pure 40 MHz case.
- */
- if (ht_cap && !ht_cap->ht_supported)
- return false;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- if (priv->disable_ht40)
- return false;
-#endif
-
- return iwl_legacy_is_channel_extension(priv, priv->band,
- le16_to_cpu(ctx->staging.channel),
- ctx->ht.extension_chan_offset);
-}
-EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
-
-static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
-{
- u16 new_val;
- u16 beacon_factor;
-
- /*
- * If mac80211 hasn't given us a beacon interval, program
- * the default into the device.
- */
- if (!beacon_val)
- return DEFAULT_BEACON_INTERVAL;
-
- /*
- * If the beacon interval we obtained from the peer
- * is too large, we'll have to wake up more often
- * (and in IBSS case, we'll beacon too much)
- *
- * For example, if max_beacon_val is 4096, and the
- * requested beacon interval is 7000, we'll have to
- * use 3500 to be able to wake up on the beacons.
- *
- * This could badly influence beacon detection stats.
- */
-
- beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
- new_val = beacon_val / beacon_factor;
-
- if (!new_val)
- new_val = max_beacon_val;
-
- return new_val;
-}
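Editorial sketch, not part of the original patch: the worked example from the comment above (max_beacon_val 4096, requested interval 7000) can be checked with a stand-alone copy of the same arithmetic; the helper name here is hypothetical:

#include <stdio.h>

/* Same rounding as iwl_legacy_adjust_beacon_interval() above, minus the
 * "no beacon interval given" fallback. Illustrative only. */
static unsigned int example_adjust_beacon_interval(unsigned int val,
						   unsigned int max_val)
{
	unsigned int factor = (val + max_val) / max_val;
	unsigned int new_val = val / factor;

	return new_val ? new_val : max_val;
}

int main(void)
{
	/* (7000 + 4096) / 4096 = 2, so the device is programmed with 3500 */
	printf("%u\n", example_adjust_beacon_interval(7000, 4096));
	return 0;
}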
-
-int
-iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- u64 tsf;
- s32 interval_tm, rem;
- struct ieee80211_conf *conf = NULL;
- u16 beacon_int;
- struct ieee80211_vif *vif = ctx->vif;
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- lockdep_assert_held(&priv->mutex);
-
- memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
-
- ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
- ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
-
- beacon_int = vif ? vif->bss_conf.beacon_int : 0;
-
- /*
- * TODO: For IBSS we need to get atim_window from mac80211,
- * for now just always use 0
- */
- ctx->timing.atim_window = 0;
-
- beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
- priv->hw_params.max_beacon_itrvl * TIME_UNIT);
- ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
-
-	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
- interval_tm = beacon_int * TIME_UNIT;
- rem = do_div(tsf, interval_tm);
- ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
-
- ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
-
- IWL_DEBUG_ASSOC(priv,
- "beacon interval %d beacon timer %d beacon tim %d\n",
- le16_to_cpu(ctx->timing.beacon_interval),
- le32_to_cpu(ctx->timing.beacon_init_val),
- le16_to_cpu(ctx->timing.atim_window));
-
- return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
- sizeof(ctx->timing), &ctx->timing);
-}
-EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
-
-void
-iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (hw_decrypt)
- rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
- else
- rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
-
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
-
-/* validate RXON structure is valid */
-int
-iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
- bool error = false;
-
- if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
- if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong narrow\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong radar\n");
- error = true;
- }
- } else {
- if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "check 5.2G: not short slot!\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_CCK_MSK) {
- IWL_WARN(priv, "check 5.2G: CCK!\n");
- error = true;
- }
- }
- if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
- IWL_WARN(priv, "mac/bssid mcast!\n");
- error = true;
- }
-
- /* make sure basic rates 6Mbps and 1Mbps are supported */
- if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
- (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
- IWL_WARN(priv, "neither 1 nor 6 are basic\n");
- error = true;
- }
-
- if (le16_to_cpu(rxon->assoc_id) > 2007) {
- IWL_WARN(priv, "aid > 2007\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "CCK and short slot\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
- IWL_WARN(priv, "CCK and auto detect");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
- RXON_FLG_TGG_PROTECT_MSK)) ==
- RXON_FLG_TGG_PROTECT_MSK) {
- IWL_WARN(priv, "TGg but no auto-detect\n");
- error = true;
- }
-
- if (error)
- IWL_WARN(priv, "Tuning to channel %d\n",
- le16_to_cpu(rxon->channel));
-
- if (error) {
- IWL_ERR(priv, "Invalid RXON\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
-
-/**
- * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
- * @priv: staging_rxon is compared to active_rxon
- *
- * If the RXON structure is changing enough to require a new tune,
- * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
- * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
- */
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *active = &ctx->active;
-
-#define CHK(cond) \
- if ((cond)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
- return 1; \
- }
-
-#define CHK_NEQ(c1, c2) \
- if ((c1) != (c2)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " \
- #c1 " != " #c2 " - %d != %d\n", \
- (c1), (c2)); \
- return 1; \
- }
-
- /* These items are only settable from the full RXON command */
- CHK(!iwl_legacy_is_associated_ctx(ctx));
- CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
- CHK(compare_ether_addr(staging->node_addr, active->node_addr));
- CHK(compare_ether_addr(staging->wlap_bssid_addr,
- active->wlap_bssid_addr));
- CHK_NEQ(staging->dev_type, active->dev_type);
- CHK_NEQ(staging->channel, active->channel);
- CHK_NEQ(staging->air_propagation, active->air_propagation);
- CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
- active->ofdm_ht_single_stream_basic_rates);
- CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
- active->ofdm_ht_dual_stream_basic_rates);
- CHK_NEQ(staging->assoc_id, active->assoc_id);
-
- /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
- * be updated with the RXON_ASSOC command -- however only some
- * flag transitions are allowed using RXON_ASSOC */
-
- /* Check if we are not switching bands */
- CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
- active->flags & RXON_FLG_BAND_24G_MSK);
-
- /* Check if we are switching association toggle */
- CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
- active->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-#undef CHK
-#undef CHK_NEQ
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- /*
-	 * Assign the lowest rate -- should really get this from the
-	 * beacon skb provided by mac80211.
- */
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
- return IWL_RATE_1M_PLCP;
- else
- return IWL_RATE_6M_PLCP;
-}
-EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
-
-static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (!ctx->ht.enabled) {
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
- RXON_FLG_HT40_PROT_MSK |
- RXON_FLG_HT_PROT_MSK);
- return;
- }
-
- rxon->flags |= cpu_to_le32(ctx->ht.protection <<
- RXON_FLG_HT_OPERATING_MODE_POS);
-
- /* Set up channel bandwidth:
- * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
-	/* clear the HT channel mode before setting the mode */
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
- /* pure ht40 */
- if (ctx->ht.protection ==
- IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- }
- } else {
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- default:
- /* channel location only valid if in Mixed mode */
- IWL_ERR(priv,
- "invalid extension channel offset\n");
- break;
- }
- }
- } else {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
- }
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
- "extension channel offset 0x%x\n",
- le32_to_cpu(rxon->flags), ctx->ht.protection,
- ctx->ht.extension_chan_offset);
-}
-
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
-{
- struct iwl_rxon_context *ctx;
-
- for_each_context(priv, ctx)
- _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
-
-/* Return valid, unused, channel for a passive scan to reset the RF */
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
-{
- const struct iwl_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
- struct iwl_rxon_context *ctx;
-
- if (band == IEEE80211_BAND_5GHZ) {
- min = 14;
- max = priv->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
- bool busy = false;
-
- for_each_context(priv, ctx) {
- busy = priv->channel_info[i].channel ==
- le16_to_cpu(ctx->staging.channel);
- if (busy)
- break;
- }
-
- if (busy)
- continue;
-
- channel = priv->channel_info[i].channel;
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (iwl_legacy_is_channel_valid(ch_info))
- break;
- }
-
- return channel;
-}
-EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
-
-/**
- * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
- * @ch: requested channel as a pointer to struct ieee80211_channel
- *
- * NOTE: Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the ch->band
- */
-int
-iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx)
-{
- enum ieee80211_band band = ch->band;
- u16 channel = ch->hw_value;
-
- if ((le16_to_cpu(ctx->staging.channel) == channel) &&
- (priv->band == band))
- return 0;
-
- ctx->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
- ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
- else
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-
- priv->band = band;
-
- IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
-
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- if (band == IEEE80211_BAND_5GHZ) {
- ctx->staging.flags &=
- ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
- | RXON_FLG_CCK_MSK);
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- } else {
- /* Copied from iwl_post_associate() */
- if (vif && vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
- ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
- ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
-
-/*
- * initialize rxon structure with default values from eeprom
- */
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_channel_info *ch_info;
-
- memset(&ctx->staging, 0, sizeof(ctx->staging));
-
- if (!ctx->vif) {
- ctx->staging.dev_type = ctx->unused_devtype;
- } else
- switch (ctx->vif->type) {
-
- case NL80211_IFTYPE_STATION:
- ctx->staging.dev_type = ctx->station_devtype;
- ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- case NL80211_IFTYPE_ADHOC:
- ctx->staging.dev_type = ctx->ibss_devtype;
- ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
- ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
- RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- default:
- IWL_ERR(priv, "Unsupported interface type %d\n",
- ctx->vif->type);
- break;
- }
-
-#if 0
- /* TODO: Figure out when short_preamble would be set and cache from
- * that */
- if (!hw_to_local(priv->hw)->short_preamble)
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-#endif
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band,
- le16_to_cpu(ctx->active.channel));
-
- if (!ch_info)
- ch_info = &priv->channel_info[0];
-
- ctx->staging.channel = cpu_to_le16(ch_info->channel);
- priv->band = ch_info->band;
-
- iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- ctx->staging.cck_basic_rates =
- (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- /* clear both MIX and PURE40 mode flag */
- ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
- RXON_FLG_CHANNEL_MODE_PURE_40);
- if (ctx->vif)
- memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
-
- ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
- ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
-}
-EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
-
-void iwl_legacy_set_rate(struct iwl_priv *priv)
-{
- const struct ieee80211_supported_band *hw = NULL;
- struct ieee80211_rate *rate;
- struct iwl_rxon_context *ctx;
- int i;
-
- hw = iwl_get_hw_mode(priv, priv->band);
- if (!hw) {
- IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
- return;
- }
-
- priv->active_rate = 0;
-
- for (i = 0; i < hw->n_bitrates; i++) {
- rate = &(hw->bitrates[i]);
- if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
- priv->active_rate |= (1 << rate->hw_value);
- }
-
- IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
-
- for_each_context(priv, ctx) {
- ctx->staging.cck_basic_rates =
- (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_rate);
-
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- ieee80211_chswitch_done(ctx->vif, is_success);
-}
-EXPORT_SYMBOL(iwl_legacy_chswitch_done);
-
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
-
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return;
-
- if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_rx_csa);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
- iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
- IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
- le16_to_cpu(rxon->channel));
- IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
- IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
- le32_to_cpu(rxon->filter_flags));
- IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
- IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
- rxon->ofdm_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
- rxon->cck_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
- IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
- IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
- le16_to_cpu(rxon->assoc_id));
-}
-EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
-#endif
-/**
- * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
-{
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
-
- /* Cancel currently queued command. */
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
- IWL_ERR(priv, "Loaded firmware version: %s\n",
- priv->hw->wiphy->fw_version);
-
- priv->cfg->ops->lib->dump_nic_error_log(priv);
- if (priv->cfg->ops->lib->dump_fh)
- priv->cfg->ops->lib->dump_fh(priv, NULL, false);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
- iwl_legacy_print_rx_config_cmd(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
- wake_up(&priv->wait_command_queue);
-
- /* Keep the restart process from trying to send host
- * commands by clearing the INIT status bit */
- clear_bit(STATUS_READY, &priv->status);
-
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
- "Restarting adapter due to uCode error.\n");
-
- if (priv->cfg->mod_params->restart_fw)
- queue_work(priv->workqueue, &priv->restart);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
-
-static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* stop device's busmaster DMA activity */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
- ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
- if (ret)
- IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
-
- IWL_DEBUG_INFO(priv, "stop master\n");
-
- return ret;
-}
-
-void iwl_legacy_apm_stop(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
-
- /* Stop device's DMA activity */
- iwl_legacy_apm_stop_master(priv);
-
- /* Reset the entire device */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-EXPORT_SYMBOL(iwl_legacy_apm_stop);
-
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-int iwl_legacy_apm_init(struct iwl_priv *priv)
-{
- int ret = 0;
- u16 lctl;
-
- IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
-
- /*
- * Use "set_bit" below rather than "write", to preserve any hardware
- * bits already set by default after reset.
- */
-
- /* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
- /*
- * Disable L0s without affecting L1;
- * don't wait for ICH L0s (ICH bug W/A)
- */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
- /* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
- CSR_DBG_HPET_MEM_REG_VAL);
-
- /*
- * Enable HAP INTA (interrupt from management bus) to
- * wake device's PCI Express link L1a -> L0s
- * NOTE: This is no-op for 3945 (non-existent bit)
- */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
- /*
- * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
- */
- if (priv->cfg->base_params->set_l0s) {
- lctl = iwl_legacy_pcie_link_ctl(priv);
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
- /* L1-ASPM enabled; disable(!) L0S */
- iwl_legacy_set_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
- } else {
- /* L1-ASPM disabled; enable(!) L0S */
- iwl_legacy_clear_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
- }
- }
-
- /* Configure analog phase-lock-loop before activating to D0A */
- if (priv->cfg->base_params->pll_cfg_val)
- iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
- priv->cfg->base_params->pll_cfg_val);
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_legacy_write_prph()
- * and accesses to uCode SRAM.
- */
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
- if (ret < 0) {
- IWL_DEBUG_INFO(priv, "Failed to init the card\n");
- goto out;
- }
-
- /*
- * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
-	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
- *
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
- */
- if (priv->cfg->base_params->use_bsm)
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
- else
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
-
- /* Disable L1-Active */
- iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_apm_init);
-
-
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
-{
- int ret;
- s8 prev_tx_power;
- bool defer;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->tx_power_user_lmt == tx_power && !force)
- return 0;
-
- if (!priv->cfg->ops->lib->send_tx_power)
- return -EOPNOTSUPP;
-
-	/* 0 dBm means 1 milliwatt */
- if (tx_power < 0) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d below 1 mW.\n",
- tx_power);
- return -EINVAL;
- }
-
- if (tx_power > priv->tx_power_device_lmt) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d above upper limit %d.\n",
- tx_power, priv->tx_power_device_lmt);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
-	/* scan complete and commit_rxon use the tx_power_next value;
-	 * it always needs to be updated with the newest request */
- priv->tx_power_next = tx_power;
-
- /* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->status) ||
- memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
- if (defer && !force) {
- IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
- return 0;
- }
-
- prev_tx_power = priv->tx_power_user_lmt;
- priv->tx_power_user_lmt = tx_power;
-
- ret = priv->cfg->ops->lib->send_tx_power(priv);
-
- /* if fail to set tx_power, restore the orig. tx power */
- if (ret) {
- priv->tx_power_user_lmt = prev_tx_power;
- priv->tx_power_next = prev_tx_power;
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_set_tx_power);
-
-void iwl_legacy_send_bt_config(struct iwl_priv *priv)
-{
- struct iwl_bt_cmd bt_cmd = {
- .lead_time = BT_LEAD_TIME_DEF,
- .max_kill = BT_MAX_KILL_DEF,
- .kill_ack_mask = 0,
- .kill_cts_mask = 0,
- };
-
- if (!bt_coex_active)
- bt_cmd.flags = BT_COEX_DISABLE;
- else
- bt_cmd.flags = BT_COEX_ENABLE;
-
- IWL_DEBUG_INFO(priv, "BT coex %s\n",
- (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
-
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd))
- IWL_ERR(priv, "failed to send BT Coex Config\n");
-}
-EXPORT_SYMBOL(iwl_legacy_send_bt_config);
-
-int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
-{
- struct iwl_statistics_cmd statistics_cmd = {
- .configuration_flags =
- clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
- };
-
- if (flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd, NULL);
- else
- return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
-
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
- IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
- sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
-
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
- "notification for %s:\n", len,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
-
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
- "seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
-{
- memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- ctx->qos_data.def_qos_parm.ac[q].cw_min =
- cpu_to_le16(params->cw_min);
- ctx->qos_data.def_qos_parm.ac[q].cw_max =
- cpu_to_le16(params->cw_max);
- ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- ctx->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
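Editorial sketch, not part of the original patch: iwl_legacy_mac_conf_tx() above reverses the queue index (q = AC_NUM - 1 - queue) and scales the TXOP by 32 before storing the per-AC EDCA parameters. The index mapping is easiest to see in isolation; AC_NUM being 4 is an assumption here, so use the driver's own definition:

#include <stdio.h>

#define EXAMPLE_AC_NUM 4	/* assumption: four EDCA access categories */

int main(void)
{
	int queue;

	/* Same inversion as q = AC_NUM - 1 - queue in the handler above */
	for (queue = 0; queue < EXAMPLE_AC_NUM; queue++)
		printf("mac80211 queue %d -> device AC index %d\n",
		       queue, EXAMPLE_AC_NUM - 1 - queue);
	return 0;
}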
-
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
-
-static int
-iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- iwl_legacy_connection_init_rx_config(priv, ctx);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- return iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static int iwl_legacy_setup_interface(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
-
- lockdep_assert_held(&priv->mutex);
-
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_legacy_set_mode(priv, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- return 0;
-}
-
-int
-iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *tmp, *ctx = NULL;
- int err;
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- vif->type, vif->addr);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Try to add interface when device not ready\n");
- err = -EINVAL;
- goto out;
- }
-
- for_each_context(priv, tmp) {
- u32 possible_modes =
- tmp->interface_modes | tmp->exclusive_interface_modes;
-
- if (tmp->vif) {
- /* check if this busy context is exclusive */
- if (tmp->exclusive_interface_modes &
- BIT(tmp->vif->type)) {
- err = -EINVAL;
- goto out;
- }
- continue;
- }
-
- if (!(possible_modes & BIT(vif->type)))
- continue;
-
-		/* found a possibly-usable context without an interface */
- ctx = tmp;
- break;
- }
-
- if (!ctx) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- vif_priv->ctx = ctx;
- ctx->vif = vif;
-
- err = iwl_legacy_setup_interface(priv, ctx);
- if (!err)
- goto out;
-
- ctx->vif = NULL;
- priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
-
-static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->scan_vif == vif) {
- iwl_legacy_scan_cancel_timeout(priv, 200);
- iwl_legacy_force_scan_end(priv);
- }
-
- if (!mode_change) {
- iwl_legacy_set_mode(priv, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
-}
-
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- WARN_ON(ctx->vif != vif);
- ctx->vif = NULL;
-
- iwl_legacy_teardown_interface(priv, vif, false);
-
- memset(priv->bssid, 0, ETH_ALEN);
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
-
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
-{
- if (!priv->txq)
- priv->txq = kzalloc(
- sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues,
- GFP_KERNEL);
- if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq\n");
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
-
-void iwl_legacy_txq_mem(struct iwl_priv *priv)
-{
- kfree(priv->txq);
- priv->txq = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_mem);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-
-#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
-
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
- priv->tx_traffic_idx = 0;
- priv->rx_traffic_idx = 0;
- if (priv->tx_traffic)
- memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
- if (priv->rx_traffic)
- memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-}
-
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
-
- if (iwlegacy_debug_level & IWL_DL_TX) {
- if (!priv->tx_traffic) {
- priv->tx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->tx_traffic)
- return -ENOMEM;
- }
- }
- if (iwlegacy_debug_level & IWL_DL_RX) {
- if (!priv->rx_traffic) {
- priv->rx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->rx_traffic)
- return -ENOMEM;
- }
- }
- iwl_legacy_reset_traffic_log(priv);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
-
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
- kfree(priv->tx_traffic);
- priv->tx_traffic = NULL;
-
- kfree(priv->rx_traffic);
- priv->rx_traffic = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
-
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
- return;
-
- if (!priv->tx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->tx_traffic +
- (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->tx_traffic_idx =
- (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
-
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
- return;
-
- if (!priv->rx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->rx_traffic +
- (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->rx_traffic_idx =
- (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
-
-const char *iwl_legacy_get_mgmt_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(MANAGEMENT_ASSOC_REQ);
- IWL_CMD(MANAGEMENT_ASSOC_RESP);
- IWL_CMD(MANAGEMENT_REASSOC_REQ);
- IWL_CMD(MANAGEMENT_REASSOC_RESP);
- IWL_CMD(MANAGEMENT_PROBE_REQ);
- IWL_CMD(MANAGEMENT_PROBE_RESP);
- IWL_CMD(MANAGEMENT_BEACON);
- IWL_CMD(MANAGEMENT_ATIM);
- IWL_CMD(MANAGEMENT_DISASSOC);
- IWL_CMD(MANAGEMENT_AUTH);
- IWL_CMD(MANAGEMENT_DEAUTH);
- IWL_CMD(MANAGEMENT_ACTION);
- default:
- return "UNKNOWN";
-
- }
-}
-
-const char *iwl_legacy_get_ctrl_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(CONTROL_BACK_REQ);
- IWL_CMD(CONTROL_BACK);
- IWL_CMD(CONTROL_PSPOLL);
- IWL_CMD(CONTROL_RTS);
- IWL_CMD(CONTROL_CTS);
- IWL_CMD(CONTROL_ACK);
- IWL_CMD(CONTROL_CFEND);
- IWL_CMD(CONTROL_CFENDACK);
- default:
- return "UNKNOWN";
-
- }
-}
-
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
-{
- memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
- memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
- * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
- * debugfs can then be used to display the tx/rx statistics.
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT and CTRL
- * information is recorded, but DATA packets are still counted because
- * iwl_led.c needs to control the LED blinking based on the number of
- * tx and rx data frames.
- */
-void
-iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
-{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &priv->tx_stats;
- else
- stats = &priv->rx_stats;
-
- if (ieee80211_is_mgmt(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
- stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
- stats->mgmt[MANAGEMENT_PROBE_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
- stats->mgmt[MANAGEMENT_PROBE_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BEACON):
- stats->mgmt[MANAGEMENT_BEACON]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ATIM):
- stats->mgmt[MANAGEMENT_ATIM]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
- stats->mgmt[MANAGEMENT_DISASSOC]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- stats->mgmt[MANAGEMENT_AUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- stats->mgmt[MANAGEMENT_DEAUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACTION):
- stats->mgmt[MANAGEMENT_ACTION]++;
- break;
- }
- } else if (ieee80211_is_ctl(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
- stats->ctrl[CONTROL_BACK_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BACK):
- stats->ctrl[CONTROL_BACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
- stats->ctrl[CONTROL_PSPOLL]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_RTS):
- stats->ctrl[CONTROL_RTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CTS):
- stats->ctrl[CONTROL_CTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACK):
- stats->ctrl[CONTROL_ACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFEND):
- stats->ctrl[CONTROL_CFEND]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
- stats->ctrl[CONTROL_CFENDACK]++;
- break;
- }
- } else {
- /* data */
- stats->data_cnt++;
- stats->data_bytes += len;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_update_stats);
-#endif
-
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
-{
- struct iwl_force_reset *force_reset;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- force_reset = &priv->force_reset;
- force_reset->reset_request_count++;
- if (!external) {
- if (force_reset->last_force_reset_jiffies &&
- time_after(force_reset->last_force_reset_jiffies +
- force_reset->reset_duration, jiffies)) {
- IWL_DEBUG_INFO(priv, "force reset rejected\n");
- force_reset->reset_reject_count++;
- return -EAGAIN;
- }
- }
- force_reset->reset_success_count++;
- force_reset->last_force_reset_jiffies = jiffies;
-
- /*
- * If the request is external (e.g. from debugfs), always perform it
- * regardless of the module parameter setting.
- * If the request is internal (uCode error or driver-detected failure),
- * the fw_restart module parameter must be checked before reloading
- * the firmware.
- */
-
- if (!external && !priv->cfg->mod_params->restart_fw) {
- IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
- "module parameter setting\n");
- return 0;
- }
-
- IWL_ERR(priv, "On demand firmware reload\n");
-
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
- wake_up(&priv->wait_command_queue);
- /*
- * Keep the restart process from trying to send host
- * commands by clearing the INIT status bit
- */
- clear_bit(STATUS_READY, &priv->status);
- queue_work(priv->workqueue, &priv->restart);
-
- return 0;
-}
-
-int
-iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *tmp;
- u32 interface_modes;
- int err;
-
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
- mutex_lock(&priv->mutex);
-
- if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
- /*
- * Huh? But wait ... this can maybe happen when
- * we're in the middle of a firmware restart!
- */
- err = -EBUSY;
- goto out;
- }
-
- interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
- if (!(interface_modes & BIT(newtype))) {
- err = -EBUSY;
- goto out;
- }
-
- if (ctx->exclusive_interface_modes & BIT(newtype)) {
- for_each_context(priv, tmp) {
- if (ctx == tmp)
- continue;
-
- if (!tmp->vif)
- continue;
-
- /*
- * The current mode switch would be exclusive, but
- * another context is active ... refuse the switch.
- */
- err = -EBUSY;
- goto out;
- }
- }
-
- /* success */
- iwl_legacy_teardown_interface(priv, vif, true);
- vif->type = newtype;
- vif->p2p = newp2p;
- err = iwl_legacy_setup_interface(priv, ctx);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
- err = 0;
-
- out:
- mutex_unlock(&priv->mutex);
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
-
-/*
- * On every watchdog tick we check the (latest) time stamp. If it does not
- * change during the timeout period and the queue is not empty, we reset the firmware.
- */
-static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
-{
- struct iwl_tx_queue *txq = &priv->txq[cnt];
- struct iwl_queue *q = &txq->q;
- unsigned long timeout;
- int ret;
-
- if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
- return 0;
- }
-
- timeout = txq->time_stamp +
- msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
-
- if (time_after(jiffies, timeout)) {
- IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
- q->id, priv->cfg->base_params->wd_timeout);
- ret = iwl_legacy_force_reset(priv, false);
- return (ret == -EAGAIN) ? 0 : 1;
- }
-
- return 0;
-}
-
-/*
- * Making the watchdog tick a quarter of the timeout ensures we will
- * discover a hung queue between timeout and 1.25*timeout
- */
-#define IWL_WD_TICK(timeout) ((timeout) / 4)
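-/*
- * For example (illustrative numbers): with wd_timeout = 2000 ms the timer
- * fires every IWL_WD_TICK(2000) = 500 ms, so a queue that hangs right after
- * a check is declared stuck somewhere between 2000 ms and 2500 ms
- * (i.e. between timeout and 1.25 * timeout) after its last activity.
- */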
-
-/*
- * Watchdog timer callback: we check each tx queue for a stuck condition;
- * if one is hung we reset the firmware. If everything is fine we just rearm the timer.
- */
-void iwl_legacy_bg_watchdog(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
- int cnt;
- unsigned long timeout;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- timeout = priv->cfg->base_params->wd_timeout;
- if (timeout == 0)
- return;
-
- /* monitor and check for stuck cmd queue */
- if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
- return;
-
- /* monitor and check for other stuck queues */
- if (iwl_legacy_is_any_associated(priv)) {
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == priv->cmd_queue)
- continue;
- if (iwl_legacy_check_stuck_queue(priv, cnt))
- return;
- }
- }
-
- mod_timer(&priv->watchdog, jiffies +
- msecs_to_jiffies(IWL_WD_TICK(timeout)));
-}
-EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
-
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
-{
- unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
- if (timeout)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
-
-/*
- * Extended beacon time format:
- * the time in usec is converted into a 32-bit value in extended:internal format,
- * where the extended part is the beacon count and
- * the internal part is the time in usec within one beacon interval
- */
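-/*
- * Worked example (illustrative values; beacon_time_tsf_bits is HW dependent):
- * with beacon_interval = 100 TU, interval = 100 * TIME_UNIT = 102400 usec.
- * For usec = 250000 and beacon_time_tsf_bits = 22, quot = 2 (whole beacon
- * intervals) and rem = 45200 usec, so the packed value is (2 << 22) + 45200.
- */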
-u32
-iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * TIME_UNIT;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) &
- (iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits) >>
- priv->hw_params.beacon_time_tsf_bits);
- rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
-
- return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
-}
-EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
-
-/* base is usually what we get from the uCode with each received frame;
- * it is the same as the HW timer counter counting down
- */
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits)) +
- (addon & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits));
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
- } else
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
-
- return cpu_to_le32(res);
-}
-EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
-
-#ifdef CONFIG_PM
-
-int iwl_legacy_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
-
- /*
- * This function is called when the system goes into suspend state.
- * mac80211 will call iwl_mac_stop() from its suspend handler first,
- * but since iwl_mac_stop() has no knowledge of who the caller is,
- * it will not call apm_ops.stop() to stop the DMA operation.
- * Call apm_ops.stop() here to make sure the DMA is stopped.
- */
- iwl_legacy_apm_stop(priv);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_suspend);
-
-int iwl_legacy_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- bool hw_rfkill = false;
-
- /*
- * We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state.
- */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl_legacy_enable_interrupts(priv);
-
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
-
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_resume);
-
-const struct dev_pm_ops iwl_legacy_pm_ops = {
- .suspend = iwl_legacy_pci_suspend,
- .resume = iwl_legacy_pci_resume,
- .freeze = iwl_legacy_pci_suspend,
- .thaw = iwl_legacy_pci_resume,
- .poweroff = iwl_legacy_pci_suspend,
- .restore = iwl_legacy_pci_resume,
-};
-EXPORT_SYMBOL(iwl_legacy_pm_ops);
-
-#endif /* CONFIG_PM */
-
-static void
-iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (!ctx->is_active)
- return;
-
- ctx->qos_data.def_qos_parm.qos_flags = 0;
-
- if (ctx->qos_data.qos_active)
- ctx->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
- if (ctx->ht.enabled)
- ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- ctx->qos_data.qos_active,
- ctx->qos_data.def_qos_parm.qos_flags);
-
- iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
- sizeof(struct iwl_qosparam_cmd),
- &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = conf->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct iwl_rxon_context *ctx;
- unsigned long flags = 0;
- int ret = 0;
- u16 ch;
- int scan_active = 0;
- bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return -EOPNOTSUPP;
-
- mutex_lock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
- channel->hw_value, changed);
-
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
- scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "scan active\n");
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_SMPS |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
- /* mac80211 uses static for non-HT which is what we want */
- priv->current_ht_config.smps = conf->smps_mode;
-
- /*
- * Recalculate chain counts.
- *
- * If monitor mode is enabled then mac80211 will
- * set up the SM PS mode to OFF if an HT channel is
- * configured.
- */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- for_each_context(priv, ctx)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* During scanning, mac80211 will delay the channel setting until
- * the scan finishes, with changed = 0
- */
- if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- if (scan_active)
- goto set_ch_out;
-
- ch = channel->hw_value;
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !iwl_legacy_is_channel_ibss(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- /* Configure HT40 channels */
- if (ctx->ht.enabled != conf_is_ht(conf)) {
- ctx->ht.enabled = conf_is_ht(conf);
- ht_changed[ctx->ctxid] = true;
- }
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- /*
- * Default to no protection. Protection mode will
- * later be set from BSS config in iwl_ht_conf
- */
- ctx->ht.protection =
- IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
- /* if we are switching from ht to 2.4, clear the flags
- * from any ht-related info, since 2.4 does not
- * support ht */
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
-
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->cfg->ops->legacy->update_bcast_stations)
- ret =
- priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_legacy_set_rate(priv);
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_IDLE)) {
- ret = iwl_legacy_power_update_mode(priv, false);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_legacy_set_tx_power(priv, conf->power_level, false);
- }
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- goto out;
- }
-
- if (scan_active)
- goto out;
-
- for_each_context(priv, ctx) {
- if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
- else
- IWL_DEBUG_INFO(priv,
- "Not re-sending same RXON configuration.\n");
- if (ht_changed[ctx->ctxid])
- iwl_legacy_update_qos(priv, ctx);
- }
-
-out:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- /* IBSS can only be the IWL_RXON_CTX_BSS context */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- spin_lock_irqsave(&priv->lock, flags);
- memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* new association: get rid of the IBSS beacon skb */
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = NULL;
-
- priv->timestamp = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_legacy_scan_cancel_timeout(priv, 100);
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* we are restarting the association process;
- * clear the RXON_FILTER_ASSOC_MSK bit
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- iwl_legacy_set_rate(priv);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_legacy_ht_conf(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct ieee80211_sta *sta;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_ASSOC(priv, "enter:\n");
-
- if (!ctx->ht.enabled)
- return;
-
- ctx->ht.protection =
- bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- ctx->ht.non_gf_sta_present =
- !!(bss_conf->ht_operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- ht_conf->single_chain_sufficient = false;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int maxstreams;
-
- maxstreams = (ht_cap->mcs.tx_params &
- IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
- maxstreams += 1;
-
- if ((ht_cap->mcs.rx_mask[1] == 0) &&
- (ht_cap->mcs.rx_mask[2] == 0))
- ht_conf->single_chain_sufficient = true;
- if (maxstreams <= 1)
- ht_conf->single_chain_sufficient = true;
- } else {
- /*
- * If this happens at all, it can only be through a race
- * when the AP disconnects us while we're still
- * setting up the connection; in that case mac80211
- * will soon tell us about it.
- */
- ht_conf->single_chain_sufficient = true;
- }
- rcu_read_unlock();
- break;
- case NL80211_IFTYPE_ADHOC:
- ht_conf->single_chain_sufficient = true;
- break;
- default:
- break;
- }
-
- IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * sent
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = 0;
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
- if (!skb)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "update beacon but no beacon context!\n");
- dev_kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = skb;
-
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return;
- }
-
- priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- int ret;
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_alive(priv)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (changes & BSS_CHANGED_QOS) {
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ctx->qos_data.qos_active = bss_conf->qos;
- iwl_legacy_update_qos(priv, ctx);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- /*
- * the add_interface code must make sure we only ever
- * have a single interface that could be beaconing at
- * any time.
- */
- if (vif->bss_conf.enable_beacon)
- priv->beacon_ctx = ctx;
- else
- priv->beacon_ctx = NULL;
- }
-
- if (changes & BSS_CHANGED_BSSID) {
- IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
- /*
- * If there is currently a HW scan going on in the
- * background then we need to cancel it else the RXON
- * below/in post_associate will fail.
- */
- if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv,
- "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv,
- "leaving - scan abort failed.\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* mac80211 only sets assoc when in STATION mode */
- if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
-
- /* currently needed in a few places */
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- } else {
- ctx->staging.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- }
-
- }
-
- /*
- * This needs to be after setting the BSSID in case
- * mac80211 decides to do both changes at once because
- * it will invoke post_associate.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
- iwl_legacy_beacon_update(hw, vif);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv,
- "ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot &&
- (priv->band != IEEE80211_BAND_5GHZ))
- ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
- }
-
- if (changes & BSS_CHANGED_BASIC_RATES) {
- /* XXX use this information
- *
- * To do that, remove code from iwl_legacy_set_rate() and put something
- * like this here:
- *
- if (A-band)
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates;
- else
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates >> 4;
- ctx->staging.cck_basic_rates =
- bss_conf->basic_rates & 0xF;
- */
- }
-
- if (changes & BSS_CHANGED_HT) {
- iwl_legacy_ht_conf(priv, vif);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
- priv->timestamp = bss_conf->timestamp;
-
- if (!iwl_legacy_is_rfkill(priv))
- priv->cfg->ops->legacy->post_associate(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
- IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
- changes);
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (!ret) {
- /* Sync active_rxon with latest change. */
- memcpy((void *)&ctx->active,
- &ctx->staging,
- sizeof(struct iwl_legacy_rxon_cmd));
- }
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- if (vif->bss_conf.enable_beacon) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- priv->cfg->ops->legacy->config_ap(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes & BSS_CHANGED_IBSS) {
- ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
- if (ret)
- IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
- bss_conf->bssid);
- }
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 inta_fh;
- unsigned long flags;
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta && !inta_fh) {
- IWL_DEBUG_ISR(priv,
- "Ignore interrupt, inta == 0, inta_fh == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
-
- inta &= ~CSR_INT_BIT_SCD;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta || inta_fh))
- tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only re-enable if they were disabled by the irq handler */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_legacy_isr);
-
-/*
- * iwl_legacy_tx_cmd_protection: Set rts/cts. The 3945 and 4965 share only
- * this function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags)
-{
- if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- *tx_flags |= TX_CMD_FLG_RTS_MSK;
- *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- if (!ieee80211_is_mgmt(fc))
- return;
-
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- break;
- }
- } else if (info->control.rates[0].flags &
- IEEE80211_TX_RC_USE_CTS_PROTECT) {
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe07d4..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_core_h__
-#define __iwl_legacy_core_h__
-
-/************************
- * forward declarations *
- ************************/
-struct iwl_host_cmd;
-struct iwl_cmd;
-
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-#define IWL_PCI_DEVICE(dev, subdev, cfg) \
- .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
- .driver_data = (kernel_ulong_t)&(cfg)
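-/*
- * Illustrative expansion (device ID and cfg name are placeholders only):
- *     {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)}
- * becomes
- *     { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x4229,
- *       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
- *       .driver_data = (kernel_ulong_t)&iwl4965_cfg }
- */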
-
-#define TIME_UNIT 1024
-
-#define IWL_SKU_G 0x1
-#define IWL_SKU_A 0x2
-#define IWL_SKU_N 0x8
-
-#define IWL_CMD(x) case x: return #x
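-/*
- * For example, the statement IWL_CMD(MANAGEMENT_BEACON); inside a switch
- * expands to
- *     case MANAGEMENT_BEACON: return "MANAGEMENT_BEACON";
- * which is how iwl_legacy_get_mgmt_string() and iwl_legacy_get_ctrl_string()
- * map enum values to their names.
- */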
-
-struct iwl_hcmd_ops {
- int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- void (*set_rxon_chain)(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-};
-
-struct iwl_hcmd_utils_ops {
- u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
- u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data);
- int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
- void (*post_scan)(struct iwl_priv *priv);
-};
-
-struct iwl_apm_ops {
- int (*init)(struct iwl_priv *priv);
- void (*config)(struct iwl_priv *priv);
-};
-
-struct iwl_debugfs_ops {
- ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-};
-
-struct iwl_temp_ops {
- void (*temperature)(struct iwl_priv *priv);
-};
-
-struct iwl_lib_ops {
- /* set hw dependent parameters */
- int (*set_hw_params)(struct iwl_priv *priv);
- /* Handling TX */
- void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
- int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr,
- u16 len, u8 reset, u8 pad);
- void (*txq_free_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- int (*txq_init)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- /* setup Rx handler */
- void (*rx_handler_setup)(struct iwl_priv *priv);
- /* alive notification after init uCode load */
- void (*init_alive_start)(struct iwl_priv *priv);
- /* check validity of rtc data address */
- int (*is_valid_rtc_data_addr)(u32 addr);
- /* 1st ucode load */
- int (*load_ucode)(struct iwl_priv *priv);
-
- void (*dump_nic_error_log)(struct iwl_priv *priv);
- int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
- int (*set_channel_switch)(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch);
- /* power management */
- struct iwl_apm_ops apm_ops;
-
- /* power */
- int (*send_tx_power) (struct iwl_priv *priv);
- void (*update_chain_flags)(struct iwl_priv *priv);
-
- /* eeprom operations (as defined in iwl-eeprom.h) */
- struct iwl_eeprom_ops eeprom_ops;
-
- /* temperature */
- struct iwl_temp_ops temp_ops;
-
- struct iwl_debugfs_ops debugfs_ops;
-
-};
-
-struct iwl_led_ops {
- int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-};
-
-struct iwl_legacy_ops {
- void (*post_associate)(struct iwl_priv *priv);
- void (*config_ap)(struct iwl_priv *priv);
- /* station management */
- int (*update_bcast_stations)(struct iwl_priv *priv);
- int (*manage_ibss_station)(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-};
-
-struct iwl_ops {
- const struct iwl_lib_ops *lib;
- const struct iwl_hcmd_ops *hcmd;
- const struct iwl_hcmd_utils_ops *utils;
- const struct iwl_led_ops *led;
- const struct iwl_nic_ops *nic;
- const struct iwl_legacy_ops *legacy;
- const struct ieee80211_ops *ieee80211_ops;
-};
-
-struct iwl_mod_params {
- int sw_crypto; /* def: 0 = using hardware encryption */
- int disable_hw_scan; /* def: 0 = use h/w scan */
- int num_of_queues; /* def: HW dependent */
- int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
- int antenna; /* def: 0 = both antennas (use diversity) */
- int restart_fw; /* def: 1 = restart firmware */
-};
-
-/*
- * @led_compensation: compensate the LED on/off time per HW according
- * to the deviation, to achieve the desired LED frequency.
- * The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @wd_timeout: TX queues watchdog timeout (in ms)
- * @temperature_kelvin: temperature reported by uCode in kelvin
- * @ucode_tracing: support ucode continuous tracing
- * @sensitivity_calib_by_driver: driver has the capability to perform
- * sensitivity calibration operation
- * @chain_noise_calib_by_driver: driver has the capability to perform
- * chain noise calibration operation
- */
-struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- int num_of_ampdu_queues;/* def: HW dependent */
- /* for iwl_legacy_apm_init() */
- u32 pll_cfg_val;
- bool set_l0s;
- bool use_bsm;
-
- u16 led_compensation;
- int chain_noise_num_beacons;
- unsigned int wd_timeout;
- bool temperature_kelvin;
- const bool ucode_tracing;
- const bool sensitivity_calib_by_driver;
- const bool chain_noise_calib_by_driver;
-};
-
-/**
- * struct iwl_cfg
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- * (.ucode) will be added to filename before loading from disk. The
- * filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @scan_rx_antennas: available antennas for the scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- * Driver interacts with Firmware API version >= 2.
- * } else {
- * Driver interacts with Firmware API version 1.
- * }
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision. That is, through utilizing the
- * iwl_hcmd_utils_ops etc. we accommodate different command structures
- * and flows between hardware versions as well as their API
- * versions.
- *
- */
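-/*
- * Illustrative filename construction (prefix and version here are only
- * examples, not tied to a specific device): if fw_name_pre were
- * "iwlwifi-4965-" and the highest supported API version were 2, the driver
- * would request the firmware file "iwlwifi-4965-2.ucode".
- */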
-struct iwl_cfg {
- /* params specific to an individual device within a device family */
- const char *name;
- const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_min;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- unsigned int sku;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
- const struct iwl_ops *ops;
- /* module based parameters which can be set from modprobe cmd */
- const struct iwl_mod_params *mod_params;
- /* params not likely to change within a device family */
- struct iwl_base_params *base_params;
- /* params likely to change within a device family */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- enum iwl_led_mode led_mode;
-};
-
-/***************************
- * L i b *
- ***************************/
-
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt);
-int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
- struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band);
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf);
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap);
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_rate(struct iwl_priv *priv);
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats);
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
-int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p);
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
-void iwl_legacy_txq_mem(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-const char *iwl_legacy_get_mgmt_string(int cmd);
-const char *iwl_legacy_get_ctrl_string(int cmd);
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
-void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
- u16 len);
-#else
-static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- return 0;
-}
-static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
- __le16 fc, u16 len)
-{
-}
-#endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
-void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q);
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
-/* TX helpers */
-
-/*****************************************************
-* TX
-******************************************************/
-void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
-/*****************************************************
- * TX power
- ****************************************************/
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-
-/*******************************************************************************
- * Rate
- ******************************************************************************/
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/*******************************************************************************
- * Scanning
- ******************************************************************************/
-void iwl_legacy_init_scan_params(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-void iwl_legacy_force_scan_end(struct iwl_priv *priv);
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
-void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
-u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
- struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ie, int ie_len, int left);
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes);
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
-
-/* For faster active scanning, scan will move to the next channel if fewer than
- * PLCP_QUIET_THRESH packets are heard on this channel within
- * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
- * time if it's a quiet channel (nothing responded to our probe, and there's
- * no other traffic).
- * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
-#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
-#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
-
-#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
-
-/*****************************************************
- * S e n d i n g H o s t C o m m a n d s *
- *****************************************************/
-
-const char *iwl_legacy_get_cmd_string(u8 cmd);
-int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
- struct iwl_host_cmd *cmd);
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
- u16 len, const void *data);
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
- const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt));
-
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-
-
-/*****************************************************
- * PCI *
- *****************************************************/
-
-static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
-{
- int pos;
- u16 pci_lnk_ctl;
- pos = pci_pcie_cap(priv->pci_dev);
- pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
-void iwl_legacy_bg_watchdog(unsigned long data);
-u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval);
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval);
-
-#ifdef CONFIG_PM
-int iwl_legacy_pci_suspend(struct device *device);
-int iwl_legacy_pci_resume(struct device *device);
-extern const struct dev_pm_ops iwl_legacy_pm_ops;
-
-#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define IWL_LEGACY_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
-
-/*****************************************************
-* Error Handling Debugging
-******************************************************/
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
-
-/*****************************************************
-* GEOS
-******************************************************/
-int iwl_legacy_init_geos(struct iwl_priv *priv);
-void iwl_legacy_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS *****/
-
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_CT_KILL 4
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_CHANNEL_SWITCH_PENDING 18
-
-static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
-{
- return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_legacy_is_init(struct iwl_priv *priv)
-{
- return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
-{
- return iwl_legacy_is_rfkill_hw(priv);
-}
-
-static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
-{
-
- if (iwl_legacy_is_rfkill(priv))
- return 0;
-
- return iwl_legacy_is_ready(priv);
-}
-
-extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
-extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
- u8 flags, bool clear);
-void iwl_legacy_apm_stop(struct iwl_priv *priv);
-int iwl_legacy_apm_init(struct iwl_priv *priv);
-
-int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
-}
-static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
-static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
- struct iwl_priv *priv, enum ieee80211_band band)
-{
- return priv->hw->wiphy->bands[band];
-}
-
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data);
-
-#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112701b..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_debug_h__
-#define __iwl_legacy_debug_h__
-
-struct iwl_priv;
-extern u32 iwlegacy_debug_level;
-
-#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
-
-#define iwl_print_hex_error(priv, p, len) \
-do { \
- print_hex_dump(KERN_ERR, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...) \
-do { \
- if (iwl_legacy_get_debug_level(__priv) & (level)) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
-do { \
- if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define iwl_print_hex_dump(priv, level, p, len) \
-do { \
- if (iwl_legacy_get_debug_level(priv) & level) \
- print_hex_dump(KERN_DEBUG, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
- const void *p, u32 len)
-{}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
-#else
-static inline int
-iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- return 0;
-}
-static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-/*
- * To use the debug system:
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of
- *
- * #define IWL_DL_xxxx VALUE
- *
- * where xxxx should be the name of the classification (for example, WEP).
- *
- * You then need to either add an IWL_DEBUG_xxxx() macro definition for your
- * classification, or use IWL_DEBUG(priv, IWL_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * The active debug levels can be accessed via files
- *
- * /sys/module/iwl4965/parameters/debug{50}
- * /sys/module/iwl3945/parameters/debug
- * /sys/class/net/wlan0/device/debug_level
- *
- * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
- */
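-/*
- * Example (a sketch): to enable scan debugging at runtime one could write
- * the IWL_DL_SCAN bit, (1 << 11) == 0x800, to the sysfs file listed above:
- *     echo 0x800 > /sys/class/net/wlan0/device/debug_level
- * and then emit messages with IWL_DEBUG_SCAN(priv, "scan done\n"); or,
- * equivalently, IWL_DEBUG(priv, IWL_DL_SCAN, ...).
- */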
-
-/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
-/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
-/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-#define IWL_DL_NOTIF (1 << 10)
-#define IWL_DL_SCAN (1 << 11)
-/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-#define IWL_DL_TXPOWER (1 << 14)
-#define IWL_DL_AP (1 << 15)
-/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
-/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
-/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
-#define IWL_DL_IO (1 << 27)
-/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_QOS (1 << 31)
-
-#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
-#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
-#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
-#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
-#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
-#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
-#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
-#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
-#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
-#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
-#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
-#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
-#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
-#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
-#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
-#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
-#define IWL_DEBUG_ASSOC(p, f, a...) \
- IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
-#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
-#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
-#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
-#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
-
-#endif
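A minimal illustrative sketch (separate from the removed header above) of how the classification macros from iwl-debug.h were typically consumed by driver code; the call site, its arguments and the message text here are hypothetical:

/* Hypothetical caller: output appears only when CONFIG_IWLWIFI_LEGACY_DEBUG
 * is set and the IWL_DL_SCAN bit is enabled in the runtime debug level. */
static void example_scan_complete(struct iwl_priv *priv, int n_found)
{
        IWL_DEBUG_SCAN(priv, "scan finished, %d BSS(es) found\n", n_found);

        /* Rate-limited variant for paths that can fire very often. */
        IWL_DEBUG_RATE_LIMIT(priv, "rate scaling tick\n");
}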
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 996996a7165..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1313 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/ieee80211.h>
-#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* create and remove of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, priv, \
- &iwl_legacy_dbgfs_##name##_ops)) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-
-static int
-iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
-
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->tx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->tx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->tx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->tx_stats.data_bytes);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- u32 clear_flag;
- char buf[8];
- int buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &clear_flag) != 1)
- return -EFAULT;
- iwl_legacy_clear_traffic_stats(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->rx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->rx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->rx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->rx_stats.data_bytes);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-#define BYTE1_MASK 0x000000ff
-#define BYTE2_MASK 0x0000ffff
-#define BYTE3_MASK 0x00ffffff
-static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- u32 val;
- char *buf;
- ssize_t ret;
- int i;
- int pos = 0;
- struct iwl_priv *priv = file->private_data;
- size_t bufsz;
-
- /* default is to dump the entire data segment */
- if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
- priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs_sram_len = priv->ucode_init_data.len;
- else
- priv->dbgfs_sram_len = priv->ucode_data.len;
- }
- bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs_sram_len);
- pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs_sram_offset);
- for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
- val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
- priv->dbgfs_sram_len - i);
- if (i < 4) {
- switch (i) {
- case 1:
- val &= BYTE1_MASK;
- break;
- case 2:
- val &= BYTE2_MASK;
- break;
- case 3:
- val &= BYTE3_MASK;
- break;
- }
- }
- if (!(i % 16))
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[64];
- int buf_size;
- u32 offset, len;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs_sram_offset = offset;
- priv->dbgfs_sram_len = len;
- } else {
- priv->dbgfs_sram_offset = 0;
- priv->dbgfs_sram_len = 0;
- }
-
- return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_station_entry *station;
- int max_sta = priv->hw_params.max_stations;
- char *buf;
- int i, j, pos = 0;
- ssize_t ret;
- /* Add 30 for initial string */
- const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
-
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
- priv->num_stations);
-
- for (i = 0; i < max_sta; i++) {
- station = &priv->stations[i];
- if (!station->used)
- continue;
- pos += scnprintf(buf + pos, bufsz - pos,
- "station %d - addr: %pM, flags: %#x\n",
- i, station->sta.sta.addr,
- station->sta.station_flags_msk);
- pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\tframes\ttfds\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "start_idx\tbitmap\t\t\trate_n_flags\n");
-
- for (j = 0; j < MAX_TID_COUNT; j++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
- j, station->tid[j].seq_number,
- station->tid[j].agg.txq_id,
- station->tid[j].agg.frame_count,
- station->tid[j].tfds_in_queue,
- station->tid[j].agg.start_idx,
- station->tid[j].agg.bitmap,
- station->tid[j].agg.rate_n_flags);
-
- if (station->tid[j].agg.wait_for_ba)
- pos += scnprintf(buf + pos, bufsz - pos,
- " - waitforba");
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
- char __user *user_buf,
- size_t count,
- loff_t *ppos)
-{
- ssize_t ret;
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0, buf_size = 0;
- const u8 *ptr;
- char *buf;
- u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
- buf_size = 4 * eeprom_len + 256;
-
- if (eeprom_len % 16) {
- IWL_ERR(priv, "NVM size is not multiple of 16.\n");
- return -ENODATA;
- }
-
- ptr = priv->eeprom;
- if (!ptr) {
- IWL_ERR(priv, "Invalid EEPROM memory\n");
- return -ENOMEM;
- }
-
- /* 4 characters for byte 0xYY */
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
- "version: 0x%x\n", eeprom_ver);
- for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct ieee80211_channel *channels = NULL;
- const struct ieee80211_supported_band *supp_band = NULL;
- int pos = 0, i, bufsz = PAGE_SIZE;
- char *buf;
- ssize_t ret;
-
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 2.4GHz band (802.11bg):\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 5.2GHz band (802.11a):\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[512];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
- test_bit(STATUS_HCMD_ACTIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
- test_bit(STATUS_POWER_PMI, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
- test_bit(STATUS_FW_ERROR, &priv->status));
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = 24 * 64; /* 24 items * 64 char per item */
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Interrupt Statistics Report:\n");
-
- pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
- priv->isr_stats.hw);
- pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
- priv->isr_stats.sw);
- if (priv->isr_stats.sw || priv->isr_stats.hw) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tLast Restarting Code: 0x%X\n",
- priv->isr_stats.err_code);
- }
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
- priv->isr_stats.sch);
- pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
- priv->isr_stats.alive);
-#endif
- pos += scnprintf(buf + pos, bufsz - pos,
- "HW RF KILL switch toggled:\t %u\n",
- priv->isr_stats.rfkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
- priv->isr_stats.ctkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
- priv->isr_stats.wakeup);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx command responses:\t\t %u\n",
- priv->isr_stats.rx);
- for (cnt = 0; cnt < REPLY_MAX; cnt++) {
- if (priv->isr_stats.rx_handlers[cnt] > 0)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tRx handler[%36s]:\t\t %u\n",
- iwl_legacy_get_cmd_string(cnt),
- priv->isr_stats.rx_handlers[cnt]);
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
- priv->isr_stats.tx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
- priv->isr_stats.unhandled);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- u32 reset_flag;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &reset_flag) != 1)
- return -EFAULT;
- if (reset_flag == 0)
- iwl_legacy_clear_isr_stats(priv);
-
- return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_rxon_context *ctx;
- int pos = 0, i;
- char buf[256 * NUM_IWL_RXON_CTX];
- const size_t bufsz = sizeof(buf);
-
- for_each_context(priv, ctx) {
- pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
- ctx->ctxid);
- for (i = 0; i < AC_NUM; i++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tcw_min\tcw_max\taifsn\ttxop\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "AC[%d]\t%u\t%u\t%u\t%u\n", i,
- ctx->qos_data.def_qos_parm.ac[i].cw_min,
- ctx->qos_data.def_qos_parm.ac[i].cw_max,
- ctx->qos_data.def_qos_parm.ac[i].aifsn,
- ctx->qos_data.def_qos_parm.ac[i].edca_txop);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int ht40;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &ht40) != 1)
- return -EFAULT;
- if (!iwl_legacy_is_any_associated(priv))
- priv->disable_ht40 = ht40 ? true : false;
- else {
- IWL_ERR(priv, "Sta associated with AP - "
- "Change to 40MHz channel support is not allowed\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[100];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "11n 40MHz Mode: %s\n",
- priv->disable_ht40 ? "Disabled" : "Enabled");
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_READ_FILE_OPS(nvm);
-DEBUGFS_READ_FILE_OPS(stations);
-DEBUGFS_READ_FILE_OPS(channels);
-DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
-DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0;
- int cnt = 0, entry;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char *buf;
- int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
- const u8 *ptr;
- ssize_t ret;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate buffer\n");
- return -ENOMEM;
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "q[%d]: read_ptr: %u, write_ptr: %u\n",
- cnt, q->read_ptr, q->write_ptr);
- }
- if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
- ptr = priv->tx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "read: %u, write: %u\n",
- rxq->read, rxq->write);
-
- if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
- ptr = priv->rx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int traffic_log;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &traffic_log) != 1)
- return -EFAULT;
- if (traffic_log == 0)
- iwl_legacy_reset_traffic_log(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- const size_t bufsz = sizeof(char) * 64 *
- priv->cfg->base_params->num_of_queues;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u stop=%d"
- " swq_id=%#.2x (ac %d/hwq %d)\n",
- cnt, q->read_ptr, q->write_ptr,
- !!test_bit(cnt, priv->queue_stopped),
- txq->swq_id, txq->swq_id & 3,
- (txq->swq_id >> 2) & 0x1f);
- if (cnt >= 4)
- continue;
- /* for the ACs, display the stop count too */
- pos += scnprintf(buf + pos, bufsz - pos,
- " stop-count: %d\n",
- atomic_read(&priv->queue_stop_count[cnt]));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "closed_rb_num: Not Allocated\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
- ssize_t ret;
- struct iwl_sensitivity_data *data;
-
- data = &priv->sensitivity_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
- data->auto_corr_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc:\t\t %u\n",
- data->auto_corr_ofdm_mrc);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
- data->auto_corr_ofdm_x1);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc_x1:\t\t %u\n",
- data->auto_corr_ofdm_mrc_x1);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
- data->auto_corr_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
- data->auto_corr_cck_mrc);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_ofdm:\t\t %u\n",
- data->last_bad_plcp_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
- data->last_fa_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_cck:\t\t %u\n",
- data->last_bad_plcp_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
- data->last_fa_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
- data->nrg_curr_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
- data->nrg_prev_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
- for (cnt = 0; cnt < 10; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_value[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
- for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_silence_rssi[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
- data->nrg_silence_ref);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
- data->nrg_energy_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
- data->nrg_silence_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
- data->nrg_th_cck);
- pos += scnprintf(buf + pos, bufsz - pos,
- "nrg_auto_corr_silence_diff:\t %u\n",
- data->nrg_auto_corr_silence_diff);
- pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
- data->num_in_cck_no_fa);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
- data->nrg_th_ofdm);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-
-static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
- ssize_t ret;
- struct iwl_chain_noise_data *data;
-
- data = &priv->chain_noise_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
- data->active_chains);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
- data->chain_noise_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
- data->chain_noise_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
- data->chain_noise_c);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
- data->chain_signal_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
- data->chain_signal_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
- data->chain_signal_c);
- pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
- data->beacon_count);
-
- pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->disconn_array[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->delta_gain_code[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
- data->radio_write);
- pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
- data->state);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[60];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- u32 pwrsave_status;
-
- pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_REG_POWER_SAVE_STATUS_MSK;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
- pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
- (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
- (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
- (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
- "error");
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int clear;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &clear) != 1)
- return -EFAULT;
-
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- if (priv->cfg->ops->lib->dump_fh) {
- ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
- }
-
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[12];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
- priv->missed_beacon_threshold);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int missed;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &missed) != 1)
- return -EINVAL;
-
- if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
- missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
- priv->missed_beacon_threshold =
- IWL_MISSED_BEACON_THRESHOLD_DEF;
- else
- priv->missed_beacon_threshold = missed;
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[300];
- const size_t bufsz = sizeof(buf);
- struct iwl_force_reset *force_reset;
-
- force_reset = &priv->force_reset;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset requests: %d\n",
- force_reset->reset_request_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of successful reset requests: %d\n",
- force_reset->reset_success_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of rejected reset requests: %d\n",
- force_reset->reset_reject_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\treset duration: %lu\n",
- force_reset->reset_duration);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- int ret;
- struct iwl_priv *priv = file->private_data;
-
- ret = iwl_legacy_force_reset(priv, true);
-
- return ret ? ret : count;
-}
-
-static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int timeout;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &timeout) != 1)
- return -EINVAL;
- if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
- timeout = IWL_DEF_WD_TIMEOUT;
-
- priv->cfg->base_params->wd_timeout = timeout;
- iwl_legacy_setup_watchdog(priv);
- return count;
-}
-
-DEBUGFS_READ_FILE_OPS(rx_statistics);
-DEBUGFS_READ_FILE_OPS(tx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
-DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_general_stats);
-DEBUGFS_READ_FILE_OPS(sensitivity);
-DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(power_save_status);
-DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
-DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_READ_FILE_OPS(fh_reg);
-DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
-DEBUGFS_READ_FILE_OPS(rxon_flags);
-DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
-DEBUGFS_WRITE_FILE_OPS(wd_timeout);
-
-/*
- * Create the debugfs files and directories
- */
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
-
- dir_drv = debugfs_create_dir(name, phyd);
- if (!dir_drv)
- return -ENOMEM;
-
- priv->debugfs_dir = dir_drv;
-
- dir_data = debugfs_create_dir("data", dir_drv);
- if (!dir_data)
- goto err;
- dir_rf = debugfs_create_dir("rf", dir_drv);
- if (!dir_rf)
- goto err;
- dir_debug = debugfs_create_dir("debug", dir_drv);
- if (!dir_debug)
- goto err;
-
- DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
-
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
- &priv->disable_sens_cal);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
- &priv->disable_chain_noise_cal);
- DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
- &priv->disable_tx_power_cal);
- return 0;
-
-err:
- IWL_ERR(priv, "Can't create the debugfs directory\n");
- iwl_legacy_dbgfs_unregister(priv);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
-
-/*
- * Remove the debugfs files and directories
- */
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
- if (!priv->debugfs_dir)
- return;
-
- debugfs_remove_recursive(priv->debugfs_dir);
- priv->debugfs_dir = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
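A minimal sketch, separate from the removed file above, of the pattern iwl-debugfs.c used to expose a read-only debugfs file; the "example" file name, its contents and the register-time hook shown in the trailing comment are hypothetical:

static ssize_t iwl_legacy_dbgfs_example_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct iwl_priv *priv = file->private_data;
        char buf[32];
        int pos;

        /* Format a small report into a stack buffer, then copy it out. */
        pos = scnprintf(buf, sizeof(buf), "num_stations: %d\n",
                        priv->num_stations);
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

/* Generates iwl_legacy_dbgfs_example_ops wired to the read handler above. */
DEBUGFS_READ_FILE_OPS(example);

/* And in iwl_legacy_dbgfs_register():
 *      DEBUGFS_ADD_FILE(example, dir_debug, S_IRUSR);
 */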
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786edf56f..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-4965-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_legacy_dev_h__
-#define __iwl_legacy_dev_h__
-
-#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/wait.h>
-#include <net/ieee80211_radiotap.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-legacy-rs.h"
-
-struct iwl_tx_queue;
-
-/* CT-KILL constants */
-#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
-
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address((r)->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- /*
- * only for ASYNC commands
- * (which is somewhat stupid -- look at iwl-sta.c for instance
- * which duplicates a bunch of code because the callback isn't
- * invoked for SYNC commands; if it were, and its result were passed
- * through, this would be simpler...)
- */
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
-
- /* The CMD_SIZE_HUGE flag bit indicates that the command
- * structure is stored at the end of the shared queue memory. */
- u32 flags;
-
- DEFINE_DMA_UNMAP_ADDR(mapping);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues
- */
-struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
-
-/* One for each TFD */
-struct iwl_tx_info {
- struct sk_buff *skb;
- struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @bd: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
- struct iwl_queue q;
- void *tfds;
- struct iwl_device_cmd **cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_info *txb;
- unsigned long time_stamp;
- u8 need_update;
- u8 sched_retry;
- u8 active;
- u8 swq_id;
-};
-
-#define IWL_NUM_SCAN_RATES (2)
-
-struct iwl4965_channel_tgd_info {
- u8 type;
- s8 max_power;
-};
-
-struct iwl4965_channel_tgh_info {
- s64 last_radar_time;
-};
-
-#define IWL4965_MAX_RATE (33)
-
-struct iwl3945_clip_group {
- /* maximum power level to prevent clipping for each rate, derived by
- * us from this band's saturation power in EEPROM */
- const s8 clip_powers[IWL_MAX_RATES];
-};
-
-/* current Tx power values to use, one for each rate for each channel.
- * requested power is limited by:
- * -- regulatory EEPROM limits for this channel
- * -- hardware capabilities (clip-powers)
- * -- spectrum management
- * -- user preference (e.g. iwconfig)
- * when requested power is set, base power index must also be set. */
-struct iwl3945_channel_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
- s8 power_table_index; /* actual (compensated) index into gain table */
- s8 base_power_index; /* gain index for power at factory temp. */
- s8 requested_power; /* power (dBm) requested for this chnl/rate */
-};
-
-/* current scan Tx power values to use, one for each scan rate for each
- * channel. */
-struct iwl3945_scan_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
- s8 power_table_index; /* actual (compensated) index into gain table */
- s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
-};
-
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- * with one another!
- */
-struct iwl_channel_info {
- struct iwl4965_channel_tgd_info tgd;
- struct iwl4965_channel_tgh_info tgh;
- struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
- struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
- * HT40 channel */
-
- u8 channel; /* channel number */
- u8 flags; /* flags copied from EEPROM */
- s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
- s8 min_power; /* always 0 */
- s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
-
- u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
- u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
-
- /* HT40 channel info */
- s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- u8 ht40_flags; /* flags copied from EEPROM */
- u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-
- /* Radio/DSP gain settings for each "normal" data Tx rate.
- * These include, in addition to RF and DSP gain, a few fields for
- * remembering/modifying gain settings (indexes). */
- struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
-
- /* Radio/DSP gain settings for each scan rate, for directed scans. */
- struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
-};
-
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_UNUSED -1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES 10
-
-#define IWL_DEFAULT_CMD_QUEUE_NUM 4
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-enum {
- CMD_SYNC = 0,
- CMD_SIZE_NORMAL = 0,
- CMD_NO_SKB = 0,
- CMD_SIZE_HUGE = (1 << 0),
- CMD_ASYNC = (1 << 1),
- CMD_WANT_SKB = (1 << 2),
- CMD_MAPPED = (1 << 3),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for a scan command
- * (which is relatively huge; space is allocated separately).
- */
-struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- union {
- u32 flags;
- u8 val8;
- u16 val16;
- u32 val32;
- struct iwl_tx_cmd tx;
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
- } __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-
-struct iwl_host_cmd {
- const void *data;
- unsigned long reply_page;
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
- u32 flags;
- u16 len;
- u8 id;
-};
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
- __le32 *bd;
- dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
- u32 read;
- u32 write;
- u32 free_count;
- u32 write_actual;
- struct list_head rx_free;
- struct list_head rx_used;
- int need_update;
- struct iwl_rb_status *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
-};
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window
- * @bitmap1: High order, one bit for each frame pending ACK in Tx window
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
- u16 txq_id;
- u16 frame_count;
- u16 wait_for_ba;
- u16 start_idx;
- u64 bitmap;
- u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
- u8 state;
-};
-
-
-struct iwl_tid_data {
- u16 seq_number; /* 4965 only */
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
-};
-
-struct iwl_hw_key {
- u32 cipher;
- int keylen;
- u8 keyidx;
- u8 key[32];
-};
-
-union iwl_ht_rate_supp {
- u16 rates;
- struct {
- u8 siso_rate;
- u8 mimo_rate;
- };
-};
-
-#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN (0x1)
-
-struct iwl_ht_config {
- bool single_chain_sufficient;
- enum ieee80211_smps_mode smps; /* current smps mode */
-};
-
-/* QoS structures */
-struct iwl_qos_info {
- int qos_active;
- struct iwl_qosparam_cmd def_qos_parm;
-};
-
-/*
- * This structure should be accessed with sta_lock held. While station
- * addition is in progress (IWL_STA_UCODE_INPROGRESS), only the command
- * fields (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) may be accessed
- * without sta_lock held.
- */
-struct iwl_station_entry {
- struct iwl_legacy_addsta_cmd sta;
- struct iwl_tid_data tid[MAX_TID_COUNT];
- u8 used, ctxid;
- struct iwl_hw_key keyinfo;
- struct iwl_link_quality_cmd *lq;
-};
-
-struct iwl_station_priv_common {
- struct iwl_rxon_context *ctx;
- u8 sta_id;
-};
-
-/*
- * iwl_station_priv: Driver's private station information
- *
- * When mac80211 creates a station it reserves some space (hw->sta_data_size)
- * in the structure for use by the driver. This structure is placed in that
- * space.
- *
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl_station_priv {
- struct iwl_station_priv_common common;
- struct iwl_lq_sta lq_sta;
- atomic_t pending_frames;
- bool client;
- bool asleep;
-};
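/*
 * Illustrative sketch (hypothetical helper): how the space reserved via
 * hw->sta_data_size is typically reached from a mac80211 station, mirroring
 * the vif accessor iwl_legacy_rxon_ctx_from_vif() further down.
 */
static inline struct iwl_station_priv *
iwl_legacy_sta_priv_sketch(struct ieee80211_sta *sta)
{
	return (struct iwl_station_priv *)(void *)sta->drv_priv;
}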
-
-/**
- * struct iwl_vif_priv - driver's private per-interface information
- *
- * When mac80211 allocates a virtual interface, it can allocate
- * space for us to put data into.
- */
-struct iwl_vif_priv {
- struct iwl_rxon_context *ctx;
- u8 ibss_bssid_sta_id;
-};
-
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- void *v_addr; /* access by driver */
- dma_addr_t p_addr; /* access by card's busmaster DMA */
- u32 len; /* bytes */
-};
-
-/* uCode file layout */
-struct iwl_ucode_header {
- __le32 ver; /* major/minor/API/serial */
- struct {
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v1;
-};
-
-struct iwl4965_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-struct iwl_sensitivity_ranges {
- u16 min_nrg_cck;
- u16 max_nrg_cck;
-
- u16 nrg_th_cck;
- u16 nrg_th_ofdm;
-
- u16 auto_corr_min_ofdm;
- u16 auto_corr_min_ofdm_mrc;
- u16 auto_corr_min_ofdm_x1;
- u16 auto_corr_min_ofdm_mrc_x1;
-
- u16 auto_corr_max_ofdm;
- u16 auto_corr_max_ofdm_mrc;
- u16 auto_corr_max_ofdm_x1;
- u16 auto_corr_max_ofdm_mrc_x1;
-
- u16 auto_corr_max_cck;
- u16 auto_corr_max_cck_mrc;
- u16 auto_corr_min_cck;
- u16 auto_corr_min_cck_mrc;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @dma_chnl_num: Number of Tx DMA/FIFO channels
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations: max number of entries in the station table
- * @ht40_channel: bands in which 40MHz (HT40) channels are possible:
- * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_inst_size/@max_data_size/@max_bsm_size: uCode image size limits
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
- */
-struct iwl_hw_params {
- u8 max_txq_num;
- u8 dma_chnl_num;
- u16 scd_bc_tbls_size;
- u32 tfd_size;
- u8 tx_chains_num;
- u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 max_rxq_size;
- u16 max_rxq_log;
- u32 rx_page_order;
- u32 rx_wrt_ptr_reg;
- u8 max_stations;
- u8 ht40_channel;
- u8 max_beacon_itrvl; /* in 1024 ms */
- u32 max_inst_size;
- u32 max_data_size;
- u32 max_bsm_size;
- u32 ct_kill_threshold; /* value in hw-dependent units */
- u16 beacon_time_tsf_bits;
- const struct iwl_sensitivity_ranges *sens;
-};
-
-
-/******************************************************************************
- *
- * Functions implemented in core module which are forward declared here
- * for use by iwl-[4-5].c
- *
- * NOTE: The implementations of these functions are not hardware specific,
- * which is why they are in the core module files.
- *
- * Naming convention --
- * iwl_ <-- Is part of iwlwifi
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl4965_bg_ <-- Called from work queue context
- * iwl4965_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
-extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
-extern int iwl_legacy_queue_space(const struct iwl_queue *q);
-static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
-{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
-}
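/*
 * Worked example for the wrap-around branch above: with read_ptr = 28 and
 * write_ptr = 4 in a 32-entry queue, entries 28..31 and 0..3 are in use, so
 * iwl_legacy_queue_used(q, 30) is true while iwl_legacy_queue_used(q, 10)
 * is false.
 */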
-
-
-static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
- int is_huge)
-{
- /*
- * This is for the init calibration result and scan commands, which
- * require a buffer larger than TFD_MAX_PAYLOAD_SIZE: they use the
- * big buffer at the end of the command array.
- */
- if (is_huge)
- return q->n_window; /* must be power of 2 */
-
- /* Otherwise, use normal size buffers */
- return index & (q->n_window - 1);
-}
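/*
 * Worked example, assuming a 32-entry command window: a normal command at
 * index 37 lands in slot 37 & 31 = 5, while a "huge" command always uses
 * the extra slot q->n_window (32) at the end of the command array.
 */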
-
-
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
-
-#define IWL_OPERATION_MODE_AUTO 0
-#define IWL_OPERATION_MODE_HT_ONLY 1
-#define IWL_OPERATION_MODE_MIXED 2
-#define IWL_OPERATION_MODE_20MHZ 3
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
-
-/* Sensitivity and chain noise calibration */
-#define INITIALIZATION_VALUE 0xFFFF
-#define IWL4965_CAL_NUM_BEACONS 20
-#define IWL_CAL_NUM_BEACONS 16
-#define MAXIMUM_ALLOWED_PATHLOSS 15
-
-#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
-
-#define MAX_FA_OFDM 50
-#define MIN_FA_OFDM 5
-#define MAX_FA_CCK 50
-#define MIN_FA_CCK 5
-
-#define AUTO_CORR_STEP_OFDM 1
-
-#define AUTO_CORR_STEP_CCK 3
-#define AUTO_CORR_MAX_TH_CCK 160
-
-#define NRG_DIFF 2
-#define NRG_STEP_CCK 2
-#define NRG_MARGIN 8
-#define MAX_NUMBER_CCK_NO_FA 100
-
-#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
-
-#define CHAIN_A 0
-#define CHAIN_B 1
-#define CHAIN_C 2
-#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
-#define ALL_BAND_FILTER 0xFF00
-#define IN_BAND_FILTER 0xFF
-#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
-
-#define NRG_NUM_PREV_STAT_L 20
-#define NUM_RX_CHAINS 3
-
-enum iwl4965_false_alarm_state {
- IWL_FA_TOO_MANY = 0,
- IWL_FA_TOO_FEW = 1,
- IWL_FA_GOOD_RANGE = 2,
-};
-
-enum iwl4965_chain_noise_state {
- IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
- IWL_CHAIN_NOISE_ACCUMULATE,
- IWL_CHAIN_NOISE_CALIBRATED,
- IWL_CHAIN_NOISE_DONE,
-};
-
-enum iwl4965_calib_enabled_state {
- IWL_CALIB_DISABLED = 0, /* must be 0 */
- IWL_CALIB_ENABLED = 1,
-};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_MAX,
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
-enum ucode_type {
- UCODE_NONE = 0,
- UCODE_INIT,
- UCODE_RT
-};
-
-/* Sensitivity calib data */
-struct iwl_sensitivity_data {
- u32 auto_corr_ofdm;
- u32 auto_corr_ofdm_mrc;
- u32 auto_corr_ofdm_x1;
- u32 auto_corr_ofdm_mrc_x1;
- u32 auto_corr_cck;
- u32 auto_corr_cck_mrc;
-
- u32 last_bad_plcp_cnt_ofdm;
- u32 last_fa_cnt_ofdm;
- u32 last_bad_plcp_cnt_cck;
- u32 last_fa_cnt_cck;
-
- u32 nrg_curr_state;
- u32 nrg_prev_state;
- u32 nrg_value[10];
- u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
- u32 nrg_silence_ref;
- u32 nrg_energy_idx;
- u32 nrg_silence_idx;
- u32 nrg_th_cck;
- s32 nrg_auto_corr_silence_diff;
- u32 num_in_cck_no_fa;
- u32 nrg_th_ofdm;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-/* Chain noise (differential Rx gain) calib data */
-struct iwl_chain_noise_data {
- u32 active_chains;
- u32 chain_noise_a;
- u32 chain_noise_b;
- u32 chain_noise_c;
- u32 chain_signal_a;
- u32 chain_signal_b;
- u32 chain_signal_c;
- u16 beacon_count;
- u8 disconn_array[NUM_RX_CHAINS];
- u8 delta_gain_code[NUM_RX_CHAINS];
- u8 radio_write;
- u8 state;
-};
-
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
-
-enum {
- MEASUREMENT_READY = (1 << 0),
- MEASUREMENT_ACTIVE = (1 << 1),
-};
-
-/* interrupt statistics */
-struct isr_statistics {
- u32 hw;
- u32 sw;
- u32 err_code;
- u32 sch;
- u32 alive;
- u32 rfkill;
- u32 ctkill;
- u32 wakeup;
- u32 rx;
- u32 rx_handlers[REPLY_MAX];
- u32 tx;
- u32 unhandled;
-};
-
-/* management statistics */
-enum iwl_mgmt_stats {
- MANAGEMENT_ASSOC_REQ = 0,
- MANAGEMENT_ASSOC_RESP,
- MANAGEMENT_REASSOC_REQ,
- MANAGEMENT_REASSOC_RESP,
- MANAGEMENT_PROBE_REQ,
- MANAGEMENT_PROBE_RESP,
- MANAGEMENT_BEACON,
- MANAGEMENT_ATIM,
- MANAGEMENT_DISASSOC,
- MANAGEMENT_AUTH,
- MANAGEMENT_DEAUTH,
- MANAGEMENT_ACTION,
- MANAGEMENT_MAX,
-};
-/* control statistics */
-enum iwl_ctrl_stats {
- CONTROL_BACK_REQ = 0,
- CONTROL_BACK,
- CONTROL_PSPOLL,
- CONTROL_RTS,
- CONTROL_CTS,
- CONTROL_ACK,
- CONTROL_CFEND,
- CONTROL_CFENDACK,
- CONTROL_MAX,
-};
-
-struct traffic_stats {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- u32 mgmt[MANAGEMENT_MAX];
- u32 ctrl[CONTROL_MAX];
- u32 data_cnt;
- u64 data_bytes;
-#endif
-};
-
-/*
- * Host interrupt timeout value, used when setting the interrupt
- * coalescing timer. CSR_INT_COALESCING is an 8-bit register in
- * 32-usec units.
- *
- * default interrupt coalescing timer is 64 x 32 = 2048 usecs
- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
- */
-#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
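/*
 * Illustrative helper, not driver code: the register holds the timeout in
 * 32-usec units, so the defaults above come out to 0x40 * 32 = 2048 usecs
 * and 0x10 * 32 = 512 usecs.
 */
static inline u32 iwl_host_int_timeout_to_usec(u8 timeout)
{
	return timeout * 32U;
}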
-
-#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-
-/* TX queue watchdog timeouts in mSecs */
-#define IWL_DEF_WD_TIMEOUT (2000)
-#define IWL_LONG_WD_TIMEOUT (10000)
-#define IWL_MAX_WD_TIMEOUT (120000)
-
-struct iwl_force_reset {
- int reset_request_count;
- int reset_success_count;
- int reset_reject_count;
- unsigned long reset_duration;
- unsigned long last_force_reset_jiffies;
-};
-
-/* extend beacon time format bit shifting */
-/*
- * for _3945 devices
- * bits 31:24 - extended
- * bits 23:0 - interval
- */
-#define IWL3945_EXT_BEACON_TIME_POS 24
-/*
- * for _4965 devices
- * bits 31:22 - extended
- * bits 21:0 - interval
- */
-#define IWL4965_EXT_BEACON_TIME_POS 22
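/*
 * Illustrative sketch (hypothetical helper): the extended beacon time is
 * packed as [extended | interval], with the split position given by the
 * *_EXT_BEACON_TIME_POS values above.
 */
static inline u32 iwl_ext_beacon_time_pack_sketch(u32 extended, u32 interval,
						  int pos)
{
	u32 interval_mask = (1U << pos) - 1;

	return (extended << pos) | (interval & interval_mask);
}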
-
-enum iwl_rxon_context_id {
- IWL_RXON_CTX_BSS,
-
- NUM_IWL_RXON_CTX
-};
-
-struct iwl_rxon_context {
- struct ieee80211_vif *vif;
-
- const u8 *ac_to_fifo;
- const u8 *ac_to_queue;
- u8 mcast_queue;
-
- /*
- * We could use the vif to indicate active, but we
- * also need it to be active during disabling when
- * we already removed the vif for type setting.
- */
- bool always_active, is_active;
-
- bool ht_need_multiple_chains;
-
- enum iwl_rxon_context_id ctxid;
-
- u32 interface_modes, exclusive_interface_modes;
- u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
-
- /*
- * We declare this const so it can only be
- * changed via explicit cast within the
- * routines that actually update the physical
- * hardware.
- */
- const struct iwl_legacy_rxon_cmd active;
- struct iwl_legacy_rxon_cmd staging;
-
- struct iwl_rxon_time_cmd timing;
-
- struct iwl_qos_info qos_data;
-
- u8 bcast_sta_id, ap_sta_id;
-
- u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
- u8 qos_cmd;
- u8 wep_key_cmd;
-
- struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
- u8 key_mapping_keys;
-
- __le32 station_flags;
-
- struct {
- bool non_gf_sta_present;
- u8 protection;
- bool enabled, is_40mhz;
- u8 extension_chan_offset;
- } ht;
-};
-
-struct iwl_priv {
-
- /* ieee device used by generic ieee processing code */
- struct ieee80211_hw *hw;
- struct ieee80211_channel *ieee_channels;
- struct ieee80211_rate *ieee_rates;
- struct iwl_cfg *cfg;
-
- /* temporary frame storage list */
- struct list_head free_frames;
- int frames_count;
-
- enum ieee80211_band band;
- int alloc_rxb_page;
-
- void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
- /* spectrum measurement report caching */
- struct iwl_spectrum_notification measure_report;
- u8 measurement_status;
-
- /* ucode beacon time */
- u32 ucode_beacon_time;
- int missed_beacon_threshold;
-
- /* track IBSS manager (last beacon) status */
- u32 ibss_manager;
-
- /* force reset */
- struct iwl_force_reset force_reset;
-
- /* we allocate an array of iwl_channel_info for the NIC's valid
- * channels; access via channel # using the indirect index array */
- struct iwl_channel_info *channel_info; /* channel info array */
- u8 channel_count; /* # of channels */
-
- /* thermal calibration */
- s32 temperature; /* degrees Kelvin */
- s32 last_temperature;
-
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
- /* Scan related variables */
- unsigned long scan_start;
- unsigned long scan_start_tsf;
- void *scan_cmd;
- enum ieee80211_band scan_band;
- struct cfg80211_scan_request *scan_request;
- struct ieee80211_vif *scan_vif;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
- u8 mgmt_tx_ant;
-
- /* spinlock */
- spinlock_t lock; /* protect general shared data */
- spinlock_t hcmd_lock; /* protect hcmd */
- spinlock_t reg_lock; /* protect hw register access */
- struct mutex mutex;
-
- /* basic pci-network driver stuff */
- struct pci_dev *pci_dev;
-
- /* pci hardware address support */
- void __iomem *hw_base;
- u32 hw_rev;
- u32 hw_wa_rev;
- u8 rev_id;
-
- /* microcode/device supports multiple contexts */
- u8 valid_contexts;
-
- /* command queue number */
- u8 cmd_queue;
-
- /* max number of station keys */
- u8 sta_key_max_num;
-
- /* EEPROM MAC addresses */
- struct mac_address addresses[1];
-
- /* uCode images, save to reload in case of failure */
- int fw_index; /* firmware we're trying to load */
- u32 ucode_ver; /* version of ucode, copy of
- iwl_ucode.ver */
- struct fw_desc ucode_code; /* runtime inst */
- struct fw_desc ucode_data; /* runtime data original */
- struct fw_desc ucode_data_backup; /* runtime data save/restore */
- struct fw_desc ucode_init; /* initialization inst */
- struct fw_desc ucode_init_data; /* initialization data */
- struct fw_desc ucode_boot; /* bootstrap inst */
- enum ucode_type ucode_type;
- u8 ucode_write_complete; /* the image write is complete */
- char firmware_name[25];
-
- struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
-
- __le16 switch_channel;
-
- /* 1st responses from initialize and runtime uCode images.
- * _4965's initialize alive response contains some calibration data. */
- struct iwl_init_alive_resp card_alive_init;
- struct iwl_alive_resp card_alive;
-
- u16 active_rate;
-
- u8 start_calib;
- struct iwl_sensitivity_data sensitivity_data;
- struct iwl_chain_noise_data chain_noise_data;
- __le16 sensitivity_tbl[HD_TABLE_SIZE];
-
- struct iwl_ht_config current_ht_config;
-
- /* Rate scaling data */
- u8 retry_rate;
-
- wait_queue_head_t wait_command_queue;
-
- int activity_timer_active;
-
- /* Rx and Tx DMA processing queues */
- struct iwl_rx_queue rxq;
- struct iwl_tx_queue *txq;
- unsigned long txq_ctx_active_msk;
- struct iwl_dma_ptr kw; /* keep warm address */
- struct iwl_dma_ptr scd_bc_tbls;
-
- u32 scd_base_addr; /* scheduler sram base address */
-
- unsigned long status;
-
- /* counts mgmt, ctl, and data packets */
- struct traffic_stats tx_stats;
- struct traffic_stats rx_stats;
-
- /* counts interrupts */
- struct isr_statistics isr_stats;
-
- struct iwl_power_mgr power_data;
-
- /* context information */
- u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
-
- /* station table variables */
-
- /* Note: if lock and sta_lock are needed, lock must be acquired first */
- spinlock_t sta_lock;
- int num_stations;
- struct iwl_station_entry stations[IWL_STATION_COUNT];
- unsigned long ucode_key_table;
-
- /* queue refcounts */
-#define IWL_MAX_HW_QUEUES 32
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- /* for each AC */
- atomic_t queue_stop_count[4];
-
- /* Indication if ieee80211_ops->open has been called */
- u8 is_open;
-
- u8 mac80211_registered;
-
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- struct iwl_eeprom_calib_info *calib_info;
-
- enum nl80211_iftype iw_mode;
-
- /* Last Rx'd beacon timestamp */
- u64 timestamp;
-
- union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
- struct {
- void *shared_virt;
- dma_addr_t shared_phys;
-
- struct delayed_work thermal_periodic;
- struct delayed_work rfkill_poll;
-
- struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl3945_notif_statistics accum_statistics;
- struct iwl3945_notif_statistics delta_statistics;
- struct iwl3945_notif_statistics max_delta;
-#endif
-
- u32 sta_supp_rates;
- int last_rx_rssi; /* From Rx packet statistics */
-
- /* Rx'd packet timing information */
- u32 last_beacon_time;
- u64 last_tsf;
-
- /*
- * each calibration channel group in the
- * EEPROM has a derived clip setting for
- * each rate.
- */
- const struct iwl3945_clip_group clip_groups[5];
-
- } _3945;
-#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
- struct {
- struct iwl_rx_phy_res last_phy_res;
- bool last_phy_res_valid;
-
- struct completion firmware_loading_complete;
-
- /*
- * The chain noise reset and gain commands are the
- * two extra calibration commands that follow the
- * standard phy calibration commands.
- */
- u8 phy_calib_chain_noise_reset_cmd;
- u8 phy_calib_chain_noise_gain_cmd;
-
- struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl_notif_statistics accum_statistics;
- struct iwl_notif_statistics delta_statistics;
- struct iwl_notif_statistics max_delta;
-#endif
-
- } _4965;
-#endif
- };
-
- struct iwl_hw_params hw_params;
-
- u32 inta_mask;
-
- struct workqueue_struct *workqueue;
-
- struct work_struct restart;
- struct work_struct scan_completed;
- struct work_struct rx_replenish;
- struct work_struct abort_scan;
-
- struct iwl_rxon_context *beacon_ctx;
- struct sk_buff *beacon_skb;
-
- struct work_struct tx_flush;
-
- struct tasklet_struct irq_tasklet;
-
- struct delayed_work init_alive_start;
- struct delayed_work alive_start;
- struct delayed_work scan_check;
-
- /* TX Power */
- s8 tx_power_user_lmt;
- s8 tx_power_device_lmt;
- s8 tx_power_next;
-
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- /* debugging info */
- u32 debug_level; /* per device debugging will override global
- iwlegacy_debug_level if set */
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- /* debugfs */
- u16 tx_traffic_idx;
- u16 rx_traffic_idx;
- u8 *tx_traffic;
- u8 *rx_traffic;
- struct dentry *debugfs_dir;
- u32 dbgfs_sram_offset, dbgfs_sram_len;
- bool disable_ht40;
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
- struct work_struct txpower_work;
- u32 disable_sens_cal;
- u32 disable_chain_noise_cal;
- u32 disable_tx_power_cal;
- struct work_struct run_time_calib_work;
- struct timer_list statistics_periodic;
- struct timer_list watchdog;
- bool hw_ready;
-
- struct led_classdev led;
- unsigned long blink_on, blink_off;
- bool led_registered;
-}; /*iwl_priv */
-
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
- set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
- clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-/*
- * iwl_legacy_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set a per-device debug level. That debug
- * level is used if set; otherwise the global debug level, which can be set
- * via a module parameter, is used.
- */
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- if (priv->debug_level)
- return priv->debug_level;
- else
- return iwlegacy_debug_level;
-}
-#else
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- return iwlegacy_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *
-iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
- int txq_id, int idx)
-{
- if (priv->txq[txq_id].txb[idx].skb)
- return (struct ieee80211_hdr *)priv->txq[txq_id].
- txb[idx].skb->data;
- return NULL;
-}
-
-static inline struct iwl_rxon_context *
-iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- return vif_priv->ctx;
-}
-
-#define for_each_context(priv, ctx) \
- for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
- ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
- if (priv->valid_contexts & BIT(ctx->ctxid))
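/*
 * Usage sketch (hypothetical function): for_each_context() visits only the
 * RXON contexts this device reports as valid.
 */
static inline int iwl_legacy_count_active_contexts_sketch(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int n = 0;

	for_each_context(priv, ctx)
		if (ctx->is_active)
			n++;

	return n;
}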
-
-static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctxid)
-{
- return (priv->contexts[ctxid].active.filter_flags &
- RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
-{
- return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-}
-
-static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
-{
- return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
-{
- if (ch_info == NULL)
- return 0;
- return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
-{
- return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline int
-iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
-{
- return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int
-iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
-{
- return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
-}
-
-static inline void
-__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
-{
- __free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-
-static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
-{
- free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec99197ce..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-
-/* sparse doesn't like tracepoint macros */
-#ifndef __CHECKER__
-#include "iwl-dev.h"
-
-#define CREATE_TRACE_POINTS
-#include "iwl-devtrace.h"
-
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725ba6b..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWLWIFI_LEGACY_DEVICE_TRACE
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-
-#define PRIV_ENTRY __field(struct iwl_priv *, priv)
-#define PRIV_ASSIGN (__entry->priv = priv)
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_io
-
-TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u8, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_ucode
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi
-
-TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
- TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
- TP_ARGS(priv, hcmd, len, flags),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, hcmd, len)
- __field(u32, flags)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(hcmd), hcmd, len);
- __entry->flags = flags;
- ),
- TP_printk("[%p] hcmd %#.2x (%ssync)",
- __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
- __entry->flags & CMD_ASYNC ? "a" : "")
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_rx,
- TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
- TP_ARGS(priv, rxbuf, len),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, rxbuf, len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
- ),
- TP_printk("[%p] RX cmd %#.2x",
- __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_tx,
- TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
- void *buf0, size_t buf0_len,
- void *buf1, size_t buf1_len),
- TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
- TP_STRUCT__entry(
- PRIV_ENTRY
-
- __field(size_t, framelen)
- __dynamic_array(u8, tfd, tfdlen)
-
- /*
- * Do not insert between or below these items,
- * we want to keep the frame together (except
- * for the possible padding).
- */
- __dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, buf1_len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->framelen = buf0_len + buf1_len;
- memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
- memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
- ),
- TP_printk("[%p] TX %.2x (%zu bytes)",
- __entry->priv,
- ((u8 *)__get_dynamic_array(buf0))[0],
- __entry->framelen)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
- TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
- u32 data1, u32 data2, u32 line, u32 blink1,
- u32 blink2, u32 ilink1, u32 ilink2),
- TP_ARGS(priv, desc, time, data1, data2, line,
- blink1, blink2, ilink1, ilink2),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, desc)
- __field(u32, time)
- __field(u32, data1)
- __field(u32, data2)
- __field(u32, line)
- __field(u32, blink1)
- __field(u32, blink2)
- __field(u32, ilink1)
- __field(u32, ilink2)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->desc = desc;
- __entry->time = time;
- __entry->data1 = data1;
- __entry->data2 = data2;
- __entry->line = line;
- __entry->blink1 = blink1;
- __entry->blink2 = blink2;
- __entry->ilink1 = ilink1;
- __entry->ilink2 = ilink2;
- ),
- TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
- "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
- __entry->priv, __entry->desc, __entry->time, __entry->data1,
- __entry->data2, __entry->line, __entry->blink1,
- __entry->blink2, __entry->ilink1, __entry->ilink2)
-);
-
-#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE iwl-devtrace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49b74a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwlegacy_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel. We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware. This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel. There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwlegacy_eeprom_band_1[14] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
- 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
- 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
- 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
- 145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
- 1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
- 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
-{
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
- int ret = 0;
-
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
- switch (gp) {
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- break;
- default:
- IWL_ERR(priv, "bad EEPROM signature,"
- "EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- break;
- }
- return ret;
-}
-
-const u8
-*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
- BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[offset];
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
-
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
-{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
-
-/**
- * iwl_legacy_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_legacy_eeprom_init(struct iwl_priv *priv)
-{
- __le16 *e;
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
-
- /* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
- e = (__le16 *)priv->eeprom;
-
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- ret = iwl_legacy_eeprom_verify_signature(priv);
- if (ret < 0) {
- IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- goto err;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
- if (ret < 0) {
- IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
- ret = -ENOENT;
- goto err;
- }
-
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- _iwl_legacy_write32(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
- addr);
- goto done;
- }
- r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
-
- IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- "EEPROM",
- iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
-
- ret = 0;
-done:
- priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
-
-err:
- if (ret)
- iwl_legacy_eeprom_free(priv);
- /* Reset chip to save power until we load uCode during "up". */
- iwl_legacy_apm_stop(priv);
-alloc_err:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_init);
-
-void iwl_legacy_eeprom_free(struct iwl_priv *priv)
-{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_free);
-
-static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
- int eep_band, int *eeprom_ch_count,
- const struct iwl_eeprom_channel **eeprom_ch_info,
- const u8 **eeprom_ch_index)
-{
- u32 offset = priv->cfg->ops->lib->
- eeprom_ops.regulatory_bands[eep_band - 1];
- switch (eep_band) {
- case 1: /* 2.4GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_1;
- break;
- case 2: /* 4.9GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_2;
- break;
- case 3: /* 5.2GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_3;
- break;
- case 4: /* 5.5GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_4;
- break;
- case 5: /* 5.7GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_5;
- break;
- case 6: /* 2.4GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_6;
- break;
- case 7: /* 5 GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_7;
- break;
- default:
- BUG();
- }
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-/**
- * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel,
- const struct iwl_eeprom_channel *eeprom_ch,
- u8 clear_ht40_extension_channel)
-{
- struct iwl_channel_info *ch_info;
-
- ch_info = (struct iwl_channel_info *)
- iwl_legacy_get_channel_info(priv, band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -1;
-
- IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT(IBSS),
- CHECK_AND_PRINT(ACTIVE),
- CHECK_AND_PRINT(RADAR),
- CHECK_AND_PRINT(WIDE),
- CHECK_AND_PRINT(DFS),
- eeprom_ch->flags,
- eeprom_ch->max_power_avg,
- ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
- "" : "not ");
-
- ch_info->ht40_eeprom = *eeprom_ch;
- ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
- ch_info->ht40_flags = eeprom_ch->flags;
- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
- ch_info->ht40_extension_channel &=
- ~clear_ht40_extension_channel;
-
- return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-
-/**
- * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_legacy_init_channel_map(struct iwl_priv *priv)
-{
- int eeprom_ch_count = 0;
- const u8 *eeprom_ch_index = NULL;
- const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
- int band, ch;
- struct iwl_channel_info *ch_info;
-
- if (priv->channel_count) {
- IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
- return 0;
- }
-
- IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
- priv->channel_count =
- ARRAY_SIZE(iwlegacy_eeprom_band_1) +
- ARRAY_SIZE(iwlegacy_eeprom_band_2) +
- ARRAY_SIZE(iwlegacy_eeprom_band_3) +
- ARRAY_SIZE(iwlegacy_eeprom_band_4) +
- ARRAY_SIZE(iwlegacy_eeprom_band_5);
-
- IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
- priv->channel_count);
-
- priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
- priv->channel_count, GFP_KERNEL);
- if (!priv->channel_info) {
- IWL_ERR(priv, "Could not allocate channel_info\n");
- priv->channel_count = 0;
- return -ENOMEM;
- }
-
- ch_info = priv->channel_info;
-
- /* Loop through the 5 EEPROM bands, adding them in order to the
- * channel map we maintain (which contains more information than
- * what is in the EEPROM alone) */
- for (band = 1; band <= 5; band++) {
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- ch_info->channel = eeprom_ch_index[ch];
- ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
-
- /* permanently store EEPROM's channel regulatory flags
- * and max power in channel info database. */
- ch_info->eeprom = eeprom_ch_info[ch];
-
- /* Copy the run-time flags so they are there even on
- * invalid channels */
- ch_info->flags = eeprom_ch_info[ch].flags;
- /* First write that ht40 is not enabled, and then enable
- * one by one */
- ch_info->ht40_extension_channel =
- IEEE80211_CHAN_NO_HT40;
-
- if (!(iwl_legacy_is_channel_valid(ch_info))) {
- IWL_DEBUG_EEPROM(priv,
- "Ch. %d Flags %x [%sGHz] - "
- "No traffic\n",
- ch_info->channel,
- ch_info->flags,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4");
- ch_info++;
- continue;
- }
-
- /* Initialize regulatory-based run-time data */
- ch_info->max_power_avg = ch_info->curr_txpow =
- eeprom_ch_info[ch].max_power_avg;
- ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
- ch_info->min_power = 0;
-
- IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
- "%s%s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
- CHECK_AND_PRINT_I(DFS),
- eeprom_ch_info[ch].flags,
- eeprom_ch_info[ch].max_power_avg,
- ((eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_RADAR))
- ? "" : "not ");
-
- ch_info++;
- }
- }
-
- /* Check if we do have HT40 channels */
- if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
- return 0;
-
- /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
- for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- /* Set up driver's info for lower half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch],
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40PLUS);
-
- /* Set up driver's info for upper half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch] + 4,
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40MINUS);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_channel_map);
-
-/*
- * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
- */
-void iwl_legacy_free_channel_map(struct iwl_priv *priv)
-{
- kfree(priv->channel_info);
- priv->channel_count = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_free_channel_map);
-
-/**
- * iwl_legacy_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct
-iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel)
-{
- int i;
-
- switch (band) {
- case IEEE80211_BAND_5GHZ:
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel == channel)
- return &priv->channel_info[i];
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (channel >= 1 && channel <= 14)
- return &priv->channel_info[channel - 1];
- break;
- default:
- BUG();
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c8100202..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_eeprom_h__
-#define __iwl_legacy_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
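/*
 * Condensed sketch of the access sequence described above; it mirrors the
 * read loop in iwl_legacy_eeprom_init() and omits the error handling and
 * semaphore acquisition the real code performs.
 */
static inline u16 iwl_eeprom_read_word_sketch(struct iwl_priv *priv, u16 addr)
{
	_iwl_legacy_write32(priv, CSR_EEPROM_REG,
			    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
	iwl_poll_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
		     CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT);
	return _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG) >> 16;
}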
-
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- * It only indicates that 20 MHz channel use is supported; HT40 channel
- * usage is indicated by a separate set of regulatory flags for each
- * HT40 channel pair.
- *
- * NOTE: Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
- EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
- EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
- /* Bit 2 Reserved */
- EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
- EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
- EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
- /* Bit 6 Reserved (was Narrow Channel) */
- EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-/* 3945 only */
-#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
-#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
- u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
- s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
-} __packed;
-
-/* 3945 Specific */
-#define EEPROM_3945_EEPROM_VERSION (0x2f)
-
-/* 4965 has two radio transmitters (and 3 radio receivers) */
-#define EEPROM_TX_POWER_TX_CHAINS (2)
-
-/* 4965 has room for up to 8 sets of txpower calibration data */
-#define EEPROM_TX_POWER_BANDS (8)
-
-/* 4965 factory calibration measures txpower gain settings for
- * each of 3 target output levels */
-#define EEPROM_TX_POWER_MEASUREMENTS (3)
-
-/* 4965 Specific */
-/* 4965 driver does not work with txpower calibration version < 5 */
-#define EEPROM_4965_TX_POWER_VERSION (5)
-#define EEPROM_4965_EEPROM_VERSION (0x2f)
-#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
-#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
-#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
-#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
-
-/* 2.4 GHz */
-extern const u8 iwlegacy_eeprom_band_1[14];
-
-/*
- * factory calibration data for one txpower level, on one channel,
- * measured on one of the 2 tx chains (radio transmitter and associated
- * antenna). EEPROM contains:
- *
- * 1) Temperature (degrees Celsius) of device when measurement was made.
- *
- * 2) Gain table index used to achieve the target measurement power.
- * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
- *
- * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
- *
- * 4) RF power amplifier detector level measurement (not used).
- */
-struct iwl_eeprom_calib_measure {
- u8 temperature; /* Device temperature (Celsius) */
- u8 gain_idx; /* Index into gain table */
- u8 actual_pow; /* Measured RF output power, half-dBm */
- s8 pa_det; /* Power amp detector level (not used) */
-} __packed;
-
-
-/*
- * measurement set for one channel. EEPROM contains:
- *
- * 1) Channel number measured
- *
- * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
- * (a.k.a. "tx chains") (6 measurements altogether)
- */
-struct iwl_eeprom_calib_ch_info {
- u8 ch_num;
- struct iwl_eeprom_calib_measure
- measurements[EEPROM_TX_POWER_TX_CHAINS]
- [EEPROM_TX_POWER_MEASUREMENTS];
-} __packed;
-
-/*
- * txpower subband info.
- *
- * For each frequency subband, EEPROM contains the following:
- *
- * 1) First and last channels within range of the subband. "0" values
- * indicate that this sample set is not being used.
- *
- * 2) Sample measurement sets for 2 channels close to the range endpoints.
- */
-struct iwl_eeprom_calib_subband_info {
- u8 ch_from; /* channel number of lowest channel in subband */
- u8 ch_to; /* channel number of highest channel in subband */
- struct iwl_eeprom_calib_ch_info ch1;
- struct iwl_eeprom_calib_ch_info ch2;
-} __packed;
-
-
-/*
- * txpower calibration info. EEPROM contains:
- *
- * 1) Factory-measured saturation power levels (maximum levels at which
- * tx power amplifier can output a signal without too much distortion).
- * There is one level for 2.4 GHz band and one for 5 GHz band. These
- * values apply to all channels within each of the bands.
- *
- * 2) Factory-measured power supply voltage level. This is assumed to be
- * constant (i.e. same value applies to all channels/bands) while the
- * factory measurements are being made.
- *
- * 3) Up to 8 sets of factory-measured txpower calibration values.
- * These are for different frequency ranges, since txpower gain
- * characteristics of the analog radio circuitry vary with frequency.
- *
- * Not all sets need to be filled with data;
- * struct iwl_eeprom_calib_subband_info contains range of channels
- * (0 if unused) for each set of data.
- */
-struct iwl_eeprom_calib_info {
- u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
- u8 saturation_power52; /* half-dBm */
- __le16 voltage; /* signed */
- struct iwl_eeprom_calib_subband_info
- band_info[EEPROM_TX_POWER_BANDS];
-} __packed;
-
-
-/* General */
-#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
-#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
-#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
-#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
-#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
-#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
-#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
-#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
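As a hedged illustration of how the masks above are typically applied (the radio_cfg variable, the hweight8() chain counting and the hw_params fields are assumptions for this sketch, not part of the header):

        u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

        if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) > EEPROM_4965_RF_CFG_TYPE_MAX)
                return -EINVAL;         /* unknown radio type for this hardware */

        /* the TX/RX antenna fields are 4-bit bitmaps; count the active chains */
        priv->hw_params.tx_chains_num = hweight8(EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg));
        priv->hw_params.rx_chains_num = hweight8(EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg));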
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by iwl has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
-#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
-
-/*
- * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
- *
- * The channel listed is the center of the lower 20 MHz half of the channel.
- * The overall center frequency is actually 2 channels (10 MHz) above that,
- * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
- * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
- * and the overall HT40 channel width centers on channel 3.
- *
- * NOTE: The RXON command uses 20 MHz channel numbers to specify the
- * control channel to which to tune. RXON also specifies whether the
- * control channel is the upper or lower half of a HT40 channel.
- *
- * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
- */
-#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
-
-/*
- * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
- * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
- */
-#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
-
-#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
-
-struct iwl_eeprom_ops {
- const u32 regulatory_bands[7];
- int (*acquire_semaphore) (struct iwl_priv *priv);
- void (*release_semaphore) (struct iwl_priv *priv);
-};
-
-
-int iwl_legacy_eeprom_init(struct iwl_priv *priv);
-void iwl_legacy_eeprom_free(struct iwl_priv *priv);
-const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
-int iwl_legacy_init_channel_map(struct iwl_priv *priv);
-void iwl_legacy_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_legacy_get_channel_info(
- const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel);
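A minimal usage sketch of the query helpers declared above; IEEE80211_BAND_2GHZ comes from mac80211, struct iwl_channel_info is defined in iwl-dev.h, and the surrounding error handling is illustrative only:

        /* 3945 only: check the SKU capability word */
        u16 sku = iwl_legacy_eeprom_query16(priv, EEPROM_SKU_CAP);
        const struct iwl_channel_info *ch_info =
                iwl_legacy_get_channel_info(priv, IEEE80211_BAND_2GHZ, 6);

        if (!ch_info)
                return -EINVAL;         /* channel 6 not in the EEPROM map */
        if (!(sku & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE))
                IWL_DEBUG_INFO(priv, "no HW RF-kill support\n");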
-
-#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e6091816e3..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_fh_h__
-#define __iwl_legacy_fh_h__
-
-/****************************/
-/* Flow Handler Definitions */
-/****************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH_MEM_LOWER_BOUND (0x1000)
-#define FH_MEM_UPPER_BOUND (0x2000)
-
-/**
- * Keep-Warm (KW) buffer base address.
- *
- * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
- * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
- * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
- * from going into a power-savings mode that would cause higher DRAM latency,
- * and possible data over/under-runs, before all Tx/Rx is complete.
- *
- * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
- * of the buffer, which must be 4K aligned. Once this is set up, the 4965
- * automatically invokes keep-warm accesses when normal accesses might not
- * be sufficient to maintain fast DRAM response.
- *
- * Bit fields:
- * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
- */
-#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
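A minimal sketch of programming this register during NIC setup; priv->kw.dma_addr stands in for the DMA handle of the driver's 4 KB keep-warm buffer and is an assumed field name:

        /* bits [35:4] of the 4K-aligned keep-warm buffer address */
        iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG,
                                  priv->kw.dma_addr >> 4);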
-
-
-/**
- * TFD Circular Buffers Base (CBBC) addresses
- *
- * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
- * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
- * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
- * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
- * aligned (address bits 0-7 must be 0).
- *
- * Bit fields in each pointer register:
- * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
- */
-#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
-
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
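For example, a Tx queue's TFD circular buffer base might be programmed as below; txq->q.dma_addr is assumed to hold the 256-byte-aligned DMA address of that queue's TFD array:

        /* bits [35:8] of the TFD CB base for queue txq_id */
        iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                                  txq->q.dma_addr >> 8);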
-
-
-/**
- * Rx SRAM Control and Status Registers (RSCSR)
- *
- * These registers provide handshake between driver and 4965 for the Rx queue
- * (this queue handles *all* command responses, notifications, Rx data, etc.
- * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
- * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
- * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
- * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
- * mapping between RBDs and RBs.
- *
- * Driver must allocate host DRAM memory for the following, and set the
- * physical address of each into 4965 registers:
- *
- * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
- * entries (although any power of 2, up to 4096, is selectable by driver).
- * Each entry (1 dword) points to a receive buffer (RB) of consistent size
- * (typically 4K, although 8K or 16K are also selectable by driver).
- * Driver sets up RB size and number of RBDs in the CB via Rx config
- * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
- *
- * Bit fields within one RBD:
- * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
- *
- * Driver sets physical address [35:8] of base of RBD circular buffer
- * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
- *
- * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
- * (RBs) have been filled, via a "write pointer", actually the index of
- * the RB's corresponding RBD within the circular buffer. Driver sets
- * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
- *
- * Bit fields in lower dword of Rx status buffer (upper dword not used
- * by driver; see struct iwl4965_shared, val0):
- * 31-12: Not used by driver
- * 11- 0: Index of last filled Rx buffer descriptor
- * (4965 writes, driver reads this value)
- *
- * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
- * enter pointers to these RBs into contiguous RBD circular buffer entries,
- * and update the 4965's "write" index register,
- * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
- *
- * This "write" index corresponds to the *next* RBD that the driver will make
- * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
- * the circular buffer. This value should initially be 0 (before preparing any
- * RBs), should be 8 after preparing the first 8 RBs (for example), and must
- * wrap back to 0 at the end of the circular buffer (but don't wrap before
- * "read" index has advanced past 1! See below).
- * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
- *
- * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
- * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
- * to tell the driver the index of the latest filled RBD. The driver must
- * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
- *
- * The driver must also internally keep track of a third index, which is the
- * next RBD to process. When receiving an Rx interrupt, driver should process
- * all filled but unprocessed RBs up to, but not including, the RB
- * corresponding to the "read" index. For example, if "read" index becomes "1",
- * driver may process the RB pointed to by RBD 0. Depending on volume of
- * traffic, there may be many RBs to process.
- *
- * If read index == write index, 4965 thinks there is no room to put new data.
- * Due to this, the maximum number of filled RBs is 255, instead of 256. To
- * be safe, make sure that there is a gap of at least 2 RBDs between "write"
- * and "read" indexes; that is, make sure that there are no more than 254
- * buffers waiting to be filled.
- */
-#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
-#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
-
-/**
- * Physical base address of 8-byte Rx Status buffer.
- * Bit fields:
- * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
- */
-#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
-
-/**
- * Physical base address of Rx Buffer Descriptor Circular Buffer.
- * Bit fields:
- * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
- */
-#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
-
-/**
- * Rx write pointer (index, really!).
- * Bit fields:
- * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
- * NOTE: For 256-entry circular buffer, use only bits [7:0].
- */
-#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
-#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
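A sketch of how the driver might publish newly prepared RBDs while honoring the multiple-of-8 rule described above (the rxq->write and rxq->write_actual fields are illustrative names):

        /* round the software write index down to a multiple of 8 */
        rxq->write_actual = rxq->write & ~0x7;
        iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);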
-
-
-/**
- * Rx Config/Status Registers (RCSR)
- * Rx Config Reg for channel 0 (only channel used)
- *
- * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
- * normal operation (see bit fields).
- *
- * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
- * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
- * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
- *
- * Bit fields:
- * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29-24: reserved
- * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
- * min "5" for 32 RBDs, max "12" for 4096 RBDs.
- * 19-18: reserved
- * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
- * '10' 12K, '11' 16K.
- * 15-14: reserved
- * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
- * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
- * typical value 0x10 (about 1/2 msec)
- * 3- 0: reserved
- */
-#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
-#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
-
-#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
-
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
-#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
-#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
-
-#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
-#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
-#define RX_RB_TIMEOUT (0x10)
-
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
-
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
-
-#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
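Putting these values together, a plausible channel-0 Rx configuration (4 KB buffers, 256 RBDs, interrupts routed to the host) could look like the sketch below; the timeout and size choices are illustrative:

        iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
                (RX_QUEUE_SIZE_LOG << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
                (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));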
-
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
-
-/**
- * Rx Shared Status Registers (RSSR)
- *
- * After stopping Rx DMA channel (writing 0 to
- * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
- * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
- *
- * Bit fields:
- * 24: 1 = Channel 0 is idle
- *
- * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
- * contain default values that should not be altered by the driver.
- */
-#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
-#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-
-#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
-#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
-#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
- (FH_MEM_RSSR_LOWER_BOUND + 0x008)
-
-#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
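A sketch of the stop sequence described above (the 1000 microsecond timeout is illustrative):

        /* turn off Rx DMA, then wait for channel 0 to report idle */
        iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
                            FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);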
-
-#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
-
-/* TFDB Area - TFDs buffer table */
-#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
-#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
-#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
-#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
-#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-
-/**
- * Transmit DMA Channel Control/Status Registers (TCSR)
- *
- * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
- * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
- * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
- *
- * To use a Tx DMA channel, driver must initialize its
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
- *
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
- *
- * All other bits should be 0.
- *
- * Bit fields:
- * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29- 4: Reserved, set to "0"
- * 3: Enable internal DMA requests (1, normal operation), disable (0)
- * 2- 0: Reserved, set to "0"
- */
-#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
-
-/* Find Control/Status reg for given Tx DMA/FIFO channel */
-#define FH49_TCSR_CHNL_NUM (7)
-#define FH50_TCSR_CHNL_NUM (8)
-
-/* TCSR: tx_config register values */
-#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
-#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
-
-/**
- * Tx Shared Status Registers (TSSR)
- *
- * After stopping Tx DMA channel (writing 0 to
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
- * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
- * (channel's buffers empty | no pending requests).
- *
- * Bit fields:
- * 31-24: 1 = Channel buffers empty (channel 7:0)
- * 23-16: 1 = No pending requests (channel 7:0)
- */
-#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
-#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
-
-#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
-
-/**
- * Bit fields for TSSR (Tx Shared Status & Control) error status register:
- * 31: Indicates an address error when accessing internal memory
- * uCode/driver must write "1" in order to clear this flag
- * 30: Indicates that Host did not send the expected number of dwords to FH
- * uCode/driver must write "1" in order to clear this flag
- * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
- * command was received from the scheduler while the TRB was already full
- * with a previous command
- * uCode/driver must write "1" in order to clear this flag
- * 7-0: Each status bit indicates a channel's TxCredit error. When an error
- * bit is set, it indicates that the FH has received a full indication
- * from the RTC TxFIFO and the current value of the TxCredit counter was
- * not equal to zero. This means that the credit mechanism was not
- * synchronized to the TxFIFO status
- * uCode/driver must write "1" in order to clear this flag
- */
-#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
-
-#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
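Correspondingly, after clearing a channel's config register, the driver can wait for the channel to drain (a sketch; the timeout value is illustrative):

        iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
        iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                            FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);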
-
-/* Tx service channels */
-#define FH_SRVC_CHNL (9)
-#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
-#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
- (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
-
-#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
-/* Instruct FH to increment the retry count of a packet when
- * it is brought from the memory to TX-FIFO
- */
-#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
-/**
- * struct iwl_rb_status - receive buffer status
- * host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
- * in which the last frame was written to
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
- * which was transferred
- */
-struct iwl_rb_status {
- __le16 closed_rb_num;
- __le16 closed_fr_num;
- __le16 finished_rb_num;
- __le16 finished_fr_nam;
- __le32 __unused; /* 3945 only */
-} __packed;
-
-
-#define TFD_QUEUE_SIZE_MAX (256)
-#define TFD_QUEUE_SIZE_BC_DUP (64)
-#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
-#define IWL_NUM_OF_TBS 20
-
-static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
-{
- return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
-}
-/**
- * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
- *
- * This structure contains dma address and length of transmission address
- *
- * @lo: low [31:0] portion of the dma address of TX buffer
- * every even is unaligned on 16 bit boundary
- * @hi_n_len 0-3 [35:32] portion of dma
- * 4-15 length of the tx buffer
- */
-struct iwl_tfd_tb {
- __le32 lo;
- __le16 hi_n_len;
-} __packed;
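A sketch of how one TB entry could be filled using iwl_legacy_get_dma_hi_addr() above; the helper name iwl_tfd_tb_set() is hypothetical:

static inline void iwl_tfd_tb_set(struct iwl_tfd_tb *tb,
                                  dma_addr_t addr, u16 len)
{
        u16 hi_n_len = len << 4;                        /* bits 4-15: length */

        hi_n_len |= iwl_legacy_get_dma_hi_addr(addr);   /* bits 0-3: addr [35:32] */
        tb->lo = cpu_to_le32(addr);
        tb->hi_n_len = cpu_to_le16(hi_n_len);
}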
-
-/**
- * struct iwl_tfd
- *
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
- *
- * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
- * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
- *
- * Driver must indicate the physical address of the base of each
- * circular buffer via the FH_MEM_CBBC_QUEUE registers.
- *
- * Each TFD contains pointer/size information for up to 20 data buffers
- * in host DRAM. These buffers collectively contain the (one) frame described
- * by the TFD. Each buffer must be a single contiguous block of memory within
- * itself, but buffers may be scattered in host DRAM. Each buffer has max size
- * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
- * Tx frame, up to 8 KBytes in size.
- *
- * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
- */
-struct iwl_tfd {
- u8 __reserved1[3];
- u8 num_tbs;
- struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
- __le32 __pad;
-} __packed;
-
-/* Keep Warm Size */
-#define IWL_KW_SIZE 0x1000 /* 4k */
-
-#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9feb61..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-
-
-const char *iwl_legacy_get_cmd_string(u8 cmd)
-{
- switch (cmd) {
- IWL_CMD(REPLY_ALIVE);
- IWL_CMD(REPLY_ERROR);
- IWL_CMD(REPLY_RXON);
- IWL_CMD(REPLY_RXON_ASSOC);
- IWL_CMD(REPLY_QOS_PARAM);
- IWL_CMD(REPLY_RXON_TIMING);
- IWL_CMD(REPLY_ADD_STA);
- IWL_CMD(REPLY_REMOVE_STA);
- IWL_CMD(REPLY_WEPKEY);
- IWL_CMD(REPLY_3945_RX);
- IWL_CMD(REPLY_TX);
- IWL_CMD(REPLY_RATE_SCALE);
- IWL_CMD(REPLY_LEDS_CMD);
- IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
- IWL_CMD(REPLY_CHANNEL_SWITCH);
- IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
- IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
- IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
- IWL_CMD(POWER_TABLE_CMD);
- IWL_CMD(PM_SLEEP_NOTIFICATION);
- IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
- IWL_CMD(REPLY_SCAN_CMD);
- IWL_CMD(REPLY_SCAN_ABORT_CMD);
- IWL_CMD(SCAN_START_NOTIFICATION);
- IWL_CMD(SCAN_RESULTS_NOTIFICATION);
- IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
- IWL_CMD(BEACON_NOTIFICATION);
- IWL_CMD(REPLY_TX_BEACON);
- IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
- IWL_CMD(REPLY_BT_CONFIG);
- IWL_CMD(REPLY_STATISTICS_CMD);
- IWL_CMD(STATISTICS_NOTIFICATION);
- IWL_CMD(CARD_STATE_NOTIFICATION);
- IWL_CMD(MISSED_BEACONS_NOTIFICATION);
- IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
- IWL_CMD(SENSITIVITY_CMD);
- IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
- IWL_CMD(REPLY_RX_PHY_CMD);
- IWL_CMD(REPLY_RX_MPDU_CMD);
- IWL_CMD(REPLY_RX);
- IWL_CMD(REPLY_COMPRESSED_BA);
- default:
- return "UNKNOWN";
-
- }
-}
-EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
-
-#define HOST_COMPLETE_TIMEOUT (HZ / 2)
-
-static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- break;
- default:
- IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- }
-#endif
-}
-
-static int
-iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int ret;
-
- BUG_ON(!(cmd->flags & CMD_ASYNC));
-
- /* An asynchronous command can not expect an SKB to be set. */
- BUG_ON(cmd->flags & CMD_WANT_SKB);
-
- /* Assign a generic callback if one is not provided */
- if (!cmd->callback)
- cmd->callback = iwl_legacy_generic_cmd_callback;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EBUSY;
-
- ret = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (ret < 0) {
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- return ret;
- }
- return 0;
-}
-
-int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int cmd_idx;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- BUG_ON(cmd->flags & CMD_ASYNC);
-
- /* A synchronous command can not have a callback set. */
- BUG_ON(cmd->callback);
-
- IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- goto out;
- }
-
- ret = wait_event_timeout(priv->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: time out after %dms.\n",
- iwl_legacy_get_cmd_string(cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv,
- "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
- }
-
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_ERR(priv, "Command %s failed: FW Error\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
- IWL_ERR(priv, "Error: Response NULL in '%s'\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto cancel;
- }
-
- ret = 0;
- goto out;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
- ~CMD_WANT_SKB;
- }
-fail:
- if (cmd->reply_page) {
- iwl_legacy_free_pages(priv, cmd->reply_page);
- cmd->reply_page = 0;
- }
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
-
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- if (cmd->flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_async(priv, cmd);
-
- return iwl_legacy_send_cmd_sync(priv, cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd);
-
-int
-iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- return iwl_legacy_send_cmd_sync(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
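A hedged usage example of the synchronous wrapper above; struct iwl_bt_cmd and its fields come from the driver's command definitions (iwl-commands.h), and the values shown are placeholders:

        struct iwl_bt_cmd bt_cmd = {
                .flags = 3,             /* placeholder coexistence mode */
                .lead_time = 0xAA,      /* placeholder lead time */
        };
        int ret = iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
                                          sizeof(bt_cmd), &bt_cmd);
        if (ret)
                IWL_ERR(priv, "failed to send BT config: %d\n", ret);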
-
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
- u8 id, u16 len, const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt))
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- cmd.flags |= CMD_ASYNC;
- cmd.callback = callback;
-
- return iwl_legacy_send_cmd_async(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23eaecbb..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_helpers_h__
-#define __iwl_legacy_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
-static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
- struct ieee80211_hw *hw)
-{
- return &hw->conf;
-}
-
-/**
- * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
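For instance, a Tx path might advance its queue write pointer with the wrap helper above (q, write_ptr and n_bd are illustrative field names):

        q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);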
-
-/* TODO: Move fw_desc functions to iwl-pci.ko */
-static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(&pci_dev->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (!desc->len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
- &desc->p_addr, GFP_KERNEL);
- return (desc->v_addr != NULL) ? 0 : -ENOMEM;
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void
-iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
- BUG_ON(ac > 3); /* only have 2 bits */
- BUG_ON(hwq > 31); /* only use 5 bits */
-
- txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (test_and_clear_bit(hwq, priv->queue_stopped))
- if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
- ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (!test_and_set_bit(hwq, priv->queue_stopped))
- if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
-{
- clear_bit(STATUS_INT_ENABLED, &priv->status);
-
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(priv, CSR_INT, 0xffffffff);
- iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
- IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
- iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &priv->status);
- iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
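A small sketch of how the two masks above split a 32-bit beacon time value; base and tsf_bits are illustrative:

        u32 interval_part = base & iwl_legacy_beacon_time_mask_low(priv, tsf_bits);
        u32 beacon_part = base & iwl_legacy_beacon_time_mask_high(priv, tsf_bits);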
-
-#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d342914..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_io_h__
-#define __iwl_legacy_io_h__
-
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-devtrace.h"
-
-/*
- * IO, register, and NIC memory access functions
- *
- * NOTE on naming convention and macro usage for these
- *
- * A single _ prefix before an access function means that no state
- * check or debug information is printed when that function is called.
- *
- * A double __ prefix before an access function means that state is checked
- * and the current line number and caller function name are printed in addition
- * to any other debug output.
- *
- * The non-prefixed name is the #define that maps the caller into a
- * #define that provides the caller's name and __LINE__ to the double
- * prefix version.
- *
- * If you wish to call the function without any debug or state checking,
- * you should use the single _ prefix version (as is used by dependent IO
- * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
- * _iwl_legacy_read32.)
- *
- * These declarations are *extremely* useful in quickly isolating code deltas
- * which result in misconfiguration of the hardware I/O. In combination with
- * git-bisect and the IO debug level you can quickly determine the specific
- * commit which breaks the IO sequence to the hardware.
- *
- */
-
-static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
-{
- trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
- iowrite8(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u8 val)
-{
- IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write8(priv, ofs, val);
-}
-#define iwl_write8(priv, ofs, val) \
- __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
-#endif
-
-
-static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
-{
- trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
- iowrite32(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u32 val)
-{
- IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write32(priv, ofs, val);
-}
-#define iwl_write32(priv, ofs, val) \
- __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
-#endif
-
-static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
-{
- u32 val = ioread32(priv->hw_base + ofs);
- trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
- return val;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32
-__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
-{
- IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
- return _iwl_legacy_read32(priv, ofs);
-}
-#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
-#else
-#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
-#endif
-
-#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline int
-_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
- IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
- addr, bits, mask,
- unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
- return ret;
-}
-#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
- __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
- bits, mask, timeout)
-#else
-#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
-#endif
-
-static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_set_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) | mask;
- IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
- mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_set_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline void
-_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_clear_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
- IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_clear_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
-{
- int ret;
- u32 val;
-
- /* this bit wakes up the NIC */
- _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /*
- * These bits say the device is running, and should keep running for
- * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
- * but they do not indicate that embedded SRAM is restored yet;
- * 3945 and 4965 have volatile SRAM, and must save/restore contents
- * to/from host DRAM when sleeping/waking for power-saving.
- * Each direction takes approximately 1/4 millisecond; with this
- * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
- * series of register accesses are expected (e.g. reading Event Log),
- * to keep device from sleeping.
- *
- * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
- * SRAM is okay/restored. We don't check that here because this call
- * is just for hardware register access; but GP1 MAC_SLEEP check is a
- * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
- *
- */
- ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
- if (ret < 0) {
- val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
- IWL_ERR(priv,
- "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
- _iwl_legacy_write32(priv, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
- return -EIO;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
- IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
- return _iwl_legacy_grab_nic_access(priv);
-}
-#define iwl_grab_nic_access(priv) \
- __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_grab_nic_access(priv) \
- _iwl_legacy_grab_nic_access(priv)
-#endif
-
-static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
-
- IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
- _iwl_legacy_release_nic_access(priv);
-}
-#define iwl_release_nic_access(priv) \
- __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_release_nic_access(priv) \
- _iwl_legacy_release_nic_access(priv)
-#endif
-
-static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- return _iwl_legacy_read32(priv, reg);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg)
-{
- u32 value = _iwl_legacy_read_direct32(priv, reg);
- IWL_DEBUG_IO(priv,
- "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
- f, l);
- return value;
-}
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-#else
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = _iwl_legacy_read_direct32(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-
-}
-#endif
-
-static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
- u32 reg, u32 value)
-{
- _iwl_legacy_write32(priv, reg, value);
-}
-static inline void
-iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, reg, value);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
- u32 reg, u32 len, u32 *values)
-{
- u32 count = sizeof(u32);
-
- if ((priv != NULL) && (values != NULL)) {
- for (; 0 < len; len -= count, reg += count, values++)
- iwl_legacy_write_direct32(priv, reg, *values);
- }
-}
-
-static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
- u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
- struct iwl_priv *priv,
- u32 addr, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
-
- if (unlikely(ret == -ETIMEDOUT))
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
- "timedout - %s %d\n", addr, mask, f, l);
- else
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
- "- %s %d\n", addr, mask, ret, f, l);
- return ret;
-}
-#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
-__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
-#else
-#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
-#endif
-
-static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- rmb();
- return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
-}
-static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return val;
-}
-
-static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
- u32 addr, u32 val)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x0000FFFF) | (3 << 24)));
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
-}
-
-static inline void
-iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_prph(priv, addr, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
-_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
-
-static inline void
-iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_prph(priv, reg, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
-_iwl_legacy_write_prph(priv, reg, \
- ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
-
-static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
- u32 bits, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
- *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- _iwl_legacy_write_prph(priv, reg, (val & ~mask));
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
-{
- unsigned long reg_flags;
- u32 value;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
- rmb();
- value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-static inline void
-iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void
-iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
- u32 len, u32 *values)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- for (; 0 < len; len -= sizeof(u32), values++)
- _iwl_legacy_write_direct32(priv,
- HBUS_TARG_MEM_WDAT, *values);
-
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a474c5..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* default: led_mode 0 = IWL_LED_DEFAULT; blinking modes use the index table below */
-static int led_mode;
-module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
- "1=On(RF On)/Off(RF Off), 2=blinking");
-
-/* Throughput OFF time(ms) ON time (ms)
- * >300 25 25
- * >200 to 300 40 40
- * >100 to 200 55 55
- * >70 to 100 65 65
- * >50 to 70 75 75
- * >20 to 50 85 85
- * >10 to 20 95 95
- * >5 to 10 110 110
- * >1 to 5 130 130
- * >0 to 1 167 167
- * <=0 SOLID ON
- */
-static const struct ieee80211_tpt_blink iwl_blink[] = {
- { .throughput = 0, .blink_time = 334 },
- { .throughput = 1 * 1024 - 1, .blink_time = 260 },
- { .throughput = 5 * 1024 - 1, .blink_time = 220 },
- { .throughput = 10 * 1024 - 1, .blink_time = 190 },
- { .throughput = 20 * 1024 - 1, .blink_time = 170 },
- { .throughput = 50 * 1024 - 1, .blink_time = 150 },
- { .throughput = 70 * 1024 - 1, .blink_time = 130 },
- { .throughput = 100 * 1024 - 1, .blink_time = 110 },
- { .throughput = 200 * 1024 - 1, .blink_time = 80 },
- { .throughput = 300 * 1024 - 1, .blink_time = 50 },
-};
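-/*
- * Rough cross-check of the throughput table above against iwl_blink[]
- * (illustrative arithmetic only): each .blink_time is the full blink
- * period, i.e. the ON + OFF columns of the table.  For example, ">300"
- * maps to 25 + 25 = 50 ms (.throughput = 300 * 1024 - 1, .blink_time = 50)
- * and ">0 to 1" maps to 167 + 167 = 334 ms (.throughput = 0,
- * .blink_time = 334); the trigger presumably splits each period into
- * equal on/off halves, matching the table.
- */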
-
-/*
- * Adjust the LED blink rate to compensate for the MAC clock difference on
- * each HW.  LED blink rate analysis showed an average deviation of 0% on
- * 3945 and 5% on 4965 HW.
- * The LED on/off time must be compensated per HW according to the deviation
- * to achieve the desired LED frequency.
- * The calculation is: (100-averageDeviation)/100 * blinkTime
- * For code efficiency the calculation will be:
- * compensation = (100 - averageDeviation) * 64 / 100
- * NewBlinkTime = (compensation * BlinkTime) / 64
- */
-static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
- u8 time, u16 compensation)
-{
- if (!compensation) {
- IWL_ERR(priv, "undefined blink compensation: "
- "use pre-defined blinking time\n");
- return time;
- }
-
- return (u8)((time * compensation) >> 6);
-}
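-/*
- * Worked example of the compensation above (illustrative numbers only):
- * for the 4965's ~5% average deviation the formula gives
- * compensation = (100 - 5) * 64 / 100, i.e. about 61, so a nominal
- * 110 ms blink becomes (110 * 61) >> 6 = 104 ms, roughly 110 * 0.95.
- */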
-
-/* Set led pattern command */
-static int iwl_legacy_led_cmd(struct iwl_priv *priv,
- unsigned long on,
- unsigned long off)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .interval = IWL_DEF_LED_INTRVL
- };
- int ret;
-
- if (!test_bit(STATUS_READY, &priv->status))
- return -EBUSY;
-
- if (priv->blink_on == on && priv->blink_off == off)
- return 0;
-
- if (off == 0) {
- /* led is SOLID_ON */
- on = IWL_LED_SOLID;
- }
-
- IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
- priv->cfg->base_params->led_compensation);
- led_cmd.on = iwl_legacy_blink_compensation(priv, on,
- priv->cfg->base_params->led_compensation);
- led_cmd.off = iwl_legacy_blink_compensation(priv, off,
- priv->cfg->base_params->led_compensation);
-
- ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
- if (!ret) {
- priv->blink_on = on;
- priv->blink_off = off;
- }
- return ret;
-}
-
-static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
- unsigned long on = 0;
-
- if (brightness > 0)
- on = IWL_LED_SOLID;
-
- iwl_legacy_led_cmd(priv, on, 0);
-}
-
-static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-
- return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
-}
-
-void iwl_legacy_leds_init(struct iwl_priv *priv)
-{
- int mode = led_mode;
- int ret;
-
- if (mode == IWL_LED_DEFAULT)
- mode = priv->cfg->led_mode;
-
- priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
- wiphy_name(priv->hw->wiphy));
- priv->led.brightness_set = iwl_legacy_led_brightness_set;
- priv->led.blink_set = iwl_legacy_led_blink_set;
- priv->led.max_brightness = 1;
-
- switch (mode) {
- case IWL_LED_DEFAULT:
- WARN_ON(1);
- break;
- case IWL_LED_BLINK:
- priv->led.default_trigger =
- ieee80211_create_tpt_led_trigger(priv->hw,
- IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
- iwl_blink, ARRAY_SIZE(iwl_blink));
- break;
- case IWL_LED_RF_STATE:
- priv->led.default_trigger =
- ieee80211_get_radio_led_name(priv->hw);
- break;
- }
-
- ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
- if (ret) {
- kfree(priv->led.name);
- return;
- }
-
- priv->led_registered = true;
-}
-EXPORT_SYMBOL(iwl_legacy_leds_init);
-
-void iwl_legacy_leds_exit(struct iwl_priv *priv)
-{
- if (!priv->led_registered)
- return;
-
- led_classdev_unregister(&priv->led);
- kfree(priv->led.name);
-}
-EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f70f79..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_leds_h__
-#define __iwl_legacy_leds_h__
-
-
-struct iwl_priv;
-
-#define IWL_LED_SOLID 11
-#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
-
-#define IWL_LED_ACTIVITY (0<<1)
-#define IWL_LED_LINK (1<<1)
-
-/*
- * LED mode
- * IWL_LED_DEFAULT: use device default
- * IWL_LED_RF_STATE: turn LED on/off based on RF state
- * LED ON = RF ON
- * LED OFF = RF OFF
- * IWL_LED_BLINK: adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
- IWL_LED_DEFAULT,
- IWL_LED_RF_STATE,
- IWL_LED_BLINK,
-};
-
-void iwl_legacy_leds_init(struct iwl_priv *priv);
-void iwl_legacy_leds_exit(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e481eb..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_rs_h__
-#define __iwl_legacy_rs_h__
-
-struct iwl_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
- u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
-};
-
-struct iwl3945_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
- u8 table_rs_index; /* index in rate scale table cmd */
- u8 prev_table_rs; /* prev in rate table cmd */
-};
-
-
-/*
- * These serve as indexes into
- * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
- */
-enum {
- IWL_RATE_1M_INDEX = 0,
- IWL_RATE_2M_INDEX,
- IWL_RATE_5M_INDEX,
- IWL_RATE_11M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX,
- IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX,
- IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX,
- IWL_RATE_54M_INDEX,
- IWL_RATE_60M_INDEX,
- IWL_RATE_COUNT,
- IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
- IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
- IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
- IWL_RATE_INVALID = IWL_RATE_COUNT,
-};
-
-enum {
- IWL_RATE_6M_INDEX_TABLE = 0,
- IWL_RATE_9M_INDEX_TABLE,
- IWL_RATE_12M_INDEX_TABLE,
- IWL_RATE_18M_INDEX_TABLE,
- IWL_RATE_24M_INDEX_TABLE,
- IWL_RATE_36M_INDEX_TABLE,
- IWL_RATE_48M_INDEX_TABLE,
- IWL_RATE_54M_INDEX_TABLE,
- IWL_RATE_1M_INDEX_TABLE,
- IWL_RATE_2M_INDEX_TABLE,
- IWL_RATE_5M_INDEX_TABLE,
- IWL_RATE_11M_INDEX_TABLE,
- IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
-};
-
-enum {
- IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
- IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
- IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
- IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
- IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
-};
-
-/* #define vs. enum to keep from defaulting to 'large integer' */
-#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
-#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
-#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
-#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
-#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
-#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
-#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
-#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
-#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
-#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
-#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
-#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
-#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-
-/* uCode API values for legacy bit rates, both OFDM and CCK */
-enum {
- IWL_RATE_6M_PLCP = 13,
- IWL_RATE_9M_PLCP = 15,
- IWL_RATE_12M_PLCP = 5,
- IWL_RATE_18M_PLCP = 7,
- IWL_RATE_24M_PLCP = 9,
- IWL_RATE_36M_PLCP = 11,
- IWL_RATE_48M_PLCP = 1,
- IWL_RATE_54M_PLCP = 3,
- IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
- IWL_RATE_1M_PLCP = 10,
- IWL_RATE_2M_PLCP = 20,
- IWL_RATE_5M_PLCP = 55,
- IWL_RATE_11M_PLCP = 110,
- /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
-};
-
-/* uCode API values for OFDM high-throughput (HT) bit rates */
-enum {
- IWL_RATE_SISO_6M_PLCP = 0,
- IWL_RATE_SISO_12M_PLCP = 1,
- IWL_RATE_SISO_18M_PLCP = 2,
- IWL_RATE_SISO_24M_PLCP = 3,
- IWL_RATE_SISO_36M_PLCP = 4,
- IWL_RATE_SISO_48M_PLCP = 5,
- IWL_RATE_SISO_54M_PLCP = 6,
- IWL_RATE_SISO_60M_PLCP = 7,
- IWL_RATE_MIMO2_6M_PLCP = 0x8,
- IWL_RATE_MIMO2_12M_PLCP = 0x9,
- IWL_RATE_MIMO2_18M_PLCP = 0xa,
- IWL_RATE_MIMO2_24M_PLCP = 0xb,
- IWL_RATE_MIMO2_36M_PLCP = 0xc,
- IWL_RATE_MIMO2_48M_PLCP = 0xd,
- IWL_RATE_MIMO2_54M_PLCP = 0xe,
- IWL_RATE_MIMO2_60M_PLCP = 0xf,
- IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-};
-
-/* MAC header values for bit rates */
-enum {
- IWL_RATE_6M_IEEE = 12,
- IWL_RATE_9M_IEEE = 18,
- IWL_RATE_12M_IEEE = 24,
- IWL_RATE_18M_IEEE = 36,
- IWL_RATE_24M_IEEE = 48,
- IWL_RATE_36M_IEEE = 72,
- IWL_RATE_48M_IEEE = 96,
- IWL_RATE_54M_IEEE = 108,
- IWL_RATE_60M_IEEE = 120,
- IWL_RATE_1M_IEEE = 2,
- IWL_RATE_2M_IEEE = 4,
- IWL_RATE_5M_IEEE = 11,
- IWL_RATE_11M_IEEE = 22,
-};
-
-#define IWL_CCK_BASIC_RATES_MASK \
- (IWL_RATE_1M_MASK | \
- IWL_RATE_2M_MASK)
-
-#define IWL_CCK_RATES_MASK \
- (IWL_CCK_BASIC_RATES_MASK | \
- IWL_RATE_5M_MASK | \
- IWL_RATE_11M_MASK)
-
-#define IWL_OFDM_BASIC_RATES_MASK \
- (IWL_RATE_6M_MASK | \
- IWL_RATE_12M_MASK | \
- IWL_RATE_24M_MASK)
-
-#define IWL_OFDM_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_RATE_9M_MASK | \
- IWL_RATE_18M_MASK | \
- IWL_RATE_36M_MASK | \
- IWL_RATE_48M_MASK | \
- IWL_RATE_54M_MASK)
-
-#define IWL_BASIC_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_CCK_BASIC_RATES_MASK)
-
-#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
-#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
-
-#define IWL_INVALID_VALUE -1
-
-#define IWL_MIN_RSSI_VAL -100
-#define IWL_MAX_RSSI_VAL 0
-
-/* These values specify how many Tx frame attempts before
- * searching for a new modulation mode */
-#define IWL_LEGACY_FAILURE_LIMIT 160
-#define IWL_LEGACY_SUCCESS_LIMIT 480
-#define IWL_LEGACY_TABLE_COUNT 160
-
-#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
-#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
-#define IWL_NONE_LEGACY_TABLE_COUNT 1500
-
-/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
-#define IWL_RS_GOOD_RATIO 12800 /* 100% */
-#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
-#define IWL_RATE_HIGH_TH 10880 /* 85% */
-#define IWL_RATE_INCREASE_TH 6400 /* 50% */
-#define IWL_RATE_DECREASE_TH 1920 /* 15% */
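-/*
- * Example of the 128-scaled ratios above (illustrative numbers only):
- * 62 ACKed frames out of 64 attempts gives
- * success_ratio = 128 * 100 * 62 / 64 = 12400, which is above
- * IWL_RATE_HIGH_TH (10880 = 85%) but below IWL_RS_GOOD_RATIO (12800).
- */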
-
-/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1 0
-#define IWL_LEGACY_SWITCH_ANTENNA2 1
-#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2_AB 3
-#define IWL_LEGACY_SWITCH_MIMO2_AC 4
-#define IWL_LEGACY_SWITCH_MIMO2_BC 5
-
-/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1 0
-#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2_AB 2
-#define IWL_SISO_SWITCH_MIMO2_AC 3
-#define IWL_SISO_SWITCH_MIMO2_BC 4
-#define IWL_SISO_SWITCH_GI 5
-
-/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1 0
-#define IWL_MIMO2_SWITCH_ANTENNA2 1
-#define IWL_MIMO2_SWITCH_SISO_A 2
-#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_SISO_C 4
-#define IWL_MIMO2_SWITCH_GI 5
-
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
-
-#define IWL_ACTION_LIMIT 3 /* # possible actions */
-
-#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
-
-/* load per tid defines for A-MPDU activation */
-#define IWL_AGG_TPT_THREHOLD 0
-#define IWL_AGG_LOAD_THRESHOLD 10
-#define IWL_AGG_ALL_TID 0xff
-#define TID_QUEUE_CELL_SPACING 50 /*mS */
-#define TID_QUEUE_MAX_SIZE 20
-#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
-
-#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
-#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
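-/*
- * With the values above, the traffic-load history spans
- * TID_MAX_TIME_DIFF = (20 - 1) * 50 = 950 ms, so packet counts older
- * than roughly one second fall out of the circular buffer.
- */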
-
-extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
-
-enum iwl_table_type {
- LQ_NONE,
- LQ_G, /* legacy types */
- LQ_A,
- LQ_SISO, /* high-throughput types */
- LQ_MIMO2,
- LQ_MAX,
-};
-
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
-
-#define ANT_NONE 0x0
-#define ANT_A BIT(0)
-#define ANT_B BIT(1)
-#define ANT_AB (ANT_A | ANT_B)
-#define ANT_C BIT(2)
-#define ANT_AC (ANT_A | ANT_C)
-#define ANT_BC (ANT_B | ANT_C)
-#define ANT_ABC (ANT_AB | ANT_C)
-
-#define IWL_MAX_MCS_DISPLAY_SIZE 12
-
-struct iwl_rate_mcs_info {
- char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
- char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
-};
-
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
- u64 data; /* bitmap of successful frames */
- s32 success_counter; /* number of frames successful */
- s32 success_ratio; /* per-cent * 128 */
- s32 counter; /* number of frames attempted */
- s32 average_tpt; /* success ratio * expected throughput */
- unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
- enum iwl_table_type lq_type;
- u8 ant_type;
- u8 is_SGI; /* 1 = short guard interval */
- u8 is_ht40; /* 1 = 40 MHz channel width */
- u8 is_dup; /* 1 = duplicated data streams */
- u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-	u8 max_search;	/* maximum number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
- u32 current_rate; /* rate_n_flags, uCode API format */
- struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
- unsigned long time_stamp; /* age of the oldest statistics */
- u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
- * slice */
- u32 total; /* total num of packets during the
- * last TID_MAX_TIME_DIFF */
-	u8 queue_count;	/* number of queues that have
-			 * been used since the last cleanup */
- u8 head; /* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
- u8 active_tbl; /* index of active table, range 0-1 */
- u8 enable_counter; /* indicates HT mode */
- u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
- u8 search_better_tbl; /* 1: currently trying alternate mode */
- s32 last_tpt;
-
- /* The following determine when to search for a new mode */
- u32 table_count_limit;
- u32 max_failure_limit; /* # failed frames before new search */
- u32 max_success_limit; /* # successful frames before new search */
- u32 table_count;
- u32 total_failed; /* total failed frames, any/all rates */
- u32 total_success; /* total successful frames, any/all rates */
- u64 flush_timer; /* time staying in mode before new search */
-
- u8 action_counter; /* # mode-switch actions tried */
- u8 is_green;
- u8 is_dup;
- enum ieee80211_band band;
-
- /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
- u32 supp_rates;
- u16 active_legacy_rate;
- u16 active_siso_rate;
- u16 active_mimo2_rate;
- s8 max_rate_idx; /* Max rate set by user */
- u8 missed_rate_counter;
-
- struct iwl_link_quality_cmd lq;
- struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
- u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_scale_table_file;
- struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
- struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
- u32 dbg_fixed_rate;
-#endif
- struct iwl_priv *drv;
-
- /* used to be in sta_info */
- int last_txrate_idx;
- /* last tx rate_n_flags */
- u32 last_rate_n_flags;
- /* packets destined for this STA are aggregated */
- u8 is_agg;
-};
-
-static inline u8 iwl4965_num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
-static inline u8 iwl4965_first_antenna(u8 mask)
-{
- if (mask & ANT_A)
- return ANT_A;
- if (mask & ANT_B)
- return ANT_B;
- return ANT_C;
-}
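-/*
- * For example, iwl4965_num_of_ant(ANT_AB) == 2 and
- * iwl4965_first_antenna(ANT_BC) == ANT_B.
- */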
-
-
-/**
- * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
- *
- * The specific throughput table used is based on the type of network
- * the station is associated with, including A, B, G, and G w/ TGG protection
- */
-extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
-
-/* Initialize station's rate scaling information after adding station */
-extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-
-/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
- *
- * Since the rate control algorithm is hardware specific, there is no need
- * or reason to place it as a stand alone module. The driver can call
- * iwl_rate_control_register in order to register the rate control callbacks
- * with the mac80211 subsystem. This should be performed prior to calling
- * ieee80211_register_hw
- *
- */
-extern int iwl4965_rate_control_register(void);
-extern int iwl3945_rate_control_register(void);
-
-/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
- *
- * This should be called after calling ieee80211_unregister_hw, but before
- * the driver is unloaded.
- */
-extern void iwl4965_rate_control_unregister(void);
-extern void iwl3945_rate_control_unregister(void);
-
-#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d6d6c..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-
-/*
- * Setting power level allows the card to go to sleep when not busy.
- *
- * We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
- */
-
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct iwl_power_vec_entry {
- struct iwl_powertable_cmd cmd;
- u8 no_dtim; /* number of skip dtim */
-};
-
-static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
- struct iwl_powertable_cmd *cmd)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- if (priv->power_data.pci_pm)
- cmd->flags |= IWL_POWER_PCI_PM_MSK;
-
- IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
-}
-
-static int
-iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
- IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
- IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
- le32_to_cpu(cmd->tx_data_timeout));
- IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(priv,
- "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
- le32_to_cpu(cmd->sleep_interval[0]),
- le32_to_cpu(cmd->sleep_interval[1]),
- le32_to_cpu(cmd->sleep_interval[2]),
- le32_to_cpu(cmd->sleep_interval[3]),
- le32_to_cpu(cmd->sleep_interval[4]));
-
- return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
- sizeof(struct iwl_powertable_cmd), cmd);
-}
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force)
-{
- int ret;
- bool update_chains;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Don't update the RX chain when chain noise calibration is running */
- update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
- priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
- if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
- return 0;
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
-	/* scan complete uses sleep_cmd_next, so it needs to be kept updated */
- memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
- IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
- return 0;
- }
-
- if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
- set_bit(STATUS_POWER_PMI, &priv->status);
-
- ret = iwl_legacy_set_power(priv, cmd);
- if (!ret) {
- if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
- clear_bit(STATUS_POWER_PMI, &priv->status);
-
- if (priv->cfg->ops->lib->update_chain_flags && update_chains)
- priv->cfg->ops->lib->update_chain_flags(priv);
- else if (priv->cfg->ops->lib->update_chain_flags)
- IWL_DEBUG_POWER(priv,
- "Cannot update the power, chain noise "
- "calibration running: %d\n",
- priv->chain_noise_data.state);
-
- memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
- } else
-		IWL_ERR(priv, "set power fail, ret = %d\n", ret);
-
- return ret;
-}
-
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
-{
- struct iwl_powertable_cmd cmd;
-
- iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
- return iwl_legacy_power_set_mode(priv, &cmd, force);
-}
-EXPORT_SYMBOL(iwl_legacy_power_update_mode);
-
-/* initialize to default */
-void iwl_legacy_power_initialize(struct iwl_priv *priv)
-{
- u16 lctl = iwl_legacy_pcie_link_ctl(priv);
-
- priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
-
- priv->power_data.debug_sleep_level_override = -1;
-
- memset(&priv->power_data.sleep_cmd, 0,
- sizeof(priv->power_data.sleep_cmd));
-}
-EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36acdc4..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#ifndef __iwl_legacy_power_setting_h__
-#define __iwl_legacy_power_setting_h__
-
-#include "iwl-commands.h"
-
-enum iwl_power_level {
- IWL_POWER_INDEX_1,
- IWL_POWER_INDEX_2,
- IWL_POWER_INDEX_3,
- IWL_POWER_INDEX_4,
- IWL_POWER_INDEX_5,
- IWL_POWER_NUM
-};
-
-struct iwl_power_mgr {
- struct iwl_powertable_cmd sleep_cmd;
- struct iwl_powertable_cmd sleep_cmd_next;
- int debug_sleep_level_override;
- bool pci_pm;
-};
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force);
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
-void iwl_legacy_power_initialize(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index 9b5d0abe8be..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot into which the firmware can
- * place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_legacy_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
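-/*
- * For example, with READ = 10 the rules above mean the queue is empty
- * when WRITE = 9 and full when WRITE = 10.
- */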
-
-/**
- * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
-{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
-	/* keep some headroom so a full queue is not mistaken for an empty one */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
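-/*
- * Worked example (assuming the usual RX_QUEUE_SIZE of 256): with
- * q->read = 10 and q->write = 200, s = 10 - 200 + 256 = 66, minus the
- * 2-slot guard = 64 free slots.
- */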
-
-/**
- * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void
-iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q)
-{
- unsigned long flags;
- u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
- u32 reg;
-
- spin_lock_irqsave(&q->lock, flags);
-
- if (q->need_update == 0)
- goto exit_unlock;
-
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
- }
-
- q->need_update = 0;
-
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
-
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
- GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
-
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- rxq->need_update = 0;
- return 0;
-
-err_rb:
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
-err_bd:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
-
-
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
-
-/*
- * returns non-zero if packet should be dropped
- */
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats)
-{
- u16 fc = le16_to_cpu(hdr->frame_control);
-
- /*
- * All contexts have the same setting here due to it being
- * a module parameter, so OK to check any context.
- */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
- RXON_FILTER_DIS_DECRYPT_MSK)
- return 0;
-
- if (!(fc & IEEE80211_FCTL_PROTECTED))
- return 0;
-
- IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
- switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
- case RX_RES_STATUS_SEC_TYPE_TKIP:
-		/* The uCode got a bad phase-1 key, so it pushes the packet up;
-		 * decryption will be done in SW. */
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_KEY_TTAK)
- break;
-
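-		/* fall through - otherwise continue with the WEP/CCMP
-		 * status checks below */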
- case RX_RES_STATUS_SEC_TYPE_WEP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_ICV_MIC) {
-			/* bad ICV, the packet is destroyed since the
-			 * decryption is done in place, drop it */
- IWL_DEBUG_RX(priv, "Packet destroyed\n");
- return -1;
- }
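-		/* fall through - a successfully decrypted frame is
-		 * flagged by the check below */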
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_DECRYPT_OK) {
- IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
- stats->flag |= RX_FLAG_DECRYPTED;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index a6b5222fc59..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
- * sending probe req. This should be set long enough to hear probe responses
- * from more than one AP. */
-#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
-#define IWL_ACTIVE_DWELL_TIME_52 (20)
-
-#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
-#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
-
-/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
- * Must be set longer than active dwell time.
- * For the most reliable scan, set > AP beacon interval (typically 100msec). */
-#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
-#define IWL_PASSIVE_DWELL_TIME_52 (10)
-#define IWL_PASSIVE_DWELL_BASE (100)
-#define IWL_CHANNEL_TUNE_TIME 5
-
-static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
-{
- int ret;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_ABORT_CMD,
- .flags = CMD_WANT_SKB,
- };
-
- /* Exit instantly with error when device is not ready
- * to receive scan abort command or it does not perform
- * hardware scan currently */
- if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->status) ||
- test_bit(STATUS_FW_ERROR, &priv->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EIO;
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->u.status != CAN_ABORT_STATUS) {
- /* The scan abort will return 1 for success or
- * 2 for "failure". A failure condition can be
- * due to simply not being in an active scan which
-		 * can occur if we send the scan abort before
-		 * the microcode has notified us that a scan is
- * completed. */
- IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
- ret = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
- return ret;
-}
-
-static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
-{
- /* check if scan was requested from mac80211 */
- if (priv->scan_request) {
- IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
- ieee80211_scan_completed(priv->hw, aborted);
- }
-
- priv->scan_vif = NULL;
- priv->scan_request = NULL;
-}
-
-void iwl_legacy_force_scan_end(struct iwl_priv *priv)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
- return;
- }
-
- IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->status);
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- iwl_legacy_complete_scan(priv, true);
-}
-
-static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
- return;
- }
-
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
- return;
- }
-
- ret = iwl_legacy_send_scan_abort(priv);
- if (ret) {
- IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
- iwl_legacy_force_scan_end(priv);
- } else
-		IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
-}
-
-/**
- * iwl_scan_cancel - Cancel any currently executing HW scan
- */
-int iwl_legacy_scan_cancel(struct iwl_priv *priv)
-{
- IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
- queue_work(priv->workqueue, &priv->abort_scan);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel);
-
-/**
- * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
- * @ms: amount of time to wait (in milliseconds) for scan to abort
- *
- */
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(ms);
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
-
- iwl_legacy_do_scan_abort(priv);
-
- while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->status))
- break;
- msleep(20);
- }
-
- return test_bit(STATUS_SCAN_HW, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
-
-/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanreq_notification *notif =
- (struct iwl_scanreq_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
-#endif
-}
-
-/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanstart_notification *notif =
- (struct iwl_scanstart_notification *)pkt->u.raw;
- priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
- IWL_DEBUG_SCAN(priv, "Scan start: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- notif->status, notif->beacon_timer);
-}
-
-/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanresults_notification *notif =
- (struct iwl_scanresults_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan ch.res: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
-#endif
-}
-
-/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
-#endif
-
- IWL_DEBUG_SCAN(priv,
- "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
- scan_notif->scanned_channels,
- scan_notif->tsf_low,
- scan_notif->tsf_high, scan_notif->status);
-
- /* The HW is no longer scanning */
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
- (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
- jiffies_to_msecs(jiffies - priv->scan_start));
-
- queue_work(priv->workqueue, &priv->scan_completed);
-}
-
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
-{
- /* scan handlers */
- priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
- priv->rx_handlers[SCAN_START_NOTIFICATION] =
- iwl_legacy_rx_scan_start_notif;
- priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
- iwl_legacy_rx_scan_results_notif;
- priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
- iwl_legacy_rx_scan_complete_notif;
-}
-EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
-
-inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes)
-{
- if (band == IEEE80211_BAND_5GHZ)
- return IWL_ACTIVE_DWELL_TIME_52 +
- IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
- else
- return IWL_ACTIVE_DWELL_TIME_24 +
- IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
-}
-EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
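-/*
- * Example with the constants above: a 2.4 GHz active scan carrying two
- * probe requests dwells 30 + 3 * (2 + 1) = 39 ms per channel, while the
- * same scan on 5.2 GHz dwells 20 + 2 * 3 = 26 ms.
- */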
-
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx;
- u16 passive = (band == IEEE80211_BAND_2GHZ) ?
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
-
- if (iwl_legacy_is_any_associated(priv)) {
- /*
- * If we're associated, we clamp the maximum passive
- * dwell time to be 98% of the smallest beacon interval
- * (minus 2 * channel tune time)
- */
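-		/*
-		 * For example, a 100 TU beacon interval clamps the passive
-		 * dwell to 100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88 ms,
-		 * below the 2.4 GHz default of 100 + 20 = 120 ms.
-		 */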
- for_each_context(priv, ctx) {
- u16 value;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- continue;
- value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
- if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- passive = min(value, passive);
- }
- }
-
- return passive;
-}
-EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
-
-void iwl_legacy_init_scan_params(struct iwl_priv *priv)
-{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
- if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
-}
-EXPORT_SYMBOL(iwl_legacy_init_scan_params);
-
-static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (WARN_ON(!priv->cfg->ops->utils->request_scan))
- return -EOPNOTSUPP;
-
- cancel_delayed_work(&priv->scan_check);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Request scan called when driver not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- IWL_DEBUG_SCAN(priv,
- "Multiple concurrent scan requests in parallel.\n");
- return -EBUSY;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
- return -EBUSY;
- }
-
- IWL_DEBUG_SCAN(priv, "Starting scan...\n");
-
- set_bit(STATUS_SCANNING, &priv->status);
- priv->scan_start = jiffies;
-
- ret = priv->cfg->ops->utils->request_scan(priv, vif);
- if (ret) {
- clear_bit(STATUS_SCANNING, &priv->status);
- return ret;
- }
-
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IWL_SCAN_CHECK_WATCHDOG);
-
- return 0;
-}
-
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (req->n_channels == 0)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- /* mac80211 will only ask for one band at a time */
- priv->scan_request = req;
- priv->scan_vif = vif;
- priv->scan_band = req->channels[0]->band;
-
- ret = iwl_legacy_scan_initiate(priv, vif);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-out_unlock:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
-
-static void iwl_legacy_bg_scan_check(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, scan_check.work);
-
- IWL_DEBUG_SCAN(priv, "Scan check work\n");
-
-	/* If we got here, the firmware did not finish the scan and is most
-	 * likely in bad shape, so don't bother sending an abort command;
-	 * just force scan complete to mac80211 */
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16
-iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ies, int ie_len, int left)
-{
- int len = 0;
- u8 *pos = NULL;
-
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= 24;
- if (left < 0)
- return 0;
-
- frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
- memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
- frame->seq_ctrl = 0;
-
- len += 24;
-
- /* ...next IE... */
- pos = &frame->u.probe_req.variable[0];
-
-	/* fill in our zero-length (wildcard) SSID IE */
- left -= 2;
- if (left < 0)
- return 0;
- *pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
-
- if (WARN_ON(left < ie_len))
- return len;
-
- if (ies && ie_len) {
- memcpy(pos, ies, ie_len);
- len += ie_len;
- }
-
- return (u16)len;
-}
-EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
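-/*
- * Example of the length bookkeeping above (illustrative numbers only):
- * with left = 100 and ie_len = 50, the 24-byte header and 2-byte
- * wildcard SSID IE leave 74 bytes, so the IEs fit and the function
- * returns 24 + 2 + 50 = 76.
- */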
-
-static void iwl_legacy_bg_abort_scan(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
-
- IWL_DEBUG_SCAN(priv, "Abort scan work\n");
-
-	/* Keep the scan_check work queued in case the firmware does not
-	 * report back with a scan-completed notification */
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl_legacy_bg_scan_completed(struct work_struct *work)
-{
- struct iwl_priv *priv =
- container_of(work, struct iwl_priv, scan_completed);
- bool aborted;
-
- IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
- cancel_delayed_work(&priv->scan_check);
-
- mutex_lock(&priv->mutex);
-
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- if (aborted)
- IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
- goto out_settings;
- }
-
- iwl_legacy_complete_scan(priv, aborted);
-
-out_settings:
- /* Can we still talk to firmware ? */
- if (!iwl_legacy_is_ready_rf(priv))
- goto out;
-
- /*
- * We do not commit power settings while scan is pending,
- * do it now if the settings changed.
- */
- iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-
- priv->cfg->ops->utils->post_scan(priv);
-
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
-{
- INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
- INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
- INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
-
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->abort_scan);
- cancel_work_sync(&priv->scan_completed);
-
- if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
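
The teardown just above keys off the return value of cancel_delayed_work_sync(): it is true only when the scan_check watchdog was still pending, which is exactly the case where the firmware never reported completion and the driver must force the scan to end itself. A hedged sketch of the same watchdog pattern, with a hypothetical foo_priv in place of the driver's structures:

/*
 * Sketch only: the 'foo' names are made up; the workqueue and mutex
 * calls are the standard kernel APIs used by the code above.
 */
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct foo_priv {
	struct mutex mutex;
	struct delayed_work scan_check;
};

static void foo_bg_scan_check(struct work_struct *work)
{
	struct foo_priv *priv =
		container_of(work, struct foo_priv, scan_check.work);

	/* watchdog fired: firmware never reported completion */
	mutex_lock(&priv->mutex);
	/* ... force scan end and notify mac80211 here ... */
	mutex_unlock(&priv->mutex);
}

static void foo_setup_scan_work(struct foo_priv *priv)
{
	INIT_DELAYED_WORK(&priv->scan_check, foo_bg_scan_check);
}

static void foo_cancel_scan_work(struct foo_priv *priv)
{
	/* true only if the watchdog was still pending, i.e. never ran */
	if (cancel_delayed_work_sync(&priv->scan_check)) {
		mutex_lock(&priv->mutex);
		/* ... force scan end here as well ... */
		mutex_unlock(&priv->mutex);
	}
}
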
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a472310..85fe48e520f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
*
*****************************************************************************/
-#ifndef __iwl_legacy_spectrum_h__
-#define __iwl_legacy_spectrum_h__
+#ifndef __il_spectrum_h__
+#define __il_spectrum_h__
enum { /* ieee80211_basic_report.map */
IEEE80211_BASIC_MAP_BSS = (1 << 0),
IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
index 66f0fb2bbe0..75fe315f66b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -31,81 +31,82 @@
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/lockdep.h>
+#include <linux/export.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
-/* priv->sta_lock must be held */
-static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+/* il->sta_lock must be held */
+static void il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv,
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+ IL_ERR(
"ACTIVATE a non DRIVER active station id %u addr %pM\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
+ sta_id, il->stations[sta_id].sta.sta.addr);
- if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_ASSOC(priv,
+ if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+ D_ASSOC(
"STA id %u addr %pM already present"
" in uCode (according to driver)\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
+ sta_id, il->stations[sta_id].sta.sta.addr);
} else {
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
+ il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+ D_ASSOC("Added STA id %u addr %pM to uCode\n",
+ sta_id, il->stations[sta_id].sta.sta.addr);
}
}
-static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt,
+static int il_process_add_sta_resp(struct il_priv *il,
+ struct il_addsta_cmd *addsta,
+ struct il_rx_pkt *pkt,
bool sync)
{
u8 sta_id = addsta->sta.sta_id;
unsigned long flags;
int ret = -EIO;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_ADD_STA (0x%08X)\n",
pkt->hdr.flags);
return ret;
}
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ D_INFO("Processing response for adding station %u\n",
sta_id);
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&il->sta_lock, flags);
switch (pkt->u.add_sta.status) {
case ADD_STA_SUCCESS_MSK:
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- iwl_legacy_sta_ucode_activate(priv, sta_id);
+ D_INFO("C_ADD_STA PASSED\n");
+ il_sta_ucode_activate(il, sta_id);
ret = 0;
break;
- case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ case ADD_STA_NO_ROOM_IN_TBL:
+ IL_ERR("Adding station %d failed, no room in table.\n",
sta_id);
break;
case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv,
+ IL_ERR(
"Adding station %d failed, no block ack resource.\n",
sta_id);
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+ IL_ERR("Attempting to modify non-existing station %d\n",
sta_id);
break;
default:
- IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ D_ASSOC("Received C_ADD_STA:(0x%08X)\n",
pkt->u.add_sta.status);
break;
}
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
+ D_INFO("%s station id %u addr %pM\n",
+ il->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
+ sta_id, il->stations[sta_id].sta.sta.addr);
/*
* XXX: The MAC address in the command buffer is often changed from
@@ -115,68 +116,68 @@ static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
* issue has not yet been resolved and this debugging is left to
* observe the problem.
*/
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
+ D_INFO("%s station according to cmd buffer %pM\n",
+ il->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
return ret;
}
-static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+static void il_add_sta_callback(struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
{
- struct iwl_legacy_addsta_cmd *addsta =
- (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
+ struct il_addsta_cmd *addsta =
+ (struct il_addsta_cmd *)cmd->cmd.payload;
- iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
+ il_process_add_sta_resp(il, addsta, pkt, false);
}
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags)
+int il_send_add_sta(struct il_priv *il,
+ struct il_addsta_cmd *sta, u8 flags)
{
- struct iwl_rx_packet *pkt = NULL;
+ struct il_rx_pkt *pkt = NULL;
int ret = 0;
u8 data[sizeof(*sta)];
- struct iwl_host_cmd cmd = {
- .id = REPLY_ADD_STA,
+ struct il_host_cmd cmd = {
+ .id = C_ADD_STA,
.flags = flags,
.data = data,
};
u8 sta_id __maybe_unused = sta->sta.sta_id;
- IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ D_INFO("Adding sta %u (%pM) %ssynchronously\n",
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
if (flags & CMD_ASYNC)
- cmd.callback = iwl_legacy_add_sta_callback;
+ cmd.callback = il_add_sta_callback;
else {
cmd.flags |= CMD_WANT_SKB;
might_sleep();
}
- cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
- ret = iwl_legacy_send_cmd(priv, &cmd);
+ cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ ret = il_send_cmd(il, &cmd);
if (ret || (flags & CMD_ASYNC))
return ret;
if (ret == 0) {
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ ret = il_process_add_sta_resp(il, sta, pkt, true);
}
- iwl_legacy_free_pages(priv, cmd.reply_page);
+ il_free_pages(il, cmd.reply_page);
return ret;
}
-EXPORT_SYMBOL(iwl_legacy_send_add_sta);
+EXPORT_SYMBOL(il_send_add_sta);
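
il_send_add_sta() above shows the two host-command modes used throughout this file: with CMD_ASYNC the reply is handled later by il_add_sta_callback(), otherwise CMD_WANT_SKB makes the send block and hand back a reply page that the caller parses and frees. A compressed sketch of that branch structure, where foo_send_cmd() and friends are stand-ins, not real driver API:

/*
 * Sketch of the sync/async branch in il_send_add_sta() above.
 * foo_send_cmd(), foo_cmd and the callback type are hypothetical
 * stand-ins for the driver's host-command plumbing.
 */
#include <linux/types.h>
#include <linux/bitops.h>

#define FOO_CMD_ASYNC		BIT(0)
#define FOO_CMD_WANT_SKB	BIT(1)

struct foo_cmd {
	u32 flags;
	void (*callback)(void *priv, void *reply);	/* async completion */
	void *reply;					/* filled for sync callers */
};

int foo_send_cmd(void *priv, struct foo_cmd *cmd);	/* may sleep when sync */
int foo_process_reply(void *priv, void *reply);
void foo_free_reply(void *priv, void *reply);
void foo_async_done(void *priv, void *reply);

static int foo_send_add_sta(void *priv, struct foo_cmd *cmd, u32 flags)
{
	int ret;

	cmd->flags = flags;
	if (flags & FOO_CMD_ASYNC)
		cmd->callback = foo_async_done;		/* reply handled later */
	else
		cmd->flags |= FOO_CMD_WANT_SKB;		/* block and keep reply */

	ret = foo_send_cmd(priv, cmd);
	if (ret || (flags & FOO_CMD_ASYNC))
		return ret;				/* async: done for now */

	ret = foo_process_reply(priv, cmd->reply);	/* sync: parse the reply */
	foo_free_reply(priv, cmd->reply);		/* and release the page */
	return ret;
}
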
-static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
+static void il_set_ht_add_station(struct il_priv *il, u8 idx,
struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
+ struct il_rxon_context *ctx)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
__le32 sta_flags;
@@ -186,13 +187,13 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
goto done;
mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+ D_ASSOC("spatial multiplexing power save mode: %s\n",
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
"static" :
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
"dynamic" : "disabled");
- sta_flags = priv->stations[index].sta.station_flags;
+ sta_flags = il->stations[idx].sta.station_flags;
sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
@@ -206,7 +207,7 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
case WLAN_HT_CAP_SM_PS_DISABLED:
break;
default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
break;
}
@@ -216,27 +217,27 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
sta_flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
sta_flags |= STA_FLG_HT40_EN_MSK;
else
sta_flags &= ~STA_FLG_HT40_EN_MSK;
- priv->stations[index].sta.station_flags = sta_flags;
+ il->stations[idx].sta.station_flags = sta_flags;
done:
return;
}
/**
- * iwl_legacy_prep_station - Prepare station information for addition
+ * il_prep_station - Prepare station information for addition
*
* should be called with sta_lock held
*/
-u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
{
- struct iwl_station_entry *station;
+ struct il_station_entry *station;
int i;
- u8 sta_id = IWL_INVALID_STATION;
+ u8 sta_id = IL_INVALID_STATION;
u16 rate;
if (is_ap)
@@ -244,15 +245,15 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
else if (is_broadcast_ether_addr(addr))
sta_id = ctx->bcast_sta_id;
else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
- if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+ if (!compare_ether_addr(il->stations[i].sta.sta.addr,
addr)) {
sta_id = i;
break;
}
- if (!priv->stations[i].used &&
- sta_id == IWL_INVALID_STATION)
+ if (!il->stations[i].used &&
+ sta_id == IL_INVALID_STATION)
sta_id = i;
}
@@ -260,7 +261,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
* These two conditions have the same outcome, but keep them
* separate
*/
- if (unlikely(sta_id == IWL_INVALID_STATION))
+ if (unlikely(sta_id == IL_INVALID_STATION))
return sta_id;
/*
@@ -268,30 +269,30 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
* station. Keep track if one is in progress so that we do not send
* another.
*/
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO(
"STA %d already in process of being added.\n",
sta_id);
return sta_id;
}
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
- !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- IWL_DEBUG_ASSOC(priv,
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+ D_ASSOC(
"STA %d (%pM) already added, not adding again.\n",
sta_id, addr);
return sta_id;
}
- station = &priv->stations[sta_id];
- station->used = IWL_STA_DRIVER_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+ station = &il->stations[sta_id];
+ station->used = IL_STA_DRIVER_ACTIVE;
+ D_ASSOC("Add STA to driver ID %d: %pM\n",
sta_id, addr);
- priv->num_stations++;
+ il->num_stations++;
- /* Set up the REPLY_ADD_STA command to send to device */
- memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
+ /* Set up the C_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
memcpy(station->sta.sta.addr, addr, ETH_ALEN);
station->sta.mode = 0;
station->sta.sta.sta_id = sta_id;
@@ -299,7 +300,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
station->ctxid = ctx->ctxid;
if (sta) {
- struct iwl_station_priv_common *sta_priv;
+ struct il_station_priv_common *sta_priv;
sta_priv = (void *)sta->drv_priv;
sta_priv->ctx = ctx;
@@ -310,42 +311,42 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
* STA and broadcast STA) pass in a NULL sta, and mac80211
* doesn't allow HT IBSS.
*/
- iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
+ il_set_ht_add_station(il, sta_id, sta, ctx);
/* 3945 only */
- rate = (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
+ rate = (il->band == IEEE80211_BAND_5GHZ) ?
+ RATE_6M_PLCP : RATE_1M_PLCP;
/* Turn on both antennas for the station... */
station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
return sta_id;
}
-EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
+EXPORT_SYMBOL_GPL(il_prep_station);
#define STA_WAIT_TIMEOUT (HZ/2)
/**
- * iwl_legacy_add_station_common -
+ * il_add_station_common -
*/
int
-iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
+il_add_station_common(struct il_priv *il,
+ struct il_rxon_context *ctx,
const u8 *addr, bool is_ap,
struct ieee80211_sta *sta, u8 *sta_id_r)
{
unsigned long flags_spin;
int ret = 0;
u8 sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
+ struct il_addsta_cmd sta_cmd;
*sta_id_r = 0;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare station %pM for addition\n",
addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
return -EINVAL;
}
@@ -354,75 +355,75 @@ iwl_legacy_add_station_common(struct iwl_priv *priv,
* station. Keep track if one is in progress so that we do not send
* another.
*/
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO(
"STA %d already in process of being added.\n",
sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
return -EEXIST;
}
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv,
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC(
"STA %d (%pM) already added, not adding again.\n",
sta_id, addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
return -EEXIST;
}
- priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
/* Add station to device's station table */
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[sta_id].sta.sta.addr);
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[sta_id].sta.sta.addr);
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
}
*sta_id_r = sta_id;
return ret;
}
-EXPORT_SYMBOL(iwl_legacy_add_station_common);
+EXPORT_SYMBOL(il_add_station_common);
/**
- * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
*
- * priv->sta_lock must be held
+ * il->sta_lock must be held
*/
-static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+static void il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
{
/* Ucode must be active and driver must be non active */
- if ((priv->stations[sta_id].used &
- (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
- IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+ if ((il->stations[sta_id].used &
+ (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+ IL_STA_UCODE_ACTIVE)
+ IL_ERR("removed non active STA %u\n", sta_id);
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+ il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+ memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+ D_ASSOC("Removed STA %u\n", sta_id);
}
-static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
+static int il_send_remove_station(struct il_priv *il,
const u8 *addr, int sta_id,
bool temporary)
{
- struct iwl_rx_packet *pkt;
+ struct il_rx_pkt *pkt;
int ret;
unsigned long flags_spin;
- struct iwl_rem_sta_cmd rm_sta_cmd;
+ struct il_rem_sta_cmd rm_sta_cmd;
- struct iwl_host_cmd cmd = {
- .id = REPLY_REMOVE_STA,
- .len = sizeof(struct iwl_rem_sta_cmd),
+ struct il_host_cmd cmd = {
+ .id = C_REM_STA,
+ .len = sizeof(struct il_rem_sta_cmd),
.flags = CMD_SYNC,
.data = &rm_sta_cmd,
};
@@ -433,14 +434,14 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
cmd.flags |= CMD_WANT_SKB;
- ret = iwl_legacy_send_cmd(priv, &cmd);
+ ret = il_send_cmd(il, &cmd);
if (ret)
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_REM_STA (0x%08X)\n",
pkt->hdr.flags);
ret = -EIO;
}
@@ -449,34 +450,34 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
switch (pkt->u.rem_sta.status) {
case REM_STA_SUCCESS_MSK:
if (!temporary) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_legacy_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->sta_lock,
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il_sta_ucode_deactivate(il, sta_id);
+ spin_unlock_irqrestore(&il->sta_lock,
flags_spin);
}
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ D_ASSOC("C_REM_STA PASSED\n");
break;
default:
ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ IL_ERR("C_REM_STA failed\n");
break;
}
}
- iwl_legacy_free_pages(priv, cmd.reply_page);
+ il_free_pages(il, cmd.reply_page);
return ret;
}
/**
- * iwl_legacy_remove_station - Remove driver's knowledge of station.
+ * il_remove_station - Remove driver's knowledge of station.
*/
-int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
+int il_remove_station(struct il_priv *il, const u8 sta_id,
const u8 *addr)
{
unsigned long flags;
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
+ if (!il_is_ready(il)) {
+ D_INFO(
"Unable to remove station %pM, device not ready.\n",
addr);
/*
@@ -487,85 +488,85 @@ int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
return 0;
}
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ D_ASSOC("Removing STA from driver:%d %pM\n",
sta_id, addr);
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ if (WARN_ON(sta_id == IL_INVALID_STATION))
return -EINVAL;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&il->sta_lock, flags);
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ D_INFO("Removing %pM but non DRIVER active\n",
addr);
goto out_err;
}
- if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+ if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_INFO("Removing %pM but non UCODE active\n",
addr);
goto out_err;
}
- if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
- kfree(priv->stations[sta_id].lq);
- priv->stations[sta_id].lq = NULL;
+ if (il->stations[sta_id].used & IL_STA_LOCAL) {
+ kfree(il->stations[sta_id].lq);
+ il->stations[sta_id].lq = NULL;
}
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
- priv->num_stations--;
+ il->num_stations--;
- BUG_ON(priv->num_stations < 0);
+ BUG_ON(il->num_stations < 0);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
- return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
+ return il_send_remove_station(il, addr, sta_id, false);
out_err:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
+EXPORT_SYMBOL_GPL(il_remove_station);
/**
- * iwl_legacy_clear_ucode_stations - clear ucode station table bits
+ * il_clear_ucode_stations - clear ucode station table bits
*
* This function clears all the bits in the driver indicating
* which stations are active in the ucode. Call when something
* other than explicit station management would cause this in
* the ucode, e.g. unassociated RXON.
*/
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+void il_clear_ucode_stations(struct il_priv *il,
+ struct il_rxon_context *ctx)
{
int i;
unsigned long flags_spin;
bool cleared = false;
- IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+ D_INFO("Clearing ucode stations in driver\n");
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx && ctx->ctxid != il->stations[i].ctxid)
continue;
- if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_INFO(priv,
+ if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+ D_INFO(
"Clearing ucode active for station %d\n", i);
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
cleared = true;
}
}
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
if (!cleared)
- IWL_DEBUG_INFO(priv,
+ D_INFO(
"No active stations found to be cleared\n");
}
-EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
+EXPORT_SYMBOL(il_clear_ucode_stations);
/**
- * iwl_legacy_restore_stations() - Restore driver known stations to device
+ * il_restore_stations() - Restore driver known stations to device
*
 * All stations considered active by the driver, but not present in ucode,
 * are restored.
@@ -573,58 +574,58 @@ EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
* Function sleeps.
*/
void
-iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
{
- struct iwl_legacy_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
+ struct il_addsta_cmd sta_cmd;
+ struct il_link_quality_cmd lq;
unsigned long flags_spin;
int i;
bool found = false;
int ret;
bool send_lq;
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
+ if (!il_is_ready(il)) {
+ D_INFO(
"Not ready yet, not restoring any stations.\n");
return;
}
- IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx->ctxid != priv->stations[i].ctxid)
+ D_ASSOC("Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx->ctxid != il->stations[i].ctxid)
continue;
- if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
- !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].sta.mode = 0;
- priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+ !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("Restoring sta %pM\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].sta.mode = 0;
+ il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
found = true;
}
}
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
- memcpy(&sta_cmd, &priv->stations[i].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &il->stations[i].sta,
+ sizeof(struct il_addsta_cmd));
send_lq = false;
- if (priv->stations[i].lq) {
- memcpy(&lq, priv->stations[i].lq,
- sizeof(struct iwl_link_quality_cmd));
+ if (il->stations[i].lq) {
+ memcpy(&lq, il->stations[i].lq,
+ sizeof(struct il_link_quality_cmd));
send_lq = true;
}
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].used &=
- ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[i].used &=
- ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock,
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].used &=
+ ~IL_STA_DRIVER_ACTIVE;
+ il->stations[i].used &=
+ ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock,
flags_spin);
}
/*
@@ -632,78 +633,78 @@ iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* current LQ command
*/
if (send_lq)
- iwl_legacy_send_lq_cmd(priv, ctx, &lq,
+ il_send_lq_cmd(il, ctx, &lq,
CMD_SYNC, true);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
}
}
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
if (!found)
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
+ D_INFO("Restoring all known stations"
" .... no stations to be restored.\n");
else
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
+ D_INFO("Restoring all known stations"
" .... complete.\n");
}
-EXPORT_SYMBOL(iwl_legacy_restore_stations);
+EXPORT_SYMBOL(il_restore_stations);
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
+int il_get_free_ucode_key_idx(struct il_priv *il)
{
int i;
- for (i = 0; i < priv->sta_key_max_num; i++)
- if (!test_and_set_bit(i, &priv->ucode_key_table))
+ for (i = 0; i < il->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &il->ucode_key_table))
return i;
return WEP_INVALID_OFFSET;
}
-EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
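
il_get_free_ucode_key_idx() just above is a tiny atomic slot allocator: test_and_set_bit() checks and claims a bit in one step, so the first clear bit wins even if two contexts race, without any extra locking around the scan. A standalone sketch with made-up foo_* names:

/* Illustrative only; the foo_* names and FOO_MAX_KEYS are hypothetical. */
#include <linux/bitops.h>

#define FOO_MAX_KEYS	16		/* must fit in the caller's bitmap */

static int foo_alloc_key_slot(unsigned long *table)
{
	int i;

	for (i = 0; i < FOO_MAX_KEYS; i++)
		if (!test_and_set_bit(i, table))
			return i;	/* atomically claimed slot i */

	return -1;			/* all slots in use */
}

static void foo_free_key_slot(unsigned long *table, int i)
{
	clear_bit(i, table);		/* release for reuse */
}
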
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
+void il_dealloc_bcast_stations(struct il_priv *il)
{
unsigned long flags;
int i;
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (!(priv->stations[i].used & IWL_STA_BCAST))
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (!(il->stations[i].used & IL_STA_BCAST))
continue;
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- priv->num_stations--;
- BUG_ON(priv->num_stations < 0);
- kfree(priv->stations[i].lq);
- priv->stations[i].lq = NULL;
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ il->num_stations--;
+ BUG_ON(il->num_stations < 0);
+ kfree(il->stations[i].lq);
+ il->stations[i].lq = NULL;
}
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
}
-EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void il_dump_lq_cmd(struct il_priv *il,
+ struct il_link_quality_cmd *lq)
{
int i;
- IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
- IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+ D_RATE("lq station id 0x%x\n", lq->sta_id);
+ D_RATE("lq ant 0x%X 0x%X\n",
lq->general_params.single_stream_ant_msk,
lq->general_params.dual_stream_ant_msk);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+ D_RATE("lq idx %d 0x%X\n",
i, lq->rs_table[i].rate_n_flags);
}
#else
-static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
+static inline void il_dump_lq_cmd(struct il_priv *il,
+ struct il_link_quality_cmd *lq)
{
}
#endif
/**
- * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
 * It sometimes happens that when an HT rate has been in use and we
 * lose connectivity with the AP, mac80211 will first tell us that the
@@ -713,22 +714,22 @@ static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
* Test for this to prevent driver from sending LQ command between the time
* RXON flags are updated and when LQ command is updated.
*/
-static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq)
+static bool il_is_lq_table_valid(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq)
{
int i;
if (ctx->ht.enabled)
return true;
- IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ D_INFO("Channel %u is not an HT channel\n",
ctx->active.channel);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
RATE_MCS_HT_MSK) {
- IWL_DEBUG_INFO(priv,
- "index %d of LQ expects HT channel\n",
+ D_INFO(
+ "idx %d of LQ expects HT channel\n",
i);
return false;
}
@@ -737,7 +738,7 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
}
/**
- * iwl_legacy_send_lq_cmd() - Send link quality command
+ * il_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
* after station has been added.
*
@@ -746,35 +747,35 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
* this case to clear the state indicating that station creation is in
* progress.
*/
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init)
{
int ret = 0;
unsigned long flags_spin;
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = sizeof(struct iwl_link_quality_cmd),
+ struct il_host_cmd cmd = {
+ .id = C_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct il_link_quality_cmd),
.flags = flags,
.data = lq,
};
- if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
return -EINVAL;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
return -EINVAL;
}
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
- iwl_legacy_dump_lq_cmd(priv, lq);
+ il_dump_lq_cmd(il, lq);
BUG_ON(init && (cmd.flags & CMD_ASYNC));
- if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
- ret = iwl_legacy_send_cmd(priv, &cmd);
+ if (il_is_lq_table_valid(il, ctx, lq))
+ ret = il_send_cmd(il, &cmd);
else
ret = -EINVAL;
@@ -782,35 +783,35 @@ int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return ret;
if (init) {
- IWL_DEBUG_INFO(priv, "init LQ command complete,"
+ D_INFO("init LQ command complete,"
" clearing sta addition status for sta %d\n",
lq->sta_id);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
}
return ret;
}
-EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
+EXPORT_SYMBOL(il_send_lq_cmd);
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+int il_mac_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+ struct il_priv *il = hw->priv;
+ struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
int ret;
- IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+ D_INFO("received request to remove station %pM\n",
sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to remove station %pM\n",
sta->addr);
- ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
+ ret = il_remove_station(il, sta_common->sta_id, sta->addr);
if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
+ IL_ERR("Error removing station %pM\n",
sta->addr);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&il->mutex);
return ret;
}
-EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
+EXPORT_SYMBOL(il_mac_sta_remove);
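
A pattern worth noting across the functions renamed above (il_add_station_common(), il_restore_stations()): sta_lock is a spinlock and the synchronous ADD_STA command sleeps, so the command is copied to a stack buffer under the lock, the lock is dropped for the send, and the state bits are fixed up under the lock again afterwards. A hedged sketch with hypothetical foo_* names, condensing the response handling into the success branch:

/*
 * Sketch only: foo_* types and the flag values are illustrative, not
 * the driver's; foo_send_add_sta_sync() stands in for the sleeping
 * synchronous host command.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/bitops.h>

#define FOO_STA_DRIVER_ACTIVE		BIT(0)
#define FOO_STA_UCODE_ACTIVE		BIT(1)
#define FOO_STA_UCODE_INPROGRESS	BIT(2)

struct foo_addsta_cmd	{ u8 addr[6]; u8 sta_id; };
struct foo_station	{ u8 used; struct foo_addsta_cmd sta; };

struct foo_priv {
	spinlock_t sta_lock;
	struct foo_station stations[32];
};

int foo_send_add_sta_sync(struct foo_priv *priv, struct foo_addsta_cmd *cmd);

static int foo_add_station(struct foo_priv *priv, u8 sta_id)
{
	struct foo_addsta_cmd sta_cmd;
	unsigned long flags;
	int ret;

	/* copy the command while holding the spinlock ... */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].used |= FOO_STA_UCODE_INPROGRESS;
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* ... drop it for the send, which may sleep ... */
	ret = foo_send_add_sta_sync(priv, &sta_cmd);

	/* ... and update the bookkeeping under the lock again */
	spin_lock_irqsave(&priv->sta_lock, flags);
	if (ret)
		priv->stations[sta_id].used &=
			~(FOO_STA_DRIVER_ACTIVE | FOO_STA_UCODE_INPROGRESS);
	else
		priv->stations[sta_id].used |= FOO_STA_UCODE_ACTIVE;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
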
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75fe01a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_sta_h__
-#define __iwl_legacy_sta_h__
-
-#include "iwl-dev.h"
-
-#define HW_KEY_DYNAMIC 0
-#define HW_KEY_DEFAULT 1
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
- being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
- (this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_legacy_restore_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags);
-int iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_legacy_remove_station(struct iwl_priv *priv,
- const u8 sta_id,
- const u8 *addr);
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-u8 iwl_legacy_prep_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta);
-
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq,
- u8 flags, bool init);
-
-/**
- * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that in the case
- * we're coming there from a hardware restart mac80211 will be
- * able to reconfigure stations -- if we're getting there in the
- * normal down flow then the stations will already be cleared.
- */
-static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
-{
- if (WARN_ON(!sta))
- return IWL_INVALID_STATION;
-
- return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index ef9e268bf8a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/**
- * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
- */
-void
-iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- u32 reg = 0;
- int txq_id = txq->q.id;
-
- if (txq->need_update == 0)
- return;
-
- /* if we're trying to save power */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
- /*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- } else
- iwl_write32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- txq->need_update = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
-
-/**
- * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
-
- if (q->n_bd == 0)
- return;
-
- while (q->write_ptr != q->read_ptr) {
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
-
-/**
- * iwl_legacy_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-
- /* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
-
-/**
- * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
- */
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- int i;
-
- if (q->n_bd == 0)
- return;
-
- while (q->read_ptr != q->write_ptr) {
- i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
-
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-
- i = q->n_window;
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
-
-/**
- * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_cmd_queue_unmap(priv);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i <= TFD_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
- txq->tfds, txq->q.dma_addr);
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For Tx queues there are low mark and high mark limits. If, after queuing
- * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
- * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
- * > high mark, the Tx queue is resumed.
- *
- * See more detailed info in iwl-4965-hw.h.
- ***************************************************/
-
-int iwl_legacy_queue_space(const struct iwl_queue *q)
-{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_queue_space);
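
iwl_legacy_queue_space() above is the ring-buffer bookkeeping described in the theory-of-operation comment: free entries are computed from the read and write pointers, minus a reserve of two so that a completely full queue can never be confused with an empty one. A generic standalone illustration of that idea (not the driver's exact arithmetic):

/*
 * Generic sketch: free slots in a circular buffer of 'size' entries,
 * keeping 2 in reserve so "full" never looks like "empty"
 * (read == write is reserved to mean "empty").
 */
static int ring_free_space(unsigned int read, unsigned int write,
			   unsigned int size)
{
	/* entries in use, with wrap-around; size must be a power of two */
	unsigned int used = (write - read) & (size - 1);
	int free = (int)size - (int)used - 2;	/* keep 2 in reserve */

	return free > 0 ? free : 0;
}

The power-of-two size enforced by iwl_legacy_queue_init() below is what makes mask-based wrap-around like this (and the driver's inc/dec wrap helpers) valid.
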
-
-
-/**
- * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id)
-{
- q->n_bd = count;
- q->n_window = slots_num;
- q->id = id;
-
- /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
- * and iwl_legacy_queue_dec_wrap are broken. */
- BUG_ON(!is_power_of_2(count));
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_legacy_get_cmd_index is broken. */
- BUG_ON(!is_power_of_2(slots_num));
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = q->read_ptr = 0;
-
- return 0;
-}
-
-/**
- * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
- */
-static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
- struct iwl_tx_queue *txq, u32 id)
-{
- struct device *dev = &priv->pci_dev->dev;
- size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
-
- /* Driver private data, only for Tx (not command) queues,
- * not shared with device. */
- if (id != priv->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
- TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
- IWL_ERR(priv, "kmalloc for auxiliary BD "
- "structures failed\n");
- goto error;
- }
- } else {
- txq->txb = NULL;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
- GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = id;
-
- return 0;
-
- error:
- kfree(txq->txb);
- txq->txb = NULL;
-
- return -ENOMEM;
-}
-
-/**
- * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
- */
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int i, len;
- int ret;
- int actual_slots = slots_num;
-
- /*
- * Alloc buffer array for commands (Tx or other types of commands).
- * For the command queue (#4/#9), allocate command space + one big
- * command for scan, since scan command is very huge; the system will
- * not have two scans at the same time, so only one is needed.
- * For normal Tx queues (all other queues), no super-size command
- * space is needed.
- */
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
- GFP_KERNEL);
- txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
- GFP_KERNEL);
-
- if (!txq->meta || !txq->cmd)
- goto out_free_arrays;
-
- len = sizeof(struct iwl_device_cmd);
- for (i = 0; i < actual_slots; i++) {
- /* only happens for cmd queue */
- if (i == slots_num)
- len = IWL_MAX_CMD_SIZE;
-
- txq->cmd[i] = kmalloc(len, GFP_KERNEL);
- if (!txq->cmd[i])
- goto err;
- }
-
- /* Alloc driver data array and TFD circular buffer */
- ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
- if (ret)
- goto err;
-
- txq->need_update = 0;
-
- /*
- * For the default queues 0-3, set up the swq_id
- * already -- all others need to get one later
- * (if they need one at all).
- */
- if (txq_id < 4)
- iwl_legacy_set_swq_id(txq, txq_id, txq_id);
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-
- return 0;
-err:
- for (i = 0; i < actual_slots; i++)
- kfree(txq->cmd[i]);
-out_free_arrays:
- kfree(txq->meta);
- kfree(txq->cmd);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
-
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int actual_slots = slots_num;
-
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
-
- txq->need_update = 0;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/**
- * iwl_legacy_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data pointer
- * @cmd: a pointer to the ucode command structure
- *
- * The function returns a value < 0 to indicate that the operation
- * failed. On success, it returns the index (> 0) of the command in the
- * command queue.
- */
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- unsigned long flags;
- int len;
- u32 idx;
- u16 fix_size;
-
- cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
- fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
-
-	/* If any of the command structures end up being larger than
-	 * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
-	 * we will need to increase the size of the TFD entries.
-	 * Also check that the command buffer does not exceed the size
-	 * of device_cmd and max_cmd_size. */
- BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
- !(cmd->flags & CMD_SIZE_HUGE));
- BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
-
- if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
- IWL_WARN(priv, "Not sending command - %s KILL\n",
- iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-
- IWL_ERR(priv, "Restarting adapter due to command queue full\n");
- queue_work(priv->workqueue, &priv->restart);
- return -ENOSPC;
- }
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
-
- if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return -ENOSPC;
- }
-
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
- out_meta->flags = cmd->flags | CMD_MAPPED;
- if (cmd->flags & CMD_WANT_SKB)
- out_meta->source = cmd;
- if (cmd->flags & CMD_ASYNC)
- out_meta->callback = cmd->callback;
-
- out_cmd->hdr.cmd = cmd->id;
- memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
-
- /* At this point, the out_cmd now has all of the incoming cmd
- * information */
-
- out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
- if (cmd->flags & CMD_SIZE_HUGE)
- out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct iwl_device_cmd);
- if (idx == TFD_CMD_SLOTS)
- len = IWL_MAX_CMD_SIZE;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (out_cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv,
- "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- break;
- default:
- IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- }
-#endif
- txq->need_update = 1;
-
- if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
- /* Set up entry in queue's byte count circular buffer */
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
-
- phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- fix_size, PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, fix_size);
-
- trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
- fix_size, cmd->flags);
-
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, fix_size, 1,
- U32_PAD(cmd->len));
-
- /* Increment and update queue's write index */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return idx;
-}
-
-/**
- * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When the FW advances the 'R' index, all entries between the old and new
- * 'R' index need to be reclaimed. As a result, some free space forms. If
- * there is enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
- int idx, int cmd_idx)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int nfreed = 0;
-
- if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- idx, q->n_bd, q->write_ptr, q->read_ptr);
- return;
- }
-
- for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- if (nfreed++ > 0) {
- IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
- q->write_ptr, q->read_ptr);
- queue_work(priv->workqueue, &priv->restart);
- }
-
- }
-}
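-
-/*
- * Illustrative sketch (not part of the driver): the reclaim loop above walks
- * the circular command queue from read_ptr up to one slot past the completed
- * index, and treats freeing more than one entry as an error.  A standalone
- * model of that walk, using a plain modulo increment in place of
- * iwl_legacy_queue_inc_wrap(), would look roughly like:
- *
- *	static int reclaim_model(int read_ptr, int idx, int n_bd)
- *	{
- *		int nfreed = 0;
- *		int stop = (idx + 1) % n_bd;
- *
- *		while (read_ptr != stop) {
- *			read_ptr = (read_ptr + 1) % n_bd;
- *			nfreed++;
- *		}
- *		return nfreed;
- *	}
- */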
-
-/**
- * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- *
- * If an Rx buffer has an async callback associated with it, the callback
- * will be executed. The attached skb (if present) will only be freed
- * if the callback returns 1
- */
-void
-iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- int cmd_index;
- bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
- struct iwl_device_cmd *cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- unsigned long flags;
-
- /* If a Tx command is being handled and it isn't in the actual
- * command queue then a command routing bug has been introduced
- * in the queue management code. */
- if (WARN(txq_id != priv->cmd_queue,
- "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, priv->cmd_queue, sequence,
- priv->txq[priv->cmd_queue].q.read_ptr,
- priv->txq[priv->cmd_queue].q.write_ptr)) {
- iwl_print_hex_error(priv, pkt, 32);
- return;
- }
-
- cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
- cmd = txq->cmd[cmd_index];
- meta = &txq->meta[cmd_index];
-
- txq->time_stamp = jiffies;
-
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(meta, mapping),
- dma_unmap_len(meta, len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Input error checking is done when commands are added to queue. */
- if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_page = (unsigned long)rxb_addr(rxb);
- rxb->page = NULL;
- } else if (meta->callback)
- meta->callback(priv, cmd, pkt);
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
-
- if (!(meta->flags & CMD_ASYNC)) {
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd));
- wake_up(&priv->wait_command_queue);
- }
-
- /* Mark as unmapped */
- meta->flags = 0;
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d869a54..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl3945"
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-dev.h"
-#include "iwl-spectrum.h"
-
-/*
- * module name, copyright, version, etc.
- */
-
-#define DRV_DESCRIPTION \
-"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-/*
- * add "s" to indicate spectrum measurement included.
- * we add it here to be consistent with previous releases in which
- * this was configurable.
- */
-#define DRV_VERSION IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
- /* module parameters */
-struct iwl_mod_params iwl3945_mod_params = {
- .sw_crypto = 1,
- .restart_fw = 1,
- .disable_hw_scan = 1,
- /* the rest are 0 by default */
-};
-
-/**
- * iwl3945_get_antenna_flags - Get antenna flags for RXON command
- * @priv: eeprom and antenna fields are used to determine antenna flags
- *
- * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
- * iwl3945_mod_params.antenna specifies the antenna diversity mode:
- *
- * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
- * IWL_ANTENNA_MAIN - Force MAIN antenna
- * IWL_ANTENNA_AUX - Force AUX antenna
- */
-__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- switch (iwl3945_mod_params.antenna) {
- case IWL_ANTENNA_DIVERSITY:
- return 0;
-
- case IWL_ANTENNA_MAIN:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-
- case IWL_ANTENNA_AUX:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- }
-
- /* bad antenna selector value */
- IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
- iwl3945_mod_params.antenna);
-
- return 0; /* "diversity" is default if error */
-}
-
-static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- int ret;
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = keyconf->keyidx;
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
- /* else, we are overriding an existing key => no need to allocate room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
-
- ret = iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret = 0;
-
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-static int iwl3945_remove_static_key(struct iwl_priv *priv)
-{
- int ret = -EOPNOTSUPP;
-
- return ret;
-}
-
-static int iwl3945_set_static_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *key)
-{
- if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104)
- return -EOPNOTSUPP;
-
- IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
- return -EINVAL;
-}
-
-static void iwl3945_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl3945_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
-static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl3945_frame, list);
-}
-
-static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
-
- if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- unsigned int frame_size;
- int rc;
- u8 rate;
-
- frame = iwl3945_get_free_frame(priv);
-
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- rate = iwl_legacy_get_lowest_plcp(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl3945_free_frame(priv, frame);
-
- return rc;
-}
-
-static void iwl3945_unset_hw_params(struct iwl_priv *priv)
-{
- if (priv->_3945.shared_virt)
- dma_free_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- priv->_3945.shared_virt,
- priv->_3945.shared_phys);
-}
-
-static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_device_cmd *cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
-
- tx_cmd->sec_ctl = 0;
-
- switch (keyinfo->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
- (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
-
- memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", info->control.hw_key->hw_key_idx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
- break;
- }
-}
-
-/*
- * Handle building the REPLY_TX command notification.
- */
-static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr, u8 std_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- __le32 tx_flags = tx_cmd->tx_flags;
- __le16 fc = hdr->frame_control;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-/*
- * start REPLY_TX command process
- */
-static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl3945_tx_cmd *tx_cmd;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_queue *q = NULL;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- int txq_id = skb_get_queue_mapping(skb);
- u16 len, idx, hdr_len;
- u8 id;
- u8 unicast;
- u8 sta_id;
- u8 tid = 0;
- __le16 fc;
- u8 wait_write_ptr = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
- IWL_ERR(priv, "ERROR: No TX rate available.\n");
- goto drop_unlock;
- }
-
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS],
- info->control.sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop;
- }
-
- IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
- goto drop;
- }
-
- /* Descriptor for chosen Tx queue */
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if ((iwl_legacy_queue_space(q) < q->high_mark))
- goto drop;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /* Init first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
- tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(*tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- if (info->control.hw_key)
- iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
- iwl_legacy_update_stats(priv, true, fc, len);
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
- ieee80211_hdrlen(fc));
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl3945_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- len = (len + 3) & ~3;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- len, PCI_DMA_TODEVICE);
- /* We do not map the meta data ... so we can safely access the address
- * to provide to the unmap command. */
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
-
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
-
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- len = skb->len - hdr_len;
- if (len) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
- 0, U32_PAD(len));
- }
-
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark)
- && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- iwl_legacy_stop_queue(priv, txq);
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
-drop:
- return -1;
-}
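-
-/*
- * Illustrative note (not part of the driver): the Tx command + MAC header
- * length computed in iwl3945_tx_skb() is rounded up to a dword boundary
- * with (len + 3) & ~3, since the device reads the buffer in 32-bit units.
- * For example:
- *
- *	len = 37  ->  (37 + 3) & ~3 = 40
- *	len = 40  ->  (40 + 3) & ~3 = 40
- */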
-
-static int iwl3945_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl_spectrum_cmd spectrum;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- add_time = iwl_legacy_usecs_to_beacons(priv,
- le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
- le16_to_cpu(ctx->timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- spectrum.start_time =
- iwl_legacy_add_beacon_time(priv,
- priv->_3945.last_beacon_time, add_time,
- le16_to_cpu(ctx->timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (pkt->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
- pkt->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- iwl3945_disable_events(priv);
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-#endif
-
- IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
-}
-
-static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = beacon->beacon_notify_hdr.rate;
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
- priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
-
- /*
- * The same handler is used for both the REPLY to a discrete
- * statistics request from the host as well as for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
- priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
-
- /* Set up hardware specific Rx handlers */
- iwl3945_hw_rx_handler_setup(priv);
-}
-
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * The host allocates 32 DMA target addresses and passes the host address
- * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
- * 0 to 31
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl3945_rx_queue_restock
- * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl3945_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl3945_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)dma_addr);
-}
-
-/**
- * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
-
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if ((rxq->write_actual != (rxq->write & ~0x7))
- || (abs(rxq->write - rxq->read) > 7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl3945_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- break;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- rxb->page = page;
- /* Get physical address of RB/SKB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-void iwl3945_rx_replenish(void *data)
-{
- struct iwl_priv *priv = data;
- unsigned long flags;
-
- iwl3945_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl3945_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl3945_rx_allocate(priv, GFP_ATOMIC);
-
- iwl3945_rx_queue_restock(priv);
-}
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
- * This free routine walks the list of POOL entries and if SKB is set to
- * non-NULL it is unmapped and freed.
- */
-static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/* 0 1 2 3 4 5 6 7 8 9 */
- 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
- 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
- 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
- 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
- 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
- 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
- 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
- 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
- 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
- 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- * (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int iwl3945_calc_db_from_ratio(int sig_ratio)
-{
- /* 1000:1 or higher just report as 60 dB */
- if (sig_ratio >= 1000)
- return 60;
-
- /* 100:1 or higher, divide by 10 and use table,
- * add 20 dB to make up for divide by 10 */
- if (sig_ratio >= 100)
- return 20 + (int)ratio2dB[sig_ratio/10];
-
- /* We shouldn't see this */
- if (sig_ratio < 1)
- return 0;
-
- /* Use table for ratios 1:1 - 99:1 */
- return (int)ratio2dB[sig_ratio];
-}
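-
-/*
- * Worked example (illustrative): a linear S/N ratio of 250 falls in the
- * 100:1..999:1 branch above, giving 20 + ratio2dB[25] = 20 + 28 = 48 dB,
- * which matches 20 * log10(250) ~= 48.0 dB.  The lookup table keeps the
- * conversion integer-only, avoiding floating point math in the kernel.
- */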
-
-/**
- * iwl3945_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl3945_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty = 0;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* calculate total frames that need to be restocked after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
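- /*
- * Example of the wrap-around arithmetic above (illustrative values,
- * assuming a queue of 256 entries): r = 5, write_actual = 250 gives
- * total_empty = 5 - 250 + 256 = 11 slots to restock.
- */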
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl3945_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl3945_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl3945_rx_replenish_now(priv);
- else
- iwl3945_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
-{
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static const char *iwl3945_desc_lookup(int i)
-{
- switch (i) {
- case 1:
- return "FAIL";
- case 2:
- return "BAD_PARAM";
- case 3:
- return "BAD_CHECKSUM";
- case 4:
- return "NMI_INTERRUPT";
- case 5:
- return "SYSASSERT";
- case 6:
- return "FATAL_ERROR";
- }
-
- return "UNKNOWN";
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 i;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
-
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
- return;
- }
-
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- IWL_ERR(priv, "Desc Time asrtPC blink2 "
- "ilink1 nmiPC Line\n");
- for (i = ERROR_START_OFFSET;
- i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
- i += ERROR_ELEM_SIZE) {
- desc = iwl_legacy_read_targ_mem(priv, base + i);
- time =
- iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
- blink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
- blink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
- ilink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
- ilink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
- data1 =
- iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
-
- IWL_ERR(priv,
- "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
- iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
- ilink1, ilink2, data1);
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
- 0, blink1, blink2, ilink1, ilink2);
- }
-}
-
-static void iwl3945_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR39_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR39_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
- "the frame(s).\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- "Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
-
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here. */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl3945_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "Tx interrupt\n");
- priv->isr_stats.tx++;
-
- iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
- iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
- (FH39_SRVC_CHNL), 0x0);
- handled |= CSR_INT_BIT_FH_TX;
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~priv->inta_mask) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl3945_scan_channel *scan_ch,
- struct ieee80211_vif *vif)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- scan_ch->channel = chan->hw_value;
-
- ch_info = iwl_legacy_get_channel_info(priv, band,
- scan_ch->channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- scan_ch->channel);
- continue;
- }
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* If passive, set up for auto-switch
- * and use long active_dwell time.
- */
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
- scan_ch->type = 0; /* passive */
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
- scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
- } else {
- scan_ch->type = 1; /* active */
- }
-
- /* Set direct probe bits. These may be used both for active
- * scan channels (probes get sent right away),
- * or for passive channels (probes get sent only after
- * hearing a clear Rx packet). */
- if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- if (n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- } else {
- /* uCode v1 does not allow setting direct probe bits on
- * passive channel. */
- if ((scan_ch->type & 1) && n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- }
-
- /* Set txpower levels to defaults */
- scan_ch->tpc.dsp_atten = 110;
- /* scan_pwr_info->tpc.dsp_atten; */
-
- /*scan_pwr_info->tpc.tx_gain; */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else {
- scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- }
-
- IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
- scan_ch->channel,
- (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
- (scan_ch->type & 1) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-static void iwl3945_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
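- /*
- * Note: mac80211 reports bitrates in units of 100 kbps, while the
- * driver's rate table stores the IEEE value in units of 500 kbps,
- * hence the multiplication by 5 below (e.g. 11 -> 55 = 5.5 Mbps).
- */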
- rates[i].bitrate = iwl3945_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-/**
- * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- u32 save_len = len;
- int rc = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL39_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- rc = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int rc = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL39_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
-#if 0 /* Enable this if you want to see details */
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- i, val, *image);
-#endif
- rc = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-static int iwl3945_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int rc = 0;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_full(priv, image, len);
-
- return rc;
-}
-
-static void iwl3945_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-#define IWL3945_UCODE_GET(item) \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
-{ \
- return le32_to_cpu(ucode->v1.item); \
-}
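-/*
- * For example, IWL3945_UCODE_GET(inst_size) expands to an accessor
- * iwl3945_ucode_get_inst_size() that returns le32_to_cpu(ucode->v1.inst_size).
- */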
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
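-	/* v1 header: ver plus the five size fields, six __le32 words = 24 bytes */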
- return 24;
-}
-
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
-{
- return (u8 *) ucode->v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
-
-/**
- * iwl3945_read_ucode - Read uCode images from disk file.
- *
- * Copy into buffers for card to fetch via bus-mastering
- */
-static int iwl3945_read_ucode(struct iwl_priv *priv)
-{
- const struct iwl_ucode_header *ucode;
- int ret = -EINVAL, index;
- const struct firmware *ucode_raw;
- /* firmware file name contains uCode/driver compatibility version */
- const char *name_pre = priv->cfg->fw_name_pre;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- char buf[25];
- u8 *src;
- size_t len;
- u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
-
- /* Ask kernel firmware_class module to get the boot firmware off disk.
- * request_firmware() is synchronous, file is in memory on return. */
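-	/* Requested name is "<fw_name_pre><api>.ucode"; for the 3945 the prefix
-	 * is typically "iwlwifi-3945-", so e.g. API 2 gives "iwlwifi-3945-2.ucode"
-	 * (illustrative example). */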
- for (index = api_max; index >= api_min; index--) {
- sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
- ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
- if (ret < 0) {
- IWL_ERR(priv, "%s firmware file req failed: %d\n",
- buf, ret);
- if (ret == -ENOENT)
- continue;
- else
- goto error;
- } else {
- if (index < api_max)
- IWL_ERR(priv, "Loaded firmware %s, "
- "which is deprecated. "
-					"Please use API v%u instead.\n",
-					buf, api_max);
- IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
- "(%zd bytes) from disk\n",
- buf, ucode_raw->size);
- break;
- }
- }
-
- if (ret < 0)
- goto error;
-
- /* Make sure that we got at least our header! */
- if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
- IWL_ERR(priv, "File size way too small!\n");
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
- inst_size = iwl3945_ucode_get_inst_size(ucode);
- data_size = iwl3945_ucode_get_data_size(ucode);
- init_size = iwl3945_ucode_get_init_size(ucode);
- init_data_size = iwl3945_ucode_get_init_data_size(ucode);
- boot_size = iwl3945_ucode_get_boot_size(ucode);
- src = iwl3945_ucode_get_data(ucode);
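-	/* ucode_ver packs the major, minor, API and serial fields into a single
-	 * 32-bit word; the IWL_UCODE_* macros below pull them back out. */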
-
-	/* api_ver should match the API version forming part of the
-	 * firmware filename; we don't check for that, and from here on
-	 * we rely only on the API version read from the firmware header */
-
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv, "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- priv->ucode_ver = 0;
- ret = -EINVAL;
- goto err_release;
- }
- if (api_ver != api_max)
- IWL_ERR(priv, "Firmware has old API version. Expected %u, "
- "got %u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
- inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
- data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
- init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
- init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
- boot_size);
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
- inst_size + data_size + init_size +
- init_data_size + boot_size) {
-
- IWL_DEBUG_INFO(priv,
- "uCode file size %zd does not match expected size\n",
- ucode_raw->size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Verify that uCode images will fit in card's SRAM */
- if (inst_size > IWL39_MAX_INST_SIZE) {
-		IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in card's SRAM\n",
- inst_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- if (data_size > IWL39_MAX_DATA_SIZE) {
-		IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in card's SRAM\n",
- data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_size > IWL39_MAX_INST_SIZE) {
- IWL_DEBUG_INFO(priv,
-			"uCode init instr len %d too large to fit in card's SRAM\n",
- init_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_data_size > IWL39_MAX_DATA_SIZE) {
- IWL_DEBUG_INFO(priv,
-			"uCode init data len %d too large to fit in card's SRAM\n",
- init_data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (boot_size > IWL39_MAX_BSM_SIZE) {
- IWL_DEBUG_INFO(priv,
-			"uCode boot instr len %d too large to fit in card's SRAM\n",
- boot_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (init_size && init_data_size) {
- priv->ucode_init.len = init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (boot_size) {
- priv->ucode_boot.len = boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- len = inst_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode instr len %zd\n", len);
- memcpy(priv->ucode_code.v_addr, src, len);
- src += len;
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
- /* Runtime data (2nd block)
- * NOTE: Copy into backup buffer will be done in iwl3945_up() */
- len = data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode data len %zd\n", len);
- memcpy(priv->ucode_data.v_addr, src, len);
- memcpy(priv->ucode_data_backup.v_addr, src, len);
- src += len;
-
- /* Initialization instructions (3rd block) */
- if (init_size) {
- len = init_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %zd\n", len);
- memcpy(priv->ucode_init.v_addr, src, len);
- src += len;
- }
-
- /* Initialization data (4th block) */
- if (init_data_size) {
- len = init_data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %zd\n", len);
- memcpy(priv->ucode_init_data.v_addr, src, len);
- src += len;
- }
-
- /* Bootstrap instructions (5th block) */
- len = boot_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) boot instr len %zd\n", len);
- memcpy(priv->ucode_boot.v_addr, src, len);
-
- /* We have our copies now, allow OS release its copies */
- release_firmware(ucode_raw);
- return 0;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- ret = -ENOMEM;
- iwl3945_dealloc_ucode_pci(priv);
-
- err_release:
- release_firmware(ucode_raw);
-
- error:
- return ret;
-}
-
-
-/**
- * iwl3945_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
-
- /* bits 31:0 for 3945 */
- pinst = priv->ucode_code.p_addr;
- pdata = priv->ucode_data_backup.p_addr;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
- /* Inst byte count must be last to set up, bit 31 signals uCode
- * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return 0;
-}
-
-/**
- * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl3945_init_alive_start(struct iwl_priv *priv)
-{
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl3945_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-/**
- * iwl3945_alive_start - called after REPLY_ALIVE notification received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl3945_init_alive_start()).
- */
-static void iwl3945_alive_start(struct iwl_priv *priv)
-{
- int thermal_spin = 0;
- u32 rfkill;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
- /* Initialize uCode has loaded Runtime uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "runtime" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
- IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
-
- if (rfkill & 0x1) {
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- /* if RFKILL is not on, then wait for thermal
- * sensor in adapter to kick in */
- while (iwl3945_hw_get_temperature(priv) == 0) {
- thermal_spin++;
- udelay(10);
- }
-
- if (thermal_spin)
- IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
- thermal_spin * 10);
- } else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- /* After the ALIVE response, we can send commands to 3945 uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK_3945;
-
- iwl_legacy_power_update_mode(priv, true);
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- struct iwl3945_rxon_cmd *active_rxon =
- (struct iwl3945_rxon_cmd *)(&ctx->active);
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- /* Initialize our rx_config data */
- iwl_legacy_connection_init_rx_config(priv, ctx);
- }
-
- /* Configure Bluetooth device coexistence support */
- iwl_legacy_send_bt_config(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl3945_commit_rxon(priv, ctx);
-
- iwl3945_reg_txpower_periodic(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl3945_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
- * to prevent rearm timer */
- del_timer_sync(&priv->watchdog);
-
- /* Station information will now be cleared in device */
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl3945_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- /* If we have not previously called iwl3945_init() then
- * clear all bits but the RF Kill bits and return */
- if (!iwl_legacy_is_init(priv)) {
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
- /* ...otherwise clear out all the status bits but the RF Kill
- * bit and continue taking the NIC down. */
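-	/* (test_bit() returns 0 or 1; shifting it left by the bit index
-	 * rebuilds that bit, OR-ing the shifted results forms the mask of
-	 * bits to preserve, and the &= below clears every other bit.) */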
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
-
- iwl3945_hw_txq_ctx_stop(priv);
- iwl3945_hw_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl3945_clear_free_frames(priv);
-}
-
-static void iwl3945_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl3945_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl3945_cancel_deferred_work(priv);
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx,
- iwlegacy_bcast_addr, false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int __iwl3945_up(struct iwl_priv *priv)
-{
- int rc, i;
-
- rc = iwl3945_alloc_bcast_station(priv);
- if (rc)
- return rc;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bring up\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else {
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return -ENODEV;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- rc = iwl3945_hw_nic_init(priv);
- if (rc) {
-		IWL_ERR(priv, "Unable to init nic\n");
- return rc;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- /* We return success when we resume from suspend and rf_kill is on. */
- if (test_bit(STATUS_RF_KILL_HW, &priv->status))
- return 0;
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- rc = priv->cfg->ops->lib->load_ucode(priv);
-
- if (rc) {
- IWL_ERR(priv,
- "Unable to set up bootstrap uCode: %d\n", rc);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl3945_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl3945_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* We tried to restart and configure the device for as long as our
-	 * patience could withstand */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl3945_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl3945_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-/*
- * 3945 cannot interrupt driver when hardware rf kill switch toggles;
- * driver must poll the CSR_GP_CNTRL register for changes. This register
- * *is* readable even when device has been SW_RESET into low power mode
- * (e.g. during RF KILL).
- */
-static void iwl3945_rfkill_poll(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
- bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
- bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
- & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-
- if (new_rfkill != old_rfkill) {
- if (new_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
-
- IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
- new_rfkill ? "disable radio" : "enable radio");
- }
-
-	/* Keep this running even if the radio is now enabled; it will be
-	 * cancelled in mac_start() if the system decides to start again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
-}
-
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl3945_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl3945_scan_cmd *scan;
- u8 n_probes = 0;
- enum ieee80211_band band;
- bool is_active = false;
- int ret;
- u16 len;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
-			IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
- /*
- * suspend time format:
- * 0-19: beacon interval in usec (time before exec.)
- * 20-23: 0
- * 24-31: number of beacons (suspend between channels)
- */
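-		/*
-		 * Worked example (hypothetical beacon interval of 30 TU,
-		 * suspend_time = 100 as above): extra = (100 / 30) << 24 =
-		 * 0x03000000; the remaining 100 % 30 = 10 TU become
-		 * 10 * 1024 = 0x2800 usec, so scan_suspend_time =
-		 * 0xFF0FFFFF & 0x03002800 = 0x03002800.
-		 */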
-
- extra = (suspend_time / interval) << 24;
- scan_suspend_time = 0xFF0FFFFF &
- (extra | ((suspend_time % interval) * 1024));
-
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
-
- /* We don't build a direct scan probe request; the uCode will do
- * that based on the direct_mask added to each channel entry */
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- /* flags + rate selection */
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
- band = IEEE80211_BAND_2GHZ;
- break;
- case IEEE80211_BAND_5GHZ:
- scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
- band = IEEE80211_BAND_5GHZ;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
-	 * If active scanning is requested but a certain channel
- * is marked passive, we can do active scanning if we
- * detect transmissions.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_DISABLED;
-
- len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
- vif->addr, priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(len);
-
- /* select Rx antennas */
- scan->flags |= iwl3945_get_antenna_flags(priv);
-
- scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[len], vif);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl3945_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
- return ret;
-}
-
-void iwl3945_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl3945_commit_rxon(priv, ctx);
-}
-
-static void iwl3945_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
- mutex_unlock(&priv->mutex);
- iwl3945_down(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl3945_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl3945_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_rx_replenish(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl3945_post_associate(struct iwl_priv *priv)
-{
- int rc = 0;
- struct ieee80211_conf *conf = NULL;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (!ctx->vif || !priv->is_open)
- return;
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
-
- if (ctx->vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (ctx->vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl3945_commit_rxon(priv, ctx);
-
- switch (ctx->vif->type) {
- case NL80211_IFTYPE_STATION:
- iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl3945_send_beacon_cmd(priv);
- break;
- default:
-		IWL_ERR(priv, "%s should not be called in %d mode\n",
- __func__, ctx->vif->type);
- break;
- }
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (2 * HZ)
-
-static int iwl3945_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
-
- /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
- * ucode filename and max sizes are card-specific. */
-
- if (!priv->ucode_code.len) {
- ret = iwl3945_read_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Could not read microcode: %d\n", ret);
- mutex_unlock(&priv->mutex);
- goto out_release_irq;
- }
- }
-
- ret = __iwl3945_up(priv);
-
- mutex_unlock(&priv->mutex);
-
- if (ret)
- goto out_release_irq;
-
- IWL_DEBUG_INFO(priv, "Start UP work.\n");
-
- /* Wait for START_ALIVE from ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv,
- "Wait for START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- ret = -ETIMEDOUT;
- goto out_release_irq;
- }
- }
-
- /* ucode is running and will send rfkill notifications,
- * no need to poll the killswitch state anymore */
- cancel_delayed_work(&priv->_3945.rfkill_poll);
-
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-
-out_release_irq:
- priv->is_open = 0;
- IWL_DEBUG_MAC80211(priv, "leave - failed\n");
- return ret;
-}
-
-static void iwl3945_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open) {
- IWL_DEBUG_MAC80211(priv, "leave - skip\n");
- return;
- }
-
- priv->is_open = 0;
-
- iwl3945_down(priv);
-
- flush_workqueue(priv->workqueue);
-
- /* start polling the killswitch state again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl3945_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl3945_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int rc = 0;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
- }
- iwl3945_send_beacon_cmd(priv);
-}
-
-static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = 0;
- u8 sta_id = IWL_INVALID_STATION;
- u8 static_key;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (iwl3945_mod_params.sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- /*
- * To support IBSS RSN, don't program group keys in IBSS, the
- * hardware will then not attempt to decrypt the frames.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-
- if (!static_key) {
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
- }
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- switch (cmd) {
- case SET_KEY:
- if (static_key)
- ret = iwl3945_set_static_key(priv, key);
- else
- ret = iwl3945_set_dynamic_key(priv, key, sta_id);
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (static_key)
- ret = iwl3945_remove_static_key(priv);
- else
- ret = iwl3945_clear_sta_key_info(priv, sta_id);
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv,
- &priv->contexts[IWL_RXON_CTX_BSS],
- sta->addr, is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl3945_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
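-	/* e.g. CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK) adds the CTL2HOST
-	 * bit to filter_or when mac80211 asked for control frames, otherwise
-	 * to filter_nand so it is cleared from the staging RXON below. */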
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
-	 * Not committing directly because the hardware may be performing
-	 * a scan; even when the hardware is ready, committing here breaks
-	 * for reasons unknown. The filter flags change is committed later anyway.
- */
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t iwl3945_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl3945_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl3945_show_debug_level, iwl3945_store_debug_level);
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-static ssize_t iwl3945_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-
-static ssize_t iwl3945_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl3945_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
- u32 val;
-
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
-		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else
- iwl3945_hw_reg_set_txpower(priv, val);
-
- return count;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
-
-static ssize_t iwl3945_show_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
-}
-
-static ssize_t iwl3945_store_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 flags = simple_strtoul(buf, NULL, 0);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.flags) != flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
- flags);
- ctx->staging.flags = cpu_to_le32(flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
-
-static ssize_t iwl3945_show_filter_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- return sprintf(buf, "0x%04X\n",
- le32_to_cpu(ctx->active.filter_flags));
-}
-
-static ssize_t iwl3945_store_filter_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 filter_flags = simple_strtoul(buf, NULL, 0);
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
- "0x%04X\n", filter_flags);
- ctx->staging.filter_flags =
- cpu_to_le32(filter_flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
- iwl3945_store_filter_flags);
-
-static ssize_t iwl3945_show_measurement(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_spectrum_notification measure_report;
- u32 size = sizeof(measure_report), len = 0, ofs = 0;
- u8 *data = (u8 *)&measure_report;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (!(priv->measurement_status & MEASUREMENT_READY)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
- memcpy(&measure_report, &priv->measure_report, size);
- priv->measurement_status = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static ssize_t iwl3945_store_measurement(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_measurement_params params = {
- .channel = le16_to_cpu(ctx->active.channel),
- .start_time = cpu_to_le64(priv->_3945.last_tsf),
- .duration = cpu_to_le16(1),
- };
- u8 type = IWL_MEASURE_BASIC;
- u8 buffer[32];
- u8 channel;
-
- if (count) {
- char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
- channel = simple_strtoul(p, NULL, 0);
- if (channel)
- params.channel = channel;
-
- p = buffer;
- while (*p && *p != ' ')
- p++;
- if (*p)
- type = simple_strtoul(p + 1, NULL, 0);
- }
-
- IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
- "channel %d (for '%s')\n", type, params.channel, buf);
- iwl3945_get_measurement(priv, &params, type);
-
- return count;
-}
-
-static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
- iwl3945_show_measurement, iwl3945_store_measurement);
-
-static ssize_t iwl3945_store_retry_rate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- priv->retry_rate = simple_strtoul(buf, NULL, 0);
- if (priv->retry_rate <= 0)
- priv->retry_rate = 1;
-
- return count;
-}
-
-static ssize_t iwl3945_show_retry_rate(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d", priv->retry_rate);
-}
-
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
- iwl3945_store_retry_rate);
-
-
-static ssize_t iwl3945_show_channels(struct device *d,
- struct device_attribute *attr, char *buf)
-{
-	/* none of this belongs in sysfs anyway; deliberately left unimplemented */
- return 0;
-}
-
-static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-
-static ssize_t iwl3945_show_antenna(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
-}
-
-static ssize_t iwl3945_store_antenna(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
- int ant;
-
- if (count == 0)
- return 0;
-
- if (sscanf(buf, "%1i", &ant) != 1) {
- IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
- return count;
- }
-
- if ((ant >= 0) && (ant <= 2)) {
- IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
- iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
- } else
- IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
-
- return count;
-}
-
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
-
-static ssize_t iwl3945_show_status(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
- return sprintf(buf, "0x%08x\n", (int)priv->status);
-}
-
-static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-
-static ssize_t iwl3945_dump_error_log(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
-
- if (p[0] == '1')
- iwl3945_dump_nic_error_log(priv);
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
-
-/*****************************************************************************
- *
- * driver setup and tear down
- *
- *****************************************************************************/
-
-static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl3945_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
- INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- iwl3945_hw_setup_deferred_work(priv);
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl3945_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
-{
- iwl3945_hw_cancel_deferred_work(priv);
-
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-}
-
-static struct attribute *iwl3945_sysfs_entries[] = {
- &dev_attr_antenna.attr,
- &dev_attr_channels.attr,
- &dev_attr_dump_errors.attr,
- &dev_attr_flags.attr,
- &dev_attr_filter_flags.attr,
- &dev_attr_measurement.attr,
- &dev_attr_retry_rate.attr,
- &dev_attr_status.attr,
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl3945_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl3945_sysfs_entries,
-};
-
-struct ieee80211_ops iwl3945_hw_ops = {
- .tx = iwl3945_mac_tx,
- .start = iwl3945_mac_start,
- .stop = iwl3945_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl3945_configure_filter,
- .set_key = iwl3945_mac_set_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl3945_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static int iwl3945_init_drv(struct iwl_priv *priv)
-{
- int ret;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- priv->retry_rate = 1;
- priv->beacon_skb = NULL;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
- IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
- eeprom->version);
- ret = -EINVAL;
- goto err;
- }
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- /* Set up txpower settings in driver for all channels */
- if (iwl3945_txpower_set_from_eeprom(priv)) {
- ret = -EIO;
- goto err_free_channel_map;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl3945_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-#define IWL3945_MAX_PROBE_REQUEST 200
-
-static int iwl3945_setup_mac(struct iwl_priv *priv)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
-
- hw->rate_control_algorithm = "iwl-3945-rs";
- hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- hw->wiphy->interface_modes =
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
-
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- struct iwl3945_eeprom *eeprom;
- unsigned long flags;
-
- /***********************
- * 1. Allocating HW data
- * ********************/
-
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- hw = iwl_legacy_alloc_all(cfg);
- if (hw == NULL) {
- pr_err("Can not allocate network device\n");
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
-
- /* 3945 has only one valid context */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- /*
- * Disabling hardware scan means that mac80211 will perform scans
- * "the hard way", rather than using device's scan.
- */
- if (iwl3945_mod_params.disable_hw_scan) {
- IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
- iwl3945_hw_ops.hw_scan = NULL;
- }
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /***************************
- * 2. Initializing PCI bus
- * *************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
-
- pci_set_drvdata(pdev, priv);
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- /***********************
- * 3. Read REV Register
- * ********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, 0x41, 0x00);
-
-	/* these spin locks will be used in apm_ops.init and EEPROM access;
-	 * init them now
-	 */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /***********************
- * 4. Read EEPROM
- * ********************/
-
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
-	/* The MAC address location in the EEPROM is the same for 3945/4965 */
- eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
- SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
-
- /***********************
- * 5. Setup HW Constants
- * ********************/
- /* Device-specific setup */
- if (iwl3945_hw_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw settings\n");
- goto out_eeprom_free;
- }
-
- /***********************
- * 6. Setup priv
- * ********************/
-
- err = iwl3945_init_drv(priv);
- if (err) {
- IWL_ERR(priv, "initializing driver failed\n");
- goto out_unset_hw_params;
- }
-
- IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
- priv->cfg->name);
-
- /***********************
- * 7. Setup Services
- * ********************/
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_release_irq;
- }
-
- iwl_legacy_set_rxon_channel(priv,
- &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl3945_setup_deferred_work(priv);
- iwl3945_setup_rx_handlers(priv);
- iwl_legacy_power_initialize(priv);
-
- /*********************************
- * 8. Setup and Register mac80211
- * *******************************/
-
- iwl_legacy_enable_interrupts(priv);
-
- err = iwl3945_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
- /* Start monitoring the killswitch */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- 2 * HZ);
-
- return 0;
-
- out_remove_sysfs:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- out_release_irq:
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- out_unset_hw_params:
- iwl3945_unset_hw_params(priv);
- out_eeprom_free:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl3945_down(priv);
- }
-
-	/*
-	 * Make sure the device is reset to low power before unloading the
-	 * driver. This may be redundant with iwl_down(), but there are paths
-	 * that run iwl_down() without calling apm_ops.stop(), and there are
-	 * paths that avoid running iwl_down() at all before leaving the
-	 * driver. This (inexpensive) call *makes sure* the device is reset.
-	 */
- iwl_legacy_apm_stop(priv);
-
-	/* Make sure we flush any pending IRQ or tasklet
-	 * for the driver.
-	 */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_synchronize_irq(priv);
-
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-
- cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
-
- iwl3945_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl3945_rx_queue_free(priv, &priv->rxq);
- iwl3945_hw_txq_ctx_free(priv);
-
- iwl3945_unset_hw_params(priv);
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(pdev->irq, priv);
- pci_disable_msi(pdev);
-
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl_legacy_free_channel_map(priv);
- iwl_legacy_free_geos(priv);
- kfree(priv->scan_cmd);
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-static struct pci_driver iwl3945_driver = {
- .name = DRV_NAME,
- .id_table = iwl3945_hw_card_ids,
- .probe = iwl3945_pci_probe,
- .remove = __devexit_p(iwl3945_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl3945_init(void)
-{
-
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl3945_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl3945_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl3945_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl3945_exit(void)
-{
- pci_unregister_driver(&iwl3945_driver);
- iwl3945_rate_control_unregister();
-}
-
-MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
-
-module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto,
-		"use software crypto (default 1 [software])");
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
- int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-
-module_exit(iwl3945_exit);
-module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9eae15..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl4965"
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
-#include "iwl-4965-calib.h"
-#include "iwl-4965.h"
-#include "iwl-4965-led.h"
-
-
-/******************************************************************************
- *
- * module boiler plate
- *
- ******************************************************************************/
-
-/*
- * module name, copyright, version, etc.
- */
-#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD
-
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
-
-void iwl4965_update_chain_flags(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
-
- if (priv->cfg->ops->hcmd->set_rxon_chain) {
- for_each_context(priv, ctx) {
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- if (ctx->active.rx_chain != ctx->staging.rx_chain)
- iwl_legacy_commit_rxon(priv, ctx);
- }
- }
-}
-
-static void iwl4965_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
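-/*
- * Beacon frame buffers are kept on the priv->free_frames list and reused;
- * a new one is allocated only when the list is empty.
- */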
-static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl_frame, list);
-}
-
-static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
-static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
- struct iwl_tx_beacon_cmd *tx_beacon_cmd,
- u8 *beacon, u32 frame_size)
-{
- u16 tim_idx;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
-
- /*
- * The index is relative to frame start but we start looking at the
- * variable-length part of the beacon.
- */
- tim_idx = mgmt->u.beacon.variable - beacon;
-
- /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
- while ((tim_idx < (frame_size - 2)) &&
- (beacon[tim_idx] != WLAN_EID_TIM))
- tim_idx += beacon[tim_idx+1] + 2;
-
- /* If TIM field was found, set variables */
- if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
- tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
- tx_beacon_cmd->tim_size = beacon[tim_idx+1];
- } else
- IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
-}
-
-static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl_frame *frame)
-{
- struct iwl_tx_beacon_cmd *tx_beacon_cmd;
- u32 frame_size;
- u32 rate_flags;
- u32 rate;
- /*
- * We have to set up the TX command, the TX Beacon command, and the
- * beacon contents.
- */
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
- return 0;
- }
-
- /* Initialize memory */
- tx_beacon_cmd = &frame->u.beacon;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- /* Set up TX beacon contents */
- frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
- if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
- return 0;
- if (!frame_size)
- return 0;
-
- /* Set up TX command fields */
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
- tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
-
- /* Set up TX beacon command fields */
- iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
- frame_size);
-
- /* Set up packet rate and flags */
- rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
- rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
- if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
- tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
- rate_flags);
-
- return sizeof(*tx_beacon_cmd) + frame_size;
-}
-
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- unsigned int frame_size;
- int rc;
-
- frame = iwl4965_get_free_frame(priv);
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
- if (!frame_size) {
- IWL_ERR(priv, "Error configuring the beacon command\n");
- iwl4965_free_frame(priv, frame);
- return -EINVAL;
- }
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl4965_free_frame(priv, frame);
-
- return rc;
-}
-
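-/*
- * Each TFD transmit-buffer (TB) entry packs a 36-bit DMA address and a
- * 12-bit length: tb->lo holds address bits 0-31, while tb->hi_n_len holds
- * address bits 32-35 in its low nibble and the length in its upper 12 bits.
- */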
-static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
- return addr;
-}
-
-static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv: driver private data
- * @txq: tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within the circular buffer)
- */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
- struct iwl_tfd *tfd;
- struct pci_dev *dev = priv->pci_dev;
- int index = txq->q.read_ptr;
- int i;
- int num_tbs;
-
- tfd = &tfd_tmp[index];
-
- /* Sanity check on number of chunks */
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (num_tbs)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++)
- pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
- iwl4965_tfd_tb_get_len(tfd, i),
- PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad)
-{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
- u32 num_tbs;
-
- q = &txq->q;
- tfd_tmp = (struct iwl_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
-	/* Each TFD can point to a maximum of 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Error: cannot send more than %d chunks\n",
- IWL_NUM_OF_TBS);
- return -EINVAL;
- }
-
- BUG_ON(addr & ~DMA_BIT_MASK(36));
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(priv, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
- iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return 0;
-}
-
-/*
- * Tell the NIC where to find the circular buffer of Tx Frame Descriptors
- * for a given Tx queue, and enable the DMA channel used for that queue.
- *
- * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- /* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init,
- &pkt->u.alive_frame,
- sizeof(struct iwl_init_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-/**
- * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
- *
- * This callback is provided in order to send a statistics request.
- *
- * This timer function is continually reset to execute within
- * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
- * was received. We need to ensure we receive the statistics in order
- * to update the temperature used for calibrating the TXPOWER.
- */
-static void iwl4965_bg_statistics_periodic(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
-	/* don't send host commands if RF-kill is on */
- if (!iwl_legacy_is_ready_rf(priv))
- return;
-
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon =
- (struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- IWL_DEBUG_POWER(priv, "Stop all queues\n");
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On",
- (flags & CT_CARD_DISABLED) ?
- "Reached" : "Not reached");
-
- if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- CT_CARD_DISABLED)) {
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
- if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
- }
- }
-
- if (flags & CT_CARD_DISABLED)
- iwl4965_perform_ct_kill_task(priv);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (!(flags & RXON_CARD_DISABLED))
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-
- /*
- * The same handler is used for both the REPLY to a discrete
- * statistics request from the host as well as for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
-
- /* status change handler */
- priv->rx_handlers[CARD_STATE_NOTIFICATION] =
- iwl4965_rx_card_state_notif;
-
- priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
- iwl4965_rx_missed_beacon_notif;
- /* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
- /* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
- /* Set up hardware specific Rx handlers */
- priv->cfg->ops->lib->rx_handler_setup(priv);
-}
-
-/**
- * iwl4965_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-void iwl4965_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
-	/* calculate how many frames need to be restocked after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
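-	/* If more than half of the Rx queue is empty, replenish it in
-	 * batches of 8 while draining (see below) so the uCode does not
-	 * run out of receive buffers. */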
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl4965_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
-		 * memory (pkt), because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
-		 * restock the Rx queue so the ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl4965_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl4965_rx_replenish_now(priv);
- else
- iwl4965_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
-{
-	/* wait to make sure we flush any pending tasklet */
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static void iwl4965_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR49_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR49_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
-			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
-				      "the frame(s).\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
-
- IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
-
- priv->isr_stats.rfkill++;
-
-		/* The driver only loads the ucode when the interface is
-		 * set up, but it allows loading the ucode even if the
-		 * radio is killed. Hence update the killswitch state here;
-		 * the rfkill handler will take care of restarting if needed.
-		 */
- if (!test_bit(STATUS_ALIVE, &priv->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
- }
-
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(priv, "Microcode CT kill error detected.\n");
- priv->isr_stats.ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
-		IWL_ERR(priv, "Microcode SW error detected. "
-			"Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /*
- * uCode wakes up after power-down sleep.
- * Tell device about any new tx or host commands enqueued,
- * and about any Rx buffers made available while asleep.
- */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- for (i = 0; i < priv->hw_params.max_txq_num; i++)
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
-	 * notifications from uCode come through here */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl4965_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
- priv->isr_stats.tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- priv->ucode_write_complete = 1;
- wake_up(&priv->wait_command_queue);
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~(priv->inta_mask)) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_legacy_enable_rfkill_int(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv,
- "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level managed via sysfs below is a per-device debug level
- * that is used instead of the global debug level when it is set.
- */
-static ssize_t iwl4965_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl4965_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl4965_show_debug_level, iwl4965_store_debug_level);
-
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-
-static ssize_t iwl4965_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
-
-static ssize_t iwl4965_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_ready_rf(priv))
- return sprintf(buf, "off\n");
- else
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl4965_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else {
- ret = iwl_legacy_set_tx_power(priv, val, false);
- if (ret)
-			IWL_ERR(priv, "failed setting tx power (%d).\n",
- ret);
- else
- ret = count;
- }
- return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
- iwl4965_show_tx_power, iwl4965_store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl_sysfs_entries,
-};
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-static void iwl4965_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
- void *context);
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length);
-
-static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
-{
- const char *name_pre = priv->cfg->fw_name_pre;
- char tag[8];
-
- if (first) {
- priv->fw_index = priv->cfg->ucode_api_max;
- sprintf(tag, "%d", priv->fw_index);
- } else {
- priv->fw_index--;
- sprintf(tag, "%d", priv->fw_index);
- }
-
- if (priv->fw_index < priv->cfg->ucode_api_min) {
- IWL_ERR(priv, "no suitable firmware found!\n");
- return -ENOENT;
- }
-
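-	/* Compose the firmware file name: "<fw_name_pre><api_version>.ucode" */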
- sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
- IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
- priv->firmware_name);
-
- return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- &priv->pci_dev->dev, GFP_KERNEL, priv,
- iwl4965_ucode_callback);
-}
-
-struct iwl4965_firmware_pieces {
- const void *inst, *data, *init, *init_data, *boot;
- size_t inst_size, data_size, init_size, init_data_size, boot_size;
-};
-
-static int iwl4965_load_firmware(struct iwl_priv *priv,
- const struct firmware *ucode_raw,
- struct iwl4965_firmware_pieces *pieces)
-{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
- u32 api_ver, hdr_size;
- const u8 *src;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
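-	/* Legacy uCode header (API v0/v1/v2): a 24-byte fixed header followed
-	 * by the runtime inst/data, init inst/data and boot images, in that
-	 * order. */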
- switch (api_ver) {
- default:
- case 0:
- case 1:
- case 2:
- hdr_size = 24;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(priv, "File size too small!\n");
- return -EINVAL;
- }
- pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
- pieces->data_size = le32_to_cpu(ucode->v1.data_size);
- pieces->init_size = le32_to_cpu(ucode->v1.init_size);
- pieces->init_data_size =
- le32_to_cpu(ucode->v1.init_data_size);
- pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
- src = ucode->v1.data;
- break;
- }
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != hdr_size + pieces->inst_size +
- pieces->data_size + pieces->init_size +
- pieces->init_data_size + pieces->boot_size) {
-
- IWL_ERR(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- return -EINVAL;
- }
-
- pieces->inst = src;
- src += pieces->inst_size;
- pieces->data = src;
- src += pieces->data_size;
- pieces->init = src;
- src += pieces->init_size;
- pieces->init_data = src;
- src += pieces->init_data_size;
- pieces->boot = src;
- src += pieces->boot_size;
-
- return 0;
-}
-
-/**
- * iwl4965_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void
-iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
- struct iwl_priv *priv = context;
- struct iwl_ucode_header *ucode;
- int err;
- struct iwl4965_firmware_pieces pieces;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- u32 api_ver;
-
- u32 max_probe_length = 200;
- u32 standard_phy_calibration_size =
- IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
- memset(&pieces, 0, sizeof(pieces));
-
- if (!ucode_raw) {
- if (priv->fw_index <= priv->cfg->ucode_api_max)
- IWL_ERR(priv,
- "request for firmware file '%s' failed.\n",
- priv->firmware_name);
- goto try_again;
- }
-
- IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
- priv->firmware_name, ucode_raw->size);
-
- /* Make sure that we got at least the API version number */
- if (ucode_raw->size < 4) {
- IWL_ERR(priv, "File size way too small!\n");
- goto try_again;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
-
- if (err)
- goto try_again;
-
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- /*
- * api_ver should match the api version forming part of the
-	 * firmware filename ... but we don't check for that and rely only
-	 * on the API version read from the firmware header from here on
- */
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver != api_max)
- IWL_ERR(priv,
- "Firmware has old API version. Expected v%u, "
- "got v%u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- /*
- * For any of the failures below (before allocating pci memory)
- * we will try to load a version with a smaller API -- maybe the
- * user just got a corrupted version of the latest API.
- */
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
- pieces.inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
- pieces.data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
- pieces.init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
- pieces.init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
- pieces.boot_size);
-
- /* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
- pieces.inst_size);
- goto try_again;
- }
-
- if (pieces.data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
- pieces.data_size);
- goto try_again;
- }
-
- if (pieces.init_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
- pieces.init_size);
- goto try_again;
- }
-
- if (pieces.init_data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
- pieces.init_data_size);
- goto try_again;
- }
-
- if (pieces.boot_size > priv->hw_params.max_bsm_size) {
- IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
- pieces.boot_size);
- goto try_again;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = pieces.inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (pieces.init_size && pieces.init_data_size) {
- priv->ucode_init.len = pieces.init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = pieces.init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (pieces.boot_size) {
- priv->ucode_boot.len = pieces.boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Now that we can no longer fail, copy information */
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
- pieces.inst_size);
- memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
- /*
- * Runtime data
- * NOTE: Copy into backup buffer will be done in iwl_up()
- */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
- pieces.data_size);
- memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
- memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
-
- /* Initialization instructions */
- if (pieces.init_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %Zd\n",
- pieces.init_size);
- memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
- }
-
- /* Initialization data */
- if (pieces.init_data_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %Zd\n",
- pieces.init_data_size);
- memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
- pieces.init_data_size);
- }
-
- /* Bootstrap instructions */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
- pieces.boot_size);
- memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
-
- /*
-	 * figure out the offset of the chain noise reset and gain commands
-	 * based on the size of the standard PHY calibration command table
- */
- priv->_4965.phy_calib_chain_noise_reset_cmd =
- standard_phy_calibration_size;
- priv->_4965.phy_calib_chain_noise_gain_cmd =
- standard_phy_calibration_size + 1;
-
- /**************************************************
- * This is still part of probe() in a sense...
- *
- * 9. Setup and register with mac80211 and debugfs
- **************************************************/
- err = iwl4965_mac_setup_register(priv, max_probe_length);
- if (err)
- goto out_unbind;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv,
- "failed to create debugfs files. Ignoring error: %d\n", err);
-
- err = sysfs_create_group(&priv->pci_dev->dev.kobj,
- &iwl_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_unbind;
- }
-
-	/* We have our copies now, allow the OS to release its copy */
- release_firmware(ucode_raw);
- complete(&priv->_4965.firmware_loading_complete);
- return;
-
- try_again:
- /* try next, if any */
- if (iwl4965_request_firmware(priv, false))
- goto out_unbind;
- release_firmware(ucode_raw);
- return;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl4965_dealloc_ucode_pci(priv);
- out_unbind:
- complete(&priv->_4965.firmware_loading_complete);
- device_release_driver(&priv->pci_dev->dev);
- release_firmware(ucode_raw);
-}
-
-static const char * const desc_lookup_text[] = {
- "OK",
- "FAIL",
- "BAD_PARAM",
- "BAD_CHECKSUM",
- "NMI_INTERRUPT_WDG",
- "SYSASSERT",
- "FATAL_ERROR",
- "BAD_COMMAND",
- "HW_ERROR_TUNE_LOCK",
- "HW_ERROR_TEMPERATURE",
- "ILLEGAL_CHAN_FREQ",
- "VCC_NOT_STABLE",
- "FH_ERROR",
- "NMI_INTERRUPT_HOST",
- "NMI_INTERRUPT_ACTION_PT",
- "NMI_INTERRUPT_UNKNOWN",
- "UCODE_VERSION_MISMATCH",
- "HW_ERROR_ABS_LOCK",
- "HW_ERROR_CAL_LOCK_FAIL",
- "NMI_INTERRUPT_INST_ACTION_PT",
- "NMI_INTERRUPT_DATA_ACTION_PT",
- "NMI_TRM_HW_ER",
- "NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT",
- "DEBUG_0",
- "DEBUG_1",
- "DEBUG_2",
- "DEBUG_3",
-};
-
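-/* Error descriptions matched by error number rather than by table index;
- * the final "ADVANCED_SYSASSERT" entry acts as the fallback. */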
-static struct { char *name; u8 num; } advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *iwl4965_desc_lookup(u32 num)
-{
- int i;
- int max = ARRAY_SIZE(desc_lookup_text);
-
- if (num < max)
- return desc_lookup_text[num];
-
- max = ARRAY_SIZE(advanced_lookup) - 1;
- for (i = 0; i < max; i++) {
- if (advanced_lookup[i].num == num)
- break;
- }
- return advanced_lookup[i].name;
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
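-/*
- * The uCode error-event table begins with a count word; the fields dumped
- * below (desc, pc, blink/ilink registers, data words, line, time, hcmd)
- * are read at fixed word offsets from the table base.
- */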
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 data2, line;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
- u32 pc, hcmd;
-
- if (priv->ucode_type == UCODE_INIT) {
- base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
- } else {
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
- }
-
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
- }
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
- priv->isr_stats.err_code = desc;
- pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
- blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
- blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
- ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
- ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
- data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
- data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
- line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
- time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
- hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
-
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
- time, data1, data2, line,
- blink1, blink2, ilink1, ilink2);
-
- IWL_ERR(priv, "Desc Time "
- "data1 data2 line\n");
- IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
- iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
- pc, blink1, blink2, ilink1, ilink2, hcmd);
-}
-
-static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
-{
- struct iwl_ct_kill_config cmd;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- cmd.critical_temperature_R =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
- else
- IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature is %d\n",
- priv->hw_params.ct_kill_threshold);
-}
-
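-/* Tx queues 0-3 map to the VO/VI/BE/BK FIFOs, queue 4 to the command FIFO;
- * the remaining queues are unused by default. */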
-static const s8 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Clear 4965's internal Tx Scheduler data base */
- priv->scd_base_addr = iwl_legacy_read_prph(priv,
- IWL49_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
-
-	/* Tell 4965 where to find Tx byte count tables */
- iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Disable chain mode for all queues */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
- /* Initialize each Tx queue (including the command queue) */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
- /* TFD circular buffer read/write indexes */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
- /* Max Tx Window size for Scheduler-ACK mode */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
- (SCD_WIN_SIZE <<
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- /* Frame limit */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- (SCD_FRAME_LIMIT <<
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- }
- iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
- (1 << priv->hw_params.max_txq_num) - 1);
-
- /* Activate all Tx DMA/FIFO channels */
- iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
-
- iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
-	/* make sure no queue is stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
-	/* reset to 0 to enable all queues first */
- priv->txq_ctx_active_msk = 0;
- /* Map each Tx/cmd queue to its corresponding fifo */
- BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
- for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
- int ac = default_queue_to_tx_fifo[i];
-
- iwl_txq_ctx_activate(priv, i);
-
- if (ac == IWL_TX_FIFO_UNUSED)
- continue;
-
- iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
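/*
 * Editorial sketch, not driver code: how the two per-queue scheduler
 * context words written in iwl4965_alive_notify() above are composed.
 * The constants mirror SCD_WIN_SIZE/SCD_FRAME_LIMIT and the
 * IWL49_SCD_QUEUE_CTX_REG{1,2}_* masks defined in iwl-prph.h; the helper
 * names themselves are hypothetical.
 */
#include <linux/types.h>

static inline u32 scd_ctx_reg1_win_size(u32 win_size)
{
	/* bits 6-0: max Tx window size for Scheduler-ACK mode (64 here) */
	return (win_size << 0) & 0x0000007F;
}

static inline u32 scd_ctx_reg2_frame_limit(u32 frame_limit)
{
	/* bits 22-16: frame limit (SCD_FRAME_LIMIT, also 64 here) */
	return (frame_limit << 16) & 0x007F0000;
}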
-
-/**
- * iwl4965_alive_start - called after REPLY_ALIVE notification received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl_init_alive_start()).
- */
-static void iwl4965_alive_start(struct iwl_priv *priv)
-{
- int ret = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
-	/* The "initialize" uCode has loaded the runtime uCode ... verify the
-	 * instruction image.  This is a paranoid check, because we would not
-	 * have gotten the "runtime" alive notification if the code weren't
-	 * properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- ret = iwl4965_alive_notify(priv);
- if (ret) {
- IWL_WARN(priv,
- "Could not complete ALIVE transition [ntf]: %d\n", ret);
- goto restart;
- }
-
-
- /* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK;
-
- if (iwl_legacy_is_associated_ctx(ctx)) {
- struct iwl_legacy_rxon_cmd *active_rxon =
- (struct iwl_legacy_rxon_cmd *)&ctx->active;
- /* apply any changes in staging */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- struct iwl_rxon_context *tmp;
- /* Initialize our rx_config data */
- for_each_context(priv, tmp)
- iwl_legacy_connection_init_rx_config(priv, tmp);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* Configure bluetooth coexistence if enabled */
- iwl_legacy_send_bt_config(priv);
-
- iwl4965_reset_run_time_calib(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* At this point, the NIC is initialized and operational */
- iwl4965_rf_kill_ct_config(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- iwl_legacy_power_update_mode(priv, true);
- IWL_DEBUG_INFO(priv, "Updated power mode\n");
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl4965_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* Stop the TX queue watchdog.  The STATUS_EXIT_PENDING bit must be
-	 * set to prevent the timer from being re-armed. */
- del_timer_sync(&priv->watchdog);
-
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl4965_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- /* If we have not previously called iwl_init() then
- * clear all bits but the RF Kill bit and return */
- if (!iwl_legacy_is_init(priv)) {
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
- /* ...otherwise clear out all the status bits but the RF Kill
- * bit and continue taking the NIC down. */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
-
- iwl4965_txq_ctx_stop(priv);
- iwl4965_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl4965_clear_free_frames(priv);
-}
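/*
 * Editorial sketch, not driver code: the status manipulation above keeps
 * only a whitelist of bits (RF_KILL_HW, GEO_CONFIGURED, FW_ERROR,
 * EXIT_PENDING) across the shutdown.  Because each test_bit() reads
 * priv->status itself, the expression is equivalent to masking with the
 * corresponding bit mask, as this hypothetical helper shows.
 */
static inline unsigned long keep_status_bits(unsigned long status,
					     unsigned long keep_mask)
{
	return status & keep_mask;	/* all other bits are cleared */
}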
-
-static void iwl4965_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl4965_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl4965_cancel_deferred_work(priv);
-}
-
-#define HW_READY_TIMEOUT (50)
-
-static int iwl4965_set_hw_ready(struct iwl_priv *priv)
-{
- int ret = 0;
-
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
- if (ret != -ETIMEDOUT)
- priv->hw_ready = true;
- else
- priv->hw_ready = false;
-
- IWL_DEBUG_INFO(priv, "hardware %s\n",
- (priv->hw_ready == 1) ? "ready" : "not ready");
- return ret;
-}
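/*
 * Editorial sketch (hypothetical helper, not the real iwl_poll_bit()):
 * the NIC_READY handshake above follows a "set a bit, then poll until the
 * hardware reflects it or a timeout expires" pattern, roughly like this.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_bit_sketch(u32 (*read_reg)(void *ctx, u32 addr), void *ctx,
			   u32 addr, u32 mask, u32 value, int timeout_us)
{
	int elapsed = 0;

	do {
		if ((read_reg(ctx, addr) & mask) == (value & mask))
			return elapsed;		/* success: time waited */
		udelay(10);
		elapsed += 10;
	} while (elapsed < timeout_us);

	return -ETIMEDOUT;
}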
-
-static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
-{
- int ret = 0;
-
- IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
-
- ret = iwl4965_set_hw_ready(priv);
- if (priv->hw_ready)
- return ret;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
-
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
- /* HW should be ready by now, check again. */
- if (ret != -ETIMEDOUT)
- iwl4965_set_hw_ready(priv);
-
- return ret;
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int __iwl4965_up(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int i;
- int ret;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bringup\n");
- return -EIO;
- }
-
- for_each_context(priv, ctx) {
- ret = iwl4965_alloc_bcast_station(priv, ctx);
- if (ret) {
- iwl_legacy_dealloc_bcast_stations(priv);
- return ret;
- }
- }
-
- iwl4965_prepare_card_hw(priv);
-
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv,
- CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_legacy_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-
- iwl_legacy_enable_interrupts(priv);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return 0;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- /* must be initialised before iwl_hw_nic_init */
- priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
- ret = iwl4965_hw_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- ret = priv->cfg->ops->lib->load_ucode(priv);
-
- if (ret) {
- IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
- ret);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl4965_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl4965_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* We tried to restart and configure the device for as long as our
-	 * patience could withstand. */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl4965_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- priv->cfg->ops->lib->init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl4965_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- run_time_calib_work);
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (priv->start_calib) {
- iwl4965_chain_noise_calibration(priv,
- (void *)&priv->_4965.statistics);
- iwl4965_sensitivity_calibration(priv,
- (void *)&priv->_4965.statistics);
- }
-
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
-
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
-
- __iwl4965_down(priv);
-
- mutex_unlock(&priv->mutex);
- iwl4965_cancel_deferred_work(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl4965_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl4965_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl4965_rx_replenish(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (4 * HZ)
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
- struct iwl_rxon_context *ctx;
-
- hw->rate_control_algorithm = "iwl-4965-rs";
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
- if (priv->cfg->sku & IWL_SKU_N)
- hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
- hw->sta_data_size = sizeof(struct iwl_station_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- for_each_context(priv, ctx) {
- hw->wiphy->interface_modes |= ctx->interface_modes;
- hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
- }
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
-
- /*
- * For now, disable PS by default because it affects
- * RX performance significantly.
- */
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-
-int iwl4965_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
- ret = __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
-
- if (ret)
- return ret;
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
- /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- return -ETIMEDOUT;
- }
- }
-
- iwl4965_led_enable(priv);
-
-out:
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-void iwl4965_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open)
- return;
-
- priv->is_open = 0;
-
- iwl4965_down(priv);
-
- flush_workqueue(priv->workqueue);
-
-	/* User space software may expect to get rfkill changes
-	 * even if the interface is down. */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_rfkill_int(priv);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl4965_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
- iv32, phase1key);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
- bool is_default_wep_key = false;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (priv->cfg->mod_params->sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- /*
-	 * If we are given a WEP group key and we have not received any key
-	 * mapping so far, we are in legacy WEP mode (group key only);
-	 * otherwise we are in 802.1X mode.
-	 * In legacy WEP mode, we use another host command to the uCode.
- */
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta) {
- if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
- else
- is_default_wep_key =
- (key->hw_key_idx == HW_KEY_DEFAULT);
- }
-
- switch (cmd) {
- case SET_KEY:
- if (is_default_wep_key)
- ret = iwl4965_set_default_wep_key(priv,
- vif_priv->ctx, key);
- else
- ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (is_default_wep_key)
- ret = iwl4965_remove_default_wep_key(priv, ctx, key);
- else
- ret = iwl4965_remove_dynamic_key(priv, ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
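/*
 * Editorial sketch, not driver code: the SET_KEY branch of the default-WEP
 * decision above, reduced to a pure predicate.  A WEP40/WEP104 group key
 * (no station pointer) installed while no key-mapping keys exist is
 * treated as a legacy default WEP key; everything else takes the dynamic
 * key path.
 */
#include <linux/types.h>

static bool is_legacy_default_wep_key(bool wep_cipher, bool has_sta,
				      int key_mapping_keys)
{
	return wep_cipher && !has_sta && !key_mapping_keys;
}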
-
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = -EINVAL;
-
- IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
- sta->addr, tid);
-
- if (!(priv->cfg->sku & IWL_SKU_N))
- return -EACCES;
-
- mutex_lock(&priv->mutex);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT(priv, "start Rx\n");
- ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT(priv, "start Tx\n");
- ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- ret = 0;
- break;
- }
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
- atomic_set(&sta_priv->pending_frames, 0);
-
- ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
- is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl4965_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = ch_switch->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u16 ch;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- goto out;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- goto out;
-
- if (!priv->cfg->ops->lib->set_channel_switch)
- goto out;
-
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
- goto out;
-
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
- spin_lock_irq(&priv->lock);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
- spin_unlock_irq(&priv->lock);
-
- iwl_legacy_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
-
-out:
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- for_each_context(priv, ctx) {
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
- * Not committing directly because hardware can perform a scan,
- * but we'll eventually commit the filter flags change anyway.
- */
- }
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
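/*
 * Editorial sketch, not driver code: the per-context update inside the
 * loop above is equivalent to this pure function -- clear every bit named
 * in the NAND mask, then set every bit named in the OR mask.  The driver
 * stores these as __le32; plain 32-bit words are used here for clarity.
 */
#include <linux/types.h>

static inline u32 apply_filter_masks(u32 flags, u32 nand, u32 set)
{
	return (flags & ~nand) | set;
}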
-
-/*****************************************************************************
- *
- * driver setup and teardown
- *
- *****************************************************************************/
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- txpower_work);
-
- mutex_lock(&priv->mutex);
-
- /* If a scan happened to start before we got here
- * then just return; the statistics notification will
- * kick off another scheduled work to compensate for
- * any temperature delta we missed here. */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
- goto out;
-
-	/* Regardless of whether we are associated, we must reconfigure the
- * TX power since frames can be sent on non-radar channels while
- * not associated */
- priv->cfg->ops->lib->send_tx_power(priv);
-
- /* Update last_temperature to keep is_calib_needed from running
- * when it isn't needed... */
- priv->last_temperature = priv->temperature;
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl4965_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
- INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-
- init_timer(&priv->statistics_periodic);
- priv->statistics_periodic.data = (unsigned long)priv;
- priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl4965_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->txpower_work);
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
- cancel_work_sync(&priv->run_time_calib_work);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-
- del_timer_sync(&priv->statistics_periodic);
-}
-
-static void iwl4965_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
- rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |=
- (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
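/*
 * Editorial note with a small check (my reading of the units, not stated
 * in this file): the 'ieee' rate values are in 500 kb/s units while
 * mac80211's bitrate field is in 100 kb/s units, hence the "* 5" above.
 */
#define IEEE_TO_BITRATE(ieee_val)	((ieee_val) * 5)	/* 100 kb/s units */
/* example: 11 Mb/s CCK has ieee value 22 -> bitrate 22 * 5 = 110 */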
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
-
- /* Find out whether to activate Tx queue */
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- /* Set up and activate */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
- IWL49_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-
-static int iwl4965_init_drv(struct iwl_priv *priv)
-{
- int ret;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- /* Choose which receivers/antennas to use */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- iwl_legacy_init_scan_params(priv);
-
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl4965_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-static void iwl4965_uninit_drv(struct iwl_priv *priv)
-{
- iwl4965_calib_free_results(priv);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- kfree(priv->scan_cmd);
-}
-
-static void iwl4965_hw_detect(struct iwl_priv *priv)
-{
- priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
- priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
- priv->rev_id = priv->pci_dev->revision;
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
-}
-
-static int iwl4965_set_hw_params(struct iwl_priv *priv)
-{
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- if (priv->cfg->mod_params->amsdu_size_8K)
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
- else
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
- priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
- if (priv->cfg->mod_params->disable_11n)
- priv->cfg->sku &= ~IWL_SKU_N;
-
- /* Device-specific setup */
- return priv->cfg->ops->lib->set_hw_params(priv);
-}
-
-static const u8 iwl4965_bss_ac_to_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
-};
-
-static const u8 iwl4965_bss_ac_to_queue[] = {
- 0, 1, 2, 3,
-};
-
-static int
-iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- unsigned long flags;
- u16 pci_cmd;
-
- /************************
- * 1. Allocating HW data
- ************************/
-
- hw = iwl_legacy_alloc_all(cfg);
- if (!hw) {
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- /* At this point both hw and priv are allocated. */
-
- /*
- * The default context is always valid,
- * more may be discovered when firmware
- * is loaded.
- */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION);
- priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
-
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /**************************
- * 2. Initializing PCI bus
- **************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- /* both attempts failed: */
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- pci_set_drvdata(pdev, priv);
-
-
- /***********************
- * 3. Read REV register
- ***********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
-	/* These spin locks will be used in apm_ops.init and EEPROM access,
-	 * so we should init them now.
- */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- iwl4965_hw_detect(priv);
- IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, priv->hw_rev);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl4965_prepare_card_hw(priv);
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Failed, HW not ready\n");
- goto out_iounmap;
- }
-
- /*****************
- * 4. Read EEPROM
- *****************/
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
- err = iwl4965_eeprom_check_version(priv);
- if (err)
- goto out_free_eeprom;
-
- /* extract MAC Address */
- iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
- priv->hw->wiphy->addresses = priv->addresses;
- priv->hw->wiphy->n_addresses = 1;
-
- /************************
- * 5. Setup HW constants
- ************************/
- if (iwl4965_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw parameters\n");
- goto out_free_eeprom;
- }
-
- /*******************
- * 6. Setup priv
- *******************/
-
- err = iwl4965_init_drv(priv);
- if (err)
- goto out_free_eeprom;
- /* At this point both hw and priv are initialized. */
-
- /********************
- * 7. Setup services
- ********************/
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- iwl4965_setup_deferred_work(priv);
- iwl4965_setup_rx_handlers(priv);
-
- /*********************************************
- * 8. Enable interrupts and read RFKILL state
- *********************************************/
-
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
- }
-
- iwl_legacy_enable_rfkill_int(priv);
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
-
- iwl_legacy_power_initialize(priv);
-
- init_completion(&priv->_4965.firmware_loading_complete);
-
- err = iwl4965_request_firmware(priv, true);
- if (err)
- goto out_destroy_workqueue;
-
- return 0;
-
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl4965_uninit_drv(priv);
- out_free_eeprom:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- wait_for_completion(&priv->_4965.firmware_loading_complete);
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
- sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
-
-	/* The ieee80211_unregister_hw() call will cause iwl_mac_stop() to be
-	 * called, which in turn calls iwl4965_down(); since we are removing
-	 * the device we need to set the STATUS_EXIT_PENDING bit.
- */
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl4965_down(priv);
- }
-
- /*
- * Make sure device is reset to low power before unloading driver.
- * This may be redundant with iwl4965_down(), but there are paths to
- * run iwl4965_down() without calling apm_ops.stop(), and there are
- * paths to avoid running iwl4965_down() at all before leaving driver.
- * This (inexpensive) call *makes sure* device is reset.
- */
- iwl_legacy_apm_stop(priv);
-
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_synchronize_irq(priv);
-
- iwl4965_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl4965_rx_queue_free(priv, &priv->rxq);
- iwl4965_hw_txq_ctx_free(priv);
-
- iwl_legacy_eeprom_free(priv);
-
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(priv->pci_dev->irq, priv);
- pci_disable_msi(priv->pci_dev);
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl4965_uninit_drv(priv);
-
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
-#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
- {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
- {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
-#endif /* CONFIG_IWL4965 */
-
- {0}
-};
-MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
-
-static struct pci_driver iwl4965_driver = {
- .name = DRV_NAME,
- .id_table = iwl4965_hw_card_ids,
- .probe = iwl4965_pci_probe,
- .remove = __devexit_p(iwl4965_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl4965_init(void)
-{
-
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl4965_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl4965_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl4965_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl4965_exit(void)
-{
- pci_unregister_driver(&iwl4965_driver);
- iwl4965_rate_control_unregister();
-}
-
-module_exit(iwl4965_exit);
-module_init(iwl4965_init);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
index 30a493003ab..ffec4b4a248 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_legacy_prph_h__
-#define __iwl_legacy_prph_h__
+#ifndef __il_prph_h__
+#define __il_prph_h__
/*
* Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
-#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
@@ -120,13 +120,13 @@
*
* 1) Initialization -- performs hardware calibration and sets up some
* internal data, then notifies host via "initialize alive" notification
- * (struct iwl_init_alive_resp) that it has completed all of its work.
+ * (struct il_init_alive_resp) that it has completed all of its work.
* After signal from host, it then loads and starts the runtime program.
* The initialization program must be used when initially setting up the
* NIC after loading the driver.
*
* 2) Runtime/Protocol -- performs all normal runtime operations. This
- * notifies host via "alive" notification (struct iwl_alive_resp) that it
+ * notifies host via "alive" notification (struct il_alive_resp) that it
* is ready to be used.
*
* When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
* procedure.
*
* This save/restore method is mostly for autonomous power management during
- * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
+ * normal operation (result of C_POWER_TBL). Platform suspend/resume and
* RFKILL should use complete restarts (with total re-initialization) of uCode,
* allowing total shutdown (including BSM memory).
*
@@ -202,19 +202,19 @@
*/
/* BSM bit fields */
-#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
-#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
-#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
+#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
+#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
/* BSM addresses */
#define BSM_BASE (PRPH_BASE + 0x3400)
#define BSM_END (PRPH_BASE + 0x3800)
-#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
-#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
-#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
-#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
-#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
+#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
/*
* Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
* Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
*/
#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
-#define BSM_SRAM_SIZE (1024) /* bytes */
-
+#define BSM_SRAM_SIZE (1024) /* bytes */
/* 3945 Tx scheduler registers */
#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
* but one DMA channel may take input from several queues.
*
* Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
- * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ * (cf. default_queue_to_tx_fifo in 4965.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
* 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
* The driver sets up each queue to work in one of two modes:
*
* 1) Scheduler-Ack, in which the scheduler automatically supports a
- * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
+ * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
* contains TFDs for a unique combination of Recipient Address (RA)
* and Traffic Identifier (TID), that is, traffic of a given
* Quality-Of-Service (QOS) priority, destined for a single station.
*
* In scheduler-ack mode, the scheduler keeps track of the Tx status of
- * each frame within the BA window, including whether it's been transmitted,
+ * each frame within the BA win, including whether it's been transmitted,
* and whether it's been acknowledged by the receiving station. The device
* automatically processes block-acks received from the receiving STA,
* and reschedules un-acked frames to be retransmitted (successful
* Tx completion may end up being out-of-order).
*
* The driver must maintain the queue's Byte Count table in host DRAM
- * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
* This mode does not support fragmentation.
*
* 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
*/
/**
- * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * Max Tx win size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.
* Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
- * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
*/
#define SCD_WIN_SIZE 64
#define SCD_FRAME_LIMIT 64
/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
-#define IWL49_SCD_START_OFFSET 0xa02c00
+#define IL49_SCD_START_OFFSET 0xa02c00
/*
* 4965 tells driver SRAM address for internal scheduler structs via this reg.
* Value is valid only after "Alive" response from uCode.
*/
-#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
+#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
/*
* Driver may need to update queue-empty bits after changing queue's
- * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * write and read pointers (idxes) during (re-)initialization (i.e. when
* scheduler is not tracking what's happening).
* Bit fields:
* 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
* 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
* NOTE: This register is not used by Linux driver.
*/
-#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
+#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
/*
* Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
* This register points to BC CB for queue 0, must be on 1024-byte boundary.
* Others are spaced by 1024 bytes.
 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
- * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
* Bit fields:
* 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
*/
-#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
+#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
/*
* Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
* Bit fields:
* 7- 0: Enable (1), disable (0), one bit for each channel 0-7
*/
-#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
+#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
/*
- * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
* Initialized and updated by driver as new TFDs are added to queue.
- * NOTE: If using Block Ack, index must correspond to frame's
- * Start Sequence Number; index = (SSN & 0xff)
+ * NOTE: If using Block Ack, idx must correspond to frame's
+ * Start Sequence Number; idx = (SSN & 0xff)
* NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
*/
-#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
/*
- * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
- * For FIFO mode, index indicates next frame to transmit.
- * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
+ * For FIFO mode, idx indicates next frame to transmit.
+ * For Scheduler-ACK mode, idx indicates first frame in Tx win.
* Initialized by driver, updated by scheduler.
*/
-#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
/*
* Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
 * NOTE: If the driver sets up a queue for chain mode, it should also set
 * that queue up for Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
*/
-#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
+#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
/*
* Select which queues interrupt driver when scheduler increments
- * a queue's read pointer (index).
+ * a queue's read pointer (idx).
* Bit fields:
* 31-16: Reserved
* 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
* NOTE: This functionality is apparently a no-op; driver relies on interrupts
* from Rx queue to read Tx command responses and update Tx queues.
*/
-#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
+#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
/*
* Queue search status registers. One for each queue.
@@ -414,7 +413,7 @@
* Driver should init to "1" for aggregation mode, or "0" otherwise.
* 7-6: Driver should init to "0"
* 5: Window Size Left; indicates whether scheduler can request
- * another TFD, based on window size, etc. Driver should init
+ * another TFD, based on win size, etc. Driver should init
* this bit to "1" for aggregation mode, or "0" for non-agg.
* 4-1: Tx FIFO to use (range 0-7).
* 0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
* NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
* via SCD_QUEUECHAIN_SEL.
*/
-#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
- (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+#define IL49_SCD_QUEUE_STATUS_BITS(x)\
+ (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
/* Bit field positions */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
+#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
+#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
+#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
/* Write masks */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
-#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
+#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
/**
* 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
* each queue's entry as follows:
*
* LS Dword bit fields:
- * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
+ * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
*
* MS Dword bit fields:
* 16-22: Frame limit. Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
* Init must be done after driver receives "Alive" response from 4965 uCode,
* and when setting up queue for aggregation.
*/
-#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
-#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
- (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
+#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+ (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
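A sketch of how the two context dwords are typically filled in for one queue,
using the defines above and the recommended init values (window size 64, frame
limit 10). The il_write_targ_mem() helper and the scd_base_addr argument stand
in for the driver's SRAM write path and the scheduler base address reported by
the uCode; both are assumptions here:

static void il49_txq_set_context(struct il_priv *il, u32 scd_base_addr,
				 int txq_id)
{
	/* LS dword, bits 0-6: max Tx window size for Scheduler-ACK */
	u32 lo = (64 << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK;
	/* MS dword, bits 16-22: frame limit */
	u32 hi = (10 << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK;

	il_write_targ_mem(il, scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), lo);
	il_write_targ_mem(il, scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			  hi);
}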
/*
* Tx Status Bitmap
@@ -486,7 +485,7 @@
* "Alive" notification from uCode. Area is used only by device itself;
* no other support (besides clearing) is required from driver.
*/
-#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
+#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
/*
* RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
* When a queue is in Scheduler-ACK mode, frames placed in that queue must be
* for only one combination of receiver address (RA) and traffic ID (TID), i.e.
* one QOS priority level destined for one station (for this wireless link,
- * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
* mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
* mode, the device ignores the mapping value.
*
@@ -508,16 +507,16 @@
* must read a dword-aligned value from device SRAM, replace the 16-bit map
* value of interest, and write the dword value back into device SRAM.
*/
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
+#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
/* Find translation table dword to read/write for given queue */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
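The dword-aligned read-modify-write described above looks roughly like the
sketch below. Which half of the dword a queue's 16-bit entry occupies follows
from the offset math (entry x lives at byte offset x * 2); the
il_read_targ_mem()/il_write_targ_mem() accessors are illustrative names:

static void il49_set_q2ratid(struct il_priv *il, u32 scd_base_addr,
			     u16 ra_tid, u16 txq_id)
{
	u32 addr = scd_base_addr +
		   IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u32 dw = il_read_targ_mem(il, addr);
	u16 map = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	/* odd-numbered queues own the upper 16 bits of the dword */
	if (txq_id & 0x1)
		dw = (map << 16) | (dw & 0x0000ffff);
	else
		dw = map | (dw & 0xffff0000);

	il_write_targ_mem(il, addr, dw);
}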
-#define IWL_SCD_TXFIFO_POS_TID (0)
-#define IWL_SCD_TXFIFO_POS_RA (4)
-#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
+#define IL_SCD_TXFIFO_POS_TID (0)
+#define IL_SCD_TXFIFO_POS_RA (4)
+#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
/*********************** END TX SCHEDULER *************************************/
-#endif /* __iwl_legacy_prph_h__ */
+#endif /* __il_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 57703d5209d..ae08498dfca 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -102,12 +102,28 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
-config IWLWIFI_DEVICE_SVTOOL
- bool "iwlwifi device svtool support"
+config IWLWIFI_DEVICE_TESTMODE
+ def_bool y
depends on IWLWIFI
- select NL80211_TESTMODE
+ depends on NL80211_TESTMODE
help
- This option enables the svtool support for iwlwifi device through
- NL80211_TESTMODE. svtool is a software validation tool that runs in
- the user space and interacts with the device in the kernel space
- through the generic netlink message via NL80211_TESTMODE channel.
+ This option enables testmode support for the iwlwifi device through
+ NL80211_TESTMODE. It allows user-space validation applications to
+ interact with the device through generic netlink messages sent over
+ the NL80211_TESTMODE channel.
+
+config IWLWIFI_P2P
+ bool "iwlwifi experimental P2P support"
+ depends on IWLWIFI
+ help
+ This option enables experimental P2P support for some devices
+ based on microcode support. Since P2P support is still under
+ development, this option may enable it now for some devices that
+ later turn out not to support it, due to microcode restrictions.
+
+ To determine if your microcode supports the experimental P2P
+ offered by this option, check if the driver advertises AP
+ support when it is loaded.
+
+ Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index a7ab280994c..9dc84a7354d 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,7 @@
# WIFI
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
-iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o
iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
@@ -18,7 +18,7 @@ iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff..8d3bad7ea5d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -147,16 +147,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl1000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -191,6 +182,7 @@ static struct iwl_base_params iwl1000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 128,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index b3193571ed0..0c4688d95b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -143,17 +143,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl2000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -258,7 +248,6 @@ static struct iwl_bt_params iwl2030_bt_params = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -286,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -308,7 +296,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -338,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a..6706d7c10bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
-static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
{
u16 temperature, voltage;
- __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
EEPROM_KELVIN_TEMPERATURE);
temperature = le16_to_cpu(temp_calib[0]);
@@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
- iwl_temp_calib_to_offset(priv);
+ iwl_temp_calib_to_offset(priv->shrd);
hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
@@ -186,14 +186,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
return 0;
}
@@ -222,14 +215,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5150_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -237,7 +223,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
static void iwl5150_temperature(struct iwl_priv *priv)
{
u32 vt = 0;
- s32 offset = iwl_temp_calib_to_offset(priv);
+ s32 offset = iwl_temp_calib_to_offset(priv->shrd);
vt = le32_to_cpu(priv->statistics.common.temperature);
vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
@@ -364,6 +350,7 @@ static struct iwl_base_params iwl5000_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.no_idle_support = true,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
@@ -433,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.lib = &iwl5150_lib, \
.base_params = &iwl5000_base_params, \
- .need_dc_calib = true, \
+ .no_xtal_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index ee3363fdf30..3e277b6774f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -46,11 +46,12 @@
#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 4
+#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
/* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
/* Lowest firmware API version supported */
@@ -80,7 +81,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -88,7 +89,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
@@ -164,17 +165,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl6000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.lib = &iwl6000_lib, \
.base_params = &iwl6000_g2_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
@@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
.lib = &iwl6030_lib, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
@@ -469,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = {
#define IWL_DEVICE_6000i \
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000_UCODE_API_OK, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
@@ -506,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -530,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -549,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
+ .ucode_api_ok = IWL6000_UCODE_API_OK,
.ucode_api_min = IWL6000_UCODE_API_MIN,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.lib = &iwl6000_lib,
.base_params = &iwl6000_base_params,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
.led_mode = IWL_LED_BLINK,
};
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 03bac48558b..16971a02029 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -82,56 +82,64 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-int iwl_send_calib_results(struct iwl_priv *priv)
+int iwl_send_calib_results(struct iwl_trans *trans)
{
- int ret = 0;
- int i = 0;
-
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
.flags = CMD_SYNC,
};
-
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
- priv->calib_results[i].buf) {
- hcmd.len[0] = priv->calib_results[i].buf_len;
- hcmd.data[0] = priv->calib_results[i].buf;
- hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = iwl_trans_send_cmd(trans(priv), &hcmd);
- if (ret) {
- IWL_ERR(priv, "Error %d iteration %d\n",
- ret, i);
- break;
- }
+ struct iwl_calib_result *res;
+
+ list_for_each_entry(res, &trans->calib_results, list) {
+ int ret;
+
+ hcmd.len[0] = res->cmd_len;
+ hcmd.data[0] = &res->hdr;
+ hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret) {
+ IWL_ERR(trans, "Error %d on calib cmd %d\n",
+ ret, res->hdr.op_code);
+ return ret;
}
}
- return ret;
+ return 0;
}
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len)
{
- if (res->buf_len != len) {
- kfree(res->buf);
- res->buf = kzalloc(len, GFP_ATOMIC);
- }
- if (unlikely(res->buf == NULL))
+ struct iwl_calib_result *res, *tmp;
+
+ res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+ GFP_ATOMIC);
+ if (!res)
return -ENOMEM;
+ memcpy(&res->hdr, cmd, len);
+ res->cmd_len = len;
+
+ list_for_each_entry(tmp, &trans->calib_results, list) {
+ if (tmp->hdr.op_code == res->hdr.op_code) {
+ list_replace(&tmp->list, &res->list);
+ kfree(tmp);
+ return 0;
+ }
+ }
+
+ /* wasn't in list already */
+ list_add_tail(&res->list, &trans->calib_results);
- res->buf_len = len;
- memcpy(res->buf, buf, len);
return 0;
}
-void iwl_calib_free_results(struct iwl_priv *priv)
+void iwl_calib_free_results(struct iwl_trans *trans)
{
- int i;
+ struct iwl_calib_result *res, *tmp;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+ list_del(&res->list);
+ kfree(res);
}
}
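Taken together with the INIT_LIST_HEAD() added to iwl_init_drv() later in this
patch, the intended lifecycle of trans->calib_results is sketched below. The
three function signatures are the ones introduced here; the hdr/len arguments
stand for a calibration-result notification payload and its length, and the
wrapper function itself is purely illustrative:

static int example_calib_lifecycle(struct iwl_trans *trans,
				   const struct iwl_calib_hdr *hdr, int len)
{
	int ret;

	/* once, at driver init */
	INIT_LIST_HEAD(&trans->calib_results);

	/* for each calibration result received from the init uCode;
	 * results are keyed by hdr->op_code and replaced if seen again */
	ret = iwl_calib_set(trans, hdr, len);
	if (ret)
		return ret;

	/* after the runtime uCode reports "Alive", replay every result */
	ret = iwl_send_calib_results(trans);

	/* on teardown, free the whole list */
	iwl_calib_free_results(trans);
	return ret;
}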
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index a869fc9205d..10275ce92bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -72,8 +72,4 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv);
void iwl_init_sensitivity(struct iwl_priv *priv);
void iwl_reset_run_time_calib(struct iwl_priv *priv);
-int iwl_send_calib_results(struct iwl_priv *priv);
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
-void iwl_calib_free_results(struct iwl_priv *priv);
-
#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 0bc96221735..057f9523356 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -92,11 +92,11 @@ void iwlagn_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
{
struct iwl_eeprom_calib_hdr *hdr;
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
EEPROM_CALIB_ALL);
return hdr->version;
@@ -105,7 +105,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
/*
* EEPROM
*/
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
{
u16 offset = 0;
@@ -114,31 +114,31 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
switch (address & INDIRECT_TYPE_MSK) {
case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
break;
case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
break;
case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
break;
case INDIRECT_TXP_LIMIT:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
break;
case INDIRECT_TXP_LIMIT_SIZE:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
break;
case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
break;
case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
break;
case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
break;
default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+ IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
address & INDIRECT_TYPE_MSK);
break;
}
@@ -147,11 +147,11 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
return (address & ADDRESS_MSK) + (offset << 1);
}
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[address];
+ u32 address = eeprom_indirect_address(shrd, offset);
+ BUG_ON(address >= shrd->priv->cfg->base_params->eeprom_size);
+ return &shrd->eeprom[address];
}
struct iwl_mod_params iwlagn_mod_params = {
@@ -934,57 +934,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
return ant;
}
-/* notification wait support */
-void iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data)
-{
- wait_entry->fn = fn;
- wait_entry->fn_data = fn_data;
- wait_entry->cmd = cmd;
- wait_entry->triggered = false;
- wait_entry->aborted = false;
-
- spin_lock_bh(&priv->notif_wait_lock);
- list_add(&wait_entry->list, &priv->notif_waits);
- spin_unlock_bh(&priv->notif_wait_lock);
-}
-
-int iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout)
-{
- int ret;
-
- ret = wait_event_timeout(priv->notif_waitq,
- wait_entry->triggered || wait_entry->aborted,
- timeout);
-
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
-
- if (wait_entry->aborted)
- return -EIO;
-
- /* return value is always >= 0 */
- if (ret <= 0)
- return -ETIMEDOUT;
- return 0;
-}
-
-void iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry)
-{
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
-}
-
#ifdef CONFIG_PM_SLEEP
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
@@ -1208,7 +1157,7 @@ int iwlagn_suspend(struct iwl_priv *priv,
* For QoS counters, we store the one to use next, so subtract 0x10
* since the uCode will add 0x10 before using the value.
*/
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
seq -= 0x10;
wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
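A worked example of the 0x10 adjustment, with made-up numbers (sequence
numbers advance in steps of 0x10 because the low four bits of the sequence
control field carry the fragment number):

	/*
	 * If tid_data[...].seq_number holds 0x130 (the next value the
	 * driver would use), 0x120 is stored in qos_seq[i]; the uCode adds
	 * 0x10 back before use, so its first frame goes out as 0x130.
	 */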
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 359c47a4fce..a23835a7797 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
} else
return IWL_MAX_TID_COUNT;
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
@@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
/* testmode has higher priority to overwrite the fixed rate */
if (priv->tm_fixed_rate)
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
@@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
s32 index;
struct iwl_traffic_load *tl = NULL;
- if (tid >= TID_MAX_LOAD_COUNT)
+ if (tid >= IWL_MAX_TID_COUNT)
return 0;
tl = &(lq_data->load[tid]);
@@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if (tid < TID_MAX_LOAD_COUNT)
+ if (tid < IWL_MAX_TID_COUNT)
rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
+ IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
+ tid, IWL_MAX_TID_COUNT);
}
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@ -1081,7 +1081,7 @@ done:
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
if ((priv->tm_fixed_rate) &&
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
rs_program_fix_rate(priv, lq_sta);
@@ -2904,7 +2904,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
priv->tm_fixed_rate = 0;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index f4f6deb829a..6675b3c816d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -281,7 +281,6 @@ enum {
#define TID_QUEUE_CELL_SPACING 50 /*mS */
#define TID_QUEUE_MAX_SIZE 20
#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
@@ -402,7 +401,7 @@ struct iwl_lq_sta {
struct iwl_link_quality_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+ struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index fdb4c378611..9001c23f27b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+ IWL_CMD(REPLY_D3_CONFIG);
default:
return "UNKNOWN";
@@ -1130,9 +1131,9 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
/* set up notification wait support */
- spin_lock_init(&priv->notif_wait_lock);
- INIT_LIST_HEAD(&priv->notif_waits);
- init_waitqueue_head(&priv->notif_waitq);
+ spin_lock_init(&priv->shrd->notif_wait_lock);
+ INIT_LIST_HEAD(&priv->shrd->notif_waits);
+ init_waitqueue_head(&priv->shrd->notif_waitq);
/* Set up BT Rx handlers */
if (priv->cfg->lib->bt_rx_handler_setup)
@@ -1151,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
* even if the RX handler consumes the RXB we have
* access to it in the notification wait entry.
*/
- if (!list_empty(&priv->notif_waits)) {
+ if (!list_empty(&priv->shrd->notif_waits)) {
struct iwl_notification_wait *w;
- spin_lock(&priv->notif_wait_lock);
- list_for_each_entry(w, &priv->notif_waits, list) {
+ spin_lock(&priv->shrd->notif_wait_lock);
+ list_for_each_entry(w, &priv->shrd->notif_waits, list) {
if (w->cmd != pkt->hdr.cmd)
continue;
IWL_DEBUG_RX(priv,
@@ -1164,11 +1165,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
pkt->hdr.cmd);
w->triggered = true;
if (w->fn)
- w->fn(priv, pkt, w->fn_data);
+ w->fn(trans(priv), pkt, w->fn_data);
}
- spin_unlock(&priv->notif_wait_lock);
+ spin_unlock(&priv->shrd->notif_wait_lock);
- wake_up_all(&priv->notif_waitq);
+ wake_up_all(&priv->shrd->notif_waitq);
}
if (priv->pre_rx_handler)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 8de97f5a182..d21f535a3b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -60,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
u8 old_dev_type = send->dev_type;
int ret;
- iwlagn_init_notification_wait(priv, &disable_wait,
+ iwl_init_notification_wait(priv->shrd, &disable_wait,
REPLY_WIPAN_DEACTIVATION_COMPLETE,
NULL, NULL);
@@ -74,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
- iwlagn_remove_notification(priv, &disable_wait);
+ iwl_remove_notification(priv->shrd, &disable_wait);
} else {
- ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
+ ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
if (ret)
IWL_ERR(priv, "Timed out waiting for PAN disable\n");
}
@@ -529,6 +529,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx)
+{
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+}
+
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
struct iwl_priv *priv = hw->priv;
@@ -590,19 +608,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->ht.enabled = conf_is_ht(conf);
if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
+ /* if HT40 is used, it should not change
+ * after association, except on channel switch */
+ if (!ctx->ht.is_40mhz ||
+ !iwl_is_associated_ctx(ctx))
+ iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 901fd9485d7..63d948d21c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -135,8 +135,8 @@ static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
u16 size = (u16)sizeof(struct iwl_addsta_cmd);
struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
memcpy(addsta, cmd, size);
- /* resrved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
+ /* reserved in agn */
+ addsta->legacy_reserved = cpu_to_le16(0);
return size;
}
@@ -1250,9 +1250,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
if (sta)
addr = sta->addr;
else /* station mode case only */
@@ -1265,8 +1262,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
seq.tkip.iv32, p1k, CMD_SYNC);
break;
case WLAN_CIPHER_SUITE_CCMP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index e6a02e09ee1..81754cddba7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
}
iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
@@ -148,7 +151,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
if (priv->tm_fixed_rate) {
/*
* rate overwrite by testmode
@@ -161,7 +164,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
}
#endif
return;
- }
+ } else if (ieee80211_is_back_req(fc))
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
/**
* If the current TX rate stored in mac80211 has the MCS bit set, it's
@@ -790,6 +794,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_rx_reply_tx_agg(priv, tx_resp);
if (tx_resp->frame_count == 1) {
+ IWL_DEBUG_TX_REPLY(priv, "Q %d, ssn %d", txq_id, ssn);
__skb_queue_head_init(&skbs);
/*we can free until ssn % q.n_bd not inclusive */
iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
@@ -920,11 +925,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
ba_resp->sta_id);
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
"scd_flow = %d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
+ ba_resp->tid, ba_resp->seq_ctl,
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
+ scd_flow, ba_resp_scd_ssn);
/* Mark that the expected block-ack response arrived */
agg->wait_for_ba = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e235e84de8b..f5fe42dbb3b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -366,7 +366,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
- base = priv->device_pointers.error_event_table;
+ base = priv->shrd->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
capacity = iwl_read_targ_mem(bus(priv), base);
num_wraps = iwl_read_targ_mem(bus(priv),
@@ -1036,6 +1036,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->inst_evtlog_size =
priv->cfg->base_params->max_event_log_size;
priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
+#ifndef CONFIG_IWLWIFI_P2P
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
priv->new_scan_threshold_behaviour =
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
@@ -1057,7 +1060,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->sta_key_max_num = STA_KEY_MAX_NUM;
priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
-
/*
* figure out the offset of chain noise reset and gain commands
* based on the size of the standard phy calibration commands table
@@ -1232,14 +1234,14 @@ int iwl_alive_start(struct iwl_priv *priv)
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
priv->cur_rssi_ctx = NULL;
- iwlagn_send_prio_tbl(priv);
+ iwl_send_prio_tbl(trans(priv));
/* FIXME: w/a to force change uCode BT state machine */
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
@@ -1575,6 +1577,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
mutex_init(&priv->shrd->mutex);
+ INIT_LIST_HEAD(&trans(priv)->calib_results);
+
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -1631,7 +1635,6 @@ err:
static void iwl_uninit_drv(struct iwl_priv *priv)
{
- iwl_calib_free_results(priv);
iwl_free_geos(priv);
iwl_free_channel_map(priv);
if (priv->tx_cmd_pool)
@@ -1680,6 +1683,41 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
+static void iwl_debug_config(struct iwl_priv *priv)
+{
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+#ifdef CONFIG_IWLWIFI_DEBUG
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
+#ifdef CONFIG_IWLWIFI_P2P
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+}
+
int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
struct iwl_cfg *cfg)
{
@@ -1715,6 +1753,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
SET_IEEE80211_DEV(hw, bus(priv)->dev);
+ /* what debugging capabilities we have */
+ iwl_debug_config(priv);
+
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
@@ -1780,11 +1821,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
goto out_free_eeprom;
/* extract MAC Address */
- iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+ iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
priv->hw->wiphy->addresses = priv->addresses;
priv->hw->wiphy->n_addresses = 1;
- num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+ num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
if (num_mac > 1) {
memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
ETH_ALEN);
@@ -1849,7 +1890,7 @@ out_destroy_workqueue:
priv->shrd->workqueue = NULL;
iwl_uninit_drv(priv);
out_free_eeprom:
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
out_free_trans:
iwl_trans_free(trans(priv));
out_free_traffic_mem:
@@ -1888,7 +1929,7 @@ void __devexit iwl_remove(struct iwl_priv * priv)
iwl_dealloc_ucode(trans(priv));
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/*netif_stop_queue(dev); */
flush_workqueue(priv->shrd->workqueue);
@@ -1988,9 +2029,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer (default: 0 [enabled])");
+ "Disable stuck queue watchdog timer 0=system default, "
+ "1=disable, 2=enable (default: 0)");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5d8d2f44592..eb453ea41c4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -101,13 +101,15 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx);
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwlagn_send_prio_tbl(struct iwl_priv *priv);
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_trans *trans);
int iwlagn_run_init_ucode(struct iwl_priv *priv);
int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type);
@@ -115,7 +117,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
@@ -352,28 +354,12 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
/* eeprom */
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-
-/* notification wait support */
-void __acquires(wait_entry)
-iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data);
-int __must_check __releases(wait_entry)
-iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout);
-void __releases(wait_entry)
-iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry);
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
+
extern int iwlagn_init_alive_start(struct iwl_priv *priv);
extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f4eccf58377..265de39d394 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,10 +109,10 @@ enum {
/* RX, TX, LEDs */
REPLY_TX = 0x1c,
REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e,
/* WiMAX coexistence */
- COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
+ COEX_PRIORITY_TABLE_CMD = 0x5a,
COEX_MEDIUM_NOTIFICATION = 0x5b,
COEX_EVENT_CMD = 0x5c,
@@ -466,23 +466,27 @@ struct iwl_error_event_table {
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
-#if 0
- /* no need to read the remainder, we don't use the values */
- u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */
- u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */
- u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */
- u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */
- u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
- u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */
- u32 u_timestamp; /* indicate when the date and time of the compilation */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* date and time of the
+ * compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
-#endif
} __packed;
struct iwl_alive_resp {
@@ -810,7 +814,7 @@ struct iwl_qosparam_cmd {
#define IWLAGN_STATION_COUNT 16
#define IWL_INVALID_STATION 255
-#define IWL_MAX_TID_COUNT 9
+#define IWL_MAX_TID_COUNT 8
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -931,8 +935,7 @@ struct iwl_addsta_cmd {
* corresponding to bit (e.g. bit 5 controls TID 5).
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
-
- __le16 rate_n_flags; /* 3945 only */
+ __le16 legacy_reserved;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1162,8 +1165,7 @@ struct iwl_rx_mpdu_res_start {
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (agn).
+ * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
*
* Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
* This command must be executed after every RXON command, before Tx can occur.
@@ -1175,25 +1177,9 @@ struct iwl_rx_mpdu_res_start {
* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
* before this frame. if CTS-to-self required check
* RXON_FLG_SELF_CTS_EN status.
- * unused in 3945/4965, used in 5000 series and after
*/
#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
-/*
- * 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
-
-/*
- * 1: Transmit Clear-To-Send to self before this frame.
- * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
-
/* 1: Expect ACK from receiving station
* 0: Don't expect ACK (MAC header's duration field s/b 0)
* Set this for unicast frames, but not broadcast/multicast. */
@@ -1211,18 +1197,8 @@ struct iwl_rx_mpdu_res_start {
* Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
-/*
- * 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
-
-/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
- * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+/* Tx antenna selection field; reserved (0) for agn devices. */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
-#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
-#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
/* 1: Ignore Bluetooth priority for this frame.
* 0: Delay Tx until Bluetooth device is done (normal usage). */
@@ -1568,7 +1544,6 @@ struct iwl_compressed_ba_resp {
__le64 bitmap;
__le16 scd_flow;
__le16 scd_ssn;
- /* following only for 5000 series and up */
u8 txed; /* number of frames sent */
u8 txed_2_done; /* number of frames acked */
} __packed;
@@ -1670,7 +1645,7 @@ struct iwl_link_qual_agg_params {
/*
* REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For agn devices only; 3945 uses REPLY_RATE_SCALE.
+ * For agn devices
*
* Each station in the agn device's internal station table has its own table
* of 16
@@ -1919,7 +1894,7 @@ struct iwl_link_quality_cmd {
/*
* REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
*
- * 3945 and agn devices support hardware handshake with Bluetooth device on
+ * agn devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
@@ -2203,8 +2178,8 @@ struct iwl_spectrum_notification {
struct iwl_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds;
+ u8 debug_flags;
__le32 rx_data_timeout;
__le32 tx_data_timeout;
__le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2325,9 +2300,9 @@ struct iwl_scan_channel {
/**
* struct iwl_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
- * each channel may select different ssids from among the 20 (4) entries.
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
* SSID IEs get transmitted in reverse order of entry.
*/
struct iwl_ssid_ie {
@@ -2336,7 +2311,6 @@ struct iwl_ssid_ie {
u8 ssid[32];
} __packed;
-#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH_DISABLED 0
@@ -2417,8 +2391,6 @@ struct iwl_scan_cmd {
* channel */
__le32 suspend_time; /* pause scan this long (in "extended beacon
* format") when returning to service chnl:
- * 3945; 31:24 # beacons, 19:0 additional usec,
- * 4965; 31:22 # beacons, 21:0 additional usec.
*/
__le32 flags; /* RXON_FLG_* */
__le32 filter_flags; /* RXON_FILTER_* */
@@ -2734,7 +2706,7 @@ struct statistics_div {
struct statistics_general_common {
__le32 temperature; /* radio temperature */
- __le32 temperature_m; /* for 5000 and up, this is radio voltage */
+ __le32 temperature_m; /* radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index f9e9170e977..3b6f48bfe0e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -836,19 +836,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
}
#endif
-static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_notification_wait *wait_entry;
-
- spin_lock_irqsave(&priv->notif_wait_lock, flags);
- list_for_each_entry(wait_entry, &priv->notif_waits, list)
- wait_entry->aborted = true;
- spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
-
- wake_up_all(&priv->notif_waitq);
-}
-
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
unsigned int reload_msec;
@@ -860,7 +847,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
- iwlagn_abort_notification_waits(priv);
+ iwl_abort_notification_waits(priv->shrd);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
@@ -1505,11 +1492,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
{
unsigned int timeout = priv->cfg->base_params->wd_timeout;
- if (timeout && !iwlagn_mod_params.wd_disable)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
+ if (!iwlagn_mod_params.wd_disable) {
+ /* use system default */
+ if (timeout && !priv->cfg->base_params->wd_disable)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ } else {
+ /* module parameter overwrites the default configuration */
+ if (timeout && iwlagn_mod_params.wd_disable == 2)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ }
}
/**
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index fa47f75185d..6da53a36c1b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
* @shadow_reg_enable: HW shadow register bit
* @no_idle_support: do not support idle mode
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
*/
struct iwl_base_params {
int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
const bool shadow_reg_enable;
const bool no_idle_support;
const bool hd_v2;
+ const bool wd_disable;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
@@ -184,8 +186,9 @@ struct iwl_ht_params {
* @ht_params: pointer to ht parameters
* @bt_params: pointer to bt parameters
* @pa_type: used by 6000 series only to identify the type of Power Amplifier
- * @need_dc_calib: need to perform init dc calibration
* @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ * don't send it to those
* @scan_antennas: available antenna for scan operation
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
* @adv_pm: advance power management
@@ -222,8 +225,8 @@ struct iwl_cfg {
struct iwl_ht_params *ht_params;
struct iwl_bt_params *bt_params;
enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
- const bool need_dc_calib; /* if used set to true */
const bool need_temp_offset_calib; /* if used set to true */
+ const bool no_xtal_calib;
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
enum iwl_led_mode led_mode;
const bool adv_pm;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 40ef97bac1a..f8fc2393dd4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,20 +47,21 @@ do { \
} while (0)
#ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(m, level, fmt, args...) \
+#define IWL_DEBUG(m, level, fmt, ...) \
do { \
if (iwl_get_debug_level((m)->shrd) & (level)) \
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
+#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \
do { \
- if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ if (iwl_get_debug_level((m)->shrd) & (level) && \
+ net_ratelimit()) \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
#define iwl_print_hex_dump(m, level, p, len) \
@@ -70,14 +71,18 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
do { \
- if (!iwl_is_rfkill(p->shrd)) \
- dev_printk(KERN_ERR, bus(p)->dev, "%c %s " fmt, \
- (in_interrupt() ? 'I' : 'U'), __func__ , ##args); \
- else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
- dev_printk(KERN_ERR, bus(p)->dev, "(RFKILL) %c %s " fmt, \
- (in_interrupt() ? 'I' : 'U'), __func__ , ##args); \
+ if (!iwl_is_rfkill(p->shrd)) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
+ else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "(RFKILL) ", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
#else
@@ -129,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
*/
/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
+#define IWL_DL_INFO 0x00000001
+#define IWL_DL_MAC80211 0x00000002
+#define IWL_DL_HCMD 0x00000004
+#define IWL_DL_STATE 0x00000008
/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
+#define IWL_DL_EEPROM 0x00000040
+#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-/* reserved (1 << 10) */
-#define IWL_DL_SCAN (1 << 11)
+#define IWL_DL_POWER 0x00000100
+#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-/* reserved (1 << 14) */
-#define IWL_DL_COEX (1 << 15)
+#define IWL_DL_ASSOC 0x00001000
+#define IWL_DL_DROP 0x00002000
+#define IWL_DL_COEX 0x00008000
/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
+#define IWL_DL_FW 0x00010000
+#define IWL_DL_RF_KILL 0x00020000
+#define IWL_DL_FW_ERRORS 0x00040000
+#define IWL_DL_LED 0x00080000
/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
+#define IWL_DL_RATE 0x00100000
+#define IWL_DL_CALIB 0x00200000
+#define IWL_DL_WEP 0x00400000
+#define IWL_DL_TX 0x00800000
/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
+#define IWL_DL_RX 0x01000000
+#define IWL_DL_ISR 0x02000000
+#define IWL_DL_HT 0x04000000
/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_TX_QUEUES (1 << 31)
+#define IWL_DL_11H 0x10000000
+#define IWL_DL_STATS 0x20000000
+#define IWL_DL_TX_REPLY 0x40000000
+#define IWL_DL_TX_QUEUES 0x80000000
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -179,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -201,8 +200,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
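A minimal user-space sketch of the two mechanics this hunk touches: the C99-style variadic macro form (##__VA_ARGS__ in place of the GNU "args..." syntax) and the debug-level bit flags that gate each message. The dbg() macro, debug_level variable, and flag values below are invented for the demo; they only mirror the pattern and are not the driver's API.

#include <stdio.h>

#define DL_INFO		0x00000001
#define DL_MAC80211	0x00000002
#define DL_RX		0x01000000

static unsigned int debug_level = DL_INFO | DL_RX;	/* enabled categories */

/* same shape as IWL_DEBUG(): ## swallows the comma when no args are given */
#define dbg(level, fmt, ...) \
do { \
	if (debug_level & (level)) \
		fprintf(stderr, "%s " fmt, __func__, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	dbg(DL_INFO, "info enabled, value=%d\n", 42);	/* printed */
	dbg(DL_MAC80211, "masked off\n");		/* suppressed */
	return 0;
}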
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 68b04f5b10c..6bf6845e1a5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ struct iwl_trans *trans = trans(priv);
priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == IWL_UCODE_INIT)
- priv->dbgfs_sram_len = trans(priv)->ucode_init.data.len;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT)
+ priv->dbgfs_sram_len = trans->ucode_init.data.len;
else
- priv->dbgfs_sram_len = trans(priv)->ucode_rt.data.len;
+ priv->dbgfs_sram_len = trans->ucode_rt.data.len;
}
len = priv->dbgfs_sram_len;
@@ -415,7 +416,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return -ENODATA;
}
- ptr = priv->eeprom;
+ ptr = priv->shrd->eeprom;
if (!ptr) {
IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
return -ENOMEM;
@@ -427,7 +428,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
"version: 0x%x\n",
(trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 556e4a2c19b..69ecf6e2e65 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -60,11 +60,10 @@ struct iwl_tx_queue;
/* Default noise level to report when noise measurement is not available.
* This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
+ * 1) Not associated (no beacon statistics being sent to driver)
* 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
* Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
* Also, -127 works better than 0 when averaging frames with/without
* noise info (e.g. averaging might be done in app); measured dBm values are
* always negative ... using a negative value as the default keeps all
@@ -441,29 +440,6 @@ enum iwlagn_chain_noise_state {
IWL_CHAIN_NOISE_DONE,
};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_XTAL,
- IWL_CALIB_DC,
- IWL_CALIB_LO,
- IWL_CALIB_TX_IQ,
- IWL_CALIB_TX_IQ_PERD,
- IWL_CALIB_BASE_BAND,
- IWL_CALIB_TEMP_OFFSET,
- IWL_CALIB_MAX
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
/* Sensitivity calib data */
struct iwl_sensitivity_data {
u32 auto_corr_ofdm;
@@ -703,35 +679,6 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
- struct list_head list;
-
- void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
- void *data);
- void *fn_data;
-
- u8 cmd;
- bool triggered, aborted;
-};
-
struct iwl_rxon_context {
struct ieee80211_vif *vif;
@@ -794,7 +741,7 @@ enum iwl_scan_type {
IWL_SCAN_ROC,
};
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace {
u32 buff_size;
u32 total_size;
@@ -804,6 +751,12 @@ struct iwl_testmode_trace {
dma_addr_t dma_addr;
bool trace_enabled;
};
+struct iwl_testmode_sram {
+ u32 buff_size;
+ u32 num_chunks;
+ u8 *buff_addr;
+ bool sram_readed;
+};
#endif
struct iwl_wipan_noa_data {
@@ -868,9 +821,6 @@ struct iwl_priv {
s32 temperature; /* Celsius */
s32 last_temperature;
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
struct iwl_wipan_noa_data __rcu *noa_data;
/* Scan related variables */
@@ -897,18 +847,12 @@ struct iwl_priv {
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
- enum iwl_ucode_type ucode_type;
char firmware_name[25];
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
__le16 switch_channel;
- struct {
- u32 error_event_table;
- u32 log_event_table;
- } device_pointers;
-
u16 active_rate;
u8 start_calib;
@@ -942,10 +886,6 @@ struct iwl_priv {
/* Indication if ieee80211_ops->open has been called */
u8 is_open;
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- struct iwl_eeprom_calib_info *calib_info;
-
enum nl80211_iftype iw_mode;
/* Last Rx'd beacon timestamp */
@@ -1001,10 +941,6 @@ struct iwl_priv {
/* counts reply_tx error */
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
- /* notification wait support */
- struct list_head notif_waits;
- spinlock_t notif_wait_lock;
- wait_queue_head_t notif_waitq;
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
@@ -1082,8 +1018,9 @@ struct iwl_priv {
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace testmode_trace;
+ struct iwl_testmode_sram testmode_sram;
u32 tm_fixed_rate;
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index dcada0827ea..6fcc7d586b2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
return ret;
}
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
{
- if (!priv->eeprom)
+ if (!shrd->eeprom)
return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+ return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
}
int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -227,8 +227,8 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
u16 eeprom_ver;
u16 calib_ver;
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwlagn_eeprom_calib_version(priv);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
+ calib_ver = iwl_eeprom_calib_version(priv->shrd);
if (eeprom_ver < priv->cfg->eeprom_ver ||
calib_ver < priv->cfg->eeprom_calib_ver)
@@ -249,11 +249,12 @@ err:
int iwl_eeprom_check_sku(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
u16 radio_cfg;
if (!priv->cfg->sku) {
/* not using sku overwrite */
- priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
+ priv->cfg->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
!priv->cfg->ht_params) {
IWL_ERR(priv, "Invalid 11n configuration\n");
@@ -269,7 +270,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
/* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
@@ -289,9 +290,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
return 0;
}
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
{
- const u8 *addr = iwl_eeprom_query_addr(priv,
+ const u8 *addr = iwl_eeprom_query_addr(shrd,
EEPROM_MAC_ADDRESS);
memcpy(mac, addr, ETH_ALEN);
}
@@ -582,6 +583,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
__le16 *txp_len;
@@ -590,10 +592,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+ txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
@@ -646,12 +648,13 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
/**
* iwl_eeprom_init - read EEPROM contents
*
- * Load the EEPROM contents from adapter into priv->eeprom
+ * Load the EEPROM contents from adapter into shrd->eeprom
*
* NOTE: This routine uses the non-debug IO access functions.
*/
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
+ struct iwl_shared *shrd = priv->shrd;
__le16 *e;
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
int sz;
@@ -666,12 +669,12 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
/* allocate eeprom */
sz = priv->cfg->base_params->eeprom_size;
IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
+ shrd->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!shrd->eeprom) {
ret = -ENOMEM;
goto alloc_err;
}
- e = (__le16 *)priv->eeprom;
+ e = (__le16 *)shrd->eeprom;
iwl_apm_init(priv);
@@ -746,7 +749,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
(trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM",
- iwl_eeprom_query16(priv, EEPROM_VERSION));
+ iwl_eeprom_query16(shrd, EEPROM_VERSION));
ret = 0;
done:
@@ -754,17 +757,17 @@ done:
err:
if (ret)
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/* Reset chip to save power until we load uCode during "up". */
iwl_apm_stop(priv);
alloc_err:
return ret;
}
-void iwl_eeprom_free(struct iwl_priv *priv)
+void iwl_eeprom_free(struct iwl_shared *shrd)
{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
+ kfree(shrd->eeprom);
+ shrd->eeprom = NULL;
}
static void iwl_init_band_reference(const struct iwl_priv *priv,
@@ -772,49 +775,50 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
const struct iwl_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_index)
{
+ struct iwl_shared *shrd = priv->shrd;
u32 offset = priv->cfg->lib->
eeprom_ops.regulatory_bands[eep_band - 1];
switch (eep_band) {
case 1: /* 2.4GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_1;
break;
case 2: /* 4.9GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_2;
break;
case 3: /* 5.2GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_3;
break;
case 4: /* 5.5GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_4;
break;
case 5: /* 5.7GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_5;
break;
case 6: /* 2.4GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_6;
break;
case 7: /* 5 GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_7;
break;
default:
@@ -1064,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
{
u16 radio_cfg;
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
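A stand-alone sketch of the byte-order handling behind iwl_eeprom_query16(): the EEPROM image is kept in the card's little-endian order, so a 16-bit word at a given offset is byte[offset] | byte[offset+1] << 8. The buffer below is invented test data, not a real EEPROM dump.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t query16(const uint8_t *eeprom, size_t offset)
{
	if (!eeprom)
		return 0;
	return (uint16_t)eeprom[offset] | ((uint16_t)eeprom[offset + 1] << 8);
}

int main(void)
{
	const uint8_t eeprom[] = { 0x34, 0x12, 0x78, 0x56 };

	printf("word at 0: 0x%04x\n", query16(eeprom, 0));	/* 0x1234 */
	printf("word at 2: 0x%04x\n", query16(eeprom, 2));	/* 0x5678 */
	return 0;
}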
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index c94747e7299..9fa937ec35e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,6 +66,7 @@
#include <net/mac80211.h>
struct iwl_priv;
+struct iwl_shared;
/*
* EEPROM access time values:
@@ -305,11 +306,11 @@ struct iwl_eeprom_ops {
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
+void iwl_eeprom_free(struct iwl_shared *shrd);
int iwl_eeprom_check_version(struct iwl_priv *priv);
int iwl_eeprom_check_sku(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
void iwl_free_channel_map(struct iwl_priv *priv);
const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3ffa8e62b85..3464cad7e38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
spin_lock_irqsave(&bus->reg_lock, flags);
iwl_grab_nic_access(bus);
- value = iwl_read32(bus(bus), reg);
+ value = iwl_read32(bus, reg);
iwl_release_nic_access(bus);
spin_unlock_irqrestore(&bus->reg_lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index 05b1f0d2f38..e3944f4e4fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -427,7 +427,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
- base = priv->device_pointers.error_event_table;
+ base = priv->shrd->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
spin_lock_irqsave(&bus(priv)->reg_lock, flags);
ret = iwl_grab_nic_access_silent(bus(priv));
@@ -481,15 +481,11 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = hw->priv;
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
if (iwlagn_tx_skb(priv, skb))
dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
}
static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -521,6 +517,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ break;
+ }
+
/*
* We could program these keys into the hardware as well, but we
* don't expect much multicast traffic in IBSS and having keys
@@ -804,21 +811,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
/* Configure HT40 channels */
ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
+ if (ctx->ht.enabled)
+ iwlagn_config_ht40(conf, ctx);
+ else
ctx->ht.is_40mhz = false;
if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -1053,6 +1048,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
int ret;
u8 sta_id;
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return 0;
+
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
@@ -1102,6 +1100,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *ctx = vif_priv->ctx;
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return;
+
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 86d6a2354e8..850ec8e51b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -60,6 +60,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c4..29a7284aa3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -97,6 +97,7 @@
struct iwl_cfg;
struct iwl_bus;
struct iwl_priv;
+struct iwl_trans;
struct iwl_sensitivity_ranges;
struct iwl_trans_ops;
@@ -120,7 +121,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
* @restart_fw: restart firmware, default = 1
* @plcp_check: enable plcp health check, default = true
* @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: enable stuck queue check, default = 0
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +142,7 @@ struct iwl_mod_params {
int restart_fw;
bool plcp_check;
bool ack_check;
- bool wd_disable;
+ int wd_disable;
bool bt_coex_active;
int led_mode;
bool no_sleep_autoadjust;
@@ -174,7 +175,6 @@ struct iwl_mod_params {
* @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
* relevant for 1000, 6000 and up
* @wd_timeout: TX queues watchdog timeout
- * @calib_init_cfg: setup initial calibrations for the hw
* @calib_rt_cfg: setup runtime calibrations for the hw
* @struct iwl_sensitivity_ranges: range of sensitivity values
*/
@@ -195,7 +195,6 @@ struct iwl_hw_params {
u32 ct_kill_exit_threshold;
unsigned int wd_timeout;
- u32 calib_init_cfg;
u32 calib_rt_cfg;
const struct iwl_sensitivity_ranges *sens;
};
@@ -259,6 +258,52 @@ struct iwl_tid_data {
};
/**
+ * enum iwl_ucode_type
+ *
+ * The type of ucode currently loaded on the hardware.
+ *
+ * @IWL_UCODE_NONE: No ucode loaded
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ */
+enum iwl_ucode_type {
+ IWL_UCODE_NONE,
+ IWL_UCODE_REGULAR,
+ IWL_UCODE_INIT,
+ IWL_UCODE_WOWLAN,
+};
+
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwl_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the uCode
+ * to notify the driver, and wait for that notification
+ * with iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
+ void *data);
+ void *fn_data;
+
+ u8 cmd;
+ bool triggered, aborted;
+};
+
+/**
* struct iwl_shared - shared fields for all the layers of the driver
*
* @dbg_level_dev: dbg level set per device. Prevails on
@@ -275,6 +320,11 @@ struct iwl_tid_data {
* @sta_lock: protects the station table.
* If lock and sta_lock are needed, lock must be acquired first.
* @mutex:
+ * @ucode_type: indicator of loaded ucode image
+ * @notif_waits: things waiting for notification
+ * @notif_wait_lock: lock protecting the notification list
+ * @notif_waitq: head of notification wait queue
+ * @device_pointers: pointers to ucode event tables
*/
struct iwl_shared {
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -302,6 +352,23 @@ struct iwl_shared {
struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
wait_queue_head_t wait_command_queue;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+
+ /* ucode related variables */
+ enum iwl_ucode_type ucode_type;
+
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ struct {
+ u32 error_event_table;
+ u32 log_event_table;
+ } device_pointers;
+
};
/*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
@@ -445,6 +512,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv);
void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
+/* notification wait support */
+void iwl_abort_notification_waits(struct iwl_shared *shrd);
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data);
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_reset_traffic_log(struct iwl_priv *priv);
#endif /* CONFIG_IWLWIFI_DEBUGFS */
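A hedged usage sketch of the notification-wait API as it lands in iwl-shared.h, following the same pattern iwl_testmode_cfg_init_calib() uses later in this patch. trigger_ucode_work() and the surrounding function are placeholders, not real driver calls; only the three iwl_*_notification* calls and the struct come from the declarations above.

static int example_wait_for_calib(struct iwl_shared *shrd)
{
	struct iwl_notification_wait wait_entry;
	int ret;

	/* declare on the stack and register before triggering the uCode */
	iwl_init_notification_wait(shrd, &wait_entry,
				   CALIBRATION_COMPLETE_NOTIFICATION,
				   NULL, NULL);

	ret = trigger_ucode_work(shrd);		/* placeholder trigger step */
	if (ret) {
		/* nothing will arrive, so drop the one-shot entry */
		iwl_remove_notification(shrd, &wait_entry);
		return ret;
	}

	/* sleeps until the notification fires or the timeout expires */
	return iwl_wait_notification(shrd, &wait_entry, 2 * HZ);
}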
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index e3882d0cfc8..a874eb7b5f8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -77,6 +77,7 @@
#include "iwl-agn.h"
#include "iwl-testmode.h"
#include "iwl-trans.h"
+#include "iwl-bus.h"
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -106,6 +107,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+ [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
};
/*
@@ -177,6 +185,18 @@ void iwl_testmode_init(struct iwl_priv *priv)
{
priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
priv->testmode_trace.trace_enabled = false;
+ priv->testmode_sram.sram_readed = false;
+}
+
+static void iwl_sram_cleanup(struct iwl_priv *priv)
+{
+ if (priv->testmode_sram.sram_readed) {
+ kfree(priv->testmode_sram.buff_addr);
+ priv->testmode_sram.buff_addr = NULL;
+ priv->testmode_sram.buff_size = 0;
+ priv->testmode_sram.num_chunks = 0;
+ priv->testmode_sram.sram_readed = false;
+ }
}
static void iwl_trace_cleanup(struct iwl_priv *priv)
@@ -201,6 +221,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
void iwl_testmode_cleanup(struct iwl_priv *priv)
{
iwl_trace_cleanup(priv);
+ iwl_sram_cleanup(priv);
}
/*
@@ -276,7 +297,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read32(bus(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
@@ -291,7 +312,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_DEBUG_INFO(priv,
"Error sending msg : %d\n", status);
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
IWL_DEBUG_INFO(priv,
"Error finding value to write\n");
@@ -302,7 +323,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write32(bus(priv), ofs, val32);
}
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
IWL_DEBUG_INFO(priv, "Error finding value to write\n");
return -ENOMSG;
@@ -312,6 +333,32 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write8(bus(priv), ofs, val8);
}
break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ val32 = iwl_read_prph(bus(priv), ofs);
+ IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+ IWL_DEBUG_INFO(priv,
+ "Error finding value to write\n");
+ return -ENOMSG;
+ } else {
+ val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+ IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+ iwl_write_prph(bus(priv), ofs, val32);
+ }
+ break;
default:
IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
return -ENOSYS;
@@ -330,7 +377,7 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
struct iwl_notification_wait calib_wait;
int ret;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(priv->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
ret = iwlagn_init_alive_start(priv);
@@ -340,14 +387,14 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
goto cfg_init_calib_error;
}
- ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
+ ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
if (ret)
IWL_DEBUG_INFO(priv, "Error detecting"
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
return ret;
cfg_init_calib_error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(priv->shrd, &calib_wait);
return ret;
}
@@ -373,6 +420,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
struct sk_buff *skb;
unsigned char *rsp_data_ptr = NULL;
int status = 0, rsp_data_len = 0;
+ char buf[32], *ptr = NULL;
+ unsigned int num, devid;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
@@ -420,8 +469,23 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
"Error starting the device: %d\n", status);
break;
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ iwl_scan_cancel_timeout(priv, 200);
+ iwl_trans_stop_device(trans(priv));
+ status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
+ if (status) {
+ IWL_DEBUG_INFO(priv,
+ "Error loading WOWLAN ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_DEBUG_INFO(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- if (priv->eeprom) {
+ if (priv->shrd->eeprom) {
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
priv->cfg->base_params->eeprom_size + 20);
if (!skb) {
@@ -433,7 +497,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_TM_CMD_DEV2APP_EEPROM_RSP);
NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
priv->cfg->base_params->eeprom_size,
- priv->eeprom);
+ priv->shrd->eeprom);
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_DEBUG_INFO(priv,
@@ -452,6 +516,43 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ bus_get_hw_id(bus(priv), buf, sizeof(buf));
+ ptr = buf;
+ strsep(&ptr, ":");
+ sscanf(strsep(&ptr, ":"), "%x", &num);
+ sscanf(strsep(&ptr, ":"), "%x", &devid);
+ IWL_INFO(priv, "Device ID = 0x%04x, SubDevice ID = 0x%04x\n",
+ num, devid);
+ devid |= (num << 16);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
return -ENOSYS;
@@ -532,7 +633,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
}
priv->testmode_trace.num_chunks =
DIV_ROUND_UP(priv->testmode_trace.buff_size,
- TRACE_CHUNK_SIZE);
+ DUMP_CHUNK_SIZE);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
@@ -564,15 +665,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
idx = cb->args[4];
if (idx >= priv->testmode_trace.num_chunks)
return -ENOENT;
- length = TRACE_CHUNK_SIZE;
+ length = DUMP_CHUNK_SIZE;
if (((idx + 1) == priv->testmode_trace.num_chunks) &&
- (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
+ (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
length = priv->testmode_trace.buff_size %
- TRACE_CHUNK_SIZE;
+ DUMP_CHUNK_SIZE;
NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
priv->testmode_trace.trace_addr +
- (TRACE_CHUNK_SIZE * idx));
+ (DUMP_CHUNK_SIZE * idx));
idx++;
cb->args[4] = idx;
return 0;
@@ -618,6 +719,110 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
return 0;
}
+/*
+ * This function handles the user application commands for SRAM data dump
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
+ * IWL_TM_ATTR_SRAM_SIZE to determine which SRAM area to read.
+ *
+ * Several errors may be returned: -EBUSY if the SRAM data retrieved by a
+ * previous command has not yet been delivered to userspace, -ENOMSG if
+ * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
+ * are missing, or -ENOMEM if the buffer allocation fails.
+ *
+ * Otherwise 0 is returned, indicating that the SRAM read succeeded.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = hw->priv;
+ u32 base, ofs, size, maxsize;
+
+ if (priv->testmode_sram.sram_readed)
+ return -EBUSY;
+
+ if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
+ IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+ return -ENOMSG;
+ }
+ ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
+ if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
+ IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+ return -ENOMSG;
+ }
+ size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
+ switch (priv->shrd->ucode_type) {
+ case IWL_UCODE_REGULAR:
+ maxsize = trans(priv)->ucode_rt.data.len;
+ break;
+ case IWL_UCODE_INIT:
+ maxsize = trans(priv)->ucode_init.data.len;
+ break;
+ case IWL_UCODE_WOWLAN:
+ maxsize = trans(priv)->ucode_wowlan.data.len;
+ break;
+ case IWL_UCODE_NONE:
+ IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n");
+ return -ENOSYS;
+ default:
+ IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
+ return -ENOSYS;
+ }
+ if ((ofs + size) > maxsize) {
+ IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
+ return -EINVAL;
+ }
+ priv->testmode_sram.buff_size = (size / 4) * 4;
+ priv->testmode_sram.buff_addr =
+ kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
+ if (priv->testmode_sram.buff_addr == NULL) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ base = 0x800000;
+ _iwl_read_targ_mem_words(bus(priv), base + ofs,
+ priv->testmode_sram.buff_addr,
+ priv->testmode_sram.buff_size / 4);
+ priv->testmode_sram.num_chunks =
+ DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
+ priv->testmode_sram.sram_readed = true;
+ return 0;
+}
+
+static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct iwl_priv *priv = hw->priv;
+ int idx, length;
+
+ if (priv->testmode_sram.sram_readed) {
+ idx = cb->args[4];
+ if (idx >= priv->testmode_sram.num_chunks) {
+ iwl_sram_cleanup(priv);
+ return -ENOENT;
+ }
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == priv->testmode_sram.num_chunks) &&
+ (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
+ length = priv->testmode_sram.buff_size %
+ DUMP_CHUNK_SIZE;
+
+ NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
+ priv->testmode_sram.buff_addr +
+ (DUMP_CHUNK_SIZE * idx));
+ idx++;
+ cb->args[4] = idx;
+ return 0;
+ } else
+ return -EFAULT;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -665,9 +870,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
result = iwl_testmode_ucode(hw, tb);
break;
- case IWL_TM_CMD_APP2DEV_REG_READ32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
result = iwl_testmode_reg(hw, tb);
break;
@@ -677,6 +884,9 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
result = iwl_testmode_driver(hw, tb);
break;
@@ -693,6 +903,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
result = iwl_testmode_ownership(hw, tb);
break;
+ case IWL_TM_CMD_APP2DEV_READ_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
+ result = iwl_testmode_sram(hw, tb);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
result = -ENOSYS;
@@ -741,6 +956,10 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
result = iwl_testmode_trace_dump(hw, tb, skb, cb);
break;
+ case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
+ result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+ break;
default:
result = -EINVAL;
break;
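A user-space walk-through of the strsep()/sscanf() parsing added for IWL_TM_CMD_APP2DEV_GET_DEVICE_ID. The id string format below (tag:device:subdevice in hex) is only an assumption about what bus_get_hw_id() produces; the point is how the two hex fields end up packed into one 32-bit value.

#define _DEFAULT_SOURCE		/* for strsep() with glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[32] = "PCI:0x0082:0x1301";	/* invented example id string */
	char *ptr = buf;
	unsigned int num = 0, devid = 0;

	strsep(&ptr, ":");			/* skip the leading tag */
	sscanf(strsep(&ptr, ":"), "%x", &num);
	sscanf(strsep(&ptr, ":"), "%x", &devid);
	devid |= (num << 16);

	printf("Device ID = 0x%04x, SubDevice ID = 0x%04x, packed = 0x%08x\n",
	       num & 0xffff, devid & 0xffff, devid);
	return 0;
}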
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index b980bda4b0f..26138f11034 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -76,9 +76,9 @@
* the actual uCode host command ID is carried with
* IWL_TM_ATTR_UCODE_CMD_ID
*
- * @IWL_TM_CMD_APP2DEV_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
* commands from user application to access a register
*
* @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
@@ -103,16 +103,30 @@
* @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
* commands from kernel space to carry the eeprom response
* to user application
+ *
* @IWL_TM_CMD_APP2DEV_OWNERSHIP:
* commands from user application to own change the ownership of the uCode
* if application has the ownership, the only host command from
* testmode will deliver to uCode. Default owner is driver
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ * commands from user application to indirectly access peripheral registers
+ *
+ * @IWL_TM_CMD_APP2DEV_READ_SRAM:
+ * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ * commands from user application to read data from SRAM
+ *
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake on Wireless LAN uCode image
+ * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
+ * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve the device ID information
+ *
*/
enum iwl_tm_cmd_t {
IWL_TM_CMD_APP2DEV_UCODE = 1,
- IWL_TM_CMD_APP2DEV_REG_READ32 = 2,
- IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3,
- IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
@@ -126,7 +140,14 @@ enum iwl_tm_cmd_t {
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
- IWL_TM_CMD_MAX = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
+ IWL_TM_CMD_APP2DEV_READ_SRAM = 20,
+ IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21,
+ IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
+ IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
+ IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
+ IWL_TM_CMD_MAX = 25,
};
/*
@@ -196,6 +217,26 @@ enum iwl_tm_cmd_t {
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_OWNER for the new owner
+ *
+ * @IWL_TM_ATTR_SRAM_ADDR:
+ * @IWL_TM_ATTR_SRAM_SIZE:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_SRAM_ADDR for the address in SRAM
+ * IWL_TM_ATTR_SRAM_SIZE for the size of the data to read
+ *
+ * @IWL_TM_ATTR_SRAM_DUMP:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
+ * IWL_TM_ATTR_SRAM_DUMP for the data read from SRAM
+ *
+ * @IWL_TM_ATTR_FW_VERSION:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
+ * IWL_TM_ATTR_FW_VERSION for the uCode version
+ *
+ * @IWL_TM_ATTR_DEVICE_ID:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
+ * IWL_TM_ATTR_DEVICE_ID for the device ID information
+ *
*/
enum iwl_tm_attr_t {
IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -213,7 +254,12 @@ enum iwl_tm_attr_t {
IWL_TM_ATTR_TRACE_DUMP = 12,
IWL_TM_ATTR_FIXRATE = 13,
IWL_TM_ATTR_UCODE_OWNER = 14,
- IWL_TM_ATTR_MAX = 15,
+ IWL_TM_ATTR_SRAM_ADDR = 15,
+ IWL_TM_ATTR_SRAM_SIZE = 16,
+ IWL_TM_ATTR_SRAM_DUMP = 17,
+ IWL_TM_ATTR_FW_VERSION = 18,
+ IWL_TM_ATTR_DEVICE_ID = 19,
+ IWL_TM_ATTR_MAX = 20,
};
/* uCode trace buffer */
@@ -221,6 +267,8 @@ enum iwl_tm_attr_t {
#define TRACE_BUFF_SIZE_MIN 0x20000
#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
#define TRACE_BUFF_PADD 0x2000
-#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024)
+
+/* Maximum data size of each dump packet */
+#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
#endif
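A stand-alone arithmetic check of the chunking scheme the SRAM and trace dump paths share: the buffer is delivered in DUMP_CHUNK_SIZE pieces, with the last piece carrying the remainder. PAGE_SIZE is assumed to be 4096 here purely for the demo; in the kernel it is whatever the architecture defines.

#include <stdio.h>

#define PAGE_SIZE	4096
#define DUMP_CHUNK_SIZE	(PAGE_SIZE - 1024)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int buff_size = 10000;		/* invented dump size */
	unsigned int num_chunks = DIV_ROUND_UP(buff_size, DUMP_CHUNK_SIZE);
	unsigned int idx, length;

	for (idx = 0; idx < num_chunks; idx++) {
		length = DUMP_CHUNK_SIZE;
		if (idx + 1 == num_chunks && buff_size % DUMP_CHUNK_SIZE)
			length = buff_size % DUMP_CHUNK_SIZE;
		printf("chunk %u: %u bytes\n", idx, length);
	}
	return 0;
}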
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index afaaa2a51b9..5a384b309b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -354,6 +354,11 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
txq->swq_id = (hwq << 2) | ac;
}
+static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
+{
+ return txq->swq_id & 0x3;
+}
+
static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_tx_queue *txq, const char *msg)
{
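A quick stand-alone check of the swq_id packing that iwl_set_swq_id() and the new iwl_get_queue_ac() helper rely on: the hardware queue number sits in the upper bits and the AC in the low two bits. Plain user-space code, no driver types involved.

#include <stdio.h>

int main(void)
{
	unsigned char ac = 2, hwq = 10;
	unsigned char swq_id = (hwq << 2) | ac;	/* same packing as iwl_set_swq_id() */

	printf("swq_id=0x%02x -> ac=%u hwq=%u\n",
	       swq_id, swq_id & 0x3, swq_id >> 2);
	return 0;
}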
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index ee126f844a5..2ee00e0f39d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- base = priv->device_pointers.error_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.error_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_errlog_ptr;
} else {
@@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
@@ -648,6 +648,21 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+
+ IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
}
/**
@@ -709,8 +724,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
if (num_events == 0)
return pos;
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_evtlog_ptr;
} else {
@@ -823,8 +838,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
size_t bufsz = 0;
struct iwl_priv *priv = priv(trans);
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
logsize = priv->init_evtlog_size;
if (!base)
base = priv->init_evtlog_ptr;
@@ -838,7 +853,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
IWL_ERR(trans,
"Invalid event log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return -EINVAL;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 6dba1515023..79331fb10aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -559,7 +559,6 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
tid_data->agg.txq_id = txq_id;
iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n");
tid_data->agg.state = IWL_AGG_ON;
@@ -1121,9 +1120,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
return 0;
}
- IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
- q->read_ptr, index);
-
if (WARN_ON(!skb_queue_empty(skbs)))
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index a1a58330273..66e1b9fa0b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -990,29 +990,16 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
return 0;
}
-static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
unsigned long flags;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ /* tell the device to stop sending interrupts */
spin_lock_irqsave(&trans->shrd->lock, flags);
iwl_disable_interrupts(trans);
spin_unlock_irqrestore(&trans->shrd->lock, flags);
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(bus(trans)->irq);
- tasklet_kill(&trans_pcie->irq_tasklet);
-}
-
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
-{
- /* stop and reset the on-board processor */
- iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- iwl_trans_pcie_disable_sync_irq(trans);
-
/* device going down, Stop using ICT table */
iwl_disable_ict(trans);
@@ -1039,6 +1026,20 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_apm_stop(priv(trans));
+
+ /* Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * Clear the interrupt again here.
+ */
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* wait to make sure we flush the pending tasklet */
+ synchronize_irq(bus(trans)->irq);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+
+ /* stop and reset the on-board processor */
+ iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1099,13 +1100,21 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
hdr->seq_ctrl = hdr->seq_ctrl &
cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
+ if (WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON)) {
+ IWL_ERR(trans, "TX_CTL_AMPDU while not in AGG:"
+ " Tx flags = 0x%08x, agg.state = %d",
+ info->flags, tid_data->agg.state);
+ IWL_ERR(trans, "sta_id = %d, tid = %d "
+ "txq_id = %d, seq_num = %d", sta_id,
+ tid, tid_data->agg.txq_id,
+ seq_number >> 4);
+ }
txq_id = tid_data->agg.txq_id;
is_agg = true;
}
+ seq_number += 0x10;
}
/* Copy MAC header from skb into command buffer */
@@ -1350,9 +1359,9 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
}
if (txq->q.read_ptr != tfd_num) {
- IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- ssn , tfd_num, txq_id, txq->swq_id);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
+ txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
+ tfd_num, ssn);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
iwl_wake_queue(trans, txq, "Packets reclaimed");
@@ -1364,6 +1373,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
+ iwl_calib_free_results(trans);
iwl_trans_pcie_tx_free(trans);
iwl_trans_pcie_rx_free(trans);
free_irq(bus(trans)->irq, trans);
@@ -1515,8 +1525,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
if (time_after(jiffies, timeout)) {
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
hw_params(trans).wd_timeout);
- IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
+ IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+ iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+ & (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
return 1;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 50227ebc0ee..f94a6ee5f82 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -220,11 +220,12 @@ struct fw_img {
struct fw_desc data; /* firmware data image */
};
-enum iwl_ucode_type {
- IWL_UCODE_NONE,
- IWL_UCODE_REGULAR,
- IWL_UCODE_INIT,
- IWL_UCODE_WOWLAN,
+/* Opaque calibration results */
+struct iwl_calib_result {
+ struct list_head list;
+ size_t cmd_len;
+ struct iwl_calib_hdr hdr;
+ /* data follows */
};
/**
@@ -236,6 +237,8 @@ enum iwl_ucode_type {
* @ucode_rt: run time ucode image
* @ucode_init: init ucode image
* @ucode_wowlan: wake on wireless ucode image (optional)
+ * @nvm_device_type: indicates OTP or eeprom
+ * @calib_results: list head for init calibration results
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -250,6 +253,9 @@ struct iwl_trans {
/* eeprom related variables */
int nvm_device_type;
+ /* init calibration results */
+ struct list_head calib_results;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@@ -386,4 +392,9 @@ int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
const void *data, size_t len);
void iwl_dealloc_ucode(struct iwl_trans *trans);
+int iwl_send_calib_results(struct iwl_trans *trans);
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_trans *trans);
+
#endif /* __iwl_trans_h__ */
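An illustrative user-space sketch of the "header plus trailing payload on a list" layout that the new struct iwl_calib_result uses (list linkage, length, calibration header, then the data following in the same allocation). The list handling and the field layout of calib_hdr here are hand-rolled for the demo; the driver keeps its entries on a kernel list_head in struct iwl_trans and replays them with iwl_send_calib_results().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct calib_hdr {
	unsigned char op_code;		/* demo stand-in for iwl_calib_hdr */
	unsigned char reserved[3];
};

struct calib_result {
	struct calib_result *next;	/* stand-in for struct list_head */
	size_t cmd_len;
	struct calib_hdr hdr;
	/* cmd_len - sizeof(hdr) bytes of payload follow */
};

static struct calib_result *results;

static int calib_set(const struct calib_hdr *cmd, size_t len)
{
	struct calib_result *res = malloc(sizeof(*res) + len - sizeof(*cmd));

	if (!res)
		return -1;
	memcpy(&res->hdr, cmd, len);	/* header and payload in one copy */
	res->cmd_len = len;
	res->next = results;		/* push onto the result list */
	results = res;
	return 0;
}

int main(void)
{
	unsigned char raw[12] = { 0x12 };	/* fake calibration packet */

	calib_set((const struct calib_hdr *)raw, sizeof(raw));
	printf("stored op_code 0x%02x, len %zu\n",
	       results->hdr.op_code, results->cmd_len);
	return 0;
}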
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 9ec315b31d4..0577212ad3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -122,7 +122,7 @@ int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
/*
* ucode
*/
-static int iwlagn_load_section(struct iwl_trans *trans, const char *name,
+static int iwl_load_section(struct iwl_trans *trans, const char *name,
struct fw_desc *image, u32 dst_addr)
{
struct iwl_bus *bus = bus(trans);
@@ -188,7 +188,7 @@ static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
return NULL;
}
-static int iwlagn_load_given_ucode(struct iwl_trans *trans,
+static int iwl_load_given_ucode(struct iwl_trans *trans,
enum iwl_ucode_type ucode_type)
{
int ret = 0;
@@ -201,36 +201,36 @@ static int iwlagn_load_given_ucode(struct iwl_trans *trans,
return -EINVAL;
}
- ret = iwlagn_load_section(trans, "INST", &image->code,
+ ret = iwl_load_section(trans, "INST", &image->code,
IWLAGN_RTC_INST_LOWER_BOUND);
if (ret)
return ret;
- return iwlagn_load_section(trans, "DATA", &image->data,
+ return iwl_load_section(trans, "DATA", &image->data,
IWLAGN_RTC_DATA_LOWER_BOUND);
}
/*
* Calibration
*/
-static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+static int iwl_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
__le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL);
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd,
+ EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -240,22 +240,22 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
le16_to_cpu(cmd.radio_sensor_offset));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_KELVIN_TEMPERATURE);
__le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd,
+ EEPROM_RAW_TEMPERATURE);
struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_CALIB_ALL);
memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
sizeof(*offset_calib_high));
@@ -276,11 +276,10 @@ static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
le16_to_cpu(cmd.burntVoltageRef));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+static int iwl_send_calib_cfg(struct iwl_trans *trans)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
@@ -296,7 +295,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_trans_send_cmd(trans, &cmd);
}
int iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -306,37 +305,14 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- int index;
/* reduce the size of the length field itself */
len -= 4;
- /* Define the order in which the results will be sent to the runtime
- * uCode. iwl_send_calib_results sends them in a row according to
- * their index. We sort them here
- */
- switch (hdr->op_code) {
- case IWL_PHY_CALIBRATE_DC_CMD:
- index = IWL_CALIB_DC;
- break;
- case IWL_PHY_CALIBRATE_LO_CMD:
- index = IWL_CALIB_LO;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_CMD:
- index = IWL_CALIB_TX_IQ;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- index = IWL_CALIB_TX_IQ_PERD;
- break;
- case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
- index = IWL_CALIB_BASE_BAND;
- break;
- default:
- IWL_ERR(priv, "Unknown calibration notification %d\n",
- hdr->op_code);
- return -1;
- }
- iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+ if (iwl_calib_set(trans(priv), hdr, len))
+ IWL_ERR(priv, "Failed to record calibration data %d\n",
+ hdr->op_code);
+
return 0;
}
@@ -352,14 +328,14 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
* no need to close the envlope since we are going
* to load the runtime uCode later.
*/
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
}
- ret = iwlagn_send_calib_cfg(priv);
+ ret = iwl_send_calib_cfg(trans(priv));
if (ret)
return ret;
@@ -369,15 +345,15 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
*/
if (priv->cfg->need_temp_offset_calib) {
if (priv->cfg->temp_offset_v2)
- return iwlagn_set_temperature_offset_calib_v2(priv);
+ return iwl_set_temperature_offset_calib_v2(priv);
else
- return iwlagn_set_temperature_offset_calib(priv);
+ return iwl_set_temperature_offset_calib(priv);
}
return 0;
}
-static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
{
struct iwl_wimax_coex_cmd coex_cmd;
@@ -405,7 +381,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
sizeof(coex_cmd), &coex_cmd);
}
-static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
@@ -427,42 +403,42 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
0, 0, 0, 0, 0, 0, 0
};
-void iwlagn_send_prio_tbl(struct iwl_priv *priv)
+void iwl_send_prio_tbl(struct iwl_trans *trans)
{
struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
- memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
- sizeof(iwlagn_bt_prio_tbl));
- if (iwl_trans_send_cmd_pdu(trans(priv),
+ memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+ sizeof(iwl_bt_prio_tbl));
+ if (iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
- IWL_ERR(priv, "failed to send BT prio tbl command\n");
+ IWL_ERR(trans, "failed to send BT prio tbl command\n");
}
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
{
struct iwl_bt_coex_prot_env_cmd env_cmd;
int ret;
env_cmd.action = action;
env_cmd.type = type;
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
- IWL_ERR(priv, "failed to send BT env command\n");
+ IWL_ERR(trans, "failed to send BT env command\n");
return ret;
}
-static int iwlagn_alive_notify(struct iwl_priv *priv)
+static int iwl_alive_notify(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
int ret;
if (!priv->tx_cmd_pool)
priv->tx_cmd_pool =
- kmem_cache_create("iwlagn_dev_cmd",
+ kmem_cache_create("iwl_dev_cmd",
sizeof(struct iwl_device_cmd),
sizeof(void *), 0, NULL);
@@ -473,15 +449,17 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
for_each_context(priv, ctx)
ctx->last_tx_rejected = false;
- ret = iwlagn_send_wimax_coex(priv);
+ ret = iwl_send_wimax_coex(priv);
if (ret)
return ret;
- ret = iwlagn_set_Xtal_calib(priv);
- if (ret)
- return ret;
+ if (!priv->cfg->no_xtal_calib) {
+ ret = iwl_set_Xtal_calib(priv);
+ if (ret)
+ return ret;
+ }
- return iwl_send_calib_results(priv);
+ return iwl_send_calib_results(trans(priv));
}
@@ -572,7 +550,7 @@ struct iwlagn_alive_data {
u8 subtype;
};
-static void iwlagn_alive_fn(struct iwl_priv *priv,
+static void iwl_alive_fn(struct iwl_trans *trans,
struct iwl_rx_packet *pkt,
void *data)
{
@@ -581,20 +559,84 @@ static void iwlagn_alive_fn(struct iwl_priv *priv,
palive = &pkt->u.alive_frame;
- IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
+ IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
- priv->device_pointers.error_event_table =
+ trans->shrd->device_pointers.error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
- priv->device_pointers.log_event_table =
+ trans->shrd->device_pointers.log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->subtype = palive->ver_subtype;
alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}
+/* notification wait support */
+void iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data)
+{
+ wait_entry->fn = fn;
+ wait_entry->fn_data = fn_data;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+ wait_entry->aborted = false;
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_add(&wait_entry->list, &shrd->notif_waits);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(shrd->notif_waitq,
+ wait_entry->triggered || wait_entry->aborted,
+ timeout);
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+
+ if (wait_entry->aborted)
+ return -EIO;
+
+ /* return value is always >= 0 */
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+void iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+void iwl_abort_notification_waits(struct iwl_shared *shrd)
+{
+ unsigned long flags;
+ struct iwl_notification_wait *wait_entry;
+
+ spin_lock_irqsave(&shrd->notif_wait_lock, flags);
+ list_for_each_entry(wait_entry, &shrd->notif_waits, list)
+ wait_entry->aborted = true;
+ spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
+
+ wake_up_all(&shrd->notif_waitq);
+}
+
#define UCODE_ALIVE_TIMEOUT HZ
#define UCODE_CALIB_TIMEOUT (2*HZ)
@@ -603,41 +645,43 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
{
struct iwl_notification_wait alive_wait;
struct iwlagn_alive_data alive_data;
+ struct iwl_trans *trans = trans(priv);
int ret;
enum iwl_ucode_type old_type;
- ret = iwl_trans_start_device(trans(priv));
+ ret = iwl_trans_start_device(trans);
if (ret)
return ret;
- iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
- iwlagn_alive_fn, &alive_data);
+ iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
+ iwl_alive_fn, &alive_data);
- old_type = priv->ucode_type;
- priv->ucode_type = ucode_type;
+ old_type = trans->shrd->ucode_type;
+ trans->shrd->ucode_type = ucode_type;
- ret = iwlagn_load_given_ucode(trans(priv), ucode_type);
+ ret = iwl_load_given_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
- iwlagn_remove_notification(priv, &alive_wait);
+ trans->shrd->ucode_type = old_type;
+ iwl_remove_notification(trans->shrd, &alive_wait);
return ret;
}
- iwl_trans_kick_nic(trans(priv));
+ iwl_trans_kick_nic(trans);
/*
* Some things may run in the background now, but we
* just wait for the ALIVE notification here.
*/
- ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
+ ret = iwl_wait_notification(trans->shrd, &alive_wait,
+ UCODE_ALIVE_TIMEOUT);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
if (!alive_data.valid) {
IWL_ERR(priv, "Loaded ucode is not valid!\n");
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return -EIO;
}
@@ -647,9 +691,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
* skip it for WoWLAN.
*/
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(trans(priv), ucode_type);
+ ret = iwl_verify_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
@@ -657,11 +701,11 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
msleep(5);
}
- ret = iwlagn_alive_notify(priv);
+ ret = iwl_alive_notify(priv);
if (ret) {
IWL_WARN(priv,
"Could not complete ALIVE transition: %d\n", ret);
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
@@ -679,10 +723,10 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
if (!trans(priv)->ucode_init.code.len)
return 0;
- if (priv->ucode_type != IWL_UCODE_NONE)
+ if (priv->shrd->ucode_type != IWL_UCODE_NONE)
return 0;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(priv->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
@@ -699,12 +743,13 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
* Some things may run in the background now, but we
* just wait for the calibration complete notification.
*/
- ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
+ ret = iwl_wait_notification(priv->shrd, &calib_wait,
+ UCODE_CALIB_TIMEOUT);
goto out;
error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(priv->shrd, &calib_wait);
out:
/* Whatever happened, stop the device */
iwl_trans_stop_device(trans(priv));
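The hunks above move the ucode notification-wait helpers onto struct iwl_shared: a waiter registers itself on shrd->notif_waits under notif_wait_lock, sleeps on notif_waitq with a timeout, and is woken either by the matching notification or by iwl_abort_notification_waits() marking it aborted. Below is a minimal userspace sketch of that register/wait/abort shape, using pthreads in place of the kernel waitqueue and spinlock; all names here (notif_shared, notif_waiter, waiter_*) are invented for illustration and are not the driver's API.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Invented stand-ins for the driver's shared state and wait entry. */
struct notif_shared {
    pthread_mutex_t lock;
    pthread_cond_t  waitq;
};

struct notif_waiter {
    bool triggered;
    bool aborted;
};

/* Register interest before starting the operation (cf. iwl_init_notification_wait). */
static void waiter_init(struct notif_waiter *w)
{
    w->triggered = false;
    w->aborted = false;
}

/* Tear-down path: mark the waiter and wake it (cf. iwl_abort_notification_waits). */
static void waiter_abort(struct notif_shared *sh, struct notif_waiter *w)
{
    pthread_mutex_lock(&sh->lock);
    w->aborted = true;
    pthread_cond_broadcast(&sh->waitq);
    pthread_mutex_unlock(&sh->lock);
}

/* Sleep until triggered, aborted or timed out (cf. iwl_wait_notification). */
static int waiter_wait(struct notif_shared *sh, struct notif_waiter *w,
                       unsigned int timeout_ms)
{
    struct timespec ts;
    int rc = 0;

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec  += timeout_ms / 1000;
    ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }

    pthread_mutex_lock(&sh->lock);
    while (!w->triggered && !w->aborted && rc == 0)
        rc = pthread_cond_timedwait(&sh->waitq, &sh->lock, &ts);
    pthread_mutex_unlock(&sh->lock);

    if (w->aborted)
        return -EIO;
    if (!w->triggered)
        return -ETIMEDOUT;
    return 0;
}

int main(void)
{
    struct notif_shared sh;
    struct notif_waiter w;

    pthread_mutex_init(&sh.lock, NULL);
    pthread_cond_init(&sh.waitq, NULL);

    waiter_init(&w);
    waiter_abort(&sh, &w);          /* e.g. a firmware error path fires */
    printf("wait returned %d\n", waiter_wait(&sh, &w, 100));    /* -EIO */
    return 0;
}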
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 50dee6a0a5c..bd75078c454 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -42,6 +42,7 @@
#include <linux/ieee80211.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/moduleparam.h>
#include "iwm.h"
#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index 0a0cc9667cd..87eef5773a0 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include "iwm.h"
#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 362002735b1..98a179f98ea 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -42,6 +42,7 @@
#include <linux/ieee80211.h>
#include <linux/wireless.h>
#include <linux/slab.h>
+#include <linux/moduleparam.h>
#include "iwm.h"
#include "debug.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 56383e7be83..764b40dd24a 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -63,6 +63,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 89f34ad8d34..d1d84e0e30f 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -635,7 +635,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
if (channel &&
!(channel->flags & IEEE80211_CHAN_DISABLED)) {
bss = cfg80211_inform_bss(wiphy, channel,
- bssid, le64_to_cpu(*(__le64 *)tsfdesc),
+ bssid, get_unaligned_le64(tsfdesc),
capa, intvl, ie, ielen,
LBS_SCAN_RSSI_TO_MBM(rssi),
GFP_KERNEL);
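The libertas change above swaps a direct *(__le64 *) dereference of tsfdesc for get_unaligned_le64(), since the TSF field sits inside a packed scan descriptor and need not be 8-byte aligned. A portable userspace sketch of the same idea, assembling the value byte by byte so neither alignment nor host endianness matters; read_le64_unaligned() is an invented helper, not the kernel accessor.

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian 64-bit value from a possibly unaligned buffer. */
static uint64_t read_le64_unaligned(const void *p)
{
    const uint8_t *b = p;
    uint64_t v = 0;
    int i;

    for (i = 7; i >= 0; i--)
        v = (v << 8) | b[i];    /* byte 0 is least significant */
    return v;
}

int main(void)
{
    /* 0x1122334455667788 stored little-endian at an odd (unaligned) offset. */
    uint8_t buf[9] = { 0, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };

    printf("0x%llx\n", (unsigned long long)read_le64_unaligned(buf + 1));
    return 0;
}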
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index e08ab1de3d9..d798bcc0d83 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/if_arp.h>
+#include <linux/export.h>
#include "decl.h"
#include "cfg.h"
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 1af18277884..d8d8f0d0899 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "decl.h"
#include "cmd.h"
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index c962e21762d..9804ebc892d 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -29,7 +29,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 622ae6de0d8..50b1ee7721e 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -21,7 +21,7 @@
#include <linux/hardirq.h>
#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/list.h>
@@ -995,6 +995,7 @@ static int if_spi_host_to_card(struct lbs_private *priv,
spin_unlock_irqrestore(&card->buffer_lock, flags);
break;
default:
+ kfree(packet);
netdev_err(priv->dev, "can't transfer buffer of type %d\n",
type);
err = -EINVAL;
@@ -1290,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = {
.remove = __devexit_p(libertas_spi_remove),
.driver = {
.name = "libertas_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &if_spi_pm_ops,
},
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 8147f1e2a0b..db879c364eb 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -5,7 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 39a6a7a4024..957681dede1 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 62e10eeadd7..c7366b07b56 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -8,6 +8,7 @@
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/export.h>
#include <net/cfg80211.h>
#include "defs.h"
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 8f127520d78..c025f9c1828 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
+#include <linux/export.h>
#include <net/cfg80211.h>
#include "host.h"
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index 13557fe0bf9..909ac368501 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -11,6 +11,7 @@
#include <linux/hardirq.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "libertas_tf.h"
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index ba7d96584cb..68202e63873 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -15,7 +15,7 @@
#include "if_usb.h"
#include <linux/delay.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index acc461aa385..ceb51b6e670 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/etherdevice.h>
+#include <linux/module.h>
#include "libertas_tf.h"
#define DRIVER_RELEASE_VERSION "004.p0"
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 477100d0b11..52bcdf40d5b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -26,6 +26,7 @@
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
+#include <linux/module.h>
#include <net/genetlink.h>
#include "mac80211_hwsim.h"
@@ -36,7 +37,8 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
MODULE_LICENSE("GPL");
-int wmediumd_pid;
+static u32 wmediumd_pid;
+
static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -664,7 +666,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
- int _pid;
+ u32 _pid;
mac80211_hwsim_monitor_rx(hw, skb);
@@ -675,7 +677,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -763,7 +765,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_hw *hw = arg;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- int _pid;
+ u32 _pid;
hwsim_check_magic(vif);
@@ -780,7 +782,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_monitor_rx(hw, skb);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -1253,7 +1255,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_pspoll *pspoll;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1274,7 +1276,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
memcpy(pspoll->ta, mac, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1291,7 +1293,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1313,7 +1315,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1633,8 +1635,6 @@ static int hwsim_init_netlink(void)
int rc;
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- wmediumd_pid = 0;
-
rc = genl_register_family_with_ops(&hwsim_genl_family,
hwsim_ops, ARRAY_SIZE(hwsim_ops));
if (rc)
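The hwsim hunks make wmediumd_pid a static u32 and read it through ACCESS_ONCE() into a local before the check and the later use, so each tx path acts on one consistent snapshot rather than re-reading a value that another context may be changing. A userspace sketch of that snapshot-then-use pattern; READ_ONCE_U32() is an invented volatile-cast stand-in, not the kernel macro.

#include <stdint.h>
#include <stdio.h>

/* Force a single read of a shared u32; mirrors the spirit of ACCESS_ONCE(). */
#define READ_ONCE_U32(x) (*(volatile const uint32_t *)&(x))

static uint32_t listener_pid;   /* updated from another context */

static void deliver_frame(const char *frame)
{
    /* Snapshot once; every later use in this function sees the same value. */
    uint32_t pid = READ_ONCE_U32(listener_pid);

    if (pid)
        printf("sending \"%s\" to pid %u\n", frame, (unsigned)pid);
    else
        printf("no listener, dropping \"%s\"\n", frame);
}

int main(void)
{
    deliver_frame("beacon");
    listener_pid = 1234;
    deliver_frame("data");
    return 0;
}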
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e9ab9a3fbe9..787dbe2aa40 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -120,10 +120,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
- int dbm)
+ int mbm)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
struct mwifiex_power_cfg power_cfg;
+ int dbm = MBM_TO_DBM(mbm);
if (type == NL80211_TX_POWER_FIXED) {
power_cfg.is_power_auto = 0;
@@ -750,17 +751,13 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
" reason code %d\n", priv->cfg_bssid, reason_code);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -980,27 +977,32 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret = 0;
- if (priv->assoc_request)
- return -EBUSY;
-
if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "received infra assoc request "
"when station is in ibss mode\n");
goto done;
}
- priv->assoc_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
(char *) sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
-
- priv->assoc_request = 1;
done:
- priv->assoc_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
+ NULL, 0, WLAN_STATUS_SUCCESS,
+ GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: associated to bssid %pM successfully\n",
+ priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: association to bssid %pM failed\n",
+ priv->cfg_bssid);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+ }
+
return ret;
}
@@ -1017,28 +1019,29 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
int ret = 0;
- if (priv->ibss_join_request)
- return -EBUSY;
-
if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "request to join ibss received "
"when station is not in ibss mode\n");
goto done;
}
- priv->ibss_join_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
(char *) params->ssid, params->bssid);
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
params->channel, NULL, params->privacy);
-
- priv->ibss_join_request = 1;
done:
- priv->ibss_join_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: joined/created adhoc network with bssid"
+ " %pM successfully\n", priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: failed creating/joining adhoc network\n");
+ }
+
return ret;
}
@@ -1053,17 +1056,12 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
-
wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
priv->cfg_bssid);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -1080,15 +1078,42 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_scan_request *request)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int i;
+ struct ieee80211_channel *chan;
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
- if (priv->scan_request && priv->scan_request != request)
- return -EBUSY;
-
priv->scan_request = request;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
+ GFP_KERNEL);
+ if (!priv->user_scan_cfg) {
+ dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < request->n_ssids; i++) {
+ memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
+ request->ssids[i].ssid, request->ssids[i].ssid_len);
+ priv->user_scan_cfg->ssid_list[i].max_len =
+ request->ssids[i].ssid_len;
+ }
+ for (i = 0; i < request->n_channels; i++) {
+ chan = request->channels[i];
+ priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+ priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+
+ if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_PASSIVE;
+ else
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_ACTIVE;
+
+ priv->user_scan_cfg->chan_list[i].scan_time = 0;
+ }
+ if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+ return -EFAULT;
+
return 0;
}
@@ -1294,10 +1319,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
priv->media_connected = false;
- cancel_work_sync(&priv->cfg_workqueue);
- flush_workqueue(priv->workqueue);
- destroy_workqueue(priv->workqueue);
-
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
return 0;
@@ -1375,9 +1396,6 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- /* We are using custom domains */
- wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-
/* Reserve space for bss band information */
wdev->wiphy->bss_priv_size = sizeof(u8);
@@ -1406,100 +1424,3 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
return ret;
}
-
-/*
- * This function handles the result of different pending network operations.
- *
- * The following operations are handled and CFG802.11 subsystem is
- * notified accordingly -
- * - Scan request completion
- * - Association request completion
- * - IBSS join request completion
- * - Disconnect request completion
- */
-void
-mwifiex_cfg80211_results(struct work_struct *work)
-{
- struct mwifiex_private *priv =
- container_of(work, struct mwifiex_private, cfg_workqueue);
- struct mwifiex_user_scan_cfg *scan_req;
- int ret = 0, i;
- struct ieee80211_channel *chan;
-
- if (priv->scan_request) {
- scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
- GFP_KERNEL);
- if (!scan_req) {
- dev_err(priv->adapter->dev, "failed to alloc "
- "scan_req\n");
- return;
- }
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- memcpy(scan_req->ssid_list[i].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- scan_req->ssid_list[i].max_len =
- priv->scan_request->ssids[i].ssid_len;
- }
- for (i = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
- scan_req->chan_list[i].chan_number = chan->hw_value;
- scan_req->chan_list[i].radio_type = chan->band;
- if (chan->flags & IEEE80211_CHAN_DISABLED)
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_PASSIVE;
- else
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_ACTIVE;
- scan_req->chan_list[i].scan_time = 0;
- }
- if (mwifiex_set_user_scan_ioctl(priv, scan_req))
- ret = -EFAULT;
- priv->scan_result_status = ret;
- dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
- __func__);
- cfg80211_scan_done(priv->scan_request,
- (priv->scan_result_status < 0));
- priv->scan_request = NULL;
- kfree(scan_req);
- }
-
- if (priv->assoc_request == 1) {
- if (!priv->assoc_result) {
- cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
- NULL, 0, NULL, 0,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: associated to bssid %pM successfully\n",
- priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: association to bssid %pM failed\n",
- priv->cfg_bssid);
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- }
- priv->assoc_request = 0;
- priv->assoc_result = 0;
- }
-
- if (priv->ibss_join_request == 1) {
- if (!priv->ibss_join_result) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: joined/created adhoc network with bssid"
- " %pM successfully\n", priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: failed creating/joining adhoc network\n");
- }
- priv->ibss_join_request = 0;
- priv->ibss_join_result = 0;
- }
-
- if (priv->disconnect) {
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- priv->disconnect = 0;
- }
-}
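The cfg80211.c hunks above drop the cfg_workqueue indirection, reporting connect/IBSS/scan results straight from the handlers, and take the tx power argument in mBm, converting it with MBM_TO_DBM() before filling mwifiex_power_cfg. The unit conversion itself is just an integer divide by 100, as in this trivial sketch (mbm_to_dbm() is an invented name):

#include <stdio.h>

/* 100 mBm == 1 dBm, so the conversion is an integer divide by 100. */
static int mbm_to_dbm(int mbm)
{
    return mbm / 100;
}

int main(void)
{
    printf("2000 mBm = %d dBm\n", mbm_to_dbm(2000));   /* 20 dBm */
    printf("1550 mBm = %d dBm\n", mbm_to_dbm(1550));   /* 15 dBm (truncated) */
    return 0;
}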
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 8d010f2500c..76c76c60438 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -26,5 +26,4 @@
int mwifiex_register_cfg80211(struct mwifiex_private *);
-void mwifiex_cfg80211_results(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac278156d39..6e0a3eaecf7 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
- unsigned long cmd_pending_q_flags;
unsigned long scan_pending_q_flags;
uint16_t cancel_scan_cmd = false;
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
cmd_node = adapter->curr_cmd;
cmd_node->wait_q_enabled = false;
cmd_node->cmd_flag |= CMD_F_CANCELED;
- spin_lock_irqsave(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/*
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 26940455255..244c728ef9d 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -283,6 +283,45 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
}
/*
+ * This function sets trans_start per tx_queue
+ */
+void mwifiex_set_trans_start(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+
+ dev->trans_start = jiffies;
+}
+
+/*
+ * This function wakes up all queues in net_device
+ */
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_wake_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
+ * This function stops all queues in net_device
+ */
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_stop_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
* This function releases the lock variables and frees the locks and
* associated locks.
*/
@@ -359,6 +398,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->int_lock);
spin_lock_init(&adapter->main_proc_lock);
spin_lock_init(&adapter->mwifiex_cmd_lock);
+ spin_lock_init(&adapter->queue_lock);
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
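init.c gains helpers that stamp trans_start on every tx queue and stop or wake all tx queues while holding the new adapter->queue_lock, so the two operations cannot interleave halfway through the queue set. A userspace sketch of that flip-everything-under-one-lock shape, with a pthread mutex standing in for the spinlock and all types invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_QUEUES 4

struct fake_netdev {
    bool queue_stopped[NUM_TX_QUEUES];
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stop every queue atomically with respect to wake_all_queues(). */
static void stop_all_queues(struct fake_netdev *dev)
{
    int i;

    pthread_mutex_lock(&queue_lock);
    for (i = 0; i < NUM_TX_QUEUES; i++)
        dev->queue_stopped[i] = true;
    pthread_mutex_unlock(&queue_lock);
}

static void wake_all_queues(struct fake_netdev *dev)
{
    int i;

    pthread_mutex_lock(&queue_lock);
    for (i = 0; i < NUM_TX_QUEUES; i++)
        dev->queue_stopped[i] = false;
    pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
    struct fake_netdev dev = { { false } };

    stop_all_queues(&dev);
    printf("queue 0 stopped: %d\n", dev.queue_stopped[0]);
    wake_all_queues(&dev);
    printf("queue 0 stopped: %d\n", dev.queue_stopped[0]);
    return 0;
}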
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 67e6db7d672..84be196188c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb)
static int
mwifiex_open(struct net_device *dev)
{
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
atomic_inc(&priv->adapter->tx_pending);
if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
- netif_stop_queue(priv->netdev);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
}
queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
@@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev)
dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
jiffies, priv->bss_index);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
priv->num_tx_timeout++;
}
@@ -586,8 +586,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
priv->media_connected = false;
memset(&priv->nick_name, 0, sizeof(priv->nick_name));
priv->num_tx_timeout = 0;
- priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
- INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
}
@@ -793,7 +791,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
priv = adapter->priv[i];
if (priv && priv->netdev) {
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev,
+ adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 3861a617c0e..9207fc64641 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -453,15 +453,8 @@ struct mwifiex_private {
u8 scan_pending_on_block;
u8 report_scan_result;
struct cfg80211_scan_request *scan_request;
- int scan_result_status;
- int assoc_request;
- u16 assoc_result;
- int ibss_join_request;
- u16 ibss_join_result;
- bool disconnect;
+ struct mwifiex_user_scan_cfg *user_scan_cfg;
u8 cfg_bssid[6];
- struct workqueue_struct *workqueue;
- struct work_struct cfg_workqueue;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct wps wps;
u8 scan_block;
@@ -655,10 +648,19 @@ struct mwifiex_adapter {
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
struct cmd_ctrl_node *cmd_queued;
+ spinlock_t queue_lock; /* lock for tx queues */
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
+void mwifiex_set_trans_start(struct net_device *dev);
+
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a2f32008f9a..405350940a4 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -386,7 +386,7 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
if (!card->txbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
- return -1;
+ return -ENOMEM;
}
card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
@@ -476,7 +476,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
if (!card->rxbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for "
"rxbd_ring.\n");
- return -1;
+ return -ENOMEM;
}
card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
@@ -569,7 +569,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
if (!card->evtbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer. "
"Terminating download\n");
- return -1;
+ return -ENOMEM;
}
card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
@@ -1231,15 +1231,13 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if (rdptr >= MWIFIEX_MAX_EVT_BD) {
dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
rdptr);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
if (!card->evt_buf_list[rdptr]) {
@@ -1268,15 +1266,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
/* Write the event ring read pointer in to REG_EVTBD_RDPTR */
if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
-done:
- /* Free the buffer for failure case */
- if (ret && skb)
- dev_kfree_skb_any(skb);
-
dev_dbg(adapter->dev, "info: Check Events Again\n");
ret = mwifiex_pcie_process_event_ready(adapter);
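Several allocation failures in pcie.c now return -ENOMEM instead of a bare -1, keeping to the kernel's negative-errno convention so callers can tell what actually went wrong; the event_complete hunk likewise returns directly once there is nothing left to undo instead of funnelling through a shared done: label. A small userspace sketch of the negative-errno convention (create_ring() is an invented name):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return 0 on success, a negative errno value on failure. */
static int create_ring(void **ring, size_t size)
{
    *ring = calloc(1, size);
    if (!*ring)
        return -ENOMEM;
    return 0;
}

int main(void)
{
    void *ring;
    int ret = create_ring(&ring, 4096);

    if (ret < 0) {
        fprintf(stderr, "create_ring failed: %s\n", strerror(-ret));
        return 1;
    }
    free(ring);
    return 0;
}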
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8a18bcc23b2..e2e715666bc 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -819,8 +819,10 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
wildcard_ssid_tlv->header.len = cpu_to_le16(
(u16) (ssid_len + sizeof(wildcard_ssid_tlv->
max_ssid_length)));
- wildcard_ssid_tlv->max_ssid_length =
- user_scan_in->ssid_list[ssid_idx].max_len;
+
+ /* max_ssid_length = 0 tells firmware to perform
+ specific scan for the SSID filled */
+ wildcard_ssid_tlv->max_ssid_length = 0;
memcpy(wildcard_ssid_tlv->ssid,
user_scan_in->ssid_list[ssid_idx].ssid,
@@ -1389,11 +1391,8 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
{
int status;
- priv->adapter->scan_wait_q_woken = false;
-
status = mwifiex_scan_networks(priv, scan_req);
- if (!status)
- status = mwifiex_wait_queue_complete(priv->adapter);
+ queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
return status;
}
@@ -1794,6 +1793,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
up(&priv->async_sem);
}
+ if (priv->user_scan_cfg) {
+ dev_dbg(priv->adapter->dev, "info: %s: sending scan "
+ "results\n", __func__);
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
+ }
} else {
/* Get scan command from scan_pending_q and put to
cmd_pending_q */
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 702452b505c..d39d8457f25 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1087,7 +1087,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
(adapter->ioport | 0x1000 |
(card->mpa_rx.ports << 4)) +
card->mpa_rx.start_port, 1))
- return -1;
+ goto error;
curr_ptr = card->mpa_rx.buf;
@@ -1130,12 +1130,29 @@ rx_curr_single:
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
skb->data, skb->len,
adapter->ioport + port))
- return -1;
+ goto error;
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
return 0;
+
+error:
+ if (MP_RX_AGGR_IN_PROGRESS(card)) {
+ /* Multiport-aggregation transfer failed - cleanup */
+ for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+ /* copy pkt to deaggr buf */
+ skb_deaggr = card->mpa_rx.skb_arr[pind];
+ dev_kfree_skb_any(skb_deaggr);
+ }
+ MP_RX_AGGR_BUF_RESET(card);
+ }
+
+ if (f_do_rx_cur)
+ /* Single transfer pending. Free curr buff also */
+ dev_kfree_skb_any(skb);
+
+ return -1;
}
/*
@@ -1271,7 +1288,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev,
"info: CFG reg val =%x\n", cr);
- dev_kfree_skb_any(skb);
return -1;
}
}
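The sdio.c hunk replaces the mid-function return -1 exits of the aggregation receive path with jumps to a single error: label that frees every skb already collected in the aggregation array, plus the pending single-transfer buffer, before failing, so nothing leaks when a transfer dies halfway through. A generic userspace sketch of that unwind-on-error shape, with all names invented:

#include <stdio.h>
#include <stdlib.h>

#define MAX_PKTS 8

/* Pull up to n packets; on any failure free everything gathered so far. */
static int receive_burst(size_t n)
{
    void *pkts[MAX_PKTS] = { 0 };
    size_t i, got = 0;

    if (n > MAX_PKTS)
        return -1;

    for (i = 0; i < n; i++) {
        pkts[i] = malloc(256);
        if (!pkts[i])
            goto error;          /* partial burst: unwind below */
        got++;
    }

    /* In a real driver the packets would be handed up the stack here;
     * for the sketch just release them after "processing". */
    for (i = 0; i < got; i++)
        free(pkts[i]);
    return 0;

error:
    for (i = 0; i < got; i++)
        free(pkts[i]);           /* drop everything gathered before the failure */
    return -1;
}

int main(void)
{
    return receive_burst(4) ? 1 : 0;
}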
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f204810e833..d7aa21da84d 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -115,18 +115,17 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
if (adapter->num_cmd_timeout && adapter->curr_cmd)
return;
priv->media_connected = false;
- if (!priv->disconnect) {
- priv->disconnect = 1;
- dev_dbg(adapter->dev, "info: successfully disconnected from"
- " %pM: reason code %d\n", priv->cfg_bssid,
- WLAN_REASON_DEAUTH_LEAVING);
- cfg80211_disconnected(priv->netdev,
- WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
- GFP_KERNEL);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ dev_dbg(adapter->dev, "info: successfully disconnected from"
+ " %pM: reason code %d\n", priv->cfg_bssid,
+ WLAN_REASON_DEAUTH_LEAVING);
+ if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, GFP_KERNEL);
}
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
/* Reset wireless stats signal info */
@@ -201,7 +200,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
if (netif_queue_stopped(priv->netdev))
- netif_wake_queue(priv->netdev);
+ mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_DEAUTHENTICATED:
@@ -292,7 +291,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
priv->adhoc_is_link_sensed = false;
mwifiex_clean_txrx(priv);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
break;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 4b6f5539657..6d990c798a2 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -234,7 +234,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
"associating...\n");
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
/* Clear any past association response stored for
* application retrieval */
@@ -265,7 +265,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (!ret) {
dev_dbg(adapter->dev, "info: network found in scan"
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a206f412875..d9274a1b77a 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if (!priv)
goto done;
- priv->netdev->trans_start = jiffies;
+ mwifiex_set_trans_start(priv->netdev);
if (!status) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
@@ -152,7 +152,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
&& (tpriv->media_connected)) {
if (netif_queue_stopped(tpriv->netdev))
- netif_wake_queue(tpriv->netdev);
+ mwifiex_wake_up_net_dev_queue(tpriv->netdev,
+ adapter);
}
}
done:
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 527cf5333db..4df8cf64b56 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/device.h>
+#include <linux/module.h>
#include "hermes.h"
#include "hermes_dld.h"
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 8b6f363b3f7..fa8ce510478 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -24,6 +24,7 @@
#include <net/mac80211.h>
#include <linux/crc-ccitt.h>
+#include <linux/export.h>
#include "p54.h"
#include "eeprom.h"
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 53a3408931b..18e82b31afa 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/export.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index ad9ae04d07a..db4d9a02f26 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/module.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1b753173680..b1f51a21579 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -20,6 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
+#include <linux/module.h>
#include <net/mac80211.h>
#include "p54.h"
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index a454d487b14..7faed62c637 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -584,8 +584,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
mutex_lock(&priv->mutex);
WARN_ON(priv->fw_state != FW_STATE_READY);
- cancel_work_sync(&priv->work);
-
p54spi_power_off(priv);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
@@ -593,6 +591,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);
+
+ cancel_work_sync(&priv->work);
}
static int __devinit p54spi_probe(struct spi_device *spi)
@@ -652,6 +652,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
init_completion(&priv->fw_comp);
INIT_LIST_HEAD(&priv->tx_pending);
mutex_init(&priv->mutex);
+ spin_lock_init(&priv->tx_lock);
SET_IEEE80211_DEV(hw, &spi->dev);
priv->common.open = p54spi_op_start;
priv->common.stop = p54spi_op_stop;
@@ -699,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
static struct spi_driver p54spi_driver = {
.driver = {
.name = "p54spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index a8f3bc740df..9b609686642 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -20,6 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/crc32.h>
+#include <linux/module.h>
#include <net/mac80211.h>
#include "p54.h"
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f485784a60a..42b97bc3847 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -16,6 +16,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
@@ -241,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
skb_unlink(skb, &priv->tx_queue);
p54_tx_qos_accounting_free(priv, skb);
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
}
EXPORT_SYMBOL_GPL(p54_free_skb);
@@ -787,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
return;
}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582..4e44b1af119 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
dwrq->flags = 0;
dwrq->length = 0;
}
- essid->octets[essid->length] = '\0';
+ essid->octets[dwrq->length] = '\0';
memcpy(extra, essid->octets, dwrq->length);
kfree(essid);
@@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
-/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
- * support. This is to be replaced with Linux wireless extensions once they
- * get WPA support. */
-
-/* Note II: please leave all this together as it will be easier to remove later,
- * once wireless extensions add WPA support -mcgrof */
-
-/* PRISM54_HOSTAPD ioctl() cmd: */
-enum {
- PRISM2_SET_ENCRYPTION = 6,
- PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
- PRISM2_HOSTAPD_MLME = 13,
- PRISM2_HOSTAPD_SCAN_REQ = 14,
-};
-
#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
-#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25
-#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
- offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum length for algorithm names (-1 for nul termination)
- * used in ioctl() */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-struct prism2_hostapd_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
- u32 flags;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[0];
- } crypt;
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
- struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
- u16 cmd;
- u16 reason_code;
- } mlme;
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
- } u;
-};
-
-
-static int
-prism2_ioctl_set_encryption(struct net_device *dev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(dev);
- int rvalue = 0, force = 0;
- int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
- union oid_res_t r;
-
- /* with the new API, it's impossible to get a NULL pointer.
- * New version of iwconfig set the IW_ENCODE_NOKEY flag
- * when no key is given, but older versions don't. */
-
- if (param->u.crypt.key_len > 0) {
- /* we have a key to set */
- int index = param->u.crypt.idx;
- int current_index;
- struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
-
- /* get the current key index */
- rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
- current_index = r.u;
- /* Verify that the key is not marked as invalid */
- if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
- key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
- sizeof (param->u.crypt.key) : param->u.crypt.key_len;
- memcpy(key.key, param->u.crypt.key, key.length);
- if (key.length == 32)
- /* we want WPA-PSK */
- key.type = DOT11_PRIV_TKIP;
- if ((index < 0) || (index > 3))
- /* no index provided use the current one */
- index = current_index;
-
- /* now send the key to the card */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
- &key);
- }
- /*
- * If a valid key is set, encryption should be enabled
- * (user may turn it off later).
- * This is also how "iwconfig ethX key on" works
- */
- if ((index == current_index) && (key.length > 0))
- force = 1;
- } else {
- int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
- if ((index >= 0) && (index <= 3)) {
- /* we want to set the key index */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
- &index);
- } else {
- if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
- /* we cannot do anything. Complain. */
- return -EINVAL;
- }
- }
- }
- /* now read the flags */
- if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
- * authen = DOT11_AUTH_OS;
- * invoke = 0;
- * exunencrypt = 0; */
- }
- if (param->u.crypt.flags & IW_ENCODE_OPEN)
- /* Encode but accept non-encoded packets. No auth */
- invoke = 1;
- if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
- /* Refuse non-encoded packets. Auth */
- authen = DOT11_AUTH_BOTH;
- invoke = 1;
- exunencrypt = 1;
- }
- /* do the change if requested */
- if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
- rvalue |=
- mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
- &exunencrypt);
- }
- return rvalue;
-}
-
-static int
-prism2_ioctl_set_generic_element(struct net_device *ndev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(ndev);
- int max_len, len, alen, ret=0;
- struct obj_attachment *attach;
-
- len = param->u.generic_elem.len;
- max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
- if (max_len < 0 || max_len < len)
- return -EINVAL;
-
- alen = sizeof(*attach) + len;
- attach = kzalloc(alen, GFP_KERNEL);
- if (attach == NULL)
- return -ENOMEM;
-
-#define WLAN_FC_TYPE_MGMT 0
-#define WLAN_FC_STYPE_ASSOC_REQ 0
-#define WLAN_FC_STYPE_REASSOC_REQ 2
-
- /* Note: endianness is covered by mgt_set_varlen */
-
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_ASSOC_REQ << 4);
- attach->id = -1;
- attach->size = len;
- memcpy(attach->data, param->u.generic_elem.data, len);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0) {
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_REASSOC_REQ << 4);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0)
- printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
- ndev->name);
- }
-
- kfree(attach);
- return ret;
-
-}
-
-static int
-prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
-{
- return -EOPNOTSUPP;
-}
-
-static int
-prism2_ioctl_scan_req(struct net_device *ndev,
- struct prism2_hostapd_param *param)
-{
- islpci_private *priv = netdev_priv(ndev);
- struct iw_request_info info;
- int i, rvalue;
- struct obj_bsslist *bsslist;
- u32 noise = 0;
- char *extra = "";
- char *current_ev = "foo";
- union oid_res_t r;
-
- if (islpci_get_state(priv) < PRV_STATE_INIT) {
- /* device is not ready, fail gently */
- return 0;
- }
-
- /* first get the noise value. We will use it to report the link quality */
- rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
- noise = r.u;
-
- /* Ask the device for a list of known bss. We can report at most
- * IW_MAX_AP=64 to the range struct. But the device won't repport anything
- * if you change the value of IWMAX_BSS=24.
- */
- rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
- bsslist = r.ptr;
-
- info.cmd = PRISM54_HOSTAPD;
- info.flags = 0;
-
- /* ok now, scan the list and translate its info */
- for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
- current_ev = prism54_translate_bss(ndev, &info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &(bsslist->bsslist[i]),
- noise);
- kfree(bsslist);
-
- return rvalue;
-}
-
-static int
-prism54_hostapd(struct net_device *ndev, struct iw_point *p)
-{
- struct prism2_hostapd_param *param;
- int ret = 0;
- u32 uwrq;
-
- printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
- if (p->length < sizeof(struct prism2_hostapd_param) ||
- p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = memdup_user(p->pointer, p->length);
- if (IS_ERR(param))
- return PTR_ERR(param);
-
- switch (param->cmd) {
- case PRISM2_SET_ENCRYPTION:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
- ndev->name);
- ret = prism2_ioctl_set_encryption(ndev, param, p->length);
- break;
- case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
- ndev->name);
- ret = prism2_ioctl_set_generic_element(ndev, param,
- p->length);
- break;
- case PRISM2_HOSTAPD_MLME:
- printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
- ndev->name);
- ret = prism2_ioctl_mlme(ndev, param);
- break;
- case PRISM2_HOSTAPD_SCAN_REQ:
- printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
- ndev->name);
- ret = prism2_ioctl_scan_req(ndev, param);
- break;
- case PRISM54_SET_WPA:
- printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
- ndev->name);
- uwrq = 1;
- ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
- break;
- case PRISM54_DROP_UNENCRYPTED:
- printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
- ndev->name);
-#if 0
- uwrq = 0x01;
- mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
- down_write(&priv->mib_sem);
- mgt_commit(priv);
- up_write(&priv->mib_sem);
-#endif
- /* Not necessary, as set_wpa does it, should we just do it here though? */
- ret = 0;
- break;
- default:
- printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
- ndev->name);
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-
- return ret;
-}
static int
prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
.private_args = (struct iw_priv_args *) prism54_private_args,
.get_wireless_stats = prism54_get_wireless_stats,
};
-
-/* For wpa_supplicant */
-
-int
-prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *) rq;
- int ret = -1;
- switch (cmd) {
- case PRISM54_HOSTAPD:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- ret = prism54_hostapd(ndev, &wrq->u.data);
- return ret;
- }
- return -EOPNOTSUPP;
-}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index bcfbfb9281d..a34bceb6e3c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
int prism54_set_mac_address(struct net_device *, void *);
-int prism54_ioctl(struct net_device *, struct ifreq *, int);
-
extern const struct iw_handler_def prism54_handler_def;
#endif /* _ISL_IOCTL_H */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 8a3cf4fe376..5970ff6f40c 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
static const struct net_device_ops islpci_netdev_ops = {
.ndo_open = islpci_open,
.ndo_stop = islpci_close,
- .ndo_do_ioctl = prism54_ioctl,
.ndo_start_xmit = islpci_eth_transmit,
.ndo_tx_timeout = islpci_eth_tx_timeout,
.ndo_set_mac_address = prism54_set_mac_address,
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 620e3c0e88e..3802c31fefc 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -244,6 +244,10 @@ enum ndis_80211_power_mode {
NDIS_80211_POWER_MODE_FAST_PSP,
};
+enum ndis_80211_pmkid_cand_list_flag_bits {
+ NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
+};
+
struct ndis_80211_auth_request {
__le32 length;
u8 bssid[6];
@@ -387,19 +391,17 @@ struct ndis_80211_capability {
struct ndis_80211_bssid_info {
u8 bssid[6];
u8 pmkid[16];
-};
+} __packed;
struct ndis_80211_pmkid {
__le32 length;
__le32 bssid_info_count;
struct ndis_80211_bssid_info bssid_info[0];
-};
+} __packed;
/*
* private data
*/
-#define NET_TYPE_11FB 0
-
#define CAP_MODE_80211A 1
#define CAP_MODE_80211B 2
#define CAP_MODE_80211G 4
@@ -1347,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel)
return ret;
}
+static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
+ u16 *beacon_interval)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ieee80211_channel *channel;
+ struct ndis_80211_conf config;
+ int len, ret;
+
+ /* Get channel and beacon interval */
+ len = sizeof(config);
+ ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+ __func__, ret);
+ if (ret < 0)
+ return NULL;
+
+ channel = ieee80211_get_channel(priv->wdev.wiphy,
+ KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
+ if (!channel)
+ return NULL;
+
+ if (beacon_interval)
+ *beacon_interval = le16_to_cpu(config.beacon_period);
+ return channel;
+}
+
/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
int index)
@@ -2650,13 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
- struct ndis_80211_conf config;
struct ndis_80211_ssid ssid;
struct cfg80211_bss *bss;
s32 signal;
u64 timestamp;
u16 capability;
- u16 beacon_interval;
+ u16 beacon_interval = 0;
__le32 rssi;
u8 ie_buf[34];
int len, ret, ie_len;
@@ -2681,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
}
/* Get channel and beacon interval */
- len = sizeof(config);
- ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
- netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
- __func__, ret);
- if (ret >= 0) {
- beacon_interval = le16_to_cpu(config.beacon_period);
- channel = ieee80211_get_channel(priv->wdev.wiphy,
- KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
- if (!channel) {
- netdev_warn(usbdev->net, "%s(): could not get channel."
- "\n", __func__);
- return;
- }
- } else {
- netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
- __func__);
+ channel = get_current_channel(usbdev, &beacon_interval);
+ if (!channel) {
+ netdev_warn(usbdev->net, "%s(): could not get channel.\n",
+ __func__);
return;
}
@@ -2841,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
req_ie_len, resp_ie,
resp_ie_len, 0, GFP_KERNEL);
else
- cfg80211_roamed(usbdev->net, NULL, bssid,
- req_ie, req_ie_len,
+ cfg80211_roamed(usbdev->net,
+ get_current_channel(usbdev, NULL),
+ bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -3008,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
struct ndis_80211_pmkid_candidate *cand =
&cand_list->candidate_list[i];
+ bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
- netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
- i, le32_to_cpu(cand->flags), cand->bssid);
-
-#if 0
- struct iw_pmkid_cand pcand;
- union iwreq_data wrqu;
+ netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
+ i, le32_to_cpu(cand->flags), preauth, cand->bssid);
- memset(&pcand, 0, sizeof(pcand));
- if (le32_to_cpu(cand->flags) & 0x01)
- pcand.flags |= IW_PMKID_CAND_PREAUTH;
- pcand.index = i;
- memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(pcand);
- wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
- (u8 *)&pcand);
-#endif
+ cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
+ preauth, GFP_ATOMIC);
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186..e5df380d4fb 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
!(filter_flags & FIF_PSPOLL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
+ !(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
!(filter_flags & FIF_CONTROL));
rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
@@ -3771,7 +3773,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
/* The returned value is in CPU order, but eeprom is le */
- rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1565792f27..377876315b8 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -919,6 +919,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x935b) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00e8) },
+ { USB_DEVICE(0x0411, 0x0158) },
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
/* Corega */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 2ec5c00235e..99ff12d0c29 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -943,6 +943,7 @@ struct rt2x00_dev {
* Powersaving work
*/
struct delayed_work autowakeup_work;
+ struct work_struct sleep_work;
/*
* Data queue arrays for RX, TX, Beacon and ATIM.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e1fb2a8569b..c3e1aa7c1a8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -465,6 +465,23 @@ static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie)
return NULL;
}
+static void rt2x00lib_sleep(struct work_struct *work)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(work, struct rt2x00_dev, sleep_work);
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return;
+
+ /*
+ * Check again if powersaving is enabled, to prevent races from delayed
+ * work execution.
+ */
+ if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
+ rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
+ IEEE80211_CONF_CHANGE_PS);
+}
+
static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct rxdone_entry_desc *rxdesc)
@@ -512,8 +529,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
cam |= (tim_ie->bitmap_ctrl & 0x01);
if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
- rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
- IEEE80211_CONF_CHANGE_PS);
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work);
}
static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
@@ -815,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
if (spec->supported_rates & SUPPORT_RATE_OFDM)
num_rates += 8;
- channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
+ channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
if (!channels)
return -ENOMEM;
- rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
+ rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
if (!rates)
goto exit_free_channels;
@@ -1141,6 +1157,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
+ INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);
/*
* Let the driver probe the device to detect the capabilities.
@@ -1197,6 +1214,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
*/
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
+ cancel_work_sync(&rt2x00dev->sleep_work);
if (rt2x00_is_usb(rt2x00dev)) {
del_timer_sync(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index bf0acff0780..ede3c58e678 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -160,7 +160,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
exit_fail:
rt2x00queue_pause_queue(queue);
exit_free_skb:
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 0082015ff66..2f14a5fb0cb 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/module.h>
#include <net/mac80211.h>
#include "rtl8180.h"
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 24873b55b55..4a78f9e39df 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/module.h>
#include <net/mac80211.h>
#include "rtl8187.h"
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 45e14760c16..d6c42e69bdb 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -12,7 +12,7 @@ config RTL8192CE
config RTL8192SE
tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
- depends on MAC80211 && EXPERIMENTAL
+ depends on MAC80211 && EXPERIMENTAL && PCI
select FW_LOADER
select RTLWIFI
---help---
@@ -23,7 +23,7 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
- depends on MAC80211 && EXPERIMENTAL
+ depends on MAC80211 && EXPERIMENTAL && PCI
select FW_LOADER
select RTLWIFI
---help---
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index d4fdd2a5a73..d81a6021a30 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
+#include <linux/module.h>
#include "wifi.h"
#include "rc.h"
#include "base.h"
@@ -344,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
if (is_valid_ether_addr(rtlefuse->dev_addr)) {
SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
} else {
- u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
- get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
- SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+ u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+ get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
+ SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
}
}
@@ -447,12 +448,11 @@ int rtl_init_core(struct ieee80211_hw *hw)
/* <4> locks */
mutex_init(&rtlpriv->locks.conf_mutex);
- spin_lock_init(&rtlpriv->locks.ips_lock);
+ mutex_init(&rtlpriv->locks.ps_mutex);
spin_lock_init(&rtlpriv->locks.irq_th_lock);
spin_lock_init(&rtlpriv->locks.h2c_lock);
spin_lock_init(&rtlpriv->locks.rf_ps_lock);
spin_lock_init(&rtlpriv->locks.rf_lock);
- spin_lock_init(&rtlpriv->locks.lps_lock);
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 4ae905983d0..f66b5757f6b 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -76,7 +76,7 @@ enum ap_peer {
SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
#define SET_80211_PS_POLL_AID(_hdr, _val) \
- (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
+ (*(u16 *)((u8 *)(_hdr) + 2) = _val)
#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
#define SET_80211_PS_POLL_TA(_hdr, _val) \
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 7babb6acd95..dc36d7461ca 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -29,6 +29,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include "wifi.h"
#include "cam.h"
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 3fc21f60bb0..ed1058b7158 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -27,6 +27,7 @@
*
*****************************************************************************/
+#include <linux/export.h>
#include "wifi.h"
#include "efuse.h"
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 177a8e66924..0d4d242849b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -27,6 +27,7 @@
*
*****************************************************************************/
+#include <linux/export.h>
#include "core.h"
#include "wifi.h"
#include "pci.h"
@@ -609,7 +610,7 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
}
@@ -735,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
dev_kfree_skb_any(skb);
@@ -779,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
+ irqreturn_t ret = IRQ_HANDLED;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
@@ -786,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
/* Shared IRQ or HW disappeared */
- if (!inta || inta == 0xffff)
+ if (!inta || inta == 0xffff) {
+ ret = IRQ_NONE;
goto done;
+ }
/*<1> beacon related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -889,12 +893,9 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (rtlpriv->rtlhal.earlymode_enable)
tasklet_schedule(&rtlpriv->works.irq_tasklet);
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
-
done:
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
+ return ret;
}
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
@@ -902,11 +903,6 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
-{
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -944,6 +940,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}
+static void rtl_lps_leave_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_leave_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+
+ rtl_lps_leave(hw);
+}
+
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1005,9 +1010,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
(void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
- tasklet_init(&rtlpriv->works.ips_leave_tasklet,
- (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
- (unsigned long)hw);
+ INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
}
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1477,7 +1480,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1552,7 +1555,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
set_hal_stop(rtlhal);
rtlpriv->cfg->ops->disable_interrupt(hw);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
while (ppsc->rfchange_inprogress) {
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index a693feffbe7..a14a68b2463 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -27,6 +27,7 @@
*
*****************************************************************************/
+#include <linux/export.h>
#include "wifi.h"
#include "base.h"
#include "ps.h"
@@ -240,7 +241,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
if (mac->opmode != NL80211_IFTYPE_STATION)
return;
- spin_lock(&rtlpriv->locks.ips_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
@@ -256,7 +257,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.ips_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/*for FW LPS*/
@@ -394,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
/* Idle for a while if we connected to the AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -406,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/*Leave the leisure power save mode.*/
@@ -416,7 +417,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -437,7 +438,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/* For sw LPS*/
@@ -538,9 +539,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -573,9 +574,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index a00774e7090..72a98cab6f6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -27,6 +27,7 @@
*
*****************************************************************************/
+#include <linux/export.h>
#include "dm_common.h"
#include "phy_common.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 49a064bdbce..931d97979b0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/firmware.h>
+#include <linux/export.h>
#include "../wifi.h"
#include "../pci.h"
#include "../base.h"
@@ -72,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
+static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
+ u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
+ blockCount = size / blockSize;
+ remainSize = size % blockSize;
+
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ blockSize);
+ }
+
+ if (remainSize) {
+ offset = blockCount * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ remainSize);
+ }
+}
+
static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
const u8 *buffer, u32 size)
{
@@ -80,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
u8 *bufferPtr = (u8 *) buffer;
u32 *pu4BytePtr = (u32 *) buffer;
u32 i, offset, blockCount, remainSize;
+ u32 data;
+ if (rtlpriv->io.writeN_sync) {
+ rtl_block_fw_writeN(hw, buffer, size);
+ return;
+ }
blockCount = size / blockSize;
remainSize = size % blockSize;
+ if (remainSize) {
+ /* the last word is < 4 bytes - pad it with zeros */
+ for (i = 0; i < 4 - remainSize; i++)
+ *(bufferPtr + size + i) = 0;
+ blockCount++;
+ }
for (i = 0; i < blockCount; i++) {
offset = i * blockSize;
+ /* for big-endian platforms, the firmware data needs to be byte
+ * swapped: it was read as a byte string but will be written
+ * as 32-bit dwords
+ */
+ data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
-
- if (remainSize) {
- offset = blockCount * blockSize;
- bufferPtr += offset;
- for (i = 0; i < remainSize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferPtr + i));
- }
+ data);
}
}
@@ -226,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
u32 fwsize;
enum version_8192c version = rtlhal->version;
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
if (!rtlhal->pfirmware)
return 1;
+ pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
pfwdata = (u8 *) rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
@@ -237,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
("Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (uint)sizeof(struct rtl92c_firmware_header)));
+ le16_to_cpu(pfwheader->version),
+ le16_to_cpu(pfwheader->signature),
+ (uint)sizeof(struct rtl92c_firmware_header)));
pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3d5823c1262..cec5a3a1cc5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -32,32 +32,32 @@
#define FW_8192C_SIZE 0x3000
#define FW_8192C_START_ADDRESS 0x1000
-#define FW_8192C_END_ADDRESS 0x3FFF
+#define FW_8192C_END_ADDRESS 0x1FFF
#define FW_8192C_PAGE_SIZE 4096
#define FW_8192C_POLLING_DELAY 5
#define FW_8192C_POLLING_TIMEOUT_COUNT 100
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
- (_pfwhdr->signature&0xFFF0) == 0x88C0)
+ ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
+ (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
struct rtl92c_firmware_header {
- u16 signature;
+ __le16 signature;
u8 category;
u8 function;
- u16 version;
+ __le16 version;
u8 subversion;
u8 rsvd1;
u8 month;
u8 date;
u8 hour;
u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
+ __le16 ramcodeSize;
+ __le16 rsvd2;
+ __le32 svnindex;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
};
enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
index 2f624fc2749..605ff191aeb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/main.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
@@ -27,6 +27,7 @@
*
*****************************************************************************/
+#include <linux/module.h>
#include "../wifi.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 3b11642d3f7..1f07558debf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -27,6 +27,7 @@
*
*****************************************************************************/
+#include <linux/export.h>
#include "../wifi.h"
#include "../rtl8192ce/reg.h"
#include "../rtl8192ce/def.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac592..3b585aadabf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index a48404cc2b9..f2aa33dc4d7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -28,6 +28,7 @@
*****************************************************************************/
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 814c05df51e..4ed973a3aa1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
}
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
hwinfo, HWSET_MAX_SIZE);
- eeprom_id = *((u16 *)&hwinfo[0]);
+ eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
_rtl92cu_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
+ rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
(" VID = 0x%02x PID = 0x%02x\n",
rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->eeprom_version =
+ le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 060a06f4a88..9e0c8fcdf90 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
}
}
rtlhal->version = (enum version_8192c)chip_version;
+ pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
switch (rtlhal->version) {
case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df8..e49cf2244c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index b9a158e5eb0..94a3e170615 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -42,6 +42,7 @@
#include "led.h"
#include "hw.h"
#include <linux/vmalloc.h>
+#include <linux/module.h>
MODULE_AUTHOR("Georgia <georgia@realtek.com>");
MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index bc33b147f44..b3cc7b94999 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
for (index = 0; index < 16; index++)
checksum = checksum ^ (*(ptr + index));
- SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
}
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c550..0883349e1c8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 691f8009218..149493f4c25 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979..f10ac1ad908 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 3ec9a0d41ba..92f49d522c5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index b42c2e2b205..e956fa71d04 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -28,18 +28,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/usb.h>
+#include <linux/export.h>
#include "core.h"
#include "wifi.h"
#include "usb.h"
#include "base.h"
#include "ps.h"
+#include "rtl8192c/fw_common.h"
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
#define REALTEK_USB_VENQT_CMD_REQ 0x05
#define REALTEK_USB_VENQT_CMD_IDX 0x00
-#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
+#define MAX_USBCTRL_VENDORREQ_TIMES 10
static void usbctrl_async_callback(struct urb *urb)
{
@@ -81,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(len);
+ /* data are already in little-endian order */
memcpy(buf, pdata, len);
usb_fill_control_urb(urb, udev, pipe,
(unsigned char *)dr, buf, len,
@@ -99,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
unsigned int pipe;
int status;
u8 reqtype;
+ int vendorreq_times = 0;
+ static int count;
pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
reqtype = REALTEK_USB_VENQT_READ;
- status = usb_control_msg(udev, pipe, request, reqtype, value, index,
- pdata, len, 0); /* max. timeout */
+ do {
+ status = usb_control_msg(udev, pipe, request, reqtype, value,
+ index, pdata, len, 0); /*max. timeout*/
+ if (status < 0) {
+ /* firmware download is checksummed, don't retry */
+ if ((value >= FW_8192C_START_ADDRESS &&
+ value <= FW_8192C_END_ADDRESS))
+ break;
+ } else {
+ break;
+ }
+ } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
- if (status < 0)
+ if (status < 0 && count++ < 4)
pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
+ value, status, le32_to_cpu(*(u32 *)pdata));
return status;
}
@@ -128,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
wvalue = (u16)addr;
_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
- ret = *data;
+ ret = le32_to_cpu(*data);
kfree(data);
return ret;
}
@@ -160,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
u8 request;
u16 wvalue;
u16 index;
- u32 data;
+ __le32 data;
request = REALTEK_USB_VENQT_CMD_REQ;
index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)(addr&0x0000ffff);
- data = val;
+ data = cpu_to_le32(val);
_usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
len);
}
@@ -191,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
_usb_write_async(to_usb_device(dev), addr, val, 4);
}
+static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+ u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ u8 request = REALTEK_USB_VENQT_CMD_REQ;
+ u8 reqtype = REALTEK_USB_VENQT_WRITE;
+ u16 wvalue;
+ u16 index = REALTEK_USB_VENQT_CMD_IDX;
+ int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ u8 *buffer;
+ dma_addr_t dma_addr;
+
+ wvalue = (u16)(addr&0x0000ffff);
+ buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
+ if (!buffer)
+ return;
+ memcpy(buffer, data, len);
+ usb_control_msg(udev, pipe, request, reqtype, wvalue,
+ index, buffer, len, 50);
+
+ usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
+}
+
static void _rtl_usb_io_handler_init(struct device *dev,
struct ieee80211_hw *hw)
{
@@ -204,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.read8_sync = _usb_read8_sync;
rtlpriv->io.read16_sync = _usb_read16_sync;
rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.writeN_sync = _usb_writeN_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 713c7ddba8e..085dccdbd1b 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -63,6 +63,7 @@
#define AC_MAX 4
#define QOS_QUEUE_NUM 4
#define RTL_MAC80211_NUM_QUEUE 5
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
@@ -943,8 +944,10 @@ struct rtl_io {
unsigned long pci_base_addr; /*device I/O address */
void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val);
- void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val);
+ void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
+ u16 len);
u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
@@ -1541,14 +1544,13 @@ struct rtl_hal_cfg {
struct rtl_locks {
/* mutex */
struct mutex conf_mutex;
+ struct mutex ps_mutex;
/*spin lock */
- spinlock_t ips_lock;
spinlock_t irq_th_lock;
spinlock_t h2c_lock;
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
- spinlock_t lps_lock;
spinlock_t waitq_lock;
/*Dual mac*/
@@ -1573,7 +1575,8 @@ struct rtl_works {
/* For SW LPS */
struct delayed_work ps_work;
struct delayed_work ps_rfon_wq;
- struct tasklet_struct ips_leave_tasklet;
+
+ struct work_struct lps_leave_work;
};
struct rtl_debug {
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index eaa5f955620..6248c354fc5 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 3fe388b87c2..af08c8609c6 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -42,16 +42,6 @@ config WL12XX_SDIO
If you choose to build a module, it'll be called wl12xx_sdio.
Say N if unsure.
-config WL12XX_SDIO_TEST
- tristate "TI wl12xx SDIO testing support"
- depends on WL12XX && MMC && WL12XX_SDIO
- default n
- ---help---
- This module adds support for the SDIO bus testing with the
- TI wl12xx chipsets. You probably don't want this unless you are
- testing a new hardware platform. Select this if you want to test the
- SDIO bus which is connected to the wl12xx chip.
-
config WL12XX_PLATFORM_DATA
bool
depends on WL12XX_SDIO != n || WL1251_SDIO != n
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 621b3483ca2..fe67262ba19 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -3,14 +3,11 @@ wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
wl12xx_spi-objs = spi.o
wl12xx_sdio-objs = sdio.o
-wl12xx_sdio_test-objs = sdio_test.o
wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WL12XX) += wl12xx.o
obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
-obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
-
# small builtin driver bit
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index ca044a74319..7537c401a44 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -29,11 +29,12 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "ps.h"
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_wake_up_condition *wake_up;
int ret;
@@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
goto out;
}
- wake_up->role_id = wl->role_id;
+ wake_up->role_id = wlvif->role_id;
wake_up->wake_up_event = wl->conf.conn.wake_up_event;
wake_up->listen_interval = wl->conf.conn.listen_interval;
@@ -84,7 +85,8 @@ out:
return ret;
}
-int wl1271_acx_tx_power(struct wl1271 *wl, int power)
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power)
{
struct acx_current_tx_power *acx;
int ret;
@@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@ out:
return ret;
}
-int wl1271_acx_feature_cfg(struct wl1271 *wl)
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_feature_config *feature;
int ret;
@@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
}
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
- feature->role_id = wl->role_id;
+ feature->role_id = wlvif->role_id;
feature->data_flow_options = 0;
feature->options = 0;
@@ -184,33 +186,8 @@ out:
return ret;
}
-int wl1271_acx_pd_threshold(struct wl1271 *wl)
-{
- struct acx_packet_detection *pd;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "acx data pd threshold");
-
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
-
- ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
- if (ret < 0) {
- wl1271_warning("failed to set pd threshold: %d", ret);
- goto out;
- }
-
-out:
- kfree(pd);
- return ret;
-}
-
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time)
{
struct acx_slot *slot;
int ret;
@@ -223,7 +200,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
goto out;
}
- slot->role_id = wl->role_id;
+ slot->role_id = wlvif->role_id;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
@@ -238,8 +215,8 @@ out:
return ret;
}
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
@@ -253,7 +230,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
}
/* MAC filtering */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
acx->num_groups = mc_list_len;
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +247,8 @@ out:
return ret;
}
-int wl1271_acx_service_period_timeout(struct wl1271 *wl)
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_rx_timeout *rx_timeout;
int ret;
@@ -283,7 +261,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
wl1271_debug(DEBUG_ACX, "acx service period timeout");
- rx_timeout->role_id = wl->role_id;
+ rx_timeout->role_id = wlvif->role_id;
rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
@@ -300,7 +278,8 @@ out:
return ret;
}
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold)
{
struct acx_rts_threshold *rts;
int ret;
@@ -320,7 +299,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
goto out;
}
- rts->role_id = wl->role_id;
+ rts->role_id = wlvif->role_id;
rts->threshold = cpu_to_le16((u16)rts_threshold);
ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +342,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
@@ -380,7 +360,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
goto out;
}
- beacon_filter->role_id = wl->role_id;
+ beacon_filter->role_id = wlvif->role_id;
beacon_filter->enable = enable_filter;
/*
@@ -401,7 +381,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_beacon_filter_ie_table *ie_table;
int i, idx = 0;
@@ -417,7 +398,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
}
/* configure default beacon pass-through rules */
- ie_table->role_id = wl->role_id;
+ ie_table->role_id = wlvif->role_id;
ie_table->num_ie = 0;
for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +439,8 @@ out:
#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct acx_conn_monit_params *acx;
u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +461,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
timeout = wl->conf.conn.bss_lose_timeout;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->synch_fail_thold = cpu_to_le32(threshold);
acx->bss_lose_timeout = cpu_to_le32(timeout);
@@ -582,7 +564,7 @@ out:
return ret;
}
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_beacon_broadcast *bb;
int ret;
@@ -595,7 +577,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
goto out;
}
- bb->role_id = wl->role_id;
+ bb->role_id = wlvif->role_id;
bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +594,7 @@ out:
return ret;
}
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
{
struct acx_aid *acx_aid;
int ret;
@@ -625,7 +607,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
goto out;
}
- acx_aid->role_id = wl->role_id;
+ acx_aid->role_id = wlvif->role_id;
acx_aid->aid = cpu_to_le16(aid);
ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +650,8 @@ out:
return ret;
}
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
int ret;
@@ -681,7 +664,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->preamble = preamble;
ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +678,7 @@ out:
return ret;
}
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect)
{
struct acx_ctsprotect *acx;
@@ -709,7 +692,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ctsprotect = ctsprotect;
ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +722,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +738,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
- wl->basic_rate, wl->rate_set);
+ wlvif->basic_rate, wlvif->rate_set);
/* configure one basic rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -771,8 +754,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
/* configure one AP supported rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -788,7 +771,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
* (p2p packets should always go out with OFDM rates, even
* if we are currently connected to 11b AP)
*/
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
acx->rate_policy.enabled_rates =
cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
@@ -839,8 +822,8 @@ out:
return ret;
}
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
{
struct acx_ac_cfg *acx;
int ret = 0;
@@ -855,7 +838,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ac = ac;
acx->cw_min = cw_min;
acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +856,8 @@ out:
return ret;
}
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1)
{
@@ -889,7 +873,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->queue_id = queue_id;
acx->channel_type = channel_type;
acx->tsid = tsid;
@@ -1098,7 +1082,8 @@ out:
return ret;
}
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
int ret = 0;
@@ -1114,7 +1099,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
@@ -1129,7 +1114,8 @@ out:
return ret;
}
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address)
{
struct wl1271_acx_arp_filter *acx;
int ret;
@@ -1142,7 +1128,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
@@ -1189,7 +1175,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_keep_alive_mode *acx = NULL;
int ret = 0;
@@ -1202,7 +1189,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1203,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid)
{
struct wl1271_acx_keep_alive_config *acx = NULL;
int ret = 0;
@@ -1229,7 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
acx->index = index;
acx->tpl_validation = tpl_valid;
@@ -1247,8 +1235,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst)
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst)
{
struct wl1271_acx_rssi_snr_trigger *acx = NULL;
int ret = 0;
@@ -1261,9 +1249,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
goto out;
}
- wl->last_rssi_event = -1;
+ wlvif->last_rssi_event = -1;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1276,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1291,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->rssi_beacon = c->avg_weight_rssi_beacon;
acx->rssi_data = c->avg_weight_rssi_data;
acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1356,7 @@ out:
}
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode)
{
struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1370,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
@@ -1402,7 +1392,8 @@ out:
}
/* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ba_initiator_policy *acx;
int ret;
@@ -1416,7 +1407,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
}
/* set for the current role */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
acx->win_size = wl->conf.ht.tx_ba_win_size;
acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1485,8 @@ out:
return ret;
}
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_ps_rx_streaming *rx_streaming;
u32 conf_queues, enable_queues;
@@ -1523,7 +1515,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
if (!(conf_queues & BIT(i)))
continue;
- rx_streaming->role_id = wl->role_id;
+ rx_streaming->role_id = wlvif->role_id;
rx_streaming->tid = i;
rx_streaming->enable = enable_queues & BIT(i);
rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1534,7 @@ out:
return ret;
}
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ap_max_tx_retry *acx = NULL;
int ret;
@@ -1553,7 +1545,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
if (!acx)
return -ENOMEM;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1559,7 @@ out:
return ret;
}
-int wl1271_acx_config_ps(struct wl1271 *wl)
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_config_ps *config_ps;
int ret;
@@ -1582,7 +1574,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl)
config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
- config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+ config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
sizeof(*config_ps));
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index e3f93b4b342..69892b40c2d 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -171,13 +171,6 @@ struct acx_rx_msdu_lifetime {
__le32 lifetime;
} __packed;
-struct acx_packet_detection {
- struct acx_header header;
-
- __le32 threshold;
-} __packed;
-
-
enum acx_slot_type {
SLOT_TIME_LONG = 0,
SLOT_TIME_SHORT = 1,
@@ -654,11 +647,6 @@ struct acx_rate_class {
u8 reserved;
};
-#define ACX_TX_BASIC_RATE 0
-#define ACX_TX_AP_FULL_RATE 1
-#define ACX_TX_BASIC_RATE_P2P 2
-#define ACX_TX_AP_MODE_MGMT_RATE 4
-#define ACX_TX_AP_MODE_BCST_RATE 5
struct acx_rate_policy {
struct acx_header header;
@@ -1234,39 +1222,48 @@ enum {
};
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
-int wl1271_acx_tx_power(struct wl1271 *wl, int power);
-int wl1271_acx_feature_cfg(struct wl1271 *wl);
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power);
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_mem_map(struct wl1271 *wl,
struct acx_header *mem_map, size_t len);
int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
-int wl1271_acx_pd_threshold(struct wl1271 *wl);
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len);
-int wl1271_acx_service_period_timeout(struct wl1271 *wl);
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len);
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold);
int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter);
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
int wl12xx_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble);
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx);
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop);
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1273,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
int wl1271_acx_smart_reflex(struct wl1271 *wl);
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address);
int wl1271_acx_pm_config(struct wl1271 *wl);
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst);
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation, u8 hlid);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode);
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
-int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
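The prototype changes above are mechanical: every per-connection ACX helper gains a struct wl12xx_vif argument, and its implementation stamps the command with wlvif->role_id (and per-vif rate state) instead of the former globals on struct wl1271. A standalone sketch of that calling convention follows; fake_wl, fake_vif and fake_acx are deliberately simplified stand-ins, not driver structures.

#include <stdio.h>

/* stand-ins for struct wl1271 / struct wl12xx_vif / an ACX command */
struct fake_wl  { int unused; };                /* no per-connection state left */
struct fake_vif { int role_id; int basic_rate; };
struct fake_acx { int role_id; int rate; };

/* old shape: acx_cfg(wl) read wl->role_id; new shape: the caller names the vif */
static int acx_cfg(struct fake_wl *wl, struct fake_vif *wlvif)
{
	struct fake_acx acx = {
		.role_id = wlvif->role_id,      /* was: wl->role_id    */
		.rate    = wlvif->basic_rate,   /* was: wl->basic_rate */
	};

	(void)wl;
	printf("configure role %d at rate 0x%x\n", acx.role_id, acx.rate);
	return 0;
}

int main(void)
{
	struct fake_wl wl = { 0 };
	struct fake_vif sta = { .role_id = 1, .basic_rate = 0x2 };
	struct fake_vif ap  = { .role_id = 3, .basic_rate = 0x4 };

	/* one chip, two roles, configured independently */
	acx_cfg(&wl, &sta);
	acx_cfg(&wl, &ap);
	return 0;
}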
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index d4e628db76b..8f9cf5a816e 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -23,7 +23,9 @@
#include <linux/slab.h>
#include <linux/wl12xx.h>
+#include <linux/export.h>
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "boot.h"
@@ -346,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 3;
for (i = 0; i < burst_len; i++) {
+ if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
@@ -357,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 4;
dest_addr += 4;
}
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
}
/*
@@ -368,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
nvs_ptr = (u8 *)wl->nvs +
ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
@@ -383,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
kfree(nvs_aligned);
return 0;
+
+out_badnvs:
+ wl1271_error("nvs data is malformed");
+ return -EILSEQ;
}
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
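The three bounds checks added to wl1271_boot_upload_nvs() share one idea: never consume a 4-byte burst word, and never jump to the next NVS table, without first checking that the pointer is still inside the buffer; otherwise bail out through out_badnvs with -EILSEQ. The standalone sketch below shows the guard in isolation; the real NVS layout is simplified away and parse_bursts() is illustrative only, not driver code.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Walk a buffer of 32-bit little-endian words and stop cleanly instead of
 * reading past the end -- the same pattern the out_badnvs checks add. */
static int parse_bursts(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf;
	const uint8_t *end = buf + len;

	while (p < end) {
		/* need 4 bytes for one word */
		if (p + 3 >= end)
			return -1;              /* malformed: truncated word */

		uint32_t val = p[0] | (p[1] << 8) | (p[2] << 16) |
			       ((uint32_t)p[3] << 24);
		printf("word 0x%08x\n", val);
		p += 4;
	}
	return 0;
}

int main(void)
{
	const uint8_t good[8] = { 1, 0, 0, 0, 2, 0, 0, 0 };
	const uint8_t bad[6]  = { 1, 0, 0, 0, 2, 0 };   /* truncated */

	printf("good: %d\n", parse_bursts(good, sizeof(good)));
	printf("bad:  %d\n", parse_bursts(bad, sizeof(bad)));
	return 0;
}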
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index a52299e548f..e0d21797948 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "acx.h"
@@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from INI out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from ini out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id)
{
struct wl12xx_cmd_role_enable *cmd;
int ret;
@@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
goto out_free;
}
- memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->mac_address, addr, ETH_ALEN);
cmd->role_type = role_type;
ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@ out:
return ret;
}
-static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
__set_bit(link, wl->links_map);
+ __set_bit(link, wlvif->links_map);
*hlid = link;
return 0;
}
-static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
if (*hlid == WL12XX_INVALID_LINK_ID)
return;
__clear_bit(*hlid, wl->links_map);
+ __clear_bit(*hlid, wlvif->links_map);
*hlid = WL12XX_INVALID_LINK_ID;
}
-static int wl12xx_get_new_session_id(struct wl1271 *wl)
+static int wl12xx_get_new_session_id(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->session_counter >= SESSION_COUNTER_MAX)
- wl->session_counter = 0;
+ if (wlvif->session_counter >= SESSION_COUNTER_MAX)
+ wlvif->session_counter = 0;
- wl->session_counter++;
+ wlvif->session_counter++;
- return wl->session_counter;
+ return wlvif->session_counter;
}
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
- cmd->role_id = wl->dev_role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->dev_role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
- if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+ if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
if (ret)
goto out_free;
}
- cmd->device.hlid = wl->dev_hlid;
- cmd->device.session = wl->session_counter;
+ cmd->device.hlid = wlvif->dev_hlid;
+ cmd->device.session = wlvif->session_counter;
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
err_hlid:
/* clear links on error */
- __clear_bit(wl->dev_hlid, wl->links_map);
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -513,12 +539,13 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
wl1271_debug(DEBUG_CMD, "cmd role stop dev");
- cmd->role_id = wl->dev_role_id;
+ cmd->role_id = wlvif->dev_role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->dev_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -554,8 +581,9 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->sta.ssid_len = wl->ssid_len;
- memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->sta.hlid = wl->sta_hlid;
- cmd->sta.session = wl12xx_get_new_session_id(wl);
- cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.hlid = wlvif->sta.hlid;
+ cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
+ cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -613,12 +641,12 @@ out:
}
/* use this function to stop ibss as well */
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -648,16 +676,17 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
- wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
/* trying to use hidden SSID with an old hostapd version */
- if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+ if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
wl1271_error("got a null SSID from beacon/bss");
ret = -EINVAL;
goto out;
@@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out;
}
- ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
if (ret < 0)
goto out_free;
- ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
if (ret < 0)
goto out_free_global;
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
- cmd->ap.global_hlid = wl->ap_global_hlid;
- cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
- cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->ap.global_hlid = wlvif->ap.global_hlid;
+ cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+ cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
if (!bss_conf->hidden_ssid) {
/* take the SSID from the beacon for backward compatibility */
cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
- cmd->ap.ssid_len = wl->ssid_len;
- memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+ cmd->ap.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
} else {
cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->ap.local_rates = cpu_to_le32(0xffffffff);
- switch (wl->band) {
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+ wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
cmd->band = RADIO_BAND_2_4GHZ;
break;
}
@@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out_free;
out_free_bcast:
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
out_free_global:
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -735,7 +764,7 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
@@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -766,10 +795,11 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ibss.dtim_interval = bss_conf->dtim_period;
cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->ibss.ssid_len = wl->ssid_len;
- memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->ibss.hlid = wl->sta_hlid;
- cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.hlid = wlvif->sta.hlid;
+ cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
- wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+ wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
+ vif->bss_conf.bssid);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -962,7 +993,8 @@ out:
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
goto out;
}
- ps_params->role_id = wl->role_id;
+ ps_params->role_id = wlvif->role_id;
ps_params->ps_mode = ps_mode;
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@ out:
return ret;
}
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
int size;
@@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
int ret = -ENOMEM;
- if (wl->bss_type == BSS_TYPE_IBSS) {
+ if (wlvif->bss_type == BSS_TYPE_IBSS) {
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw,
+ wl12xx_wlvif_to_vif(wlvif));
if (!skb)
goto out;
size = skb->len;
@@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
}
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@ out:
}
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
skb->data, skb->len,
CMD_TEMPL_KLV_IDX_NULL_DATA,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@ out:
}
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret = 0;
- skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ skb = ieee80211_pspoll_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
- skb->len, 0, wl->basic_rate_set);
+ skb->len, 0, wlvif->basic_rate_set);
out:
dev_kfree_skb(skb);
return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret;
u32 rate;
- skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie, ie_len);
if (!skb) {
ret = -ENOMEM;
@@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@ out:
}
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
int ret;
u32 rate;
if (!skb)
- skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+ skb = ieee80211_ap_probereq_get(wl->hw, vif);
if (!skb)
goto out;
wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
- if (wl->band == IEEE80211_BAND_2GHZ)
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
else
@@ -1159,9 +1199,11 @@ out:
return skb;
}
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr)
{
int ret;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_arp_rsp_template tmpl;
struct ieee80211_hdr_3addr *hdr;
struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_DATA |
IEEE80211_FCTL_TODS);
- memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
- memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+ memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, vif->addr, ETH_ALEN);
memset(hdr->addr3, 0xff, ETH_ALEN);
/* llc layer */
@@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
- memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+ memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
tmpl.sender_ip = ip_addr;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
&tmpl, sizeof(tmpl), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
return ret;
}
-int wl1271_build_qos_null_data(struct wl1271 *wl)
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr template;
memset(&template, 0, sizeof(template));
- memcpy(template.addr1, wl->bssid, ETH_ALEN);
- memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
- memcpy(template.addr3, wl->bssid, ETH_ALEN);
+ memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(template.addr2, vif->addr, ETH_ALEN);
+ memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
sizeof(template), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
}
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@ out:
return ret;
}
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
@@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
int ret = 0;
/* hlid might have already been deleted */
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
goto out;
}
- cmd->hlid = wl->sta_hlid;
+ cmd->hlid = wlvif->sta.hlid;
if (key_type == KEY_WEP)
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@ out:
* TODO: merge with sta/ibss into 1 set_key function.
* note there are slight diffs
*/
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
@@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (!cmd)
return -ENOMEM;
- if (hlid == wl->ap_bcast_hlid) {
+ if (hlid == wlvif->ap.bcast_hlid) {
if (key_type == KEY_WEP)
lid_type = WEP_DEFAULT_LID_TYPE;
else
@@ -1411,7 +1456,8 @@ out:
return ret;
}
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid)
{
struct wl12xx_cmd_add_peer *cmd;
int i, ret;
@@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
else
cmd->psd_type[i] = WL1271_PSD_LEGACY;
- sta_rates = sta->supp_rates[wl->band];
+ sta_rates = sta->supp_rates[wlvif->band];
if (sta->ht_cap.ht_supported)
sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
- wl->band));
+ wlvif->band));
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@ out:
return ret;
}
-static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
+static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 role_id)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+ wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
return -EINVAL;
@@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
}
cmd->role_id = role_id;
- cmd->channel = wl->channel;
- switch (wl->band) {
+ cmd->channel = wlvif->channel;
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_error("roc - unknown band: %d", (int)wl->band);
+ wl1271_error("roc - unknown band: %d", (int)wlvif->band);
ret = -EINVAL;
goto out_free;
}
@@ -1657,14 +1704,14 @@ out:
return ret;
}
-int wl12xx_roc(struct wl1271 *wl, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
{
int ret = 0;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
- ret = wl12xx_cmd_roc(wl, role_id);
+ ret = wl12xx_cmd_roc(wl, wlvif, role_id);
if (ret < 0)
goto out;
@@ -1753,3 +1800,50 @@ out_free:
out:
return ret;
}
+
+/* start dev role and roc on its channel */
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out_stop;
+
+ return 0;
+
+out_stop:
+ wl12xx_cmd_role_stop_dev(wl, wlvif);
+out:
+ return ret;
+}
+
+/* cancel ROC on the dev role's channel, then stop the dev role */
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index b7bd42769aa..3f7d0b93c24 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
void *buf, size_t buf_len, int index, u32 rates);
-int wl1271_cmd_build_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
-int wl1271_build_qos_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr);
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16);
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16);
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct ieee80211_channel_switch *ch_switch);
int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 *hlid);
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 04bb8fbf93f..1bcfb017058 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -440,6 +440,10 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
CONF_HW_BIT_RATE_54MBPS)
+#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_11MBPS)
+
#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \
CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 00000000000..b85fd8c41e8
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debug.h
@@ -0,0 +1,101 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <coelho@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+ DEBUG_NONE = 0,
+ DEBUG_IRQ = BIT(0),
+ DEBUG_SPI = BIT(1),
+ DEBUG_BOOT = BIT(2),
+ DEBUG_MAILBOX = BIT(3),
+ DEBUG_TESTMODE = BIT(4),
+ DEBUG_EVENT = BIT(5),
+ DEBUG_TX = BIT(6),
+ DEBUG_RX = BIT(7),
+ DEBUG_SCAN = BIT(8),
+ DEBUG_CRYPT = BIT(9),
+ DEBUG_PSM = BIT(10),
+ DEBUG_MAC80211 = BIT(11),
+ DEBUG_CMD = BIT(12),
+ DEBUG_ACX = BIT(13),
+ DEBUG_SDIO = BIT(14),
+ DEBUG_FILTERS = BIT(15),
+ DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
+ DEBUG_ALL = ~0,
+};
+
+extern u32 wl12xx_debug_level;
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl1271_error(fmt, arg...) \
+ pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl1271_warning(fmt, arg...) \
+ pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl1271_notice(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_info(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_debug(level, fmt, arg...) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+ } while (0)
+
+/* TODO: use pr_debug_hex_dump when it becomes available */
+#define wl1271_dump(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ 0); \
+ } while (0)
+
+#define wl1271_dump_ascii(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ true); \
+ } while (0)
+
+#endif /* __DEBUG_H__ */
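Callers of this new header pass a DEBUG_* class bit and a printf-style format; output is gated at runtime by wl12xx_debug_level, while wl1271_error() and wl1271_warning() always print. The lines below are representative call sites lifted from later hunks of this same patch, shown only to illustrate the intended usage.

	wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
	wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
	wl1271_error("nvs data is malformed");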
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 3999fd52830..15eb3a9c30c 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "ps.h"
#include "io.h"
@@ -316,12 +317,19 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
{
struct wl1271 *wl = file->private_data;
int res = 0;
- char buf[1024];
+ ssize_t ret;
+ char *buf;
+
+#define DRIVER_STATE_BUF_LEN 1024
+
+ buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
mutex_lock(&wl->mutex);
#define DRIVER_STATE_PRINT(x, fmt) \
- (res += scnprintf(buf + res, sizeof(buf) - res,\
+ (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", wl->x))
#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
@@ -346,29 +354,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
- DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
DRIVER_STATE_PRINT_INT(rx_counter);
- DRIVER_STATE_PRINT_INT(session_counter);
DRIVER_STATE_PRINT_INT(state);
- DRIVER_STATE_PRINT_INT(bss_type);
DRIVER_STATE_PRINT_INT(channel);
- DRIVER_STATE_PRINT_HEX(rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate);
DRIVER_STATE_PRINT_INT(band);
- DRIVER_STATE_PRINT_INT(beacon_int);
- DRIVER_STATE_PRINT_INT(psm_entry_retry);
- DRIVER_STATE_PRINT_INT(ps_poll_failures);
DRIVER_STATE_PRINT_INT(power_level);
- DRIVER_STATE_PRINT_INT(rssi_thold);
- DRIVER_STATE_PRINT_INT(last_rssi_event);
DRIVER_STATE_PRINT_INT(sg_enabled);
DRIVER_STATE_PRINT_INT(enable_11a);
DRIVER_STATE_PRINT_INT(noise);
- DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
- DRIVER_STATE_PRINT_INT(last_tx_hlid);
- DRIVER_STATE_PRINT_INT(ba_support);
- DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
DRIVER_STATE_PRINT_LHEX(ap_ps_map);
DRIVER_STATE_PRINT_HEX(quirks);
@@ -387,10 +380,13 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
#undef DRIVER_STATE_PRINT_LHEX
#undef DRIVER_STATE_PRINT_STR
#undef DRIVER_STATE_PRINT
+#undef DRIVER_STATE_BUF_LEN
mutex_unlock(&wl->mutex);
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
}
static const struct file_operations driver_state_ops = {
@@ -399,6 +395,115 @@ static const struct file_operations driver_state_ops = {
.llseek = default_llseek,
};
+static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
+ int ret, res = 0;
+ const int buf_size = 4096;
+ char *buf;
+ char tmp_buf[64];
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+
+#define VIF_STATE_PRINT(x, fmt) \
+ (res += scnprintf(buf + res, buf_size - res, \
+ #x " = " fmt "\n", wlvif->x))
+
+#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld")
+#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d")
+#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s")
+#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx")
+#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
+#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x")
+
+#define VIF_STATE_PRINT_NSTR(x, len) \
+ do { \
+ memset(tmp_buf, 0, sizeof(tmp_buf)); \
+ memcpy(tmp_buf, wlvif->x, \
+ min_t(u8, len, sizeof(tmp_buf) - 1)); \
+ res += scnprintf(buf + res, buf_size - res, \
+ #x " = %s\n", tmp_buf); \
+ } while (0)
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ VIF_STATE_PRINT_INT(role_id);
+ VIF_STATE_PRINT_INT(bss_type);
+ VIF_STATE_PRINT_LHEX(flags);
+ VIF_STATE_PRINT_INT(p2p);
+ VIF_STATE_PRINT_INT(dev_role_id);
+ VIF_STATE_PRINT_INT(dev_hlid);
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ VIF_STATE_PRINT_INT(sta.hlid);
+ VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
+ VIF_STATE_PRINT_INT(sta.basic_rate_idx);
+ VIF_STATE_PRINT_INT(sta.ap_rate_idx);
+ VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+ } else {
+ VIF_STATE_PRINT_INT(ap.global_hlid);
+ VIF_STATE_PRINT_INT(ap.bcast_hlid);
+ VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
+ VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
+ VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
+ }
+ VIF_STATE_PRINT_INT(last_tx_hlid);
+ VIF_STATE_PRINT_LHEX(links_map[0]);
+ VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
+ VIF_STATE_PRINT_INT(band);
+ VIF_STATE_PRINT_INT(channel);
+ VIF_STATE_PRINT_HEX(bitrate_masks[0]);
+ VIF_STATE_PRINT_HEX(bitrate_masks[1]);
+ VIF_STATE_PRINT_HEX(basic_rate_set);
+ VIF_STATE_PRINT_HEX(basic_rate);
+ VIF_STATE_PRINT_HEX(rate_set);
+ VIF_STATE_PRINT_INT(beacon_int);
+ VIF_STATE_PRINT_INT(default_key);
+ VIF_STATE_PRINT_INT(aid);
+ VIF_STATE_PRINT_INT(session_counter);
+ VIF_STATE_PRINT_INT(ps_poll_failures);
+ VIF_STATE_PRINT_INT(psm_entry_retry);
+ VIF_STATE_PRINT_INT(power_level);
+ VIF_STATE_PRINT_INT(rssi_thold);
+ VIF_STATE_PRINT_INT(last_rssi_event);
+ VIF_STATE_PRINT_INT(ba_support);
+ VIF_STATE_PRINT_INT(ba_allowed);
+ VIF_STATE_PRINT_LLHEX(tx_security_seq);
+ VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
+ }
+
+#undef VIF_STATE_PRINT_INT
+#undef VIF_STATE_PRINT_LONG
+#undef VIF_STATE_PRINT_HEX
+#undef VIF_STATE_PRINT_LHEX
+#undef VIF_STATE_PRINT_LLHEX
+#undef VIF_STATE_PRINT_STR
+#undef VIF_STATE_PRINT_NSTR
+#undef VIF_STATE_PRINT
+
+ mutex_unlock(&wl->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations vifs_state_ops = {
+ .read = vifs_state_read,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -520,6 +625,7 @@ static ssize_t rx_streaming_interval_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -543,7 +649,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -572,6 +680,7 @@ static ssize_t rx_streaming_always_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -595,7 +704,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -624,6 +735,7 @@ static ssize_t beacon_filtering_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
char buf[10];
size_t len;
unsigned long value;
@@ -646,7 +758,9 @@ static ssize_t beacon_filtering_write(struct file *file,
if (ret < 0)
goto out;
- ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -770,6 +884,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(gpio_power, rootdir);
DEBUGFS_ADD(start_recovery, rootdir);
DEBUGFS_ADD(driver_state, rootdir);
+ DEBUGFS_ADD(vifs_state, rootdir);
DEBUGFS_ADD(dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
DEBUGFS_ADD(beacon_filtering, rootdir);
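The wl12xx_for_each_wlvif() / wl12xx_for_each_wlvif_sta() loops used in the hunks above come from wl12xx.h, which is not part of this diff. A plausible minimal form is sketched below, assuming the driver keeps its wl12xx_vif instances on a wl->wlvif_list; treat the exact definition (and the way the _sta variant filters on bss_type) as an assumption, not the header's actual text.

/* Assumed shape of the iterators used above (wl12xx.h is not shown here):
 * each wl12xx_vif is assumed to sit on a list_head hanging off struct wl1271. */
#define wl12xx_for_each_wlvif(wl, wlvif) \
	list_for_each_entry(wlvif, &(wl)->wlvif_list, list)

/* _sta variant: presumably the same loop restricted to station-type vifs */
#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
	wl12xx_for_each_wlvif(wl, wlvif) \
		if ((wlvif)->bss_type == BSS_TYPE_STA_BSS)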
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 674ad2a9e40..00ce794eeba 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -22,6 +22,7 @@
*/
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "event.h"
@@ -31,12 +32,16 @@
void wl1271_pspoll_work(struct work_struct *work)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct delayed_work *dwork;
struct wl1271 *wl;
int ret;
dwork = container_of(work, struct delayed_work, work);
- wl = container_of(dwork, struct wl1271, pspoll_work);
+ wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
+ vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+ wl = wlvif->wl;
wl1271_debug(DEBUG_EVENT, "pspoll work");
@@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
+ if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+ wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
};
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
+static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int delay = wl->conf.conn.ps_poll_recovery_period;
int ret;
- wl->ps_poll_failures++;
- if (wl->ps_poll_failures == 1)
+ wlvif->ps_poll_failures++;
+ if (wlvif->ps_poll_failures == 1)
wl1271_info("AP with dysfunctional ps-poll, "
"trying to work around it.");
	/* force active mode to receive data from the AP */
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
return;
- set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
- ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
+ set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+ ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
msecs_to_jiffies(delay));
}
@@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
}
static int wl1271_event_ps_report(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox,
bool *beacon_loss)
{
@@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
case EVENT_ENTER_POWER_SAVE_FAIL:
wl1271_debug(DEBUG_PSM, "PSM entry failed");
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
/* remain in active mode */
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
break;
}
- if (wl->psm_entry_retry < total_retries) {
- wl->psm_entry_retry++;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ if (wlvif->psm_entry_retry < total_retries) {
+ wlvif->psm_entry_retry++;
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
} else {
wl1271_info("No ack to nullfunc from AP.");
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
*beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
- wl->psm_entry_retry = 0;
-
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, true);
- if (ret < 0)
- break;
+ wlvif->psm_entry_retry = 0;
/*
* BET has only a minor effect in 5GHz and masks
* channel switch IEs, so we only enable BET on 2.4GHz
*/
- if (wl->band == IEEE80211_BAND_2GHZ)
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
/* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, true);
+ ret = wl1271_acx_bet_enable(wl, wlvif, true);
- if (wl->ps_compl) {
- complete(wl->ps_compl);
- wl->ps_compl = NULL;
+ if (wlvif->ps_compl) {
+ complete(wlvif->ps_compl);
+ wlvif->ps_compl = NULL;
}
break;
default:
@@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
}
static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
enum nl80211_cqm_rssi_threshold_event event;
s8 metric = mbox->rssi_snr_trigger_metric[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
- if (metric <= wl->rssi_thold)
+ if (metric <= wlvif->rssi_thold)
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
- if (event != wl->last_rssi_event)
- ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
- wl->last_rssi_event = event;
+ if (event != wlvif->last_rssi_event)
+ ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+ wlvif->last_rssi_event = event;
}
-static void wl1271_stop_ba_event(struct wl1271 *wl)
+static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (!wl->ba_rx_bitmap)
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (!wlvif->sta.ba_rx_bitmap)
return;
- ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
- wl->bssid);
+ ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+ vif->bss_conf.bssid);
} else {
- int i;
+ u8 hlid;
struct wl1271_link *lnk;
- for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
- lnk = &wl->links[i];
- if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
+ WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ if (!lnk->ba_bitmap)
continue;
- ieee80211_stop_rx_ba_session(wl->vif,
+ ieee80211_stop_rx_ba_session(vif,
lnk->ba_bitmap,
lnk->addr);
}
@@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl)
static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
u8 enable)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
if (enable) {
/* disable dynamic PS when requested by the firmware */
- ieee80211_disable_dyn_ps(wl->vif);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_disable_dyn_ps(vif);
+ }
set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
} else {
- ieee80211_enable_dyn_ps(wl->vif);
clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_enable_dyn_ps(vif);
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
}
}
@@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
u32 vector;
bool beacon_loss = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
@@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, wl->scan_vif);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -253,8 +272,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
}
}
- if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
- wl->bss_type == BSS_TYPE_STA_BSS)
+ if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
wl12xx_event_soft_gemini_sense(wl,
mbox->soft_gemini_sense_info);
@@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
+ if (vector & BSS_LOSE_EVENT_ID) {
+ /* TODO: check for multi-role */
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
+ if (vector & PS_REPORT_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
- ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
- if (ret < 0)
- return ret;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl1271_event_ps_report(wl, wlvif,
+ mbox, &beacon_loss);
+ if (ret < 0)
+ return ret;
+ }
}
- if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
- wl1271_event_pspoll_delivery_fail(wl);
+ if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_pspoll_delivery_fail(wl, wlvif);
+ }
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+ /* TODO: check actual multi-role support */
wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
- if (wl->vif)
- wl1271_event_rssi_trigger(wl, mbox);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_rssi_trigger(wl, wlvif, mbox);
+ }
}
- if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
+ u8 role_id = mbox->role_id;
wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
- "ba_allowed = 0x%x", mbox->rx_ba_allowed);
+ "ba_allowed = 0x%x, role_id=%d",
+ mbox->rx_ba_allowed, role_id);
- wl->ba_allowed = !!mbox->rx_ba_allowed;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (role_id != 0xff && role_id != wlvif->role_id)
+ continue;
- if (wl->vif && !wl->ba_allowed)
- wl1271_stop_ba_event(wl);
+ wlvif->ba_allowed = !!mbox->rx_ba_allowed;
+ if (!wlvif->ba_allowed)
+ wl1271_stop_ba_event(wl, wlvif);
+ }
}
- if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
"status = 0x%x",
mbox->channel_switch_status);
@@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* 1) channel switch complete with status=0
* 2) channel switch failed status=1
*/
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
- (wl->vif))
- ieee80211_chswitch_done(wl->vif,
- mbox->channel_switch_status ? false : true);
+
+ /* TODO: configure only the relevant vif */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ bool success;
+
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+ &wl->flags))
+ continue;
+
+ success = mbox->channel_switch_status ? false : true;
+ ieee80211_chswitch_done(vif, success);
+ }
}
if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- if (wl->vif)
- wl1271_tx_dummy_packet(wl);
+ wl1271_tx_dummy_packet(wl);
}
/*
* "TX retries exceeded" has a different meaning according to mode.
* In AP mode the offending station is disconnected.
*/
- if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+ if (vector & MAX_TX_RETRY_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
disconnect_sta = true;
}
- if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+ if (vector & INACTIVE_STA_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
disconnect_sta = true;
}
- if (is_ap && disconnect_sta) {
+ if (disconnect_sta) {
u32 num_packets = wl->conf.tx.max_tx_retries;
struct ieee80211_sta *sta;
const u8 *addr;
int h;
- for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
- h < AP_MAX_LINKS;
- h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
- if (!wl1271_is_active_sta(wl, h))
+ for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ bool found = false;
+ /* find the ap vif connected to this sta */
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ if (!test_bit(h, wlvif->ap.sta_hlid_map))
+ continue;
+ found = true;
+ break;
+ }
+ if (!found)
continue;
+ vif = wl12xx_wlvif_to_vif(wlvif);
addr = wl->links[h].addr;
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, addr);
+ sta = ieee80211_find_sta(vif, addr);
if (sta) {
wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
}
}
- if (wl->vif && beacon_loss)
- ieee80211_connection_loss(wl->vif);
+ if (beacon_loss)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+ }
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 49c1a0ede5b..1d878ba47bf 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
void wl1271_pspoll_work(struct work_struct *work);
-/* Functions from main.c */
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
-
#endif
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 04db64c94e9..ca7ee59e450 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include "debug.h"
#include "init.h"
#include "wl12xx_80211.h"
#include "acx.h"
@@ -33,7 +34,7 @@
#include "tx.h"
#include "io.h"
-int wl1271_sta_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret, i;
@@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
- (struct wl12xx_qos_null_data_template),
+ (struct ieee80211_qos_hdr),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
+ /*
+ * Put very large empty placeholders for all templates. These
+ * reserve memory for later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
- WL1271_CMD_TEMPL_DFLT_SIZE, i,
- WL1271_RATE_AUTOMATIC);
+ sizeof(struct ieee80211_qos_hdr),
+ i, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
@@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
return 0;
}
-static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_disconn_template *tmpl;
int ret;
@@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_DEAUTH);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
tmpl, sizeof(*tmpl), 0, rate);
@@ -123,8 +148,10 @@ out:
return ret;
}
-static int wl1271_ap_init_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_hdr_3addr *nullfunc;
int ret;
u32 rate;
@@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
/* nullfunc->addr1 is filled by FW */
- memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
sizeof(*nullfunc), 0, rate);
@@ -153,8 +180,10 @@ out:
return ret;
}
-static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr *qosnull;
int ret;
u32 rate;
@@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
/* qosnull->addr1 is filled by FW */
- memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
+ memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
sizeof(*qosnull), 0, rate);
@@ -183,49 +212,6 @@ out:
return ret;
}
-static int wl1271_ap_init_templates_config(struct wl1271 *wl)
-{
- int ret;
-
- /*
- * Put very large empty placeholders for all templates. These
- * reserve memory for later.
- */
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
- sizeof
- (struct wl12xx_disconn_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
- sizeof(struct wl12xx_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
- sizeof
- (struct wl12xx_qos_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int wl12xx_init_rx_config(struct wl1271 *wl)
{
int ret;
@@ -237,39 +223,37 @@ static int wl12xx_init_rx_config(struct wl1271 *wl)
return 0;
}
-int wl1271_init_phy_config(struct wl1271 *wl)
+static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_pd_threshold(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
+ ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
if (ret < 0)
return ret;
- ret = wl1271_acx_service_period_timeout(wl);
+ ret = wl1271_acx_service_period_timeout(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
+ ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_init_beacon_filter(struct wl1271 *wl)
+static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- /* disable beacon filtering at this stage */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_table(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_beacon_filter_table(wl);
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
if (ret < 0)
return ret;
@@ -302,11 +286,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
+static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_bcn_dtim_options(wl);
+ ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
if (ret < 0)
return ret;
@@ -327,36 +312,13 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
return 0;
}
-static int wl1271_sta_hw_init(struct wl1271 *wl)
+/* generic sta initialization (non vif-specific) */
+static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (wl->chip.id != CHIP_ID_1283_PG20) {
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
- }
-
/* PS config */
- ret = wl1271_acx_config_ps(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- return ret;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
+ ret = wl12xx_acx_config_ps(wl, wlvif);
if (ret < 0)
return ret;
@@ -365,103 +327,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- /* Beacons and broadcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- return ret;
-
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_sta_rate_policies(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* Configure the FW logger */
- ret = wl12xx_init_fwlog(wl);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret, i;
/* disable all keep-alive templates */
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
return ret;
}
/* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init(struct wl1271 *wl)
+/* generic ap initialization (non vif-specific) */
+static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_ap_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for power always on */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
-
- ret = wl1271_init_ap_rates(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_ap_max_tx_retry(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* initialize Tx power */
- ret = wl1271_acx_tx_power(wl, wl->power_level);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-int wl1271_ap_init_templates(struct wl1271 *wl)
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
- ret = wl1271_ap_init_deauth_template(wl);
+ ret = wl1271_ap_init_deauth_template(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_null_template(wl);
+ ret = wl1271_ap_init_null_template(wl, vif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_qos_null_template(wl);
+ ret = wl1271_ap_init_qos_null_template(wl, vif);
if (ret < 0)
return ret;
@@ -469,43 +389,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
* when operating as AP we want to receive external beacons for
* configuring ERP protection.
*/
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
- return wl1271_ap_init_templates(wl);
+ return wl1271_ap_init_templates(wl, vif);
}
-int wl1271_init_ap_rates(struct wl1271 *wl)
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret;
struct conf_tx_rate_class rc;
u32 supported_rates;
- wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
+ wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
+ wlvif->basic_rate_set);
- if (wl->basic_rate_set == 0)
+ if (wlvif->basic_rate_set == 0)
return -EINVAL;
- rc.enabled_rates = wl->basic_rate_set;
+ rc.enabled_rates = wlvif->basic_rate_set;
rc.long_retry_limit = 10;
rc.short_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
if (ret < 0)
return ret;
/* use the min basic rate for AP broadcast/multicast */
- rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
if (ret < 0)
return ret;
@@ -513,7 +435,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
+ if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +449,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc,
+ wlvif->ap.ucast_rate_idx[i]);
if (ret < 0)
return ret;
}
@@ -535,24 +458,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
return 0;
}
-static int wl1271_set_ba_policies(struct wl1271 *wl)
+static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
/* Reset the BA RX indicators */
- wl->ba_rx_bitmap = 0;
- wl->ba_allowed = true;
+ wlvif->ba_allowed = true;
wl->ba_rx_session_count = 0;
/* BA is supported in STA/AP modes */
- if (wl->bss_type != BSS_TYPE_AP_BSS &&
- wl->bss_type != BSS_TYPE_STA_BSS) {
- wl->ba_support = false;
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
+ wlvif->bss_type != BSS_TYPE_STA_BSS) {
+ wlvif->ba_support = false;
return 0;
}
- wl->ba_support = true;
+ wlvif->ba_support = true;
/* 802.11n initiator BA session setting */
- return wl12xx_acx_set_ba_initiator_policy(wl);
+ return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
}
int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +484,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl)
if (wl->chip.id == CHIP_ID_1283_PG20) {
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+ if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
/* Enable SDIO padding */
host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
@@ -575,39 +497,186 @@ out:
return ret;
}
+/* vif-specific initialization */
+static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
-int wl1271_hw_init(struct wl1271 *wl)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_sta_beacon_filter(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* vif-specific initialization */
+static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ int ret;
+
+ ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* initialize Tx power */
+ ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret, i;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_general_parms(wl);
- else
- ret = wl1271_cmd_general_parms(wl);
+ /*
+ * consider all existing roles before configuring psm.
+ * TODO: reconfigure on interface removal.
+ */
+ if (!wl->ap_count) {
+ if (is_ap) {
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+ } else if (!wl->sta_count) {
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Mode specific init */
+ if (is_ap) {
+ ret = wl1271_ap_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_ap_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_sta_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_sta_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
+
+ wl12xx_init_phy_vif_config(wl, wlvif);
+
+ /* Default TID/AC configuration */
+ BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
+ conf_ac->cw_min, conf_ac->cw_max,
+ conf_ac->aifsn, conf_ac->tx_op_limit);
+ if (ret < 0)
+ return ret;
+
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, wlvif,
+ conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Configure HW encryption */
+ ret = wl1271_acx_feature_cfg(wl, wlvif);
if (ret < 0)
return ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_radio_parms(wl);
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl, vif);
else
- ret = wl1271_cmd_radio_parms(wl);
+ ret = wl1271_sta_hw_init_post_mem(wl, vif);
+
+ if (ret < 0)
+ return ret;
+
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl, wlvif);
if (ret < 0)
return ret;
+ return 0;
+}
+
+int wl1271_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ ret = wl128x_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl128x_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ }
+
/* Chip-specific init */
ret = wl1271_chip_specific_init(wl);
if (ret < 0)
return ret;
- /* Mode specific init */
- if (is_ap)
- ret = wl1271_ap_hw_init(wl);
- else
- ret = wl1271_sta_hw_init(wl);
+ /* Init templates */
+ ret = wl1271_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+ /* Configure the FW logger */
+ ret = wl12xx_init_fwlog(wl);
if (ret < 0)
return ret;
@@ -626,11 +695,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl1271_acx_dco_itrim_params(wl);
if (ret < 0)
goto out_free_memmap;
@@ -655,61 +719,20 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure HW encryption */
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure PM */
ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
- /* Mode specific init - post mem init */
- if (is_ap)
- ret = wl1271_ap_hw_init_post_mem(wl);
- else
- ret = wl1271_sta_hw_init_post_mem(wl);
-
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_set_rate_mgmt_params(wl);
if (ret < 0)
goto out_free_memmap;
- /* Configure initiator BA sessions policies */
- ret = wl1271_set_ba_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure hangover */
ret = wl12xx_acx_config_hangover(wl);
if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 3a3c230fd29..2da0f404ef6 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,13 +27,13 @@
#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_sta_init_templates_config(struct wl1271 *wl);
-int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
int wl1271_chip_specific_init(struct wl1271 *wl);
int wl1271_hw_init(struct wl1271 *wl);
-int wl1271_init_ap_rates(struct wl1271 *wl);
-int wl1271_ap_init_templates(struct wl1271 *wl);
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index c2da66f4504..079ad380e8f 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -24,8 +24,10 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
@@ -46,7 +48,7 @@
bool wl1271_set_block_size(struct wl1271 *wl)
{
if (wl->if_ops->set_block_size) {
- wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+ wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
return true;
}
@@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl)
void wl1271_disable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->disable_irq(wl);
+ disable_irq(wl->irq);
}
void wl1271_enable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->enable_irq(wl);
+ enable_irq(wl->irq);
}
/* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
if (wl->if_ops->reset)
- wl->if_ops->reset(wl);
+ wl->if_ops->reset(wl->dev);
}
void wl1271_io_init(struct wl1271 *wl)
{
if (wl->if_ops->init)
- wl->if_ops->init(wl);
+ wl->if_ops->init(wl->dev);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index e839341dfaf..d398cbcea98 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl);
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
-static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
-{
- return wl->if_ops->dev(wl);
-}
-
-
/* Raw target IO, address is not translated */
static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->write(wl, addr, buf, len, fixed);
+ wl->if_ops->write(wl->dev, addr, buf, len, fixed);
}
static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->read(wl, addr, buf, len, fixed);
+ wl->if_ops->read(wl->dev, addr, buf, len, fixed);
}
static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
static inline void wl1271_power_off(struct wl1271 *wl)
{
- wl->if_ops->power(wl, false);
+ wl->if_ops->power(wl->dev, false);
clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
{
- int ret = wl->if_ops->power(wl, true);
+ int ret = wl->if_ops->power(wl->dev, true);
if (ret == 0)
set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
@@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
int wl1271_set_partition(struct wl1271 *wl,
struct wl1271_partition_set *p);
+bool wl1271_set_block_size(struct wl1271 *wl);
+
/* Functions from wl1271_main.c */
-int wl1271_register_hw(struct wl1271 *wl);
-void wl1271_unregister_hw(struct wl1271 *wl);
-int wl1271_init_ieee80211(struct wl1271 *wl);
-struct ieee80211_hw *wl1271_alloc_hw(void);
-int wl1271_free_hw(struct wl1271 *wl);
-irqreturn_t wl1271_irq(int irq, void *data);
-bool wl1271_set_block_size(struct wl1271 *wl);
int wl1271_tx_dummy_packet(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 884f82b6321..c3058419e22 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -32,8 +32,10 @@
#include <linux/slab.h>
#include <linux/wl12xx.h>
#include <linux/sched.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "io.h"
@@ -377,42 +379,30 @@ static char *fwlog_param;
static bool bug_on_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues);
-static void wl1271_free_ap_keys(struct wl1271 *wl);
-
-
-static void wl1271_device_release(struct device *dev)
-{
-
-}
-
-static struct platform_device wl1271_device = {
- .name = "wl1271",
- .id = -1,
-
- /* device model insists to have a release function */
- .dev = {
- .release = wl1271_device_release,
- },
-};
+static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
-static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ unsigned char operstate)
{
int ret;
+
if (operstate != IF_OPER_UP)
return 0;
- if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+ if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
return 0;
- ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
if (ret < 0)
return ret;
- wl12xx_croc(wl, wl->role_id);
+ wl12xx_croc(wl, wlvif->role_id);
wl1271_info("Association completed.");
return 0;
@@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
struct ieee80211_hw *hw;
struct wl1271 *wl;
struct wl1271 *wl_temp;
+ struct wl12xx_vif *wlvif;
int ret = 0;
/* Check that this notification is for us. */
@@ -459,17 +450,18 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (wl->state == WL1271_STATE_OFF)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- goto out;
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ continue;
- wl1271_check_operstate(wl, dev->operstate);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
- wl1271_ps_elp_sleep(wl);
+ wl1271_check_operstate(wl, wlvif, dev->operstate);
+ wl1271_ps_elp_sleep(wl);
+ }
out:
mutex_unlock(&wl->mutex);
@@ -498,19 +490,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
return 0;
}
-static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
int ret = 0;
/* we should hold wl->mutex */
- ret = wl1271_acx_ps_rx_streaming(wl, enable);
+ ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
if (ret < 0)
goto out;
if (enable)
- set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
else
- clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
return ret;
}
@@ -519,25 +512,25 @@ out:
* this function is being called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled
*/
-int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
int period = wl->conf.rx_streaming.interval;
/* don't reconfigure if rx_streaming is disabled */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
/* reconfigure/disable according to new streaming_period */
if (period &&
- test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
(wl->conf.rx_streaming.always ||
test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
else {
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* don't cancel_work_sync since we might deadlock */
- del_timer_sync(&wl->rx_streaming_timer);
+ del_timer_sync(&wlvif->rx_streaming_timer);
}
out:
return ret;
@@ -546,13 +539,14 @@ out:
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_enable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_enable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
- !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+ if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
(!wl->conf.rx_streaming.always &&
!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
goto out;
@@ -564,12 +558,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
goto out_sleep;
/* stop it after some time of inactivity */
- mod_timer(&wl->rx_streaming_timer,
+ mod_timer(&wlvif->rx_streaming_timer,
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
@@ -581,19 +575,20 @@ out:
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_disable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
@@ -605,8 +600,9 @@ out:
static void wl1271_rx_streaming_timer(unsigned long data)
{
- struct wl1271 *wl = (struct wl1271 *)data;
- ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl1271 *wl = wlvif->wl;
+ ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +641,7 @@ static void wl1271_conf_init(struct wl1271 *wl)
static int wl1271_plt_init(struct wl1271 *wl)
{
- struct conf_tx_ac_category *conf_ac;
- struct conf_tx_tid *conf_tid;
- int ret, i;
+ int ret;
if (wl->chip.id == CHIP_ID_1283_PG20)
ret = wl128x_cmd_general_parms(wl);
@@ -676,74 +670,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
return ret;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- ret = wl1271_acx_dco_itrim_params(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* FM WLAN coexistence */
- ret = wl1271_acx_fm_coex(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Energy detection */
- ret = wl1271_init_energy_detection(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
goto out_free_memmap;
- /* Default fragmentation threshold */
- ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
@@ -768,14 +702,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid, u8 tx_pkts)
{
bool fw_ps, single_sta;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
single_sta = (wl->active_sta_count == 1);
@@ -784,7 +716,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* packets in FW or if the STA is awake.
*/
if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_end(wl, hlid);
+ wl12xx_ps_link_end(wl, wlvif, hlid);
/*
* Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +724,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* case FW-memory congestion is not a problem.
*/
else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
- int id;
-
- /* global/broadcast "stations" are always active */
- if (hlid < WL1271_AP_STA_HLID_START)
- return true;
-
- id = hlid - WL1271_AP_STA_HLID_START;
- return test_bit(id, wl->ap_hlid_map);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct wl12xx_fw_status *status)
{
+ struct wl1271_link *lnk;
u32 cur_fw_ps_map;
u8 hlid, cnt;
@@ -825,25 +747,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
- if (!wl1271_is_active_sta(wl, hlid))
- continue;
-
- cnt = status->tx_lnk_free_pkts[hlid] -
- wl->links[hlid].prev_freed_pkts;
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
- wl->links[hlid].prev_freed_pkts =
- status->tx_lnk_free_pkts[hlid];
- wl->links[hlid].allocated_pkts -= cnt;
+ lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+ lnk->allocated_pkts -= cnt;
- wl12xx_irq_ps_regulate_link(wl, hlid,
- wl->links[hlid].allocated_pkts);
+ wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
+ lnk->allocated_pkts);
}
}
static void wl12xx_fw_status(struct wl1271 *wl,
struct wl12xx_fw_status *status)
{
+ struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
@@ -898,8 +817,9 @@ static void wl12xx_fw_status(struct wl1271 *wl,
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* for AP update num of allocated TX blocks per link and ps status */
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl12xx_irq_update_links_status(wl, status);
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ wl12xx_irq_update_links_status(wl, wlvif, status);
+ }
/* update the host-chipset time offset */
getnstimeofday(&ts);
@@ -932,7 +852,7 @@ static void wl1271_netstack_work(struct work_struct *work)
#define WL1271_IRQ_MAX_LOOPS 256
-irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
@@ -1054,7 +974,6 @@ out:
return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
@@ -1069,10 +988,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
- ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, fw_name, wl->dev);
if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
+ wl1271_error("could not get firmware %s: %d", fw_name, ret);
return ret;
}
@@ -1107,10 +1026,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
+ wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
+ ret);
return ret;
}
@@ -1217,11 +1137,13 @@ static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, recovery_work);
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
mutex_lock(&wl->mutex);
if (wl->state != WL1271_STATE_ON)
- goto out;
+ goto out_unlock;
/* Avoid a recursive recovery */
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1160,12 @@ static void wl1271_recovery_work(struct work_struct *work)
 * in the firmware during recovery. This doesn't hurt if the network is
* not encrypted.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
- wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
+ wlvif->tx_security_seq +=
+ WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ }
/* Prevent spurious TX during FW restart */
ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1176,14 @@ static void wl1271_recovery_work(struct work_struct *work)
}
/* reboot the chipset */
- __wl1271_op_remove_interface(wl, false);
+ while (!list_empty(&wl->wlvif_list)) {
+ wlvif = list_first_entry(&wl->wlvif_list,
+ struct wl12xx_vif, list);
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ __wl1271_op_remove_interface(wl, vif, false);
+ }
+ mutex_unlock(&wl->mutex);
+ wl1271_op_stop(wl->hw);
clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1262,8 +1194,8 @@ static void wl1271_recovery_work(struct work_struct *work)
* to restart the HW.
*/
ieee80211_wake_queues(wl->hw);
-
-out:
+ return;
+out_unlock:
mutex_unlock(&wl->mutex);
}
@@ -1318,7 +1250,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
/* 0. read chip id from CHIP_ID */
wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
- /* 1. check if chip id is valid */
+ /*
+ * For wl127x based devices we could use the default block
+ * size (512 bytes), but due to a bug in the sdio driver, we
+ * need to set it explicitly after the chip is powered on. To
+ * simplify the code and since the performance impact is
+ * negligible, we use the same block size for all different
+ * chip types.
+ */
+ if (!wl1271_set_block_size(wl))
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
switch (wl->chip.id) {
case CHIP_ID_1271_PG10:
@@ -1328,7 +1269,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
@@ -1336,7 +1279,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1283_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
wl->chip.id);
@@ -1344,9 +1289,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
-
- if (wl1271_set_block_size(wl))
- wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
break;
case CHIP_ID_1283_PG10:
default:
@@ -1389,8 +1331,6 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->bss_type = BSS_TYPE_STA_BSS;
-
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1422,34 @@ int wl1271_plt_stop(struct wl1271 *wl)
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct wl12xx_vif *wlvif = NULL;
unsigned long flags;
int q, mapping;
- u8 hlid = 0;
+ u8 hlid;
+
+ if (vif)
+ wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- hlid = wl12xx_tx_get_hlid_ap(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
spin_lock_irqsave(&wl->wl_lock, flags);
/* queue the packet */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- if (!wl1271_is_active_sta(wl, hlid)) {
- wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
- hlid, q);
- dev_kfree_skb(skb);
- goto out;
- }
-
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
- skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
- } else {
- skb_queue_tail(&wl->tx_queue[q], skb);
+ if (hlid == WL12XX_INVALID_LINK_ID ||
+ (wlvif && !test_bit(hlid, wlvif->links_map))) {
+ wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+ ieee80211_free_txskb(hw, skb);
+ goto out;
}
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
wl->tx_queue_count[q]++;
/*
@@ -1609,13 +1550,14 @@ static struct notifier_block wl1271_dev_notifier = {
};
#ifdef CONFIG_PM
-static int wl1271_configure_suspend_sta(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1565,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
goto out_unlock;
	/* enter psm if needed */
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
DECLARE_COMPLETION_ONSTACK(compl);
- wl->ps_compl = &compl;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ wlvif->ps_compl = &compl;
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
goto out_sleep;
@@ -1638,42 +1580,43 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
ret = wait_for_completion_timeout(
&compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
+
+ mutex_lock(&wl->mutex);
if (ret <= 0) {
wl1271_warning("couldn't enter ps mode!");
ret = -EBUSY;
- goto out;
+ goto out_cleanup;
}
- /* take mutex again, and wakeup */
- mutex_lock(&wl->mutex);
-
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
- goto out_unlock;
+ goto out_cleanup;
}
out_sleep:
wl1271_ps_elp_sleep(wl);
+out_cleanup:
+ wlvif->ps_compl = NULL;
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
-static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+static int wl1271_configure_suspend_ap(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
- ret = wl1271_acx_beacon_filter_opt(wl, true);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
wl1271_ps_elp_sleep(wl);
out_unlock:
@@ -1682,20 +1625,22 @@ out_unlock:
}
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- return wl1271_configure_suspend_sta(wl);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl1271_configure_suspend_ap(wl);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ return wl1271_configure_suspend_sta(wl, wlvif);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl1271_configure_suspend_ap(wl, wlvif);
return 0;
}
-static void wl1271_configure_resume(struct wl1271 *wl)
+static void wl1271_configure_resume(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
- bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
+ bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+ bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
if (!is_sta && !is_ap)
return;
@@ -1707,11 +1652,11 @@ static void wl1271_configure_resume(struct wl1271 *wl)
if (is_sta) {
/* exit psm if it wasn't configured */
- if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
- wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
+ wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
} else if (is_ap) {
- wl1271_acx_beacon_filter_opt(wl, false);
+ wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1668,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow || !wow->any);
wl->wow_enabled = true;
- ret = wl1271_configure_suspend(wl);
- if (ret < 0) {
- wl1271_warning("couldn't prepare device to suspend");
- return ret;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_configure_suspend(wl, wlvif);
+ if (ret < 0) {
+ wl1271_warning("couldn't prepare device to suspend");
+ return ret;
+ }
}
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1699,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_enable_interrupts(wl);
flush_work(&wl->tx_work);
- flush_delayed_work(&wl->pspoll_work);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ flush_delayed_work(&wlvif->pspoll_work);
+ }
flush_delayed_work(&wl->elp_work);
return 0;
@@ -1760,6 +1710,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
unsigned long flags;
bool run_irq_work = false;
@@ -1783,7 +1734,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
wl1271_irq(0, wl);
wl1271_enable_interrupts(wl);
}
- wl1271_configure_resume(wl);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ wl1271_configure_resume(wl, wlvif);
+ }
wl->wow_enabled = false;
return 0;
@@ -1810,20 +1763,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
static void wl1271_op_stop(struct ieee80211_hw *hw)
{
+ struct wl1271 *wl = hw->priv;
+ int i;
+
wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+ mutex_lock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ /*
+ * this must be before the cancel_work calls below, so that the work
+ * functions don't perform further work.
+ */
+ wl->state = WL1271_STATE_OFF;
+ mutex_unlock(&wl->mutex);
+
+ mutex_lock(&wl_list_mutex);
+ list_del(&wl->list);
+ mutex_unlock(&wl_list_mutex);
+
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_delayed_work_sync(&wl->scan_complete_work);
+ cancel_work_sync(&wl->netstack_work);
+ cancel_work_sync(&wl->tx_work);
+ cancel_delayed_work_sync(&wl->elp_work);
+
+ /* let's notify MAC80211 about the remaining pending TX frames */
+ wl12xx_tx_reset(wl, true);
+ mutex_lock(&wl->mutex);
+
+ wl1271_power_off(wl);
+
+ wl->band = IEEE80211_BAND_2GHZ;
+
+ wl->rx_counter = 0;
+ wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+ wl->tx_blocks_available = 0;
+ wl->tx_allocated_blocks = 0;
+ wl->tx_results_count = 0;
+ wl->tx_packets_count = 0;
+ wl->time_offset = 0;
+ wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ wl->ap_fw_ps_map = 0;
+ wl->ap_ps_map = 0;
+ wl->sched_scanning = false;
+ memset(wl->roles_map, 0, sizeof(wl->roles_map));
+ memset(wl->links_map, 0, sizeof(wl->links_map));
+ memset(wl->roc_map, 0, sizeof(wl->roc_map));
+ wl->active_sta_count = 0;
+
+ /* The system link is always allocated */
+ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
+ /*
+ * this is performed after the cancel_work calls and the associated
+ * mutex_lock, so that wl1271_op_add_interface does not accidentally
+ * get executed before all these vars have been reset.
+ */
+ wl->flags = 0;
+
+ wl->tx_blocks_freed = 0;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ wl->tx_pkts_freed[i] = 0;
+ wl->tx_allocated_pkts[i] = 0;
+ }
+
+ wl1271_debugfs_reset(wl);
+
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
+ kfree(wl->tx_res_if);
+ wl->tx_res_if = NULL;
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+ u8 policy = find_first_zero_bit(wl->rate_policies_map,
+ WL12XX_MAX_RATE_POLICIES);
+ if (policy >= WL12XX_MAX_RATE_POLICIES)
+ return -EBUSY;
+
+ __set_bit(policy, wl->rate_policies_map);
+ *idx = policy;
+ return 0;
+}
+
+static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+ if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
+ return;
+
+ __clear_bit(*idx, wl->rate_policies_map);
+ *idx = WL12XX_MAX_RATE_POLICIES;
}
-static u8 wl12xx_get_role_type(struct wl1271 *wl)
+static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- switch (wl->bss_type) {
+ switch (wlvif->bss_type) {
case BSS_TYPE_AP_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_GO;
else
return WL1271_ROLE_AP;
case BSS_TYPE_STA_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_CL;
else
return WL1271_ROLE_STA;
@@ -1832,78 +1884,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl)
return WL1271_ROLE_IBSS;
default:
- wl1271_error("invalid bss_type: %d", wl->bss_type);
+ wl1271_error("invalid bss_type: %d", wlvif->bss_type);
}
return WL12XX_INVALID_ROLE_TYPE;
}
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
- struct wl1271 *wl = hw->priv;
- struct wiphy *wiphy = hw->wiphy;
- int retries = WL1271_BOOT_RETRIES;
- int ret = 0;
- u8 role_type;
- bool booted = false;
-
- wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- ieee80211_vif_type_p2p(vif), vif->addr);
-
- mutex_lock(&wl->mutex);
- if (wl->vif) {
- wl1271_debug(DEBUG_MAC80211,
- "multiple vifs are not supported yet");
- ret = -EBUSY;
- goto out;
- }
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i;
- /*
- * in some very corner case HW recovery scenarios its possible to
- * get here before __wl1271_op_remove_interface is complete, so
- * opt out if that is the case.
- */
- if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
- ret = -EBUSY;
- goto out;
- }
+ /* clear everything but the persistent data */
+ memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_STATION:
- wl->bss_type = BSS_TYPE_STA_BSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
- wl->bss_type = BSS_TYPE_IBSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_IBSS;
break;
case NL80211_IFTYPE_P2P_GO:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_AP:
- wl->bss_type = BSS_TYPE_AP_BSS;
+ wlvif->bss_type = BSS_TYPE_AP_BSS;
break;
default:
- ret = -EOPNOTSUPP;
- goto out;
+ wlvif->bss_type = MAX_BSS_TYPE;
+ return -EOPNOTSUPP;
}
- role_type = wl12xx_get_role_type(wl);
- if (role_type == WL12XX_INVALID_ROLE_TYPE) {
- ret = -EINVAL;
- goto out;
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /* init sta/ibss data */
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ /* init ap data */
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_allocate_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
}
- memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
- if (wl->state != WL1271_STATE_OFF) {
- wl1271_error("cannot start because not in off state: %d",
- wl->state);
- ret = -EBUSY;
- goto out;
- }
+ wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+ wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+ wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
+
+ /*
+ * mac80211 configures some values globally, while we treat them
+ * per-interface. thus, on init, we have to copy them from wl
+ */
+ wlvif->band = wl->band;
+ wlvif->channel = wl->channel;
+ wlvif->power_level = wl->power_level;
+
+ INIT_WORK(&wlvif->rx_streaming_enable_work,
+ wl1271_rx_streaming_enable_work);
+ INIT_WORK(&wlvif->rx_streaming_disable_work,
+ wl1271_rx_streaming_disable_work);
+ INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
+ INIT_LIST_HEAD(&wlvif->list);
+
+ setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
+ (unsigned long) wlvif);
+ return 0;
+}
+
+static bool wl12xx_init_fw(struct wl1271 *wl)
+{
+ int retries = WL1271_BOOT_RETRIES;
+ bool booted = false;
+ struct wiphy *wiphy = wl->hw->wiphy;
+ int ret;
while (retries) {
retries--;
@@ -1915,25 +1984,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto power_off;
- if (wl->bss_type == BSS_TYPE_STA_BSS ||
- wl->bss_type == BSS_TYPE_IBSS) {
- /*
- * The device role is a special role used for
- * rx and tx frames prior to association (as
- * the STA role can get packets only from
- * its associated bssid)
- */
- ret = wl12xx_cmd_role_enable(wl,
- WL1271_ROLE_DEVICE,
- &wl->dev_role_id);
- if (ret < 0)
- goto irq_disable;
- }
-
- ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
- if (ret < 0)
- goto irq_disable;
-
ret = wl1271_hw_init(wl);
if (ret < 0)
goto irq_disable;
@@ -1964,9 +2014,6 @@ power_off:
goto out;
}
- wl->vif = vif;
- wl->state = WL1271_STATE_ON;
- set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
@@ -1984,7 +2031,110 @@ power_off:
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
+ wl->state = WL1271_STATE_ON;
out:
+ return booted;
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret = 0;
+ u8 role_type;
+ bool booted = false;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+ ieee80211_vif_type_p2p(vif), vif->addr);
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
+ if (wl->vif) {
+ wl1271_debug(DEBUG_MAC80211,
+ "multiple vifs are not supported yet");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * in some very rare corner-case HW recovery scenarios it's possible to
+ * get here before __wl1271_op_remove_interface is complete, so
+ * opt out if that is the case.
+ */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+ test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl12xx_init_vif_data(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wlvif->wl = wl;
+ role_type = wl12xx_get_role_type(wl, wlvif);
+ if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO: once the nvs issue is solved, move this block
+ * to start(), and make sure the driver is ON here.
+ */
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * we still need this in order to configure the fw
+ * while uploading the nvs
+ */
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+
+ booted = wl12xx_init_fw(wl);
+ if (!booted) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /*
+ * The device role is a special role used for
+ * rx and tx frames prior to association (as
+ * the STA role can get packets only from
+ * its associated bssid)
+ */
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ role_type, &wlvif->role_id);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_init_vif_specific(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wl->vif = vif;
+ list_add(&wlvif->list, &wl->wlvif_list);
+ set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count++;
+ else
+ wl->sta_count++;
+out:
+ wl1271_ps_elp_sleep(wl);
+out_unlock:
mutex_unlock(&wl->mutex);
mutex_lock(&wl_list_mutex);
@@ -1996,29 +2146,34 @@ out:
}
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues)
{
- int ret, i;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i, ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+ if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ return;
+
+ wl->vif = NULL;
+
/* because of hardware recovery, we may get here twice */
if (wl->state != WL1271_STATE_ON)
return;
wl1271_info("down");
- mutex_lock(&wl_list_mutex);
- list_del(&wl->list);
- mutex_unlock(&wl_list_mutex);
-
/* enable dyn ps just in case (if left on due to fw crash etc) */
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- ieee80211_enable_dyn_ps(wl->vif);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ ieee80211_enable_dyn_ps(vif);
- if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
+ wl->scan_vif == vif) {
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
@@ -2029,13 +2184,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (ret < 0)
goto deinit;
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
if (ret < 0)
goto deinit;
}
- ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
if (ret < 0)
goto deinit;
@@ -2043,120 +2198,82 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
}
deinit:
/* clear all hlids (except system_hlid) */
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_free_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
+ }
- /*
- * this must be before the cancel_work calls below, so that the work
- * functions don't perform further work.
- */
- wl->state = WL1271_STATE_OFF;
+ wl12xx_tx_reset_wlvif(wl, wlvif);
+ wl1271_free_ap_keys(wl, wlvif);
+ if (wl->last_wlvif == wlvif)
+ wl->last_wlvif = NULL;
+ list_del(&wlvif->list);
+ memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count--;
+ else
+ wl->sta_count--;
mutex_unlock(&wl->mutex);
-
- wl1271_disable_interrupts(wl);
- wl1271_flush_deferred_work(wl);
- cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->netstack_work);
- cancel_work_sync(&wl->tx_work);
- del_timer_sync(&wl->rx_streaming_timer);
- cancel_work_sync(&wl->rx_streaming_enable_work);
- cancel_work_sync(&wl->rx_streaming_disable_work);
- cancel_delayed_work_sync(&wl->pspoll_work);
- cancel_delayed_work_sync(&wl->elp_work);
+ del_timer_sync(&wlvif->rx_streaming_timer);
+ cancel_work_sync(&wlvif->rx_streaming_enable_work);
+ cancel_work_sync(&wlvif->rx_streaming_disable_work);
+ cancel_delayed_work_sync(&wlvif->pspoll_work);
mutex_lock(&wl->mutex);
-
- /* let's notify MAC80211 about the remaining pending TX frames */
- wl1271_tx_reset(wl, reset_tx_queues);
- wl1271_power_off(wl);
-
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
- wl->ssid_len = 0;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->p2p = 0;
- wl->band = IEEE80211_BAND_2GHZ;
-
- wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
- wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->tx_blocks_available = 0;
- wl->tx_allocated_blocks = 0;
- wl->tx_results_count = 0;
- wl->tx_packets_count = 0;
- wl->time_offset = 0;
- wl->session_counter = 0;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
- wl->vif = NULL;
- wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl1271_free_ap_keys(wl);
- memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
- wl->ap_fw_ps_map = 0;
- wl->ap_ps_map = 0;
- wl->sched_scanning = false;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- memset(wl->roles_map, 0, sizeof(wl->roles_map));
- memset(wl->links_map, 0, sizeof(wl->links_map));
- memset(wl->roc_map, 0, sizeof(wl->roc_map));
- wl->active_sta_count = 0;
-
- /* The system link is always allocated */
- __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
-
- /*
- * this is performed after the cancel_work calls and the associated
- * mutex_lock, so that wl1271_op_add_interface does not accidentally
- * get executed before all these vars have been reset.
- */
- wl->flags = 0;
-
- wl->tx_blocks_freed = 0;
-
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- wl->tx_pkts_freed[i] = 0;
- wl->tx_allocated_pkts[i] = 0;
- }
-
- wl1271_debugfs_reset(wl);
-
- kfree(wl->fw_status);
- wl->fw_status = NULL;
- kfree(wl->tx_res_if);
- wl->tx_res_if = NULL;
- kfree(wl->target_mem_map);
- wl->target_mem_map = NULL;
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl12xx_vif *iter;
mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF ||
+ !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ goto out;
+
/*
* wl->vif can be null here if someone shuts down the interface
* just when hardware recovery has been started.
*/
- if (wl->vif) {
- WARN_ON(wl->vif != vif);
- __wl1271_op_remove_interface(wl, true);
- }
+ wl12xx_for_each_wlvif(wl, iter) {
+ if (iter != wlvif)
+ continue;
+ __wl1271_op_remove_interface(wl, vif, true);
+ break;
+ }
+ WARN_ON(iter != wlvif);
+out:
mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->recovery_work);
}
-static int wl1271_join(struct wl1271 *wl, bool set_assoc)
+static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool set_assoc)
{
int ret;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
/*
* One of the side effects of the JOIN command is that is clears
@@ -2167,20 +2284,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* Keep the below message for now, unless it starts bothering
* users who really like to roam a lot :)
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
wl1271_info("JOIN while associated.");
if (set_assoc)
- set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+ set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
if (is_ibss)
- ret = wl12xx_cmd_role_start_ibss(wl);
+ ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
else
- ret = wl12xx_cmd_role_start_sta(wl);
+ ret = wl12xx_cmd_role_start_sta(wl, wlvif);
if (ret < 0)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -2189,19 +2306,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* the join. The acx_aid starts the keep-alive process, and the order
* of the commands below is relevant.
*/
- ret = wl1271_acx_keep_alive_mode(wl, true);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
if (ret < 0)
goto out;
- ret = wl1271_acx_aid(wl, wl->aid);
+ ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
- ret = wl1271_cmd_build_klv_null_data(wl);
+ ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif,
+ CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
goto out;
@@ -2210,34 +2328,34 @@ out:
return ret;
}
-static int wl1271_unjoin(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+ if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
wl12xx_cmd_stop_channel_switch(wl);
- ieee80211_chswitch_done(wl->vif, false);
+ ieee80211_chswitch_done(vif, false);
}
/* to stop listening to a channel, we disconnect */
- ret = wl12xx_cmd_role_stop_sta(wl);
+ ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
if (ret < 0)
goto out;
- memset(wl->bssid, 0, ETH_ALEN);
-
/* reset TX security counters on a clean disconnect */
- wl->tx_security_last_seq_lsb = 0;
- wl->tx_security_seq = 0;
+ wlvif->tx_security_last_seq_lsb = 0;
+ wlvif->tx_security_seq = 0;
out:
return ret;
}
-static void wl1271_set_band_rate(struct wl1271 *wl)
+static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- wl->basic_rate_set = wl->bitrate_masks[wl->band];
- wl->rate_set = wl->basic_rate_set;
+ wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
+ wlvif->rate_set = wlvif->basic_rate_set;
}
static bool wl12xx_is_roc(struct wl1271 *wl)
@@ -2251,27 +2369,25 @@ static bool wl12xx_is_roc(struct wl1271 *wl)
return true;
}
-static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
{
int ret;
if (idle) {
/* no need to croc if we weren't busy (e.g. during boot) */
if (wl12xx_is_roc(wl)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
- wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
goto out;
@@ -2283,11 +2399,7 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
ieee80211_sched_scan_stopped(wl->hw);
}
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
clear_bit(WL1271_FLAG_IDLE, &wl->flags);
@@ -2297,61 +2409,22 @@ out:
return ret;
}
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_conf *conf, u32 changed)
{
- struct wl1271 *wl = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- int channel, ret = 0;
- bool is_ap;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+ int channel, ret;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
- " changed 0x%x",
- channel,
- conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
- changed);
-
- /*
- * mac80211 will go to idle nearly immediately after transmitting some
- * frames, such as the deauth. To make sure those frames reach the air,
- * wait here until the TX queue is fully flushed.
- */
- if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE))
- wl1271_tx_flush(wl);
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- /* we support configuring the channel and band while off */
- if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- wl->band = conf->channel->band;
- wl->channel = channel;
- }
-
- if ((changed & IEEE80211_CONF_CHANGE_POWER))
- wl->power_level = conf->power_level;
-
- goto out;
- }
-
- is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
/* if the channel changes while joined, join again */
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
- ((wl->band != conf->channel->band) ||
- (wl->channel != channel))) {
+ ((wlvif->band != conf->channel->band) ||
+ (wlvif->channel != channel))) {
/* send all pending packets */
wl1271_tx_work_locked(wl);
- wl->band = conf->channel->band;
- wl->channel = channel;
+ wlvif->band = conf->channel->band;
+ wlvif->channel = channel;
if (!is_ap) {
/*
@@ -2360,24 +2433,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* possible rate for the band as a fixed rate for
* association frames and other control messages.
*/
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ wl1271_set_band_rate(wl, wlvif);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
wl1271_warning("rate policy for channel "
"failed %d", ret);
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags)) {
if (wl12xx_is_roc(wl)) {
/* roaming */
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
- goto out_sleep;
+ return ret;
}
- ret = wl1271_join(wl, false);
+ ret = wl1271_join(wl, wlvif, false);
if (ret < 0)
wl1271_warning("cmd join on channel "
"failed %d", ret);
@@ -2389,64 +2465,112 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
*/
if (wl12xx_is_roc(wl) &&
!(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
- goto out_sleep;
+ return ret;
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
- wl1271_warning("roc failed %d",
- ret);
+ return ret;
}
}
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
- ret = wl1271_sta_handle_idle(wl,
- conf->flags & IEEE80211_CONF_IDLE);
- if (ret < 0)
- wl1271_warning("idle mode change failed %d", ret);
- }
-
/*
* if mac80211 changes the PSM mode, make sure the mode is not
* incorrectly changed after the pspoll failure active window.
*/
if (changed & IEEE80211_CONF_CHANGE_PS)
- clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
if (conf->flags & IEEE80211_CONF_PS &&
- !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
- set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+ set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm enabled");
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
}
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm disabled");
- clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
- if (test_bit(WL1271_FLAG_PSM, &wl->flags))
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
}
- if (conf->power_level != wl->power_level) {
- ret = wl1271_acx_tx_power(wl, conf->power_level);
+ if (conf->power_level != wlvif->power_level) {
+ ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
if (ret < 0)
- goto out_sleep;
+ return ret;
+
+ wlvif->power_level = conf->power_level;
+ }
+
+ return 0;
+}
+
+static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_conf *conf = &hw->conf;
+ int channel, ret = 0;
+
+ channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
+ channel,
+ conf->flags & IEEE80211_CONF_PS ? "on" : "off",
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
+
+ /*
+ * mac80211 will go to idle nearly immediately after transmitting some
+ * frames, such as the deauth. To make sure those frames reach the air,
+ * wait here until the TX queue is fully flushed.
+ */
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_tx_flush(wl);
+
+ mutex_lock(&wl->mutex);
+
+ /* we support configuring the channel and band even while off */
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+ }
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* configure each interface */
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl12xx_config_vif(wl, wlvif, conf, changed);
+ if (ret < 0)
+ goto out_sleep;
}
out_sleep:
@@ -2509,6 +2633,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
{
struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2652,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ false,
+ NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
/*
@@ -2551,9 +2682,10 @@ out:
kfree(fp);
}
-static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 id, u8 key_type, u8 key_size,
+ const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_ap_key *ap_key;
int i;
@@ -2568,10 +2700,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
* an existing key.
*/
for (i = 0; i < MAX_NUM_KEYS; i++) {
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- if (wl->recorded_ap_keys[i]->id == id) {
+ if (wlvif->ap.recorded_keys[i]->id == id) {
wl1271_warning("trying to record key replacement");
return -EINVAL;
}
@@ -2592,21 +2724,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
- wl->recorded_ap_keys[i] = ap_key;
+ wlvif->ap.recorded_keys[i] = ap_key;
return 0;
}
-static void wl1271_free_ap_keys(struct wl1271 *wl)
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
for (i = 0; i < MAX_NUM_KEYS; i++) {
- kfree(wl->recorded_ap_keys[i]);
- wl->recorded_ap_keys[i] = NULL;
+ kfree(wlvif->ap.recorded_keys[i]);
+ wlvif->ap.recorded_keys[i] = NULL;
}
}
-static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret = 0;
struct wl1271_ap_key *key;
@@ -2614,15 +2746,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
for (i = 0; i < MAX_NUM_KEYS; i++) {
u8 hlid;
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- key = wl->recorded_ap_keys[i];
+ key = wlvif->ap.recorded_keys[i];
hlid = key->hlid;
if (hlid == WL12XX_INVALID_LINK_ID)
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
- ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
@@ -2635,23 +2767,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
}
if (wep_key_added) {
- ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
- wl->ap_bcast_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
+ wlvif->ap.bcast_hlid);
if (ret < 0)
goto out;
}
out:
- wl1271_free_ap_keys(wl);
+ wl1271_free_ap_keys(wl, wlvif);
return ret;
}
-static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
u16 tx_seq_16, struct ieee80211_sta *sta)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap) {
struct wl1271_station *wl_sta;
@@ -2661,10 +2794,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
}
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
/*
* We do not support removing keys after AP shutdown.
* Pretend we do to make mac80211 happy.
@@ -2672,12 +2805,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (action != KEY_ADD_OR_REPLACE)
return 0;
- ret = wl1271_record_ap_key(wl, id,
+ ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
} else {
- ret = wl1271_cmd_set_ap_key(wl, action,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
@@ -2718,10 +2851,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* don't remove key if hlid was already deleted */
if (action == KEY_REMOVE &&
- wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
- ret = wl1271_cmd_set_sta_key(wl, action,
+ ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
id, key_type, key_size,
key, addr, tx_seq_32,
tx_seq_16);
@@ -2731,8 +2864,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* the default WEP key needs to be configured at least once */
if (key_type == KEY_WEP) {
ret = wl12xx_cmd_set_default_wep_key(wl,
- wl->default_key,
- wl->sta_hlid);
+ wlvif->default_key,
+ wlvif->sta.hlid);
if (ret < 0)
return ret;
}
@@ -2747,6 +2880,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
@@ -2782,20 +2916,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = KEY_AES;
- key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2940,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2951,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
- ret = wl1271_set_key(wl, KEY_REMOVE,
+ ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
0, 0, sta);
@@ -2847,6 +2981,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
int ret;
u8 *ssid = NULL;
size_t len = 0;
@@ -2876,16 +3012,15 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
/* cancel ROC before scanning */
if (wl12xx_is_roc(wl)) {
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* don't allow scanning right now */
ret = -EBUSY;
goto out_sleep;
}
- wl12xx_croc(wl, wl->dev_role_id);
- wl12xx_cmd_role_stop_dev(wl);
+ wl12xx_stop_dev(wl, wlvif);
}
- ret = wl1271_scan(hw->priv, ssid, len, req);
+ ret = wl1271_scan(hw->priv, vif, ssid, len, req);
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
@@ -2921,6 +3056,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
}
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
@@ -2938,6 +3074,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_sched_scan_ies *ies)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3085,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_scan_sched_scan_config(wl, req, ies);
+ ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
if (ret < 0)
goto out_sleep;
- ret = wl1271_scan_sched_scan_start(wl);
+ ret = wl1271_scan_sched_scan_start(wl, wlvif);
if (ret < 0)
goto out_sleep;
@@ -3017,6 +3154,7 @@ out:
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret = 0;
mutex_lock(&wl->mutex);
@@ -3030,10 +3168,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
if (ret < 0)
goto out;
- ret = wl1271_acx_rts_threshold(wl, value);
- if (ret < 0)
- wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
-
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_rts_threshold(wl, wlvif, value);
+ if (ret < 0)
+ wl1271_warning("set rts threshold failed: %d", ret);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -3042,9 +3181,10 @@ out:
return ret;
}
-static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
int offset)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ssid_len;
const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
skb->len - offset);
@@ -3060,8 +3200,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
return -EINVAL;
}
- wl->ssid_len = ssid_len;
- memcpy(wl->ssid, ptr+2, ssid_len);
+ wlvif->ssid_len = ssid_len;
+ memcpy(wlvif->ssid, ptr+2, ssid_len);
return 0;
}
@@ -3096,18 +3236,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
skb_trim(skb, skb->len - len);
}
-static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
- u8 *probe_rsp_data,
- size_t probe_rsp_len,
- u32 rates)
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ieee80211_proberesp_get(wl->hw, vif);
+ if (!skb)
+ return -EOPNOTSUPP;
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ skb->data,
+ skb->len, 0,
+ rates);
+
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ u8 *probe_rsp_data,
+ size_t probe_rsp_len,
+ u32 rates)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
int ssid_ie_offset, ie_offset, templ_len;
const u8 *ptr;
/* no need to change probe response if the SSID is set correctly */
- if (wl->ssid_len > 0)
+ if (wlvif->ssid_len > 0)
return wl1271_cmd_template_set(wl,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_data,
@@ -3153,16 +3315,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
}
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
if (ret < 0) {
wl1271_warning("Set slot time failed %d", ret);
goto out;
@@ -3171,16 +3335,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_ENABLE);
else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_DISABLE);
if (ret < 0) {
wl1271_warning("Set ctsprotect failed %d", ret);
goto out;
@@ -3196,14 +3362,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret = 0;
if ((changed & BSS_CHANGED_BEACON_INT)) {
wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
- wl->beacon_int = bss_conf->beacon_int;
+ wlvif->beacon_int = bss_conf->beacon_int;
+ }
+
+ if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
+ u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
+ wl1271_debug(DEBUG_AP, "probe response updated");
+ set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+ }
}
if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3389,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
u16 tmpl_id;
- if (!beacon)
+ if (!beacon) {
+ ret = -EINVAL;
goto out;
+ }
wl1271_debug(DEBUG_MASTER, "beacon updated");
- ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ ret = wl1271_ssid_set(vif, beacon, ieoffset);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
}
- min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
CMD_TEMPL_BEACON;
ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3413,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
goto out;
}
+ /*
+ * In case we already have a probe-resp template set explicitly
+ * by userspace, don't use the beacon data.
+ */
+ if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+ goto end_bcn;
+
/* remove TIM ie from probe response */
wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
@@ -3254,7 +3438,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
if (is_ap)
- ret = wl1271_ap_set_probe_resp_tmpl(wl,
+ ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
beacon->data,
beacon->len,
min_rate);
@@ -3264,12 +3448,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
beacon->data,
beacon->len, 0,
min_rate);
+end_bcn:
dev_kfree_skb(beacon);
if (ret < 0)
goto out;
}
out:
+ if (ret != 0)
+ wl1271_error("beacon info change failed: %d", ret);
return ret;
}
@@ -3279,23 +3466,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if ((changed & BSS_CHANGED_BASIC_RATES)) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate = wl1271_tx_min_rate_get(wl,
- wl->basic_rate_set);
+ wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+ wlvif->band);
+ wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
- ret = wl1271_init_ap_rates(wl);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0) {
wl1271_error("AP rate policy change failed %d", ret);
goto out;
}
- ret = wl1271_ap_init_templates(wl);
+ ret = wl1271_ap_init_templates(wl, vif);
if (ret < 0)
goto out;
}
@@ -3306,38 +3494,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
if (bss_conf->enable_beacon) {
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_start_ap(wl);
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_start_ap(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_ap_init_hwenc(wl);
+ ret = wl1271_ap_init_hwenc(wl, wlvif);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
wl1271_debug(DEBUG_AP, "started AP");
}
} else {
- if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_stop_ap(wl);
+ if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
+ clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
+ &wlvif->flags);
wl1271_debug(DEBUG_AP, "stopped AP");
}
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3545,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool do_join = false, set_assoc = false;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
bool ibss_joined = false;
u32 sta_rate_set = 0;
int ret;
@@ -3373,14 +3564,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_IBSS) {
if (bss_conf->ibss_joined) {
- set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+ set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
- if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
- &wl->flags)) {
- wl1271_unjoin(wl);
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
+ &wlvif->flags)) {
+ wl1271_unjoin(wl, wlvif);
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3396,46 +3586,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
- if (bss_conf->enable_beacon)
- wl->set_bss_type = BSS_TYPE_IBSS;
- else
- wl->set_bss_type = BSS_TYPE_STA_BSS;
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE) {
+ ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+ if (ret < 0)
+ wl1271_warning("idle mode change failed %d", ret);
+ }
+
if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
- ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+ ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
bss_conf->cqm_rssi_thold,
bss_conf->cqm_rssi_hyst);
if (ret < 0)
goto out;
- wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if ((changed & BSS_CHANGED_BSSID) &&
- /*
- * Now we know the correct bssid, so we send a new join command
- * and enable the BSSID filter
- */
- memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
- if (!is_zero_ether_addr(wl->bssid)) {
- ret = wl1271_cmd_build_null_data(wl);
+ if (changed & BSS_CHANGED_BSSID)
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ ret = wl12xx_cmd_build_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_build_qos_null_data(wl);
+ ret = wl1271_build_qos_null_data(wl, vif);
if (ret < 0)
goto out;
/* Need to update the BSSID (for filtering etc) */
do_join = true;
}
- }
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
rcu_read_lock();
@@ -3459,26 +3643,28 @@ sta_not_found:
if (bss_conf->assoc) {
u32 rates;
int ieoffset;
- wl->aid = bss_conf->aid;
+ wlvif->aid = bss_conf->aid;
set_assoc = true;
- wl->ps_poll_failures = 0;
+ wlvif->ps_poll_failures = 0;
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
*/
rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
if (sta_rate_set)
- wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
sta_rate_set,
- wl->band);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->band);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
@@ -3488,53 +3674,56 @@ sta_not_found:
* updates it by itself when the first beacon is
* received after a join.
*/
- ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
+ ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
/*
* Get a template for hardware connection maintenance
*/
- dev_kfree_skb(wl->probereq);
- wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+ wlvif,
+ NULL);
ieoffset = offsetof(struct ieee80211_mgmt,
u.probe_req.variable);
- wl1271_ssid_set(wl, wl->probereq, ieoffset);
+ wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
/* enable the connection monitoring feature */
- ret = wl1271_acx_conn_monit_params(wl, true);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
if (ret < 0)
goto out;
} else {
/* use defaults when not associated */
bool was_assoc =
- !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
- &wl->flags);
+ !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags);
bool was_ifup =
- !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
- &wl->flags);
- wl->aid = 0;
+ !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
+ &wlvif->flags);
+ wlvif->aid = 0;
/* free probe-request template */
- dev_kfree_skb(wl->probereq);
- wl->probereq = NULL;
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = NULL;
/* re-enable dynamic ps - just in case */
- ieee80211_enable_dyn_ps(wl->vif);
+ ieee80211_enable_dyn_ps(vif);
/* revert back to minimum rates for the current band */
- wl1271_set_band_rate(wl);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
/* disable connection monitor features */
- ret = wl1271_acx_conn_monit_params(wl, false);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
/* Disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
goto out;
@@ -3546,7 +3735,7 @@ sta_not_found:
* no IF_OPER_UP notification.
*/
if (!was_ifup) {
- ret = wl12xx_croc(wl, wl->role_id);
+ ret = wl12xx_croc(wl, wlvif->role_id);
if (ret < 0)
goto out;
}
@@ -3555,17 +3744,16 @@ sta_not_found:
* roaming on the same channel. until we will
* have a better flow...)
*/
- if (test_bit(wl->dev_role_id, wl->roc_map)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
goto out;
}
- wl1271_unjoin(wl);
- if (!(conf_flags & IEEE80211_CONF_IDLE)) {
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
- }
+ wl1271_unjoin(wl, wlvif);
+ if (!(conf_flags & IEEE80211_CONF_IDLE))
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3576,27 +3764,28 @@ sta_not_found:
if (bss_conf->ibss_joined) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
/* by default, use 11b + OFDM rates */
- wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+ WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
if (bss_conf->arp_addr_cnt == 1 &&
bss_conf->arp_filter_enabled) {
@@ -3606,24 +3795,24 @@ sta_not_found:
* isn't being set (when sending), so we have to
* reconfigure the template upon every ip change.
*/
- ret = wl1271_cmd_build_arp_rsp(wl, addr);
+ ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
goto out;
}
- ret = wl1271_acx_arp_ip_filter(wl,
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif,
ACX_ARP_FILTER_ARP_FILTERING,
addr);
} else
- ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
if (ret < 0)
goto out;
}
if (do_join) {
- ret = wl1271_join(wl, set_assoc);
+ ret = wl1271_join(wl, wlvif, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out;
@@ -3631,35 +3820,31 @@ sta_not_found:
/* ROC until connected (after EAPOL exchange) */
if (!is_ibss) {
- ret = wl12xx_roc(wl, wl->role_id);
+ ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
if (ret < 0)
goto out;
- wl1271_check_operstate(wl,
+ wl1271_check_operstate(wl, wlvif,
ieee80211_get_operstate(vif));
}
/*
* stop device role if started (we might already be in
* STA role). TODO: make it better.
*/
- if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ if (wlvif->dev_role_id != WL12XX_INVALID_ROLE_ID) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
/* If we want to go in PSM but we're not there yet */
- if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
- !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
+ !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
enum wl1271_cmd_ps_mode mode;
mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, mode,
- wl->basic_rate,
+ ret = wl1271_ps_set_mode(wl, wlvif, mode,
+ wlvif->basic_rate,
true);
if (ret < 0)
goto out;
@@ -3673,7 +3858,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
true,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap true failed %d",
ret);
@@ -3685,7 +3870,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
false,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap false failed %d",
ret);
@@ -3697,7 +3882,7 @@ sta_not_found:
/* Handle HT information change. Done after join. */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3900,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changed)
{
struct wl1271 *wl = hw->priv;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3912,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3746,6 +3935,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ps_scheme;
int ret = 0;
@@ -3792,13 +3982,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
	 * the txop is configured in units of 32us by mac80211,
	 * we need it in us
*/
- ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
- ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4051,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static int wl1271_allocate_sta(struct wl1271 *wl,
- struct ieee80211_sta *sta,
- u8 *hlid)
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
- int id;
+ int ret;
+
- id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
- if (id >= AP_MAX_STATIONS) {
+ if (wl->active_sta_count >= AP_MAX_STATIONS) {
		wl1271_warning("could not allocate HLID - too many stations");
return -EBUSY;
}
wl_sta = (struct wl1271_station *)sta->drv_priv;
- set_bit(id, wl->ap_hlid_map);
- wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
- *hlid = wl_sta->hlid;
+ ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
+ if (ret < 0) {
+ wl1271_warning("could not allocate HLID - too many links");
+ return -EBUSY;
+ }
+
+ set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
wl->active_sta_count++;
return 0;
}
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
- int id = hlid - WL1271_AP_STA_HLID_START;
-
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
return;
- if (!test_bit(id, wl->ap_hlid_map))
- return;
-
- clear_bit(id, wl->ap_hlid_map);
+ clear_bit(hlid, wlvif->ap.sta_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
wl->links[hlid].ba_bitmap = 0;
wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ wl12xx_free_link(wl, wlvif, &hlid);
wl->active_sta_count--;
}
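
The allocate/free path above replaces the per-device ap_hlid_map with a per-vif sta_hlid_map plus shared wl12xx_allocate_link()/wl12xx_free_link() helpers. The generic bitmap-allocator idiom behind such helpers looks roughly like the sketch below; the EXAMPLE_* names are placeholders and the real wl12xx helpers track additional per-link state.

/* Generic bitmap ID allocator sketch; EXAMPLE_* names are placeholders,
 * not wl12xx code. */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MAX_LINKS	12
#define EXAMPLE_INVALID_LINK	0xff

static int example_allocate_link(unsigned long *map, u8 *hlid)
{
	unsigned long id = find_first_zero_bit(map, EXAMPLE_MAX_LINKS);

	if (id >= EXAMPLE_MAX_LINKS)
		return -EBUSY;

	__set_bit(id, map);
	*hlid = id;
	return 0;
}

static void example_free_link(unsigned long *map, u8 *hlid)
{
	__clear_bit(*hlid, map);
	*hlid = EXAMPLE_INVALID_LINK;
}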
@@ -3906,6 +4096,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271_station *wl_sta;
int ret = 0;
u8 hlid;
@@ -3914,20 +4106,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
- ret = wl1271_allocate_sta(wl, sta, &hlid);
+ ret = wl1271_allocate_sta(wl, wlvif, sta);
if (ret < 0)
goto out;
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
- ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+ ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
if (ret < 0)
goto out_sleep;
@@ -3944,7 +4139,7 @@ out_sleep:
out_free_sta:
if (ret < 0)
- wl1271_free_sta(wl, hlid);
+ wl1271_free_sta(wl, wlvif, hlid);
out:
mutex_unlock(&wl->mutex);
@@ -3956,6 +4151,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
int ret = 0, id;
@@ -3964,14 +4160,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
wl_sta = (struct wl1271_station *)sta->drv_priv;
- id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
- if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ id = wl_sta->hlid;
+ if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4178,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- wl1271_free_sta(wl, wl_sta->hlid);
+ wl1271_free_sta(wl, wlvif, wl_sta->hlid);
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4195,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
u8 buf_size)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u8 hlid, *ba_bitmap;
@@ -4016,10 +4213,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
goto out;
}
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- hlid = wl->sta_hlid;
- ba_bitmap = &wl->ba_rx_bitmap;
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+ hlid = wlvif->sta.hlid;
+ ba_bitmap = &wlvif->sta.ba_rx_bitmap;
+ } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4236,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (!wl->ba_support || !wl->ba_allowed) {
+ if (!wlvif->ba_support || !wlvif->ba_allowed) {
ret = -ENOTSUPP;
break;
}
@@ -4108,8 +4305,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
- int i;
+ int i, ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4316,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
- wl->bitrate_masks[i] =
+ wlvif->bitrate_masks[i] =
wl1271_tx_enabled_rates_get(wl,
mask->control[i].legacy,
i);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+
+ wl1271_ps_elp_sleep(wl);
+ }
+out:
mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *ch_switch)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4356,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
- mutex_unlock(&wl->mutex);
- ieee80211_chswitch_done(wl->vif, false);
- return;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_chswitch_done(vif, false);
+ }
+ goto out;
}
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+ /* TODO: change mac80211 to pass vif as param */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl12xx_cmd_channel_switch(wl, ch_switch);
- if (!ret)
- set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+ if (!ret)
+ set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+ }
wl1271_ps_elp_sleep(wl);
@@ -4170,10 +4393,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
/* packets are considered pending if in the TX queue or the FW */
ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
-
- /* the above is appropriate for STA mode for PS purposes */
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
-
out:
mutex_unlock(&wl->mutex);
@@ -4604,7 +4823,7 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
-int wl1271_register_hw(struct wl1271 *wl)
+static int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -4645,9 +4864,8 @@ int wl1271_register_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_register_hw);
-void wl1271_unregister_hw(struct wl1271 *wl)
+static void wl1271_unregister_hw(struct wl1271 *wl)
{
if (wl->state == WL1271_STATE_PLT)
__wl1271_plt_stop(wl);
@@ -4657,9 +4875,8 @@ void wl1271_unregister_hw(struct wl1271 *wl)
wl->mac80211_registered = false;
}
-EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
-int wl1271_init_ieee80211(struct wl1271 *wl)
+static int wl1271_init_ieee80211(struct wl1271 *wl)
{
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4953,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
- SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ /* the FW answers probe-requests in AP-mode */
+ wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ wl->hw->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ SET_IEEE80211_DEV(wl->hw, wl->dev);
wl->hw->sta_data_size = sizeof(struct wl1271_station);
+ wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
wl->hw->max_rx_aggregation_subframes = 8;
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
#define WL1271_DEFAULT_CHANNEL 0
-struct ieee80211_hw *wl1271_alloc_hw(void)
+static struct ieee80211_hw *wl1271_alloc_hw(void)
{
struct ieee80211_hw *hw;
- struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+ BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
@@ -4765,41 +4988,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_hw_alloc;
}
- plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
- if (!plat_dev) {
- wl1271_error("could not allocate platform_device");
- ret = -ENOMEM;
- goto err_plat_alloc;
- }
-
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
INIT_LIST_HEAD(&wl->list);
+ INIT_LIST_HEAD(&wl->wlvif_list);
wl->hw = hw;
- wl->plat_dev = plat_dev;
-
- for (i = 0; i < NUM_TX_QUEUES; i++)
- skb_queue_head_init(&wl->tx_queue[i]);
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < AP_MAX_LINKS; j++)
+ for (j = 0; j < WL12XX_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
- INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
- INIT_WORK(&wl->rx_streaming_enable_work,
- wl1271_rx_streaming_enable_work);
- INIT_WORK(&wl->rx_streaming_disable_work,
- wl1271_rx_streaming_disable_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -4808,41 +5016,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
}
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
- wl->default_key = 0;
wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->last_tx_hlid = 0;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
wl->platform_quirks = 0;
wl->sched_scanning = false;
- wl->tx_security_seq = 0;
- wl->tx_security_last_seq_lsb = 0;
wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
wl->system_hlid = WL12XX_SYSTEM_HLID;
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->session_counter = 0;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
wl->active_sta_count = 0;
- setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wl);
wl->fwlog_size = 0;
init_waitqueue_head(&wl->fwlog_waitq);
@@ -4860,8 +5048,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5069,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_dummy_packet;
}
- /* Register platform device */
- ret = platform_device_register(wl->plat_dev);
- if (ret) {
- wl1271_error("couldn't register platform device");
- goto err_fwlog;
- }
- dev_set_drvdata(&wl->plat_dev->dev, wl);
-
- /* Create sysfs file to control bt coex state */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file bt_coex_state");
- goto err_platform;
- }
-
- /* Create sysfs file to get HW PG version */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file hw_pg_ver");
- goto err_bt_coex_state;
- }
-
- /* Create sysfs file for the FW log */
- ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file fwlog");
- goto err_hw_pg_ver;
- }
-
return hw;
-err_hw_pg_ver:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-
-err_bt_coex_state:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-
-err_platform:
- platform_device_unregister(wl->plat_dev);
-
-err_fwlog:
- free_page((unsigned long)wl->fwlog);
-
err_dummy_packet:
dev_kfree_skb(wl->dummy_packet);
@@ -4937,18 +5082,14 @@ err_wq:
err_hw:
wl1271_debugfs_exit(wl);
- kfree(plat_dev);
-
-err_plat_alloc:
ieee80211_free_hw(hw);
err_hw_alloc:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
-int wl1271_free_hw(struct wl1271 *wl)
+static int wl1271_free_hw(struct wl1271 *wl)
{
/* Unblock any fwlog readers */
mutex_lock(&wl->mutex);
@@ -4956,17 +5097,15 @@ int wl1271_free_hw(struct wl1271 *wl)
wake_up_interruptible_all(&wl->fwlog_waitq);
mutex_unlock(&wl->mutex);
- device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+ device_remove_bin_file(wl->dev, &fwlog_attr);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- platform_device_unregister(wl->plat_dev);
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf,
get_order(WL1271_AGGR_BUFFER_SIZE));
- kfree(wl->plat_dev);
wl1271_debugfs_exit(wl);
@@ -4983,7 +5122,174 @@ int wl1271_free_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_free_hw);
+
+static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
+{
+ struct wl1271 *wl = cookie;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+ /* don't enqueue a work right now. mark it as pending */
+ set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+ wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+ disable_irq_nosync(wl->irq);
+ pm_wakeup_event(wl->dev, 0);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+ struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ unsigned long irqflags;
+ int ret = -ENODEV;
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ wl->irq = platform_get_irq(pdev, 0);
+ wl->ref_clock = pdata->board_ref_clock;
+ wl->tcxo_clock = pdata->board_tcxo_clock;
+ wl->platform_quirks = pdata->platform_quirks;
+ wl->set_power = pdata->set_power;
+ wl->dev = &pdev->dev;
+ wl->if_ops = pdata->ops;
+
+ platform_set_drvdata(pdev, wl);
+
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ irqflags = IRQF_TRIGGER_RISING;
+ else
+ irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+
+ ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+ irqflags,
+ pdev->name, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free_hw;
+ }
+
+ ret = enable_irq_wake(wl->irq);
+ if (!ret) {
+ wl->irq_wake_enabled = true;
+ device_init_wakeup(wl->dev, 1);
+ if (pdata->pwr_in_suspend)
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+
+ }
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto out_irq;
+ }
+
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto out_bt_coex_state;
+ }
+
+ /* Create sysfs file for the FW log */
+ ret = device_create_bin_file(wl->dev, &fwlog_attr);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file fwlog");
+ goto out_hw_pg_ver;
+ }
+
+ return 0;
+
+out_hw_pg_ver:
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out_irq:
+ free_irq(wl->irq, wl);
+
+out_free_hw:
+ wl1271_free_hw(wl);
+
+out:
+ return ret;
+}
+
+static int __devexit wl12xx_remove(struct platform_device *pdev)
+{
+ struct wl1271 *wl = platform_get_drvdata(pdev);
+
+ if (wl->irq_wake_enabled) {
+ device_init_wakeup(wl->dev, 0);
+ disable_irq_wake(wl->irq);
+ }
+ wl1271_unregister_hw(wl);
+ free_irq(wl->irq, wl);
+ wl1271_free_hw(wl);
+
+ return 0;
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+ { "wl12xx", 0 },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+ .probe = wl12xx_probe,
+ .remove = __devexit_p(wl12xx_remove),
+ .id_table = wl12xx_id_table,
+ .driver = {
+ .name = "wl12xx_driver",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init wl12xx_init(void)
+{
+ return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+ platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
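
The main.c changes above stop the core from registering its own platform device: it is now a platform driver (wl12xx_probe/wl12xx_remove) bound to a "wl12xx" device created by the bus glue, with the interrupt path split into a hard IRQ that only completes the ELP wakeup and a threaded handler that does the bus I/O. A minimal sketch of that threaded-IRQ platform-driver shape follows; the example_* names are hypothetical and the handler bodies are placeholders, not the driver's actual logic.

/* Minimal threaded-IRQ platform driver sketch; example_* names are
 * hypothetical placeholders, not wl12xx code. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t example_hardirq(int irq, void *cookie)
{
	/* keep hard-IRQ work minimal; defer the rest to the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread(int irq, void *cookie)
{
	/* bus I/O and queue processing would happen here */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return request_threaded_irq(irq, example_hardirq, example_thread,
				    IRQF_ONESHOT, pdev->name, pdev);
}

static int example_remove(struct platform_device *pdev)
{
	free_irq(platform_get_irq(pdev, 0), pdev);
	return 0;
}

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,
	.driver = {
		.name  = "example",
		.owner = THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

Registration follows the same platform_driver_register()/unregister() pattern as the wl12xx_init()/wl12xx_exit() hunks above.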
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index c15ebf2efd4..a7a11088dd3 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -25,6 +25,7 @@
#include "ps.h"
#include "io.h"
#include "tx.h"
+#include "debug.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,15 @@ void wl1271_elp_work(struct work_struct *work)
if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
goto out;
- if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
- (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
goto out;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ !test_bit(WL1271_FLAG_IDLE, &wl->flags))
+ goto out;
+ }
+
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +71,17 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
+
/* we shouldn't get consecutive sleep requests */
if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
return;
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags))
- return;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ !test_bit(WL1271_FLAG_IDLE, &wl->flags))
+ return;
+ }
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -143,8 +153,8 @@ out:
return 0;
}
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send)
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
{
int ret;
@@ -152,39 +162,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_POWER_SAVE_MODE:
wl1271_debug(DEBUG_PSM, "entering psm");
- ret = wl1271_acx_wake_up_conditions(wl);
+ ret = wl1271_acx_wake_up_conditions(wl, wlvif);
if (ret < 0) {
wl1271_error("couldn't set wake up conditions");
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
- set_bit(WL1271_FLAG_PSM, &wl->flags);
+ set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if (wl->band == IEEE80211_BAND_2GHZ) {
- ret = wl1271_acx_bet_enable(wl, false);
+ if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
return ret;
}
- /* disable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
- clear_bit(WL1271_FLAG_PSM, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
}
@@ -223,9 +228,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
wl1271_handle_tx_low_watermark(wl);
}
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (test_bit(hlid, &wl->ap_ps_map))
return;
@@ -235,7 +242,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
clean_queues);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for starting ps",
wl->links[hlid].addr);
@@ -253,9 +260,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
__set_bit(hlid, &wl->ap_ps_map);
}
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (!test_bit(hlid, &wl->ap_ps_map))
return;
@@ -265,7 +273,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
__clear_bit(hlid, &wl->ap_ps_map);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for ending ps",
wl->links[hlid].addr);
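
The ps.c hunks make power-save state per virtual interface: ELP is entered only once every wlvif is in PSM (or the device is idle), and station lookups now use the vif recovered from the wlvif instead of the single wl->vif pointer. The sketch below shows the usual drv_priv round trip behind such conversions; it assumes the helpers are drv_priv/container_of based, which may not match the driver exactly, and the example_* names are invented.

/* Sketch of a vif <-> private-data round trip (assumption: the wl12xx
 * helpers work roughly like this; example_* names are invented). */
#include <linux/kernel.h>
#include <net/mac80211.h>

struct example_vif_priv {
	unsigned long flags;
	u8 role_id;
};

static inline struct example_vif_priv *
example_vif_to_priv(struct ieee80211_vif *vif)
{
	return (struct example_vif_priv *)vif->drv_priv;
}

static inline struct ieee80211_vif *
example_priv_to_vif(struct example_vif_priv *priv)
{
	return container_of((void *)priv, struct ieee80211_vif, drv_priv);
}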
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 25eb9bc9b62..a12052f0202 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -27,13 +27,14 @@
#include "wl12xx.h"
#include "acx.h"
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send);
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues);
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#define WL1271_PS_COMPLETE_TIMEOUT 500
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 3f570f39758..df34d5977b9 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -408,7 +408,7 @@
/* Firmware image load chunk size */
-#define CHUNK_SIZE 512
+#define CHUNK_SIZE 16384
/* Firmware image header size */
#define FW_HDR_SIZE 8
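
The reg.h change raises the firmware download chunk from 512 bytes to 16 KiB, so the image is pushed over the bus in far fewer transactions. A generic sketch of a chunked download loop is below; it is illustrative only (the real logic lives in boot.c and also handles memory partitions), and the example_* names are placeholders.

/* Illustrative chunked-download loop; not the driver's boot.c code. */
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_CHUNK_SIZE 16384

static void example_download(void (*write_block)(u32 addr, const void *buf,
						  size_t len),
			     u32 dest, const u8 *fw, size_t len)
{
	size_t offset;

	for (offset = 0; offset < len; offset += EXAMPLE_CHUNK_SIZE) {
		size_t chunk = min_t(size_t, EXAMPLE_CHUNK_SIZE, len - offset);

		write_block(dest + offset, fw + offset, chunk);
	}
}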
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index dee4cfe9ccc..4fbd2a722ff 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -25,9 +25,11 @@
#include <linux/sched.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "rx.h"
+#include "tx.h"
#include "io.h"
static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
- bool unaligned)
+ bool unaligned, u8 *hlid)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
@@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
* payload aligned to 4 bytes.
*/
memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+ *hlid = desc->hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
- wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
+ wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
skb->len - desc->pad_len,
beacon ? "beacon" : "",
- seq_num);
+ seq_num, *hlid);
skb_trim(skb, skb->len - desc->pad_len);
@@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
u32 mem_block;
u32 pkt_length;
u32 pkt_offset;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- bool had_data = false;
+ u8 hlid;
bool unaligned = false;
while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,15 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
*/
if (wl1271_rx_handle_data(wl,
wl->aggr_buf + pkt_offset,
- pkt_length, unaligned) == 1)
- had_data = true;
+ pkt_length, unaligned,
+ &hlid) == 1) {
+ if (hlid < WL12XX_MAX_LINKS)
+ __set_bit(hlid, active_hlids);
+ else
+ WARN(1,
+ "hlid exceeded WL12XX_MAX_LINKS "
+ "(%d)\n", hlid);
+ }
wl->rx_counter++;
drv_rx_counter++;
@@ -270,17 +280,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* restart rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
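
Instead of a single had_data flag, the RX path above records which links actually received frames in an active_hlids bitmap and hands it to wl12xx_rearm_rx_streaming(). The sketch below shows the bitmap idiom in isolation; the EXAMPLE_* names are placeholders and the re-arm body is elided.

/* Sketch of a per-link activity bitmap; EXAMPLE_* names are placeholders. */
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/types.h>

#define EXAMPLE_MAX_LINKS 12

static void example_mark_active(unsigned long *active, u8 hlid)
{
	if (hlid < EXAMPLE_MAX_LINKS)
		__set_bit(hlid, active);
}

static void example_rearm(unsigned long *active)
{
	int hlid;

	for_each_set_bit(hlid, active, EXAMPLE_MAX_LINKS) {
		/* restart rx streaming for this link */
	}
}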
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index fc29c671cf3..8599dab1fe2 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -24,6 +24,7 @@
#include <linux/ieee80211.h>
#include "wl12xx.h"
+#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
@@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
bool is_sta, is_ibss;
@@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
+ vif = wl->scan_vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
+ wl->scan_vif = NULL;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
- wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
/* return to ROC if needed */
- is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
- is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
- if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
- (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
- !test_bit(wl->dev_role_id, wl->roc_map)) {
+ is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
+ is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
+ if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+ (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
+ !test_bit(wlvif->dev_role_id, wl->roc_map)) {
/* restore remain on channel */
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ wl12xx_start_dev(wl, wlvif);
}
wl1271_ps_elp_sleep(wl);
@@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
#define WL1271_NOTHING_TO_SCAN 1
-static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
- bool passive, u32 basic_rate)
+static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ bool passive, u32 basic_rate)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_cmd_scan *cmd;
struct wl1271_cmd_trigger_scan_to *trigger;
int ret;
@@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+ if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
- cmd->params.role_id = wl->role_id;
+ cmd->params.role_id = wlvif->role_id;
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
- cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.tid_trigger = 0;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
- memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->addr, vif->addr, ETH_ALEN);
- ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
- wl->scan.req->ie, wl->scan.req->ie_len,
- band);
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
+ wl->scan.ssid_len, wl->scan.req->ie,
+ wl->scan.req->ie_len, band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -241,11 +248,12 @@ out:
return ret;
}
-void wl1271_scan_stm(struct wl1271 *wl)
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
enum ieee80211_band band;
- u32 rate;
+ u32 rate, mask;
switch (wl->scan.state) {
case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl)
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
if (wl->enable_11a)
wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
else
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
@@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl)
}
}
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req)
{
/*
@@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
wl->scan.ssid_len = 0;
}
+ wl->scan_vif = vif;
wl->scan.req = req;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
@@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
return 0;
}
@@ -550,6 +572,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
* so they're used in probe requests.
*/
for (i = 0; i < req->n_ssids; i++) {
+ if (!req->ssids[i].ssid_len)
+ continue;
+
for (j = 0; j < cmd->n_ssids; j++)
if (!memcmp(req->ssids[i].ssid,
cmd->ssids[j].ssid,
@@ -585,6 +610,7 @@ out:
}
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
{
@@ -631,7 +657,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[0]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_2GHZ],
ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +669,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[1]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ],
@@ -667,14 +693,14 @@ out:
return ret;
}
-int wl1271_scan_sched_scan_start(struct wl1271 *wl)
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_start *start;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
- if (wl->bss_type != BSS_TYPE_STA_BSS)
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 92115156522..a7ed43dc08c 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -26,18 +26,20 @@
#include "wl12xx.h"
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl);
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
void wl1271_scan_complete_work(struct work_struct *work);
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
-int wl1271_scan_sched_scan_start(struct wl1271 *wl);
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
void wl1271_scan_sched_scan_results(struct wl1271 *wl);
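
One detail worth calling out from the scan.c hunks: for no_cck scan requests the state machine now strips the CCK rates from the per-vif bitrate mask and falls back to a P2P basic mask when nothing is left. The sketch below isolates that adjustment; the EXAMPLE_* masks stand in for the driver's CONF_TX_* constants and are not the real values.

/* Sketch of the "no CCK" rate-mask adjustment; EXAMPLE_* masks are
 * placeholders for the driver's CONF_TX_* constants. */
#include <linux/types.h>

#define EXAMPLE_CCK_RATES	0x0000000f	/* 1, 2, 5.5, 11 Mbps bits */
#define EXAMPLE_BASIC_P2P	0x00000060	/* e.g. 6 and 12 Mbps bits */

static u32 example_scan_rate_mask(u32 mask, bool no_cck)
{
	if (no_cck) {
		mask &= ~EXAMPLE_CCK_RATES;
		if (!mask)
			mask = EXAMPLE_BASIC_P2P;
	}
	return mask;
}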
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 516a8980723..468a50553fa 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
@@ -44,107 +45,67 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+struct wl12xx_sdio_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
+
static const struct sdio_device_id wl1271_devices[] __devinitconst = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1271_devices);
-static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
-{
- sdio_claim_host(wl->if_priv);
- sdio_set_block_size(wl->if_priv, blksz);
- sdio_release_host(wl->if_priv);
-}
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
+static void wl1271_sdio_set_block_size(struct device *child,
+ unsigned int blksz)
{
- struct wl1271 *wl = cookie;
- unsigned long flags;
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
-
- if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
- /* don't enqueue a work right now. mark it as pending */
- set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
- wl1271_debug(DEBUG_IRQ, "should not enqueue work");
- disable_irq_nosync(wl->irq);
- pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
+ sdio_claim_host(func);
+ sdio_set_block_size(func, blksz);
+ sdio_release_host(func);
}
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
if (fixed)
ret = sdio_readsb(func, buf, addr, len);
else
ret = sdio_memcpy_fromio(func, buf, addr, len);
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
+ addr, len);
}
if (ret)
- wl1271_error("sdio read failed (%d)", ret);
+ dev_err(child->parent, "sdio read failed (%d)\n", ret);
}
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
+ addr, len);
if (fixed)
ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
}
if (ret)
- wl1271_error("sdio write failed (%d)", ret);
+ dev_err(child->parent, "sdio write failed (%d)\n", ret);
}
-static int wl1271_sdio_power_on(struct wl1271 *wl)
+static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
/* If enabled, tell runtime PM not to power off the card */
if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@ out:
return ret;
}
-static int wl1271_sdio_power_off(struct wl1271 *wl)
+static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
sdio_disable_func(func);
sdio_release_host(func);
@@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
return ret;
}
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+static int wl12xx_sdio_set_power(struct device *child, bool enable)
{
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+
if (enable)
- return wl1271_sdio_power_on(wl);
+ return wl12xx_sdio_power_on(glue);
else
- return wl1271_sdio_power_off(wl);
+ return wl12xx_sdio_power_off(glue);
}
static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
+ .read = wl12xx_sdio_raw_read,
+ .write = wl12xx_sdio_raw_write,
+ .power = wl12xx_sdio_set_power,
.set_block_size = wl1271_sdio_set_block_size,
};
static int __devinit wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct ieee80211_hw *hw;
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- unsigned long irqflags;
+ struct wl12xx_platform_data *wlan_data;
+ struct wl12xx_sdio_glue *glue;
+ struct resource res[1];
mmc_pm_flag_t mmcflags;
- int ret;
+ int ret = -ENOMEM;
/* We are only able to handle the wlan function */
if (func->num != 0x02)
return -ENODEV;
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&func->dev, "can't allocate glue\n");
+ goto out;
+ }
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
+ glue->dev = &func->dev;
/* Grab access to FN0 for ELP reg. */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wlan_data = wl12xx_get_platform_data();
if (IS_ERR(wlan_data)) {
ret = PTR_ERR(wlan_data);
- wl1271_error("missing wlan platform data: %d", ret);
- goto out_free;
+ dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+ goto out_free_glue;
}
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
- wl->platform_quirks = wlan_data->platform_quirks;
+ /* if sdio can keep power while host is suspended, enable wow */
+ mmcflags = sdio_get_host_pm_caps(func);
+ dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
-
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
- }
+ if (mmcflags & MMC_PM_KEEP_POWER)
+ wlan_data->pwr_in_suspend = true;
+
+ wlan_data->ops = &sdio_ops;
- ret = enable_irq_wake(wl->irq);
- if (!ret) {
- wl->irq_wake_enabled = true;
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
+ sdio_set_drvdata(func, glue);
- /* if sdio can keep power while host is suspended, enable wow */
- mmcflags = sdio_get_host_pm_caps(func);
- wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
+ /* Tell PM core that we don't need the card to be powered now */
+ pm_runtime_put_noidle(&func->dev);
- if (mmcflags & MMC_PM_KEEP_POWER)
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- disable_irq(wl->irq);
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ glue->core->dev.parent = &func->dev;
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ memset(res, 0x00, sizeof(res));
- sdio_set_drvdata(func, wl);
+ res[0].start = wlan_data->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- /* Tell PM core that we don't need the card to be powered now */
- pm_runtime_put_noidle(&func->dev);
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
+ }
+ ret = platform_device_add_data(glue->core, wlan_data,
+ sizeof(*wlan_data));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
+
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't add platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
+out_dev_put:
+ platform_device_put(glue->core);
- out_free:
- wl1271_free_hw(wl);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static void __devexit wl1271_remove(struct sdio_func *func)
{
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
/* Undo decrement done above in wl1271_probe */
pm_runtime_get_noresume(&func->dev);
- wl1271_unregister_hw(wl);
- if (wl->irq_wake_enabled) {
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
- disable_irq_wake(wl->irq);
- }
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
}
#ifdef CONFIG_PM
@@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev)
/* Tell MMC/SDIO core it's OK to power down the card
* (if it isn't already), but not to remove it completely */
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
mmc_pm_flag_t sdio_flags;
int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
- wl->wow_enabled);
+ dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
+ wl->wow_enabled);
/* check whether sdio should keep power */
if (wl->wow_enabled) {
sdio_flags = sdio_get_host_pm_caps(func);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- wl1271_error("can't keep power while host "
- "is suspended");
+ dev_err(dev, "can't keep power while host "
+ "is suspended\n");
ret = -EINVAL;
goto out;
}
@@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev)
/* keep power while host suspended */
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
- wl1271_error("error while trying to keep power");
+ dev_err(dev, "error while trying to keep power\n");
goto out;
}
@@ -367,9 +325,10 @@ out:
static int wl1271_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
- wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
+ dev_dbg(dev, "wl1271 resume\n");
if (wl->wow_enabled) {
/* claim back host */
sdio_claim_host(func);
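
After this conversion sdio.c is pure bus glue: it no longer allocates the ieee80211_hw itself, but creates a "wl12xx" platform child device carrying the IRQ resource and platform data, which the core's wl12xx_probe() above then binds to. A condensed sketch of that hand-off pattern follows; error handling is trimmed and example_add_core() is an invented helper, not a wl12xx function.

/* Condensed sketch of the glue -> core hand-off; example_add_core() is an
 * invented helper, not a wl12xx function. */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/err.h>

static struct platform_device *example_add_core(struct device *parent, int irq,
						const void *pdata,
						size_t pdata_len)
{
	struct platform_device *core;
	struct resource res = {
		.start = irq,
		.end   = irq,
		.flags = IORESOURCE_IRQ,
		.name  = "irq",
	};
	int ret;

	core = platform_device_alloc("wl12xx", -1);
	if (!core)
		return ERR_PTR(-ENOMEM);

	core->dev.parent = parent;

	ret = platform_device_add_resources(core, &res, 1);
	if (!ret)
		ret = platform_device_add_data(core, pdata, pdata_len);
	if (!ret)
		ret = platform_device_add(core);
	if (ret) {
		platform_device_put(core);
		return ERR_PTR(ret);
	}

	return core;
}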
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644
index f25d5d9212e..00000000000
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * SDIO testing driver for wl12xx
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Roger Quadros <roger.quadros@nokia.com>
- *
- * wl12xx read/write routines taken from the main module
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/crc7.h>
-#include <linux/vmalloc.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/gpio.h>
-#include <linux/wl12xx.h>
-#include <linux/kthread.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-
-#include "wl12xx.h"
-#include "io.h"
-#include "boot.h"
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
-static bool rx, tx;
-
-module_param(rx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
- "This test continuously reads data from the SDIO device.\n");
-
-module_param(tx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
- "This test continuously writes data to the SDIO device.\n");
-
-struct wl1271_test {
- struct wl1271 wl;
- struct task_struct *test_task;
-};
-
-static const struct sdio_device_id wl1271_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
- {}
-};
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- if (fixed)
- ret = sdio_readsb(func, buf, addr, len);
- else
- ret = sdio_memcpy_fromio(func, buf, addr, len);
-
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
- }
-
- if (ret)
- wl1271_error("sdio read failed (%d)", ret);
-}
-
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-
- if (fixed)
- ret = sdio_writesb(func, addr, buf, len);
- else
- ret = sdio_memcpy_toio(func, addr, buf, len);
- }
- if (ret)
- wl1271_error("sdio write failed (%d)", ret);
-
-}
-
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
-{
- struct sdio_func *func = wl_to_func(wl);
- int ret;
-
- /* Let the SDIO stack handle wlan_enable control, so we
- * keep host claimed while wlan is in use to keep wl1271
- * alive.
- */
- if (enable) {
- /* Power up the card */
- ret = pm_runtime_get_sync(&func->dev);
- if (ret < 0)
- goto out;
-
- /* Runtime PM might be disabled, power up the card manually */
- ret = mmc_power_restore_host(func->card->host);
- if (ret < 0)
- goto out;
-
- sdio_claim_host(func);
- sdio_enable_func(func);
- } else {
- sdio_disable_func(func);
- sdio_release_host(func);
-
- /* Runtime PM might be disabled, power off the card manually */
- ret = mmc_power_save_host(func->card->host);
- if (ret < 0)
- goto out;
-
- /* Power down the card */
- ret = pm_runtime_put_sync(&func->dev);
- }
-
-out:
- return ret;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-}
-
-
-static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
-};
-
-static void wl1271_fw_wakeup(struct wl1271 *wl)
-{
- u32 elp_reg;
-
- elp_reg = ELPCTRL_WAKE_UP;
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
-}
-
-static int wl1271_fetch_firmware(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = request_firmware(&fw, WL128X_FW_NAME,
- wl1271_wl_to_dev(wl));
- else
- ret = request_firmware(&fw, WL127X_FW_NAME,
- wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
- return ret;
- }
-
- if (fw->size % 4) {
- wl1271_error("firmware size is not multiple of 32 bits: %zu",
- fw->size);
- ret = -EILSEQ;
- goto out;
- }
-
- wl->fw_len = fw->size;
- wl->fw = vmalloc(wl->fw_len);
-
- if (!wl->fw) {
- wl1271_error("could not allocate memory for the firmware");
- ret = -ENOMEM;
- goto out;
- }
-
- memcpy(wl->fw, fw->data, wl->fw_len);
-
- ret = 0;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_fetch_nvs(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
- return ret;
- }
-
- wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
- if (!wl->nvs) {
- wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
- goto out;
- }
-
- wl->nvs_len = fw->size;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_chip_wakeup(struct wl1271 *wl)
-{
- struct wl1271_partition_set partition;
- int ret;
-
- msleep(WL1271_PRE_POWER_ON_SLEEP);
- ret = wl1271_power_on(wl);
- if (ret)
- return ret;
-
- msleep(WL1271_POWER_ON_SLEEP);
-
- /* We don't need a real memory partition here, because we only want
- * to use the registers at this point. */
- memset(&partition, 0, sizeof(partition));
- partition.reg.start = REGISTERS_BASE;
- partition.reg.size = REGISTERS_DOWN_SIZE;
- wl1271_set_partition(wl, &partition);
-
- /* ELP module wake up */
- wl1271_fw_wakeup(wl);
-
- /* whal_FwCtrl_BootSm() */
-
- /* 0. read chip id from CHIP_ID */
- wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
-
- /* 1. check if chip id is valid */
-
- switch (wl->chip.id) {
- case CHIP_ID_1271_PG10:
- wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
- wl->chip.id);
- break;
- case CHIP_ID_1271_PG20:
- wl1271_notice("chip id 0x%x (1271 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG20:
- wl1271_notice("chip id 0x%x (1283 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG10:
- default:
- wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
- return -ENODEV;
- }
-
- return ret;
-}
-
-static struct wl1271_partition_set part_down = {
- .mem = {
- .start = 0x00000000,
- .size = 0x000177c0
- },
- .reg = {
- .start = REGISTERS_BASE,
- .size = 0x00008800
- },
- .mem2 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
- .mem3 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
-};
-
-static int tester(void *data)
-{
- struct wl1271 *wl = data;
- struct sdio_func *func = wl_to_func(wl);
- struct device *pdev = &func->dev;
- int ret = 0;
- bool rx_started = 0;
- bool tx_started = 0;
- uint8_t *tx_buf, *rx_buf;
- int test_size = PAGE_SIZE;
- u32 addr = 0;
- struct wl1271_partition_set partition;
-
- /* We assume chip is powered up and firmware fetched */
-
- memcpy(&partition, &part_down, sizeof(partition));
- partition.mem.start = addr;
- wl1271_set_partition(wl, &partition);
-
- tx_buf = kmalloc(test_size, GFP_KERNEL);
- rx_buf = kmalloc(test_size, GFP_KERNEL);
- if (!tx_buf || !rx_buf) {
- dev_err(pdev,
- "Could not allocate memory. Test will not run.\n");
- ret = -ENOMEM;
- goto free;
- }
-
- memset(tx_buf, 0x5a, test_size);
-
- /* write something in data area so we can read it back */
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- while (!kthread_should_stop()) {
- if (rx && !rx_started) {
- dev_info(pdev, "starting rx test\n");
- rx_started = 1;
- } else if (!rx && rx_started) {
- dev_info(pdev, "stopping rx test\n");
- rx_started = 0;
- }
-
- if (tx && !tx_started) {
- dev_info(pdev, "starting tx test\n");
- tx_started = 1;
- } else if (!tx && tx_started) {
- dev_info(pdev, "stopping tx test\n");
- tx_started = 0;
- }
-
- if (rx_started)
- wl1271_read(wl, addr, rx_buf, test_size, false);
-
- if (tx_started)
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- if (!rx_started && !tx_started)
- msleep(100);
- }
-
-free:
- kfree(tx_buf);
- kfree(rx_buf);
- return ret;
-}
-
-static int __devinit wl1271_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- struct wl1271_test *wl_test;
- int ret = 0;
-
- /* wl1271 has 2 sdio functions we handle just the wlan part */
- if (func->num != 0x02)
- return -ENODEV;
-
- wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
- if (!wl_test) {
- dev_err(&func->dev, "Could not allocate memory\n");
- return -ENOMEM;
- }
-
- wl = &wl_test->wl;
-
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
-
- /* Grab access to FN0 for ELP reg. */
- func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-
- /* Use block mode for transferring over one block size of data */
- func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-
- wlan_data = wl12xx_get_platform_data();
- if (IS_ERR(wlan_data)) {
- ret = PTR_ERR(wlan_data);
- dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
- goto out_free;
- }
-
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
-
- sdio_set_drvdata(func, wl_test);
-
- /* power up the device */
- ret = wl1271_chip_wakeup(wl);
- if (ret) {
- dev_err(&func->dev, "could not wake up chip\n");
- goto out_free;
- }
-
- if (wl->fw == NULL) {
- ret = wl1271_fetch_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware fetch error\n");
- goto out_off;
- }
- }
-
- /* fetch NVS */
- if (wl->nvs == NULL) {
- ret = wl1271_fetch_nvs(wl);
- if (ret < 0) {
- dev_err(&func->dev, "NVS fetch error\n");
- goto out_off;
- }
- }
-
- ret = wl1271_load_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware load error: %d\n", ret);
- goto out_free;
- }
-
- dev_info(&func->dev, "initialized\n");
-
- /* I/O testing will be done in the tester thread */
-
- wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
- if (IS_ERR(wl_test->test_task)) {
- dev_err(&func->dev, "unable to create kernel thread\n");
- ret = PTR_ERR(wl_test->test_task);
- goto out_free;
- }
-
- return 0;
-
-out_off:
- /* power off the chip */
- wl1271_power_off(wl);
-
-out_free:
- kfree(wl_test);
- return ret;
-}
-
-static void __devexit wl1271_remove(struct sdio_func *func)
-{
- struct wl1271_test *wl_test = sdio_get_drvdata(func);
-
- /* stop the I/O test thread */
- kthread_stop(wl_test->test_task);
-
- /* power off the chip */
- wl1271_power_off(&wl_test->wl);
-
- vfree(wl_test->wl.fw);
- wl_test->wl.fw = NULL;
- kfree(wl_test->wl.nvs);
- wl_test->wl.nvs = NULL;
-
- kfree(wl_test);
-}
-
-static struct sdio_driver wl1271_sdio_driver = {
- .name = "wl12xx_sdio_test",
- .id_table = wl1271_devices,
- .probe = wl1271_probe,
- .remove = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&wl1271_sdio_driver);
- if (ret < 0)
- pr_err("failed to register sdio driver: %d\n", ret);
-
- return ret;
-}
-module_init(wl1271_init);
-
-static void __exit wl1271_exit(void)
-{
- sdio_unregister_driver(&wl1271_sdio_driver);
-}
-module_exit(wl1271_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
-
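/*
 * Illustrative sketch, not from the patch: the removed tester above follows
 * the usual kernel-thread pattern -- start the loop with kthread_run(), poll
 * kthread_should_stop() inside it, and have the remove path call
 * kthread_stop(). Names prefixed "demo_" are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_loop(void *data)
{
	while (!kthread_should_stop()) {
		/* one unit of I/O test work would go here */
		msleep(100);		/* idle when no test is enabled */
	}
	return 0;
}

static int demo_start(void)
{
	demo_task = kthread_run(demo_loop, NULL, "demo_tester");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void demo_stop(void)
{
	kthread_stop(demo_task);	/* waits until demo_loop() returns */
}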
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 0f971867786..92caa7ce605 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -27,6 +27,7 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl12xx.h"
@@ -69,35 +70,22 @@
#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
-static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_spi(wl)->dev);
-}
-
-static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
-}
+struct wl12xx_spi_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
-static void wl1271_spi_reset(struct wl1271 *wl)
+static void wl12xx_spi_reset(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi reset");
+ dev_err(child->parent,
+ "could not allocate cmd for spi reset\n");
return;
}
@@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
- wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
-static void wl1271_spi_init(struct wl1271 *wl)
+static void wl12xx_spi_init(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi init");
+ dev_err(child->parent,
+ "could not allocate cmd for spi init\n");
return;
}
@@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
- wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+ spi_sync(to_spi_device(glue->dev), &m);
kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
-static int wl1271_spi_read_busy(struct wl1271 *wl)
+static int wl12xx_spi_read_busy(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
@@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
t[0].len = sizeof(u32);
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (*busy_buf & 0x1)
return 0;
}
/* The SPI bus is unresponsive, the read failed. */
- wl1271_error("SPI read busy-word timeout!\n");
+ dev_err(child->parent, "SPI read busy-word timeout!\n");
return -ETIMEDOUT;
}
-static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[2];
struct spi_message m;
u32 *busy_buf;
@@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
- wl1271_spi_read_busy(wl)) {
+ wl12xx_spi_read_busy(child)) {
memset(buf, 0, chunk_len);
return;
}
@@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
-
- wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!fixed)
addr += chunk_len;
@@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
}
}
-static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
+ size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
t[i].len = chunk_len;
spi_message_add_tail(&t[i++], &m);
- wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
-
if (!fixed)
addr += chunk_len;
buf += chunk_len;
@@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
cmd++;
}
- spi_sync(wl_to_spi(wl), &m);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
- struct wl1271 *wl = cookie;
- unsigned long flags;
-
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
-{
- if (wl->set_power)
- wl->set_power(enable);
-
- return 0;
+ spi_sync(to_spi_device(glue->dev), &m);
}
static struct wl1271_if_operations spi_ops = {
- .read = wl1271_spi_raw_read,
- .write = wl1271_spi_raw_write,
- .reset = wl1271_spi_reset,
- .init = wl1271_spi_init,
- .power = wl1271_spi_set_power,
- .dev = wl1271_spi_wl_to_dev,
- .enable_irq = wl1271_spi_enable_interrupts,
- .disable_irq = wl1271_spi_disable_interrupts,
+ .read = wl12xx_spi_raw_read,
+ .write = wl12xx_spi_raw_write,
+ .reset = wl12xx_spi_reset,
+ .init = wl12xx_spi_init,
.set_block_size = NULL,
};
static int __devinit wl1271_probe(struct spi_device *spi)
{
+ struct wl12xx_spi_glue *glue;
struct wl12xx_platform_data *pdata;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- unsigned long irqflags;
- int ret;
+ struct resource res[1];
+ int ret = -ENOMEM;
pdata = spi->dev.platform_data;
if (!pdata) {
- wl1271_error("no platform data");
+ dev_err(&spi->dev, "no platform data\n");
return -ENODEV;
}
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ pdata->ops = &spi_ops;
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&spi->dev, "can't allocate glue\n");
+ goto out;
+ }
- dev_set_drvdata(&spi->dev, wl);
- wl->if_priv = spi;
+ glue->dev = &spi->dev;
- wl->if_ops = &spi_ops;
+ spi_set_drvdata(spi, glue);
/* This is the only SPI value that we need to set here, the rest
* comes from the board-peripherals file */
@@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
- wl1271_error("spi_setup failed");
- goto out_free;
+ dev_err(glue->dev, "spi_setup failed\n");
+ goto out_free_glue;
}
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1271_error("set power function missing in platform data");
- ret = -ENODEV;
- goto out_free;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device\n");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- wl->ref_clock = pdata->board_ref_clock;
- wl->tcxo_clock = pdata->board_tcxo_clock;
- wl->platform_quirks = pdata->platform_quirks;
+ glue->core->dev.parent = &spi->dev;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ memset(res, 0x00, sizeof(res));
- wl->irq = spi->irq;
- if (wl->irq < 0) {
- wl1271_error("irq missing in platform data");
- ret = -ENODEV;
- goto out_free;
- }
+ res[0].start = spi->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
}
- disable_irq(wl->irq);
-
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't register platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
-
- out_free:
- wl1271_free_hw(wl);
+out_dev_put:
+ platform_device_put(glue->core);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static int __devexit wl1271_remove(struct spi_device *spi)
{
- struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+ struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
- wl1271_unregister_hw(wl);
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
return 0;
}
@@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
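/*
 * Illustrative sketch, not from the patch: spi.c is reduced above to a thin
 * "glue" layer that registers a child "wl12xx" platform device carrying the
 * IRQ resource and the platform data; the child later reaches the glue via
 * dev_get_drvdata(child->parent). A minimal version of that registration,
 * using the hypothetical name demo_register_core(), looks roughly like this.
 */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>

static int demo_register_core(struct spi_device *spi,
			      struct wl12xx_platform_data *pdata)
{
	struct platform_device *core;
	struct resource res = {
		.start = spi->irq,
		.flags = IORESOURCE_IRQ,
		.name  = "irq",
	};
	int ret;

	core = platform_device_alloc("wl12xx", -1);
	if (!core)
		return -ENOMEM;

	core->dev.parent = &spi->dev;	/* so the child can find the glue */

	ret = platform_device_add_resources(core, &res, 1);
	if (!ret)
		ret = platform_device_add_data(core, pdata, sizeof(*pdata));
	if (!ret)
		ret = platform_device_add(core);
	if (ret)
		platform_device_put(core);	/* drop the device on any failure */

	return ret;
}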
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 4ae8effaee2..25093c0cb0e 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -26,8 +26,10 @@
#include <net/genetlink.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
+#include "ps.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -36,6 +38,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_TEST,
WL1271_TM_CMD_INTERROGATE,
WL1271_TM_CMD_CONFIGURE,
+ WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
WL1271_TM_CMD_RECOVER,
@@ -87,31 +90,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
return -EMSGSIZE;
mutex_lock(&wl->mutex);
- ret = wl1271_cmd_test(wl, buf, buf_len, answer);
- mutex_unlock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
wl1271_warning("testmode cmd test failed: %d", ret);
- return ret;
+ goto out_sleep;
}
if (answer) {
len = nla_total_size(buf_len);
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
- return ret;
+ goto out_sleep;
}
- return 0;
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_sleep;
}
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +147,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
- mutex_lock(&wl->mutex);
ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
- mutex_unlock(&wl->mutex);
-
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
- kfree(cmd);
- return ret;
+ goto out_free;
}
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
if (!skb) {
- kfree(cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free;
}
NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+ ret = cfg80211_testmode_reply(skb);
+ if (ret < 0)
+ goto out_free;
+
+out_free:
+ kfree(cmd);
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_free;
}
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
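/*
 * Illustrative sketch, not from the patch: each testmode handler above now
 * wraps its firmware command in the same shape -- take wl->mutex, refuse when
 * the interface is off, wake the chip out of ELP, run the command, then put
 * the chip back to sleep and unlock via a goto ladder. demo_tm_cmd() is a
 * hypothetical name for that skeleton.
 */
#include "wl12xx.h"
#include "ps.h"

static int demo_tm_cmd(struct wl1271 *wl)
{
	int ret;

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF) {
		ret = -EINVAL;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);	/* chip may be dozing in ELP */
	if (ret < 0)
		goto out;

	/* ... issue the firmware command and build the reply here ... */
	ret = 0;

	wl1271_ps_elp_sleep(wl);	/* let the chip doze again */
out:
	mutex_unlock(&wl->mutex);
	return ret;
}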
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index bad9e29d49b..4508ccd7832 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -26,22 +26,24 @@
#include <linux/etherdevice.h>
#include "wl12xx.h"
+#include "debug.h"
#include "io.h"
#include "reg.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 id)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap)
ret = wl12xx_cmd_set_default_wep_key(wl, id,
- wl->ap_bcast_hlid);
+ wlvif->ap.bcast_hlid);
else
- ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
if (ret < 0)
return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
}
static int wl1271_tx_update_filters(struct wl1271 *wl,
- struct sk_buff *skb)
+ struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
if (!ieee80211_is_auth(hdr->frame_control))
return 0;
- if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+ if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
goto out;
wl1271_debug(DEBUG_CMD, "starting device role for roaming");
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}
-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid)
{
bool fw_ps, single_sta;
u8 tx_pkts;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
return;
- if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
tx_pkts = wl->links[hlid].allocated_pkts;
single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
* case FW-memory congestion is not a problem.
*/
if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
return wl->dummy_packet == skb;
}
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr;
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
return wl->system_hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_mgmt(hdr->frame_control))
- return wl->ap_global_hlid;
+ return wlvif->ap.global_hlid;
else
- return wl->ap_bcast_hlid;
+ return wlvif->ap.bcast_hlid;
}
}
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
return wl->system_hlid;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl12xx_tx_get_hlid_ap(wl, skb);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
- wl1271_tx_update_filters(wl, skb);
+ wl1271_tx_update_filters(wl, wlvif, skb);
- if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+ if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
!ieee80211_is_auth(hdr->frame_control) &&
!ieee80211_is_assoc_req(hdr->frame_control))
- return wl->sta_hlid;
+ return wlvif->sta.hlid;
else
- return wl->dev_hlid;
+ return wlvif->dev_hlid;
}
static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
- return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
- else
+ if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+ else
+ return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
- u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra, u32 buf_offset,
+ u8 hlid)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
u32 total_blocks;
int id, ret = -EBUSY, ac;
u32 spare_blocks = wl->tx_spare_blocks;
+ bool is_dummy = false;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
len = wl12xx_calc_packet_alignment(wl, total_len);
/* in case of a dummy packet, use default amount of spare mem blocks */
- if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+ if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+ is_dummy = true;
spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ }
total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (wl->bss_type == BSS_TYPE_AP_BSS &&
- hlid >= WL1271_AP_STA_HLID_START)
+ if (!is_dummy && wlvif &&
+ wlvif->bss_type == BSS_TYPE_AP_BSS &&
+ test_bit(hlid, wlvif->ap.sta_hlid_map))
wl->links[hlid].allocated_pkts++;
ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
return ret;
}
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
- u32 extra, struct ieee80211_tx_info *control,
- u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra,
+ struct ieee80211_tx_info *control, u8 hlid)
{
struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int aligned_len, ac, rate_idx;
s64 hosttime;
- u16 tx_attr;
+ u16 tx_attr = 0;
+ bool is_dummy;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
hosttime = (timespec_to_ns(&ts) >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+ if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
else
desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = skb->priority;
- if (wl12xx_is_dummy_packet(wl, skb)) {
+ if (is_dummy) {
/*
* FW expects the dummy packet to have an invalid session id -
* any session id that is different than the one set in the join
*/
- tx_attr = ((~wl->session_counter) <<
+ tx_attr = (SESSION_COUNTER_INVALID <<
TX_HW_ATTR_OFST_SESSION_COUNTER) &
TX_HW_ATTR_SESSION_COUNTER;
tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
- } else {
+ } else if (wlvif) {
/* configure the tx attributes */
- tx_attr =
- wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+ tx_attr = wlvif->session_counter <<
+ TX_HW_ATTR_OFST_SESSION_COUNTER;
}
desc->hlid = hlid;
-
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (is_dummy || !wlvif)
+ rate_idx = 0;
+ else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* if the packets are destined for AP (have a STA entry)
send them with AP rate policies, otherwise use default
basic rates */
- if (control->control.sta)
- rate_idx = ACX_TX_AP_FULL_RATE;
+ if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ rate_idx = wlvif->sta.p2p_rate_idx;
+ else if (control->control.sta)
+ rate_idx = wlvif->sta.ap_rate_idx;
else
- rate_idx = ACX_TX_BASIC_RATE;
+ rate_idx = wlvif->sta.basic_rate_idx;
} else {
- if (hlid == wl->ap_global_hlid)
- rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
- else if (hlid == wl->ap_bcast_hlid)
- rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ if (hlid == wlvif->ap.global_hlid)
+ rate_idx = wlvif->ap.mgmt_rate_idx;
+ else if (hlid == wlvif->ap.bcast_hlid)
+ rate_idx = wlvif->ap.bcast_rate_idx;
else
- rate_idx = ac;
+ rate_idx = wlvif->ap.ucast_rate_idx[ac];
}
tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
}
/* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
- u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 buf_offset)
{
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
u32 total_len;
u8 hlid;
+ bool is_dummy;
if (!skb)
return -EINVAL;
info = IEEE80211_SKB_CB(skb);
+ /* TODO: handle dummy packets on multi-vifs */
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (unlikely(is_wep && wl->default_key != idx)) {
- ret = wl1271_set_default_wep_key(wl, idx);
+ if (unlikely(is_wep && wlvif->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
- wl->default_key = idx;
+ wlvif->default_key = idx;
}
}
-
- hlid = wl1271_tx_get_hlid(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
if (hlid == WL12XX_INVALID_LINK_ID) {
wl1271_error("invalid hlid. dropping skb 0x%p", skb);
return -EINVAL;
}
- ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+ ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
if (ret < 0)
return ret;
- wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+ wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, skb);
- wl1271_tx_regulate_link(wl, hlid);
+ wl1271_tx_regulate_link(wl, wlvif, hlid);
}
/*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
/* Revert side effects in the dummy packet skb, so it can be reused */
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (is_dummy)
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
return &queues[q];
}
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+ struct wl1271_link *lnk)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
unsigned long flags;
struct sk_buff_head *queue;
- queue = wl1271_select_queue(wl, wl->tx_queue);
+ queue = wl1271_select_queue(wl, lnk->tx_queue);
if (!queue)
- goto out;
+ return NULL;
skb = skb_dequeue(queue);
-
-out:
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
return skb;
}
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
- unsigned long flags;
int i, h, start_hlid;
- struct sk_buff_head *queue;
/* start from the link after the last one */
- start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < AP_MAX_LINKS; i++) {
- h = (start_hlid + i) % AP_MAX_LINKS;
+ for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+ h = (start_hlid + i) % WL12XX_MAX_LINKS;
/* only consider connected stations */
- if (h >= WL1271_AP_STA_HLID_START &&
- !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+ if (!test_bit(h, wlvif->links_map))
continue;
- queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
- if (!queue)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+ if (!skb)
continue;
- skb = skb_dequeue(queue);
- if (skb)
- break;
+ wlvif->last_tx_hlid = h;
+ break;
}
- if (skb) {
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- wl->last_tx_hlid = h;
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count[q]--;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- } else {
- wl->last_tx_hlid = 0;
- }
+ if (!skb)
+ wlvif->last_tx_hlid = 0;
return skb;
}
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
unsigned long flags;
+ struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- skb = wl1271_ap_skb_dequeue(wl);
- else
- skb = wl1271_sta_skb_dequeue(wl);
+ if (wlvif) {
+ wl12xx_for_each_wlvif_continue(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ /* do another pass */
+ if (!skb) {
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ if (!skb)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
return skb;
}
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
- u8 hlid = wl1271_tx_get_hlid(wl, skb);
+ } else {
+ u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
- } else {
- skb_queue_head(&wl->tx_queue[q], skb);
+ wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+ WL12XX_MAX_LINKS;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
return ieee80211_is_data_present(hdr->frame_control);
}
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+ struct wl12xx_vif *wlvif;
+ u32 timeout;
+ u8 hlid;
+
+ if (!wl->conf.rx_streaming.interval)
+ return;
+
+ if (!wl->conf.rx_streaming.always &&
+ !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+ return;
+
+ timeout = wl->conf.rx_streaming.duration;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ bool found = false;
+ for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ if (test_bit(hlid, wlvif->links_map)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue;
+
+ /* enable rx streaming */
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+ ieee80211_queue_work(wl->hw,
+ &wlvif->rx_streaming_enable_work);
+
+ mod_timer(&wlvif->rx_streaming_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
void wl1271_tx_work_locked(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
+ struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0;
bool sent_packets = false;
- bool had_data = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
int ret;
if (unlikely(wl->state == WL1271_STATE_OFF))
return;
while ((skb = wl1271_skb_dequeue(wl))) {
- if (wl1271_tx_is_data_present(skb))
- had_data = true;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool has_data = false;
- ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+ wlvif = NULL;
+ if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+ wlvif = wl12xx_vif_to_data(info->control.vif);
+
+ has_data = wlvif && wl1271_tx_is_data_present(skb);
+ ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
if (ret == -EAGAIN) {
/*
* Aggregation buffer is full.
* Flush buffer and try again.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
buf_offset, true);
sent_packets = true;
@@ -672,16 +735,27 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Firmware buffer is full.
* Queue back last skb, and stop aggregating.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack;
} else if (ret < 0) {
- dev_kfree_skb(skb);
+ if (wl12xx_is_dummy_packet(wl, skb))
+ /*
+ * fw still expects dummy packet,
+ * so re-enqueue it
+ */
+ wl1271_skb_queue_head(wl, wlvif, skb);
+ else
+ ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
}
buf_offset += ret;
wl->tx_packets_count++;
+ if (has_data) {
+ desc = (struct wl1271_tx_hw_descr *) skb->data;
+ __set_bit(desc->hlid, active_hlids);
+ }
}
out_ack:
@@ -701,19 +775,7 @@ out_ack:
wl1271_handle_tx_low_watermark(wl);
}
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* enable rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +799,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
struct wl1271_tx_hw_res_descr *result)
{
struct ieee80211_tx_info *info;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
int id = result->id;
int rate = -1;
@@ -756,11 +820,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
return;
}
+ /* info->control is valid as long as we don't update info->status */
+ vif = info->control.vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
/* update the TX status info */
if (result->status == TX_SUCCESS) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+ rate = wl1271_rate_to_idx(result->rate_class_index,
+ wlvif->band);
retries = result->ack_failures;
} else if (result->status == TX_RETRY_EXCEEDED) {
wl->stats.excessive_retries++;
@@ -783,14 +852,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
u8 fw_lsb = result->tx_security_sequence_number_lsb;
- u8 cur_lsb = wl->tx_security_last_seq_lsb;
+ u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
/*
* update security sequence number, taking care of potential
* wrap-around
*/
- wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
- wl->tx_security_last_seq_lsb = fw_lsb;
+ wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+ wlvif->tx_security_last_seq_lsb = fw_lsb;
}
/* remove private header from packet */
@@ -886,39 +955,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
}
/* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
- struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- for (i = 0; i < AP_MAX_LINKS; i++) {
- wl1271_free_sta(wl, i);
- wl1271_tx_reset_link_queues(wl, i);
- wl->links[i].allocated_pkts = 0;
- wl->links[i].prev_freed_pkts = 0;
- }
-
- wl->last_tx_hlid = 0;
- } else {
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
- skb);
-
- if (!wl12xx_is_dummy_packet(wl, skb)) {
- info = IEEE80211_SKB_CB(skb);
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- ieee80211_tx_status_ni(wl->hw, skb);
- }
- }
- }
+ for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_free_sta(wl, wlvif, i);
+ else
+ wlvif->sta.ba_rx_bitmap = 0;
- wl->ba_rx_bitmap = 0;
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_pkts = 0;
+ wl->links[i].prev_freed_pkts = 0;
}
+ wlvif->last_tx_hlid = 0;
+
+}
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] = 0;
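/*
 * Illustrative note, not from the patch: the security-sequence hunk above
 * replaces "(fw_lsb - cur_lsb + 256) % 256" with "(fw_lsb - cur_lsb) & 0xff".
 * For 8-bit counters both give the distance modulo 256: with fw_lsb = 3 and
 * cur_lsb = 250, (3 - 250 + 256) % 256 = 9 and (3 - 250) & 0xff = 9, so the
 * masked form handles wrap-around without the extra add and modulo.
 * demo_seq_delta() is a hypothetical helper showing the same computation.
 */
#include <linux/types.h>

static inline u8 demo_seq_delta(u8 fw_lsb, u8 cur_lsb)
{
	return (fw_lsb - cur_lsb) & 0xff;	/* 0..255, correct across wrap */
}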
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index dc4f09adf08..2dbb24e6d54 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
enum ieee80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
/* from main.c */
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1ec90fc7505..d21f71ff6f6 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -35,83 +35,6 @@
#include "conf.h"
#include "ini.h"
-#define DRIVER_NAME "wl1271"
-#define DRIVER_PREFIX DRIVER_NAME ": "
-
-/*
- * FW versions support BA 11n
- * versions marks x.x.x.50-60.x
- */
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
-
-enum {
- DEBUG_NONE = 0,
- DEBUG_IRQ = BIT(0),
- DEBUG_SPI = BIT(1),
- DEBUG_BOOT = BIT(2),
- DEBUG_MAILBOX = BIT(3),
- DEBUG_TESTMODE = BIT(4),
- DEBUG_EVENT = BIT(5),
- DEBUG_TX = BIT(6),
- DEBUG_RX = BIT(7),
- DEBUG_SCAN = BIT(8),
- DEBUG_CRYPT = BIT(9),
- DEBUG_PSM = BIT(10),
- DEBUG_MAC80211 = BIT(11),
- DEBUG_CMD = BIT(12),
- DEBUG_ACX = BIT(13),
- DEBUG_SDIO = BIT(14),
- DEBUG_FILTERS = BIT(15),
- DEBUG_ADHOC = BIT(16),
- DEBUG_AP = BIT(17),
- DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
- DEBUG_ALL = ~0,
-};
-
-extern u32 wl12xx_debug_level;
-
-#define DEBUG_DUMP_LIMIT 1024
-
-#define wl1271_error(fmt, arg...) \
- pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
-
-#define wl1271_warning(fmt, arg...) \
- pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
-
-#define wl1271_notice(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_info(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_debug(level, fmt, arg...) \
- do { \
- if (level & wl12xx_debug_level) \
- pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
- } while (0)
-
-/* TODO: use pr_debug_hex_dump when it will be available */
-#define wl1271_dump(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- 0); \
- } while (0)
-
-#define wl1271_dump_ascii(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- true); \
- } while (0)
-
#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
@@ -142,16 +65,12 @@ extern u32 wl12xx_debug_level;
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+#define WL12XX_MAX_RATE_POLICIES 16
+
/* Defined by FW as 0. Will not be freed or allocated. */
#define WL12XX_SYSTEM_HLID 0
/*
- * TODO: we currently don't support multirole. remove
- * this constant from the code when we do.
- */
-#define WL1271_AP_STA_HLID_START 3
-
-/*
* When in AP-mode, we allow (at least) this number of packets
* to be transmitted to FW for a STA in PS-mode. Only when packets are
* present in the FW buffers it will wake the sleeping STA. We want to put
@@ -236,13 +155,6 @@ struct wl1271_stats {
#define AP_MAX_STATIONS 8
-/* Broadcast and Global links + system link + links to stations */
-/*
- * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
- * the places that use this.
- */
-#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-
/* FW status registers */
struct wl12xx_fw_status {
__le32 intr;
@@ -299,17 +211,14 @@ struct wl1271_scan {
};
struct wl1271_if_operations {
- void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*read)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*write)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*reset)(struct wl1271 *wl);
- void (*init)(struct wl1271 *wl);
- int (*power)(struct wl1271 *wl, bool enable);
- struct device* (*dev)(struct wl1271 *wl);
- void (*enable_irq)(struct wl1271 *wl);
- void (*disable_irq)(struct wl1271 *wl);
- void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+ void (*reset)(struct device *child);
+ void (*init)(struct device *child);
+ int (*power)(struct device *child, bool enable);
+ void (*set_block_size) (struct device *child, unsigned int blksz);
};
#define MAX_NUM_KEYS 14
@@ -326,29 +235,33 @@ struct wl1271_ap_key {
};
enum wl12xx_flags {
- WL1271_FLAG_STA_ASSOCIATED,
- WL1271_FLAG_IBSS_JOINED,
WL1271_FLAG_GPIO_POWER,
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
WL1271_FLAG_ELP_REQUESTED,
- WL1271_FLAG_PSM,
- WL1271_FLAG_PSM_REQUESTED,
WL1271_FLAG_IRQ_RUNNING,
WL1271_FLAG_IDLE,
- WL1271_FLAG_PSPOLL_FAILURE,
- WL1271_FLAG_STA_STATE_SENT,
WL1271_FLAG_FW_TX_BUSY,
- WL1271_FLAG_AP_STARTED,
- WL1271_FLAG_IF_INITIALIZED,
WL1271_FLAG_DUMMY_PACKET_PENDING,
WL1271_FLAG_SUSPENDED,
WL1271_FLAG_PENDING_WORK,
WL1271_FLAG_SOFT_GEMINI,
- WL1271_FLAG_RX_STREAMING_STARTED,
WL1271_FLAG_RECOVERY_IN_PROGRESS,
- WL1271_FLAG_CS_PROGRESS,
+};
+
+enum wl12xx_vif_flags {
+ WLVIF_FLAG_INITIALIZED,
+ WLVIF_FLAG_STA_ASSOCIATED,
+ WLVIF_FLAG_IBSS_JOINED,
+ WLVIF_FLAG_AP_STARTED,
+ WLVIF_FLAG_PSM,
+ WLVIF_FLAG_PSM_REQUESTED,
+ WLVIF_FLAG_STA_STATE_SENT,
+ WLVIF_FLAG_RX_STREAMING_STARTED,
+ WLVIF_FLAG_PSPOLL_FAILURE,
+ WLVIF_FLAG_CS_PROGRESS,
+ WLVIF_FLAG_AP_PROBE_RESP_SET,
};
struct wl1271_link {
@@ -366,10 +279,11 @@ struct wl1271_link {
};
struct wl1271 {
- struct platform_device *plat_dev;
struct ieee80211_hw *hw;
bool mac80211_registered;
+ struct device *dev;
+
void *if_priv;
struct wl1271_if_operations *if_ops;
@@ -399,25 +313,20 @@ struct wl1271 {
s8 hw_pg_ver;
- u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
- u8 bss_type;
- u8 set_bss_type;
- u8 p2p; /* we are using p2p role */
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 ssid_len;
int channel;
- u8 role_id;
- u8 dev_role_id;
u8 system_hlid;
- u8 sta_hlid;
- u8 dev_hlid;
- u8 ap_global_hlid;
- u8 ap_bcast_hlid;
unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+ unsigned long rate_policies_map[
+ BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+ struct list_head wlvif_list;
+
+ u8 sta_count;
+ u8 ap_count;
struct wl1271_acx_mem_map *target_mem_map;
@@ -440,11 +349,7 @@ struct wl1271 {
/* Time-offset between host and chipset clocks */
s64 time_offset;
- /* Session counter for the chipset */
- int session_counter;
-
/* Frames scheduled for transmission, not handled yet */
- struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count[NUM_TX_QUEUES];
long stopped_queues_map;
@@ -462,17 +367,6 @@ struct wl1271 {
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
int tx_frames_cnt;
- /*
- * Security sequence number
- * bits 0-15: lower 16 bits part of sequence number
- * bits 16-47: higher 32 bits part of sequence number
- * bits 48-63: not in use
- */
- u64 tx_security_seq;
-
- /* 8 bits of the last sequence number in use */
- u8 tx_security_last_seq_lsb;
-
/* FW Rx counter */
u32 rx_counter;
@@ -507,59 +401,21 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
+ struct ieee80211_vif *scan_vif;
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
bool sched_scanning;
- /* probe-req template for the current AP */
- struct sk_buff *probereq;
-
- /* Our association ID */
- u16 aid;
-
- /*
- * currently configured rate set:
- * bits 0-15 - 802.11abg rates
- * bits 16-23 - 802.11n MCS index mask
- * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
- */
- u32 basic_rate_set;
- u32 basic_rate;
- u32 rate_set;
- u32 bitrate_masks[IEEE80211_NUM_BANDS];
-
/* The current band */
enum ieee80211_band band;
- /* Beaconing interval (needed for ad-hoc) */
- u32 beacon_int;
-
- /* Default key (for WEP) */
- u32 default_key;
-
- /* Rx Streaming */
- struct work_struct rx_streaming_enable_work;
- struct work_struct rx_streaming_disable_work;
- struct timer_list rx_streaming_timer;
-
struct completion *elp_compl;
- struct completion *ps_compl;
struct delayed_work elp_work;
- struct delayed_work pspoll_work;
-
- /* counter for ps-poll delivery failures */
- int ps_poll_failures;
-
- /* retry counter for PSM entries */
- u8 psm_entry_retry;
/* in dBm */
int power_level;
- int rssi_thold;
- int last_rssi_event;
-
struct wl1271_stats stats;
__le32 buffer_32;
@@ -583,20 +439,9 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
- /* map for HLIDs of associated stations - when operating in AP mode */
- unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
-
- /* recoreded keys for AP-mode - set here before AP startup */
- struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
-
/* bands supported by this instance of wl12xx */
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
- /* RX BA constraint value */
- bool ba_support;
- u8 ba_rx_bitmap;
- bool ba_allowed;
-
int tcxo_clock;
/*
@@ -610,10 +455,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[AP_MAX_LINKS];
-
- /* the hlid of the link where the last transmitted skb came from */
- int last_tx_hlid;
+ struct wl1271_link links[WL12XX_MAX_LINKS];
/* AP-mode - a bitmap of links currently in PS mode according to FW */
u32 ap_fw_ps_map;
@@ -632,21 +474,173 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+
+ /* last wlvif we transmitted from */
+ struct wl12xx_vif *last_wlvif;
};
struct wl1271_station {
u8 hlid;
};
+struct wl12xx_vif {
+ struct wl1271 *wl;
+ struct list_head list;
+ unsigned long flags;
+ u8 bss_type;
+ u8 p2p; /* we are using p2p role */
+ u8 role_id;
+
+ /* sta/ibss specific */
+ u8 dev_role_id;
+ u8 dev_hlid;
+
+ union {
+ struct {
+ u8 hlid;
+ u8 ba_rx_bitmap;
+
+ u8 basic_rate_idx;
+ u8 ap_rate_idx;
+ u8 p2p_rate_idx;
+ } sta;
+ struct {
+ u8 global_hlid;
+ u8 bcast_hlid;
+
+ /* HLIDs bitmap of associated stations */
+ unsigned long sta_hlid_map[BITS_TO_LONGS(
+ WL12XX_MAX_LINKS)];
+
+ /* recorded keys - set here before AP startup */
+ struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
+
+ u8 mgmt_rate_idx;
+ u8 bcast_rate_idx;
+ u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
+ } ap;
+ };
+
+ /* the hlid of the last transmitted skb */
+ int last_tx_hlid;
+
+ unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+ u8 ssid_len;
+
+ /* The current band */
+ enum ieee80211_band band;
+ int channel;
+
+ u32 bitrate_masks[IEEE80211_NUM_BANDS];
+ u32 basic_rate_set;
+
+ /*
+ * currently configured rate set:
+ * bits 0-15 - 802.11abg rates
+ * bits 16-23 - 802.11n MCS index mask
+ * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+ */
+ u32 basic_rate;
+ u32 rate_set;
+
+ /* probe-req template for the current AP */
+ struct sk_buff *probereq;
+
+ /* Beaconing interval (needed for ad-hoc) */
+ u32 beacon_int;
+
+ /* Default key (for WEP) */
+ u32 default_key;
+
+ /* Our association ID */
+ u16 aid;
+
+ /* Session counter for the chipset */
+ int session_counter;
+
+ struct completion *ps_compl;
+ struct delayed_work pspoll_work;
+
+ /* counter for ps-poll delivery failures */
+ int ps_poll_failures;
+
+ /* retry counter for PSM entries */
+ u8 psm_entry_retry;
+
+ /* in dBm */
+ int power_level;
+
+ int rssi_thold;
+ int last_rssi_event;
+
+ /* RX BA constraint value */
+ bool ba_support;
+ bool ba_allowed;
+
+ /* Rx Streaming */
+ struct work_struct rx_streaming_enable_work;
+ struct work_struct rx_streaming_disable_work;
+ struct timer_list rx_streaming_timer;
+
+ /*
+ * This struct must be last!
+ * data that has to be saved across reconfigs (e.g. recovery)
+ * should be declared in this struct.
+ */
+ struct {
+ u8 persistent[0];
+ /*
+ * Security sequence number
+ * bits 0-15: lower 16 bits part of sequence number
+ * bits 16-47: higher 32 bits part of sequence number
+ * bits 48-63: not in use
+ */
+ u64 tx_security_seq;
+
+ /* 8 bits of the last sequence number in use */
+ u8 tx_security_last_seq_lsb;
+ };
+};
+
+static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
+{
+ return (struct wl12xx_vif *)vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
+{
+ return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+}
+
+#define wl12xx_for_each_wlvif(wl, wlvif) \
+ list_for_each_entry(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
+ list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \
+ wl12xx_for_each_wlvif(wl, wlvif) \
+ if (wlvif->bss_type == _bss_type)
+
+#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
+
+#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
+
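
As a usage sketch (not part of this patch), the per-vif iterators above are meant to be called from driver paths that already hold the device mutex, e.g.:

	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif_sta(wl, wlvif)
		wl1271_recalc_rx_streaming(wl, wlvif);
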
int wl1271_plt_start(struct wl1271 *wl);
int wl1271_plt_stop(struct wl1271 *wl);
-int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
-#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
+#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
+#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
#define WL1271_DEFAULT_POWER_LEVEL 0
@@ -669,8 +663,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
-/* WL128X requires aggregated packets to be aligned to the SDIO block size */
-#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2)
/* Older firmwares did not implement the FW logger over bus feature */
#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
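
Illustrative sketch of how the renamed quirk is typically consulted on the TX path, assuming the driver keeps its quirk bits in wl->quirks; the length and block-size variables are placeholders, not symbols from this patch.

	/* pad the aggregation buffer unless block-size alignment is unsupported */
	if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
		len = ALIGN(len, block_size);
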
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index f7971d3b089..8f0ffaf6230 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template {
u8 ta[ETH_ALEN];
} __packed;
-struct wl12xx_qos_null_data_template {
- struct ieee80211_header header;
- __le16 qos_ctl;
-} __packed;
-
struct wl12xx_arp_rsp_template {
struct ieee80211_hdr_3addr hdr;
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
index 973b11060a8..3c96b332184 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -2,7 +2,7 @@
#include <linux/err.h>
#include <linux/wl12xx.h>
-static const struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *platform_data;
int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
{
@@ -18,7 +18,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
return 0;
}
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
{
if (!platform_data)
return ERR_PTR(-ENODEV);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index cf0d69dd7be..785bdbe38f2 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -28,6 +28,7 @@
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
+#include <linux/module.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 161f207786a..94b79c3338c 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -58,10 +58,6 @@ struct xenvif {
u8 fe_dev_addr[6];
/* Physical parameters of the comms window. */
- grant_handle_t tx_shmem_handle;
- grant_ref_t tx_shmem_ref;
- grant_handle_t rx_shmem_handle;
- grant_ref_t rx_shmem_ref;
unsigned int irq;
/* List of frontends to notify after a batch of frames sent. */
@@ -70,8 +66,6 @@ struct xenvif {
/* The shared rings and indexes. */
struct xen_netif_tx_back_ring tx;
struct xen_netif_rx_back_ring rx;
- struct vm_struct *tx_comms_area;
- struct vm_struct *rx_comms_area;
/* Frontend feature information. */
u8 can_sg:1;
@@ -106,6 +100,11 @@ struct xenvif {
wait_queue_head_t waiting_to_free;
};
+static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
+{
+ return to_xenbus_device(vif->dev->dev.parent);
+}
+
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d5508957200..0cb594c8609 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1589,88 +1589,42 @@ static int xen_netbk_kthread(void *data)
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
- struct gnttab_unmap_grant_ref op;
-
- if (vif->tx.sring) {
- gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
- GNTMAP_host_map, vif->tx_shmem_handle);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
- }
-
- if (vif->rx.sring) {
- gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
- GNTMAP_host_map, vif->rx_shmem_handle);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
- }
- if (vif->rx_comms_area)
- free_vm_area(vif->rx_comms_area);
- if (vif->tx_comms_area)
- free_vm_area(vif->tx_comms_area);
+ if (vif->tx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->tx.sring);
+ if (vif->rx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->rx.sring);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
- struct gnttab_map_grant_ref op;
+ void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
int err = -ENOMEM;
- vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
- if (vif->tx_comms_area == NULL)
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ tx_ring_ref, &addr);
+ if (err)
goto err;
- vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
- if (vif->rx_comms_area == NULL)
- goto err;
-
- gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
- GNTMAP_host_map, tx_ring_ref, vif->domid);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status) {
- netdev_warn(vif->dev,
- "failed to map tx ring. err=%d status=%d\n",
- err, op.status);
- err = op.status;
- goto err;
- }
-
- vif->tx_shmem_ref = tx_ring_ref;
- vif->tx_shmem_handle = op.handle;
-
- txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
+ txs = (struct xen_netif_tx_sring *)addr;
BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
- gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
- GNTMAP_host_map, rx_ring_ref, vif->domid);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status) {
- netdev_warn(vif->dev,
- "failed to map rx ring. err=%d status=%d\n",
- err, op.status);
- err = op.status;
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ rx_ring_ref, &addr);
+ if (err)
goto err;
- }
-
- vif->rx_shmem_ref = rx_ring_ref;
- vif->rx_shmem_handle = op.handle;
- vif->rx_req_cons_peek = 0;
- rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
+ rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
+ vif->rx_req_cons_peek = 0;
+
return 0;
err:
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 5b0f1ff8036..06c3642e5bd 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -27,6 +27,7 @@
*
*/
#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/nfc.h>
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 7bcb1febef0..b8b6c2abbd4 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -72,6 +72,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -231,6 +232,26 @@ struct pn533_cmd_activate_response {
u8 gt[];
} __packed;
+/* PN533_CMD_IN_JUMP_FOR_DEP */
+struct pn533_cmd_jump_dep {
+ u8 active;
+ u8 baud;
+ u8 next;
+ u8 gt[];
+} __packed;
+
+struct pn533_cmd_jump_dep_response {
+ u8 status;
+ u8 tg;
+ u8 nfcid3t[10];
+ u8 didt;
+ u8 bst;
+ u8 brt;
+ u8 to;
+ u8 ppt;
+ /* optional */
+ u8 gt[];
+} __packed;
struct pn533 {
struct usb_device *udev;
@@ -1121,6 +1142,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
{
struct pn533_cmd_activate_param param;
struct pn533_cmd_activate_response *resp;
+ u16 gt_len;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1146,7 +1168,11 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
if (rc != PN533_CMD_RET_SUCCESS)
return -EIO;
- return 0;
+ /* ATR_RES general bytes are located at offset 16 */
+ gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len);
+
+ return rc;
}
static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
@@ -1239,6 +1265,142 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
return;
}
+
+static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_cmd_jump_dep *cmd;
+ struct pn533_cmd_jump_dep_response *resp;
+ struct nfc_target nfc_target;
+ u8 target_gt_len;
+ int rc;
+
+ if (params_len == -ENOENT) {
+ nfc_dev_dbg(&dev->interface->dev, "DEP link up command cancelled");
+ return 0;
+ }
+
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when bringing DEP link up",
+ params_len);
+ return 0;
+ }
+
+ if (dev->tgt_available_prots &&
+ !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
+ nfc_dev_err(&dev->interface->dev,
+ "The target does not support DEP");
+ return -EINVAL;
+ }
+
+ resp = (struct pn533_cmd_jump_dep_response *) params;
+ cmd = (struct pn533_cmd_jump_dep *) arg;
+ rc = resp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS) {
+ nfc_dev_err(&dev->interface->dev,
+ "Bringing DEP link up failed %d", rc);
+ return 0;
+ }
+
+ if (!dev->tgt_available_prots) {
+ nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+ if (rc)
+ return 0;
+
+ dev->tgt_available_prots = 0;
+ }
+
+ dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+ /* ATR_RES general bytes are located at offset 17 */
+ target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+ resp->gt, target_gt_len);
+ if (rc == 0)
+ rc = nfc_dep_link_is_up(dev->nfc_dev,
+ dev->nfc_dev->targets[0].idx,
+ !cmd->active, NFC_RF_INITIATOR);
+
+ return 0;
+}
+
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_cmd_jump_dep *cmd;
+ u8 cmd_len, local_gt_len, *local_gt;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (rf_mode == NFC_RF_TARGET) {
+ nfc_dev_err(&dev->interface->dev, "Target mode not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->poll_mod_count) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot bring the DEP link up while polling");
+ return -EBUSY;
+ }
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev,
+ "There is already an active target");
+ return -EBUSY;
+ }
+
+ local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len);
+ if (local_gt_len > NFC_MAX_GT_LEN)
+ return -EINVAL;
+
+ cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len;
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
+
+ cmd->active = !comm_mode;
+ cmd->baud = 0;
+ if (local_gt != NULL) {
+ cmd->next = 4; /* We have some Gi */
+ memcpy(cmd->gt, local_gt, local_gt_len);
+ } else {
+ cmd->next = 0;
+ }
+
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len);
+ dev->out_frame->datalen += cmd_len;
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_in_dep_link_up_complete,
+ cmd, GFP_KERNEL);
+ if (rc)
+ goto out;
+
+out:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
+{
+ pn533_deactivate_target(nfc_dev, 0);
+
+ return 0;
+}
+
#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
@@ -1339,7 +1501,7 @@ error:
return 0;
}
-int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
struct sk_buff *skb,
data_exchange_cb_t cb,
void *cb_context)
@@ -1368,7 +1530,7 @@ int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
PN533_CMD_DATAEXCH_DATA_MAXLEN +
PN533_FRAME_TAIL_SIZE;
- skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL);
+ skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
if (!skb_resp) {
rc = -ENOMEM;
goto error;
@@ -1434,6 +1596,8 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
struct nfc_ops pn533_nfc_ops = {
.dev_up = NULL,
.dev_down = NULL,
+ .dep_link_up = pn533_dep_link_up,
+ .dep_link_down = pn533_dep_link_down,
.start_poll = pn533_start_poll,
.stop_poll = pn533_stop_poll,
.activate_target = pn533_activate_target,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 3ff22e32b60..9b6588ef067 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -17,14 +17,39 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
+/**
+ * struct alias_prop - Alias property in 'aliases' node
+ * @link: List node to link the structure in aliases_lookup list
+ * @alias: Alias property name
+ * @np: Pointer to device_node that the alias stands for
+ * @id: Index value from end of alias name
+ * @stem: Alias string without the index
+ *
+ * The structure represents one alias property of 'aliases' node as
+ * an entry in aliases_lookup list.
+ */
+struct alias_prop {
+ struct list_head link;
+ const char *alias;
+ struct device_node *np;
+ int id;
+ char stem[0];
+};
+
+static LIST_HEAD(aliases_lookup);
+
struct device_node *allnodes;
struct device_node *of_chosen;
+struct device_node *of_aliases;
+
+static DEFINE_MUTEX(of_aliases_mutex);
/* use when traversing tree through the allnext, child, sibling,
* or parent members of struct device_node.
@@ -632,6 +657,35 @@ int of_property_read_u32_array(const struct device_node *np,
EXPORT_SYMBOL_GPL(of_property_read_u32_array);
/**
+ * of_property_read_u64 - Find and read a 64 bit integer from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_value: pointer to return value, modified only if return value is 0.
+ *
+ * Search for a property in a device node and read a 64-bit value from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_u64(const struct device_node *np, const char *propname,
+ u64 *out_value)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if (sizeof(*out_value) > prop->length)
+ return -EOVERFLOW;
+ *out_value = of_read_number(prop->value, 2);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u64);
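
A brief usage sketch of the new 64-bit accessor; the node pointer and property name are illustrative only.

	u64 freq;
	int err;

	err = of_property_read_u64(np, "clock-frequency", &freq);
	if (err)
		pr_warn("clock-frequency not usable: %d\n", err);
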
+
+/**
* of_property_read_string - Find and read a string from a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
@@ -662,6 +716,90 @@ int of_property_read_string(struct device_node *np, const char *propname,
EXPORT_SYMBOL_GPL(of_property_read_string);
/**
+ * of_property_read_string_index - Find and read a string from a multiple
+ * strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the string in the list of strings
+ * @output: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy) in the list of strings
+ * contained in that property.
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
+ *
+ * The output pointer is modified only if a valid string can be decoded.
+ */
+int of_property_read_string_index(struct device_node *np, const char *propname,
+ int index, const char **output)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+ int i = 0;
+ size_t l = 0, total = 0;
+ const char *p;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if (strnlen(prop->value, prop->length) >= prop->length)
+ return -EILSEQ;
+
+ p = prop->value;
+
+ for (i = 0; total < prop->length; total += l, p += l) {
+ l = strlen(p) + 1;
+ if ((*p != 0) && (i++ == index)) {
+ *output = p;
+ return 0;
+ }
+ }
+ return -ENODATA;
+}
+EXPORT_SYMBOL_GPL(of_property_read_string_index);
+
+
+/**
+ * of_property_count_strings - Find and return the number of strings from a
+ * multiple strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device tree node and retrieve the number of null
+ * terminated strings contained in it. Returns the number of strings on
+ * success, -EINVAL if the property does not exist, -ENODATA if property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ */
+int of_property_count_strings(struct device_node *np, const char *propname)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+ int i = 0;
+ size_t l = 0, total = 0;
+ const char *p;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if (strnlen(prop->value, prop->length) >= prop->length)
+ return -EILSEQ;
+
+ p = prop->value;
+
+ for (i = 0; total < prop->length; total += l, p += l) {
+ l = strlen(p) + 1;
+ if (*p != 0)
+ i++;
+ }
+ return i;
+}
+EXPORT_SYMBOL_GPL(of_property_count_strings);
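
The two string helpers are meant to be combined roughly as follows; the property name here is illustrative only.

	const char *name;
	int i, count;

	count = of_property_count_strings(np, "clock-output-names");
	for (i = 0; i < count; i++)
		if (!of_property_read_string_index(np, "clock-output-names",
						   i, &name))
			pr_info("output %d: %s\n", i, name);
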
+
+/**
* of_parse_phandle - Resolve a phandle property to a device_node pointer
* @np: Pointer to device node holding phandle property
* @phandle_name: Name of property holding a phandle value
@@ -988,3 +1126,99 @@ out_unlock:
}
#endif /* defined(CONFIG_OF_DYNAMIC) */
+static void of_alias_add(struct alias_prop *ap, struct device_node *np,
+ int id, const char *stem, int stem_len)
+{
+ ap->np = np;
+ ap->id = id;
+ strncpy(ap->stem, stem, stem_len);
+ ap->stem[stem_len] = 0;
+ list_add_tail(&ap->link, &aliases_lookup);
+ pr_debug("adding DT alias:%s: stem=%s id=%i node=%s\n",
+ ap->alias, ap->stem, ap->id, np ? np->full_name : NULL);
+}
+
+/**
+ * of_alias_scan - Scan all properties of 'aliases' node
+ *
+ * The function scans all the properties of the 'aliases' node and
+ * populates the global lookup table with them; entries that cannot
+ * be parsed are skipped.
+ *
+ * @dt_alloc: An allocator that provides a virtual address to memory
+ * for storing the resulting alias_prop entries
+ */
+void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
+{
+ struct property *pp;
+
+ of_chosen = of_find_node_by_path("/chosen");
+ if (of_chosen == NULL)
+ of_chosen = of_find_node_by_path("/chosen@0");
+ of_aliases = of_find_node_by_path("/aliases");
+ if (!of_aliases)
+ return;
+
+ for_each_property(pp, of_aliases->properties) {
+ const char *start = pp->name;
+ const char *end = start + strlen(start);
+ struct device_node *np;
+ struct alias_prop *ap;
+ int id, len;
+
+ /* Skip properties we do not want to process */
+ if (!strcmp(pp->name, "name") ||
+ !strcmp(pp->name, "phandle") ||
+ !strcmp(pp->name, "linux,phandle"))
+ continue;
+
+ np = of_find_node_by_path(pp->value);
+ if (!np)
+ continue;
+
+ /* walk the alias backwards to extract the id and work out
+ * the 'stem' string */
+ while (isdigit(*(end-1)) && end > start)
+ end--;
+ len = end - start;
+
+ if (kstrtoint(end, 10, &id) < 0)
+ continue;
+
+ /* Allocate an alias_prop with enough space for the stem */
+ ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+ if (!ap)
+ continue;
+ ap->alias = start;
+ of_alias_add(ap, np, id, start, len);
+ }
+}
+
+/**
+ * of_alias_get_id - Get alias id for the given device_node
+ * @np: Pointer to the given device_node
+ * @stem: Alias stem of the given device_node
+ *
+ * The function walks the lookup table to get the alias id for the given
+ * device_node and alias stem. It returns the alias id if found, or
+ * -ENODEV otherwise.
+ */
+int of_alias_get_id(struct device_node *np, const char *stem)
+{
+ struct alias_prop *app;
+ int id = -ENODEV;
+
+ mutex_lock(&of_aliases_mutex);
+ list_for_each_entry(app, &aliases_lookup, link) {
+ if (strcmp(app->stem, stem) != 0)
+ continue;
+
+ if (np == app->np) {
+ id = app->id;
+ break;
+ }
+ }
+ mutex_unlock(&of_aliases_mutex);
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(of_alias_get_id);
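
Typical intended use, sketched with an assumed platform-device probe context (the "serial" stem is illustrative):

	int id = of_alias_get_id(pdev->dev.of_node, "serial");

	if (id < 0)
		dev_warn(&pdev->dev, "no serial alias found, using dynamic id\n");
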
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 65200af29c5..fd85fa298e0 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -681,9 +681,14 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
if (p != NULL && l > 0)
strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
+ /*
+ * CONFIG_CMDLINE is meant to be a default in case nothing else
+ * managed to set the command line, unless CONFIG_CMDLINE_FORCE
+ * is set in which case we override whatever was found earlier.
+ */
#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
- if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
+ if (!((char *)data)[0])
#endif
strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
@@ -707,10 +712,8 @@ void __init unflatten_device_tree(void)
__unflatten_device_tree(initial_boot_params, &allnodes,
early_init_dt_alloc_memory_arch);
- /* Get pointer to OF "/chosen" node for use everywhere */
- of_chosen = of_find_node_by_path("/chosen");
- if (of_chosen == NULL)
- of_chosen = of_find_node_by_path("/chosen@0");
+ /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
+ of_alias_scan(early_init_dt_alloc_memory_arch);
}
#endif /* CONFIG_OF_EARLY_FLATTREE */
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 9f689f1da0f..6d3dd3988d0 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -19,10 +19,12 @@
*/
#include <linux/errno.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/string.h>
+#include <linux/slab.h>
/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
#ifndef NO_IRQ
@@ -58,27 +60,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
*/
struct device_node *of_irq_find_parent(struct device_node *child)
{
- struct device_node *p;
+ struct device_node *p, *c = child;
const __be32 *parp;
- if (!of_node_get(child))
+ if (!of_node_get(c))
return NULL;
do {
- parp = of_get_property(child, "interrupt-parent", NULL);
+ parp = of_get_property(c, "interrupt-parent", NULL);
if (parp == NULL)
- p = of_get_parent(child);
+ p = of_get_parent(c);
else {
if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
p = of_node_get(of_irq_dflt_pic);
else
p = of_find_node_by_phandle(be32_to_cpup(parp));
}
- of_node_put(child);
- child = p;
+ of_node_put(c);
+ c = p;
} while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
- return p;
+ return (p == child) ? NULL : p;
}
/**
@@ -386,3 +388,108 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
return i;
}
+
+struct intc_desc {
+ struct list_head list;
+ struct device_node *dev;
+ struct device_node *interrupt_parent;
+};
+
+/**
+ * of_irq_init - Scan and init matching interrupt controllers in DT
+ * @matches: 0 terminated array of nodes to match and init function to call
+ *
+ * This function scans the device tree for matching interrupt controller nodes,
+ * and calls their initialization functions in order with parents first.
+ */
+void __init of_irq_init(const struct of_device_id *matches)
+{
+ struct device_node *np, *parent = NULL;
+ struct intc_desc *desc, *temp_desc;
+ struct list_head intc_desc_list, intc_parent_list;
+
+ INIT_LIST_HEAD(&intc_desc_list);
+ INIT_LIST_HEAD(&intc_parent_list);
+
+ for_each_matching_node(np, matches) {
+ if (!of_find_property(np, "interrupt-controller", NULL))
+ continue;
+ /*
+ * Here, we allocate and populate an intc_desc with the node
+ * pointer, interrupt-parent device_node etc.
+ */
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (WARN_ON(!desc))
+ goto err;
+
+ desc->dev = np;
+ desc->interrupt_parent = of_irq_find_parent(np);
+ list_add_tail(&desc->list, &intc_desc_list);
+ }
+
+ /*
+ * The root irq controller is the one without an interrupt-parent.
+ * That one goes first, followed by the controllers that reference it,
+ * followed by the ones that reference the 2nd level controllers, etc.
+ */
+ while (!list_empty(&intc_desc_list)) {
+ /*
+ * Process all controllers with the current 'parent'.
+ * First pass will be looking for NULL as the parent.
+ * The assumption is that NULL parent means a root controller.
+ */
+ list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
+ const struct of_device_id *match;
+ int ret;
+ of_irq_init_cb_t irq_init_cb;
+
+ if (desc->interrupt_parent != parent)
+ continue;
+
+ list_del(&desc->list);
+ match = of_match_node(matches, desc->dev);
+ if (WARN(!match->data,
+ "of_irq_init: no init function for %s\n",
+ match->compatible)) {
+ kfree(desc);
+ continue;
+ }
+
+ pr_debug("of_irq_init: init %s @ %p, parent %p\n",
+ match->compatible,
+ desc->dev, desc->interrupt_parent);
+ irq_init_cb = match->data;
+ ret = irq_init_cb(desc->dev, desc->interrupt_parent);
+ if (ret) {
+ kfree(desc);
+ continue;
+ }
+
+ /*
+ * This one is now set up; add it to the parent list so
+ * its children can get processed in a subsequent pass.
+ */
+ list_add_tail(&desc->list, &intc_parent_list);
+ }
+
+ /* Get the next pending parent that might have children */
+ desc = list_first_entry(&intc_parent_list, typeof(*desc), list);
+ if (list_empty(&intc_parent_list) || !desc) {
+ pr_err("of_irq_init: children remain, but no parents\n");
+ break;
+ }
+ list_del(&desc->list);
+ parent = desc->dev;
+ kfree(desc);
+ }
+
+ list_for_each_entry_safe(desc, temp_desc, &intc_parent_list, list) {
+ list_del(&desc->list);
+ kfree(desc);
+ }
+err:
+ list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
+ list_del(&desc->list);
+ kfree(desc);
+ }
+}
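
An architecture would typically hand of_irq_init() a match table during early IRQ setup. The sketch below assumes a GIC init callback with the of_irq_init_cb_t signature; it is not part of this patch.

	static const struct of_device_id irq_match[] __initconst = {
		{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
		{ /* sentinel */ }
	};

	void __init board_init_irq(void)
	{
		of_irq_init(irq_match);
	}
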
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d35e300b0ad..980c079e444 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -83,7 +83,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
addr);
continue;
}
- phy_scan_fixups(phy);
/* Associate the OF node with the device structure so it
* can be looked up later */
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index bb184717588..ffab033d207 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/export.h>
/**
* It maps 'enum phy_interface_t' found in include/linux/phy.h
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 3701b62c1d5..13e37e2d8ec 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <asm/prom.h>
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index ac1ec54e4fd..93125163dea 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -1,6 +1,7 @@
#include <linux/kernel.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
+#include <linux/export.h>
#include <asm/prom.h>
/**
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index 1dbce58a58b..6dbc074e487 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -6,6 +6,7 @@
* tree.
*/
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 4d87b5dc928..bc5b3990f6e 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -229,6 +229,11 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
return ret;
}
+static void *kernel_tree_alloc(u64 size, u64 align)
+{
+ return prom_early_alloc(size);
+}
+
void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops)
{
struct device_node **nextp;
@@ -245,4 +250,7 @@ void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops)
nextp = &allnodes->allnext;
allnodes->child = of_pdt_build_tree(allnodes,
of_pdt_prom_ops->getchild(allnodes->phandle), &nextp);
+
+ /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
+ of_alias_scan(kernel_tree_alloc);
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index ed5a6d3c26a..cbd5d701c7e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -310,18 +310,21 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
struct device_node *np)
{
struct resource res;
- if (lookup) {
- for(; lookup->name != NULL; lookup++) {
- if (!of_device_is_compatible(np, lookup->compatible))
- continue;
- if (of_address_to_resource(np, 0, &res))
- continue;
- if (res.start != lookup->phys_addr)
- continue;
- pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
- return lookup;
- }
+
+ if (!lookup)
+ return NULL;
+
+ for(; lookup->name != NULL; lookup++) {
+ if (!of_device_is_compatible(np, lookup->compatible))
+ continue;
+ if (of_address_to_resource(np, 0, &res))
+ continue;
+ if (res.start != lookup->phys_addr)
+ continue;
+ pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
+ return lookup;
}
+
return NULL;
}
@@ -329,8 +332,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
* of_platform_bus_create() - Create a device for a node and its children.
* @bus: device node of the bus to instantiate
* @matches: match table for bus nodes
- * disallow recursive creation of child buses
+ * @lookup: auxdata table for matching id and platform_data with device nodes
* @parent: parent for new device, or NULL for top level.
+ * @strict: require compatible property
*
* Creates a platform_device for the provided device_node, and optionally
* recursively create devices for all the child nodes.
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 75a80e46b39..8b490d77054 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -44,6 +44,7 @@
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
+#include <linux/export.h>
#include <asm/byteorder.h>
#include <asm/cache.h> /* for L1_CACHE_BYTES */
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index a6f762188bc..8644d5372e7 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -39,6 +39,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#include <asm/ropes.h>
#include <asm/mckinley.h> /* for proc_mckinley_root */
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 0fa466a91bf..b6f9749b4fa 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -51,17 +51,6 @@ config XEN_PCIDEV_FRONTEND
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
-config XEN_PCIDEV_FE_DEBUG
- bool "Xen PCI Frontend debugging"
- depends on XEN_PCIDEV_FRONTEND && PCI_DEBUG
- help
- Say Y here if you want the Xen PCI frontend to produce a bunch of debug
- messages to the system log. Select this if you are having a
- problem with Xen PCI frontend support and want to see more of what is
- going on.
-
- When in doubt, say N.
-
config HT_IRQ
bool "Interrupts on hypertransport devices"
default y
@@ -71,9 +60,13 @@ config HT_IRQ
If unsure say Y.
+config PCI_ATS
+ bool
+
config PCI_IOV
bool "PCI IOV support"
depends on PCI
+ select PCI_ATS
help
I/O Virtualization is a PCI feature supported by some devices
which allows them to create virtual devices which share their
@@ -81,6 +74,28 @@ config PCI_IOV
If unsure, say N.
+config PCI_PRI
+ bool "PCI PRI support"
+ select PCI_ATS
+ help
+ PRI is the PCI Page Request Interface. It allows PCI devices that are
+ behind an IOMMU to recover from page faults.
+
+ If unsure, say N.
+
+config PCI_PASID
+ bool "PCI PASID support"
+ depends on PCI
+ select PCI_ATS
+ help
+ Process Address Space Identifiers (PASIDs) can be used by PCI devices
+ to access more than one IO address space at the same time. To make
+ use of this feature an IOMMU is required which also supports PASIDs.
+ Select this option if you have such an IOMMU and want to compile the
+ driver for it into your kernel.
+
+ If unsure, say N.
+
config PCI_IOAPIC
bool
depends on PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 6fadae3ad13..083a49fee56 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_PCI_MSI) += msi.o
# Build the Hypertransport interrupt support
obj-$(CONFIG_HT_IRQ) += htirq.o
+obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
#
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
new file mode 100644
index 00000000000..7ec56fb0bd7
--- /dev/null
+++ b/drivers/pci/ats.c
@@ -0,0 +1,439 @@
+/*
+ * drivers/pci/ats.c
+ *
+ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
+ * Copyright (C) 2011 Advanced Micro Devices,
+ *
+ * PCI Express I/O Virtualization (IOV) support.
+ * Address Translation Service 1.0
+ * Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com>
+ * PASID support added by Joerg Roedel <joerg.roedel@amd.com>
+ */
+
+#include <linux/export.h>
+#include <linux/pci-ats.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+
+static int ats_alloc_one(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 cap;
+ struct pci_ats *ats;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ ats = kzalloc(sizeof(*ats), GFP_KERNEL);
+ if (!ats)
+ return -ENOMEM;
+
+ ats->pos = pos;
+ ats->stu = ps;
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+ dev->ats = ats;
+
+ return 0;
+}
+
+static void ats_free_one(struct pci_dev *dev)
+{
+ kfree(dev->ats);
+ dev->ats = NULL;
+}
+
+/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int rc;
+ u16 ctrl;
+
+ BUG_ON(dev->ats && dev->ats->is_enabled);
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ if (pdev->ats)
+ rc = pdev->ats->stu == ps ? 0 : -EINVAL;
+ else
+ rc = ats_alloc_one(pdev, ps);
+
+ if (!rc)
+ pdev->ats->ref_cnt++;
+ mutex_unlock(&pdev->sriov->lock);
+ if (rc)
+ return rc;
+ }
+
+ if (!dev->is_physfn) {
+ rc = ats_alloc_one(dev, ps);
+ if (rc)
+ return rc;
+ }
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_ats);
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ BUG_ON(!dev->ats || !dev->ats->is_enabled);
+
+ pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 0;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ pdev->ats->ref_cnt--;
+ if (!pdev->ats->ref_cnt)
+ ats_free_one(pdev);
+ mutex_unlock(&pdev->sriov->lock);
+ }
+
+ if (!dev->is_physfn)
+ ats_free_one(dev);
+}
+EXPORT_SYMBOL_GPL(pci_disable_ats);
+
+/**
+ * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or negative on failure.
+ *
+ * The ATS spec uses 0 in the Invalidate Queue Depth field to
+ * indicate that the function can accept 32 Invalidate Requests.
+ * But here we use the `real' values (i.e. 1~32) for the Queue
+ * Depth; and 0 indicates the function shares the Queue with
+ * other functions (doesn't exclusively own a Queue).
+ */
+int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ if (dev->ats)
+ return dev->ats->qdep;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+}
+EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
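
A hedged sketch of the intended caller sequence for the relocated ATS helpers, as an IOMMU driver might use them; the page shift value is an assumption.

	/* enable ATS with a 4 KB smallest translation unit */
	if (!pci_enable_ats(pdev, PAGE_SHIFT)) {
		int qdep = pci_ats_queue_depth(pdev);

		/* ... bound outstanding invalidations by qdep ... */
		pci_disable_ats(pdev);
	}
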
+
+#ifdef CONFIG_PCI_PRI
+/**
+ * pci_enable_pri - Enable PRI capability
+ * @pdev: PCI device structure
+ * @reqs: outstanding page requests to allocate (capped at the device limit)
+ *
+ * Returns 0 on success, negative value on error
+ */
+int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
+{
+ u16 control, status;
+ u32 max_requests;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+ if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED))
+ return -EBUSY;
+
+ pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests);
+ reqs = min(max_requests, reqs);
+ pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs);
+
+ control |= PCI_PRI_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_pri);
+
+/**
+ * pci_disable_pri - Disable PRI capability
+ * @pdev: PCI device structure
+ *
+ * Only clears the enabled-bit, regardless of its former value
+ */
+void pci_disable_pri(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ if (!pos)
+ return;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ control &= ~PCI_PRI_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+}
+EXPORT_SYMBOL_GPL(pci_disable_pri);
+
+/**
+ * pci_pri_enabled - Checks if PRI capability is enabled
+ * @pdev: PCI device structure
+ *
+ * Returns true if PRI is enabled on the device, false otherwise
+ */
+bool pci_pri_enabled(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+
+ return (control & PCI_PRI_ENABLE) ? true : false;
+}
+EXPORT_SYMBOL_GPL(pci_pri_enabled);
+
+/**
+ * pci_reset_pri - Resets device's PRI state
+ * @pdev: PCI device structure
+ *
+ * The PRI capability must be disabled before this function is called.
+ * Returns 0 on success, negative value on error.
+ */
+int pci_reset_pri(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ if (control & PCI_PRI_ENABLE)
+ return -EBUSY;
+
+ control |= PCI_PRI_RESET;
+
+ pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_reset_pri);
+
+/**
+ * pci_pri_stopped - Checks whether the PRI capability is stopped
+ * @pdev: PCI device structure
+ *
+ * Returns true if the PRI capability on the device is disabled and the
+ * device has no outstanding PRI requests, false otherwise. The device
+ * indicates this via the STOPPED bit in the status register of the
+ * capability.
+ * The device internal state can be cleared by resetting the PRI state
+ * with pci_reset_pri(). This can force the capability into the STOPPED
+ * state.
+ */
+bool pci_pri_stopped(struct pci_dev *pdev)
+{
+ u16 control, status;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ if (!pos)
+ return true;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+
+ if (control & PCI_PRI_ENABLE)
+ return false;
+
+ return (status & PCI_PRI_STATUS_STOPPED) ? true : false;
+}
+EXPORT_SYMBOL_GPL(pci_pri_stopped);
+
+/**
+ * pci_pri_status - Request PRI status of a device
+ * @pdev: PCI device structure
+ *
+ * Returns negative value on failure, status on success. The status can
+ * be checked against status-bits. Supported bits are currently:
+ * PCI_PRI_STATUS_RF: Response failure
+ * PCI_PRI_STATUS_UPRGI: Unexpected Page Request Group Index
+ * PCI_PRI_STATUS_STOPPED: PRI has stopped
+ */
+int pci_pri_status(struct pci_dev *pdev)
+{
+ u16 status, control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+
+ /* Stopped bit is undefined when enable == 1, so clear it */
+ if (control & PCI_PRI_ENABLE)
+ status &= ~PCI_PRI_STATUS_STOPPED;
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pci_pri_status);
+#endif /* CONFIG_PCI_PRI */
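
Intended PRI call order, sketched with an assumed request budget of 32; this is illustrative only and uses nothing beyond the functions added above.

	if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
		/* ... service page requests through the IOMMU ... */
		pci_disable_pri(pdev);
	}
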
+
+#ifdef CONFIG_PCI_PASID
+/**
+ * pci_enable_pasid - Enable the PASID capability
+ * @pdev: PCI device structure
+ * @features: Features to enable
+ *
+ * Returns 0 on success, negative value on error. This function checks
+ * whether the features are actually supported by the device and returns
+ * an error if not.
+ */
+int pci_enable_pasid(struct pci_dev *pdev, int features)
+{
+ u16 control, supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+
+ if (!(supported & PCI_PASID_ENABLE))
+ return -EINVAL;
+
+ supported &= PCI_PASID_EXEC | PCI_PASID_PRIV;
+
+ /* User wants to enable anything unsupported? */
+ if ((supported & features) != features)
+ return -EINVAL;
+
+ control = PCI_PASID_ENABLE | features;
+
+ pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_pasid);
+
+/**
+ * pci_disable_pasid - Disable the PASID capability
+ * @pdev: PCI device structure
+ *
+ */
+void pci_disable_pasid(struct pci_dev *pdev)
+{
+ u16 control = 0;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ if (!pos)
+ return;
+
+ pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+}
+EXPORT_SYMBOL_GPL(pci_disable_pasid);
+
+/**
+ * pci_pasid_features - Check which PASID features are supported
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise it returns a bitmask with supported features. Current
+ * features reported are:
+ * PCI_PASID_ENABLE - PASID capability can be enabled
+ * PCI_PASID_EXEC - Execute permission supported
+ * PCI_PASID_PRIV - Privileged mode supported
+ */
+int pci_pasid_features(struct pci_dev *pdev)
+{
+ u16 supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+
+ supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV;
+
+ return supported;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_features);
+
+#define PASID_NUMBER_SHIFT 8
+#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
+/**
+ * pci_max_pasids - Get maximum number of PASIDs supported by device
+ * @pdev: PCI device structure
+ *
+ * Returns negative value when PASID capability is not present.
+ * Otherwise it returns the number of supported PASIDs.
+ */
+int pci_max_pasids(struct pci_dev *pdev)
+{
+ u16 supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+
+ supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
+
+ return (1 << supported);
+}
+EXPORT_SYMBOL_GPL(pci_max_pasids);
+#endif /* CONFIG_PCI_PASID */
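
A hedged sketch of how the PASID helpers compose, using only the functions and flags introduced above.

	int features = pci_pasid_features(pdev);

	if (features > 0 && (features & PCI_PASID_ENABLE)) {
		/* enable only what the device actually advertises */
		if (!pci_enable_pasid(pdev,
				      features & (PCI_PASID_EXEC | PCI_PASID_PRIV)))
			pr_info("PASID on, %d address spaces\n",
				pci_max_pasids(pdev));
	}
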
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
index 4d4a6447840..d3509cdeb55 100644
--- a/drivers/pci/hotplug-pci.c
+++ b/drivers/pci/hotplug-pci.c
@@ -1,6 +1,7 @@
/* Core PCI functionality used only by PCI hotplug */
#include <linux/pci.h>
+#include <linux/export.h>
#include "pci.h"
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 220285760b6..596172b4ae9 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -48,6 +48,7 @@
#include <linux/pci-acpi.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include "../pci.h"
#include "acpiphp.h"
@@ -1149,15 +1150,35 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK ;
}
-/**
- * handle_hotplug_event_bridge - handle ACPI event on bridges
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @context: pointer to acpiphp_bridge structure
- *
- * Handles ACPI event notification on {host,p2p} bridges.
- */
-static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *context)
+struct acpiphp_hp_work {
+ struct work_struct work;
+ acpi_handle handle;
+ u32 type;
+ void *context;
+};
+
+static void alloc_acpiphp_hp_work(acpi_handle handle, u32 type,
+ void *context,
+ void (*func)(struct work_struct *work))
+{
+ struct acpiphp_hp_work *hp_work;
+ int ret;
+
+ hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
+ if (!hp_work)
+ return;
+
+ hp_work->handle = handle;
+ hp_work->type = type;
+ hp_work->context = context;
+
+ INIT_WORK(&hp_work->work, func);
+ ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
+ if (!ret)
+ kfree(hp_work);
+}
+
+static void _handle_hotplug_event_bridge(struct work_struct *work)
{
struct acpiphp_bridge *bridge;
char objname[64];
@@ -1165,11 +1186,18 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
.pointer = objname };
struct acpi_device *device;
int num_sub_bridges = 0;
+ struct acpiphp_hp_work *hp_work;
+ acpi_handle handle;
+ u32 type;
+
+ hp_work = container_of(work, struct acpiphp_hp_work, work);
+ handle = hp_work->handle;
+ type = hp_work->type;
if (acpi_bus_get_device(handle, &device)) {
/* This bridge must have just been physically inserted */
handle_bridge_insertion(handle, type);
- return;
+ goto out;
}
bridge = acpiphp_handle_to_bridge(handle);
@@ -1180,7 +1208,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
if (!bridge && !num_sub_bridges) {
err("cannot get bridge info\n");
- return;
+ goto out;
}
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
@@ -1241,22 +1269,49 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
break;
}
+
+out:
+ kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
}
/**
- * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
+ * handle_hotplug_event_bridge - handle ACPI event on bridges
* @handle: Notify()'ed acpi_handle
* @type: Notify code
- * @context: pointer to acpiphp_func structure
+ * @context: pointer to acpiphp_bridge structure
*
- * Handles ACPI event notification on slots.
+ * Handles ACPI event notification on {host,p2p} bridges.
*/
-static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context)
+static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
+ void *context)
+{
+ /*
+ * Currently the code adds all hotplug events to the kacpid_wq
+ * queue when it should add hotplug events to the kacpi_hotplug_wq.
+ * The proper way to fix this is to reorganize the code so that
+ * drivers (dock, etc.) do not call acpi_os_execute(), etc.
+ * For now just re-add this work to the kacpi_hotplug_wq so we
+ * don't deadlock on hotplug actions.
+ */
+ alloc_acpiphp_hp_work(handle, type, context,
+ _handle_hotplug_event_bridge);
+}
+
+static void _handle_hotplug_event_func(struct work_struct *work)
{
struct acpiphp_func *func;
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
+ struct acpiphp_hp_work *hp_work;
+ acpi_handle handle;
+ u32 type;
+ void *context;
+
+ hp_work = container_of(work, struct acpiphp_hp_work, work);
+ handle = hp_work->handle;
+ type = hp_work->type;
+ context = hp_work->context;
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
@@ -1291,8 +1346,32 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *contex
warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
break;
}
+
+ kfree(hp_work); /* allocated in handle_hotplug_event_func */
}
+/**
+ * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
+ * @handle: Notify()'ed acpi_handle
+ * @type: Notify code
+ * @context: pointer to acpiphp_func structure
+ *
+ * Handles ACPI event notification on slots.
+ */
+static void handle_hotplug_event_func(acpi_handle handle, u32 type,
+ void *context)
+{
+ /*
+ * Currently the code adds all hotplug events to the kacpid_wq
+ * queue when it should add hotplug events to the kacpi_hotplug_wq.
+ * The proper way to fix this is to reorganize the code so that
+ * drivers (dock, etc.) do not call acpi_os_execute(), etc.
+ * For now just re-add this work to the kacpi_hotplug_wq so we
+ * don't deadlock on hotplug actions.
+ */
+ alloc_acpiphp_hp_work(handle, type, context,
+ _handle_hotplug_event_func);
+}
static acpi_status
find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 5f7226223a6..376d70d1717 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "pciehp.h"
#define PCIEHP_DETECT_PCIE (0)
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 3ffd9c1acc0..8c05a18c977 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -24,6 +24,7 @@
*/
#include <linux/pci.h>
+#include <linux/export.h>
#include <linux/pci_hotplug.h>
static struct hpp_type0 pci_default_type0 = {
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 1d002b1c2bf..c56a9413e1a 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -18,6 +18,7 @@
#undef DEBUG
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index db057b6fe0c..6e373ea57b3 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -9,6 +9,7 @@
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/htirq.h>
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 203508b227b..5775638ac01 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -17,6 +17,7 @@
*/
#include <linux/pci.h>
+#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 42fae477651..b82c155d7b3 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/pci-ats.h>
@@ -722,145 +723,3 @@ int pci_num_vf(struct pci_dev *dev)
return dev->sriov->nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_num_vf);
-
-static int ats_alloc_one(struct pci_dev *dev, int ps)
-{
- int pos;
- u16 cap;
- struct pci_ats *ats;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
- if (!pos)
- return -ENODEV;
-
- ats = kzalloc(sizeof(*ats), GFP_KERNEL);
- if (!ats)
- return -ENOMEM;
-
- ats->pos = pos;
- ats->stu = ps;
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
- ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
- dev->ats = ats;
-
- return 0;
-}
-
-static void ats_free_one(struct pci_dev *dev)
-{
- kfree(dev->ats);
- dev->ats = NULL;
-}
-
-/**
- * pci_enable_ats - enable the ATS capability
- * @dev: the PCI device
- * @ps: the IOMMU page shift
- *
- * Returns 0 on success, or negative on failure.
- */
-int pci_enable_ats(struct pci_dev *dev, int ps)
-{
- int rc;
- u16 ctrl;
-
- BUG_ON(dev->ats && dev->ats->is_enabled);
-
- if (ps < PCI_ATS_MIN_STU)
- return -EINVAL;
-
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-
- mutex_lock(&pdev->sriov->lock);
- if (pdev->ats)
- rc = pdev->ats->stu == ps ? 0 : -EINVAL;
- else
- rc = ats_alloc_one(pdev, ps);
-
- if (!rc)
- pdev->ats->ref_cnt++;
- mutex_unlock(&pdev->sriov->lock);
- if (rc)
- return rc;
- }
-
- if (!dev->is_physfn) {
- rc = ats_alloc_one(dev, ps);
- if (rc)
- return rc;
- }
-
- ctrl = PCI_ATS_CTRL_ENABLE;
- if (!dev->is_virtfn)
- ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 1;
-
- return 0;
-}
-
-/**
- * pci_disable_ats - disable the ATS capability
- * @dev: the PCI device
- */
-void pci_disable_ats(struct pci_dev *dev)
-{
- u16 ctrl;
-
- BUG_ON(!dev->ats || !dev->ats->is_enabled);
-
- pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
- ctrl &= ~PCI_ATS_CTRL_ENABLE;
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 0;
-
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-
- mutex_lock(&pdev->sriov->lock);
- pdev->ats->ref_cnt--;
- if (!pdev->ats->ref_cnt)
- ats_free_one(pdev);
- mutex_unlock(&pdev->sriov->lock);
- }
-
- if (!dev->is_physfn)
- ats_free_one(dev);
-}
-
-/**
- * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
- * @dev: the PCI device
- *
- * Returns the queue depth on success, or negative on failure.
- *
- * The ATS spec uses 0 in the Invalidate Queue Depth field to
- * indicate that the function can accept 32 Invalidate Request.
- * But here we use the `real' values (i.e. 1~32) for the Queue
- * Depth; and 0 indicates the function shares the Queue with
- * other functions (doesn't exclusively own a Queue).
- */
-int pci_ats_queue_depth(struct pci_dev *dev)
-{
- int pos;
- u16 cap;
-
- if (dev->is_virtfn)
- return 0;
-
- if (dev->ats)
- return dev->ats->qdep;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
- if (!pos)
- return -ENODEV;
-
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
-
- return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
-}
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index de01174aff0..e5f69a43b1b 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2f10328bf66..0e6d04d7ba4 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d36f41ea8cb..4ecb6408b0d 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -46,6 +46,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
struct pci_dev *pci_dev = context;
if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+ if (pci_dev->pme_poll)
+ pci_dev->pme_poll = false;
+
pci_wakeup_event(pci_dev);
pci_check_pme_status(pci_dev);
pm_runtime_resume(&pci_dev->dev);
@@ -282,7 +285,6 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
{
struct acpi_device *dev;
acpi_handle handle;
- int error = -ENODEV;
if (!device_run_wake(phys_dev))
return -EINVAL;
@@ -302,7 +304,7 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
acpi_disable_wakeup_device_power(dev);
}
- return error;
+ return 0;
}
static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7bcf12adced..106be0d08f8 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -19,6 +19,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
+#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e9651f0a881..6f45a73c6e9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1407,13 +1407,16 @@ bool pci_check_pme_status(struct pci_dev *dev)
/**
* pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
* @dev: Device to handle.
- * @ign: Ignored.
+ * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
*
* Check if @dev has generated PME and queue a resume request for it in that
* case.
*/
-static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
+ if (pme_poll_reset && dev->pme_poll)
+ dev->pme_poll = false;
+
if (pci_check_pme_status(dev)) {
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
@@ -1428,7 +1431,7 @@ static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
if (bus)
- pci_walk_bus(bus, pci_pme_wakeup, NULL);
+ pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}
/**
@@ -1446,31 +1449,26 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
static void pci_pme_list_scan(struct work_struct *work)
{
- struct pci_pme_device *pme_dev;
+ struct pci_pme_device *pme_dev, *n;
mutex_lock(&pci_pme_list_mutex);
if (!list_empty(&pci_pme_list)) {
- list_for_each_entry(pme_dev, &pci_pme_list, list)
- pci_pme_wakeup(pme_dev->dev, NULL);
- schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
+ list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
+ if (pme_dev->dev->pme_poll) {
+ pci_pme_wakeup(pme_dev->dev, NULL);
+ } else {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
+ }
+ }
+ if (!list_empty(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
}
mutex_unlock(&pci_pme_list_mutex);
}
/**
- * pci_external_pme - is a device an external PCI PME source?
- * @dev: PCI device to check
- *
- */
-
-static bool pci_external_pme(struct pci_dev *dev)
-{
- if (pci_is_pcie(dev) || dev->bus->number == 0)
- return false;
- return true;
-}
-
-/**
* pci_pme_active - enable or disable PCI device's PME# function
* @dev: PCI device to handle.
* @enable: 'true' to enable PME# generation; 'false' to disable it.
@@ -1503,7 +1501,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
hit, and the power savings from the devices will still be a
win. */
- if (pci_external_pme(dev)) {
+ if (dev->pme_poll) {
struct pci_pme_device *pme_dev;
if (enable) {
pme_dev = kmalloc(sizeof(struct pci_pme_device),
@@ -1821,6 +1819,7 @@ void pci_pm_init(struct pci_dev *dev)
(pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
+ dev->pme_poll = true;
/*
* Make device's PM flags reflect the wake-up capability, but
* let the user space enable it to wake up the system as needed.
@@ -3203,8 +3202,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
goto out;
- v = (ffs(rq) - 8) << 12;
-
cap = pci_pcie_cap(dev);
if (!cap)
goto out;
@@ -3212,6 +3209,22 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
if (err)
goto out;
+ /*
+ * If using the "performance" PCIe config, we clamp the
+ * read rq size to the max packet size to prevent the
+ * host bridge generating requests larger than we can
+ * cope with
+ */
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ int mps = pcie_get_mps(dev);
+
+ if (mps < 0)
+ return mps;
+ if (mps < rq)
+ rq = mps;
+ }
+
+ v = (ffs(rq) - 8) << 12;
if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
ctl &= ~PCI_EXP_DEVCTL_READRQ;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 0057344a3fc..001f1b78f39 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -84,6 +84,9 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip PCIe devices in case we started from a root port. */
if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
+ if (dev->pme_poll)
+ dev->pme_poll = false;
+
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
ret = true;
@@ -142,6 +145,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
/* First, check if the PME is from the root port itself. */
if (port->devfn == devfn && port->bus->number == busnr) {
+ if (port->pme_poll)
+ port->pme_poll = false;
+
if (pci_check_pme_status(port)) {
pm_request_resume(&port->dev);
found = true;
@@ -187,6 +193,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
/* The device is there, but we have to check its PME status. */
found = pci_check_pme_status(dev);
if (found) {
+ if (dev->pme_poll)
+ dev->pme_poll = false;
+
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6ab6bd3df4b..04e74f48571 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1363,31 +1363,25 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
- int rc, dev_mpss;
-
- dev_mpss = 128 << dev->pcie_mpss;
+ int rc;
if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
- if (dev->bus->self) {
- dev_dbg(&dev->bus->dev, "Bus MPSS %d\n",
- 128 << dev->bus->self->pcie_mpss);
+ mps = 128 << dev->pcie_mpss;
- /* For "MPS Force Max", the assumption is made that
+ if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self)
+ /* For "Performance", the assumption is made that
* downstream communication will never be larger than
* the MRRS. So, the MPS only needs to be configured
* for the upstream communication. This being the case,
* walk from the top down and set the MPS of the child
* to that of the parent bus.
+ *
+ * Configure the device MPS with the smaller of the
+ * device MPSS or the bridge MPS (which is assumed to be
+ * properly configured at this point to the largest
+ * allowable MPS based on its parent bus).
*/
- mps = 128 << dev->bus->self->pcie_mpss;
- if (mps > dev_mpss)
- dev_warn(&dev->dev, "MPS configured higher than"
- " maximum supported by the device. If"
- " a bus issue occurs, try running with"
- " pci=pcie_bus_safe.\n");
- }
-
- dev->pcie_mpss = ffs(mps) - 8;
+ mps = min(mps, pcie_get_mps(dev->bus->self));
}
rc = pcie_set_mps(dev, mps);
@@ -1395,25 +1389,22 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
-static void pcie_write_mrrs(struct pci_dev *dev, int mps)
+static void pcie_write_mrrs(struct pci_dev *dev)
{
- int rc, mrrs, dev_mpss;
+ int rc, mrrs;
/* In the "safe" case, do not configure the MRRS. There appear to be
* issues with setting MRRS to 0 on a number of devices.
*/
-
if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
return;
- dev_mpss = 128 << dev->pcie_mpss;
-
/* For Max performance, the MRRS must be set to the largest supported
* value. However, it cannot be configured larger than the MPS the
- * device or the bus can support. This assumes that the largest MRRS
- * available on the device cannot be smaller than the device MPSS.
+ * device or the bus can support. This should already be properly
+ * configured by a prior call to pcie_write_mps.
*/
- mrrs = min(mps, dev_mpss);
+ mrrs = pcie_get_mps(dev);
/* MRRS is a R/W register. Invalid values can be written, but a
* subsequent read will verify if the value is acceptable or not.
@@ -1421,38 +1412,41 @@ static void pcie_write_mrrs(struct pci_dev *dev, int mps)
* shrink the value until it is acceptable to the HW.
*/
while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
- dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
- " to %d. If any issues are encountered, please try "
- "running with pci=pcie_bus_safe\n", mrrs);
rc = pcie_set_readrq(dev, mrrs);
- if (rc)
- dev_err(&dev->dev,
- "Failed attempting to set the MRRS\n");
+ if (!rc)
+ break;
+ dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
mrrs /= 2;
}
+
+ if (mrrs < 128)
+ dev_err(&dev->dev, "MRRS was unable to be configured with a "
+ "safe value. If problems are experienced, try running "
+ "with pci=pcie_bus_safe.\n");
}
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
- int mps = 128 << *(u8 *)data;
+ int mps, orig_mps;
if (!pci_is_pcie(dev))
return 0;
- dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
- pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+ mps = 128 << *(u8 *)data;
+ orig_mps = pcie_get_mps(dev);
pcie_write_mps(dev, mps);
- pcie_write_mrrs(dev, mps);
+ pcie_write_mrrs(dev);
- dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
- pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+ dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
+ "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
+ orig_mps, pcie_get_readrq(dev));
return 0;
}
-/* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down,
+/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
* parents then children fashion. If this changes, then this code will not
* work as designed.
*/
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b23856aaf6e..64765474676 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -2745,20 +2746,6 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
/* disable must be done via function #0 */
if (PCI_FUNC(dev->devfn))
return;
-
- pci_read_config_byte(dev, 0xCB, &disable);
-
- if (disable & 0x02)
- return;
-
- pci_read_config_byte(dev, 0xCA, &write_enable);
- pci_write_config_byte(dev, 0xCA, 0x57);
- pci_write_config_byte(dev, 0xCB, disable | 0x02);
- pci_write_config_byte(dev, 0xCA, write_enable);
-
- dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
- dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
-
/*
* RICOH 0xe823 SD/MMC card reader fails to recognize
* certain types of SD/MMC cards. Lowering the SD base
@@ -2781,6 +2768,20 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
}
+
+ pci_read_config_byte(dev, 0xCB, &disable);
+
+ if (disable & 0x02)
+ return;
+
+ pci_read_config_byte(dev, 0xCA, &write_enable);
+ pci_write_config_byte(dev, 0xCA, 0x57);
+ pci_write_config_byte(dev, 0xCB, disable | 0x02);
+ pci_write_config_byte(dev, 0xCA, write_enable);
+
+ dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+ dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
@@ -2822,6 +2823,89 @@ static void __devinit fixup_ti816x_class(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
+/* Some PCIe devices do not work reliably with the claimed maximum
+ * payload size supported.
+ */
+static void __devinit fixup_mpss_256(struct pci_dev *dev)
+{
+ dev->pcie_mpss = 1; /* 256 bytes */
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+
+/* Intel 5000 and 5100 Memory controllers have an errata with read completion
+ * coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
+ * Since there is no way of knowing what the PCIE MPS on each fabric will be
+ * until all of the devices are discovered and buses walked, read completion
+ * coalescing must be disabled. Unfortunately, it cannot be re-enabled because
+ * it is possible to hotplug a device with MPS of 256B.
+ */
+static void __devinit quirk_intel_mc_errata(struct pci_dev *dev)
+{
+ int err;
+ u16 rcc;
+
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ return;
+
+ /* Intel errata specifies bits to change but does not say what they are.
+ * Keeping them magical until such time as the registers and values can
+ * be explained.
+ */
+ err = pci_read_config_word(dev, 0x48, &rcc);
+ if (err) {
+ dev_err(&dev->dev, "Error attempting to read the read "
+ "completion coalescing register.\n");
+ return;
+ }
+
+ if (!(rcc & (1 << 10)))
+ return;
+
+ rcc &= ~(1 << 10);
+
+ err = pci_write_config_word(dev, 0x48, rcc);
+ if (err) {
+ dev_err(&dev->dev, "Error attempting to write the read "
+ "completion coalescing register.\n");
+ return;
+ }
+
+ pr_info_once("Read completion coalescing disabled due to hardware "
+ "errata relating to 256B MPS.\n");
+}
+/* Intel 5000 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
+/* Intel 5100 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
+
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 36864a935d6..48ebdb237f3 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -7,6 +7,7 @@
* PCI ROM access routines
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 784da9d3602..86b69f85f90 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -426,7 +426,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
-static void pci_setup_bridge(struct pci_bus *bus)
+void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 51a9095c7da..5717509becb 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 968cfea04f7..ac6412fb8d6 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -7,6 +7,7 @@
#include <linux/kobject.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index a5a5ca17cfe..39b79070335 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -6,6 +6,7 @@
*/
#include <linux/pci.h>
+#include <linux/export.h>
int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
{
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 01757f18a20..3e49df6d5e3 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -24,6 +24,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/slab.h>
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index f56d7de7c75..22a75e610f1 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -97,7 +97,7 @@ static void balloon3_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
- __raw_writew(BALLOON3_CF_RESET, BALLOON3_CF_CONTROL_REG |
+ __raw_writew(BALLOON3_CF_RESET, BALLOON3_CF_CONTROL_REG +
((state->flags & SS_RESET) ?
BALLOON3_FPGA_SETnCLR : 0));
return 0;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 2c540542b5a..a87e2728b2c 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -231,6 +231,7 @@ void pxa2xx_configure_sockets(struct device *dev)
__raw_writel(mecr, MECR);
}
+EXPORT_SYMBOL(pxa2xx_configure_sockets);
static const char *skt_names[] = {
"PCMCIA socket 0",
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 63f4d5211ed..0b4f946cf13 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/export.h>
#include <asm/mach-types.h>
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 6ee42b4c3e6..923f315926e 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/export.h>
#include <asm/mach-types.h>
diff --git a/drivers/pcmcia/pxa2xx_cm_x2xx.c b/drivers/pcmcia/pxa2xx_cm_x2xx.c
index 4f09506ad8d..6e7dcfd22ed 100644
--- a/drivers/pcmcia/pxa2xx_cm_x2xx.c
+++ b/drivers/pcmcia/pxa2xx_cm_x2xx.c
@@ -12,9 +12,8 @@
#include <linux/module.h>
-#include <asm/system.h>
#include <asm/mach-types.h>
-#include <mach/system.h>
+#include <mach/hardware.h>
int cmx255_pcmcia_init(void);
int cmx270_pcmcia_init(void);
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index e956f659089..66ab92cf310 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -17,7 +17,7 @@
#include <asm/mach-types.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <mach/vpac270.h>
#include "soc_common.h"
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index ef566443f94..e17e2f8001d 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -2,23 +2,17 @@
# PINCTRL infrastructure and drivers
#
-menuconfig PINCTRL
- bool "PINCTRL Support"
+config PINCTRL
+ bool
depends on EXPERIMENTAL
- help
- This enables the PINCTRL subsystem for controlling pins
- on chip packages, for example multiplexing pins on primarily
- PGA and BGA packages for systems on chip.
-
- If unsure, say N.
if PINCTRL
+menu "Pin controllers"
+ depends on PINCTRL
+
config PINMUX
bool "Support pinmux controllers"
- help
- Say Y here if you want the pincontrol subsystem to handle pin
- multiplexing drivers.
config DEBUG_PINCTRL
bool "Debug PINCTRL calls"
@@ -30,14 +24,12 @@ config PINMUX_SIRF
bool "CSR SiRFprimaII pinmux driver"
depends on ARCH_PRIMA2
select PINMUX
- help
- Say Y here to enable the SiRFprimaII pinmux driver
config PINMUX_U300
bool "U300 pinmux driver"
depends on ARCH_U300
select PINMUX
- help
- Say Y here to enable the U300 pinmux driver
+
+endmenu
endif
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 423522d8731..eadef9e191e 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "pinctrl core: " fmt
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 10cf2500522..7f43cf86d77 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -61,15 +61,18 @@ config ASUS_LAPTOP
depends on INPUT
depends on RFKILL || RFKILL = n
select INPUT_SPARSEKMAP
+ select INPUT_POLLDEV
---help---
- This is the new Linux driver for Asus laptops. It may also support some
- MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
- standard ACPI events and input events. It also adds
- support for video output switching, LCD backlight control, Bluetooth and
- Wlan control, and most importantly, allows you to blink those fancy LEDs.
+ This is a driver for Asus laptops, Lenovo SL and the Pegatron
+ Lucid tablet. It may also support some MEDION, JVC or VICTOR
+ laptops. It makes all the extra buttons generate standard
+ ACPI events and input events, and on the Lucid the built-in
+ accelerometer appears as an input device. It also adds
+ support for video output switching, LCD backlight control,
+ Bluetooth and Wlan control, and most importantly, allows you
+ to blink those fancy LEDs.
- For more information and a userspace daemon for handling the extra
- buttons see <http://acpi4asus.sf.net>.
+ For more information see <http://acpi4asus.sf.net>.
If you have an ACPI-compatible ASUS laptop, say Y or M here.
@@ -80,8 +83,10 @@ config DELL_LAPTOP
depends on EXPERIMENTAL
depends on BACKLIGHT_CLASS_DEVICE
depends on RFKILL || RFKILL = n
- depends on POWER_SUPPLY
depends on SERIO_I8042
+ select POWER_SUPPLY
+ select LEDS_CLASS
+ select NEW_LEDS
default n
---help---
This driver adds support for rfkill and backlight control to Dell
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index af2bb20cb2f..b848277171a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -190,6 +190,7 @@ enum interface_flags {
ACER_AMW0,
ACER_AMW0_V2,
ACER_WMID,
+ ACER_WMID_v2,
};
#define ACER_DEFAULT_WIRELESS 0
@@ -205,6 +206,7 @@ static int threeg = -1;
static int force_series;
static bool ec_raw_mode;
static bool has_type_aa;
+static u16 commun_func_bitmap;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
@@ -464,6 +466,15 @@ static struct dmi_system_id acer_quirks[] = {
},
.driver_data = &quirk_lenovo_ideapad_s205,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo 3000 N200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "0687A31"),
+ },
+ .driver_data = &quirk_fujitsu_amilo_li_1718,
+ },
{}
};
@@ -868,6 +879,174 @@ static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
return WMI_execute_u32(method_id, (u32)value, NULL);
}
+static acpi_status wmid3_get_device_status(u32 *value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ struct wmid3_gds_input_param params = {
+ .function_num = 0x1,
+ .hotkey_number = 0x01,
+ .devices = device,
+ };
+ struct acpi_buffer input = {
+ sizeof(struct wmid3_gds_input_param),
+ &params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warn("Get 0x%x Device Status failed: 0x%x - 0x%x\n",
+ device,
+ return_value.error_code,
+ return_value.ec_return_value);
+ else
+ *value = !!(return_value.devices & device);
+
+ return status;
+}
+
+static acpi_status wmid_v2_get_u32(u32 *value, u32 cap)
+{
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_get_device_status(value, device);
+}
+
+static acpi_status wmid3_set_device_status(u32 value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ u16 devices;
+ struct wmid3_gds_input_param params = {
+ .function_num = 0x1,
+ .hotkey_number = 0x01,
+ .devices = commun_func_bitmap,
+ };
+ struct acpi_buffer input = {
+ sizeof(struct wmid3_gds_input_param),
+ &params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value) {
+ pr_warning("Get Current Device Status failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+ return status;
+ }
+
+ devices = return_value.devices;
+ params.function_num = 0x2;
+ params.hotkey_number = 0x01;
+ params.devices = (value) ? (devices | device) : (devices & ~device);
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output2);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output2.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 4) {
+ pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warning("Set Device Status failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static acpi_status wmid_v2_set_u32(u32 value, u32 cap)
+{
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_set_device_status(value, device);
+}
+
static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy)
{
struct hotkey_function_type_aa *type_aa;
@@ -881,6 +1060,7 @@ static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy)
pr_info("Function bitmap for Communication Button: 0x%x\n",
type_aa->commun_func_bitmap);
+ commun_func_bitmap = type_aa->commun_func_bitmap;
if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS)
interface->capability |= ACER_CAP_WIRELESS;
@@ -913,17 +1093,13 @@ static acpi_status WMID_set_capabilities(void)
return AE_ERROR;
}
- dmi_walk(type_aa_dmi_decode, NULL);
- if (!has_type_aa) {
+ pr_info("Function bitmap for Communication Device: 0x%x\n", devices);
+ if (devices & 0x07)
interface->capability |= ACER_CAP_WIRELESS;
- if (devices & 0x40)
- interface->capability |= ACER_CAP_THREEG;
- if (devices & 0x10)
- interface->capability |= ACER_CAP_BLUETOOTH;
- }
-
- /* WMID always provides brightness methods */
- interface->capability |= ACER_CAP_BRIGHTNESS;
+ if (devices & 0x40)
+ interface->capability |= ACER_CAP_THREEG;
+ if (devices & 0x10)
+ interface->capability |= ACER_CAP_BLUETOOTH;
if (!(devices & 0x20))
max_brightness = 0x9;
@@ -936,6 +1112,10 @@ static struct wmi_interface wmid_interface = {
.type = ACER_WMID,
};
+static struct wmi_interface wmid_v2_interface = {
+ .type = ACER_WMID_v2,
+};
+
/*
* Generic Device (interface-independent)
*/
@@ -956,6 +1136,14 @@ static acpi_status get_u32(u32 *value, u32 cap)
case ACER_WMID:
status = WMID_get_u32(value, cap, interface);
break;
+ case ACER_WMID_v2:
+ if (cap & (ACER_CAP_WIRELESS |
+ ACER_CAP_BLUETOOTH |
+ ACER_CAP_THREEG))
+ status = wmid_v2_get_u32(value, cap);
+ else if (wmi_has_guid(WMID_GUID2))
+ status = WMID_get_u32(value, cap, interface);
+ break;
}
return status;
@@ -989,6 +1177,13 @@ static acpi_status set_u32(u32 value, u32 cap)
}
case ACER_WMID:
return WMID_set_u32(value, cap, interface);
+ case ACER_WMID_v2:
+ if (cap & (ACER_CAP_WIRELESS |
+ ACER_CAP_BLUETOOTH |
+ ACER_CAP_THREEG))
+ return wmid_v2_set_u32(value, cap);
+ else if (wmi_has_guid(WMID_GUID2))
+ return WMID_set_u32(value, cap, interface);
default:
return AE_BAD_PARAMETER;
}
@@ -1095,186 +1290,6 @@ static void acer_backlight_exit(void)
backlight_device_unregister(acer_backlight_device);
}
-static acpi_status wmid3_get_device_status(u32 *value, u16 device)
-{
- struct wmid3_gds_return_value return_value;
- acpi_status status;
- union acpi_object *obj;
- struct wmid3_gds_input_param params = {
- .function_num = 0x1,
- .hotkey_number = 0x01,
- .devices = device,
- };
- struct acpi_buffer input = {
- sizeof(struct wmid3_gds_input_param),
- &params
- };
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-
- status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
- if (ACPI_FAILURE(status))
- return status;
-
- obj = output.pointer;
-
- if (!obj)
- return AE_ERROR;
- else if (obj->type != ACPI_TYPE_BUFFER) {
- kfree(obj);
- return AE_ERROR;
- }
- if (obj->buffer.length != 8) {
- pr_warn("Unknown buffer length %d\n", obj->buffer.length);
- kfree(obj);
- return AE_ERROR;
- }
-
- return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
- kfree(obj);
-
- if (return_value.error_code || return_value.ec_return_value)
- pr_warn("Get Device Status failed: 0x%x - 0x%x\n",
- return_value.error_code,
- return_value.ec_return_value);
- else
- *value = !!(return_value.devices & device);
-
- return status;
-}
-
-static acpi_status get_device_status(u32 *value, u32 cap)
-{
- if (wmi_has_guid(WMID_GUID3)) {
- u16 device;
-
- switch (cap) {
- case ACER_CAP_WIRELESS:
- device = ACER_WMID3_GDS_WIRELESS;
- break;
- case ACER_CAP_BLUETOOTH:
- device = ACER_WMID3_GDS_BLUETOOTH;
- break;
- case ACER_CAP_THREEG:
- device = ACER_WMID3_GDS_THREEG;
- break;
- default:
- return AE_ERROR;
- }
- return wmid3_get_device_status(value, device);
-
- } else {
- return get_u32(value, cap);
- }
-}
-
-static acpi_status wmid3_set_device_status(u32 value, u16 device)
-{
- struct wmid3_gds_return_value return_value;
- acpi_status status;
- union acpi_object *obj;
- u16 devices;
- struct wmid3_gds_input_param params = {
- .function_num = 0x1,
- .hotkey_number = 0x01,
- .devices = ACER_WMID3_GDS_WIRELESS |
- ACER_WMID3_GDS_THREEG |
- ACER_WMID3_GDS_WIMAX |
- ACER_WMID3_GDS_BLUETOOTH,
- };
- struct acpi_buffer input = {
- sizeof(struct wmid3_gds_input_param),
- &params
- };
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL };
-
- status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
- if (ACPI_FAILURE(status))
- return status;
-
- obj = output.pointer;
-
- if (!obj)
- return AE_ERROR;
- else if (obj->type != ACPI_TYPE_BUFFER) {
- kfree(obj);
- return AE_ERROR;
- }
- if (obj->buffer.length != 8) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
- kfree(obj);
- return AE_ERROR;
- }
-
- return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
- kfree(obj);
-
- if (return_value.error_code || return_value.ec_return_value) {
- pr_warning("Get Current Device Status failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
- return status;
- }
-
- devices = return_value.devices;
- params.function_num = 0x2;
- params.hotkey_number = 0x01;
- params.devices = (value) ? (devices | device) : (devices & ~device);
-
- status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output2);
- if (ACPI_FAILURE(status))
- return status;
-
- obj = output2.pointer;
-
- if (!obj)
- return AE_ERROR;
- else if (obj->type != ACPI_TYPE_BUFFER) {
- kfree(obj);
- return AE_ERROR;
- }
- if (obj->buffer.length != 4) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
- kfree(obj);
- return AE_ERROR;
- }
-
- return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
- kfree(obj);
-
- if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Set Device Status failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
-
- return status;
-}
-
-static acpi_status set_device_status(u32 value, u32 cap)
-{
- if (wmi_has_guid(WMID_GUID3)) {
- u16 device;
-
- switch (cap) {
- case ACER_CAP_WIRELESS:
- device = ACER_WMID3_GDS_WIRELESS;
- break;
- case ACER_CAP_BLUETOOTH:
- device = ACER_WMID3_GDS_BLUETOOTH;
- break;
- case ACER_CAP_THREEG:
- device = ACER_WMID3_GDS_THREEG;
- break;
- default:
- return AE_ERROR;
- }
- return wmid3_set_device_status(value, device);
-
- } else {
- return set_u32(value, cap);
- }
-}
-
/*
* Rfkill devices
*/
@@ -1285,12 +1300,13 @@ static void acer_rfkill_update(struct work_struct *ignored)
u32 state;
acpi_status status;
- status = get_u32(&state, ACER_CAP_WIRELESS);
- if (ACPI_SUCCESS(status)) {
- if (quirks->wireless == 3) {
- rfkill_set_hw_state(wireless_rfkill, !state);
- } else {
- rfkill_set_sw_state(wireless_rfkill, !state);
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ status = get_u32(&state, ACER_CAP_WIRELESS);
+ if (ACPI_SUCCESS(status)) {
+ if (quirks->wireless == 3)
+ rfkill_set_hw_state(wireless_rfkill, !state);
+ else
+ rfkill_set_sw_state(wireless_rfkill, !state);
}
}
@@ -1301,8 +1317,7 @@ static void acer_rfkill_update(struct work_struct *ignored)
}
if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) {
- status = wmid3_get_device_status(&state,
- ACER_WMID3_GDS_THREEG);
+ status = get_u32(&state, ACER_WMID3_GDS_THREEG);
if (ACPI_SUCCESS(status))
rfkill_set_sw_state(threeg_rfkill, !state);
}
@@ -1316,7 +1331,7 @@ static int acer_rfkill_set(void *data, bool blocked)
u32 cap = (unsigned long)data;
if (rfkill_inited) {
- status = set_device_status(!blocked, cap);
+ status = set_u32(!blocked, cap);
if (ACPI_FAILURE(status))
return -ENODEV;
}
@@ -1343,7 +1358,7 @@ static struct rfkill *acer_rfkill_register(struct device *dev,
if (!rfkill_dev)
return ERR_PTR(-ENOMEM);
- status = get_device_status(&state, cap);
+ status = get_u32(&state, cap);
err = rfkill_register(rfkill_dev);
if (err) {
@@ -1359,19 +1374,24 @@ static struct rfkill *acer_rfkill_register(struct device *dev,
static int acer_rfkill_init(struct device *dev)
{
- wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN,
- "acer-wireless", ACER_CAP_WIRELESS);
- if (IS_ERR(wireless_rfkill))
- return PTR_ERR(wireless_rfkill);
+ int err;
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN,
+ "acer-wireless", ACER_CAP_WIRELESS);
+ if (IS_ERR(wireless_rfkill)) {
+ err = PTR_ERR(wireless_rfkill);
+ goto error_wireless;
+ }
+ }
if (has_cap(ACER_CAP_BLUETOOTH)) {
bluetooth_rfkill = acer_rfkill_register(dev,
RFKILL_TYPE_BLUETOOTH, "acer-bluetooth",
ACER_CAP_BLUETOOTH);
if (IS_ERR(bluetooth_rfkill)) {
- rfkill_unregister(wireless_rfkill);
- rfkill_destroy(wireless_rfkill);
- return PTR_ERR(bluetooth_rfkill);
+ err = PTR_ERR(bluetooth_rfkill);
+ goto error_bluetooth;
}
}
@@ -1380,30 +1400,44 @@ static int acer_rfkill_init(struct device *dev)
RFKILL_TYPE_WWAN, "acer-threeg",
ACER_CAP_THREEG);
if (IS_ERR(threeg_rfkill)) {
- rfkill_unregister(wireless_rfkill);
- rfkill_destroy(wireless_rfkill);
- rfkill_unregister(bluetooth_rfkill);
- rfkill_destroy(bluetooth_rfkill);
- return PTR_ERR(threeg_rfkill);
+ err = PTR_ERR(threeg_rfkill);
+ goto error_threeg;
}
}
rfkill_inited = true;
- if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID))
+ if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) &&
+ has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG))
schedule_delayed_work(&acer_rfkill_work,
round_jiffies_relative(HZ));
return 0;
+
+error_threeg:
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+error_bluetooth:
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ rfkill_unregister(wireless_rfkill);
+ rfkill_destroy(wireless_rfkill);
+ }
+error_wireless:
+ return err;
}
static void acer_rfkill_exit(void)
{
- if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID))
+ if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) &&
+ has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG))
cancel_delayed_work_sync(&acer_rfkill_work);
- rfkill_unregister(wireless_rfkill);
- rfkill_destroy(wireless_rfkill);
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ rfkill_unregister(wireless_rfkill);
+ rfkill_destroy(wireless_rfkill);
+ }
if (has_cap(ACER_CAP_BLUETOOTH)) {
rfkill_unregister(bluetooth_rfkill);
@@ -1428,11 +1462,7 @@ static ssize_t show_bool_threeg(struct device *dev,
pr_info("This threeg sysfs will be removed in 2012"
" - used by: %s\n", current->comm);
- if (wmi_has_guid(WMID_GUID3))
- status = wmid3_get_device_status(&result,
- ACER_WMID3_GDS_THREEG);
- else
- status = get_u32(&result, ACER_CAP_THREEG);
+ status = get_u32(&result, ACER_CAP_THREEG);
if (ACPI_SUCCESS(status))
return sprintf(buf, "%u\n", result);
return sprintf(buf, "Read error\n");
@@ -1464,6 +1494,8 @@ static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "AMW0 v2\n");
case ACER_WMID:
return sprintf(buf, "WMID\n");
+ case ACER_WMID_v2:
+ return sprintf(buf, "WMID v2\n");
default:
return sprintf(buf, "Error!\n");
}
@@ -1883,12 +1915,20 @@ static int __init acer_wmi_init(void)
if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
interface = &wmid_interface;
+ if (wmi_has_guid(WMID_GUID3))
+ interface = &wmid_v2_interface;
+
+ if (interface)
+ dmi_walk(type_aa_dmi_decode, NULL);
+
if (wmi_has_guid(WMID_GUID2) && interface) {
- if (ACPI_FAILURE(WMID_set_capabilities())) {
+ if (!has_type_aa && ACPI_FAILURE(WMID_set_capabilities())) {
pr_err("Unable to detect available WMID devices\n");
return -ENODEV;
}
- } else if (!wmi_has_guid(WMID_GUID2) && interface) {
+ /* WMID always provides brightness methods */
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+ } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) {
pr_err("No WMID device detection method found\n");
return -ENODEV;
}
@@ -1912,7 +1952,7 @@ static int __init acer_wmi_init(void)
set_quirks();
- if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
+ if (acpi_video_backlight_support()) {
interface->capability &= ~ACER_CAP_BRIGHTNESS;
pr_info("Brightness must be controlled by "
"generic video driver\n");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index fa6d7ec68b2..edaccad9b5b 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
* Copyright (C) 2006-2007 Corentin Chary
+ * Copyright (C) 2011 Wind River Systems
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -48,6 +49,7 @@
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/input-polldev.h>
#include <linux/rfkill.h>
#include <linux/slab.h>
#include <linux/dmi.h>
@@ -83,26 +85,32 @@ static int wlan_status = 1;
static int bluetooth_status = 1;
static int wimax_status = -1;
static int wwan_status = -1;
+static int als_status;
module_param(wlan_status, int, 0444);
MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
- "default is 1");
+ "default is -1");
module_param(bluetooth_status, int, 0444);
MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
- "default is 1");
+ "default is -1");
module_param(wimax_status, int, 0444);
MODULE_PARM_DESC(wimax_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
- "default is 1");
+ "default is -1");
module_param(wwan_status, int, 0444);
MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
- "default is 1");
+ "default is -1");
+
+module_param(als_status, int, 0444);
+MODULE_PARM_DESC(als_status, "Set the ALS status on boot "
+ "(0 = disabled, 1 = enabled). "
+ "default is 0");
/*
* Some events we use, same for all Asus
@@ -173,6 +181,29 @@ MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot "
#define METHOD_KBD_LIGHT_SET "SLKB"
#define METHOD_KBD_LIGHT_GET "GLKB"
+/* For Pegatron Lucid tablet */
+#define DEVICE_NAME_PEGA "Lucid"
+
+#define METHOD_PEGA_ENABLE "ENPR"
+#define METHOD_PEGA_DISABLE "DAPR"
+#define PEGA_WLAN 0x00
+#define PEGA_BLUETOOTH 0x01
+#define PEGA_WWAN 0x02
+#define PEGA_ALS 0x04
+#define PEGA_ALS_POWER 0x05
+
+#define METHOD_PEGA_READ "RDLN"
+#define PEGA_READ_ALS_H 0x02
+#define PEGA_READ_ALS_L 0x03
+
+#define PEGA_ACCEL_NAME "pega_accel"
+#define PEGA_ACCEL_DESC "Pegatron Lucid Tablet Accelerometer"
+#define METHOD_XLRX "XLRX"
+#define METHOD_XLRY "XLRY"
+#define METHOD_XLRZ "XLRZ"
+#define PEGA_ACC_CLAMP 512 /* 1G accel is reported as ~256, so clamp to 2G */
+#define PEGA_ACC_RETRIES 3
+
/*
* Define a specific led structure to keep the main structure clean
*/
@@ -185,6 +216,15 @@ struct asus_led {
};
/*
+ * Same thing for rfkill
+ */
+struct asus_pega_rfkill {
+ int control_id; /* type of control. Maps to PEGA_* values */
+ struct rfkill *rfkill;
+ struct asus_laptop *asus;
+};
+
+/*
* This is the main structure, we can use it to store anything interesting
* about the hotk device
*/
@@ -198,6 +238,7 @@ struct asus_laptop {
struct input_dev *inputdev;
struct key_entry *keymap;
+ struct input_polled_dev *pega_accel_poll;
struct asus_led mled;
struct asus_led tled;
@@ -209,9 +250,18 @@ struct asus_laptop {
int wireless_status;
bool have_rsts;
+ bool is_pega_lucid;
+ bool pega_acc_live;
+ int pega_acc_x;
+ int pega_acc_y;
+ int pega_acc_z;
struct rfkill *gps_rfkill;
+ struct asus_pega_rfkill wlanrfk;
+ struct asus_pega_rfkill btrfk;
+ struct asus_pega_rfkill wwanrfk;
+
acpi_handle handle; /* the handle of the hotk device */
u32 ledd_status; /* status of the LED display */
u8 light_level; /* light sensor level */
@@ -323,6 +373,127 @@ static int acpi_check_handle(acpi_handle handle, const char *method,
return 0;
}
+static bool asus_check_pega_lucid(struct asus_laptop *asus)
+{
+ return !strcmp(asus->name, DEVICE_NAME_PEGA) &&
+ !acpi_check_handle(asus->handle, METHOD_PEGA_ENABLE, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_PEGA_DISABLE, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_PEGA_READ, NULL);
+}
+
+static int asus_pega_lucid_set(struct asus_laptop *asus, int unit, bool enable)
+{
+ char *method = enable ? METHOD_PEGA_ENABLE : METHOD_PEGA_DISABLE;
+ return write_acpi_int(asus->handle, method, unit);
+}
+
+static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method)
+{
+ int i, delta;
+ unsigned long long val;
+ for (i = 0; i < PEGA_ACC_RETRIES; i++) {
+ acpi_evaluate_integer(asus->handle, method, NULL, &val);
+
+ /* The output is noisy. From reading the ASL
+ * disassembly, timeout errors are returned with 1's
+ * in the high word, and the lack of locking around
+ * the hi/lo byte reads means that a transition
+ * between (for example) -1 and 0 could be read as
+ * 0xff00 or 0x00ff. */
+ delta = abs(curr - (short)val);
+ if (delta < 128 && !(val & ~0xffff))
+ break;
+ }
+ return clamp_val((short)val, -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP);
+}
+
+static void pega_accel_poll(struct input_polled_dev *ipd)
+{
+ struct device *parent = ipd->input->dev.parent;
+ struct asus_laptop *asus = dev_get_drvdata(parent);
+
+ /* In some cases, the very first call to poll causes a
+ * recursive fault under the polldev worker. This is
+ * apparently related to very early userspace access to the
+ * device, and perhaps a firmware bug. Fake the first report. */
+ if (!asus->pega_acc_live) {
+ asus->pega_acc_live = true;
+ input_report_abs(ipd->input, ABS_X, 0);
+ input_report_abs(ipd->input, ABS_Y, 0);
+ input_report_abs(ipd->input, ABS_Z, 0);
+ input_sync(ipd->input);
+ return;
+ }
+
+ asus->pega_acc_x = pega_acc_axis(asus, asus->pega_acc_x, METHOD_XLRX);
+ asus->pega_acc_y = pega_acc_axis(asus, asus->pega_acc_y, METHOD_XLRY);
+ asus->pega_acc_z = pega_acc_axis(asus, asus->pega_acc_z, METHOD_XLRZ);
+
+ /* Note transform, convert to "right/up/out" in the native
+ * landscape orientation (i.e. the vector is the direction of
+ * "real up" in the device's cartiesian coordinates). */
+ input_report_abs(ipd->input, ABS_X, -asus->pega_acc_x);
+ input_report_abs(ipd->input, ABS_Y, -asus->pega_acc_y);
+ input_report_abs(ipd->input, ABS_Z, asus->pega_acc_z);
+ input_sync(ipd->input);
+}
+
+static void pega_accel_exit(struct asus_laptop *asus)
+{
+ if (asus->pega_accel_poll) {
+ input_unregister_polled_device(asus->pega_accel_poll);
+ input_free_polled_device(asus->pega_accel_poll);
+ }
+ asus->pega_accel_poll = NULL;
+}
+
+static int pega_accel_init(struct asus_laptop *asus)
+{
+ int err;
+ struct input_polled_dev *ipd;
+
+ if (!asus->is_pega_lucid)
+ return -ENODEV;
+
+ if (acpi_check_handle(asus->handle, METHOD_XLRX, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_XLRY, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_XLRZ, NULL))
+ return -ENODEV;
+
+ ipd = input_allocate_polled_device();
+ if (!ipd)
+ return -ENOMEM;
+
+ ipd->poll = pega_accel_poll;
+ ipd->poll_interval = 125;
+ ipd->poll_interval_min = 50;
+ ipd->poll_interval_max = 2000;
+
+ ipd->input->name = PEGA_ACCEL_DESC;
+ ipd->input->phys = PEGA_ACCEL_NAME "/input0";
+ ipd->input->dev.parent = &asus->platform_device->dev;
+ ipd->input->id.bustype = BUS_HOST;
+
+ set_bit(EV_ABS, ipd->input->evbit);
+ input_set_abs_params(ipd->input, ABS_X,
+ -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
+ input_set_abs_params(ipd->input, ABS_Y,
+ -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
+ input_set_abs_params(ipd->input, ABS_Z,
+ -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
+
+ err = input_register_polled_device(ipd);
+ if (err)
+ goto exit;
+
+ asus->pega_accel_poll = ipd;
+ return 0;
+
+exit:
+ input_free_polled_device(ipd);
+ return err;
+}
+
/* Generic LED function */
static int asus_led_set(struct asus_laptop *asus, const char *method,
int value)
@@ -430,17 +601,17 @@ static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
static void asus_led_exit(struct asus_laptop *asus)
{
- if (asus->mled.led.dev)
+ if (!IS_ERR_OR_NULL(asus->mled.led.dev))
led_classdev_unregister(&asus->mled.led);
- if (asus->tled.led.dev)
+ if (!IS_ERR_OR_NULL(asus->tled.led.dev))
led_classdev_unregister(&asus->tled.led);
- if (asus->pled.led.dev)
+ if (!IS_ERR_OR_NULL(asus->pled.led.dev))
led_classdev_unregister(&asus->pled.led);
- if (asus->rled.led.dev)
+ if (!IS_ERR_OR_NULL(asus->rled.led.dev))
led_classdev_unregister(&asus->rled.led);
- if (asus->gled.led.dev)
+ if (!IS_ERR_OR_NULL(asus->gled.led.dev))
led_classdev_unregister(&asus->gled.led);
- if (asus->kled.led.dev)
+ if (!IS_ERR_OR_NULL(asus->kled.led.dev))
led_classdev_unregister(&asus->kled.led);
if (asus->led_workqueue) {
destroy_workqueue(asus->led_workqueue);
@@ -474,6 +645,13 @@ static int asus_led_init(struct asus_laptop *asus)
int r;
/*
+ * The Pegatron Lucid has no physical leds, but all methods are
+ * available in the DSDT...
+ */
+ if (asus->is_pega_lucid)
+ return 0;
+
+ /*
* Functions that actually update the LED's are called from a
* workqueue. By doing this as separate work rather than when the LED
* subsystem asks, we avoid messing with the Asus ACPI stuff during a
@@ -907,8 +1085,18 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
*/
static void asus_als_switch(struct asus_laptop *asus, int value)
{
- if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
- pr_warn("Error setting light sensor switch\n");
+ int ret;
+
+ if (asus->is_pega_lucid) {
+ ret = asus_pega_lucid_set(asus, PEGA_ALS, value);
+ if (!ret)
+ ret = asus_pega_lucid_set(asus, PEGA_ALS_POWER, value);
+ } else {
+ ret = write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value);
+ }
+ if (ret)
+ pr_warning("Error setting light sensor switch\n");
+
asus->light_switch = value;
}
@@ -964,6 +1152,35 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
return rv;
}
+static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ int err = write_acpi_int_ret(asus->handle, METHOD_PEGA_READ, arg,
+ &buffer);
+ if (!err) {
+ union acpi_object *obj = buffer.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *result = obj->integer.value;
+ else
+ err = -EIO;
+ }
+ return err;
+}
+
+static ssize_t show_lsvalue(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int err, hi, lo;
+
+ err = pega_int_read(asus, PEGA_READ_ALS_H, &hi);
+ if (!err)
+ err = pega_int_read(asus, PEGA_READ_ALS_L, &lo);
+ if (!err)
+ return sprintf(buf, "%d\n", 10 * hi + lo);
+ return err;
+}
+
/*
* GPS
*/
@@ -1062,6 +1279,86 @@ static int asus_rfkill_init(struct asus_laptop *asus)
return result;
}
+static int pega_rfkill_set(void *data, bool blocked)
+{
+ struct asus_pega_rfkill *pega_rfk = data;
+
+ int ret = asus_pega_lucid_set(pega_rfk->asus, pega_rfk->control_id, !blocked);
+ pr_warn("Setting rfkill %d, to %d; returned %d\n", pega_rfk->control_id, !blocked, ret);
+
+ return ret;
+}
+
+static const struct rfkill_ops pega_rfkill_ops = {
+ .set_block = pega_rfkill_set,
+};
+
+static void pega_rfkill_terminate(struct asus_pega_rfkill *pega_rfk)
+{
+ pr_warn("Terminating %d\n", pega_rfk->control_id);
+ if (pega_rfk->rfkill) {
+ rfkill_unregister(pega_rfk->rfkill);
+ rfkill_destroy(pega_rfk->rfkill);
+ pega_rfk->rfkill = NULL;
+ }
+}
+
+static void pega_rfkill_exit(struct asus_laptop *asus)
+{
+ pega_rfkill_terminate(&asus->wwanrfk);
+ pega_rfkill_terminate(&asus->btrfk);
+ pega_rfkill_terminate(&asus->wlanrfk);
+}
+
+static int pega_rfkill_setup(struct asus_laptop *asus, struct asus_pega_rfkill *pega_rfk,
+ const char *name, int controlid, int rfkill_type)
+{
+ int result;
+
+ pr_warn("Setting up rfk %s, control %d, type %d\n", name, controlid, rfkill_type);
+ pega_rfk->control_id = controlid;
+ pega_rfk->asus = asus;
+ pega_rfk->rfkill = rfkill_alloc(name, &asus->platform_device->dev,
+ rfkill_type, &pega_rfkill_ops, pega_rfk);
+ if (!pega_rfk->rfkill)
+ return -EINVAL;
+
+ result = rfkill_register(pega_rfk->rfkill);
+ if (result) {
+ rfkill_destroy(pega_rfk->rfkill);
+ pega_rfk->rfkill = NULL;
+ }
+
+ return result;
+}
+
+static int pega_rfkill_init(struct asus_laptop *asus)
+{
+ int ret = 0;
+
+ if(!asus->is_pega_lucid)
+ return -ENODEV;
+
+ ret = pega_rfkill_setup(asus, &asus->wlanrfk, "pega-wlan", PEGA_WLAN, RFKILL_TYPE_WLAN);
+ if(ret)
+ return ret;
+ ret = pega_rfkill_setup(asus, &asus->btrfk, "pega-bt", PEGA_BLUETOOTH, RFKILL_TYPE_BLUETOOTH);
+ if(ret)
+ goto err_btrfk;
+ ret = pega_rfkill_setup(asus, &asus->wwanrfk, "pega-wwan", PEGA_WWAN, RFKILL_TYPE_WWAN);
+ if(ret)
+ goto err_wwanrfk;
+
+ pr_warn("Pega rfkill init succeeded\n");
+ return 0;
+err_wwanrfk:
+ pega_rfkill_terminate(&asus->btrfk);
+err_btrfk:
+ pega_rfkill_terminate(&asus->wlanrfk);
+
+ return ret;
+}
+
/*
* Input device (i.e. hotkeys)
*/
@@ -1141,6 +1438,14 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
}
return ;
}
+
+ /* Accelerometer "coarse orientation change" event */
+ if (asus->pega_accel_poll && event == 0xEA) {
+ kobject_uevent(&asus->pega_accel_poll->input->dev.kobj,
+ KOBJ_CHANGE);
+ return ;
+ }
+
asus_input_notify(asus, event);
}
@@ -1152,6 +1457,7 @@ static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp);
static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
+static DEVICE_ATTR(ls_value, S_IRUGO, show_lsvalue, NULL);
static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
@@ -1164,6 +1470,7 @@ static struct attribute *asus_attributes[] = {
&dev_attr_wwan.attr,
&dev_attr_display.attr,
&dev_attr_ledd.attr,
+ &dev_attr_ls_value.attr,
&dev_attr_ls_level.attr,
&dev_attr_ls_switch.attr,
&dev_attr_gps.attr,
@@ -1180,6 +1487,19 @@ static mode_t asus_sysfs_is_visible(struct kobject *kobj,
acpi_handle handle = asus->handle;
bool supported;
+ if (asus->is_pega_lucid) {
+ /* no ls_level interface on the Lucid */
+ if (attr == &dev_attr_ls_switch.attr)
+ supported = true;
+ else if (attr == &dev_attr_ls_level.attr)
+ supported = false;
+ else
+ goto normal;
+
+ return supported;
+ }
+
+normal:
if (attr == &dev_attr_wlan.attr) {
supported = !acpi_check_handle(handle, METHOD_WLAN, NULL);
@@ -1202,8 +1522,9 @@ static mode_t asus_sysfs_is_visible(struct kobject *kobj,
} else if (attr == &dev_attr_ls_switch.attr ||
attr == &dev_attr_ls_level.attr) {
supported = !acpi_check_handle(handle, METHOD_ALS_CONTROL, NULL) &&
- !acpi_check_handle(handle, METHOD_ALS_LEVEL, NULL);
-
+ !acpi_check_handle(handle, METHOD_ALS_LEVEL, NULL);
+ } else if (attr == &dev_attr_ls_value.attr) {
+ supported = asus->is_pega_lucid;
} else if (attr == &dev_attr_gps.attr) {
supported = !acpi_check_handle(handle, METHOD_GPS_ON, NULL) &&
!acpi_check_handle(handle, METHOD_GPS_OFF, NULL) &&
@@ -1258,7 +1579,7 @@ static struct platform_driver platform_driver = {
.driver = {
.name = ASUS_LAPTOP_FILE,
.owner = THIS_MODULE,
- }
+ },
};
/*
@@ -1388,11 +1709,13 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus)
asus->ledd_status = 0xFFF;
/* Set initial values of light sensor and level */
- asus->light_switch = 0; /* Default to light sensor disabled */
+ asus->light_switch = !!als_status;
asus->light_level = 5; /* level 5 for sensor sensitivity */
- if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
- !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ if (asus->is_pega_lucid) {
+ asus_als_switch(asus, asus->light_switch);
+ } else if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
asus_als_switch(asus, asus->light_switch);
asus_als_level(asus, asus->light_level);
}
@@ -1439,9 +1762,10 @@ static int __devinit asus_acpi_add(struct acpi_device *device)
goto fail_platform;
/*
- * Register the platform device first. It is used as a parent for the
- * sub-devices below.
+ * Need platform type detection first, then the platform
+ * device. It is used as a parent for the sub-devices below.
*/
+ asus->is_pega_lucid = asus_check_pega_lucid(asus);
result = asus_platform_init(asus);
if (result)
goto fail_platform;
@@ -1465,9 +1789,21 @@ static int __devinit asus_acpi_add(struct acpi_device *device)
if (result)
goto fail_rfkill;
+ result = pega_accel_init(asus);
+ if (result && result != -ENODEV)
+ goto fail_pega_accel;
+
+ result = pega_rfkill_init(asus);
+ if (result && result != -ENODEV)
+ goto fail_pega_rfkill;
+
asus_device_present = true;
return 0;
+fail_pega_rfkill:
+ pega_accel_exit(asus);
+fail_pega_accel:
+ asus_rfkill_exit(asus);
fail_rfkill:
asus_led_exit(asus);
fail_led:
@@ -1491,6 +1827,8 @@ static int asus_acpi_remove(struct acpi_device *device, int type)
asus_rfkill_exit(asus);
asus_led_exit(asus);
asus_input_exit(asus);
+ pega_accel_exit(asus);
+ pega_rfkill_exit(asus);
asus_platform_exit(asus);
kfree(asus->name);
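Editor's note on the error paths added above: pega_rfkill_init() and the fail_pega_* labels in asus_acpi_add() follow the usual kernel unwind idiom, where each setup step that can fail jumps to a label that tears down only the steps that already succeeded, in reverse order. A minimal sketch of the idiom, using hypothetical setup_a()/setup_b()/setup_c() and teardown_*() helpers rather than anything from this patch:

static int example_init(struct example *ex)
{
	int ret;

	ret = setup_a(ex);	/* first resource: nothing to undo on failure */
	if (ret)
		return ret;

	ret = setup_b(ex);	/* second resource: undo only A on failure */
	if (ret)
		goto err_a;

	ret = setup_c(ex);	/* third resource: undo B, then A */
	if (ret)
		goto err_b;

	return 0;

err_b:
	teardown_b(ex);
err_a:
	teardown_a(ex);
	return ret;
}

The optional Pegatron sub-devices additionally treat -ENODEV as non-fatal (result && result != -ENODEV), so non-Pegatron machines simply skip them without failing the whole probe.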
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 95cba9ebf6c..d1049ee3c9e 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -453,7 +453,9 @@ static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
- if (asus->tpd_led.dev)
+ if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
+ led_classdev_unregister(&asus->kbd_led);
+ if (!IS_ERR_OR_NULL(asus->tpd_led.dev))
led_classdev_unregister(&asus->tpd_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
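Editor's note: the asus-wmi change above (and the identical eeepc-laptop change further down) guards LED teardown with IS_ERR_OR_NULL() because a led_classdev's .dev member is NULL before registration and, if led_classdev_register() fails, may be left holding an ERR_PTR rather than a valid device. A minimal sketch of the guard, with a hypothetical driver structure:

#include <linux/err.h>
#include <linux/leds.h>

struct foo_laptop {
	struct led_classdev kbd_led;	/* may never get registered */
};

static void foo_led_exit(struct foo_laptop *foo)
{
	/* skip both the "never registered" (NULL) and the
	 * "registration failed" (possible ERR_PTR) cases */
	if (!IS_ERR_OR_NULL(foo->kbd_led.dev))
		led_classdev_unregister(&foo->kbd_led);
}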
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f31fa4efa72..a43cfd906c6 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -60,6 +60,22 @@ struct calling_interface_structure {
struct calling_interface_token tokens[];
} __packed;
+struct quirk_entry {
+ u8 touchpad_led;
+};
+
+static struct quirk_entry *quirks;
+
+static struct quirk_entry quirk_dell_vostro_v130 = {
+ .touchpad_led = 1,
+};
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -149,6 +165,27 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
{}
};
+static struct dmi_system_id __devinitdata dell_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V130",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V131",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+};
+
static struct calling_interface_buffer *buffer;
static struct page *bufferpage;
static DEFINE_MUTEX(buffer_mutex);
@@ -552,6 +589,44 @@ static const struct backlight_ops dell_ops = {
.update_status = dell_send_intensity,
};
+static void touchpad_led_on(void)
+{
+ int command = 0x97;
+ char data = 1;
+ i8042_command(&data, command | 1 << 12);
+}
+
+static void touchpad_led_off(void)
+{
+ int command = 0x97;
+ char data = 2;
+ i8042_command(&data, command | 1 << 12);
+}
+
+static void touchpad_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value > 0)
+ touchpad_led_on();
+ else
+ touchpad_led_off();
+}
+
+static struct led_classdev touchpad_led = {
+ .name = "dell-laptop::touchpad",
+ .brightness_set = touchpad_led_set,
+};
+
+static int __devinit touchpad_led_init(struct device *dev)
+{
+ return led_classdev_register(dev, &touchpad_led);
+}
+
+static void touchpad_led_exit(void)
+{
+ led_classdev_unregister(&touchpad_led);
+}
+
static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
@@ -584,6 +659,10 @@ static int __init dell_init(void)
if (!dmi_check_system(dell_device_table))
return -ENODEV;
+ quirks = NULL;
+ /* find out if this machine supports other functions */
+ dmi_check_system(dell_quirks);
+
dmi_walk(find_tokens, NULL);
if (!da_tokens) {
@@ -626,6 +705,9 @@ static int __init dell_init(void)
goto fail_filter;
}
+ if (quirks && quirks->touchpad_led)
+ touchpad_led_init(&platform_device->dev);
+
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -692,6 +774,8 @@ fail_platform_driver:
static void __exit dell_exit(void)
{
debugfs_remove_recursive(dell_laptop_dir);
+ if (quirks && quirks->touchpad_led)
+ touchpad_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
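Editor's note: the quirk handling added to dell-laptop above is the common DMI-quirk pattern: dmi_check_system() walks the table, and a matching entry's callback records the attached driver_data so later code can key optional features (here, the touchpad LED) off it. A self-contained sketch of the same pattern with hypothetical names:

#include <linux/dmi.h>

struct my_quirks {
	bool touchpad_led;
};

static struct my_quirks *active_quirks;	/* stays NULL when nothing matched */

static struct my_quirks quirk_model_x = {
	.touchpad_led = true,
};

static int my_dmi_matched(const struct dmi_system_id *dmi)
{
	active_quirks = dmi->driver_data;
	return 1;			/* non-zero stops the table walk */
}

static const struct dmi_system_id my_quirk_table[] = {
	{
		.callback = my_dmi_matched,
		.ident = "Example Model X",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
		},
		.driver_data = &quirk_model_x,
	},
	{ }				/* table must be zero-terminated */
};

/* probe path:
 *	dmi_check_system(my_quirk_table);
 *	if (active_quirks && active_quirks->touchpad_led)
 *		...enable the optional feature...
 */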
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 1c45d92e216..ea44abd8df4 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -568,7 +568,7 @@ static int eeepc_led_init(struct eeepc_laptop *eeepc)
static void eeepc_led_exit(struct eeepc_laptop *eeepc)
{
- if (eeepc->tpd_led.dev)
+ if (!IS_ERR_OR_NULL(eeepc->tpd_led.dev))
led_classdev_unregister(&eeepc->tpd_led);
if (eeepc->led_workqueue)
destroy_workqueue(eeepc->led_workqueue);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 1b52d00e2f9..22b2dfa7314 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -76,6 +76,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
/* For automatic insertion of the module */
static struct acpi_device_id lis3lv02d_device_ids[] = {
{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
@@ -209,6 +210,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
AXIS_DMI_MATCH("NC693xx", "HP EliteBook 853", xy_swap),
+ AXIS_DMI_MATCH("NC854xx", "HP EliteBook 854", y_inverted),
+ AXIS_DMI_MATCH("NC273xx", "HP EliteBook 273", y_inverted),
/* Intel-based HP Pavilion dv5 */
AXIS_DMI_MATCH2("HPDV5_I",
PRODUCT_NAME, "HP Pavilion dv5",
@@ -227,7 +230,12 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+ AXIS_DMI_MATCH("HPB655x", "HP ProBook 655", xy_swap_inverted),
AXIS_DMI_MATCH("Mini510x", "HP Mini 510", xy_rotated_left_usd),
+ AXIS_DMI_MATCH("HPB63xx", "HP ProBook 63", xy_swap),
+ AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
+ AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
+ AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
@@ -320,7 +328,7 @@ static int lis3lv02d_add(struct acpi_device *device)
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
ret = led_classdev_register(NULL, &hpled_led.led_classdev);
if (ret) {
- lis3lv02d_joystick_disable();
+ lis3lv02d_joystick_disable(&lis3_dev);
lis3lv02d_poweroff(&lis3_dev);
flush_work(&hpled_led.work);
return ret;
@@ -334,7 +342,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
if (!device)
return -EINVAL;
- lis3lv02d_joystick_disable();
+ lis3lv02d_joystick_disable(&lis3_dev);
lis3lv02d_poweroff(&lis3_dev);
led_classdev_unregister(&hpled_led.led_classdev);
@@ -354,8 +362,7 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
static int lis3lv02d_resume(struct acpi_device *device)
{
- lis3lv02d_poweron(&lis3_dev);
- return 0;
+ return lis3lv02d_poweron(&lis3_dev);
}
#else
#define lis3lv02d_suspend NULL
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 0c595410e78..a36addf106a 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -34,6 +34,8 @@
#include <linux/input/sparse-keymap.h>
#include <linux/backlight.h>
#include <linux/fb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#define IDEAPAD_RFKILL_DEV_NUM (3)
@@ -42,15 +44,41 @@
#define CFG_WIFI_BIT (18)
#define CFG_CAMERA_BIT (19)
+enum {
+ VPCCMD_R_VPC1 = 0x10,
+ VPCCMD_R_BL_MAX,
+ VPCCMD_R_BL,
+ VPCCMD_W_BL,
+ VPCCMD_R_WIFI,
+ VPCCMD_W_WIFI,
+ VPCCMD_R_BT,
+ VPCCMD_W_BT,
+ VPCCMD_R_BL_POWER,
+ VPCCMD_R_NOVO,
+ VPCCMD_R_VPC2,
+ VPCCMD_R_TOUCHPAD,
+ VPCCMD_W_TOUCHPAD,
+ VPCCMD_R_CAMERA,
+ VPCCMD_W_CAMERA,
+ VPCCMD_R_3G,
+ VPCCMD_W_3G,
+ VPCCMD_R_ODD, /* 0x21 */
+ VPCCMD_R_RF = 0x23,
+ VPCCMD_W_RF,
+ VPCCMD_W_BL_POWER = 0x33,
+};
+
struct ideapad_private {
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
struct input_dev *inputdev;
struct backlight_device *blightdev;
+ struct dentry *debug;
unsigned long cfg;
};
static acpi_handle ideapad_handle;
+static struct ideapad_private *ideapad_priv;
static bool no_bt_rfkill;
module_param(no_bt_rfkill, bool, 0444);
MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -164,6 +192,146 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
}
/*
+ * debugfs
+ */
+#define DEBUGFS_EVENT_LEN (4096)
+static int debugfs_status_show(struct seq_file *s, void *data)
+{
+ unsigned long value;
+
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value))
+ seq_printf(s, "Backlight max:\t%lu\n", value);
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value))
+ seq_printf(s, "Backlight now:\t%lu\n", value);
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value))
+ seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
+ seq_printf(s, "=====================\n");
+
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value))
+ seq_printf(s, "Radio status:\t%s(%lu)\n",
+ value ? "On" : "Off", value);
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value))
+ seq_printf(s, "Wifi status:\t%s(%lu)\n",
+ value ? "On" : "Off", value);
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value))
+ seq_printf(s, "BT status:\t%s(%lu)\n",
+ value ? "On" : "Off", value);
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value))
+ seq_printf(s, "3G status:\t%s(%lu)\n",
+ value ? "On" : "Off", value);
+ seq_printf(s, "=====================\n");
+
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value))
+ seq_printf(s, "Touchpad status:%s(%lu)\n",
+ value ? "On" : "Off", value);
+ if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value))
+ seq_printf(s, "Camera status:\t%s(%lu)\n",
+ value ? "On" : "Off", value);
+
+ return 0;
+}
+
+static int debugfs_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_status_show, NULL);
+}
+
+static const struct file_operations debugfs_status_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int debugfs_cfg_show(struct seq_file *s, void *data)
+{
+ if (!ideapad_priv) {
+ seq_printf(s, "cfg: N/A\n");
+ } else {
+ seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
+ ideapad_priv->cfg);
+ if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg))
+ seq_printf(s, "Bluetooth ");
+ if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg))
+ seq_printf(s, "3G ");
+ if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg))
+ seq_printf(s, "Wireless ");
+ if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg))
+ seq_printf(s, "Camera ");
+ seq_printf(s, "\nGraphic: ");
+ switch ((ideapad_priv->cfg)&0x700) {
+ case 0x100:
+ seq_printf(s, "Intel");
+ break;
+ case 0x200:
+ seq_printf(s, "ATI");
+ break;
+ case 0x300:
+ seq_printf(s, "Nvidia");
+ break;
+ case 0x400:
+ seq_printf(s, "Intel and ATI");
+ break;
+ case 0x500:
+ seq_printf(s, "Intel and Nvidia");
+ break;
+ }
+ seq_printf(s, "\n");
+ }
+ return 0;
+}
+
+static int debugfs_cfg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_cfg_show, NULL);
+}
+
+static const struct file_operations debugfs_cfg_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_cfg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
+{
+ struct dentry *node;
+
+ priv->debug = debugfs_create_dir("ideapad", NULL);
+ if (priv->debug == NULL) {
+ pr_err("failed to create debugfs directory\n");
+ goto errout;
+ }
+
+ node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL,
+ &debugfs_cfg_fops);
+ if (!node) {
+ pr_err("failed to create cfg in debugfs\n");
+ goto errout;
+ }
+
+ node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
+ &debugfs_status_fops);
+ if (!node) {
+ pr_err("failed to create status in debugfs\n");
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ return -ENOMEM;
+}
+
+static void ideapad_debugfs_exit(struct ideapad_private *priv)
+{
+ debugfs_remove_recursive(priv->debug);
+ priv->debug = NULL;
+}
+
+/*
* sysfs
*/
static ssize_t show_ideapad_cam(struct device *dev,
@@ -172,7 +340,7 @@ static ssize_t show_ideapad_cam(struct device *dev,
{
unsigned long result;
- if (read_ec_data(ideapad_handle, 0x1D, &result))
+ if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -187,7 +355,7 @@ static ssize_t store_ideapad_cam(struct device *dev,
return 0;
if (sscanf(buf, "%i", &state) != 1)
return -EINVAL;
- ret = write_ec_cmd(ideapad_handle, 0x1E, state);
+ ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
if (ret < 0)
return ret;
return count;
@@ -195,20 +363,8 @@ static ssize_t store_ideapad_cam(struct device *dev,
static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
-static ssize_t show_ideapad_cfg(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ideapad_private *priv = dev_get_drvdata(dev);
-
- return sprintf(buf, "0x%.8lX\n", priv->cfg);
-}
-
-static DEVICE_ATTR(cfg, 0444, show_ideapad_cfg, NULL);
-
static struct attribute *ideapad_attributes[] = {
&dev_attr_camera_power.attr,
- &dev_attr_cfg.attr,
NULL
};
@@ -244,9 +400,9 @@ struct ideapad_rfk_data {
};
const struct ideapad_rfk_data ideapad_rfk_data[] = {
- { "ideapad_wlan", CFG_WIFI_BIT, 0x15, RFKILL_TYPE_WLAN },
- { "ideapad_bluetooth", CFG_BT_BIT, 0x17, RFKILL_TYPE_BLUETOOTH },
- { "ideapad_3g", CFG_3G_BIT, 0x20, RFKILL_TYPE_WWAN },
+ { "ideapad_wlan", CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN },
};
static int ideapad_rfk_set(void *data, bool blocked)
@@ -260,13 +416,12 @@ static struct rfkill_ops ideapad_rfk_ops = {
.set_block = ideapad_rfk_set,
};
-static void ideapad_sync_rfk_state(struct acpi_device *adevice)
+static void ideapad_sync_rfk_state(struct ideapad_private *priv)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
unsigned long hw_blocked;
int i;
- if (read_ec_data(ideapad_handle, 0x23, &hw_blocked))
+ if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked))
return;
hw_blocked = !hw_blocked;
@@ -363,8 +518,10 @@ static void ideapad_platform_exit(struct ideapad_private *priv)
* input device
*/
static const struct key_entry ideapad_keymap[] = {
- { KE_KEY, 0x06, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 0x0D, { KEY_WLAN } },
+ { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 13, { KEY_WLAN } },
+ { KE_KEY, 16, { KEY_PROG1 } },
+ { KE_KEY, 17, { KEY_PROG2 } },
{ KE_END, 0 },
};
@@ -419,6 +576,18 @@ static void ideapad_input_report(struct ideapad_private *priv,
sparse_keymap_report_event(priv->inputdev, scancode, 1, true);
}
+static void ideapad_input_novokey(struct ideapad_private *priv)
+{
+ unsigned long long_pressed;
+
+ if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed))
+ return;
+ if (long_pressed)
+ ideapad_input_report(priv, 17);
+ else
+ ideapad_input_report(priv, 16);
+}
+
/*
* backlight
*/
@@ -426,16 +595,17 @@ static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
{
unsigned long now;
- if (read_ec_data(ideapad_handle, 0x12, &now))
+ if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
return -EIO;
return now;
}
static int ideapad_backlight_update_status(struct backlight_device *blightdev)
{
- if (write_ec_cmd(ideapad_handle, 0x13, blightdev->props.brightness))
+ if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL,
+ blightdev->props.brightness))
return -EIO;
- if (write_ec_cmd(ideapad_handle, 0x33,
+ if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER,
blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
return -EIO;
@@ -453,11 +623,11 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
struct backlight_properties props;
unsigned long max, now, power;
- if (read_ec_data(ideapad_handle, 0x11, &max))
+ if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max))
return -EIO;
- if (read_ec_data(ideapad_handle, 0x12, &now))
+ if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
return -EIO;
- if (read_ec_data(ideapad_handle, 0x18, &power))
+ if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
return -EIO;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -493,7 +663,9 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
unsigned long power;
struct backlight_device *blightdev = priv->blightdev;
- if (read_ec_data(ideapad_handle, 0x18, &power))
+ if (!blightdev)
+ return;
+ if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
return;
blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
@@ -504,7 +676,7 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/* if we control brightness via acpi video driver */
if (priv->blightdev == NULL) {
- read_ec_data(ideapad_handle, 0x12, &now);
+ read_ec_data(ideapad_handle, VPCCMD_R_BL, &now);
return;
}
@@ -533,6 +705,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
if (!priv)
return -ENOMEM;
dev_set_drvdata(&adevice->dev, priv);
+ ideapad_priv = priv;
ideapad_handle = adevice->handle;
priv->cfg = cfg;
@@ -540,6 +713,10 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
if (ret)
goto platform_failed;
+ ret = ideapad_debugfs_init(priv);
+ if (ret)
+ goto debugfs_failed;
+
ret = ideapad_input_init(priv);
if (ret)
goto input_failed;
@@ -550,7 +727,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
else
priv->rfk[i] = NULL;
}
- ideapad_sync_rfk_state(adevice);
+ ideapad_sync_rfk_state(priv);
if (!acpi_video_backlight_support()) {
ret = ideapad_backlight_init(priv);
@@ -565,6 +742,8 @@ backlight_failed:
ideapad_unregister_rfkill(adevice, i);
ideapad_input_exit(priv);
input_failed:
+ ideapad_debugfs_exit(priv);
+debugfs_failed:
ideapad_platform_exit(priv);
platform_failed:
kfree(priv);
@@ -580,6 +759,7 @@ static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(adevice, i);
ideapad_input_exit(priv);
+ ideapad_debugfs_exit(priv);
ideapad_platform_exit(priv);
dev_set_drvdata(&adevice->dev, NULL);
kfree(priv);
@@ -593,9 +773,9 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
acpi_handle handle = adevice->handle;
unsigned long vpc1, vpc2, vpc_bit;
- if (read_ec_data(handle, 0x10, &vpc1))
+ if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
return;
- if (read_ec_data(handle, 0x1A, &vpc2))
+ if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
return;
vpc1 = (vpc2 << 8) | vpc1;
@@ -603,11 +783,14 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
if (test_bit(vpc_bit, &vpc1)) {
switch (vpc_bit) {
case 9:
- ideapad_sync_rfk_state(adevice);
+ ideapad_sync_rfk_state(priv);
break;
case 4:
ideapad_backlight_notify_brightness(priv);
break;
+ case 3:
+ ideapad_input_novokey(priv);
+ break;
case 2:
ideapad_backlight_notify_power(priv);
break;
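Editor's note: the two debugfs files added to ideapad-laptop above use the standard seq_file/single_open() boilerplate. A minimal, self-contained sketch of that pattern (hypothetical names, and passing the private pointer through debugfs rather than through the module-wide ideapad_priv used above):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	/* s->private holds whatever pointer single_open() was given */
	seq_printf(s, "state: %d\n", 42);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* debugfs_create_file() stashes its data argument in i_private */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* in probe:
 *	dir = debugfs_create_dir("example", NULL);
 *	debugfs_create_file("state", S_IRUGO, dir, priv, &example_fops);
 * in remove:
 *	debugfs_remove_recursive(dir);
 */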
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index c86665369a2..48870e50423 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sfi.h>
+#include <linux/module.h>
#include <asm/mrst.h>
#include <asm/intel_scu_ipc.h>
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index b93a03259c1..2d0f9136ea9 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <asm/intel_scu_ipc.h>
-static u32 major;
+static int major;
#define MAX_FW_SIZE 264192
@@ -117,7 +117,11 @@ static const struct file_operations scu_ipc_fops = {
static int __init ipc_module_init(void)
{
- return register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
+ major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
+ if (major < 0)
+ return major;
+
+ return 0;
}
static void __exit ipc_module_exit(void)
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 6f40bf202dc..2264331bd48 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -28,6 +28,7 @@
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/slab.h>
+#include <linux/module.h>
MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 35916301104..09e26bfd464 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -226,6 +226,7 @@ static struct backlight_device *backlight_device;
static struct mutex sabi_mutex;
static struct platform_device *sdev;
static struct rfkill *rfk;
+static bool has_stepping_quirk;
static int force;
module_param(force, bool, 0);
@@ -370,15 +371,28 @@ static u8 read_brightness(void)
&sretval);
if (!retval) {
user_brightness = sretval.retval[0];
- if (user_brightness != 0)
+ if (user_brightness > sabi_config->min_brightness)
user_brightness -= sabi_config->min_brightness;
+ else
+ user_brightness = 0;
}
return user_brightness;
}
static void set_brightness(u8 user_brightness)
{
- u8 user_level = user_brightness - sabi_config->min_brightness;
+ u8 user_level = user_brightness + sabi_config->min_brightness;
+
+ if (has_stepping_quirk && user_level != 0) {
+ /*
+ * short circuit if the specified level is what's already set
+ * to prevent the screen from flickering needlessly
+ */
+ if (user_brightness == read_brightness())
+ return;
+
+ sabi_set_command(sabi_config->commands.set_brightness, 0);
+ }
sabi_set_command(sabi_config->commands.set_brightness, user_level);
}
@@ -388,6 +402,40 @@ static int get_brightness(struct backlight_device *bd)
return (int)read_brightness();
}
+static void check_for_stepping_quirk(void)
+{
+ u8 initial_level;
+ u8 check_level;
+ u8 orig_level = read_brightness();
+
+ /*
+ * Some laptops exhibit the strange behaviour of stepping toward
+ * (rather than setting) the brightness except when changing to/from
+ * brightness level 0. This behaviour is checked for here and worked
+ * around in set_brightness.
+ */
+
+ if (orig_level == 0)
+ set_brightness(1);
+
+ initial_level = read_brightness();
+
+ if (initial_level <= 2)
+ check_level = initial_level + 2;
+ else
+ check_level = initial_level - 2;
+
+ has_stepping_quirk = false;
+ set_brightness(check_level);
+
+ if (read_brightness() != check_level) {
+ has_stepping_quirk = true;
+ pr_info("enabled workaround for brightness stepping quirk\n");
+ }
+
+ set_brightness(orig_level);
+}
+
static int update_status(struct backlight_device *bd)
{
set_brightness(bd->props.brightness);
@@ -621,6 +669,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "N220",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N220"),
+ DMI_MATCH(DMI_BOARD_NAME, "N220"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "N150/N210/N220/N230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
@@ -641,6 +699,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "R700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
+ DMI_MATCH(DMI_BOARD_NAME, "SR700"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "R530/R730",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
@@ -686,6 +753,33 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
},
.callback = dmi_check_cb,
},
+ {
+ .ident = "R528/R728",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
+ DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
+ .ident = "NC210/NC110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
+ .ident = "X520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
+ DMI_MATCH(DMI_BOARD_NAME, "X520"),
+ },
+ .callback = dmi_check_cb,
+ },
{ },
};
MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -770,7 +864,7 @@ static int __init samsung_init(void)
sabi_iface = ioremap_nocache(ifaceP, 16);
if (!sabi_iface) {
pr_err("Can't remap %x\n", ifaceP);
- goto exit;
+ goto error_no_signature;
}
if (debug) {
printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
@@ -794,6 +888,9 @@ static int __init samsung_init(void)
}
}
+ /* Check for stepping quirk */
+ check_for_stepping_quirk();
+
/* knock up a platform device to hang stuff off of */
sdev = platform_device_register_simple("samsung", -1, NULL, 0);
if (IS_ERR(sdev))
@@ -802,7 +899,8 @@ static int __init samsung_init(void)
/* create a backlight device to talk to this one */
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = sabi_config->max_brightness;
+ props.max_brightness = sabi_config->max_brightness -
+ sabi_config->min_brightness;
backlight_device = backlight_device_register("samsung", &sdev->dev,
NULL, &backlight_ops,
&props);
@@ -821,7 +919,6 @@ static int __init samsung_init(void)
if (retval)
goto error_file_create;
-exit:
return 0;
error_file_create:
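Editor's note: to make the new samsung-laptop min/max brightness handling concrete, assume purely for illustration that sabi_config->min_brightness is 1 and sabi_config->max_brightness is 8 (the real values come from the SABI config tables, not from this hunk; props and level stand in for the real locals):

/* illustrative values only -- 1 and 8 are assumed, not read from SABI */
props.max_brightness = 8 - 1;	/* userspace now sees levels 0..7          */
set_brightness(5);		/* programs SABI level 5 + 1 = 6            */
level = read_brightness();	/* reads SABI 6, reports 6 - 1 = 5 back     */

The stepping-quirk check then moves the brightness by two user levels and reads it back; if the panel only advanced one step, every later set_brightness() call (except to level 0) first writes level 0 so the following write behaves as an absolute set rather than a single step.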
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index bbd182e178c..c006dee5ebf 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -3281,7 +3281,7 @@ static int sony_pic_add(struct acpi_device *device)
/* request IRQ */
list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
- IRQF_DISABLED, "sony-laptop", &spic_dev)) {
+ 0, "sony-laptop", &spic_dev)) {
dprintk("IRQ: %d - triggering: %d - "
"polarity: %d - shr: %d\n",
irq->irq.interrupts[0],
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 4c20447ddbb..d528daa0e81 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -41,6 +41,7 @@ static const struct key_entry topstar_keymap[] = {
{ KE_KEY, 0x8c, { KEY_MEDIA } },
/* Known non-hotkey events that are not handled or that we don't care about yet */
+ { KE_IGNORE, 0x82, }, /* backlight event */
{ KE_IGNORE, 0x8e, },
{ KE_IGNORE, 0x8f, },
{ KE_IGNORE, 0x90, },
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index cb009b2629e..13ef8c37471 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -47,7 +47,6 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/backlight.h>
-#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
@@ -63,11 +62,7 @@ MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
/* Toshiba ACPI method paths */
-#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM"
-#define TOSH_INTERFACE_1 "\\_SB_.VALD"
-#define TOSH_INTERFACE_2 "\\_SB_.VALZ"
#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
-#define GHCI_METHOD ".GHCI"
/* Toshiba HCI interface definitions
*
@@ -111,6 +106,25 @@ MODULE_LICENSE("GPL");
#define HCI_WIRELESS_BT_ATTACH 0x40
#define HCI_WIRELESS_BT_POWER 0x80
+struct toshiba_acpi_dev {
+ struct acpi_device *acpi_dev;
+ const char *method_hci;
+ struct rfkill *bt_rfk;
+ struct input_dev *hotkey_dev;
+ struct backlight_device *backlight_dev;
+ struct led_classdev led_dev;
+
+ int force_fan;
+ int last_key_event;
+ int key_event_valid;
+
+ int illumination_supported:1;
+ int video_supported:1;
+ int fan_supported:1;
+
+ struct mutex mutex;
+};
+
static const struct acpi_device_id toshiba_device_ids[] = {
{"TOS6200", 0},
{"TOS6208", 0},
@@ -119,7 +133,7 @@ static const struct acpi_device_id toshiba_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
-static const struct key_entry toshiba_acpi_keymap[] __initconst = {
+static const struct key_entry toshiba_acpi_keymap[] __devinitconst = {
{ KE_KEY, 0x101, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
{ KE_KEY, 0x103, { KEY_ZOOMIN } },
@@ -155,15 +169,6 @@ static __inline__ void _set_bit(u32 * word, u32 mask, int value)
/* acpi interface wrappers
*/
-static int is_valid_acpi_path(const char *methodName)
-{
- acpi_handle handle;
- acpi_status status;
-
- status = acpi_get_handle(NULL, (char *)methodName, &handle);
- return !ACPI_FAILURE(status);
-}
-
static int write_acpi_int(const char *methodName, int val)
{
struct acpi_object_list params;
@@ -176,32 +181,14 @@ static int write_acpi_int(const char *methodName, int val)
in_objs[0].integer.value = val;
status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
- return (status == AE_OK);
-}
-
-#if 0
-static int read_acpi_int(const char *methodName, int *pVal)
-{
- struct acpi_buffer results;
- union acpi_object out_objs[1];
- acpi_status status;
-
- results.length = sizeof(out_objs);
- results.pointer = out_objs;
-
- status = acpi_evaluate_object(0, (char *)methodName, 0, &results);
- *pVal = out_objs[0].integer.value;
-
- return (status == AE_OK) && (out_objs[0].type == ACPI_TYPE_INTEGER);
+ return (status == AE_OK) ? 0 : -EIO;
}
-#endif
-
-static const char *method_hci /*= 0*/ ;
/* Perform a raw HCI call. Here we don't care about input or output buffer
* format.
*/
-static acpi_status hci_raw(const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
+static acpi_status hci_raw(struct toshiba_acpi_dev *dev,
+ const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
{
struct acpi_object_list params;
union acpi_object in_objs[HCI_WORDS];
@@ -220,7 +207,8 @@ static acpi_status hci_raw(const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
results.length = sizeof(out_objs);
results.pointer = out_objs;
- status = acpi_evaluate_object(NULL, (char *)method_hci, &params,
+ status = acpi_evaluate_object(dev->acpi_dev->handle,
+ (char *)dev->method_hci, &params,
&results);
if ((status == AE_OK) && (out_objs->package.count <= HCI_WORDS)) {
for (i = 0; i < out_objs->package.count; ++i) {
@@ -237,85 +225,79 @@ static acpi_status hci_raw(const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
* may be useful (such as "not supported").
*/
-static acpi_status hci_write1(u32 reg, u32 in1, u32 * result)
+static acpi_status hci_write1(struct toshiba_acpi_dev *dev, u32 reg,
+ u32 in1, u32 *result)
{
u32 in[HCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 };
u32 out[HCI_WORDS];
- acpi_status status = hci_raw(in, out);
+ acpi_status status = hci_raw(dev, in, out);
*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
return status;
}
-static acpi_status hci_read1(u32 reg, u32 * out1, u32 * result)
+static acpi_status hci_read1(struct toshiba_acpi_dev *dev, u32 reg,
+ u32 *out1, u32 *result)
{
u32 in[HCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
u32 out[HCI_WORDS];
- acpi_status status = hci_raw(in, out);
+ acpi_status status = hci_raw(dev, in, out);
*out1 = out[2];
*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
return status;
}
-static acpi_status hci_write2(u32 reg, u32 in1, u32 in2, u32 *result)
+static acpi_status hci_write2(struct toshiba_acpi_dev *dev, u32 reg,
+ u32 in1, u32 in2, u32 *result)
{
u32 in[HCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
u32 out[HCI_WORDS];
- acpi_status status = hci_raw(in, out);
+ acpi_status status = hci_raw(dev, in, out);
*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
return status;
}
-static acpi_status hci_read2(u32 reg, u32 *out1, u32 *out2, u32 *result)
+static acpi_status hci_read2(struct toshiba_acpi_dev *dev, u32 reg,
+ u32 *out1, u32 *out2, u32 *result)
{
u32 in[HCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
u32 out[HCI_WORDS];
- acpi_status status = hci_raw(in, out);
+ acpi_status status = hci_raw(dev, in, out);
*out1 = out[2];
*out2 = out[3];
*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
return status;
}
-struct toshiba_acpi_dev {
- struct platform_device *p_dev;
- struct rfkill *bt_rfk;
- struct input_dev *hotkey_dev;
- int illumination_installed;
- acpi_handle handle;
-
- const char *bt_name;
-
- struct mutex mutex;
-};
-
/* Illumination support */
-static int toshiba_illumination_available(void)
+static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
{
u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
u32 out[HCI_WORDS];
acpi_status status;
in[0] = 0xf100;
- status = hci_raw(in, out);
+ status = hci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_info("Illumination device not available\n");
return 0;
}
in[0] = 0xf400;
- status = hci_raw(in, out);
+ status = hci_raw(dev, in, out);
return 1;
}
static void toshiba_illumination_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, led_dev);
u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
u32 out[HCI_WORDS];
acpi_status status;
/* First request : initialize communication. */
in[0] = 0xf100;
- status = hci_raw(in, out);
+ status = hci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_info("Illumination device not available\n");
return;
@@ -326,7 +308,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[0] = 0xf400;
in[1] = 0x14e;
in[2] = 1;
- status = hci_raw(in, out);
+ status = hci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_info("ACPI call for illumination failed\n");
return;
@@ -336,7 +318,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[0] = 0xf400;
in[1] = 0x14e;
in[2] = 0;
- status = hci_raw(in, out);
+ status = hci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_info("ACPI call for illumination failed.\n");
return;
@@ -347,11 +329,13 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[0] = 0xf200;
in[1] = 0;
in[2] = 0;
- hci_raw(in, out);
+ hci_raw(dev, in, out);
}
static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
{
+ struct toshiba_acpi_dev *dev = container_of(cdev,
+ struct toshiba_acpi_dev, led_dev);
u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
u32 out[HCI_WORDS];
acpi_status status;
@@ -359,7 +343,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
/* First request : initialize communication. */
in[0] = 0xf100;
- status = hci_raw(in, out);
+ status = hci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_info("Illumination device not available\n");
return LED_OFF;
@@ -368,7 +352,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
/* Check the illumination */
in[0] = 0xf300;
in[1] = 0x14e;
- status = hci_raw(in, out);
+ status = hci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_info("ACPI call for illumination failed.\n");
return LED_OFF;
@@ -380,46 +364,35 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
in[0] = 0xf200;
in[1] = 0;
in[2] = 0;
- hci_raw(in, out);
+ hci_raw(dev, in, out);
return result;
}
-static struct led_classdev toshiba_led = {
- .name = "toshiba::illumination",
- .max_brightness = 1,
- .brightness_set = toshiba_illumination_set,
- .brightness_get = toshiba_illumination_get,
-};
-
-static struct toshiba_acpi_dev toshiba_acpi = {
- .bt_name = "Toshiba Bluetooth",
-};
-
/* Bluetooth rfkill handlers */
-static u32 hci_get_bt_present(bool *present)
+static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
{
u32 hci_result;
u32 value, value2;
value = 0;
value2 = 0;
- hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+ hci_read2(dev, HCI_WIRELESS, &value, &value2, &hci_result);
if (hci_result == HCI_SUCCESS)
*present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
return hci_result;
}
-static u32 hci_get_radio_state(bool *radio_state)
+static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
{
u32 hci_result;
u32 value, value2;
value = 0;
value2 = 0x0001;
- hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+ hci_read2(dev, HCI_WIRELESS, &value, &value2, &hci_result);
*radio_state = value & HCI_WIRELESS_KILL_SWITCH;
return hci_result;
@@ -436,8 +409,8 @@ static int bt_rfkill_set_block(void *data, bool blocked)
value = (blocked == false);
mutex_lock(&dev->mutex);
- if (hci_get_radio_state(&radio_state) != HCI_SUCCESS) {
- err = -EBUSY;
+ if (hci_get_radio_state(dev, &radio_state) != HCI_SUCCESS) {
+ err = -EIO;
goto out;
}
@@ -446,11 +419,11 @@ static int bt_rfkill_set_block(void *data, bool blocked)
goto out;
}
- hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
- hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
+ hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
+ hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
- err = -EBUSY;
+ err = -EIO;
else
err = 0;
out:
@@ -467,7 +440,7 @@ static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
mutex_lock(&dev->mutex);
- hci_result = hci_get_radio_state(&value);
+ hci_result = hci_get_radio_state(dev, &value);
if (hci_result != HCI_SUCCESS) {
/* Can't do anything useful */
mutex_unlock(&dev->mutex);
@@ -488,63 +461,64 @@ static const struct rfkill_ops toshiba_rfk_ops = {
};
static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
-static struct backlight_device *toshiba_backlight_device;
-static int force_fan;
-static int last_key_event;
-static int key_event_valid;
static int get_lcd(struct backlight_device *bd)
{
+ struct toshiba_acpi_dev *dev = bl_get_data(bd);
u32 hci_result;
u32 value;
- hci_read1(HCI_LCD_BRIGHTNESS, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
+ if (hci_result == HCI_SUCCESS)
return (value >> HCI_LCD_BRIGHTNESS_SHIFT);
- } else
- return -EFAULT;
+
+ return -EIO;
}
static int lcd_proc_show(struct seq_file *m, void *v)
{
- int value = get_lcd(NULL);
+ struct toshiba_acpi_dev *dev = m->private;
+ int value;
+
+ if (!dev->backlight_dev)
+ return -ENODEV;
+ value = get_lcd(dev->backlight_dev);
if (value >= 0) {
seq_printf(m, "brightness: %d\n", value);
seq_printf(m, "brightness_levels: %d\n",
HCI_LCD_BRIGHTNESS_LEVELS);
- } else {
- pr_err("Error reading LCD brightness\n");
+ return 0;
}
- return 0;
+ pr_err("Error reading LCD brightness\n");
+ return -EIO;
}
static int lcd_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, lcd_proc_show, NULL);
+ return single_open(file, lcd_proc_show, PDE(inode)->data);
}
-static int set_lcd(int value)
+static int set_lcd(struct toshiba_acpi_dev *dev, int value)
{
u32 hci_result;
value = value << HCI_LCD_BRIGHTNESS_SHIFT;
- hci_write1(HCI_LCD_BRIGHTNESS, value, &hci_result);
- if (hci_result != HCI_SUCCESS)
- return -EFAULT;
-
- return 0;
+ hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
+ return hci_result == HCI_SUCCESS ? 0 : -EIO;
}
static int set_lcd_status(struct backlight_device *bd)
{
- return set_lcd(bd->props.brightness);
+ struct toshiba_acpi_dev *dev = bl_get_data(bd);
+ return set_lcd(dev, bd->props.brightness);
}
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
+ struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
char cmd[42];
size_t len;
int value;
@@ -557,7 +531,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
if (sscanf(cmd, " brightness : %i", &value) == 1 &&
value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
- ret = set_lcd(value);
+ ret = set_lcd(dev, value);
if (ret == 0)
ret = count;
} else {
@@ -575,41 +549,49 @@ static const struct file_operations lcd_proc_fops = {
.write = lcd_proc_write,
};
-static int video_proc_show(struct seq_file *m, void *v)
+static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
{
u32 hci_result;
+
+ hci_read1(dev, HCI_VIDEO_OUT, status, &hci_result);
+ return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
+static int video_proc_show(struct seq_file *m, void *v)
+{
+ struct toshiba_acpi_dev *dev = m->private;
u32 value;
+ int ret;
- hci_read1(HCI_VIDEO_OUT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ ret = get_video_status(dev, &value);
+ if (!ret) {
int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
seq_printf(m, "lcd_out: %d\n", is_lcd);
seq_printf(m, "crt_out: %d\n", is_crt);
seq_printf(m, "tv_out: %d\n", is_tv);
- } else {
- pr_err("Error reading video out status\n");
}
- return 0;
+ return ret;
}
static int video_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, video_proc_show, NULL);
+ return single_open(file, video_proc_show, PDE(inode)->data);
}
static ssize_t video_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
+ struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
char *cmd, *buffer;
+ int ret;
int value;
int remain = count;
int lcd_out = -1;
int crt_out = -1;
int tv_out = -1;
- u32 hci_result;
u32 video_out;
cmd = kmalloc(count + 1, GFP_KERNEL);
@@ -644,8 +626,8 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
kfree(cmd);
- hci_read1(HCI_VIDEO_OUT, &video_out, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ ret = get_video_status(dev, &video_out);
+ if (!ret) {
unsigned int new_video_out = video_out;
if (lcd_out != -1)
_set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
@@ -656,12 +638,10 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
/* To avoid unnecessary video disruption, only write the new
* video setting if something changed. */
if (new_video_out != video_out)
- write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
- } else {
- return -EFAULT;
+ ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
}
- return count;
+ return ret ? ret : count;
}
static const struct file_operations video_proc_fops = {
@@ -673,30 +653,38 @@ static const struct file_operations video_proc_fops = {
.write = video_proc_write,
};
-static int fan_proc_show(struct seq_file *m, void *v)
+static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status)
{
u32 hci_result;
+
+ hci_read1(dev, HCI_FAN, status, &hci_result);
+ return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
+static int fan_proc_show(struct seq_file *m, void *v)
+{
+ struct toshiba_acpi_dev *dev = m->private;
+ int ret;
u32 value;
- hci_read1(HCI_FAN, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ ret = get_fan_status(dev, &value);
+ if (!ret) {
seq_printf(m, "running: %d\n", (value > 0));
- seq_printf(m, "force_on: %d\n", force_fan);
- } else {
- pr_err("Error reading fan status\n");
+ seq_printf(m, "force_on: %d\n", dev->force_fan);
}
- return 0;
+ return ret;
}
static int fan_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, fan_proc_show, NULL);
+ return single_open(file, fan_proc_show, PDE(inode)->data);
}
static ssize_t fan_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
+ struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
char cmd[42];
size_t len;
int value;
@@ -709,11 +697,11 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
if (sscanf(cmd, " force_on : %i", &value) == 1 &&
value >= 0 && value <= 1) {
- hci_write1(HCI_FAN, value, &hci_result);
+ hci_write1(dev, HCI_FAN, value, &hci_result);
if (hci_result != HCI_SUCCESS)
- return -EFAULT;
+ return -EIO;
else
- force_fan = value;
+ dev->force_fan = value;
} else {
return -EINVAL;
}
@@ -732,42 +720,43 @@ static const struct file_operations fan_proc_fops = {
static int keys_proc_show(struct seq_file *m, void *v)
{
+ struct toshiba_acpi_dev *dev = m->private;
u32 hci_result;
u32 value;
- if (!key_event_valid) {
- hci_read1(HCI_SYSTEM_EVENT, &value, &hci_result);
+ if (!dev->key_event_valid) {
+ hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
- key_event_valid = 1;
- last_key_event = value;
+ dev->key_event_valid = 1;
+ dev->last_key_event = value;
} else if (hci_result == HCI_EMPTY) {
/* better luck next time */
} else if (hci_result == HCI_NOT_SUPPORTED) {
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
- hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
+ hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
pr_notice("Re-enabled hotkeys\n");
} else {
pr_err("Error reading hotkey status\n");
- goto end;
+ return -EIO;
}
}
- seq_printf(m, "hotkey_ready: %d\n", key_event_valid);
- seq_printf(m, "hotkey: 0x%04x\n", last_key_event);
-end:
+ seq_printf(m, "hotkey_ready: %d\n", dev->key_event_valid);
+ seq_printf(m, "hotkey: 0x%04x\n", dev->last_key_event);
return 0;
}
static int keys_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, keys_proc_show, NULL);
+ return single_open(file, keys_proc_show, PDE(inode)->data);
}
static ssize_t keys_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
+ struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
char cmd[42];
size_t len;
int value;
@@ -778,7 +767,7 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
cmd[len] = '\0';
if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) {
- key_event_valid = 0;
+ dev->key_event_valid = 0;
} else {
return -EINVAL;
}
@@ -820,21 +809,35 @@ static const struct file_operations version_proc_fops = {
#define PROC_TOSHIBA "toshiba"
-static void __init create_toshiba_proc_entries(void)
+static void __devinit
+create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
{
- proc_create("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir, &lcd_proc_fops);
- proc_create("video", S_IRUGO | S_IWUSR, toshiba_proc_dir, &video_proc_fops);
- proc_create("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir, &fan_proc_fops);
- proc_create("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir, &keys_proc_fops);
- proc_create("version", S_IRUGO, toshiba_proc_dir, &version_proc_fops);
+ if (dev->backlight_dev)
+ proc_create_data("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &lcd_proc_fops, dev);
+ if (dev->video_supported)
+ proc_create_data("video", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &video_proc_fops, dev);
+ if (dev->fan_supported)
+ proc_create_data("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &fan_proc_fops, dev);
+ if (dev->hotkey_dev)
+ proc_create_data("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir,
+ &keys_proc_fops, dev);
+ proc_create_data("version", S_IRUGO, toshiba_proc_dir,
+ &version_proc_fops, dev);
}
-static void remove_toshiba_proc_entries(void)
+static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
{
- remove_proc_entry("lcd", toshiba_proc_dir);
- remove_proc_entry("video", toshiba_proc_dir);
- remove_proc_entry("fan", toshiba_proc_dir);
- remove_proc_entry("keys", toshiba_proc_dir);
+ if (dev->backlight_dev)
+ remove_proc_entry("lcd", toshiba_proc_dir);
+ if (dev->video_supported)
+ remove_proc_entry("video", toshiba_proc_dir);
+ if (dev->fan_supported)
+ remove_proc_entry("fan", toshiba_proc_dir);
+ if (dev->hotkey_dev)
+ remove_proc_entry("keys", toshiba_proc_dir);
remove_proc_entry("version", toshiba_proc_dir);
}
@@ -843,224 +846,256 @@ static const struct backlight_ops toshiba_backlight_data = {
.update_status = set_lcd_status,
};
-static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
-{
- u32 hci_result, value;
-
- if (event != 0x80)
- return;
- do {
- hci_read1(HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
- if (value == 0x100)
- continue;
- /* act on key press; ignore key release */
- if (value & 0x80)
- continue;
-
- if (!sparse_keymap_report_event(toshiba_acpi.hotkey_dev,
- value, 1, true)) {
- pr_info("Unknown key %x\n",
- value);
- }
- } else if (hci_result == HCI_NOT_SUPPORTED) {
- /* This is a workaround for an unresolved issue on
- * some machines where system events sporadically
- * become disabled. */
- hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
- pr_notice("Re-enabled hotkeys\n");
- }
- } while (hci_result != HCI_EMPTY);
-}
-
-static int __init toshiba_acpi_setup_keyboard(char *device)
+static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
acpi_status status;
int error;
- status = acpi_get_handle(NULL, device, &toshiba_acpi.handle);
- if (ACPI_FAILURE(status)) {
- pr_info("Unable to get notification device\n");
- return -ENODEV;
- }
-
- toshiba_acpi.hotkey_dev = input_allocate_device();
- if (!toshiba_acpi.hotkey_dev) {
+ dev->hotkey_dev = input_allocate_device();
+ if (!dev->hotkey_dev) {
pr_info("Unable to register input device\n");
return -ENOMEM;
}
- toshiba_acpi.hotkey_dev->name = "Toshiba input device";
- toshiba_acpi.hotkey_dev->phys = device;
- toshiba_acpi.hotkey_dev->id.bustype = BUS_HOST;
+ dev->hotkey_dev->name = "Toshiba input device";
+ dev->hotkey_dev->phys = "toshiba_acpi/input0";
+ dev->hotkey_dev->id.bustype = BUS_HOST;
- error = sparse_keymap_setup(toshiba_acpi.hotkey_dev,
- toshiba_acpi_keymap, NULL);
+ error = sparse_keymap_setup(dev->hotkey_dev, toshiba_acpi_keymap, NULL);
if (error)
goto err_free_dev;
- status = acpi_install_notify_handler(toshiba_acpi.handle,
- ACPI_DEVICE_NOTIFY, toshiba_acpi_notify, NULL);
- if (ACPI_FAILURE(status)) {
- pr_info("Unable to install hotkey notification\n");
- error = -ENODEV;
- goto err_free_keymap;
- }
-
- status = acpi_evaluate_object(toshiba_acpi.handle, "ENAB", NULL, NULL);
+ status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB", NULL, NULL);
if (ACPI_FAILURE(status)) {
pr_info("Unable to enable hotkeys\n");
error = -ENODEV;
- goto err_remove_notify;
+ goto err_free_keymap;
}
- error = input_register_device(toshiba_acpi.hotkey_dev);
+ error = input_register_device(dev->hotkey_dev);
if (error) {
pr_info("Unable to register input device\n");
- goto err_remove_notify;
+ goto err_free_keymap;
}
return 0;
- err_remove_notify:
- acpi_remove_notify_handler(toshiba_acpi.handle,
- ACPI_DEVICE_NOTIFY, toshiba_acpi_notify);
err_free_keymap:
- sparse_keymap_free(toshiba_acpi.hotkey_dev);
+ sparse_keymap_free(dev->hotkey_dev);
err_free_dev:
- input_free_device(toshiba_acpi.hotkey_dev);
- toshiba_acpi.hotkey_dev = NULL;
+ input_free_device(dev->hotkey_dev);
+ dev->hotkey_dev = NULL;
return error;
}
-static void toshiba_acpi_exit(void)
+static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
{
- if (toshiba_acpi.hotkey_dev) {
- acpi_remove_notify_handler(toshiba_acpi.handle,
- ACPI_DEVICE_NOTIFY, toshiba_acpi_notify);
- sparse_keymap_free(toshiba_acpi.hotkey_dev);
- input_unregister_device(toshiba_acpi.hotkey_dev);
+ struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+
+ remove_toshiba_proc_entries(dev);
+
+ if (dev->hotkey_dev) {
+ input_unregister_device(dev->hotkey_dev);
+ sparse_keymap_free(dev->hotkey_dev);
}
- if (toshiba_acpi.bt_rfk) {
- rfkill_unregister(toshiba_acpi.bt_rfk);
- rfkill_destroy(toshiba_acpi.bt_rfk);
+ if (dev->bt_rfk) {
+ rfkill_unregister(dev->bt_rfk);
+ rfkill_destroy(dev->bt_rfk);
}
- if (toshiba_backlight_device)
- backlight_device_unregister(toshiba_backlight_device);
+ if (dev->backlight_dev)
+ backlight_device_unregister(dev->backlight_dev);
- remove_toshiba_proc_entries();
+ if (dev->illumination_supported)
+ led_classdev_unregister(&dev->led_dev);
- if (toshiba_proc_dir)
- remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
+ kfree(dev);
+
+ return 0;
+}
+
+static const char * __devinit find_hci_method(acpi_handle handle)
+{
+ acpi_status status;
+ acpi_handle hci_handle;
- if (toshiba_acpi.illumination_installed)
- led_classdev_unregister(&toshiba_led);
+ status = acpi_get_handle(handle, "GHCI", &hci_handle);
+ if (ACPI_SUCCESS(status))
+ return "GHCI";
- platform_device_unregister(toshiba_acpi.p_dev);
+ status = acpi_get_handle(handle, "SPFC", &hci_handle);
+ if (ACPI_SUCCESS(status))
+ return "SPFC";
- return;
+ return NULL;
}
-static int __init toshiba_acpi_init(void)
+static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
{
+ struct toshiba_acpi_dev *dev;
+ const char *hci_method;
u32 hci_result;
+ u32 dummy;
bool bt_present;
int ret = 0;
struct backlight_properties props;
- if (acpi_disabled)
- return -ENODEV;
-
- /* simple device detection: look for HCI method */
- if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) {
- method_hci = TOSH_INTERFACE_1 GHCI_METHOD;
- if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1))
- pr_info("Unable to activate hotkeys\n");
- } else if (is_valid_acpi_path(TOSH_INTERFACE_2 GHCI_METHOD)) {
- method_hci = TOSH_INTERFACE_2 GHCI_METHOD;
- if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2))
- pr_info("Unable to activate hotkeys\n");
- } else
- return -ENODEV;
-
pr_info("Toshiba Laptop ACPI Extras version %s\n",
TOSHIBA_ACPI_VERSION);
- pr_info(" HCI method: %s\n", method_hci);
-
- mutex_init(&toshiba_acpi.mutex);
-
- toshiba_acpi.p_dev = platform_device_register_simple("toshiba_acpi",
- -1, NULL, 0);
- if (IS_ERR(toshiba_acpi.p_dev)) {
- ret = PTR_ERR(toshiba_acpi.p_dev);
- pr_err("unable to register platform device\n");
- toshiba_acpi.p_dev = NULL;
- toshiba_acpi_exit();
- return ret;
+
+ hci_method = find_hci_method(acpi_dev->handle);
+ if (!hci_method) {
+ pr_err("HCI interface not found\n");
+ return -ENODEV;
}
- force_fan = 0;
- key_event_valid = 0;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->acpi_dev = acpi_dev;
+ dev->method_hci = hci_method;
+ acpi_dev->driver_data = dev;
- /* enable event fifo */
- hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
+ if (toshiba_acpi_setup_keyboard(dev))
+ pr_info("Unable to activate hotkeys\n");
- toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
- if (!toshiba_proc_dir) {
- toshiba_acpi_exit();
- return -ENODEV;
- } else {
- create_toshiba_proc_entries();
- }
+ mutex_init(&dev->mutex);
+
+ /* enable event fifo */
+ hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
- toshiba_backlight_device = backlight_device_register("toshiba",
- &toshiba_acpi.p_dev->dev,
- NULL,
- &toshiba_backlight_data,
- &props);
- if (IS_ERR(toshiba_backlight_device)) {
- ret = PTR_ERR(toshiba_backlight_device);
+ dev->backlight_dev = backlight_device_register("toshiba",
+ &acpi_dev->dev,
+ dev,
+ &toshiba_backlight_data,
+ &props);
+ if (IS_ERR(dev->backlight_dev)) {
+ ret = PTR_ERR(dev->backlight_dev);
pr_err("Could not register toshiba backlight device\n");
- toshiba_backlight_device = NULL;
- toshiba_acpi_exit();
- return ret;
+ dev->backlight_dev = NULL;
+ goto error;
}
+ dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
/* Register rfkill switch for Bluetooth */
- if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
- toshiba_acpi.bt_rfk = rfkill_alloc(toshiba_acpi.bt_name,
- &toshiba_acpi.p_dev->dev,
- RFKILL_TYPE_BLUETOOTH,
- &toshiba_rfk_ops,
- &toshiba_acpi);
- if (!toshiba_acpi.bt_rfk) {
+ if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
+ dev->bt_rfk = rfkill_alloc("Toshiba Bluetooth",
+ &acpi_dev->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &toshiba_rfk_ops,
+ dev);
+ if (!dev->bt_rfk) {
pr_err("unable to allocate rfkill device\n");
- toshiba_acpi_exit();
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
- ret = rfkill_register(toshiba_acpi.bt_rfk);
+ ret = rfkill_register(dev->bt_rfk);
if (ret) {
pr_err("unable to register rfkill device\n");
- rfkill_destroy(toshiba_acpi.bt_rfk);
- toshiba_acpi_exit();
- return ret;
+ rfkill_destroy(dev->bt_rfk);
+ goto error;
}
}
- toshiba_acpi.illumination_installed = 0;
- if (toshiba_illumination_available()) {
- if (!led_classdev_register(&(toshiba_acpi.p_dev->dev),
- &toshiba_led))
- toshiba_acpi.illumination_installed = 1;
+ if (toshiba_illumination_available(dev)) {
+ dev->led_dev.name = "toshiba::illumination";
+ dev->led_dev.max_brightness = 1;
+ dev->led_dev.brightness_set = toshiba_illumination_set;
+ dev->led_dev.brightness_get = toshiba_illumination_get;
+ if (!led_classdev_register(&acpi_dev->dev, &dev->led_dev))
+ dev->illumination_supported = 1;
}
+ /* Determine whether or not BIOS supports fan and video interfaces */
+
+ ret = get_video_status(dev, &dummy);
+ dev->video_supported = !ret;
+
+ ret = get_fan_status(dev, &dummy);
+ dev->fan_supported = !ret;
+
+ create_toshiba_proc_entries(dev);
+
return 0;
+
+error:
+ toshiba_acpi_remove(acpi_dev, 0);
+ return ret;
+}
+
+static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+ u32 hci_result, value;
+
+ if (event != 0x80)
+ return;
+ do {
+ hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
+ if (hci_result == HCI_SUCCESS) {
+ if (value == 0x100)
+ continue;
+ /* act on key press; ignore key release */
+ if (value & 0x80)
+ continue;
+
+ if (!sparse_keymap_report_event(dev->hotkey_dev,
+ value, 1, true)) {
+ pr_info("Unknown key %x\n",
+ value);
+ }
+ } else if (hci_result == HCI_NOT_SUPPORTED) {
+ /* This is a workaround for an unresolved issue on
+ * some machines where system events sporadically
+ * become disabled. */
+ hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ pr_notice("Re-enabled hotkeys\n");
+ }
+ } while (hci_result != HCI_EMPTY);
+}
+
+
+static struct acpi_driver toshiba_acpi_driver = {
+ .name = "Toshiba ACPI driver",
+ .owner = THIS_MODULE,
+ .ids = toshiba_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = toshiba_acpi_add,
+ .remove = toshiba_acpi_remove,
+ .notify = toshiba_acpi_notify,
+ },
+};
+
+static int __init toshiba_acpi_init(void)
+{
+ int ret;
+
+ toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+ if (!toshiba_proc_dir) {
+ pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
+ return -ENODEV;
+ }
+
+ ret = acpi_bus_register_driver(&toshiba_acpi_driver);
+ if (ret) {
+ pr_err("Failed to register ACPI driver: %d\n", ret);
+ remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
+ }
+
+ return ret;
+}
+
+static void __exit toshiba_acpi_exit(void)
+{
+ acpi_bus_unregister_driver(&toshiba_acpi_driver);
+ if (toshiba_proc_dir)
+ remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
}
module_init(toshiba_acpi_init);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index f23d5a84e7b..a134c26870b 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -754,9 +755,13 @@ static void wmi_free_devices(void)
struct wmi_block *wblock, *next;
/* Delete devices for all the GUIDs */
- list_for_each_entry_safe(wblock, next, &wmi_block_list, list)
+ list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+ list_del(&wblock->list);
if (wblock->dev.class)
device_unregister(&wblock->dev);
+ else
+ kfree(wblock);
+ }
}
static bool guid_already_parsed(const char *guid_string)
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index ca84d5099ce..b00c17612a8 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/slab.h>
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index bbf3edd85be..5be4a392a3a 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -509,15 +509,12 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
struct acpi_resource_dma *p)
{
int i;
- unsigned char map = 0, flags = 0;
-
- if (p->channel_count == 0)
- flags |= IORESOURCE_DISABLED;
+ unsigned char map = 0, flags;
for (i = 0; i < p->channel_count; i++)
map |= 1 << p->channels[i];
- flags |= dma_flags(dev, p->type, p->bus_master, p->transfer);
+ flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
pnp_register_dma_resource(dev, option_flags, map, flags);
}
@@ -527,17 +524,14 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
{
int i;
pnp_irq_mask_t map;
- unsigned char flags = 0;
-
- if (p->interrupt_count == 0)
- flags |= IORESOURCE_DISABLED;
+ unsigned char flags;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++)
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
- flags |= irq_flags(p->triggering, p->polarity, p->sharable);
+ flags = irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -547,10 +541,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
{
int i;
pnp_irq_mask_t map;
- unsigned char flags = 0;
-
- if (p->interrupt_count == 0)
- flags |= IORESOURCE_DISABLED;
+ unsigned char flags;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++) {
@@ -564,7 +555,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
}
}
- flags |= irq_flags(p->triggering, p->polarity, p->sharable);
+ flags = irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -574,11 +565,8 @@ static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
{
unsigned char flags = 0;
- if (io->address_length == 0)
- flags |= IORESOURCE_DISABLED;
-
if (io->io_decode == ACPI_DECODE_16)
- flags |= IORESOURCE_IO_16BIT_ADDR;
+ flags = IORESOURCE_IO_16BIT_ADDR;
pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum,
io->alignment, io->address_length, flags);
}
@@ -587,13 +575,8 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_fixed_io *io)
{
- unsigned char flags = 0;
-
- if (io->address_length == 0)
- flags |= IORESOURCE_DISABLED;
-
pnp_register_port_resource(dev, option_flags, io->address, io->address,
- 0, io->address_length, flags | IORESOURCE_IO_FIXED);
+ 0, io->address_length, IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
@@ -602,11 +585,8 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
{
unsigned char flags = 0;
- if (p->address_length == 0)
- flags |= IORESOURCE_DISABLED;
-
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
- flags |= IORESOURCE_MEM_WRITEABLE;
+ flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
@@ -617,11 +597,8 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
{
unsigned char flags = 0;
- if (p->address_length == 0)
- flags |= IORESOURCE_DISABLED;
-
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
- flags |= IORESOURCE_MEM_WRITEABLE;
+ flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
@@ -632,11 +609,8 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
{
unsigned char flags = 0;
- if (p->address_length == 0)
- flags |= IORESOURCE_DISABLED;
-
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
- flags |= IORESOURCE_MEM_WRITEABLE;
+ flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->address, p->address,
0, p->address_length, flags);
}
@@ -656,19 +630,16 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
return;
}
- if (p->address_length == 0)
- flags |= IORESOURCE_DISABLED;
-
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
- flags |= IORESOURCE_MEM_WRITEABLE;
+ flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
- flags | IORESOURCE_IO_FIXED);
+ IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
@@ -678,19 +649,16 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
unsigned char flags = 0;
- if (p->address_length == 0)
- flags |= IORESOURCE_DISABLED;
-
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
- flags |= IORESOURCE_MEM_WRITEABLE;
+ flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
- flags | IORESOURCE_IO_FIXED);
+ IORESOURCE_IO_FIXED);
}
struct acpipnp_parse_option_s {
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 57de051a74b..9f88641e67f 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -70,6 +70,7 @@ config BATTERY_DS2760
config BATTERY_DS2780
tristate "DS2780 battery driver"
+ depends on HAS_IOMEM
select W1
select W1_SLAVE_DS2780
help
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
index 1fefe82e12e..91a783d7236 100644
--- a/drivers/power/ds2780_battery.c
+++ b/drivers/power/ds2780_battery.c
@@ -39,6 +39,7 @@ struct ds2780_device_info {
struct device *dev;
struct power_supply bat;
struct device *w1_dev;
+ struct task_struct *mutex_holder;
};
enum current_types {
@@ -49,8 +50,8 @@ enum current_types {
static const char model[] = "DS2780";
static const char manufacturer[] = "Maxim/Dallas";
-static inline struct ds2780_device_info *to_ds2780_device_info(
- struct power_supply *psy)
+static inline struct ds2780_device_info *
+to_ds2780_device_info(struct power_supply *psy)
{
return container_of(psy, struct ds2780_device_info, bat);
}
@@ -60,17 +61,28 @@ static inline struct power_supply *to_power_supply(struct device *dev)
return dev_get_drvdata(dev);
}
-static inline int ds2780_read8(struct device *dev, u8 *val, int addr)
+static inline int ds2780_battery_io(struct ds2780_device_info *dev_info,
+ char *buf, int addr, size_t count, int io)
{
- return w1_ds2780_io(dev, val, addr, sizeof(u8), 0);
+ if (dev_info->mutex_holder == current)
+ return w1_ds2780_io_nolock(dev_info->w1_dev, buf, addr, count, io);
+ else
+ return w1_ds2780_io(dev_info->w1_dev, buf, addr, count, io);
+}
+
+static inline int ds2780_read8(struct ds2780_device_info *dev_info, u8 *val,
+ int addr)
+{
+ return ds2780_battery_io(dev_info, val, addr, sizeof(u8), 0);
}
-static int ds2780_read16(struct device *dev, s16 *val, int addr)
+static int ds2780_read16(struct ds2780_device_info *dev_info, s16 *val,
+ int addr)
{
int ret;
u8 raw[2];
- ret = w1_ds2780_io(dev, raw, addr, sizeof(u8) * 2, 0);
+ ret = ds2780_battery_io(dev_info, raw, addr, sizeof(raw), 0);
if (ret < 0)
return ret;
@@ -79,16 +91,16 @@ static int ds2780_read16(struct device *dev, s16 *val, int addr)
return 0;
}
-static inline int ds2780_read_block(struct device *dev, u8 *val, int addr,
- size_t count)
+static inline int ds2780_read_block(struct ds2780_device_info *dev_info,
+ u8 *val, int addr, size_t count)
{
- return w1_ds2780_io(dev, val, addr, count, 0);
+ return ds2780_battery_io(dev_info, val, addr, count, 0);
}
-static inline int ds2780_write(struct device *dev, u8 *val, int addr,
- size_t count)
+static inline int ds2780_write(struct ds2780_device_info *dev_info, u8 *val,
+ int addr, size_t count)
{
- return w1_ds2780_io(dev, val, addr, count, 1);
+ return ds2780_battery_io(dev_info, val, addr, count, 1);
}
static inline int ds2780_store_eeprom(struct device *dev, int addr)
@@ -122,7 +134,7 @@ static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
{
int ret;
- ret = ds2780_write(dev_info->w1_dev, &conductance,
+ ret = ds2780_write(dev_info, &conductance,
DS2780_RSNSP_REG, sizeof(u8));
if (ret < 0)
return ret;
@@ -134,7 +146,7 @@ static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
static int ds2780_get_rsgain_register(struct ds2780_device_info *dev_info,
u16 *rsgain)
{
- return ds2780_read16(dev_info->w1_dev, rsgain, DS2780_RSGAIN_MSB_REG);
+ return ds2780_read16(dev_info, rsgain, DS2780_RSGAIN_MSB_REG);
}
/* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */
@@ -144,8 +156,8 @@ static int ds2780_set_rsgain_register(struct ds2780_device_info *dev_info,
int ret;
u8 raw[] = {rsgain >> 8, rsgain & 0xFF};
- ret = ds2780_write(dev_info->w1_dev, raw,
- DS2780_RSGAIN_MSB_REG, sizeof(u8) * 2);
+ ret = ds2780_write(dev_info, raw,
+ DS2780_RSGAIN_MSB_REG, sizeof(raw));
if (ret < 0)
return ret;
@@ -167,7 +179,7 @@ static int ds2780_get_voltage(struct ds2780_device_info *dev_info,
* Bits 2 - 0 of the voltage value are in bits 7 - 5 of the
* voltage LSB register
*/
- ret = ds2780_read16(dev_info->w1_dev, &voltage_raw,
+ ret = ds2780_read16(dev_info, &voltage_raw,
DS2780_VOLT_MSB_REG);
if (ret < 0)
return ret;
@@ -196,7 +208,7 @@ static int ds2780_get_temperature(struct ds2780_device_info *dev_info,
* Bits 2 - 0 of the temperature value are in bits 7 - 5 of the
* temperature LSB register
*/
- ret = ds2780_read16(dev_info->w1_dev, &temperature_raw,
+ ret = ds2780_read16(dev_info, &temperature_raw,
DS2780_TEMP_MSB_REG);
if (ret < 0)
return ret;
@@ -222,13 +234,13 @@ static int ds2780_get_current(struct ds2780_device_info *dev_info,
* The units of measurement for current are dependent on the value of
* the sense resistor.
*/
- ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
+ ret = ds2780_read8(dev_info, &sense_res_raw, DS2780_RSNSP_REG);
if (ret < 0)
return ret;
if (sense_res_raw == 0) {
dev_err(dev_info->dev, "sense resistor value is 0\n");
- return -ENXIO;
+ return -EINVAL;
}
sense_res = 1000 / sense_res_raw;
@@ -248,7 +260,7 @@ static int ds2780_get_current(struct ds2780_device_info *dev_info,
* Bits 7 - 0 of the current value are in bits 7 - 0 of the current
* LSB register
*/
- ret = ds2780_read16(dev_info->w1_dev, &current_raw, reg_msb);
+ ret = ds2780_read16(dev_info, &current_raw, reg_msb);
if (ret < 0)
return ret;
@@ -267,7 +279,7 @@ static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
* The units of measurement for accumulated current are dependent on
* the value of the sense resistor.
*/
- ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
+ ret = ds2780_read8(dev_info, &sense_res_raw, DS2780_RSNSP_REG);
if (ret < 0)
return ret;
@@ -285,7 +297,7 @@ static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
* Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR
* LSB register
*/
- ret = ds2780_read16(dev_info->w1_dev, &current_raw, DS2780_ACR_MSB_REG);
+ ret = ds2780_read16(dev_info, &current_raw, DS2780_ACR_MSB_REG);
if (ret < 0)
return ret;
@@ -299,7 +311,7 @@ static int ds2780_get_capacity(struct ds2780_device_info *dev_info,
int ret;
u8 raw;
- ret = ds2780_read8(dev_info->w1_dev, &raw, DS2780_RARC_REG);
+ ret = ds2780_read8(dev_info, &raw, DS2780_RARC_REG);
if (ret < 0)
return ret;
@@ -345,7 +357,7 @@ static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
* Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC
* LSB register
*/
- ret = ds2780_read16(dev_info->w1_dev, &charge_raw, DS2780_RAAC_MSB_REG);
+ ret = ds2780_read16(dev_info, &charge_raw, DS2780_RAAC_MSB_REG);
if (ret < 0)
return ret;
@@ -356,7 +368,7 @@ static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
static int ds2780_get_control_register(struct ds2780_device_info *dev_info,
u8 *control_reg)
{
- return ds2780_read8(dev_info->w1_dev, control_reg, DS2780_CONTROL_REG);
+ return ds2780_read8(dev_info, control_reg, DS2780_CONTROL_REG);
}
static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
@@ -364,7 +376,7 @@ static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
{
int ret;
- ret = ds2780_write(dev_info->w1_dev, &control_reg,
+ ret = ds2780_write(dev_info, &control_reg,
DS2780_CONTROL_REG, sizeof(u8));
if (ret < 0)
return ret;
@@ -503,7 +515,7 @@ static ssize_t ds2780_get_sense_resistor_value(struct device *dev,
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
- ret = ds2780_read8(dev_info->w1_dev, &sense_resistor, DS2780_RSNSP_REG);
+ ret = ds2780_read8(dev_info, &sense_resistor, DS2780_RSNSP_REG);
if (ret < 0)
return ret;
@@ -584,7 +596,7 @@ static ssize_t ds2780_get_pio_pin(struct device *dev,
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
- ret = ds2780_read8(dev_info->w1_dev, &sfr, DS2780_SFR_REG);
+ ret = ds2780_read8(dev_info, &sfr, DS2780_SFR_REG);
if (ret < 0)
return ret;
@@ -611,7 +623,7 @@ static ssize_t ds2780_set_pio_pin(struct device *dev,
return -EINVAL;
}
- ret = ds2780_write(dev_info->w1_dev, &new_setting,
+ ret = ds2780_write(dev_info, &new_setting,
DS2780_SFR_REG, sizeof(u8));
if (ret < 0)
return ret;
@@ -632,7 +644,7 @@ static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
DS2780_EEPROM_BLOCK1_END -
DS2780_EEPROM_BLOCK1_START + 1 - off);
- return ds2780_read_block(dev_info->w1_dev, buf,
+ return ds2780_read_block(dev_info, buf,
DS2780_EEPROM_BLOCK1_START + off, count);
}
@@ -650,7 +662,7 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
DS2780_EEPROM_BLOCK1_END -
DS2780_EEPROM_BLOCK1_START + 1 - off);
- ret = ds2780_write(dev_info->w1_dev, buf,
+ ret = ds2780_write(dev_info, buf,
DS2780_EEPROM_BLOCK1_START + off, count);
if (ret < 0)
return ret;
@@ -685,9 +697,8 @@ static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
DS2780_EEPROM_BLOCK0_END -
DS2780_EEPROM_BLOCK0_START + 1 - off);
- return ds2780_read_block(dev_info->w1_dev, buf,
+ return ds2780_read_block(dev_info, buf,
DS2780_EEPROM_BLOCK0_START + off, count);
-
}
static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
@@ -704,7 +715,7 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
DS2780_EEPROM_BLOCK0_END -
DS2780_EEPROM_BLOCK0_START + 1 - off);
- ret = ds2780_write(dev_info->w1_dev, buf,
+ ret = ds2780_write(dev_info, buf,
DS2780_EEPROM_BLOCK0_START + off, count);
if (ret < 0)
return ret;
@@ -768,6 +779,7 @@ static int __devinit ds2780_battery_probe(struct platform_device *pdev)
dev_info->bat.properties = ds2780_battery_props;
dev_info->bat.num_properties = ARRAY_SIZE(ds2780_battery_props);
dev_info->bat.get_property = ds2780_battery_get_property;
+ dev_info->mutex_holder = current;
ret = power_supply_register(&pdev->dev, &dev_info->bat);
if (ret) {
@@ -797,6 +809,8 @@ static int __devinit ds2780_battery_probe(struct platform_device *pdev)
goto fail_remove_bin_file;
}
+ dev_info->mutex_holder = NULL;
+
return 0;
fail_remove_bin_file:
@@ -816,6 +830,8 @@ static int __devexit ds2780_battery_remove(struct platform_device *pdev)
{
struct ds2780_device_info *dev_info = platform_get_drvdata(pdev);
+ dev_info->mutex_holder = current;
+
/* remove attributes */
sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 98bfab35b8e..9f0183c7307 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -23,6 +23,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index a9b0209a2f5..2595145f3bf 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -22,6 +22,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
index ffc5033ea9c..a23317d75c5 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/max8997_charger.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index ef8efadb58c..93e3bb47a3a 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 605514afc29..e15d4c9d398 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -14,6 +14,7 @@
#include <linux/ctype.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include "power_supply.h"
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 8520a7f4dd6..445197d4a8c 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -29,4 +29,13 @@ config PPS_CLIENT_PARPORT
If you say yes here you get support for a PPS source connected
with the interrupt pin of your parallel port.
+config PPS_CLIENT_GPIO
+ tristate "PPS client using GPIO"
+ depends on PPS && GENERIC_HARDIRQS
+ help
+ If you say yes here you get support for a PPS source using
+ GPIO. To be useful you must also register a platform device
+ specifying the GPIO pin and other options, usually in your board
+ setup.
+
endif
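
For context, a minimal board-setup sketch of what the PPS_CLIENT_GPIO help text above refers to — hypothetical and not part of this patch, assuming only the pps_gpio_platform_data fields the new pps-gpio.c driver below actually dereferences (gpio_pin, gpio_label, assert_falling_edge, capture_clear); the GPIO number and label are invented for illustration:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pps-gpio.h>

/* Board wiring assumption: GPIO 42 carries the 1PPS pulse (example only). */
static struct pps_gpio_platform_data board_pps_gpio_data = {
	.gpio_pin		= 42,
	.gpio_label		= "PPS",
	.assert_falling_edge	= false,	/* assert on the rising edge */
	.capture_clear		= false,	/* no CAPTURECLEAR events */
};

static struct platform_device board_pps_gpio_device = {
	.name	= "pps-gpio",			/* matches PPS_GPIO_NAME */
	.id	= -1,
	.dev	= {
		.platform_data = &board_pps_gpio_data,
	},
};

/* Typically called from the board's device-registration path. */
static int __init board_pps_gpio_register(void)
{
	return platform_device_register(&board_pps_gpio_device);
}
device_initcall(board_pps_gpio_register);
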
diff --git a/drivers/pps/clients/Makefile b/drivers/pps/clients/Makefile
index 4feb7e9e71e..a461d15f4a2 100644
--- a/drivers/pps/clients/Makefile
+++ b/drivers/pps/clients/Makefile
@@ -5,5 +5,6 @@
obj-$(CONFIG_PPS_CLIENT_KTIMER) += pps-ktimer.o
obj-$(CONFIG_PPS_CLIENT_LDISC) += pps-ldisc.o
obj-$(CONFIG_PPS_CLIENT_PARPORT) += pps_parport.o
+obj-$(CONFIG_PPS_CLIENT_GPIO) += pps-gpio.o
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
new file mode 100644
index 00000000000..65505554547
--- /dev/null
+++ b/drivers/pps/clients/pps-gpio.c
@@ -0,0 +1,227 @@
+/*
+ * pps-gpio.c -- PPS client driver using GPIO
+ *
+ *
+ * Copyright (C) 2010 Ricardo Martins <rasm@fe.up.pt>
+ * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define PPS_GPIO_NAME "pps-gpio"
+#define pr_fmt(fmt) PPS_GPIO_NAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pps_kernel.h>
+#include <linux/pps-gpio.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+
+/* Info for each registered platform device */
+struct pps_gpio_device_data {
+ int irq; /* IRQ used as PPS source */
+ struct pps_device *pps; /* PPS source device */
+ struct pps_source_info info; /* PPS source information */
+ const struct pps_gpio_platform_data *pdata;
+};
+
+/*
+ * Report the PPS event
+ */
+
+static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
+{
+ const struct pps_gpio_device_data *info;
+ struct pps_event_time ts;
+ int rising_edge;
+
+ /* Get the time stamp first */
+ pps_get_ts(&ts);
+
+ info = data;
+
+ rising_edge = gpio_get_value(info->pdata->gpio_pin);
+ if ((rising_edge && !info->pdata->assert_falling_edge) ||
+ (!rising_edge && info->pdata->assert_falling_edge))
+ pps_event(info->pps, &ts, PPS_CAPTUREASSERT, NULL);
+ else if (info->pdata->capture_clear &&
+ ((rising_edge && info->pdata->assert_falling_edge) ||
+ (!rising_edge && !info->pdata->assert_falling_edge)))
+ pps_event(info->pps, &ts, PPS_CAPTURECLEAR, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static int pps_gpio_setup(struct platform_device *pdev)
+{
+ int ret;
+ const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+ ret = gpio_request(pdata->gpio_pin, pdata->gpio_label);
+ if (ret) {
+ pr_warning("failed to request GPIO %u\n", pdata->gpio_pin);
+ return -EINVAL;
+ }
+
+ ret = gpio_direction_input(pdata->gpio_pin);
+ if (ret) {
+ pr_warning("failed to set pin direction\n");
+ gpio_free(pdata->gpio_pin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long
+get_irqf_trigger_flags(const struct pps_gpio_platform_data *pdata)
+{
+ unsigned long flags = pdata->assert_falling_edge ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+
+ if (pdata->capture_clear) {
+ flags |= ((flags & IRQF_TRIGGER_RISING) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING);
+ }
+
+ return flags;
+}
+
+static int pps_gpio_probe(struct platform_device *pdev)
+{
+ struct pps_gpio_device_data *data;
+ int irq;
+ int ret;
+ int err;
+ int pps_default_params;
+ const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+
+ /* GPIO setup */
+ ret = pps_gpio_setup(pdev);
+ if (ret)
+ return -EINVAL;
+
+ /* IRQ setup */
+ irq = gpio_to_irq(pdata->gpio_pin);
+ if (irq < 0) {
+ pr_err("failed to map GPIO to IRQ: %d\n", irq);
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ /* allocate space for device info */
+ data = kzalloc(sizeof(struct pps_gpio_device_data), GFP_KERNEL);
+ if (data == NULL) {
+ err = -ENOMEM;
+ goto return_error;
+ }
+
+ /* initialize PPS specific parts of the bookkeeping data structure. */
+ data->info.mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
+ PPS_ECHOASSERT | PPS_CANWAIT | PPS_TSFMT_TSPEC;
+ if (pdata->capture_clear)
+ data->info.mode |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR |
+ PPS_ECHOCLEAR;
+ data->info.owner = THIS_MODULE;
+ snprintf(data->info.name, PPS_MAX_NAME_LEN - 1, "%s.%d",
+ pdev->name, pdev->id);
+
+ /* register PPS source */
+ pps_default_params = PPS_CAPTUREASSERT | PPS_OFFSETASSERT;
+ if (pdata->capture_clear)
+ pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
+ data->pps = pps_register_source(&data->info, pps_default_params);
+ if (data->pps == NULL) {
+ kfree(data);
+ pr_err("failed to register IRQ %d as PPS source\n", irq);
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ data->irq = irq;
+ data->pdata = pdata;
+
+ /* register IRQ interrupt handler */
+ ret = request_irq(irq, pps_gpio_irq_handler,
+ get_irqf_trigger_flags(pdata), data->info.name, data);
+ if (ret) {
+ pps_unregister_source(data->pps);
+ kfree(data);
+ pr_err("failed to acquire IRQ %d\n", irq);
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ platform_set_drvdata(pdev, data);
+ dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n", irq);
+
+ return 0;
+
+return_error:
+ gpio_free(pdata->gpio_pin);
+ return err;
+}
+
+static int pps_gpio_remove(struct platform_device *pdev)
+{
+ struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
+ const struct pps_gpio_platform_data *pdata = data->pdata;
+
+ platform_set_drvdata(pdev, NULL);
+ free_irq(data->irq, data);
+ gpio_free(pdata->gpio_pin);
+ pps_unregister_source(data->pps);
+ pr_info("removed IRQ %d as PPS source\n", data->irq);
+ kfree(data);
+ return 0;
+}
+
+static struct platform_driver pps_gpio_driver = {
+ .probe = pps_gpio_probe,
+ .remove = __devexit_p(pps_gpio_remove),
+ .driver = {
+ .name = PPS_GPIO_NAME,
+ .owner = THIS_MODULE
+ },
+};
+
+static int __init pps_gpio_init(void)
+{
+ int ret = platform_driver_register(&pps_gpio_driver);
+ if (ret < 0)
+ pr_err("failed to register platform driver\n");
+ return ret;
+}
+
+static void __exit pps_gpio_exit(void)
+{
+ platform_driver_unregister(&pps_gpio_driver);
+ pr_debug("unregistered platform driver\n");
+}
+
+module_init(pps_gpio_init);
+module_exit(pps_gpio_exit);
+
+MODULE_AUTHOR("Ricardo Martins <rasm@fe.up.pt>");
+MODULE_AUTHOR("James Nuss <jamesnuss@nanometrics.ca>");
+MODULE_DESCRIPTION("Use GPIO pin as PPS source");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index 82583b0ff82..436b4e4e71a 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -52,17 +52,6 @@ static void pps_ktimer_event(unsigned long ptr)
}
/*
- * The echo function
- */
-
-static void pps_ktimer_echo(struct pps_device *pps, int event, void *data)
-{
- dev_info(pps->dev, "echo %s %s\n",
- event & PPS_CAPTUREASSERT ? "assert" : "",
- event & PPS_CAPTURECLEAR ? "clear" : "");
-}
-
-/*
* The PPS info struct
*/
@@ -72,7 +61,6 @@ static struct pps_source_info pps_ktimer_info = {
.mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
PPS_ECHOASSERT |
PPS_CANWAIT | PPS_TSFMT_TSPEC,
- .echo = pps_ktimer_echo,
.owner = THIS_MODULE,
};
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index c571d6dd8f6..e1b4705ae3e 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -133,14 +133,6 @@ out_both:
return;
}
-/* the PPS echo function */
-static void pps_echo(struct pps_device *pps, int event, void *data)
-{
- dev_info(pps->dev, "echo %s %s\n",
- event & PPS_CAPTUREASSERT ? "assert" : "",
- event & PPS_CAPTURECLEAR ? "clear" : "");
-}
-
static void parport_attach(struct parport *port)
{
struct pps_client_pp *device;
@@ -151,7 +143,6 @@ static void parport_attach(struct parport *port)
PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
PPS_ECHOASSERT | PPS_ECHOCLEAR | \
PPS_CANWAIT | PPS_TSFMT_TSPEC,
- .echo = pps_echo,
.owner = THIS_MODULE,
.dev = NULL
};
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index a4e8eb9fece..f197e8ea185 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -52,6 +52,14 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
ts->sec += offset->sec;
}
+static void pps_echo_client_default(struct pps_device *pps, int event,
+ void *data)
+{
+ dev_info(pps->dev, "echo %s %s\n",
+ event & PPS_CAPTUREASSERT ? "assert" : "",
+ event & PPS_CAPTURECLEAR ? "clear" : "");
+}
+
/*
* Exported functions
*/
@@ -80,13 +88,6 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
err = -EINVAL;
goto pps_register_source_exit;
}
- if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
- info->echo == NULL) {
- pr_err("%s: echo function is not defined\n",
- info->name);
- err = -EINVAL;
- goto pps_register_source_exit;
- }
if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
pr_err("%s: unspecified time format\n",
info->name);
@@ -108,6 +109,11 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
pps->params.mode = default_params;
pps->info = *info;
+ /* check for default echo function */
+ if ((pps->info.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) &&
+ pps->info.echo == NULL)
+ pps->info.echo = pps_echo_client_default;
+
init_waitqueue_head(&pps->queue);
spin_lock_init(&pps->lock);
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index d9fb729535a..fb7300837fe 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -952,7 +952,7 @@ static int ps3_vuart_bus_interrupt_get(void)
}
result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
- IRQF_DISABLED, "vuart", &vuart_bus_priv);
+ 0, "vuart", &vuart_bus_priv);
if (result) {
pr_debug("%s:%d: request_irq failed (%d)\n",
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index af0afa1db4a..8c3f5adf1bc 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -19,6 +19,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
@@ -166,7 +167,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
goto fail_close_device;
}
- error = request_irq(dev->irq, handler, IRQF_DISABLED,
+ error = request_irq(dev->irq, handler, 0,
dev->sbd.core.driver->name, dev);
if (error) {
dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c
index 474225852b6..0e41737ea83 100644
--- a/drivers/ps3/sys-manager-core.c
+++ b/drivers/ps3/sys-manager-core.c
@@ -19,6 +19,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index 070211a5955..bc871923879 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -1,6 +1,8 @@
#
# RapidIO configuration
#
+source "drivers/rapidio/devices/Kconfig"
+
config RAPIDIO_DISC_TIMEOUT
int "Discovery timeout duration (seconds)"
depends on RAPIDIO
@@ -20,8 +22,6 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
ports for Input/Output direction to allow other traffic
than Maintenance transfers.
-source "drivers/rapidio/switches/Kconfig"
-
config RAPIDIO_DEBUG
bool "RapidIO subsystem debug messages"
depends on RAPIDIO
@@ -32,3 +32,5 @@ config RAPIDIO_DEBUG
going on.
If you are unsure about this, say N here.
+
+source "drivers/rapidio/switches/Kconfig"
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 89b8eca825b..ec3fb812100 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -4,5 +4,6 @@
obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
obj-$(CONFIG_RAPIDIO) += switches/
+obj-$(CONFIG_RAPIDIO) += devices/
subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG
diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig
new file mode 100644
index 00000000000..12a9d7f7040
--- /dev/null
+++ b/drivers/rapidio/devices/Kconfig
@@ -0,0 +1,10 @@
+#
+# RapidIO master port configuration
+#
+
+config RAPIDIO_TSI721
+ bool "IDT Tsi721 PCI Express SRIO Controller support"
+ depends on RAPIDIO && PCIEPORTBUS
+ default "n"
+ ---help---
+ Include support for IDT Tsi721 PCI Express Serial RapidIO controller.
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
new file mode 100644
index 00000000000..3b7b4e2dff7
--- /dev/null
+++ b/drivers/rapidio/devices/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for RapidIO devices
+#
+
+obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
new file mode 100644
index 00000000000..5225930a10c
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721.c
@@ -0,0 +1,2360 @@
+/*
+ * RapidIO mport driver for Tsi721 PCI Express-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ * Chul Kim <chul.kim@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+#define DEBUG_PW /* Inbound Port-Write debugging */
+
+static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
+static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
+
+/**
+ * tsi721_lcread - read from local SREP config space
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be read into
+ *
+ * Generates a local SREP space read. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
+ int len, u32 *data)
+{
+ struct tsi721_device *priv = mport->priv;
+
+ if (len != sizeof(u32))
+ return -EINVAL; /* only 32-bit access is supported */
+
+ *data = ioread32(priv->regs + offset);
+
+ return 0;
+}
+
+/**
+ * tsi721_lcwrite - write into local SREP config space
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be written
+ *
+ * Generates a local write into SREP configuration space. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
+ int len, u32 data)
+{
+ struct tsi721_device *priv = mport->priv;
+
+ if (len != sizeof(u32))
+ return -EINVAL; /* only 32-bit access is supported */
+
+ iowrite32(data, priv->regs + offset);
+
+ return 0;
+}
+
+/**
+ * tsi721_maint_dma - Helper function to generate RapidIO maintenance
+ * transactions using designated Tsi721 DMA channel.
+ * @priv: pointer to tsi721 private data
+ * @sys_size: RapidIO transport system size
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Location to be read from or write into
+ * @do_wr: Operation flag (1 == MAINT_WR)
+ *
+ * Generates a RapidIO maintenance transaction (Read or Write).
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
+ u16 destid, u8 hopcount, u32 offset, int len,
+ u32 *data, int do_wr)
+{
+ struct tsi721_dma_desc *bd_ptr;
+ u32 rd_count, swr_ptr, ch_stat;
+ int i, err = 0;
+ u32 op = do_wr ? MAINT_WR : MAINT_RD;
+
+ if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
+ return -EINVAL;
+
+ bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+
+ rd_count = ioread32(
+ priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+
+ /* Initialize DMA descriptor */
+ bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
+ bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
+ bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
+ bd_ptr[0].raddr_hi = 0;
+ if (do_wr)
+ bd_ptr[0].data[0] = cpu_to_be32p(data);
+ else
+ bd_ptr[0].data[0] = 0xffffffff;
+
+ mb();
+
+ /* Start DMA operation */
+ iowrite32(rd_count + 2,
+ priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ i = 0;
+
+ /* Wait until DMA transfer is finished */
+ while ((ch_stat = ioread32(priv->regs +
+ TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+ udelay(1);
+ if (++i >= 5000000) {
+ dev_dbg(&priv->pdev->dev,
+ "%s : DMA[%d] read timeout ch_status=%x\n",
+ __func__, TSI721_DMACH_MAINT, ch_stat);
+ if (!do_wr)
+ *data = 0xffffffff;
+ err = -EIO;
+ goto err_out;
+ }
+ }
+
+ if (ch_stat & TSI721_DMAC_STS_ABORT) {
+ /* If DMA operation aborted due to error,
+ * reinitialize DMA channel
+ */
+ dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
+ __func__, ch_stat);
+ dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
+ do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
+ iowrite32(TSI721_DMAC_INT_ALL,
+ priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
+ iowrite32(TSI721_DMAC_CTL_INIT,
+ priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+ udelay(10);
+ iowrite32(0, priv->regs +
+ TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ udelay(1);
+ if (!do_wr)
+ *data = 0xffffffff;
+ err = -EIO;
+ goto err_out;
+ }
+
+ if (!do_wr)
+ *data = be32_to_cpu(bd_ptr[0].data[0]);
+
+ /*
+ * Update descriptor status FIFO RD pointer.
+ * NOTE: Skipping check and clear FIFO entries because we are waiting
+ * for transfer to be completed.
+ */
+ swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
+ iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+err_out:
+
+ return err;
+}
+
+/**
+ * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
+ * using Tsi721 BDMA engine.
+ * @mport: RapidIO master port control structure
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Location to be read into
+ *
+ * Generates a RapidIO maintenance read transaction.
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
+ u8 hopcount, u32 offset, int len, u32 *data)
+{
+ struct tsi721_device *priv = mport->priv;
+
+ return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
+ offset, len, data, 0);
+}
+
+/**
+ * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
+ * using Tsi721 BDMA engine
+ * @mport: RapidIO master port control structure
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Value to be written
+ *
+ * Generates a RapidIO maintenance write transaction.
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
+ u8 hopcount, u32 offset, int len, u32 data)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 temp = data;
+
+ return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
+ offset, len, &temp, 1);
+}
+
+/**
+ * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
+ * @mport: RapidIO master port structure
+ *
+ * Handles inbound port-write interrupts. Copies PW message from an internal
+ * buffer into PW message FIFO and schedules deferred routine to process
+ * queued messages.
+ */
+static int
+tsi721_pw_handler(struct rio_mport *mport)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 pw_stat;
+ u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
+
+
+ pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);
+
+ if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
+ pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
+ pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
+ pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
+ pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));
+
+ /* Queue PW message (if there is room in FIFO),
+ * otherwise discard it.
+ */
+ spin_lock(&priv->pw_fifo_lock);
+ if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
+ kfifo_in(&priv->pw_fifo, pw_buf,
+ TSI721_RIO_PW_MSG_SIZE);
+ else
+ priv->pw_discard_count++;
+ spin_unlock(&priv->pw_fifo_lock);
+ }
+
+ /* Clear pending PW interrupts */
+ iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
+ priv->regs + TSI721_RIO_PW_RX_STAT);
+
+ schedule_work(&priv->pw_work);
+
+ return 0;
+}
+
+static void tsi721_pw_dpc(struct work_struct *work)
+{
+ struct tsi721_device *priv = container_of(work, struct tsi721_device,
+ pw_work);
+ u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message
+ buffer for RIO layer */
+
+ /*
+ * Process port-write messages
+ */
+ while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer,
+ TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
+ /* Process one message */
+#ifdef DEBUG_PW
+ {
+ u32 i;
+ pr_debug("%s : Port-Write Message:", __func__);
+ for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) {
+ pr_debug("0x%02x: %08x %08x %08x %08x", i*4,
+ msg_buffer[i], msg_buffer[i + 1],
+ msg_buffer[i + 2], msg_buffer[i + 3]);
+ i += 4;
+ }
+ pr_debug("\n");
+ }
+#endif
+ /* Pass the port-write message to RIO core for processing */
+ rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
+ }
+}
+
+/**
+ * tsi721_pw_enable - enable/disable port-write interface init
+ * @mport: Master port implementing the port write unit
+ * @enable: 1=enable; 0=disable port-write message handling
+ */
+static int tsi721_pw_enable(struct rio_mport *mport, int enable)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 rval;
+
+ rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);
+
+ if (enable)
+ rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
+ else
+ rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;
+
+ /* Clear pending PW interrupts */
+ iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
+ priv->regs + TSI721_RIO_PW_RX_STAT);
+ /* Update enable bits */
+ iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);
+
+ return 0;
+}
+
+/**
+ * tsi721_dsend - Send a RapidIO doorbell
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of target device
+ * @data: 16-bit info field of RapidIO doorbell
+ *
+ * Sends a RapidIO doorbell message. Always returns %0.
+ */
+static int tsi721_dsend(struct rio_mport *mport, int index,
+ u16 destid, u16 data)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 offset;
+
+ offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
+ (destid << 2);
+
+ dev_dbg(&priv->pdev->dev,
+ "Send Doorbell 0x%04x to destID 0x%x\n", data, destid);
+ iowrite16be(data, priv->odb_base + offset);
+
+ return 0;
+}
+
+/**
+ * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
+ * @mport: RapidIO master port structure
+ *
+ * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
+ * buffer into DB message FIFO and schedules deferred routine to process
+ * queued DBs.
+ */
+static int
+tsi721_dbell_handler(struct rio_mport *mport)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 regval;
+
+ /* Disable IDB interrupts */
+ regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+ regval &= ~TSI721_SR_CHINT_IDBQRCV;
+ iowrite32(regval,
+ priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+ schedule_work(&priv->idb_work);
+
+ return 0;
+}
+
+static void tsi721_db_dpc(struct work_struct *work)
+{
+ struct tsi721_device *priv = container_of(work, struct tsi721_device,
+ idb_work);
+ struct rio_mport *mport;
+ struct rio_dbell *dbell;
+ int found = 0;
+ u32 wr_ptr, rd_ptr;
+ u64 *idb_entry;
+ u32 regval;
+ union {
+ u64 msg;
+ u8 bytes[8];
+ } idb;
+
+ /*
+ * Process queued inbound doorbells
+ */
+ mport = priv->mport;
+
+ wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
+ rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+
+ while (wr_ptr != rd_ptr) {
+ idb_entry = (u64 *)(priv->idb_base +
+ (TSI721_IDB_ENTRY_SIZE * rd_ptr));
+ rd_ptr++;
+ idb.msg = *idb_entry;
+ *idb_entry = 0;
+
+ /* Process one doorbell */
+ list_for_each_entry(dbell, &mport->dbells, node) {
+ if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
+ (dbell->res->end >= DBELL_INF(idb.bytes))) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
+ DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
+ } else {
+ dev_dbg(&priv->pdev->dev,
+ "spurious inb doorbell, sid %2.2x tid %2.2x"
+ " info %4.4x\n", DBELL_SID(idb.bytes),
+ DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
+ }
+ }
+
+ iowrite32(rd_ptr & (IDB_QSIZE - 1),
+ priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+
+ /* Re-enable IDB interrupts */
+ regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+ regval |= TSI721_SR_CHINT_IDBQRCV;
+ iowrite32(regval,
+ priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+}
+
+/**
+ * tsi721_irqhandler - Tsi721 interrupt handler
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (mport structure)
+ *
+ * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
+ * interrupt events and calls an event-specific handler(s).
+ */
+static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+{
+ struct rio_mport *mport = (struct rio_mport *)ptr;
+ struct tsi721_device *priv = mport->priv;
+ u32 dev_int;
+ u32 dev_ch_int;
+ u32 intval;
+ u32 ch_inte;
+
+ dev_int = ioread32(priv->regs + TSI721_DEV_INT);
+ if (!dev_int)
+ return IRQ_NONE;
+
+ dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);
+
+ if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
+ /* Service SR2PC Channel interrupts */
+ if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
+ /* Service Inbound Doorbell interrupt */
+ intval = ioread32(priv->regs +
+ TSI721_SR_CHINT(IDB_QUEUE));
+ if (intval & TSI721_SR_CHINT_IDBQRCV)
+ tsi721_dbell_handler(mport);
+ else
+ dev_info(&priv->pdev->dev,
+ "Unsupported SR_CH_INT %x\n", intval);
+
+ /* Clear interrupts */
+ iowrite32(intval,
+ priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+ ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+ }
+ }
+
+ if (dev_int & TSI721_DEV_INT_SMSG_CH) {
+ int ch;
+
+ /*
+ * Service channel interrupts from Messaging Engine
+ */
+
+ if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
+ /* Disable signaled IB MSG Channel interrupts */
+ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
+ iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+
+ /*
+ * Process Inbound Message interrupt for each MBOX
+ */
+ for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
+ if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
+ continue;
+ tsi721_imsg_handler(priv, ch);
+ }
+ }
+
+ if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
+ /* Disable signaled OB MSG Channel interrupts */
+ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
+ iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+
+ /*
+ * Process Outbound Message interrupts for each MBOX
+ */
+
+ for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
+ if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
+ continue;
+ tsi721_omsg_handler(priv, ch);
+ }
+ }
+ }
+
+ if (dev_int & TSI721_DEV_INT_SRIO) {
+ /* Service SRIO MAC interrupts */
+ intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
+ if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
+ tsi721_pw_handler(mport);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tsi721_interrupts_init(struct tsi721_device *priv)
+{
+ u32 intr;
+
+ /* Enable IDB interrupts */
+ iowrite32(TSI721_SR_CHINT_ALL,
+ priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+ iowrite32(TSI721_SR_CHINT_IDBQRCV,
+ priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+ iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
+ priv->regs + TSI721_DEV_CHAN_INTE);
+
+ /* Enable SRIO MAC interrupts */
+ iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
+ priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+
+ if (priv->flags & TSI721_USING_MSIX)
+ intr = TSI721_DEV_INT_SRIO;
+ else
+ intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+ TSI721_DEV_INT_SMSG_CH;
+
+ iowrite32(intr, priv->regs + TSI721_DEV_INTE);
+ ioread32(priv->regs + TSI721_DEV_INTE);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (mport structure)
+ *
+ * Handles outbound messaging interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
+{
+ struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ int mbox;
+
+ mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
+ tsi721_omsg_handler(priv, mbox);
+ return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (mport structure)
+ *
+ * Handles inbound messaging interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
+{
+ struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ int mbox;
+
+ mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
+ tsi721_imsg_handler(priv, mbox + 4);
+ return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (mport structure)
+ *
+ * Handles Tsi721 interrupts from SRIO MAC.
+ */
+static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
+{
+ struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ u32 srio_int;
+
+ /* Service SRIO MAC interrupts */
+ srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
+ if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
+ tsi721_pw_handler((struct rio_mport *)ptr);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (mport structure)
+ *
+ * Handles Tsi721 interrupts from SR2PC Channel.
+ * NOTE: At this moment this handler services only the one SR2PC channel
+ * associated with inbound doorbells.
+ */
+static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
+{
+ struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ u32 sr_ch_int;
+
+ /* Service Inbound DB interrupt from SR2PC channel */
+ sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+ if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
+ tsi721_dbell_handler((struct rio_mport *)ptr);
+
+ /* Clear interrupts */
+ iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+ /* Read back to ensure that interrupt was cleared */
+ sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_request_msix - register interrupt service for MSI-X mode.
+ * @mport: RapidIO master port structure
+ *
+ * Registers MSI-X interrupt service routines for interrupts that are active
+ * immediately after mport initialization. Messaging interrupt service routines
+ * should be registered during corresponding open requests.
+ */
+static int tsi721_request_msix(struct rio_mport *mport)
+{
+ struct tsi721_device *priv = mport->priv;
+ int err = 0;
+
+ err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
+ tsi721_sr2pc_ch_msix, 0,
+ priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
+ if (err)
+ goto out;
+
+ err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
+ tsi721_srio_msix, 0,
+ priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
+ if (err)
+ free_irq(
+ priv->msix[TSI721_VECT_IDB].vector,
+ (void *)mport);
+out:
+ return err;
+}
+
+/**
+ * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
+ * @priv: pointer to tsi721 private data
+ *
+ * Configures MSI-X support for Tsi721. Supports only an exact number
+ * of requested vectors.
+ */
+static int tsi721_enable_msix(struct tsi721_device *priv)
+{
+ struct msix_entry entries[TSI721_VECT_MAX];
+ int err;
+ int i;
+
+ entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
+ entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
+
+ /*
+ * Initialize MSI-X entries for Messaging Engine:
+ * this driver supports four RIO mailboxes (inbound and outbound)
+ * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
+ * offset +4 is added to IB MBOX number.
+ */
+ for (i = 0; i < RIO_MAX_MBOX; i++) {
+ entries[TSI721_VECT_IMB0_RCV + i].entry =
+ TSI721_MSIX_IMSG_DQ_RCV(i + 4);
+ entries[TSI721_VECT_IMB0_INT + i].entry =
+ TSI721_MSIX_IMSG_INT(i + 4);
+ entries[TSI721_VECT_OMB0_DONE + i].entry =
+ TSI721_MSIX_OMSG_DONE(i);
+ entries[TSI721_VECT_OMB0_INT + i].entry =
+ TSI721_MSIX_OMSG_INT(i);
+ }
+
+ err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
+ if (err) {
+ if (err > 0)
+ dev_info(&priv->pdev->dev,
+ "Only %d MSI-X vectors available, "
+ "not using MSI-X\n", err);
+ return err;
+ }
+
+ /*
+ * Copy MSI-X vector information into tsi721 private structure
+ */
+ priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
+ snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
+ DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
+ priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
+ snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
+ DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
+
+ for (i = 0; i < RIO_MAX_MBOX; i++) {
+ priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
+ entries[TSI721_VECT_IMB0_RCV + i].vector;
+ snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
+ i, pci_name(priv->pdev));
+
+ priv->msix[TSI721_VECT_IMB0_INT + i].vector =
+ entries[TSI721_VECT_IMB0_INT + i].vector;
+ snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
+ i, pci_name(priv->pdev));
+
+ priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
+ entries[TSI721_VECT_OMB0_DONE + i].vector;
+ snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
+ i, pci_name(priv->pdev));
+
+ priv->msix[TSI721_VECT_OMB0_INT + i].vector =
+ entries[TSI721_VECT_OMB0_INT + i].vector;
+ snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
+ i, pci_name(priv->pdev));
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PCI_MSI */
+
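+/**
+ * tsi721_request_irq - register interrupt service for the enabled mode
+ * @mport: RapidIO master port structure
+ *
+ * Registers the interrupt service routine(s) that match the interrupt
+ * signalling mode selected for the device (MSI-X, MSI or legacy INTx).
+ */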
+static int tsi721_request_irq(struct rio_mport *mport)
+{
+ struct tsi721_device *priv = mport->priv;
+ int err;
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX)
+ err = tsi721_request_msix(mport);
+ else
+#endif
+ err = request_irq(priv->pdev->irq, tsi721_irqhandler,
+ (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
+ DRV_NAME, (void *)mport);
+
+ if (err)
+ dev_err(&priv->pdev->dev,
+ "Unable to allocate interrupt, Error: %d\n", err);
+
+ return err;
+}
+
+/**
+ * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
+ * translation regions.
+ * @priv: pointer to tsi721 private data
+ *
+ * Disables SREP translation regions.
+ */
+static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
+{
+ int i;
+
+ /* Disable all PC2SR translation windows */
+ for (i = 0; i < TSI721_OBWIN_NUM; i++)
+ iowrite32(0, priv->regs + TSI721_OBWINLB(i));
+}
+
+/**
+ * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
+ * translation regions.
+ * @priv: pointer to tsi721 private data
+ *
+ * Disables inbound windows.
+ */
+static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
+{
+ int i;
+
+ /* Disable all SR2PC inbound windows */
+ for (i = 0; i < TSI721_IBWIN_NUM; i++)
+ iowrite32(0, priv->regs + TSI721_IBWINLB(i));
+}
+
+/**
+ * tsi721_port_write_init - Inbound port write interface init
+ * @priv: pointer to tsi721 private data
+ *
+ * Initializes inbound port write handler.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_port_write_init(struct tsi721_device *priv)
+{
+ priv->pw_discard_count = 0;
+ INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
+ spin_lock_init(&priv->pw_fifo_lock);
+ if (kfifo_alloc(&priv->pw_fifo,
+ TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
+ dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Use reliable port-write capture mode */
+ iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
+ return 0;
+}
+
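+/**
+ * tsi721_doorbell_init - Doorbell interface initialization
+ * @priv: pointer to tsi721 private data
+ *
+ * Allocates and configures the inbound doorbell queue.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */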
+static int tsi721_doorbell_init(struct tsi721_device *priv)
+{
+	/* Outbound doorbells do not require any setup.
+	 * Tsi721 uses a dedicated PCI BAR1 to generate doorbells;
+	 * that BAR is mapped during the probe routine.
+	 */
+
+ /* Initialize Inbound Doorbell processing DPC and queue */
+ priv->db_discard_count = 0;
+ INIT_WORK(&priv->idb_work, tsi721_db_dpc);
+
+ /* Allocate buffer for inbound doorbells queue */
+ priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+ &priv->idb_dma, GFP_KERNEL);
+ if (!priv->idb_base)
+ return -ENOMEM;
+
+ memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
+
+ dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
+ priv->idb_base, (unsigned long long)priv->idb_dma);
+
+ iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
+ priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
+ iowrite32(((u64)priv->idb_dma >> 32),
+ priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
+ iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
+ priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
+ /* Enable accepting all inbound doorbells */
+ iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
+
+ iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
+
+ iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+
+ return 0;
+}
+
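+/**
+ * tsi721_doorbell_free - Free doorbell interface resources
+ * @priv: pointer to tsi721 private data
+ */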
+static void tsi721_doorbell_free(struct tsi721_device *priv)
+{
+ if (priv->idb_base == NULL)
+ return;
+
+ /* Free buffer allocated for inbound doorbell queue */
+ dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+ priv->idb_base, priv->idb_dma);
+ priv->idb_base = NULL;
+}
+
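+/**
+ * tsi721_bdma_ch_init - Initialize a Block DMA engine channel
+ * @priv: pointer to tsi721 private data
+ * @chnum: number of the BDMA channel to initialize
+ *
+ * Allocates the DMA descriptor ring and descriptor status FIFO for the
+ * given channel and programs the corresponding channel registers.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */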
+static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+{
+ struct tsi721_dma_desc *bd_ptr;
+ u64 *sts_ptr;
+ dma_addr_t bd_phys, sts_phys;
+ int sts_size;
+ int bd_num = priv->bdma[chnum].bd_num;
+
+ dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+
+ /*
+ * Initialize DMA channel for maintenance requests
+ */
+
+ /* Allocate space for DMA descriptors */
+ bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
+ if (!bd_ptr)
+ return -ENOMEM;
+
+ priv->bdma[chnum].bd_phys = bd_phys;
+ priv->bdma[chnum].bd_base = bd_ptr;
+
+ memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
+
+ dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
+ bd_ptr, (unsigned long long)bd_phys);
+
+ /* Allocate space for descriptor status FIFO */
+ sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+ bd_num : TSI721_DMA_MINSTSSZ;
+ sts_size = roundup_pow_of_two(sts_size);
+ sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_size * sizeof(struct tsi721_dma_sts),
+ &sts_phys, GFP_KERNEL);
+ if (!sts_ptr) {
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(&priv->pdev->dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ bd_ptr, bd_phys);
+ priv->bdma[chnum].bd_base = NULL;
+ return -ENOMEM;
+ }
+
+ priv->bdma[chnum].sts_phys = sts_phys;
+ priv->bdma[chnum].sts_base = sts_ptr;
+ priv->bdma[chnum].sts_size = sts_size;
+
+	memset(sts_ptr, 0, sts_size * sizeof(struct tsi721_dma_sts));
+
+ dev_dbg(&priv->pdev->dev,
+ "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+ sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+ /* Initialize DMA descriptors ring */
+ bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+ bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+ TSI721_DMAC_DPTRL_MASK);
+ bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+ /* Setup DMA descriptor pointers */
+ iowrite32(((u64)bd_phys >> 32),
+ priv->regs + TSI721_DMAC_DPTRH(chnum));
+ iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+ priv->regs + TSI721_DMAC_DPTRL(chnum));
+
+ /* Setup descriptor status FIFO */
+ iowrite32(((u64)sts_phys >> 32),
+ priv->regs + TSI721_DMAC_DSBH(chnum));
+ iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+ priv->regs + TSI721_DMAC_DSBL(chnum));
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+ priv->regs + TSI721_DMAC_DSSZ(chnum));
+
+ /* Clear interrupt bits */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ priv->regs + TSI721_DMAC_INT(chnum));
+
+ ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+
+ /* Toggle DMA channel initialization */
+ iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
+ ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+ udelay(10);
+
+ return 0;
+}
+
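+/**
+ * tsi721_bdma_ch_free - Release a Block DMA engine channel
+ * @priv: pointer to tsi721 private data
+ * @chnum: number of the BDMA channel to release
+ *
+ * Returns %0 on success or %-EFAULT if the channel is still running.
+ */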
+static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+{
+ u32 ch_stat;
+
+ if (priv->bdma[chnum].bd_base == NULL)
+ return 0;
+
+ /* Check if DMA channel still running */
+ ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
+ if (ch_stat & TSI721_DMAC_STS_RUN)
+ return -EFAULT;
+
+ /* Put DMA channel into init state */
+ iowrite32(TSI721_DMAC_CTL_INIT,
+ priv->regs + TSI721_DMAC_CTL(chnum));
+
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
+ priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
+ priv->bdma[chnum].bd_base = NULL;
+
+ /* Free space allocated for status FIFO */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
+ priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
+ priv->bdma[chnum].sts_base = NULL;
+ return 0;
+}
+
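+/**
+ * tsi721_bdma_init - Block DMA engine initialization
+ * @priv: pointer to tsi721 private data
+ *
+ * Initializes the BDMA channel reserved for RapidIO maintenance requests.
+ */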
+static int tsi721_bdma_init(struct tsi721_device *priv)
+{
+ /* Initialize BDMA channel allocated for RapidIO maintenance read/write
+ * request generation
+ */
+ priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
+ if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
+ dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
+ " channel %d, aborting\n", TSI721_DMACH_MAINT);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
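+/**
+ * tsi721_bdma_free - Release Block DMA engine resources
+ * @priv: pointer to tsi721 private data
+ */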
+static void tsi721_bdma_free(struct tsi721_device *priv)
+{
+ tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
+}
+
+/* Enable Inbound Messaging Interrupts */
+static void
+tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
+ u32 inte_mask)
+{
+ u32 rval;
+
+ if (!inte_mask)
+ return;
+
+ /* Clear pending Inbound Messaging interrupts */
+ iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
+
+ /* Enable Inbound Messaging interrupts */
+ rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
+ iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
+
+ if (priv->flags & TSI721_USING_MSIX)
+ return; /* Finished if we are in MSI-X mode */
+
+ /*
+ * For MSI and INTA interrupt signalling we need to enable next levels
+ */
+
+ /* Enable Device Channel Interrupt */
+ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
+ priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/* Disable Inbound Messaging Interrupts */
+static void
+tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
+ u32 inte_mask)
+{
+ u32 rval;
+
+ if (!inte_mask)
+ return;
+
+ /* Clear pending Inbound Messaging interrupts */
+ iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
+
+ /* Disable Inbound Messaging interrupts */
+ rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
+ rval &= ~inte_mask;
+ iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
+
+ if (priv->flags & TSI721_USING_MSIX)
+ return; /* Finished if we are in MSI-X mode */
+
+ /*
+ * For MSI and INTA interrupt signalling we need to disable next levels
+ */
+
+ /* Disable Device Channel Interrupt */
+ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ rval &= ~TSI721_INT_IMSG_CHAN(ch);
+ iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/* Enable Outbound Messaging interrupts */
+static void
+tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
+ u32 inte_mask)
+{
+ u32 rval;
+
+ if (!inte_mask)
+ return;
+
+ /* Clear pending Outbound Messaging interrupts */
+ iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
+
+ /* Enable Outbound Messaging channel interrupts */
+ rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
+ iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
+
+ if (priv->flags & TSI721_USING_MSIX)
+ return; /* Finished if we are in MSI-X mode */
+
+ /*
+ * For MSI and INTA interrupt signalling we need to enable next levels
+ */
+
+ /* Enable Device Channel Interrupt */
+ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
+ priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/* Disable Outbound Messaging interrupts */
+static void
+tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
+ u32 inte_mask)
+{
+ u32 rval;
+
+ if (!inte_mask)
+ return;
+
+ /* Clear pending Outbound Messaging interrupts */
+ iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
+
+ /* Disable Outbound Messaging interrupts */
+ rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
+ rval &= ~inte_mask;
+ iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
+
+ if (priv->flags & TSI721_USING_MSIX)
+ return; /* Finished if we are in MSI-X mode */
+
+ /*
+ * For MSI and INTA interrupt signalling we need to disable next levels
+ */
+
+ /* Disable Device Channel Interrupt */
+ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ rval &= ~TSI721_INT_OMSG_CHAN(ch);
+ iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/**
+ * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox: Outbound mailbox
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ */
+static int
+tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
+ void *buffer, size_t len)
+{
+ struct tsi721_device *priv = mport->priv;
+ struct tsi721_omsg_desc *desc;
+ u32 tx_slot;
+
+ if (!priv->omsg_init[mbox] ||
+ len > TSI721_MSG_MAX_SIZE || len < 8)
+ return -EINVAL;
+
+ tx_slot = priv->omsg_ring[mbox].tx_slot;
+
+	/* Copy message into transfer buffer */
+ memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
+
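+	/* Round the length up to an 8-byte boundary (the low bits are
+	 * masked off when the descriptor is built below)
+	 */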
+ if (len & 0x7)
+ len += 8;
+
+ /* Build descriptor associated with buffer */
+ desc = priv->omsg_ring[mbox].omd_base;
+ desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
+ if (tx_slot % 4 == 0)
+ desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
+
+ desc[tx_slot].msg_info =
+ cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
+ (0xe << 12) | (len & 0xff8));
+ desc[tx_slot].bufptr_lo =
+ cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
+ 0xffffffff);
+ desc[tx_slot].bufptr_hi =
+ cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
+
+ priv->omsg_ring[mbox].wr_count++;
+
+ /* Go to next descriptor */
+ if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
+ priv->omsg_ring[mbox].tx_slot = 0;
+ /* Move through the ring link descriptor at the end */
+ priv->omsg_ring[mbox].wr_count++;
+ }
+
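+	/* Make sure descriptor and buffer updates are visible to the device
+	 * before the new write count is written
+	 */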
+ mb();
+
+ /* Set new write count value */
+ iowrite32(priv->omsg_ring[mbox].wr_count,
+ priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+ ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+
+ return 0;
+}
+
+/**
+ * tsi721_omsg_handler - Outbound Message Interrupt Handler
+ * @priv: pointer to tsi721 private data
+ * @ch: number of OB MSG channel to service
+ *
+ * Services channel interrupts from outbound messaging engine.
+ */
+static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
+{
+ u32 omsg_int;
+
+ spin_lock(&priv->omsg_ring[ch].lock);
+
+ omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
+
+ if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
+ dev_info(&priv->pdev->dev,
+ "OB MBOX%d: Status FIFO is full\n", ch);
+
+ if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
+ u32 srd_ptr;
+ u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
+ int i, j;
+ u32 tx_slot;
+
+ /*
+ * Find last successfully processed descriptor
+ */
+
+ /* Check and clear descriptor status FIFO entries */
+ srd_ptr = priv->omsg_ring[ch].sts_rdptr;
+ sts_ptr = priv->omsg_ring[ch].sts_base;
+ j = srd_ptr * 8;
+ while (sts_ptr[j]) {
+ for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
+ prev_ptr = last_ptr;
+ last_ptr = le64_to_cpu(sts_ptr[j]);
+ sts_ptr[j] = 0;
+ }
+
+ ++srd_ptr;
+ srd_ptr %= priv->omsg_ring[ch].sts_size;
+ j = srd_ptr * 8;
+ }
+
+ if (last_ptr == 0)
+ goto no_sts_update;
+
+ priv->omsg_ring[ch].sts_rdptr = srd_ptr;
+ iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
+
+ if (!priv->mport->outb_msg[ch].mcback)
+ goto no_sts_update;
+
+ /* Inform upper layer about transfer completion */
+
+ tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
+ sizeof(struct tsi721_omsg_desc);
+
+ /*
+ * Check if this is a Link Descriptor (LD).
+ * If yes, ignore LD and use descriptor processed
+ * before LD.
+ */
+ if (tx_slot == priv->omsg_ring[ch].size) {
+ if (prev_ptr)
+ tx_slot = (prev_ptr -
+ (u64)priv->omsg_ring[ch].omd_phys)/
+ sizeof(struct tsi721_omsg_desc);
+ else
+ goto no_sts_update;
+ }
+
+ /* Move slot index to the next message to be sent */
+ ++tx_slot;
+ if (tx_slot == priv->omsg_ring[ch].size)
+ tx_slot = 0;
+ BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
+ priv->mport->outb_msg[ch].mcback(priv->mport,
+ priv->omsg_ring[ch].dev_id, ch,
+ tx_slot);
+ }
+
+no_sts_update:
+
+ if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
+ /*
+ * Outbound message operation aborted due to error,
+ * reinitialize OB MSG channel
+ */
+
+ dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
+ ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
+
+ iowrite32(TSI721_OBDMAC_INT_ERROR,
+ priv->regs + TSI721_OBDMAC_INT(ch));
+ iowrite32(TSI721_OBDMAC_CTL_INIT,
+ priv->regs + TSI721_OBDMAC_CTL(ch));
+ ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
+
+ /* Inform upper level to clear all pending tx slots */
+ if (priv->mport->outb_msg[ch].mcback)
+ priv->mport->outb_msg[ch].mcback(priv->mport,
+ priv->omsg_ring[ch].dev_id, ch,
+ priv->omsg_ring[ch].tx_slot);
+ /* Synch tx_slot tracking */
+ iowrite32(priv->omsg_ring[ch].tx_slot,
+ priv->regs + TSI721_OBDMAC_DRDCNT(ch));
+ ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
+ priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
+ priv->omsg_ring[ch].sts_rdptr = 0;
+ }
+
+ /* Clear channel interrupts */
+ iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
+
+ if (!(priv->flags & TSI721_USING_MSIX)) {
+ u32 ch_inte;
+
+ /* Re-enable channel interrupts */
+ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ ch_inte |= TSI721_INT_OMSG_CHAN(ch);
+ iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+ }
+
+ spin_unlock(&priv->omsg_ring[ch].lock);
+}
+
+/**
+ * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
+ * @mport: Master port implementing Outbound Messaging Engine
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the outbound mailbox ring
+ */
+static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
+ int mbox, int entries)
+{
+ struct tsi721_device *priv = mport->priv;
+ struct tsi721_omsg_desc *bd_ptr;
+ int i, rc = 0;
+
+ if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
+ (entries > (TSI721_OMSGD_RING_SIZE)) ||
+ (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ priv->omsg_ring[mbox].dev_id = dev_id;
+ priv->omsg_ring[mbox].size = entries;
+ priv->omsg_ring[mbox].sts_rdptr = 0;
+ spin_lock_init(&priv->omsg_ring[mbox].lock);
+
+	/* Outbound message buffer allocation based on
+	   the maximum number of descriptor entries */
+ for (i = 0; i < entries; i++) {
+ priv->omsg_ring[mbox].omq_base[i] =
+ dma_alloc_coherent(
+ &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
+ &priv->omsg_ring[mbox].omq_phys[i],
+ GFP_KERNEL);
+ if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
+ dev_dbg(&priv->pdev->dev,
+ "Unable to allocate OB MSG data buffer for"
+ " MBOX%d\n", mbox);
+ rc = -ENOMEM;
+ goto out_buf;
+ }
+ }
+
+ /* Outbound message descriptor allocation */
+ priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
+ &priv->pdev->dev,
+ (entries + 1) * sizeof(struct tsi721_omsg_desc),
+ &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
+ if (priv->omsg_ring[mbox].omd_base == NULL) {
+ dev_dbg(&priv->pdev->dev,
+ "Unable to allocate OB MSG descriptor memory "
+ "for MBOX%d\n", mbox);
+ rc = -ENOMEM;
+ goto out_buf;
+ }
+
+ priv->omsg_ring[mbox].tx_slot = 0;
+
+ /* Outbound message descriptor status FIFO allocation */
+ priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
+ priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_size *
+ sizeof(struct tsi721_dma_sts),
+ &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
+ if (priv->omsg_ring[mbox].sts_base == NULL) {
+ dev_dbg(&priv->pdev->dev,
+ "Unable to allocate OB MSG descriptor status FIFO "
+ "for MBOX%d\n", mbox);
+ rc = -ENOMEM;
+ goto out_desc;
+ }
+
+	memset(priv->omsg_ring[mbox].sts_base, 0,
+		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts));
+
+ /*
+ * Configure Outbound Messaging Engine
+ */
+
+ /* Setup Outbound Message descriptor pointer */
+ iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
+ priv->regs + TSI721_OBDMAC_DPTRH(mbox));
+ iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
+ TSI721_OBDMAC_DPTRL_MASK),
+ priv->regs + TSI721_OBDMAC_DPTRL(mbox));
+
+ /* Setup Outbound Message descriptor status FIFO */
+ iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
+ priv->regs + TSI721_OBDMAC_DSBH(mbox));
+ iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
+ TSI721_OBDMAC_DSBL_MASK),
+ priv->regs + TSI721_OBDMAC_DSBL(mbox));
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
+ priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
+
+ /* Enable interrupts */
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ /* Request interrupt service if we are in MSI-X mode */
+ rc = request_irq(
+ priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
+ tsi721_omsg_msix, 0,
+ priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name,
+ (void *)mport);
+
+ if (rc) {
+ dev_dbg(&priv->pdev->dev,
+ "Unable to allocate MSI-X interrupt for "
+				"MBOX%d-DONE\n", mbox);
+ goto out_stat;
+ }
+
+ rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
+ tsi721_omsg_msix, 0,
+ priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name,
+ (void *)mport);
+
+ if (rc) {
+ dev_dbg(&priv->pdev->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "MBOX%d-INT\n", mbox);
+ free_irq(
+ priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
+ (void *)mport);
+ goto out_stat;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
+
+ /* Initialize Outbound Message descriptors ring */
+ bd_ptr = priv->omsg_ring[mbox].omd_base;
+ bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
+ bd_ptr[entries].msg_info = 0;
+ bd_ptr[entries].next_lo =
+ cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
+ TSI721_OBDMAC_DPTRL_MASK);
+ bd_ptr[entries].next_hi =
+ cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
+ priv->omsg_ring[mbox].wr_count = 0;
+ mb();
+
+ /* Initialize Outbound Message engine */
+ iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox));
+ ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+ udelay(10);
+
+ priv->omsg_init[mbox] = 1;
+
+ return 0;
+
+#ifdef CONFIG_PCI_MSI
+out_stat:
+ dma_free_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+ priv->omsg_ring[mbox].sts_base,
+ priv->omsg_ring[mbox].sts_phys);
+
+ priv->omsg_ring[mbox].sts_base = NULL;
+#endif /* CONFIG_PCI_MSI */
+
+out_desc:
+ dma_free_coherent(&priv->pdev->dev,
+ (entries + 1) * sizeof(struct tsi721_omsg_desc),
+ priv->omsg_ring[mbox].omd_base,
+ priv->omsg_ring[mbox].omd_phys);
+
+ priv->omsg_ring[mbox].omd_base = NULL;
+
+out_buf:
+ for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
+ if (priv->omsg_ring[mbox].omq_base[i]) {
+ dma_free_coherent(&priv->pdev->dev,
+ TSI721_MSG_BUFFER_SIZE,
+ priv->omsg_ring[mbox].omq_base[i],
+ priv->omsg_ring[mbox].omq_phys[i]);
+
+ priv->omsg_ring[mbox].omq_base[i] = NULL;
+ }
+ }
+
+out:
+ return rc;
+}
+
+/**
+ * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @mbox: Mailbox to close
+ */
+static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 i;
+
+ if (!priv->omsg_init[mbox])
+ return;
+ priv->omsg_init[mbox] = 0;
+
+ /* Disable Interrupts */
+
+ tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
+ (void *)mport);
+ free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
+ (void *)mport);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ /* Free OMSG Descriptor Status FIFO */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+ priv->omsg_ring[mbox].sts_base,
+ priv->omsg_ring[mbox].sts_phys);
+
+ priv->omsg_ring[mbox].sts_base = NULL;
+
+ /* Free OMSG descriptors */
+ dma_free_coherent(&priv->pdev->dev,
+ (priv->omsg_ring[mbox].size + 1) *
+ sizeof(struct tsi721_omsg_desc),
+ priv->omsg_ring[mbox].omd_base,
+ priv->omsg_ring[mbox].omd_phys);
+
+ priv->omsg_ring[mbox].omd_base = NULL;
+
+ /* Free message buffers */
+ for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
+ if (priv->omsg_ring[mbox].omq_base[i]) {
+ dma_free_coherent(&priv->pdev->dev,
+ TSI721_MSG_BUFFER_SIZE,
+ priv->omsg_ring[mbox].omq_base[i],
+ priv->omsg_ring[mbox].omq_phys[i]);
+
+ priv->omsg_ring[mbox].omq_base[i] = NULL;
+ }
+ }
+}
+
+/**
+ * tsi721_imsg_handler - Inbound Message Interrupt Handler
+ * @priv: pointer to tsi721 private data
+ * @ch: inbound message channel number to service
+ *
+ * Services channel interrupts from inbound messaging engine.
+ */
+static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
+{
+ u32 mbox = ch - 4;
+ u32 imsg_int;
+
+ spin_lock(&priv->imsg_ring[mbox].lock);
+
+ imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
+
+ if (imsg_int & TSI721_IBDMAC_INT_SRTO)
+ dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
+ mbox);
+
+ if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
+ dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
+ mbox);
+
+ if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
+ dev_info(&priv->pdev->dev,
+ "IB MBOX%d IB free queue low\n", mbox);
+
+ /* Clear IB channel interrupts */
+ iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
+
+	/* If an IB message has been received, notify the upper layer */
+ if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
+ priv->mport->inb_msg[mbox].mcback)
+ priv->mport->inb_msg[mbox].mcback(priv->mport,
+ priv->imsg_ring[mbox].dev_id, mbox, -1);
+
+ if (!(priv->flags & TSI721_USING_MSIX)) {
+ u32 ch_inte;
+
+ /* Re-enable channel interrupts */
+ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+ ch_inte |= TSI721_INT_IMSG_CHAN(ch);
+ iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+ }
+
+ spin_unlock(&priv->imsg_ring[mbox].lock);
+}
+
+/**
+ * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ */
+static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
+ int mbox, int entries)
+{
+ struct tsi721_device *priv = mport->priv;
+ int ch = mbox + 4;
+ int i;
+ u64 *free_ptr;
+ int rc = 0;
+
+ if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
+ (entries > TSI721_IMSGD_RING_SIZE) ||
+ (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize IB Messaging Ring */
+ priv->imsg_ring[mbox].dev_id = dev_id;
+ priv->imsg_ring[mbox].size = entries;
+ priv->imsg_ring[mbox].rx_slot = 0;
+ priv->imsg_ring[mbox].desc_rdptr = 0;
+ priv->imsg_ring[mbox].fq_wrptr = 0;
+ for (i = 0; i < priv->imsg_ring[mbox].size; i++)
+ priv->imsg_ring[mbox].imq_base[i] = NULL;
+ spin_lock_init(&priv->imsg_ring[mbox].lock);
+
+ /* Allocate buffers for incoming messages */
+ priv->imsg_ring[mbox].buf_base =
+ dma_alloc_coherent(&priv->pdev->dev,
+ entries * TSI721_MSG_BUFFER_SIZE,
+ &priv->imsg_ring[mbox].buf_phys,
+ GFP_KERNEL);
+
+ if (priv->imsg_ring[mbox].buf_base == NULL) {
+ dev_err(&priv->pdev->dev,
+ "Failed to allocate buffers for IB MBOX%d\n", mbox);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Allocate memory for circular free list */
+ priv->imsg_ring[mbox].imfq_base =
+ dma_alloc_coherent(&priv->pdev->dev,
+ entries * 8,
+ &priv->imsg_ring[mbox].imfq_phys,
+ GFP_KERNEL);
+
+ if (priv->imsg_ring[mbox].imfq_base == NULL) {
+ dev_err(&priv->pdev->dev,
+ "Failed to allocate free queue for IB MBOX%d\n", mbox);
+ rc = -ENOMEM;
+ goto out_buf;
+ }
+
+ /* Allocate memory for Inbound message descriptors */
+ priv->imsg_ring[mbox].imd_base =
+ dma_alloc_coherent(&priv->pdev->dev,
+ entries * sizeof(struct tsi721_imsg_desc),
+ &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);
+
+ if (priv->imsg_ring[mbox].imd_base == NULL) {
+ dev_err(&priv->pdev->dev,
+ "Failed to allocate descriptor memory for IB MBOX%d\n",
+ mbox);
+ rc = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* Fill free buffer pointer list */
+ free_ptr = priv->imsg_ring[mbox].imfq_base;
+ for (i = 0; i < entries; i++)
+ free_ptr[i] = cpu_to_le64(
+ (u64)(priv->imsg_ring[mbox].buf_phys) +
+ i * 0x1000);
+
+ mb();
+
+ /*
+	 * To map inbound SRIO messages into the appropriate queues we need to
+	 * set the Inbound Device ID register in the messaging engine. This is
+	 * done once, when the first inbound mailbox is requested.
+ */
+ if (!(priv->flags & TSI721_IMSGID_SET)) {
+ iowrite32((u32)priv->mport->host_deviceid,
+ priv->regs + TSI721_IB_DEVID);
+ priv->flags |= TSI721_IMSGID_SET;
+ }
+
+ /*
+ * Configure Inbound Messaging channel (ch = mbox + 4)
+ */
+
+ /* Setup Inbound Message free queue */
+ iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
+ priv->regs + TSI721_IBDMAC_FQBH(ch));
+ iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
+ TSI721_IBDMAC_FQBL_MASK),
+ priv->regs+TSI721_IBDMAC_FQBL(ch));
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
+ priv->regs + TSI721_IBDMAC_FQSZ(ch));
+
+ /* Setup Inbound Message descriptor queue */
+ iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
+ priv->regs + TSI721_IBDMAC_DQBH(ch));
+ iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
+ (u32)TSI721_IBDMAC_DQBL_MASK),
+ priv->regs+TSI721_IBDMAC_DQBL(ch));
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
+ priv->regs + TSI721_IBDMAC_DQSZ(ch));
+
+ /* Enable interrupts */
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ /* Request interrupt service if we are in MSI-X mode */
+ rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
+ tsi721_imsg_msix, 0,
+ priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
+ (void *)mport);
+
+ if (rc) {
+ dev_dbg(&priv->pdev->dev,
+ "Unable to allocate MSI-X interrupt for "
+				"IBOX%d-RCV\n", mbox);
+ goto out_desc;
+ }
+
+ rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
+ tsi721_imsg_msix, 0,
+ priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
+ (void *)mport);
+
+ if (rc) {
+ dev_dbg(&priv->pdev->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "IBOX%d-INT\n", mbox);
+ free_irq(
+ priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
+ (void *)mport);
+ goto out_desc;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);
+
+ /* Initialize Inbound Message Engine */
+ iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
+ ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
+ udelay(10);
+ priv->imsg_ring[mbox].fq_wrptr = entries - 1;
+ iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));
+
+ priv->imsg_init[mbox] = 1;
+ return 0;
+
+#ifdef CONFIG_PCI_MSI
+out_desc:
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
+ priv->imsg_ring[mbox].imd_base,
+ priv->imsg_ring[mbox].imd_phys);
+
+ priv->imsg_ring[mbox].imd_base = NULL;
+#endif /* CONFIG_PCI_MSI */
+
+out_dma:
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * 8,
+ priv->imsg_ring[mbox].imfq_base,
+ priv->imsg_ring[mbox].imfq_phys);
+
+ priv->imsg_ring[mbox].imfq_base = NULL;
+
+out_buf:
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
+ priv->imsg_ring[mbox].buf_base,
+ priv->imsg_ring[mbox].buf_phys);
+
+ priv->imsg_ring[mbox].buf_base = NULL;
+
+out:
+ return rc;
+}
+
+/**
+ * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Mailbox to close
+ */
+static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 rx_slot;
+ int ch = mbox + 4;
+
+ if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
+ return;
+ priv->imsg_init[mbox] = 0;
+
+ /* Disable Inbound Messaging Engine */
+
+ /* Disable Interrupts */
+	tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
+ (void *)mport);
+ free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
+ (void *)mport);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ /* Clear Inbound Buffer Queue */
+ for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
+ priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
+
+ /* Free memory allocated for message buffers */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
+ priv->imsg_ring[mbox].buf_base,
+ priv->imsg_ring[mbox].buf_phys);
+
+ priv->imsg_ring[mbox].buf_base = NULL;
+
+	/* Free memory allocated for the free pointer list */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * 8,
+ priv->imsg_ring[mbox].imfq_base,
+ priv->imsg_ring[mbox].imfq_phys);
+
+ priv->imsg_ring[mbox].imfq_base = NULL;
+
+ /* Free memory allocated for RX descriptors */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
+ priv->imsg_ring[mbox].imd_base,
+ priv->imsg_ring[mbox].imd_phys);
+
+ priv->imsg_ring[mbox].imd_base = NULL;
+}
+
+/**
+ * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ */
+static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 rx_slot;
+ int rc = 0;
+
+ rx_slot = priv->imsg_ring[mbox].rx_slot;
+ if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
+ dev_err(&priv->pdev->dev,
+ "Error adding inbound buffer %d, buffer exists\n",
+ rx_slot);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
+
+ if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
+ priv->imsg_ring[mbox].rx_slot = 0;
+
+out:
+ return rc;
+}
+
+/**
+ * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ *
+ * Returns pointer to the message on success or NULL on failure.
+ */
+static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
+{
+ struct tsi721_device *priv = mport->priv;
+ struct tsi721_imsg_desc *desc;
+ u32 rx_slot;
+ void *rx_virt = NULL;
+ u64 rx_phys;
+ void *buf = NULL;
+ u64 *free_ptr;
+ int ch = mbox + 4;
+ int msg_size;
+
+ if (!priv->imsg_init[mbox])
+ return NULL;
+
+ desc = priv->imsg_ring[mbox].imd_base;
+ desc += priv->imsg_ring[mbox].desc_rdptr;
+
+ if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
+ goto out;
+
+ rx_slot = priv->imsg_ring[mbox].rx_slot;
+ while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
+ if (++rx_slot == priv->imsg_ring[mbox].size)
+ rx_slot = 0;
+ }
+
+ rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
+ le32_to_cpu(desc->bufptr_lo);
+
+ rx_virt = priv->imsg_ring[mbox].buf_base +
+ (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);
+
+ buf = priv->imsg_ring[mbox].imq_base[rx_slot];
+ msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
+ if (msg_size == 0)
+ msg_size = RIO_MAX_MSG_SIZE;
+
+ memcpy(buf, rx_virt, msg_size);
+ priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
+
+ desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
+ if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
+ priv->imsg_ring[mbox].desc_rdptr = 0;
+
+ iowrite32(priv->imsg_ring[mbox].desc_rdptr,
+ priv->regs + TSI721_IBDMAC_DQRP(ch));
+
+ /* Return free buffer into the pointer list */
+ free_ptr = priv->imsg_ring[mbox].imfq_base;
+ free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);
+
+ if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
+ priv->imsg_ring[mbox].fq_wrptr = 0;
+
+ iowrite32(priv->imsg_ring[mbox].fq_wrptr,
+ priv->regs + TSI721_IBDMAC_FQWP(ch));
+out:
+ return buf;
+}
+
+/**
+ * tsi721_messages_init - Initialization of Messaging Engine
+ * @priv: pointer to tsi721 private data
+ *
+ * Configures Tsi721 messaging engine.
+ */
+static int tsi721_messages_init(struct tsi721_device *priv)
+{
+ int ch;
+
+ iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
+ iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
+ iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);
+
+ /* Set SRIO Message Request/Response Timeout */
+ iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);
+
+ /* Initialize Inbound Messaging Engine Registers */
+ for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
+ /* Clear interrupt bits */
+ iowrite32(TSI721_IBDMAC_INT_MASK,
+ priv->regs + TSI721_IBDMAC_INT(ch));
+ /* Clear Status */
+ iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));
+
+ iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
+ priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
+ iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
+ priv->regs + TSI721_SMSG_ECC_NCOR(ch));
+ }
+
+ return 0;
+}
+
+/**
+ * tsi721_disable_ints - disables all device interrupts
+ * @priv: pointer to tsi721 private data
+ */
+static void tsi721_disable_ints(struct tsi721_device *priv)
+{
+ int ch;
+
+ /* Disable all device level interrupts */
+ iowrite32(0, priv->regs + TSI721_DEV_INTE);
+
+ /* Disable all Device Channel interrupts */
+ iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);
+
+ /* Disable all Inbound Msg Channel interrupts */
+ for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
+ iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));
+
+ /* Disable all Outbound Msg Channel interrupts */
+ for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
+ iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));
+
+ /* Disable all general messaging interrupts */
+ iowrite32(0, priv->regs + TSI721_SMSG_INTE);
+
+ /* Disable all BDMA Channel interrupts */
+ for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
+ iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+
+ /* Disable all general BDMA interrupts */
+ iowrite32(0, priv->regs + TSI721_BDMA_INTE);
+
+ /* Disable all SRIO Channel interrupts */
+ for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
+ iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));
+
+ /* Disable all general SR2PC interrupts */
+ iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);
+
+ /* Disable all PC2SR interrupts */
+ iowrite32(0, priv->regs + TSI721_PC2SR_INTE);
+
+ /* Disable all I2C interrupts */
+ iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);
+
+ /* Disable SRIO MAC interrupts */
+ iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
+ iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+}
+
+/**
+ * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
+ * @priv: pointer to tsi721 private data
+ *
+ * Configures Tsi721 as RapidIO master port.
+ */
+static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ int err = 0;
+	struct rio_ops *ops;
+	struct rio_mport *mport;
+
+ ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
+ if (!ops) {
+ dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n");
+ return -ENOMEM;
+ }
+
+ ops->lcread = tsi721_lcread;
+ ops->lcwrite = tsi721_lcwrite;
+ ops->cread = tsi721_cread_dma;
+ ops->cwrite = tsi721_cwrite_dma;
+ ops->dsend = tsi721_dsend;
+ ops->open_inb_mbox = tsi721_open_inb_mbox;
+ ops->close_inb_mbox = tsi721_close_inb_mbox;
+ ops->open_outb_mbox = tsi721_open_outb_mbox;
+ ops->close_outb_mbox = tsi721_close_outb_mbox;
+ ops->add_outb_message = tsi721_add_outb_message;
+ ops->add_inb_buffer = tsi721_add_inb_buffer;
+ ops->get_inb_message = tsi721_get_inb_message;
+
+ mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
+ if (!mport) {
+ kfree(ops);
+ dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n");
+ return -ENOMEM;
+ }
+
+ mport->ops = ops;
+ mport->index = 0;
+ mport->sys_size = 0; /* small system */
+ mport->phy_type = RIO_PHY_SERIAL;
+ mport->priv = (void *)priv;
+ mport->phys_efptr = 0x100;
+
+ INIT_LIST_HEAD(&mport->dbells);
+
+ rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ strcpy(mport->name, "Tsi721 mport");
+
+ /* Hook up interrupt handler */
+
+#ifdef CONFIG_PCI_MSI
+ if (!tsi721_enable_msix(priv))
+ priv->flags |= TSI721_USING_MSIX;
+ else if (!pci_enable_msi(pdev))
+ priv->flags |= TSI721_USING_MSI;
+ else
+ dev_info(&pdev->dev,
+ "MSI/MSI-X is not available. Using legacy INTx.\n");
+#endif /* CONFIG_PCI_MSI */
+
+ err = tsi721_request_irq(mport);
+
+ if (!err) {
+ tsi721_interrupts_init(priv);
+ ops->pwenable = tsi721_pw_enable;
+ } else
+ dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
+ "vector %02X err=0x%x\n", pdev->irq, err);
+
+ /* Enable SRIO link */
+ iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
+ TSI721_DEVCTL_SRBOOT_CMPL,
+ priv->regs + TSI721_DEVCTL);
+
+ rio_register_mport(mport);
+ priv->mport = mport;
+
+ if (mport->host_deviceid >= 0)
+ iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
+ RIO_PORT_GEN_DISCOVERED,
+ priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
+ else
+ iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
+
+ return 0;
+}
+
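+/**
+ * tsi721_probe - PCI probe routine for the Tsi721 device
+ * @pdev: PCI device to be probed
+ * @id: matching entry from the PCI device ID table
+ *
+ * Maps device BARs, configures DMA, initializes the Tsi721 hardware blocks
+ * and registers the device as a RapidIO master port.
+ */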
+static int __devinit tsi721_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct tsi721_device *priv;
+ int i;
+ int err;
+ u32 regval;
+
+ priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for device\n");
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto err_clean;
+ }
+
+ priv->pdev = pdev;
+
+#ifdef DEBUG
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
+ i, (unsigned long long)pci_resource_start(pdev, i),
+ (unsigned long)pci_resource_len(pdev, i),
+ pci_resource_flags(pdev, i));
+ }
+#endif
+ /*
+ * Verify BAR configuration
+ */
+
+ /* BAR_0 (registers) must be 512KB+ in 32-bit address space */
+ if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
+ pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
+ pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
+ dev_err(&pdev->dev,
+ "Missing or misconfigured CSR BAR0, aborting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
+ if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
+ pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
+ pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
+ dev_err(&pdev->dev,
+ "Missing or misconfigured Doorbell BAR1, aborting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ /*
+ * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
+ * space.
+	 * NOTE: BAR_2 and BAR_4 are not used by this version of the driver.
+ * It may be a good idea to keep them disabled using HW configuration
+ * to save PCI memory space.
+ */
+ if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) &&
+ (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) {
+ dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n");
+ }
+
+ if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) &&
+ (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) {
+ dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n");
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, "
+ "aborting.\n");
+ goto err_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ priv->regs = pci_ioremap_bar(pdev, BAR_0);
+ if (!priv->regs) {
+ dev_err(&pdev->dev,
+			"Unable to map device register space, aborting\n");
+ err = -ENOMEM;
+ goto err_free_res;
+ }
+
+ priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
+ if (!priv->odb_base) {
+ dev_err(&pdev->dev,
+ "Unable to map outbound doorbells space, aborting\n");
+ err = -ENOMEM;
+ goto err_unmap_bars;
+ }
+
+ /* Configure DMA attributes. */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+			dev_info(&pdev->dev, "Unable to set DMA mask\n");
+			err = -EIO;
+			goto err_unmap_bars;
+		}
+
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
+ } else {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err)
+ dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
+ }
+
+ /* Clear "no snoop" and "relaxed ordering" bits. */
+ pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
+ regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
+ pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+
+ /*
+ * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
+ */
+ pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
+ pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
+ TSI721_MSIXTBL_OFFSET);
+ pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
+ TSI721_MSIXPBA_OFFSET);
+ pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
+ /* End of FIXUP */
+
+ tsi721_disable_ints(priv);
+
+ tsi721_init_pc2sr_mapping(priv);
+ tsi721_init_sr2pc_mapping(priv);
+
+ if (tsi721_bdma_init(priv)) {
+ dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
+ err = -ENOMEM;
+ goto err_unmap_bars;
+ }
+
+ err = tsi721_doorbell_init(priv);
+ if (err)
+ goto err_free_bdma;
+
+ tsi721_port_write_init(priv);
+
+ err = tsi721_messages_init(priv);
+ if (err)
+ goto err_free_consistent;
+
+ err = tsi721_setup_mport(priv);
+ if (err)
+ goto err_free_consistent;
+
+ return 0;
+
+err_free_consistent:
+ tsi721_doorbell_free(priv);
+err_free_bdma:
+ tsi721_bdma_free(priv);
+err_unmap_bars:
+ if (priv->regs)
+ iounmap(priv->regs);
+ if (priv->odb_base)
+ iounmap(priv->odb_base);
+err_free_res:
+ pci_release_regions(pdev);
+ pci_clear_master(pdev);
+err_disable_pdev:
+ pci_disable_device(pdev);
+err_clean:
+ kfree(priv);
+err_exit:
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
+ { 0, } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);
+
+static struct pci_driver tsi721_driver = {
+ .name = "tsi721",
+ .id_table = tsi721_pci_tbl,
+ .probe = tsi721_probe,
+};
+
+static int __init tsi721_init(void)
+{
+ return pci_register_driver(&tsi721_driver);
+}
+
+static void __exit tsi721_exit(void)
+{
+ pci_unregister_driver(&tsi721_driver);
+}
+
+device_initcall(tsi721_init);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
new file mode 100644
index 00000000000..58be4deb140
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721.h
@@ -0,0 +1,766 @@
+/*
+ * Tsi721 PCIExpress-to-SRIO bridge definitions
+ *
+ * Copyright 2011, Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __TSI721_H
+#define __TSI721_H
+
+#define DRV_NAME "tsi721"
+
+#define DEFAULT_HOPCOUNT 0xff
+#define DEFAULT_DESTID 0xff
+
+/* PCI device ID */
+#define PCI_DEVICE_ID_TSI721 0x80ab
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_2 2
+#define BAR_4 4
+
+#define TSI721_PC2SR_BARS 2
+#define TSI721_PC2SR_WINS 8
+#define TSI721_PC2SR_ZONES 8
+#define TSI721_MAINT_WIN 0 /* Window for outbound maintenance requests */
+#define IDB_QUEUE 0 /* Inbound Doorbell Queue to use */
+#define IDB_QSIZE 512 /* Inbound Doorbell Queue size */
+
+/* Memory space sizes */
+#define TSI721_REG_SPACE_SIZE (512 * 1024) /* 512K */
+#define TSI721_DB_WIN_SIZE (16 * 1024 * 1024) /* 16MB */
+
+#define RIO_TT_CODE_8 0x00000000
+#define RIO_TT_CODE_16 0x00000001
+
+#define TSI721_DMA_MAXCH 8
+#define TSI721_DMA_MINSTSSZ 32
+#define TSI721_DMA_STSBLKSZ 8
+
+#define TSI721_SRIO_MAXCH 8
+
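+/* Extract fields from an inbound doorbell queue entry */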
+#define DBELL_SID(buf) (((u8)buf[2] << 8) | (u8)buf[3])
+#define DBELL_TID(buf) (((u8)buf[4] << 8) | (u8)buf[5])
+#define DBELL_INF(buf) (((u8)buf[0] << 8) | (u8)buf[1])
+
+#define TSI721_RIO_PW_MSG_SIZE 16 /* Tsi721 saves only 16 bytes of PW msg */
+
+/* Register definitions */
+
+/*
+ * Registers in PCIe configuration space
+ */
+
+#define TSI721_PCIECFG_MSIXTBL 0x0a4
+#define TSI721_MSIXTBL_OFFSET 0x2c000
+#define TSI721_PCIECFG_MSIXPBA 0x0a8
+#define TSI721_MSIXPBA_OFFSET 0x2a000
+#define TSI721_PCIECFG_EPCTL 0x400
+
+/*
+ * Event Management Registers
+ */
+
+#define TSI721_RIO_EM_INT_STAT 0x10910
+#define TSI721_RIO_EM_INT_STAT_PW_RX 0x00010000
+
+#define TSI721_RIO_EM_INT_ENABLE 0x10914
+#define TSI721_RIO_EM_INT_ENABLE_PW_RX 0x00010000
+
+#define TSI721_RIO_EM_DEV_INT_EN 0x10930
+#define TSI721_RIO_EM_DEV_INT_EN_INT 0x00000001
+
+/*
+ * Port-Write Block Registers
+ */
+
+#define TSI721_RIO_PW_CTL 0x10a04
+#define TSI721_RIO_PW_CTL_PW_TIMER 0xf0000000
+#define TSI721_RIO_PW_CTL_PWT_DIS (0 << 28)
+#define TSI721_RIO_PW_CTL_PWT_103 (1 << 28)
+#define TSI721_RIO_PW_CTL_PWT_205 (1 << 29)
+#define TSI721_RIO_PW_CTL_PWT_410 (1 << 30)
+#define TSI721_RIO_PW_CTL_PWT_820 (1 << 31)
+#define TSI721_RIO_PW_CTL_PWC_MODE 0x01000000
+#define TSI721_RIO_PW_CTL_PWC_CONT 0x00000000
+#define TSI721_RIO_PW_CTL_PWC_REL 0x01000000
+
+#define TSI721_RIO_PW_RX_STAT 0x10a10
+#define TSI721_RIO_PW_RX_STAT_WR_SIZE 0x0000f000
+#define TSI_RIO_PW_RX_STAT_WDPTR 0x00000100
+#define TSI721_RIO_PW_RX_STAT_PW_SHORT 0x00000008
+#define TSI721_RIO_PW_RX_STAT_PW_TRUNC 0x00000004
+#define TSI721_RIO_PW_RX_STAT_PW_DISC 0x00000002
+#define TSI721_RIO_PW_RX_STAT_PW_VAL 0x00000001
+
+#define TSI721_RIO_PW_RX_CAPT(x) (0x10a20 + (x)*4)
+
+/*
+ * Inbound Doorbells
+ */
+
+#define TSI721_IDB_ENTRY_SIZE 64
+
+#define TSI721_IDQ_CTL(x) (0x20000 + (x) * 1000)
+#define TSI721_IDQ_SUSPEND 0x00000002
+#define TSI721_IDQ_INIT 0x00000001
+
+#define TSI721_IDQ_STS(x) (0x20004 + (x) * 1000)
+#define TSI721_IDQ_RUN 0x00200000
+
+#define TSI721_IDQ_MASK(x) (0x20008 + (x) * 1000)
+#define TSI721_IDQ_MASK_MASK 0xffff0000
+#define TSI721_IDQ_MASK_PATT 0x0000ffff
+
+#define TSI721_IDQ_RP(x) (0x2000c + (x) * 1000)
+#define TSI721_IDQ_RP_PTR 0x0007ffff
+
+#define TSI721_IDQ_WP(x) (0x20010 + (x) * 1000)
+#define TSI721_IDQ_WP_PTR 0x0007ffff
+
+#define TSI721_IDQ_BASEL(x) (0x20014 + (x) * 1000)
+#define TSI721_IDQ_BASEL_ADDR 0xffffffc0
+#define TSI721_IDQ_BASEU(x) (0x20018 + (x) * 1000)
+#define TSI721_IDQ_SIZE(x) (0x2001c + (x) * 1000)
+#define TSI721_IDQ_SIZE_VAL(size) (__fls(size) - 4)
+#define TSI721_IDQ_SIZE_MIN 512
+#define TSI721_IDQ_SIZE_MAX (512 * 1024)
+
+#define TSI721_SR_CHINT(x) (0x20040 + (x) * 1000)
+#define TSI721_SR_CHINTE(x) (0x20044 + (x) * 1000)
+#define TSI721_SR_CHINTSET(x) (0x20048 + (x) * 1000)
+#define TSI721_SR_CHINT_ODBOK 0x00000020
+#define TSI721_SR_CHINT_IDBQRCV 0x00000010
+#define TSI721_SR_CHINT_SUSP 0x00000008
+#define TSI721_SR_CHINT_ODBTO 0x00000004
+#define TSI721_SR_CHINT_ODBRTRY 0x00000002
+#define TSI721_SR_CHINT_ODBERR 0x00000001
+#define TSI721_SR_CHINT_ALL 0x0000003f
+
+#define TSI721_IBWIN_NUM 8
+
+#define TSI721_IBWINLB(x) (0x29000 + (x) * 20)
+#define TSI721_IBWINLB_BA 0xfffff000
+#define TSI721_IBWINLB_WEN 0x00000001
+
+#define TSI721_SR2PC_GEN_INTE 0x29800
+#define TSI721_SR2PC_PWE 0x29804
+#define TSI721_SR2PC_GEN_INT 0x29808
+
+#define TSI721_DEV_INTE 0x29840
+#define TSI721_DEV_INT 0x29844
+#define TSI721_DEV_INTSET 0x29848
+#define TSI721_DEV_INT_SMSG_CH 0x00000800
+#define TSI721_DEV_INT_SMSG_NCH 0x00000400
+#define TSI721_DEV_INT_SR2PC_CH 0x00000200
+#define TSI721_DEV_INT_SRIO 0x00000020
+
+#define TSI721_DEV_CHAN_INTE 0x2984c
+#define TSI721_DEV_CHAN_INT 0x29850
+
+#define TSI721_INT_SR2PC_CHAN_M 0xff000000
+#define TSI721_INT_SR2PC_CHAN(x) (1 << (24 + (x)))
+#define TSI721_INT_IMSG_CHAN_M 0x00ff0000
+#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
+#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
+#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
+
+/*
+ * PC2SR block registers
+ */
+#define TSI721_OBWIN_NUM TSI721_PC2SR_WINS
+
+#define TSI721_OBWINLB(x) (0x40000 + (x) * 20)
+#define TSI721_OBWINLB_BA 0xffff8000
+#define TSI721_OBWINLB_WEN 0x00000001
+
+#define TSI721_OBWINUB(x) (0x40004 + (x) * 20)
+
+#define TSI721_OBWINSZ(x) (0x40008 + (x) * 20)
+#define TSI721_OBWINSZ_SIZE 0x00001f00
+#define TSI721_OBWIN_SIZE(size) (__fls(size) - 15)
+
+#define TSI721_ZONE_SEL 0x41300
+#define TSI721_ZONE_SEL_RD_WRB 0x00020000
+#define TSI721_ZONE_SEL_GO 0x00010000
+#define TSI721_ZONE_SEL_WIN 0x00000038
+#define TSI721_ZONE_SEL_ZONE 0x00000007
+
+#define TSI721_LUT_DATA0 0x41304
+#define TSI721_LUT_DATA0_ADD 0xfffff000
+#define TSI721_LUT_DATA0_RDTYPE 0x00000f00
+#define TSI721_LUT_DATA0_NREAD 0x00000100
+#define TSI721_LUT_DATA0_MNTRD 0x00000200
+#define TSI721_LUT_DATA0_RDCRF 0x00000020
+#define TSI721_LUT_DATA0_WRCRF 0x00000010
+#define TSI721_LUT_DATA0_WRTYPE 0x0000000f
+#define TSI721_LUT_DATA0_NWR 0x00000001
+#define TSI721_LUT_DATA0_MNTWR 0x00000002
+#define TSI721_LUT_DATA0_NWR_R 0x00000004
+
+#define TSI721_LUT_DATA1 0x41308
+
+#define TSI721_LUT_DATA2 0x4130c
+#define TSI721_LUT_DATA2_HC 0xff000000
+#define TSI721_LUT_DATA2_ADD65 0x000c0000
+#define TSI721_LUT_DATA2_TT 0x00030000
+#define TSI721_LUT_DATA2_DSTID 0x0000ffff
+
+#define TSI721_PC2SR_INTE 0x41310
+
+#define TSI721_DEVCTL 0x48004
+#define TSI721_DEVCTL_SRBOOT_CMPL 0x00000004
+
+#define TSI721_I2C_INT_ENABLE 0x49120
+
+/*
+ * Block DMA Engine Registers
+ * x = 0..7
+ */
+
+#define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000)
+#define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000)
+
+#define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_CTL_SUSP 0x00000002
+#define TSI721_DMAC_CTL_INIT 0x00000001
+
+#define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT_STFULL 0x00000010
+#define TSI721_DMAC_INT_DONE 0x00000008
+#define TSI721_DMAC_INT_SUSP 0x00000004
+#define TSI721_DMAC_INT_ERR 0x00000002
+#define TSI721_DMAC_INT_IOFDONE 0x00000001
+#define TSI721_DMAC_INT_ALL 0x0000001f
+
+#define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000)
+
+#define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS_ABORT 0x00400000
+#define TSI721_DMAC_STS_RUN 0x00200000
+#define TSI721_DMAC_STS_CS 0x001f0000
+
+#define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000)
+
+#define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
+
+#define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000)
+
+#define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL_MASK 0xffffffc0
+
+#define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000)
+
+#define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
+#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
+
+
+#define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP_MASK 0x0007ffff
+
+#define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP_MASK 0x0007ffff
+
+#define TSI721_BDMA_INTE 0x5f000
+
+/*
+ * Messaging definitions
+ */
+#define TSI721_MSG_BUFFER_SIZE RIO_MAX_MSG_SIZE
+#define TSI721_MSG_MAX_SIZE RIO_MAX_MSG_SIZE
+#define TSI721_IMSG_MAXCH 8
+#define TSI721_IMSG_CHNUM TSI721_IMSG_MAXCH
+#define TSI721_IMSGD_MIN_RING_SIZE 32
+#define TSI721_IMSGD_RING_SIZE 512
+
+#define TSI721_OMSG_CHNUM 4 /* One channel per MBOX */
+#define TSI721_OMSGD_MIN_RING_SIZE 32
+#define TSI721_OMSGD_RING_SIZE 512
+
+/*
+ * Outbound Messaging Engine Registers
+ * x = 0..7
+ */
+
+#define TSI721_OBDMAC_DWRCNT(x) (0x61000 + (x) * 0x1000)
+
+#define TSI721_OBDMAC_DRDCNT(x) (0x61004 + (x) * 0x1000)
+
+#define TSI721_OBDMAC_CTL(x) (0x61008 + (x) * 0x1000)
+#define TSI721_OBDMAC_CTL_MASK 0x00000007
+#define TSI721_OBDMAC_CTL_RETRY_THR 0x00000004
+#define TSI721_OBDMAC_CTL_SUSPEND 0x00000002
+#define TSI721_OBDMAC_CTL_INIT 0x00000001
+
+#define TSI721_OBDMAC_INT(x) (0x6100c + (x) * 0x1000)
+#define TSI721_OBDMAC_INTSET(x) (0x61010 + (x) * 0x1000)
+#define TSI721_OBDMAC_INTE(x) (0x61018 + (x) * 0x1000)
+#define TSI721_OBDMAC_INT_MASK 0x0000001F
+#define TSI721_OBDMAC_INT_ST_FULL 0x00000010
+#define TSI721_OBDMAC_INT_DONE 0x00000008
+#define TSI721_OBDMAC_INT_SUSPENDED 0x00000004
+#define TSI721_OBDMAC_INT_ERROR 0x00000002
+#define TSI721_OBDMAC_INT_IOF_DONE 0x00000001
+#define TSI721_OBDMAC_INT_ALL TSI721_OBDMAC_INT_MASK
+
+#define TSI721_OBDMAC_STS(x) (0x61014 + (x) * 0x1000)
+#define TSI721_OBDMAC_STS_MASK 0x007f0000
+#define TSI721_OBDMAC_STS_ABORT 0x00400000
+#define TSI721_OBDMAC_STS_RUN 0x00200000
+#define TSI721_OBDMAC_STS_CS 0x001f0000
+
+#define TSI721_OBDMAC_PWE(x) (0x6101c + (x) * 0x1000)
+#define TSI721_OBDMAC_PWE_MASK 0x00000002
+#define TSI721_OBDMAC_PWE_ERROR_EN 0x00000002
+
+#define TSI721_OBDMAC_DPTRL(x) (0x61020 + (x) * 0x1000)
+#define TSI721_OBDMAC_DPTRL_MASK 0xfffffff0
+
+#define TSI721_OBDMAC_DPTRH(x) (0x61024 + (x) * 0x1000)
+#define TSI721_OBDMAC_DPTRH_MASK 0xffffffff
+
+#define TSI721_OBDMAC_DSBL(x) (0x61040 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSBL_MASK 0xffffffc0
+
+#define TSI721_OBDMAC_DSBH(x) (0x61044 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSBH_MASK 0xffffffff
+
+#define TSI721_OBDMAC_DSSZ(x) (0x61048 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSSZ_MASK 0x0000000f
+
+#define TSI721_OBDMAC_DSRP(x) (0x6104c + (x) * 0x1000)
+#define TSI721_OBDMAC_DSRP_MASK 0x0007ffff
+
+#define TSI721_OBDMAC_DSWP(x) (0x61050 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSWP_MASK 0x0007ffff
+
+#define TSI721_RQRPTO 0x60010
+#define TSI721_RQRPTO_MASK 0x00ffffff
+#define TSI721_RQRPTO_VAL 400 /* Response TO value */
+
+/*
+ * Inbound Messaging Engine Registers
+ * x = 0..7
+ */
+
+#define TSI721_IB_DEVID_GLOBAL 0xffff
+#define TSI721_IBDMAC_FQBL(x) (0x61200 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQBL_MASK 0xffffffc0
+
+#define TSI721_IBDMAC_FQBH(x) (0x61204 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQBH_MASK 0xffffffff
+
+#define TSI721_IBDMAC_FQSZ_ENTRY_INX TSI721_IMSGD_RING_SIZE
+#define TSI721_IBDMAC_FQSZ(x) (0x61208 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQSZ_MASK 0x0000000f
+
+#define TSI721_IBDMAC_FQRP(x) (0x6120c + (x) * 0x1000)
+#define TSI721_IBDMAC_FQRP_MASK 0x0007ffff
+
+#define TSI721_IBDMAC_FQWP(x) (0x61210 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQWP_MASK 0x0007ffff
+
+#define TSI721_IBDMAC_FQTH(x) (0x61214 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQTH_MASK 0x0007ffff
+
+#define TSI721_IB_DEVID 0x60020
+#define TSI721_IB_DEVID_MASK 0x0000ffff
+
+#define TSI721_IBDMAC_CTL(x) (0x61240 + (x) * 0x1000)
+#define TSI721_IBDMAC_CTL_MASK 0x00000003
+#define TSI721_IBDMAC_CTL_SUSPEND 0x00000002
+#define TSI721_IBDMAC_CTL_INIT 0x00000001
+
+#define TSI721_IBDMAC_STS(x) (0x61244 + (x) * 0x1000)
+#define TSI721_IBDMAC_STS_MASK 0x007f0000
+#define TSI721_IBSMAC_STS_ABORT 0x00400000
+#define TSI721_IBSMAC_STS_RUN 0x00200000
+#define TSI721_IBSMAC_STS_CS 0x001f0000
+
+#define TSI721_IBDMAC_INT(x) (0x61248 + (x) * 0x1000)
+#define TSI721_IBDMAC_INTSET(x) (0x6124c + (x) * 0x1000)
+#define TSI721_IBDMAC_INTE(x) (0x61250 + (x) * 0x1000)
+#define TSI721_IBDMAC_INT_MASK 0x0000100f
+#define TSI721_IBDMAC_INT_SRTO 0x00001000
+#define TSI721_IBDMAC_INT_SUSPENDED 0x00000008
+#define TSI721_IBDMAC_INT_PC_ERROR 0x00000004
+#define TSI721_IBDMAC_INT_FQ_LOW 0x00000002
+#define TSI721_IBDMAC_INT_DQ_RCV 0x00000001
+#define TSI721_IBDMAC_INT_ALL TSI721_IBDMAC_INT_MASK
+
+#define TSI721_IBDMAC_PWE(x) (0x61254 + (x) * 0x1000)
+#define TSI721_IBDMAC_PWE_MASK 0x00001700
+#define TSI721_IBDMAC_PWE_SRTO 0x00001000
+#define TSI721_IBDMAC_PWE_ILL_FMT 0x00000400
+#define TSI721_IBDMAC_PWE_ILL_DEC 0x00000200
+#define TSI721_IBDMAC_PWE_IMP_SP 0x00000100
+
+#define TSI721_IBDMAC_DQBL(x) (0x61300 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQBL_MASK 0xffffffc0
+#define TSI721_IBDMAC_DQBL_ADDR 0xffffffc0
+
+#define TSI721_IBDMAC_DQBH(x) (0x61304 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQBH_MASK 0xffffffff
+
+#define TSI721_IBDMAC_DQRP(x) (0x61308 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQRP_MASK 0x0007ffff
+
+#define TSI721_IBDMAC_DQWR(x) (0x6130c + (x) * 0x1000)
+#define TSI721_IBDMAC_DQWR_MASK 0x0007ffff
+
+#define TSI721_IBDMAC_DQSZ(x) (0x61314 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQSZ_MASK 0x0000000f
+
+/*
+ * Messaging Engine Interrupts
+ */
+
+#define TSI721_SMSG_PWE 0x6a004
+
+#define TSI721_SMSG_INTE 0x6a000
+#define TSI721_SMSG_INT 0x6a008
+#define TSI721_SMSG_INTSET 0x6a010
+#define TSI721_SMSG_INT_MASK 0x0086ffff
+#define TSI721_SMSG_INT_UNS_RSP 0x00800000
+#define TSI721_SMSG_INT_ECC_NCOR 0x00040000
+#define TSI721_SMSG_INT_ECC_COR 0x00020000
+#define TSI721_SMSG_INT_ECC_NCOR_CH 0x0000ff00
+#define TSI721_SMSG_INT_ECC_COR_CH 0x000000ff
+
+#define TSI721_SMSG_ECC_LOG 0x6a014
+#define TSI721_SMSG_ECC_LOG_MASK 0x00070007
+#define TSI721_SMSG_ECC_LOG_ECC_NCOR_M 0x00070000
+#define TSI721_SMSG_ECC_LOG_ECC_COR_M 0x00000007
+
+#define TSI721_RETRY_GEN_CNT 0x6a100
+#define TSI721_RETRY_GEN_CNT_MASK 0xffffffff
+
+#define TSI721_RETRY_RX_CNT 0x6a104
+#define TSI721_RETRY_RX_CNT_MASK 0xffffffff
+
+#define TSI721_SMSG_ECC_COR_LOG(x) (0x6a300 + (x) * 4)
+#define TSI721_SMSG_ECC_COR_LOG_MASK 0x000000ff
+
+#define TSI721_SMSG_ECC_NCOR(x) (0x6a340 + (x) * 4)
+#define TSI721_SMSG_ECC_NCOR_MASK 0x000000ff
+
+/*
+ * Block DMA Descriptors
+ */
+
+struct tsi721_dma_desc {
+ __le32 type_id;
+
+#define TSI721_DMAD_DEVID 0x0000ffff
+#define TSI721_DMAD_CRF 0x00010000
+#define TSI721_DMAD_PRIO 0x00060000
+#define TSI721_DMAD_RTYPE 0x00780000
+#define TSI721_DMAD_IOF 0x08000000
+#define TSI721_DMAD_DTYPE 0xe0000000
+
+ __le32 bcount;
+
+#define TSI721_DMAD_BCOUNT1 0x03ffffff /* if DTYPE == 1 */
+#define TSI721_DMAD_BCOUNT2 0x0000000f /* if DTYPE == 2 */
+#define TSI721_DMAD_TT 0x0c000000
+#define TSI721_DMAD_RADDR0 0xc0000000
+
+ union {
+ __le32 raddr_lo; /* if DTYPE == (1 || 2) */
+ __le32 next_lo; /* if DTYPE == 3 */
+ };
+
+#define TSI721_DMAD_CFGOFF 0x00ffffff
+#define TSI721_DMAD_HOPCNT 0xff000000
+
+ union {
+ __le32 raddr_hi; /* if DTYPE == (1 || 2) */
+ __le32 next_hi; /* if DTYPE == 3 */
+ };
+
+ union {
+ struct { /* if DTYPE == 1 */
+ __le32 bufptr_lo;
+ __le32 bufptr_hi;
+ __le32 s_dist;
+ __le32 s_size;
+ } t1;
+ __le32 data[4]; /* if DTYPE == 2 */
+ u32 reserved[4]; /* if DTYPE == 3 */
+ };
+} __aligned(32);
+
+/*
+ * Inbound Messaging Descriptor
+ */
+struct tsi721_imsg_desc {
+ __le32 type_id;
+
+#define TSI721_IMD_DEVID 0x0000ffff
+#define TSI721_IMD_CRF 0x00010000
+#define TSI721_IMD_PRIO 0x00060000
+#define TSI721_IMD_TT 0x00180000
+#define TSI721_IMD_DTYPE 0xe0000000
+
+ __le32 msg_info;
+
+#define TSI721_IMD_BCOUNT 0x00000ff8
+#define TSI721_IMD_SSIZE 0x0000f000
+#define TSI721_IMD_LETER 0x00030000
+#define TSI721_IMD_XMBOX 0x003c0000
+#define TSI721_IMD_MBOX 0x00c00000
+#define TSI721_IMD_CS 0x78000000
+#define TSI721_IMD_HO 0x80000000
+
+ __le32 bufptr_lo;
+ __le32 bufptr_hi;
+ u32 reserved[12];
+
+} __aligned(64);
+
+/*
+ * Outbound Messaging Descriptor
+ */
+struct tsi721_omsg_desc {
+ __le32 type_id;
+
+#define TSI721_OMD_DEVID 0x0000ffff
+#define TSI721_OMD_CRF 0x00010000
+#define TSI721_OMD_PRIO 0x00060000
+#define TSI721_OMD_IOF 0x08000000
+#define TSI721_OMD_DTYPE 0xe0000000
+#define TSI721_OMD_RSRVD 0x17f80000
+
+ __le32 msg_info;
+
+#define TSI721_OMD_BCOUNT 0x00000ff8
+#define TSI721_OMD_SSIZE 0x0000f000
+#define TSI721_OMD_LETER 0x00030000
+#define TSI721_OMD_XMBOX 0x003c0000
+#define TSI721_OMD_MBOX 0x00c00000
+#define TSI721_OMD_TT 0x0c000000
+
+ union {
+ __le32 bufptr_lo; /* if DTYPE == 4 */
+ __le32 next_lo; /* if DTYPE == 5 */
+ };
+
+ union {
+ __le32 bufptr_hi; /* if DTYPE == 4 */
+ __le32 next_hi; /* if DTYPE == 5 */
+ };
+
+} __aligned(16);
+
+struct tsi721_dma_sts {
+ __le64 desc_sts[8];
+} __aligned(64);
+
+struct tsi721_desc_sts_fifo {
+ union {
+ __le64 da64;
+ struct {
+ __le32 lo;
+ __le32 hi;
+ } da32;
+ } stat[8];
+} __aligned(64);
+
+/* Descriptor types for BDMA and Messaging blocks */
+enum dma_dtype {
+ DTYPE1 = 1, /* Data Transfer DMA Descriptor */
+ DTYPE2 = 2, /* Immediate Data Transfer DMA Descriptor */
+ DTYPE3 = 3, /* Block Pointer DMA Descriptor */
+ DTYPE4 = 4, /* Outbound Msg DMA Descriptor */
+ DTYPE5 = 5, /* OB Messaging Block Pointer Descriptor */
+ DTYPE6 = 6 /* Inbound Messaging Descriptor */
+};
+
+enum dma_rtype {
+ NREAD = 0,
+ LAST_NWRITE_R = 1,
+ ALL_NWRITE = 2,
+ ALL_NWRITE_R = 3,
+ MAINT_RD = 4,
+ MAINT_WR = 5
+};
+
+/*
+ * mport Driver Definitions
+ */
+#define TSI721_DMA_CHNUM TSI721_DMA_MAXCH
+
+#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
+#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
+
+#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
+
+enum tsi721_smsg_int_flag {
+ SMSG_INT_NONE = 0x00000000,
+ SMSG_INT_ECC_COR_CH = 0x000000ff,
+ SMSG_INT_ECC_NCOR_CH = 0x0000ff00,
+ SMSG_INT_ECC_COR = 0x00020000,
+ SMSG_INT_ECC_NCOR = 0x00040000,
+ SMSG_INT_UNS_RSP = 0x00800000,
+ SMSG_INT_ALL = 0x0006ffff
+};
+
+/* Structures */
+
+struct tsi721_bdma_chan {
+ int bd_num; /* number of buffer descriptors */
+ void *bd_base; /* start of DMA descriptors */
+ dma_addr_t bd_phys;
+ void *sts_base; /* start of DMA BD status FIFO */
+ dma_addr_t sts_phys;
+ int sts_size;
+};
+
+struct tsi721_imsg_ring {
+ u32 size;
+ /* VA/PA of data buffers for incoming messages */
+ void *buf_base;
+ dma_addr_t buf_phys;
+ /* VA/PA of circular free buffer list */
+ void *imfq_base;
+ dma_addr_t imfq_phys;
+ /* VA/PA of Inbound message descriptors */
+ void *imd_base;
+ dma_addr_t imd_phys;
+ /* Inbound Queue buffer pointers */
+ void *imq_base[TSI721_IMSGD_RING_SIZE];
+
+ u32 rx_slot;
+ void *dev_id;
+ u32 fq_wrptr;
+ u32 desc_rdptr;
+ spinlock_t lock;
+};
+
+struct tsi721_omsg_ring {
+ u32 size;
+ /* VA/PA of OB Msg descriptors */
+ void *omd_base;
+ dma_addr_t omd_phys;
+ /* VA/PA of OB Msg data buffers */
+ void *omq_base[TSI721_OMSGD_RING_SIZE];
+ dma_addr_t omq_phys[TSI721_OMSGD_RING_SIZE];
+ /* VA/PA of OB Msg descriptor status FIFO */
+ void *sts_base;
+ dma_addr_t sts_phys;
+ u32 sts_size; /* # of allocated status entries */
+ u32 sts_rdptr;
+
+ u32 tx_slot;
+ void *dev_id;
+ u32 wr_count;
+ spinlock_t lock;
+};
+
+enum tsi721_flags {
+ TSI721_USING_MSI = (1 << 0),
+ TSI721_USING_MSIX = (1 << 1),
+ TSI721_IMSGID_SET = (1 << 2),
+};
+
+#ifdef CONFIG_PCI_MSI
+/*
+ * MSI-X Table Entries (0 ... 69)
+ */
+#define TSI721_MSIX_DMACH_DONE(x) (0 + (x))
+#define TSI721_MSIX_DMACH_INT(x) (8 + (x))
+#define TSI721_MSIX_BDMA_INT 16
+#define TSI721_MSIX_OMSG_DONE(x) (17 + (x))
+#define TSI721_MSIX_OMSG_INT(x) (25 + (x))
+#define TSI721_MSIX_IMSG_DQ_RCV(x) (33 + (x))
+#define TSI721_MSIX_IMSG_INT(x) (41 + (x))
+#define TSI721_MSIX_MSG_INT 49
+#define TSI721_MSIX_SR2PC_IDBQ_RCV(x) (50 + (x))
+#define TSI721_MSIX_SR2PC_CH_INT(x) (58 + (x))
+#define TSI721_MSIX_SR2PC_INT 66
+#define TSI721_MSIX_PC2SR_INT 67
+#define TSI721_MSIX_SRIO_MAC_INT 68
+#define TSI721_MSIX_I2C_INT 69
+
+/* MSI-X vector and init table entry indexes */
+enum tsi721_msix_vect {
+ TSI721_VECT_IDB,
+ TSI721_VECT_PWRX, /* PW_RX is part of SRIO MAC Interrupt reporting */
+ TSI721_VECT_OMB0_DONE,
+ TSI721_VECT_OMB1_DONE,
+ TSI721_VECT_OMB2_DONE,
+ TSI721_VECT_OMB3_DONE,
+ TSI721_VECT_OMB0_INT,
+ TSI721_VECT_OMB1_INT,
+ TSI721_VECT_OMB2_INT,
+ TSI721_VECT_OMB3_INT,
+ TSI721_VECT_IMB0_RCV,
+ TSI721_VECT_IMB1_RCV,
+ TSI721_VECT_IMB2_RCV,
+ TSI721_VECT_IMB3_RCV,
+ TSI721_VECT_IMB0_INT,
+ TSI721_VECT_IMB1_INT,
+ TSI721_VECT_IMB2_INT,
+ TSI721_VECT_IMB3_INT,
+ TSI721_VECT_MAX
+};
+
+#define IRQ_DEVICE_NAME_MAX 64
+
+struct msix_irq {
+ u16 vector;
+ char irq_name[IRQ_DEVICE_NAME_MAX];
+};
+#endif /* CONFIG_PCI_MSI */
+
+struct tsi721_device {
+ struct pci_dev *pdev;
+ struct rio_mport *mport;
+ u32 flags;
+ void __iomem *regs;
+#ifdef CONFIG_PCI_MSI
+ struct msix_irq msix[TSI721_VECT_MAX];
+#endif
+ /* Doorbells */
+ void __iomem *odb_base;
+ void *idb_base;
+ dma_addr_t idb_dma;
+ struct work_struct idb_work;
+ u32 db_discard_count;
+
+ /* Inbound Port-Write */
+ struct work_struct pw_work;
+ struct kfifo pw_fifo;
+ spinlock_t pw_fifo_lock;
+ u32 pw_discard_count;
+
+ /* BDMA Engine */
+ struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+
+ /* Inbound Messaging */
+ int imsg_init[TSI721_IMSG_CHNUM];
+ struct tsi721_imsg_ring imsg_ring[TSI721_IMSG_CHNUM];
+
+ /* Outbound Messaging */
+ int omsg_init[TSI721_OMSG_CHNUM];
+ struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
+};
+
+#endif
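
TSI721_DMAC_DSSZ_SIZE() and MSG_DMA_ENTRY_INX_TO_SIZE() above appear to be inverse views of the same ring-size encoding (field = log2(size) - 4, size = 0x10 << field). A quick userspace check of that round trip for the ring sizes this driver uses; my_fls() stands in for the kernel's __fls(), and this is a sketch, not part of the patch:

#include <assert.h>

/* Index of the highest set bit, i.e. what the kernel's __fls() returns. */
static unsigned int my_fls(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

#define DSSZ_SIZE(size)			(my_fls(size) - 4)
#define MSG_DMA_ENTRY_INX_TO_SIZE(x)	((0x10 << (x)) & 0xFFFF0)

int main(void)
{
	/* The 512-entry messaging ring encodes as 5 and decodes back to 512. */
	assert(DSSZ_SIZE(512) == 5 && MSG_DMA_ENTRY_INX_TO_SIZE(5) == 512);
	/* The 32-entry minimum ring round-trips through encoding 1. */
	assert(DSSZ_SIZE(32) == 1 && MSG_DMA_ENTRY_INX_TO_SIZE(1) == 32);
	return 0;
}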
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index ebe77dd87da..2bebd791a09 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -516,7 +516,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
return rdev;
cleanup:
- if (rio_is_switch(rdev))
+ if (rswitch)
kfree(rswitch->route_table);
kfree(rdev);
@@ -923,7 +923,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
* rio_enum_complete- Tests if enumeration of a network is complete
* @port: Master port to send transaction
*
- * Tests the Component Tag CSR for non-zero value (enumeration
+ * Tests the PGCCSR discovered bit for non-zero value (enumeration
* complete flag). Return %1 if enumeration is complete or %0 if
* enumeration is incomplete.
*/
@@ -933,7 +933,7 @@ static int rio_enum_complete(struct rio_mport *port)
rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,
&regval);
- return (regval & RIO_PORT_GEN_MASTER) ? 1 : 0;
+ return (regval & RIO_PORT_GEN_DISCOVERED) ? 1 : 0;
}
/**
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 043ee3136e4..809b7a3336b 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -10,6 +10,7 @@
* option) any later version.
*/
+#include <linux/stat.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index d63fddb0fbb..ca0d608f824 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -16,6 +16,7 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/88pm860x.h>
+#include <linux/module.h>
struct pm8607_regulator_info {
struct regulator_desc desc;
@@ -412,7 +413,7 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
if (info->desc.id == res->start)
break;
}
- if ((i < 0) || (i > PM8607_ID_RG_MAX)) {
+ if (i == ARRAY_SIZE(pm8607_regulator_info)) {
dev_err(&pdev->dev, "Failed to find regulator %llu\n",
(unsigned long long)res->start);
return -EINVAL;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c7fd2c0e3f2..9713b1b860c 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -64,6 +64,16 @@ config REGULATOR_USERSPACE_CONSUMER
If unsure, say no.
+config REGULATOR_GPIO
+ tristate "GPIO regulator support"
+ depends on GENERIC_GPIO
+ help
+ This driver provides support for regulators that can be
+ controlled via gpios.
+ It is capable of supporting current and voltage regulators
+ and the platform has to provide a mapping of GPIO-states
+ to target volts/amps.
+
config REGULATOR_BQ24022
tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC"
help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 040d5aa6353..93a6318f532 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
+obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index cd4104542f0..5abeb3ac3e8 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 02f3c2333c8..6e1ae69646b 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/ab8500.h>
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
index 068d488a4f7..e24d1b7d97a 100644
--- a/drivers/regulator/bq24022.c
+++ b/drivers/regulator/bq24022.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/regulator/bq24022.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d8e6a429e8b..669d0216022 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -28,6 +28,7 @@
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include <trace/events/regulator.h>
@@ -1425,7 +1426,7 @@ int regulator_enable(struct regulator *regulator)
ret = _regulator_enable(rdev);
mutex_unlock(&rdev->mutex);
- if (ret != 0)
+ if (ret != 0 && rdev->supply)
regulator_disable(rdev->supply);
return ret;
@@ -1552,6 +1553,68 @@ int regulator_force_disable(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_force_disable);
+static void regulator_disable_work(struct work_struct *work)
+{
+ struct regulator_dev *rdev = container_of(work, struct regulator_dev,
+ disable_work.work);
+ int count, i, ret;
+
+ mutex_lock(&rdev->mutex);
+
+ BUG_ON(!rdev->deferred_disables);
+
+ count = rdev->deferred_disables;
+ rdev->deferred_disables = 0;
+
+ for (i = 0; i < count; i++) {
+ ret = _regulator_disable(rdev);
+ if (ret != 0)
+ rdev_err(rdev, "Deferred disable failed: %d\n", ret);
+ }
+
+ mutex_unlock(&rdev->mutex);
+
+ if (rdev->supply) {
+ for (i = 0; i < count; i++) {
+ ret = regulator_disable(rdev->supply);
+ if (ret != 0) {
+ rdev_err(rdev,
+ "Supply disable failed: %d\n", ret);
+ }
+ }
+ }
+}
+
+/**
+ * regulator_disable_deferred - disable regulator output with delay
+ * @regulator: regulator source
+ * @ms: milliseconds until the regulator is disabled
+ *
+ * Execute regulator_disable() on the regulator after a delay. This
+ * is intended for use with devices that require some time to quiesce.
+ *
+ * NOTE: this will only disable the regulator output if no other consumer
+ * devices have it enabled, the regulator device supports disabling and
+ * machine constraints permit this operation.
+ */
+int regulator_disable_deferred(struct regulator *regulator, int ms)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int ret;
+
+ mutex_lock(&rdev->mutex);
+ rdev->deferred_disables++;
+ mutex_unlock(&rdev->mutex);
+
+ ret = schedule_delayed_work(&rdev->disable_work,
+ msecs_to_jiffies(ms));
+ if (ret < 0)
+ return ret;
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_disable_deferred);
+
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
/* If we don't know then assume that the regulator is always on */
@@ -2622,6 +2685,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
INIT_LIST_HEAD(&rdev->consumer_list);
INIT_LIST_HEAD(&rdev->list);
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
+ INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
/* preform any regulator specific init */
if (init_data->regulator_init) {
@@ -2729,6 +2793,7 @@ void regulator_unregister(struct regulator_dev *rdev)
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(rdev->debugfs);
#endif
+ flush_work_sync(&rdev->disable_work.work);
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
@@ -2907,6 +2972,43 @@ void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data)
}
EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);
+#ifdef CONFIG_DEBUG_FS
+static ssize_t supply_map_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ ssize_t len, ret = 0;
+ struct regulator_map *map;
+
+ if (!buf)
+ return -ENOMEM;
+
+ list_for_each_entry(map, &regulator_map_list, list) {
+ len = snprintf(buf + ret, PAGE_SIZE - ret,
+ "%s -> %s.%s\n",
+ rdev_get_name(map->regulator), map->dev_name,
+ map->supply);
+ if (len >= 0)
+ ret += len;
+ if (ret > PAGE_SIZE) {
+ ret = PAGE_SIZE;
+ break;
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations supply_map_fops = {
+ .read = supply_map_read_file,
+ .llseek = default_llseek,
+};
+#endif
+
static int __init regulator_init(void)
{
int ret;
@@ -2919,6 +3021,10 @@ static int __init regulator_init(void)
pr_warn("regulator: Failed to create debugfs directory\n");
debugfs_root = NULL;
}
+
+ if (IS_ERR(debugfs_create_file("supply_map", 0444, debugfs_root,
+ NULL, &supply_map_fops)))
+ pr_warn("regulator: Failed to create supplies debugfs\n");
#endif
regulator_dummy_init();
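
regulator_disable_deferred() above gives consumers a one-call replacement for scheduling their own delayed regulator_disable(); its declaration lands in consumer.h via the matching header change, which is not shown here. A minimal consumer-side sketch (the device, regulator and the 20 ms figure are hypothetical):

#include <linux/regulator/consumer.h>

/* Let a codec quiesce for ~20 ms before its supply is actually dropped;
 * "vdd" is assumed to have been obtained earlier with regulator_get(). */
static int my_codec_power_down(struct regulator *vdd)
{
	return regulator_disable_deferred(vdd, 20);
}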
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 362e0822108..e23ddfa8b2c 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 2bb8f451cc0..78329751af5 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -13,10 +13,11 @@
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/mfd/db8500-prcmu.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/db8500-prcmu.h>
+#include <linux/module.h>
/*
* power state reference count
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index f6ef6694ab9..b8f520513ce 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -16,6 +16,7 @@
*/
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 2fe9d99c9f2..21ecf212a52 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/fixed.h>
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
new file mode 100644
index 00000000000..f0acf52498b
--- /dev/null
+++ b/drivers/regulator/gpio-regulator.c
@@ -0,0 +1,358 @@
+/*
+ * gpio-regulator.c
+ *
+ * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on fixed.c
+ *
+ * Copyright 2008 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * Copyright (c) 2009 Nokia Corporation
+ * Roger Quadros <ext-roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This is useful for systems with mixed controllable and
+ * non-controllable regulators, as well as for allowing testing on
+ * systems with no controllable regulators.
+ */
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/gpio-regulator.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+struct gpio_regulator_data {
+ struct regulator_desc desc;
+ struct regulator_dev *dev;
+
+ int enable_gpio;
+ bool enable_high;
+ bool is_enabled;
+ unsigned startup_delay;
+
+ struct gpio *gpios;
+ int nr_gpios;
+
+ struct gpio_regulator_state *states;
+ int nr_states;
+
+ int state;
+};
+
+static int gpio_regulator_is_enabled(struct regulator_dev *dev)
+{
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+
+ return data->is_enabled;
+}
+
+static int gpio_regulator_enable(struct regulator_dev *dev)
+{
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+
+ if (gpio_is_valid(data->enable_gpio)) {
+ gpio_set_value_cansleep(data->enable_gpio, data->enable_high);
+ data->is_enabled = true;
+ }
+
+ return 0;
+}
+
+static int gpio_regulator_disable(struct regulator_dev *dev)
+{
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+
+ if (gpio_is_valid(data->enable_gpio)) {
+ gpio_set_value_cansleep(data->enable_gpio, !data->enable_high);
+ data->is_enabled = false;
+ }
+
+ return 0;
+}
+
+static int gpio_regulator_enable_time(struct regulator_dev *dev)
+{
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+
+ return data->startup_delay;
+}
+
+static int gpio_regulator_get_value(struct regulator_dev *dev)
+{
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+ int ptr;
+
+ for (ptr = 0; ptr < data->nr_states; ptr++)
+ if (data->states[ptr].gpios == data->state)
+ return data->states[ptr].value;
+
+ return -EINVAL;
+}
+
+static int gpio_regulator_set_value(struct regulator_dev *dev,
+ int min, int max)
+{
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+ int ptr, target, state;
+
+ target = -1;
+ for (ptr = 0; ptr < data->nr_states; ptr++)
+ if (data->states[ptr].value >= min &&
+ data->states[ptr].value <= max)
+ target = data->states[ptr].gpios;
+
+ if (target < 0)
+ return -EINVAL;
+
+ for (ptr = 0; ptr < data->nr_gpios; ptr++) {
+ state = (target & (1 << ptr)) >> ptr;
+ gpio_set_value(data->gpios[ptr].gpio, state);
+ }
+ data->state = target;
+
+ return 0;
+}
+
+static int gpio_regulator_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ return gpio_regulator_set_value(dev, min_uV, max_uV);
+}
+
+static int gpio_regulator_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+
+ if (selector >= data->nr_states)
+ return -EINVAL;
+
+ return data->states[selector].value;
+}
+
+static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
+ int min_uA, int max_uA)
+{
+ return gpio_regulator_set_value(dev, min_uA, max_uA);
+}
+
+static struct regulator_ops gpio_regulator_voltage_ops = {
+ .is_enabled = gpio_regulator_is_enabled,
+ .enable = gpio_regulator_enable,
+ .disable = gpio_regulator_disable,
+ .enable_time = gpio_regulator_enable_time,
+ .get_voltage = gpio_regulator_get_value,
+ .set_voltage = gpio_regulator_set_voltage,
+ .list_voltage = gpio_regulator_list_voltage,
+};
+
+static struct regulator_ops gpio_regulator_current_ops = {
+ .is_enabled = gpio_regulator_is_enabled,
+ .enable = gpio_regulator_enable,
+ .disable = gpio_regulator_disable,
+ .enable_time = gpio_regulator_enable_time,
+ .get_current_limit = gpio_regulator_get_value,
+ .set_current_limit = gpio_regulator_set_current_limit,
+};
+
+static int __devinit gpio_regulator_probe(struct platform_device *pdev)
+{
+
+ int ptr, ret, state;
+
+ drvdata = kzalloc(sizeof(struct gpio_regulator_data), GFP_KERNEL);
+ if (drvdata == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate device data\n");
+ return -ENOMEM;
+ }
+
+ drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
+ if (drvdata->desc.name == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate supply name\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drvdata->gpios = kmemdup(config->gpios,
+ config->nr_gpios * sizeof(struct gpio),
+ GFP_KERNEL);
+ if (drvdata->gpios == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate gpio data\n");
+ ret = -ENOMEM;
+ goto err_name;
+ }
+
+ drvdata->states = kmemdup(config->states,
+ config->nr_states *
+ sizeof(struct gpio_regulator_state),
+ GFP_KERNEL);
+ if (drvdata->states == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate state data\n");
+ ret = -ENOMEM;
+ goto err_memgpio;
+ }
+ drvdata->nr_states = config->nr_states;
+
+ drvdata->desc.owner = THIS_MODULE;
+
+ /* handle regulator type */
+ switch (config->type) {
+ case REGULATOR_VOLTAGE:
+ drvdata->desc.type = REGULATOR_VOLTAGE;
+ drvdata->desc.ops = &gpio_regulator_voltage_ops;
+ drvdata->desc.n_voltages = config->nr_states;
+ break;
+ case REGULATOR_CURRENT:
+ drvdata->desc.type = REGULATOR_CURRENT;
+ drvdata->desc.ops = &gpio_regulator_current_ops;
+ break;
+ default:
+ dev_err(&pdev->dev, "No regulator type set\n");
+ ret = -EINVAL;
+ goto err_memgpio;
+ break;
+ }
+
+ drvdata->enable_gpio = config->enable_gpio;
+ drvdata->startup_delay = config->startup_delay;
+
+ if (gpio_is_valid(config->enable_gpio)) {
+ drvdata->enable_high = config->enable_high;
+
+ ret = gpio_request(config->enable_gpio, config->supply_name);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not obtain regulator enable GPIO %d: %d\n",
+ config->enable_gpio, ret);
+ goto err_memstate;
+ }
+
+ /* set output direction without changing state
+ * to prevent glitch
+ */
+ if (config->enabled_at_boot) {
+ drvdata->is_enabled = true;
+ ret = gpio_direction_output(config->enable_gpio,
+ config->enable_high);
+ } else {
+ drvdata->is_enabled = false;
+ ret = gpio_direction_output(config->enable_gpio,
+ !config->enable_high);
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not configure regulator enable GPIO %d direction: %d\n",
+ config->enable_gpio, ret);
+ goto err_enablegpio;
+ }
+ } else {
+ /* Regulator without GPIO control is considered
+ * always enabled
+ */
+ drvdata->is_enabled = true;
+ }
+
+ drvdata->nr_gpios = config->nr_gpios;
+ ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not obtain regulator setting GPIOs: %d\n", ret);
+ goto err_enablegpio;
+ }
+
+ /* build initial state from gpio init data. */
+ state = 0;
+ for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) {
+ if (config->gpios[ptr].flags & GPIOF_OUT_INIT_HIGH)
+ state |= (1 << ptr);
+ }
+ drvdata->state = state;
+
+ drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
+ config->init_data, drvdata);
+ if (IS_ERR(drvdata->dev)) {
+ ret = PTR_ERR(drvdata->dev);
+ dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+ goto err_stategpio;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
+
+err_stategpio:
+ gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
+err_enablegpio:
+ if (gpio_is_valid(config->enable_gpio))
+ gpio_free(config->enable_gpio);
+err_memstate:
+ kfree(drvdata->states);
+err_memgpio:
+ kfree(drvdata->gpios);
+err_name:
+ kfree(drvdata->desc.name);
+err:
+ kfree(drvdata);
+ return ret;
+}
+
+static int __devexit gpio_regulator_remove(struct platform_device *pdev)
+{
+ struct gpio_regulator_data *drvdata = platform_get_drvdata(pdev);
+
+ regulator_unregister(drvdata->dev);
+
+ gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
+
+ kfree(drvdata->states);
+ kfree(drvdata->gpios);
+
+ if (gpio_is_valid(drvdata->enable_gpio))
+ gpio_free(drvdata->enable_gpio);
+
+ kfree(drvdata->desc.name);
+ kfree(drvdata);
+
+ return 0;
+}
+
+static struct platform_driver gpio_regulator_driver = {
+ .probe = gpio_regulator_probe,
+ .remove = __devexit_p(gpio_regulator_remove),
+ .driver = {
+ .name = "gpio-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_regulator_init(void)
+{
+ return platform_driver_register(&gpio_regulator_driver);
+}
+subsys_initcall(gpio_regulator_init);
+
+static void __exit gpio_regulator_exit(void)
+{
+ platform_driver_unregister(&gpio_regulator_driver);
+}
+module_exit(gpio_regulator_exit);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("gpio voltage regulator");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-regulator");
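
The probe path above pulls everything from platform data: a supply name, an optional enable GPIO, the array of select GPIOs, and a table mapping each GPIO bit pattern to an output value. A board-level sketch of that wiring, with every number and name hypothetical:

#include <linux/kernel.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/gpio-regulator.h>

/* Two select lines give four states; GPIO numbers are made up. */
static struct gpio my_vcc_gpios[] = {
	{ .gpio = 100, .flags = GPIOF_OUT_INIT_LOW,  .label = "vcc-sel0" },
	{ .gpio = 101, .flags = GPIOF_OUT_INIT_HIGH, .label = "vcc-sel1" },
};

/* .gpios is the bit pattern driven onto the lines above (bit 0 = gpios[0]). */
static struct gpio_regulator_state my_vcc_states[] = {
	{ .value = 1200000, .gpios = 0x0 },
	{ .value = 1500000, .gpios = 0x1 },
	{ .value = 1800000, .gpios = 0x2 },
	{ .value = 3300000, .gpios = 0x3 },
};

static struct regulator_init_data my_vcc_init_data = {
	.constraints = {
		.min_uV = 1200000,
		.max_uV = 3300000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
				  REGULATOR_CHANGE_STATUS,
	},
};

static struct gpio_regulator_config my_vcc_config = {
	.supply_name	 = "VCC_IO",
	.enable_gpio	 = 102,
	.enable_high	 = 1,
	.enabled_at_boot = 1,
	.startup_delay	 = 100,		/* us, returned by enable_time() */
	.gpios		 = my_vcc_gpios,
	.nr_gpios	 = ARRAY_SIZE(my_vcc_gpios),
	.states		 = my_vcc_states,
	.nr_states	 = ARRAY_SIZE(my_vcc_states),
	.type		 = REGULATOR_VOLTAGE,
	.init_data	 = &my_vcc_init_data,
};

static struct platform_device my_vcc_device = {
	.name		= "gpio-regulator",
	.id		= -1,
	.dev		= { .platform_data = &my_vcc_config },
};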
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 0f22ef12601..72b16b5f3db 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/lp3971.h>
#include <linux/slab.h>
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 6aa1b506fb5..fbc5e3741be 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/lp3972.h>
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 30eb9e54f7e..1062cf9f02d 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -221,7 +221,7 @@ static int max8649_enable_time(struct regulator_dev *rdev)
ret = (ret & MAX8649_RAMP_MASK) >> 5;
rate = (32 * 1000) >> ret; /* uV/uS */
- return (voltage / rate);
+ return DIV_ROUND_UP(voltage, rate);
}
static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
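
The DIV_ROUND_UP() change only moves the result by a microsecond, but it guarantees the reported enable time never undershoots the real ramp: for example ramping to 0.77 V at 32 mV/us used to report 24 us and now reports 25. A standalone illustration with those numbers (values chosen for the example, not taken from a datasheet):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int voltage = 770000;	/* target output in uV (illustrative) */
	int rate = 32000;	/* fastest ramp in the driver: 32 mV/us */

	printf("truncated: %d us, rounded up: %d us\n",
	       voltage / rate, DIV_ROUND_UP(voltage, rate));
	return 0;		/* prints 24 us vs 25 us */
}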
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index e4dbd667c04..cc9ec0e0327 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 486ed8141fc..3883d85c5b8 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -26,7 +26,6 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/max8952.h>
-#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -47,7 +46,6 @@ enum {
struct max8952_data {
struct i2c_client *client;
struct device *dev;
- struct mutex mutex;
struct max8952_platform_data *pdata;
struct regulator_dev *rdev;
@@ -208,7 +206,6 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
max8952->client = client;
max8952->dev = &client->dev;
max8952->pdata = pdata;
- mutex_init(&max8952->mutex);
max8952->rdev = regulator_register(&regulator, max8952->dev,
&pdata->reg_data, max8952);
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index ad6628ca94f..6176129a27e 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -26,6 +26,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 730f43ad415..8479082e1ae 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "mc13xxx.h"
#define MC13783_REG_SWITCHERS5 29
@@ -336,9 +337,9 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
- struct mc13783_regulator_platform_data *pdata =
+ struct mc13xxx_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
- struct mc13783_regulator_init_data *init_data;
+ struct mc13xxx_regulator_init_data *init_data;
int i, ret;
dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
@@ -381,7 +382,7 @@ err:
static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13783_regulator_platform_data *pdata =
+ struct mc13xxx_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
int i;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 3285d41842f..023d17d022c 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "mc13xxx.h"
#define MC13892_REVISION 7
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index bc27ab13637..6532853a6ef 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "mc13xxx.h"
static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 701a5900f83..9fb4c7b8175 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -63,6 +63,13 @@
#define TPS65023_REG_CTRL_LDO2_EN BIT(2)
#define TPS65023_REG_CTRL_LDO1_EN BIT(1)
+/* REG_CTRL2 bitfields */
+#define TPS65023_REG_CTRL2_GO BIT(7)
+#define TPS65023_REG_CTRL2_CORE_ADJ BIT(6)
+#define TPS65023_REG_CTRL2_DCDC2 BIT(2)
+#define TPS65023_REG_CTRL2_DCDC1 BIT(1)
+#define TPS65023_REG_CTRL2_DCDC3 BIT(0)
+
/* LDO_CTRL bitfields */
#define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4)
#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0xF0 >> ((ldo_id)*4))
@@ -85,7 +92,7 @@
#define TPS65023_MAX_REG_ID TPS65023_LDO_2
/* Supported voltage values for regulators */
-static const u16 VDCDC1_VSEL_table[] = {
+static const u16 VCORE_VSEL_table[] = {
800, 825, 850, 875,
900, 925, 950, 975,
1000, 1025, 1050, 1075,
@@ -96,20 +103,29 @@ static const u16 VDCDC1_VSEL_table[] = {
1500, 1525, 1550, 1600,
};
-static const u16 LDO1_VSEL_table[] = {
+/* Supported voltage values for LDO regulators for tps65020 */
+static const u16 TPS65020_LDO1_VSEL_table[] = {
+ 1000, 1050, 1100, 1300,
+ 1800, 2500, 3000, 3300,
+};
+
+static const u16 TPS65020_LDO2_VSEL_table[] = {
+ 1000, 1050, 1100, 1300,
+ 1800, 2500, 3000, 3300,
+};
+
+/* Supported voltage values for LDO regulators
+ * for tps65021 and tps65023 */
+static const u16 TPS65023_LDO1_VSEL_table[] = {
1000, 1100, 1300, 1800,
2200, 2600, 2800, 3150,
};
-static const u16 LDO2_VSEL_table[] = {
+static const u16 TPS65023_LDO2_VSEL_table[] = {
1050, 1200, 1300, 1800,
2500, 2800, 3000, 3300,
};
-static unsigned int num_voltages[] = {ARRAY_SIZE(VDCDC1_VSEL_table),
- 0, 0, ARRAY_SIZE(LDO1_VSEL_table),
- ARRAY_SIZE(LDO2_VSEL_table)};
-
/* Regulator specific details */
struct tps_info {
const char *name;
@@ -127,6 +143,13 @@ struct tps_pmic {
struct regulator_dev *rdev[TPS65023_NUM_REGULATOR];
const struct tps_info *info[TPS65023_NUM_REGULATOR];
struct regmap *regmap;
+ u8 core_regulator;
+};
+
+/* Struct passed as driver data */
+struct tps_driver_data {
+ const struct tps_info *info;
+ u8 core_regulator;
};
static int tps_65023_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
@@ -253,7 +276,7 @@ static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
- if (dcdc == TPS65023_DCDC_1) {
+ if (dcdc == tps->core_regulator) {
data = tps_65023_reg_read(tps, TPS65023_REG_DEF_CORE);
if (data < 0)
return data;
@@ -270,10 +293,10 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
struct tps_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
int vsel;
+ int ret;
- if (dcdc != TPS65023_DCDC_1)
+ if (dcdc != tps->core_regulator)
return -EINVAL;
-
if (min_uV < tps->info[dcdc]->min_uV
|| min_uV > tps->info[dcdc]->max_uV)
return -EINVAL;
@@ -292,11 +315,21 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
*selector = vsel;
- /* write to the register in case we found a match */
if (vsel == tps->info[dcdc]->table_len)
- return -EINVAL;
- else
- return tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel);
+ goto failed;
+
+ ret = tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel);
+
+ /* Tell the chip that we have changed the value in DEFCORE
+ * and it's time to update the core voltage
+ */
+ tps_65023_set_bits(tps, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_GO);
+
+ return ret;
+
+failed:
+ return -EINVAL;
}
static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
@@ -362,7 +395,7 @@ static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
- if (dcdc == TPS65023_DCDC_1) {
+ if (dcdc == tps->core_regulator) {
if (selector >= tps->info[dcdc]->table_len)
return -EINVAL;
else
@@ -414,7 +447,8 @@ static struct regmap_config tps65023_regmap_config = {
static int __devinit tps_65023_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct tps_info *info = (void *)id->driver_data;
+ const struct tps_driver_data *drv_data = (void *)id->driver_data;
+ const struct tps_info *info = drv_data->info;
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
struct tps_pmic *tps;
@@ -446,6 +480,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
/* common for all regulators */
tps->client = client;
+ tps->core_regulator = drv_data->core_regulator;
for (i = 0; i < TPS65023_NUM_REGULATOR; i++, info++, init_data++) {
/* Store regulator specific information */
@@ -453,7 +488,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
tps->desc[i].name = info->name;
tps->desc[i].id = i;
- tps->desc[i].n_voltages = num_voltages[i];
+ tps->desc[i].n_voltages = info->table_len;
tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
&tps65023_ldo_ops : &tps65023_dcdc_ops);
tps->desc[i].type = REGULATOR_VOLTAGE;
@@ -475,6 +510,14 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps);
+ /* Enable setting output voltage by I2C */
+ tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_CORE_ADJ);
+
return 0;
fail:
@@ -507,13 +550,86 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
return 0;
}
+static const struct tps_info tps65020_regs[] = {
+ {
+ .name = "VDCDC1",
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .fixed = 1,
+ },
+ {
+ .name = "VDCDC2",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .fixed = 1,
+ },
+ {
+ .name = "VDCDC3",
+ .min_uV = 800000,
+ .max_uV = 1600000,
+ .table_len = ARRAY_SIZE(VCORE_VSEL_table),
+ .table = VCORE_VSEL_table,
+ },
+
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3150000,
+ .table_len = ARRAY_SIZE(TPS65020_LDO1_VSEL_table),
+ .table = TPS65020_LDO1_VSEL_table,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 1050000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(TPS65020_LDO2_VSEL_table),
+ .table = TPS65020_LDO2_VSEL_table,
+ },
+};
+
+static const struct tps_info tps65021_regs[] = {
+ {
+ .name = "VDCDC1",
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .fixed = 1,
+ },
+ {
+ .name = "VDCDC2",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .fixed = 1,
+ },
+ {
+ .name = "VDCDC3",
+ .min_uV = 800000,
+ .max_uV = 1600000,
+ .table_len = ARRAY_SIZE(VCORE_VSEL_table),
+ .table = VCORE_VSEL_table,
+ },
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3150000,
+ .table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
+ .table = TPS65023_LDO1_VSEL_table,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 1050000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
+ .table = TPS65023_LDO2_VSEL_table,
+ },
+};
+
static const struct tps_info tps65023_regs[] = {
{
.name = "VDCDC1",
.min_uV = 800000,
.max_uV = 1600000,
- .table_len = ARRAY_SIZE(VDCDC1_VSEL_table),
- .table = VDCDC1_VSEL_table,
+ .table_len = ARRAY_SIZE(VCORE_VSEL_table),
+ .table = VCORE_VSEL_table,
},
{
.name = "VDCDC2",
@@ -531,23 +647,40 @@ static const struct tps_info tps65023_regs[] = {
.name = "LDO1",
.min_uV = 1000000,
.max_uV = 3150000,
- .table_len = ARRAY_SIZE(LDO1_VSEL_table),
- .table = LDO1_VSEL_table,
+ .table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
+ .table = TPS65023_LDO1_VSEL_table,
},
{
.name = "LDO2",
.min_uV = 1050000,
.max_uV = 3300000,
- .table_len = ARRAY_SIZE(LDO2_VSEL_table),
- .table = LDO2_VSEL_table,
+ .table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
+ .table = TPS65023_LDO2_VSEL_table,
},
};
+static struct tps_driver_data tps65020_drv_data = {
+ .info = tps65020_regs,
+ .core_regulator = TPS65023_DCDC_3,
+};
+
+static struct tps_driver_data tps65021_drv_data = {
+ .info = tps65021_regs,
+ .core_regulator = TPS65023_DCDC_3,
+};
+
+static struct tps_driver_data tps65023_drv_data = {
+ .info = tps65023_regs,
+ .core_regulator = TPS65023_DCDC_1,
+};
+
static const struct i2c_device_id tps_65023_id[] = {
{.name = "tps65023",
- .driver_data = (unsigned long) tps65023_regs,},
+ .driver_data = (unsigned long) &tps65023_drv_data},
{.name = "tps65021",
- .driver_data = (unsigned long) tps65023_regs,},
+ .driver_data = (unsigned long) &tps65021_drv_data,},
+ {.name = "tps65020",
+ .driver_data = (unsigned long) &tps65020_drv_data},
{ },
};
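
With the per-variant tps_driver_data above, the board picks chip behaviour purely via the I2C id string; the "tps65020" entry selects tps65020_drv_data and so makes VDCDC3 the core regulator. Registering one might look like this (address, array contents and names are assumptions for the example):

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regulator/machine.h>

/* One regulator_init_data per regulator, in driver order (VDCDC1..LDO2);
 * the board fills these in. */
static struct regulator_init_data my_tps65020_init[5];

static struct i2c_board_info my_pmic_info __initdata = {
	I2C_BOARD_INFO("tps65020", 0x48),	/* address is illustrative */
	.platform_data = my_tps65020_init,
};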
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index bfffabc21ed..bdef70365f5 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -90,12 +90,6 @@ static const u16 LDO2_VSEL_table[] = {
3000, 3100, 3200, 3300,
};
-static unsigned int num_voltages[] = {ARRAY_SIZE(VDCDCx_VSEL_table),
- ARRAY_SIZE(VDCDCx_VSEL_table),
- ARRAY_SIZE(VDCDCx_VSEL_table),
- ARRAY_SIZE(LDO1_VSEL_table),
- ARRAY_SIZE(LDO2_VSEL_table)};
-
struct tps_info {
const char *name;
unsigned min_uV;
@@ -598,7 +592,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
tps->desc[i].name = info->name;
tps->desc[i].id = i;
- tps->desc[i].n_voltages = num_voltages[i];
+ tps->desc[i].n_voltages = info->table_len;
tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
&tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
tps->desc[i].type = REGULATOR_VOLTAGE;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index bb04a75a4c9..14b9389dd52 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -14,6 +14,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -332,6 +333,36 @@ static inline int tps6586x_regulator_preinit(struct device *parent,
1 << ri->enable_bit[1]);
}
+static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev)
+{
+ struct device *parent = pdev->dev.parent;
+ struct regulator_init_data *p = pdev->dev.platform_data;
+ struct tps6586x_settings *setting = p->driver_data;
+ uint8_t reg;
+
+ if (setting == NULL)
+ return 0;
+
+ if (!(setting->slew_rate & TPS6586X_SLEW_RATE_SET))
+ return 0;
+
+ /* only SM0 and SM1 can have the slew rate settings */
+ switch (pdev->id) {
+ case TPS6586X_ID_SM_0:
+ reg = TPS6586X_SM0SL;
+ break;
+ case TPS6586X_ID_SM_1:
+ reg = TPS6586X_SM1SL;
+ break;
+ default:
+ dev_warn(&pdev->dev, "Only SM0/SM1 can set slew rate\n");
+ return -EINVAL;
+ }
+
+ return tps6586x_write(parent, reg,
+ setting->slew_rate & TPS6586X_SLEW_RATE_MASK);
+}
+
static inline struct tps6586x_regulator *find_regulator_info(int id)
{
struct tps6586x_regulator *ri;
@@ -374,7 +405,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rdev);
- return 0;
+ return tps6586x_regulator_set_slew_rate(pdev);
}
static int __devexit tps6586x_regulator_remove(struct platform_device *pdev)
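
For the new slew-rate hook to do anything, the board has to hang a tps6586x_settings off the SM0/SM1 regulator's init_data. A sketch under assumed values; the header providing tps6586x_settings is not shown in this patch, and the low-bit encoding is purely illustrative:

#include <linux/regulator/machine.h>
#include <linux/mfd/tps6586x.h>	/* assumed home of tps6586x_settings */

static struct tps6586x_settings my_sm0_settings = {
	/* Low bits: chip-specific rate encoding (illustrative); the SET
	 * flag tells the probe hook the field is valid. */
	.slew_rate = TPS6586X_SLEW_RATE_SET | 0x3,
};

static struct regulator_init_data my_sm0_init_data = {
	.constraints = {
		.min_uV = 725000,	/* illustrative limits */
		.max_uV = 1500000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
	},
	.driver_data = &my_sm0_settings,
};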
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 3a9313e00fa..39d4a1749e7 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -43,8 +43,6 @@
#define TPS65912_REG_LDO9 12
#define TPS65912_REG_LDO10 13
-#define TPS65912_MAX_REG_ID TPS65912_REG_LDO_10
-
/* Number of step-down converters available */
#define TPS65912_NUM_DCDC 4
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 9d5ba935759..fc665514699 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/userspace-consumer.h>
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 69e550f5763..858c1f861ba 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct virtual_consumer_data {
struct mutex lock;
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 0f12c70bebc..71632ddc378 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/wm8400-private.h>
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 1a6a690f24d..b87bf5c841f 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -140,6 +140,14 @@ static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
return (selector * 100000) + 900000;
case WM8958:
return (selector * 100000) + 1000000;
+ case WM1811:
+ switch (selector) {
+ case 0:
+ return -EINVAL;
+ default:
+ return (selector * 100000) + 950000;
+ }
+ break;
default:
return -EINVAL;
}
@@ -170,6 +178,11 @@ static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
case WM8958:
selector = (min_uV - 1000000) / 100000;
break;
+ case WM1811:
+ selector = (min_uV - 950000) / 100000;
+ if (selector == 0)
+ selector = 1;
+ break;
default:
return -EINVAL;
}
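
The WM1811 branch reserves selector 0, so the lowest reachable LDO2 setting becomes selector 1 (1.05 V). The arithmetic, pulled out into a standalone check:

#include <stdio.h>

/* Mirrors the WM1811 LDO2 mapping above: selector N >= 1 -> 950 mV + N * 100 mV. */
static int wm1811_ldo2_uV(unsigned int selector)
{
	return selector ? (int)(selector * 100000 + 950000) : -1;
}

int main(void)
{
	unsigned int sel = (1000000 - 950000) / 100000;	/* ask for 1.0 V -> 0 */

	if (sel == 0)
		sel = 1;	/* selector 0 is invalid, bump as the driver does */
	printf("selector %u -> %d uV\n", sel, wm1811_ldo2_uV(sel));
	return 0;		/* prints: selector 1 -> 1050000 uV */
}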
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 5a538fc1cc8..53eb4e55b28 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -8,7 +8,7 @@ config RTC_LIB
menuconfig RTC_CLASS
bool "Real Time Clock"
default n
- depends on !S390
+ depends on !S390 && !UML
select RTC_LIB
help
Generic RTC class support. If you say yes here, you will
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 01a7df5317c..e8326f26fa2 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -21,16 +21,13 @@
#include "rtc-core.h"
-static DEFINE_IDR(rtc_idr);
-static DEFINE_MUTEX(idr_lock);
+static DEFINE_IDA(rtc_ida);
struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
- mutex_lock(&idr_lock);
- idr_remove(&rtc_idr, rtc->id);
- mutex_unlock(&idr_lock);
+ ida_simple_remove(&rtc_ida, rtc->id);
kfree(rtc);
}
@@ -146,25 +143,16 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
struct rtc_wkalrm alrm;
int id, err;
- if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) {
- err = -ENOMEM;
+ id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ err = id;
goto exit;
}
-
- mutex_lock(&idr_lock);
- err = idr_get_new(&rtc_idr, NULL, &id);
- mutex_unlock(&idr_lock);
-
- if (err < 0)
- goto exit;
-
- id = id & MAX_ID_MASK;
-
rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
if (rtc == NULL) {
err = -ENOMEM;
- goto exit_idr;
+ goto exit_ida;
}
rtc->id = id;
@@ -222,10 +210,8 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
exit_kfree:
kfree(rtc);
-exit_idr:
- mutex_lock(&idr_lock);
- idr_remove(&rtc_idr, id);
- mutex_unlock(&idr_lock);
+exit_ida:
+ ida_simple_remove(&rtc_ida, id);
exit:
dev_err(dev, "rtc core: unable to register %s, err = %d\n",
@@ -276,7 +262,7 @@ static void __exit rtc_exit(void)
{
rtc_dev_exit();
class_destroy(rtc_class);
- idr_destroy(&rtc_idr);
+ ida_destroy(&rtc_ida);
}
subsys_initcall(rtc_init);
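
ida_simple_get()/ida_simple_remove() do their own locking, which is what lets the conversion drop idr_lock entirely. The allocation pattern the patch switches to, reduced to a standalone sketch:

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

/* Allocate the lowest free id, or a negative errno on failure. */
static int example_get_id(void)
{
	return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

/* Return an id obtained above. */
static void example_put_id(int id)
{
	ida_simple_remove(&example_ida, id);
}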
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 44e91e598f8..8e286259a00 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -13,6 +13,7 @@
#include <linux/rtc.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <linux/log2.h>
#include <linux/workqueue.h>
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index 58d4e18530d..2322c43af20 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/i2c/dm355evm_msp.h>
+#include <linux/module.h>
/*
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 57fbcc149ba..3a33b1fdbe0 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -17,6 +17,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/ds1305.h>
+#include <linux/module.h>
/*
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2005b44e4f..62b0763b7b9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -34,6 +34,7 @@ enum ds_type {
ds_1388,
ds_3231,
m41t00,
+ mcp7941x,
rx_8025,
// rs5c372 too? different address...
};
@@ -43,6 +44,7 @@ enum ds_type {
#define DS1307_REG_SECS 0x00 /* 00-59 */
# define DS1307_BIT_CH 0x80
# define DS1340_BIT_nEOSC 0x80
+# define MCP7941X_BIT_ST 0x80
#define DS1307_REG_MIN 0x01 /* 00-59 */
#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
@@ -50,6 +52,7 @@ enum ds_type {
# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
#define DS1307_REG_WDAY 0x03 /* 01-07 */
+# define MCP7941X_BIT_VBATEN 0x08
#define DS1307_REG_MDAY 0x04 /* 01-31 */
#define DS1307_REG_MONTH 0x05 /* 01-12 */
# define DS1337_BIT_CENTURY 0x80 /* in REG_MONTH */
@@ -137,6 +140,8 @@ static const struct chip_desc chips[] = {
},
[m41t00] = {
},
+[mcp7941x] = {
+},
[rx_8025] = {
}, };
@@ -149,6 +154,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "ds1340", ds_1340 },
{ "ds3231", ds_3231 },
{ "m41t00", m41t00 },
+ { "mcp7941x", mcp7941x },
{ "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
{ }
@@ -365,6 +371,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
| DS1340_BIT_CENTURY;
break;
+ case mcp7941x:
+ buf[DS1307_REG_SECS] |= MCP7941X_BIT_ST;
+ buf[DS1307_REG_WDAY] |= MCP7941X_BIT_VBATEN;
+ break;
default:
break;
}
@@ -809,6 +819,23 @@ read_rtc:
dev_warn(&client->dev, "SET TIME!\n");
}
break;
+ case mcp7941x:
+ /* make sure that the backup battery is enabled */
+ if (!(ds1307->regs[DS1307_REG_WDAY] & MCP7941X_BIT_VBATEN)) {
+ i2c_smbus_write_byte_data(client, DS1307_REG_WDAY,
+ ds1307->regs[DS1307_REG_WDAY]
+ | MCP7941X_BIT_VBATEN);
+ }
+
+ /* clock halted? turn it on, so clock can tick. */
+ if (!(tmp & MCP7941X_BIT_ST)) {
+ i2c_smbus_write_byte_data(client, DS1307_REG_SECS,
+ MCP7941X_BIT_ST);
+ dev_warn(&client->dev, "SET TIME!\n");
+ goto read_rtc;
+ }
+
+ break;
case rx_8025:
case ds_1337:
case ds_1339:
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 568ad30617e..586c244a05d 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -23,6 +23,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/module.h>
#define DRV_VERSION "0.6"
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index fee41b97c9e..1350029044e 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -18,6 +18,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/module.h>
#define DRV_VERSION "0.3"
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 06dfb54f99b..a319402a544 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -11,6 +11,7 @@
#include <linux/i2c.h>
#include <linux/rtc.h>
+#include <linux/module.h>
#define DRV_VERSION "0.4"
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index d84a448dd75..e3e0f92b60f 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -21,6 +21,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/module.h>
#define DRV_VERSION "0.4"
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index d8e1c257855..8414dea5fb1 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -14,6 +14,7 @@
#include <linux/i2c.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
+#include <linux/module.h>
/* Registers */
#define EM3027_REG_ON_OFF_CTRL 0x00
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index ddbc797ea6c..6186833973e 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -15,6 +15,7 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRV_VERSION "0.1"
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index a1a278bc340..9d0c3b478d5 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -309,7 +309,7 @@ static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev)
+static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
{
int ret;
struct mc13xxx_rtc *priv;
@@ -378,7 +378,7 @@ err_reset_irq_request:
return ret;
}
-static int __devexit mc13xxx_rtc_remove(struct platform_device *pdev)
+static int __exit mc13xxx_rtc_remove(struct platform_device *pdev)
{
struct mc13xxx_rtc *priv = platform_get_drvdata(pdev);
@@ -410,7 +410,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = {
static struct platform_driver mc13xxx_rtc_driver = {
.id_table = mc13xxx_rtc_idtable,
- .remove = __devexit_p(mc13xxx_rtc_remove),
+ .remove = __exit_p(mc13xxx_rtc_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index d33544802a2..bb21f443fb7 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -76,12 +76,15 @@ static inline unsigned char vrtc_is_updating(void)
/*
* rtc_time's year contains the increment over 1900, but vRTC's YEAR
* register can't be programmed to value larger than 0x64, so vRTC
- * driver chose to use 1960 (1970 is UNIX time start point) as the base,
+ * driver chose to use 1972 (1970 is UNIX time start point) as the base,
* and does the translation at read/write time.
*
- * Why not just use 1970 as the offset? it's because using 1960 will
+ * Why not just use 1970 as the offset? it's because using 1972 will
* make it consistent in leap year setting for both vrtc and low-level
- * physical rtc devices.
+ * physical rtc devices. Then why not use 1960 as the offset? If we use
+ * 1960, for a device's first use, its YEAR register is 0 and the system
+ * year will be parsed as 1960 which is not a valid UNIX time and will
+ * cause many applications to fail mysteriously.
*/
static int mrst_read_time(struct device *dev, struct rtc_time *time)
{
@@ -99,10 +102,10 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
time->tm_year = vrtc_cmos_read(RTC_YEAR);
spin_unlock_irqrestore(&rtc_lock, flags);
- /* Adjust for the 1960/1900 */
- time->tm_year += 60;
+ /* Adjust for the 1972/1900 */
+ time->tm_year += 72;
time->tm_mon--;
- return RTC_24H;
+ return rtc_valid_tm(time);
}
static int mrst_set_time(struct device *dev, struct rtc_time *time)
@@ -119,9 +122,9 @@ static int mrst_set_time(struct device *dev, struct rtc_time *time)
min = time->tm_min;
sec = time->tm_sec;
- if (yrs < 70 || yrs > 138)
+ if (yrs < 72 || yrs > 138)
return -EINVAL;
- yrs -= 60;
+ yrs -= 72;
spin_lock_irqsave(&rtc_lock, flags);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 60627a76451..768e2edb967 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
+#include <linux/module.h>
#define RTC_TIME_REG_OFFS 0
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 71bab0ef544..2ee3bbf7e5e 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -42,6 +42,7 @@
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/spi/spi.h>
+#include <linux/module.h>
#define DRV_VERSION "0.6"
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b42c0c67926..606fdfab34e 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -18,6 +18,7 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRV_VERSION "0.4.3"
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 368d0e63cf8..971bc8e08da 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -23,6 +23,7 @@
#include <linux/rtc.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
+#include <linux/module.h>
#define DRV_VERSION "0.2"
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 85c1b848dd7..d29f5432c6e 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -14,6 +14,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DRV_VERSION "0.6"
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 3b943673cd3..ed3e9b59903 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -21,6 +21,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/module.h>
#define DRV_VERSION "0.1"
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index ec6313d1535..aac0ffed434 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -11,6 +11,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/txx9/tx4939.h>
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index b00aad2620d..8c051d3179d 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -21,6 +21,7 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>
+#include <linux/module.h>
#define DRV_VERSION "1.0.8"
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a1d3ddba99c..65894f05a80 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -11,7 +11,6 @@
#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -1594,7 +1593,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
unsigned long long now;
int expires;
- kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
@@ -2061,13 +2059,14 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
/*
* Wakeup helper for the 'sleep_on' functions.
*/
-static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
cqr->callback_data = DASD_SLEEPON_END_TAG;
spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
wake_up(&generic_waitq);
}
+EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
@@ -2167,7 +2166,9 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
} else
wait_event(generic_waitq, !(device->stopped));
- cqr->callback = dasd_wakeup_cb;
+ if (!cqr->callback)
+ cqr->callback = dasd_wakeup_cb;
+
cqr->callback_data = DASD_SLEEPON_START_TAG;
dasd_add_request_tail(cqr);
if (interruptible) {
@@ -2263,7 +2264,11 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = DASD_SLEEPON_START_TAG;
cqr->status = DASD_CQR_QUEUED;
- list_add(&cqr->devlist, &device->ccw_queue);
+ /*
+ * add new request as second
+ * first the terminated cqr needs to be finished
+ */
+ list_add(&cqr->devlist, device->ccw_queue.next);
/* let the bh start the request to keep them in order */
dasd_schedule_device_bh(device);
@@ -3284,6 +3289,9 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
if (IS_ERR(device))
return PTR_ERR(device);
+ /* mark device as suspended */
+ set_bit(DASD_FLAG_SUSPENDED, &device->flags);
+
if (device->discipline->freeze)
rc = device->discipline->freeze(device);
@@ -3358,6 +3366,7 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
if (device->block)
dasd_schedule_block_bh(device->block);
+ clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
dasd_put_device(device);
return 0;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6e835c9fdfc..6ab29680586 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -844,6 +844,30 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
+/*
+ * Wakeup helper for read_conf
+ * if the cqr is not done and needs some error recovery
+ * the buffer has to be re-initialized with the EBCDIC "V1.0"
+ * to show support for virtual device SNEQ
+ */
+static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ struct ccw1 *ccw;
+ __u8 *rcd_buffer;
+
+ if (cqr->status != DASD_CQR_DONE) {
+ ccw = cqr->cpaddr;
+ rcd_buffer = (__u8 *)((addr_t) ccw->cda);
+ memset(rcd_buffer, 0, sizeof(*rcd_buffer));
+
+ rcd_buffer[0] = 0xE5;
+ rcd_buffer[1] = 0xF1;
+ rcd_buffer[2] = 0x4B;
+ rcd_buffer[3] = 0xF0;
+ }
+ dasd_wakeup_cb(cqr, data);
+}
+
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
@@ -863,6 +887,7 @@ static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
cqr->retries = 5;
+ cqr->callback = read_conf_cb;
rc = dasd_sleep_on_immediatly(cqr);
return rc;
}
@@ -900,6 +925,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
goto out_error;
}
dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
+ cqr->callback = read_conf_cb;
ret = dasd_sleep_on(cqr);
/*
* on success we update the user input parms
@@ -1075,6 +1101,12 @@ static void do_path_verification_work(struct work_struct *work)
data = container_of(work, struct path_verification_work_data, worker);
device = data->device;
+ /* delay path verification until device was resumed */
+ if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+ schedule_work(work);
+ return;
+ }
+
opm = 0;
npm = 0;
ppm = 0;
@@ -2021,9 +2053,13 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
- /* for alias only and not in offline processing*/
+ /*
+ * for alias only, not in offline processing
+ * and only if not suspended
+ */
if (!device->block && private->lcu &&
- !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
/*
* the state change could be caused by an alias
* reassignment remove device from alias handling
@@ -2350,7 +2386,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
new_track = 1;
end_idaw = 0;
len_to_track_end = 0;
- idaw_dst = 0;
+ idaw_dst = NULL;
idaw_len = 0;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
@@ -2412,7 +2448,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
if (end_idaw) {
idaws = idal_create_words(idaws, idaw_dst,
idaw_len);
- idaw_dst = 0;
+ idaw_dst = NULL;
idaw_len = 0;
end_idaw = 0;
}
@@ -3998,6 +4034,7 @@ static struct ccw_driver dasd_eckd_driver = {
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
.uc_handler = dasd_generic_uc_handler,
+ .int_class = IOINT_DAS,
};
/*
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 4b71b116486..a62a75358eb 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -79,6 +79,7 @@ static struct ccw_driver dasd_fba_driver = {
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
+ .int_class = IOINT_DAS,
};
static void
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1dd12bd85a6..afe8c33422e 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -516,6 +516,7 @@ struct dasd_block {
*/
#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
+#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
void dasd_put_device_wake(struct dasd_device *);
@@ -643,6 +644,7 @@ struct dasd_ccw_req *
dasd_smalloc_request(int , int, int, struct dasd_device *);
void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
static inline int
dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9b43ae94beb..a5a55da2a1a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -27,7 +27,7 @@
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static int dcssblk_release(struct gendisk *disk, fmode_t mode);
-static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
+static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
void **kaddr, unsigned long *pfn);
@@ -814,7 +814,7 @@ out:
return rc;
}
-static int
+static void
dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
@@ -871,10 +871,9 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
bytes_done += bvec->bv_len;
}
bio_endio(bio, 0);
- return 0;
+ return;
fail:
bio_io_error(bio);
- return 0;
}
static int
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 1f6a4d894e7..98f3e4ade92 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void)
/*
* Block device make request function.
*/
-static int xpram_make_request(struct request_queue *q, struct bio *bio)
+static void xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec *bvec;
@@ -221,10 +221,9 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio_endio(bio, 0);
- return 0;
+ return;
fail:
bio_io_error(bio);
- return 0;
}
static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 694464c65fc..934458ad55e 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,7 +9,6 @@
* Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
*/
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
@@ -362,7 +361,6 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
int cstat, dstat;
int count;
- kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
raw = dev_get_drvdata(&cdev->dev);
req = (struct raw3215_req *) intparm;
cstat = irb->scsw.cmd.cstat;
@@ -776,6 +774,7 @@ static struct ccw_driver raw3215_ccw_driver = {
.freeze = &raw3215_pm_stop,
.thaw = &raw3215_pm_start,
.restore = &raw3215_pm_start,
+ .int_class = IOINT_C15,
};
#ifdef CONFIG_TN3215_CONSOLE
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index f6489eb7e97..e71298158f9 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -11,6 +11,7 @@
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 810ac38631c..e5cb9248a44 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -330,7 +329,6 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3270_request *rq;
int rc;
- kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return;
@@ -1398,6 +1396,7 @@ static struct ccw_driver raw3270_ccw_driver = {
.freeze = &raw3270_pm_stop,
.thaw = &raw3270_pm_start,
.restore = &raw3270_pm_start,
+ .int_class = IOINT_C70,
};
static int
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 837e010299a..0b54a91f8dc 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -61,8 +61,8 @@ static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
- __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
- PSW_MASK_WAIT | PSW_DEFAULT_KEY);
+ __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
+ PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
local_irq_disable();
out:
/* Contents of the sccb might have changed. */
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 4a51e3f0968..bd1b9c91905 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/completion.h>
+#include <linux/export.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index a90a02c28d6..87fc0ac11e6 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -30,7 +30,8 @@ static void do_machine_quiesce(void)
psw_t quiesce_psw;
smp_send_stop();
- quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+ quiesce_psw.mask =
+ PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw);
}
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 9eff2df70dd..934ef33eb9a 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1330,6 +1330,7 @@ static struct ccw_driver tape_34xx_driver = {
.set_online = tape_34xx_online,
.set_offline = tape_generic_offline,
.freeze = tape_generic_pm_suspend,
+ .int_class = IOINT_TAP,
};
static int
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index a7d57072888..49c6aab7ad7 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1762,6 +1762,7 @@ static struct ccw_driver tape_3590_driver = {
.set_offline = tape_generic_offline,
.set_online = tape_3590_online,
.freeze = tape_generic_pm_suspend,
+ .int_class = IOINT_TAP,
};
/*
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 7978a0adeaf..b3a3e8e8656 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -14,7 +14,6 @@
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h> // for kernel parameters
#include <linux/kmod.h> // for requesting modules
@@ -1115,7 +1114,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct tape_request *request;
int rc;
- kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
device = dev_get_drvdata(&cdev->dev);
if (device == NULL) {
return;
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 31a3ccbb649..75bde6a8b7d 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index f6b00c3df42..85f4a9a5d12 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -11,9 +11,9 @@
#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h>
#include <linux/cdev.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
@@ -74,6 +74,7 @@ static struct ccw_driver ur_driver = {
.set_online = ur_set_online,
.set_offline = ur_set_offline,
.freeze = ur_pm_suspend,
+ .int_class = IOINT_VMR,
};
static DEFINE_MUTEX(vmur_mutex);
@@ -305,7 +306,6 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct urdev *urd;
- kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.count);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3b94044027c..43068fbd0ba 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
+#include <linux/module.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
@@ -142,22 +143,6 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
return memcpy_hsa(dest, src, count, TO_KERNEL);
}
-static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
-{
- static char buf[4096];
- int offs = 0, size;
-
- while (offs < count) {
- size = min(sizeof(buf), count - offs);
- if (memcpy_real(buf, (void *) src + offs, size))
- return -EFAULT;
- if (copy_to_user(dest + offs, buf, size))
- return -EFAULT;
- offs += size;
- }
- return 0;
-}
-
static int __init init_cpu_info(enum arch_id arch)
{
struct save_area *sa;
@@ -346,8 +331,8 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
/* Copy from real mem */
size = count - mem_offs - hdr_count;
- rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
- size);
+ rc = copy_to_user_real(buf + hdr_count + mem_offs,
+ (void *) mem_start + mem_offs, size);
if (rc)
goto fail;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5c567414c4b..4f1989d27b1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -29,31 +29,20 @@
/* a device matches a driver if all its slave devices match the same
* entry of the driver */
-static int
-ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
+static int ccwgroup_bus_match(struct device *dev, struct device_driver * drv)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(drv);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv);
if (gdev->creator_id == gdrv->driver_id)
return 1;
return 0;
}
-static int
-ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
-{
- /* TODO */
- return 0;
-}
static struct bus_type ccwgroup_bus_type;
-static void
-__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
int i;
char str[8];
@@ -63,7 +52,6 @@ __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
sysfs_remove_link(&gdev->dev.kobj, str);
sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
}
-
}
/*
@@ -87,6 +75,87 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
}
}
+static int ccwgroup_set_online(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = 0;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_ONLINE)
+ goto out;
+ if (gdrv->set_online)
+ ret = gdrv->set_online(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_ONLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+
+static int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = 0;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_OFFLINE)
+ goto out;
+ if (gdrv->set_offline)
+ ret = gdrv->set_offline(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_OFFLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+
+static ssize_t ccwgroup_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+ unsigned long value;
+ int ret;
+
+ if (!dev->driver)
+ return -EINVAL;
+ if (!try_module_get(gdrv->driver.owner))
+ return -EINVAL;
+
+ ret = strict_strtoul(buf, 0, &value);
+ if (ret)
+ goto out;
+
+ if (value == 1)
+ ret = ccwgroup_set_online(gdev);
+ else if (value == 0)
+ ret = ccwgroup_set_offline(gdev);
+ else
+ ret = -EINVAL;
+out:
+ module_put(gdrv->driver.owner);
+ return (ret == 0) ? count : ret;
+}
+
+static ssize_t ccwgroup_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int online;
+
+ online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+}
+
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentally created. Saves memory :)
@@ -104,14 +173,13 @@ static void ccwgroup_ungroup_callback(struct device *dev)
mutex_unlock(&gdev->reg_mutex);
}
-static ssize_t
-ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t ccwgroup_ungroup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct ccwgroup_device *gdev;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
int rc;
- gdev = to_ccwgroupdev(dev);
-
/* Prevent concurrent online/offline processing and ungrouping. */
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
@@ -132,24 +200,35 @@ out:
}
return count;
}
-
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
-static void
-ccwgroup_release (struct device *dev)
+static struct attribute *ccwgroup_attrs[] = {
+ &dev_attr_online.attr,
+ &dev_attr_ungroup.attr,
+ NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+ .attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+ &ccwgroup_attr_group,
+ NULL,
+};
+
+static void ccwgroup_release(struct device *dev)
{
kfree(to_ccwgroupdev(dev));
}
-static int
-__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
char str[8];
int i, rc;
for (i = 0; i < gdev->count; i++) {
- rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
- "group_device");
+ rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
+ &gdev->dev.kobj, "group_device");
if (rc) {
for (--i; i >= 0; i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
@@ -159,8 +238,8 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
}
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
- rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
- str);
+ rc = sysfs_create_link(&gdev->dev.kobj,
+ &gdev->cdev[i]->dev.kobj, str);
if (rc) {
for (--i; i >= 0; i--) {
sprintf(str, "cdev%d", i);
@@ -293,26 +372,17 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
}
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
-
+ gdev->dev.groups = ccwgroup_attr_groups;
rc = device_add(&gdev->dev);
if (rc)
goto error;
- get_device(&gdev->dev);
- rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
-
+ rc = __ccwgroup_create_symlinks(gdev);
if (rc) {
- device_unregister(&gdev->dev);
+ device_del(&gdev->dev);
goto error;
}
-
- rc = __ccwgroup_create_symlinks(gdev);
- if (!rc) {
- mutex_unlock(&gdev->reg_mutex);
- put_device(&gdev->dev);
- return 0;
- }
- device_remove_file(&gdev->dev, &dev_attr_ungroup);
- device_unregister(&gdev->dev);
+ mutex_unlock(&gdev->reg_mutex);
+ return 0;
error:
for (i = 0; i < num_devices; i++)
if (gdev->cdev[i]) {
@@ -330,7 +400,15 @@ error:
EXPORT_SYMBOL(ccwgroup_create_from_string);
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
- void *data);
+ void *data)
+{
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_UNBIND_DRIVER)
+ device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+ return NOTIFY_OK;
+}
static struct notifier_block ccwgroup_nb = {
.notifier_call = ccwgroup_notifier
@@ -362,138 +440,21 @@ module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
-static int
-ccwgroup_set_online(struct ccwgroup_device *gdev)
+static int ccwgroup_probe(struct device *dev)
{
- struct ccwgroup_driver *gdrv;
- int ret;
-
- if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
- return -EAGAIN;
- if (gdev->state == CCWGROUP_ONLINE) {
- ret = 0;
- goto out;
- }
- if (!gdev->dev.driver) {
- ret = -EINVAL;
- goto out;
- }
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
- if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0))
- goto out;
-
- gdev->state = CCWGROUP_ONLINE;
- out:
- atomic_set(&gdev->onoff, 0);
- return ret;
-}
-
-static int
-ccwgroup_set_offline(struct ccwgroup_device *gdev)
-{
- struct ccwgroup_driver *gdrv;
- int ret;
-
- if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
- return -EAGAIN;
- if (gdev->state == CCWGROUP_OFFLINE) {
- ret = 0;
- goto out;
- }
- if (!gdev->dev.driver) {
- ret = -EINVAL;
- goto out;
- }
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
- if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0))
- goto out;
-
- gdev->state = CCWGROUP_OFFLINE;
- out:
- atomic_set(&gdev->onoff, 0);
- return ret;
-}
-
-static ssize_t
-ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
- unsigned long value;
- int ret;
-
- if (!dev->driver)
- return -ENODEV;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
- if (!try_module_get(gdrv->driver.owner))
- return -EINVAL;
-
- ret = strict_strtoul(buf, 0, &value);
- if (ret)
- goto out;
-
- if (value == 1)
- ret = ccwgroup_set_online(gdev);
- else if (value == 0)
- ret = ccwgroup_set_offline(gdev);
- else
- ret = -EINVAL;
-out:
- module_put(gdrv->driver.owner);
- return (ret == 0) ? count : ret;
-}
-
-static ssize_t
-ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
-{
- int online;
-
- online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
-
- return sprintf(buf, online ? "1\n" : "0\n");
-}
-
-static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
-
-static int
-ccwgroup_probe (struct device *dev)
-{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- int ret;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
- if ((ret = device_create_file(dev, &dev_attr_online)))
- return ret;
-
- ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
- if (ret)
- device_remove_file(dev, &dev_attr_online);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
- return ret;
+ return gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
}
-static int
-ccwgroup_remove (struct device *dev)
+static int ccwgroup_remove(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- device_remove_file(dev, &dev_attr_online);
- device_remove_file(dev, &dev_attr_ungroup);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!dev->driver)
return 0;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
if (gdrv->remove)
gdrv->remove(gdev);
@@ -502,15 +463,11 @@ ccwgroup_remove (struct device *dev)
static void ccwgroup_shutdown(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!dev->driver)
return;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
if (gdrv->shutdown)
gdrv->shutdown(gdev);
}
@@ -586,26 +543,12 @@ static const struct dev_pm_ops ccwgroup_pm_ops = {
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.match = ccwgroup_bus_match,
- .uevent = ccwgroup_uevent,
.probe = ccwgroup_probe,
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
.pm = &ccwgroup_pm_ops,
};
-
-static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- struct device *dev = data;
-
- if (action == BUS_NOTIFY_UNBIND_DRIVER)
- device_schedule_callback(dev, ccwgroup_ungroup_callback);
-
- return NOTIFY_OK;
-}
-
-
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
@@ -619,9 +562,9 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
return driver_register(&cdriver->driver);
}
+EXPORT_SYMBOL(ccwgroup_driver_register);
-static int
-__ccwgroup_match_all(struct device *dev, void *data)
+static int __ccwgroup_match_all(struct device *dev, void *data)
{
return 1;
}
@@ -652,6 +595,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
put_driver(&cdriver->driver);
driver_unregister(&cdriver->driver);
}
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
@@ -666,6 +610,7 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
@@ -702,9 +647,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
/* Release ccwgroup device reference for local processing. */
put_device(&gdev->dev);
}
-
-MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ccwgroup_driver_register);
-EXPORT_SYMBOL(ccwgroup_driver_unregister);
-EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index d15f8b4d78b..5156264d0c7 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -1,10 +1,13 @@
/*
* Handling of internal CCW device requests.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2011
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
@@ -323,7 +326,21 @@ void ccw_request_timeout(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- int rc;
+ int rc = -ENODEV, chp;
+
+ if (cio_update_schib(sch))
+ goto err;
+
+ for (chp = 0; chp < 8; chp++) {
+ if ((0x80 >> chp) & sch->schib.pmcw.lpum)
+ pr_warning("%s: No interrupt was received within %lus "
+ "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+ dev_name(&cdev->dev), req->timeout / HZ,
+ scsw_cstat(&sch->schib.scsw),
+ scsw_dstat(&sch->schib.scsw),
+ sch->schid.cssid,
+ sch->schib.pmcw.chpid[chp]);
+ }
if (!ccwreq_next_path(cdev)) {
/* set the final return code for this request */
@@ -342,7 +359,7 @@ err:
* ccw_request_notoper - notoper handler for I/O request procedure
* @cdev: ccw device
*
- * Handle timeout during I/O request procedure.
+ * Handle notoper during I/O request procedure.
*/
void ccw_request_notoper(struct ccw_device *cdev)
{
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 2d32233943a..e792436c927 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -10,6 +10,8 @@
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/sched.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index e950f1ad4dd..0c87b0fc771 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,7 +1,7 @@
/*
* Driver for s390 chsc subchannels
*
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2011
*
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <linux/kernel_stat.h>
#include <asm/compat.h>
#include <asm/cio.h>
@@ -56,6 +57,8 @@ static void chsc_subchannel_irq(struct subchannel *sch)
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
+
/* Copy irb to provided request and set done. */
if (!request) {
CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index eb3140ee821..dc67c397449 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -622,6 +622,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (!sch) {
/* Clear pending interrupt condition. */
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
tsch(tpi_info->schid, irb);
continue;
}
@@ -634,7 +635,10 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
/* Call interrupt handler if there is one. */
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
- }
+ else
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ } else
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
spin_unlock(sch->lock);
/*
* Are more interrupts pending?
@@ -667,18 +671,23 @@ static int cio_tpi(void)
tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
if (tpi(NULL) != 1)
return 0;
+ kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
if (tpi_info->adapter_IO) {
do_adapter_IO(tpi_info->isc);
return 1;
}
irb = (struct irb *)&S390_lowcore.irb;
/* Store interrupt response block to lowcore. */
- if (tsch(tpi_info->schid, irb) != 0)
+ if (tsch(tpi_info->schid, irb) != 0) {
/* Not status pending or not operational. */
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
return 1;
+ }
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
- if (!sch)
+ if (!sch) {
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
return 1;
+ }
irq_context = in_interrupt();
if (!irq_context)
local_bh_disable();
@@ -687,6 +696,8 @@ static int cio_tpi(void)
memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
+ else
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
spin_unlock(sch->lock);
irq_exit();
if (!irq_context)
@@ -1058,7 +1069,7 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
{
struct subchannel_id schid;
- s390_reset_system();
+ s390_reset_system(NULL, NULL);
if (reipl_find_schid(devid, &schid) != 0)
panic("IPL Device not found\n");
do_reipl_asm(*((__u32*)&schid));
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 80ebdddf774..33bb4d891e1 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -133,6 +133,8 @@ struct channel_subsystem {
extern struct channel_subsystem *channel_subsystems[];
+void channel_subsystem_reinit(void);
+
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 8e04c00cf0a..d734f4a0eca 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -21,6 +21,7 @@
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
+#include <linux/kernel_stat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -747,6 +748,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
cdev->private->cdev = cdev;
+ cdev->private->int_class = IOINT_CIO;
atomic_set(&cdev->private->onoff, 0);
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
@@ -1010,6 +1012,8 @@ static void io_subchannel_irq(struct subchannel *sch)
CIO_TRACE_EVENT(6, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+ else
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
}
void io_subchannel_init_config(struct subchannel *sch)
@@ -1621,6 +1625,7 @@ ccw_device_probe_console(void)
memset(&console_private, 0, sizeof(struct ccw_device_private));
console_cdev.private = &console_private;
console_private.cdev = &console_cdev;
+ console_private.int_class = IOINT_CIO;
ret = ccw_device_console_enable(&console_cdev, sch);
if (ret) {
cio_release_console();
@@ -1702,11 +1707,18 @@ ccw_device_probe (struct device *dev)
int ret;
cdev->drv = cdrv; /* to let the driver call _set_online */
+ /* Note: we interpret class 0 in this context as an uninitialized
+ * field since it translates to a non-I/O interrupt class. */
+ if (cdrv->int_class != 0)
+ cdev->private->int_class = cdrv->int_class;
+ else
+ cdev->private->int_class = IOINT_CIO;
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
if (ret) {
cdev->drv = NULL;
+ cdev->private->int_class = IOINT_CIO;
return ret;
}
@@ -1740,6 +1752,7 @@ ccw_device_remove (struct device *dev)
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
+ cdev->private->int_class = IOINT_CIO;
return 0;
}
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 0b7245c72d5..179824b3082 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -5,6 +5,7 @@
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
#include "io_sch.h"
/*
@@ -56,7 +57,17 @@ extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
- dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
+ int state = cdev->private->state;
+
+ if (dev_event == DEV_EVENT_INTERRUPT) {
+ if (state == DEV_STATE_ONLINE)
+ kstat_cpu(smp_processor_id()).
+ irqs[cdev->private->int_class]++;
+ else if (state != DEV_STATE_CMFCHANGE &&
+ state != DEV_STATE_CMFUPDATE)
+ kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ }
+ dev_jumptable[state][dev_event](cdev, dev_event);
}
/*
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index ba31ad88f4f..2ebb492a5c1 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <asm/schid.h>
#include <asm/ccwdev.h>
+#include <asm/irq.h>
#include "css.h"
#include "orb.h"
@@ -157,6 +158,7 @@ struct ccw_device_private {
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
+ enum interruption_class int_class;
};
static inline int rsch(struct subchannel_id schid)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 3dd86441da3..b962ffbc080 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -18,14 +18,6 @@
#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
-/*
- * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
- * till next initiative to give transmitted skbs back to the stack is too long.
- * Therefore polling is started in case of multicast queue is filled more
- * than 50 percent.
- */
-#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */
-
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
QDIO_IRQ_STATE_ESTABLISHED,
@@ -290,6 +282,9 @@ struct qdio_q {
/* error condition during a data transfer */
unsigned int qdio_error;
+ /* last scan of the queue */
+ u64 timestamp;
+
struct tasklet_struct tasklet;
struct qdio_queue_perf_stat q_stats;
@@ -423,31 +418,7 @@ static inline int multicast_outbound(struct qdio_q *q)
#define queue_irqs_disabled(q) \
(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
-#define TIQDIO_SHARED_IND 63
-
-/* device state change indicators */
-struct indicator_t {
- u32 ind; /* u32 because of compare-and-swap performance */
- atomic_t count; /* use count, 0 or 1 for non-shared indicators */
-};
-
-extern struct indicator_t *q_indicators;
-
-static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq)
-{
- return irq->nr_input_qs > 1;
-}
-
-static inline int references_shared_dsci(struct qdio_irq *irq)
-{
- return irq->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
-}
-
-static inline int shared_ind(struct qdio_q *q)
-{
- struct qdio_irq *i = q->irq_ptr;
- return references_shared_dsci(i) || has_multiple_inq_on_dsci(i);
-}
+extern u64 last_ai_time;
/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
@@ -460,7 +431,8 @@ int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
-
+void clear_nonshared_ind(struct qdio_irq *);
+int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index aaf7f935bfd..29021f4e96b 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -7,6 +7,8 @@
*/
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
@@ -54,15 +56,17 @@ static int qstat_show(struct seq_file *m, void *v)
if (!q)
return 0;
- seq_printf(m, "DSCI: %d nr_used: %d\n",
- *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
- seq_printf(m, "ftc: %d last_move: %d\n",
+ seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
+ q->timestamp, last_ai_time);
+ seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
+ atomic_read(&q->nr_buf_used),
q->first_to_check, q->last_move);
if (q->is_input_q) {
seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
q->u.in.polling, q->u.in.ack_start,
q->u.in.ack_count);
- seq_printf(m, "IRQs disabled: %u\n",
+ seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
+ *(u32 *)q->irq_ptr->dsci,
test_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state));
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9a122280246..3ef8d071c64 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
-#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
@@ -105,9 +104,12 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
/* all done or next buffer state different */
if (ccq == 0 || ccq == 32)
return 0;
- /* not all buffers processed */
- if (ccq == 96 || ccq == 97)
+ /* no buffer processed */
+ if (ccq == 97)
return 1;
+ /* not all buffers processed */
+ if (ccq == 96)
+ return 2;
/* notify devices immediately */
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
return -EIO;
@@ -127,10 +129,8 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
+ int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
- int tmp_count = count, tmp_start = start;
- int nr = q->nr;
- int rc;
BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, eqbs);
@@ -141,29 +141,34 @@ again:
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
auto_ack);
rc = qdio_check_ccq(q, ccq);
-
- /* At least one buffer was processed, return and extract the remaining
- * buffers later.
- */
- if ((ccq == 96) && (count != tmp_count)) {
- qperf_inc(q, eqbs_partial);
- return (count - tmp_count);
- }
+ if (!rc)
+ return count - tmp_count;
if (rc == 1) {
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
goto again;
}
- if (rc < 0) {
- DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev,
- QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
- return 0;
+ if (rc == 2) {
+ BUG_ON(tmp_count == count);
+ qperf_inc(q, eqbs_partial);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ tmp_count);
+ /*
+ * Retry once, if that fails bail out and process the
+ * extracted buffers before trying again.
+ */
+ if (!retried++)
+ goto again;
+ else
+ return count - tmp_count;
}
- return count - tmp_count;
+
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+ 0, -1, -1, q->irq_ptr->int_parm);
+ return 0;
}
/**
@@ -196,21 +201,22 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
- if (rc == 1) {
+ if (!rc) {
+ WARN_ON(tmp_count);
+ return count - tmp_count;
+ }
+
+ if (rc == 1 || rc == 2) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
qperf_inc(q, sqbs_partial);
goto again;
}
- if (rc < 0) {
- DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev,
- QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
- return 0;
- }
- WARN_ON(tmp_count);
- return count - tmp_count;
+
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+ 0, -1, -1, q->irq_ptr->int_parm);
+ return 0;
}
/* returns number of examined buffers and their common state in *state */
@@ -275,7 +281,7 @@ static inline int set_buf_state(struct qdio_q *q, int bufnr,
}
/* set slsb states to initial state */
-void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
@@ -444,7 +450,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
- return;
+ goto set;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
@@ -454,6 +460,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
q->sbal[q->first_to_check]->element[14].sflags,
q->sbal[q->first_to_check]->element[15].sflags);
+set:
/*
* Interrupts may be avoided as long as the error is present
* so change the buffer state immediately to avoid starvation.
@@ -511,6 +518,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
+ q->timestamp = get_clock_fast();
+
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
* would return 0.
@@ -780,6 +789,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
+ q->timestamp = get_clock_fast();
+
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
!pci_out_supported(q)) ||
@@ -910,21 +921,13 @@ static void __qdio_outbound_processing(struct qdio_q *q)
if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
goto sched;
- /* bail out for HiperSockets unicast queues */
- if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
- return;
-
- if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
- (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
- goto sched;
-
if (q->u.out.pci_out_enabled)
return;
/*
* Now we know that queue type is either qeth without pci enabled
- * or HiperSockets multicast. Make sure buffer switch from PRIMED to
- * EMPTY is noticed and outbound_handler is called after some time.
+ * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
+ * is noticed and outbound_handler is called after some time.
*/
if (qdio_outbound_q_done(q))
del_timer(&q->u.out.timer);
@@ -1070,6 +1073,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
+ int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
@@ -1083,8 +1087,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
dump_stack();
goto no_handler;
}
+
+ count = sub_buf(q->first_to_check, q->first_to_kick);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, irq_ptr->int_parm);
+ q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
@@ -1123,7 +1129,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
- kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
if (irq_ptr->perf_stat_enabled)
irq_ptr->perf_stat.qdio_int++;
@@ -1714,9 +1719,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
WARN_ON(queue_irqs_enabled(q));
- if (!shared_ind(q))
- xchg(q->irq_ptr->dsci, 0);
-
+ clear_nonshared_ind(irq_ptr);
qdio_stop_polling(q);
clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
@@ -1724,7 +1727,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!shared_ind(q) && *q->irq_ptr->dsci)
+ if (test_nonshared_ind(irq_ptr))
goto rescan;
if (!qdio_inbound_q_done(q))
goto rescan;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index dd8bd670a6b..2acc01f90a6 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/qdio.h>
#include "cio.h"
@@ -381,6 +382,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
int i;
irq_ptr->qdr->qfmt = qdio_init->q_format;
+ irq_ptr->qdr->ac = qdio_init->qdr_ac;
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index a3e3949d7b6..011eadea3ee 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -26,17 +26,24 @@
*/
#define TIQDIO_NR_NONSHARED_IND 63
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
+#define TIQDIO_SHARED_IND 63
+
+/* device state change indicators */
+struct indicator_t {
+ u32 ind; /* u32 because of compare-and-swap performance */
+ atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
-DEFINE_MUTEX(tiq_list_lock);
+static DEFINE_MUTEX(tiq_list_lock);
/* adapter local summary indicator */
static u8 *tiqdio_alsi;
-struct indicator_t *q_indicators;
+static struct indicator_t *q_indicators;
-static u64 last_ai_time;
+u64 last_ai_time;
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
@@ -90,6 +97,43 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
synchronize_rcu();
}
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+{
+ return irq_ptr->nr_input_qs > 1;
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
+{
+ return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+ return references_shared_dsci(irq_ptr) ||
+ has_multiple_inq_on_dsci(irq_ptr);
+}
+
+void clear_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+ if (shared_ind(irq_ptr))
+ return;
+ xchg(irq_ptr->dsci, 0);
+}
+
+int test_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+ if (shared_ind(irq_ptr))
+ return 0;
+ if (*irq_ptr->dsci)
+ return 1;
+ else
+ return 0;
+}
+
static inline u32 clear_shared_ind(void)
{
if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
@@ -119,7 +163,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
q->irq_ptr->int_parm);
} else {
- if (!shared_ind(q))
+ if (!shared_ind(q->irq_ptr))
xchg(q->irq_ptr->dsci, 0);
/*
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index aec60d55b10..94f49ffa70b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -20,6 +20,7 @@
#include <linux/virtio_console.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
+#include <linux/export.h>
#include <linux/pfn.h>
#include <asm/io.h>
#include <asm/kvm_para.h>
@@ -33,7 +34,7 @@
* The pointer to our (page) of device descriptions.
*/
static void *kvm_devices;
-struct work_struct hotplug_work;
+static struct work_struct hotplug_work;
struct kvm_device {
struct virtio_device vdev;
@@ -334,10 +335,10 @@ static void scan_devices(void)
*/
static int match_desc(struct device *dev, void *data)
{
- if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data)
- return 1;
+ struct virtio_device *vdev = dev_to_virtio(dev);
+ struct kvm_device *kdev = to_kvmdev(vdev);
- return 0;
+ return kdev->desc == data;
}
/*
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index f1fa2483ae6..b41fae37d3a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -63,7 +63,6 @@
#define KMSG_COMPONENT "claw"
-#include <linux/kernel_stat.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/debug.h>
@@ -291,6 +290,7 @@ static struct ccw_driver claw_ccw_driver = {
.ids = claw_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
+ .int_class = IOINT_CLW,
};
static ssize_t
@@ -645,7 +645,6 @@ claw_irq_handler(struct ccw_device *cdev,
struct claw_env *p_env;
struct chbk *p_ch_r=NULL;
- kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
CLAW_DBF_TEXT(4, trace, "clawirq");
/* Bypass all 'unsolicited interrupts' */
privptr = dev_get_drvdata(&cdev->dev);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 426787efc49..5cb93a8e340 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -24,7 +24,6 @@
#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -1203,7 +1202,6 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
int cstat;
int dstat;
- kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
@@ -1769,6 +1767,7 @@ static struct ccw_driver ctcm_ccw_driver = {
.ids = ctcm_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
+ .int_class = IOINT_CTC,
};
static struct ccwgroup_driver ctcm_group_driver = {
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8305319b2a8..650aec1839e 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t ctcm_proto_store(struct device *dev,
return count;
}
-const char *ctcm_type[] = {
+static const char *ctcm_type[] = {
"not a channel",
"CTC/A",
"FICON channel",
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index fb246b944b1..c28713da1ec 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,7 +26,6 @@
#define KMSG_COMPONENT "lcs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/if.h>
#include <linux/netdevice.h>
@@ -1399,7 +1398,6 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
int rc, index;
int cstat, dstat;
- kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
if (lcs_check_irb_error(cdev, irb))
return;
@@ -1972,7 +1970,7 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
-const char *lcs_type[] = {
+static const char *lcs_type[] = {
"not a channel",
"2216 parallel",
"2216 channel",
@@ -2399,6 +2397,7 @@ static struct ccw_driver lcs_ccw_driver = {
.ids = lcs_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
+ .int_class = IOINT_LCS,
};
/**
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ce735204d31..e4c1176ee25 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1415,7 +1415,7 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
return 0;
}
-int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
+static int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
{
int rc = 0;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 645b0fcbb37..08601810966 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -31,6 +31,7 @@
#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index e8b7cee6204..96f13ad8812 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -9,6 +9,7 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 96d1462e0bf..a9a816e4aa5 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -9,6 +9,7 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
@@ -163,6 +164,42 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
+/**
+ * zfcp_dbf_hba_def_err - trace event for deferred error messages
+ * @adapter: pointer to struct zfcp_adapter
+ * @req_id: request id which caused the deferred error message
+ * @scount: number of sbals incl. the signaling sbal
+ * @pl: array of all involved sbals
+ */
+void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ void **pl)
+{
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+ unsigned long flags;
+ u16 length;
+
+ if (!pl)
+ return;
+
+ spin_lock_irqsave(&dbf->pay_lock, flags);
+ memset(payload, 0, sizeof(*payload));
+
+ memcpy(payload->area, "def_err", 7);
+ payload->fsf_req_id = req_id;
+ payload->counter = 0;
+ length = min((u16)sizeof(struct qdio_buffer),
+ (u16)ZFCP_DBF_PAY_MAX_REC);
+
+ while ((char *)pl[payload->counter] && payload->counter < scount) {
+ memcpy(payload->data, (char *)pl[payload->counter], length);
+ debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+ payload->counter++;
+ }
+
+ spin_unlock_irqrestore(&dbf->pay_lock, flags);
+}
+
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 527ba48eea5..ed5d921e82c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -72,6 +72,7 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_COMMON_NOESC 0x00200000
/* adapter status */
+#define ZFCP_STATUS_ADAPTER_MB_ACT 0x00000001
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
@@ -314,4 +315,10 @@ struct zfcp_fsf_req {
void (*handler)(struct zfcp_fsf_req *);
};
+static inline
+int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
+{
+ return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
+}
+
#endif /* ZFCP_DEF_H */
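[Editor's aside, not part of the patch: the new ZFCP_STATUS_ADAPTER_MB_ACT bit and zfcp_adapter_multi_buffer_active() follow the usual atomic status-word idiom. A stand-alone C sketch of that idiom with C11 atomics standing in for the kernel's atomic_set_mask()/atomic_clear_mask()/atomic_read(); the names are placeholders.]

#include <stdatomic.h>
#include <stdio.h>

#define STATUS_MB_ACT 0x00000001u   /* mirrors ZFCP_STATUS_ADAPTER_MB_ACT */

struct adapter {
	atomic_uint status;
};

static void set_mask(struct adapter *a, unsigned int mask)
{
	atomic_fetch_or(&a->status, mask);     /* like atomic_set_mask() */
}

static void clear_mask(struct adapter *a, unsigned int mask)
{
	atomic_fetch_and(&a->status, ~mask);   /* like atomic_clear_mask() */
}

static int multi_buffer_active(struct adapter *a)
{
	return atomic_load(&a->status) & STATUS_MB_ACT;
}

int main(void)
{
	struct adapter a;

	atomic_init(&a.status, 0);
	set_mask(&a, STATUS_MB_ACT);
	printf("active=%d\n", !!multi_buffer_active(&a));  /* 1 */
	clear_mask(&a, STATUS_MB_ACT);
	printf("active=%d\n", !!multi_buffer_active(&a));  /* 0 */
	return 0;
}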
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 03627cfd81c..2302e1cfb76 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -53,6 +53,7 @@ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 022fb6a8cb8..e9a787e2e6a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -936,39 +936,47 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
struct scatterlist *sg_resp)
{
struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_qdio *qdio = adapter->qdio;
+ struct fsf_qtcb *qtcb = req->qtcb;
u32 feat = adapter->adapter_features;
- int bytes;
- if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
- if (!zfcp_qdio_sg_one_sbale(sg_req) ||
- !zfcp_qdio_sg_one_sbale(sg_resp))
- return -EOPNOTSUPP;
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+ return -EIO;
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+ return -EIO;
- zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
- sg_req, sg_resp);
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ zfcp_qdio_sbale_count(sg_req));
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+ zfcp_qdio_set_scount(qdio, &req->qdio_req);
return 0;
}
/* use single, unchained SBAL if it can hold the request */
if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
- zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
sg_req, sg_resp);
return 0;
}
- bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
- if (bytes <= 0)
+ if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
+ return -EOPNOTSUPP;
+
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
return -EIO;
- zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
- req->qtcb->bottom.support.req_buf_length = bytes;
- zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
- bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- sg_resp);
- req->qtcb->bottom.support.resp_buf_length = bytes;
- if (bytes <= 0)
+ qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
+
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+ zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
+
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
return -EIO;
- zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
+
+ qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
+
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
return 0;
}
@@ -1119,7 +1127,8 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
+ if (!zfcp_adapter_multi_buffer_active(adapter))
+ zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
@@ -2162,7 +2171,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
struct zfcp_fsf_req *req;
struct fcp_cmnd *fcp_cmnd;
u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
- int real_bytes, retval = -EIO, dix_bytes = 0;
+ int retval = -EIO;
struct scsi_device *sdev = scsi_cmnd->device;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -2207,7 +2216,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
}
- zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
+ if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
+ goto failed_scsi_cmnd;
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
@@ -2215,18 +2225,22 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
if (scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
- dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+ retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+ scsi_prot_sglist(scsi_cmnd));
+ if (retval)
+ goto failed_scsi_cmnd;
+ io->prot_data_length = zfcp_qdio_real_bytes(
scsi_prot_sglist(scsi_cmnd));
- io->prot_data_length = dix_bytes;
}
- real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
- scsi_sglist(scsi_cmnd));
-
- if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
+ retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+ scsi_sglist(scsi_cmnd));
+ if (unlikely(retval))
goto failed_scsi_cmnd;
zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
+ if (zfcp_adapter_multi_buffer_active(adapter))
+ zfcp_qdio_set_scount(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
@@ -2328,7 +2342,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct fsf_qtcb_bottom_support *bottom;
- int retval = -EIO, bytes;
+ int retval = -EIO;
u8 direction;
if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
@@ -2361,13 +2375,17 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
- bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
+ retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
- if (bytes != ZFCP_CFDC_MAX_SIZE) {
+ if (retval ||
+ (zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
zfcp_fsf_req_free(req);
+ retval = -EIO;
goto out;
}
- zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+ if (zfcp_adapter_multi_buffer_active(adapter))
+ zfcp_qdio_set_scount(qdio, &req->qdio_req);
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index d9c40ea73ee..e14da5751d3 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -10,11 +10,16 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
+#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
+static bool enable_multibuffer;
+module_param_named(datarouter, enable_multibuffer, bool, 0400);
+MODULE_PARM_DESC(datarouter, "Enable hardware data router support");
+
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
int pos;
@@ -37,8 +42,11 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
- if (qdio_err & QDIO_ERROR_SLSB_STATE)
+ if (qdio_err & QDIO_ERROR_SLSB_STATE) {
zfcp_qdio_siosl(adapter);
+ zfcp_erp_adapter_shutdown(adapter, 0, id);
+ return;
+ }
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED, id);
@@ -93,9 +101,27 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
- int sbal_idx, sbal_no;
+ struct zfcp_adapter *adapter = qdio->adapter;
+ struct qdio_buffer_element *sbale;
+ int sbal_no, sbal_idx;
+ void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+ u64 req_id;
+ u8 scount;
if (unlikely(qdio_err)) {
+ memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
+ sbale = qdio->res_q[idx]->element;
+ req_id = (u64) sbale->addr;
+ scount = sbale->scount + 1; /* incl. signaling SBAL */
+
+ for (sbal_no = 0; sbal_no < scount; sbal_no++) {
+ sbal_idx = (idx + sbal_no) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ pl[sbal_no] = qdio->res_q[sbal_idx];
+ }
+ zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
+ }
zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
return;
}
@@ -155,7 +181,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
+ if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
@@ -167,13 +193,12 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
* @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* @max_sbals: upper bound for number of SBALs to be used
- * Returns: number of bytes, or error (negativ)
+ * Returns: zero or -EINVAL on error
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
struct scatterlist *sg)
{
struct qdio_buffer_element *sbale;
- int bytes = 0;
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
@@ -187,14 +212,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
q_req->sbal_number);
return -EINVAL;
}
-
sbale->addr = sg_virt(sg);
sbale->length = sg->length;
-
- bytes += sg->length;
}
-
- return bytes;
+ return 0;
}
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
@@ -283,6 +304,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
ASCEBC(id->adapter_name, 8);
id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
+ if (enable_multibuffer)
+ id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
id->no_input_qs = 1;
id->no_output_qs = 1;
id->input_handler = zfcp_qdio_int_resp;
@@ -378,6 +401,17 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
&qdio->adapter->status);
+ if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+ qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
+ } else {
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+ qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
+ }
+
+ qdio->max_sbale_per_req =
+ ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
+ - 2;
if (qdio_activate(cdev))
goto failed_qdio;
@@ -397,6 +431,11 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+ if (adapter->scsi_host) {
+ adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
+ adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
+ }
+
return 0;
failed_qdio:
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 54e22ace012..8ac7f5342d2 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -13,20 +13,9 @@
#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
-/* DMQ bug workaround: don't use last SBALE */
-#define ZFCP_QDIO_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
-
-/* index of last SBALE (with respect to DMQ bug workaround) */
-#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
-
/* Max SBALS for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
-/* max. number of (data buffer) SBALEs in largest SBAL chain
- * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-#define ZFCP_QDIO_MAX_SBALES_PER_REQ \
- (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
-
/**
* struct zfcp_qdio - basic qdio data structure
* @res_q: response queue
@@ -53,6 +42,8 @@ struct zfcp_qdio {
atomic_t req_q_full;
wait_queue_head_t req_q_wq;
struct zfcp_adapter *adapter;
+ u16 max_sbale_per_sbal;
+ u16 max_sbale_per_req;
};
/**
@@ -155,7 +146,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
{
struct qdio_buffer_element *sbale;
- BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
+ BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
q_req->sbale_curr++;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->addr = data;
@@ -195,9 +186,10 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
* @q_req: The current zfcp_qdio_req
*/
static inline
-void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req)
{
- q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
+ q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
}
/**
@@ -228,8 +220,52 @@ void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
{
struct qdio_buffer_element *sbale;
- sbale = &qdio->req_q[q_req->sbal_first]->element[0];
+ sbale = qdio->req_q[q_req->sbal_first]->element;
sbale->length = count;
}
+/**
+ * zfcp_qdio_sbale_count - count sbale used
+ * @sg: pointer to struct scatterlist
+ */
+static inline
+unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
+{
+ unsigned int count = 0;
+
+ for (; sg; sg = sg_next(sg))
+ count++;
+
+ return count;
+}
+
+/**
+ * zfcp_qdio_real_bytes - count bytes used
+ * @sg: pointer to struct scatterlist
+ */
+static inline
+unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
+{
+ unsigned int real_bytes = 0;
+
+ for (; sg; sg = sg_next(sg))
+ real_bytes += sg->length;
+
+ return real_bytes;
+}
+
+/**
+ * zfcp_qdio_set_scount - set SBAL count value
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = qdio->req_q[q_req->sbal_first]->element;
+ sbale->scount = q_req->sbal_number - 1;
+}
+
#endif /* ZFCP_QDIO_H */
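[Editor's aside, not part of the patch: the sg_tablesize/max_sectors values programmed in zfcp_qdio_open() above follow directly from the SBAL geometry. The sketch below assumes the usual 16 SBALEs per SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER) and 4 KiB pages, i.e. 8 sectors of 512 bytes per SBALE; those constants are assumptions for illustration, not values stated in this patch.]

#include <stdio.h>

#define SBALS_PER_REQ       36   /* ZFCP_QDIO_MAX_SBALS_PER_REQ */
#define ELEMENTS_PER_BUFFER 16   /* assumed QDIO_MAX_ELEMENTS_PER_BUFFER */
#define SECTORS_PER_SBALE   (4096 / 512)

int main(void)
{
	/* index 0: classic mode (last SBALE reserved), 1: data-router mode */
	int per_sbal[2] = { ELEMENTS_PER_BUFFER - 1, ELEMENTS_PER_BUFFER };

	for (int i = 0; i < 2; i++) {
		/* request ID and QTCB occupy two SBALEs of the first SBAL */
		int max_sbale_per_req = SBALS_PER_REQ * per_sbal[i] - 2;

		printf("sg_tablesize=%d max_sectors=%d\n",
		       max_sbale_per_req,
		       max_sbale_per_req * SECTORS_PER_SBALE);
	}
	return 0;
}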
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7cac873c738..11f07f88822 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -9,6 +9,7 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
@@ -24,11 +25,8 @@ module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
static bool enable_dif;
-
-#ifdef CONFIG_ZFCP_DIF
-module_param_named(dif, enable_dif, bool, 0600);
+module_param_named(dif, enable_dif, bool, 0400);
MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
-#endif
static bool allow_lun_scan = 1;
module_param(allow_lun_scan, bool, 0600);
@@ -309,8 +307,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.proc_name = "zfcp",
.can_queue = 4096,
.this_id = -1,
- .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
- .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
+ .sg_tablesize = 1, /* adjusted later */
+ .max_sectors = 8, /* adjusted later */
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.cmd_per_lun = 1,
.use_clustering = 1,
@@ -668,9 +666,9 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
mask |= SHOST_DIX_TYPE1_PROTECTION;
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
- shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
- shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
- shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
+ shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
+ shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
+ shost->max_sectors = shost->sg_tablesize * 8;
}
scsi_host_set_prot(shost, mask);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3878b739508..06ea3bcfdd2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -309,6 +309,7 @@ config SCSI_FC_TGT_ATTRS
config SCSI_ISCSI_ATTRS
tristate "iSCSI Transport Attributes"
depends on SCSI && NET
+ select BLK_DEV_BSGLIB
help
If you wish to export transport-specific information about
each attached iSCSI device to sysfs, say Y.
@@ -559,6 +560,15 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
source "drivers/scsi/aic94xx/Kconfig"
source "drivers/scsi/mvsas/Kconfig"
+config SCSI_MVUMI
+ tristate "Marvell UMI driver"
+ depends on SCSI && PCI
+ help
+ Module for Marvell Universal Message Interface(UMI) driver
+
+ To compile this driver as a module, choose M here: the
+ module will be called mvumi.
+
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
depends on SCSI && PCI && VIRT_TO_BUS
@@ -607,20 +617,6 @@ config SCSI_ARCMSR
To compile this driver as a module, choose M here: the
module will be called arcmsr (modprobe arcmsr).
-config SCSI_ARCMSR_AER
- bool "Enable PCI Error Recovery Capability in Areca Driver(ARCMSR)"
- depends on SCSI_ARCMSR && PCIEAER
- default n
- help
- The advanced error reporting(AER) capability is "NOT" provided by
- ARC1200/1201/1202 SATA RAID controllers cards.
- If your card is one of ARC1200/1201/1202, please use the default setting, n.
- If your card is other models, you could pick it
- on condition that the kernel version is greater than 2.6.19.
- This function is maintained driver by Nick Cheng. If you have any
- problems or suggestion, you are welcome to contact with <nick.cheng@areca.com.tw>.
- To enable this function, choose Y here.
-
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
@@ -1872,10 +1868,6 @@ config ZFCP
called zfcp. If you want to compile it as a module, say M here
and read <file:Documentation/kbuild/modules.txt>.
-config ZFCP_DIF
- tristate "T10 DIF/DIX support for the zfcp driver (EXPERIMENTAL)"
- depends on ZFCP && EXPERIMENTAL
-
config SCSI_PMCRAID
tristate "PMC SIERRA Linux MaxRAID adapter support"
depends on PCI && SCSI && NET
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6153a66a8a3..2b887498be5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas/
+obj-$(CONFIG_SCSI_MVUMI) += mvumi.o
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 1bb5d3f0e26..79a30633d4a 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zorro.h>
+#include <linux/module.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index d9468027fb6..e29fe0e708f 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -6,6 +6,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 06199574144..409f5805bdd 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -34,6 +34,7 @@
#include <linux/blkdev.h>
#include <asm/uaccess.h>
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3382475dc22..4aa76d6f11d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -894,16 +894,17 @@ static ssize_t aac_show_serial_number(struct device *device,
int len = 0;
if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
- len = snprintf(buf, PAGE_SIZE, "%06X\n",
+ len = snprintf(buf, 16, "%06X\n",
le32_to_cpu(dev->adapter_info.serial[0]));
if (len &&
!memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
buf, len-1))
- len = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ len = snprintf(buf, 16, "%.*s\n",
(int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
dev->supplement_adapter_info.MfgPcbaSerialNo);
- return len;
+
+ return min(len, 16);
}
static ssize_t aac_show_max_channel(struct device *device,
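[Editor's aside on the aacraid hunk above, not part of the patch: snprintf() returns the length the formatted string would have had, not the number of bytes actually stored, which is why the show routine now limits both the buffer size it formats into and the length it returns. A tiny stand-alone demonstration:]

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16];
	int len = snprintf(buf, sizeof(buf), "%s\n",
			   "A-SERIAL-NUMBER-LONGER-THAN-16-BYTES");

	/* len reports the untruncated length, strlen(buf) what was kept */
	printf("reported=%d stored=%zu\n", len, strlen(buf));

	/* a caller that must return "bytes produced" has to clamp */
	if (len > (int)sizeof(buf))
		len = (int)sizeof(buf);
	printf("clamped=%d\n", len);
	return 0;
}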
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 29593275201..fdac7c2fef3 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -906,6 +906,7 @@ int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg)
switch (func) {
case PHY_FUNC_CLEAR_ERROR_LOG:
+ case PHY_FUNC_GET_EVENTS:
return -ENOSYS;
case PHY_FUNC_SET_LINK_RATE:
rates = arg;
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index b8a82f2c62c..cdb15364bc6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -660,6 +660,7 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
spin_lock(&phba->ctrl.mbox_lock);
ctrl = &phba->ctrl;
wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
req = embedded_payload(wrb);
ctxt = &req->context;
@@ -868,3 +869,22 @@ error:
beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
return status;
}
+
+int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+ status = be_mbox_notify_wait(phba);
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 497eb29e5c9..8b40a5b4366 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -561,6 +561,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem, u32 page_offset,
u32 num_pages);
+int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
+
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct be_queue_info *wrbq);
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 3cad1060502..8b002f6db6c 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -177,9 +177,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
- struct Scsi_Host *shost =
- (struct Scsi_Host *)iscsi_session_to_shost(cls_session);
- struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
@@ -290,7 +289,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
- struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
int status = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
@@ -733,3 +732,56 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
+
+mode_t be2iscsi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
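[Editor's aside, not part of the patch: be2iscsi_attr_is_visible() replaces the driver's static parameter masks (removed further down in be_main.c) with a callback that the transport class queries per attribute. A generic sketch of the two styles, with made-up attribute names:]

#include <stdio.h>

enum attr { ATTR_FOO, ATTR_BAR, ATTR_BAZ };

/* old style: one fixed bit per exported attribute */
#define MASK_FOO (1u << ATTR_FOO)
#define MASK_BAR (1u << ATTR_BAR)
static const unsigned int param_mask = MASK_FOO | MASK_BAR;

/* new style: a per-attribute callback returning a file mode (0 = hidden);
 * unlike a fixed mask it could also consult run-time state */
static int attr_is_visible(enum attr a)
{
	switch (a) {
	case ATTR_FOO:
	case ATTR_BAR:
		return 0444;         /* world readable, like S_IRUGO */
	default:
		return 0;
	}
}

int main(void)
{
	printf("mask: FOO visible = %d\n", !!(param_mask & MASK_FOO));
	printf("callback: BAZ mode = %o\n", attr_is_visible(ATTR_BAZ));
	return 0;
}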
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index ff60b7fd92d..4a1f2e393f3 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -26,6 +26,8 @@
#define BE2_IPV4 0x1
#define BE2_IPV6 0x10
+mode_t be2iscsi_attr_is_visible(int param_type, int param);
+
void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0a9bdfa3d93..379c696dac1 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
+#include <linux/module.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -822,33 +823,47 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
int ret, msix_vec, i, j;
- char desc[32];
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
if (phba->msix_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
- sprintf(desc, "beiscsi_msix_%04x", i);
+ phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
+ GFP_KERNEL);
+ if (!phba->msi_name[i]) {
+ ret = -ENOMEM;
+ goto free_msix_irqs;
+ }
+
+ sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
+ phba->shost->host_no, i);
msix_vec = phba->msix_entries[i].vector;
- ret = request_irq(msix_vec, be_isr_msix, 0, desc,
+ ret = request_irq(msix_vec, be_isr_msix, 0,
+ phba->msi_name[i],
&phwi_context->be_eq[i]);
if (ret) {
shost_printk(KERN_ERR, phba->shost,
"beiscsi_init_irqs-Failed to"
"register msix for i = %d\n", i);
- if (!i)
- return ret;
+ kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
}
+ phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
+ if (!phba->msi_name[i]) {
+ ret = -ENOMEM;
+ goto free_msix_irqs;
+ }
+ sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
+ phba->shost->host_no);
msix_vec = phba->msix_entries[i].vector;
- ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
+ ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
&phwi_context->be_eq[i]);
if (ret) {
shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
"Failed to register beiscsi_msix_mcc\n");
- i++;
+ kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@@ -863,8 +878,11 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
}
return 0;
free_msix_irqs:
- for (j = i - 1; j == 0; j++)
+ for (j = i - 1; j >= 0; j--) {
+ kfree(phba->msi_name[j]);
+ msix_vec = phba->msix_entries[j].vector;
free_irq(msix_vec, &phwi_context->be_eq[j]);
+ }
return ret;
}
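[Editor's aside on the hunk above, not part of the patch: request_irq() keeps the name pointer it is given (it later shows up in /proc/interrupts), so the string must outlive the registration; that is why the driver now allocates a per-vector msi_name[] instead of formatting into a stack buffer, and why the unwind path frees each name again. A minimal sketch of the idiom with placeholder names:]

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static irqreturn_t foo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* allocate a long-lived name, then hand it to request_irq() */
static int foo_request_vector(unsigned int irq, unsigned int index,
			      char **name_slot, void *cookie)
{
	int ret;

	*name_slot = kasprintf(GFP_KERNEL, "foo_msix_%02u", index);
	if (!*name_slot)
		return -ENOMEM;

	ret = request_irq(irq, foo_isr, 0, *name_slot, cookie);
	if (ret) {
		kfree(*name_slot);   /* nothing references the name now */
		*name_slot = NULL;
	}
	return ret;
}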
@@ -1106,7 +1124,12 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
& SOL_STS_MASK) >> 8);
flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
& SOL_FLAGS_MASK) >> 24) | 0x80;
+ if (!task->sc) {
+ if (io_task->scsi_cmnd)
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ return;
+ }
task->sc->result = (DID_OK << 16) | status;
if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
task->sc->result = DID_ERROR << 16;
@@ -4027,11 +4050,11 @@ static int beiscsi_mtask(struct iscsi_task *task)
TGT_DM_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
} else {
AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
INI_RD_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
}
hwi_write_buffer(pwrb, task);
break;
@@ -4102,9 +4125,8 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
}
-static void beiscsi_remove(struct pci_dev *pcidev)
+static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
- struct beiscsi_hba *phba = NULL;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
@@ -4112,12 +4134,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
u8 *real_offset = 0;
u32 value = 0;
- phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
- if (!phba) {
- dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
- return;
- }
-
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
hwi_disable_intr(phba);
@@ -4125,6 +4141,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
for (i = 0; i <= phba->num_cpus; i++) {
msix_vec = phba->msix_entries[i].vector;
free_irq(msix_vec, &phwi_context->be_eq[i]);
+ kfree(phba->msi_name[i]);
}
} else
if (phba->pcidev->irq)
@@ -4152,10 +4169,40 @@ static void beiscsi_remove(struct pci_dev *pcidev)
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
+}
+
+static void beiscsi_remove(struct pci_dev *pcidev)
+{
+
+ struct beiscsi_hba *phba = NULL;
+
+ phba = pci_get_drvdata(pcidev);
+ if (!phba) {
+ dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
+ return;
+ }
+
+ beiscsi_quiesce(phba);
iscsi_boot_destroy_kset(phba->boot_kset);
iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ pci_disable_device(pcidev);
+}
+
+static void beiscsi_shutdown(struct pci_dev *pcidev)
+{
+
+ struct beiscsi_hba *phba = NULL;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
+ if (!phba) {
+ dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
+ return;
+ }
+
+ beiscsi_quiesce(phba);
+ pci_disable_device(pcidev);
}
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
@@ -4235,7 +4282,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
gcrashmode++;
shost_printk(KERN_ERR, phba->shost,
"Loading Driver in crashdump mode\n");
- ret = beiscsi_pci_soft_reset(phba);
+ ret = beiscsi_cmd_reset_function(phba);
if (ret) {
shost_printk(KERN_ERR, phba->shost,
"Reset Failed. Aborting Crashdump\n");
@@ -4364,37 +4411,12 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.name = DRV_NAME,
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
- .param_mask = ISCSI_MAX_RECV_DLENGTH |
- ISCSI_MAX_XMIT_DLENGTH |
- ISCSI_HDRDGST_EN |
- ISCSI_DATADGST_EN |
- ISCSI_INITIAL_R2T_EN |
- ISCSI_MAX_R2T |
- ISCSI_IMM_DATA_EN |
- ISCSI_FIRST_BURST |
- ISCSI_MAX_BURST |
- ISCSI_PDU_INORDER_EN |
- ISCSI_DATASEQ_INORDER_EN |
- ISCSI_ERL |
- ISCSI_CONN_PORT |
- ISCSI_CONN_ADDRESS |
- ISCSI_EXP_STATSN |
- ISCSI_PERSISTENT_PORT |
- ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO |
- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
- ISCSI_HOST_INITIATOR_NAME,
.create_session = beiscsi_session_create,
.destroy_session = beiscsi_session_destroy,
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
.destroy_conn = iscsi_conn_teardown,
+ .attr_is_visible = be2iscsi_attr_is_visible,
.set_param = beiscsi_set_param,
.get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
@@ -4418,6 +4440,7 @@ static struct pci_driver beiscsi_pci_driver = {
.name = DRV_NAME,
.probe = beiscsi_dev_probe,
.remove = beiscsi_remove,
+ .shutdown = beiscsi_shutdown,
.id_table = beiscsi_pci_id_table
};
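[Editor's aside, not part of the patch: the new .shutdown entry gives the HBA a hook on reboot/kexec, where .remove is never called, so it can stop DMA and interrupts before the kernel hands over. A minimal, generic pci_driver skeleton showing where the hook sits; every "foo" symbol and the device ID are placeholders, not be2iscsi's.]

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

/* common quiesce path: stop DMA and interrupts on the device */
static void foo_quiesce(struct pci_dev *pdev)
{
}

static void foo_remove(struct pci_dev *pdev)
{
	foo_quiesce(pdev);
	pci_disable_device(pdev);
}

/* invoked on reboot/kexec, where .remove is not */
static void foo_shutdown(struct pci_dev *pdev)
{
	foo_quiesce(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x0001) },   /* placeholder IDs */
	{ }
};

static struct pci_driver foo_driver = {
	.name     = "foo",
	.id_table = foo_ids,
	.probe    = foo_probe,
	.remove   = foo_remove,
	.shutdown = foo_shutdown,
};

static int __init foo_init(void)
{
	return pci_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	pci_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");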
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 5ce5170254c..b4a06d5e5f9 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -34,7 +34,7 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "2.103.298.0"
+#define BUILD_STR "4.1.239.0"
#define BE_NAME "ServerEngines BladeEngine2" \
"Linux iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -162,6 +162,8 @@ do { \
#define PAGES_REQUIRED(x) \
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
+#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
+
enum be_mem_enum {
HWI_MEM_ADDN_CONTEXT,
HWI_MEM_WRB,
@@ -287,6 +289,7 @@ struct beiscsi_hba {
unsigned int num_cpus;
unsigned int nxt_cqid;
struct msix_entry msix_entries[MAX_CPUS + 1];
+ char *msi_name[MAX_CPUS + 1];
bool msix_enabled;
struct be_mem_descriptor *init_mem;
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index b412e0300dd..dee1a094c2c 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -16,6 +16,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include "bfad_drv.h"
#include "bfad_im.h"
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 01312381639..e5db649e8eb 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -19,6 +19,8 @@
* bfad_im.c Linux driver IM module.
*/
+#include <linux/export.h>
+
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index dd335a2a797..049ea907e04 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.4"
+#define BNX2FC_VERSION "1.0.9"
#define PFX "bnx2fc: "
@@ -145,6 +145,9 @@
#define REC_RETRY_COUNT 1
#define BNX2FC_NUM_ERR_BITS 63
+#define BNX2FC_RELOGIN_WAIT_TIME 200
+#define BNX2FC_RELOGIN_WAIT_CNT 10
+
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
extern struct fcoe_percpu_s bnx2fc_global;
@@ -224,6 +227,7 @@ struct bnx2fc_interface {
struct fcoe_ctlr ctlr;
u8 vlan_enabled;
int vlan_id;
+ bool enabled;
};
#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index d66dcbd0df1..ce0ce3e32f3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -268,17 +268,6 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
orig_io_req = cb_arg->aborted_io_req;
srr_req = cb_arg->io_req;
- if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
- BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
- orig_io_req->xid);
- goto srr_compl_done;
- }
- if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
- BNX2FC_IO_DBG(srr_req, "rec abts in prog "
- "orig_io - 0x%x\n",
- orig_io_req->xid);
- goto srr_compl_done;
- }
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
/* SRR timedout */
BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
@@ -290,6 +279,12 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
"failed. issue cleanup\n");
bnx2fc_initiate_cleanup(srr_req);
}
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+ test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
+ orig_io_req->xid, orig_io_req->req_flags);
+ goto srr_compl_done;
+ }
orig_io_req->srr_retry++;
if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
struct bnx2fc_rport *tgt = orig_io_req->tgt;
@@ -311,6 +306,12 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
}
goto srr_compl_done;
}
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+ test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
+ orig_io_req->xid, orig_io_req->req_flags);
+ goto srr_compl_done;
+ }
mp_req = &(srr_req->mp_req);
fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
@@ -391,18 +392,6 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
tgt = orig_io_req->tgt;
- if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
- BNX2FC_IO_DBG(rec_req, "completed"
- "orig_io - 0x%x\n",
- orig_io_req->xid);
- goto rec_compl_done;
- }
- if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
- BNX2FC_IO_DBG(rec_req, "abts in prog "
- "orig_io - 0x%x\n",
- orig_io_req->xid);
- goto rec_compl_done;
- }
/* Handle REC timeout case */
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
BNX2FC_IO_DBG(rec_req, "timed out, abort "
@@ -433,6 +422,20 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
}
goto rec_compl_done;
}
+
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "completed"
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "abts in prog "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+
mp_req = &(rec_req->mp_req);
fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 820a1840c3f..8c6156a10d9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jun 23, 2011"
+#define DRV_MODULE_RELDATE "Oct 21, 2011"
static char version[] __devinitdata =
@@ -56,6 +56,7 @@ static struct scsi_host_template bnx2fc_shost_template;
static struct fc_function_template bnx2fc_transport_function;
static struct fc_function_template bnx2fc_vport_xport_function;
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
static int bnx2fc_destroy(struct net_device *net_device);
static int bnx2fc_enable(struct net_device *netdev);
static int bnx2fc_disable(struct net_device *netdev);
@@ -64,7 +65,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb);
static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
-static int bnx2fc_net_config(struct fc_lport *lp);
static int bnx2fc_lport_config(struct fc_lport *lport);
static int bnx2fc_em_config(struct fc_lport *lport);
static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
@@ -78,6 +78,7 @@ static void bnx2fc_destroy_work(struct work_struct *work);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
*phys_dev);
+static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface);
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
@@ -98,6 +99,25 @@ static struct notifier_block bnx2fc_cpu_notifier = {
.notifier_call = bnx2fc_cpu_callback,
};
+static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
+{
+ return ((struct bnx2fc_interface *)
+ ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
+}
+
+/**
+ * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+static void bnx2fc_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb)
+{
+ struct net_device *netdev = bnx2fc_netdev(lport);
+
+ __fcoe_get_lesb(lport, fc_lesb, netdev);
+}
+
static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
{
struct fcoe_percpu_s *bg;
@@ -545,6 +565,14 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
break;
}
}
+
+ if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
+ /* Drop incoming ABTS */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+
if (le32_to_cpu(fr_crc(fp)) !=
~crc32(~0, skb->data, fr_len)) {
if (stats->InvalidCRCCount < 5)
@@ -727,7 +755,7 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
}
-static int bnx2fc_net_config(struct fc_lport *lport)
+static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
{
struct bnx2fc_hba *hba;
struct bnx2fc_interface *interface;
@@ -753,11 +781,16 @@ static int bnx2fc_net_config(struct fc_lport *lport)
bnx2fc_link_speed_update(lport);
if (!lport->vport) {
- wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
+ if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+ wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ 1, 0);
BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
fc_set_wwnn(lport, wwnn);
- wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);
+ if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+ wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ 2, 0);
+
BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
fc_set_wwpn(lport, wwpn);
}
@@ -769,8 +802,8 @@ static void bnx2fc_destroy_timer(unsigned long data)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
- BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - "
- "Destroy compl not received!!\n");
+ printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - "
+ "Destroy compl not received!!\n");
set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
wake_up_interruptible(&hba->destroy_wait);
}
@@ -783,7 +816,7 @@ static void bnx2fc_destroy_timer(unsigned long data)
* @vlan_id: vlan id - associated vlan id with this event
*
* Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and
- * NETDEV_CHANGE_MTU events
+ * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans.
*/
static void bnx2fc_indicate_netevent(void *context, unsigned long event,
u16 vlan_id)
@@ -791,12 +824,11 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
struct fc_lport *lport;
struct fc_lport *vport;
- struct bnx2fc_interface *interface;
+ struct bnx2fc_interface *interface, *tmp;
int wait_for_upload = 0;
u32 link_possible = 1;
- /* Ignore vlans for now */
- if (vlan_id != 0)
+ if (vlan_id != 0 && event != NETDEV_UNREGISTER)
return;
switch (event) {
@@ -820,6 +852,18 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
case NETDEV_CHANGE:
break;
+ case NETDEV_UNREGISTER:
+ if (!vlan_id)
+ return;
+ mutex_lock(&bnx2fc_dev_lock);
+ list_for_each_entry_safe(interface, tmp, &if_list, list) {
+ if (interface->hba == hba &&
+ interface->vlan_id == (vlan_id & VLAN_VID_MASK))
+ __bnx2fc_destroy(interface);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+ return;
+
default:
printk(KERN_ERR PFX "Unkonwn netevent %ld", event);
return;
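[Editor's aside on the NETDEV_UNREGISTER branch above, not part of the patch: __bnx2fc_destroy() unlinks the interface from if_list, so the walk must use the _safe iterator, which keeps a lookahead pointer across the deletion. A small sketch of the idiom with made-up types:]

#include <linux/list.h>
#include <linux/slab.h>

struct iface {
	struct list_head list;
	int vlan_id;
	void *hba;
};

static LIST_HEAD(if_list);

/* delete every matching entry while still walking the list safely */
static void destroy_matching(void *hba, int vlan_id)
{
	struct iface *it, *tmp;

	list_for_each_entry_safe(it, tmp, &if_list, list) {
		if (it->hba == hba && it->vlan_id == vlan_id) {
			list_del(&it->list);
			kfree(it);
		}
	}
}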
@@ -838,8 +882,15 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
bnx2fc_link_speed_update(lport);
if (link_possible && !bnx2fc_link_ok(lport)) {
- printk(KERN_ERR "indicate_netevent: ctlr_link_up\n");
- fcoe_ctlr_link_up(&interface->ctlr);
+ /* Reset max recv frame size to default */
+ fc_set_mfs(lport, BNX2FC_MFS);
+ /*
+ * ctlr link up will only be handled during
+ * enable to avoid sending discovery solicitation
+ * on a stale vlan
+ */
+ if (interface->enabled)
+ fcoe_ctlr_link_up(&interface->ctlr);
} else if (fcoe_ctlr_link_down(&interface->ctlr)) {
mutex_lock(&lport->lp_mutex);
list_for_each_entry(vport, &lport->vports, list)
@@ -995,6 +1046,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
struct bnx2fc_interface *interface = port->priv;
struct net_device *netdev = interface->netdev;
struct fc_lport *vn_port;
+ int rc;
+ char buf[32];
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ printk(KERN_ERR PFX "Failed to create vport, "
+ "WWPN (0x%s) already exists\n",
+ buf);
+ return rc;
+ }
if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
printk(KERN_ERR PFX "vn ports cannot be created on"
@@ -1024,16 +1086,46 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
return 0;
}
+static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport)
+{
+ struct bnx2fc_lport *blport, *tmp;
+
+ spin_lock_bh(&hba->hba_lock);
+ list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
+ if (blport->lport == lport) {
+ list_del(&blport->list);
+ kfree(blport);
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+}
+
static int bnx2fc_vport_destroy(struct fc_vport *vport)
{
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fc_lport *vn_port = vport->dd_data;
struct fcoe_port *port = lport_priv(vn_port);
+ struct bnx2fc_interface *interface = port->priv;
+ struct fc_lport *v_port;
+ bool found = false;
mutex_lock(&n_port->lp_mutex);
+ list_for_each_entry(v_port, &n_port->vports, list)
+ if (v_port->vport == vport) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ mutex_unlock(&n_port->lp_mutex);
+ return -ENOENT;
+ }
list_del(&vn_port->list);
mutex_unlock(&n_port->lp_mutex);
+ bnx2fc_free_vport(interface->hba, port->lport);
+ bnx2fc_port_shutdown(port->lport);
+ bnx2fc_interface_put(interface);
queue_work(bnx2fc_wq, &port->destroy_work);
return 0;
}
@@ -1054,7 +1146,7 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
}
-static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface)
+static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
{
struct net_device *netdev = interface->netdev;
struct net_device *physdev = interface->hba->phys_dev;
@@ -1252,7 +1344,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
- rc = bnx2fc_netdev_setup(interface);
+ rc = bnx2fc_interface_setup(interface);
if (!rc)
return interface;
@@ -1318,7 +1410,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
fc_set_wwpn(lport, vport->port_name);
}
/* Configure netdev and networking properties of the lport */
- rc = bnx2fc_net_config(lport);
+ rc = bnx2fc_net_config(lport, interface->netdev);
if (rc) {
printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
goto lp_config_err;
@@ -1372,7 +1464,7 @@ free_blport:
return NULL;
}
-static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
+static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
{
/* Dont listen for Ethernet packets anymore */
__dev_remove_pack(&interface->fcoe_packet_type);
@@ -1380,10 +1472,11 @@ static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
synchronize_net();
}
-static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
+static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
{
+ struct fc_lport *lport = interface->ctlr.lp;
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_lport *blport, *tmp;
+ struct bnx2fc_hba *hba = interface->hba;
/* Stop the transmit retry timer */
del_timer_sync(&port->timer);
@@ -1391,6 +1484,14 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
+ bnx2fc_net_cleanup(interface);
+
+ bnx2fc_free_vport(hba, lport);
+}
+
+static void bnx2fc_if_destroy(struct fc_lport *lport)
+{
+
/* Free queued packets for the receive thread */
bnx2fc_clean_rx_queue(lport);
@@ -1407,19 +1508,22 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
/* Free memory used by statistical counters */
fc_lport_free_stats(lport);
- spin_lock_bh(&hba->hba_lock);
- list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
- if (blport->lport == lport) {
- list_del(&blport->list);
- kfree(blport);
- }
- }
- spin_unlock_bh(&hba->hba_lock);
-
/* Release Scsi_Host */
scsi_host_put(lport->host);
}
+static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
+{
+ struct fc_lport *lport = interface->ctlr.lp;
+ struct fcoe_port *port = lport_priv(lport);
+
+ bnx2fc_interface_cleanup(interface);
+ bnx2fc_stop(interface);
+ list_del(&interface->list);
+ bnx2fc_interface_put(interface);
+ queue_work(bnx2fc_wq, &port->destroy_work);
+}
+
/**
* bnx2fc_destroy - Destroy a bnx2fc FCoE interface
*
@@ -1433,8 +1537,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
static int bnx2fc_destroy(struct net_device *netdev)
{
struct bnx2fc_interface *interface = NULL;
- struct bnx2fc_hba *hba;
- struct fc_lport *lport;
int rc = 0;
rtnl_lock();
@@ -1447,15 +1549,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
goto netdev_err;
}
- hba = interface->hba;
- bnx2fc_netdev_cleanup(interface);
- lport = interface->ctlr.lp;
- bnx2fc_stop(interface);
- list_del(&interface->list);
destroy_workqueue(interface->timer_work_queue);
- bnx2fc_interface_put(interface);
- bnx2fc_if_destroy(lport, hba);
+ __bnx2fc_destroy(interface);
netdev_err:
mutex_unlock(&bnx2fc_dev_lock);
@@ -1467,22 +1563,13 @@ static void bnx2fc_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fc_lport *lport;
- struct bnx2fc_interface *interface;
- struct bnx2fc_hba *hba;
port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
- interface = port->priv;
- hba = interface->hba;
BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
- bnx2fc_port_shutdown(lport);
- rtnl_lock();
- mutex_lock(&bnx2fc_dev_lock);
- bnx2fc_if_destroy(lport, hba);
- mutex_unlock(&bnx2fc_dev_lock);
- rtnl_unlock();
+ bnx2fc_if_destroy(lport);
}
static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
@@ -1661,6 +1748,7 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
wait_event_interruptible(hba->destroy_wait,
test_bit(BNX2FC_FLAG_DESTROY_CMPL,
&hba->flags));
+ clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
/* This should never happen */
if (signal_pending(current))
flush_signals(current);
@@ -1723,7 +1811,7 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
lport = interface->ctlr.lp;
BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
- if (!bnx2fc_link_ok(lport)) {
+ if (!bnx2fc_link_ok(lport) && interface->enabled) {
BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
fcoe_ctlr_link_up(&interface->ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
@@ -1737,6 +1825,11 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
if (++wait_cnt > 12)
break;
}
+
+ /* Reset max receive frame size to default */
+ if (fc_set_mfs(lport, BNX2FC_MFS))
+ return;
+
fc_lport_init(lport);
fc_fabric_login(lport);
}
@@ -1800,6 +1893,7 @@ static int bnx2fc_disable(struct net_device *netdev)
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
+ interface->enabled = false;
fcoe_ctlr_link_down(&interface->ctlr);
fcoe_clean_pending_queue(interface->ctlr.lp);
}
@@ -1822,8 +1916,10 @@ static int bnx2fc_enable(struct net_device *netdev)
if (!interface || !interface->ctlr.lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
- } else if (!bnx2fc_link_ok(interface->ctlr.lp))
+ } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
fcoe_ctlr_link_up(&interface->ctlr);
+ interface->enabled = true;
+ }
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
@@ -1923,7 +2019,6 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
if (!lport) {
printk(KERN_ERR PFX "Failed to create interface (%s)\n",
netdev->name);
- bnx2fc_netdev_cleanup(interface);
rc = -EINVAL;
goto if_create_err;
}
@@ -1936,8 +2031,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this master N_port */
interface->ctlr.lp = lport;
+ if (!bnx2fc_link_ok(lport)) {
+ fcoe_ctlr_link_up(&interface->ctlr);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
+ }
+
BNX2FC_HBA_DBG(lport, "create: START DISC\n");
bnx2fc_start_disc(interface);
+ interface->enabled = true;
/*
* Release from kref_init in bnx2fc_interface_setup, on success
* lport should be holding a reference taken in bnx2fc_if_create
@@ -1951,6 +2053,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
if_create_err:
destroy_workqueue(interface->timer_work_queue);
ifput_err:
+ bnx2fc_net_cleanup(interface);
bnx2fc_interface_put(interface);
netdev_err:
module_put(THIS_MODULE);
@@ -2017,7 +2120,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
{
struct bnx2fc_hba *hba;
struct bnx2fc_interface *interface, *tmp;
- struct fc_lport *lport;
BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
@@ -2039,18 +2141,10 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
list_del_init(&hba->list);
adapter_count--;
- list_for_each_entry_safe(interface, tmp, &if_list, list) {
+ list_for_each_entry_safe(interface, tmp, &if_list, list)
/* destroy not called yet, move to quiesced list */
- if (interface->hba == hba) {
- bnx2fc_netdev_cleanup(interface);
- bnx2fc_stop(interface);
-
- list_del(&interface->list);
- lport = interface->ctlr.lp;
- bnx2fc_interface_put(interface);
- bnx2fc_if_destroy(lport, hba);
- }
- }
+ if (interface->hba == hba)
+ __bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
bnx2fc_ulp_stop(hba);
@@ -2119,7 +2213,7 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
(void *)p,
"bnx2fc_thread/%d", cpu);
/* bind thread to the cpu */
- if (likely(!IS_ERR(p->iothread))) {
+ if (likely(!IS_ERR(thread))) {
kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
@@ -2131,7 +2225,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
struct bnx2fc_percpu_s *p;
struct task_struct *thread;
struct bnx2fc_work *work, *tmp;
- LIST_HEAD(work_list);
BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
@@ -2143,7 +2236,7 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
/* Free all work in the list */
- list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list);
bnx2fc_process_cq_compl(work->tgt, work->wqe);
kfree(work);
@@ -2376,6 +2469,7 @@ static struct fc_function_template bnx2fc_transport_function = {
.vport_create = bnx2fc_vport_create,
.vport_delete = bnx2fc_vport_destroy,
.vport_disable = bnx2fc_vport_disable,
+ .bsg_request = fc_lport_bsg_request,
};
static struct fc_function_template bnx2fc_vport_xport_function = {
@@ -2409,6 +2503,7 @@ static struct fc_function_template bnx2fc_vport_xport_function = {
.get_fc_host_stats = fc_get_host_stats,
.issue_fc_host_lip = bnx2fc_fcoe_reset,
.terminate_rport_io = fc_rport_terminate_io,
+ .bsg_request = fc_lport_bsg_request,
};
/**
@@ -2438,6 +2533,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
.elsct_send = bnx2fc_elsct_send,
.fcp_abort_io = bnx2fc_abort_io,
.fcp_cleanup = bnx2fc_cleanup,
+ .get_lesb = bnx2fc_get_lesb,
.rport_event_callback = bnx2fc_rport_event_handler,
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 72cfb14acd3..1923a25cb6a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1009,6 +1009,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
u32 cq_cons;
struct fcoe_cqe *cqe;
u32 num_free_sqes = 0;
+ u32 num_cqes = 0;
u16 wqe;
/*
@@ -1058,10 +1059,11 @@ unlock:
wake_up_process(fps->iothread);
else
bnx2fc_process_cq_compl(tgt, wqe);
+ num_free_sqes++;
}
cqe++;
tgt->cq_cons_idx++;
- num_free_sqes++;
+ num_cqes++;
if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
tgt->cq_cons_idx = 0;
@@ -1070,8 +1072,10 @@ unlock:
1 - tgt->cq_curr_toggle_bit;
}
}
- if (num_free_sqes) {
- bnx2fc_arm_cq(tgt);
+ if (num_cqes) {
+ /* Arm CQ only if doorbell is mapped */
+ if (tgt->ctx_base)
+ bnx2fc_arm_cq(tgt);
atomic_add(num_free_sqes, &tgt->free_sqes);
}
spin_unlock_bh(&tgt->cq_lock);
@@ -1739,11 +1743,13 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
/* Init state to NORMAL */
task->txwr_rxrd.const_ctx.init_flags |= task_type <<
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
- if (dev_type == TYPE_TAPE)
+ if (dev_type == TYPE_TAPE) {
task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_TAPE <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
- else
+ io_req->rec_retry = 0;
+ } else
task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 6cc3789075b..84a78af83f9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -17,7 +17,7 @@
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
-static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -1103,7 +1103,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct bnx2fc_cmd *io_req;
struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
struct bnx2fc_rport *tgt;
+ int logo_issued;
+ int wait_cnt = 0;
int rc = FAILED;
@@ -1192,8 +1195,40 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
} else {
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"already in abts processing\n", io_req->xid);
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ bnx2fc_initiate_cleanup(io_req);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ do {
+ msleep(BNX2FC_RELOGIN_WAIT_TIME);
+ /*
+ * If session not recovered, let SCSI-ml
+ * escalate error recovery.
+ */
+ if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
+ return FAILED;
+ } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
+ &tgt->flags));
+ }
return SUCCESS;
}
if (rc == FAILED) {
@@ -1251,7 +1286,6 @@ void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
seq_clnp_req->xid);
goto free_cb_arg;
}
- kref_get(&orig_io_req->refcount);
spin_unlock_bh(&tgt->tgt_lock);
rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
@@ -1276,6 +1310,8 @@ void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
io_req->refcount.refcount.counter, io_req->cmd_type);
bnx2fc_scsi_done(io_req, DID_ERROR);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
}
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
@@ -1569,6 +1605,8 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct scsi_cmnd *sc = io_req->sc_cmd;
struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
struct scatterlist *sg;
@@ -1580,7 +1618,8 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
u64 addr;
int i;
- sg_count = scsi_dma_map(sc);
+ sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
scsi_for_each_sg(sc, sg, sg_count, i) {
sg_len = sg_dma_len(sg);
addr = sg_dma_address(sg);
@@ -1605,20 +1644,24 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
return bd_count;
}
-static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
+static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
struct scsi_cmnd *sc = io_req->sc_cmd;
struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
int bd_count;
- if (scsi_sg_count(sc))
+ if (scsi_sg_count(sc)) {
bd_count = bnx2fc_map_sg(io_req);
- else {
+ if (bd_count == 0)
+ return -ENOMEM;
+ } else {
bd_count = 0;
bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
bd[0].buf_len = bd[0].flags = 0;
}
io_req->bd_tbl->bd_valid = bd_count;
+
+ return 0;
}
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
@@ -1790,12 +1833,6 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
tgt = (struct bnx2fc_rport *)&rp[1];
if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
- if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) {
- sc_cmd->result = DID_NO_CONNECT << 16;
- sc_cmd->scsi_done(sc_cmd);
- return 0;
-
- }
/*
* Session is not offloaded yet. Let SCSI-ml retry
* the command.
@@ -1946,7 +1983,13 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
xid = io_req->xid;
/* Build buffer descriptor list for firmware from sg list */
- bnx2fc_build_bd_list_from_sg(io_req);
+ if (bnx2fc_build_bd_list_from_sg(io_req)) {
+ printk(KERN_ERR PFX "BD list creation failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d5311b577cc..c1800b53127 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -76,7 +76,7 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
if (rval) {
printk(KERN_ERR PFX "Failed to allocate conn id for "
"port_id (%6x)\n", rport->port_id);
- goto ofld_err;
+ goto tgt_init_err;
}
/* Allocate session resources */
@@ -134,18 +134,17 @@ retry_ofld:
/* upload will take care of cleaning up sess resc */
lport->tt.rport_logoff(rdata);
}
- /* Arm CQ */
- bnx2fc_arm_cq(tgt);
return;
ofld_err:
/* couldn't offload the session. log off from this rport */
BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
- lport->tt.rport_logoff(rdata);
/* Free session resources */
bnx2fc_free_session_resc(hba, tgt);
+tgt_init_err:
if (tgt->fcoe_conn_id != -1)
bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ lport->tt.rport_logoff(rdata);
}
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
@@ -624,7 +623,6 @@ static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
/* called with hba mutex held */
spin_lock_bh(&hba->hba_lock);
hba->tgt_ofld_list[conn_id] = NULL;
- hba->next_conn_id = conn_id;
spin_unlock_bh(&hba->hba_lock);
}
@@ -791,8 +789,6 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
return 0;
mem_alloc_failure:
- bnx2fc_free_session_resc(hba, tgt);
- bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
return -ENOMEM;
}
@@ -807,14 +803,14 @@ mem_alloc_failure:
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
struct bnx2fc_rport *tgt)
{
- BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+ void __iomem *ctx_base_ptr;
- if (tgt->ctx_base) {
- iounmap(tgt->ctx_base);
- tgt->ctx_base = NULL;
- }
+ BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
spin_lock_bh(&tgt->cq_lock);
+ ctx_base_ptr = tgt->ctx_base;
+ tgt->ctx_base = NULL;
+
/* Free LCQ */
if (tgt->lcq) {
dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
@@ -868,4 +864,7 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
tgt->sq = NULL;
}
spin_unlock_bh(&tgt->cq_lock);
+
+ if (ctx_base_ptr)
+ iounmap(ctx_base_ptr);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index cffd4d75df5..d1e69719097 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2177,6 +2177,59 @@ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
return 0;
}
+static mode_t bnx2i_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
/*
* 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
@@ -2207,37 +2260,12 @@ struct iscsi_transport bnx2i_iscsi_transport = {
CAP_MULTI_R2T | CAP_DATADGST |
CAP_DATA_PATH_OFFLOAD |
CAP_TEXT_NEGO,
- .param_mask = ISCSI_MAX_RECV_DLENGTH |
- ISCSI_MAX_XMIT_DLENGTH |
- ISCSI_HDRDGST_EN |
- ISCSI_DATADGST_EN |
- ISCSI_INITIAL_R2T_EN |
- ISCSI_MAX_R2T |
- ISCSI_IMM_DATA_EN |
- ISCSI_FIRST_BURST |
- ISCSI_MAX_BURST |
- ISCSI_PDU_INORDER_EN |
- ISCSI_DATASEQ_INORDER_EN |
- ISCSI_ERL |
- ISCSI_CONN_PORT |
- ISCSI_CONN_ADDRESS |
- ISCSI_EXP_STATSN |
- ISCSI_PERSISTENT_PORT |
- ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO |
- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
- ISCSI_HOST_NETDEV_NAME,
.create_session = bnx2i_session_create,
.destroy_session = bnx2i_session_destroy,
.create_conn = bnx2i_conn_create,
.bind_conn = bnx2i_conn_bind,
.destroy_conn = bnx2i_conn_destroy,
+ .attr_is_visible = bnx2i_attr_is_visible,
.set_param = iscsi_set_param,
.get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
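The bnx2i hunk above (and the cxgbi hunks below) drop the static param_mask/host_param_mask bit fields in favour of an .attr_is_visible() callback on struct iscsi_transport. A minimal sketch of how a transport-class caller could consult the new hook; the wrapper name is hypothetical and only the callback shape is taken from the patch:

	/* Hypothetical wrapper: derive sysfs visibility from the callback
	 * instead of testing a param_mask bit.
	 */
	static mode_t iscsi_param_visibility(struct iscsi_transport *t,
					     int param_type, int param)
	{
		if (t->attr_is_visible)
			return t->attr_is_visible(param_type, param);
		return 0;	/* unsupported attributes stay hidden */
	}

With bnx2i_attr_is_visible() above, ISCSI_PARAM / ISCSI_PARAM_CONN_PORT resolves to S_IRUGO, while any parameter missing from the switch returns 0 and its attribute is not exposed.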
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 1242c7c04a0..000294a9df8 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -105,25 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
- .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
- ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
- ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
- ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
- ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
- ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
- ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
- ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
- ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO |
- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
- ISCSI_HOST_INITIATOR_NAME |
- ISCSI_HOST_NETDEV_NAME,
+ .attr_is_visible = cxgbi_attr_is_visible,
.get_host_param = cxgbi_get_host_param,
.set_host_param = cxgbi_set_host_param,
/* session management */
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 31c79bde697..ac7a9b1e3e2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -106,25 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
CAP_DATADGST | CAP_DIGEST_OFFLOAD |
CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
- .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
- ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
- ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
- ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
- ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
- ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
- ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
- ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
- ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO |
- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
- ISCSI_HOST_INITIATOR_NAME |
- ISCSI_HOST_NETDEV_NAME,
+ .attr_is_visible = cxgbi_attr_is_visible,
.get_host_param = cxgbi_get_host_param,
.set_host_param = cxgbi_set_host_param,
/* session management */
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 1c1329bc77c..c10f74a566f 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -25,6 +25,7 @@
#include <net/dst.h>
#include <net/route.h>
#include <linux/inetdevice.h> /* ip_dev_find */
+#include <linux/module.h>
#include <net/tcp.h>
static unsigned int dbg_level;
@@ -2568,6 +2569,62 @@ void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
+mode_t cxgbi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
+
static int __init libcxgbi_init_module(void)
{
sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 3a25b1187c1..20c88279c7a 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -709,6 +709,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *);
void cxgbi_cleanup_task(struct iscsi_task *task);
+mode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 0119b814779..23149b9e297 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -22,6 +22,7 @@
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
@@ -60,6 +61,46 @@ static struct scsi_device_handler *get_device_handler_by_idx(int idx)
}
/*
+ * device_handler_match_function - Match a device handler to a device
+ * @sdev - SCSI device to be tested
+ *
+ * Tests @sdev against the match function of all registered device handlers.
+ * Returns the found device handler or NULL if not found.
+ */
+static struct scsi_device_handler *
+device_handler_match_function(struct scsi_device *sdev)
+{
+ struct scsi_device_handler *tmp_dh, *found_dh = NULL;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
+ if (tmp_dh->match && tmp_dh->match(sdev)) {
+ found_dh = tmp_dh;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+ return found_dh;
+}
+
+/*
+ * device_handler_match_devlist - Match a device handler to a device
+ * @sdev - SCSI device to be tested
+ *
+ * Tests @sdev against all device handlers registered in the devlist.
+ * Returns the found device handler or NULL if not found.
+ */
+static struct scsi_device_handler *
+device_handler_match_devlist(struct scsi_device *sdev)
+{
+ int idx;
+
+ idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
+ SCSI_DEVINFO_DH);
+ return get_device_handler_by_idx(idx);
+}
+
+/*
* device_handler_match - Attach a device handler to a device
* @scsi_dh - The device handler to match against or NULL
* @sdev - SCSI device to be tested against @scsi_dh
@@ -72,12 +113,11 @@ static struct scsi_device_handler *
device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device *sdev)
{
- struct scsi_device_handler *found_dh = NULL;
- int idx;
+ struct scsi_device_handler *found_dh;
- idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
- SCSI_DEVINFO_DH);
- found_dh = get_device_handler_by_idx(idx);
+ found_dh = device_handler_match_function(sdev);
+ if (!found_dh)
+ found_dh = device_handler_match_devlist(sdev);
if (scsi_dh && found_dh != scsi_dh)
found_dh = NULL;
@@ -151,6 +191,10 @@ store_dh_state(struct device *dev, struct device_attribute *attr,
struct scsi_device_handler *scsi_dh;
int err = -EINVAL;
+ if (sdev->sdev_state == SDEV_CANCEL ||
+ sdev->sdev_state == SDEV_DEL)
+ return -ENODEV;
+
if (!sdev->scsi_dh_data) {
/*
* Attach to a device handler
@@ -327,7 +371,7 @@ int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
- for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
scsi_dev_info_list_add_keyed(0,
scsi_dh->devlist[i].vendor,
scsi_dh->devlist[i].model,
@@ -360,7 +404,7 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
- for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
scsi_dh->devlist[i].model,
SCSI_DEVINFO_DH);
@@ -398,7 +442,15 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
spin_lock_irqsave(q->queue_lock, flags);
sdev = q->queuedata;
- if (sdev && sdev->scsi_dh_data)
+ if (!sdev) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ err = SCSI_DH_NOSYS;
+ if (fn)
+ fn(data, err);
+ return err;
+ }
+
+ if (sdev->scsi_dh_data)
scsi_dh = sdev->scsi_dh_data->scsi_dh;
dev = get_device(&sdev->sdev_gendev);
if (!scsi_dh || !dev ||
@@ -468,7 +520,7 @@ int scsi_dh_handler_exist(const char *name)
EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
/*
- * scsi_dh_handler_attach - Attach device handler
+ * scsi_dh_attach - Attach device handler
* @sdev - sdev the handler should be attached to
* @name - name of the handler to attach
*/
@@ -498,7 +550,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name)
EXPORT_SYMBOL_GPL(scsi_dh_attach);
/*
- * scsi_dh_handler_detach - Detach device handler
+ * scsi_dh_detach - Detach device handler
* @sdev - sdev the handler should be detached from
*
* This function will detach the device handler only
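The scsi_dh.c hunks above introduce a per-handler .match() callback, consulted by device_handler_match_function() before the keyed devlist lookup, and make .devlist optional (hence the added NULL checks in the register/unregister loops). A minimal hedged sketch of a handler relying on the new hook; the names below are illustrative, not from the patch, and the ALUA hunks further down adopt the same shape via alua_match():

	/* Sketch only: a handler detected by predicate rather than by
	 * enumerating vendor/model strings; .devlist is deliberately absent.
	 */
	static bool example_match(struct scsi_device *sdev)
	{
		return scsi_device_tpgs(sdev) != 0;
	}

	static struct scsi_device_handler example_dh = {
		.name	= "example",
		.module	= THIS_MODULE,
		.match	= example_match,
	};

Because device_handler_match() tries the match functions first and only then falls back to the devlist, such a handler attaches to any device satisfying its predicate without maintaining a vendor/model table.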
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6fec9fe5dc3..4ef021291a4 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -21,6 +21,7 @@
*/
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
@@ -128,43 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev,
}
/*
- * submit_std_inquiry - Issue a standard INQUIRY command
- * @sdev: sdev the command should be send to
- */
-static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
-{
- struct request *rq;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
- if (!rq)
- goto done;
-
- /* Prepare the command. */
- rq->cmd[0] = INQUIRY;
- rq->cmd[1] = 0;
- rq->cmd[2] = 0;
- rq->cmd[4] = ALUA_INQUIRY_SIZE;
- rq->cmd_len = COMMAND_SIZE(INQUIRY);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = h->senselen = 0;
-
- err = blk_execute_rq(rq->q, NULL, rq, 1);
- if (err == -EIO) {
- sdev_printk(KERN_INFO, sdev,
- "%s: std inquiry failed with %x\n",
- ALUA_DH_NAME, rq->errors);
- h->senselen = rq->sense_len;
- err = SCSI_DH_IO;
- }
- blk_put_request(rq);
-done:
- return err;
-}
-
-/*
* submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
* @sdev: sdev the command should be sent to
*/
@@ -338,23 +302,17 @@ static unsigned submit_stpg(struct alua_dh_data *h)
}
/*
- * alua_std_inquiry - Evaluate standard INQUIRY command
+ * alua_check_tpgs - Evaluate TPGS setting
* @sdev: device to be checked
*
- * Just extract the TPGS setting to find out if ALUA
+ * Examine the TPGS setting of the sdev to find out if ALUA
* is supported.
*/
-static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h)
{
- int err;
-
- err = submit_std_inquiry(sdev, h);
-
- if (err != SCSI_DH_OK)
- return err;
+ int err = SCSI_DH_OK;
- /* Check TPGS setting */
- h->tpgs = (h->inq[5] >> 4) & 0x3;
+ h->tpgs = scsi_device_tpgs(sdev);
switch (h->tpgs) {
case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
sdev_printk(KERN_INFO, sdev,
@@ -508,27 +466,28 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry.
*/
return ADD_TO_MLQUEUE;
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
/*
* ALUA state changed
*/
return ADD_TO_MLQUEUE;
- }
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07)
/*
* Implicit ALUA state transition failed
*/
return ADD_TO_MLQUEUE;
- }
- if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) {
+ if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
+ /*
+ * Inquiry data has changed
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
/*
* REPORTED_LUNS_DATA_HAS_CHANGED is reported
* when switching controllers on targets like
* Intel Multi-Flex. We can just retry.
*/
return ADD_TO_MLQUEUE;
- }
-
break;
}
@@ -547,9 +506,9 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
{
struct scsi_sense_hdr sense_hdr;
int len, k, off, valid_states = 0;
- char *ucp;
+ unsigned char *ucp;
unsigned err;
- unsigned long expiry, interval = 10;
+ unsigned long expiry, interval = 1000;
expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
retry:
@@ -610,7 +569,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
case TPGS_STATE_TRANSITIONING:
if (time_before(jiffies, expiry)) {
/* State transition, retry */
- interval *= 10;
+ interval *= 2;
msleep(interval);
goto retry;
}
@@ -642,7 +601,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
int err;
- err = alua_std_inquiry(sdev, h);
+ err = alua_check_tpgs(sdev, h);
if (err != SCSI_DH_OK)
goto out;
@@ -674,11 +633,9 @@ static int alua_activate(struct scsi_device *sdev,
struct alua_dh_data *h = get_alua_data(sdev);
int err = SCSI_DH_OK;
- if (h->group_id != -1) {
- err = alua_rtpg(sdev, h);
- if (err != SCSI_DH_OK)
- goto out;
- }
+ err = alua_rtpg(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
if (h->tpgs & TPGS_MODE_EXPLICIT &&
h->state != TPGS_STATE_OPTIMIZED &&
@@ -720,23 +677,10 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
}
-static const struct scsi_dh_devlist alua_dev_list[] = {
- {"HP", "MSA VOLUME" },
- {"HP", "HSV101" },
- {"HP", "HSV111" },
- {"HP", "HSV200" },
- {"HP", "HSV210" },
- {"HP", "HSV300" },
- {"IBM", "2107900" },
- {"IBM", "2145" },
- {"Pillar", "Axiom" },
- {"Intel", "Multi-Flex"},
- {"NETAPP", "LUN"},
- {"NETAPP", "LUN C-Mode"},
- {"AIX", "NVDISK"},
- {"Promise", "VTrak"},
- {NULL, NULL}
-};
+static bool alua_match(struct scsi_device *sdev)
+{
+ return (scsi_device_tpgs(sdev) != 0);
+}
static int alua_bus_attach(struct scsi_device *sdev);
static void alua_bus_detach(struct scsi_device *sdev);
@@ -744,12 +688,12 @@ static void alua_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler alua_dh = {
.name = ALUA_DH_NAME,
.module = THIS_MODULE,
- .devlist = alua_dev_list,
.attach = alua_bus_attach,
.detach = alua_bus_detach,
.prep_fn = alua_prep_fn,
.check_sense = alua_check_sense,
.activate = alua_activate,
+ .match = alua_match,
};
/*
@@ -791,6 +735,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
return 0;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 48441f6908a..591186cf189 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -21,6 +21,7 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index b479f1eef96..0f86a18b157 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -22,6 +22,7 @@
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 27c9d65d54a..1d312792006 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -1,5 +1,5 @@
/*
- * Engenio/LSI RDAC SCSI Device Handler
+ * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
*
* Copyright (C) 2005 Mike Christie. All rights reserved.
* Copyright (C) Chandra Seetharaman, IBM Corp. 2007
@@ -24,6 +24,7 @@
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5
@@ -795,6 +796,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "3526"},
{"SGI", "TP9400"},
{"SGI", "TP9500"},
+ {"SGI", "TP9700"},
{"SGI", "IS"},
{"STK", "OPENstorage D280"},
{"SUN", "CSM200_R"},
@@ -814,6 +816,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"SUN", "CSM100_R_FC"},
{"SUN", "STK6580_6780"},
{"SUN", "SUN_6180"},
+ {"SUN", "ArrayStorage"},
{NULL, NULL},
};
@@ -945,7 +948,7 @@ static void __exit rdac_exit(void)
module_init(rdac_init);
module_exit(rdac_exit);
-MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
+MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a1c0ddd53aa..cefbe44bb84 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");
/* Performance tuning parameters for fcoe */
-static unsigned int fcoe_ddp_min;
+static unsigned int fcoe_ddp_min = 4096;
module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
"Direct Data Placement (DDP).");
@@ -137,7 +137,6 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static int fcoe_validate_vport_create(struct fc_vport *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -280,6 +279,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* use the first one for SPMA */
real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
vlan_dev_real_dev(netdev) : netdev;
+ fcoe->realdev = real_dev;
rcu_read_lock();
for_each_dev_addr(real_dev, ha) {
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
@@ -580,23 +580,6 @@ static int fcoe_lport_config(struct fc_lport *lport)
}
/**
- * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
- * @netdev: the associated net device
- * @wwn: the output WWN
- * @type: the type of WWN (WWPN or WWNN)
- *
- * Returns: 0 for success
- */
-static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
-{
- const struct net_device_ops *ops = netdev->netdev_ops;
-
- if (ops->ndo_fcoe_get_wwn)
- return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
- return -EINVAL;
-}
-
-/**
* fcoe_netdev_features_change - Updates the lport's offload flags based
* on the LLD netdev's FCoE feature flags
*/
@@ -1134,8 +1117,9 @@ static void fcoe_percpu_thread_create(unsigned int cpu)
p = &per_cpu(fcoe_percpu, cpu);
- thread = kthread_create(fcoe_percpu_receive_thread,
- (void *)p, "fcoethread/%d", cpu);
+ thread = kthread_create_on_node(fcoe_percpu_receive_thread,
+ (void *)p, cpu_to_node(cpu),
+ "fcoethread/%d", cpu);
if (likely(!IS_ERR(thread))) {
kthread_bind(thread, cpu);
@@ -1538,7 +1522,13 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
- skb->dev = fcoe->netdev;
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
+ fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
+ skb->vlan_tci = VLAN_TAG_PRESENT |
+ vlan_dev_vlan_id(fcoe->netdev);
+ skb->dev = fcoe->realdev;
+ } else
+ skb->dev = fcoe->netdev;
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
@@ -2357,14 +2347,11 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
goto done;
mac = fr_cb(fp)->granted_mac;
- if (is_zero_ether_addr(mac)) {
- /* pre-FIP */
- if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
- fc_frame_free(fp);
- return;
- }
- }
- fcoe_update_src_mac(lport, mac);
+ /* pre-FIP */
+ if (is_zero_ether_addr(mac))
+ fcoe_ctlr_recv_flogi(fip, lport, fp);
+ if (!is_zero_ether_addr(mac))
+ fcoe_update_src_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
}
@@ -2446,7 +2433,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
rc = fcoe_validate_vport_create(vport);
if (rc) {
- wwn_to_str(vport->port_name, buf, sizeof(buf));
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
printk(KERN_ERR "fcoe: Failed to create vport, "
"WWPN (0x%s) already exists\n",
buf);
@@ -2555,28 +2542,9 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
static void fcoe_get_lesb(struct fc_lport *lport,
struct fc_els_lesb *fc_lesb)
{
- unsigned int cpu;
- u32 lfc, vlfc, mdac;
- struct fcoe_dev_stats *devst;
- struct fcoe_fc_els_lesb *lesb;
- struct rtnl_link_stats64 temp;
struct net_device *netdev = fcoe_netdev(lport);
- lfc = 0;
- vlfc = 0;
- mdac = 0;
- lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
- memset(lesb, 0, sizeof(*lesb));
- for_each_possible_cpu(cpu) {
- devst = per_cpu_ptr(lport->dev_stats, cpu);
- lfc += devst->LinkFailureCount;
- vlfc += devst->VLinkFailureCount;
- mdac += devst->MissDiscAdvCount;
- }
- lesb->lesb_link_fail = htonl(lfc);
- lesb->lesb_vlink_fail = htonl(vlfc);
- lesb->lesb_miss_fka = htonl(mdac);
- lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
+ __fcoe_get_lesb(lport, fc_lesb, netdev);
}
/**
@@ -2600,49 +2568,3 @@ static void fcoe_set_port_id(struct fc_lport *lport,
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
}
-
-/**
- * fcoe_validate_vport_create() - Validate a vport before creating it
- * @vport: NPIV port to be created
- *
- * This routine is meant to add validation for a vport before creating it
- * via fcoe_vport_create().
- * Current validations are:
- * - WWPN supplied is unique for given lport
- *
- *
-*/
-static int fcoe_validate_vport_create(struct fc_vport *vport)
-{
- struct Scsi_Host *shost = vport_to_shost(vport);
- struct fc_lport *n_port = shost_priv(shost);
- struct fc_lport *vn_port;
- int rc = 0;
- char buf[32];
-
- mutex_lock(&n_port->lp_mutex);
-
- wwn_to_str(vport->port_name, buf, sizeof(buf));
- /* Check if the wwpn is not same as that of the lport */
- if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
- FCOE_DBG("vport WWPN 0x%s is same as that of the "
- "base port WWPN\n", buf);
- rc = -EINVAL;
- goto out;
- }
-
- /* Check if there is any existing vport with same wwpn */
- list_for_each_entry(vn_port, &n_port->vports, list) {
- if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
- FCOE_DBG("vport with given WWPN 0x%s already "
- "exists\n", buf);
- rc = -EINVAL;
- break;
- }
- }
-
-out:
- mutex_unlock(&n_port->lp_mutex);
-
- return rc;
-}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index c4a93993c0c..6c6884bcf84 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -80,6 +80,7 @@ do { \
struct fcoe_interface {
struct list_head list;
struct net_device *netdev;
+ struct net_device *realdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
struct fcoe_ctlr ctlr;
@@ -99,14 +100,4 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
-static inline void wwn_to_str(u64 wwn, char *buf, int len)
-{
- u8 wwpn[8];
-
- u64_to_wwn(wwn, wwpn);
- snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
- wwpn[0], wwpn[1], wwpn[2], wwpn[3],
- wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
-}
-
#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index dac8e39a518..bd97b2273f2 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -83,6 +83,107 @@ static struct notifier_block libfcoe_notifier = {
.notifier_call = libfcoe_device_notification,
};
+void __fcoe_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb,
+ struct net_device *netdev)
+{
+ unsigned int cpu;
+ u32 lfc, vlfc, mdac;
+ struct fcoe_dev_stats *devst;
+ struct fcoe_fc_els_lesb *lesb;
+ struct rtnl_link_stats64 temp;
+
+ lfc = 0;
+ vlfc = 0;
+ mdac = 0;
+ lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
+ memset(lesb, 0, sizeof(*lesb));
+ for_each_possible_cpu(cpu) {
+ devst = per_cpu_ptr(lport->dev_stats, cpu);
+ lfc += devst->LinkFailureCount;
+ vlfc += devst->VLinkFailureCount;
+ mdac += devst->MissDiscAdvCount;
+ }
+ lesb->lesb_link_fail = htonl(lfc);
+ lesb->lesb_vlink_fail = htonl(vlfc);
+ lesb->lesb_miss_fka = htonl(mdac);
+ lesb->lesb_fcs_error =
+ htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
+}
+EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+
+void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
+{
+ u8 wwpn[8];
+
+ u64_to_wwn(wwn, wwpn);
+ snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+ wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+ wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+EXPORT_SYMBOL_GPL(fcoe_wwn_to_str);
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ * - WWPN supplied is unique for given lport
+ */
+int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+ int rc = 0;
+ char buf[32];
+
+ mutex_lock(&n_port->lp_mutex);
+
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ /* Check if the wwpn is not same as that of the lport */
+ if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+ LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the "
+ "base port WWPN\n", buf);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check if there is any existing vport with same wwpn */
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+ LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s "
+ "already exists\n", buf);
+ rc = -EINVAL;
+ break;
+ }
+ }
+out:
+ mutex_unlock(&n_port->lp_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fcoe_validate_vport_create);
+
+/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+ const struct net_device_ops *ops = netdev->netdev_ops;
+
+ if (ops->ndo_fcoe_get_wwn)
+ return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_wwn);
+
/**
* fcoe_fc_crc() - Calculates the CRC for a given frame
* @fp: The frame to be checksummed
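A short usage sketch for fcoe_wwn_to_str(), which the hunks above move out of fcoe.h and export; the wrapper function is illustrative, and the 32-byte buffer mirrors the callers shown in the patch (16 hex digits plus terminator):

	/* Illustrative only: format and log a WWPN via the exported helper. */
	static void example_log_wwpn(u64 wwpn)
	{
		char buf[32];

		fcoe_wwn_to_str(wwpn, buf, sizeof(buf));
		printk(KERN_INFO "fcoe: WWPN 0x%s\n", buf);
	}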
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 50bb54150a7..488fbc64865 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zorro.h>
+#include <linux/module.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 4f7a5829ea4..351dc0b86fa 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -286,6 +286,7 @@ static void scsi_host_dev_release(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
+ struct request_queue *q;
scsi_proc_hostdir_rm(shost->hostt);
@@ -293,9 +294,11 @@ static void scsi_host_dev_release(struct device *dev)
kthread_stop(shost->ehandler);
if (shost->work_q)
destroy_workqueue(shost->work_q);
- if (shost->uspace_req_q) {
- kfree(shost->uspace_req_q->queuedata);
- scsi_free_queue(shost->uspace_req_q);
+ q = shost->uspace_req_q;
+ if (q) {
+ kfree(q->queuedata);
+ q->queuedata = NULL;
+ scsi_free_queue(q);
}
scsi_destroy_command_freelist(shost);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index b200b736b00..e76107b2ade 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
+#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
@@ -127,6 +128,10 @@ static struct board_type products[] = {
static int number_of_controllers;
+static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
+static spinlock_t lockup_detector_lock;
+static struct task_struct *hpsa_lockup_detector;
+
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -484,6 +489,7 @@ static struct scsi_host_template hpsa_driver_template = {
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
+ .max_sectors = 8192,
};
@@ -566,16 +572,16 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
* assumes h->devlock is held
*/
int i, found = 0;
- DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
+ DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
- memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
+ memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
set_bit(h->dev[i]->target, lun_taken);
}
- for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
+ for (i = 0; i < HPSA_MAX_DEVICES; i++) {
if (!test_bit(i, lun_taken)) {
/* *bus = 1; */
*target = i;
@@ -598,7 +604,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
unsigned char addr1[8], addr2[8];
struct hpsa_scsi_dev_t *sd;
- if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
+ if (n >= HPSA_MAX_DEVICES) {
dev_err(&h->pdev->dev, "too many devices, some will be "
"inaccessible.\n");
return -1;
@@ -673,7 +679,7 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
/* assumes h->devlock is held */
- BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
removed[*nremoved] = h->dev[entry];
(*nremoved)++;
@@ -702,7 +708,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
int i;
struct hpsa_scsi_dev_t *sd;
- BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
sd = h->dev[entry];
removed[*nremoved] = h->dev[entry];
@@ -814,10 +820,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
int nadded, nremoved;
struct Scsi_Host *sh = NULL;
- added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
- GFP_KERNEL);
- removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
- GFP_KERNEL);
+ added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
+ removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
if (!added || !removed) {
dev_warn(&h->pdev->dev, "out of memory in "
@@ -1338,6 +1342,22 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
wait_for_completion(&wait);
}
+static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ unsigned long flags;
+
+ /* If controller lockup detected, fake a hardware error. */
+ spin_lock_irqsave(&h->lock, flags);
+ if (unlikely(h->lockup_detected)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ } else {
+ spin_unlock_irqrestore(&h->lock, flags);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ }
+}
+
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
struct CommandList *c, int data_direction)
{
@@ -1735,7 +1755,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
if (is_scsi_rev_5(h))
return 0; /* p1210m doesn't need to do this. */
-#define MAX_MSA2XXX_ENCLOSURES 32
if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
"enclosures exceeded. Check your hardware "
@@ -1846,8 +1865,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
int raid_ctlr_position;
DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
- currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
- GFP_KERNEL);
+ currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
@@ -1870,6 +1888,13 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
+ if (i >= HPSA_MAX_DEVICES) {
+ dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
+ " %d devices ignored.\n", HPSA_MAX_DEVICES,
+ ndevs_to_allocate - HPSA_MAX_DEVICES);
+ break;
+ }
+
currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
if (!currentsd[i]) {
dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
@@ -1956,7 +1981,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
default:
break;
}
- if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+ if (ncurrent >= HPSA_MAX_DEVICES)
break;
}
adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
@@ -2048,8 +2073,14 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
}
memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
- /* Need a lock as this is being allocated from the pool */
spin_lock_irqsave(&h->lock, flags);
+ if (unlikely(h->lockup_detected)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ cmd->result = DID_ERROR << 16;
+ done(cmd);
+ return 0;
+ }
+ /* Need a lock as this is being allocated from the pool */
c = cmd_alloc(h);
spin_unlock_irqrestore(&h->lock, flags);
if (c == NULL) { /* trouble... */
@@ -2601,7 +2632,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Len = iocommand.buf_size;
c->SG[0].Ext = 0; /* we are not chaining*/
}
- hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (iocommand.buf_size > 0)
hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
@@ -2724,7 +2755,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[i].Ext = 0;
}
}
- hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (sg_used)
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
@@ -2872,6 +2903,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@@ -3091,6 +3124,7 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
if (interrupt_not_for_us(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
+ h->last_intr_timestamp = get_jiffies_64();
while (interrupt_pending(h)) {
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY)
@@ -3110,6 +3144,7 @@ static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
+ h->last_intr_timestamp = get_jiffies_64();
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY)
raw_tag = next_command(h);
@@ -3126,6 +3161,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
if (interrupt_not_for_us(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
+ h->last_intr_timestamp = get_jiffies_64();
while (interrupt_pending(h)) {
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY) {
@@ -3146,6 +3182,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
u32 raw_tag;
spin_lock_irqsave(&h->lock, flags);
+ h->last_intr_timestamp = get_jiffies_64();
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY) {
if (hpsa_tag_contains_index(raw_tag))
@@ -3300,6 +3337,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= PCI_D0;
pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+ /*
+ * The P600 requires a small delay when changing states.
+ * Otherwise we may think the board did not reset and we bail.
+ * This is for kdump only and is particular to the P600.
+ */
+ msleep(500);
}
return 0;
}
@@ -3438,10 +3482,8 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
} else {
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
if (use_doorbell) {
- dev_warn(&pdev->dev, "Controller claims that "
- "'Bit 2 doorbell reset' is "
- "supported, but not 'bit 5 doorbell reset'. "
- "Firmware update is recommended.\n");
+ dev_warn(&pdev->dev, "Soft reset not supported. "
+ "Firmware update is required.\n");
rc = -ENOTSUPP; /* try soft reset */
goto unmap_cfgtable;
}
@@ -4085,6 +4127,149 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
kfree(h);
}
+static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
+{
+ assert_spin_locked(&lockup_detector_lock);
+ if (!hpsa_lockup_detector)
+ return;
+ if (h->lockup_detected)
+ return; /* already stopped the lockup detector */
+ list_del(&h->lockup_list);
+}
+
+/* Called when controller lockup detected. */
+static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
+{
+ struct CommandList *c = NULL;
+
+ assert_spin_locked(&h->lock);
+ /* Mark all outstanding commands as failed and complete them. */
+ while (!list_empty(list)) {
+ c = list_entry(list->next, struct CommandList, list);
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ finish_cmd(c, c->Header.Tag.lower);
+ }
+}
+
+static void controller_lockup_detected(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ assert_spin_locked(&lockup_detector_lock);
+ remove_ctlr_from_lockup_detector_list(h);
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ spin_lock_irqsave(&h->lock, flags);
+ h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ spin_unlock_irqrestore(&h->lock, flags);
+ dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
+ h->lockup_detected);
+ pci_disable_device(h->pdev);
+ spin_lock_irqsave(&h->lock, flags);
+ fail_all_cmds_on_list(h, &h->cmpQ);
+ fail_all_cmds_on_list(h, &h->reqQ);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
+#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
+
+static void detect_controller_lockup(struct ctlr_info *h)
+{
+ u64 now;
+ u32 heartbeat;
+ unsigned long flags;
+
+ assert_spin_locked(&lockup_detector_lock);
+ now = get_jiffies_64();
+ /* If we've received an interrupt recently, we're ok. */
+ if (time_after64(h->last_intr_timestamp +
+ (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+ return;
+
+ /*
+ * If we've already checked the heartbeat recently, we're ok.
+ * This could happen if someone sends us a signal. We
+ * otherwise don't care about signals in this thread.
+ */
+ if (time_after64(h->last_heartbeat_timestamp +
+ (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+ return;
+
+ /* If heartbeat has not changed since we last looked, we're not ok. */
+ spin_lock_irqsave(&h->lock, flags);
+ heartbeat = readl(&h->cfgtable->HeartBeat);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (h->last_heartbeat == heartbeat) {
+ controller_lockup_detected(h);
+ return;
+ }
+
+ /* We're ok. */
+ h->last_heartbeat = heartbeat;
+ h->last_heartbeat_timestamp = now;
+}
+
+static int detect_controller_lockup_thread(void *notused)
+{
+ struct ctlr_info *h;
+ unsigned long flags;
+
+ while (1) {
+ struct list_head *this, *tmp;
+
+ schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
+ if (kthread_should_stop())
+ break;
+ spin_lock_irqsave(&lockup_detector_lock, flags);
+ list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
+ h = list_entry(this, struct ctlr_info, lockup_list);
+ detect_controller_lockup(h);
+ }
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
+ }
+ return 0;
+}
+
+static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockup_detector_lock, flags);
+ list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
+static void start_controller_lockup_detector(struct ctlr_info *h)
+{
+ /* Start the lockup detector thread if not already started */
+ if (!hpsa_lockup_detector) {
+ spin_lock_init(&lockup_detector_lock);
+ hpsa_lockup_detector =
+ kthread_run(detect_controller_lockup_thread,
+ NULL, "hpsa");
+ }
+ if (!hpsa_lockup_detector) {
+ dev_warn(&h->pdev->dev,
+ "Could not start lockup detector thread\n");
+ return;
+ }
+ add_ctlr_to_lockup_detector_list(h);
+}
+
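+/*
+ * The detector thread is shared by all controllers; the last controller to
+ * unregister also stops the thread.
+ */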
+static void stop_controller_lockup_detector(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockup_detector_lock, flags);
+ remove_ctlr_from_lockup_detector_list(h);
+ /* If the list of controllers to monitor is empty, stop the thread */
+ if (list_empty(&hpsa_ctlr_list)) {
+ kthread_stop(hpsa_lockup_detector);
+ hpsa_lockup_detector = NULL;
+ }
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
static int __devinit hpsa_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -4122,7 +4307,6 @@ reinit_after_soft_reset:
return -ENOMEM;
h->pdev = pdev;
- h->busy_initializing = 1;
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->cmpQ);
INIT_LIST_HEAD(&h->reqQ);
@@ -4231,7 +4415,7 @@ reinit_after_soft_reset:
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
- h->busy_initializing = 0;
+ start_controller_lockup_detector(h);
return 1;
clean4:
@@ -4240,7 +4424,6 @@ clean4:
free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
- h->busy_initializing = 0;
kfree(h);
return rc;
}
@@ -4295,10 +4478,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
struct ctlr_info *h;
if (pci_get_drvdata(pdev) == NULL) {
- dev_err(&pdev->dev, "unable to remove device \n");
+ dev_err(&pdev->dev, "unable to remove device\n");
return;
}
h = pci_get_drvdata(pdev);
+ stop_controller_lockup_detector(h);
hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
hpsa_shutdown(pdev);
iounmap(h->vaddr);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 7f53ceaa723..91edafb8c7e 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -95,8 +95,6 @@ struct ctlr_info {
unsigned long *cmd_pool_bits;
int nr_allocs;
int nr_frees;
- int busy_initializing;
- int busy_scanning;
int scan_finished;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;
@@ -104,8 +102,7 @@ struct ctlr_info {
struct Scsi_Host *scsi_host;
spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
int ndevices; /* number of used elements in .dev[] array. */
-#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
- struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+ struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
/*
* Performant mode tables.
*/
@@ -124,6 +121,11 @@ struct ctlr_info {
unsigned char reply_pool_wraparound;
u32 *blockFetchTable;
unsigned char *hba_inquiry_data;
+ u64 last_intr_timestamp;
+ u32 last_heartbeat;
+ u64 last_heartbeat_timestamp;
+ u32 lockup_detected;
+ struct list_head lockup_list;
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 55d741b019d..3fd4715935c 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -123,8 +123,11 @@ union u64bit {
/* FIXME this is a per controller value (barf!) */
#define HPSA_MAX_TARGETS_PER_CTLR 16
-#define HPSA_MAX_LUN 256
+#define HPSA_MAX_LUN 1024
#define HPSA_MAX_PHYS_LUN 1024
+#define MAX_MSA2XXX_ENCLOSURES 32
+#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
+ MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */
/* SCSI-3 Commands */
#pragma pack(1)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8d636301e32..fd860d952b2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2901,7 +2901,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->sdt_state != GET_DUMP) {
+ if (ioa_cfg->sdt_state != READ_DUMP) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
@@ -3097,7 +3097,7 @@ static void ipr_worker_thread(struct work_struct *work)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->sdt_state == GET_DUMP) {
+ if (ioa_cfg->sdt_state == READ_DUMP) {
dump = ioa_cfg->dump;
if (!dump) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3109,7 +3109,7 @@ static void ipr_worker_thread(struct work_struct *work)
kref_put(&dump->kref, ipr_release_dump);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->sdt_state == DUMP_OBTAINED)
+ if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
@@ -3751,14 +3751,6 @@ static ssize_t ipr_store_update_fw(struct device *dev,
image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
- if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
- (ioa_cfg->vpd_cbs->page3_data.card_type &&
- ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
- dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
- release_firmware(fw_entry);
- return -EINVAL;
- }
-
src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
sglist = ipr_alloc_ucode_buffer(dnld_size);
@@ -3777,6 +3769,8 @@ static ssize_t ipr_store_update_fw(struct device *dev,
goto out;
}
+ ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
+
result = ipr_update_ioa_ucode(ioa_cfg, sglist);
if (!result)
@@ -7449,8 +7443,11 @@ static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
if (ioa_cfg->sdt_state == GET_DUMP)
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ else if (ioa_cfg->sdt_state == READ_DUMP)
ioa_cfg->sdt_state = ABORT_DUMP;
+ ioa_cfg->dump_timeout = 1;
ipr_cmd->job_step = ipr_reset_alert;
return IPR_RC_JOB_CONTINUE;
@@ -7614,6 +7611,8 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
ipr_cmd->job_step = ipr_reset_enable_ioa;
if (GET_DUMP == ioa_cfg->sdt_state) {
+ ioa_cfg->sdt_state = READ_DUMP;
+ ioa_cfg->dump_timeout = 0;
if (ioa_cfg->sis64)
ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
else
@@ -8003,8 +8002,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
if (ioa_cfg->ioa_is_dead)
return;
- if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
- ioa_cfg->sdt_state = ABORT_DUMP;
+ if (ioa_cfg->in_reset_reload) {
+ if (ioa_cfg->sdt_state == GET_DUMP)
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ else if (ioa_cfg->sdt_state == READ_DUMP)
+ ioa_cfg->sdt_state = ABORT_DUMP;
+ }
if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
dev_err(&ioa_cfg->pdev->dev,
@@ -8812,7 +8815,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
ioa_cfg->needs_hard_reset = 1;
- if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
+ if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
ioa_cfg->needs_hard_reset = 1;
if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
@@ -9120,6 +9123,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index f93f8637c5a..ac84736c1b9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -82,6 +82,7 @@
#define IPR_SUBS_DEV_ID_57B4 0x033B
#define IPR_SUBS_DEV_ID_57B2 0x035F
+#define IPR_SUBS_DEV_ID_57C3 0x0353
#define IPR_SUBS_DEV_ID_57C4 0x0354
#define IPR_SUBS_DEV_ID_57C6 0x0357
#define IPR_SUBS_DEV_ID_57CC 0x035C
@@ -208,7 +209,7 @@
#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
-#define IPR_WRITE_BUFFER_TIMEOUT (10 * 60 * HZ)
+#define IPR_WRITE_BUFFER_TIMEOUT (30 * 60 * HZ)
#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ)
#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ)
#define IPR_OPERATIONAL_TIMEOUT (5 * 60)
@@ -1360,6 +1361,7 @@ enum ipr_sdt_state {
INACTIVE,
WAIT_FOR_DUMP,
GET_DUMP,
+ READ_DUMP,
ABORT_DUMP,
DUMP_OBTAINED
};
@@ -1384,6 +1386,7 @@ struct ipr_ioa_cfg {
u8 needs_warm_reset:1;
u8 msi_received:1;
u8 sis64:1;
+ u8 dump_timeout:1;
u8 revid;
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 6981b773a88..e7fe9c4c85b 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1263,6 +1263,10 @@ void isci_host_deinit(struct isci_host *ihost)
{
int i;
+ /* disable output data selects */
+ for (i = 0; i < isci_gpio_count(ihost); i++)
+ writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+
isci_host_change_state(ihost, isci_stopping);
for (i = 0; i < SCI_MAX_PORTS; i++) {
struct isci_port *iport = &ihost->ports[i];
@@ -1281,6 +1285,12 @@ void isci_host_deinit(struct isci_host *ihost)
spin_unlock_irq(&ihost->scic_lock);
wait_for_stop(ihost);
+
+ /* disable sgpio; the above wait should give the enclosure time
+ * to sample the gpios going inactive
+ */
+ writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
+
sci_controller_reset(ihost);
/* Cancel any/all outstanding port timers */
@@ -1340,7 +1350,7 @@ static void isci_user_parameters_get(struct sci_user_parameters *u)
u->stp_max_occupancy_timeout = stp_max_occ_to;
u->ssp_max_occupancy_timeout = ssp_max_occ_to;
u->no_outbound_task_timeout = no_outbound_task_to;
- u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+ u->max_concurr_spinup = max_concurr_spinup;
}
static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
@@ -1651,7 +1661,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
/* Default to APC mode. */
- ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+ ihost->oem_parameters.controller.max_concurr_spin_up = 1;
/* Default to no SSC operation. */
ihost->oem_parameters.controller.do_enable_ssc = false;
@@ -1777,7 +1787,8 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
} else
return -EINVAL;
- if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+ if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
+ oem->controller.max_concurr_spin_up < 1)
return -EINVAL;
return 0;
@@ -1800,6 +1811,16 @@ static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
return SCI_FAILURE_INVALID_STATE;
}
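+/*
+ * A non-zero max_concurr_spinup module parameter overrides the OEM setting;
+ * either value is clamped to MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT.
+ */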
+static u8 max_spin_up(struct isci_host *ihost)
+{
+ if (ihost->user_parameters.max_concurr_spinup)
+ return min_t(u8, ihost->user_parameters.max_concurr_spinup,
+ MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+ else
+ return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
+ MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+}
+
static void power_control_timeout(unsigned long data)
{
struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1829,8 +1850,7 @@ static void power_control_timeout(unsigned long data)
if (iphy == NULL)
continue;
- if (ihost->power_control.phys_granted_power >=
- ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+ if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
break;
ihost->power_control.requesters[i] = NULL;
@@ -1855,8 +1875,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
{
BUG_ON(iphy == NULL);
- if (ihost->power_control.phys_granted_power <
- ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+ if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
@@ -2365,6 +2384,12 @@ int isci_host_init(struct isci_host *ihost)
for (i = 0; i < SCI_MAX_PHYS; i++)
isci_phy_init(&ihost->phys[i], ihost, i);
+ /* enable sgpio */
+ writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
+ for (i = 0; i < isci_gpio_count(ihost); i++)
+ writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+ writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
+
for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
struct isci_remote_device *idev = &ihost->devices[i];
@@ -2760,3 +2785,56 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
return status;
}
+
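+/*
+ * Write the requested general purpose TX bits into the SGPIO output data
+ * select registers.  Each register covers three outputs; writing stops once
+ * write_data is exhausted, and the return value is non-zero when at least
+ * one register was written.
+ */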
+static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ int d;
+
+ /* no support for TX_GP_CFG */
+ if (reg_index == 0)
+ return -EINVAL;
+
+ for (d = 0; d < isci_gpio_count(ihost); d++) {
+ u32 val = 0x444; /* all ODx.n clear */
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ int bit = (i << 2) + 2;
+
+ bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
+ write_data, reg_index,
+ reg_count);
+ if (bit < 0)
+ break;
+
+ /* if od is set, clear the 'invert' bit */
+ val &= ~(bit << ((i << 2) + 2));
+ }
+
+ if (i < 3)
+ break;
+ writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
+ }
+
+ /* unless reg_index is > 1, we should always be able to write at
+ * least one register
+ */
+ return d > 0;
+}
+
+int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
+ u8 reg_count, u8 *write_data)
+{
+ struct isci_host *ihost = sas_ha->lldd_ha;
+ int written;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX_GP:
+ written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
+ break;
+ default:
+ written = -EINVAL;
+ }
+
+ return written;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 9f33831a2f0..646051afd3c 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -440,6 +440,18 @@ static inline bool is_c0(struct pci_dev *pdev)
return false;
}
+/* set hw control for 'activity', even though active enclosures seem to drive
+ * the activity led on their own. Skip setting FSENG control on 'status' due
+ * to unexpected operation and 'error' due to not being a supported automatic
+ * FSENG output
+ */
+#define SGPIO_HW_CONTROL 0x00000443
+
+static inline int isci_gpio_count(struct isci_host *ihost)
+{
+ return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
+}
+
void sci_controller_post_request(struct isci_host *ihost,
u32 request);
void sci_controller_release_frame(struct isci_host *ihost,
@@ -542,4 +554,7 @@ void sci_port_configuration_agent_construct(
enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent);
+
+int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
+ u8 reg_count, u8 *write_data);
#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 29aa34efb0f..a97edabcb85 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -118,7 +118,7 @@ unsigned char phy_gen = 3;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
-unsigned char max_concurr_spinup = 1;
+unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
@@ -192,6 +192,9 @@ static struct sas_domain_function_template isci_transport_ops = {
/* Phy management */
.lldd_control_phy = isci_phy_control,
+
+ /* GPIO support */
+ .lldd_write_gpio = isci_gpio_write,
};
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index d1de63312e7..8efeb6b0832 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -97,7 +97,7 @@
#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
-#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024U)
#define SCU_INVALID_FRAME_INDEX (0xFFFF)
#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 09e61134037..35f50c2183e 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -1313,6 +1313,17 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
ret = isci_port_perform_hard_reset(ihost, iport, iphy);
break;
+ case PHY_FUNC_GET_EVENTS: {
+ struct scu_link_layer_registers __iomem *r;
+ struct sas_phy *phy = sas_phy->phy;
+
+ r = iphy->link_layer_registers;
+ phy->running_disparity_error_count = readl(&r->running_disparity_error_count);
+ phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count);
+ phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count);
+ phy->invalid_dword_count = readl(&r->invalid_dword_counter);
+ break;
+ }
default:
dev_dbg(&ihost->pdev->dev,
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 8f6f9b77e41..ac7f27749f9 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -145,48 +145,15 @@ static void sci_port_bcn_enable(struct isci_port *iport)
}
}
-/* called under sci_lock to stabilize phy:port associations */
-void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
-{
- int i;
-
- clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
- wake_up(&ihost->eventq);
-
- if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
- return;
-
- for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
- struct isci_phy *iphy = iport->phy_table[i];
-
- if (!iphy)
- continue;
-
- ihost->sas_ha.notify_port_event(&iphy->sas_phy,
- PORTE_BROADCAST_RCVD);
- break;
- }
-}
-
static void isci_port_bc_change_received(struct isci_host *ihost,
struct isci_port *iport,
struct isci_phy *iphy)
{
- if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
- dev_dbg(&ihost->pdev->dev,
- "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
- __func__, iphy, &iphy->sas_phy);
- set_bit(IPORT_BCN_PENDING, &iport->flags);
- atomic_inc(&iport->event);
- wake_up(&ihost->eventq);
- } else {
- dev_dbg(&ihost->pdev->dev,
- "%s: isci_phy = %p, sas_phy = %p\n",
- __func__, iphy, &iphy->sas_phy);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
- ihost->sas_ha.notify_port_event(&iphy->sas_phy,
- PORTE_BROADCAST_RCVD);
- }
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
sci_port_bcn_enable(iport);
}
@@ -278,9 +245,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
/* check to see if this is the last phy on this port. */
if (isci_phy->sas_phy.port &&
isci_phy->sas_phy.port->num_phys == 1) {
- atomic_inc(&isci_port->event);
- isci_port_bcn_enable(isci_host, isci_port);
-
/* change the state for all devices on this port. The
* next task sent to this device will be returned as
* SAS_TASK_UNDELIVERED, and the scsi mid layer will
@@ -294,8 +258,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
__func__, isci_device);
set_bit(IDEV_GONE, &isci_device->flags);
}
+ isci_port_change_state(isci_port, isci_stopping);
}
- isci_port_change_state(isci_port, isci_stopping);
}
/* Notify libsas of the broken link, this will trigger calls to our
@@ -350,6 +314,34 @@ static void isci_port_stop_complete(struct isci_host *ihost,
dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+ switch (state) {
+ case SCI_PORT_READY:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+ enum sci_port_states state)
+{
+ struct sci_base_state_machine *sm = &iport->sm;
+ enum sci_port_states old_state = sm->current_state_id;
+
+ if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+ iport->ready_exit = true;
+
+ sci_change_state(sm, state);
+ iport->ready_exit = false;
+}
+
/**
* isci_port_hard_reset_complete() - This function is called by the sci core
* when the hard reset complete notification has been received.
@@ -368,6 +360,26 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
/* Save the status of the hard reset from the port. */
isci_port->hard_reset_status = completion_status;
+ if (completion_status != SCI_SUCCESS) {
+
+ /* The reset failed. The port state is now SCI_PORT_FAILED. */
+ if (isci_port->active_phy_mask == 0) {
+
+ /* Generate the link down now to the host, since it
+ * was intercepted by the hard reset state machine when
+ * it really happened.
+ */
+ isci_port_link_down(isci_port->isci_host,
+ &isci_port->isci_host->phys[
+ isci_port->last_active_phy],
+ isci_port);
+ }
+ /* Advance the port state so that link state changes will be
+ * noticed.
+ */
+ port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
+
+ }
complete_all(&isci_port->hard_reset_complete);
}
@@ -657,6 +669,8 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
struct isci_host *ihost = iport->owning_controller;
iport->active_phy_mask &= ~(1 << iphy->phy_index);
+ if (!iport->active_phy_mask)
+ iport->last_active_phy = iphy->phy_index;
iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
@@ -683,33 +697,6 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
}
}
-static bool is_port_ready_state(enum sci_port_states state)
-{
- switch (state) {
- case SCI_PORT_READY:
- case SCI_PORT_SUB_WAITING:
- case SCI_PORT_SUB_OPERATIONAL:
- case SCI_PORT_SUB_CONFIGURING:
- return true;
- default:
- return false;
- }
-}
-
-/* flag dummy rnc hanling when exiting a ready state */
-static void port_state_machine_change(struct isci_port *iport,
- enum sci_port_states state)
-{
- struct sci_base_state_machine *sm = &iport->sm;
- enum sci_port_states old_state = sm->current_state_id;
-
- if (is_port_ready_state(old_state) && !is_port_ready_state(state))
- iport->ready_exit = true;
-
- sci_change_state(sm, state);
- iport->ready_exit = false;
-}
-
/**
* sci_port_general_link_up_handler - phy can be assigned to port?
* @sci_port: sci_port object for which has a phy that has gone link up.
@@ -1622,7 +1609,8 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
iport->physical_port_index = index;
iport->active_phy_mask = 0;
- iport->ready_exit = false;
+ iport->last_active_phy = 0;
+ iport->ready_exit = false;
iport->owning_controller = ihost;
@@ -1648,7 +1636,6 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
init_completion(&iport->start_complete);
iport->isci_host = ihost;
isci_port_change_state(iport, isci_freed);
- atomic_set(&iport->event, 0);
}
/**
@@ -1676,7 +1663,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
{
unsigned long flags;
enum sci_status status;
- int idx, ret = TMF_RESP_FUNC_COMPLETE;
+ int ret = TMF_RESP_FUNC_COMPLETE;
dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
__func__, iport);
@@ -1697,8 +1684,13 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
"%s: iport = %p; hard reset completion\n",
__func__, iport);
- if (iport->hard_reset_status != SCI_SUCCESS)
+ if (iport->hard_reset_status != SCI_SUCCESS) {
ret = TMF_RESP_FUNC_FAILED;
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset failed (0x%x)\n",
+ __func__, iport, iport->hard_reset_status);
+ }
} else {
ret = TMF_RESP_FUNC_FAILED;
@@ -1718,18 +1710,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
"%s: iport = %p; hard reset failed "
"(0x%x) - driving explicit link fail for all phys\n",
__func__, iport, iport->hard_reset_status);
-
- /* Down all phys in the port. */
- spin_lock_irqsave(&ihost->scic_lock, flags);
- for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
- struct isci_phy *iphy = iport->phy_table[idx];
-
- if (!iphy)
- continue;
- sci_phy_stop(iphy);
- sci_phy_start(iphy);
- }
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
return ret;
}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index b50ecd4e8f9..cb5ffbc3860 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -77,7 +77,6 @@ enum isci_status {
/**
* struct isci_port - isci direct attached sas port object
- * @event: counts bcns and port stop events (for bcn filtering)
* @ready_exit: several states constitute 'ready'. When exiting ready we
* need to take extra port-teardown actions that are
* skipped when exiting to another 'ready' state.
@@ -92,10 +91,6 @@ enum isci_status {
*/
struct isci_port {
enum isci_status status;
- #define IPORT_BCN_BLOCKED 0
- #define IPORT_BCN_PENDING 1
- unsigned long flags;
- atomic_t event;
struct isci_host *isci_host;
struct asd_sas_port sas_port;
struct list_head remote_dev_list;
@@ -109,6 +104,7 @@ struct isci_port {
u8 logical_port_index;
u8 physical_port_index;
u8 active_phy_mask;
+ u8 last_active_phy;
u16 reserved_rni;
u16 reserved_tag;
u32 started_request_count;
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 486b113c634..38a99d28114 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -678,7 +678,7 @@ static void apc_agent_timeout(unsigned long data)
configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
if (!configure_phy_mask)
- return;
+ goto done;
for (index = 0; index < SCI_MAX_PHYS; index++) {
if ((configure_phy_mask & (1 << index)) == 0)
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index dc007e692f4..2c75248ca32 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -112,7 +112,7 @@ struct sci_user_parameters {
* This field specifies the maximum number of direct attached devices
* that can have power supplied to them simultaneously.
*/
- u8 max_number_concurrent_device_spin_up;
+ u8 max_concurr_spinup;
/**
* This field specifies the number of seconds to allow a phy to consume
@@ -219,7 +219,7 @@ struct sci_bios_oem_param_block_hdr {
struct sci_oem_params {
struct {
uint8_t mode_type;
- uint8_t max_concurrent_dev_spin_up;
+ uint8_t max_concurr_spin_up;
uint8_t do_enable_ssc;
uint8_t reserved;
} controller;
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 00afc738bbe..eaa541afc75 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -875,122 +875,6 @@ struct scu_iit_entry {
#define SCU_PTSxSR_GEN_BIT(name) \
SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
-
-/*
- * *****************************************************************************
- * * SGPIO Register shift and mask values
- * ***************************************************************************** */
-#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
-#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
-#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
-#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
-#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
-
-#define SCU_SGICRx_GEN_BIT(name) \
- SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
-
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
-
-#define SCU_SGPBRx_GEN_VAL(name, value) \
- SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
-
-#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
-#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
-#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
-#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
-#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
-#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
-#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
-#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
-#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSDLRx_GEN_VAL(name, value) \
- SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
-
-#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
-#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
-#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
-#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
-#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
-#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
-#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
-#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
-#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSDURx_GEN_VAL(name, value) \
- SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
-
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSIDLRx_GEN_VAL(name, value) \
- SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
-
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSIDURx_GEN_VAL(name, value) \
- SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
-
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
-
-#define SCU_SGVSCR_GEN_VAL(value) \
- SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE ## name, value)
-
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
-
-#define SCU_SGODSR_GEN_VAL(name, value) \
- SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
-
-#define SCU_SGODSR_GEN_BIT(name) \
- SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
-
/*
* *****************************************************************************
* * SMU Registers
@@ -1529,10 +1413,12 @@ struct scu_sgpio_registers {
u32 serial_input_upper;
/* 0x0018 SGPIO_SGVSCR */
u32 vendor_specific_code;
+/* 0x001C Reserved */
+ u32 reserved_001c;
/* 0x0020 SGPIO_SGODSR */
- u32 ouput_data_select[8];
+ u32 output_data_select[8];
/* Remainder of memory space 256 bytes */
- u32 reserved_1444_14ff[0x31];
+ u32 reserved_1444_14ff[0x30];
};
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b6e6368c266..b207cd3b15a 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -386,6 +386,18 @@ static bool is_remote_device_ready(struct isci_remote_device *idev)
}
}
+/*
+ * called once the remote node context has transitioned to a ready
+ * state (after suspending RX and/or TX due to early D2H fis)
+ */
+static void atapi_remote_device_resume_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+ struct isci_request *ireq = idev->working_request;
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+}
+
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
u32 event_code)
{
@@ -432,6 +444,16 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
if (status != SCI_SUCCESS)
return status;
+ if (state == SCI_STP_DEV_ATAPI_ERROR) {
+ /* For ATAPI error state resume the RNC right away. */
+ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
+ return sci_remote_node_context_resume(&idev->rnc,
+ atapi_remote_device_resume_done,
+ idev);
+ }
+ }
+
if (state == SCI_STP_DEV_IDLE) {
/* We pick up suspension events to handle specifically to this
@@ -625,6 +647,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
case SCI_STP_DEV_CMD:
case SCI_STP_DEV_NCQ:
case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_ATAPI_ERROR:
status = common_complete_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
break;
@@ -1020,6 +1043,7 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
[SCI_STP_DEV_NCQ_ERROR] = {
.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
},
+ [SCI_STP_DEV_ATAPI_ERROR] = { },
[SCI_STP_DEV_AWAIT_RESET] = { },
[SCI_SMP_DEV_IDLE] = {
.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
@@ -1414,88 +1438,3 @@ int isci_remote_device_found(struct domain_device *domain_dev)
return status == SCI_SUCCESS ? 0 : -ENODEV;
}
-/**
- * isci_device_is_reset_pending() - This function will check if there is any
- * pending reset condition on the device.
- * @request: This parameter is the isci_device object.
- *
- * true if there is a reset pending for the device.
- */
-bool isci_device_is_reset_pending(
- struct isci_host *isci_host,
- struct isci_remote_device *isci_device)
-{
- struct isci_request *isci_request;
- struct isci_request *tmp_req;
- bool reset_is_pending = false;
- unsigned long flags;
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_device = %p\n", __func__, isci_device);
-
- spin_lock_irqsave(&isci_host->scic_lock, flags);
-
- /* Check for reset on all pending requests. */
- list_for_each_entry_safe(isci_request, tmp_req,
- &isci_device->reqs_in_process, dev_node) {
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_device = %p request = %p\n",
- __func__, isci_device, isci_request);
-
- if (isci_request->ttype == io_task) {
- struct sas_task *task = isci_request_access_task(
- isci_request);
-
- spin_lock(&task->task_state_lock);
- if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
- reset_is_pending = true;
- spin_unlock(&task->task_state_lock);
- }
- }
-
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_device = %p reset_is_pending = %d\n",
- __func__, isci_device, reset_is_pending);
-
- return reset_is_pending;
-}
-
-/**
- * isci_device_clear_reset_pending() - This function will clear if any pending
- * reset condition flags on the device.
- * @request: This parameter is the isci_device object.
- *
- * true if there is a reset pending for the device.
- */
-void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
-{
- struct isci_request *isci_request;
- struct isci_request *tmp_req;
- unsigned long flags = 0;
-
- dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
- __func__, idev, ihost);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- /* Clear reset pending on all pending requests. */
- list_for_each_entry_safe(isci_request, tmp_req,
- &idev->reqs_in_process, dev_node) {
- dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
- __func__, idev, isci_request);
-
- if (isci_request->ttype == io_task) {
-
- unsigned long flags2;
- struct sas_task *task = isci_request_access_task(
- isci_request);
-
- spin_lock_irqsave(&task->task_state_lock, flags2);
- task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
- spin_unlock_irqrestore(&task->task_state_lock, flags2);
- }
- }
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 57ccfc3d6ad..483ee50152f 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -132,10 +132,7 @@ void isci_remote_device_nuke_requests(struct isci_host *ihost,
struct isci_remote_device *idev);
void isci_remote_device_gone(struct domain_device *domain_dev);
int isci_remote_device_found(struct domain_device *domain_dev);
-bool isci_device_is_reset_pending(struct isci_host *ihost,
- struct isci_remote_device *idev);
-void isci_device_clear_reset_pending(struct isci_host *ihost,
- struct isci_remote_device *idev);
+
/**
* sci_remote_device_stop() - This method will stop both transmission and
* reception of link activity for the supplied remote device. This method
@@ -244,6 +241,15 @@ enum sci_remote_device_states {
SCI_STP_DEV_NCQ_ERROR,
/**
+ * This is the ATAPI error state for the STP ATAPI remote device.
+ * This state is entered when ATAPI device sends error status FIS
+ * without data while the device object is in CMD state.
+ * A suspension event is expected in this state.
+ * The device object will resume right away.
+ */
+ SCI_STP_DEV_ATAPI_ERROR,
+
+ /**
* This is the READY substate indicates the device is waiting for the RESET task
* coming to be recovered from certain hardware specific error.
*/
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b5d3a8c4d32..192cb48d849 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -191,7 +191,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
task_iu->task_func = isci_tmf->tmf_code;
task_iu->task_tag =
- (ireq->ttype == tmf_task) ?
+ (test_bit(IREQ_TMF, &ireq->flags)) ?
isci_tmf->io_tag :
SCI_CONTROLLER_INVALID_IO_TAG;
}
@@ -481,7 +481,29 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
}
}
+static void sci_atapi_construct(struct isci_request *ireq)
+{
+ struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
+ struct sas_task *task;
+
+ /* To simplify the implementation we take advantage of the
+ * silicon's partial acceleration of atapi protocol (dma data
+ * transfers), so we promote all commands to dma protocol. This
+ * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
+ */
+ h2d_fis->features |= ATAPI_PKT_DMA;
+
+ scu_stp_raw_request_construct_task_context(ireq);
+ task = isci_request_access_task(ireq);
+ if (task->data_dir == DMA_NONE)
+ task->total_xfer_len = 0;
+
+ /* clear the response so we can detect arrival of an
+ * unsolicited d2h fis
+ */
+ ireq->stp.rsp.fis_type = 0;
+}
static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
@@ -491,9 +513,10 @@ sci_io_request_construct_sata(struct isci_request *ireq,
{
enum sci_status status = SCI_SUCCESS;
struct sas_task *task = isci_request_access_task(ireq);
+ struct domain_device *dev = ireq->target_device->domain_dev;
/* check for management protocols */
- if (ireq->ttype == tmf_task) {
+ if (test_bit(IREQ_TMF, &ireq->flags)) {
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
if (tmf->tmf_code == isci_tmf_sata_srst_high ||
@@ -519,6 +542,13 @@ sci_io_request_construct_sata(struct isci_request *ireq,
}
+ /* ATAPI */
+ if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
+ task->ata_task.fis.command == ATA_CMD_PACKET) {
+ sci_atapi_construct(ireq);
+ return SCI_SUCCESS;
+ }
+
/* non data */
if (task->data_dir == DMA_NONE) {
scu_stp_raw_request_construct_task_context(ireq);
@@ -602,7 +632,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
enum sci_status status = SCI_SUCCESS;
/* check for management protocols */
- if (ireq->ttype == tmf_task) {
+ if (test_bit(IREQ_TMF, &ireq->flags)) {
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
if (tmf->tmf_code == isci_tmf_sata_srst_high ||
@@ -627,7 +657,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
/**
* sci_req_tx_bytes - bytes transferred when reply underruns request
- * @sci_req: request that was terminated early
+ * @ireq: request that was terminated early
*/
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
@@ -729,6 +759,10 @@ sci_io_request_terminate(struct isci_request *ireq)
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+ case SCI_REQ_ATAPI_WAIT_H2D:
+ case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
+ case SCI_REQ_ATAPI_WAIT_D2H:
+ case SCI_REQ_ATAPI_WAIT_TC_COMP:
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
return SCI_SUCCESS;
case SCI_REQ_TASK_WAIT_TC_RESP:
@@ -1194,8 +1228,8 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
{
struct isci_stp_request *stp_req = &ireq->stp.req;
struct scu_sgl_element_pair *sgl_pair;
+ enum sci_status status = SCI_SUCCESS;
struct scu_sgl_element *sgl;
- enum sci_status status;
u32 offset;
u32 len = 0;
@@ -1249,7 +1283,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
*/
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
- u8 *data_buf, u32 len)
+ u8 *data_buf, u32 len)
{
struct isci_request *ireq;
u8 *src_addr;
@@ -1423,6 +1457,128 @@ static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_re
return status;
}
+static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ enum sci_status status;
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (frame_header->fis_type != FIS_REGD2H) {
+ dev_err(&ireq->isci_host->pdev->dev,
+ "%s ERROR: invalid fis type 0x%X\n",
+ __func__, frame_header->fis_type);
+ return SCI_FAILURE;
+ }
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ (u32 *)frame_header,
+ frame_buffer);
+
+ /* Frame has been decoded; return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+}
+
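+/*
+ * Copy the unsolicited D2H register fis into the response buffer.  An ATA
+ * error status (or a failure to decode the frame) is reported as
+ * SCI_IO_FAILURE_RESPONSE_VALID; for non-data commands the D2H fis also
+ * completes the request.
+ */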
+static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ enum sci_status status;
+
+ status = process_unsolicited_fis(ireq, frame_index);
+
+ if (status == SCI_SUCCESS) {
+ if (ireq->stp.rsp.status & ATA_ERR)
+ status = SCI_IO_FAILURE_RESPONSE_VALID;
+ } else {
+ status = SCI_IO_FAILURE_RESPONSE_VALID;
+ }
+
+ if (status != SCI_SUCCESS) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = status;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+
+ /* the d2h ufi is the end of non-data commands */
+ if (task->data_dir == DMA_NONE)
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ return status;
+}
+
+static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
+{
+ struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
+ void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
+ struct scu_task_context *task_context = ireq->tc;
+
+ /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame
+ * type. The TC for the previous Packet fis was already there; we only need to
+ * change the H2D fis content.
+ */
+ memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
+ memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
+ memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
+ task_context->type.stp.fis_type = FIS_DATA;
+ task_context->transfer_length_bytes = dev->cdb_len;
+}
+
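+/*
+ * Build the task context for the ATAPI data phase: a PACKET_DMA_IN/OUT task
+ * whose H2D command fis carries the CDB starting at the lbal field, with the
+ * transfer direction and length taken from the sas_task.
+ */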
+static void scu_atapi_construct_task_context(struct isci_request *ireq)
+{
+ struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scu_task_context *task_context = ireq->tc;
+ int cdb_len = dev->cdb_len;
+
+ /* reference: SSTL 1.13.4.2
+ * task_type, sata_direction
+ */
+ if (task->data_dir == DMA_TO_DEVICE) {
+ task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
+ task_context->sata_direction = 0;
+ } else {
+ /* todo: for NO_DATA command, we need to send out raw frame. */
+ task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
+ task_context->sata_direction = 1;
+ }
+
+ memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
+ task_context->type.stp.fis_type = FIS_DATA;
+
+ memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+ memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
+ task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
+
+ /* task phase is set to TX_CMD */
+ task_context->task_phase = 0x1;
+
+ /* retry counter */
+ task_context->stp_retry_count = 0;
+
+ /* data transfer size. */
+ task_context->transfer_length_bytes = task->total_xfer_len;
+
+ /* setup sgl */
+ sci_request_build_sgl(ireq);
+}
+
enum sci_status
sci_io_request_frame_handler(struct isci_request *ireq,
u32 frame_index)
@@ -1490,29 +1646,30 @@ sci_io_request_frame_handler(struct isci_request *ireq,
return SCI_SUCCESS;
case SCI_REQ_SMP_WAIT_RESP: {
- struct smp_resp *rsp_hdr = &ireq->smp.rsp;
- void *frame_header;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scatterlist *sg = &task->smp_task.smp_resp;
+ void *frame_header, *kaddr;
+ u8 *rsp;
sci_unsolicited_frame_control_get_header(&ihost->uf_control,
- frame_index,
- &frame_header);
-
- /* byte swap the header. */
- word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
- sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+ frame_index,
+ &frame_header);
+ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ rsp = kaddr + sg->offset;
+ sci_swab32_cpy(rsp, frame_header, 1);
- if (rsp_hdr->frame_type == SMP_RESPONSE) {
+ if (rsp[0] == SMP_RESPONSE) {
void *smp_resp;
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
- frame_index,
- &smp_resp);
+ frame_index,
+ &smp_resp);
- word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
- sizeof(u32);
-
- sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
- smp_resp, word_cnt);
+ word_cnt = (sg->length/4)-1;
+ if (word_cnt > 0)
+ word_cnt = min_t(unsigned int, word_cnt,
+ SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
+ sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
@@ -1528,12 +1685,13 @@ sci_io_request_frame_handler(struct isci_request *ireq,
__func__,
ireq,
frame_index,
- rsp_hdr->frame_type);
+ rsp[0]);
ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
+ kunmap_atomic(kaddr, KM_IRQ0);
sci_controller_release_frame(ihost, frame_index);
@@ -1833,6 +1991,24 @@ sci_io_request_frame_handler(struct isci_request *ireq,
return status;
}
+ case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ sci_controller_release_frame(ihost, frame_index);
+ ireq->target_device->working_request = ireq;
+ if (task->data_dir == DMA_NONE) {
+ sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
+ scu_atapi_reconstruct_raw_frame_task_context(ireq);
+ } else {
+ sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+ scu_atapi_construct_task_context(ireq);
+ }
+
+ sci_controller_continue_io(ireq);
+ return SCI_SUCCESS;
+ }
+ case SCI_REQ_ATAPI_WAIT_D2H:
+ return atapi_d2h_reg_frame_handler(ireq, frame_index);
case SCI_REQ_ABORTING:
/*
* TODO: Is it even possible to get an unsolicited frame in the
@@ -1898,10 +2074,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
sci_remote_device_suspend(ireq->target_device,
SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
- /* Fall through to the default case */
+ /* Fall through to the default case */
default:
/* All other completion status cause the IO to be complete. */
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
@@ -1964,6 +2139,112 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
return SCI_SUCCESS;
}
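+/*
+ * Advance the request to @next when the raw frame completes successfully;
+ * any other completion status fails the request with a controller specific
+ * I/O error.
+ */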
+static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
+ enum sci_base_request_states next)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, next);
+ break;
+ default:
+ /* All other completion statuses cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
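+/*
+ * Task completion while an ATAPI data transfer is in flight: DONE_GOOD and
+ * EXCESS_DATA complete the request, while UNEXP_FIS is treated as a data
+ * underrun: the missing D2H fis is synthesized and the device is parked in
+ * SCI_STP_DEV_ATAPI_ERROR until its RNC is resumed.
+ */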
+static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
+ u32 completion_code)
+{
+ struct isci_remote_device *idev = ireq->target_device;
+ struct dev_to_host_fis *d2h = &ireq->stp.rsp;
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
+ u16 len = sci_req_tx_bytes(ireq);
+
+ /* likely a non-error data underrun; work around the missing
+ * d2h frame from the controller
+ */
+ if (d2h->fis_type != FIS_REGD2H) {
+ d2h->fis_type = FIS_REGD2H;
+ d2h->flags = (1 << 6);
+ d2h->status = 0x50;
+ d2h->error = 0;
+ d2h->lbal = 0;
+ d2h->byte_count_low = len & 0xff;
+ d2h->byte_count_high = len >> 8;
+ d2h->device = 0xa0;
+ d2h->lbal_exp = 0;
+ d2h->lbam_exp = 0;
+ d2h->lbah_exp = 0;
+ d2h->_r_a = 0;
+ d2h->sector_count = 0x3;
+ d2h->sector_count_exp = 0;
+ d2h->_r_b = 0;
+ d2h->_r_c = 0;
+ d2h->_r_d = 0;
+ }
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+ status = ireq->sci_status;
+
+ /* the hw will have suspended the rnc, so complete the
+ * request upon pending resume
+ */
+ sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+ break;
+ }
+ case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
+ /* In this case, there is no UF coming after;
+ * complete the IO now.
+ */
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ if (d2h->fis_type == FIS_REGD2H) {
+ /* UF received; change the device state to ATAPI_ERROR */
+ status = ireq->sci_status;
+ sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+ } else {
+ /* If we receive any non-success TC status and no UF
+ * has been received yet, then a UF for the status fis
+ * is coming after (XXX: suspect this is
+ * actually a protocol error or a bug like the
+ * DONE_UNEXP_FIS case)
+ */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+
+ sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+ }
+ break;
+ }
+
+ return status;
+}
+
enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq,
u32 completion_code)
@@ -2015,6 +2296,17 @@ sci_io_request_tc_completion(struct isci_request *ireq,
return request_aborting_state_tc_event(ireq,
completion_code);
+ case SCI_REQ_ATAPI_WAIT_H2D:
+ return atapi_raw_completion(ireq, completion_code,
+ SCI_REQ_ATAPI_WAIT_PIO_SETUP);
+
+ case SCI_REQ_ATAPI_WAIT_TC_COMP:
+ return atapi_raw_completion(ireq, completion_code,
+ SCI_REQ_ATAPI_WAIT_D2H);
+
+ case SCI_REQ_ATAPI_WAIT_D2H:
+ return atapi_data_tc_completion_handler(ireq, completion_code);
+
default:
dev_warn(&ihost->pdev->dev,
"%s: SCIC IO Request given task completion "
@@ -2338,14 +2630,8 @@ static void isci_task_save_for_upper_layer_completion(
switch (task_notification_selection) {
case isci_perform_normal_io_completion:
-
/* Normal notification (task_done) */
- dev_dbg(&host->pdev->dev,
- "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
- __func__,
- task,
- task->task_status.resp, response,
- task->task_status.stat, status);
+
/* Add to the completed list. */
list_add(&request->completed_node,
&host->requests_to_complete);
@@ -2358,13 +2644,6 @@ static void isci_task_save_for_upper_layer_completion(
/* No notification to libsas because this request is
* already in the abort path.
*/
- dev_dbg(&host->pdev->dev,
- "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
- __func__,
- task,
- task->task_status.resp, response,
- task->task_status.stat, status);
-
/* Wake up whatever process was waiting for this
* request to complete.
*/
@@ -2381,30 +2660,22 @@ static void isci_task_save_for_upper_layer_completion(
case isci_perform_error_io_completion:
/* Use sas_task_abort */
- dev_dbg(&host->pdev->dev,
- "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
- __func__,
- task,
- task->task_status.resp, response,
- task->task_status.stat, status);
/* Add to the aborted list. */
list_add(&request->completed_node,
&host->requests_to_errorback);
break;
default:
- dev_dbg(&host->pdev->dev,
- "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
- __func__,
- task,
- task->task_status.resp, response,
- task->task_status.stat, status);
-
/* Add to the error to libsas list. */
list_add(&request->completed_node,
&host->requests_to_errorback);
break;
}
+ dev_dbg(&host->pdev->dev,
+ "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__, task_notification_selection, task,
+ (task) ? task->task_status.resp : 0, response,
+ (task) ? task->task_status.stat : 0, status);
}
static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
@@ -2421,6 +2692,8 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
*/
if (fis->status & ATA_DF)
ts->stat = SAS_PROTO_RESPONSE;
+ else if (fis->status & ATA_ERR)
+ ts->stat = SAM_STAT_CHECK_CONDITION;
else
ts->stat = SAM_STAT_GOOD;
@@ -2434,9 +2707,9 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
struct sas_task *task = isci_request_access_task(request);
struct ssp_response_iu *resp_iu;
unsigned long task_flags;
- struct isci_remote_device *idev = isci_lookup_device(task->dev);
- enum service_response response = SAS_TASK_UNDELIVERED;
- enum exec_status status = SAS_ABORTED_TASK;
+ struct isci_remote_device *idev = request->target_device;
+ enum service_response response = SAS_TASK_UNDELIVERED;
+ enum exec_status status = SAS_ABORTED_TASK;
enum isci_request_status request_status;
enum isci_completion_selection complete_to_host
= isci_perform_normal_io_completion;
@@ -2603,18 +2876,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
status = SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- if (task->task_proto == SAS_PROTOCOL_SMP) {
- void *rsp = &request->smp.rsp;
-
- dev_dbg(&ihost->pdev->dev,
- "%s: SMP protocol completion\n",
- __func__);
-
- sg_copy_from_buffer(
- &task->smp_task.smp_resp, 1,
- rsp, sizeof(struct smp_resp));
- } else if (completion_status
- == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+ if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
/* This was an SSP / STP / SATA transfer.
* There is a possibility that less data than
@@ -2778,7 +3040,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
/* complete the io request to the core. */
sci_controller_complete_io(ihost, request->target_device, request);
- isci_put_device(idev);
/* set terminated handle so it cannot be completed or
* terminated again, and to cause any calls into abort
@@ -2791,37 +3052,42 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
struct domain_device *dev = ireq->target_device->domain_dev;
+ enum sci_base_request_states state;
struct sas_task *task;
/* XXX as hch said always creating an internal sas_task for tmf
* requests would simplify the driver
*/
- task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+ task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
/* all unaccelerated request types (non ssp or ncq) handled with
* substates
*/
if (!task && dev->dev_type == SAS_END_DEV) {
- sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
+ state = SCI_REQ_TASK_WAIT_TC_COMP;
} else if (!task &&
(isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
- sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
+ state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
- sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
+ state = SCI_REQ_SMP_WAIT_RESP;
} else if (task && sas_protocol_ata(task->task_proto) &&
!task->ata_task.use_ncq) {
- u32 state;
-
- if (task->data_dir == DMA_NONE)
+ if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
+ task->ata_task.fis.command == ATA_CMD_PACKET) {
+ state = SCI_REQ_ATAPI_WAIT_H2D;
+ } else if (task->data_dir == DMA_NONE) {
state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
- else if (task->ata_task.dma_xfer)
+ } else if (task->ata_task.dma_xfer) {
state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
- else /* PIO */
+ } else /* PIO */ {
state = SCI_REQ_STP_PIO_WAIT_H2D;
-
- sci_change_state(sm, state);
+ }
+ } else {
+ /* SSP or NCQ are fully accelerated, no substates */
+ return;
}
+ sci_change_state(sm, state);
}
static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
@@ -2913,6 +3179,10 @@ static const struct sci_base_state sci_request_state_table[] = {
[SCI_REQ_TASK_WAIT_TC_RESP] = { },
[SCI_REQ_SMP_WAIT_RESP] = { },
[SCI_REQ_SMP_WAIT_TC_COMP] = { },
+ [SCI_REQ_ATAPI_WAIT_H2D] = { },
+ [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
+ [SCI_REQ_ATAPI_WAIT_D2H] = { },
+ [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
[SCI_REQ_COMPLETED] = {
.enter_state = sci_request_completed_state_enter,
},
@@ -3272,7 +3542,7 @@ static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
ireq = isci_request_from_tag(ihost, tag);
ireq->ttype_ptr.io_task_ptr = task;
- ireq->ttype = io_task;
+ clear_bit(IREQ_TMF, &ireq->flags);
task->lldd_task = ireq;
return ireq;
@@ -3286,7 +3556,7 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
ireq = isci_request_from_tag(ihost, tag);
ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
- ireq->ttype = tmf_task;
+ set_bit(IREQ_TMF, &ireq->flags);
return ireq;
}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 7a1d5a9778e..be38933dd6d 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -77,11 +77,6 @@ enum isci_request_status {
dead = 0x07
};
-enum task_type {
- io_task = 0,
- tmf_task = 1
-};
-
enum sci_request_protocol {
SCIC_NO_PROTOCOL,
SCIC_SMP_PROTOCOL,
@@ -96,7 +91,6 @@ enum sci_request_protocol {
* to wait for another fis or if the transfer is complete. Upon
* receipt of a d2h fis this will be the status field of that fis.
* @sgl - track pio transfer progress as we iterate through the sgl
- * @device_cdb_len - atapi device advertises it's transfer constraints at setup
*/
struct isci_stp_request {
u32 pio_len;
@@ -107,7 +101,6 @@ struct isci_stp_request {
u8 set;
u32 offset;
} sgl;
- u32 device_cdb_len;
};
struct isci_request {
@@ -118,7 +111,6 @@ struct isci_request {
#define IREQ_ACTIVE 3
unsigned long flags;
/* XXX kill ttype and ttype_ptr, allocate full sas_task */
- enum task_type ttype;
union ttype_ptr_union {
struct sas_task *io_task_ptr; /* When ttype==io_task */
struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
@@ -174,9 +166,6 @@ struct isci_request {
};
} ssp;
struct {
- struct smp_resp rsp;
- } smp;
- struct {
struct isci_stp_request req;
struct host_to_dev_fis cmd;
struct dev_to_host_fis rsp;
@@ -252,6 +241,32 @@ enum sci_base_request_states {
SCI_REQ_STP_PIO_DATA_OUT,
/*
+ * While in this state the IO request object is waiting for the TC
+ * completion notification for the H2D Register FIS
+ */
+ SCI_REQ_ATAPI_WAIT_H2D,
+
+ /*
+ * While in this state the IO request object is waiting for a
+ * PIO Setup FIS.
+ */
+ SCI_REQ_ATAPI_WAIT_PIO_SETUP,
+
+ /*
+ * The non-data IO transitions to this state after receiving TC
+ * completion. While in this state the IO request object is waiting
+ * for the D2H status frame as a UF.
+ */
+ SCI_REQ_ATAPI_WAIT_D2H,
+
+ /*
+ * When transmitting raw frames hardware reports task context completion
+ * after every frame submission, so in the non-accelerated case we need
+ * to expect the completion for the "cdb" frame.
+ */
+ SCI_REQ_ATAPI_WAIT_TC_COMP,
+
+ /*
* The AWAIT_TC_COMPLETION sub-state indicates that the started raw
* task management request is waiting for the transmission of the
* initial frame (i.e. command, task, etc.).
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
index 462b15174d3..dc26b4aea99 100644
--- a/drivers/scsi/isci/sas.h
+++ b/drivers/scsi/isci/sas.h
@@ -204,8 +204,6 @@ struct smp_req {
u8 req_data[0];
} __packed;
-#define SMP_RESP_HDR_SZ 4
-
/*
* struct sci_sas_address - This structure depicts how a SAS address is
* represented by SCI.
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index d6bcdd013dc..66ad3dc8949 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -212,16 +212,27 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- /* Indicate QUEUE_FULL so that the scsi
- * midlayer retries. if the request
- * failed for remote device reasons,
- * it gets returned as
- * SAS_TASK_UNDELIVERED next time
- * through.
- */
- isci_task_refuse(ihost, task,
- SAS_TASK_COMPLETE,
- SAS_QUEUE_FULL);
+ if (test_bit(IDEV_GONE, &idev->flags)) {
+
+ /* Indicate that the device
+ * is gone.
+ */
+ isci_task_refuse(ihost, task,
+ SAS_TASK_UNDELIVERED,
+ SAS_DEVICE_UNKNOWN);
+ } else {
+ /* Indicate QUEUE_FULL so that
+ * the scsi midlayer retries.
+ * If the request failed for
+ * remote device reasons, it
+ * gets returned as
+ * SAS_TASK_UNDELIVERED next
+ * time through.
+ */
+ isci_task_refuse(ihost, task,
+ SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ }
}
}
}
@@ -243,7 +254,7 @@ static enum sci_status isci_sata_management_task_request_build(struct isci_reque
struct isci_tmf *isci_tmf;
enum sci_status status;
- if (tmf_task != ireq->ttype)
+ if (!test_bit(IREQ_TMF, &ireq->flags))
return SCI_FAILURE;
isci_tmf = isci_request_access_tmf(ireq);
@@ -327,6 +338,60 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return ireq;
}
+/**
+ * isci_request_mark_zombie() - This function must be called with scic_lock held.
+ */
+static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
+{
+ struct completion *tmf_completion = NULL;
+ struct completion *req_completion;
+
+ /* Set the request state to "dead". */
+ ireq->status = dead;
+
+ req_completion = ireq->io_request_completion;
+ ireq->io_request_completion = NULL;
+
+ if (test_bit(IREQ_TMF, &ireq->flags)) {
+ /* Break links with the TMF request. */
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ /* In the case where a task request is dying,
+ * the thread waiting on the completion will sit and
+ * time out unless we wake it now. Since the TMF
+ * has a default error status, complete it here
+ * to wake the waiting thread.
+ */
+ if (tmf) {
+ tmf_completion = tmf->complete;
+ tmf->complete = NULL;
+
+ dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
+ __func__, tmf->tmf_code, tmf->io_tag);
+ }
+ ireq->ttype_ptr.tmf_task_ptr = NULL;
+ } else {
+ /* Break links with the sas_task - the callback is done
+ * elsewhere.
+ */
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ if (task)
+ task->lldd_task = NULL;
+
+ ireq->ttype_ptr.io_task_ptr = NULL;
+ }
+
+ dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
+ ireq->io_tag);
+
+ /* Don't force waiting threads to timeout. */
+ if (req_completion)
+ complete(req_completion);
+
+ if (tmf_completion != NULL)
+ complete(tmf_completion);
+}
+
static int isci_task_execute_tmf(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -364,6 +429,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
/* Assign the pointer to the TMF's completion kernel wait structure. */
tmf->complete = &completion;
+ tmf->status = SCI_FAILURE_TIMEOUT;
ireq = isci_task_request_build(ihost, idev, tag, tmf);
if (!ireq)
@@ -399,18 +465,35 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
msecs_to_jiffies(timeout_ms));
if (timeleft == 0) {
+ /* The TMF did not complete - this could be because
+ * of an unplug. Terminate the TMF request now.
+ */
spin_lock_irqsave(&ihost->scic_lock, flags);
if (tmf->cb_state_func != NULL)
- tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+ tmf->cb_state_func(isci_tmf_timed_out, tmf,
+ tmf->cb_data);
- sci_controller_terminate_request(ihost,
- idev,
- ireq);
+ sci_controller_terminate_request(ihost, idev, ireq);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
- wait_for_completion(tmf->complete);
+ timeleft = wait_for_completion_timeout(
+ &completion,
+ msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
+
+ if (!timeleft) {
+ /* Strange condition - the termination of the TMF
+ * request timed out.
+ */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* If the TMF status has not changed, kill it. */
+ if (tmf->status == SCI_FAILURE_TIMEOUT)
+ isci_request_mark_zombie(ihost, ireq);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
}
isci_print_tmf(tmf);
@@ -501,48 +584,17 @@ static enum isci_request_status isci_task_validate_request_to_abort(
return old_state;
}
-/**
-* isci_request_cleanup_completed_loiterer() - This function will take care of
-* the final cleanup on any request which has been explicitly terminated.
-* @isci_host: This parameter specifies the ISCI host object
-* @isci_device: This is the device to which the request is pending.
-* @isci_request: This parameter specifies the terminated request object.
-* @task: This parameter is the libsas I/O request.
-*/
-static void isci_request_cleanup_completed_loiterer(
- struct isci_host *isci_host,
- struct isci_remote_device *isci_device,
- struct isci_request *isci_request,
- struct sas_task *task)
+static int isci_request_is_dealloc_managed(enum isci_request_status stat)
{
- unsigned long flags;
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_device=%p, request=%p, task=%p\n",
- __func__, isci_device, isci_request, task);
-
- if (task != NULL) {
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->lldd_task = NULL;
-
- task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-
- isci_set_task_doneflags(task);
-
- /* If this task is not in the abort path, call task_done. */
- if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- task->task_done(task);
- } else
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- }
-
- if (isci_request != NULL) {
- spin_lock_irqsave(&isci_host->scic_lock, flags);
- list_del_init(&isci_request->dev_node);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ switch (stat) {
+ case aborted:
+ case aborting:
+ case terminating:
+ case completed:
+ case dead:
+ return true;
+ default:
+ return false;
}
}
@@ -563,11 +615,9 @@ static void isci_terminate_request_core(struct isci_host *ihost,
enum sci_status status = SCI_SUCCESS;
bool was_terminated = false;
bool needs_cleanup_handling = false;
- enum isci_request_status request_status;
unsigned long flags;
unsigned long termination_completed = 1;
struct completion *io_request_completion;
- struct sas_task *task;
dev_dbg(&ihost->pdev->dev,
"%s: device = %p; request = %p\n",
@@ -577,10 +627,6 @@ static void isci_terminate_request_core(struct isci_host *ihost,
io_request_completion = isci_request->io_request_completion;
- task = (isci_request->ttype == io_task)
- ? isci_request_access_task(isci_request)
- : NULL;
-
/* Note that we are not going to control
* the target to abort the request.
*/
@@ -619,42 +665,27 @@ static void isci_terminate_request_core(struct isci_host *ihost,
__func__, isci_request, io_request_completion);
/* Wait here for the request to complete. */
- #define TERMINATION_TIMEOUT_MSEC 500
termination_completed
= wait_for_completion_timeout(
io_request_completion,
- msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+ msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
if (!termination_completed) {
/* The request to terminate has timed out. */
- spin_lock_irqsave(&ihost->scic_lock,
- flags);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
/* Check for state changes. */
- if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+ if (!test_bit(IREQ_TERMINATED,
+ &isci_request->flags)) {
/* The best we can do is to have the
* request die a silent death if it
* ever really completes.
- *
- * Set the request state to "dead",
- * and clear the task pointer so that
- * an actual completion event callback
- * doesn't do anything.
*/
- isci_request->status = dead;
- isci_request->io_request_completion
- = NULL;
-
- if (isci_request->ttype == io_task) {
-
- /* Break links with the
- * sas_task.
- */
- isci_request->ttype_ptr.io_task_ptr
- = NULL;
- }
+ isci_request_mark_zombie(ihost,
+ isci_request);
+ needs_cleanup_handling = true;
} else
termination_completed = 1;
@@ -691,29 +722,28 @@ static void isci_terminate_request_core(struct isci_host *ihost,
* needs to be detached and freed here.
*/
spin_lock_irqsave(&isci_request->state_lock, flags);
- request_status = isci_request->status;
-
- if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
- && ((request_status == aborted)
- || (request_status == aborting)
- || (request_status == terminating)
- || (request_status == completed)
- || (request_status == dead)
- )
- ) {
-
- /* The completion routine won't free a request in
- * the aborted/aborting/etc. states, so we do
- * it here.
- */
- needs_cleanup_handling = true;
- }
+
+ needs_cleanup_handling
+ = isci_request_is_dealloc_managed(
+ isci_request->status);
+
spin_unlock_irqrestore(&isci_request->state_lock, flags);
}
- if (needs_cleanup_handling)
- isci_request_cleanup_completed_loiterer(
- ihost, idev, isci_request, task);
+ if (needs_cleanup_handling) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: cleanup isci_device=%p, request=%p\n",
+ __func__, idev, isci_request);
+
+ if (isci_request != NULL) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_free_tag(ihost, isci_request->io_tag);
+ isci_request_change_state(isci_request, unallocated);
+ list_del_init(&isci_request->dev_node);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+ }
}
}
@@ -772,7 +802,9 @@ void isci_terminate_pending_requests(struct isci_host *ihost,
dev_dbg(&ihost->pdev->dev,
"%s: idev=%p request=%p; task=%p old_state=%d\n",
__func__, idev, ireq,
- ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+ (!test_bit(IREQ_TMF, &ireq->flags)
+ ? isci_request_access_task(ireq)
+ : NULL),
old_state);
/* If the old_state is started:
@@ -889,22 +921,14 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
__func__, domain_device, isci_host, isci_device);
- if (isci_device)
- set_bit(IDEV_EH, &isci_device->flags);
+ if (!isci_device) {
+ /* If the device is gone, stop the escalations. */
+ dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
- /* If there is a device reset pending on any request in the
- * device's list, fail this LUN reset request in order to
- * escalate to the device reset.
- */
- if (!isci_device ||
- isci_device_is_reset_pending(isci_host, isci_device)) {
- dev_dbg(&isci_host->pdev->dev,
- "%s: No dev (%p), or "
- "RESET PENDING: domain_device=%p\n",
- __func__, isci_device, domain_device);
- ret = TMF_RESP_FUNC_FAILED;
+ ret = TMF_RESP_FUNC_COMPLETE;
goto out;
}
+ set_bit(IDEV_EH, &isci_device->flags);
/* Send the task management part of the reset. */
if (sas_protocol_ata(domain_device->tproto)) {
@@ -1013,7 +1037,7 @@ int isci_task_abort_task(struct sas_task *task)
struct isci_tmf tmf;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
- bool any_dev_reset = false;
+ int perform_termination = 0;
/* Get the isci_request reference from the task. Note that
* this check does not depend on the pending request list
@@ -1035,89 +1059,34 @@ int isci_task_abort_task(struct sas_task *task)
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
dev_dbg(&isci_host->pdev->dev,
- "%s: task = %p\n", __func__, task);
-
- if (!isci_device || !old_request)
- goto out;
-
- set_bit(IDEV_EH, &isci_device->flags);
-
- /* This version of the driver will fail abort requests for
- * SATA/STP. Failing the abort request this way will cause the
- * SCSI error handler thread to escalate to LUN reset
- */
- if (sas_protocol_ata(task->task_proto)) {
- dev_dbg(&isci_host->pdev->dev,
- " task %p is for a STP/SATA device;"
- " returning TMF_RESP_FUNC_FAILED\n"
- " to cause a LUN reset...\n", task);
- goto out;
- }
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: old_request == %p\n", __func__, old_request);
-
- any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
+ "%s: dev = %p, task = %p, old_request == %p\n",
+ __func__, isci_device, task, old_request);
- spin_lock_irqsave(&task->task_state_lock, flags);
-
- any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+ if (isci_device)
+ set_bit(IDEV_EH, &isci_device->flags);
- /* If the extraction of the request reference from the task
- * failed, then the request has been completed (or if there is a
- * pending reset then this abort request function must be failed
- * in order to escalate to the target reset).
+ /* Device reset conditions signalled in task_state_flags are the
+ * responsibility of libsas to observe at the start of the error
+ * handler thread.
*/
- if ((old_request == NULL) || any_dev_reset) {
-
- /* If the device reset task flag is set, fail the task
- * management request. Otherwise, the original request
- * has completed.
- */
- if (any_dev_reset) {
-
- /* Turn off the task's DONE to make sure this
- * task is escalated to a target reset.
- */
- task->task_state_flags &= ~SAS_TASK_STATE_DONE;
-
- /* Make the reset happen as soon as possible. */
- task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
-
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- /* Fail the task management request in order to
- * escalate to the target reset.
- */
- ret = TMF_RESP_FUNC_FAILED;
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: Failing task abort in order to "
- "escalate to target reset because\n"
- "SAS_TASK_NEED_DEV_RESET is set for "
- "task %p on dev %p\n",
- __func__, task, isci_device);
-
-
- } else {
- /* The request has already completed and there
- * is nothing to do here other than to set the task
- * done bit, and indicate that the task abort function
- * was sucessful.
- */
- isci_set_task_doneflags(task);
-
- spin_unlock_irqrestore(&task->task_state_lock, flags);
+ if (!isci_device || !old_request) {
+ /* The request has already completed and there
+ * is nothing to do here other than to set the task
+ * done bit, and indicate that the task abort function
+ * was successful.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_COMPLETE;
- dev_dbg(&isci_host->pdev->dev,
- "%s: abort task not needed for %p\n",
- __func__, task);
- }
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: abort task not needed for %p\n",
+ __func__, task);
goto out;
- } else {
- spin_unlock_irqrestore(&task->task_state_lock, flags);
}
spin_lock_irqsave(&isci_host->scic_lock, flags);
@@ -1146,24 +1115,44 @@ int isci_task_abort_task(struct sas_task *task)
goto out;
}
if (task->task_proto == SAS_PROTOCOL_SMP ||
+ sas_protocol_ata(task->task_proto) ||
test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
dev_dbg(&isci_host->pdev->dev,
- "%s: SMP request (%d)"
+ "%s: %s request"
" or complete_in_target (%d), thus no TMF\n",
- __func__, (task->task_proto == SAS_PROTOCOL_SMP),
+ __func__,
+ ((task->task_proto == SAS_PROTOCOL_SMP)
+ ? "SMP"
+ : (sas_protocol_ata(task->task_proto)
+ ? "SATA/STP"
+ : "<other>")
+ ),
test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
- /* Set the state on the task. */
- isci_task_all_done(task);
-
- ret = TMF_RESP_FUNC_COMPLETE;
+ if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ } else {
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
- /* Stopping and SMP devices are not sent a TMF, and are not
- * reset, but the outstanding I/O request is terminated below.
+ /* STP and SMP devices are not sent a TMF, but the
+ * outstanding I/O request is terminated below. This is
+ * because SATA/STP and SMP discovery path timeouts directly
+ * call the abort task interface for cleanup.
*/
+ perform_termination = 1;
+
} else {
/* Fill in the tmf structure */
isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
@@ -1172,22 +1161,24 @@ int isci_task_abort_task(struct sas_task *task)
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
- #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
ISCI_ABORT_TASK_TIMEOUT_MS);
- if (ret != TMF_RESP_FUNC_COMPLETE)
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ perform_termination = 1;
+ else
dev_dbg(&isci_host->pdev->dev,
- "%s: isci_task_send_tmf failed\n",
- __func__);
+ "%s: isci_task_send_tmf failed\n", __func__);
}
- if (ret == TMF_RESP_FUNC_COMPLETE) {
+ if (perform_termination) {
set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
/* Clean up the request on our side, and wait for the aborted
* I/O to complete.
*/
- isci_terminate_request_core(isci_host, isci_device, old_request);
+ isci_terminate_request_core(isci_host, isci_device,
+ old_request);
}
/* Make sure we do not leave a reference to aborted_io_completion */
@@ -1288,7 +1279,8 @@ isci_task_request_complete(struct isci_host *ihost,
enum sci_task_status completion_status)
{
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
- struct completion *tmf_complete;
+ struct completion *tmf_complete = NULL;
+ struct completion *request_complete = ireq->io_request_completion;
dev_dbg(&ihost->pdev->dev,
"%s: request = %p, status=%d\n",
@@ -1296,278 +1288,53 @@ isci_task_request_complete(struct isci_host *ihost,
isci_request_change_state(ireq, completed);
- tmf->status = completion_status;
set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
- if (tmf->proto == SAS_PROTOCOL_SSP) {
- memcpy(&tmf->resp.resp_iu,
- &ireq->ssp.rsp,
- SSP_RESP_IU_MAX_SIZE);
- } else if (tmf->proto == SAS_PROTOCOL_SATA) {
- memcpy(&tmf->resp.d2h_fis,
- &ireq->stp.rsp,
- sizeof(struct dev_to_host_fis));
+ if (tmf) {
+ tmf->status = completion_status;
+
+ if (tmf->proto == SAS_PROTOCOL_SSP) {
+ memcpy(&tmf->resp.resp_iu,
+ &ireq->ssp.rsp,
+ SSP_RESP_IU_MAX_SIZE);
+ } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+ memcpy(&tmf->resp.d2h_fis,
+ &ireq->stp.rsp,
+ sizeof(struct dev_to_host_fis));
+ }
+ /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+ tmf_complete = tmf->complete;
}
-
- /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
- tmf_complete = tmf->complete;
-
sci_controller_complete_io(ihost, ireq->target_device, ireq);
/* set the 'terminated' flag handle to make sure it cannot be terminated
* or completed again.
*/
set_bit(IREQ_TERMINATED, &ireq->flags);
- isci_request_change_state(ireq, unallocated);
- list_del_init(&ireq->dev_node);
-
- /* The task management part completes last. */
- complete(tmf_complete);
-}
-
-static void isci_smp_task_timedout(unsigned long _task)
-{
- struct sas_task *task = (void *) _task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->completion);
-}
-
-static void isci_smp_task_done(struct sas_task *task)
-{
- if (!del_timer(&task->timer))
- return;
- complete(&task->completion);
-}
-
-static struct sas_task *isci_alloc_task(void)
-{
- struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
-
- if (task) {
- INIT_LIST_HEAD(&task->list);
- spin_lock_init(&task->task_state_lock);
- task->task_state_flags = SAS_TASK_STATE_PENDING;
- init_timer(&task->timer);
- init_completion(&task->completion);
- }
-
- return task;
-}
-
-static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
-{
- if (task) {
- BUG_ON(!list_empty(&task->list));
- kfree(task);
- }
-}
-
-static int isci_smp_execute_task(struct isci_host *ihost,
- struct domain_device *dev, void *req,
- int req_size, void *resp, int resp_size)
-{
- int res, retry;
- struct sas_task *task = NULL;
-
- for (retry = 0; retry < 3; retry++) {
- task = isci_alloc_task();
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
- sg_init_one(&task->smp_task.smp_req, req, req_size);
- sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
-
- task->task_done = isci_smp_task_done;
-
- task->timer.data = (unsigned long) task;
- task->timer.function = isci_smp_task_timedout;
- task->timer.expires = jiffies + 10*HZ;
- add_timer(&task->timer);
-
- res = isci_task_execute_task(task, 1, GFP_KERNEL);
-
- if (res) {
- del_timer(&task->timer);
- dev_dbg(&ihost->pdev->dev,
- "%s: executing SMP task failed:%d\n",
- __func__, res);
- goto ex_err;
- }
-
- wait_for_completion(&task->completion);
- res = -ECOMM;
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- dev_dbg(&ihost->pdev->dev,
- "%s: smp task timed out or aborted\n",
- __func__);
- isci_task_abort_task(task);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- dev_dbg(&ihost->pdev->dev,
- "%s: SMP task aborted and not done\n",
- __func__);
- goto ex_err;
- }
- }
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
- res = 0;
- break;
- }
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun */
- res = task->task_status.residual;
- break;
- }
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- res = -EMSGSIZE;
- break;
- } else {
- dev_dbg(&ihost->pdev->dev,
- "%s: task to dev %016llx response: 0x%x "
- "status 0x%x\n", __func__,
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- isci_free_task(ihost, task);
- task = NULL;
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- isci_free_task(ihost, task);
- return res;
-}
-
-#define DISCOVER_REQ_SIZE 16
-#define DISCOVER_RESP_SIZE 56
-
-int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
- struct domain_device *dev,
- int phy_id, int *adt)
-{
- struct smp_resp *disc_resp;
- u8 *disc_req;
- int res;
-
- disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
- if (!disc_resp)
- return -ENOMEM;
-
- disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
- if (disc_req) {
- disc_req[0] = SMP_REQUEST;
- disc_req[1] = SMP_DISCOVER;
- disc_req[9] = phy_id;
- } else {
- kfree(disc_resp);
- return -ENOMEM;
- }
- res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
- disc_resp, DISCOVER_RESP_SIZE);
- if (!res) {
- if (disc_resp->result != SMP_RESP_FUNC_ACC)
- res = disc_resp->result;
- else
- *adt = disc_resp->disc.attached_dev_type;
+ /* As soon as something is in the terminate path, deallocation is
+ * managed there. Note that the final non-managed state of a task
+ * request is "completed".
+ */
+ if ((ireq->status == completed) ||
+ !isci_request_is_dealloc_managed(ireq->status)) {
+ isci_request_change_state(ireq, unallocated);
+ isci_free_tag(ihost, ireq->io_tag);
+ list_del_init(&ireq->dev_node);
}
- kfree(disc_req);
- kfree(disc_resp);
-
- return res;
-}
-static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
-{
- struct domain_device *dev = idev->domain_dev;
- struct isci_port *iport = idev->isci_port;
- struct isci_host *ihost = iport->isci_host;
- int res, iteration = 0, attached_device_type;
- #define STP_WAIT_MSECS 25000
- unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
- unsigned long deadline = jiffies + tmo;
- enum {
- SMP_PHYWAIT_PHYDOWN,
- SMP_PHYWAIT_PHYUP,
- SMP_PHYWAIT_DONE
- } phy_state = SMP_PHYWAIT_PHYDOWN;
-
- /* While there is time, wait for the phy to go away and come back */
- while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
- int event = atomic_read(&iport->event);
-
- ++iteration;
-
- tmo = wait_event_timeout(ihost->eventq,
- event != atomic_read(&iport->event) ||
- !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
- tmo);
- /* link down, stop polling */
- if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
- break;
+ /* "request_complete" is set if the task was being terminated. */
+ if (request_complete)
+ complete(request_complete);
- dev_dbg(&ihost->pdev->dev,
- "%s: iport %p, iteration %d,"
- " phase %d: time_remaining %lu, bcns = %d\n",
- __func__, iport, iteration, phy_state,
- tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
-
- res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
- &attached_device_type);
- tmo = deadline - jiffies;
-
- if (res) {
- dev_dbg(&ihost->pdev->dev,
- "%s: iteration %d, phase %d:"
- " SMP error=%d, time_remaining=%lu\n",
- __func__, iteration, phy_state, res, tmo);
- break;
- }
- dev_dbg(&ihost->pdev->dev,
- "%s: iport %p, iteration %d,"
- " phase %d: time_remaining %lu, bcns = %d, "
- "attdevtype = %x\n",
- __func__, iport, iteration, phy_state,
- tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
- attached_device_type);
-
- switch (phy_state) {
- case SMP_PHYWAIT_PHYDOWN:
- /* Has the device gone away? */
- if (!attached_device_type)
- phy_state = SMP_PHYWAIT_PHYUP;
-
- break;
-
- case SMP_PHYWAIT_PHYUP:
- /* Has the device come back? */
- if (attached_device_type)
- phy_state = SMP_PHYWAIT_DONE;
- break;
-
- case SMP_PHYWAIT_DONE:
- break;
- }
-
- }
- dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
+ /* The task management part completes last. */
+ if (tmf_complete)
+ complete(tmf_complete);
}
static int isci_reset_device(struct isci_host *ihost,
struct isci_remote_device *idev)
{
struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
- struct isci_port *iport = idev->isci_port;
enum sci_status status;
unsigned long flags;
int rc;
@@ -1587,13 +1354,6 @@ static int isci_reset_device(struct isci_host *ihost,
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
- /* Make sure all pending requests are able to be fully terminated. */
- isci_device_clear_reset_pending(ihost, idev);
-
- /* If this is a device on an expander, disable BCN processing. */
- if (!scsi_is_sas_phy_local(phy))
- set_bit(IPORT_BCN_BLOCKED, &iport->flags);
-
rc = sas_phy_reset(phy, true);
/* Terminate in-progress I/O now. */
@@ -1604,21 +1364,6 @@ static int isci_reset_device(struct isci_host *ihost,
status = sci_remote_device_reset_complete(idev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
- /* If this is a device on an expander, bring the phy back up. */
- if (!scsi_is_sas_phy_local(phy)) {
- /* A phy reset will cause the device to go away then reappear.
- * Since libsas will take action on incoming BCNs (eg. remove
- * a device going through an SMP phy-control driven reset),
- * we need to wait until the phy comes back up before letting
- * discovery proceed in libsas.
- */
- isci_wait_for_smp_phy_reset(idev, phy->number);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
- isci_port_bcn_enable(ihost, idev->isci_port);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
- }
-
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: sci_remote_device_reset_complete(%p) "
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 4a7fa90287e..bc78c0a41d5 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -58,6 +58,8 @@
#include <scsi/sas_ata.h>
#include "host.h"
+#define ISCI_TERMINATION_TIMEOUT_MSEC 500
+
struct isci_request;
/**
@@ -224,35 +226,6 @@ enum isci_completion_selection {
isci_perform_error_io_completion /* Use sas_task_abort */
};
-static inline void isci_set_task_doneflags(
- struct sas_task *task)
-{
- /* Since no futher action will be taken on this task,
- * make sure to mark it complete from the lldd perspective.
- */
- task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
-}
-/**
- * isci_task_all_done() - This function clears the task bits to indicate the
- * LLDD is done with the task.
- *
- *
- */
-static inline void isci_task_all_done(
- struct sas_task *task)
-{
- unsigned long flags;
-
- /* Since no futher action will be taken on this task,
- * make sure to mark it complete from the lldd perspective.
- */
- spin_lock_irqsave(&task->task_state_lock, flags);
- isci_set_task_doneflags(task);
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
/**
* isci_task_set_completion_status() - This function sets the completion status
* for the request.
@@ -286,6 +259,25 @@ isci_task_set_completion_status(
task->task_status.resp = response;
task->task_status.stat = status;
+ switch (task->task_proto) {
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+
+ if (task_notification_selection
+ == isci_perform_error_io_completion) {
+ /* SATA/STP I/O has its own means of scheduling device
+ * error handling on the normal path.
+ */
+ task_notification_selection
+ = isci_perform_normal_io_completion;
+ }
+ break;
+ default:
+ break;
+ }
+
switch (task_notification_selection) {
case isci_perform_error_io_completion:
@@ -315,7 +307,9 @@ isci_task_set_completion_status(
/* Fall through to the normal case... */
case isci_perform_normal_io_completion:
/* Normal notification (task_done) */
- isci_set_task_doneflags(task);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
break;
default:
WARN_ONCE(1, "unknown task_notification_selection: %d\n",
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7724414588f..7c34d8e7cc7 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
+#include <linux/module.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -872,6 +873,61 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
+static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
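The attr_is_visible callback added above takes over the role of the param_mask/host_param_mask bitmaps that are removed from iscsi_sw_tcp_transport further down in this hunk. A minimal sketch of how a caller might consult it is shown below; the helper name is hypothetical and it assumes struct iscsi_transport from <scsi/scsi_transport_iscsi.h>.

static bool param_is_visible(struct iscsi_transport *t, int param_type, int param)
{
	/* sketch only: treat a missing callback as "not visible" */
	return t->attr_is_visible && t->attr_is_visible(param_type, param) != 0;
}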
static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
{
set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
@@ -910,33 +966,6 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
.name = "tcp",
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST,
- .param_mask = ISCSI_MAX_RECV_DLENGTH |
- ISCSI_MAX_XMIT_DLENGTH |
- ISCSI_HDRDGST_EN |
- ISCSI_DATADGST_EN |
- ISCSI_INITIAL_R2T_EN |
- ISCSI_MAX_R2T |
- ISCSI_IMM_DATA_EN |
- ISCSI_FIRST_BURST |
- ISCSI_MAX_BURST |
- ISCSI_PDU_INORDER_EN |
- ISCSI_DATASEQ_INORDER_EN |
- ISCSI_ERL |
- ISCSI_CONN_PORT |
- ISCSI_CONN_ADDRESS |
- ISCSI_EXP_STATSN |
- ISCSI_PERSISTENT_PORT |
- ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO |
- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
- ISCSI_HOST_INITIATOR_NAME |
- ISCSI_HOST_NETDEV_NAME,
/* session management */
.create_session = iscsi_sw_tcp_session_create,
.destroy_session = iscsi_sw_tcp_session_destroy,
@@ -944,6 +973,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
.create_conn = iscsi_sw_tcp_conn_create,
.bind_conn = iscsi_sw_tcp_conn_bind,
.destroy_conn = iscsi_sw_tcp_conn_destroy,
+ .attr_is_visible = iscsi_sw_tcp_attr_is_visible,
.set_param = iscsi_sw_tcp_conn_set_param,
.get_conn_param = iscsi_sw_tcp_conn_get_param,
.get_session_param = iscsi_session_get_param,
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index b9cb8140b39..7269e928824 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -35,6 +35,7 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 9b25969e2ad..fb9161dc4ca 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -21,6 +21,7 @@
* Provide interface to send ELS/CT FC frames
*/
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ns.h>
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d261e982a2f..9de9db27e87 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -26,6 +26,7 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <scsi/fc/fc_fc2.h>
@@ -65,16 +66,15 @@ static struct workqueue_struct *fc_exch_workqueue;
* assigned range of exchanges to per cpu pool.
*/
struct fc_exch_pool {
+ spinlock_t lock;
+ struct list_head ex_list;
u16 next_index;
u16 total_exches;
/* two cache of free slot in exch array */
u16 left;
u16 right;
-
- spinlock_t lock;
- struct list_head ex_list;
-};
+} ____cacheline_aligned_in_smp;
/**
* struct fc_exch_mgr - The Exchange Manager (EM).
@@ -91,13 +91,13 @@ struct fc_exch_pool {
* It manages the allocation of exchange IDs.
*/
struct fc_exch_mgr {
+ struct fc_exch_pool *pool;
+ mempool_t *ep_pool;
enum fc_class class;
struct kref kref;
u16 min_xid;
u16 max_xid;
- mempool_t *ep_pool;
u16 pool_max_index;
- struct fc_exch_pool *pool;
/*
* currently exchange mgr stats are updated but not used.
@@ -470,6 +470,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
struct fc_frame_header *fh = fc_frame_header_get(fp);
int error;
u32 f_ctl;
+ u8 fh_type = fh->fh_type;
ep = fc_seq_exch(sp);
WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
@@ -494,7 +495,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
*/
error = lport->tt.frame_send(lport, fp);
- if (fh->fh_type == FC_TYPE_BLS)
+ if (fh_type == FC_TYPE_BLS)
return error;
/*
@@ -1793,6 +1794,9 @@ restart:
goto restart;
}
}
+ pool->next_index = 0;
+ pool->left = FC_XID_UNKNOWN;
+ pool->right = FC_XID_UNKNOWN;
spin_unlock_bh(&pool->lock);
}
@@ -2281,6 +2285,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
goto free_mempool;
for_each_possible_cpu(cpu) {
pool = per_cpu_ptr(mp->pool, cpu);
+ pool->next_index = 0;
pool->left = FC_XID_UNKNOWN;
pool->right = FC_XID_UNKNOWN;
spin_lock_init(&pool->lock);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 4c41ee816f0..221875ec3d7 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -759,7 +759,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
goto out;
if (fc_fcp_lock_pkt(fsp))
goto out;
- fsp->last_pkt_time = jiffies;
if (fh->fh_type == FC_TYPE_BLS) {
fc_fcp_abts_resp(fsp, fp);
@@ -1148,7 +1147,6 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
rc = -1;
goto unlock;
}
- fsp->last_pkt_time = jiffies;
fsp->seq_ptr = seq;
fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index b7735129f1f..1bf9841ef15 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
+#include <linux/module.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 628f347404f..e77094a587e 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -89,6 +89,7 @@
#include <linux/timer.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -1030,16 +1031,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
FCH_EVT_LIPRESET, 0);
fc_vports_linkchange(lport);
fc_lport_reset_locked(lport);
- if (lport->link_up) {
- /*
- * Wait upto resource allocation time out before
- * doing re-login since incomplete FIP exchanged
- * from last session may collide with exchanges
- * in new session.
- */
- msleep(lport->r_a_tov);
+ if (lport->link_up)
fc_lport_enter_flogi(lport);
- }
}
/**
@@ -1481,6 +1474,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
void *lp_arg)
{
struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
struct fc_els_flogi *flp;
u32 did;
u16 csp_flags;
@@ -1508,49 +1502,56 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
goto err;
}
+ fh = fc_frame_header_get(fp);
did = fc_frame_did(fp);
- if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
- flp = fc_frame_payload_get(fp, sizeof(*flp));
- if (flp) {
- mfs = ntohs(flp->fl_csp.sp_bb_data) &
- FC_SP_BB_DATA_MASK;
- if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
- mfs < lport->mfs)
- lport->mfs = mfs;
- csp_flags = ntohs(flp->fl_csp.sp_features);
- r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
- e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
- if (csp_flags & FC_SP_FT_EDTR)
- e_d_tov /= 1000000;
-
- lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
-
- if ((csp_flags & FC_SP_FT_FPORT) == 0) {
- if (e_d_tov > lport->e_d_tov)
- lport->e_d_tov = e_d_tov;
- lport->r_a_tov = 2 * e_d_tov;
- fc_lport_set_port_id(lport, did, fp);
- printk(KERN_INFO "host%d: libfc: "
- "Port (%6.6x) entered "
- "point-to-point mode\n",
- lport->host->host_no, did);
- fc_lport_ptp_setup(lport, fc_frame_sid(fp),
- get_unaligned_be64(
- &flp->fl_wwpn),
- get_unaligned_be64(
- &flp->fl_wwnn));
- } else {
- lport->e_d_tov = e_d_tov;
- lport->r_a_tov = r_a_tov;
- fc_host_fabric_name(lport->host) =
- get_unaligned_be64(&flp->fl_wwnn);
- fc_lport_set_port_id(lport, did, fp);
- fc_lport_enter_dns(lport);
- }
- }
- } else {
- FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
+ if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
+ fc_frame_payload_op(fp) != ELS_LS_ACC) {
+ FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ if (!flp) {
+ FC_LPORT_DBG(lport, "FLOGI bad response\n");
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ mfs = ntohs(flp->fl_csp.sp_bb_data) &
+ FC_SP_BB_DATA_MASK;
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
+ mfs < lport->mfs)
+ lport->mfs = mfs;
+ csp_flags = ntohs(flp->fl_csp.sp_features);
+ r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+ e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+ if (csp_flags & FC_SP_FT_EDTR)
+ e_d_tov /= 1000000;
+
+ lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
+
+ if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ lport->r_a_tov = 2 * e_d_tov;
+ fc_lport_set_port_id(lport, did, fp);
+ printk(KERN_INFO "host%d: libfc: "
+ "Port (%6.6x) entered "
+ "point-to-point mode\n",
+ lport->host->host_no, did);
+ fc_lport_ptp_setup(lport, fc_frame_sid(fp),
+ get_unaligned_be64(
+ &flp->fl_wwpn),
+ get_unaligned_be64(
+ &flp->fl_wwnn));
+ } else {
+ lport->e_d_tov = e_d_tov;
+ lport->r_a_tov = r_a_tov;
+ fc_host_fabric_name(lport->host) =
+ get_unaligned_be64(&flp->fl_wwnn);
+ fc_lport_set_port_id(lport, did, fp);
+ fc_lport_enter_dns(lport);
}
out:
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index f33b897e478..9fbf78ed821 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -22,6 +22,7 @@
*/
#include <scsi/libfc.h>
+#include <linux/export.h>
/**
* fc_vport_create() - Create a new NPIV vport instance
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 760db761944..b9e434844a6 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -51,6 +51,7 @@
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 256a999d010..143bbe448be 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
@@ -3163,7 +3164,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- uint32_t value;
switch(param) {
case ISCSI_PARAM_FAST_ABORT:
@@ -3220,14 +3220,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_ERL:
sscanf(buf, "%d", &session->erl);
break;
- case ISCSI_PARAM_IFMARKER_EN:
- sscanf(buf, "%d", &value);
- BUG_ON(value);
- break;
- case ISCSI_PARAM_OFMARKER_EN:
- sscanf(buf, "%d", &value);
- BUG_ON(value);
- break;
case ISCSI_PARAM_EXP_STATSN:
sscanf(buf, "%u", &conn->exp_statsn);
break;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 09b232fd9a1..5715a3d0a3d 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
+#include <linux/module.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index f5831930df9..54a5199ceb5 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -219,17 +219,20 @@ out_err2:
/* ---------- Device registration and unregistration ---------- */
-static inline void sas_unregister_common_dev(struct domain_device *dev)
+static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
{
sas_notify_lldd_dev_gone(dev);
if (!dev->parent)
dev->port->port_dev = NULL;
else
list_del_init(&dev->siblings);
+
+ spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
+ spin_unlock_irq(&port->dev_list_lock);
}
-void sas_unregister_dev(struct domain_device *dev)
+void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (dev->rphy) {
sas_remove_children(&dev->rphy->dev);
@@ -241,15 +244,15 @@ void sas_unregister_dev(struct domain_device *dev)
kfree(dev->ex_dev.ex_phy);
dev->ex_dev.ex_phy = NULL;
}
- sas_unregister_common_dev(dev);
+ sas_unregister_common_dev(port, dev);
}
void sas_unregister_domain_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node)
- sas_unregister_dev(dev);
+ list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node)
+ sas_unregister_dev(port, dev);
port->port->rphy = NULL;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 16ad97df5ba..1b831c55ec6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -199,6 +199,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
phy->virtual = dr->virtual;
phy->last_da_index = -1;
+ phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
+ phy->phy->identify.device_type = phy->attached_dev_type;
phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
phy->phy->identify.target_port_protocols = phy->attached_tproto;
phy->phy->identify.phy_identifier = phy_id;
@@ -329,6 +331,7 @@ static void ex_assign_report_general(struct domain_device *dev,
dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+ dev->ex_dev.t2t_supp = rg->t2t_supp;
dev->ex_dev.conf_route_table = rg->conf_route_table;
dev->ex_dev.configuring = rg->configuring;
memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
@@ -751,7 +754,10 @@ static struct domain_device *sas_ex_discover_end_dev(
out_list_del:
sas_rphy_free(child->rphy);
child->rphy = NULL;
+
+ spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
+ spin_unlock_irq(&parent->port->dev_list_lock);
out_free:
sas_port_delete(phy->port);
out_err:
@@ -1133,15 +1139,17 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
};
struct domain_device *parent = child->parent;
- sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x "
- "has %c:%c routing link!\n",
+ sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx "
+ "(T2T supp:%d) phy 0x%x has %c:%c routing link!\n",
ex_type[parent->dev_type],
SAS_ADDR(parent->sas_addr),
+ parent->ex_dev.t2t_supp,
parent_phy->phy_id,
ex_type[child->dev_type],
SAS_ADDR(child->sas_addr),
+ child->ex_dev.t2t_supp,
child_phy->phy_id,
ra_char[parent_phy->routing_attr],
@@ -1238,10 +1246,15 @@ static int sas_check_parent_topology(struct domain_device *child)
sas_print_parent_topology_bug(child, parent_phy, child_phy);
res = -ENODEV;
}
- } else if (parent_phy->routing_attr == TABLE_ROUTING &&
- child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
+ } else if (parent_phy->routing_attr == TABLE_ROUTING) {
+ if (child_phy->routing_attr == SUBTRACTIVE_ROUTING ||
+ (child_phy->routing_attr == TABLE_ROUTING &&
+ child_ex->t2t_supp && parent_ex->t2t_supp)) {
+ /* All good */;
+ } else {
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ res = -ENODEV;
+ }
}
break;
case FANOUT_DEV:
@@ -1729,7 +1742,7 @@ out:
return res;
}
-static void sas_unregister_ex_tree(struct domain_device *dev)
+static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev)
{
struct expander_device *ex = &dev->ex_dev;
struct domain_device *child, *n;
@@ -1738,11 +1751,11 @@ static void sas_unregister_ex_tree(struct domain_device *dev)
child->gone = 1;
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
- sas_unregister_ex_tree(child);
+ sas_unregister_ex_tree(port, child);
else
- sas_unregister_dev(child);
+ sas_unregister_dev(port, child);
}
- sas_unregister_dev(dev);
+ sas_unregister_dev(port, dev);
}
static void sas_unregister_devs_sas_addr(struct domain_device *parent,
@@ -1759,9 +1772,9 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
child->gone = 1;
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
- sas_unregister_ex_tree(child);
+ sas_unregister_ex_tree(parent->port, child);
else
- sas_unregister_dev(child);
+ sas_unregister_dev(parent->port, child);
break;
}
}
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 04ad8dd1a74..bb8f49269a6 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -11,6 +11,7 @@
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "sas_internal.h"
@@ -51,6 +52,91 @@ static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
resp_data[15] = rphy->identify.target_port_protocols;
}
+/**
+ * to_sas_gpio_gp_bit - given the gpio frame data find the byte/bit position of 'od'
+ * @od: od bit to find
+ * @data: incoming bitstream (from frame)
+ * @index: requested data register index (from frame)
+ * @count: total number of registers in the bitstream (from frame)
+ * @bit: bit position of 'od' in the returned byte
+ *
+ * returns NULL if 'od' is not in 'data'
+ *
+ * From SFF-8485 v0.7:
+ * "In GPIO_TX[1], bit 0 of byte 3 contains the first bit (i.e., OD0.0)
+ * and bit 7 of byte 0 contains the 32nd bit (i.e., OD10.1).
+ *
+ * In GPIO_TX[2], bit 0 of byte 3 contains the 33rd bit (i.e., OD10.2)
+ * and bit 7 of byte 0 contains the 64th bit (i.e., OD21.0)."
+ *
+ * The general-purpose (raw-bitstream) RX registers have the same layout
+ * although 'od' is renamed 'id' for 'input data'.
+ *
+ * SFF-8489 defines the behavior of the LEDs in response to the 'od' values.
+ */
+static u8 *to_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count, u8 *bit)
+{
+ unsigned int reg;
+ u8 byte;
+
+ /* gp registers start at index 1 */
+ if (index == 0)
+ return NULL;
+
+ index--; /* make index 0-based */
+ if (od < index * 32)
+ return NULL;
+
+ od -= index * 32;
+ reg = od >> 5;
+
+ if (reg >= count)
+ return NULL;
+
+ od &= (1 << 5) - 1;
+ byte = 3 - (od >> 3);
+ *bit = od & ((1 << 3) - 1);
+
+ return &data[reg * 4 + byte];
+}
+
+int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count)
+{
+ u8 *byte;
+ u8 bit;
+
+ byte = to_sas_gpio_gp_bit(od, data, index, count, &bit);
+ if (!byte)
+ return -1;
+
+ return (*byte >> bit) & 1;
+}
+EXPORT_SYMBOL(try_test_sas_gpio_gp_bit);
+
+static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
+ u8 reg_type, u8 reg_index, u8 reg_count,
+ u8 *req_data)
+{
+ struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt);
+ int written;
+
+ if (i->dft->lldd_write_gpio == NULL) {
+ resp_data[2] = SMP_RESP_FUNC_UNK;
+ return 0;
+ }
+
+ written = i->dft->lldd_write_gpio(sas_ha, reg_type, reg_index,
+ reg_count, req_data);
+
+ if (written < 0) {
+ resp_data[2] = SMP_RESP_FUNC_FAILED;
+ written = 0;
+ } else
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+
+ return written;
+}
+
static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 phy_id)
{
@@ -230,9 +316,23 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
/* Can't implement; hosts have no routes */
break;
- case SMP_WRITE_GPIO_REG:
- /* FIXME: need GPIO support in the transport class */
+ case SMP_WRITE_GPIO_REG: {
+ /* SFF-8485 v0.7 */
+ const int base_frame_size = 11;
+ int to_write = req_data[4];
+
+ if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
+ req->resid_len < base_frame_size + to_write * 4) {
+ resp_data[2] = SMP_RESP_INV_FRM_LEN;
+ break;
+ }
+
+ to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
+ req_data[3], to_write, &req_data[8]);
+ req->resid_len -= base_frame_size + to_write * 4;
+ rsp->resid_len -= 8;
break;
+ }
case SMP_CONF_ROUTE_INFO:
/* Can't implement; hosts have no routes */
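
The kernel-doc above gives the SFF-8485 bit layout in prose; the arithmetic in to_sas_gpio_gp_bit() is easier to follow in isolation. A userspace sketch of the same od-to-(register, byte, bit) mapping, for illustration only:

#include <stdio.h>

/*
 * Same math as to_sas_gpio_gp_bit(): map OD bit 'od' onto a
 * (register, byte, bit) position, where each 4-byte register holds
 * 32 OD bits and bit 0 of byte 3 is the first bit of the register.
 */
static int od_to_pos(unsigned int od, unsigned int index, unsigned int count,
		     unsigned int *reg, unsigned int *byte, unsigned int *bit)
{
	if (index == 0)
		return -1;		/* gp registers start at index 1 */
	index--;			/* make index 0-based */
	if (od < index * 32)
		return -1;
	od -= index * 32;
	*reg = od >> 5;
	if (*reg >= count)
		return -1;
	od &= 31;
	*byte = 3 - (od >> 3);		/* byte 3 carries the lowest bits */
	*bit = od & 7;
	return 0;
}

int main(void)
{
	unsigned int reg, byte, bit;

	/* OD0.0 lands in register 0, byte 3, bit 0 ... */
	od_to_pos(0, 1, 2, &reg, &byte, &bit);
	printf("od0  -> reg %u byte %u bit %u\n", reg, byte, bit);
	/* ... and the 32nd bit (od 31) in register 0, byte 0, bit 7. */
	od_to_pos(31, 1, 2, &reg, &byte, &bit);
	printf("od31 -> reg %u byte %u bit %u\n", reg, byte, bit);
	return 0;
}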
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 2dc55343f67..d81c3b1989f 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -37,7 +37,32 @@
#include "../scsi_sas_internal.h"
-struct kmem_cache *sas_task_cache;
+static struct kmem_cache *sas_task_cache;
+
+struct sas_task *sas_alloc_task(gfp_t flags)
+{
+ struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
+
+ if (task) {
+ INIT_LIST_HEAD(&task->list);
+ spin_lock_init(&task->task_state_lock);
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ init_timer(&task->timer);
+ init_completion(&task->completion);
+ }
+
+ return task;
+}
+EXPORT_SYMBOL_GPL(sas_alloc_task);
+
+void sas_free_task(struct sas_task *task)
+{
+ if (task) {
+ BUG_ON(!list_empty(&task->list));
+ kmem_cache_free(sas_task_cache, task);
+ }
+}
+EXPORT_SYMBOL_GPL(sas_free_task);
/*------------ SAS addr hash -----------*/
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
@@ -152,10 +177,15 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
static int sas_get_linkerrors(struct sas_phy *phy)
{
- if (scsi_is_sas_phy_local(phy))
- /* FIXME: we have no local phy stats
- * gathering at this time */
- return -EINVAL;
+ if (scsi_is_sas_phy_local(phy)) {
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
+ }
return sas_smp_get_phy_events(phy);
}
@@ -293,8 +323,7 @@ EXPORT_SYMBOL_GPL(sas_domain_release_transport);
static int __init sas_class_init(void)
{
- sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index eeba76cdf77..b6e233d9a0a 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <linux/firmware.h>
+#include <linux/export.h>
#include <linux/ctype.h>
#include "sas_internal.h"
@@ -182,79 +183,56 @@ int sas_queue_up(struct sas_task *task)
return 0;
}
-/**
- * sas_queuecommand -- Enqueue a command for processing
- * @parameters: See SCSI Core documentation
- *
- * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
- * call us without holding an IRQ spinlock...
- */
-static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*scsi_done)(struct scsi_cmnd *))
- __releases(host->host_lock)
- __acquires(dev->sata_dev.ap->lock)
- __releases(dev->sata_dev.ap->lock)
- __acquires(host->host_lock)
+int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- int res = 0;
- struct domain_device *dev = cmd_to_domain_dev(cmd);
- struct Scsi_Host *host = cmd->device->host;
struct sas_internal *i = to_sas_internal(host->transportt);
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_ha_struct *sas_ha = dev->port->ha;
+ struct sas_task *task;
+ int res = 0;
- spin_unlock_irq(host->host_lock);
+ /* If the device fell off, no sense in issuing commands */
+ if (dev->gone) {
+ cmd->result = DID_BAD_TARGET << 16;
+ goto out_done;
+ }
- {
- struct sas_ha_struct *sas_ha = dev->port->ha;
- struct sas_task *task;
-
- /* If the device fell off, no sense in issuing commands */
- if (dev->gone) {
- cmd->result = DID_BAD_TARGET << 16;
- scsi_done(cmd);
- goto out;
- }
+ if (dev_is_sata(dev)) {
+ unsigned long flags;
- if (dev_is_sata(dev)) {
- unsigned long flags;
+ spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+ res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+ return res;
+ }
- spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
- res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
- spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
- goto out;
- }
+ task = sas_create_task(cmd, dev, GFP_ATOMIC);
+ if (!task)
+ return SCSI_MLQUEUE_HOST_BUSY;
- res = -ENOMEM;
- task = sas_create_task(cmd, dev, GFP_ATOMIC);
- if (!task)
- goto out;
+ /* Queue up, Direct Mode or Task Collector Mode. */
+ if (sas_ha->lldd_max_execute_num < 2)
+ res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
+ else
+ res = sas_queue_up(task);
- cmd->scsi_done = scsi_done;
- /* Queue up, Direct Mode or Task Collector Mode. */
- if (sas_ha->lldd_max_execute_num < 2)
- res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
- else
- res = sas_queue_up(task);
+ if (res)
+ goto out_free_task;
+ return 0;
- /* Examine */
- if (res) {
- SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
- ASSIGN_SAS_TASK(cmd, NULL);
- sas_free_task(task);
- if (res == -SAS_QUEUE_FULL) {
- cmd->result = DID_SOFT_ERROR << 16; /* retry */
- res = 0;
- scsi_done(cmd);
- }
- goto out;
- }
- }
-out:
- spin_lock_irq(host->host_lock);
- return res;
+out_free_task:
+ SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
+ ASSIGN_SAS_TASK(cmd, NULL);
+ sas_free_task(task);
+ if (res == -SAS_QUEUE_FULL)
+ cmd->result = DID_SOFT_ERROR << 16; /* retry */
+ else
+ cmd->result = DID_ERROR << 16;
+out_done:
+ cmd->scsi_done(cmd);
+ return 0;
}
-DEF_SCSI_QCMD(sas_queuecommand)
-
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
@@ -784,8 +762,7 @@ int sas_target_alloc(struct scsi_target *starget)
return 0;
}
-#define SAS_DEF_QD 32
-#define SAS_MAX_QD 64
+#define SAS_DEF_QD 256
int sas_slave_configure(struct scsi_device *scsi_dev)
{
@@ -825,34 +802,41 @@ void sas_slave_destroy(struct scsi_device *scsi_dev)
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
- dev->sata_dev.ap->link.device[0].class = ATA_DEV_NONE;
+ sas_to_ata_dev(dev)->class = ATA_DEV_NONE;
}
-int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth,
- int reason)
+int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
- int res = min(new_depth, SAS_MAX_QD);
+ struct domain_device *dev = sdev_to_domain_dev(sdev);
- if (reason != SCSI_QDEPTH_DEFAULT)
+ if (dev_is_sata(dev))
+ return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth,
+ reason);
+
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ case SCSI_QDEPTH_RAMP_UP:
+ if (!sdev->tagged_supported)
+ depth = 1;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ scsi_track_queue_full(sdev, depth);
+ break;
+ default:
return -EOPNOTSUPP;
-
- if (scsi_dev->tagged_supported)
- scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
- res);
- else {
- struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
- sas_printk("device %llx LUN %x queue depth changed to 1\n",
- SAS_ADDR(dev->sas_addr),
- scsi_dev->lun);
- scsi_adjust_queue_depth(scsi_dev, 0, 1);
- res = 1;
}
- return res;
+ return depth;
}
int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
{
+ struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+
+ if (dev_is_sata(dev))
+ return -EINVAL;
+
if (!scsi_dev->tagged_supported)
return 0;
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index b13a3346894..a78e5bd3e51 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <linux/export.h>
#include <scsi/sas.h>
#include <scsi/libsas.h>
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index ff6a28ce9b6..0707ecdbaa3 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -23,6 +23,7 @@
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index c088a36d1f3..bb4c8e0584e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -846,8 +846,24 @@ struct lpfc_hba {
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
- struct dentry *debug_dumpData; /* BlockGuard BPL*/
- struct dentry *debug_dumpDif; /* BlockGuard BPL*/
+ struct dentry *debug_dumpData; /* BlockGuard BPL */
+ struct dentry *debug_dumpDif; /* BlockGuard BPL */
+ struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
+ struct dentry *debug_writeGuard; /* inject write guard_tag errors */
+ struct dentry *debug_writeApp; /* inject write app_tag errors */
+ struct dentry *debug_writeRef; /* inject write ref_tag errors */
+ struct dentry *debug_readApp; /* inject read app_tag errors */
+ struct dentry *debug_readRef; /* inject read ref_tag errors */
+
+ /* T10 DIF error injection */
+ uint32_t lpfc_injerr_wgrd_cnt;
+ uint32_t lpfc_injerr_wapp_cnt;
+ uint32_t lpfc_injerr_wref_cnt;
+ uint32_t lpfc_injerr_rapp_cnt;
+ uint32_t lpfc_injerr_rref_cnt;
+ sector_t lpfc_injerr_lba;
+#define LPFC_INJERR_LBA_OFF (sector_t)0xffffffffffffffff
+
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2542f1f8bf8..d0ebaeb7ef6 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
@@ -52,6 +53,13 @@
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
+/*
+ * Write key size should be multiple of 4. If write key is changed
+ * make sure that library write key is also changed.
+ */
+#define LPFC_REG_WRITE_KEY_SIZE 4
+#define LPFC_REG_WRITE_KEY "EMLX"
+
/**
* lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
* @incr: integer to convert.
@@ -693,7 +701,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
int rc;
if (!phba->cfg_enable_hba_reset)
- return -EIO;
+ return -EACCES;
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -742,9 +750,11 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
-
int status = -EINVAL;
+ if (!phba->cfg_enable_hba_reset)
+ return -EACCES;
+
if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
status = phba->lpfc_selective_reset(phba);
@@ -765,16 +775,21 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
* Returns:
* zero for success
**/
-static int
+int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
{
- struct lpfc_register portstat_reg;
+ struct lpfc_register portstat_reg = {0};
int i;
-
+ msleep(100);
lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&portstat_reg.word0);
+ /* verify if privileged for the requested operation */
+ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
+ !bf_get(lpfc_sliport_status_err, &portstat_reg))
+ return -EPERM;
+
/* wait for the SLI port firmware ready after firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
@@ -816,16 +831,13 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
int rc;
if (!phba->cfg_enable_hba_reset)
- return -EIO;
+ return -EACCES;
if ((phba->sli_rev < LPFC_SLI_REV4) ||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
- if (!pdev->is_physfn)
- return -EPERM;
-
/* Disable SR-IOV virtual functions if enabled */
if (phba->cfg_sriov_nr_virtfn) {
pci_disable_sriov(pdev);
@@ -858,7 +870,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (rc)
- return -EIO;
+ return rc;
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -984,7 +996,7 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
if (!status)
return strlen(buf);
else
- return -EIO;
+ return status;
}
/**
@@ -3885,18 +3897,23 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
if ((off + count) > FF_REG_AREA_SIZE)
return -ERANGE;
- if (count == 0) return 0;
+ if (count <= LPFC_REG_WRITE_KEY_SIZE)
+ return 0;
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+ /* This is to protect HBA registers from accidental writes. */
+ if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
+ return -EINVAL;
+
+ if (!(vport->fc_flag & FC_OFFLINE_MODE))
return -EPERM;
- }
spin_lock_irq(&phba->hbalock);
- for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
- writel(*((uint32_t *)(buf + buf_off)),
+ for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
+ buf_off += sizeof(uint32_t))
+ writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
phba->ctrl_regs_memmap_p + off + buf_off);
spin_unlock_irq(&phba->hbalock);
@@ -4097,8 +4114,10 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- int rc;
+ LPFC_MBOXQ_t *mboxq;
MAILBOX_t *pmb;
+ uint32_t mbox_tmo;
+ int rc;
if (off > MAILBOX_CMD_SIZE)
return -ERANGE;
@@ -4123,7 +4142,8 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
if (off == 0 &&
phba->sysfs_mbox.state == SMBOX_WRITING &&
phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
- pmb = &phba->sysfs_mbox.mbox->u.mb;
+ mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
+ pmb = &mboxq->u.mb;
switch (pmb->mbxCommand) {
/* Offline only */
case MBX_INIT_LINK:
@@ -4233,9 +4253,8 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
} else {
spin_unlock_irq(&phba->hbalock);
- rc = lpfc_sli_issue_mbox_wait (phba,
- phba->sysfs_mbox.mbox,
- lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
spin_lock_irq(&phba->hbalock);
}
@@ -4480,9 +4499,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
spin_lock_irq(shost->host_lock);
- if ((vport->fc_flag & FC_FABRIC) ||
- ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- (vport->fc_flag & FC_PUBLIC_LOOP)))
+ if ((vport->port_state > LPFC_FLOGI) &&
+ ((vport->fc_flag & FC_FABRIC) ||
+ ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+ (vport->fc_flag & FC_PUBLIC_LOOP))))
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
@@ -4555,9 +4575,17 @@ lpfc_get_stats(struct Scsi_Host *shost)
memset(hs, 0, sizeof (struct fc_host_statistics));
hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
- hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
+ /*
+ * The MBX_READ_STATUS returns tx_k_bytes, which has to be
+ * converted to words
+ */
+ hs->tx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+ * (uint64_t)256);
hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
- hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256);
+ hs->rx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+ * (uint64_t)256);
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
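
The tx_words/rx_words hunk above widens the byte-count arithmetic before multiplying by 256; kept in 32 bits, the product wraps as soon as the reported count exceeds 2^24. A small userspace illustration with a hypothetical counter value:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t kbytes = 20000000;	/* hypothetical count reported by the HBA */

	/* 32-bit multiply wraps: 20000000 * 256 = 5120000000 > UINT32_MAX. */
	uint32_t words32 = kbytes * 256;
	/* Widening first keeps the full value, as the patch now does. */
	uint64_t words64 = (uint64_t)kbytes * 256;

	printf("32-bit: %" PRIu32 "\n", words32);
	printf("64-bit: %" PRIu64 "\n", words64);
	return 0;
}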
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a6db6aef133..60f95347bab 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -209,7 +209,7 @@ void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_dev_check(struct lpfc_hba *);
-int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
@@ -451,3 +451,5 @@ int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
/* functions to support SR-IOV */
int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
+int lpfc_sli4_queue_create(struct lpfc_hba *);
+void lpfc_sli4_queue_destroy(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 779b88e1469..707081d0a22 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1856,6 +1856,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
case 2:
c = 'B';
break;
+ case 3:
+ c = 'X';
+ break;
default:
c = 0;
break;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a0424dd90e4..28382596fb9 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
@@ -996,6 +997,85 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
return nbytes;
}
+static int
+lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct dentry *dent = file->f_dentry;
+ struct lpfc_hba *phba = file->private_data;
+ char cbuf[16];
+ int cnt = 0;
+
+ if (dent == phba->debug_writeGuard)
+ cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+ else if (dent == phba->debug_writeApp)
+ cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
+ else if (dent == phba->debug_writeRef)
+ cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
+ else if (dent == phba->debug_readApp)
+ cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
+ else if (dent == phba->debug_readRef)
+ cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt);
+ else if (dent == phba->debug_InjErrLBA)
+ cnt = snprintf(cbuf, 16, "0x%lx\n",
+ (unsigned long) phba->lpfc_injerr_lba);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0547 Unknown debugfs error injection entry\n");
+
+ return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct dentry *dent = file->f_dentry;
+ struct lpfc_hba *phba = file->private_data;
+ char dstbuf[32];
+ unsigned long tmp;
+ int size;
+
+ memset(dstbuf, 0, 32);
+ size = (nbytes < 32) ? nbytes : 32;
+ if (copy_from_user(dstbuf, buf, size))
+ return 0;
+
+ if (strict_strtoul(dstbuf, 0, &tmp))
+ return 0;
+
+ if (dent == phba->debug_writeGuard)
+ phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_writeApp)
+ phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_writeRef)
+ phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_readApp)
+ phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_readRef)
+ phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_InjErrLBA)
+ phba->lpfc_injerr_lba = (sector_t)tmp;
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0548 Unknown debugfs error injection entry\n");
+
+ return nbytes;
+}
+
+static int
+lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
* @inode: The inode pointer that contains a vport pointer.
@@ -3380,6 +3460,16 @@ static const struct file_operations lpfc_debugfs_op_dumpDif = {
.release = lpfc_debugfs_dumpDataDif_release,
};
+#undef lpfc_debugfs_op_dif_err
+static const struct file_operations lpfc_debugfs_op_dif_err = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dif_err_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_dif_err_read,
+ .write = lpfc_debugfs_dif_err_write,
+ .release = lpfc_debugfs_dif_err_release,
+};
+
#undef lpfc_debugfs_op_slow_ring_trc
static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
.owner = THIS_MODULE,
@@ -3788,6 +3878,74 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* Setup DIF Error Injections */
+ snprintf(name, sizeof(name), "InjErrLBA");
+ phba->debug_InjErrLBA =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrLBA) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0807 Cannot create debugfs InjErrLBA\n");
+ goto debug_failed;
+ }
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+ snprintf(name, sizeof(name), "writeGuardInjErr");
+ phba->debug_writeGuard =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_writeGuard) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0802 Cannot create debugfs writeGuard\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "writeAppInjErr");
+ phba->debug_writeApp =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_writeApp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0803 Cannot create debugfs writeApp\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "writeRefInjErr");
+ phba->debug_writeRef =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_writeRef) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0804 Cannot create debugfs writeRef\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "readAppInjErr");
+ phba->debug_readApp =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_readApp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0805 Cannot create debugfs readApp\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "readRefInjErr");
+ phba->debug_readRef =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_readRef) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0806 Cannot create debugfs readApp\n");
+ goto debug_failed;
+ }
+
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -4090,6 +4248,30 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_dumpDif); /* dumpDif */
phba->debug_dumpDif = NULL;
}
+ if (phba->debug_InjErrLBA) {
+ debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
+ phba->debug_InjErrLBA = NULL;
+ }
+ if (phba->debug_writeGuard) {
+ debugfs_remove(phba->debug_writeGuard); /* writeGuard */
+ phba->debug_writeGuard = NULL;
+ }
+ if (phba->debug_writeApp) {
+ debugfs_remove(phba->debug_writeApp); /* writeApp */
+ phba->debug_writeApp = NULL;
+ }
+ if (phba->debug_writeRef) {
+ debugfs_remove(phba->debug_writeRef); /* writeRef */
+ phba->debug_writeRef = NULL;
+ }
+ if (phba->debug_readApp) {
+ debugfs_remove(phba->debug_readApp); /* readApp */
+ phba->debug_readApp = NULL;
+ }
+ if (phba->debug_readRef) {
+ debugfs_remove(phba->debug_readRef); /* readRef */
+ phba->debug_readRef = NULL;
+ }
if (phba->slow_ring_trc) {
kfree(phba->slow_ring_trc);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 023da0e00d3..445826a4c98 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3386,7 +3386,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context1 = NULL;
}
}
+
+ /*
+ * The driver received a LOGO from the rport and has ACK'd it.
+ * At this point, the driver is done so release the IOCB and
+ * remove the ndlp reference.
+ */
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -4082,9 +4089,6 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- lpfc_nlp_put(ndlp);
- elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
- * it could be freed */
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
@@ -4166,6 +4170,11 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
psli = &phba->sli;
cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+ /* The accumulated length can exceed the BPL_SIZE. For
+ * now, use this as the limit
+ */
+ if (cmdsize > LPFC_BPL_SIZE)
+ cmdsize = LPFC_BPL_SIZE;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
@@ -4189,9 +4198,6 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- lpfc_nlp_put(ndlp);
- elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
- * it could be freed */
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
@@ -7258,16 +7264,11 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
icmd->un.elsreq64.myID = 0;
icmd->un.elsreq64.fl = 1;
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_0)) {
- /* FDISC needs to be 1 for WQE VPI */
- elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
- elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
- /* Set the ulpContext to the vpi */
- elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
- } else {
- /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
+ /*
+ * SLI3 ports require a different context type value than SLI4.
+ * Catch SLI3 ports here and override the prep.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0b47adf9fee..091f68e5cb7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1412,7 +1412,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
if (phba->pport->port_state != LPFC_FLOGI) {
phba->hba_flag |= FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
- lpfc_issue_init_vfi(phba->pport);
+ lpfc_initial_flogi(phba->pport);
return;
}
spin_unlock_irq(&phba->hbalock);
@@ -2646,7 +2646,9 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
- if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) {
+ /* VFI not supported on interface type 0, just do the flogi */
+ if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"2891 Init VFI mailbox failed 0x%x\n",
@@ -2655,6 +2657,7 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
+
lpfc_initial_flogi(vport);
mempool_free(mboxq, phba->mbox_mem_pool);
return;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 7f8003b5181..98d21521f53 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -41,6 +41,8 @@
* Or clear that bit field:
* bf_set(example_bit_field, &t1, 0);
*/
+#define bf_get_be32(name, ptr) \
+ ((be32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_get_le32(name, ptr) \
((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_get(name, ptr) \
@@ -678,7 +680,6 @@ struct lpfc_register {
#define lpfc_rq_doorbell_num_posted_SHIFT 16
#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
#define lpfc_rq_doorbell_num_posted_WORD word0
-#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
#define lpfc_rq_doorbell_id_SHIFT 0
#define lpfc_rq_doorbell_id_MASK 0xFFFF
#define lpfc_rq_doorbell_id_WORD word0
@@ -784,6 +785,8 @@ union lpfc_sli4_cfg_shdr {
#define LPFC_Q_CREATE_VERSION_2 2
#define LPFC_Q_CREATE_VERSION_1 1
#define LPFC_Q_CREATE_VERSION_0 0
+#define LPFC_OPCODE_VERSION_0 0
+#define LPFC_OPCODE_VERSION_1 1
} request;
struct {
uint32_t word6;
@@ -825,6 +828,7 @@ struct mbox_header {
#define LPFC_EXTENT_VERSION_DEFAULT 0
/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_NA 0x0
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -835,25 +839,34 @@ struct mbox_header {
#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
/* Common Opcodes */
-#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
-#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
-#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
-#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
-#define LPFC_MBOX_OPCODE_NOP 0x21
-#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
-#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
-#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
-#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
-#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
-#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
-#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
-#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
-#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
-#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
-#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
-#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
-#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
-#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
+#define LPFC_MBOX_OPCODE_NA 0x00
+#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
+#define LPFC_MBOX_OPCODE_NOP 0x21
+#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
+#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
+#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5
+#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6
+#define LPFC_MBOX_OPCODE_SET_ACT_PROFILE 0xA8
+#define LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG 0xA9
+#define LPFC_MBOX_OPCODE_READ_OBJECT 0xAB
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
+#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD
+#define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -867,6 +880,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
@@ -1470,16 +1484,81 @@ struct sli4_sge { /* SLI-4 */
uint32_t addr_lo;
uint32_t word2;
-#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
+#define lpfc_sli4_sge_offset_SHIFT 0
+#define lpfc_sli4_sge_offset_MASK 0x07FFFFFF
#define lpfc_sli4_sge_offset_WORD word2
-#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
- this flag !! */
+#define lpfc_sli4_sge_type_SHIFT 27
+#define lpfc_sli4_sge_type_MASK 0x0000000F
+#define lpfc_sli4_sge_type_WORD word2
+#define LPFC_SGE_TYPE_DATA 0x0
+#define LPFC_SGE_TYPE_DIF 0x4
+#define LPFC_SGE_TYPE_LSP 0x5
+#define LPFC_SGE_TYPE_PEDIF 0x6
+#define LPFC_SGE_TYPE_PESEED 0x7
+#define LPFC_SGE_TYPE_DISEED 0x8
+#define LPFC_SGE_TYPE_ENC 0x9
+#define LPFC_SGE_TYPE_ATM 0xA
+#define LPFC_SGE_TYPE_SKIP 0xC
+#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets it */
#define lpfc_sli4_sge_last_MASK 0x00000001
#define lpfc_sli4_sge_last_WORD word2
uint32_t sge_len;
};
+struct sli4_sge_diseed { /* SLI-4 */
+ uint32_t ref_tag;
+ uint32_t ref_tag_tran;
+
+ uint32_t word2;
+#define lpfc_sli4_sge_dif_apptran_SHIFT 0
+#define lpfc_sli4_sge_dif_apptran_MASK 0x0000FFFF
+#define lpfc_sli4_sge_dif_apptran_WORD word2
+#define lpfc_sli4_sge_dif_af_SHIFT 24
+#define lpfc_sli4_sge_dif_af_MASK 0x00000001
+#define lpfc_sli4_sge_dif_af_WORD word2
+#define lpfc_sli4_sge_dif_na_SHIFT 25
+#define lpfc_sli4_sge_dif_na_MASK 0x00000001
+#define lpfc_sli4_sge_dif_na_WORD word2
+#define lpfc_sli4_sge_dif_hi_SHIFT 26
+#define lpfc_sli4_sge_dif_hi_MASK 0x00000001
+#define lpfc_sli4_sge_dif_hi_WORD word2
+#define lpfc_sli4_sge_dif_type_SHIFT 27
+#define lpfc_sli4_sge_dif_type_MASK 0x0000000F
+#define lpfc_sli4_sge_dif_type_WORD word2
+#define lpfc_sli4_sge_dif_last_SHIFT 31 /* Last SEG in the SGL sets it */
+#define lpfc_sli4_sge_dif_last_MASK 0x00000001
+#define lpfc_sli4_sge_dif_last_WORD word2
+ uint32_t word3;
+#define lpfc_sli4_sge_dif_apptag_SHIFT 0
+#define lpfc_sli4_sge_dif_apptag_MASK 0x0000FFFF
+#define lpfc_sli4_sge_dif_apptag_WORD word3
+#define lpfc_sli4_sge_dif_bs_SHIFT 16
+#define lpfc_sli4_sge_dif_bs_MASK 0x00000007
+#define lpfc_sli4_sge_dif_bs_WORD word3
+#define lpfc_sli4_sge_dif_ai_SHIFT 19
+#define lpfc_sli4_sge_dif_ai_MASK 0x00000001
+#define lpfc_sli4_sge_dif_ai_WORD word3
+#define lpfc_sli4_sge_dif_me_SHIFT 20
+#define lpfc_sli4_sge_dif_me_MASK 0x00000001
+#define lpfc_sli4_sge_dif_me_WORD word3
+#define lpfc_sli4_sge_dif_re_SHIFT 21
+#define lpfc_sli4_sge_dif_re_MASK 0x00000001
+#define lpfc_sli4_sge_dif_re_WORD word3
+#define lpfc_sli4_sge_dif_ce_SHIFT 22
+#define lpfc_sli4_sge_dif_ce_MASK 0x00000001
+#define lpfc_sli4_sge_dif_ce_WORD word3
+#define lpfc_sli4_sge_dif_nr_SHIFT 23
+#define lpfc_sli4_sge_dif_nr_MASK 0x00000001
+#define lpfc_sli4_sge_dif_nr_WORD word3
+#define lpfc_sli4_sge_dif_oprx_SHIFT 24
+#define lpfc_sli4_sge_dif_oprx_MASK 0x0000000F
+#define lpfc_sli4_sge_dif_oprx_WORD word3
+#define lpfc_sli4_sge_dif_optx_SHIFT 28
+#define lpfc_sli4_sge_dif_optx_MASK 0x0000000F
+#define lpfc_sli4_sge_dif_optx_WORD word3
+/* optx and oprx use BG_OP_IN defines in lpfc_hw.h */
+};
+
struct fcf_record {
uint32_t max_rcv_size;
uint32_t fka_adv_period;
@@ -2019,6 +2098,15 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
uint32_t word2;
+#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0
+#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F
+#define lpfc_mbx_rd_conf_lnk_numb_WORD word2
+#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6
+#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003
+#define lpfc_mbx_rd_conf_lnk_type_WORD word2
+#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8
+#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001
+#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
@@ -2552,8 +2640,152 @@ struct lpfc_mbx_get_prof_cfg {
} u;
};
+struct lpfc_controller_attribute {
+ uint32_t version_string[8];
+ uint32_t manufacturer_name[8];
+ uint32_t supported_modes;
+ uint32_t word17;
+#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0
+#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff
+#define lpfc_cntl_attr_eprom_ver_lo_WORD word17
+#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
+#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
+#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
+ uint32_t mbx_da_struct_ver;
+ uint32_t ep_fw_da_struct_ver;
+ uint32_t ncsi_ver_str[3];
+ uint32_t dflt_ext_timeout;
+ uint32_t model_number[8];
+ uint32_t description[16];
+ uint32_t serial_number[8];
+ uint32_t ip_ver_str[8];
+ uint32_t fw_ver_str[8];
+ uint32_t bios_ver_str[8];
+ uint32_t redboot_ver_str[8];
+ uint32_t driver_ver_str[8];
+ uint32_t flash_fw_ver_str[8];
+ uint32_t functionality;
+ uint32_t word105;
+#define lpfc_cntl_attr_max_cbd_len_SHIFT 0
+#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff
+#define lpfc_cntl_attr_max_cbd_len_WORD word105
+#define lpfc_cntl_attr_asic_rev_SHIFT 16
+#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff
+#define lpfc_cntl_attr_asic_rev_WORD word105
+#define lpfc_cntl_attr_gen_guid0_SHIFT 24
+#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff
+#define lpfc_cntl_attr_gen_guid0_WORD word105
+ uint32_t gen_guid1_12[3];
+ uint32_t word109;
+#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0
+#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff
+#define lpfc_cntl_attr_gen_guid13_14_WORD word109
+#define lpfc_cntl_attr_gen_guid15_SHIFT 16
+#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff
+#define lpfc_cntl_attr_gen_guid15_WORD word109
+#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24
+#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff
+#define lpfc_cntl_attr_hba_port_cnt_WORD word109
+ uint32_t word110;
+#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0
+#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff
+#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110
+#define lpfc_cntl_attr_multi_func_dev_SHIFT 24
+#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff
+#define lpfc_cntl_attr_multi_func_dev_WORD word110
+ uint32_t word111;
+#define lpfc_cntl_attr_cache_valid_SHIFT 0
+#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff
+#define lpfc_cntl_attr_cache_valid_WORD word111
+#define lpfc_cntl_attr_hba_status_SHIFT 8
+#define lpfc_cntl_attr_hba_status_MASK 0x000000ff
+#define lpfc_cntl_attr_hba_status_WORD word111
+#define lpfc_cntl_attr_max_domain_SHIFT 16
+#define lpfc_cntl_attr_max_domain_MASK 0x000000ff
+#define lpfc_cntl_attr_max_domain_WORD word111
+#define lpfc_cntl_attr_lnk_numb_SHIFT 24
+#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f
+#define lpfc_cntl_attr_lnk_numb_WORD word111
+#define lpfc_cntl_attr_lnk_type_SHIFT 30
+#define lpfc_cntl_attr_lnk_type_MASK 0x00000003
+#define lpfc_cntl_attr_lnk_type_WORD word111
+ uint32_t fw_post_status;
+ uint32_t hba_mtu[8];
+ uint32_t word121;
+ uint32_t reserved1[3];
+ uint32_t word125;
+#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0
+#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_vendor_id_WORD word125
+#define lpfc_cntl_attr_pci_device_id_SHIFT 16
+#define lpfc_cntl_attr_pci_device_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_device_id_WORD word125
+ uint32_t word126;
+#define lpfc_cntl_attr_pci_subvdr_id_SHIFT 0
+#define lpfc_cntl_attr_pci_subvdr_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_subvdr_id_WORD word126
+#define lpfc_cntl_attr_pci_subsys_id_SHIFT 16
+#define lpfc_cntl_attr_pci_subsys_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_subsys_id_WORD word126
+ uint32_t word127;
+#define lpfc_cntl_attr_pci_bus_num_SHIFT 0
+#define lpfc_cntl_attr_pci_bus_num_MASK 0x000000ff
+#define lpfc_cntl_attr_pci_bus_num_WORD word127
+#define lpfc_cntl_attr_pci_dev_num_SHIFT 8
+#define lpfc_cntl_attr_pci_dev_num_MASK 0x000000ff
+#define lpfc_cntl_attr_pci_dev_num_WORD word127
+#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16
+#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff
+#define lpfc_cntl_attr_pci_fnc_num_WORD word127
+#define lpfc_cntl_attr_inf_type_SHIFT 24
+#define lpfc_cntl_attr_inf_type_MASK 0x000000ff
+#define lpfc_cntl_attr_inf_type_WORD word127
+ uint32_t unique_id[2];
+ uint32_t word130;
+#define lpfc_cntl_attr_num_netfil_SHIFT 0
+#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff
+#define lpfc_cntl_attr_num_netfil_WORD word130
+ uint32_t reserved2[4];
+};
+
+struct lpfc_mbx_get_cntl_attributes {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ struct lpfc_controller_attribute cntl_attr;
+};
+
+struct lpfc_mbx_get_port_name {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_port_name_lnk_type_SHIFT 0
+#define lpfc_mbx_get_port_name_lnk_type_MASK 0x00000003
+#define lpfc_mbx_get_port_name_lnk_type_WORD word4
+ } request;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_port_name_name0_SHIFT 0
+#define lpfc_mbx_get_port_name_name0_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name0_WORD word4
+#define lpfc_mbx_get_port_name_name1_SHIFT 8
+#define lpfc_mbx_get_port_name_name1_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name1_WORD word4
+#define lpfc_mbx_get_port_name_name2_SHIFT 16
+#define lpfc_mbx_get_port_name_name2_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name2_WORD word4
+#define lpfc_mbx_get_port_name_name3_SHIFT 24
+#define lpfc_mbx_get_port_name_name3_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name3_WORD word4
+#define LPFC_LINK_NUMBER_0 0
+#define LPFC_LINK_NUMBER_1 1
+#define LPFC_LINK_NUMBER_2 2
+#define LPFC_LINK_NUMBER_3 3
+ } response;
+ } u;
+};
+
/* Mailbox Completion Queue Error Messages */
-#define MB_CQE_STATUS_SUCCESS 0x0
+#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
@@ -2637,8 +2869,9 @@ struct lpfc_mqe {
struct lpfc_mbx_run_link_diag_test link_diag_test;
struct lpfc_mbx_get_func_cfg get_func_cfg;
struct lpfc_mbx_get_prof_cfg get_prof_cfg;
- struct lpfc_mbx_nop nop;
struct lpfc_mbx_wr_object wr_object;
+ struct lpfc_mbx_get_port_name get_port_name;
+ struct lpfc_mbx_nop nop;
} un;
};
@@ -2855,6 +3088,9 @@ struct wqe_common {
#define wqe_ctxt_tag_MASK 0x0000FFFF
#define wqe_ctxt_tag_WORD word6
uint32_t word7;
+#define wqe_dif_SHIFT 0
+#define wqe_dif_MASK 0x00000003
+#define wqe_dif_WORD word7
#define wqe_ct_SHIFT 2
#define wqe_ct_MASK 0x00000003
#define wqe_ct_WORD word7
@@ -2867,12 +3103,21 @@ struct wqe_common {
#define wqe_class_SHIFT 16
#define wqe_class_MASK 0x00000007
#define wqe_class_WORD word7
+#define wqe_ar_SHIFT 19
+#define wqe_ar_MASK 0x00000001
+#define wqe_ar_WORD word7
+#define wqe_ag_SHIFT wqe_ar_SHIFT
+#define wqe_ag_MASK wqe_ar_MASK
+#define wqe_ag_WORD wqe_ar_WORD
#define wqe_pu_SHIFT 20
#define wqe_pu_MASK 0x00000003
#define wqe_pu_WORD word7
#define wqe_erp_SHIFT 22
#define wqe_erp_MASK 0x00000001
#define wqe_erp_WORD word7
+#define wqe_conf_SHIFT wqe_erp_SHIFT
+#define wqe_conf_MASK wqe_erp_MASK
+#define wqe_conf_WORD wqe_erp_WORD
#define wqe_lnk_SHIFT 23
#define wqe_lnk_MASK 0x00000001
#define wqe_lnk_WORD word7
@@ -2931,6 +3176,9 @@ struct wqe_common {
#define wqe_xc_SHIFT 21
#define wqe_xc_MASK 0x00000001
#define wqe_xc_WORD word10
+#define wqe_sr_SHIFT 22
+#define wqe_sr_MASK 0x00000001
+#define wqe_sr_WORD word10
#define wqe_ccpe_SHIFT 23
#define wqe_ccpe_MASK 0x00000001
#define wqe_ccpe_WORD word10
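
The header additions above follow the driver's SHIFT/MASK/WORD bitfield convention, now paired with a big-endian accessor (bf_get_be32) next to bf_get_le32. A minimal standalone sketch of how that extraction scheme works; the field name and register layout here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* A register word with a 2-bit "lnk_type" field at bits 6..7,
 * declared in the same SHIFT/MASK/WORD style as lpfc_hw4.h. */
struct demo_reg {
	uint32_t word2;
#define demo_lnk_type_SHIFT	6
#define demo_lnk_type_MASK	0x00000003
#define demo_lnk_type_WORD	word2
};

/* Same shape as the driver's bf_get()/bf_set() helpers. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

int main(void)
{
	struct demo_reg r = { .word2 = 0 };

	bf_set(demo_lnk_type, &r, 2);
	printf("word2 = 0x%08x, lnk_type = %u\n",
	       (unsigned)r.word2, (unsigned)bf_get(demo_lnk_type, &r));
	return 0;
}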
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a3c820083c3..55bc4fc7376 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -58,8 +59,7 @@ spinlock_t _dump_buf_lock;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
-static int lpfc_sli4_queue_create(struct lpfc_hba *);
-static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
@@ -1438,6 +1438,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
struct Scsi_Host *shost;
uint32_t if_type;
struct lpfc_register portstat_reg;
+ int rc;
/* If the pci channel is offline, ignore possible errors, since
* we cannot communicate with the pci card anyway.
@@ -1480,16 +1481,24 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_sli4_offline_eratt(phba);
return;
}
- if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
- /*
- * TODO: Attempt port recovery via a port reset.
- * When fully implemented, the driver should
- * attempt to recover the port here and return.
- * For now, log an error and take the port offline.
- */
+ /*
+ * On an error status condition, the driver needs to wait for the
+ * port to become ready before performing a reset.
+ */
+ rc = lpfc_sli4_pdev_status_reg_wait(phba);
+ if (!rc) {
+ /* need reset: attempt for port recovery */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2887 Port Error: Attempting "
"Port Recovery\n");
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ lpfc_sli_brdrestart(phba);
+ if (lpfc_online(phba) == 0) {
+ lpfc_unblock_mgmt_io(phba);
+ return;
+ }
+ /* fall through for not able to recover */
}
lpfc_sli4_offline_eratt(phba);
break;
@@ -1724,11 +1733,20 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
j = 0;
Length -= (3+i);
while(i--) {
- phba->Port[j++] = vpd[index++];
- if (j == 19)
- break;
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_GET)) {
+ j++;
+ index++;
+ } else
+ phba->Port[j++] = vpd[index++];
+ if (j == 19)
+ break;
}
- phba->Port[j] = 0;
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_NON))
+ phba->Port[j] = 0;
continue;
}
else {
@@ -1958,7 +1976,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_LANCER_FCOE:
case PCI_DEVICE_ID_LANCER_FCOE_VF:
oneConnect = 1;
- m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
+ m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
break;
default:
m = (typeof(m)){"Unknown", "", ""};
@@ -2432,17 +2450,19 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
uint8_t actcmd = MBX_HEARTBEAT;
unsigned long timeout;
-
+ timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
- if (phba->sli.mbox_active)
+ if (phba->sli.mbox_active) {
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
+ /* Determine how long we might wait for the active mailbox
+ * command to be gracefully completed by firmware.
+ */
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active) * 1000) + jiffies;
+ }
spin_unlock_irqrestore(&phba->hbalock, iflag);
- /* Determine how long we might wait for the active mailbox
- * command to be gracefully completed by firmware.
- */
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
- jiffies;
+
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
@@ -3949,7 +3969,7 @@ static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
struct pci_dev *pdev;
- int bars;
+ int bars = 0;
/* Obtain PCI device reference */
if (!phba->pcidev)
@@ -3978,6 +3998,8 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
out_disable_device:
pci_disable_device(pdev);
out_error:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1401 Failed to enable pci device, bars:x%x\n", bars);
return -ENODEV;
}
@@ -4051,9 +4073,6 @@ lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
uint16_t nr_virtfn;
int pos;
- if (!pdev->is_physfn)
- return 0;
-
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
if (pos == 0)
return 0;
@@ -4474,15 +4493,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
mempool_free(mboxq, phba->mbox_mem_pool);
- /* Create all the SLI4 queues */
- rc = lpfc_sli4_queue_create(phba);
+ /* Verify all the SLI4 queues */
+ rc = lpfc_sli4_queue_verify(phba);
if (rc)
goto out_free_bsmbx;
/* Create driver internal CQE event pool */
rc = lpfc_sli4_cq_event_pool_create(phba);
if (rc)
- goto out_destroy_queue;
+ goto out_free_bsmbx;
/* Initialize and populate the iocb list per host */
rc = lpfc_init_sgl_list(phba);
@@ -4516,14 +4535,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_remove_rpi_hdrs;
}
- phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+ /*
+ * The cfg_fcp_eq_count can be zero whenever there is exactly one
+ * interrupt vector. This is not an error.
+ */
+ if (phba->cfg_fcp_eq_count) {
+ phba->sli4_hba.fcp_eq_hdl =
+ kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
phba->cfg_fcp_eq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_eq_hdl) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2572 Failed allocate memory for fast-path "
- "per-EQ handle array\n");
- rc = -ENOMEM;
- goto out_free_fcf_rr_bmask;
+ if (!phba->sli4_hba.fcp_eq_hdl) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2572 Failed allocate memory for "
+ "fast-path per-EQ handle array\n");
+ rc = -ENOMEM;
+ goto out_free_fcf_rr_bmask;
+ }
}
phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -4567,8 +4593,6 @@ out_free_sgl_list:
lpfc_free_sgl_list(phba);
out_destroy_cq_event_pool:
lpfc_sli4_cq_event_pool_destroy(phba);
-out_destroy_queue:
- lpfc_sli4_queue_destroy(phba);
out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
@@ -4608,9 +4632,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
/* Free the SCSI sgl management array */
kfree(phba->sli4_hba.lpfc_scsi_psb_array);
- /* Free the SLI4 queues */
- lpfc_sli4_queue_destroy(phba);
-
/* Free the completion queue EQ event pool */
lpfc_sli4_cq_event_release_all(phba);
lpfc_sli4_cq_event_pool_destroy(phba);
@@ -6139,24 +6160,21 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
- * operation. For each SLI4 queue type, the parameters such as queue entry
- * count (queue depth) shall be taken from the module parameter. For now,
- * we just use some constant number as place holder.
+ * This routine is invoked to check the user-settable queue counts for EQs and
+ * CQs. After this routine is called, the counts will be set to valid values
+ * that adhere to the constraints of the system's interrupt vectors and the
+ * port's queue resources.
*
* Return codes
* 0 - successful
* -ENOMEM - No available memory
- * -EIO - The mailbox failed to complete successfully.
**/
static int
-lpfc_sli4_queue_create(struct lpfc_hba *phba)
+lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
- struct lpfc_queue *qdesc;
- int fcp_eqidx, fcp_cqidx, fcp_wqidx;
int cfg_fcp_wq_count;
int cfg_fcp_eq_count;
@@ -6229,14 +6247,43 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* The overall number of event queues used */
phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
- /*
- * Create Event Queues (EQs)
- */
-
/* Get EQ depth from module parameter, fake the default for now */
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+ /* Get CQ depth from module parameter, fake the default for now */
+ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+ phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+ return 0;
+out_error:
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *qdesc;
+ int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+
+ /*
+ * Create Event Queues (EQs)
+ */
+
/* Create slow path event queue */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
@@ -6247,14 +6294,20 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
phba->sli4_hba.sp_eq = qdesc;
- /* Create fast-path FCP Event Queue(s) */
- phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_eq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fp_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2576 Failed allocate memory for fast-path "
- "EQ record array\n");
- goto out_free_sp_eq;
+ /*
+ * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
+ * zero whenever there is exactly one interrupt vector. This is not
+ * an error.
+ */
+ if (phba->cfg_fcp_eq_count) {
+ phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_eq_count), GFP_KERNEL);
+ if (!phba->sli4_hba.fp_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2576 Failed allocate memory for "
+ "fast-path EQ record array\n");
+ goto out_free_sp_eq;
+ }
}
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
@@ -6271,10 +6324,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
* Create Complete Queues (CQs)
*/
- /* Get CQ depth from module parameter, fake the default for now */
- phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
- phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
-
/* Create slow-path Mailbox Command Complete Queue */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
@@ -6296,16 +6345,25 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.els_cq = qdesc;
- /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
- phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_eq_count), GFP_KERNEL);
+ /*
+ * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
+ * If there are no FCP EQs then create exactly one FCP CQ.
+ */
+ if (phba->cfg_fcp_eq_count)
+ phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_eq_count),
+ GFP_KERNEL);
+ else
+ phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
if (!phba->sli4_hba.fcp_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2577 Failed allocate memory for fast-path "
"CQ record array\n");
goto out_free_els_cq;
}
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ fcp_cqidx = 0;
+ do {
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
@@ -6315,7 +6373,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_free_fcp_cq;
}
phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
- }
+ } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
/* Create Mailbox Command Queue */
phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
@@ -6447,7 +6505,7 @@ out_error:
* -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
-static void
+void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
int fcp_qidx;
@@ -6723,6 +6781,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"0540 Receive Queue not allocated\n");
goto out_destroy_fcp_wq;
}
+
+ lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
+ lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
+
rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
phba->sli4_hba.els_cq, LPFC_USOL);
if (rc) {
@@ -6731,6 +6793,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"rc = 0x%x\n", rc);
goto out_destroy_fcp_wq;
}
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
"parent cq-id=%d\n",
@@ -6790,8 +6853,10 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
/* Unset FCP response complete queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ fcp_qidx = 0;
+ do {
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
/* Unset fast-path event queue */
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
@@ -7040,10 +7105,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
* the loop again.
*/
for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
+ msleep(10);
if (lpfc_readl(phba->sli4_hba.u.if_type2.
STATUSregaddr, &reg_data.word0)) {
rc = -ENODEV;
- break;
+ goto out;
}
if (bf_get(lpfc_sliport_status_rdy, &reg_data))
break;
@@ -7051,7 +7117,6 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
reset_again++;
break;
}
- msleep(10);
}
/*
@@ -7065,11 +7130,6 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
}
/* Detect any port errors. */
- if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &reg_data.word0)) {
- rc = -ENODEV;
- break;
- }
if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
(rdy_chk >= 1000)) {
phba->work_status[0] = readl(
@@ -7102,6 +7162,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
break;
}
+out:
/* Catch the not-ready port failure after a port reset. */
if (num_resets >= MAX_IF_TYPE_2_RESETS)
rc = -ENODEV;
@@ -7149,12 +7210,13 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ }
if (rc == MBX_TIMEOUT)
break;
/* Check return status */
@@ -7974,6 +8036,7 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ lpfc_sli4_queue_destroy(phba);
return;
}
@@ -8087,6 +8150,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Stop the SLI4 device port */
phba->pport->work_port_events = 0;
@@ -8120,7 +8184,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
}
@@ -8182,6 +8246,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int rc;
struct lpfc_mqe *mqe = &mboxq->u.mqe;
struct lpfc_pc_sli4_params *sli4_params;
+ uint32_t mbox_tmo;
int length;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
@@ -8200,9 +8265,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
length, LPFC_SLI4_MBX_EMBED);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
- lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ }
if (unlikely(rc))
return rc;
sli4_params = &phba->sli4_hba.pc_sli4_params;
@@ -8271,11 +8337,8 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform generic PCI device enabling operation */
error = lpfc_enable_pci_dev(phba);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1401 Failed to enable pci device.\n");
+ if (error)
goto out_free_phba;
- }
/* Set up SLI API function jump table for PCI-device group-0 HBAs */
error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
@@ -8322,6 +8385,9 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_iocb_list;
}
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
/* Create SCSI host to the physical port */
error = lpfc_create_shost(phba);
if (error) {
@@ -8885,16 +8951,17 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
uint32_t offset = 0, temp_offset = 0;
INIT_LIST_HEAD(&dma_buffer_list);
- if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
- (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
- (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
- (image->size != fw->size)) {
+ if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+ (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
+ LPFC_FILE_TYPE_GROUP) ||
+ (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+ (be32_to_cpu(image->size) != fw->size)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3022 Invalid FW image found. "
- "Magic:%d Type:%x ID:%x\n",
- image->magic_number,
- bf_get(lpfc_grp_hdr_file_type, image),
- bf_get(lpfc_grp_hdr_id, image));
+ "Magic:%x Type:%x ID:%x\n",
+ be32_to_cpu(image->magic_number),
+ bf_get_be32(lpfc_grp_hdr_file_type, image),
+ bf_get_be32(lpfc_grp_hdr_id, image));
return -EINVAL;
}
lpfc_decode_firmware_rev(phba, fwrev, 1);
@@ -8924,11 +8991,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
while (offset < fw->size) {
temp_offset = offset;
list_for_each_entry(dmabuf, &dma_buffer_list, list) {
- if (offset + SLI4_PAGE_SIZE > fw->size) {
- temp_offset += fw->size - offset;
+ if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
memcpy(dmabuf->virt,
fw->data + temp_offset,
- fw->size - offset);
+ fw->size - temp_offset);
+ temp_offset = fw->size;
break;
}
memcpy(dmabuf->virt, fw->data + temp_offset,
@@ -8984,7 +9051,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
uint32_t cfg_mode, intr_mode;
int mcnt;
int adjusted_fcp_eq_count;
- int fcp_qidx;
const struct firmware *fw;
uint8_t file_name[16];
@@ -8995,11 +9061,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform generic PCI device enabling operation */
error = lpfc_enable_pci_dev(phba);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1409 Failed to enable pci device.\n");
+ if (error)
goto out_free_phba;
- }
/* Set up SLI API function jump table for PCI-device group-1 HBAs */
error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
@@ -9054,6 +9117,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_iocb_list;
}
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
/* Create SCSI host to the physical port */
error = lpfc_create_shost(phba);
if (error) {
@@ -9093,16 +9159,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
else
adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
- /* Free unused EQs */
- for (fcp_qidx = adjusted_fcp_eq_count;
- fcp_qidx < phba->cfg_fcp_eq_count;
- fcp_qidx++) {
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
- /* do not delete the first fcp_cq */
- if (fcp_qidx)
- lpfc_sli4_queue_free(
- phba->sli4_hba.fcp_cq[fcp_qidx]);
- }
phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
@@ -9285,6 +9341,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
/* Disable interrupt from device */
lpfc_sli4_disable_intr(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Save device state to PCI config space */
pci_save_state(pdev);
@@ -9414,6 +9471,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Disable interrupt and pci device */
lpfc_sli4_disable_intr(phba);
+ lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
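The lpfc_init.c hunks above allow cfg_fcp_eq_count to be zero (the single interrupt vector case) while still guaranteeing that one fast-path FCP CQ exists, which is why the allocation loop becomes a do/while. A minimal standalone C sketch of that pattern, using made-up queue structures rather than the driver's real ones:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the driver's queue objects. */
struct fake_queue { int id; };

static struct fake_queue *fake_queue_alloc(int id)
{
        struct fake_queue *q = malloc(sizeof(*q));
        if (q)
                q->id = id;
        return q;
}

/*
 * Allocate max(1, fcp_eq_count) completion queues: the do/while body
 * always runs once, so a zero EQ count still yields one CQ.
 */
static struct fake_queue **alloc_fcp_cqs(int fcp_eq_count, int *out_cnt)
{
        int cnt = fcp_eq_count ? fcp_eq_count : 1;
        struct fake_queue **cqs = calloc(cnt, sizeof(*cqs));
        int idx = 0;

        if (!cqs)
                return NULL;
        do {
                cqs[idx] = fake_queue_alloc(idx);
                if (!cqs[idx])
                        goto out_free;
        } while (++idx < fcp_eq_count);
        *out_cnt = cnt;
        return cqs;

out_free:
        while (idx--)
                free(cqs[idx]);
        free(cqs);
        return NULL;
}

int main(void)
{
        int cnt = 0;
        struct fake_queue **cqs = alloc_fcp_cqs(0, &cnt);

        printf("created %d fast-path CQ(s)\n", cnt);     /* prints 1 */
        if (cqs) {
                free(cqs[0]);
                free(cqs);
        }
        return 0;
}

The do/while body always executes once, so the zero-EQ case still yields a single CQ, and the error path unwinds whatever was allocated.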
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index e3b790e5915..baf53e6c2bd 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -36,6 +36,7 @@
#define LOG_SECURITY 0x00008000 /* Security events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_FIP 0x00020000 /* FIP events */
+#define LOG_FCP_UNDER 0x00040000 /* FCP underrun errors */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
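The new LOG_FCP_UNDER bit lets read-underrun messages be gated separately from general FCP logging, as used by the lpfc_scsi.c changes further down. A small illustrative sketch of mask-gated logging; the LOG_FCP value and the helper below are stand-ins, only LOG_FCP_UNDER's value comes from the patch:

#include <stdio.h>

#define LOG_FCP        0x00000040u      /* illustrative bit value */
#define LOG_FCP_UNDER  0x00040000u      /* value added by this patch */

/* Only emit the message if one of its mask bits is enabled in the config. */
static void vlog(unsigned int cfg_log_verbose, unsigned int mask,
                 const char *msg)
{
        if (cfg_log_verbose & mask)
                printf("%s\n", msg);
}

int main(void)
{
        unsigned int verbose = LOG_FCP; /* underrun logging not enabled */

        vlog(verbose, LOG_FCP_UNDER, "FCP read underrun");      /* suppressed */
        vlog(verbose | LOG_FCP_UNDER, LOG_FCP_UNDER, "FCP read underrun");
        return 0;
}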
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 83450cc5c4d..2ebc7d2540c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1598,9 +1598,12 @@ lpfc_mbox_dev_check(struct lpfc_hba *phba)
* Timeout value to be used for the given mailbox command
**/
int
-lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
- switch (cmd) {
+ MAILBOX_t *mbox = &mboxq->u.mb;
+ uint8_t subsys, opcode;
+
+ switch (mbox->mbxCommand) {
case MBX_WRITE_NV: /* 0x03 */
case MBX_UPDATE_CFG: /* 0x1B */
case MBX_DOWN_LOAD: /* 0x1C */
@@ -1610,6 +1613,28 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
case MBX_LOAD_EXP_ROM: /* 0x9C */
return LPFC_MBOX_TMO_FLASH_CMD;
case MBX_SLI4_CONFIG: /* 0x9b */
+ subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
+ opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
+ if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_READ_OBJECT:
+ case LPFC_MBOX_OPCODE_WRITE_OBJECT:
+ case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
+ case LPFC_MBOX_OPCODE_DELETE_OBJECT:
+ case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
+ case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
+ case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
+ case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
+ case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
+ return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+ }
+ }
+ if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
+ return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+ }
+ }
return LPFC_MBOX_SLI4_CONFIG_TMO;
}
return LPFC_MBOX_TMO;
@@ -1859,7 +1884,7 @@ lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
}
/* Complete the initialization for the particular Opcode. */
- opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
+ opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
switch (opcode) {
case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
if (emb == LPFC_SLI4_MBX_EMBED)
@@ -1886,23 +1911,56 @@ lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
}
/**
- * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
+ * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
* @phba: pointer to lpfc hba data structure.
- * @mbox: pointer to lpfc mbox command.
+ * @mbox: pointer to lpfc mbox command queue entry.
+ *
+ * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the
+ * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall
+ * be returned.
+ **/
+uint8_t
+lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_mbx_sli4_config *sli4_cfg;
+ union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+ if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+ return LPFC_MBOX_SUBSYSTEM_NA;
+ sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+ /* For embedded mbox command, get subsystem from embedded sub-header */
+ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+ cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+ return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+ }
+
+ /* For non-embedded mbox command, get subsystem from first dma page */
+ if (unlikely(!mbox->sge_array))
+ return LPFC_MBOX_SUBSYSTEM_NA;
+ cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+ return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command queue entry.
*
- * This routine gets the opcode from a SLI4 specific mailbox command for
- * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
- * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
+ * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if
+ * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) shall be
* returned.
**/
uint8_t
-lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
struct lpfc_mbx_sli4_config *sli4_cfg;
union lpfc_sli4_cfg_shdr *cfg_shdr;
if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
- return 0;
+ return LPFC_MBOX_OPCODE_NA;
sli4_cfg = &mbox->u.mqe.un.sli4_config;
/* For embedded mbox command, get opcode from embedded sub-header*/
@@ -1913,7 +1971,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
/* For non-embedded mbox command, get opcode from first dma page */
if (unlikely(!mbox->sge_array))
- return 0;
+ return LPFC_MBOX_OPCODE_NA;
cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}
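With the lpfc_mbox.c changes, lpfc_mbox_tmo_val() takes the mailbox object itself so SLI_CONFIG commands can receive an extended timeout keyed on subsystem and opcode. A reduced, self-contained sketch of that dispatch shape; the command, subsystem and opcode constants here are illustrative stand-ins, only the timeout values mirror the patch:

#include <stdio.h>

enum { MBX_SLI4_CONFIG = 0x9b, MBX_WRITE_NV = 0x03 };
enum { SUBSYS_COMMON = 1, SUBSYS_FCOE = 2 };            /* illustrative */
enum { OP_WRITE_OBJECT = 0xAC, OP_NOP = 0x21 };         /* illustrative */

enum { TMO_DEFAULT = 30, TMO_SLI4_CONFIG = 60, TMO_EXTENDED = 300 };

struct fake_mbox {
        int cmd;
        int subsys;
        int opcode;
};

/* Pick a timeout from the command, and for SLI_CONFIG from subsystem/opcode. */
static int mbox_tmo_val(const struct fake_mbox *m)
{
        switch (m->cmd) {
        case MBX_WRITE_NV:
                return TMO_EXTENDED;
        case MBX_SLI4_CONFIG:
                if (m->subsys == SUBSYS_COMMON && m->opcode == OP_WRITE_OBJECT)
                        return TMO_EXTENDED;    /* flash-backed opcodes take longer */
                return TMO_SLI4_CONFIG;
        default:
                return TMO_DEFAULT;
        }
}

int main(void)
{
        struct fake_mbox wr  = { MBX_SLI4_CONFIG, SUBSYS_COMMON, OP_WRITE_OBJECT };
        struct fake_mbox nop = { MBX_SLI4_CONFIG, SUBSYS_COMMON, OP_NOP };

        printf("write-object tmo: %ds, nop tmo: %ds\n",
               mbox_tmo_val(&wr), mbox_tmo_val(&nop));
        return 0;
}

Passing the whole mailbox instead of just the command code is what makes the per-opcode distinction possible without changing every caller's signature again later.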
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index eadd241eeff..2e1e54e5c3a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
@@ -58,6 +59,13 @@ static char *dif_op_str[] = {
"SCSI_PROT_READ_PASS",
"SCSI_PROT_WRITE_PASS",
};
+
+struct scsi_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
@@ -1263,6 +1271,174 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
return 0;
}
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+ return sc->device->sector_size;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/*
+ * Given a scsi cmnd, determine the BlockGuard tags to be used with it
+ * @sc: The SCSI command to examine
+ * @reftag: (out) BlockGuard reference tag for transmitted data
+ * @apptag: (out) BlockGuard application tag for transmitted data
+ * @new_guard: (in) Value to replace CRC with if needed
+ *
+ * Returns (1) if error injection was performed, (0) otherwise
+ */
+static int
+lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
+{
+ struct scatterlist *sgpe; /* s/g prot entry */
+ struct scatterlist *sgde; /* s/g data entry */
+ struct scsi_dif_tuple *src;
+ uint32_t op = scsi_get_prot_op(sc);
+ uint32_t blksize;
+ uint32_t numblks;
+ sector_t lba;
+ int rc = 0;
+
+ if (op == SCSI_PROT_NORMAL)
+ return 0;
+
+ lba = scsi_get_lba(sc);
+ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
+ blksize = lpfc_cmd_blksize(sc);
+ numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
+
+ /* Make sure we have the right LBA if one is specified */
+ if ((phba->lpfc_injerr_lba < lba) ||
+ (phba->lpfc_injerr_lba >= (lba + numblks)))
+ return 0;
+ }
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+
+ /* Should we change the Reference Tag */
+ if (reftag) {
+ /*
+ * If we are SCSI_PROT_WRITE_STRIP, the protection data is
+ * being stripped from the wire, thus it doesn't matter.
+ */
+ if ((op == SCSI_PROT_WRITE_PASS) ||
+ (op == SCSI_PROT_WRITE_INSERT)) {
+ if (phba->lpfc_injerr_wref_cnt) {
+
+ /* DEADBEEF will be the reftag on the wire */
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_wref_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9081 BLKGRD: Injecting reftag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ }
+ } else {
+ if (phba->lpfc_injerr_rref_cnt) {
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_rref_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9076 BLKGRD: Injecting reftag error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ }
+ }
+ }
+
+ /* Should we change the Application Tag */
+ if (apptag) {
+ /*
+ * If we are SCSI_PROT_WRITE_STRIP, the protection data is
+ * being stripped from the wire, thus it doesn't matter.
+ */
+ if ((op == SCSI_PROT_WRITE_PASS) ||
+ (op == SCSI_PROT_WRITE_INSERT)) {
+ if (phba->lpfc_injerr_wapp_cnt) {
+
+ /* DEAD will be the apptag on the wire */
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_wapp_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9077 BLKGRD: Injecting apptag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ }
+ } else {
+ if (phba->lpfc_injerr_rapp_cnt) {
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_rapp_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9078 BLKGRD: Injecting apptag error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ }
+ }
+ }
+
+ /* Should we change the Guard Tag */
+
+ /*
+ * If we are SCSI_PROT_WRITE_INSERT, the protection data on the
+ * wire is fully generated by the HBA.
+ * The host cannot change it or force an error.
+ */
+ if (((op == SCSI_PROT_WRITE_STRIP) ||
+ (op == SCSI_PROT_WRITE_PASS)) &&
+ phba->lpfc_injerr_wgrd_cnt) {
+ if (sgpe) {
+ src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ /*
+ * Just inject an error in the first
+ * prot block.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9079 BLKGRD: Injecting guard error: "
+ "write lba x%lx oldGuard x%x refTag x%x\n",
+ (unsigned long)lba, src->guard_tag,
+ src->ref_tag);
+
+ src->guard_tag = (uint16_t)new_guard;
+ phba->lpfc_injerr_wgrd_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ } else {
+ blksize = lpfc_cmd_blksize(sc);
+ /*
+ * Jump past the first data block
+ * and inject an error in the
+ * prot data. The prot data is already
+ * embedded after the regular data.
+ */
+ src = (struct scsi_dif_tuple *)
+ (sg_virt(sgde) + blksize);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9080 BLKGRD: Injecting guard error: "
+ "write lba x%lx oldGuard x%x refTag x%x\n",
+ (unsigned long)lba, src->guard_tag,
+ src->ref_tag);
+
+ src->guard_tag = (uint16_t)new_guard;
+ phba->lpfc_injerr_wgrd_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+ }
+ }
+ return rc;
+}
+#endif
+
/*
* Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
* @sc: The SCSI command to examine
@@ -1341,18 +1517,6 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return ret;
}
-struct scsi_dif_tuple {
- __be16 guard_tag; /* Checksum */
- __be16 app_tag; /* Opaque storage */
- __be32 ref_tag; /* Target LBA or indirect LBA */
-};
-
-static inline unsigned
-lpfc_cmd_blksize(struct scsi_cmnd *sc)
-{
- return sc->device->sector_size;
-}
-
/*
* This function sets up buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -1401,6 +1565,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
blksize = lpfc_cmd_blksize(sc);
reftag = scsi_get_lba(sc) & 0xffffffff;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ /* reftag is the only error we can inject here */
+ lpfc_bg_err_inject(phba, sc, &reftag, 0, 0);
+#endif
+
/* setup PDE5 with what we have */
pde5 = (struct lpfc_pde5 *) bpl;
memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -1532,6 +1701,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
blksize = lpfc_cmd_blksize(sc);
reftag = scsi_get_lba(sc) & 0xffffffff;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ /* reftag / guard tag are the only errors we can inject here */
+ lpfc_bg_err_inject(phba, sc, &reftag, 0, 0xDEAD);
+#endif
+
split_offset = 0;
do {
/* setup PDE5 with what we have */
@@ -1671,7 +1845,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
} while (!alldone);
-
out:
return num_bde;
@@ -2075,6 +2248,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(dma_len);
dma_offset += dma_len;
@@ -2325,8 +2499,9 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
lp = (uint32_t *)cmnd->sense_buffer;
- if (!scsi_status && (resp_info & RESID_UNDER))
- logit = LOG_FCP;
+ if (!scsi_status && (resp_info & RESID_UNDER) &&
+ vport->cfg_log_verbose & LOG_FCP_UNDER)
+ logit = LOG_FCP_UNDER;
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"9024 FCP command x%x failed: x%x SNS x%x x%x "
@@ -2342,7 +2517,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (resp_info & RESID_UNDER) {
scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
"9025 FCP Read Underrun, expected %d, "
"residual %d Data: x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
@@ -2449,6 +2624,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_fast_path_event *fast_path_evt;
struct Scsi_Host *shost;
uint32_t queue_depth, scsi_id;
+ uint32_t logit = LOG_FCP;
/* Sanity check on return of outstanding command */
if (!(lpfc_cmd->pCmd))
@@ -2470,16 +2646,22 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
else if (lpfc_cmd->status >= IOSTAT_CNT)
lpfc_cmd->status = IOSTAT_DEFAULT;
-
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "9030 FCP cmd x%x failed <%d/%d> "
- "status: x%x result: x%x Data: x%x x%x\n",
- cmd->cmnd[0],
- cmd->device ? cmd->device->id : 0xffff,
- cmd->device ? cmd->device->lun : 0xffff,
- lpfc_cmd->status, lpfc_cmd->result,
- pIocbOut->iocb.ulpContext,
- lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+ if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
+ && !lpfc_cmd->fcp_rsp->rspStatus3
+ && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
+ && !(phba->cfg_log_verbose & LOG_FCP_UNDER))
+ logit = 0;
+ else
+ logit = LOG_FCP | LOG_FCP_UNDER;
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9030 FCP cmd x%x failed <%d/%d> "
+ "status: x%x result: x%x Data: x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ pIocbOut->iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
@@ -3056,8 +3238,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
}
ndlp = rdata->pnode;
- if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+ if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
+ (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) ||
+ (phba->sli_rev == LPFC_SLI_REV4))) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
@@ -3691,9 +3874,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- ret = fc_block_scsi_eh(cmnd);
- if (ret)
- return ret;
+ status = fc_block_scsi_eh(cmnd);
+ if (status)
+ return status;
/*
* Since the driver manages a single bus device, reset all
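The BlockGuard error-injection path added to lpfc_scsi.c only fires when the requested LBA falls inside the I/O's block range, computed from the buffer length and the logical block size. A small standalone sketch of that range check and of the 8-byte DIF tuple layout it manipulates (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* 8-byte DIF tuple layout used for T10 protection information. */
struct dif_tuple {
        uint16_t guard_tag;     /* CRC of the data block */
        uint16_t app_tag;       /* opaque application tag */
        uint32_t ref_tag;       /* usually the lower 32 bits of the LBA */
};

/*
 * Return 1 if an injection target LBA lies inside the I/O's LBA window,
 * mirroring the "make sure we have the right LBA" check in the patch.
 */
static int lba_in_cmd_range(uint64_t inj_lba, uint64_t cmd_lba,
                            uint32_t bufflen, uint32_t blksize)
{
        uint32_t numblks = (bufflen + blksize - 1) / blksize;

        return inj_lba >= cmd_lba && inj_lba < cmd_lba + numblks;
}

int main(void)
{
        /* A 32 KiB write at LBA 1000 with 512-byte blocks covers 64 blocks. */
        printf("hit: %d\n", lba_in_cmd_range(1040, 1000, 32768, 512));
        printf("miss: %d\n", lba_in_cmd_range(2000, 1000, 32768, 512));
        return 0;
}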
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8b799f047a9..4d4104f38c9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -379,10 +379,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
dq->host_index = ((dq->host_index + 1) % dq->entry_count);
/* Ring The Header Receive Queue Doorbell */
- if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
+ if (!(hq->host_index % hq->entry_repost)) {
doorbell.word0 = 0;
bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
- LPFC_RQ_POST_BATCH);
+ hq->entry_repost);
bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
}
@@ -1864,7 +1864,7 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
if (phba->sli_rev == LPFC_SLI_REV4)
return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->entry_count);
+ lpfc_hbq_defs[qno]->entry_count);
else
return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->init_count);
@@ -2200,10 +2200,13 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Unknown mailbox command compl */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
- "x%x (x%x) Cmpl\n",
+ "x%x (x%x/x%x) Cmpl\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, pmb));
+ lpfc_sli_config_mbox_subsys_get(phba,
+ pmb),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ pmb));
phba->link_state = LPFC_HBA_ERROR;
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
@@ -2215,17 +2218,19 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
/* Mbox cmd cmpl error - RETRYing */
lpfc_printf_log(phba, KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "(%d):0305 Mbox cmd cmpl "
- "error - RETRYing Data: x%x "
- "(x%x) x%x x%x x%x\n",
- pmb->vport ? pmb->vport->vpi :0,
- pmbox->mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba,
- pmb),
- pmbox->mbxStatus,
- pmbox->un.varWords[0],
- pmb->vport->port_state);
+ LOG_MBOX | LOG_SLI,
+ "(%d):0305 Mbox cmd cmpl "
+ "error - RETRYing Data: x%x "
+ "(x%x/x%x) x%x x%x x%x\n",
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ pmb),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ pmb),
+ pmbox->mbxStatus,
+ pmbox->un.varWords[0],
+ pmb->vport->port_state);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -2236,11 +2241,12 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
+ "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, pmb),
+ lpfc_sli_config_mbox_subsys_get(phba, pmb),
+ lpfc_sli_config_mbox_opcode_get(phba, pmb),
pmb->mbox_cmpl,
*((uint32_t *) pmbox),
pmbox->un.varWords[0],
@@ -4686,6 +4692,175 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
}
/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves the SLI4 device physical port name that this
+ * PCI function is attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_read_config *rd_config;
+ struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
+ struct lpfc_controller_attribute *cntl_attr;
+ struct lpfc_mbx_get_port_name *get_port_name;
+ void *virtaddr = NULL;
+ uint32_t alloclen, reqlen;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ char cport_name = 0;
+ int rc;
+
+ /* We assume nothing at this point */
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* obtain link type and link number via READ_CONFIG */
+ lpfc_read_config(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc == MBX_SUCCESS) {
+ rd_config = &mboxq->u.mqe.un.rd_config;
+ if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3081 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+ goto retrieve_ppname;
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3082 Mailbox (x%x) returned ldv:x0\n",
+ bf_get(lpfc_mqe_command,
+ &mboxq->u.mqe));
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3083 Mailbox (x%x) failed, status:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+
+ /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+ reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
+ alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3084 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory size "
+ "(%d)\n", alloclen, reqlen);
+ rc = -ENOMEM;
+ goto out_free_mboxq;
+ }
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ virtaddr = mboxq->sge_array->addr[0];
+ mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
+ shdr = &mbx_cntl_attr->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3085 Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x, status:x%x, add_status:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ rc, shdr_status, shdr_add_status);
+ rc = -ENXIO;
+ goto out_free_mboxq;
+ }
+ cntl_attr = &mbx_cntl_attr->cntl_attr;
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3086 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+
+retrieve_ppname:
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PORT_NAME,
+ sizeof(struct lpfc_mbx_get_port_name) -
+ sizeof(struct lpfc_sli4_cfg_mhdr),
+ LPFC_SLI4_MBX_EMBED);
+ get_port_name = &mboxq->u.mqe.un.get_port_name;
+ shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
+ bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
+ phba->sli4_hba.lnk_info.lnk_tp);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3087 Mailbox x%x (x%x/x%x) failed: "
+ "rc:x%x, status:x%x, add_status:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ rc, shdr_status, shdr_add_status);
+ rc = -ENXIO;
+ goto out_free_mboxq;
+ }
+ switch (phba->sli4_hba.lnk_info.lnk_no) {
+ case LPFC_LINK_NUMBER_0:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name0,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ case LPFC_LINK_NUMBER_1:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name1,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ case LPFC_LINK_NUMBER_2:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name2,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ case LPFC_LINK_NUMBER_3:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name3,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ default:
+ break;
+ }
+
+ if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
+ phba->Port[0] = cport_name;
+ phba->Port[1] = '\0';
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3091 SLI get port name: %s\n", phba->Port);
+ }
+
+out_free_mboxq:
+ if (rc != MBX_TIMEOUT) {
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ else
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ }
+ return rc;
+}
+
+/**
* lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
* @phba: pointer to lpfc hba data structure.
*
@@ -4754,7 +4929,7 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
if (unlikely(rc)) {
@@ -4911,7 +5086,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
@@ -5194,7 +5369,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mbox_tmo);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
if (unlikely(rc)) {
@@ -5619,7 +5794,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
@@ -5748,6 +5923,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
kfree(vpd);
goto out_free_mbox;
}
+
+ /*
+ * Retrieve the SLI4 device physical port name; failure to do so is
+ * considered non-fatal.
+ */
+ rc = lpfc_sli4_retrieve_pport_name(phba);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3080 Successful retrieving SLI4 device "
+ "physical port name: %s.\n", phba->Port);
+
/*
* Evaluate the read rev and vpd data. Populate the driver
* state with the results. If this routine fails, the failure
@@ -5818,9 +6004,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* then turn off the global config parameters to disable the
* feature in the driver. This is not a fatal error.
*/
- if ((phba->cfg_enable_bg) &&
- !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
- ftr_rsp++;
+ phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
+ if (phba->cfg_enable_bg) {
+ if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
+ phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+ else
+ ftr_rsp++;
+ }
if (phba->max_vpi && phba->cfg_enable_npiv &&
!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
@@ -5937,12 +6127,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ /* Create all the SLI4 queues */
+ rc = lpfc_sli4_queue_create(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3089 Failed to allocate queues\n");
+ rc = -ENODEV;
+ goto out_stop_timers;
+ }
/* Set up all the queues to the device */
rc = lpfc_sli4_queue_setup(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0381 Error %d during queue setup.\n ", rc);
- goto out_stop_timers;
+ goto out_destroy_queue;
}
/* Arm the CQs and then EQs on device */
@@ -6015,15 +6213,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
+ if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ goto out_unset_queue;
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
- if (rc)
- lpfc_sli4_queue_unset(phba);
+ lpfc_sli4_queue_unset(phba);
+out_destroy_queue:
+ lpfc_sli4_queue_destroy(phba);
out_stop_timers:
- if (rc)
- lpfc_stop_hba_timers(phba);
+ lpfc_stop_hba_timers(phba);
out_free_mbox:
mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
@@ -6318,7 +6521,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
/* timeout active mbox command */
mod_timer(&psli->mbox_tmo, (jiffies +
- (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
+ (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
}
/* Mailbox cmd <cmd> issue */
@@ -6442,9 +6645,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
drvr_flag);
goto out_not_finished;
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- mb->mbxCommand) *
- 1000) + jiffies;
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+ 1000) + jiffies;
i = 0;
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
@@ -6555,21 +6757,21 @@ static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
- uint8_t actcmd = MBX_HEARTBEAT;
int rc = 0;
- unsigned long timeout;
+ unsigned long timeout = 0;
/* Mark the asynchronous mailbox command posting as blocked */
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
- if (phba->sli.mbox_active)
- actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
- spin_unlock_irq(&phba->hbalock);
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
- jiffies;
+ if (phba->sli.mbox_active)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active) *
+ 1000) + jiffies;
+ spin_unlock_irq(&phba->hbalock);
+
/* Wait for the outstnading mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
@@ -6664,11 +6866,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2532 Mailbox command x%x (x%x) "
+ "(%d):2532 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, MBX_POLL);
return MBXERR_ERROR;
}
@@ -6691,7 +6894,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
dma_address = &phba->sli4_hba.bmbx.dma_address;
writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
* 1000) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -6707,7 +6910,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/* Post the low mailbox dma address to the port. */
writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
* 1000) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -6746,11 +6949,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_sli4_swap_str(phba, mboxq);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
+ "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
" x%x x%x CQ: x%x x%x x%x x%x\n",
- mboxq->vport ? mboxq->vport->vpi : 0,
- mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
bf_get(lpfc_mqe_status, mb),
mb->un.mb_words[0], mb->un.mb_words[1],
mb->un.mb_words[2], mb->un.mb_words[3],
@@ -6796,11 +7000,12 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2544 Mailbox command x%x (x%x) "
+ "(%d):2544 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
@@ -6814,20 +7019,25 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
- "(x%x) cannot issue Data: x%x x%x\n",
+ "(x%x/x%x) cannot issue Data: "
+ "x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq),
psli->sli_flag, flag);
return rc;
} else if (flag == MBX_POLL) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2542 Try to issue mailbox command "
- "x%x (x%x) synchronously ahead of async"
+ "x%x (x%x/x%x) synchronously ahead of async"
"mailbox command queue: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
/* Try to block the asynchronous mailbox posting */
rc = lpfc_sli4_async_mbox_block(phba);
@@ -6836,16 +7046,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_ERR,
- LOG_MBOX | LOG_SLI,
- "(%d):2597 Mailbox command "
- "x%x (x%x) cannot issue "
- "Data: x%x x%x\n",
- mboxq->vport ?
- mboxq->vport->vpi : 0,
- mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba,
- mboxq),
- psli->sli_flag, flag);
+ LOG_MBOX | LOG_SLI,
+ "(%d):2597 Mailbox command "
+ "x%x (x%x/x%x) cannot issue "
+ "Data: x%x x%x\n",
+ mboxq->vport ?
+ mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq),
+ psli->sli_flag, flag);
/* Unblock the async mailbox posting afterward */
lpfc_sli4_async_mbox_unblock(phba);
}
@@ -6856,11 +7068,12 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
rc = lpfc_mbox_cmd_check(phba, mboxq);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2543 Mailbox command x%x (x%x) "
+ "(%d):2543 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
@@ -6872,10 +7085,11 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0354 Mbox cmd issue - Enqueue Data: "
- "x%x (x%x) x%x x%x x%x\n",
+ "x%x (x%x/x%x) x%x x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0xffffff,
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
phba->pport->port_state,
psli->sli_flag, MBX_NOWAIT);
/* Wake up worker thread to transport mailbox command from head */
@@ -6952,13 +7166,14 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
+ (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
+ "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
"x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
phba->pport->port_state, psli->sli_flag);
if (mbx_cmnd != MBX_HEARTBEAT) {
@@ -6982,11 +7197,12 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2533 Mailbox command x%x (x%x) "
+ "(%d):2533 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, MBX_NOWAIT);
goto out_not_finished;
}
@@ -7322,6 +7538,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
if (inbound == 1)
offset = 0;
bf_set(lpfc_sli4_sge_offset, sgl, offset);
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
offset += bde.tus.f.bdeSize;
}
sgl->word2 = cpu_to_le32(sgl->word2);
@@ -9359,7 +9577,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
-
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
wait_event_interruptible_timeout(done_q,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
@@ -9403,23 +9620,24 @@ void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
- uint8_t actcmd = MBX_HEARTBEAT;
unsigned long timeout;
+ timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
spin_lock_irq(&phba->hbalock);
- if (phba->sli.mbox_active)
- actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
- spin_unlock_irq(&phba->hbalock);
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
- 1000) + jiffies;
+ if (phba->sli.mbox_active)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active) *
+ 1000) + jiffies;
+ spin_unlock_irq(&phba->hbalock);
+
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
@@ -10415,12 +10633,17 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
/* Move mbox data to caller's mailbox region, do endian swapping */
if (pmb->mbox_cmpl && mbox)
lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
- /* Set the mailbox status with SLI4 range 0x4000 */
- mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
- if (mcqe_status != MB_CQE_STATUS_SUCCESS)
- bf_set(lpfc_mqe_status, mqe,
- (LPFC_MBX_ERROR_RANGE | mcqe_status));
+ /*
+ * For mcqe errors, conditionally move a modified error code to
+ * the mbox so that the error will not be missed.
+ */
+ mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+ if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+ if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mqe,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
+ }
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
@@ -10796,7 +11019,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
case LPFC_MCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
- if (!(++ecount % LPFC_GET_QE_REL_INT))
+ if (!(++ecount % cq->entry_repost))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
break;
@@ -10808,7 +11031,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
else
workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
cqe);
- if (!(++ecount % LPFC_GET_QE_REL_INT))
+ if (!(++ecount % cq->entry_repost))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
break;
@@ -11040,7 +11263,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Process all the entries to the CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
- if (!(++ecount % LPFC_GET_QE_REL_INT))
+ if (!(++ecount % cq->entry_repost))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -11110,6 +11333,8 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
/* Get to the EQ struct associated with this vector */
speq = phba->sli4_hba.sp_eq;
+ if (unlikely(!speq))
+ return IRQ_NONE;
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11127,7 +11352,7 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
*/
while ((eqe = lpfc_sli4_eq_get(speq))) {
lpfc_sli4_sp_handle_eqe(phba, eqe);
- if (!(++ecount % LPFC_GET_QE_REL_INT))
+ if (!(++ecount % speq->entry_repost))
lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
}
@@ -11187,6 +11412,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
if (unlikely(!phba))
return IRQ_NONE;
+ if (unlikely(!phba->sli4_hba.fp_eq))
+ return IRQ_NONE;
/* Get to the EQ struct associated with this vector */
fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
@@ -11207,7 +11434,7 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
- if (!(++ecount % LPFC_GET_QE_REL_INT))
+ if (!(++ecount % fpeq->entry_repost))
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
}
@@ -11359,6 +11586,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
}
queue->entry_size = entry_size;
queue->entry_count = entry_count;
+
+ /*
+ * entry_repost is calculated based on the number of entries in the
+ * queue. This works out except for RQs. If buffers are NOT initially
+ * posted for every RQE, entry_repost should be adjusted accordingly.
+ */
+ queue->entry_repost = (entry_count >> 3);
+ if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
+ queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
queue->phba = phba;
return queue;
@@ -11924,6 +12160,31 @@ out:
}
/**
+ * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @rq: The queue structure to use for the receive queue.
+ * @qno: The associated HBQ number
+ *
+ * For SLI4 we need to adjust the RQ repost value based on
+ * the number of buffers that are initially posted to the RQ.
+ */
+void
+lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
+{
+ uint32_t cnt;
+
+ cnt = lpfc_hbq_defs[qno]->entry_count;
+
+ /* Recalc repost for RQs based on buffers initially posted */
+ cnt = (cnt >> 3);
+ if (cnt < LPFC_QUEUE_MIN_REPOST)
+ cnt = LPFC_QUEUE_MIN_REPOST;
+
+ rq->entry_repost = cnt;
+}
+
+/**
* lpfc_rq_create - Create a Receive Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @hrq: The queue structure to use to create the header receive queue.
@@ -12489,7 +12750,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
/* The IOCTL status is embedded in the mailbox subheader. */
@@ -12704,7 +12965,7 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -12867,7 +13128,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -12991,7 +13252,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -13147,7 +13408,7 @@ lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -13296,7 +13557,8 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
fc_hdr->fh_d_id[1] << 8 |
fc_hdr->fh_d_id[2]);
-
+ if (did == Fabric_DID)
+ return phba->pport;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
@@ -14312,7 +14574,7 @@ lpfc_sli4_init_vpi(struct lpfc_vport *vport)
if (!mboxq)
return -ENOMEM;
lpfc_init_vpi(phba, mboxq, vport->vpi);
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
if (rc != MBX_SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
@@ -15188,7 +15450,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
/* The IOCTL status is embedded in the mailbox subheader. */
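Several lpfc_sli.c hunks above replace the fixed LPFC_GET_QE_REL_INT batch with a per-queue entry_repost derived from the queue depth. A compact standalone sketch of that derivation and of the batched release loop it drives; the structures and output below are illustrative, not the driver's:

#include <stdio.h>

#define QUEUE_MIN_REPOST 8

/*
 * Derive the repost threshold from the queue depth (depth / 8, floored
 * at a minimum), so deeper queues ring the doorbell less often.
 */
static unsigned int repost_threshold(unsigned int entry_count)
{
        unsigned int repost = entry_count >> 3;

        return repost < QUEUE_MIN_REPOST ? QUEUE_MIN_REPOST : repost;
}

/* Process pending entries, releasing them back in entry_repost batches. */
static void drain_queue(unsigned int pending, unsigned int entry_count)
{
        unsigned int repost = repost_threshold(entry_count);
        unsigned int ecount = 0, i;

        for (i = 0; i < pending; i++) {
                /* ... process one completion entry here ... */
                if (!(++ecount % repost))
                        printf("release %u entries (no re-arm)\n", ecount);
        }
        if (ecount)
                printf("final release of %u entries (re-arm)\n", ecount);
}

int main(void)
{
        drain_queue(70, 256);   /* repost threshold = 32 */
        return 0;
}

Sizing the batch from the queue depth rather than a global constant keeps the doorbell rate roughly proportional for both small and large queues, which is the point of the entry_repost field added in lpfc_sli4.h.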
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a0075b0af14..29c13b63e32 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -293,13 +293,11 @@ struct lpfc_sli {
struct lpfc_lnk_stat lnk_stat_offsets;
};
-#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
- command */
-#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
- command */
-#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
- * or erase cmds. This is especially
- * long because of the potential of
- * multiple flash erases that can be
- * spawned.
- */
+/* Timeout for normal outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO 30
+/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_TMO 60
+/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO 300
+/* Timeout for other flash-based outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO_FLASH_CMD 300
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 19bb87ae859..d5cffd8af34 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -23,7 +23,6 @@
#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
-#define LPFC_GET_QE_REL_INT 32
#define LPFC_RPI_LOW_WATER_MARK 10
#define LPFC_UNREG_FCF 1
@@ -126,6 +125,8 @@ struct lpfc_queue {
struct list_head child_list;
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
+ uint32_t entry_repost; /* Count of entries before doorbell is rung */
+#define LPFC_QUEUE_MIN_REPOST 8
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
struct list_head page_list;
@@ -388,6 +389,16 @@ struct lpfc_iov {
uint32_t vf_number;
};
+struct lpfc_sli4_lnk_info {
+ uint8_t lnk_dv;
+#define LPFC_LNK_DAT_INVAL 0
+#define LPFC_LNK_DAT_VAL 1
+ uint8_t lnk_tp;
+#define LPFC_LNK_GE 0x0 /* FCoE */
+#define LPFC_LNK_FC 0x1 /* FC */
+ uint8_t lnk_no;
+};
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -503,6 +514,10 @@ struct lpfc_sli4_hba {
struct list_head sp_els_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
+ struct lpfc_sli4_lnk_info lnk_info;
+ uint32_t pport_name_sta;
+#define LPFC_SLI4_PPNAME_NON 0
+#define LPFC_SLI4_PPNAME_GET 1
struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
@@ -553,6 +568,7 @@ struct lpfc_rsrc_blks {
* SLI4 specific function prototypes
*/
int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
int lpfc_sli4_hba_setup(struct lpfc_hba *);
int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
uint8_t, uint32_t, bool);
@@ -576,6 +592,7 @@ uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
@@ -632,5 +649,5 @@ void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
-uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
-
+uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c1e0ae94d9f..b0630e37f1e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.25"
+#define LPFC_DRIVER_VERSION "8.3.27"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1feb551a57b..cff6ca67415 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -692,13 +692,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
/* Indicate free memory when released */
NLP_SET_FREE_REQ(ndlp);
} else {
- if (!NLP_CHK_NODE_ACT(ndlp))
+ if (!NLP_CHK_NODE_ACT(ndlp)) {
ndlp = lpfc_enable_node(vport, ndlp,
NLP_STE_UNUSED_NODE);
if (!ndlp)
goto skip_logo;
+ }
- /* Remove ndlp from vport npld list */
+ /* Remove ndlp from vport list */
lpfc_dequeue_node(vport, ndlp);
spin_lock_irq(&phba->ndlp_lock);
if (!NLP_CHK_FREE_REQ(ndlp))
@@ -711,8 +712,17 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
spin_unlock_irq(&phba->ndlp_lock);
}
- if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
+
+ /*
+ * If the vpi is not registered, then a valid FDISC doesn't
+ * exist and there is no need for an ELS LOGO. Just clean up
+ * the ndlp.
+ */
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+ lpfc_nlp_put(ndlp);
goto skip_logo;
+ }
+
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 6c42dff0f4d..e6173376605 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -17,6 +17,7 @@
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 3893337e3dd..590ce1ef201 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -230,9 +230,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- unsigned long flags;
-
- local_irq_save(flags);
mep->error = 0;
@@ -270,8 +267,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
esp_count = n;
}
} while (esp_count);
-
- local_irq_restore(flags);
}
/*
@@ -353,8 +348,6 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
u8 *fifo = esp->regs + ESP_FDATA * 16;
- disable_irq(esp->host->irq);
-
cmd &= ~ESP_CMD_DMA;
mep->error = 0;
@@ -431,8 +424,6 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
scsi_esp_cmd(esp, ESP_CMD_TI);
}
}
-
- enable_irq(esp->host->irq);
}
static int mac_esp_irq_pending(struct esp *esp)
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 8883ca36f93..35bd13879fe 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -71,6 +71,7 @@
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include "megaraid_mbox.h"
static int megaraid_init(void);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3948a00d81f..dd94c7d574f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.40-rc1"
-#define MEGASAS_RELDATE "Jul. 26, 2011"
-#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.06.12-rc1"
+#define MEGASAS_RELDATE "Oct. 5, 2011"
+#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011"
/*
* Device IDs
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
#define PCI_DEVICE_ID_LSI_FUSION 0x005b
+#define PCI_DEVICE_ID_LSI_INVADER 0x005d
/*
* =====================================
@@ -138,6 +139,7 @@
#define MFI_CMD_ABORT 0x06
#define MFI_CMD_SMP 0x07
#define MFI_CMD_STP 0x08
+#define MFI_CMD_INVALID 0xff
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000
@@ -221,6 +223,7 @@ enum MFI_STAT {
MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+ MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
MFI_STAT_INVALID_STATUS = 0xFF
};
@@ -716,7 +719,7 @@ struct megasas_ctrl_info {
#define MEGASAS_DEFAULT_INIT_ID -1
#define MEGASAS_MAX_LUN 8
#define MEGASAS_MAX_LD 64
-#define MEGASAS_DEFAULT_CMD_PER_LUN 128
+#define MEGASAS_DEFAULT_CMD_PER_LUN 256
#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
@@ -755,6 +758,7 @@ struct megasas_ctrl_info {
#define MEGASAS_INT_CMDS 32
#define MEGASAS_SKINNY_INT_CMDS 5
+#define MEGASAS_MAX_MSIX_QUEUES 16
/*
* FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
* SGLs based on the size of dma_addr_t
@@ -1276,6 +1280,11 @@ struct megasas_aen_event {
struct megasas_instance *instance;
};
+struct megasas_irq_context {
+ struct megasas_instance *instance;
+ u32 MSIxIndex;
+};
+
struct megasas_instance {
u32 *producer;
@@ -1349,8 +1358,9 @@ struct megasas_instance {
/* Ptr to hba specific information */
void *ctrl_context;
- u8 msi_flag;
- struct msix_entry msixentry;
+ unsigned int msix_vectors;
+ struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
+ struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
struct megasas_cmd *map_update_cmd;
unsigned long bar;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 776d0198866..29a994f9c4f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.40-rc1
+ * Version : v00.00.06.12-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -84,7 +84,7 @@ MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux@lsi.com");
MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
-int megasas_transition_to_ready(struct megasas_instance *instance);
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
@@ -114,6 +114,8 @@ static struct pci_device_id megasas_pci_table[] = {
/* xscale IOP */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
/* Fusion */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
+ /* Invader */
{}
};
@@ -213,6 +215,10 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
cmd->scmd = NULL;
cmd->frame_count = 0;
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
list_add_tail(&cmd->list, &instance->cmd_pool);
spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
@@ -1583,7 +1589,8 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)) {
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
} else {
writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
@@ -1907,7 +1914,6 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
static enum
blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
- struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
struct megasas_instance *instance;
unsigned long flags;
@@ -1916,7 +1922,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
return BLK_EH_NOT_HANDLED;
}
- instance = cmd->instance;
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
if (!(instance->flag & MEGASAS_FW_BUSY)) {
/* FW is busy, throttle IO */
spin_lock_irqsave(instance->host->host_lock, flags);
@@ -1957,7 +1963,8 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
/*
* First wait for all commands to complete
*/
- if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
ret = megasas_reset_fusion(scmd->device->host);
else
ret = megasas_generic_reset(scmd);
@@ -2161,7 +2168,16 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
cmd->scmd->SCp.ptr = NULL;
switch (hdr->cmd) {
-
+ case MFI_CMD_INVALID:
+ /* Some older 1068 controller FW may keep a pending
+ MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
+ when booting the kdump kernel. Ignore this command to
+ prevent a kernel panic on shutdown of the kdump kernel. */
+ printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
+ "completed.\n");
+ printk(KERN_WARNING "megaraid_sas: If you have a controller "
+ "other than PERC5, please upgrade your firmware.\n");
+ break;
case MFI_CMD_PD_SCSI_IO:
case MFI_CMD_LD_SCSI_IO:
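The MFI_CMD_INVALID changes work as a pair: when a kdump kernel boots (reset_devices set) on a non-Fusion/Invader controller, pre-allocated frames are stamped MFI_CMD_INVALID, so a completion left over from the crashed kernel lands in the new switch case above and is ignored instead of being processed. A compressed standalone sketch of that guard, assuming simplified frame and command types; MFI_CMD_DCMD here is only an example opcode:

#include <stdio.h>
#include <stdbool.h>

enum { MFI_CMD_DCMD = 0x05, MFI_CMD_INVALID = 0xff };

struct frame   { unsigned char cmd; };
struct mfi_cmd { struct frame frame; };

static bool reset_devices = true;      /* true when booting a kdump kernel */

/* Frames go back to the pool pre-poisoned while in the kdump kernel. */
static void return_cmd(struct mfi_cmd *cmd)
{
	if (reset_devices)
		cmd->frame.cmd = MFI_CMD_INVALID;
}

/* A completion inherited from the crashed kernel is ignored, not used. */
static void complete_cmd(const struct mfi_cmd *cmd)
{
	if (cmd->frame.cmd == MFI_CMD_INVALID) {
		printf("stale completion ignored\n");
		return;
	}
	printf("normal completion, cmd 0x%02x\n", (unsigned)cmd->frame.cmd);
}

int main(void)
{
	struct mfi_cmd cmd = { .frame = { .cmd = MFI_CMD_DCMD } };

	return_cmd(&cmd);       /* kdump boot: frame is poisoned            */
	complete_cmd(&cmd);     /* leftover interrupt hits the poisoned frame */
	return 0;
}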
@@ -2477,7 +2493,7 @@ process_fw_state_change_wq(struct work_struct *work)
msleep(1000);
}
- if (megasas_transition_to_ready(instance)) {
+ if (megasas_transition_to_ready(instance, 1)) {
printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
megaraid_sas_kill_hba(instance);
@@ -2532,7 +2548,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
instance->reg_set)
) == 0) {
/* Hardware may not set outbound_intr_status in MSI-X mode */
- if (!instance->msi_flag)
+ if (!instance->msix_vectors)
return IRQ_NONE;
}
@@ -2590,16 +2606,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
*/
static irqreturn_t megasas_isr(int irq, void *devp)
{
- struct megasas_instance *instance;
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
unsigned long flags;
irqreturn_t rc;
- if (atomic_read(
- &(((struct megasas_instance *)devp)->fw_reset_no_pci_access)))
+ if (atomic_read(&instance->fw_reset_no_pci_access))
return IRQ_HANDLED;
- instance = (struct megasas_instance *)devp;
-
spin_lock_irqsave(&instance->hba_lock, flags);
rc = megasas_deplete_reply_queue(instance, DID_OK);
spin_unlock_irqrestore(&instance->hba_lock, flags);
@@ -2617,7 +2631,7 @@ static irqreturn_t megasas_isr(int irq, void *devp)
* has to wait for the ready state.
*/
int
-megasas_transition_to_ready(struct megasas_instance* instance)
+megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
{
int i;
u8 max_wait;
@@ -2639,11 +2653,13 @@ megasas_transition_to_ready(struct megasas_instance* instance)
switch (fw_state) {
case MFI_STATE_FAULT:
-
printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
- max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FAULT;
- break;
+ if (ocr) {
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
+ } else
+ return -ENODEV;
case MFI_STATE_WAIT_HANDSHAKE:
/*
@@ -2654,7 +2670,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
(instance->pdev->device ==
- PCI_DEVICE_ID_LSI_FUSION)) {
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER)) {
writel(
MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
@@ -2674,7 +2692,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
(instance->pdev->device ==
- PCI_DEVICE_ID_LSI_FUSION)) {
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER)) {
writel(MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
} else
@@ -2695,11 +2715,15 @@ megasas_transition_to_ready(struct megasas_instance* instance)
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
(instance->pdev->device
- == PCI_DEVICE_ID_LSI_FUSION)) {
+ == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_INVADER)) {
writel(MFI_RESET_FLAGS,
&instance->reg_set->doorbell);
- if (instance->pdev->device ==
- PCI_DEVICE_ID_LSI_FUSION) {
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER)) {
for (i = 0; i < (10 * 1000); i += 20) {
if (readl(
&instance->
@@ -2922,6 +2946,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
memset(cmd->frame, 0, total_sz);
cmd->frame->io.context = cmd->index;
cmd->frame->io.pad_0 = 0;
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
}
return 0;
@@ -3474,6 +3502,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
+ int i;
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3496,6 +3525,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_INVADER:
instance->instancet = &megasas_instance_template_fusion;
break;
case PCI_DEVICE_ID_LSI_SAS1078R:
@@ -3520,15 +3550,39 @@ static int megasas_init_fw(struct megasas_instance *instance)
/*
* We expect the FW state to be READY
*/
- if (megasas_transition_to_ready(instance))
+ if (megasas_transition_to_ready(instance, 0))
goto fail_ready_state;
/* Check if MSI-X is supported while in ready state */
msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
0x4000000) >> 0x1a;
- if (msix_enable && !msix_disable &&
- !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
- instance->msi_flag = 1;
+ if (msix_enable && !msix_disable) {
+ /* Check max MSI-X vectors */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
+ instance->msix_vectors = (readl(&instance->reg_set->
+ outbound_scratch_pad_2
+ ) & 0x1F) + 1;
+ } else
+ instance->msix_vectors = 1;
+ /* Don't bother allocating more MSI-X vectors than cpus */
+ instance->msix_vectors = min(instance->msix_vectors,
+ (unsigned int)num_online_cpus());
+ for (i = 0; i < instance->msix_vectors; i++)
+ instance->msixentry[i].entry = i;
+ i = pci_enable_msix(instance->pdev, instance->msixentry,
+ instance->msix_vectors);
+ if (i >= 0) {
+ if (i) {
+ if (!pci_enable_msix(instance->pdev,
+ instance->msixentry, i))
+ instance->msix_vectors = i;
+ else
+ instance->msix_vectors = 0;
+ }
+ } else
+ instance->msix_vectors = 0;
+ }
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
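The block above negotiates the MSI-X vector count: Fusion/Invader parts report the supported count in outbound_scratch_pad_2, the count is clamped to num_online_cpus(), and under the legacy pci_enable_msix() contract a positive return value means only that many vectors are available, so the request is retried once with the smaller count. A standalone sketch of just that retry pattern, with fake_pci_enable_msix() standing in for the PCI core call:

#include <stdio.h>

struct msix_entry { unsigned short vector; unsigned short entry; };

/* Stand-in: pretend the device can only supply 4 vectors. */
static int fake_pci_enable_msix(struct msix_entry *entries, int nvec)
{
	const int available = 4;
	(void)entries;
	return nvec > available ? available : 0;
}

int main(void)
{
	struct msix_entry entries[16];
	int want = 16, vectors, rc, i;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	vectors = want;
	rc = fake_pci_enable_msix(entries, vectors);
	if (rc > 0) {
		/* Fewer vectors available than requested: retry once. */
		vectors = fake_pci_enable_msix(entries, rc) ? 0 : rc;
	} else if (rc < 0) {
		vectors = 0;    /* hard failure: fall back to legacy INTx */
	}

	printf("using %d MSI-X vector(s)\n", vectors);
	return 0;
}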
@@ -3892,7 +3946,8 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_cmd_len = 16;
/* Fusion only supports host reset */
- if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
host->hostt->eh_device_reset_handler = NULL;
host->hostt->eh_bus_reset_handler = NULL;
}
@@ -3942,7 +3997,7 @@ fail_set_dma_mask:
static int __devinit
megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int rval, pos;
+ int rval, pos, i, j;
struct Scsi_Host *host;
struct megasas_instance *instance;
u16 control = 0;
@@ -4002,6 +4057,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_INVADER:
{
struct fusion_context *fusion;
@@ -4094,7 +4150,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->last_time = 0;
instance->disableOnlineCtrlReset = 1;
- if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
@@ -4108,11 +4165,32 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/*
* Register IRQ
*/
- if (request_irq(instance->msi_flag ? instance->msixentry.vector :
- pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas", instance)) {
- printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
- goto fail_irq;
+ if (instance->msix_vectors) {
+ for (i = 0 ; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0 ; j < i ; j++)
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ goto fail_irq;
+ }
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
}
instance->instancet->enable_intr(instance->reg_set);
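Each MSI-X vector above is registered with its own irq_context entry as the dev_id cookie, and a failure part-way through unwinds only the vectors registered so far; the free_irq() calls elsewhere in the patch pass the same per-vector pointers. A standalone sketch of that register/rollback pattern, with fake_request_irq()/fake_free_irq() standing in for the kernel calls:

#include <stdio.h>

#define NVEC 4

struct irq_context { int instance_id; int msix_index; };

/* Stand-ins for request_irq()/free_irq(); vector 2 is made to fail. */
static int fake_request_irq(int vector, void *dev_id)
{
	(void)dev_id;
	return vector == 2 ? -1 : 0;
}

static void fake_free_irq(int vector, void *dev_id)
{
	(void)dev_id;
	printf("freed vector %d\n", vector);
}

int main(void)
{
	struct irq_context ctx[NVEC];
	int i, j;

	for (i = 0; i < NVEC; i++) {
		ctx[i].instance_id = 0;
		ctx[i].msix_index = i;
		if (fake_request_irq(i, &ctx[i])) {
			printf("vector %d failed, rolling back\n", i);
			/* Unwind only the vectors registered so far,
			 * passing the same per-vector cookie used above. */
			for (j = 0; j < i; j++)
				fake_free_irq(j, &ctx[j]);
			return 1;
		}
	}
	printf("all %d vectors registered\n", NVEC);
	return 0;
}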
@@ -4156,15 +4234,20 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, NULL);
instance->instancet->disable_intr(instance->reg_set);
- free_irq(instance->msi_flag ? instance->msixentry.vector :
- instance->pdev->irq, instance);
+ if (instance->msix_vectors)
+ for (i = 0 ; i < instance->msix_vectors; i++)
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
fail_irq:
- if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
fail_init_mfi:
- if (instance->msi_flag)
+ if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
fail_alloc_dma_buf:
if (instance->evt_detail)
@@ -4280,6 +4363,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *host;
struct megasas_instance *instance;
+ int i;
instance = pci_get_drvdata(pdev);
host = instance->host;
@@ -4303,9 +4387,14 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
pci_set_drvdata(instance->pdev, instance);
instance->instancet->disable_intr(instance->reg_set);
- free_irq(instance->msi_flag ? instance->msixentry.vector :
- instance->pdev->irq, instance);
- if (instance->msi_flag)
+
+ if (instance->msix_vectors)
+ for (i = 0 ; i < instance->msix_vectors; i++)
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
pci_save_state(pdev);
@@ -4323,7 +4412,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
static int
megasas_resume(struct pci_dev *pdev)
{
- int rval;
+ int rval, i, j;
struct Scsi_Host *host;
struct megasas_instance *instance;
@@ -4357,15 +4446,17 @@ megasas_resume(struct pci_dev *pdev)
/*
* We expect the FW state to be READY
*/
- if (megasas_transition_to_ready(instance))
+ if (megasas_transition_to_ready(instance, 0))
goto fail_ready_state;
/* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+ if (instance->msix_vectors)
+ pci_enable_msix(instance->pdev, instance->msixentry,
+ instance->msix_vectors);
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_INVADER:
{
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
@@ -4391,11 +4482,32 @@ megasas_resume(struct pci_dev *pdev)
/*
* Register IRQ
*/
- if (request_irq(instance->msi_flag ? instance->msixentry.vector :
- pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas", instance)) {
- printk(KERN_ERR "megasas: Failed to register IRQ\n");
- goto fail_irq;
+ if (instance->msix_vectors) {
+ for (i = 0 ; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0 ; j < i ; j++)
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ goto fail_irq;
+ }
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
}
instance->instancet->enable_intr(instance->reg_set);
@@ -4492,13 +4604,18 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
instance->instancet->disable_intr(instance->reg_set);
- free_irq(instance->msi_flag ? instance->msixentry.vector :
- instance->pdev->irq, instance);
- if (instance->msi_flag)
+ if (instance->msix_vectors)
+ for (i = 0 ; i < instance->msix_vectors; i++)
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_INVADER:
megasas_release_fusion(instance);
for (i = 0; i < 2 ; i++)
if (fusion->ld_map[i])
@@ -4539,14 +4656,20 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
*/
static void megasas_shutdown(struct pci_dev *pdev)
{
+ int i;
struct megasas_instance *instance = pci_get_drvdata(pdev);
+
instance->unload = 1;
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
instance->instancet->disable_intr(instance->reg_set);
- free_irq(instance->msi_flag ? instance->msixentry.vector :
- instance->pdev->irq, instance);
- if (instance->msi_flag)
+ if (instance->msix_vectors)
+ for (i = 0 ; i < instance->msix_vectors; i++)
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5a5af1fe758..5255dd688ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -52,6 +52,7 @@
#include <scsi/scsi_host.h>
#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
#include <asm/div64.h>
#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
@@ -226,8 +227,9 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
* span - Span number
* block - Absolute Block number in the physical disk
*/
-u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
- u16 *pDevHandle, struct RAID_CONTEXT *pRAID_Context,
+u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+ u16 stripRef, u64 *pdBlock, u16 *pDevHandle,
+ struct RAID_CONTEXT *pRAID_Context,
struct MR_FW_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -279,7 +281,8 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
*pDevHandle = MR_PdDevHandleGet(pd, map);
else {
*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
- if (raid->level >= 5)
+ if ((raid->level >= 5) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
@@ -306,7 +309,8 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
* This function will return 0 if region lock was acquired OR return num strips
*/
u8
-MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_FW_RAID_MAP_ALL *map)
{
@@ -394,8 +398,12 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
}
pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
- pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ :
- raid->regTypeReqOnWrite;
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)
+ pRAID_Context->regLockFlags = (isRead) ?
+ raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+ else
+ pRAID_Context->regLockFlags = (isRead) ?
+ REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
pRAID_Context->VirtualDiskTgtId = raid->targetId;
pRAID_Context->regLockRowLBA = regStart;
pRAID_Context->regLockLength = regSize;
@@ -404,7 +412,8 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
/*Get Phy Params only if FP capable, or else leave it to MR firmware
to do the calculation.*/
if (io_info->fpOkForIo) {
- retval = MR_GetPhyParams(ld, start_strip, ref_in_start_stripe,
+ retval = MR_GetPhyParams(instance, ld, start_strip,
+ ref_in_start_stripe,
&io_info->pdBlock,
&io_info->devHandle, pRAID_Context,
map);
@@ -415,7 +424,8 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
} else if (isRead) {
uint stripIdx;
for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
- if (!MR_GetPhyParams(ld, start_strip + stripIdx,
+ if (!MR_GetPhyParams(instance, ld,
+ start_strip + stripIdx,
ref_in_start_stripe,
&io_info->pdBlock,
&io_info->devHandle,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f13e7abd345..bfd87fab39a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -74,7 +74,8 @@ megasas_issue_polled(struct megasas_instance *instance,
struct megasas_cmd *cmd);
u8
-MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_FW_RAID_MAP_ALL *map);
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
@@ -89,7 +90,7 @@ u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
struct LD_LOAD_BALANCE_INFO *lbInfo);
u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
struct IO_REQUEST_INFO *in_info);
-int megasas_transition_to_ready(struct megasas_instance *instance);
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);
extern u32 megasas_dbg_lvl;
@@ -101,6 +102,10 @@ extern u32 megasas_dbg_lvl;
void
megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs)
{
+ /* For Thunderbolt/Invader also clear intr on enable */
+ writel(~0, &regs->outbound_intr_status);
+ readl(&regs->outbound_intr_status);
+
writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
/* Dummy readl to force pci flush */
@@ -139,11 +144,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
return 0;
- /*
- * dummy read to flush PCI
- */
- readl(&regs->outbound_intr_status);
-
return 1;
}
@@ -385,7 +385,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
- int i, j;
+ int i, j, count;
u32 max_cmd, io_frames_sz;
struct fusion_context *fusion;
struct megasas_cmd_fusion *cmd;
@@ -409,9 +409,10 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
goto fail_req_desc;
}
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
fusion->reply_frames_desc_pool =
pci_pool_create("reply_frames pool", instance->pdev,
- fusion->reply_alloc_sz, 16, 0);
+ fusion->reply_alloc_sz * count, 16, 0);
if (!fusion->reply_frames_desc_pool) {
printk(KERN_ERR "megasas; Could not allocate memory for "
@@ -430,7 +431,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
}
reply_desc = fusion->reply_frames_desc;
- for (i = 0; i < fusion->reply_q_depth; i++, reply_desc++)
+ for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = ULLONG_MAX;
io_frames_sz = fusion->io_frames_alloc_sz;
@@ -590,7 +591,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
struct megasas_init_frame *init_frame;
struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
dma_addr_t ioc_init_handle;
- u32 context;
struct megasas_cmd *cmd;
u8 ret;
struct fusion_context *fusion;
@@ -634,14 +634,13 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
fusion->reply_frames_desc_phys;
IOCInitMessage->SystemRequestFrameBaseAddress =
fusion->io_request_frames_phys;
-
+ /* Set to 0 for none or 1 MSI-X vectors */
+ IOCInitMessage->HostMSIxVectors = (instance->msix_vectors > 0 ?
+ instance->msix_vectors : 0);
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
- context = init_frame->context;
- init_frame->context = context;
-
frame_hdr->cmd_status = 0xFF;
frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
@@ -881,7 +880,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
u32 max_cmd;
- int i = 0;
+ int i = 0, count;
fusion = instance->ctrl_context;
@@ -933,7 +932,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
sizeof(union MPI2_SGE_IO_UNION))/16;
- fusion->last_reply_idx = 0;
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count; i++)
+ fusion->last_reply_idx[i] = 0;
/*
* Allocate memory for descriptors
@@ -1043,7 +1044,9 @@ map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
case MFI_STAT_DEVICE_NOT_FOUND:
cmd->scmd->result = DID_BAD_TARGET << 16;
break;
-
+ case MFI_STAT_CONFIG_SEQ_MISMATCH:
+ cmd->scmd->result = DID_IMM_RETRY << 16;
+ break;
default:
printk(KERN_DEBUG "megasas: FW status %#x\n", status);
cmd->scmd->result = DID_ERROR << 16;
@@ -1066,14 +1069,17 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
struct megasas_cmd_fusion *cmd)
{
- int i, sg_processed;
- int sge_count, sge_idx;
+ int i, sg_processed, sge_count;
struct scatterlist *os_sgl;
struct fusion_context *fusion;
fusion = instance->ctrl_context;
- cmd->io_request->ChainOffset = 0;
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
sge_count = scsi_dma_map(scp);
@@ -1082,16 +1088,14 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
if (sge_count > instance->max_num_sge || !sge_count)
return sge_count;
- if (sge_count > fusion->max_sge_in_main_msg) {
- /* One element to store the chain info */
- sge_idx = fusion->max_sge_in_main_msg - 1;
- } else
- sge_idx = sge_count;
-
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
sgl_ptr->Length = sg_dma_len(os_sgl);
sgl_ptr->Address = sg_dma_address(os_sgl);
sgl_ptr->Flags = 0;
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+ if (i == sge_count - 1)
+ sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+ }
sgl_ptr++;
sg_processed = i + 1;
@@ -1100,13 +1104,30 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
(sge_count > fusion->max_sge_in_main_msg)) {
struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
- cmd->io_request->ChainOffset =
- fusion->chain_offset_io_request;
+ if (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) {
+ if ((cmd->io_request->IoFlags &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ cmd->io_request->ChainOffset =
+ fusion->
+ chain_offset_io_request;
+ else
+ cmd->io_request->ChainOffset = 0;
+ } else
+ cmd->io_request->ChainOffset =
+ fusion->chain_offset_io_request;
+
sg_chain = sgl_ptr;
/* Prepare chain element */
sg_chain->NextChainOffset = 0;
- sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
- MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ if (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER)
+ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+ else
+ sg_chain->Flags =
+ (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION)
*(sge_count - sg_processed));
sg_chain->Address = cmd->sg_frame_phys_addr;
@@ -1399,11 +1420,18 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_request->RaidContext.regLockFlags = 0;
fp_possible = 0;
} else {
- if (MR_BuildRaidContext(&io_info, &io_request->RaidContext,
+ if (MR_BuildRaidContext(instance, &io_info,
+ &io_request->RaidContext,
local_map_ptr))
fp_possible = io_info.fpOkForIo;
}
+ /* Use smp_processor_id() for now, until cmd->request->cpu carries a
+ real CPU id by default rather than a CPU group id; otherwise not all
+ MSI-X queues would be utilized. */
+ cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
+ smp_processor_id() % instance->msix_vectors : 0;
+
if (fp_possible) {
megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
local_map_ptr, start_lba_lo);
@@ -1412,6 +1440,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ io_request->IoFlags |=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ }
if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
(io_info.isRead)) {
io_info.devHandle =
@@ -1426,11 +1468,23 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
} else {
io_request->RaidContext.timeoutValue =
local_map_ptr->raidMap.fpPdIoTimeoutSec;
- io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ io_request->RaidContext.nseg = 0x1;
+ }
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
} /* Not FP */
}
@@ -1513,8 +1567,10 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->EEDPFlags = 0;
io_request->Control = 0;
io_request->EEDPBlockSize = 0;
- io_request->IoFlags = 0;
+ io_request->ChainOffset = 0;
io_request->RaidContext.RAIDFlags = 0;
+ io_request->RaidContext.Type = 0;
+ io_request->RaidContext.nseg = 0;
memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
/*
@@ -1612,7 +1668,6 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
req_desc->Words = 0;
cmd->request_desc = req_desc;
- cmd->request_desc->Words = 0;
if (megasas_build_io_fusion(instance, scmd, cmd)) {
megasas_return_cmd_fusion(instance, cmd);
@@ -1647,7 +1702,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
* Completes all commands that is in reply descriptor queue
*/
int
-complete_cmd_fusion(struct megasas_instance *instance)
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
{
union MPI2_REPLY_DESCRIPTORS_UNION *desc;
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
@@ -1667,7 +1722,9 @@ complete_cmd_fusion(struct megasas_instance *instance)
return IRQ_HANDLED;
desc = fusion->reply_frames_desc;
- desc += fusion->last_reply_idx;
+ desc += ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
+ fusion->last_reply_idx[MSIxIndex];
reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
@@ -1740,16 +1797,19 @@ complete_cmd_fusion(struct megasas_instance *instance)
break;
}
- fusion->last_reply_idx++;
- if (fusion->last_reply_idx >= fusion->reply_q_depth)
- fusion->last_reply_idx = 0;
+ fusion->last_reply_idx[MSIxIndex]++;
+ if (fusion->last_reply_idx[MSIxIndex] >=
+ fusion->reply_q_depth)
+ fusion->last_reply_idx[MSIxIndex] = 0;
desc->Words = ULLONG_MAX;
num_completed++;
/* Get the next reply descriptor */
- if (!fusion->last_reply_idx)
- desc = fusion->reply_frames_desc;
+ if (!fusion->last_reply_idx[MSIxIndex])
+ desc = fusion->reply_frames_desc +
+ ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
else
desc++;
@@ -1769,7 +1829,7 @@ complete_cmd_fusion(struct megasas_instance *instance)
return IRQ_NONE;
wmb();
- writel(fusion->last_reply_idx,
+ writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex],
&instance->reg_set->reply_post_host_index);
megasas_check_and_restore_queue_depth(instance);
return IRQ_HANDLED;
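complete_cmd_fusion() now walks a per-MSI-X slice of one contiguous reply descriptor area: the slice base is MSIxIndex * reply_alloc_sz divided by the descriptor size, last_reply_idx[] wraps within the slice, and the doorbell write encodes the queue number in bits 31:24 next to the index. A standalone sketch of that arithmetic with made-up sizes; only the offset, wrap and doorbell math mirrors the hunks above:

#include <stdio.h>

#define NQUEUES        4        /* MSI-X reply queues           */
#define QUEUE_DEPTH    8        /* descriptors per queue        */
#define DESC_SIZE      16       /* bytes per reply descriptor   */
#define REPLY_ALLOC_SZ (QUEUE_DEPTH * DESC_SIZE)

static unsigned int last_reply_idx[NQUEUES];

/* Offset (in descriptors) of the next entry to inspect for a queue. */
static unsigned int next_desc(unsigned int msix_index)
{
	return (msix_index * REPLY_ALLOC_SZ) / DESC_SIZE +
	       last_reply_idx[msix_index];
}

/* Consume one descriptor and build the doorbell value to write:
 * queue number in bits 31:24, next index in the low bits. */
static unsigned int consume(unsigned int msix_index)
{
	if (++last_reply_idx[msix_index] >= QUEUE_DEPTH)
		last_reply_idx[msix_index] = 0;
	return (msix_index << 24) | last_reply_idx[msix_index];
}

int main(void)
{
	unsigned int q = 2;

	printf("queue %u starts at descriptor %u\n", q, next_desc(q));
	printf("doorbell after one completion: 0x%08x\n", consume(q));
	return 0;
}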
@@ -1787,6 +1847,9 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
unsigned long flags;
+ u32 count, MSIxIndex;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
/* If we have already declared the adapter dead, do not complete cmds */
spin_lock_irqsave(&instance->hba_lock, flags);
@@ -1797,7 +1860,8 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
spin_unlock_irqrestore(&instance->hba_lock, flags);
spin_lock_irqsave(&instance->completion_lock, flags);
- complete_cmd_fusion(instance);
+ for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
+ complete_cmd_fusion(instance, MSIxIndex);
spin_unlock_irqrestore(&instance->completion_lock, flags);
}
@@ -1806,20 +1870,24 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
*/
irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
- struct megasas_instance *instance = (struct megasas_instance *)devp;
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
u32 mfiStatus, fw_state;
- if (!instance->msi_flag) {
+ if (!instance->msix_vectors) {
mfiStatus = instance->instancet->clear_intr(instance->reg_set);
if (!mfiStatus)
return IRQ_NONE;
}
/* If we are resetting, bail */
- if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+ if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
+ instance->instancet->clear_intr(instance->reg_set);
return IRQ_HANDLED;
+ }
- if (!complete_cmd_fusion(instance)) {
+ if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
+ instance->instancet->clear_intr(instance->reg_set);
/* If we didn't complete any commands, check for FW fault */
fw_state = instance->instancet->read_fw_status_reg(
instance->reg_set) & MFI_STATE_MASK;
@@ -1866,6 +1934,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
fusion = instance->ctrl_context;
io_req = cmd->io_request;
+
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
mpi25_ieee_chain =
(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
@@ -1928,15 +2004,12 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
- union desc_value d_val;
req_desc = build_mpt_cmd(instance, cmd);
if (!req_desc) {
printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
return;
}
- d_val.word = req_desc->Words;
-
instance->instancet->fire_cmd(instance, req_desc->u.low,
req_desc->u.high, instance->reg_set);
}
@@ -2029,14 +2102,16 @@ out:
void megasas_reset_reply_desc(struct megasas_instance *instance)
{
- int i;
+ int i, count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
fusion = instance->ctrl_context;
- fusion->last_reply_idx = 0;
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count ; i++)
+ fusion->last_reply_idx[i] = 0;
reply_desc = fusion->reply_frames_desc;
- for (i = 0 ; i < fusion->reply_q_depth; i++, reply_desc++)
+ for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = ULLONG_MAX;
}
@@ -2057,8 +2132,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
"returning FAILED.\n");
- retval = FAILED;
- goto out;
+ return FAILED;
}
mutex_lock(&instance->reset_mutex);
@@ -2173,7 +2247,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
}
/* Wait for FW to become ready */
- if (megasas_transition_to_ready(instance)) {
+ if (megasas_transition_to_ready(instance, 1)) {
printk(KERN_WARNING "megaraid_sas: Failed to "
"transition controller to ready.\n");
continue;
@@ -2186,6 +2260,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
continue;
}
+ clear_bit(MEGASAS_FUSION_IN_RESET,
+ &instance->reset_flags);
instance->instancet->enable_intr(instance->reg_set);
instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
@@ -2247,6 +2323,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
megaraid_sas_kill_hba(instance);
retval = FAILED;
} else {
+ clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance->reg_set);
instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 82b577a72c8..088c9f91da9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -43,6 +43,15 @@
#define HOST_DIAG_WRITE_ENABLE 0x80
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
+#define MAX_MSIX_QUEUES_FUSION 16
+
+/* Invader defines */
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
/* T10 PI defines */
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
@@ -70,7 +79,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
*/
#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
-
+#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
#define MEGASAS_FP_CMD_LEN 16
@@ -82,7 +91,9 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
*/
struct RAID_CONTEXT {
- u16 resvd0;
+ u8 Type:4;
+ u8 nseg:4;
+ u8 resvd0;
u16 timeoutValue;
u8 regLockFlags;
u8 resvd1;
@@ -527,7 +538,7 @@ struct MR_LD_RAID {
u8 ldState;
u8 regTypeReqOnWrite;
u8 modFactor;
- u8 reserved2[1];
+ u8 regTypeReqOnRead;
u16 seqNum;
struct {
@@ -663,7 +674,7 @@ struct fusion_context {
union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
struct dma_pool *reply_frames_desc_pool;
- u16 last_reply_idx;
+ u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
u32 reply_q_depth;
u32 request_alloc_sz;
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 3105d5e8d90..8dc1b32918d 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2010 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.18
+ * mpi2.h Version: 02.00.20
*
* Version History
* ---------------
@@ -66,6 +66,9 @@
* 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
* 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -91,7 +94,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x12)
+#define MPI2_HEADER_VERSION_UNIT (0x14)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -515,6 +518,8 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
/* Power Management Control */
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
+/* Send Host Message */
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
/* beginning of product-specific range */
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
/* end of product-specific range */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 61475a6480e..cfd95b4e300 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2010 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.17
+ * mpi2_cnfg.h Version: 02.00.19
*
* Version History
* ---------------
@@ -134,6 +134,12 @@
* to MPI2_CONFIG_PAGE_IO_UNIT_7.
* Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
* and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
* --------------------------------------------------------------------------
*/
@@ -329,7 +335,9 @@ typedef struct _MPI2_CONFIG_REQUEST
U8 VP_ID; /* 0x08 */
U8 VF_ID; /* 0x09 */
U16 Reserved1; /* 0x0A */
- U32 Reserved2; /* 0x0C */
+ U8 Reserved2; /* 0x0C */
+ U8 ProxyVF_ID; /* 0x0D */
+ U16 Reserved4; /* 0x0E */
U32 Reserved3; /* 0x10 */
MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */
U32 PageAddress; /* 0x18 */
@@ -915,6 +923,120 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
+/* IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+ U16 Flags; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U16
+ Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */
+ U32 Reserved2; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U32 Reserved4; /* 0x14 */
+} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR,
+Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t;
+
+/* defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U8 NumSensors; /* 0x0C */
+ U8 PollingInterval; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_IOUNIT8_SENSOR
+ Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00)
+
+
+/* IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+ U16 CurrentTemperature; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U8 Flags; /* 0x04 */
+ U8 Reserved2; /* 0x05 */
+ U16 Reserved3; /* 0x06 */
+ U32 Reserved4; /* 0x08 */
+ U32 Reserved5; /* 0x0C */
+} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR,
+Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t;
+
+/* defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U8 NumSensors; /* 0x0C */
+ U8 Reserved4; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_IOUNIT9_SENSOR
+ Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00)
+
+
+/* IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+ U8 CreditPercent; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION,
+Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumFunctions; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U32 Reserved3; /* 0x08 */
+ U32 Reserved4; /* 0x0C */
+ MPI2_IOUNIT10_FUNCTION
+ Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+
/****************************************************************************
@@ -1022,12 +1144,12 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_7
U32 Reserved1; /* 0x04 */
U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */
U16 SASBroadcastPrimitiveMasks; /* 0x18 */
- U16 Reserved2; /* 0x1A */
+ U16 SASNotifyPrimitiveMasks; /* 0x1A */
U32 Reserved3; /* 0x1C */
} MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7,
Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t;
-#define MPI2_IOCPAGE7_PAGEVERSION (0x01)
+#define MPI2_IOCPAGE7_PAGEVERSION (0x02)
/* IOC Page 8 */
@@ -2070,16 +2192,16 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
/* defines for PowerManagementCapabilities field */
-#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x000001000)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x000000800)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x000000400)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x000000200)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x000000100)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x000000010)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x000000008)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x000000004)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x000000002)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x000000001)
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
@@ -2266,6 +2388,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
/* values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 1f0c190d336..93d9b6956d0 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2010 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.16
+ * mpi2_ioc.h Version: 02.00.17
*
* Version History
* ---------------
@@ -104,6 +104,12 @@
* 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
* Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
* 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
* --------------------------------------------------------------------------
*/
@@ -421,7 +427,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST
U32 Reserved6; /* 0x10 */
U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */
U16 SASBroadcastPrimitiveMasks; /* 0x24 */
- U16 Reserved7; /* 0x26 */
+ U16 SASNotifyPrimitiveMasks; /* 0x26 */
U32 Reserved8; /* 0x28 */
} MPI2_EVENT_NOTIFICATION_REQUEST,
MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
@@ -476,6 +482,9 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
#define MPI2_EVENT_SAS_QUIESCE (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE (0x0028)
/* Log Entry Added Event data */
@@ -507,6 +516,39 @@ typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t;
+/* Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+ U16 Status; /* 0x00 */
+ U8 SensorNum; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U16 CurrentTemperature; /* 0x04 */
+ U16 Reserved2; /* 0x06 */
+ U32 Reserved3; /* 0x08 */
+ U32 Reserved4; /* 0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE,
+Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t;
+
+/* Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001)
+
+
+/* Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+ U8 SourceVF_ID; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+ U32 HostData[1]; /* 0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t;
+
+
/* Hard Reset Received Event data */
typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
@@ -749,6 +791,24 @@ typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE
#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+/* SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+ U8 PhyNum; /* 0x00 */
+ U8 Port; /* 0x01 */
+ U8 Reserved1; /* 0x02 */
+ U8 Primitive; /* 0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+Mpi2EventDataSasNotifyPrimitive_t,
+MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t;
+
+/* defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04)
+
/* SAS Initiator Device Status Change Event data */
@@ -1001,6 +1061,53 @@ typedef struct _MPI2_EVENT_ACK_REPLY
/****************************************************************************
+* SendHostMessage message
+****************************************************************************/
+
+/* SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+ U16 HostDataLength; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U8 Reserved4; /* 0x0C */
+ U8 DestVF_ID; /* 0x0D */
+ U16 Reserved5; /* 0x0E */
+ U32 Reserved6; /* 0x10 */
+ U32 Reserved7; /* 0x14 */
+ U32 Reserved8; /* 0x18 */
+ U32 Reserved9; /* 0x1C */
+ U32 Reserved10; /* 0x20 */
+ U32 HostData[1]; /* 0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t;
+
+
+/* SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+ U16 HostDataLength; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t;
+
+
+/****************************************************************************
* FWDownload message
****************************************************************************/
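As a reading aid (not part of the patch), here is a minimal sketch of how a consumer of mpi2_ioc.h might decode the new event payloads defined above; the example_handle_new_events() dispatcher and its log messages are hypothetical, only the MPI2_* names come from the header.

/* Hypothetical event dispatcher for the new MPI2 events -- sketch only. */
static void
example_handle_new_events(u16 event, void *event_data)
{
	switch (event) {
	case MPI2_EVENT_TEMP_THRESHOLD: {
		Mpi2EventDataTemperature_t *temp = event_data;

		/* Status bits 0..3 report which temperature threshold tripped */
		if (le16_to_cpu(temp->Status) & MPI2_EVENT_TEMPERATURE0_EXCEEDED)
			pr_warn("sensor %d: threshold 0 exceeded (temp %d)\n",
			    temp->SensorNum,
			    le16_to_cpu(temp->CurrentTemperature));
		break;
	}
	case MPI2_EVENT_SAS_NOTIFY_PRIMITIVE: {
		Mpi2EventDataSasNotifyPrimitive_t *notify = event_data;

		if (notify->Primitive == MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED)
			pr_info("NOTIFY (power loss expected) on phy %d\n",
			    notify->PhyNum);
		break;
	}
	default:
		break;
	}
}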
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6825772cfd6..beda04a8404 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -81,6 +81,15 @@ static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
+static int mpt2sas_fwfault_debug;
+MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+ "and halt firmware - (default=0)");
+
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+
/* diag_buffer_enable is bitwise
* bit 0 set = TRACE
* bit 1 set = SNAPSHOT
@@ -93,14 +102,6 @@ module_param(diag_buffer_enable, int, 0);
MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
"(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
-static int mpt2sas_fwfault_debug;
-MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
- "and halt firmware - (default=0)");
-
-static int disable_discovery = -1;
-module_param(disable_discovery, int, 0);
-MODULE_PARM_DESC(disable_discovery, " disable discovery ");
-
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
*
@@ -691,6 +692,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
}
ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
+
complete(&ioc->base_cmds.done);
return 1;
}
@@ -833,25 +835,31 @@ union reply_descriptor {
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
+ struct adapter_reply_queue *reply_q = bus_id;
union reply_descriptor rd;
u32 completed_cmds;
u8 request_desript_type;
u16 smid;
u8 cb_idx;
u32 reply;
- u8 msix_index;
- struct MPT2SAS_ADAPTER *ioc = bus_id;
+ u8 msix_index = reply_q->msix_index;
+ struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
Mpi2ReplyDescriptorsUnion_t *rpf;
u8 rc;
if (ioc->mask_interrupts)
return IRQ_NONE;
- rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
+ if (!atomic_add_unless(&reply_q->busy, 1, 1))
+ return IRQ_NONE;
+
+ rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
request_desript_type = rpf->Default.ReplyFlags
& MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
- if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ atomic_dec(&reply_q->busy);
return IRQ_NONE;
+ }
completed_cmds = 0;
cb_idx = 0xFF;
@@ -860,9 +868,7 @@ _base_interrupt(int irq, void *bus_id)
if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
goto out;
reply = 0;
- cb_idx = 0xFF;
smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
- msix_index = rpf->Default.MSIxIndex;
if (request_desript_type ==
MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
reply = le32_to_cpu
@@ -906,32 +912,86 @@ _base_interrupt(int irq, void *bus_id)
next:
rpf->Words = cpu_to_le64(ULLONG_MAX);
- ioc->reply_post_host_index = (ioc->reply_post_host_index ==
+ reply_q->reply_post_host_index =
+ (reply_q->reply_post_host_index ==
(ioc->reply_post_queue_depth - 1)) ? 0 :
- ioc->reply_post_host_index + 1;
+ reply_q->reply_post_host_index + 1;
request_desript_type =
- ioc->reply_post_free[ioc->reply_post_host_index].Default.
- ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ reply_q->reply_post_free[reply_q->reply_post_host_index].
+ Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
completed_cmds++;
if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
goto out;
- if (!ioc->reply_post_host_index)
- rpf = ioc->reply_post_free;
+ if (!reply_q->reply_post_host_index)
+ rpf = reply_q->reply_post_free;
else
rpf++;
} while (1);
out:
- if (!completed_cmds)
+ if (!completed_cmds) {
+ atomic_dec(&reply_q->busy);
return IRQ_NONE;
-
+ }
wmb();
- writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
+ if (ioc->is_warpdrive) {
+ writel(reply_q->reply_post_host_index,
+ ioc->reply_post_host_index[msix_index]);
+ atomic_dec(&reply_q->busy);
+ return IRQ_HANDLED;
+ }
+ writel(reply_q->reply_post_host_index | (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+ atomic_dec(&reply_q->busy);
return IRQ_HANDLED;
}
/**
+ * _base_is_controller_msix_enabled - does the controller support multi-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
+{
+ return (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR context
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to the OS before we process the TM completion.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+
+ /* If MSIX capability is turned off
+ * then multi-queues are not enabled
+ */
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (ioc->shost_recovery)
+ return;
+ /* TMs are on msix_index == 0 */
+ if (reply_q->msix_index == 0)
+ continue;
+ _base_interrupt(reply_q->vector, (void *)reply_q);
+ }
+}
+
+/**
* mpt2sas_base_release_callback_handler - clear interrupt callback handler
* @cb_idx: callback index
*
@@ -1081,74 +1141,171 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
}
/**
- * _base_save_msix_table - backup msix vector table
+ * _base_check_enable_msix - checks MSIX capability.
* @ioc: per adapter object
*
- * This address an errata where diag reset clears out the table
+ * Check to see if card is capable of MSIX, and set number
+ * of available msix vectors
*/
-static void
-_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
+static int
+_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
{
- int i;
+ int base;
+ u16 message_control;
- if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
- return;
- for (i = 0; i < ioc->msix_vector_count; i++)
- ioc->msix_table_backup[i] = ioc->msix_table[i];
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
+ "supported\n", ioc->name));
+ return -EINVAL;
+ }
+
+ /* get msix vector count */
+ /* NUMA_IO not supported for older controllers */
+ if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
+ ioc->msix_vector_count = 1;
+ else {
+ pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+ }
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
+ "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
+
+ return 0;
}
/**
- * _base_restore_msix_table - this restores the msix vector table
+ * _base_free_irq - free irq
* @ioc: per adapter object
*
+ * Freeing respective reply_queue from the list.
*/
static void
-_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
+_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
{
- int i;
+ struct adapter_reply_queue *reply_q, *next;
- if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+ if (list_empty(&ioc->reply_queue_list))
return;
- for (i = 0; i < ioc->msix_vector_count; i++)
- ioc->msix_table[i] = ioc->msix_table_backup[i];
+ list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ list_del(&reply_q->list);
+ synchronize_irq(reply_q->vector);
+ free_irq(reply_q->vector, reply_q);
+ kfree(reply_q);
+ }
}
/**
- * _base_check_enable_msix - checks MSIX capabable.
+ * _base_request_irq - request irq
* @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
*
- * Check to see if card is capable of MSIX, and set number
- * of available msix vectors
+ * Inserting respective reply_queue into the list.
*/
static int
-_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
{
- int base;
- u16 message_control;
- u32 msix_table_offset;
+ struct adapter_reply_queue *reply_q;
+ int r;
- base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
- if (!base) {
- dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
- "supported\n", ioc->name));
- return -EINVAL;
+ reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+ if (!reply_q) {
+ printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
+ ioc->name, (int)sizeof(struct adapter_reply_queue));
+ return -ENOMEM;
+ }
+ reply_q->ioc = ioc;
+ reply_q->msix_index = index;
+ reply_q->vector = vector;
+ atomic_set(&reply_q->busy, 0);
+ if (ioc->msix_enable)
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+ MPT2SAS_DRIVER_NAME, ioc->id, index);
+ else
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+ MPT2SAS_DRIVER_NAME, ioc->id);
+ r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+ reply_q);
+ if (r) {
+ printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
+ reply_q->name, vector);
+ kfree(reply_q);
+ return -EBUSY;
}
- /* get msix vector count */
- pci_read_config_word(ioc->pdev, base + 2, &message_control);
- ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+ INIT_LIST_HEAD(&reply_q->list);
+ list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+ return 0;
+}
- /* get msix table */
- pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
- msix_table_offset &= 0xFFFFFFF8;
- ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The end user would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would be nice if we could call irq_set_affinity, however it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+ int cpu_id;
+ int cpu_grouping, loop, grouping, grouping_mod;
- dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
- "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
- ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
- return 0;
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+ /* when there are more cpus than available msix vectors,
+ * then group cpus together on the same irq
+ */
+ if (ioc->cpu_count > ioc->msix_vector_count) {
+ grouping = ioc->cpu_count / ioc->msix_vector_count;
+ grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+ if (grouping < 2 || (grouping == 2 && !grouping_mod))
+ cpu_grouping = 2;
+ else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+ cpu_grouping = 4;
+ else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+ cpu_grouping = 8;
+ else
+ cpu_grouping = 16;
+ } else
+ cpu_grouping = 0;
+
+ loop = 0;
+ reply_q = list_entry(ioc->reply_queue_list.next,
+ struct adapter_reply_queue, list);
+ for_each_online_cpu(cpu_id) {
+ if (!cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ } else {
+ if (loop < cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop++;
+ } else {
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop = 1;
+ }
+ }
+ }
}
/**
@@ -1161,8 +1318,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
{
if (ioc->msix_enable) {
pci_disable_msix(ioc->pdev);
- kfree(ioc->msix_table_backup);
- ioc->msix_table_backup = NULL;
ioc->msix_enable = 0;
}
}
@@ -1175,10 +1330,13 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
static int
_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
{
- struct msix_entry entries;
+ struct msix_entry *entries, *a;
int r;
+ int i;
u8 try_msix = 0;
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
+
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@@ -1188,51 +1346,48 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
if (_base_check_enable_msix(ioc) != 0)
goto try_ioapic;
- ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
- sizeof(u32), GFP_KERNEL);
- if (!ioc->msix_table_backup) {
- dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
- "msix_table_backup failed!!!\n", ioc->name));
+ ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+ ioc->msix_vector_count);
+
+ entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!entries) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
+ "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
goto try_ioapic;
}
- memset(&entries, 0, sizeof(struct msix_entry));
- r = pci_enable_msix(ioc->pdev, &entries, 1);
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+ a->entry = i;
+
+ r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
if (r) {
dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
"failed (r=%d) !!!\n", ioc->name, r));
+ kfree(entries);
goto try_ioapic;
}
- r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
- ioc->name, ioc);
- if (r) {
- dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
- "interrupt %d !!!\n", ioc->name, entries.vector));
- pci_disable_msix(ioc->pdev);
- goto try_ioapic;
+ ioc->msix_enable = 1;
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+ r = _base_request_irq(ioc, i, a->vector);
+ if (r) {
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ kfree(entries);
+ goto try_ioapic;
+ }
}
- ioc->pci_irq = entries.vector;
- ioc->msix_enable = 1;
+ kfree(entries);
return 0;
/* failback to io_apic interrupt routing */
try_ioapic:
- r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
- ioc->name, ioc);
- if (r) {
- printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
- ioc->name, ioc->pdev->irq);
- r = -EBUSY;
- goto out_fail;
- }
-
- ioc->pci_irq = ioc->pdev->irq;
- return 0;
+ r = _base_request_irq(ioc, 0, ioc->pdev->irq);
- out_fail:
return r;
}
@@ -1251,6 +1406,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
int i, r = 0;
u64 pio_chip = 0;
u64 chip_phys = 0;
+ struct adapter_reply_queue *reply_q;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
ioc->name, __func__));
@@ -1313,9 +1469,11 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
if (r)
goto out_fail;
- printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
- ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
- "IO-APIC enabled"), ioc->pci_irq);
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
+ reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+ "IO-APIC enabled"), reply_q->vector);
+
printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
@@ -1330,7 +1488,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
if (ioc->chip_phys)
iounmap(ioc->chip);
ioc->chip_phys = 0;
- ioc->pci_irq = -1;
pci_release_selected_regions(ioc->pdev, ioc->bars);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
@@ -1577,6 +1734,12 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
}
#endif
+static inline u8
+_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[smp_processor_id()];
+}
+
/**
* mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
@@ -1593,7 +1756,7 @@ mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
- descriptor.SCSIIO.MSIxIndex = 0; /* TODO */
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
descriptor.SCSIIO.SMID = cpu_to_le16(smid);
descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
descriptor.SCSIIO.LMID = 0;
@@ -1617,7 +1780,7 @@ mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
descriptor.HighPriority.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.HighPriority.MSIxIndex = 0; /* TODO */
+ descriptor.HighPriority.MSIxIndex = 0;
descriptor.HighPriority.SMID = cpu_to_le16(smid);
descriptor.HighPriority.LMID = 0;
descriptor.HighPriority.Reserved1 = 0;
@@ -1639,7 +1802,7 @@ mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
u64 *request = (u64 *)&descriptor;
descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
- descriptor.Default.MSIxIndex = 0; /* TODO */
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
descriptor.Default.SMID = cpu_to_le16(smid);
descriptor.Default.LMID = 0;
descriptor.Default.DescriptorTypeDependent = 0;
@@ -1664,7 +1827,7 @@ mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
descriptor.SCSITarget.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
- descriptor.SCSITarget.MSIxIndex = 0; /* TODO */
+ descriptor.SCSITarget.MSIxIndex = _base_get_msix_index(ioc);
descriptor.SCSITarget.SMID = cpu_to_le16(smid);
descriptor.SCSITarget.LMID = 0;
descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
@@ -2171,7 +2334,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
u16 max_sge_elements;
u16 num_of_reply_frames;
u16 chains_needed_per_io;
- u32 sz, total_sz;
+ u32 sz, total_sz, reply_post_free_sz;
u32 retry_sz;
u16 max_request_credit;
int i;
@@ -2498,7 +2661,12 @@ chain_done:
total_sz += sz;
/* reply post queue, 16 byte align */
- sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t);
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ if (_base_is_controller_msix_enabled(ioc))
+ sz = reply_post_free_sz * ioc->reply_queue_count;
+ else
+ sz = reply_post_free_sz;
ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
ioc->pdev, sz, 16, 0);
if (!ioc->reply_post_free_dma_pool) {
@@ -3186,6 +3354,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
facts->MaxChainDepth = mpi_reply.MaxChainDepth;
facts->WhoInit = mpi_reply.WhoInit;
facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+ facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
facts->MaxReplyDescriptorPostQueueDepth =
le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
@@ -3243,7 +3412,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
-
+ if (_base_is_controller_msix_enabled(ioc))
+ mpi_request.HostMSIxVectors = ioc->reply_queue_count;
mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
mpi_request.ReplyDescriptorPostQueueDepth =
cpu_to_le16(ioc->reply_post_queue_depth);
@@ -3302,6 +3472,58 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
}
/**
+ * mpt2sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return 1;
+
+ if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+
+ ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
+ memcpy(ioc->port_enable_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
+
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ ioc->port_enable_failed = 1;
+
+ if (ioc->is_driver_loading) {
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ mpt2sas_port_enable_complete(ioc);
+ return 1;
+ } else {
+ ioc->start_scan_failed = ioc_status;
+ ioc->start_scan = 0;
+ return 1;
+ }
+ }
+ complete(&ioc->port_enable_cmds.done);
+ return 1;
+}
+
+
+/**
* _base_send_port_enable - send port_enable(discovery stuff) to firmware
* @ioc: per adapter object
* @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3312,67 +3534,151 @@ static int
_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
Mpi2PortEnableRequest_t *mpi_request;
- u32 ioc_state;
+ Mpi2PortEnableReply_t *mpi_reply;
unsigned long timeleft;
int r = 0;
u16 smid;
+ u16 ioc_status;
printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
- if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
+ if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
ioc->name, __func__);
return -EAGAIN;
}
- smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
+ smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
if (!smid) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
return -EAGAIN;
}
- ioc->base_cmds.status = MPT2_CMD_PENDING;
+ ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
- ioc->base_cmds.smid = smid;
+ ioc->port_enable_cmds.smid = smid;
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
- mpi_request->VF_ID = 0; /* TODO */
- mpi_request->VP_ID = 0;
+ init_completion(&ioc->port_enable_cmds.done);
mpt2sas_base_put_smid_default(ioc, smid);
- init_completion(&ioc->base_cmds.done);
- timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
300*HZ);
- if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
+ if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
printk(MPT2SAS_ERR_FMT "%s: timeout\n",
ioc->name, __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2PortEnableRequest_t)/4);
- if (ioc->base_cmds.status & MPT2_CMD_RESET)
+ if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
r = -EFAULT;
else
r = -ETIME;
goto out;
- } else
- dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
- ioc->name, __func__));
+ }
+ mpi_reply = ioc->port_enable_cmds.reply;
- ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL,
- 60, sleep_flag);
- if (ioc_state) {
- printk(MPT2SAS_ERR_FMT "%s: failed going to operational state "
- " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
+ ioc->name, __func__, ioc_status);
r = -EFAULT;
+ goto out;
}
out:
- ioc->base_cmds.status = MPT2_CMD_NOT_USED;
- printk(MPT2SAS_INFO_FMT "port enable: %s\n",
- ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+ printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+ "SUCCESS" : "FAILED"));
return r;
}
/**
+ * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ u16 smid;
+
+ printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+ printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ mpt2sas_base_put_smid_default(ioc, smid);
+ return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - determine whether to wait on discovery
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait on discovery to complete. Used to either
+ * locate boot device, or report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
+{
+ /* We wait for discovery to complete if IR firmware is loaded.
+ * The sas topology events arrive before PD events, so we need time to
+ * turn on the bit in ioc->pd_handles to indicate a PD.
+ * Also, it may be required to report Volumes ahead of physical
+ * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+ */
+ if (ioc->ir_firmware)
+ return 1;
+
+ /* if no Bios, then we don't need to wait */
+ if (!ioc->bios_pg3.BiosVersion)
+ return 0;
+
+ /* Bios is present, so we drop down here.
+ *
+ * If there are any entries in the Bios Page 2, then we wait
+ * for discovery to complete.
+ */
+
+ /* Current Boot Device */
+ if ((ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Request Boot Device */
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Alternate Request Boot Device */
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+ return 0;
+
+ return 1;
+}
+
+
+/**
* _base_unmask_events - turn on notification for this event
* @ioc: per adapter object
* @event: firmware event
@@ -3512,9 +3818,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
u32 hcb_size;
printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
-
- _base_save_msix_table(ioc);
-
drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
ioc->name));
@@ -3610,7 +3913,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
goto out;
}
- _base_restore_msix_table(ioc);
printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
return 0;
@@ -3691,6 +3993,9 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
u16 smid;
struct _tr_list *delayed_tr, *delayed_tr_next;
u8 hide_flag;
+ struct adapter_reply_queue *reply_q;
+ long reply_post_free;
+ u32 reply_post_free_sz;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -3756,21 +4061,46 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->reply_sz)
ioc->reply_free[i] = cpu_to_le32(reply_address);
+ /* initialize reply queues */
+ _base_assign_reply_queues(ioc);
+
/* initialize Reply Post Free Queue */
- for (i = 0; i < ioc->reply_post_queue_depth; i++)
- ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX);
+ reply_post_free = (long)ioc->reply_post_free;
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->reply_post_host_index = 0;
+ reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+ reply_post_free;
+ for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ reply_q->reply_post_free[i].Words =
+ cpu_to_le64(ULLONG_MAX);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_free_queue;
+ reply_post_free += reply_post_free_sz;
+ }
+ skip_init_reply_post_free_queue:
r = _base_send_ioc_init(ioc, sleep_flag);
if (r)
return r;
- /* initialize the index's */
+ /* initialize reply free host index */
ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
- ioc->reply_post_host_index = 0;
writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
- writel(0, &ioc->chip->ReplyPostHostIndex);
+
+ /* initialize reply post host index */
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+ &ioc->chip->ReplyPostHostIndex);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_host_index;
+ }
+
+ skip_init_reply_post_host_index:
_base_unmask_interrupts(ioc);
+
r = _base_event_notification(ioc, sleep_flag);
if (r)
return r;
@@ -3778,7 +4108,18 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
if (sleep_flag == CAN_SLEEP)
_base_static_config_pages(ioc);
- if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) {
+
+ if (ioc->is_driver_loading) {
+
+
+
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+ return r; /* scan_start and scan_finished support */
+ }
+
+
+ if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
if (ioc->manu_pg10.OEMIdentifier == 0x80) {
hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
MFG_PAGE10_HIDE_SSDS_MASK);
@@ -3787,13 +4128,6 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
}
}
- if (ioc->wait_for_port_enable_to_complete) {
- if (diag_buffer_enable != 0)
- mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
- if (disable_discovery > 0)
- return r;
- }
-
r = _base_send_port_enable(ioc, sleep_flag);
if (r)
return r;
@@ -3819,14 +4153,10 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->shost_recovery = 1;
_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
ioc->shost_recovery = 0;
- if (ioc->pci_irq) {
- synchronize_irq(pdev->irq);
- free_irq(ioc->pci_irq, ioc);
- }
+ _base_free_irq(ioc);
_base_disable_msix(ioc);
if (ioc->chip_phys)
iounmap(ioc->chip);
- ioc->pci_irq = -1;
ioc->chip_phys = 0;
pci_release_selected_regions(ioc->pdev, ioc->bars);
pci_disable_pcie_error_reporting(pdev);
@@ -3844,14 +4174,50 @@ int
mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
{
int r, i;
+ int cpu_id, last_cpu_id = 0;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
+ /* setup cpu_msix_table */
+ ioc->cpu_count = num_online_cpus();
+ for_each_online_cpu(cpu_id)
+ last_cpu_id = cpu_id;
+ ioc->cpu_msix_table_sz = last_cpu_id + 1;
+ ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+ ioc->reply_queue_count = 1;
+ if (!ioc->cpu_msix_table) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
+ "cpu_msix_table failed!!!\n", ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
+ sizeof(resource_size_t *), GFP_KERNEL);
+ if (!ioc->reply_post_host_index) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
+ "for cpu_msix_table failed!!!\n", ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ }
+
r = mpt2sas_base_map_resources(ioc);
if (r)
return r;
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index[0] =
+ (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
+
+ for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+ ioc->reply_post_host_index[i] = (resource_size_t *)
+ ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ * 4)));
+ }
+
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
if (r)
@@ -3898,6 +4264,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ /* port_enable command bits */
+ ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+
/* transport internal command bits */
ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
@@ -3939,8 +4309,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
goto out_free_resources;
}
- init_completion(&ioc->shost_recovery_done);
-
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
ioc->event_masks[i] = -1;
@@ -3963,7 +4331,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
- mpt2sas_base_start_watchdog(ioc);
return 0;
out_free_resources:
@@ -3972,12 +4339,16 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
mpt2sas_base_free_resources(ioc);
_base_release_memory_pools(ioc);
pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ if (ioc->is_warpdrive)
+ kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->scsih_cmds.reply);
kfree(ioc->config_cmds.reply);
kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->ctl_cmds.sense);
kfree(ioc->pfacts);
@@ -4009,11 +4380,15 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
mpt2sas_base_free_resources(ioc);
_base_release_memory_pools(ioc);
pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ if (ioc->is_warpdrive)
+ kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->pfacts);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->ctl_cmds.sense);
kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->scsih_cmds.reply);
@@ -4055,6 +4430,20 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
complete(&ioc->base_cmds.done);
}
+ if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT2_CMD_NOT_USED;
+ } else
+ complete(&ioc->port_enable_cmds.done);
+
+ }
if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
ioc->config_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
@@ -4120,7 +4509,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
{
int r;
unsigned long flags;
- u8 pe_complete = ioc->wait_for_port_enable_to_complete;
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
@@ -4167,7 +4555,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
/* If this hard reset is called while port enable is active, then
* there is no reason to call make_ioc_operational
*/
- if (pe_complete) {
+ if (ioc->is_driver_loading && ioc->port_enable_failed) {
+ ioc->remove_host = 1;
r = -EFAULT;
goto out;
}
@@ -4181,7 +4570,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_in_progress_status = r;
ioc->shost_recovery = 0;
- complete(&ioc->shost_recovery_done);
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
mutex_unlock(&ioc->reset_in_progress_mutex);
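The CPU-to-MSI-X assignment introduced above in _base_assign_reply_queues() groups CPUs onto reply queues when there are more online CPUs than MSI-X vectors. A standalone sketch of that grouping heuristic follows (the example_cpu_grouping() helper is hypothetical; the thresholds mirror the driver code):

/* Sketch only: how many CPUs share one reply queue (0 means a 1:1 mapping). */
static int
example_cpu_grouping(int cpu_count, int msix_vector_count)
{
	int grouping, grouping_mod;

	if (cpu_count <= msix_vector_count)
		return 0;
	grouping = cpu_count / msix_vector_count;
	grouping_mod = cpu_count % msix_vector_count;
	if (grouping < 2 || (grouping == 2 && !grouping_mod))
		return 2;
	if (grouping < 4 || (grouping == 4 && !grouping_mod))
		return 4;
	if (grouping < 8 || (grouping == 8 && !grouping_mod))
		return 8;
	return 16;
}

For instance, 16 online CPUs with 4 MSI-X vectors yields a grouping of 4, so four CPUs share each reply queue.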
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 8d5be2120c6..3c3babc7d26 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "09.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 09
+#define MPT2SAS_DRIVER_VERSION "10.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 10
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -544,6 +544,28 @@ struct _tr_list {
typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: link entry in the ioc's reply_queue_list
+*/
+struct adapter_reply_queue {
+ struct MPT2SAS_ADAPTER *ioc;
+ u8 msix_index;
+ unsigned int vector;
+ u32 reply_post_host_index;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ char name[MPT_NAME_LENGTH];
+ atomic_t busy;
+ struct list_head list;
+};
+
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi2_version_union {
MPI2_VERSION_STRUCT Struct;
@@ -606,7 +628,7 @@ enum mutex_type {
* @list: ioc_list
* @shost: shost object
* @id: unique adapter id
- * @pci_irq: irq number
+ * @cpu_count: number of online cpus
* @name: generic ioc string
* @tmp_string: tmp string used for logging
* @pdev: pci pdev object
@@ -633,11 +655,16 @@ enum mutex_type {
* @ignore_loginfos: ignore loginfos during task management
* @remove_host: flag for when driver unloads, to avoid sending dev resets
* @pci_error_recovery: flag to prevent ioc access until slot reset completes
- * @wait_for_port_enable_to_complete:
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ * waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt2sas_fw_work
+ * @start_scan_failed: means port enable failed, returns the ioc_status
* @msix_enable: flag indicating msix is enabled
* @msix_vector_count: number msix vectors
- * @msix_table: virt address to the msix table
- * @msix_table_backup: backup msix table
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -728,7 +755,8 @@ enum mutex_type {
* @reply_post_queue_depth: reply post queue depth
* @reply_post_free: pool for reply post (64bit descriptor)
* @reply_post_free_dma:
- * @reply_post_free_dma_pool:
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
* @reply_post_host_index: head index in the pool where FW completes IO
* @delayed_tr_list: target reset link list
* @delayed_tr_volume_list: volume target reset link list
@@ -737,7 +765,7 @@ struct MPT2SAS_ADAPTER {
struct list_head list;
struct Scsi_Host *shost;
u8 id;
- u32 pci_irq;
+ int cpu_count;
char name[MPT_NAME_LENGTH];
char tmp_string[MPT_STRING_LENGTH];
struct pci_dev *pdev;
@@ -767,20 +795,26 @@ struct MPT2SAS_ADAPTER {
u8 shost_recovery;
struct mutex reset_in_progress_mutex;
- struct completion shost_recovery_done;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
- int ioc_reset_in_progress_status;
+ u8 ioc_reset_in_progress_status;
u8 ignore_loginfos;
u8 remove_host;
u8 pci_error_recovery;
- u8 wait_for_port_enable_to_complete;
+ u8 wait_for_discovery_to_complete;
+ struct completion port_enable_done;
+ u8 is_driver_loading;
+ u8 port_enable_failed;
+
+ u8 start_scan;
+ u16 start_scan_failed;
u8 msix_enable;
u16 msix_vector_count;
- u32 *msix_table;
- u32 *msix_table_backup;
+ u8 *cpu_msix_table;
+ resource_size_t **reply_post_host_index;
+ u16 cpu_msix_table_sz;
u32 ioc_reset_count;
/* internal commands, callback index */
@@ -790,11 +824,13 @@ struct MPT2SAS_ADAPTER {
u8 scsih_cb_idx;
u8 ctl_cb_idx;
u8 base_cb_idx;
+ u8 port_enable_cb_idx;
u8 config_cb_idx;
u8 tm_tr_cb_idx;
u8 tm_tr_volume_cb_idx;
u8 tm_sas_control_cb_idx;
struct _internal_cmd base_cmds;
+ struct _internal_cmd port_enable_cmds;
struct _internal_cmd transport_cmds;
struct _internal_cmd scsih_cmds;
struct _internal_cmd tm_cmds;
@@ -911,7 +947,8 @@ struct MPT2SAS_ADAPTER {
Mpi2ReplyDescriptorsUnion_t *reply_post_free;
dma_addr_t reply_post_free_dma;
struct dma_pool *reply_post_free_dma_pool;
- u32 reply_post_host_index;
+ u8 reply_queue_count;
+ struct list_head reply_queue_list;
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
@@ -955,6 +992,7 @@ void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
u16 smid);
+void mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
@@ -975,6 +1013,8 @@ void mpt2sas_base_release_callback_handler(u8 cb_idx);
u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
+u8 mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply);
void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr);
u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked);
@@ -989,6 +1029,8 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty
void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
+int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
+
/* scsih shared API */
u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
@@ -1006,6 +1048,8 @@ struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAP
struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc);
+
void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
/* config shared API */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 2b1101076cf..36ea0b2d802 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -1356,6 +1356,9 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
Mpi2ConfigReply_t mpi_reply;
int r, i, config_page_sz;
u16 ioc_status;
+ int config_num;
+ u16 element_type;
+ u16 phys_disk_dev_handle;
*volume_handle = 0;
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
@@ -1371,35 +1374,53 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
if (r)
goto out;
- mpi_request.PageAddress =
- cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG);
mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
config_page = kmalloc(config_page_sz, GFP_KERNEL);
- if (!config_page)
- goto out;
- r = _config_request(ioc, &mpi_request, &mpi_reply,
- MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- config_page_sz);
- if (r)
+ if (!config_page) {
+ r = -1;
goto out;
-
- r = -1;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
- if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
- goto out;
- for (i = 0; i < config_page->NumElements; i++) {
- if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
- MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
- MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
- continue;
- if (le16_to_cpu(config_page->ConfigElement[i].
- PhysDiskDevHandle) == pd_handle) {
- *volume_handle = le16_to_cpu(config_page->
- ConfigElement[i].VolDevHandle);
- r = 0;
+ }
+ config_num = 0xff;
+ while (1) {
+ mpi_request.PageAddress = cpu_to_le32(config_num +
+ MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ config_page_sz);
+ if (r)
+ goto out;
+ r = -1;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
goto out;
+ for (i = 0; i < config_page->NumElements; i++) {
+ element_type = le16_to_cpu(config_page->
+ ConfigElement[i].ElementFlags) &
+ MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+ if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+ element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+ phys_disk_dev_handle =
+ le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle);
+ if (phys_disk_dev_handle == pd_handle) {
+ *volume_handle =
+ le16_to_cpu(config_page->
+ ConfigElement[i].VolDevHandle);
+ r = 0;
+ goto out;
+ }
+ } else if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+ *volume_handle = 0;
+ r = 0;
+ goto out;
+ }
}
+ config_num = config_page->ConfigNum;
}
out:
kfree(config_page);
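The rewritten mpt2sas_config_get_volume_handle() above now walks every RAID configuration via GET_NEXT_CONFIGNUM instead of reading only the active configuration. Reduced to its core, the walk pattern looks like this (an illustrative fragment, not compilable on its own; error handling and element matching elided):

	/* sketch of the GET_NEXT_CONFIGNUM walk used above */
	config_num = 0xff;	/* 0xff requests the first configuration */
	while (1) {
		mpi_request.PageAddress = cpu_to_le32(config_num +
		    MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
		if (_config_request(ioc, &mpi_request, &mpi_reply,
		    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		    config_page_sz))
			break;		/* request failed */
		if ((le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
			break;		/* no further configurations */
		/* ... scan config_page->ConfigElement[] for pd_handle ... */
		config_num = config_page->ConfigNum;	/* advance */
	}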
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 246d5fbc6e5..aabcb911706 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1207,6 +1207,9 @@ _ctl_do_reset(void __user *arg)
if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
return -ENODEV;
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
@@ -2178,7 +2181,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
!ioc)
return -ENODEV;
- if (ioc->shost_recovery || ioc->pci_error_recovery)
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
return -EAGAIN;
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
@@ -2297,7 +2301,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
return -ENODEV;
- if (ioc->shost_recovery || ioc->pci_error_recovery)
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
return -EAGAIN;
memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
@@ -2704,6 +2709,33 @@ _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
_ctl_ioc_reset_count_show, NULL);
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reply_queue_count;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+ reply_queue_count = ioc->reply_queue_count;
+ else
+ reply_queue_count = 1;
+ return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO,
+ _ctl_ioc_reply_queue_count_show, NULL);
+
struct DIAG_BUFFER_START {
__le32 Size;
__le32 DiagVersion;
@@ -2914,6 +2946,7 @@ struct device_attribute *mpt2sas_host_attrs[] = {
&dev_attr_host_trace_buffer_size,
&dev_attr_host_trace_buffer,
&dev_attr_host_trace_buffer_enable,
+ &dev_attr_reply_queue_count,
NULL,
};
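The new reply_queue_count attribute added above is a read-only shost attribute, so it appears under the SCSI host's sysfs directory. A small hypothetical userspace reader (the host0 path is just an example; the actual host number depends on the system):

/* sketch: read the new reply_queue_count shost attribute from userspace */
#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/class/scsi_host/host0/reply_queue_count", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("reply queues: %s", buf);
	fclose(f);
	return 0;
}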
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 5202de3f3d3..8889b1babca 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -71,6 +71,9 @@ static void _firmware_event_work(struct work_struct *work);
static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
/* global parameters */
LIST_HEAD(mpt2sas_ioc_list);
@@ -79,6 +82,7 @@ static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
@@ -103,6 +107,18 @@ static int max_lun = MPT2SAS_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Either bit can be set, or both
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
+ "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+
/**
* struct sense_info - common structure for obtaining sense keys
* @skey: sense key
@@ -117,8 +133,8 @@ struct sense_info {
#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
-#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
-
+#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
* struct fw_event_work - firmware event struct
* @list: link list framework
@@ -372,31 +388,34 @@ _scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle,
Mpi2SasDevicePage0_t sas_device_pg0;
Mpi2ConfigReply_t mpi_reply;
u32 ioc_status;
+ *sas_address = 0;
if (handle <= ioc->sas_hba.num_phys) {
*sas_address = ioc->sas_hba.sas_address;
return 0;
- } else
- *sas_address = 0;
+ }
if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- MPI2_IOCSTATUS_MASK;
- if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
- "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
- return -EIO;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ return 0;
}
- *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- return 0;
+ /* we hit this because the given parent handle doesn't exist */
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return -ENXIO;
+ /* else error case */
+ printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x), "
+ "failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
}
/**
@@ -424,7 +443,11 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
u16 slot;
/* only process this function when driver loads */
- if (!ioc->wait_for_port_enable_to_complete)
+ if (!ioc->is_driver_loading)
+ return;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
return;
if (!is_raid) {
@@ -587,8 +610,15 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent))
+ sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ if (!ioc->is_driver_loading)
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
/**
@@ -1400,6 +1430,10 @@ _scsih_slave_destroy(struct scsi_device *sdev)
{
struct MPT2SAS_TARGET *sas_target_priv_data;
struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct MPT2SAS_ADAPTER *ioc;
+ struct _sas_device *sas_device;
+ unsigned long flags;
if (!sdev->hostdata)
return;
@@ -1407,6 +1441,19 @@ _scsih_slave_destroy(struct scsi_device *sdev)
starget = scsi_target(sdev);
sas_target_priv_data = starget->hostdata;
sas_target_priv_data->num_luns--;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device)
+ sas_device->starget = NULL;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
kfree(sdev->hostdata);
sdev->hostdata = NULL;
}
@@ -1598,8 +1645,10 @@ _scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
* _scsih_get_volume_capabilities - volume capabilities
* @ioc: per adapter object
* @sas_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
*/
-static void
+static int
_scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
struct _raid_device *raid_device)
{
@@ -1612,9 +1661,10 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
}
raid_device->num_pds = num_pds;
@@ -1622,17 +1672,19 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
sizeof(Mpi2RaidVol0PhysDisk_t));
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
}
if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
kfree(vol_pg0);
- return;
+ return 1;
}
raid_device->volume_type = vol_pg0->VolumeType;
@@ -1652,6 +1704,7 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
kfree(vol_pg0);
+ return 0;
}
/**
* _scsih_disable_ddio - Disable direct I/O for all the volumes
@@ -1922,13 +1975,20 @@ _scsih_slave_configure(struct scsi_device *sdev)
sas_target_priv_data->handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return 0;
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
}
_scsih_get_volume_capabilities(ioc, raid_device);
+ if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
/*
* WARPDRIVE: Initialize the required data for Direct IO
*/
@@ -2002,11 +2062,22 @@ _scsih_slave_configure(struct scsi_device *sdev)
if (sas_device) {
if (sas_target_priv_data->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
- mpt2sas_config_get_volume_handle(ioc,
- sas_device->handle, &sas_device->volume_handle);
- mpt2sas_config_get_volume_wwid(ioc,
+ if (mpt2sas_config_get_volume_handle(ioc,
+ sas_device->handle, &sas_device->volume_handle)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ if (sas_device->volume_handle &&
+ mpt2sas_config_get_volume_wwid(ioc,
sas_device->volume_handle,
- &sas_device->volume_wwid);
+ &sas_device->volume_wwid)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
}
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
@@ -2035,6 +2106,11 @@ _scsih_slave_configure(struct scsi_device *sdev)
if (!ssp_target)
_scsih_display_sata_capabilities(ioc, sas_device, sdev);
+ } else {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
}
_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
@@ -2161,6 +2237,7 @@ _scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
return 1;
if (ioc->tm_cmds.smid != smid)
return 1;
+ mpt2sas_base_flush_reply_queues(ioc);
ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
if (mpi_reply) {
@@ -2713,22 +2790,38 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
/**
- * _scsih_queue_rescan - queue a topology rescan from user context
+ * _scsih_error_recovery_delete_devices - remove devices not responding
* @ioc: per adapter object
*
* Return nothing.
*/
static void
-_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
+_scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (ioc->wait_for_port_enable_to_complete)
+ if (ioc->is_driver_loading)
return;
+ fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt2sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
if (!fw_event)
return;
- fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+ fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
}
@@ -2976,14 +3069,27 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
Mpi2SCSITaskManagementRequest_t *mpi_request;
u16 smid;
struct _sas_device *sas_device;
- struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
+ u64 sas_address = 0;
unsigned long flags;
struct _tr_list *delayed_tr;
+ u32 ioc_state;
- if (ioc->shost_recovery || ioc->remove_host ||
- ioc->pci_error_recovery) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
- "progress!\n", __func__, ioc->name));
+ if (ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+ "removed: handle(0x%04x)\n", __func__, ioc->name, handle));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+ "error recovery: handle(0x%04x)\n", __func__, ioc->name,
+ handle));
+ return;
+ }
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+ "operational: handle(0x%04x)\n", __func__, ioc->name,
+ handle));
return;
}
@@ -2997,13 +3103,18 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "setting delete flag: handle(0x%04x), "
- "sas_addr(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) sas_device->sas_address));
+ sas_address = sas_device->sas_address;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_target_priv_data) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
+ (unsigned long long)sas_address));
+ _scsih_ublock_io_device(ioc, handle);
+ sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
+ }
+
smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
if (!smid) {
delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
@@ -3184,11 +3295,21 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpt2sas_base_get_reply_virt_addr(ioc, reply);
Mpi2SasIoUnitControlRequest_t *mpi_request;
u16 smid_sas_ctrl;
+ u32 ioc_state;
- if (ioc->shost_recovery || ioc->remove_host ||
- ioc->pci_error_recovery) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
- "progress!\n", __func__, ioc->name));
+ if (ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+ "removed\n", __func__, ioc->name));
+ return 1;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+ "error recovery\n", __func__, ioc->name));
+ return 1;
+ }
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+ "operational\n", __func__, ioc->name));
return 1;
}
@@ -5098,7 +5219,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
- if (ioc->wait_for_port_enable_to_complete)
+ if (ioc->wait_for_discovery_to_complete)
_scsih_sas_device_init_add(ioc, sas_device);
else
_scsih_sas_device_add(ioc, sas_device);
@@ -5134,6 +5255,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
sas_target_priv_data = sas_device_backup.starget->hostdata;
sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, sas_device_backup.handle);
+ sas_target_priv_data->handle =
+ MPT2SAS_INVALID_DEVICE_HANDLE;
}
_scsih_ublock_io_device(ioc, sas_device_backup.handle);
@@ -5287,7 +5411,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
_scsih_sas_topology_change_event_debug(ioc, event_data);
#endif
- if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+ if (ioc->remove_host || ioc->pci_error_recovery)
return;
if (!ioc->sas_hba.num_phys)
@@ -5348,6 +5472,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
switch (reason_code) {
case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+ if (ioc->shost_recovery)
+ break;
+
if (link_rate == prev_link_rate)
break;
@@ -5361,6 +5488,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
break;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+ if (ioc->shost_recovery)
+ break;
+
mpt2sas_transport_update_links(ioc, sas_address,
handle, phy_number, link_rate);
@@ -5621,7 +5751,7 @@ broadcast_aen_retry:
termination_count = 0;
query_count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- if (ioc->ioc_reset_in_progress_status)
+ if (ioc->shost_recovery)
goto out;
scmd = _scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
@@ -5643,7 +5773,7 @@ broadcast_aen_retry:
lun = sas_device_priv_data->lun;
query_count++;
- if (ioc->ioc_reset_in_progress_status)
+ if (ioc->shost_recovery)
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -5685,7 +5815,7 @@ broadcast_aen_retry:
goto broadcast_aen_retry;
}
- if (ioc->ioc_reset_in_progress_status)
+ if (ioc->shost_recovery)
goto out_no_lock;
r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
@@ -5724,7 +5854,7 @@ broadcast_aen_retry:
ioc->name, __func__, query_count, termination_count));
ioc->broadcast_aen_busy = 0;
- if (!ioc->ioc_reset_in_progress_status)
+ if (!ioc->shost_recovery)
_scsih_ublock_io_all_device(ioc);
mutex_unlock(&ioc->tm_cmds.mutex);
}
@@ -5788,8 +5918,11 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
static void
_scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
{
- struct MPT2SAS_TARGET *sas_target_priv_data = starget->hostdata;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ if (starget == NULL)
+ return;
+ sas_target_priv_data = starget->hostdata;
if (no_uld_attach)
sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
else
@@ -5844,7 +5977,7 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
raid_device->handle = handle;
raid_device->wwid = wwid;
_scsih_raid_device_add(ioc, raid_device);
- if (!ioc->wait_for_port_enable_to_complete) {
+ if (!ioc->wait_for_discovery_to_complete) {
rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
raid_device->id, 0);
if (rc)
@@ -6126,6 +6259,10 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
_scsih_sas_ir_config_change_event_debug(ioc, event_data);
#endif
+
+ if (ioc->shost_recovery)
+ return;
+
foreign_config = (le32_to_cpu(event_data->Flags) &
MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
@@ -6184,6 +6321,9 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
int rc;
Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+ if (ioc->shost_recovery)
+ return;
+
if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
return;
@@ -6266,6 +6406,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
u64 sas_address;
+ if (ioc->shost_recovery)
+ return;
+
if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
return;
@@ -6509,10 +6652,10 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
u32 device_info;
u16 slot;
- printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+ printk(MPT2SAS_INFO_FMT "search for end-devices: start\n", ioc->name);
if (list_empty(&ioc->sas_device_list))
- return;
+ goto out;
handle = 0xFFFF;
while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
@@ -6531,6 +6674,9 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
_scsih_mark_responding_sas_device(ioc, sas_address, slot,
handle);
}
+out:
+ printk(MPT2SAS_INFO_FMT "search for end-devices: complete\n",
+ ioc->name);
}
/**
@@ -6606,10 +6752,14 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
u16 handle;
u8 phys_disk_num;
- printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+ if (!ioc->ir_firmware)
+ return;
+
+ printk(MPT2SAS_INFO_FMT "search for raid volumes: start\n",
+ ioc->name);
if (list_empty(&ioc->raid_device_list))
- return;
+ goto out;
handle = 0xFFFF;
while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
@@ -6648,6 +6798,9 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
set_bit(handle, ioc->pd_handles);
}
}
+out:
+ printk(MPT2SAS_INFO_FMT "search for responding raid volumes: "
+ "complete\n", ioc->name);
}
/**
@@ -6707,10 +6860,10 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
u64 sas_address;
u16 handle;
- printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+ printk(MPT2SAS_INFO_FMT "search for expanders: start\n", ioc->name);
if (list_empty(&ioc->sas_expander_list))
- return;
+ goto out;
handle = 0xFFFF;
while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
@@ -6729,6 +6882,8 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
_scsih_mark_responding_expander(ioc, sas_address, handle);
}
+ out:
+ printk(MPT2SAS_INFO_FMT "search for expanders: complete\n", ioc->name);
}
/**
@@ -6744,6 +6899,8 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
struct _sas_node *sas_expander;
struct _raid_device *raid_device, *raid_device_next;
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
+ ioc->name);
list_for_each_entry_safe(sas_device, sas_device_next,
&ioc->sas_device_list, list) {
@@ -6763,6 +6920,9 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
_scsih_remove_device(ioc, sas_device);
}
+ if (!ioc->ir_firmware)
+ goto retry_expander_search;
+
list_for_each_entry_safe(raid_device, raid_device_next,
&ioc->raid_device_list, list) {
if (raid_device->responding) {
@@ -6789,52 +6949,170 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
mpt2sas_expander_remove(ioc, sas_expander->sas_address);
goto retry_expander_search;
}
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n",
+ ioc->name);
+ /* unblock devices */
+ _scsih_ublock_io_all_device(ioc);
+}
+
+static void
+_scsih_refresh_expander_links(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander, u16 handle)
+{
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int i;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ mpt2sas_transport_update_links(ioc, sas_expander->sas_address,
+ le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+ expander_pg1.NegotiatedLinkRate >> 4);
+ }
}
/**
- * _scsih_hide_unhide_sas_devices - add/remove device to/from OS
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
* @ioc: per adapter object
*
* Return nothing.
*/
static void
-_scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc)
+_scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
{
- struct _sas_device *sas_device, *sas_device_next;
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2EventIrConfigElement_t element;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 phys_disk_num;
+ u16 ioc_status;
+ u16 handle, parent_handle;
+ u64 sas_address;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
- if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag !=
- MFG_PAGE10_HIDE_IF_VOL_PRESENT)
- return;
+ printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
- if (ioc->hide_drives) {
- if (_scsih_get_num_volumes(ioc))
- return;
- ioc->hide_drives = 0;
- list_for_each_entry_safe(sas_device, sas_device_next,
- &ioc->sas_device_list, list) {
- if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
- _scsih_sas_device_remove(ioc, sas_device);
- } else if (!sas_device->starget) {
- mpt2sas_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
- }
+ _scsih_sas_host_refresh(ioc);
+
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ expander_device = mpt2sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+ else
+ _scsih_expander_add(ioc, handle);
+ }
+
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device)
+ continue;
+ if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
+ mpt2sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+ _scsih_add_device(ioc, handle, 0, 1);
}
- } else {
- if (!_scsih_get_num_volumes(ioc))
- return;
- ioc->hide_drives = 1;
- list_for_each_entry_safe(sas_device, sas_device_next,
- &ioc->sas_device_list, list) {
- mpt2sas_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent);
+ }
+
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+ if (raid_device)
+ continue;
+ if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
+ _scsih_sas_volume_add(ioc, &element);
}
}
+
+ skip_to_sas:
+
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+ continue;
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ if (sas_device)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+ mpt2sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ _scsih_add_device(ioc, handle, 0, 0);
+ }
+ }
+
+ printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
}
+
/**
* mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
@@ -6870,7 +7148,6 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
}
_scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
- _scsih_queue_rescan(ioc);
break;
case MPT2_IOC_DONE_RESET:
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -6880,6 +7157,13 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
+ if (!ioc->is_driver_loading) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
+ }
break;
}
}
@@ -6897,7 +7181,6 @@ _firmware_event_work(struct work_struct *work)
{
struct fw_event_work *fw_event = container_of(work,
struct fw_event_work, delayed_work.work);
- unsigned long flags;
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
/* the queue is being flushed so ignore this event */
@@ -6907,23 +7190,21 @@ _firmware_event_work(struct work_struct *work)
return;
}
- if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
- _scsih_fw_event_free(ioc, fw_event);
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->shost_recovery) {
- init_completion(&ioc->shost_recovery_done);
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
- flags);
- wait_for_completion(&ioc->shost_recovery_done);
- } else
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
- flags);
+ switch (fw_event->event) {
+ case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
+ while (scsi_host_in_recovery(ioc->shost))
+ ssleep(1);
_scsih_remove_unresponding_sas_devices(ioc);
- _scsih_hide_unhide_sas_devices(ioc);
- return;
- }
+ _scsih_scan_for_devices_after_reset(ioc);
+ break;
+ case MPT2SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
- switch (fw_event->event) {
+
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+ "from worker thread\n", ioc->name));
+ break;
case MPT2SAS_TURN_ON_FAULT_LED:
_scsih_turn_on_fault_led(ioc, fw_event->device_handle);
break;
@@ -7120,6 +7401,8 @@ static struct scsi_host_template scsih_driver_template = {
.slave_configure = _scsih_slave_configure,
.target_destroy = _scsih_target_destroy,
.slave_destroy = _scsih_slave_destroy,
+ .scan_finished = _scsih_scan_finished,
+ .scan_start = _scsih_scan_start,
.change_queue_depth = _scsih_change_queue_depth,
.change_queue_type = _scsih_change_queue_type,
.eh_abort_handler = _scsih_abort,
@@ -7353,6 +7636,7 @@ _scsih_remove(struct pci_dev *pdev)
}
sas_remove_host(shost);
+ mpt2sas_base_detach(ioc);
list_del(&ioc->list);
scsi_remove_host(shost);
scsi_host_put(shost);
@@ -7379,7 +7663,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
unsigned long flags;
int rc;
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
device = NULL;
+ is_raid = 0;
if (ioc->req_boot_device.device) {
device = ioc->req_boot_device.device;
is_raid = ioc->req_boot_device.is_raid;
@@ -7415,8 +7704,9 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
- mpt2sas_transport_port_remove(ioc, sas_address,
- sas_address_parent);
+ if (!ioc->is_driver_loading)
+ mpt2sas_transport_port_remove(ioc, sas_address,
+ sas_address_parent);
_scsih_sas_device_remove(ioc, sas_device);
}
}
@@ -7460,22 +7750,28 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
/* SAS Device List */
list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
list) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_move_tail(&sas_device->list, &ioc->sas_device_list);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->hide_drives)
continue;
if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
- _scsih_sas_device_remove(ioc, sas_device);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
} else if (!sas_device->starget) {
- mpt2sas_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
+ if (!ioc->is_driver_loading)
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+
}
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
}
@@ -7488,9 +7784,7 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
static void
_scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
{
- u16 volume_mapping_flags =
- le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
- MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ u16 volume_mapping_flags;
if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
return; /* return when IOC doesn't support initiator mode */
@@ -7498,18 +7792,93 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
_scsih_probe_boot_devices(ioc);
if (ioc->ir_firmware) {
- if ((volume_mapping_flags &
- MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) {
- _scsih_probe_sas(ioc);
+ volume_mapping_flags =
+ le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
_scsih_probe_raid(ioc);
+ _scsih_probe_sas(ioc);
} else {
- _scsih_probe_raid(ioc);
_scsih_probe_sas(ioc);
+ _scsih_probe_raid(ioc);
}
} else
_scsih_probe_sas(ioc);
}
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus. In our implemention, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+ mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+ ioc->start_scan = 1;
+ rc = mpt2sas_port_enable(ioc);
+
+ if (rc != 0)
+ printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function is called periodically, with the scsi_host and the elapsed
+ * scan time in jiffies, until it returns 1. In our implementation,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (time >= (300 * HZ)) {
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
+ "(timeout=300s)\n", ioc->name);
+ ioc->is_driver_loading = 0;
+ return 1;
+ }
+
+ if (ioc->start_scan)
+ return 0;
+
+ if (ioc->start_scan_failed) {
+ printk(MPT2SAS_INFO_FMT "port enable: FAILED with "
+ "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed);
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ ioc->remove_host = 1;
+ return 1;
+ }
+
+ printk(MPT2SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+ mpt2sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+ return 1;
+}
+
+
/**
* _scsih_probe - attach and add scsi host
* @pdev: PCI device struct
@@ -7546,6 +7915,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_cb_idx = tm_cb_idx;
ioc->ctl_cb_idx = ctl_cb_idx;
ioc->base_cb_idx = base_cb_idx;
+ ioc->port_enable_cb_idx = port_enable_cb_idx;
ioc->transport_cb_idx = transport_cb_idx;
ioc->scsih_cb_idx = scsih_cb_idx;
ioc->config_cb_idx = config_cb_idx;
@@ -7618,14 +7988,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_thread_fail;
}
- ioc->wait_for_port_enable_to_complete = 1;
+ ioc->is_driver_loading = 1;
if ((mpt2sas_base_attach(ioc))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
goto out_attach_fail;
}
- ioc->wait_for_port_enable_to_complete = 0;
+ scsi_scan_host(shost);
if (ioc->is_warpdrive) {
if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
ioc->hide_drives = 0;
@@ -7648,6 +8018,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_thread_fail:
list_del(&ioc->list);
scsi_remove_host(shost);
+ scsi_host_put(shost);
out_add_shost_fail:
return -ENODEV;
}
@@ -7894,6 +8265,8 @@ _scsih_init(void)
/* base internal commands callback handler */
base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
+ port_enable_cb_idx = mpt2sas_base_register_callback_handler(
+ mpt2sas_port_enable_done);
/* transport internal commands callback handler */
transport_cb_idx = mpt2sas_base_register_callback_handler(
@@ -7948,6 +8321,7 @@ _scsih_exit(void)
mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
mpt2sas_base_release_callback_handler(tm_cb_idx);
mpt2sas_base_release_callback_handler(base_cb_idx);
+ mpt2sas_base_release_callback_handler(port_enable_cb_idx);
mpt2sas_base_release_callback_handler(transport_cb_idx);
mpt2sas_base_release_callback_handler(scsih_cb_idx);
mpt2sas_base_release_callback_handler(config_cb_idx);
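For illustration only (not part of the patch): a minimal user-space model of the asynchronous scan handshake introduced above, where .scan_start fires port enable and sets start_scan, and the midlayer keeps polling .scan_finished until discovery completes or the 300 second timeout expires. All names prefixed with model_ are hypothetical.

#include <stdio.h>

#define MODEL_HZ 1000UL			/* assume 1000 jiffies per second */

struct model_ioc {
	int start_scan;			/* set by scan_start, cleared when the
					 * port-enable-complete event arrives */
	int start_scan_failed;		/* non-zero ioc_status on failure */
	int is_driver_loading;
};

static void model_scan_start(struct model_ioc *ioc)
{
	/* the real driver issues mpt2sas_port_enable() here */
	ioc->start_scan = 1;
}

/* mirrors the polling logic of _scsih_scan_finished() */
static int model_scan_finished(struct model_ioc *ioc, unsigned long time)
{
	if (time >= 300 * MODEL_HZ) {	/* give up after 300 seconds */
		ioc->is_driver_loading = 0;
		return 1;
	}
	if (ioc->start_scan)		/* firmware discovery still running */
		return 0;
	if (ioc->start_scan_failed) {	/* port enable failed */
		ioc->is_driver_loading = 0;
		return 1;
	}
	ioc->is_driver_loading = 0;	/* success: devices would be probed */
	return 1;
}

int main(void)
{
	struct model_ioc ioc = { 0, 0, 1 };
	unsigned long t;

	model_scan_start(&ioc);
	for (t = 0; !model_scan_finished(&ioc, t); t += MODEL_HZ) {
		if (t == 2 * MODEL_HZ)	/* pretend the firmware event fires */
			ioc.start_scan = 0;
	}
	printf("scan finished after %lu seconds\n", t / MODEL_HZ);
	return 0;
}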
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 15c79802621..230732241aa 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -163,7 +163,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
return -EIO;
}
- memset(identify, 0, sizeof(identify));
+ memset(identify, 0, sizeof(*identify));
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
/* sas_address */
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 3501291618f..7e423e5ad5e 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -398,6 +398,16 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
/* init phys */
mvs_phy_hacks(mvi);
+ /* disable non data frame retry */
+ tmp = mvs_cr32(mvi, CMD_SAS_CTL1);
+ if ((revision == VANIR_A0_REV) ||
+ (revision == VANIR_B0_REV) ||
+ (revision == VANIR_C0_REV)) {
+ tmp &= ~0xffff;
+ tmp |= 0x007f;
+ mvs_cw32(mvi, CMD_SAS_CTL1, tmp);
+ }
+
/* set LED blink when IO*/
mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
tmp = mr32(MVS_PA_VSR_PORT);
@@ -500,6 +510,27 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
tmp |= CINT_PHY_MASK;
mw32(MVS_INT_MASK, tmp);
+ tmp = mvs_cr32(mvi, CMD_LINK_TIMER);
+ tmp |= 0xFFFF0000;
+ mvs_cw32(mvi, CMD_LINK_TIMER, tmp);
+
+ /* tune STP performance */
+ tmp = 0x003F003F;
+ mvs_cw32(mvi, CMD_PL_TIMER, tmp);
+
+ /* This can improve expander large block size seq write performance */
+ tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1);
+ tmp |= 0xFFFF007F;
+ mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp);
+
+ /* change the connection open-close behavior (bit 9)
+ * set bit8 to 1 for performance tuning */
+ tmp = mvs_cr32(mvi, CMD_SL_MODE0);
+ tmp |= 0x00000300;
+ /* set bit0 to 0 to enable retry for no_dest reject case */
+ tmp &= 0xFFFFFFFE;
+ mvs_cw32(mvi, CMD_SL_MODE0, tmp);
+
/* Enable SRS interrupt */
mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
@@ -823,6 +854,10 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
}
+ /* enable spin up bit */
+ mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
+ mvs_write_port_cfg_data(mvi, i, 0x04);
+
}
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index dec7cadb748..f5451940d28 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -387,6 +387,8 @@ enum sas_cmd_port_registers {
CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
+ CMD_PORT_LAYER_TIMER1 = 0x1E0, /* Port Layer Timer 1 */
+ CMD_LINK_TIMER = 0x1E4, /* Link Timer */
};
enum mvs_info_flags {
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 4e9af66fd1d..6f589195746 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -59,7 +59,7 @@ static struct scsi_host_template mvs_sht = {
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
- .slave_configure = mvs_slave_configure,
+ .slave_configure = sas_slave_configure,
.slave_destroy = sas_slave_destroy,
.scan_finished = mvs_scan_finished,
.scan_start = mvs_scan_start,
@@ -74,7 +74,7 @@ static struct scsi_host_template mvs_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
- .slave_alloc = mvs_slave_alloc,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = mvst_host_attrs,
@@ -707,6 +707,15 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
{ PCI_VDEVICE(TTI, 0x2760), chip_9480 },
{
.vendor = 0x1b4b,
+ .device = 0x9480,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x9480,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_9480,
+ },
+ {
+ .vendor = 0x1b4b,
.device = 0x9445,
.subvendor = PCI_ANY_ID,
.subdevice = 0x9480,
@@ -723,6 +732,16 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
.class_mask = 0,
.driver_data = chip_9485,
},
+ { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
+ { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
{ } /* terminate list */
};
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 4958fefff36..a4884a57cf7 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -214,7 +214,7 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
- rc = -EOPNOTSUPP;
+ rc = -ENOSYS;
}
msleep(200);
return rc;
@@ -265,6 +265,12 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
id->dev_type = phy->identify.device_type;
id->initiator_bits = SAS_PROTOCOL_ALL;
id->target_bits = phy->identify.target_port_protocols;
+
+ /* direct attached SAS device */
+ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
+ }
} else if (phy->phy_type & PORT_TYPE_SATA) {
/*Nothing*/
}
@@ -276,36 +282,6 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
PORTE_BYTES_DMAED);
}
-int mvs_slave_alloc(struct scsi_device *scsi_dev)
-{
- struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
- if (dev_is_sata(dev)) {
- /* We don't need to rescan targets
- * if REPORT_LUNS request is failed
- */
- if (scsi_dev->lun > 0)
- return -ENXIO;
- scsi_dev->tagged_supported = 1;
- }
-
- return sas_slave_alloc(scsi_dev);
-}
-
-int mvs_slave_configure(struct scsi_device *sdev)
-{
- struct domain_device *dev = sdev_to_domain_dev(sdev);
- int ret = sas_slave_configure(sdev);
-
- if (ret)
- return ret;
- if (!dev_is_sata(dev)) {
- sas_change_queue_depth(sdev,
- MVS_QUEUE_SIZE,
- SCSI_QDEPTH_DEFAULT);
- }
- return 0;
-}
-
void mvs_scan_start(struct Scsi_Host *shost)
{
int i, j;
@@ -426,7 +402,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
/* generate open address frame hdr (first 12 bytes) */
/* initiator, SMP, ftype 1h */
buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
- buf_oaf[1] = dev->linkrate & 0xf;
+ buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
*(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
@@ -571,7 +547,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
/* generate open address frame hdr (first 12 bytes) */
/* initiator, STP, ftype 1h */
buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
- buf_oaf[1] = dev->linkrate & 0xf;
+ buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
@@ -679,7 +655,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
/* generate open address frame hdr (first 12 bytes) */
/* initiator, SSP, ftype 1h */
buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
- buf_oaf[1] = dev->linkrate & 0xf;
+ buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
@@ -1241,6 +1217,12 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
port->wide_port_phymap = sas_port->phy_mask;
mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
mvs_update_wideport(mvi, sas_phy->id);
+
+ /* direct attached SAS device */
+ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
+ }
}
if (lock)
spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1387,28 +1369,6 @@ void mvs_dev_gone(struct domain_device *dev)
mvs_dev_gone_notify(dev);
}
-static struct sas_task *mvs_alloc_task(void)
-{
- struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
-
- if (task) {
- INIT_LIST_HEAD(&task->list);
- spin_lock_init(&task->task_state_lock);
- task->task_state_flags = SAS_TASK_STATE_PENDING;
- init_timer(&task->timer);
- init_completion(&task->completion);
- }
- return task;
-}
-
-static void mvs_free_task(struct sas_task *task)
-{
- if (task) {
- BUG_ON(!list_empty(&task->list));
- kfree(task);
- }
-}
-
static void mvs_task_done(struct sas_task *task)
{
if (!del_timer(&task->timer))
@@ -1432,7 +1392,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
struct sas_task *task = NULL;
for (retry = 0; retry < 3; retry++) {
- task = mvs_alloc_task();
+ task = sas_alloc_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
@@ -1490,15 +1450,14 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
SAS_ADDR(dev->sas_addr),
task->task_status.resp,
task->task_status.stat);
- mvs_free_task(task);
+ sas_free_task(task);
task = NULL;
}
}
ex_err:
BUG_ON(retry == 3 && task != NULL);
- if (task != NULL)
- mvs_free_task(task);
+ sas_free_task(task);
return res;
}
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 44b47451322..c04a4f5b597 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -46,7 +46,7 @@
#include "mv_defs.h"
#define DRV_NAME "mvsas"
-#define DRV_VERSION "0.8.2"
+#define DRV_VERSION "0.8.16"
#define MVS_ID_NOT_MAPPED 0x7f
#define WIDE_PORT_MAX_PHY 4
#define mv_printk(fmt, arg ...) \
@@ -458,8 +458,6 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
u32 off_lo, u32 off_hi, u64 sas_addr);
-int mvs_slave_alloc(struct scsi_device *scsi_dev);
-int mvs_slave_configure(struct scsi_device *sdev);
void mvs_scan_start(struct Scsi_Host *shost);
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
int mvs_queue_command(struct sas_task *task, const int num,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
new file mode 100644
index 00000000000..88cf1db21a7
--- /dev/null
+++ b/drivers/scsi/mvumi.c
@@ -0,0 +1,2018 @@
+/*
+ * Marvell UMI driver
+ *
+ * Copyright 2011 Marvell. <jyli@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/io.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_eh.h>
+#include <linux/uaccess.h>
+
+#include "mvumi.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("jyli@marvell.com");
+MODULE_DESCRIPTION("Marvell UMI Driver");
+
+static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
+
+static void tag_init(struct mvumi_tag *st, unsigned short size)
+{
+ unsigned short i;
+ BUG_ON(size != st->size);
+ st->top = size;
+ for (i = 0; i < size; i++)
+ st->stack[i] = size - 1 - i;
+}
+
+static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
+{
+ BUG_ON(st->top <= 0);
+ return st->stack[--st->top];
+}
+
+static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
+ unsigned short tag)
+{
+ BUG_ON(st->top >= st->size);
+ st->stack[st->top++] = tag;
+}
+
+static bool tag_is_empty(struct mvumi_tag *st)
+{
+ if (st->top == 0)
+ return 1;
+ else
+ return 0;
+}
+
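For illustration only (not part of the patch): a small stand-alone sketch of the LIFO free-tag pool used by tag_init/tag_get_one/tag_release_one above. The demo_ names and the pool size are hypothetical; the push/pop logic matches the driver's helpers.

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for struct mvumi_tag */
struct demo_tag_pool {
	unsigned short *stack;
	unsigned short size;
	unsigned short top;
};

static void demo_tag_init(struct demo_tag_pool *st, unsigned short size)
{
	unsigned short i;

	st->size = size;
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;	/* tag 0 ends up on top */
}

static unsigned short demo_tag_get(struct demo_tag_pool *st)
{
	return st->stack[--st->top];		/* pop the next free tag */
}

static void demo_tag_release(struct demo_tag_pool *st, unsigned short tag)
{
	st->stack[st->top++] = tag;		/* push the tag back */
}

int main(void)
{
	struct demo_tag_pool pool;
	unsigned short a, b;

	pool.stack = malloc(8 * sizeof(*pool.stack));
	demo_tag_init(&pool, 8);
	a = demo_tag_get(&pool);		/* returns 0 */
	b = demo_tag_get(&pool);		/* returns 1 */
	printf("got tags %u and %u\n", a, b);
	demo_tag_release(&pool, a);
	printf("next tag after release: %u\n", demo_tag_get(&pool));
	free(pool.stack);
	return 0;
}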
+static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++)
+ if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
+ addr_array[i])
+ pci_iounmap(dev, addr_array[i]);
+}
+
+static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+ if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ addr_array[i] = pci_iomap(dev, i, 0);
+ if (!addr_array[i]) {
+ dev_err(&dev->dev, "failed to map Bar[%d]\n",
+ i);
+ mvumi_unmap_pci_addr(dev, addr_array);
+ return -ENOMEM;
+ }
+ } else
+ addr_array[i] = NULL;
+
+ dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
+ }
+
+ return 0;
+}
+
+static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
+ enum resource_type type, unsigned int size)
+{
+ struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
+
+ if (!res) {
+ dev_err(&mhba->pdev->dev,
+ "Failed to allocate memory for resouce manager.\n");
+ return NULL;
+ }
+
+ switch (type) {
+ case RESOURCE_CACHED_MEMORY:
+ res->virt_addr = kzalloc(size, GFP_KERNEL);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate memory,size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ break;
+
+ case RESOURCE_UNCACHED_MEMORY:
+ size = round_up(size, 8);
+ res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
+ &res->bus_addr);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate consistent mem,"
+ "size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ memset(res->virt_addr, 0, size);
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
+ kfree(res);
+ return NULL;
+ }
+
+ res->type = type;
+ res->size = size;
+ INIT_LIST_HEAD(&res->entry);
+ list_add_tail(&res->entry, &mhba->res_list);
+
+ return res;
+}
+
+static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
+{
+ struct mvumi_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
+ switch (res->type) {
+ case RESOURCE_UNCACHED_MEMORY:
+ pci_free_consistent(mhba->pdev, res->size,
+ res->virt_addr, res->bus_addr);
+ break;
+ case RESOURCE_CACHED_MEMORY:
+ kfree(res->virt_addr);
+ break;
+ default:
+ dev_err(&mhba->pdev->dev,
+ "unknown resource type %d\n", res->type);
+ break;
+ }
+ list_del(&res->entry);
+ kfree(res);
+ }
+ mhba->fw_flag &= ~MVUMI_FW_ALLOC;
+}
+
+/**
+ * mvumi_make_sgl - Prepares SGL
+ * @mhba: Adapter soft state
+ * @scmd: SCSI command from the mid-layer
+ * @sgl_p: SGL to be filled in
+ * @sg_count return the number of SG elements
+ *
+ * If successful, this function returns 0; otherwise, it returns -1.
+ */
+static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
+ void *sgl_p, unsigned char *sg_count)
+{
+ struct scatterlist *sg;
+ struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
+ unsigned int i;
+ unsigned int sgnum = scsi_sg_count(scmd);
+ dma_addr_t busaddr;
+
+ if (sgnum) {
+ sg = scsi_sglist(scmd);
+ *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
+ (int) scmd->sc_data_direction);
+ if (*sg_count > mhba->max_sge) {
+ dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
+ "than max sg[0x%x].\n",
+ *sg_count, mhba->max_sge);
+ return -1;
+ }
+ for (i = 0; i < *sg_count; i++) {
+ busaddr = sg_dma_address(&sg[i]);
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = 0;
+ m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
+ if ((i + 1) == *sg_count)
+ m_sg->flags |= SGD_EOT;
+
+ m_sg++;
+ }
+ } else {
+ scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
+ pci_map_single(mhba->pdev, scsi_sglist(scmd),
+ scsi_bufflen(scmd),
+ (int) scmd->sc_data_direction)
+ : 0;
+ busaddr = scmd->SCp.dma_handle;
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = SGD_EOT;
+ m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
+ *sg_count = 1;
+ }
+
+ return 0;
+}
+
+static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
+ unsigned int size)
+{
+ struct mvumi_sgl *m_sg;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+
+ if (size == 0)
+ return 0;
+
+ virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
+ if (!virt_addr)
+ return -1;
+
+ memset(virt_addr, 0, size);
+
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ cmd->frame->sg_counts = 1;
+ cmd->data_buf = virt_addr;
+
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
+ m_sg->flags = SGD_EOT;
+ m_sg->size = cpu_to_le32(size);
+
+ return 0;
+}
+
+static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
+ unsigned int buf_size)
+{
+ struct mvumi_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+
+ cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+ if (!cmd->frame) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
+ " frame,size = %d.\n", mhba->ib_max_size);
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (buf_size) {
+ if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory"
+ " for internal frame\n");
+ kfree(cmd->frame);
+ kfree(cmd);
+ return NULL;
+ }
+ } else
+ cmd->frame->sg_counts = 0;
+
+ return cmd;
+}
+
+static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ struct mvumi_sgl *m_sg;
+ unsigned int size;
+ dma_addr_t phy_addr;
+
+ if (cmd && cmd->frame) {
+ if (cmd->frame->sg_counts) {
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ size = m_sg->size;
+
+ phy_addr = (dma_addr_t) m_sg->baseaddr_l |
+ (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
+
+ pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+ phy_addr);
+ }
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_get_cmd - Get a command from the free pool
+ * @mhba: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd = NULL;
+
+ if (likely(!list_empty(&mhba->cmd_pool))) {
+ cmd = list_entry((&mhba->cmd_pool)->next,
+ struct mvumi_cmd, queue_pointer);
+ list_del_init(&cmd->queue_pointer);
+ } else
+ dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
+
+ return cmd;
+}
+
+/**
+ * mvumi_return_cmd - Return a cmd to free command pool
+ * @mhba: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ cmd->scmd = NULL;
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+}
+
+/**
+ * mvumi_free_cmds - Free all the cmds in the free cmd pool
+ * @mhba: Adapter soft state
+ */
+static void mvumi_free_cmds(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_alloc_cmds - Allocates the command packets
+ * @mhba: Adapter soft state
+ *
+ */
+static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
+{
+ int i;
+ struct mvumi_cmd *cmd;
+
+ for (i = 0; i < mhba->max_io; i++) {
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ goto err_exit;
+
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+ cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+ if (!cmd->frame)
+ goto err_exit;
+ }
+ return 0;
+
+err_exit:
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for cmd[0x%x].\n", i);
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+ return -ENOMEM;
+}
+
+static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
+{
+ unsigned int ib_rp_reg, cur_ib_entry;
+
+ if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
+ dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
+ return -1;
+ }
+ ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);
+
+ if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
+ (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
+ ((ib_rp_reg & CL_POINTER_TOGGLE) !=
+ (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
+ dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
+ return -1;
+ }
+
+ cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
+ cur_ib_entry++;
+ if (cur_ib_entry >= mhba->list_num_io) {
+ cur_ib_entry -= mhba->list_num_io;
+ mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
+ }
+ mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
+ mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
+ *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
+ atomic_inc(&mhba->fw_outstanding);
+
+ return 0;
+}
+
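For illustration only (not part of the patch): a minimal model of the slot-plus-toggle cursor used by the inbound/outbound list code above. Wrapping past the end of the ring flips a toggle bit, and the ring is considered full when both cursors point at the same slot with opposite toggles. The constants and names below are hypothetical stand-ins for CL_SLOT_NUM_MASK and CL_POINTER_TOGGLE.

#include <stdio.h>

#define SLOT_MASK	0x0fff
#define TOGGLE_BIT	0x4000
#define NUM_SLOTS	4

/* advance a combined slot+toggle cursor, flipping the toggle on wrap */
static unsigned int advance(unsigned int cur)
{
	unsigned int slot = (cur & SLOT_MASK) + 1;

	if (slot >= NUM_SLOTS) {
		slot -= NUM_SLOTS;
		cur ^= TOGGLE_BIT;
	}
	return (cur & ~SLOT_MASK) | slot;
}

/* full: same slot number, opposite toggle bits */
static int ring_full(unsigned int producer, unsigned int consumer)
{
	return (producer & SLOT_MASK) == (consumer & SLOT_MASK) &&
	       (producer & TOGGLE_BIT) != (consumer & TOGGLE_BIT);
}

int main(void)
{
	unsigned int producer = 0, consumer = 0;
	int i;

	for (i = 0; i < NUM_SLOTS; i++)
		producer = advance(producer);
	printf("full after %d posts: %s\n", NUM_SLOTS,
	       ring_full(producer, consumer) ? "yes" : "no");
	return 0;
}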
+static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
+{
+ iowrite32(0xfff, mhba->ib_shadow);
+ iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
+}
+
+static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
+ unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
+{
+ unsigned short tag, request_id;
+
+ udelay(1);
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+ request_id = p_outb_frame->request_id;
+ tag = p_outb_frame->tag;
+ if (tag > mhba->tag_pool.size) {
+ dev_err(&mhba->pdev->dev, "ob frame data error\n");
+ return -1;
+ }
+ if (mhba->tag_cmd[tag] == NULL) {
+ dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
+ return -1;
+ } else if (mhba->tag_cmd[tag]->request_id != request_id &&
+ mhba->request_id_enabled) {
+ dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
+ "cmd request ID:0x%x\n", request_id,
+ mhba->tag_cmd[tag]->request_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
+{
+ unsigned int ob_write_reg, ob_write_shadow_reg;
+ unsigned int cur_obf, assign_obf_end, i;
+ struct mvumi_ob_data *ob_data;
+ struct mvumi_rsp_frame *p_outb_frame;
+
+ do {
+ ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
+ ob_write_shadow_reg = ioread32(mhba->ob_shadow);
+ } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);
+
+ cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
+ assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;
+
+ if ((ob_write_reg & CL_POINTER_TOGGLE) !=
+ (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
+ assign_obf_end += mhba->list_num_io;
+ }
+
+ for (i = (assign_obf_end - cur_obf); i != 0; i--) {
+ cur_obf++;
+ if (cur_obf >= mhba->list_num_io) {
+ cur_obf -= mhba->list_num_io;
+ mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+ }
+
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+
+ /* Copy pointer may point to entry in outbound list
+ * before entry has valid data
+ */
+ if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
+ mhba->tag_cmd[p_outb_frame->tag] == NULL ||
+ p_outb_frame->request_id !=
+ mhba->tag_cmd[p_outb_frame->tag]->request_id))
+ if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
+ continue;
+
+ if (!list_empty(&mhba->ob_data_list)) {
+ ob_data = (struct mvumi_ob_data *)
+ list_first_entry(&mhba->ob_data_list,
+ struct mvumi_ob_data, list);
+ list_del_init(&ob_data->list);
+ } else {
+ ob_data = NULL;
+ if (cur_obf == 0) {
+ cur_obf = mhba->list_num_io - 1;
+ mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+ } else
+ cur_obf -= 1;
+ break;
+ }
+
+ memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
+ p_outb_frame->tag = 0xff;
+
+ list_add_tail(&ob_data->list, &mhba->free_ob_list);
+ }
+ mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
+ mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
+ iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
+}
+
+static void mvumi_reset(void *regs)
+{
+ iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
+ if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
+ return;
+
+ iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+}
+
+static unsigned char mvumi_start(struct mvumi_hba *mhba);
+
+static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
+{
+ mhba->fw_state = FW_STATE_ABORT;
+ mvumi_reset(mhba->mmio);
+
+ if (mvumi_start(mhba))
+ return FAILED;
+ else
+ return SUCCESS;
+}
+
+static int mvumi_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mvumi_hba *mhba;
+
+ mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
+
+ scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
+ scmd->serial_number, scmd->cmnd[0], scmd->retries);
+
+ return mvumi_wait_for_outstanding(mhba);
+}
+
+static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ unsigned long flags;
+
+ cmd->cmd_status = REQ_STATUS_PENDING;
+
+ if (atomic_read(&cmd->sync_cmd)) {
+ dev_err(&mhba->pdev->dev,
+ "last blocked cmd not finished, sync_cmd = %d\n",
+ atomic_read(&cmd->sync_cmd));
+ BUG_ON(1);
+ return -1;
+ }
+ atomic_inc(&cmd->sync_cmd);
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ mhba->instancet->fire_cmd(mhba, cmd);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+
+ wait_event_timeout(mhba->int_cmd_wait_q,
+ (cmd->cmd_status != REQ_STATUS_PENDING),
+ MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
+
+ /* command timeout */
+ if (atomic_read(&cmd->sync_cmd)) {
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ atomic_dec(&cmd->sync_cmd);
+ if (mhba->tag_cmd[cmd->frame->tag]) {
+ mhba->tag_cmd[cmd->frame->tag] = NULL;
+ dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
+ cmd->frame->tag);
+ tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
+ }
+ if (!list_empty(&cmd->queue_pointer)) {
+ dev_warn(&mhba->pdev->dev,
+ "TIMEOUT:A internal command doesn't send!\n");
+ list_del_init(&cmd->queue_pointer);
+ } else
+ atomic_dec(&mhba->fw_outstanding);
+
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ }
+ return 0;
+}
+
+static void mvumi_release_fw(struct mvumi_hba *mhba)
+{
+ mvumi_free_cmds(mhba);
+ mvumi_release_mem_resource(mhba);
+ mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
+ kfree(mhba->handshake_page);
+ pci_release_regions(mhba->pdev);
+}
+
+static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_msg_frame *frame;
+ unsigned char device_id, retry = 0;
+ unsigned char bitcount = sizeof(unsigned char) * 8;
+
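+ /* Send a Marvell-specific core-shutdown CDB to every target that is
+ * marked present in the target_map bitmap so the firmware flushes
+ * its cache.
+ */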
+ for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
+ if (!(mhba->target_map[device_id / bitcount] &
+ (1 << (device_id % bitcount))))
+ continue;
+get_cmd:
+ cmd = mvumi_create_internal_cmd(mhba, 0);
+ if (!cmd) {
+ if (retry++ >= 5) {
+ dev_err(&mhba->pdev->dev, "failed to get memory"
+ " for internal flush cache cmd for "
+ "device %d", device_id);
+ retry = 0;
+ continue;
+ } else
+ goto get_cmd;
+ }
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->device_id = device_id;
+ frame->cmd_flag = CMD_FLAG_NON_DATA;
+ frame->data_transfer_length = 0;
+ frame->cdb_length = MAX_COMMAND_SIZE;
+ memset(frame->cdb, 0, MAX_COMMAND_SIZE);
+ frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+ frame->cdb[2] = CDB_CORE_SHUTDOWN;
+
+ mvumi_issue_blocked_cmd(mhba, cmd);
+ if (cmd->cmd_status != SAM_STAT_GOOD) {
+ dev_err(&mhba->pdev->dev,
+ "device %d flush cache failed, status=0x%x.\n",
+ device_id, cmd->cmd_status);
+ }
+
+ mvumi_delete_internal_cmd(mhba, cmd);
+ }
+ return 0;
+}
+
+static unsigned char
+mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
+ unsigned short len)
+{
+ unsigned char *ptr;
+ unsigned char ret = 0, i;
+
+ ptr = (unsigned char *) p_header->frame_content;
+ for (i = 0; i < len; i++) {
+ ret ^= *ptr;
+ ptr++;
+ }
+
+ return ret;
+}
+
+void mvumi_hs_build_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page2 *hs_page2;
+ struct mvumi_hs_page4 *hs_page4;
+ struct mvumi_hs_page3 *hs_page3;
+ struct timeval time;
+ unsigned int local_time;
+
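+ /* For every page, frame_length counts the page body only, i.e.
+ * everything after the 4-byte header (page_code, checksum,
+ * frame_length); the checksum is computed over that body.
+ */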
+ switch (hs_header->page_code) {
+ case HS_PAGE_HOST_INFO:
+ hs_page2 = (struct mvumi_hs_page2 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page2) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page2->host_type = 3; /* 3 means Linux */
+ hs_page2->host_ver.ver_major = VER_MAJOR;
+ hs_page2->host_ver.ver_minor = VER_MINOR;
+ hs_page2->host_ver.ver_oem = VER_OEM;
+ hs_page2->host_ver.ver_build = VER_BUILD;
+ hs_page2->system_io_bus = 0;
+ hs_page2->slot_number = 0;
+ hs_page2->intr_level = 0;
+ hs_page2->intr_vector = 0;
+ do_gettimeofday(&time);
+ local_time = (unsigned int) (time.tv_sec -
+ (sys_tz.tz_minuteswest * 60));
+ hs_page2->seconds_since1970 = local_time;
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_FIRM_CTL:
+ hs_page3 = (struct mvumi_hs_page3 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page3) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_CL_INFO:
+ hs_page4 = (struct mvumi_hs_page4 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page4) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
+ hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
+
+ hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
+ hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
+ hs_page4->ib_entry_size = mhba->ib_max_size_setting;
+ hs_page4->ob_entry_size = mhba->ob_max_size_setting;
+ hs_page4->ob_depth = mhba->list_num_io;
+ hs_page4->ib_depth = mhba->list_num_io;
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
+ hs_header->page_code);
+ break;
+ }
+}
+
+/**
+ * mvumi_init_data - Initialize requested data for FW
+ * @mhba: Adapter soft state
+ */
+static int mvumi_init_data(struct mvumi_hba *mhba)
+{
+ struct mvumi_ob_data *ob_pool;
+ struct mvumi_res *res_mgnt;
+ unsigned int tmp_size, offset, i;
+ void *virmem, *v;
+ dma_addr_t p;
+
+ if (mhba->fw_flag & MVUMI_FW_ALLOC)
+ return 0;
+
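+ /* The inbound list, outbound list and both shadow words share one
+ * uncached buffer; the extra bytes leave headroom for the 128-byte
+ * and 8-byte alignments applied below.
+ */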
+ tmp_size = mhba->ib_max_size * mhba->max_io;
+ tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
+ tmp_size += 8 + sizeof(u32) + 16;
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_UNCACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for inbound list\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ p = res_mgnt->bus_addr;
+ v = res_mgnt->virt_addr;
+ /* ib_list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_list = v;
+ mhba->ib_list_phys = p;
+ v += mhba->ib_max_size * mhba->max_io;
+ p += mhba->ib_max_size * mhba->max_io;
+ /* ib shadow */
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_shadow = v;
+ mhba->ib_shadow_phys = p;
+ p += sizeof(u32);
+ v += sizeof(u32);
+ /* ob shadow */
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ob_shadow = v;
+ mhba->ob_shadow_phys = p;
+ p += 8;
+ v += 8;
+
+ /* ob list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+
+ mhba->ob_list = v;
+ mhba->ob_list_phys = p;
+
+ /* ob data pool */
+ tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
+ tmp_size = round_up(tmp_size, 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for outbound data buffer\n");
+ goto fail_alloc_dma_buf;
+ }
+ virmem = res_mgnt->virt_addr;
+
+ for (i = mhba->max_io; i != 0; i--) {
+ ob_pool = (struct mvumi_ob_data *) virmem;
+ list_add_tail(&ob_pool->list, &mhba->ob_data_list);
+ virmem += mhba->ob_max_size + sizeof(*ob_pool);
+ }
+
+ tmp_size = sizeof(unsigned short) * mhba->max_io +
+ sizeof(struct mvumi_cmd *) * mhba->max_io;
+ tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
+ (sizeof(unsigned char) * 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for tag and target map\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ virmem = res_mgnt->virt_addr;
+ mhba->tag_pool.stack = virmem;
+ mhba->tag_pool.size = mhba->max_io;
+ tag_init(&mhba->tag_pool, mhba->max_io);
+ virmem += sizeof(unsigned short) * mhba->max_io;
+
+ mhba->tag_cmd = virmem;
+ virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
+
+ mhba->target_map = virmem;
+
+ mhba->fw_flag |= MVUMI_FW_ALLOC;
+ return 0;
+
+fail_alloc_dma_buf:
+ mvumi_release_mem_resource(mhba);
+ return -1;
+}
+
+static int mvumi_hs_process_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page1 *hs_page1;
+ unsigned char page_checksum;
+
+ page_checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ if (page_checksum != hs_header->checksum) {
+ dev_err(&mhba->pdev->dev, "checksum error\n");
+ return -1;
+ }
+
+ switch (hs_header->page_code) {
+ case HS_PAGE_FIRM_CAP:
+ hs_page1 = (struct mvumi_hs_page1 *) hs_header;
+
+ mhba->max_io = hs_page1->max_io_support;
+ mhba->list_num_io = hs_page1->cl_inout_list_depth;
+ mhba->max_transfer_size = hs_page1->max_transfer_size;
+ mhba->max_target_id = hs_page1->max_devices_support;
+ mhba->hba_capability = hs_page1->capability;
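+ /* entry sizes are reported as a power of two, in units of 4 bytes */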
+ mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
+ mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
+
+ mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
+ mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
+
+ dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
+ hs_page1->fw_ver.ver_build);
+
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "handshake: page code error\n");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * mvumi_handshake - Move the FW to READY state
+ * @mhba: Adapter soft state
+ *
+ * During initialization, the FW can be in any one of several possible
+ * states. If the FW is in an operational or waiting-for-handshake state,
+ * the driver must take steps to bring it to the ready state; otherwise
+ * it has to wait until the FW reaches the ready state.
+ */
+static int mvumi_handshake(struct mvumi_hba *mhba)
+{
+ unsigned int hs_state, tmp, hs_fun;
+ struct mvumi_hs_header *hs_header;
+ void *regs = mhba->mmio;
+
+ if (mhba->fw_state == FW_STATE_STARTING)
+ hs_state = HS_S_START;
+ else {
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
+ hs_state = HS_GET_STATE(tmp);
+ dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
+ if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
+ mhba->fw_state = FW_STATE_STARTING;
+ return -1;
+ }
+ }
+
+ hs_fun = 0;
+ switch (hs_state) {
+ case HS_S_START:
+ mhba->fw_state = FW_STATE_HANDSHAKING;
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_RESET);
+ iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ break;
+
+ case HS_S_RESET:
+ iowrite32(lower_32_bits(mhba->handshake_page_phys),
+ regs + CPU_PCIEA_TO_ARM_MSG1);
+ iowrite32(upper_32_bits(mhba->handshake_page_phys),
+ regs + CPU_ARM_TO_PCIEA_MSG1);
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+
+ break;
+
+ case HS_S_PAGE_ADDR:
+ case HS_S_QUERY_PAGE:
+ case HS_S_SEND_PAGE:
+ hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
+ if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
+ mhba->hba_total_pages =
+ ((struct mvumi_hs_page1 *) hs_header)->total_pages;
+
+ if (mhba->hba_total_pages == 0)
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if (hs_state == HS_S_QUERY_PAGE) {
+ if (mvumi_hs_process_page(mhba, hs_header)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ if (mvumi_init_data(mhba)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ } else if (hs_state == HS_S_PAGE_ADDR) {
+ hs_header->page_code = 0;
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
+ hs_header->page_code++;
+ if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
+ mvumi_hs_build_page(mhba, hs_header);
+ HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_END);
+
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ break;
+
+ case HS_S_END:
+ /* Set communication list ISR */
+ tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
+ tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
+ iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
+ iowrite32(mhba->list_num_io, mhba->ib_shadow);
+ /* Set InBound List Available count shadow */
+ iowrite32(lower_32_bits(mhba->ib_shadow_phys),
+ regs + CLA_INB_AVAL_COUNT_BASEL);
+ iowrite32(upper_32_bits(mhba->ib_shadow_phys),
+ regs + CLA_INB_AVAL_COUNT_BASEH);
+
+ /* Set OutBound List Available count shadow */
+ iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE,
+ mhba->ob_shadow);
+ iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
+ iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);
+
+ mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
+ mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
+ mhba->fw_state = FW_STATE_STARTED;
+
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
+ hs_state);
+ return -1;
+ }
+ return 0;
+}
+
+static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
+{
+ unsigned int isr_status;
+ unsigned long before;
+
+ before = jiffies;
+ mvumi_handshake(mhba);
+ do {
+ isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);
+
+ if (mhba->fw_state == FW_STATE_STARTED)
+ return 0;
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "no handshake response at state 0x%x.\n",
+ mhba->fw_state);
+ dev_err(&mhba->pdev->dev,
+ "isr : global=0x%x,status=0x%x.\n",
+ mhba->global_isr, isr_status);
+ return -1;
+ }
+ rmb();
+ usleep_range(1000, 2000);
+ } while (!(isr_status & DRBL_HANDSHAKE_ISR));
+
+ return 0;
+}
+
+static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
+{
+ void *regs = mhba->mmio;
+ unsigned int tmp;
+ unsigned long before;
+
+ before = jiffies;
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
+ while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
+ if (tmp != HANDSHAKE_READYSTATE)
+ iowrite32(DRBL_MU_RESET,
+ regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "invalid signature [0x%x].\n", tmp);
+ return -1;
+ }
+ usleep_range(1000, 2000);
+ rmb();
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
+ }
+
+ mhba->fw_state = FW_STATE_STARTING;
+ dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
+ do {
+ if (mvumi_handshake_event(mhba)) {
+ dev_err(&mhba->pdev->dev,
+ "handshake failed at state 0x%x.\n",
+ mhba->fw_state);
+ return -1;
+ }
+ } while (mhba->fw_state != FW_STATE_STARTED);
+
+ dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
+
+ return 0;
+}
+
+static unsigned char mvumi_start(struct mvumi_hba *mhba)
+{
+ void *regs = mhba->mmio;
+ unsigned int tmp;
+ /* clear Door bell */
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+
+ iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
+ tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA;
+ iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
+ if (mvumi_check_handshake(mhba))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * mvumi_complete_cmd - Completes a command
+ * @mhba: Adapter soft state
+ * @cmd: Command to be completed
+ * @ob_frame: Response frame from the firmware
+ */
+static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
+ struct mvumi_rsp_frame *ob_frame)
+{
+ struct scsi_cmnd *scmd = cmd->scmd;
+
+ cmd->scmd->SCp.ptr = NULL;
+ scmd->result = ob_frame->req_status;
+
+ switch (ob_frame->req_status) {
+ case SAM_STAT_GOOD:
+ scmd->result |= DID_OK << 16;
+ break;
+ case SAM_STAT_BUSY:
+ scmd->result |= DID_BUS_BUSY << 16;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ scmd->result |= (DID_OK << 16);
+ if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
+ memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
+ sizeof(struct mvumi_sense_data));
+ scmd->result |= (DRIVER_SENSE << 24);
+ }
+ break;
+ default:
+ scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ break;
+ }
+
+ if (scsi_bufflen(scmd)) {
+ if (scsi_sg_count(scmd)) {
+ pci_unmap_sg(mhba->pdev,
+ scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int) scmd->sc_data_direction);
+ } else {
+ pci_unmap_single(mhba->pdev,
+ scmd->SCp.dma_handle,
+ scsi_bufflen(scmd),
+ (int) scmd->sc_data_direction);
+
+ scmd->SCp.dma_handle = 0;
+ }
+ }
+ cmd->scmd->scsi_done(scmd);
+ mvumi_return_cmd(mhba, cmd);
+}
+static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd,
+ struct mvumi_rsp_frame *ob_frame)
+{
+ if (atomic_read(&cmd->sync_cmd)) {
+ cmd->cmd_status = ob_frame->req_status;
+
+ if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
+ (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
+ cmd->data_buf) {
+ memcpy(cmd->data_buf, ob_frame->payload,
+ sizeof(struct mvumi_sense_data));
+ }
+ atomic_dec(&cmd->sync_cmd);
+ wake_up(&mhba->int_cmd_wait_q);
+ }
+}
+
+static void mvumi_show_event(struct mvumi_hba *mhba,
+ struct mvumi_driver_event *ptr)
+{
+ unsigned int i;
+
+ dev_warn(&mhba->pdev->dev,
+ "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
+ ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
+ if (ptr->param_count) {
+ printk(KERN_WARNING "Event param(len 0x%x): ",
+ ptr->param_count);
+ for (i = 0; i < ptr->param_count; i++)
+ printk(KERN_WARNING "0x%x ", ptr->params[i]);
+
+ printk(KERN_WARNING "\n");
+ }
+
+ if (ptr->sense_data_length) {
+ printk(KERN_WARNING "Event sense data(len 0x%x): ",
+ ptr->sense_data_length);
+ for (i = 0; i < ptr->sense_data_length; i++)
+ printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
+ printk(KERN_WARNING "\n");
+ }
+}
+
+static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
+{
+ if (msg == APICDB1_EVENT_GETEVENT) {
+ int i, count;
+ struct mvumi_driver_event *param = NULL;
+ struct mvumi_event_req *er = buffer;
+ count = er->count;
+ if (count > MAX_EVENTS_RETURNED) {
+ dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
+ " than max event count[0x%x].\n",
+ count, MAX_EVENTS_RETURNED);
+ return;
+ }
+ for (i = 0; i < count; i++) {
+ param = &er->events[i];
+ mvumi_show_event(mhba, param);
+ }
+ }
+}
+
+static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_msg_frame *frame;
+
+ cmd = mvumi_create_internal_cmd(mhba, 512);
+ if (!cmd)
+ return -1;
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->device_id = 0;
+ frame->cmd_flag = CMD_FLAG_DATA_IN;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->cdb_length = MAX_COMMAND_SIZE;
+ frame->data_transfer_length = sizeof(struct mvumi_event_req);
+ memset(frame->cdb, 0, MAX_COMMAND_SIZE);
+ frame->cdb[0] = APICDB0_EVENT;
+ frame->cdb[1] = msg;
+ mvumi_issue_blocked_cmd(mhba, cmd);
+
+ if (cmd->cmd_status != SAM_STAT_GOOD)
+ dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
+ cmd->cmd_status);
+ else
+ mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
+
+ mvumi_delete_internal_cmd(mhba, cmd);
+ return 0;
+}
+
+static void mvumi_scan_events(struct work_struct *work)
+{
+ struct mvumi_events_wq *mu_ev =
+ container_of(work, struct mvumi_events_wq, work_q);
+
+ mvumi_get_event(mu_ev->mhba, mu_ev->event);
+ kfree(mu_ev);
+}
+
+static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg)
+{
+ struct mvumi_events_wq *mu_ev;
+
+ mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
+ if (mu_ev) {
+ INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
+ mu_ev->mhba = mhba;
+ mu_ev->event = msg;
+ mu_ev->param = NULL;
+ schedule_work(&mu_ev->work_q);
+ }
+}
+
+static void mvumi_handle_clob(struct mvumi_hba *mhba)
+{
+ struct mvumi_rsp_frame *ob_frame;
+ struct mvumi_cmd *cmd;
+ struct mvumi_ob_data *pool;
+
+ while (!list_empty(&mhba->free_ob_list)) {
+ pool = list_first_entry(&mhba->free_ob_list,
+ struct mvumi_ob_data, list);
+ list_del_init(&pool->list);
+ list_add_tail(&pool->list, &mhba->ob_data_list);
+
+ ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
+ cmd = mhba->tag_cmd[ob_frame->tag];
+
+ atomic_dec(&mhba->fw_outstanding);
+ mhba->tag_cmd[ob_frame->tag] = NULL;
+ tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
+ if (cmd->scmd)
+ mvumi_complete_cmd(mhba, cmd, ob_frame);
+ else
+ mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
+ }
+ mhba->instancet->fire_cmd(mhba, NULL);
+}
+
+static irqreturn_t mvumi_isr_handler(int irq, void *devp)
+{
+ struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
+ unsigned long flags;
+
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) {
+ if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
+ dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
+ mvumi_handshake(mhba);
+ }
+ if (mhba->isr_status & DRBL_EVENT_NOTIFY)
+ mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
+ }
+
+ if (mhba->global_isr & INT_MAP_COMAOUT)
+ mvumi_receive_ob_list_entry(mhba);
+
+ mhba->global_isr = 0;
+ mhba->isr_status = 0;
+ if (mhba->fw_state == FW_STATE_STARTED)
+ mvumi_handle_clob(mhba);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ void *ib_entry;
+ struct mvumi_msg_frame *ib_frame;
+ unsigned int frame_len;
+
+ ib_frame = cmd->frame;
+ if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
+ dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+ }
+ if (tag_is_empty(&mhba->tag_pool)) {
+ dev_dbg(&mhba->pdev->dev, "no free tag.\n");
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+ }
+ if (mvumi_get_ib_list_entry(mhba, &ib_entry))
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+
+ cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
+ cmd->frame->request_id = mhba->io_seq++;
+ cmd->request_id = cmd->frame->request_id;
+ mhba->tag_cmd[cmd->frame->tag] = cmd;
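+ /* frame length: the fixed header minus its 4-byte payload
+ * placeholder, plus the actual scatter-gather entries
+ */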
+ frame_len = sizeof(*ib_frame) - 4 +
+ ib_frame->sg_counts * sizeof(struct mvumi_sgl);
+ memcpy(ib_entry, ib_frame, frame_len);
+ return MV_QUEUE_COMMAND_RESULT_SENT;
+}
+
+static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
+{
+ unsigned short num_of_cl_sent = 0;
+ enum mvumi_qc_result result;
+
+ if (cmd)
+ list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
+
+ while (!list_empty(&mhba->waiting_req_list)) {
+ cmd = list_first_entry(&mhba->waiting_req_list,
+ struct mvumi_cmd, queue_pointer);
+ list_del_init(&cmd->queue_pointer);
+ result = mvumi_send_command(mhba, cmd);
+ switch (result) {
+ case MV_QUEUE_COMMAND_RESULT_SENT:
+ num_of_cl_sent++;
+ break;
+ case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
+ list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
+ if (num_of_cl_sent > 0)
+ mvumi_send_ib_list_entry(mhba);
+
+ return;
+ }
+ }
+ if (num_of_cl_sent > 0)
+ mvumi_send_ib_list_entry(mhba);
+}
+
+/**
+ * mvumi_enable_intr - Enables interrupts
+ * @regs: FW register set
+ */
+static void mvumi_enable_intr(void *regs)
+{
+ unsigned int mask;
+
+ iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
+ mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
+ mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR;
+ iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
+}
+
+/**
+ * mvumi_disable_intr - Disables interrupts
+ * @regs: FW register set
+ */
+static void mvumi_disable_intr(void *regs)
+{
+ unsigned int mask;
+
+ iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG);
+ mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
+ mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR);
+ iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
+}
+
+static int mvumi_clear_intr(void *extend)
+{
+ struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
+ unsigned int status, isr_status = 0, tmp = 0;
+ void *regs = mhba->mmio;
+
+ status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG);
+ if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF)
+ return 1;
+ if (unlikely(status & INT_MAP_COMAERR)) {
+ tmp = ioread32(regs + CLA_ISR_CAUSE);
+ if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ))
+ iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ),
+ regs + CLA_ISR_CAUSE);
+ status ^= INT_MAP_COMAERR;
+ /* inbound or outbound parity error, command will timeout */
+ }
+ if (status & INT_MAP_COMAOUT) {
+ tmp = ioread32(regs + CLA_ISR_CAUSE);
+ if (tmp & CLIC_OUT_IRQ)
+ iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE);
+ }
+ if (status & INT_MAP_DL_CPU2PCIEA) {
+ isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ if (isr_status)
+ iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ }
+
+ mhba->global_isr = status;
+ mhba->isr_status = isr_status;
+
+ return 0;
+}
+
+/**
+ * mvumi_read_fw_status_reg - returns the current FW status value
+ * @regs: FW register set
+ */
+static unsigned int mvumi_read_fw_status_reg(void *regs)
+{
+ unsigned int status;
+
+ status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ if (status)
+ iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ return status;
+}
+
+static struct mvumi_instance_template mvumi_instance_template = {
+ .fire_cmd = mvumi_fire_cmd,
+ .enable_intr = mvumi_enable_intr,
+ .disable_intr = mvumi_disable_intr,
+ .clear_intr = mvumi_clear_intr,
+ .read_fw_status_reg = mvumi_read_fw_status_reg,
+};
+
+static int mvumi_slave_configure(struct scsi_device *sdev)
+{
+ struct mvumi_hba *mhba;
+ unsigned char bitcount = sizeof(unsigned char) * 8;
+
+ mhba = (struct mvumi_hba *) sdev->host->hostdata;
+ if (sdev->id >= mhba->max_target_id)
+ return -EINVAL;
+
+ mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
+ return 0;
+}
+
+/**
+ * mvumi_build_frame - Prepares a direct cdb (DCDB) command
+ * @mhba: Adapter soft state
+ * @scmd: SCSI command
+ * @cmd: Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typically pass-through
+ * commands to the devices.
+ */
+static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
+ struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
+{
+ struct mvumi_msg_frame *pframe;
+
+ cmd->scmd = scmd;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ pframe = cmd->frame;
+ pframe->device_id = ((unsigned short) scmd->device->id) |
+ (((unsigned short) scmd->device->lun) << 8);
+ pframe->cmd_flag = 0;
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ pframe->cmd_flag |= CMD_FLAG_NON_DATA;
+ break;
+ case DMA_FROM_DEVICE:
+ pframe->cmd_flag |= CMD_FLAG_DATA_IN;
+ break;
+ case DMA_TO_DEVICE:
+ pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
+ break;
+ case DMA_BIDIRECTIONAL:
+ default:
+ dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
+ "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
+ goto error;
+ }
+
+ pframe->cdb_length = scmd->cmd_len;
+ memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
+ pframe->req_function = CL_FUN_SCSI_CMD;
+ if (scsi_bufflen(scmd)) {
+ if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
+ &pframe->sg_counts))
+ goto error;
+
+ pframe->data_transfer_length = scsi_bufflen(scmd);
+ } else {
+ pframe->sg_counts = 0;
+ pframe->data_transfer_length = 0;
+ }
+ return 0;
+
+error:
+ scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
+ 0);
+ return -1;
+}
+
+/**
+ * mvumi_queue_command - Queue entry point
+ * @shost: SCSI host to which the command is queued
+ * @scmd: SCSI command to be queued
+ */
+static int mvumi_queue_command(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_hba *mhba;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(shost->host_lock, irq_flags);
+ scsi_cmd_get_serial(shost, scmd);
+
+ mhba = (struct mvumi_hba *) shost->hostdata;
+ scmd->result = 0;
+ cmd = mvumi_get_cmd(mhba);
+ if (unlikely(!cmd)) {
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
+ goto out_return_cmd;
+
+ cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *) cmd;
+ mhba->instancet->fire_cmd(mhba, cmd);
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return 0;
+
+out_return_cmd:
+ mvumi_return_cmd(mhba, cmd);
+ scmd->scsi_done(scmd);
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return 0;
+}
+
+static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
+{
+ struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
+ struct Scsi_Host *host = scmd->device->host;
+ struct mvumi_hba *mhba = shost_priv(host);
+ unsigned long flags;
+
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+
+ if (mhba->tag_cmd[cmd->frame->tag]) {
+ mhba->tag_cmd[cmd->frame->tag] = NULL;
+ tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
+ }
+ if (!list_empty(&cmd->queue_pointer))
+ list_del_init(&cmd->queue_pointer);
+ else
+ atomic_dec(&mhba->fw_outstanding);
+
+ scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->SCp.ptr = NULL;
+ if (scsi_bufflen(scmd)) {
+ if (scsi_sg_count(scmd)) {
+ pci_unmap_sg(mhba->pdev,
+ scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int)scmd->sc_data_direction);
+ } else {
+ pci_unmap_single(mhba->pdev,
+ scmd->SCp.dma_handle,
+ scsi_bufflen(scmd),
+ (int)scmd->sc_data_direction);
+
+ scmd->SCp.dma_handle = 0;
+ }
+ }
+ mvumi_return_cmd(mhba, cmd);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+
+ return BLK_EH_NOT_HANDLED;
+}
+
+static int
+mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads, sectors;
+ sector_t cylinders;
+ unsigned long tmp;
+
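+ /* Default to a 64-head/32-sector translation; for capacities of
+ * 0x200000 sectors (1 GiB with 512-byte sectors) or more, use the
+ * common 255-head/63-sector mapping instead.
+ */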
+ heads = 64;
+ sectors = 32;
+ tmp = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ tmp = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+ }
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+static struct scsi_host_template mvumi_template = {
+
+ .module = THIS_MODULE,
+ .name = "Marvell Storage Controller",
+ .slave_configure = mvumi_slave_configure,
+ .queuecommand = mvumi_queue_command,
+ .eh_host_reset_handler = mvumi_host_reset,
+ .bios_param = mvumi_bios_param,
+ .this_id = -1,
+};
+
+static struct scsi_transport_template mvumi_transport_template = {
+ .eh_timed_out = mvumi_timed_out,
+};
+
+/**
+ * mvumi_init_fw - Initializes the FW
+ * @mhba: Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+static int mvumi_init_fw(struct mvumi_hba *mhba)
+{
+ int ret = 0;
+
+ if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
+ dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
+ return -EBUSY;
+ }
+ ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
+ if (ret)
+ goto fail_ioremap;
+
+ mhba->mmio = mhba->base_addr[0];
+
+ switch (mhba->pdev->device) {
+ case PCI_DEVICE_ID_MARVELL_MV9143:
+ mhba->instancet = &mvumi_instance_template;
+ mhba->io_seq = 0;
+ mhba->max_sge = MVUMI_MAX_SG_ENTRY;
+ mhba->request_id_enabled = 1;
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
+ mhba->pdev->device);
+ mhba->instancet = NULL;
+ ret = -EINVAL;
+ goto fail_alloc_mem;
+ }
+ dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
+ mhba->pdev->device);
+
+ mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL);
+ if (!mhba->handshake_page) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for handshake\n");
+ ret = -ENOMEM;
+ goto fail_alloc_mem;
+ }
+ mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
+
+ if (mvumi_start(mhba)) {
+ ret = -EINVAL;
+ goto fail_ready_state;
+ }
+ ret = mvumi_alloc_cmds(mhba);
+ if (ret)
+ goto fail_ready_state;
+
+ return 0;
+
+fail_ready_state:
+ mvumi_release_mem_resource(mhba);
+ kfree(mhba->handshake_page);
+fail_alloc_mem:
+ mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
+fail_ioremap:
+ pci_release_regions(mhba->pdev);
+
+ return ret;
+}
+
+/**
+ * mvumi_io_attach - Attaches this driver to SCSI mid-layer
+ * @mhba: Adapter soft state
+ */
+static int mvumi_io_attach(struct mvumi_hba *mhba)
+{
+ struct Scsi_Host *host = mhba->shost;
+ int ret;
+ unsigned int max_sg = (mhba->ib_max_size + 4 -
+ sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
+
+ host->irq = mhba->pdev->irq;
+ host->unique_id = mhba->unique_id;
+ host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
+ host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
+ host->max_sectors = mhba->max_transfer_size / 512;
+ host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
+ host->max_id = mhba->max_target_id;
+ host->max_cmd_len = MAX_COMMAND_SIZE;
+ host->transportt = &mvumi_transport_template;
+
+ ret = scsi_add_host(host, &mhba->pdev->dev);
+ if (ret) {
+ dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
+ return ret;
+ }
+ mhba->fw_flag |= MVUMI_FW_ATTACH;
+ scsi_scan_host(host);
+
+ return 0;
+}
+
+/**
+ * mvumi_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int __devinit mvumi_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *host;
+ struct mvumi_hba *mhba;
+ int ret;
+
+ dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ if (IS_DMA64) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_set_dma_mask;
+ }
+
+ host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
+ if (!host) {
+ dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ ret = -ENOMEM;
+ goto fail_alloc_instance;
+ }
+ mhba = shost_priv(host);
+
+ INIT_LIST_HEAD(&mhba->cmd_pool);
+ INIT_LIST_HEAD(&mhba->ob_data_list);
+ INIT_LIST_HEAD(&mhba->free_ob_list);
+ INIT_LIST_HEAD(&mhba->res_list);
+ INIT_LIST_HEAD(&mhba->waiting_req_list);
+ atomic_set(&mhba->fw_outstanding, 0);
+ init_waitqueue_head(&mhba->int_cmd_wait_q);
+
+ mhba->pdev = pdev;
+ mhba->shost = host;
+ mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
+
+ ret = mvumi_init_fw(mhba);
+ if (ret)
+ goto fail_init_fw;
+
+ ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
+ "mvumi", mhba);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IRQ\n");
+ goto fail_init_irq;
+ }
+ mhba->instancet->enable_intr(mhba->mmio);
+ pci_set_drvdata(pdev, mhba);
+
+ ret = mvumi_io_attach(mhba);
+ if (ret)
+ goto fail_io_attach;
+ dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
+
+ return 0;
+
+fail_io_attach:
+ pci_set_drvdata(pdev, NULL);
+ mhba->instancet->disable_intr(mhba->mmio);
+ free_irq(mhba->pdev->irq, mhba);
+fail_init_irq:
+ mvumi_release_fw(mhba);
+fail_init_fw:
+ scsi_host_put(host);
+
+fail_alloc_instance:
+fail_set_dma_mask:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void mvumi_detach_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ struct mvumi_hba *mhba;
+
+ mhba = pci_get_drvdata(pdev);
+ host = mhba->shost;
+ scsi_remove_host(mhba->shost);
+ mvumi_flush_cache(mhba);
+
+ mhba->instancet->disable_intr(mhba->mmio);
+ free_irq(mhba->pdev->irq, mhba);
+ mvumi_release_fw(mhba);
+ scsi_host_put(host);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ dev_dbg(&pdev->dev, "driver is removed!\n");
+}
+
+/**
+ * mvumi_shutdown - Shutdown entry point
+ * @pdev: PCI device structure
+ */
+static void mvumi_shutdown(struct pci_dev *pdev)
+{
+ struct mvumi_hba *mhba = pci_get_drvdata(pdev);
+
+ mvumi_flush_cache(mhba);
+}
+
+static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mvumi_hba *mhba = NULL;
+
+ mhba = pci_get_drvdata(pdev);
+ mvumi_flush_cache(mhba);
+
+ pci_set_drvdata(pdev, mhba);
+ mhba->instancet->disable_intr(mhba->mmio);
+ free_irq(mhba->pdev->irq, mhba);
+ mvumi_unmap_pci_addr(pdev, mhba->base_addr);
+ pci_release_regions(pdev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int mvumi_resume(struct pci_dev *pdev)
+{
+ int ret;
+ struct mvumi_hba *mhba = NULL;
+
+ mhba = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "enable device failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+ if (IS_DMA64) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+ ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
+ if (ret)
+ goto fail;
+ ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
+ if (ret)
+ goto release_regions;
+
+ mhba->mmio = mhba->base_addr[0];
+ mvumi_reset(mhba->mmio);
+
+ if (mvumi_start(mhba)) {
+ ret = -EINVAL;
+ goto unmap_pci_addr;
+ }
+
+ ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
+ "mvumi", mhba);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IRQ\n");
+ goto unmap_pci_addr;
+ }
+ mhba->instancet->enable_intr(mhba->mmio);
+
+ return 0;
+
+unmap_pci_addr:
+ mvumi_unmap_pci_addr(pdev, mhba->base_addr);
+release_regions:
+ pci_release_regions(pdev);
+fail:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static struct pci_driver mvumi_pci_driver = {
+
+ .name = MV_DRIVER_NAME,
+ .id_table = mvumi_pci_table,
+ .probe = mvumi_probe_one,
+ .remove = __devexit_p(mvumi_detach_one),
+ .shutdown = mvumi_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mvumi_suspend,
+ .resume = mvumi_resume,
+#endif
+};
+
+/**
+ * mvumi_init - Driver load entry point
+ */
+static int __init mvumi_init(void)
+{
+ return pci_register_driver(&mvumi_pci_driver);
+}
+
+/**
+ * mvumi_exit - Driver unload entry point
+ */
+static void __exit mvumi_exit(void)
+{
+
+ pci_unregister_driver(&mvumi_pci_driver);
+}
+
+module_init(mvumi_init);
+module_exit(mvumi_exit);
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
new file mode 100644
index 00000000000..10b9237566f
--- /dev/null
+++ b/drivers/scsi/mvumi.h
@@ -0,0 +1,505 @@
+/*
+ * Marvell UMI header file
+ *
+ * Copyright 2011 Marvell. <jyli@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#ifndef MVUMI_H
+#define MVUMI_H
+
+#define MAX_BASE_ADDRESS 6
+
+#define VER_MAJOR 1
+#define VER_MINOR 1
+#define VER_OEM 0
+#define VER_BUILD 1500
+
+#define MV_DRIVER_NAME "mvumi"
+#define PCI_VENDOR_ID_MARVELL_2 0x1b4b
+#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
+
+#define MVUMI_INTERNAL_CMD_WAIT_TIME 45
+
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+
+enum mvumi_qc_result {
+ MV_QUEUE_COMMAND_RESULT_SENT = 0,
+ MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
+};
+
+enum {
+ /*******************************************/
+
+ /* ARM Mbus Registers Map */
+
+ /*******************************************/
+ CPU_MAIN_INT_CAUSE_REG = 0x20200,
+ CPU_MAIN_IRQ_MASK_REG = 0x20204,
+ CPU_MAIN_FIQ_MASK_REG = 0x20208,
+ CPU_ENPOINTA_MASK_REG = 0x2020C,
+ CPU_ENPOINTB_MASK_REG = 0x20210,
+
+ INT_MAP_COMAERR = 1 << 6,
+ INT_MAP_COMAIN = 1 << 7,
+ INT_MAP_COMAOUT = 1 << 8,
+ INT_MAP_COMBERR = 1 << 9,
+ INT_MAP_COMBIN = 1 << 10,
+ INT_MAP_COMBOUT = 1 << 11,
+
+ INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR),
+ INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR),
+
+ INT_MAP_DL_PCIEA2CPU = 1 << 0,
+ INT_MAP_DL_CPU2PCIEA = 1 << 1,
+
+ /***************************************/
+
+ /* ARM Doorbell Registers Map */
+
+ /***************************************/
+ CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400,
+ CPU_PCIEA_TO_ARM_MASK_REG = 0x20404,
+ CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408,
+ CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C,
+
+ DRBL_HANDSHAKE = 1 << 0,
+ DRBL_SOFT_RESET = 1 << 1,
+ DRBL_BUS_CHANGE = 1 << 2,
+ DRBL_EVENT_NOTIFY = 1 << 3,
+ DRBL_MU_RESET = 1 << 4,
+ DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE,
+
+ CPU_PCIEA_TO_ARM_MSG0 = 0x20430,
+ CPU_PCIEA_TO_ARM_MSG1 = 0x20434,
+ CPU_ARM_TO_PCIEA_MSG0 = 0x20438,
+ CPU_ARM_TO_PCIEA_MSG1 = 0x2043C,
+
+ /*******************************************/
+
+ /* ARM Communication List Registers Map */
+
+ /*******************************************/
+ CLA_INB_LIST_BASEL = 0x500,
+ CLA_INB_LIST_BASEH = 0x504,
+ CLA_INB_AVAL_COUNT_BASEL = 0x508,
+ CLA_INB_AVAL_COUNT_BASEH = 0x50C,
+ CLA_INB_DESTI_LIST_BASEL = 0x510,
+ CLA_INB_DESTI_LIST_BASEH = 0x514,
+ CLA_INB_WRITE_POINTER = 0x518,
+ CLA_INB_READ_POINTER = 0x51C,
+
+ CLA_OUTB_LIST_BASEL = 0x530,
+ CLA_OUTB_LIST_BASEH = 0x534,
+ CLA_OUTB_SOURCE_LIST_BASEL = 0x538,
+ CLA_OUTB_SOURCE_LIST_BASEH = 0x53C,
+ CLA_OUTB_COPY_POINTER = 0x544,
+ CLA_OUTB_READ_POINTER = 0x548,
+
+ CLA_ISR_CAUSE = 0x560,
+ CLA_ISR_MASK = 0x564,
+
+ INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT),
+
+ CL_POINTER_TOGGLE = 1 << 12,
+
+ CLIC_IN_IRQ = 1 << 0,
+ CLIC_OUT_IRQ = 1 << 1,
+ CLIC_IN_ERR_IRQ = 1 << 8,
+ CLIC_OUT_ERR_IRQ = 1 << 12,
+
+ CL_SLOT_NUM_MASK = 0xFFF,
+
+ /*
+ * Command flag is the flag for the CDB command itself
+ */
+ /* 1-non data; 0-data command */
+ CMD_FLAG_NON_DATA = 1 << 0,
+ CMD_FLAG_DMA = 1 << 1,
+ CMD_FLAG_PIO = 1 << 2,
+ /* 1-host read data */
+ CMD_FLAG_DATA_IN = 1 << 3,
+ /* 1-host write data */
+ CMD_FLAG_DATA_OUT = 1 << 4,
+
+ SCSI_CMD_MARVELL_SPECIFIC = 0xE1,
+ CDB_CORE_SHUTDOWN = 0xB,
+};
+
+#define APICDB0_EVENT 0xF4
+#define APICDB1_EVENT_GETEVENT 0
+#define MAX_EVENTS_RETURNED 6
+
+struct mvumi_driver_event {
+ u32 time_stamp;
+ u32 sequence_no;
+ u32 event_id;
+ u8 severity;
+ u8 param_count;
+ u16 device_id;
+ u32 params[4];
+ u8 sense_data_length;
+ u8 Reserved1;
+ u8 sense_data[30];
+};
+
+struct mvumi_event_req {
+ unsigned char count;
+ unsigned char reserved[3];
+ struct mvumi_driver_event events[MAX_EVENTS_RETURNED];
+};
+
+struct mvumi_events_wq {
+ struct work_struct work_q;
+ struct mvumi_hba *mhba;
+ unsigned int event;
+ void *param;
+};
+
+#define MVUMI_MAX_SG_ENTRY 32
+#define SGD_EOT (1L << 27)
+
+struct mvumi_sgl {
+ u32 baseaddr_l;
+ u32 baseaddr_h;
+ u32 flags;
+ u32 size;
+};
+
+struct mvumi_res {
+ struct list_head entry;
+ dma_addr_t bus_addr;
+ void *virt_addr;
+ unsigned int size;
+ unsigned short type; /* enum Resource_Type */
+};
+
+/* Resource type */
+enum resource_type {
+ RESOURCE_CACHED_MEMORY = 0,
+ RESOURCE_UNCACHED_MEMORY
+};
+
+struct mvumi_sense_data {
+ u8 error_eode:7;
+ u8 valid:1;
+ u8 segment_number;
+ u8 sense_key:4;
+ u8 reserved:1;
+ u8 incorrect_length:1;
+ u8 end_of_media:1;
+ u8 file_mark:1;
+ u8 information[4];
+ u8 additional_sense_length;
+ u8 command_specific_information[4];
+ u8 additional_sense_code;
+ u8 additional_sense_code_qualifier;
+ u8 field_replaceable_unit_code;
+ u8 sense_key_specific[3];
+};
+
+/* Request initiator must set the status to REQ_STATUS_PENDING. */
+#define REQ_STATUS_PENDING 0x80
+
+struct mvumi_cmd {
+ struct list_head queue_pointer;
+ struct mvumi_msg_frame *frame;
+ struct scsi_cmnd *scmd;
+ atomic_t sync_cmd;
+ void *data_buf;
+ unsigned short request_id;
+ unsigned char cmd_status;
+};
+
+/*
+ * the request function type of the inbound frame
+ */
+#define CL_FUN_SCSI_CMD 0x1
+
+struct mvumi_msg_frame {
+ u16 device_id;
+ u16 tag;
+ u8 cmd_flag;
+ u8 req_function;
+ u8 cdb_length;
+ u8 sg_counts;
+ u32 data_transfer_length;
+ u16 request_id;
+ u16 reserved1;
+ u8 cdb[MAX_COMMAND_SIZE];
+ u32 payload[1];
+};
+
+/*
+ * the response flag for the data payload of the outbound frame
+ */
+#define CL_RSP_FLAG_NODATA 0x0
+#define CL_RSP_FLAG_SENSEDATA 0x1
+
+struct mvumi_rsp_frame {
+ u16 device_id;
+ u16 tag;
+ u8 req_status;
+ u8 rsp_flag; /* Indicates the type of Data_Payload.*/
+ u16 request_id;
+ u32 payload[1];
+};
+
+struct mvumi_ob_data {
+ struct list_head list;
+ unsigned char data[0];
+};
+
+struct version_info {
+ u32 ver_major;
+ u32 ver_minor;
+ u32 ver_oem;
+ u32 ver_build;
+};
+
+#define FW_MAX_DELAY 30
+#define MVUMI_FW_BUSY (1U << 0)
+#define MVUMI_FW_ATTACH (1U << 1)
+#define MVUMI_FW_ALLOC (1U << 2)
+
+/*
+ * State of the MU (message unit) firmware
+ */
+#define FW_STATE_IDLE 0
+#define FW_STATE_STARTING 1
+#define FW_STATE_HANDSHAKING 2
+#define FW_STATE_STARTED 3
+#define FW_STATE_ABORT 4
+
+#define HANDSHAKE_SIGNATURE 0x5A5A5A5AL
+#define HANDSHAKE_READYSTATE 0x55AA5AA5L
+#define HANDSHAKE_DONESTATE 0x55AAA55AL
+
+/* HandShake Status definition */
+#define HS_STATUS_OK 1
+#define HS_STATUS_ERR 2
+#define HS_STATUS_INVALID 3
+
+/* HandShake State/Cmd definition */
+#define HS_S_START 1
+#define HS_S_RESET 2
+#define HS_S_PAGE_ADDR 3
+#define HS_S_QUERY_PAGE 4
+#define HS_S_SEND_PAGE 5
+#define HS_S_END 6
+#define HS_S_ABORT 7
+#define HS_PAGE_VERIFY_SIZE 128
+
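+/*
+ * MSG0 register layout: bits 15:0 carry the handshake state/command,
+ * bits 31:16 carry the handshake status.
+ */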
+#define HS_GET_STATE(a) (a & 0xFFFF)
+#define HS_GET_STATUS(a) ((a & 0xFFFF0000) >> 16)
+#define HS_SET_STATE(a, b) (a |= (b & 0xFFFF))
+#define HS_SET_STATUS(a, b) (a |= ((b & 0xFFFF) << 16))
+
+/* handshake frame */
+struct mvumi_hs_frame {
+ u16 size;
+ /* host information */
+ u8 host_type;
+ u8 reserved_1[1];
+ struct version_info host_ver; /* bios or driver version */
+
+ /* controller information */
+ u32 system_io_bus;
+ u32 slot_number;
+ u32 intr_level;
+ u32 intr_vector;
+
+ /* communication list configuration */
+ u32 ib_baseaddr_l;
+ u32 ib_baseaddr_h;
+ u32 ob_baseaddr_l;
+ u32 ob_baseaddr_h;
+
+ u8 ib_entry_size;
+ u8 ob_entry_size;
+ u8 ob_depth;
+ u8 ib_depth;
+
+ /* system time */
+ u64 seconds_since1970;
+};
+
+struct mvumi_hs_header {
+ u8 page_code;
+ u8 checksum;
+ u16 frame_length;
+ u32 frame_content[1];
+};
+
+/*
+ * the page code type of the handshake header
+ */
+#define HS_PAGE_FIRM_CAP 0x1
+#define HS_PAGE_HOST_INFO 0x2
+#define HS_PAGE_FIRM_CTL 0x3
+#define HS_PAGE_CL_INFO 0x4
+#define HS_PAGE_TOTAL 0x5
+
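+/*
+ * HSP_MAX_SIZE evaluates to the size of the largest handshake page and
+ * is used to size the shared handshake buffer.
+ */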
+#define HSP_SIZE(i) sizeof(struct mvumi_hs_page##i)
+
+#define HSP_MAX_SIZE ({ \
+ int size, m1, m2; \
+ m1 = max(HSP_SIZE(1), HSP_SIZE(3)); \
+ m2 = max(HSP_SIZE(2), HSP_SIZE(4)); \
+ size = max(m1, m2); \
+ size; \
+})
+
+/* The format of the page code for Firmware capability */
+struct mvumi_hs_page1 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+
+ u16 number_of_ports;
+ u16 max_devices_support;
+ u16 max_io_support;
+ u16 umi_ver;
+ u32 max_transfer_size;
+ struct version_info fw_ver;
+ u8 cl_in_max_entry_size;
+ u8 cl_out_max_entry_size;
+ u8 cl_inout_list_depth;
+ u8 total_pages;
+ u16 capability;
+ u16 reserved1;
+};
+
+/* The format of the page code for Host information */
+struct mvumi_hs_page2 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+
+ u8 host_type;
+ u8 reserved[3];
+ struct version_info host_ver;
+ u32 system_io_bus;
+ u32 slot_number;
+ u32 intr_level;
+ u32 intr_vector;
+ u64 seconds_since1970;
+};
+
+/* The format of the page code for firmware control */
+struct mvumi_hs_page3 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+ u16 control;
+ u8 reserved[2];
+ u32 host_bufferaddr_l;
+ u32 host_bufferaddr_h;
+ u32 host_eventaddr_l;
+ u32 host_eventaddr_h;
+};
+
+struct mvumi_hs_page4 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+ u32 ib_baseaddr_l;
+ u32 ib_baseaddr_h;
+ u32 ob_baseaddr_l;
+ u32 ob_baseaddr_h;
+ u8 ib_entry_size;
+ u8 ob_entry_size;
+ u8 ob_depth;
+ u8 ib_depth;
+};
+
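+/*
+ * Simple LIFO pool of free command tags, managed by tag_init(),
+ * tag_get_one() and tag_release_one().
+ */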
+struct mvumi_tag {
+ unsigned short *stack;
+ unsigned short top;
+ unsigned short size;
+};
+
+struct mvumi_hba {
+ void *base_addr[MAX_BASE_ADDRESS];
+ void *mmio;
+ struct list_head cmd_pool;
+ struct Scsi_Host *shost;
+ wait_queue_head_t int_cmd_wait_q;
+ struct pci_dev *pdev;
+ unsigned int unique_id;
+ atomic_t fw_outstanding;
+ struct mvumi_instance_template *instancet;
+
+ void *ib_list;
+ dma_addr_t ib_list_phys;
+
+ void *ob_list;
+ dma_addr_t ob_list_phys;
+
+ void *ib_shadow;
+ dma_addr_t ib_shadow_phys;
+
+ void *ob_shadow;
+ dma_addr_t ob_shadow_phys;
+
+ void *handshake_page;
+ dma_addr_t handshake_page_phys;
+
+ unsigned int global_isr;
+ unsigned int isr_status;
+
+ unsigned short max_sge;
+ unsigned short max_target_id;
+ unsigned char *target_map;
+ unsigned int max_io;
+ unsigned int list_num_io;
+ unsigned int ib_max_size;
+ unsigned int ob_max_size;
+ unsigned int ib_max_size_setting;
+ unsigned int ob_max_size_setting;
+ unsigned int max_transfer_size;
+ unsigned char hba_total_pages;
+ unsigned char fw_flag;
+ unsigned char request_id_enabled;
+ unsigned short hba_capability;
+ unsigned short io_seq;
+
+ unsigned int ib_cur_slot;
+ unsigned int ob_cur_slot;
+ unsigned int fw_state;
+
+ struct list_head ob_data_list;
+ struct list_head free_ob_list;
+ struct list_head res_list;
+ struct list_head waiting_req_list;
+
+ struct mvumi_tag tag_pool;
+ struct mvumi_cmd **tag_cmd;
+};
+
+struct mvumi_instance_template {
+ void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *);
+ void (*enable_intr)(void *) ;
+ void (*disable_intr)(void *);
+ int (*clear_intr)(void *);
+ unsigned int (*read_fw_status_reg)(void *);
+};
+
+extern struct timezone sys_tz;
+#endif
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 86afb13f1e7..c06b8e5aa2c 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -40,6 +40,7 @@
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include <scsi/osd_initiator.h>
#include <scsi/osd_sec.h>
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 8b7db1e53c1..b7b92f7be2a 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -567,11 +567,11 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, 0x44);
offset = value & 0x03FFFFFF;
PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 Offset: %x \n", offset));
+ pm8001_printk("Scratchpad 0 Offset: %x\n", offset));
pcilogic = (value & 0xFC000000) >> 26;
pcibar = get_pci_bar_index(pcilogic);
PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 PCI BAR: %d \n", pcibar));
+ pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
pm8001_ha->general_stat_tbl_addr =
@@ -1245,7 +1245,7 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("No free mpi buffer \n"));
+ pm8001_printk("No free mpi buffer\n"));
return -1;
}
BUG_ON(!payload);
@@ -1262,7 +1262,7 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
circularQ->pi_offset, circularQ->producer_idx);
PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("after PI= %d CI= %d \n", circularQ->producer_idx,
+ pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
circularQ->consumer_index));
return 0;
}
@@ -1474,7 +1474,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
- ",param = %d \n", param));
+ ",param = %d\n", param));
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
@@ -1490,14 +1490,14 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
break;
case IO_ABORTED:
PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag \n"));
+ pm8001_printk("IO_ABORTED IOMB Tag\n"));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
break;
case IO_UNDERFLOW:
/* SSP Completion with error */
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
- ",param = %d \n", param));
+ ",param = %d\n", param));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
@@ -1649,6 +1649,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
default:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
@@ -1937,14 +1938,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->buf_valid_size = sizeof(*resp);
} else
PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("response to large \n"));
+ pm8001_printk("response to large\n"));
}
if (pm8001_dev)
pm8001_dev->running_req--;
break;
case IO_ABORTED:
PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag \n"));
+ pm8001_printk("IO_ABORTED IOMB Tag\n"));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
@@ -2728,11 +2729,11 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
if (status != 0) {
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("%x phy execute %x phy op failed! \n",
+ pm8001_printk("%x phy execute %x phy op failed!\n",
phy_id, phy_op));
} else
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("%x phy execute %x phy op success! \n",
+ pm8001_printk("%x phy execute %x phy op success!\n",
phy_id, phy_op));
return 0;
}
@@ -3018,7 +3019,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case PORT_INVALID:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" PortInvalid portID %d \n", port_id));
+ pm8001_printk(" PortInvalid portID %d\n", port_id));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
port->port_attached = 0;
@@ -3027,7 +3028,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case PORT_IN_RESET:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Port In Reset portID %d \n", port_id));
+ pm8001_printk(" Port In Reset portID %d\n", port_id));
break;
case PORT_NOT_ESTABLISHED:
PM8001_MSG_DBG(pm8001_ha,
@@ -3220,7 +3221,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk(" status = 0x%x\n", status));
for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("inb_IOMB_payload[0x%x] 0x%x, \n", i,
+ pm8001_printk("inb_IOMB_payload[0x%x] 0x%x,\n", i,
pPayload->inb_IOMB_payload[i]));
return 0;
}
@@ -3312,12 +3313,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
break;
case HW_EVENT_SAS_PHY_UP:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_START_STATUS \n"));
+ pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_PHY_UP \n"));
+ pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_PHY_STOP_STATUS:
@@ -3329,12 +3330,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
break;
case HW_EVENT_SATA_SPINUP_HOLD:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n"));
+ pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
break;
case HW_EVENT_PHY_DOWN:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_DOWN \n"));
+ pm8001_printk("HW_EVENT_PHY_DOWN\n"));
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
phy->phy_attached = 0;
phy->phy_state = 0;
@@ -3446,7 +3447,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n"));
+ pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
@@ -3456,25 +3457,25 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n"));
+ pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n"));
+ pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_PORT_RECOVER:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVER \n"));
+ pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
break;
case HW_EVENT_PORT_RESET_COMPLETE:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n"));
+ pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
PM8001_MSG_DBG(pm8001_ha,
@@ -3502,21 +3503,21 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
switch (opc) {
case OPC_OUB_ECHO:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n"));
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
break;
case OPC_OUB_HW_EVENT:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_HW_EVENT \n"));
+ pm8001_printk("OPC_OUB_HW_EVENT\n"));
mpi_hw_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_COMP:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_COMP \n"));
+ pm8001_printk("OPC_OUB_SSP_COMP\n"));
mpi_ssp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SMP_COMP:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_COMP \n"));
+ pm8001_printk("OPC_OUB_SMP_COMP\n"));
mpi_smp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_LOCAL_PHY_CNTRL:
@@ -3526,26 +3527,26 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case OPC_OUB_DEV_REGIST:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_REGIST \n"));
+ pm8001_printk("OPC_OUB_DEV_REGIST\n"));
mpi_reg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEREG_DEV:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unresgister the deviece \n"));
+ pm8001_printk("unresgister the deviece\n"));
mpi_dereg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEV_HANDLE:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n"));
+ pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
break;
case OPC_OUB_SATA_COMP:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_COMP \n"));
+ pm8001_printk("OPC_OUB_SATA_COMP\n"));
mpi_sata_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_EVENT:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_EVENT \n"));
+ pm8001_printk("OPC_OUB_SATA_EVENT\n"));
mpi_sata_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_EVENT:
@@ -3858,19 +3859,19 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
if (task->data_dir == PCI_DMA_NONE) {
ATAP = 0x04; /* no data*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n"));
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
} else if (likely(!task->ata_task.device_control_reg_update)) {
if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n"));
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
} else {
ATAP = 0x05; /* PIO*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n"));
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
}
if (task->ata_task.use_ncq &&
dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
ATAP = 0x07; /* FPDMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n"));
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
}
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 172cefb6deb..c21a2163f9f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -61,7 +61,7 @@ static struct scsi_host_template pm8001_sht = {
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
- .slave_configure = pm8001_slave_configure,
+ .slave_configure = sas_slave_configure,
.slave_destroy = sas_slave_destroy,
.scan_finished = pm8001_scan_finished,
.scan_start = pm8001_scan_start,
@@ -76,7 +76,7 @@ static struct scsi_host_template pm8001_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
- .slave_alloc = pm8001_slave_alloc,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = pm8001_host_attrs,
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 6ae059ebb4b..fb3dc997886 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -210,26 +210,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
break;
default:
- rc = -EOPNOTSUPP;
+ rc = -ENOSYS;
}
msleep(300);
return rc;
}
-int pm8001_slave_alloc(struct scsi_device *scsi_dev)
-{
- struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
- if (dev_is_sata(dev)) {
- /* We don't need to rescan targets
- * if REPORT_LUNS request is failed
- */
- if (scsi_dev->lun > 0)
- return -ENXIO;
- scsi_dev->tagged_supported = 1;
- }
- return sas_slave_alloc(scsi_dev);
-}
-
/**
* pm8001_scan_start - we should enable all HBA phys by sending the phy_start
* command to HBA.
@@ -314,22 +300,7 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
{
return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}
-int pm8001_slave_configure(struct scsi_device *sdev)
-{
- struct domain_device *dev = sdev_to_domain_dev(sdev);
- int ret = sas_slave_configure(sdev);
- if (ret)
- return ret;
- if (dev_is_sata(dev)) {
- #ifdef PM8001_DISABLE_NCQ
- struct ata_port *ap = dev->sata_dev.ap;
- struct ata_device *adev = ap->link.device;
- adev->flags |= ATA_DFLAG_NCQ_OFF;
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
- #endif
- }
- return 0;
-}
+
/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
@@ -385,21 +356,8 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
do {
dev = t->dev;
pm8001_dev = dev->lldd_dev;
- if (DEV_IS_GONE(pm8001_dev)) {
- if (pm8001_dev) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("device %d not ready.\n",
- pm8001_dev->device_id));
- } else {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("device %016llx not "
- "ready.\n", SAS_ADDR(dev->sas_addr)));
- }
- rc = SAS_PHY_DOWN;
- goto out_done;
- }
port = &pm8001_ha->port[sas_find_local_port_id(dev)];
- if (!port->port_attached) {
+ if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
if (sas_protocol_ata(t->task_proto)) {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
@@ -651,7 +609,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
flag = 1; /* directly sata*/
}
} /*register this device to HBA*/
- PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device \n"));
+ PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
wait_for_completion(&completion);
@@ -669,30 +627,6 @@ int pm8001_dev_found(struct domain_device *dev)
return pm8001_dev_found_notify(dev);
}
-/**
- * pm8001_alloc_task - allocate a task structure for TMF
- */
-static struct sas_task *pm8001_alloc_task(void)
-{
- struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
- if (task) {
- INIT_LIST_HEAD(&task->list);
- spin_lock_init(&task->task_state_lock);
- task->task_state_flags = SAS_TASK_STATE_PENDING;
- init_timer(&task->timer);
- init_completion(&task->completion);
- }
- return task;
-}
-
-static void pm8001_free_task(struct sas_task *task)
-{
- if (task) {
- BUG_ON(!list_empty(&task->list));
- kfree(task);
- }
-}
-
static void pm8001_task_done(struct sas_task *task)
{
if (!del_timer(&task->timer))
@@ -728,7 +662,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
for (retry = 0; retry < 3; retry++) {
- task = pm8001_alloc_task();
+ task = sas_alloc_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
@@ -789,14 +723,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
SAS_ADDR(dev->sas_addr),
task->task_status.resp,
task->task_status.stat));
- pm8001_free_task(task);
+ sas_free_task(task);
task = NULL;
}
}
ex_err:
BUG_ON(retry == 3 && task != NULL);
- if (task != NULL)
- pm8001_free_task(task);
+ sas_free_task(task);
return res;
}
@@ -811,7 +744,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = NULL;
for (retry = 0; retry < 3; retry++) {
- task = pm8001_alloc_task();
+ task = sas_alloc_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
@@ -864,14 +797,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
SAS_ADDR(dev->sas_addr),
task->task_status.resp,
task->task_status.stat));
- pm8001_free_task(task);
+ sas_free_task(task);
task = NULL;
}
}
ex_err:
BUG_ON(retry == 3 && task != NULL);
- if (task != NULL)
- pm8001_free_task(task);
+ sas_free_task(task);
return res;
}
@@ -1026,13 +958,14 @@ int pm8001_query_task(struct sas_task *task)
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("The task is still in Lun \n"));
+ pm8001_printk("The task is still in Lun\n"));
+ break;
/* The task is not in Lun or failed, reset the phy */
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
PM8001_EH_DBG(pm8001_ha,
pm8001_printk("The task is not in Lun or failed,"
- " reset the phy \n"));
+ " reset the phy\n"));
break;
}
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index b97c8ab0c20..93959febe20 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -471,8 +471,6 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
-int pm8001_slave_alloc(struct scsi_device *scsi_dev);
-int pm8001_slave_configure(struct scsi_device *sdev);
void pm8001_scan_start(struct Scsi_Host *shost);
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
int pm8001_queue_command(struct sas_task *task, const int num,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b86db84d6f3..5163edb925c 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4102,7 +4102,7 @@ static long pmcraid_chr_ioctl(
struct pmcraid_ioctl_header *hdr = NULL;
int retval = -ENOTTY;
- hdr = kmalloc(GFP_KERNEL, sizeof(struct pmcraid_ioctl_header));
+ hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
if (!hdr) {
pmcraid_err("faile to allocate memory for ioctl header\n");
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index cd178b9e40c..959f10055be 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -20,6 +20,7 @@
#include <linux/cdrom.h>
#include <linux/highmem.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index a31e05f3bfd..ac326c41e93 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -23,11 +23,23 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
if (ha->fw_dump_reading == 0)
return 0;
- return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ if (IS_QLA82XX(ha)) {
+ if (off < ha->md_template_size) {
+ rval = memory_read_from_buffer(buf, count,
+ &off, ha->md_tmplt_hdr, ha->md_template_size);
+ return rval;
+ }
+ off -= ha->md_template_size;
+ rval = memory_read_from_buffer(buf, count,
+ &off, ha->md_dump, ha->md_dump_size);
+ return rval;
+ } else
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_len);
}
@@ -41,12 +53,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int reading;
- if (IS_QLA82XX(ha)) {
- ql_dbg(ql_dbg_user, vha, 0x705b,
- "Firmware dump not supported for ISP82xx\n");
- return count;
- }
-
if (off != 0)
return (0);
@@ -59,6 +65,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x705d,
"Firmware dump cleared on (%ld).\n", vha->host_no);
+ if (IS_QLA82XX(vha->hw)) {
+ qla82xx_md_free(vha);
+ qla82xx_md_prep(vha);
+ }
ha->fw_dump_reading = 0;
ha->fw_dumped = 0;
break;
@@ -75,10 +85,29 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla2x00_alloc_fw_dump(vha);
break;
case 3:
- qla2x00_system_error(vha);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+ } else
+ qla2x00_system_error(vha);
+ break;
+ case 4:
+ if (IS_QLA82XX(ha)) {
+ if (ha->md_tmplt_hdr)
+ ql_dbg(ql_dbg_user, vha, 0x705b,
+ "MiniDump supported with this firmware.\n");
+ else
+ ql_dbg(ql_dbg_user, vha, 0x709d,
+ "MiniDump not supported with this firmware.\n");
+ }
+ break;
+ case 5:
+ if (IS_QLA82XX(ha))
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
}
- return (count);
+ return -EINVAL;
}
static struct bin_attribute sysfs_fw_dump_attr = {
@@ -122,7 +151,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
!ha->isp_ops->write_nvram)
- return 0;
+ return -EINVAL;
/* Checksum NVRAM. */
if (IS_FWI2_CAPABLE(ha)) {
@@ -165,7 +194,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
- return (count);
+ return count;
}
static struct bin_attribute sysfs_nvram_attr = {
@@ -239,10 +268,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
int val, valid;
if (off)
- return 0;
+ return -EINVAL;
if (unlikely(pci_channel_offline(ha->pdev)))
- return 0;
+ return -EAGAIN;
if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
return -EINVAL;
@@ -253,7 +282,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
case 0:
if (ha->optrom_state != QLA_SREADING &&
ha->optrom_state != QLA_SWRITING)
- break;
+ return -EINVAL;
ha->optrom_state = QLA_SWAITING;
@@ -266,7 +295,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
case 1:
if (ha->optrom_state != QLA_SWAITING)
- break;
+ return -EINVAL;
ha->optrom_region_start = start;
ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -280,7 +309,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"(%x).\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
- return count;
+ return -ENOMEM;
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
@@ -299,7 +328,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
case 2:
if (ha->optrom_state != QLA_SWAITING)
- break;
+ return -EINVAL;
/*
* We need to be more restrictive on which FLASH regions are
@@ -347,7 +376,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
- return count;
+ return -ENOMEM;
}
ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -358,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
case 3:
if (ha->optrom_state != QLA_SWRITING)
- break;
+ return -ENOMEM;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7068,
@@ -374,7 +403,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_region_start, ha->optrom_region_size);
break;
default:
- count = -EINVAL;
+ return -EINVAL;
}
return count;
}
@@ -398,10 +427,10 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
if (unlikely(pci_channel_offline(ha->pdev)))
- return 0;
+ return -EAGAIN;
if (!capable(CAP_SYS_ADMIN))
- return 0;
+ return -EINVAL;
if (IS_NOCACHE_VPD_TYPE(ha))
ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
@@ -438,17 +467,17 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
/* Update flash version information for 4Gb & above. */
if (!IS_FWI2_CAPABLE(ha))
- goto done;
+ return -EINVAL;
tmp_data = vmalloc(256);
if (!tmp_data) {
ql_log(ql_log_warn, vha, 0x706b,
"Unable to allocate memory for VPD information update.\n");
- goto done;
+ return -ENOMEM;
}
ha->isp_ops->get_flash_version(vha, tmp_data);
vfree(tmp_data);
-done:
+
return count;
}
@@ -505,8 +534,7 @@ do_read:
"Unable to read SFP data (%x/%x/%x).\n", rval,
addr, offset);
- count = 0;
- break;
+ return -EIO;
}
memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
buf += SFP_BLOCK_SIZE;
@@ -536,7 +564,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
int type;
if (off != 0)
- return 0;
+ return -EINVAL;
type = simple_strtol(buf, NULL, 10);
switch (type) {
@@ -546,13 +574,18 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+ }
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
if (!IS_QLA81XX(ha))
- break;
+ return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
@@ -571,7 +604,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
if (!IS_QLA82XX(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
"FCoE ctx reset no supported.\n");
- return count;
+ return -EPERM;
}
ql_log(ql_log_info, vha, 0x7072,
@@ -607,7 +640,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
ha->edc_data_len = 0;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
- return 0;
+ return -EINVAL;
if (!ha->edc_data) {
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -615,7 +648,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
if (!ha->edc_data) {
ql_log(ql_log_warn, vha, 0x7073,
"Unable to allocate memory for EDC write.\n");
- return 0;
+ return -ENOMEM;
}
}
@@ -634,9 +667,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7074,
- "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
+ "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
rval, dev, adr, opt, len, buf[8]);
- return 0;
+ return -EIO;
}
return count;
@@ -665,7 +698,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
ha->edc_data_len = 0;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
- return 0;
+ return -EINVAL;
if (!ha->edc_data) {
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -673,7 +706,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
if (!ha->edc_data) {
ql_log(ql_log_warn, vha, 0x708c,
"Unable to allocate memory for EDC status.\n");
- return 0;
+ return -ENOMEM;
}
}
@@ -693,7 +726,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x7075,
"Unable to write EDC status (%x) %02x:%04x:%02x.\n",
rval, dev, adr, opt, len);
- return 0;
+ return -EIO;
}
ha->edc_data_len = len;
@@ -805,7 +838,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (!ha->dcbx_tlv) {
ql_log(ql_log_warn, vha, 0x7078,
"Unable to allocate memory for DCBX TLV read-data.\n");
- return 0;
+ return -ENOMEM;
}
do_read:
@@ -817,7 +850,7 @@ do_read:
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7079,
"Unable to read DCBX TLV (%x).\n", rval);
- count = 0;
+ return -EIO;
}
memcpy(buf, ha->dcbx_tlv, count);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 07d1767cd26..8b641a8a0c7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -704,6 +704,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
if ((ha->current_topology == ISP_CFG_F ||
+ (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
(IS_QLA81XX(ha) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
@@ -1447,6 +1448,148 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
}
static int
+qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_image_version_list *list = (void *)bsg;
+ struct qla_image_version *image;
+ uint32_t count;
+ dma_addr_t sfp_dma;
+ void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
+
+ image = list->version;
+ count = list->count;
+ while (count--) {
+ memcpy(sfp, &image->field_info, sizeof(image->field_info));
+ rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+ image->field_address.device, image->field_address.offset,
+ sizeof(image->field_info), image->field_address.option);
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+ image++;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_status_reg *sr = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
+
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
+ sr->field_address.device, sr->field_address.offset,
+ sizeof(sr->status_reg), sr->field_address.option);
+ sr->status_reg = *sfp;
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_status_reg *sr = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
+
+ *sfp = sr->status_reg;
+ rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+ sr->field_address.device, sr->field_address.offset,
+ sizeof(sr->status_reg), sr->field_address.option);
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1474,6 +1617,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_UPDATE_FLASH:
return qla2x00_update_optrom(bsg_job);
+ case QL_VND_SET_FRU_VERSION:
+ return qla2x00_update_fru_versions(bsg_job);
+
+ case QL_VND_READ_FRU_STATUS:
+ return qla2x00_read_fru_status(bsg_job);
+
+ case QL_VND_WRITE_FRU_STATUS:
+ return qla2x00_write_fru_status(bsg_job);
+
default:
bsg_job->reply->result = (DID_ERROR << 16);
bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 0f0f54e35f0..70caa63a893 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -16,6 +16,16 @@
#define QL_VND_FCP_PRIO_CFG_CMD 0x06
#define QL_VND_READ_FLASH 0x07
#define QL_VND_UPDATE_FLASH 0x08
+#define QL_VND_SET_FRU_VERSION 0x0B
+#define QL_VND_READ_FRU_STATUS 0x0C
+#define QL_VND_WRITE_FRU_STATUS 0x0D
+
+/* BSG Vendor specific subcode returns */
+#define EXT_STATUS_OK 0
+#define EXT_STATUS_ERR 1
+#define EXT_STATUS_INVALID_PARAM 6
+#define EXT_STATUS_MAILBOX 11
+#define EXT_STATUS_NO_MEMORY 17
/* BSG definations for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -141,4 +151,36 @@ struct qla_port_param {
uint16_t mode;
uint16_t speed;
} __attribute__ ((packed));
+
+
+/* FRU VPD */
+
+#define MAX_FRU_SIZE 36
+
+struct qla_field_address {
+ uint16_t offset;
+ uint16_t device;
+ uint16_t option;
+} __packed;
+
+struct qla_field_info {
+ uint8_t version[MAX_FRU_SIZE];
+} __packed;
+
+struct qla_image_version {
+ struct qla_field_address field_address;
+ struct qla_field_info field_info;
+} __packed;
+
+struct qla_image_version_list {
+ uint32_t count;
+ struct qla_image_version version[0];
+} __packed;
+
+struct qla_status_reg {
+ struct qla_field_address field_address;
+ uint8_t status_reg;
+ uint8_t reserved[7];
+} __packed;
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d79cd8a5f83..9df4787715c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,7 +12,7 @@
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0116 | |
- * | Mailbox commands | 0x1126 | |
+ * | Mailbox commands | 0x1129 | |
* | Device Discovery | 0x2083 | |
* | Queue Command and IO tracing | 0x302e | 0x3008 |
* | DPC Thread | 0x401c | |
@@ -22,7 +22,7 @@
* | Task Management | 0x8041 | |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb04f | |
+ * | ISP82XX Specific | 0xb051 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
@@ -403,7 +403,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
return ptr + sizeof(struct qla2xxx_mq_chain);
}
-static void
+void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a03eaf40f37..fcf052c50bf 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2438,7 +2438,8 @@ struct qla_hw_data {
uint32_t quiesce_owner:1;
uint32_t thermal_supported:1;
uint32_t isp82xx_reset_hdlr_active:1;
- /* 26 bits */
+ uint32_t isp82xx_reset_owner:1;
+ /* 28 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2822,6 +2823,12 @@ struct qla_hw_data {
uint8_t fw_type;
__le32 file_prd_off; /* File firmware product offset */
+
+ uint32_t md_template_size;
+ void *md_tmplt_hdr;
+ dma_addr_t md_tmplt_hdr_dma;
+ void *md_dump;
+ uint32_t md_dump_size;
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 29b1a3e2823..ce32d8135c9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -104,6 +104,8 @@ extern int ql2xenablehba_err_chk;
extern int ql2xtargetreset;
extern int ql2xdontresethba;
extern unsigned int ql2xmaxlun;
+extern int ql2xmdcapmask;
+extern int ql2xmdenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -407,6 +409,8 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *);
extern int qla24xx_beacon_on(struct scsi_qla_host *);
extern int qla24xx_beacon_off(struct scsi_qla_host *);
extern void qla24xx_beacon_blink(struct scsi_qla_host *);
+extern int qla82xx_beacon_on(struct scsi_qla_host *);
+extern int qla82xx_beacon_off(struct scsi_qla_host *);
extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t);
@@ -442,6 +446,7 @@ extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
uint8_t *, uint32_t);
+extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
/*
* Global Function Prototypes in qla_gs.c source file.
@@ -569,7 +574,10 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(srb_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
+extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
+extern char *qdev_state(uint32_t);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -579,4 +587,14 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
+
+/* Minidump related functions */
+extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
+extern int qla82xx_md_get_template(scsi_qla_host_t *);
+extern int qla82xx_md_alloc(scsi_qla_host_t *);
+extern void qla82xx_md_free(scsi_qla_host_t *);
+extern int qla82xx_md_collect(scsi_qla_host_t *);
+extern void qla82xx_md_prep(scsi_qla_host_t *);
+extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
+
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 37da04d3db2..f03e915f187 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1480,13 +1480,19 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
- rval = qla2x00_get_fw_version(vha,
- &ha->fw_major_version,
- &ha->fw_minor_version,
- &ha->fw_subminor_version,
- &ha->fw_attributes, &ha->fw_memory_size,
- ha->mpi_version, &ha->mpi_capabilities,
- ha->phy_version);
+ if (IS_QLA82XX(ha))
+ qla82xx_check_md_needed(vha);
+ else {
+ rval = qla2x00_get_fw_version(vha,
+ &ha->fw_major_version,
+ &ha->fw_minor_version,
+ &ha->fw_subminor_version,
+ &ha->fw_attributes,
+ &ha->fw_memory_size,
+ ha->mpi_version,
+ &ha->mpi_capabilities,
+ ha->phy_version);
+ }
if (rval != QLA_SUCCESS)
goto failed;
ha->flags.npiv_supported = 0;
@@ -1503,10 +1509,8 @@ enable_82xx_npiv:
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump) {
- if (!IS_QLA82XX(ha))
- qla2x00_alloc_fw_dump(vha);
- }
+ if (!fw_major_version && ql2xallocfwdump)
+ qla2x00_alloc_fw_dump(vha);
}
} else {
ql_log(ql_log_fatal, vha, 0x00cd,
@@ -1924,7 +1928,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn,
- vha, 0x8043,
+ vha, 0x8026,
"Init chip failed.\n");
break;
}
@@ -1933,7 +1937,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
cs84xx_time = jiffies - cs84xx_time;
wtime += cs84xx_time;
mtime += cs84xx_time;
- ql_dbg(ql_dbg_taskm, vha, 0x8042,
+ ql_dbg(ql_dbg_taskm, vha, 0x8025,
"Increasing wait time by %ld. "
"New time %ld.\n", cs84xx_time,
wtime);
@@ -5443,11 +5447,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
/* Update the firmware version */
- qla2x00_get_fw_version(vha, &ha->fw_major_version,
- &ha->fw_minor_version, &ha->fw_subminor_version,
- &ha->fw_attributes, &ha->fw_memory_size,
- ha->mpi_version, &ha->mpi_capabilities,
- ha->phy_version);
+ status = qla82xx_check_md_needed(vha);
if (ha->fce) {
ha->flags.fce_enabled = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8a7591f035e..2516adf1aee 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2060,6 +2060,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
+ case MARKER_TYPE:
+ /* Do nothing in this case; this check prevents it
+ * from falling into the default case
+ */
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2274,7 +2279,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
ha = rsp->hw;
/* Clear the interrupt, if enabled, for this response queue */
- if (rsp->options & ~BIT_6) {
+ if (!ha->flags.disable_msix_handshake) {
reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index f7604ea1af8..3b3cec9f6ac 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4186,3 +4186,130 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
return rval;
}
+
+int
+qla82xx_md_get_template_size(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
+
+ memset(mcp->mb, 0, sizeof(mcp->mb));
+ mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
+ mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
+
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Always copy back return mailbox values. */
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1120,
+ "mailbox command FAILED=0x%x, subcode=%x.\n",
+ (mcp->mb[1] << 16) | mcp->mb[0],
+ (mcp->mb[3] << 16) | mcp->mb[2]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
+ ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
+ if (!ha->md_template_size) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1122,
+ "Null template size obtained.\n");
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ return rval;
+}
+
+int
+qla82xx_md_get_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
+
+ ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+ ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+ if (!ha->md_tmplt_hdr) {
+ ql_log(ql_log_warn, vha, 0x1124,
+ "Unable to allocate memory for Minidump template.\n");
+ return rval;
+ }
+
+ memset(mcp->mb, 0, sizeof(mcp->mb));
+ mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[2] = LSW(RQST_TMPLT);
+ mcp->mb[3] = MSW(RQST_TMPLT);
+ mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[8] = LSW(ha->md_template_size);
+ mcp->mb[9] = MSW(ha->md_template_size);
+
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1125,
+ "mailbox command FAILED=0x%x, subcode=%x.\n",
+ ((mcp->mb[1] << 16) | mcp->mb[0]),
+ ((mcp->mb[3] << 16) | mcp->mb[2]));
+ } else
+ ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
+ return rval;
+}
+
+int
+qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1127,
+ "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_SET_LED_CONFIG;
+ if (enable)
+ mcp->mb[7] = 0xE;
+ else
+ mcp->mb[7] = 0xD;
+
+ mcp->out_mb = MBX_7|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1128,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1129,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 049807cda41..94bded5ddce 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,8 @@
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#define MASK(n) ((1ULL<<(n))-1)
@@ -328,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = {
};
/* Device states */
-char *qdev_state[] = {
+char *q_dev_state[] = {
"Unknown",
"Cold",
"Initializing",
@@ -339,6 +341,11 @@ char *qdev_state[] = {
"Quiescent",
};
+char *qdev_state(uint32_t dev_state)
+{
+ return q_dev_state[dev_state];
+}
+
/*
* In: 'off' is offset from CRB space in 128M pci map
* Out: 'off' is 2M pci map addr
@@ -2355,9 +2362,13 @@ qla82xx_need_reset(struct qla_hw_data *ha)
uint32_t drv_state;
int rval;
- drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
- rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
- return rval;
+ if (ha->flags.isp82xx_reset_owner)
+ return 1;
+ else {
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ return rval;
+ }
}
static inline void
@@ -2374,8 +2385,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
}
drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
- ql_log(ql_log_info, vha, 0x00bb,
- "drv_state = 0x%x.\n", drv_state);
+ ql_dbg(ql_dbg_init, vha, 0x00bb,
+ "drv_state = 0x%08x.\n", drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -2598,7 +2609,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = cpu_to_le32(dsd_list_len);
+ cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
} else {
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
@@ -3529,6 +3540,7 @@ static void
qla82xx_need_reset_handler(scsi_qla_host_t *vha)
{
uint32_t dev_state, drv_state, drv_active;
+ uint32_t active_mask = 0;
unsigned long reset_timeout;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
@@ -3541,15 +3553,32 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
qla82xx_idc_lock(ha);
}
- qla82xx_set_rst_ready(ha);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ if (!ha->flags.isp82xx_reset_owner) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb028,
+ "reset_acknowledged by 0x%x\n", ha->portnum);
+ qla82xx_set_rst_ready(ha);
+ } else {
+ active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
+ drv_active &= active_mask;
+ ql_dbg(ql_dbg_p3p, vha, 0xb029,
+ "active_mask: 0x%08x\n", active_mask);
+ }
/* wait for 10 seconds for reset ack from all functions */
reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- while (drv_state != drv_active) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb02a,
+ "drv_state: 0x%08x, drv_active: 0x%08x, "
+ "dev_state: 0x%08x, active_mask: 0x%08x\n",
+ drv_state, drv_active, dev_state, active_mask);
+
+ while (drv_state != drv_active &&
+ dev_state != QLA82XX_DEV_INITIALIZING) {
if (time_after_eq(jiffies, reset_timeout)) {
ql_log(ql_log_warn, vha, 0x00b5,
"Reset timeout.\n");
@@ -3560,23 +3589,87 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
qla82xx_idc_lock(ha);
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ if (ha->flags.isp82xx_reset_owner)
+ drv_active &= active_mask;
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
}
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ ql_dbg(ql_dbg_p3p, vha, 0xb02b,
+ "drv_state: 0x%08x, drv_active: 0x%08x, "
+ "dev_state: 0x%08x, active_mask: 0x%08x\n",
+ drv_state, drv_active, dev_state, active_mask);
+
ql_log(ql_log_info, vha, 0x00b6,
"Device state is 0x%x = %s.\n",
dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
- if (dev_state != QLA82XX_DEV_INITIALIZING) {
+ if (dev_state != QLA82XX_DEV_INITIALIZING &&
+ dev_state != QLA82XX_DEV_COLD) {
ql_log(ql_log_info, vha, 0x00b7,
"HW State: COLD/RE-INIT.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ if (ql2xmdenable) {
+ if (qla82xx_md_collect(vha))
+ ql_log(ql_log_warn, vha, 0xb02c,
+ "Not able to collect minidump.\n");
+ } else
+ ql_log(ql_log_warn, vha, 0xb04f,
+ "Minidump disabled.\n");
}
}
int
+qla82xx_check_md_needed(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
+ int rval = QLA_SUCCESS;
+
+ fw_major_version = ha->fw_major_version;
+ fw_minor_version = ha->fw_minor_version;
+ fw_subminor_version = ha->fw_subminor_version;
+
+ rval = qla2x00_get_fw_version(vha, &ha->fw_major_version,
+ &ha->fw_minor_version, &ha->fw_subminor_version,
+ &ha->fw_attributes, &ha->fw_memory_size,
+ ha->mpi_version, &ha->mpi_capabilities,
+ ha->phy_version);
+
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ if (ql2xmdenable) {
+ if (!ha->fw_dumped) {
+ if (fw_major_version != ha->fw_major_version ||
+ fw_minor_version != ha->fw_minor_version ||
+ fw_subminor_version != ha->fw_subminor_version) {
+
+ ql_log(ql_log_info, vha, 0xb02d,
+ "Firmware version differs "
+ "Previous version: %d:%d:%d - "
+ "New version: %d:%d:%d\n",
+ ha->fw_major_version,
+ ha->fw_minor_version,
+ ha->fw_subminor_version,
+ fw_major_version, fw_minor_version,
+ fw_subminor_version);
+ /* Release MiniDump resources */
+ qla82xx_md_free(vha);
+ /* Allocate MiniDump resources */
+ qla82xx_md_prep(vha);
+ } else
+ ql_log(ql_log_info, vha, 0xb02e,
+ "Firmware dump available to retrieve\n",
+ vha->host_no);
+ }
+ }
+ return rval;
+}
+
+
+int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
uint32_t fw_heartbeat_counter;
@@ -3637,7 +3730,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x009b,
"Device state is 0x%x = %s.\n",
dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3659,26 +3752,33 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x009d,
"Device state is 0x%x = %s.\n",
dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] :
+ dev_state < MAX_STATES ? qdev_state(dev_state) :
"Unknown");
}
switch (dev_state) {
case QLA82XX_DEV_READY:
+ qla82xx_check_md_needed(vha);
+ ha->flags.isp82xx_reset_owner = 0;
goto exit;
case QLA82XX_DEV_COLD:
rval = qla82xx_device_bootstrap(vha);
- goto exit;
+ break;
case QLA82XX_DEV_INITIALIZING:
qla82xx_idc_unlock(ha);
msleep(1000);
qla82xx_idc_lock(ha);
break;
case QLA82XX_DEV_NEED_RESET:
- if (!ql2xdontresethba)
- qla82xx_need_reset_handler(vha);
+ if (!ql2xdontresethba)
+ qla82xx_need_reset_handler(vha);
+ else {
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ }
dev_init_timeout = jiffies +
- (ha->nx_dev_init_timeout * HZ);
+ (ha->nx_dev_init_timeout * HZ);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
qla82xx_need_qsnt_handler(vha);
@@ -3791,6 +3891,28 @@ int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return rval;
}
+void
+qla82xx_set_reset_owner(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t dev_state;
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_READY) {
+ ql_log(ql_log_info, vha, 0xb02f,
+ "HW State: NEED RESET\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_NEED_RESET);
+ ha->flags.isp82xx_reset_owner = 1;
+ ql_dbg(ql_dbg_p3p, vha, 0xb030,
+ "reset_owner is 0x%x\n", ha->portnum);
+ } else
+ ql_log(ql_log_info, vha, 0xb031,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+}
+
/*
* qla82xx_abort_isp
* Resets ISP and aborts all outstanding commands.
@@ -3806,7 +3928,6 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
{
int rval;
struct qla_hw_data *ha = vha->hw;
- uint32_t dev_state;
if (vha->device_flags & DFLG_DEV_FAILED) {
ql_log(ql_log_warn, vha, 0x8024,
@@ -3816,16 +3937,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
ha->flags.isp82xx_reset_hdlr_active = 1;
qla82xx_idc_lock(ha);
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_READY) {
- ql_log(ql_log_info, vha, 0x8025,
- "HW State: NEED RESET.\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_NEED_RESET);
- } else
- ql_log(ql_log_info, vha, 0x8026,
- "Hw State: %s.\n", dev_state < MAX_STATES ?
- qdev_state[dev_state] : "Unknown");
+ qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
rval = qla82xx_device_state_handler(vha);
@@ -4016,3 +4128,803 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
}
}
}
+
+/* Minidump related functions */
+int
+qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
+{
+ uint32_t off_value, rval = 0;
+
+ WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
+ (off & 0xFFFF0000));
+
+ /* Read back value to make sure write has gone through */
+ RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ off_value = (off & 0x0000FFFF);
+
+ if (flag)
+ WRT_REG_DWORD((void *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
+ data);
+ else
+ rval = RD_REG_DWORD((void *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+
+ return rval;
+}
+
+static int
+qla82xx_minidump_process_control(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla82xx_md_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time;
+ uint32_t addr, index, crb_addr;
+ unsigned long wtime;
+ struct qla82xx_md_template_hdr *tmplt_hdr;
+ uint32_t rval = QLA_SUCCESS;
+ int i;
+
+ tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+ crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
+ crb_addr = crb_entry->addr;
+
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla82xx_md_rw_32(ha, crb_addr,
+ crb_entry->value_1, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+ qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value |= crb_entry->value_3;
+ qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+
+ do {
+ if ((read_value & crb_entry->value_2)
+ == crb_entry->value_1)
+ break;
+ else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else
+ read_value = qla82xx_md_rw_32(ha,
+ crb_addr, 0, 0);
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else
+ addr = crb_addr;
+
+ read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else
+ addr = crb_addr;
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else
+ read_value = crb_entry->value_1;
+
+ qla82xx_md_rw_32(ha, addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ return rval;
+}
+
+static void
+qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_md_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla82xx_md_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, s_addr, s_value, 1);
+ r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_md_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_addr);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int
+qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla82xx_md_entry_cache *cache_hdr;
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
+ if (c_value_w)
+ qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
+ if ((c_value_r & p_mask) == 0)
+ break;
+ else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ ql_dbg(ql_dbg_p3p, vha, 0xb032,
+ "c_value_r: 0x%x, poll_mask: 0x%lx, "
+ "w_time: 0x%lx\n",
+ c_value_r, p_mask, w_time);
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static void
+qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla82xx_md_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
+ qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla82xx_md_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, s_addr, qid, 1);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_value;
+ uint32_t i, loop_cnt;
+ struct qla82xx_md_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
+ r_addr = rom_hdr->read_addr;
+ loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+ (r_addr & 0xFFFF0000), 1);
+ r_value = qla82xx_md_rw_32(ha,
+ MD_DIRECT_ROM_READ_BASE +
+ (r_addr & 0x0000FFFF), 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += sizeof(uint32_t);
+ }
+ *d_ptr = data_ptr;
+}
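Editor's note: the ROM read above banks the flash address through a window register: the upper 16 bits select the window, the lower 16 bits offset into the fixed read base. A small sketch of that address split (illustrative only; the helper name is made up):

static void rom_read_window(uint32_t flash_addr,
                            uint32_t *window_val, uint32_t *read_addr)
{
        /* upper 16 bits are programmed into MD_DIRECT_ROM_WINDOW */
        *window_val = flash_addr & 0xFFFF0000;
        /* lower 16 bits index into the direct-read aperture */
        *read_addr = MD_DIRECT_ROM_READ_BASE + (flash_addr & 0x0000FFFF);
}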
+
+static int
+qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla82xx_md_entry_rdmem *m_hdr;
+ unsigned long flags;
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t *data_ptr = *d_ptr;
+
+ m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ if (r_addr & 0xf) {
+ ql_log(ql_log_warn, vha, 0xb033,
+ "Read addr 0x%x not 16 bytes alligned\n", r_addr);
+ return rval;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ ql_log(ql_log_warn, vha, 0xb034,
+ "Read data[0x%x] not multiple of 16 bytes\n",
+ m_hdr->read_data_size);
+ return rval;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb035,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+ r_value = 0;
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+ r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ r_value = qla82xx_md_rw_32(ha,
+ MD_MIU_TEST_AGT_CTRL, 0, 0);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "failed to read through agent\n");
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return rval;
+ }
+
+ for (j = 0; j < 4; j++) {
+ r_data = qla82xx_md_rw_32(ha,
+ MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
+ *data_ptr++ = cpu_to_le32(r_data);
+ }
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static int
+qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint64_t chksum = 0;
+ uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
+ int count = ha->md_template_size/sizeof(uint32_t);
+
+ while (count-- > 0)
+ chksum += *d_ptr++;
+ while (chksum >> 32)
+ chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
+ return ~chksum;
+}
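Editor's note: the template is accepted only when its 32-bit words, summed into a 64-bit accumulator and folded end-around into 32 bits, complement to zero. A standalone sketch of the same folding scheme (illustrative, not part of the driver):

static uint32_t md_template_chksum(const uint32_t *words, uint32_t count)
{
        uint64_t sum = 0;

        while (count-- > 0)
                sum += *words++;
        while (sum >> 32)
                sum = (sum & 0xFFFFFFFF) + (sum >> 32);
        return ~(uint32_t)sum;          /* zero for a valid template */
}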
+
+static void
+qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+ ql_dbg(ql_dbg_p3p, vha, 0xb036,
+ "Skipping entry[%d]: "
+ "ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+}
+
+int
+qla82xx_md_collect(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int no_entry_hdr = 0;
+ qla82xx_md_entry_hdr_t *entry_hdr;
+ struct qla82xx_md_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
+ int i = 0, rval = QLA_FUNCTION_FAILED;
+
+ tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+ data_ptr = (uint32_t *)ha->md_dump;
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_info, vha, 0xb037,
+ "Firmware dump available to retrive\n");
+ goto md_failed;
+ }
+
+ ha->fw_dumped = 0;
+
+ if (!ha->md_tmplt_hdr || !ha->md_dump) {
+ ql_log(ql_log_warn, vha, 0xb038,
+ "Memory not allocated for minidump capture\n");
+ goto md_failed;
+ }
+
+ if (qla82xx_validate_template_chksum(vha)) {
+ ql_log(ql_log_info, vha, 0xb039,
+ "Template checksum validation error\n");
+ goto md_failed;
+ }
+
+ no_entry_hdr = tmplt_hdr->num_of_entries;
+ ql_dbg(ql_dbg_p3p, vha, 0xb03a,
+ "No of entry headers in Template: 0x%x\n", no_entry_hdr);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb03b,
+ "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+ f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+ /* Validate whether required debug level is set */
+ if ((f_capture_mask & 0x3) != 0x3) {
+ ql_log(ql_log_warn, vha, 0xb03c,
+ "Minimum required capture mask[0x%x] level not set\n",
+ f_capture_mask);
+ goto md_failed;
+ }
+ tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+
+ tmplt_hdr->driver_info[0] = vha->host_no;
+ tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
+ (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
+ QLA_DRIVER_BETA_VER;
+
+ total_data_size = ha->md_dump_size;
+
+ ql_log(ql_log_info, vha, 0xb03d,
+ "Total minidump data_size 0x%x to be captured\n", total_data_size);
+
+ /* Check whether template obtained is valid */
+ if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
+ ql_log(ql_log_warn, vha, 0xb04e,
+ "Bad template header entry type: 0x%x obtained\n",
+ tmplt_hdr->entry_type);
+ goto md_failed;
+ }
+
+ entry_hdr = (qla82xx_md_entry_hdr_t *) \
+ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+
+ /* Walk through the entry headers */
+ for (i = 0; i < no_entry_hdr; i++) {
+
+ if (data_collected > total_data_size) {
+ ql_log(ql_log_warn, vha, 0xb03e,
+ "More MiniDump data collected: [0x%x]\n",
+ data_collected);
+ goto md_failed;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ql2xmdcapmask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ ql_dbg(ql_dbg_p3p, vha, 0xb03f,
+ "Skipping entry[%d]: "
+ "ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ i, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+ goto skip_nxt_entry;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb040,
+ "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
+ "entry_type: 0x%x, captrue_mask: 0x%x\n",
+ __func__, i, data_ptr, entry_hdr,
+ entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb041,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected, (ha->md_dump_size - data_collected));
+
+ /* Decode the entry type and take
+ * required action to capture debug data */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla82xx_minidump_process_control(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla82xx_minidump_process_rdcrb(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla82xx_minidump_process_rdmem(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ qla82xx_minidump_process_rdrom(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla82xx_minidump_process_l2tag(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla82xx_minidump_process_l1cache(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla82xx_minidump_process_rdocm(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla82xx_minidump_process_rdmux(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla82xx_minidump_process_queue(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb042,
+ "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
+
+ data_collected = (uint8_t *)data_ptr -
+ (uint8_t *)ha->md_dump;
+skip_nxt_entry:
+ entry_hdr = (qla82xx_md_entry_hdr_t *) \
+ (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+ }
+
+ if (data_collected != total_data_size) {
+ ql_log(ql_log_warn, vha, 0xb043,
+ "MiniDump data mismatch: Data collected: [0x%x], "
+ "total_data_size: [0x%x]\n",
+ data_collected, total_data_size);
+ goto md_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0xb044,
+ "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+ vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+ ha->fw_dumped = 1;
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+md_failed:
+ return rval;
+}
+
+int
+qla82xx_md_alloc(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int i, k;
+ struct qla82xx_md_template_hdr *tmplt_hdr;
+
+ tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+
+ if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
+ ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
+ ql_log(ql_log_info, vha, 0xb045,
+ "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
+ ql2xmdcapmask);
+ }
+
+ for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
+ if (i & ql2xmdcapmask)
+ ha->md_dump_size += tmplt_hdr->capture_size_array[k];
+ }
+
+ if (ha->md_dump) {
+ ql_log(ql_log_warn, vha, 0xb046,
+ "Firmware dump previously allocated.\n");
+ return 1;
+ }
+
+ ha->md_dump = vmalloc(ha->md_dump_size);
+ if (ha->md_dump == NULL) {
+ ql_log(ql_log_warn, vha, 0xb047,
+ "Unable to allocate memory for Minidump size "
+ "(0x%x).\n", ha->md_dump_size);
+ return 1;
+ }
+ return 0;
+}
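Editor's note: the sizing loop above maps each capture-mask bit above bit 0 to one slot of the template's capture_size_array[]: 0x02 selects index 1, 0x04 index 2, up to 0x80 for index 7, so a mask of 0x1F adds indices 1 through 4. A sketch that factors the same computation into a helper (the helper name is hypothetical):

static uint32_t md_dump_size_for_mask(const uint32_t *capture_size_array,
                                      uint32_t cap_mask)
{
        uint32_t i, k, size = 0;

        for (i = 0x2, k = 1; i & QLA82XX_DEFAULT_CAP_MASK; i <<= 1, k++) {
                if (i & cap_mask)
                        size += capture_size_array[k];
        }
        return size;
}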
+
+void
+qla82xx_md_free(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Release the template header allocated */
+ if (ha->md_tmplt_hdr) {
+ ql_log(ql_log_info, vha, 0xb048,
+ "Free MiniDump template: %p, size (%d KB)\n",
+ ha->md_tmplt_hdr, ha->md_template_size / 1024);
+ dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
+ ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
+ ha->md_tmplt_hdr = NULL;
+ }
+
+ /* Release the template data buffer allocated */
+ if (ha->md_dump) {
+ ql_log(ql_log_info, vha, 0xb049,
+ "Free MiniDump memory: %p, size (%d KB)\n",
+ ha->md_dump, ha->md_dump_size / 1024);
+ vfree(ha->md_dump);
+ ha->md_dump_size = 0;
+ ha->md_dump = NULL;
+ }
+}
+
+void
+qla82xx_md_prep(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+
+ /* Get Minidump template size */
+ rval = qla82xx_md_get_template_size(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xb04a,
+ "MiniDump Template size obtained (%d KB)\n",
+ ha->md_template_size / 1024);
+
+ /* Get Minidump template */
+ rval = qla82xx_md_get_template(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb04b,
+ "MiniDump Template obtained\n");
+
+ /* Allocate memory for minidump */
+ rval = qla82xx_md_alloc(vha);
+ if (rval == QLA_SUCCESS)
+ ql_log(ql_log_info, vha, 0xb04c,
+ "MiniDump memory allocated (%d KB)\n",
+ ha->md_dump_size / 1024);
+ else {
+ ql_log(ql_log_info, vha, 0xb04d,
+ "Free MiniDump template: %p, size: (%d KB)\n",
+ ha->md_tmplt_hdr,
+ ha->md_template_size / 1024);
+ dma_free_coherent(&ha->pdev->dev,
+ ha->md_template_size,
+ ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
+ ha->md_tmplt_hdr = NULL;
+ }
+
+ }
+ }
+}
+
+int
+qla82xx_beacon_on(struct scsi_qla_host *vha)
+{
+
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ qla82xx_idc_lock(ha);
+ rval = qla82xx_mbx_beacon_ctl(vha, 1);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb050,
+ "mbx set led config failed in %s\n", __func__);
+ goto exit;
+ }
+ ha->beacon_blink_led = 1;
+exit:
+ qla82xx_idc_unlock(ha);
+ return rval;
+}
+
+int
+qla82xx_beacon_off(struct scsi_qla_host *vha)
+{
+
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ qla82xx_idc_lock(ha);
+ rval = qla82xx_mbx_beacon_ctl(vha, 0);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb051,
+ "mbx set led config failed in %s\n", __func__);
+ goto exit;
+ }
+ ha->beacon_blink_led = 0;
+exit:
+ qla82xx_idc_unlock(ha);
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 8a21832c669..57820c199bc 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -484,8 +484,6 @@
#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
-
-#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
@@ -890,6 +888,7 @@ struct ct6_dsd {
};
#define MBC_TOGGLE_INTERRUPT 0x10
+#define MBC_SET_LED_CONFIG 0x125
/* Flash offset */
#define FLT_REG_BOOTLOAD_82XX 0x72
@@ -922,4 +921,256 @@ struct ct6_dsd {
#define M25P_INSTR_DP 0xb9
#define M25P_INSTR_RES 0xab
+/* Minidump related */
+
+/*
+ * Version of the template
+ * 4 Bytes
+ * X.Major.Minor.RELEASE
+ */
+#define QLA82XX_MINIDUMP_VERSION 0x10101
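Editor's note: assuming one byte per field in the X.Major.Minor.RELEASE layout described above (an assumption, not confirmed by the code here), 0x10101 would decode as 0.1.1.1:

static void md_decode_version(uint32_t v, uint8_t *x, uint8_t *major,
                              uint8_t *minor, uint8_t *release)
{
        *x = v >> 24;                   /* assumed: top byte */
        *major = (v >> 16) & 0xFF;
        *minor = (v >> 8) & 0xFF;
        *release = v & 0xFF;
}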
+
+/*
+ * Entry Type Defines
+ */
+#define QLA82XX_RDNOP 0
+#define QLA82XX_RDCRB 1
+#define QLA82XX_RDMUX 2
+#define QLA82XX_QUEUE 3
+#define QLA82XX_BOARD 4
+#define QLA82XX_RDSRE 5
+#define QLA82XX_RDOCM 6
+#define QLA82XX_CACHE 10
+#define QLA82XX_L1DAT 11
+#define QLA82XX_L1INS 12
+#define QLA82XX_L2DTG 21
+#define QLA82XX_L2ITG 22
+#define QLA82XX_L2DAT 23
+#define QLA82XX_L2INS 24
+#define QLA82XX_RDROM 71
+#define QLA82XX_RDMEM 72
+#define QLA82XX_CNTRL 98
+#define QLA82XX_TLHDR 99
+#define QLA82XX_RDEND 255
+
+/*
+ * Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR 0x01
+#define QLA82XX_DBG_OPCODE_RW 0x02
+#define QLA82XX_DBG_OPCODE_AND 0x04
+#define QLA82XX_DBG_OPCODE_OR 0x08
+#define QLA82XX_DBG_OPCODE_POLL 0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
+
+/*
+ * Template Header and Entry Header definitions start here.
+ */
+
+/*
+ * Template Header
+ * Parts of the template header can be modified by the driver.
+ * These include the saved_state_array, capture_debug_level, driver_timestamp
+ */
+
+#define QLA82XX_DBG_STATE_ARRAY_LEN 16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
+
+/*
+ * Driver Flags
+ */
+#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
+#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */
+
+struct qla82xx_md_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t template_checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info[3];
+
+ uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+
+ /* markers_array used to capture some special locations on board */
+ uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN];
+ uint32_t num_of_free_entries; /* For internal use */
+ uint32_t free_entry_offset; /* For internal use */
+ uint32_t total_table_size; /* For internal use */
+ uint32_t bkup_table_offset; /* For internal use */
+} __packed;
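Editor's note: qla82xx_md_collect() fills driver_capture_mask and driver_info before walking the entries; driver_info[1] packs the driver version as major<<24 | minor<<16 | patch<<8 | beta. A one-line sketch of that packing (helper name is hypothetical):

static uint32_t md_pack_driver_version(uint8_t major, uint8_t minor,
                                       uint8_t patch, uint8_t beta)
{
        return (major << 24) | (minor << 16) | (patch << 8) | beta;
}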
+
+/*
+ * Entry Header: Common to All Entry Types
+ */
+
+/*
+ * Driver Code is for driver to write some info about the entry.
+ * Currently not used.
+ */
+typedef struct qla82xx_md_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+} __packed qla82xx_md_entry_hdr_t;
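Editor's note: every entry begins with this common header, and the next entry sits entry_size bytes further on; entries whose entry_capture_mask does not intersect the driver mask are skipped. A condensed sketch of the walk performed in qla82xx_md_collect() (the handle() callback is hypothetical):

static void md_walk_entries(struct qla82xx_md_template_hdr *tmplt,
                            uint32_t cap_mask,
                            void (*handle)(qla82xx_md_entry_hdr_t *))
{
        qla82xx_md_entry_hdr_t *e = (qla82xx_md_entry_hdr_t *)
            ((uint8_t *)tmplt + tmplt->first_entry_offset);
        uint32_t i;

        for (i = 0; i < tmplt->num_of_entries; i++) {
                if (e->d_ctrl.entry_capture_mask & cap_mask)
                        handle(e);
                e = (qla82xx_md_entry_hdr_t *)((uint8_t *)e + e->entry_size);
        }
}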
+
+/*
+ * Read CRB entry header
+ */
+struct qla82xx_md_entry_crb {
+ qla82xx_md_entry_hdr_t h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+} __packed;
+
+/*
+ * Cache entry header
+ */
+struct qla82xx_md_entry_cache {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+
+ uint32_t data_size;
+ uint32_t op_count;
+
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+} __packed;
+
+/*
+ * Read OCM
+ */
+struct qla82xx_md_entry_rdocm {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+ uint32_t read_addr_cntrl;
+} __packed;
+
+/*
+ * Read Memory
+ */
+struct qla82xx_md_entry_rdmem {
+ qla82xx_md_entry_hdr_t h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/*
+ * Read ROM
+ */
+struct qla82xx_md_entry_rdrom {
+ qla82xx_md_entry_hdr_t h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+struct qla82xx_md_entry_mux {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+} __packed;
+
+struct qla82xx_md_entry_queue {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+} __packed;
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE 0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
+ 0x410000B8, 0x410000BC };
#endif
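Editor's note: one MIU test-agent transaction, as driven by qla82xx_minidump_process_rdmem(), programs the address, kicks the control register, waits for BUSY to clear and then pulls 16 bytes out of the four RDDATA registers. A condensed sketch, with md_wr32()/md_rd32() as hypothetical stand-ins for qla82xx_md_rw_32() in write/read mode:

static int miu_agent_read16(struct qla_hw_data *ha, uint32_t addr,
                            uint32_t out[4])
{
        int i;

        md_wr32(ha, MD_MIU_TEST_AGT_ADDR_LO, addr);
        md_wr32(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
        md_wr32(ha, MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_ENABLE);
        md_wr32(ha, MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_START | MIU_TA_CTL_ENABLE);

        for (i = 0; i < MAX_CTL_CHECK; i++)
                if (!(md_rd32(ha, MD_MIU_TEST_AGT_CTRL) & MIU_TA_CTL_BUSY))
                        break;
        if (i >= MAX_CTL_CHECK)
                return -EIO;            /* agent stayed busy */

        for (i = 0; i < 4; i++)
                out[i] = md_rd32(ha, MD_MIU_TEST_AGT_RDDATA[i]);
        return 0;
}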
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1e69527f1e4..fd14c7bfc62 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -143,7 +143,7 @@ MODULE_PARM_DESC(ql2xmultique_tag,
"Set it to 1 to turn on the cpu affinity.");
int ql2xfwloadbin;
-module_param(ql2xfwloadbin, int, S_IRUGO);
+module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
"Option to specify location from which to load ISP firmware:.\n"
" 2 -- load firmware via the request_firmware() (hotplug).\n"
@@ -158,11 +158,11 @@ MODULE_PARM_DESC(ql2xetsenable,
"Default is 0 - skip ETS enablement.");
int ql2xdbwr = 1;
-module_param(ql2xdbwr, int, S_IRUGO);
+module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
- "Option to specify scheme for request queue posting.\n"
- " 0 -- Regular doorbell.\n"
- " 1 -- CAMRAM doorbell (faster).\n");
+ "Option to specify scheme for request queue posting.\n"
+ " 0 -- Regular doorbell.\n"
+ " 1 -- CAMRAM doorbell (faster).\n");
int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
@@ -183,11 +183,11 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
int ql2xdontresethba;
-module_param(ql2xdontresethba, int, S_IRUGO);
+module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
- "Option to specify reset behaviour.\n"
- " 0 (Default) -- Reset on failure.\n"
- " 1 -- Do not reset on failure.\n");
+ "Option to specify reset behaviour.\n"
+ " 0 (Default) -- Reset on failure.\n"
+ " 1 -- Do not reset on failure.\n");
uint ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, uint, S_IRUGO);
@@ -195,6 +195,19 @@ MODULE_PARM_DESC(ql2xmaxlun,
"Defines the maximum LU number to register with the SCSI "
"midlayer. Default is 65535.");
+int ql2xmdcapmask = 0x1F;
+module_param(ql2xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdcapmask,
+ "Set the Minidump driver capture mask level. "
+ "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+
+int ql2xmdenable;
+module_param(ql2xmdenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdenable,
+ "Enable/disable MiniDump. "
+ "0 (Default) - MiniDump disabled. "
+ "1 - MiniDump enabled.");
+
/*
* SCSI host template entry points
*/
@@ -1750,9 +1763,9 @@ static struct isp_operations qla82xx_isp_ops = {
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
.fw_dump = qla24xx_fw_dump,
- .beacon_on = qla24xx_beacon_on,
- .beacon_off = qla24xx_beacon_off,
- .beacon_blink = qla24xx_beacon_blink,
+ .beacon_on = qla82xx_beacon_on,
+ .beacon_off = qla82xx_beacon_off,
+ .beacon_blink = NULL,
.read_optrom = qla82xx_read_optrom_data,
.write_optrom = qla82xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
@@ -2670,6 +2683,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
qla2x00_mem_free(ha);
+ qla82xx_md_free(vha);
+
qla2x00_free_queues(ha);
}
@@ -3903,8 +3918,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Check if beacon LED needs to be blinked for physical host only */
if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
- set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
- start_dpc++;
+ /* There is no beacon_blink function for ISP82xx */
+ if (!IS_QLA82XX(ha)) {
+ set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
+ start_dpc++;
+ }
}
/* Process any deferred work. */
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 0f5599e0abf..f1ad02ea212 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -2,6 +2,7 @@ config SCSI_QLA_ISCSI
tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
+ select ISCSI_BOOT_SYSFS
---help---
This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 252523d7847..5b44139ff43 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
- ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
+ ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 864d018631c..0b0a7d42137 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -55,15 +55,91 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
ha->bootload_patch, ha->bootload_build);
}
+static ssize_t
+qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
+}
+
+static ssize_t
+qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ qla4xxx_get_firmware_state(ha);
+ return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state,
+ ha->addl_fw_state);
+}
+
+static ssize_t
+qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (!is_qla8022(ha))
+ return -ENOSYS;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
+}
+
+static ssize_t
+qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (!is_qla8022(ha))
+ return -ENOSYS;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
+}
+
+static ssize_t
+qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (!is_qla8022(ha))
+ return -ENOSYS;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
+}
+
+static ssize_t
+qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
+}
+
static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL);
+static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL);
+static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
+static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
+static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
+static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
struct device_attribute *qla4xxx_host_attrs[] = {
&dev_attr_fw_version,
&dev_attr_serial_num,
&dev_attr_iscsi_version,
&dev_attr_optrom_version,
+ &dev_attr_board_id,
+ &dev_attr_fw_state,
+ &dev_attr_phy_port_cnt,
+ &dev_attr_phy_port_num,
+ &dev_attr_iscsi_func_cnt,
+ &dev_attr_hba_model,
NULL,
};
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
new file mode 100644
index 00000000000..8acdc582ff6
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -0,0 +1,513 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_bsg.h"
+
+static int
+qla4xxx_read_flash(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t offset = 0;
+ uint32_t length = 0;
+ dma_addr_t flash_dma;
+ uint8_t *flash = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (ha->flash_state != QLFLASH_WAITING) {
+ ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+ "active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ ha->flash_state = QLFLASH_READING;
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ length = bsg_job->reply_payload.payload_len;
+
+ flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+ GFP_KERNEL);
+ if (!flash) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ flash, length);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+ ha->flash_state = QLFLASH_WAITING;
+ return rval;
+}
+
+static int
+qla4xxx_update_flash(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t length = 0;
+ uint32_t offset = 0;
+ uint32_t options = 0;
+ dma_addr_t flash_dma;
+ uint8_t *flash = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (ha->flash_state != QLFLASH_WAITING) {
+ ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+ "active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ ha->flash_state = QLFLASH_WRITING;
+ length = bsg_job->request_payload.payload_len;
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+ flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+ GFP_KERNEL);
+ if (!flash) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, flash, length);
+
+ rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+ ha->flash_state = QLFLASH_WAITING;
+ return rval;
+}
+
+static int
+qla4xxx_get_acb_state(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t status[MBOX_REG_COUNT];
+ uint32_t acb_idx;
+ uint32_t ip_idx;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 4022 and above adapters are supported */
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (bsg_job->reply_payload.payload_len < sizeof(status)) {
+ ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
+ __func__, bsg_job->reply_payload.payload_len);
+ rval = -EINVAL;
+ goto leave;
+ }
+
+ acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+ rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ status, sizeof(status));
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_read_nvram(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t offset = 0;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ dma_addr_t nvram_dma;
+ uint8_t *nvram = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 40xx adapters are supported */
+ if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->reply_payload.payload_len;
+ total_len = offset + len;
+
+ /* total len should not be greater than max NVRAM size */
+ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+ ((is_qla4022(ha) || is_qla4032(ha)) &&
+ total_len > QL40X2_NVRAM_SIZE)) {
+ ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+ " nvram size, offset=%d len=%d\n",
+ __func__, offset, len);
+ goto leave;
+ }
+
+ nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+ GFP_KERNEL);
+ if (!nvram) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ nvram, len);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_update_nvram(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t offset = 0;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ dma_addr_t nvram_dma;
+ uint8_t *nvram = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->request_payload.payload_len;
+ total_len = offset + len;
+
+ /* total len should not be greater than max NVRAM size */
+ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+ ((is_qla4022(ha) || is_qla4032(ha)) &&
+ total_len > QL40X2_NVRAM_SIZE)) {
+ ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+ " nvram size, offset=%d len=%d\n",
+ __func__, offset, len);
+ goto leave;
+ }
+
+ nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+ GFP_KERNEL);
+ if (!nvram) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, nvram, len);
+
+ rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_restore_defaults(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t region = 0;
+ uint32_t field0 = 0;
+ uint32_t field1 = 0;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+ field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+
+ rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t acb_type = 0;
+ uint32_t len = 0;
+ dma_addr_t acb_dma;
+ uint8_t *acb = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 4022 and above adapters are supported */
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->reply_payload.payload_len;
+ if (len < sizeof(struct addr_ctrl_blk)) {
+ ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
+ __func__, len);
+ rval = -EINVAL;
+ goto leave;
+ }
+
+ acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ acb, len);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
+leave:
+ return rval;
+}
+
+/**
+ * qla4xxx_process_vendor_specific - handle vendor specific bsg request
+ * @job: iscsi_bsg_job to handle
+ **/
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
+{
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+
+ switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QLISCSI_VND_READ_FLASH:
+ return qla4xxx_read_flash(bsg_job);
+
+ case QLISCSI_VND_UPDATE_FLASH:
+ return qla4xxx_update_flash(bsg_job);
+
+ case QLISCSI_VND_GET_ACB_STATE:
+ return qla4xxx_get_acb_state(bsg_job);
+
+ case QLISCSI_VND_READ_NVRAM:
+ return qla4xxx_read_nvram(bsg_job);
+
+ case QLISCSI_VND_UPDATE_NVRAM:
+ return qla4xxx_update_nvram(bsg_job);
+
+ case QLISCSI_VND_RESTORE_DEFAULTS:
+ return qla4xxx_restore_defaults(bsg_job);
+
+ case QLISCSI_VND_GET_ACB:
+ return qla4xxx_bsg_get_acb(bsg_job);
+
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
+ "0x%x\n", __func__, bsg_req->msgcode);
+ bsg_reply->result = (DID_ERROR << 16);
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return -ENOSYS;
+ }
+}
+
+/**
+ * qla4xxx_bsg_request - handle bsg request from ISCSI transport
+ * @job: iscsi_bsg_job to handle
+ */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job)
+{
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+
+ switch (bsg_req->msgcode) {
+ case ISCSI_BSG_HST_VENDOR:
+ return qla4xxx_process_vendor_specific(bsg_job);
+
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
+ __func__, bsg_req->msgcode);
+ }
+
+ return -ENOSYS;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
new file mode 100644
index 00000000000..c6a0364509f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -0,0 +1,19 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#ifndef __QL4_BSG_H
+#define __QL4_BSG_H
+
+/* BSG Vendor specific commands */
+#define QLISCSI_VND_READ_FLASH 1
+#define QLISCSI_VND_UPDATE_FLASH 2
+#define QLISCSI_VND_GET_ACB_STATE 3
+#define QLISCSI_VND_READ_NVRAM 4
+#define QLISCSI_VND_UPDATE_NVRAM 5
+#define QLISCSI_VND_RESTORE_DEFAULTS 6
+#define QLISCSI_VND_GET_ACB 7
+
+#endif
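Editor's note: the handlers in ql4_bsg.c read the opcode from vendor_cmd[0] and per-command arguments from the following words (for example, the flash/NVRAM offset in vendor_cmd[1] and the write options in vendor_cmd[2]); transfer lengths come from the BSG request/reply payload sizes. A hypothetical view of the layout for QLISCSI_VND_READ_FLASH, purely for illustration:

struct ql4_vnd_read_flash_args {
        uint32_t opcode;        /* vendor_cmd[0] == QLISCSI_VND_READ_FLASH */
        uint32_t offset;        /* vendor_cmd[1]: flash offset to read from */
        /* read length is implied by the BSG reply payload length */
};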
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 473c5c872b3..ace637bf254 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/aer.h>
+#include <linux/bsg-lib.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
@@ -33,9 +34,14 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
+#include <scsi/libiscsi.h>
#include "ql4_dbg.h"
#include "ql4_nx.h"
+#include "ql4_fw.h"
+#include "ql4_nvram.h"
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
@@ -109,7 +115,7 @@
#define MAX_BUSES 1
#define MAX_TARGETS MAX_DEV_DB_ENTRIES
#define MAX_LUNS 0xffff
-#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
+#define MAX_AEN_ENTRIES MAX_DEV_DB_ENTRIES
#define MAX_DDB_ENTRIES MAX_DEV_DB_ENTRIES
#define MAX_PDU_ENTRIES 32
#define INVALID_ENTRY 0xFFFF
@@ -166,6 +172,7 @@
#define RELOGIN_TOV 18
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
+#define DISABLE_ACB_TOV 30
#define MAX_RESET_HA_RETRIES 2
@@ -227,52 +234,12 @@ struct ql4_aen_log {
* Device Database (DDB) structure
*/
struct ddb_entry {
- struct list_head list; /* ddb list */
struct scsi_qla_host *ha;
struct iscsi_cls_session *sess;
struct iscsi_cls_conn *conn;
- atomic_t state; /* DDB State */
-
- unsigned long flags; /* DDB Flags */
-
uint16_t fw_ddb_index; /* DDB firmware index */
- uint16_t options;
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
-
- uint32_t CmdSn;
- uint16_t target_session_id;
- uint16_t connection_id;
- uint16_t exe_throttle; /* Max mumber of cmds outstanding
- * simultaneously */
- uint16_t task_mgmt_timeout; /* Min time for task mgmt cmds to
- * complete */
- uint16_t default_relogin_timeout; /* Max time to wait for
- * relogin to complete */
- uint16_t tcp_source_port_num;
- uint32_t default_time2wait; /* Default Min time between
- * relogins (+aens) */
-
- atomic_t retry_relogin_timer; /* Min Time between relogins
- * (4000 only) */
- atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
- atomic_t relogin_retry_count; /* Num of times relogin has been
- * retried */
-
- uint16_t port;
- uint32_t tpgt;
- uint8_t ip_addr[IP_ADDR_LEN];
- uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
- uint8_t iscsi_alias[0x20];
- uint8_t isid[6];
- uint16_t iscsi_max_burst_len;
- uint16_t iscsi_max_outsnd_r2t;
- uint16_t iscsi_first_burst_len;
- uint16_t iscsi_max_rcv_data_seg_len;
- uint16_t iscsi_max_snd_data_seg_len;
-
- struct in6_addr remote_ipv6_addr;
- struct in6_addr link_local_ipv6_addr;
};
/*
@@ -293,8 +260,6 @@ struct ddb_entry {
#define DF_FO_MASKED 3
-#include "ql4_fw.h"
-#include "ql4_nvram.h"
struct ql82xx_hw_data {
/* Offsets for flash/nvram access (set to ~0 if not used). */
@@ -312,7 +277,10 @@ struct ql82xx_hw_data {
uint32_t flt_region_boot;
uint32_t flt_region_bootload;
uint32_t flt_region_fw;
- uint32_t reserved;
+
+ uint32_t flt_iscsi_param;
+ uint32_t flt_region_chap;
+ uint32_t flt_chap_size;
};
struct qla4_8xxx_legacy_intr_set {
@@ -357,6 +325,68 @@ struct isp_operations {
int (*get_sys_info) (struct scsi_qla_host *);
};
+/*qla4xxx ipaddress configuration details */
+struct ipaddress_config {
+ uint16_t ipv4_options;
+ uint16_t tcp_options;
+ uint16_t ipv4_vlan_tag;
+ uint8_t ipv4_addr_state;
+ uint8_t ip_address[IP_ADDR_LEN];
+ uint8_t subnet_mask[IP_ADDR_LEN];
+ uint8_t gateway[IP_ADDR_LEN];
+ uint32_t ipv6_options;
+ uint32_t ipv6_addl_options;
+ uint8_t ipv6_link_local_state;
+ uint8_t ipv6_addr0_state;
+ uint8_t ipv6_addr1_state;
+ uint8_t ipv6_default_router_state;
+ uint16_t ipv6_vlan_tag;
+ struct in6_addr ipv6_link_local_addr;
+ struct in6_addr ipv6_addr0;
+ struct in6_addr ipv6_addr1;
+ struct in6_addr ipv6_default_router_addr;
+ uint16_t eth_mtu_size;
+ uint16_t ipv4_port;
+ uint16_t ipv6_port;
+};
+
+#define QL4_CHAP_MAX_NAME_LEN 256
+#define QL4_CHAP_MAX_SECRET_LEN 100
+#define LOCAL_CHAP 0
+#define BIDI_CHAP 1
+
+struct ql4_chap_format {
+ u8 intr_chap_name[QL4_CHAP_MAX_NAME_LEN];
+ u8 intr_secret[QL4_CHAP_MAX_SECRET_LEN];
+ u8 target_chap_name[QL4_CHAP_MAX_NAME_LEN];
+ u8 target_secret[QL4_CHAP_MAX_SECRET_LEN];
+ u16 intr_chap_name_length;
+ u16 intr_secret_length;
+ u16 target_chap_name_length;
+ u16 target_secret_length;
+};
+
+struct ip_address_format {
+ u8 ip_type;
+ u8 ip_address[16];
+};
+
+struct ql4_conn_info {
+ u16 dest_port;
+ struct ip_address_format dest_ipaddr;
+ struct ql4_chap_format chap;
+};
+
+struct ql4_boot_session_info {
+ u8 target_name[224];
+ struct ql4_conn_info conn_list[1];
+};
+
+struct ql4_boot_tgt_info {
+ struct ql4_boot_session_info boot_pri_sess;
+ struct ql4_boot_session_info boot_sec_sess;
+};
+
/*
* Linux Host Adapter structure
*/
@@ -451,10 +481,6 @@ struct scsi_qla_host {
/* --- From Init_FW --- */
/* init_cb_t *init_cb; */
uint16_t firmware_options;
- uint16_t tcp_options;
- uint8_t ip_address[IP_ADDR_LEN];
- uint8_t subnet_mask[IP_ADDR_LEN];
- uint8_t gateway[IP_ADDR_LEN];
uint8_t alias[32];
uint8_t name_string[256];
uint8_t heartbeat_interval;
@@ -462,7 +488,7 @@ struct scsi_qla_host {
/* --- From FlashSysInfo --- */
uint8_t my_mac[MAC_ADDR_LEN];
uint8_t serial_number[16];
-
+ uint16_t port_num;
/* --- From GetFwState --- */
uint32_t firmware_state;
uint32_t addl_fw_state;
@@ -524,31 +550,13 @@ struct scsi_qla_host {
volatile uint8_t mbox_status_count;
volatile uint32_t mbox_status[MBOX_REG_COUNT];
- /* local device database list (contains internal ddb entries) */
- struct list_head ddb_list;
-
- /* Map ddb_list entry by FW ddb index */
+ /* FW ddb index map */
struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
/* Saved srb for status continuation entry processing */
struct srb *status_srb;
- /* IPv6 support info from InitFW */
uint8_t acb_version;
- uint8_t ipv4_addr_state;
- uint16_t ipv4_options;
-
- uint32_t resvd2;
- uint32_t ipv6_options;
- uint32_t ipv6_addl_options;
- uint8_t ipv6_link_local_state;
- uint8_t ipv6_addr0_state;
- uint8_t ipv6_addr1_state;
- uint8_t ipv6_default_router_state;
- struct in6_addr ipv6_link_local_addr;
- struct in6_addr ipv6_addr0;
- struct in6_addr ipv6_addr1;
- struct in6_addr ipv6_default_router_addr;
/* qla82xx specific fields */
struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */
@@ -584,6 +592,11 @@ struct scsi_qla_host {
struct completion mbx_intr_comp;
+ struct ipaddress_config ip_config;
+ struct iscsi_iface *iface_ipv4;
+ struct iscsi_iface *iface_ipv6_0;
+ struct iscsi_iface *iface_ipv6_1;
+
/* --- From About Firmware --- */
uint16_t iscsi_major;
uint16_t iscsi_minor;
@@ -591,16 +604,60 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
+
+ uint32_t flash_state;
+#define QLFLASH_WAITING 0
+#define QLFLASH_READING 1
+#define QLFLASH_WRITING 2
+ struct dma_pool *chap_dma_pool;
+ uint8_t *chap_list; /* CHAP table cache */
+ struct mutex chap_sem;
+#define CHAP_DMA_BLOCK_SIZE 512
+ struct workqueue_struct *task_wq;
+ unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
+#define SYSFS_FLAG_FW_SEL_BOOT 2
+ struct iscsi_boot_kset *boot_kset;
+ struct ql4_boot_tgt_info boot_tgt;
+ uint16_t phy_port_num;
+ uint16_t phy_port_cnt;
+ uint16_t iscsi_pci_func_cnt;
+ uint8_t model_name[16];
+ struct completion disable_acb_comp;
+};
+
+struct ql4_task_data {
+ struct scsi_qla_host *ha;
+ uint8_t iocb_req_cnt;
+ dma_addr_t data_dma;
+ void *req_buffer;
+ dma_addr_t req_dma;
+ uint32_t req_len;
+ void *resp_buffer;
+ dma_addr_t resp_dma;
+ uint32_t resp_len;
+ struct iscsi_task *task;
+ struct passthru_status sts;
+ struct work_struct task_work;
+};
+
+struct qla_endpoint {
+ struct Scsi_Host *host;
+ struct sockaddr dst_addr;
+};
+
+struct qla_conn {
+ struct qla_endpoint *qla_ep;
};
static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
{
- return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0);
+ return ((ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) != 0);
}
static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
{
- return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
+ return ((ha->ip_config.ipv6_options &
+ IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
}
static inline int is_qla4010(struct scsi_qla_host *ha)
@@ -618,6 +675,11 @@ static inline int is_qla4032(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
}
+static inline int is_qla40XX(struct scsi_qla_host *ha)
+{
+ return is_qla4032(ha) || is_qla4022(ha) || is_qla4010(ha);
+}
+
static inline int is_qla8022(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
@@ -640,7 +702,7 @@ static inline int adapter_up(struct scsi_qla_host *ha)
static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
{
- return (struct scsi_qla_host *)shost->hostdata;
+ return (struct scsi_qla_host *)iscsi_host_priv(shost);
}
static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
@@ -760,6 +822,16 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
}
+static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
+{
+ return test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+ test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+}
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 01082aa7709..cbd5a20dbbd 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -146,6 +146,13 @@ struct isp_reg {
#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
+/* nvram address for 4032 */
+#define NVRAM_PORT0_BOOT_MODE 0x03b1
+#define NVRAM_PORT0_BOOT_PRI_TGT 0x03b2
+#define NVRAM_PORT0_BOOT_SEC_TGT 0x03bb
+#define NVRAM_PORT1_BOOT_MODE 0x07b1
+#define NVRAM_PORT1_BOOT_PRI_TGT 0x07b2
+#define NVRAM_PORT1_BOOT_SEC_TGT 0x07bb
/* Page # defines for 4022 */
@@ -194,6 +201,9 @@ static inline uint32_t clr_rmask(uint32_t val)
/* ISP 4022 nvram definitions */
#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */
+#define QL4010_NVRAM_SIZE 0x200
+#define QL40X2_NVRAM_SIZE 0x800
+
/* ISP port_status definitions */
/* ISP Semaphore definitions */
@@ -241,6 +251,8 @@ union external_hw_config_reg {
#define FA_BOOT_CODE_ADDR_82 0x20000
#define FA_RISC_CODE_ADDR_82 0x40000
#define FA_GOLD_RISC_CODE_ADDR_82 0x80000
+#define FA_FLASH_ISCSI_CHAP 0x540000
+#define FA_FLASH_CHAP_SIZE 0xC0000
/* Flash Description Table */
struct qla_fdt_layout {
@@ -296,8 +308,11 @@ struct qla_flt_header {
#define FLT_REG_FLT 0x1c
#define FLT_REG_BOOTLOAD_82 0x72
#define FLT_REG_FW_82 0x74
+#define FLT_REG_FW_82_1 0x97
#define FLT_REG_GOLD_FW_82 0x75
#define FLT_REG_BOOT_CODE_82 0x78
+#define FLT_REG_ISCSI_PARAM 0x65
+#define FLT_REG_ISCSI_CHAP 0x63
struct qla_flt_region {
uint32_t code;
@@ -331,9 +346,11 @@ struct qla_flt_region {
#define MBOX_CMD_WRITE_FLASH 0x0025
#define MBOX_CMD_READ_FLASH 0x0026
#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
+#define MBOX_CMD_CONN_OPEN 0x0074
#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
-#define LOGOUT_OPTION_CLOSE_SESSION 0x01
-#define LOGOUT_OPTION_RELOGIN 0x02
+#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
+#define LOGOUT_OPTION_RELOGIN 0x0004
+#define LOGOUT_OPTION_FREE_DDB 0x0008
#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
@@ -342,12 +359,15 @@ struct qla_flt_region {
#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064
#define DDB_DS_UNASSIGNED 0x00
#define DDB_DS_NO_CONNECTION_ACTIVE 0x01
+#define DDB_DS_DISCOVERY 0x02
#define DDB_DS_SESSION_ACTIVE 0x04
#define DDB_DS_SESSION_FAILED 0x06
#define DDB_DS_LOGIN_IN_PROCESS 0x07
#define MBOX_CMD_GET_FW_STATE 0x0069
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
#define MBOX_CMD_GET_SYS_INFO 0x0078
+#define MBOX_CMD_GET_NVRAM 0x0078 /* For 40xx */
+#define MBOX_CMD_SET_NVRAM 0x0079 /* For 40xx */
#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
#define MBOX_CMD_SET_ACB 0x0088
#define MBOX_CMD_GET_ACB 0x0089
@@ -375,7 +395,10 @@ struct qla_flt_region {
#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
#define FW_ADDSTATE_LINK_UP 0x0010
#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
+
#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
+#define IPV6_DEFAULT_DDB_ENTRY 0x0001
+
#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074
#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
@@ -434,6 +457,14 @@ struct qla_flt_region {
#define ACB_STATE_VALID 0x05
#define ACB_STATE_DISABLING 0x06
+/* FLASH offsets */
+#define FLASH_SEGMENT_IFCB 0x04000000
+
+#define FLASH_OPT_RMW_HOLD 0
+#define FLASH_OPT_RMW_INIT 1
+#define FLASH_OPT_COMMIT 2
+#define FLASH_OPT_RMW_COMMIT 3
+
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
@@ -455,7 +486,8 @@ struct addr_ctrl_blk {
uint8_t res0; /* 07 */
uint16_t eth_mtu_size; /* 08-09 */
uint16_t add_fw_options; /* 0A-0B */
-#define SERIALIZE_TASK_MGMT 0x0400
+#define ADFWOPT_SERIALIZE_TASK_MGMT 0x0400
+#define ADFWOPT_AUTOCONN_DISABLE 0x0002
uint8_t hb_interval; /* 0C */
uint8_t inst_num; /* 0D */
@@ -473,8 +505,10 @@ struct addr_ctrl_blk {
uint16_t iscsi_opts; /* 30-31 */
uint16_t ipv4_tcp_opts; /* 32-33 */
+#define TCPOPT_DHCP_ENABLE 0x0200
uint16_t ipv4_ip_opts; /* 34-35 */
-#define IPOPT_IPv4_PROTOCOL_ENABLE 0x8000
+#define IPOPT_IPV4_PROTOCOL_ENABLE 0x8000
+#define IPOPT_VLAN_TAGGING_ENABLE 0x2000
uint16_t iscsi_max_pdu_size; /* 36-37 */
uint8_t ipv4_tos; /* 38 */
@@ -526,6 +560,7 @@ struct addr_ctrl_blk {
uint16_t ipv6_port; /* 204-205 */
uint16_t ipv6_opts; /* 206-207 */
#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
+#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000
uint16_t ipv6_addtl_opts; /* 208-209 */
#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
@@ -574,13 +609,105 @@ struct init_fw_ctrl_blk {
/* struct addr_ctrl_blk sec;*/
};
+#define PRIMARI_ACB 0
+#define SECONDARY_ACB 1
+
+struct addr_ctrl_blk_def {
+ uint8_t reserved1[1]; /* 00 */
+ uint8_t control; /* 01 */
+ uint8_t reserved2[11]; /* 02-0C */
+ uint8_t inst_num; /* 0D */
+ uint8_t reserved3[34]; /* 0E-2F */
+ uint16_t iscsi_opts; /* 30-31 */
+ uint16_t ipv4_tcp_opts; /* 32-33 */
+ uint16_t ipv4_ip_opts; /* 34-35 */
+ uint16_t iscsi_max_pdu_size; /* 36-37 */
+ uint8_t ipv4_tos; /* 38 */
+ uint8_t ipv4_ttl; /* 39 */
+ uint8_t reserved4[2]; /* 3A-3B */
+ uint16_t def_timeout; /* 3C-3D */
+ uint16_t iscsi_fburst_len; /* 3E-3F */
+ uint8_t reserved5[4]; /* 40-43 */
+ uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
+ uint8_t reserved6[2]; /* 46-47 */
+ uint16_t ipv4_port; /* 48-49 */
+ uint16_t iscsi_max_burst_len; /* 4A-4B */
+ uint8_t reserved7[4]; /* 4C-4F */
+ uint8_t ipv4_addr[4]; /* 50-53 */
+ uint16_t ipv4_vlan_tag; /* 54-55 */
+ uint8_t ipv4_addr_state; /* 56 */
+ uint8_t ipv4_cacheid; /* 57 */
+ uint8_t reserved8[8]; /* 58-5F */
+ uint8_t ipv4_subnet[4]; /* 60-63 */
+ uint8_t reserved9[12]; /* 64-6F */
+ uint8_t ipv4_gw_addr[4]; /* 70-73 */
+ uint8_t reserved10[84]; /* 74-C7 */
+ uint8_t abort_timer; /* C8 */
+ uint8_t ipv4_tcp_wsf; /* C9 */
+ uint8_t reserved11[10]; /* CA-D3 */
+ uint8_t ipv4_dhcp_vid_len; /* D4 */
+ uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
+ uint8_t reserved12[20]; /* E0-F3 */
+ uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
+ uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
+ uint8_t iscsi_name[224]; /* 100-1DF */
+ uint8_t reserved13[32]; /* 1E0-1FF */
+ uint32_t cookie; /* 200-203 */
+ uint16_t ipv6_port; /* 204-205 */
+ uint16_t ipv6_opts; /* 206-207 */
+ uint16_t ipv6_addtl_opts; /* 208-209 */
+ uint16_t ipv6_tcp_opts; /* 20A-20B */
+ uint8_t ipv6_tcp_wsf; /* 20C */
+ uint16_t ipv6_flow_lbl; /* 20D-20F */
+ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
+ uint16_t ipv6_vlan_tag; /* 220-221 */
+ uint8_t ipv6_lnk_lcl_addr_state; /* 222 */
+ uint8_t ipv6_addr0_state; /* 223 */
+ uint8_t ipv6_addr1_state; /* 224 */
+ uint8_t ipv6_dflt_rtr_state; /* 225 */
+ uint8_t ipv6_traffic_class; /* 226 */
+ uint8_t ipv6_hop_limit; /* 227 */
+ uint8_t ipv6_if_id[8]; /* 228-22F */
+ uint8_t ipv6_addr0[16]; /* 230-23F */
+ uint8_t ipv6_addr1[16]; /* 240-24F */
+ uint32_t ipv6_nd_reach_time; /* 250-253 */
+ uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
+ uint32_t ipv6_nd_stale_timeout; /* 258-25B */
+ uint8_t ipv6_dup_addr_detect_count; /* 25C */
+ uint8_t ipv6_cache_id; /* 25D */
+ uint8_t reserved14[18]; /* 25E-26F */
+ uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
+ uint8_t reserved15[140]; /* 274-2FF */
+};
+
/*************************************************************************/
+#define MAX_CHAP_ENTRIES_40XX 128
+#define MAX_CHAP_ENTRIES_82XX 1024
+#define MAX_RESRV_CHAP_IDX 3
+#define FLASH_CHAP_OFFSET 0x06000000
+
+struct ql4_chap_table {
+ uint16_t link;
+ uint8_t flags;
+ uint8_t secret_len;
+#define MIN_CHAP_SECRET_LEN 12
+#define MAX_CHAP_SECRET_LEN 100
+ uint8_t secret[MAX_CHAP_SECRET_LEN];
+#define MAX_CHAP_NAME_LEN 256
+ uint8_t name[MAX_CHAP_NAME_LEN];
+ uint16_t reserved;
+#define CHAP_VALID_COOKIE 0x4092
+#define CHAP_INVALID_COOKIE 0xFFEE
+ uint16_t cookie;
+};
+
struct dev_db_entry {
uint16_t options; /* 00-01 */
#define DDB_OPT_DISC_SESSION 0x10
#define DDB_OPT_TARGET 0x02 /* device is a target */
#define DDB_OPT_IPV6_DEVICE 0x100
+#define DDB_OPT_AUTO_SENDTGTS_DISABLE 0x40
#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
@@ -591,6 +718,7 @@ struct dev_db_entry {
uint16_t tcp_options; /* 0A-0B */
uint16_t ip_options; /* 0C-0D */
uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
+#define BYTE_UNITS 512
uint32_t res1; /* 10-13 */
uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */
uint16_t iscsi_first_burst_len; /* 16-17 */
@@ -627,7 +755,10 @@ struct dev_db_entry {
uint8_t tcp_rcv_wsf; /* 1C7 */
uint32_t stat_sn; /* 1C8-1CB */
uint32_t exp_stat_sn; /* 1CC-1CF */
- uint8_t res6[0x30]; /* 1D0-1FF */
+ uint8_t res6[0x2b]; /* 1D0-1FB */
+#define DDB_VALID_COOKIE 0x9034
+ uint16_t cookie; /* 1FC-1FD */
+ uint16_t len; /* 1FE-1FF */
};
/*************************************************************************/
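A minimal sketch of how the new cookie and len fields at the tail of struct dev_db_entry might be used to validate an entry read back from flash. The helper below is hypothetical and not taken from this patch; it only assumes that DDB_VALID_COOKIE marks a completely written entry.

static inline int ql4_flash_ddb_valid(struct dev_db_entry *fw_ddb_entry)
{
	/* An erased or partially written flash slot will not carry the signature. */
	if (le16_to_cpu(fw_ddb_entry->cookie) != DDB_VALID_COOKIE)
		return 0;

	/* len records how much of the 0x200-byte entry was actually written. */
	if (le16_to_cpu(fw_ddb_entry->len) > sizeof(struct dev_db_entry))
		return 0;

	return 1;
}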
@@ -639,6 +770,14 @@ struct dev_db_entry {
#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
* for EOF
* signature */
+#define FLASH_RAW_ACCESS_ADDR 0x8e000000
+
+#define BOOT_PARAM_OFFSET_PORT0 0x3b0
+#define BOOT_PARAM_OFFSET_PORT1 0x7b0
+
+#define FLASH_OFFSET_DB_INFO 0x05000000
+#define FLASH_OFFSET_DB_END (FLASH_OFFSET_DB_INFO + 0x7fff)
+
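A small worked check of these constants (arithmetic only; the macro names below are illustrative and not part of the patch): the region is taken as inclusive of FLASH_OFFSET_DB_END, and struct dev_db_entry is assumed to occupy 0x200 bytes, as its offset comments indicate.

/* Illustrative only: 0x7fff + 1 == 0x8000 bytes of flash DDB space, i.e.
 * 0x8000 / 0x200 == 64 dev_db_entry slots, which is the span that
 * qla4xxx_bootdb_by_index() range-checks against further below. */
#define QL4_FLASH_DDB_REGION_LEN (FLASH_OFFSET_DB_END - FLASH_OFFSET_DB_INFO + 1)
#define QL4_FLASH_MAX_BOOT_DDBS (QL4_FLASH_DDB_REGION_LEN / 0x200)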
struct sys_info_phys_addr {
uint8_t address[6]; /* 00-05 */
@@ -774,6 +913,7 @@ struct qla4_header {
uint8_t entryStatus;
uint8_t systemDefined;
+#define SD_ISCSI_PDU 0x01
uint8_t entryCount;
/* SyetemDefined definition */
@@ -931,21 +1071,22 @@ struct passthru0 {
struct qla4_header hdr; /* 00-03 */
uint32_t handle; /* 04-07 */
uint16_t target; /* 08-09 */
- uint16_t connectionID; /* 0A-0B */
+ uint16_t connection_id; /* 0A-0B */
#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000)
- uint16_t controlFlags; /* 0C-0D */
+ uint16_t control_flags; /* 0C-0D */
#define PT_FLAG_ETHERNET_FRAME 0x8000
#define PT_FLAG_ISNS_PDU 0x8000
#define PT_FLAG_SEND_BUFFER 0x0200
#define PT_FLAG_WAIT_4_RESPONSE 0x0100
+#define PT_FLAG_ISCSI_PDU 0x1000
uint16_t timeout; /* 0E-0F */
#define PT_DEFAULT_TIMEOUT 30 /* seconds */
- struct data_seg_a64 outDataSeg64; /* 10-1B */
+ struct data_seg_a64 out_dsd; /* 10-1B */
uint32_t res1; /* 1C-1F */
- struct data_seg_a64 inDataSeg64; /* 20-2B */
+ struct data_seg_a64 in_dsd; /* 20-2B */
uint8_t res2[20]; /* 2C-3F */
};
@@ -978,4 +1119,43 @@ struct response {
#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
};
+struct ql_iscsi_stats {
+ uint8_t reserved1[656]; /* 0000-028F */
+ uint32_t tx_cmd_pdu; /* 0290-0293 */
+ uint32_t tx_resp_pdu; /* 0294-0297 */
+ uint32_t rx_cmd_pdu; /* 0298-029B */
+ uint32_t rx_resp_pdu; /* 029C-029F */
+
+ uint64_t tx_data_octets; /* 02A0-02A7 */
+ uint64_t rx_data_octets; /* 02A8-02AF */
+
+ uint32_t hdr_digest_err; /* 02B0-02B3 */
+ uint32_t data_digest_err; /* 02B4-02B7 */
+ uint32_t conn_timeout_err; /* 02B8-02BB */
+ uint32_t framing_err; /* 02BC-02BF */
+
+ uint32_t tx_nopout_pdus; /* 02C0-02C3 */
+ uint32_t tx_scsi_cmd_pdus; /* 02C4-02C7 */
+ uint32_t tx_tmf_cmd_pdus; /* 02C8-02CB */
+ uint32_t tx_login_cmd_pdus; /* 02CC-02CF */
+ uint32_t tx_text_cmd_pdus; /* 02D0-02D3 */
+ uint32_t tx_scsi_write_pdus; /* 02D4-02D7 */
+ uint32_t tx_logout_cmd_pdus; /* 02D8-02DB */
+ uint32_t tx_snack_req_pdus; /* 02DC-02DF */
+
+ uint32_t rx_nopin_pdus; /* 02E0-02E3 */
+ uint32_t rx_scsi_resp_pdus; /* 02E4-02E7 */
+ uint32_t rx_tmf_resp_pdus; /* 02E8-02EB */
+ uint32_t rx_login_resp_pdus; /* 02EC-02EF */
+ uint32_t rx_text_resp_pdus; /* 02F0-02F3 */
+ uint32_t rx_scsi_read_pdus; /* 02F4-02F7 */
+ uint32_t rx_logout_resp_pdus; /* 02F8-02FB */
+
+ uint32_t rx_r2t_pdus; /* 02FC-02FF */
+ uint32_t rx_async_pdus; /* 0300-0303 */
+ uint32_t rx_reject_pdus; /* 0304-0307 */
+
+ uint8_t reserved2[264]; /* 0x0308 - 0x040F */
+};
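A quick consistency check of the offsets documented in the comments above, written as a standalone C11 sketch (assuming the structure carries no implicit padding); the figures are plain arithmetic, not taken from firmware documentation.

#include <assert.h>

/* reserved1 runs up to the first counter at 0x0290. */
static_assert(656 == 0x290, "reserved1 ends at tx_cmd_pdu");
/* Four 32-bit PDU counters, then two 64-bit octet counters. */
static_assert(0x290 + 4 * 4 == 0x2A0, "tx_data_octets starts at 0x02A0");
static_assert(0x2A0 + 2 * 8 == 0x2B0, "hdr_digest_err starts at 0x02B0");
/* 4 error + 8 tx + 7 rx + 3 misc 32-bit counters reach reserved2. */
static_assert(0x2B0 + (4 + 8 + 7 + 3) * 4 == 0x308, "reserved2 starts at 0x0308");
/* reserved2 pads the structure out to 0x410 bytes. */
static_assert(0x308 + 264 == 0x410, "struct ql_iscsi_stats is 0x410 bytes");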
+
#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index a53a256c1f8..160db9d5ea2 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -12,20 +12,15 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
-int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
- uint8_t renew_ddb_list);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
-void qla4xxx_free_ddb_list(struct scsi_qla_host *ha);
void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
-int qla4xxx_relogin_device(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry);
int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
int lun);
@@ -51,15 +46,24 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t *connection_id);
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
- dma_addr_t fw_ddb_entry_dma);
-
-void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry);
+ dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts);
+uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma);
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+ uint16_t fw_ddb_index,
+ uint16_t connection_id,
+ uint16_t option);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+ uint32_t acb_type, uint32_t len);
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+ uint32_t ip_idx, uint32_t *sts);
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session);
u16 rd_nvram_word(struct scsi_qla_host *ha, int offset);
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset);
void qla4xxx_get_crash_record(struct scsi_qla_host *ha);
-struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
-int qla4xxx_add_sess(struct ddb_entry *);
-void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
int qla4xxx_about_firmware(struct scsi_qla_host *ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
@@ -68,14 +72,13 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha);
void qla4xxx_srb_compl(struct kref *ref);
struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
uint32_t index);
-int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha);
int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t state, uint32_t conn_error);
void qla4xxx_dump_buffer(void *b, uint32_t size);
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
-int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
-
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+ uint32_t offset, uint32_t length, uint32_t options);
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
@@ -95,6 +98,11 @@ void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
void qla4xxx_dump_registers(struct scsi_qla_host *ha);
+uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd,
+ uint32_t *mbox_sts,
+ struct addr_ctrl_blk *init_fw_cb,
+ dma_addr_t init_fw_cb_dma);
void qla4_8xxx_pci_config(struct scsi_qla_host *);
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
@@ -134,6 +142,37 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index);
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct iscsi_cls_conn *cls_conn,
+ uint32_t *mbx_sts);
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry, int options);
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t *mbx_sts);
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
+int qla4xxx_send_passthru0(struct iscsi_task *task);
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+ uint16_t stats_size, dma_addr_t stats_dma);
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username,
+ char *password, uint16_t idx);
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size);
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size);
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+ uint32_t region, uint32_t field0,
+ uint32_t field1);
+
+/* BSG Functions */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job);
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 42ed5db2d53..3075fbaef55 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -11,9 +11,6 @@
#include "ql4_dbg.h"
#include "ql4_inline.h"
-static struct ddb_entry *qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index);
-
static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
{
uint32_t value;
@@ -48,41 +45,15 @@ static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
* @ha: pointer to host adapter structure.
* @ddb_entry: pointer to device database entry
*
- * This routine deallocates and unlinks the specified ddb_entry from the
- * adapter's
+ * This routine marks a DDB entry INVALID
**/
void qla4xxx_free_ddb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
- /* Remove device entry from list */
- list_del_init(&ddb_entry->list);
-
/* Remove device pointer from index mapping arrays */
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
(struct ddb_entry *) INVALID_ENTRY;
ha->tot_ddbs--;
-
- /* Free memory and scsi-ml struct for device entry */
- qla4xxx_destroy_sess(ddb_entry);
-}
-
-/**
- * qla4xxx_free_ddb_list - deallocate all ddbs
- * @ha: pointer to host adapter structure.
- *
- * This routine deallocates and removes all devices on the sppecified adapter.
- **/
-void qla4xxx_free_ddb_list(struct scsi_qla_host *ha)
-{
- struct list_head *ptr;
- struct ddb_entry *ddb_entry;
-
- while (!list_empty(&ha->ddb_list)) {
- ptr = ha->ddb_list.next;
- /* Free memory for device entry and remove */
- ddb_entry = list_entry(ptr, struct ddb_entry, list);
- qla4xxx_free_ddb(ha, ddb_entry);
- }
}
/**
@@ -236,38 +207,44 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
ipv4_wait = 1;
}
- if (((ha->ipv6_addl_options &
- IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
- ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) ||
- (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) ||
- (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) {
+ if (((ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
+ ((ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ip_config.ipv6_addr0_state ==
+ IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ip_config.ipv6_addr1_state ==
+ IP_ADDRSTATE_ACQUIRING))) {
ipv6_wait = 1;
- if ((ha->ipv6_link_local_state ==
- IP_ADDRSTATE_PREFERRED) ||
- (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) ||
- (ha->ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) {
+ if ((ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_PREFERRED) ||
+ (ha->ip_config.ipv6_addr0_state ==
+ IP_ADDRSTATE_PREFERRED) ||
+ (ha->ip_config.ipv6_addr1_state ==
+ IP_ADDRSTATE_PREFERRED)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
"Preferred IP configured."
" Don't wait!\n", ha->host_no,
__func__));
ipv6_wait = 0;
}
- if (memcmp(&ha->ipv6_default_router_addr, ip_address,
- IPv6_ADDR_LEN) == 0) {
+ if (memcmp(&ha->ip_config.ipv6_default_router_addr,
+ ip_address, IPv6_ADDR_LEN) == 0) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
"No Router configured. "
"Don't wait!\n", ha->host_no,
__func__));
ipv6_wait = 0;
}
- if ((ha->ipv6_default_router_state ==
- IPV6_RTRSTATE_MANUAL) &&
- (ha->ipv6_link_local_state ==
- IP_ADDRSTATE_TENTATIVE) &&
- (memcmp(&ha->ipv6_link_local_addr,
- &ha->ipv6_default_router_addr, 4) == 0)) {
+ if ((ha->ip_config.ipv6_default_router_state ==
+ IPV6_RTRSTATE_MANUAL) &&
+ (ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_TENTATIVE) &&
+ (memcmp(&ha->ip_config.ipv6_link_local_addr,
+ &ha->ip_config.ipv6_default_router_addr, 4) ==
+ 0)) {
DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
"IP configured. Don't wait!\n",
ha->host_no, __func__));
@@ -279,11 +256,14 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
"IP(s) \"", ha->host_no, __func__));
if (ipv4_wait)
DEBUG2(printk("IPv4 "));
- if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING)
+ if (ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_ACQUIRING)
DEBUG2(printk("IPv6LinkLocal "));
- if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
+ if (ha->ip_config.ipv6_addr0_state ==
+ IP_ADDRSTATE_ACQUIRING)
DEBUG2(printk("IPv6Addr0 "));
- if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
+ if (ha->ip_config.ipv6_addr1_state ==
+ IP_ADDRSTATE_ACQUIRING)
DEBUG2(printk("IPv6Addr1 "));
DEBUG2(printk("\"\n"));
}
@@ -466,486 +446,19 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
return qla4xxx_get_firmware_status(ha);
}
-static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index,
- uint32_t *new_tgt)
-{
- struct dev_db_entry *fw_ddb_entry = NULL;
- dma_addr_t fw_ddb_entry_dma;
- struct ddb_entry *ddb_entry = NULL;
- int found = 0;
- uint32_t device_state;
-
- *new_tgt = 0;
- /* Make sure the dma buffer is valid */
- fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*fw_ddb_entry),
- &fw_ddb_entry_dma, GFP_KERNEL);
- if (fw_ddb_entry == NULL) {
- DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
- ha->host_no, __func__));
- goto exit_get_ddb_entry_no_free;
- }
-
- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
- fw_ddb_entry_dma, NULL, NULL,
- &device_state, NULL, NULL, NULL) ==
- QLA_ERROR) {
- DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
- "fw_ddb_index %d\n", ha->host_no, __func__,
- fw_ddb_index));
- goto exit_get_ddb_entry;
- }
-
- /* Allocate DDB if not already allocated. */
- DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
- __func__, fw_ddb_index));
- list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
- if ((memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
- ISCSI_NAME_SIZE) == 0) &&
- (ddb_entry->tpgt ==
- le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) &&
- (memcmp(ddb_entry->isid, fw_ddb_entry->isid,
- sizeof(ddb_entry->isid)) == 0)) {
- found++;
- break;
- }
- }
-
- /* if not found allocate new ddb */
- if (!found) {
- DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
- "new ddb\n", ha->host_no, __func__,
- fw_ddb_index));
- *new_tgt = 1;
- ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
- }
-
-exit_get_ddb_entry:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
- fw_ddb_entry_dma);
-
-exit_get_ddb_entry_no_free:
- return ddb_entry;
-}
-
-/**
- * qla4xxx_update_ddb_entry - update driver's internal ddb
- * @ha: pointer to host adapter structure.
- * @ddb_entry: pointer to device database structure to be filled
- * @fw_ddb_index: index of the ddb entry in fw ddb table
- *
- * This routine updates the driver's internal device database entry
- * with information retrieved from the firmware's device database
- * entry for the specified device. The ddb_entry->fw_ddb_index field
- * must be initialized prior to calling this routine
- *
- **/
-static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry,
- uint32_t fw_ddb_index)
-{
- struct dev_db_entry *fw_ddb_entry = NULL;
- dma_addr_t fw_ddb_entry_dma;
- int status = QLA_ERROR;
- uint32_t conn_err;
-
- if (ddb_entry == NULL) {
- DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
- __func__));
-
- goto exit_update_ddb_no_free;
- }
-
- /* Make sure the dma buffer is valid */
- fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*fw_ddb_entry),
- &fw_ddb_entry_dma, GFP_KERNEL);
- if (fw_ddb_entry == NULL) {
- DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
- ha->host_no, __func__));
-
- goto exit_update_ddb_no_free;
- }
-
- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
- fw_ddb_entry_dma, NULL, NULL,
- &ddb_entry->fw_ddb_device_state, &conn_err,
- &ddb_entry->tcp_source_port_num,
- &ddb_entry->connection_id) ==
- QLA_ERROR) {
- DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
- "fw_ddb_index %d\n", ha->host_no, __func__,
- fw_ddb_index));
-
- goto exit_update_ddb;
- }
-
- status = QLA_SUCCESS;
- ddb_entry->options = le16_to_cpu(fw_ddb_entry->options);
- ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
- ddb_entry->task_mgmt_timeout =
- le16_to_cpu(fw_ddb_entry->def_timeout);
- ddb_entry->CmdSn = 0;
- ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle);
- ddb_entry->default_relogin_timeout =
- le16_to_cpu(fw_ddb_entry->def_timeout);
- ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
-
- /* Update index in case it changed */
- ddb_entry->fw_ddb_index = fw_ddb_index;
- ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
-
- ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
- ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
- memcpy(ddb_entry->isid, fw_ddb_entry->isid, sizeof(ddb_entry->isid));
-
- memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
- min(sizeof(ddb_entry->iscsi_name),
- sizeof(fw_ddb_entry->iscsi_name)));
- memcpy(&ddb_entry->iscsi_alias[0], &fw_ddb_entry->iscsi_alias[0],
- min(sizeof(ddb_entry->iscsi_alias),
- sizeof(fw_ddb_entry->iscsi_alias)));
- memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
- min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
-
- ddb_entry->iscsi_max_burst_len = fw_ddb_entry->iscsi_max_burst_len;
- ddb_entry->iscsi_max_outsnd_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t;
- ddb_entry->iscsi_first_burst_len = fw_ddb_entry->iscsi_first_burst_len;
- ddb_entry->iscsi_max_rcv_data_seg_len =
- fw_ddb_entry->iscsi_max_rcv_data_seg_len;
- ddb_entry->iscsi_max_snd_data_seg_len =
- fw_ddb_entry->iscsi_max_snd_data_seg_len;
-
- if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
- memcpy(&ddb_entry->remote_ipv6_addr,
- fw_ddb_entry->ip_addr,
- min(sizeof(ddb_entry->remote_ipv6_addr),
- sizeof(fw_ddb_entry->ip_addr)));
- memcpy(&ddb_entry->link_local_ipv6_addr,
- fw_ddb_entry->link_local_ipv6_addr,
- min(sizeof(ddb_entry->link_local_ipv6_addr),
- sizeof(fw_ddb_entry->link_local_ipv6_addr)));
-
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
- " ConnErr %08x IP %pI6 "
- ":%04d \"%s\"\n",
- __func__, fw_ddb_index,
- ddb_entry->fw_ddb_device_state,
- conn_err, fw_ddb_entry->ip_addr,
- le16_to_cpu(fw_ddb_entry->port),
- fw_ddb_entry->iscsi_name));
- } else
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
- " ConnErr %08x IP %pI4 "
- ":%04d \"%s\"\n",
- __func__, fw_ddb_index,
- ddb_entry->fw_ddb_device_state,
- conn_err, fw_ddb_entry->ip_addr,
- le16_to_cpu(fw_ddb_entry->port),
- fw_ddb_entry->iscsi_name));
-exit_update_ddb:
- if (fw_ddb_entry)
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- fw_ddb_entry, fw_ddb_entry_dma);
-
-exit_update_ddb_no_free:
- return status;
-}
-
-/**
- * qla4xxx_alloc_ddb - allocate device database entry
- * @ha: Pointer to host adapter structure.
- * @fw_ddb_index: Firmware's device database index
- *
- * This routine allocates a ddb_entry, ititializes some values, and
- * inserts it into the ddb list.
- **/
-static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index)
+static void qla4xxx_set_model_info(struct scsi_qla_host *ha)
{
- struct ddb_entry *ddb_entry;
-
- DEBUG2(printk("scsi%ld: %s: fw_ddb_index [%d]\n", ha->host_no,
- __func__, fw_ddb_index));
-
- ddb_entry = qla4xxx_alloc_sess(ha);
- if (ddb_entry == NULL) {
- DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
- "to add fw_ddb_index [%d]\n",
- ha->host_no, __func__, fw_ddb_index));
- return ddb_entry;
- }
+ uint16_t board_id_string[8];
+ int i;
+ int size = sizeof(ha->nvram->isp4022.boardIdStr);
+ int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2;
- ddb_entry->fw_ddb_index = fw_ddb_index;
- atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
- atomic_set(&ddb_entry->relogin_timer, 0);
- atomic_set(&ddb_entry->relogin_retry_count, 0);
- atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
- list_add_tail(&ddb_entry->list, &ha->ddb_list);
- ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
- ha->tot_ddbs++;
-
- return ddb_entry;
-}
-
-/**
- * qla4_is_relogin_allowed - Are we allowed to login?
- * @ha: Pointer to host adapter structure.
- * @conn_err: Last connection error associated with the ddb
- *
- * This routine tests the given connection error to determine if
- * we are allowed to login.
- **/
-int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
-{
- uint32_t err_code, login_rsp_sts_class;
- int relogin = 1;
-
- err_code = ((conn_err & 0x00ff0000) >> 16);
- login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
- if (err_code == 0x1c || err_code == 0x06) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- ": conn_err=0x%08x, send target completed"
- " or access denied failure\n", conn_err));
- relogin = 0;
- }
- if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
- /* Login Response PDU returned an error.
- Login Response Status in Error Code Detail
- indicates login should not be retried.*/
- DEBUG2(ql4_printk(KERN_INFO, ha,
- ": conn_err=0x%08x, do not retry relogin\n",
- conn_err));
- relogin = 0;
+ for (i = 0; i < (size / 2) ; i++) {
+ board_id_string[i] = rd_nvram_word(ha, offset);
+ offset += 1;
}
- return relogin;
-}
-
-static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
-{
- unsigned long wtime;
-
- /* Flush the 0x8014 AEN from the firmware as a result of
- * Auto connect. We are basically doing get_firmware_ddb()
- * to determine whether we need to log back in or not.
- * Trying to do a set ddb before we have processed 0x8014
- * will result in another set_ddb() for the same ddb. In other
- * words there will be stale entries in the aen_q.
- */
- wtime = jiffies + (2 * HZ);
- do {
- if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
- if (ha->firmware_state & (BIT_2 | BIT_0))
- return;
-
- if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
- qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-
- msleep(1000);
- } while (!time_after_eq(jiffies, wtime));
-}
-
-/**
- * qla4xxx_build_ddb_list - builds driver ddb list
- * @ha: Pointer to host adapter structure.
- *
- * This routine searches for all valid firmware ddb entries and builds
- * an internal ddb list. Ddbs that are considered valid are those with
- * a device state of SESSION_ACTIVE.
- * A relogin (set_ddb) is issued for DDBs that are not online.
- **/
-static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
-{
- int status = QLA_ERROR;
- uint32_t fw_ddb_index = 0;
- uint32_t next_fw_ddb_index = 0;
- uint32_t ddb_state;
- uint32_t conn_err;
- struct ddb_entry *ddb_entry;
- struct dev_db_entry *fw_ddb_entry = NULL;
- dma_addr_t fw_ddb_entry_dma;
- uint32_t ipv6_device;
- uint32_t new_tgt;
-
- qla4xxx_flush_AENS(ha);
-
- fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- &fw_ddb_entry_dma, GFP_KERNEL);
- if (fw_ddb_entry == NULL) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DMA alloc failed\n",
- __func__));
-
- goto exit_build_ddb_list_no_free;
- }
-
- ql4_printk(KERN_INFO, ha, "Initializing DDBs ...\n");
- for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
- fw_ddb_index = next_fw_ddb_index) {
- /* First, let's see if a device exists here */
- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
- 0, NULL, &next_fw_ddb_index,
- &ddb_state, &conn_err,
- NULL, NULL) ==
- QLA_ERROR) {
- DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
- "fw_ddb_index %d failed", ha->host_no,
- __func__, fw_ddb_index));
- goto exit_build_ddb_list;
- }
-
- DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, "
- "next_fw_ddb_index=%d.\n", ha->host_no, __func__,
- fw_ddb_index, ddb_state, next_fw_ddb_index));
-
- /* Issue relogin, if necessary. */
- if (ddb_state == DDB_DS_SESSION_FAILED ||
- ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) {
- /* Try and login to device */
- DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
- ha->host_no, __func__, fw_ddb_index));
- ipv6_device = le16_to_cpu(fw_ddb_entry->options) &
- DDB_OPT_IPV6_DEVICE;
- if (qla4_is_relogin_allowed(ha, conn_err) &&
- ((!ipv6_device &&
- *((uint32_t *)fw_ddb_entry->ip_addr))
- || ipv6_device)) {
- qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
- NULL, 0, NULL,
- &next_fw_ddb_index,
- &ddb_state, &conn_err,
- NULL, NULL)
- == QLA_ERROR) {
- DEBUG2(printk("scsi%ld: %s:"
- "get_ddb_entry %d failed\n",
- ha->host_no,
- __func__, fw_ddb_index));
- goto exit_build_ddb_list;
- }
- }
- }
-
- if (ddb_state != DDB_DS_SESSION_ACTIVE)
- goto next_one;
- /*
- * if fw_ddb with session active state found,
- * add to ddb_list
- */
- DEBUG2(printk("scsi%ld: %s: DDB[%d] added to list\n",
- ha->host_no, __func__, fw_ddb_index));
-
- /* Add DDB to internal our ddb list. */
- ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
- if (ddb_entry == NULL) {
- DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
- "for device at fw_ddb_index %d\n",
- ha->host_no, __func__, fw_ddb_index));
- goto exit_build_ddb_list;
- }
- /* Fill in the device structure */
- if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
- QLA_ERROR) {
- ha->fw_ddb_index_map[fw_ddb_index] =
- (struct ddb_entry *)INVALID_ENTRY;
-
- DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed "
- "for fw_ddb_index %d.\n",
- ha->host_no, __func__, fw_ddb_index));
- goto exit_build_ddb_list;
- }
-
-next_one:
- /* We know we've reached the last device when
- * next_fw_ddb_index is 0 */
- if (next_fw_ddb_index == 0)
- break;
- }
-
- status = QLA_SUCCESS;
- ql4_printk(KERN_INFO, ha, "DDB list done..\n");
-
-exit_build_ddb_list:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
- fw_ddb_entry_dma);
-
-exit_build_ddb_list_no_free:
- return status;
-}
-
-static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
-{
- uint16_t fw_ddb_index;
- int status = QLA_SUCCESS;
-
- /* free the ddb list if is not empty */
- if (!list_empty(&ha->ddb_list))
- qla4xxx_free_ddb_list(ha);
-
- for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
- ha->fw_ddb_index_map[fw_ddb_index] =
- (struct ddb_entry *)INVALID_ENTRY;
-
- ha->tot_ddbs = 0;
-
- /* Perform device discovery and build ddb list. */
- status = qla4xxx_build_ddb_list(ha);
-
- return status;
-}
-
-/**
- * qla4xxx_reinitialize_ddb_list - update the driver ddb list
- * @ha: pointer to host adapter structure.
- *
- * This routine obtains device information from the F/W database after
- * firmware or adapter resets. The device table is preserved.
- **/
-int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha)
-{
- int status = QLA_SUCCESS;
- struct ddb_entry *ddb_entry, *detemp;
-
- /* Update the device information for all devices. */
- list_for_each_entry_safe(ddb_entry, detemp, &ha->ddb_list, list) {
- qla4xxx_update_ddb_entry(ha, ddb_entry,
- ddb_entry->fw_ddb_index);
- if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
- atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
- DEBUG2(printk ("scsi%ld: %s: ddb index [%d] marked "
- "ONLINE\n", ha->host_no, __func__,
- ddb_entry->fw_ddb_index));
- iscsi_unblock_session(ddb_entry->sess);
- } else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
- qla4xxx_mark_device_missing(ha, ddb_entry);
- }
- return status;
-}
-
-/**
- * qla4xxx_relogin_device - re-establish session
- * @ha: Pointer to host adapter structure.
- * @ddb_entry: Pointer to device database entry
- *
- * This routine does a session relogin with the specified device.
- * The ddb entry must be assigned prior to making this call.
- **/
-int qla4xxx_relogin_device(struct scsi_qla_host *ha,
- struct ddb_entry * ddb_entry)
-{
- uint16_t relogin_timer;
-
- relogin_timer = max(ddb_entry->default_relogin_timeout,
- (uint16_t)RELOGIN_TOV);
- atomic_set(&ddb_entry->relogin_timer, relogin_timer);
-
- DEBUG2(printk("scsi%ld: Relogin ddb [%d]. TOV=%d\n", ha->host_no,
- ddb_entry->fw_ddb_index, relogin_timer));
-
- qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0);
-
- return QLA_SUCCESS;
+ memcpy(ha->model_name, board_id_string, size);
}
static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
@@ -983,6 +496,12 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
else
return QLA_ERROR;
}
+
+ if (is_qla4022(ha) || is_qla4032(ha))
+ qla4xxx_set_model_info(ha);
+ else
+ strcpy(ha->model_name, "QLA4010");
+
DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
ha->host_no, __func__, extHwConfig.Asuint32_t));
@@ -1246,23 +765,56 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
}
return status;
}
+/**
+ * qla4xxx_free_ddb_index - Free DDBs reserved by firmware
+ * @ha: pointer to adapter structure
+ *
+ * Since firmware is not running in autoconnect mode the DDB indices should
+ * be freed so that when login happens from user space there are free DDB
+ * indices available.
+ **/
+static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+{
+ int max_ddbs;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ continue;
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Freeing DDB index = 0x%x\n", idx));
+ ret = qla4xxx_clear_ddb_entry(ha, idx);
+ if (ret == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha,
+ "Unable to clear DDB index = "
+ "0x%x\n", idx);
+ }
+ if (next_idx == 0)
+ break;
+ }
+}
/**
* qla4xxx_initialize_adapter - initiailizes hba
* @ha: Pointer to host adapter structure.
- * @renew_ddb_list: Indicates what to do with the adapter's ddb list
- * after adapter recovery has completed.
- * 0=preserve ddb list, 1=destroy and rebuild ddb list
*
* This routine parforms all of the steps necessary to initialize the adapter.
*
**/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
- uint8_t renew_ddb_list)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
- int8_t ip_address[IP_ADDR_LEN] = {0} ;
ha->eeprom_cmd_data = 0;
@@ -1288,47 +840,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
if (status == QLA_ERROR)
goto exit_init_hba;
- /*
- * FW is waiting to get an IP address from DHCP server: Skip building
- * the ddb_list and wait for DHCP lease acquired aen to come in
- * followed by 0x8014 aen" to trigger the tgt discovery process.
- */
- if (ha->firmware_state & FW_STATE_CONFIGURING_IP)
- goto exit_init_online;
-
- /* Skip device discovery if ip and subnet is zero */
- if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
- memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
- goto exit_init_online;
+ qla4xxx_free_ddb_index(ha);
- if (renew_ddb_list == PRESERVE_DDB_LIST) {
- /*
- * We want to preserve lun states (i.e. suspended, etc.)
- * for recovery initiated by the driver. So just update
- * the device states for the existing ddb_list.
- */
- qla4xxx_reinitialize_ddb_list(ha);
- } else if (renew_ddb_list == REBUILD_DDB_LIST) {
- /*
- * We want to build the ddb_list from scratch during
- * driver initialization and recovery initiated by the
- * INT_HBA_RESET IOCTL.
- */
- status = qla4xxx_initialize_ddb_list(ha);
- if (status == QLA_ERROR) {
- DEBUG2(printk("%s(%ld) Error occurred during build"
- "ddb list\n", __func__, ha->host_no));
- goto exit_init_hba;
- }
-
- }
- if (!ha->tot_ddbs) {
- DEBUG2(printk("scsi%ld: Failed to initialize devices or none "
- "present in Firmware device database\n",
- ha->host_no));
- }
-
-exit_init_online:
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
if (is_qla8022(ha) && (status == QLA_ERROR)) {
@@ -1343,61 +856,6 @@ exit_init_hba:
}
/**
- * qla4xxx_add_device_dynamically - ddb addition due to an AEN
- * @ha: Pointer to host adapter structure.
- * @fw_ddb_index: Firmware's device database index
- *
- * This routine processes adds a device as a result of an 8014h AEN.
- **/
-static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index)
-{
- struct ddb_entry * ddb_entry;
- uint32_t new_tgt;
-
- /* First allocate a device structure */
- ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
- if (ddb_entry == NULL) {
- DEBUG2(printk(KERN_WARNING
- "scsi%ld: Unable to allocate memory to add "
- "fw_ddb_index %d\n", ha->host_no, fw_ddb_index));
- return;
- }
-
- if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) {
- /* Target has been bound to a new fw_ddb_index */
- qla4xxx_free_ddb(ha, ddb_entry);
- ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
- if (ddb_entry == NULL) {
- DEBUG2(printk(KERN_WARNING
- "scsi%ld: Unable to allocate memory"
- " to add fw_ddb_index %d\n",
- ha->host_no, fw_ddb_index));
- return;
- }
- }
- if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
- QLA_ERROR) {
- ha->fw_ddb_index_map[fw_ddb_index] =
- (struct ddb_entry *)INVALID_ENTRY;
- DEBUG2(printk(KERN_WARNING
- "scsi%ld: failed to add new device at index "
- "[%d]\n Unable to retrieve fw ddb entry\n",
- ha->host_no, fw_ddb_index));
- qla4xxx_free_ddb(ha, ddb_entry);
- return;
- }
-
- if (qla4xxx_add_sess(ddb_entry)) {
- DEBUG2(printk(KERN_WARNING
- "scsi%ld: failed to add new device at index "
- "[%d]\n Unable to add connection and session\n",
- ha->host_no, fw_ddb_index));
- qla4xxx_free_ddb(ha, ddb_entry);
- }
-}
-
-/**
* qla4xxx_process_ddb_changed - process ddb state change
* @ha - Pointer to host adapter structure.
* @fw_ddb_index - Firmware's device database index
@@ -1409,88 +867,94 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t state, uint32_t conn_err)
{
struct ddb_entry * ddb_entry;
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
/* check for out of range index */
if (fw_ddb_index >= MAX_DDB_ENTRIES)
- return QLA_ERROR;
+ goto exit_ddb_event;
/* Get the corresponging ddb entry */
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
/* Device does not currently exist in our database. */
if (ddb_entry == NULL) {
- if (state == DDB_DS_SESSION_ACTIVE)
- qla4xxx_add_device_dynamically(ha, fw_ddb_index);
- return QLA_SUCCESS;
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
}
- /* Device already exists in our database. */
- DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
- "index [%d]\n", ha->host_no, __func__,
- ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
ddb_entry->fw_ddb_device_state = state;
- /* Device is back online. */
- if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
- (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
- atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
- atomic_set(&ddb_entry->relogin_retry_count, 0);
- atomic_set(&ddb_entry->relogin_timer, 0);
- clear_bit(DF_RELOGIN, &ddb_entry->flags);
- iscsi_unblock_session(ddb_entry->sess);
- iscsi_session_event(ddb_entry->sess,
- ISCSI_KEVENT_CREATE_SESSION);
- /*
- * Change the lun state to READY in case the lun TIMEOUT before
- * the device came back.
- */
- } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) {
- /* Device went away, mark device missing */
- if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing "
- "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
- __func__, ddb_entry,
- ddb_entry->sess, ddb_entry->conn));
- qla4xxx_mark_device_missing(ha, ddb_entry);
- }
- /*
- * Relogin if device state changed to a not active state.
- * However, do not relogin if a RELOGIN is in process, or
- * we are not allowed to relogin to this DDB.
- */
- if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
- !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
- qla4_is_relogin_allowed(ha, conn_err)) {
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+ qla4xxx_update_session_conn_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_FREE);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
/*
- * This triggers a relogin. After the relogin_timer
- * expires, the relogin gets scheduled. We must wait a
- * minimum amount of time since receiving an 0x8014 AEN
- * with failed device_state or a logout response before
- * we can issue another relogin.
+ * iscsi_session failure will cause userspace to
+ * stop the connection which in turn would block the
+ * iscsi_session and start relogin
*/
- /* Firmware pads this timeout: (time2wait +1).
- * Driver retry to login should be longer than F/W.
- * Otherwise F/W will fail
- * set_ddb() mbx cmd with 0x4005 since it still
- * counting down its time2wait.
- */
- atomic_set(&ddb_entry->relogin_timer, 0);
- atomic_set(&ddb_entry->retry_relogin_timer,
- ddb_entry->default_time2wait + 4);
- DEBUG(printk("scsi%ld: %s: ddb[%d] "
- "initiate relogin after %d seconds\n",
- ha->host_no, __func__,
- ddb_entry->fw_ddb_index,
- ddb_entry->default_time2wait + 4));
- } else {
- DEBUG(printk("scsi%ld: %s: ddb[%d] "
- "relogin not initiated, state = %d, "
- "ddb_entry->flags = 0x%lx\n",
- ha->host_no, __func__,
- ddb_entry->fw_ddb_index,
- ddb_entry->fw_ddb_device_state,
- ddb_entry->flags));
+ iscsi_session_failure(ddb_entry->sess->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+ qla4xxx_update_session_conn_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_session_failure(ddb_entry->sess->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ status = QLA_SUCCESS;
+ break;
}
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
}
- return QLA_SUCCESS;
+
+exit_ddb_event:
+ return status;
}
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 75fcd82a8fc..41066935190 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -313,10 +313,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
cmd_entry->hdr.entryType = ET_COMMAND;
cmd_entry->handle = cpu_to_le32(index);
cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
- cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);
int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
- cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
@@ -381,3 +379,69 @@ queuing_error:
return QLA_ERROR;
}
+int qla4xxx_send_passthru0(struct iscsi_task *task)
+{
+ struct passthru0 *passthru_iocb;
+ struct iscsi_session *sess = task->conn->session;
+ struct ddb_entry *ddb_entry = sess->dd_data;
+ struct scsi_qla_host *ha = ddb_entry->ha;
+ struct ql4_task_data *task_data = task->dd_data;
+ uint16_t ctrl_flags = 0;
+ unsigned long flags;
+ int ret = QLA_ERROR;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ task_data->iocb_req_cnt = 1;
+ /* Put the IOCB on the request queue */
+ if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
+ goto queuing_error;
+
+ passthru_iocb = (struct passthru0 *) ha->request_ptr;
+
+ memset(passthru_iocb, 0, sizeof(struct passthru0));
+ passthru_iocb->hdr.entryType = ET_PASSTHRU0;
+ passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
+ passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
+ passthru_iocb->handle = task->itt;
+ passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+ passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);
+
+ /* Setup the out & in DSDs */
+ if (task_data->req_len) {
+ memcpy((uint8_t *)task_data->req_buffer +
+ sizeof(struct iscsi_hdr), task->data, task->data_count);
+ ctrl_flags |= PT_FLAG_SEND_BUFFER;
+ passthru_iocb->out_dsd.base.addrLow =
+ cpu_to_le32(LSDW(task_data->req_dma));
+ passthru_iocb->out_dsd.base.addrHigh =
+ cpu_to_le32(MSDW(task_data->req_dma));
+ passthru_iocb->out_dsd.count =
+ cpu_to_le32(task->data_count +
+ sizeof(struct iscsi_hdr));
+ }
+ if (task_data->resp_len) {
+ passthru_iocb->in_dsd.base.addrLow =
+ cpu_to_le32(LSDW(task_data->resp_dma));
+ passthru_iocb->in_dsd.base.addrHigh =
+ cpu_to_le32(MSDW(task_data->resp_dma));
+ passthru_iocb->in_dsd.count =
+ cpu_to_le32(task_data->resp_len);
+ }
+
+ ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
+ passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);
+
+ /* Update the request pointer */
+ qla4xxx_advance_req_ring_ptr(ha);
+ wmb();
+
+ /* Track IOCB used */
+ ha->iocb_cnt += task_data->iocb_req_cnt;
+ ha->req_q_count -= task_data->iocb_req_cnt;
+ ha->isp_ops->queue_iocb(ha);
+ ret = QLA_SUCCESS;
+
+queuing_error:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return ret;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 0e72921c752..827e93078b9 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -224,8 +224,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
* I/O to this device. We should get a ddb state change
* AEN soon.
*/
- if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
- qla4xxx_mark_device_missing(ha, ddb_entry);
+ if (iscsi_is_session_online(ddb_entry->sess))
+ qla4xxx_mark_device_missing(ddb_entry->sess);
break;
case SCS_DATA_UNDERRUN:
@@ -306,8 +306,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
* send I/O to this device. We should get a ddb
* state change AEN soon.
*/
- if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
- qla4xxx_mark_device_missing(ha, ddb_entry);
+ if (iscsi_is_session_online(ddb_entry->sess))
+ qla4xxx_mark_device_missing(ddb_entry->sess);
cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
@@ -341,6 +341,51 @@ status_entry_exit:
}
/**
+ * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ **/
+static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
+ struct passthru_status *sts_entry)
+{
+ struct iscsi_task *task;
+ struct ddb_entry *ddb_entry;
+ struct ql4_task_data *task_data;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+ itt_t itt;
+ uint32_t fw_ddb_index;
+
+ itt = sts_entry->handle;
+ fw_ddb_index = le32_to_cpu(sts_entry->target);
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
+ __func__, sts_entry->target);
+ return;
+ }
+
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+ spin_lock(&conn->session->lock);
+ task = iscsi_itt_to_task(conn, itt);
+ spin_unlock(&conn->session->lock);
+
+ if (task == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
+ return;
+ }
+
+ task_data = task->dd_data;
+ memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
+ ha->req_q_count += task_data->iocb_req_cnt;
+ ha->iocb_cnt -= task_data->iocb_req_cnt;
+ queue_work(ha->task_wq, &task_data->task_work);
+}
+
+/**
* qla4xxx_process_response_queue - process response queue completions
* @ha: Pointer to host adapter structure.
*
@@ -375,6 +420,14 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
break;
case ET_PASSTHRU_STATUS:
+ if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
+ qla4xxx_passthru_status_entry(ha,
+ (struct passthru_status *)sts_entry);
+ else
+ ql4_printk(KERN_ERR, ha,
+ "%s: Invalid status received\n",
+ __func__);
+
break;
case ET_STATUS_CONTINUATION:
@@ -566,6 +619,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
(mbox_sts[2] == ACB_STATE_VALID))
set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+ complete(&ha->disable_acb_comp);
break;
case MBOX_ASTS_MAC_ADDRESS_CHANGED:
@@ -1009,23 +1064,23 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
switch (mbox_sts[0]) {
case MBOX_ASTS_DATABASE_CHANGED:
- if (process_aen == FLUSH_DDB_CHANGED_AENS) {
+ switch (process_aen) {
+ case FLUSH_DDB_CHANGED_AENS:
DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
"[%d] state=%04x FLUSHED!\n",
ha->host_no, ha->aen_out,
mbox_sts[0], mbox_sts[2],
mbox_sts[3]));
break;
+ case PROCESS_ALL_AENS:
+ default:
+ /* Specific device. */
+ if (mbox_sts[1] == 1)
+ qla4xxx_process_ddb_changed(ha,
+ mbox_sts[2], mbox_sts[3],
+ mbox_sts[4]);
+ break;
}
- case PROCESS_ALL_AENS:
- default:
- if (mbox_sts[1] == 0) { /* Global DB change. */
- qla4xxx_reinitialize_ddb_list(ha);
- } else if (mbox_sts[1] == 1) { /* Specific device. */
- qla4xxx_process_ddb_changed(ha, mbox_sts[2],
- mbox_sts[3], mbox_sts[4]);
- }
- break;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index fce8289e975..4c2b8487039 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -303,7 +303,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
return QLA_SUCCESS;
}
-static uint8_t
+uint8_t
qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
{
@@ -327,43 +327,69 @@ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
static void
qla4xxx_update_local_ip(struct scsi_qla_host *ha,
- struct addr_ctrl_blk *init_fw_cb)
+ struct addr_ctrl_blk *init_fw_cb)
{
+ ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+ ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+ ha->ip_config.ipv4_addr_state =
+ le16_to_cpu(init_fw_cb->ipv4_addr_state);
+ ha->ip_config.eth_mtu_size =
+ le16_to_cpu(init_fw_cb->eth_mtu_size);
+ ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);
+
+ if (ha->acb_version == ACB_SUPPORTED) {
+ ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
+ ha->ip_config.ipv6_addl_options =
+ le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
+ }
+
/* Save IPv4 Address Info */
- memcpy(ha->ip_address, init_fw_cb->ipv4_addr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr)));
- memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet)));
- memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr)));
+ memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
+ min(sizeof(ha->ip_config.ip_address),
+ sizeof(init_fw_cb->ipv4_addr)));
+ memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
+ min(sizeof(ha->ip_config.subnet_mask),
+ sizeof(init_fw_cb->ipv4_subnet)));
+ memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
+ min(sizeof(ha->ip_config.gateway),
+ sizeof(init_fw_cb->ipv4_gw_addr)));
+
+ ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
if (is_ipv6_enabled(ha)) {
/* Save IPv6 Address */
- ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state;
- ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state;
- ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state;
- ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state;
- ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
- ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
-
- memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8],
- init_fw_cb->ipv6_if_id,
- min(sizeof(ha->ipv6_link_local_addr)/2,
- sizeof(init_fw_cb->ipv6_if_id)));
- memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0,
- min(sizeof(ha->ipv6_addr0),
- sizeof(init_fw_cb->ipv6_addr0)));
- memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1,
- min(sizeof(ha->ipv6_addr1),
- sizeof(init_fw_cb->ipv6_addr1)));
- memcpy(&ha->ipv6_default_router_addr,
- init_fw_cb->ipv6_dflt_rtr_addr,
- min(sizeof(ha->ipv6_default_router_addr),
- sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+ ha->ip_config.ipv6_link_local_state =
+ le16_to_cpu(init_fw_cb->ipv6_lnk_lcl_addr_state);
+ ha->ip_config.ipv6_addr0_state =
+ le16_to_cpu(init_fw_cb->ipv6_addr0_state);
+ ha->ip_config.ipv6_addr1_state =
+ le16_to_cpu(init_fw_cb->ipv6_addr1_state);
+ ha->ip_config.ipv6_default_router_state =
+ le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state);
+ ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+ ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+
+ memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
+ init_fw_cb->ipv6_if_id,
+ min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
+ sizeof(init_fw_cb->ipv6_if_id)));
+ memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
+ min(sizeof(ha->ip_config.ipv6_addr0),
+ sizeof(init_fw_cb->ipv6_addr0)));
+ memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
+ min(sizeof(ha->ip_config.ipv6_addr1),
+ sizeof(init_fw_cb->ipv6_addr1)));
+ memcpy(&ha->ip_config.ipv6_default_router_addr,
+ init_fw_cb->ipv6_dflt_rtr_addr,
+ min(sizeof(ha->ip_config.ipv6_default_router_addr),
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+ ha->ip_config.ipv6_vlan_tag =
+ be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
+ ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
}
}
-static uint8_t
+uint8_t
qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
uint32_t *mbox_cmd,
uint32_t *mbox_sts,
@@ -383,9 +409,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
/* Save some info in adapter structure. */
ha->acb_version = init_fw_cb->acb_version;
ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
- ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
- ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
- ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state);
ha->heartbeat_interval = init_fw_cb->hb_interval;
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
@@ -393,10 +416,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
- if (ha->acb_version == ACB_SUPPORTED) {
- ha->ipv6_options = init_fw_cb->ipv6_opts;
- ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
- }
qla4xxx_update_local_ip(ha, init_fw_cb);
return QLA_SUCCESS;
@@ -462,10 +481,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
- /* Set bit for "serialize task mgmt" all other bits need to be zero */
init_fw_cb->add_fw_options = 0;
init_fw_cb->add_fw_options |=
- __constant_cpu_to_le16(SERIALIZE_TASK_MGMT);
+ __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+ init_fw_cb->add_fw_options |=
+ __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
!= QLA_SUCCESS) {
@@ -691,19 +711,38 @@ exit_get_fwddb:
return status;
}
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
+ mbox_cmd[1] = fw_ddb_index;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
+ __func__, status, mbox_sts[0], mbox_sts[1]));
+ return status;
+}
+
/**
* qla4xxx_set_fwddb_entry - sets a ddb entry.
* @ha: Pointer to host adapter structure.
* @fw_ddb_index: Firmware's device database index
- * @fw_ddb_entry: Pointer to firmware's ddb entry structure, or NULL.
+ * @fw_ddb_entry_dma: dma address of ddb entry
+ * @mbx_sts: mailbox 0 to be returned or NULL
*
* This routine initializes or updates the adapter's device database
- * entry for the specified device. It also triggers a login for the
- * specified device. Therefore, it may also be used as a secondary
- * login routine when a NULL pointer is specified for the fw_ddb_entry.
+ * entry for the specified device.
**/
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
- dma_addr_t fw_ddb_entry_dma)
+ dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -722,13 +761,41 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
mbox_cmd[4] = sizeof(struct dev_db_entry);
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
- &mbox_sts[0]);
+ &mbox_sts[0]);
+ if (mbx_sts)
+ *mbx_sts = mbox_sts[0];
DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
return status;
}
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry, int options)
+{
+ int status;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
+ mbox_cmd[1] = ddb_entry->fw_ddb_index;
+ mbox_cmd[3] = options;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
+ "failed sts %04X %04X", __func__,
+ mbox_sts[0], mbox_sts[1]));
+ }
+
+ return status;
+}
+
/**
* qla4xxx_get_crash_record - retrieves crash record.
* @ha: Pointer to host adapter structure.
@@ -805,7 +872,6 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
uint32_t max_event_log_entries;
uint8_t i;
-
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_cmd));
@@ -1104,7 +1170,7 @@ exit_about_fw:
return status;
}
-static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
+static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
@@ -1114,6 +1180,7 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
+ mbox_cmd[1] = options;
mbox_cmd[2] = LSDW(dma_addr);
mbox_cmd[3] = MSDW(dma_addr);
@@ -1126,8 +1193,10 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
return QLA_SUCCESS;
}
-static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
+ uint32_t *mbx_sts)
{
+ int status;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -1135,75 +1204,646 @@ static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
- mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
+ mbox_cmd[1] = ddb_index;
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS) {
- if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
- *ddb_index = mbox_sts[2];
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+ __func__, mbox_sts[0]));
+ }
+
+ *mbx_sts = mbox_sts[0];
+ return status;
+}
+
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
+{
+ int status;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
+ mbox_cmd[1] = ddb_index;
+
+ status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+ __func__, mbox_sts[0]));
+ }
+
+ return status;
+}
+
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+ uint32_t offset, uint32_t length, uint32_t options)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
+ mbox_cmd[1] = LSDW(dma_addr);
+ mbox_cmd[2] = MSDW(dma_addr);
+ mbox_cmd[3] = offset;
+ mbox_cmd[4] = length;
+ mbox_cmd[5] = options;
+
+ status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
+ "failed w/ status %04X, mbx1 %04X\n",
+ __func__, mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
+{
+ uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ uint32_t dev_db_end_offset;
+ int status = QLA_ERROR;
+
+ memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+
+ dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
+ dev_db_end_offset = FLASH_OFFSET_DB_END;
+
+ if (dev_db_start_offset > dev_db_end_offset) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Invalid DDB index %d\n", __func__,
+ ddb_index));
+ goto exit_bootdb_failed;
+ }
+
+ if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
+			   "failed\n", ha->host_no, __func__);
+ goto exit_bootdb_failed;
+ }
+
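+	/* A valid cookie marks the flash DDB entry as populated. */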
+ if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
+ status = QLA_SUCCESS;
+
+exit_bootdb_failed:
+ return status;
+}
+
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx)
+{
+ int ret = 0;
+ int rval = QLA_ERROR;
+ uint32_t offset = 0, chap_size;
+ struct ql4_chap_table *chap_table;
+ dma_addr_t chap_dma;
+
+ chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+ if (chap_table == NULL) {
+ ret = -ENOMEM;
+ goto exit_get_chap;
+ }
+
+ chap_size = sizeof(struct ql4_chap_table);
+ memset(chap_table, 0, chap_size);
+
+ if (is_qla40XX(ha))
+ offset = FLASH_CHAP_OFFSET | (idx * chap_size);
+ else {
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+		/* flt_chap_size is the CHAP table size for both ports,
+		 * so divide it by 2 to get the offset for the second port.
+		 */
+ if (ha->port_num == 1)
+ offset += (ha->hw.flt_chap_size / 2);
+ offset += (idx * chap_size);
+ }
+
+ rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+ if (rval != QLA_SUCCESS) {
+ ret = -EINVAL;
+ goto exit_get_chap;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
+ __le16_to_cpu(chap_table->cookie)));
+
+ if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
+ ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
+ goto exit_get_chap;
+ }
+
+ strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+
+exit_get_chap:
+ dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+ return ret;
+}
+
+static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
+ char *password, uint16_t idx, int bidi)
+{
+ int ret = 0;
+ int rval = QLA_ERROR;
+ uint32_t offset = 0;
+ struct ql4_chap_table *chap_table;
+ dma_addr_t chap_dma;
+
+ chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+ if (chap_table == NULL) {
+ ret = -ENOMEM;
+ goto exit_set_chap;
+ }
+
+ memset(chap_table, 0, sizeof(struct ql4_chap_table));
+ if (bidi)
+ chap_table->flags |= BIT_6; /* peer */
+ else
+ chap_table->flags |= BIT_7; /* local */
+ chap_table->secret_len = strlen(password);
+ strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
+ strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
+ chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+ offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
+ rval = qla4xxx_set_flash(ha, chap_dma, offset,
+ sizeof(struct ql4_chap_table),
+ FLASH_OPT_RMW_COMMIT);
+
+ if (rval == QLA_SUCCESS && ha->chap_list) {
+ /* Update ha chap_list cache */
+ memcpy((struct ql4_chap_table *)ha->chap_list + idx,
+ chap_table, sizeof(struct ql4_chap_table));
+ }
+ dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+ if (rval != QLA_SUCCESS)
+ ret = -EINVAL;
+
+exit_set_chap:
+ return ret;
+}
+
+/**
+ * qla4xxx_get_chap_index - Get chap index given username and secret
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to be searched
+ * @password: CHAP password to be searched
+ * @bidi: Is this a BIDI CHAP
+ * @chap_index: CHAP index to be returned
+ *
+ * Match the username and password in the chap_list and return the index if
+ * a match is found. If no match is found, add the entry to FLASH and return
+ * the index at which the entry was written.
+ **/
+static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+ char *password, int bidi, uint16_t *chap_index)
+{
+ int i, rval;
+ int free_index = -1;
+ int found_index = 0;
+ int max_chap_entries = 0;
+ struct ql4_chap_table *chap_table;
+
+ if (is_qla8022(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+ return QLA_ERROR;
+ }
+
+ mutex_lock(&ha->chap_sem);
+ for (i = 0; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+ if (chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
+ free_index = i;
+ continue;
+ }
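+		/* Skip entries whose CHAP direction (local vs. peer) does not match. */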
+ if (bidi) {
+ if (chap_table->flags & BIT_7)
+ continue;
} else {
- DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
- ha->host_no, __func__, mbox_sts[0]));
- return QLA_ERROR;
+ if (chap_table->flags & BIT_6)
+ continue;
+ }
+ if (!strncmp(chap_table->secret, password,
+ MAX_CHAP_SECRET_LEN) &&
+ !strncmp(chap_table->name, username,
+ MAX_CHAP_NAME_LEN)) {
+ *chap_index = i;
+ found_index = 1;
+ break;
}
- } else {
- *ddb_index = MAX_PRST_DEV_DB_ENTRIES;
}
- return QLA_SUCCESS;
+	/* If the chap entry is not present and a free index is available,
+	 * write the entry to flash.
+	 */
+ if (!found_index && free_index != -1) {
+ rval = qla4xxx_set_chap(ha, username, password,
+ free_index, bidi);
+ if (!rval) {
+ *chap_index = free_index;
+ found_index = 1;
+ }
+ }
+
+ mutex_unlock(&ha->chap_sem);
+
+ if (found_index)
+ return QLA_SUCCESS;
+ return QLA_ERROR;
}
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+ uint16_t fw_ddb_index,
+ uint16_t connection_id,
+ uint16_t option)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
+ mbox_cmd[1] = fw_ddb_index;
+ mbox_cmd[2] = connection_id;
+ mbox_cmd[3] = option;
-int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
+ status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
+ "option %04x failed w/ status %04X %04X\n",
+ __func__, option, mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+int qla4xxx_disable_acb(struct scsi_qla_host *ha)
{
- struct dev_db_entry *fw_ddb_entry;
- dma_addr_t fw_ddb_entry_dma;
- uint32_t ddb_index;
- int ret_val = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;
+ status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
+				  "failed w/ status %04X %04X %04X\n", __func__,
+ mbox_sts[0], mbox_sts[1], mbox_sts[2]));
+ }
+ return status;
+}
+
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+ uint32_t acb_type, uint32_t len)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
- fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*fw_ddb_entry),
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_ACB;
+ mbox_cmd[1] = acb_type;
+ mbox_cmd[2] = LSDW(acb_dma);
+ mbox_cmd[3] = MSDW(acb_dma);
+ mbox_cmd[4] = len;
+
+ status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t acb_dma)
+{
+ int status = QLA_SUCCESS;
+
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_SET_ACB;
+ mbox_cmd[1] = 0; /* Primary ACB */
+ mbox_cmd[2] = LSDW(acb_dma);
+ mbox_cmd[3] = MSDW(acb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+ status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct iscsi_cls_conn *cls_conn,
+ uint32_t *mbx_sts)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ struct iscsi_conn *conn;
+ struct iscsi_session *sess;
+ struct qla_conn *qla_conn;
+ struct sockaddr *dst_addr;
+ dma_addr_t fw_ddb_entry_dma;
+ int status = QLA_SUCCESS;
+ int rval = 0;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ char *ip;
+ uint16_t iscsi_opts = 0;
+ uint32_t options = 0;
+ uint16_t idx;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
- DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
- ha->host_no, __func__));
- ret_val = QLA_ERROR;
- goto exit_send_tgts_no_free;
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer.\n",
+ __func__));
+ rval = -ENOMEM;
+ goto exit_set_param_no_free;
}
- ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma);
- if (ret_val != QLA_SUCCESS)
- goto exit_send_tgts;
+ conn = cls_conn->dd_data;
+ qla_conn = conn->dd_data;
+ sess = conn->session;
+ dst_addr = &qla_conn->qla_ep->dst_addr;
- ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index);
- if (ret_val != QLA_SUCCESS)
- goto exit_send_tgts;
+ if (dst_addr->sa_family == AF_INET6)
+ options |= IPV6_DEFAULT_DDB_ENTRY;
- memset(fw_ddb_entry->iscsi_alias, 0,
- sizeof(fw_ddb_entry->iscsi_alias));
+ status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (status == QLA_ERROR) {
+ rval = -EINVAL;
+ goto exit_set_param;
+ }
- memset(fw_ddb_entry->iscsi_name, 0,
- sizeof(fw_ddb_entry->iscsi_name));
+ iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
+
+ memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));
+
+ if (sess->targetname != NULL) {
+ memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
+ min(strlen(sess->targetname),
+ sizeof(fw_ddb_entry->iscsi_name)));
+ }
memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
- memset(fw_ddb_entry->tgt_addr, 0,
- sizeof(fw_ddb_entry->tgt_addr));
+ memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));
+
+ fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;
+
+ if (dst_addr->sa_family == AF_INET) {
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
+ fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Destination Address [%pI4]: index [%d]\n",
+ __func__, fw_ddb_entry->ip_addr,
+ ddb_entry->fw_ddb_index));
+ } else if (dst_addr->sa_family == AF_INET6) {
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
+ fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
+ fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Destination Address [%pI6]: index [%d]\n",
+ __func__, fw_ddb_entry->ip_addr,
+ ddb_entry->fw_ddb_index));
+ } else {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Failed to get IP Address\n",
+ __func__);
+ rval = -EINVAL;
+ goto exit_set_param;
+ }
+
+ /* CHAP */
+ if (sess->username != NULL && sess->password != NULL) {
+ if (strlen(sess->username) && strlen(sess->password)) {
+ iscsi_opts |= BIT_7;
+
+ rval = qla4xxx_get_chap_index(ha, sess->username,
+ sess->password,
+ LOCAL_CHAP, &idx);
+ if (rval)
+ goto exit_set_param;
+
+ fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
+ }
+ }
+
+ if (sess->username_in != NULL && sess->password_in != NULL) {
+ /* Check if BIDI CHAP */
+ if (strlen(sess->username_in) && strlen(sess->password_in)) {
+ iscsi_opts |= BIT_4;
+
+ rval = qla4xxx_get_chap_index(ha, sess->username_in,
+ sess->password_in,
+ BIDI_CHAP, &idx);
+ if (rval)
+ goto exit_set_param;
+ }
+ }
+
+ if (sess->initial_r2t_en)
+ iscsi_opts |= BIT_10;
+
+ if (sess->imm_data_en)
+ iscsi_opts |= BIT_11;
+
+ fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);
+
+ if (conn->max_recv_dlength)
+ fw_ddb_entry->iscsi_max_rcv_data_seg_len =
+		    cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
- fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
- fw_ddb_entry->port = cpu_to_le16(ntohs(port));
+ if (sess->max_r2t)
+ fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
- fw_ddb_entry->ip_addr[0] = *ip;
- fw_ddb_entry->ip_addr[1] = *(ip + 1);
- fw_ddb_entry->ip_addr[2] = *(ip + 2);
- fw_ddb_entry->ip_addr[3] = *(ip + 3);
+ if (sess->first_burst)
+ fw_ddb_entry->iscsi_first_burst_len =
+		    cpu_to_le16(sess->first_burst / BYTE_UNITS);
- ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
+ if (sess->max_burst)
+ fw_ddb_entry->iscsi_max_burst_len =
+		    cpu_to_le16(sess->max_burst / BYTE_UNITS);
-exit_send_tgts:
+ if (sess->time2wait)
+ fw_ddb_entry->iscsi_def_time2wait =
+ cpu_to_le16(sess->time2wait);
+
+ if (sess->time2retain)
+ fw_ddb_entry->iscsi_def_time2retain =
+ cpu_to_le16(sess->time2retain);
+
+ status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry_dma, mbx_sts);
+
+ if (status != QLA_SUCCESS)
+ rval = -EINVAL;
+exit_set_param:
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
-exit_send_tgts_no_free:
- return ret_val;
+exit_set_param_no_free:
+ return rval;
+}
+
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+ uint16_t stats_size, dma_addr_t stats_dma)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
+ mbox_cmd[1] = fw_ddb_index;
+ mbox_cmd[2] = LSDW(stats_dma);
+ mbox_cmd[3] = MSDW(stats_dma);
+ mbox_cmd[4] = stats_size;
+
+ status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "%s: MBOX_CMD_GET_MANAGEMENT_DATA "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ }
+ return status;
}
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+ uint32_t ip_idx, uint32_t *sts)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
+ mbox_cmd[1] = acb_idx;
+ mbox_cmd[2] = ip_idx;
+
+ status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: "
+ "MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
+ "status %04X\n", __func__, mbox_sts[0]));
+ }
+ memcpy(sts, mbox_sts, sizeof(mbox_sts));
+ return status;
+}
+
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
+ mbox_cmd[1] = LSDW(nvram_dma);
+ mbox_cmd[2] = MSDW(nvram_dma);
+ mbox_cmd[3] = offset;
+ mbox_cmd[4] = size;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
+ mbox_cmd[1] = LSDW(nvram_dma);
+ mbox_cmd[2] = MSDW(nvram_dma);
+ mbox_cmd[3] = offset;
+ mbox_cmd[4] = size;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+ uint32_t region, uint32_t field0,
+ uint32_t field1)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
+ mbox_cmd[3] = region;
+ mbox_cmd[4] = field0;
+ mbox_cmd[5] = field1;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index b4b859b2d47..7851f314ba9 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -156,6 +156,27 @@ u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
return val;
}
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset)
+{
+ u16 val = 0;
+ u8 rval = 0;
+ int index = 0;
+
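+
+	/* NVRAM is addressed in 16-bit words: map the byte offset to its word. */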
+ if (offset & 0x1)
+ index = (offset - 1) / 2;
+ else
+ index = offset / 2;
+
+ val = le16_to_cpu(rd_nvram_word(ha, index));
+
+ if (offset & 0x1)
+ rval = (u8)((val & 0xff00) >> 8);
+ else
+ rval = (u8)((val & 0x00ff));
+
+ return rval;
+}
+
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
{
int status = QLA_ERROR;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index fdfe27b3869..f484ff43819 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2015,11 +2015,19 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
hw->flt_region_boot = start;
break;
case FLT_REG_FW_82:
+ case FLT_REG_FW_82_1:
hw->flt_region_fw = start;
break;
case FLT_REG_BOOTLOAD_82:
hw->flt_region_bootload = start;
break;
+ case FLT_REG_ISCSI_PARAM:
+ hw->flt_iscsi_param = start;
+ break;
+ case FLT_REG_ISCSI_CHAP:
+ hw->flt_region_chap = start;
+ hw->flt_chap_size = le32_to_cpu(region->size);
+ break;
}
}
goto done;
@@ -2032,6 +2040,9 @@ no_flash_data:
hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
+ hw->flt_region_chap = FA_FLASH_ISCSI_CHAP;
+ hw->flt_chap_size = FA_FLASH_CHAP_SIZE;
+
done:
DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x "
"boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt,
@@ -2258,10 +2269,16 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
}
/* Save M.A.C. address & serial_number */
+ ha->port_num = sys_info->port_num;
memcpy(ha->my_mac, &sys_info->mac_addr[0],
min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr)));
memcpy(ha->serial_number, &sys_info->serial_number,
min(sizeof(ha->serial_number), sizeof(sys_info->serial_number)));
+ memcpy(ha->model_name, &sys_info->board_id_str,
+ min(sizeof(ha->model_name), sizeof(sys_info->board_id_str)));
+ ha->phy_port_cnt = sys_info->phys_port_cnt;
+ ha->phy_port_num = sys_info->port_num;
+ ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
DEBUG2(printk("scsi%ld: %s: "
"mac %02x:%02x:%02x:%02x:%02x:%02x "
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f2364ec59f0..30f31b127f3 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6,6 +6,8 @@
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/iscsi_boot_sysfs.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -63,6 +65,7 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
" Default: 30 sec.");
+static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
* SCSI host template entry points
*/
@@ -71,18 +74,41 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
/*
* iSCSI template entry points
*/
-static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
- enum iscsi_tgt_dscvr type, uint32_t enable,
- struct sockaddr *dst_addr);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
-static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
- enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
-static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
+static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
+ uint32_t len);
+static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf);
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
-
+static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking);
+static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
+static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
+static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf);
+static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
+static struct iscsi_cls_conn *
+qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
+static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading);
+static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
+static struct iscsi_cls_session *
+qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+ uint16_t qdepth, uint32_t initial_cmdsn);
+static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
+static void qla4xxx_task_work(struct work_struct *wdata);
+static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
+static int qla4xxx_task_xmit(struct iscsi_task *);
+static void qla4xxx_task_cleanup(struct iscsi_task *);
+static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
+static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats);
/*
* SCSI host template entry points
*/
@@ -94,7 +120,8 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
-static void qla4xxx_scan_start(struct Scsi_Host *shost);
+static mode_t ql4_attr_is_visible(int param_type, int param);
+static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
@@ -115,9 +142,6 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
- .scan_finished = iscsi_scan_finished,
- .scan_start = qla4xxx_scan_start,
-
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@@ -125,58 +149,396 @@ static struct scsi_host_template qla4xxx_driver_template = {
.max_sectors = 0xFFFF,
.shost_attrs = qla4xxx_host_attrs,
+ .host_reset = qla4xxx_host_reset,
+ .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};
static struct iscsi_transport qla4xxx_iscsi_transport = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD |
- CAP_DATA_PATH_OFFLOAD,
- .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_TARGET_ALIAS,
- .host_param_mask = ISCSI_HOST_HWADDRESS |
- ISCSI_HOST_IPADDRESS |
- ISCSI_HOST_INITIATOR_NAME,
- .tgt_dscvr = qla4xxx_tgt_dscvr,
+ .caps = CAP_TEXT_NEGO |
+ CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
+ CAP_DATADGST | CAP_LOGIN_OFFLOAD |
+ CAP_MULTI_R2T,
+ .attr_is_visible = ql4_attr_is_visible,
+ .create_session = qla4xxx_session_create,
+ .destroy_session = qla4xxx_session_destroy,
+ .start_conn = qla4xxx_conn_start,
+ .create_conn = qla4xxx_conn_create,
+ .bind_conn = qla4xxx_conn_bind,
+ .stop_conn = iscsi_conn_stop,
+ .destroy_conn = qla4xxx_conn_destroy,
+ .set_param = iscsi_set_param,
.get_conn_param = qla4xxx_conn_get_param,
- .get_session_param = qla4xxx_sess_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_ep_param = qla4xxx_get_ep_param,
+ .ep_connect = qla4xxx_ep_connect,
+ .ep_poll = qla4xxx_ep_poll,
+ .ep_disconnect = qla4xxx_ep_disconnect,
+ .get_stats = qla4xxx_conn_get_stats,
+ .send_pdu = iscsi_conn_send_pdu,
+ .xmit_task = qla4xxx_task_xmit,
+ .cleanup_task = qla4xxx_task_cleanup,
+ .alloc_pdu = qla4xxx_alloc_pdu,
+
.get_host_param = qla4xxx_host_get_param,
- .session_recovery_timedout = qla4xxx_recovery_timedout,
+ .set_iface_param = qla4xxx_iface_set_param,
+ .get_iface_param = qla4xxx_get_iface_param,
+ .bsg_request = qla4xxx_bsg_request,
};
static struct scsi_transport_template *qla4xxx_scsi_transport;
-static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
+static mode_t ql4_attr_is_visible(int param_type, int param)
{
- struct iscsi_cls_session *session;
- struct ddb_entry *ddb_entry;
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_TARGET_ALIAS:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_IFACE_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_NET_PARAM:
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_GW:
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ case ISCSI_NET_PARAM_IPV6_ROUTER:
+ case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+ case ISCSI_NET_PARAM_VLAN_ID:
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_MTU:
+ case ISCSI_NET_PARAM_PORT:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
- session = starget_to_session(scsi_target(sc->device));
- ddb_entry = session->dd_data;
+ return 0;
+}
- /* if we are not logged in then the LLD is going to clean up the cmd */
- if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
- return BLK_EH_RESET_TIMER;
- else
- return BLK_EH_NOT_HANDLED;
+static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf)
+{
+ struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int len = -ENOSYS;
+
+ if (param_type != ISCSI_NET_PARAM)
+ return -ENOSYS;
+
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
+ break;
+ case ISCSI_NET_PARAM_IPV4_GW:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.ipv4_options &
+ IPOPT_IPV4_PROTOCOL_ENABLE) ?
+ "enabled" : "disabled");
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.ipv6_options &
+ IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
+ "enabled" : "disabled");
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
+ "dhcp" : "static");
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ if (iface->iface_num == 0)
+ len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
+ if (iface->iface_num == 1)
+ len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_link_local_addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER:
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_default_router_addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
+ "nd" : "static");
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
+ "auto" : "static");
+ break;
+ case ISCSI_NET_PARAM_VLAN_ID:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ (ha->ip_config.ipv4_vlan_tag &
+ ISCSI_MAX_VLAN_ID));
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ len = sprintf(buf, "%d\n",
+ (ha->ip_config.ipv6_vlan_tag &
+ ISCSI_MAX_VLAN_ID));
+ break;
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ ((ha->ip_config.ipv4_vlan_tag >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY));
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ len = sprintf(buf, "%d\n",
+ ((ha->ip_config.ipv6_vlan_tag >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY));
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.ipv4_options &
+ IPOPT_VLAN_TAGGING_ENABLE) ?
+ "enabled" : "disabled");
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.ipv6_options &
+ IPV6_OPT_VLAN_TAGGING_ENABLE) ?
+ "enabled" : "disabled");
+ break;
+ case ISCSI_NET_PARAM_MTU:
+ len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
+ break;
+ case ISCSI_NET_PARAM_PORT:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
+ break;
+ default:
+ len = -ENOSYS;
+ }
+
+ return len;
}
-static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
+static struct iscsi_endpoint *
+qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
{
- struct ddb_entry *ddb_entry = session->dd_data;
- struct scsi_qla_host *ha = ddb_entry->ha;
+ int ret;
+ struct iscsi_endpoint *ep;
+ struct qla_endpoint *qla_ep;
+ struct scsi_qla_host *ha;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ if (!shost) {
+ ret = -ENXIO;
+ printk(KERN_ERR "%s: shost is NULL\n",
+ __func__);
+ return ERR_PTR(ret);
+ }
+
+ ha = iscsi_host_priv(shost);
+
+ ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
+ if (!ep) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ qla_ep = ep->dd_data;
+ memset(qla_ep, 0, sizeof(struct qla_endpoint));
+ if (dst_addr->sa_family == AF_INET) {
+ memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
+ addr = (struct sockaddr_in *)&qla_ep->dst_addr;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
+ (char *)&addr->sin_addr));
+ } else if (dst_addr->sa_family == AF_INET6) {
+ memcpy(&qla_ep->dst_addr, dst_addr,
+ sizeof(struct sockaddr_in6));
+ addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
+ (char *)&addr6->sin6_addr));
+ }
+
+ qla_ep->host = shost;
+
+ return ep;
+}
+
+static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct qla_endpoint *qla_ep;
+ struct scsi_qla_host *ha;
+ int ret = 0;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ qla_ep = ep->dd_data;
+ ha = to_qla_host(qla_ep->host);
+
+ if (adapter_up(ha))
+ ret = 1;
+
+ return ret;
+}
+
+static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ iscsi_destroy_endpoint(ep);
+}
+
+static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param,
+ char *buf)
+{
+ struct qla_endpoint *qla_ep = ep->dd_data;
+ struct sockaddr *dst_addr;
- if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
- atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
- DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout "
- "of (%d) secs exhausted, marking device DEAD.\n",
- ha->host_no, __func__, ddb_entry->fw_ddb_index,
- ddb_entry->sess->recovery_tmo));
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!qla_ep)
+ return -ENOTCONN;
+
+ dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
+ if (!dst_addr)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &qla_ep->dst_addr, param, buf);
+ default:
+ return -ENOSYS;
}
}
+static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_session *sess;
+ struct iscsi_cls_session *cls_sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct ql_iscsi_stats *ql_iscsi_stats;
+ int stats_size;
+ int ret;
+ dma_addr_t iscsi_stats_dma;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+
+ cls_sess = iscsi_conn_to_session(cls_conn);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
+ /* Allocate memory */
+ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
+ &iscsi_stats_dma, GFP_KERNEL);
+ if (!ql_iscsi_stats) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to allocate memory for iscsi stats\n");
+ goto exit_get_stats;
+ }
+
+ ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
+ iscsi_stats_dma);
+ if (ret != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha,
+			   "Unable to retrieve iscsi stats\n");
+ goto free_stats;
+ }
+
+ /* octets */
+ stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
+ stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
+ /* xmit pdus */
+ stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
+ stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
+ stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
+ stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
+ stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
+ stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
+ stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
+ stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
+ /* recv pdus */
+ stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
+ stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
+ stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
+ stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
+ stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
+ stats->logoutrsp_pdus =
+ le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
+ stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
+ stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
+ stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
+
+free_stats:
+ dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
+ iscsi_stats_dma);
+exit_get_stats:
+ return;
+}
+
+static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *session;
+ struct iscsi_session *sess;
+ unsigned long flags;
+ enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
+
+ session = starget_to_session(scsi_target(sc->device));
+ sess = session->dd_data;
+
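+	/* Keep the command timer running while the session is failed so recovery can log back in. */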
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state == ISCSI_SESSION_FAILED)
+ ret = BLK_EH_RESET_TIMER;
+ spin_unlock_irqrestore(&session->lock, flags);
+
+ return ret;
+}
+
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
@@ -188,9 +550,7 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
break;
case ISCSI_HOST_PARAM_IPADDRESS:
- len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0],
- ha->ip_address[1], ha->ip_address[2],
- ha->ip_address[3]);
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
len = sprintf(buf, "%s\n", ha->name_string);
@@ -202,154 +562,851 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
return len;
}
-static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
- enum iscsi_param param, char *buf)
+static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
{
- struct ddb_entry *ddb_entry = sess->dd_data;
- int len;
+ if (ha->iface_ipv4)
+ return;
- switch (param) {
- case ISCSI_PARAM_TARGET_NAME:
- len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
- ddb_entry->iscsi_name);
+ /* IPv4 */
+ ha->iface_ipv4 = iscsi_create_iface(ha->host,
+ &qla4xxx_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV4, 0, 0);
+ if (!ha->iface_ipv4)
+ ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
+ "iface0.\n");
+}
+
+static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
+{
+ if (!ha->iface_ipv6_0)
+ /* IPv6 iface-0 */
+ ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
+ &qla4xxx_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV6, 0,
+ 0);
+ if (!ha->iface_ipv6_0)
+ ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
+ "iface0.\n");
+
+ if (!ha->iface_ipv6_1)
+ /* IPv6 iface-1 */
+ ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
+ &qla4xxx_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV6, 1,
+ 0);
+ if (!ha->iface_ipv6_1)
+ ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
+ "iface1.\n");
+}
+
+static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
+{
+ if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
+ qla4xxx_create_ipv4_iface(ha);
+
+ if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
+ qla4xxx_create_ipv6_iface(ha);
+}
+
+static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
+{
+ if (ha->iface_ipv4) {
+ iscsi_destroy_iface(ha->iface_ipv4);
+ ha->iface_ipv4 = NULL;
+ }
+}
+
+static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
+{
+ if (ha->iface_ipv6_0) {
+ iscsi_destroy_iface(ha->iface_ipv6_0);
+ ha->iface_ipv6_0 = NULL;
+ }
+ if (ha->iface_ipv6_1) {
+ iscsi_destroy_iface(ha->iface_ipv6_1);
+ ha->iface_ipv6_1 = NULL;
+ }
+}
+
+static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
+{
+ qla4xxx_destroy_ipv4_iface(ha);
+ qla4xxx_destroy_ipv6_iface(ha);
+}
+
+static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
+ struct iscsi_iface_param_info *iface_param,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ /*
+ * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
+ * iface_num 1 is valid only for IPv6 Addr.
+ */
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ if (iface_param->iface_num & 0x1)
+ /* IPv6 Addr 1 */
+ memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
+ sizeof(init_fw_cb->ipv6_addr1));
+ else
+ /* IPv6 Addr 0 */
+ memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
+ sizeof(init_fw_cb->ipv6_addr0));
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
+ sizeof(init_fw_cb->ipv6_if_id));
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
+ init_fw_cb->ipv6_addtl_opts &=
+ cpu_to_le16(
+ ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
+ else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
+ init_fw_cb->ipv6_addtl_opts |=
+ cpu_to_le16(
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
+ else
+ ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
+ "IPv6 addr\n");
break;
- case ISCSI_PARAM_TPGT:
- len = sprintf(buf, "%u\n", ddb_entry->tpgt);
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ if (iface_param->value[0] ==
+ ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
+ init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
+ IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
+ else if (iface_param->value[0] ==
+ ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
+ init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
+ ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
+ else
+ ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
+ "IPv6 linklocal addr\n");
break;
- case ISCSI_PARAM_TARGET_ALIAS:
- len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
- ddb_entry->iscsi_alias);
+ case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
+ memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
+ qla4xxx_create_ipv6_iface(ha);
+ } else {
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
+ 0xFFFF);
+ qla4xxx_destroy_ipv6_iface(ha);
+ }
+ break;
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
+ break;
+ init_fw_cb->ipv6_vlan_tag =
+ cpu_to_be16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
+ else
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
+ break;
+ case ISCSI_NET_PARAM_MTU:
+ init_fw_cb->eth_mtu_size =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_PORT:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ init_fw_cb->ipv6_port =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
break;
default:
- return -ENOSYS;
+ ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
+ iface_param->param);
+ break;
}
+}
- return len;
+static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
+ struct iscsi_iface_param_info *iface_param,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ memcpy(init_fw_cb->ipv4_addr, iface_param->value,
+ sizeof(init_fw_cb->ipv4_addr));
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
+ sizeof(init_fw_cb->ipv4_subnet));
+ break;
+ case ISCSI_NET_PARAM_IPV4_GW:
+ memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
+ sizeof(init_fw_cb->ipv4_gw_addr));
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_DHCP_ENABLE);
+ else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_DHCP_ENABLE);
+ else
+ ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
+ qla4xxx_create_ipv4_iface(ha);
+ } else {
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
+ 0xFFFF);
+ qla4xxx_destroy_ipv4_iface(ha);
+ }
+ break;
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
+ break;
+ init_fw_cb->ipv4_vlan_tag =
+ cpu_to_be16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
+ break;
+ case ISCSI_NET_PARAM_MTU:
+ init_fw_cb->eth_mtu_size =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_PORT:
+ init_fw_cb->ipv4_port =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
+ iface_param->param);
+ break;
+ }
}
-static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
+static void
+qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
+{
+ struct addr_ctrl_blk_def *acb;
+ acb = (struct addr_ctrl_blk_def *)init_fw_cb;
+ memset(acb->reserved1, 0, sizeof(acb->reserved1));
+ memset(acb->reserved2, 0, sizeof(acb->reserved2));
+ memset(acb->reserved3, 0, sizeof(acb->reserved3));
+ memset(acb->reserved4, 0, sizeof(acb->reserved4));
+ memset(acb->reserved5, 0, sizeof(acb->reserved5));
+ memset(acb->reserved6, 0, sizeof(acb->reserved6));
+ memset(acb->reserved7, 0, sizeof(acb->reserved7));
+ memset(acb->reserved8, 0, sizeof(acb->reserved8));
+ memset(acb->reserved9, 0, sizeof(acb->reserved9));
+ memset(acb->reserved10, 0, sizeof(acb->reserved10));
+ memset(acb->reserved11, 0, sizeof(acb->reserved11));
+ memset(acb->reserved12, 0, sizeof(acb->reserved12));
+ memset(acb->reserved13, 0, sizeof(acb->reserved13));
+ memset(acb->reserved14, 0, sizeof(acb->reserved14));
+ memset(acb->reserved15, 0, sizeof(acb->reserved15));
+}
+
+static int
+qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int rval = 0;
+ struct iscsi_iface_param_info *iface_param = NULL;
+ struct addr_ctrl_blk *init_fw_cb = NULL;
+ dma_addr_t init_fw_cb_dma;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ uint32_t rem = len;
+ struct nlattr *attr;
+
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
+ if (!init_fw_cb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
+ ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ nla_for_each_attr(attr, data, len, rem) {
+ iface_param = nla_data(attr);
+
+ if (iface_param->param_type != ISCSI_NET_PARAM)
+ continue;
+
+ switch (iface_param->iface_type) {
+ case ISCSI_IFACE_TYPE_IPV4:
+ switch (iface_param->iface_num) {
+ case 0:
+ qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
+ break;
+ default:
+ /* Cannot have more than one IPv4 interface */
+ ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
+ "number = %d\n",
+ iface_param->iface_num);
+ break;
+ }
+ break;
+ case ISCSI_IFACE_TYPE_IPV6:
+ switch (iface_param->iface_num) {
+ case 0:
+ case 1:
+ qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
+ break;
+ default:
+				/* Cannot have more than two IPv6 interfaces */
+ ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
+ "number = %d\n",
+ iface_param->iface_num);
+ break;
+ }
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
+ break;
+ }
+ }
+
+ init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
+
+ rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
+ sizeof(struct addr_ctrl_blk),
+ FLASH_OPT_RMW_COMMIT);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
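+	/* Apply the new settings: disable the current ACB, then set the updated block. */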
+ qla4xxx_disable_acb(ha);
+
+ qla4xxx_initcb_to_acb(init_fw_cb);
+
+ rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
+ init_fw_cb_dma);
+
+exit_init_fw_cb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
+
+ return rval;
+}
+
+static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
- struct iscsi_cls_session *session;
- struct ddb_entry *ddb_entry;
- int len;
+ struct iscsi_conn *conn;
+ struct qla_conn *qla_conn;
+ struct sockaddr *dst_addr;
+ int len = 0;
- session = iscsi_dev_to_session(conn->dev.parent);
- ddb_entry = session->dd_data;
+ conn = cls_conn->dd_data;
+ qla_conn = conn->dd_data;
+ dst_addr = &qla_conn->qla_ep->dst_addr;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- len = sprintf(buf, "%hu\n", ddb_entry->port);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
- /* TODO: what are the ipv6 bits */
- len = sprintf(buf, "%pI4\n", &ddb_entry->ip_addr);
- break;
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ dst_addr, param, buf);
default:
- return -ENOSYS;
+ return iscsi_conn_get_param(cls_conn, param, buf);
}
return len;
+
}
-static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
- enum iscsi_tgt_dscvr type, uint32_t enable,
- struct sockaddr *dst_addr)
+static struct iscsi_cls_session *
+qla4xxx_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn)
{
+ struct iscsi_cls_session *cls_sess;
struct scsi_qla_host *ha;
- struct sockaddr_in *addr;
- struct sockaddr_in6 *addr6;
+ struct qla_endpoint *qla_ep;
+ struct ddb_entry *ddb_entry;
+ uint32_t ddb_index;
+ uint32_t mbx_sts = 0;
+ struct iscsi_session *sess;
+ struct sockaddr *dst_addr;
+ int ret;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ if (!ep) {
+ printk(KERN_ERR "qla4xxx: missing ep.\n");
+ return NULL;
+ }
+
+ qla_ep = ep->dd_data;
+ dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
+ ha = to_qla_host(qla_ep->host);
+
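+	/* Find a free DDB index; retry with the next one if the firmware rejects it. */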
+get_ddb_index:
+ ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ return NULL;
+ }
+
+ if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+				   "DDB index = %d not available, trying next\n",
+ ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ return NULL;
+ }
+
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
+ cmds_max, sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, ddb_index);
+ if (!cls_sess)
+ return NULL;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->fw_ddb_index = ddb_index;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->sess = cls_sess;
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+
+ return cls_sess;
+}
+
+static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ unsigned long flags;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ iscsi_session_teardown(cls_sess);
+}
+
+static struct iscsi_cls_conn *
+qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
+{
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
+ conn_idx);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->conn = cls_conn;
+
+ return cls_conn;
+}
+
+static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn;
+ struct qla_conn *qla_conn;
+ struct iscsi_endpoint *ep;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+ ep = iscsi_lookup_endpoint(transport_fd);
+ conn = cls_conn->dd_data;
+ qla_conn = conn->dd_data;
+ qla_conn->qla_ep = ep->dd_data;
+ return 0;
+}
+
+static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t mbx_sts = 0;
int ret = 0;
+ int status = QLA_SUCCESS;
- ha = (struct scsi_qla_host *) shost->hostdata;
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
- switch (type) {
- case ISCSI_TGT_DSCVR_SEND_TARGETS:
- if (dst_addr->sa_family == AF_INET) {
- addr = (struct sockaddr_in *)dst_addr;
- if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr,
- addr->sin_port) != QLA_SUCCESS)
- ret = -EIO;
- } else if (dst_addr->sa_family == AF_INET6) {
- /*
- * TODO: fix qla4xxx_send_tgts
- */
- addr6 = (struct sockaddr_in6 *)dst_addr;
- if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr,
- addr6->sin6_port) != QLA_SUCCESS)
- ret = -EIO;
- } else
- ret = -ENOSYS;
- break;
- default:
- ret = -ENOSYS;
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ return -ENOMEM;
+ }
+
+ ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
+ if (ret) {
+ /* If iscsid is stopped and restarted, there is no need to set
+ * the params again: the DDB state will already be active and
+ * the firmware does not allow a set ddb on an active session.
+ */
+ if (mbx_sts)
+ if (ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_ACTIVE) {
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+ goto exit_set_param;
+ }
+
+ ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
+ __func__, ddb_entry->fw_ddb_index);
+ goto exit_conn_start;
+ }
+
+ status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (status == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ ret = -EINVAL;
+ goto exit_conn_start;
}
+
+ if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+
+ DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state));
+
+exit_set_param:
+ ret = 0;
+
+exit_conn_start:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
-void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
+static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
- if (!ddb_entry->sess)
- return;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess;
+ struct scsi_qla_host *ha;
+ struct ddb_entry *ddb_entry;
+ int options;
- if (ddb_entry->conn) {
- atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
- iscsi_remove_session(ddb_entry->sess);
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+}
+
+static void qla4xxx_task_work(struct work_struct *wdata)
+{
+ struct ql4_task_data *task_data;
+ struct scsi_qla_host *ha;
+ struct passthru_status *sts;
+ struct iscsi_task *task;
+ struct iscsi_hdr *hdr;
+ uint8_t *data;
+ uint32_t data_len;
+ struct iscsi_conn *conn;
+ int hdr_len;
+ itt_t itt;
+
+ task_data = container_of(wdata, struct ql4_task_data, task_work);
+ ha = task_data->ha;
+ task = task_data->task;
+ sts = &task_data->sts;
+ hdr_len = sizeof(struct iscsi_hdr);
+
+ DEBUG3(printk(KERN_INFO "Status returned\n"));
+ DEBUG3(qla4xxx_dump_buffer(sts, 64));
+ DEBUG3(printk(KERN_INFO "Response buffer"));
+ DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
+
+ conn = task->conn;
+
+ switch (sts->completionStatus) {
+ case PASSTHRU_STATUS_COMPLETE:
+ hdr = (struct iscsi_hdr *)task_data->resp_buffer;
+ /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
+ itt = sts->handle;
+ hdr->itt = itt;
+ data = task_data->resp_buffer + hdr_len;
+ data_len = task_data->resp_len - hdr_len;
+ iscsi_complete_pdu(conn, hdr, data, data_len);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
+ sts->completionStatus);
+ break;
}
- iscsi_free_session(ddb_entry->sess);
+ return;
}
-int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
+static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
- int err;
+ struct ql4_task_data *task_data;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ int hdr_len;
- ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo;
+ sess = task->conn->session;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ task_data = task->dd_data;
+ memset(task_data, 0, sizeof(struct ql4_task_data));
- err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
- if (err) {
- DEBUG2(printk(KERN_ERR "Could not add session.\n"));
- return err;
+ if (task->sc) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: SCSI Commands not implemented\n", __func__);
+ return -EINVAL;
}
- ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
- if (!ddb_entry->conn) {
- iscsi_remove_session(ddb_entry->sess);
- DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
- return -ENOMEM;
+ hdr_len = sizeof(struct iscsi_hdr);
+ task_data->ha = ha;
+ task_data->task = task;
+
+ if (task->data_count) {
+ task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
+ task->data_count,
+ PCI_DMA_TODEVICE);
}
- /* finally ready to go */
- iscsi_unblock_session(ddb_entry->sess);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
+ __func__, task->conn->max_recv_dlength, hdr_len));
+
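+ /* Coherent DMA buffers: the response buffer holds an iSCSI header
+ * plus the connection's max receive length, the request buffer
+ * holds the header plus this task's data.
+ */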
+ task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
+ task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ task_data->resp_len,
+ &task_data->resp_dma,
+ GFP_ATOMIC);
+ if (!task_data->resp_buffer)
+ goto exit_alloc_pdu;
+
+ task_data->req_len = task->data_count + hdr_len;
+ task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ task_data->req_len,
+ &task_data->req_dma,
+ GFP_ATOMIC);
+ if (!task_data->req_buffer)
+ goto exit_alloc_pdu;
+
+ task->hdr = task_data->req_buffer;
+
+ INIT_WORK(&task_data->task_work, qla4xxx_task_work);
+
return 0;
+
+exit_alloc_pdu:
+ if (task_data->resp_buffer)
+ dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
+ task_data->resp_buffer, task_data->resp_dma);
+
+ if (task_data->req_buffer)
+ dma_free_coherent(&ha->pdev->dev, task_data->req_len,
+ task_data->req_buffer, task_data->req_dma);
+ return -ENOMEM;
}
-struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
+static void qla4xxx_task_cleanup(struct iscsi_task *task)
{
+ struct ql4_task_data *task_data;
+ struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
- struct iscsi_cls_session *sess;
-
- sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
- sizeof(struct ddb_entry));
- if (!sess)
- return NULL;
+ struct scsi_qla_host *ha;
+ int hdr_len;
+ hdr_len = sizeof(struct iscsi_hdr);
+ sess = task->conn->session;
ddb_entry = sess->dd_data;
- memset(ddb_entry, 0, sizeof(*ddb_entry));
- ddb_entry->ha = ha;
- ddb_entry->sess = sess;
- return ddb_entry;
+ ha = ddb_entry->ha;
+ task_data = task->dd_data;
+
+ if (task->data_count) {
+ dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
+ task->data_count, PCI_DMA_TODEVICE);
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
+ __func__, task->conn->max_recv_dlength, hdr_len));
+
+ dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
+ task_data->resp_buffer, task_data->resp_dma);
+ dma_free_coherent(&ha->pdev->dev, task_data->req_len,
+ task_data->req_buffer, task_data->req_dma);
+ return;
}
-static void qla4xxx_scan_start(struct Scsi_Host *shost)
+static int qla4xxx_task_xmit(struct iscsi_task *task)
{
- struct scsi_qla_host *ha = shost_priv(shost);
- struct ddb_entry *ddb_entry, *ddbtemp;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_session *sess = task->conn->session;
+ struct ddb_entry *ddb_entry = sess->dd_data;
+ struct scsi_qla_host *ha = ddb_entry->ha;
+
+ if (!sc)
+ return qla4xxx_send_passthru0(task);
- /* finish setup of sessions that were already setup in firmware */
- list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
- if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
- qla4xxx_add_sess(ddb_entry);
+ ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
+ __func__);
+ return -ENOSYS;
+}
+
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ return;
}
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ return;
+ }
+
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ /* Update params */
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+ sess->initial_r2t_en =
+ (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+ sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+ memcpy(sess->initiatorname, ha->name_string,
+ min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
}
/*
@@ -376,25 +1433,15 @@ static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
}
/***
- * qla4xxx_mark_device_missing - mark a device as missing.
- * @ha: Pointer to host adapter structure.
+ * qla4xxx_mark_device_missing - blocks the session
+ * @cls_session: Pointer to the session to be blocked
* @ddb_entry: Pointer to device database entry
*
 * This routine marks a device missing and closes the connection.
**/
-void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry)
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
{
- if ((atomic_read(&ddb_entry->state) != DDB_STATE_DEAD)) {
- atomic_set(&ddb_entry->state, DDB_STATE_MISSING);
- DEBUG2(printk("scsi%ld: ddb [%d] marked MISSING\n",
- ha->host_no, ddb_entry->fw_ddb_index));
- } else
- DEBUG2(printk("scsi%ld: ddb [%d] DEAD\n", ha->host_no,
- ddb_entry->fw_ddb_index))
-
- iscsi_block_session(ddb_entry->sess);
- iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_block_session(cls_session);
}
/**
@@ -405,10 +1452,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
**/
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
{
- struct ddb_entry *ddb_entry, *ddbtemp;
- list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
- qla4xxx_mark_device_missing(ha, ddb_entry);
- }
+ iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
}
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
@@ -495,20 +1539,13 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc_fail_command;
}
- if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
- if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
- cmd->result = DID_NO_CONNECT << 16;
- goto qc_fail_command;
- }
- return SCSI_MLQUEUE_TARGET_BUSY;
- }
-
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
!test_bit(AF_ONLINE, &ha->flags) ||
+ !test_bit(AF_LINK_UP, &ha->flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
@@ -563,6 +1600,13 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
ha->srb_mempool = NULL;
+ if (ha->chap_dma_pool)
+ dma_pool_destroy(ha->chap_dma_pool);
+
+ if (ha->chap_list)
+ vfree(ha->chap_list);
+ ha->chap_list = NULL;
+
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
@@ -636,6 +1680,15 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
goto mem_alloc_error_exit;
}
+ ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
+ CHAP_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->chap_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: chap_dma_pool allocation failed..\n", __func__);
+ goto mem_alloc_error_exit;
+ }
+
return QLA_SUCCESS;
mem_alloc_error_exit:
@@ -753,7 +1806,6 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
**/
static void qla4xxx_timer(struct scsi_qla_host *ha)
{
- struct ddb_entry *ddb_entry, *dtemp;
int start_dpc = 0;
uint16_t w;
@@ -773,69 +1825,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
qla4_8xxx_watchdog(ha);
}
- /* Search for relogin's to time-out and port down retry. */
- list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
- /* Count down time between sending relogins */
- if (adapter_up(ha) &&
- !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
- atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
- if (atomic_read(&ddb_entry->retry_relogin_timer) !=
- INVALID_ENTRY) {
- if (atomic_read(&ddb_entry->retry_relogin_timer)
- == 0) {
- atomic_set(&ddb_entry->
- retry_relogin_timer,
- INVALID_ENTRY);
- set_bit(DPC_RELOGIN_DEVICE,
- &ha->dpc_flags);
- set_bit(DF_RELOGIN, &ddb_entry->flags);
- DEBUG2(printk("scsi%ld: %s: ddb [%d]"
- " login device\n",
- ha->host_no, __func__,
- ddb_entry->fw_ddb_index));
- } else
- atomic_dec(&ddb_entry->
- retry_relogin_timer);
- }
- }
-
- /* Wait for relogin to timeout */
- if (atomic_read(&ddb_entry->relogin_timer) &&
- (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
- /*
- * If the relogin times out and the device is
- * still NOT ONLINE then try and relogin again.
- */
- if (atomic_read(&ddb_entry->state) !=
- DDB_STATE_ONLINE &&
- ddb_entry->fw_ddb_device_state ==
- DDB_DS_SESSION_FAILED) {
- /* Reset retry relogin timer */
- atomic_inc(&ddb_entry->relogin_retry_count);
- DEBUG2(printk("scsi%ld: ddb [%d] relogin"
- " timed out-retrying"
- " relogin (%d)\n",
- ha->host_no,
- ddb_entry->fw_ddb_index,
- atomic_read(&ddb_entry->
- relogin_retry_count))
- );
- start_dpc++;
- DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
- "initiate relogin after"
- " %d seconds\n",
- ha->host_no, ddb_entry->bus,
- ddb_entry->target,
- ddb_entry->fw_ddb_index,
- ddb_entry->default_time2wait + 4)
- );
-
- atomic_set(&ddb_entry->retry_relogin_timer,
- ddb_entry->default_time2wait + 4);
- }
- }
- }
-
if (!is_qla8022(ha)) {
/* Check for heartbeat interval. */
if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
@@ -1081,6 +2070,17 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
clear_bit(AF_INIT_DONE, &ha->flags);
}
+static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
/**
* qla4xxx_recover_adapter - recovers adapter after a fatal error
* @ha: Pointer to host adapter structure.
@@ -1093,11 +2093,14 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
/* Stall incoming I/O until we are done */
scsi_block_requests(ha->host);
clear_bit(AF_ONLINE, &ha->flags);
+ clear_bit(AF_LINK_UP, &ha->flags);
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+ iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
+
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
reset_chip = 1;
@@ -1160,7 +2163,7 @@ recover_ha_init_adapter:
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
- status = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
+ status = qla4xxx_initialize_adapter(ha);
}
/* Retry failed adapter initialization, if necessary
@@ -1225,27 +2228,34 @@ recover_ha_init_adapter:
return status;
}
-static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
+static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
{
- struct ddb_entry *ddb_entry, *dtemp;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
- list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
- if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) ||
- (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) {
- if (ddb_entry->fw_ddb_device_state ==
- DDB_DS_SESSION_ACTIVE) {
- atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
- ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
- " marked ONLINE\n", ha->host_no, __func__,
- ddb_entry->fw_ddb_index);
-
- iscsi_unblock_session(ddb_entry->sess);
- } else
- qla4xxx_relogin_device(ha, ddb_entry);
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ if (!iscsi_is_session_online(cls_session)) {
+ if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_unblock_session(ddb_entry->sess);
+ } else {
+ /* Trigger relogin */
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
}
}
+static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
+{
+ iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
@@ -1267,7 +2277,6 @@ static void qla4xxx_do_dpc(struct work_struct *work)
{
struct scsi_qla_host *ha =
container_of(work, struct scsi_qla_host, dpc_work);
- struct ddb_entry *ddb_entry, *dtemp;
int status = QLA_ERROR;
DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
@@ -1363,31 +2372,6 @@ dpc_post_reset_ha:
qla4xxx_relogin_all_devices(ha);
}
}
-
- /* ---- relogin device? --- */
- if (adapter_up(ha) &&
- test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
- list_for_each_entry_safe(ddb_entry, dtemp,
- &ha->ddb_list, list) {
- if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
- atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
- qla4xxx_relogin_device(ha, ddb_entry);
-
- /*
- * If mbx cmd times out there is no point
- * in continuing further.
- * With large no of targets this can hang
- * the system.
- */
- if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
- printk(KERN_WARNING "scsi%ld: %s: "
- "need to reset hba\n",
- ha->host_no, __func__);
- break;
- }
- }
- }
-
}
/**
@@ -1410,6 +2394,10 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
if (ha->dpc_thread)
destroy_workqueue(ha->dpc_thread);
+ /* Kill the kernel thread for this host */
+ if (ha->task_wq)
+ destroy_workqueue(ha->task_wq);
+
/* Put firmware in known state */
ha->isp_ops->reset_firmware(ha);
@@ -1601,6 +2589,594 @@ uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
}
+static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = sprintf(str, "0\n");
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ rc = sysfs_format_mac(str, ha->my_mac,
+ MAC_ADDR_LEN);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ case ISCSI_BOOT_ETH_MAC:
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = sprintf(str, "%s\n", ha->name_string);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t
+qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
+ char *buf)
+{
+ struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
+ rc = sprintf(buf, "%pI4\n",
+ &boot_conn->dest_ipaddr.ip_address);
+ else
+ rc = sprintf(str, "%pI6\n",
+ &boot_conn->dest_ipaddr.ip_address);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ rc = sprintf(str, "%d\n", boot_conn->dest_port);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.target_chap_name_length,
+ (char *)&boot_conn->chap.target_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.target_secret_length,
+ (char *)&boot_conn->chap.target_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.intr_chap_name_length,
+ (char *)&boot_conn->chap.intr_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.intr_secret_length,
+ (char *)&boot_conn->chap.intr_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ rc = sprintf(str, "0\n");
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
+
+ return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
+}
+
+static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
+
+ return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
+}
+
+static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static void qla4xxx_boot_release(void *data)
+{
+ struct scsi_qla_host *ha = data;
+
+ scsi_host_put(ha->host);
+}
+
+static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
+{
+ dma_addr_t buf_dma;
+ uint32_t addr, pri_addr, sec_addr;
+ uint32_t offset;
+ uint16_t func_num;
+ uint8_t val;
+ uint8_t *buf = NULL;
+ size_t size = 13 * sizeof(uint8_t);
+ int ret = QLA_SUCCESS;
+
+ func_num = PCI_FUNC(ha->pdev->devfn);
+
+ ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
+ __func__, ha->pdev->device, func_num);
+
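+ /* Boot target indices are kept in per-port NVRAM bytes on 40xx
+ * adapters and in the flash boot-parameter block on 82xx adapters.
+ */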
+ if (is_qla40XX(ha)) {
+ if (func_num == 1) {
+ addr = NVRAM_PORT0_BOOT_MODE;
+ pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
+ sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
+ } else if (func_num == 3) {
+ addr = NVRAM_PORT1_BOOT_MODE;
+ pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
+ sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
+ } else {
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ /* Check Boot Mode */
+ val = rd_nvram_byte(ha, addr);
+ if (!(val & 0x07)) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Failed Boot options : 0x%x\n",
+ __func__, val));
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ /* get primary valid target index */
+ val = rd_nvram_byte(ha, pri_addr);
+ if (val & BIT_7)
+ ddb_index[0] = (val & 0x7f);
+
+ /* get secondary valid target index */
+ val = rd_nvram_byte(ha, sec_addr);
+ if (val & BIT_7)
+ ddb_index[1] = (val & 0x7f);
+
+ } else if (is_qla8022(ha)) {
+ buf = dma_alloc_coherent(&ha->pdev->dev, size,
+ &buf_dma, GFP_KERNEL);
+ if (!buf) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ if (ha->port_num == 0)
+ offset = BOOT_PARAM_OFFSET_PORT0;
+ else if (ha->port_num == 1)
+ offset = BOOT_PARAM_OFFSET_PORT1;
+ else {
+ ret = QLA_ERROR;
+ goto exit_boot_info_free;
+ }
+ addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
+ offset;
+ if (qla4xxx_get_flash(ha, buf_dma, addr,
+ 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
+ "failed\n", ha->host_no, __func__));
+ ret = QLA_ERROR;
+ goto exit_boot_info_free;
+ }
+ /* Check Boot Mode */
+ if (!(buf[1] & 0x07)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Failed: Boot options : 0x%x\n",
+ buf[1]));
+ ret = QLA_ERROR;
+ goto exit_boot_info_free;
+ }
+
+ /* get primary valid target index */
+ if (buf[2] & BIT_7)
+ ddb_index[0] = buf[2] & 0x7f;
+
+ /* get secondary valid target index */
+ if (buf[11] & BIT_7)
+ ddb_index[1] = buf[11] & 0x7f;
+ } else {
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
+ " target ID %d\n", __func__, ddb_index[0],
+ ddb_index[1]));
+
+exit_boot_info_free:
+ dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
+exit_boot_info:
+ return ret;
+}
+
+/**
+ * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to be returned
+ * @password: CHAP password to be returned
+ *
+ * If a boot entry has BIDI CHAP enabled, the BIDI CHAP user and password
+ * must be exposed through the sysfs entry in /sys/firmware/iscsi_boot#/.
+ * Find the first BIDI CHAP entry in the CHAP cache and copy it into the
+ * boot record in sysfs.
+ **/
+static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
+ char *password)
+{
+ int i, ret = -EINVAL;
+ int max_chap_entries = 0;
+ struct ql4_chap_table *chap_table;
+
+ if (is_qla8022(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+ return ret;
+ }
+
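+ /* Scan the cached CHAP table under chap_sem for the first valid
+ * entry that is not local and has the BIDI flag set.
+ */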
+ mutex_lock(&ha->chap_sem);
+ for (i = 0; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+ if (chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ continue;
+ }
+
+ if (chap_table->flags & BIT_7) /* local */
+ continue;
+
+ if (!(chap_table->flags & BIT_6)) /* Not BIDI */
+ continue;
+
+ strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&ha->chap_sem);
+
+ return ret;
+}
+
+
+static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
+ struct ql4_boot_session_info *boot_sess,
+ uint16_t ddb_index)
+{
+ struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_entry_dma;
+ uint16_t idx;
+ uint16_t options;
+ int ret = QLA_SUCCESS;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer.\n",
+ __func__));
+ ret = QLA_ERROR;
+ return ret;
+ }
+
+ if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
+ fw_ddb_entry_dma, ddb_index)) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Flash DDB read Failed\n", __func__));
+ ret = QLA_ERROR;
+ goto exit_boot_target;
+ }
+
+ /* Update target name and IP from DDB */
+ memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
+ min(sizeof(boot_sess->target_name),
+ sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ memcpy(&boot_conn->dest_ipaddr.ip_address,
+ &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
+ } else {
+ boot_conn->dest_ipaddr.ip_type = 0x1;
+ memcpy(&boot_conn->dest_ipaddr.ip_address,
+ &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
+ }
+
+ boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
+
+ /* update chap information */
+ idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+
+ if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
+
+ ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
+ target_chap_name,
+ (char *)&boot_conn->chap.target_secret,
+ idx);
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
+ ret = QLA_ERROR;
+ goto exit_boot_target;
+ }
+
+ boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
+ boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
+ }
+
+ if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
+
+ ret = qla4xxx_get_bidi_chap(ha,
+ (char *)&boot_conn->chap.intr_chap_name,
+ (char *)&boot_conn->chap.intr_secret);
+
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
+ ret = QLA_ERROR;
+ goto exit_boot_target;
+ }
+
+ boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
+ boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
+ }
+
+exit_boot_target:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return ret;
+}
+
+static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
+{
+ uint16_t ddb_index[2];
+ int ret = QLA_ERROR;
+ int rval;
+
+ memset(ddb_index, 0, sizeof(ddb_index));
+ ddb_index[0] = 0xffff;
+ ddb_index[1] = 0xffff;
+ ret = get_fw_boot_info(ha, ddb_index);
+ if (ret != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Failed to set boot info.\n", __func__));
+ return ret;
+ }
+
+ if (ddb_index[0] == 0xffff)
+ goto sec_target;
+
+ rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
+ ddb_index[0]);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
+ "primary target\n", __func__));
+ } else
+ ret = QLA_SUCCESS;
+
+sec_target:
+ if (ddb_index[1] == 0xffff)
+ goto exit_get_boot_info;
+
+ rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
+ ddb_index[1]);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
+ "secondary target\n", __func__));
+ } else
+ ret = QLA_SUCCESS;
+
+exit_get_boot_info:
+ return ret;
+}
+
+static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
+ return 0;
+
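+ /* Export the primary and secondary boot targets, the initiator and
+ * the boot NIC through the iscsi_boot sysfs kset; each kobject takes
+ * a reference on the host, dropped in qla4xxx_boot_release().
+ */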
+ ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
+ if (!ha->boot_kset)
+ goto kset_free;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
+ qla4xxx_show_boot_tgt_pri_info,
+ qla4xxx_tgt_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
+ qla4xxx_show_boot_tgt_sec_info,
+ qla4xxx_tgt_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
+ qla4xxx_show_boot_ini_info,
+ qla4xxx_ini_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
+ qla4xxx_show_boot_eth_info,
+ qla4xxx_eth_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ return 0;
+
+put_host:
+ scsi_host_put(ha->host);
+kset_free:
+ iscsi_boot_destroy_kset(ha->boot_kset);
+ return -ENOMEM;
+}
+
+
+/**
+ * qla4xxx_create_chap_list - Create CHAP list from FLASH
+ * @ha: pointer to adapter structure
+ *
+ * Read flash and build a list of CHAP entries. During login, when a CHAP
+ * entry is received it is looked up in this list. If the entry exists, its
+ * index is set in the DDB; if it does not, a new entry is added to the CHAP
+ * table in FLASH and the resulting index is used in the DDB.
+ **/
+static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
+{
+ int rval = 0;
+ uint8_t *chap_flash_data = NULL;
+ uint32_t offset;
+ dma_addr_t chap_dma;
+ uint32_t chap_size = 0;
+
+ if (is_qla40XX(ha))
+ chap_size = MAX_CHAP_ENTRIES_40XX *
+ sizeof(struct ql4_chap_table);
+ else /* A single region contains the CHAP info for both
+ * ports and is divided in half, one half per port.
+ */
+ chap_size = ha->hw.flt_chap_size / 2;
+
+ chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
+ &chap_dma, GFP_KERNEL);
+ if (!chap_flash_data) {
+ ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
+ return;
+ }
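+ /* The CHAP table is at a fixed flash offset on 40xx adapters; on
+ * 82xx it is located via the flash layout table and split in half,
+ * one half per port.
+ */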
+ if (is_qla40XX(ha))
+ offset = FLASH_CHAP_OFFSET;
+ else {
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ if (ha->port_num == 1)
+ offset += chap_size;
+ }
+
+ rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+ if (rval != QLA_SUCCESS)
+ goto exit_chap_list;
+
+ if (ha->chap_list == NULL)
+ ha->chap_list = vmalloc(chap_size);
+ if (ha->chap_list == NULL) {
+ ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
+ goto exit_chap_list;
+ }
+
+ memcpy(ha->chap_list, chap_flash_data, chap_size);
+
+exit_chap_list:
+ dma_free_coherent(&ha->pdev->dev, chap_size,
+ chap_flash_data, chap_dma);
+ return;
+}
+
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
@@ -1624,7 +3200,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (pci_enable_device(pdev))
return -1;
- host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha));
+ host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
if (host == NULL) {
printk(KERN_WARNING
"qla4xxx: Couldn't allocate host from scsi layer!\n");
@@ -1632,7 +3208,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
}
/* Clear our data area */
- ha = (struct scsi_qla_host *) host->hostdata;
+ ha = to_qla_host(host);
memset(ha, 0, sizeof(*ha));
/* Save the information from PCI BIOS. */
@@ -1675,11 +3251,12 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
qla4xxx_config_dma_addressing(ha);
/* Initialize lists and spinlocks. */
- INIT_LIST_HEAD(&ha->ddb_list);
INIT_LIST_HEAD(&ha->free_srb_q);
mutex_init(&ha->mbox_sem);
+ mutex_init(&ha->chap_sem);
init_completion(&ha->mbx_intr_comp);
+ init_completion(&ha->disable_acb_comp);
spin_lock_init(&ha->hardware_lock);
@@ -1692,6 +3269,27 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
goto probe_failed;
}
+ host->cmd_per_lun = 3;
+ host->max_channel = 0;
+ host->max_lun = MAX_LUNS - 1;
+ host->max_id = MAX_TARGETS;
+ host->max_cmd_len = IOCB_MAX_CDB_LEN;
+ host->can_queue = MAX_SRBS;
+ host->transportt = qla4xxx_scsi_transport;
+
+ ret = scsi_init_shared_tag_map(host, MAX_SRBS);
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: scsi_init_shared_tag_map failed\n", __func__);
+ goto probe_failed;
+ }
+
+ pci_set_drvdata(pdev, ha);
+
+ ret = scsi_add_host(host, &pdev->dev);
+ if (ret)
+ goto probe_failed;
+
if (is_qla8022(ha))
(void) qla4_8xxx_get_flash_info(ha);
@@ -1700,7 +3298,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* firmware
* NOTE: interrupts enabled upon successful completion
*/
- status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
+ status = qla4xxx_initialize_adapter(ha);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -1721,7 +3319,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
- status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
+ status = qla4xxx_initialize_adapter(ha);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -1736,24 +3334,9 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
qla4_8xxx_idc_unlock(ha);
}
ret = -ENODEV;
- goto probe_failed;
+ goto remove_host;
}
- host->cmd_per_lun = 3;
- host->max_channel = 0;
- host->max_lun = MAX_LUNS - 1;
- host->max_id = MAX_TARGETS;
- host->max_cmd_len = IOCB_MAX_CDB_LEN;
- host->can_queue = MAX_SRBS ;
- host->transportt = qla4xxx_scsi_transport;
-
- ret = scsi_init_shared_tag_map(host, MAX_SRBS);
- if (ret) {
- ql4_printk(KERN_WARNING, ha,
- "scsi_init_shared_tag_map failed\n");
- goto probe_failed;
- }
-
/* Startup the kernel thread for this host adapter. */
DEBUG2(printk("scsi: %s: Starting kernel thread for "
"qla4xxx_dpc\n", __func__));
@@ -1762,10 +3345,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (!ha->dpc_thread) {
ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
ret = -ENODEV;
- goto probe_failed;
+ goto remove_host;
}
INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
+ sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
+ ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
+ if (!ha->task_wq) {
+ ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
+ ret = -ENODEV;
+ goto remove_host;
+ }
+
/* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
* (which is called indirectly by qla4xxx_initialize_adapter),
* so that irqs will be registered after crbinit but before
@@ -1776,7 +3367,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to reserve "
"interrupt %d already in use.\n", pdev->irq);
- goto probe_failed;
+ goto remove_host;
}
}
@@ -1788,21 +3379,25 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
set_bit(AF_INIT_DONE, &ha->flags);
- pci_set_drvdata(pdev, ha);
-
- ret = scsi_add_host(host, &pdev->dev);
- if (ret)
- goto probe_failed;
-
printk(KERN_INFO
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
- scsi_scan_host(host);
+
+ qla4xxx_create_chap_list(ha);
+
+ if (qla4xxx_setup_boot_info(ha))
+ ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
+ __func__);
+
+ qla4xxx_create_ifaces(ha);
return 0;
+remove_host:
+ scsi_remove_host(ha->host);
+
probe_failed:
qla4xxx_free_adapter(ha);
@@ -1867,8 +3462,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
if (!is_qla8022(ha))
qla4xxx_prevent_other_port_reinit(ha);
- /* remove devs from iscsi_sessions to scsi_devices */
- qla4xxx_free_ddb_list(ha);
+ /* destroy iface from sysfs */
+ qla4xxx_destroy_ifaces(ha);
+
+ if (ha->boot_kset)
+ iscsi_boot_destroy_kset(ha->boot_kset);
scsi_remove_host(ha->host);
@@ -1907,10 +3505,15 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
{
- struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
- struct ddb_entry *ddb = sess->dd_data;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb;
int queue_depth = QL4_DEF_QDEPTH;
+ cls_sess = starget_to_session(sdev->sdev_target);
+ sess = cls_sess->dd_data;
+ ddb = sess->dd_data;
+
sdev->hostdata = ddb;
sdev->tagged_supported = 1;
@@ -2248,7 +3851,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
int return_status = FAILED;
struct scsi_qla_host *ha;
- ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
+ ha = to_qla_host(cmd->device->host);
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
@@ -2284,6 +3887,110 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
return return_status;
}
+static int qla4xxx_context_reset(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct addr_ctrl_blk_def *acb = NULL;
+ uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
+ int rval = QLA_SUCCESS;
+ dma_addr_t acb_dma;
+
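+ /* Save the current ACB, disable it, wait (up to DISABLE_ACB_TOV)
+ * for the disable to complete, then restore the saved ACB to reset
+ * the context.
+ */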
+ acb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk_def),
+ &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
+ __func__);
+ rval = -ENOMEM;
+ goto exit_port_reset;
+ }
+
+ memset(acb, 0, acb_len);
+
+ rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
+ if (rval != QLA_SUCCESS) {
+ rval = -EIO;
+ goto exit_free_acb;
+ }
+
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS) {
+ rval = -EIO;
+ goto exit_free_acb;
+ }
+
+ wait_for_completion_timeout(&ha->disable_acb_comp,
+ DISABLE_ACB_TOV * HZ);
+
+ rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
+ if (rval != QLA_SUCCESS) {
+ rval = -EIO;
+ goto exit_free_acb;
+ }
+
+exit_free_acb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
+ acb, acb_dma);
+exit_port_reset:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
+ rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
+ return rval;
+}
+
+static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int rval = QLA_SUCCESS;
+
+ if (ql4xdontresethba) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
+ __func__));
+ rval = -EPERM;
+ goto exit_host_reset;
+ }
+
+ rval = qla4xxx_wait_for_hba_online(ha);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
+ "adapter\n", __func__));
+ rval = -EIO;
+ goto exit_host_reset;
+ }
+
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+ goto recover_adapter;
+
+ switch (reset_type) {
+ case SCSI_ADAPTER_RESET:
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ break;
+ case SCSI_FIRMWARE_RESET:
+ if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ if (is_qla8022(ha))
+ /* set firmware context reset */
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else {
+ rval = qla4xxx_context_reset(ha);
+ goto exit_host_reset;
+ }
+ }
+ break;
+ }
+
+recover_adapter:
+ rval = qla4xxx_recover_adapter(ha);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
+ __func__));
+ rval = -EIO;
+ }
+
+exit_host_reset:
+ return rval;
+}
+
/* PCI AER driver recovers from all correctable errors w/o
* driver intervention. For uncorrectable errors PCI AER
* driver calls the following device driver's callbacks
@@ -2360,7 +4067,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if (test_bit(AF_ONLINE, &ha->flags)) {
clear_bit(AF_ONLINE, &ha->flags);
- qla4xxx_mark_all_devices_missing(ha);
+ clear_bit(AF_LINK_UP, &ha->flags);
+ iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
}
@@ -2407,7 +4115,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
+ rval = qla4xxx_initialize_adapter(ha);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
@@ -2443,8 +4151,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha,
- PRESERVE_DDB_LIST);
+ rval = qla4xxx_initialize_adapter(ha);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 61049287725..c15347d3f53 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9689d41c788..e40dc1cb09a 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -880,7 +880,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
cmd->control_flags |= CFLAG_WRITE;
else
cmd->control_flags |= CFLAG_READ;
- cmd->time_out = 30;
+ cmd->time_out = Cmnd->request->timeout/HZ;
memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a4b9cdbaaa0..dc6131e6a1b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -293,8 +293,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
* so that we can deal with it there.
*/
if (scmd->device->expecting_cc_ua) {
- scmd->device->expecting_cc_ua = 0;
- return NEEDS_RETRY;
+ /*
+ * Because some devices do not queue unit
+ * attentions correctly, carefully check the
+ * additional sense code and qualifier so as
+ * not to squash a media change unit attention.
+ */
+ if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
+ scmd->device->expecting_cc_ua = 0;
+ return NEEDS_RETRY;
+ }
}
/*
* if the device is in the process of becoming ready, we
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fc3f168decb..06bc26554a6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -12,6 +12,7 @@
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -1698,6 +1699,15 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
void scsi_free_queue(struct request_queue *q)
{
+ unsigned long flags;
+
+ WARN_ON(q->queuedata);
+
+ /* cause scsi_request_fn() to kill all non-finished requests */
+ spin_lock_irqsave(q->queue_lock, flags);
+ q->request_fn(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
blk_cleanup_queue(q);
}
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
index dcd128583b8..2ac3f3975f7 100644
--- a/drivers/scsi/scsi_lib_dma.c
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -4,6 +4,7 @@
#include <linux/blkdev.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 26a8a45584e..44f76e8b58a 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -23,6 +23,7 @@
#include <linux/security.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/netlink.h>
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d82a023a901..d329f8b12e2 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -6,6 +6,7 @@
*/
#include <linux/pm_runtime.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 44e8ca398ef..72273a0e566 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -322,6 +322,7 @@ out_device_destroy:
scsi_device_set_state(sdev, SDEV_DEL);
transport_destroy_device(&sdev->sdev_gendev);
put_device(&sdev->sdev_dev);
+ scsi_free_queue(sdev->request_queue);
put_device(&sdev->sdev_gendev);
out:
if (display_failure_msg)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e0bd3f790fc..04c2a278076 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -246,6 +246,43 @@ show_shost_active_mode(struct device *dev,
static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
+static int check_reset_type(char *str)
+{
+ if (strncmp(str, "adapter", 10) == 0)
+ return SCSI_ADAPTER_RESET;
+ else if (strncmp(str, "firmware", 10) == 0)
+ return SCSI_FIRMWARE_RESET;
+ else
+ return 0;
+}
+
+static ssize_t
+store_host_reset(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct scsi_host_template *sht = shost->hostt;
+ int ret = -EINVAL;
+ char str[10];
+ int type;
+
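+ /* Parse the requested reset type and hand it to the LLD's
+ * host_reset handler, e.g.
+ * echo "adapter" > /sys/class/scsi_host/hostN/host_reset
+ */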
+ sscanf(buf, "%s", str);
+ type = check_reset_type(str);
+
+ if (!type)
+ goto exit_store_host_reset;
+
+ if (sht->host_reset)
+ ret = sht->host_reset(shost, type);
+
+exit_store_host_reset:
+ if (ret == 0)
+ ret = count;
+ return ret;
+}
+
+static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
+
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(host_busy, "%hu\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -272,6 +309,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_active_mode.attr,
&dev_attr_prot_capabilities.attr,
&dev_attr_prot_guard_type.attr,
+ &dev_attr_host_reset.attr,
NULL
};
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index 0172de19700..6209110f295 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -22,6 +22,7 @@
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/file.h>
+#include <linux/export.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 3fd16d7212d..96029e6d027 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -23,6 +23,8 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/bsg-lib.h>
+#include <linux/idr.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -31,10 +33,7 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_cmnd.h>
-
-#define ISCSI_SESSION_ATTRS 23
-#define ISCSI_CONN_ATTRS 13
-#define ISCSI_HOST_ATTRS 4
+#include <scsi/scsi_bsg_iscsi.h>
#define ISCSI_TRANSPORT_VERSION "2.0-870"
@@ -76,16 +75,14 @@ struct iscsi_internal {
struct list_head list;
struct device dev;
- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
struct transport_container conn_cont;
- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
struct transport_container session_cont;
- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
};
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_eh_timer_workq;
+static DEFINE_IDA(iscsi_sess_ida);
/*
* list of registered transports and lock that must
* be held while accessing list. The iscsi_transport_lock must
@@ -270,6 +267,291 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+/*
+ * Interface to display network param to sysfs
+ */
+
+static void iscsi_iface_release(struct device *dev)
+{
+ struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
+ struct device *parent = iface->dev.parent;
+
+ kfree(iface);
+ put_device(parent);
+}
+
+
+static struct class iscsi_iface_class = {
+ .name = "iscsi_iface",
+ .dev_release = iscsi_iface_release,
+};
+
+#define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+/* iface attrs show */
+#define iscsi_iface_attr_show(type, name, param_type, param) \
+static ssize_t \
+show_##type##_##name(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct iscsi_iface *iface = iscsi_dev_to_iface(dev); \
+ struct iscsi_transport *t = iface->transport; \
+ return t->get_iface_param(iface, param_type, param, buf); \
+} \
+
+#define iscsi_iface_net_attr(type, name, param) \
+ iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \
+static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
+
+/* generic read only ipv4 attribute */
+iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);
+iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);
+iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);
+iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO);
+
+/* generic read only ipv6 attribute */
+iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR);
+iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL);
+iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER);
+iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg,
+ ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG);
+iscsi_iface_net_attr(ipv6_iface, link_local_autocfg,
+ ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG);
+
+/* common read only iface attribute */
+iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE);
+iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID);
+iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY);
+iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
+iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
+iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
+
+static mode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
+ struct iscsi_transport *t = iface->transport;
+ int param;
+
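+ /* Map the attribute back to its ISCSI_NET_PARAM value and let the
+ * transport's attr_is_visible() decide whether to expose it.
+ */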
+ if (attr == &dev_attr_iface_enabled.attr)
+ param = ISCSI_NET_PARAM_IFACE_ENABLE;
+ else if (attr == &dev_attr_iface_vlan_id.attr)
+ param = ISCSI_NET_PARAM_VLAN_ID;
+ else if (attr == &dev_attr_iface_vlan_priority.attr)
+ param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+ else if (attr == &dev_attr_iface_vlan_enabled.attr)
+ param = ISCSI_NET_PARAM_VLAN_ENABLED;
+ else if (attr == &dev_attr_iface_mtu.attr)
+ param = ISCSI_NET_PARAM_MTU;
+ else if (attr == &dev_attr_iface_port.attr)
+ param = ISCSI_NET_PARAM_PORT;
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
+ param = ISCSI_NET_PARAM_IPV4_ADDR;
+ else if (attr == &dev_attr_ipv4_iface_gateway.attr)
+ param = ISCSI_NET_PARAM_IPV4_GW;
+ else if (attr == &dev_attr_ipv4_iface_subnet.attr)
+ param = ISCSI_NET_PARAM_IPV4_SUBNET;
+ else if (attr == &dev_attr_ipv4_iface_bootproto.attr)
+ param = ISCSI_NET_PARAM_IPV4_BOOTPROTO;
+ else
+ return 0;
+ } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) {
+ if (attr == &dev_attr_ipv6_iface_ipaddress.attr)
+ param = ISCSI_NET_PARAM_IPV6_ADDR;
+ else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr)
+ param = ISCSI_NET_PARAM_IPV6_LINKLOCAL;
+ else if (attr == &dev_attr_ipv6_iface_router_addr.attr)
+ param = ISCSI_NET_PARAM_IPV6_ROUTER;
+ else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr)
+ param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG;
+ else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr)
+ param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG;
+ else
+ return 0;
+ } else {
+ WARN_ONCE(1, "Invalid iface attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_NET_PARAM, param);
+}
+
+static struct attribute *iscsi_iface_attrs[] = {
+ &dev_attr_iface_enabled.attr,
+ &dev_attr_iface_vlan_id.attr,
+ &dev_attr_iface_vlan_priority.attr,
+ &dev_attr_iface_vlan_enabled.attr,
+ &dev_attr_ipv4_iface_ipaddress.attr,
+ &dev_attr_ipv4_iface_gateway.attr,
+ &dev_attr_ipv4_iface_subnet.attr,
+ &dev_attr_ipv4_iface_bootproto.attr,
+ &dev_attr_ipv6_iface_ipaddress.attr,
+ &dev_attr_ipv6_iface_link_local_addr.attr,
+ &dev_attr_ipv6_iface_router_addr.attr,
+ &dev_attr_ipv6_iface_ipaddr_autocfg.attr,
+ &dev_attr_ipv6_iface_link_local_autocfg.attr,
+ &dev_attr_iface_mtu.attr,
+ &dev_attr_iface_port.attr,
+ NULL,
+};
+
+static struct attribute_group iscsi_iface_group = {
+ .attrs = iscsi_iface_attrs,
+ .is_visible = iscsi_iface_attr_is_visible,
+};
+
+struct iscsi_iface *
+iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ uint32_t iface_type, uint32_t iface_num, int dd_size)
+{
+ struct iscsi_iface *iface;
+ int err;
+
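+ /* The iface and the LLD's private data are allocated together;
+ * dd_data is pointed just past the iscsi_iface structure below.
+ */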
+ iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL);
+ if (!iface)
+ return NULL;
+
+ iface->transport = transport;
+ iface->iface_type = iface_type;
+ iface->iface_num = iface_num;
+ iface->dev.release = iscsi_iface_release;
+ iface->dev.class = &iscsi_iface_class;
+ /* parent reference released in iscsi_iface_release */
+ iface->dev.parent = get_device(&shost->shost_gendev);
+ if (iface_type == ISCSI_IFACE_TYPE_IPV4)
+ dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no,
+ iface_num);
+ else
+ dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no,
+ iface_num);
+
+ err = device_register(&iface->dev);
+ if (err)
+ goto free_iface;
+
+ err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group);
+ if (err)
+ goto unreg_iface;
+
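+ /* LLD private data, if requested, was allocated right after the iface */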
+ if (dd_size)
+ iface->dd_data = &iface[1];
+ return iface;
+
+unreg_iface:
+ device_unregister(&iface->dev);
+ return NULL;
+
+free_iface:
+ put_device(iface->dev.parent);
+ kfree(iface);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_iface);
+
+void iscsi_destroy_iface(struct iscsi_iface *iface)
+{
+ sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group);
+ device_unregister(&iface->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
+
+/*
+ * BSG support
+ */
+/**
+ * iscsi_bsg_host_dispatch - Dispatch command to LLD.
+ * @job: bsg job to be processed
+ */
+static int iscsi_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = iscsi_job_to_shost(job);
+ struct iscsi_bsg_request *req = job->request;
+ struct iscsi_bsg_reply *reply = job->reply;
+ struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
+ int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
+ int ret;
+
+ /* check if we have the msgcode value at least */
+ if (job->request_len < sizeof(uint32_t)) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
+ /* Validate the host command */
+ switch (req->msgcode) {
+ case ISCSI_BSG_HST_VENDOR:
+ cmdlen += sizeof(struct iscsi_bsg_host_vendor);
+ if ((shost->hostt->vendor_id == 0L) ||
+ (req->rqst_data.h_vendor.vendor_id !=
+ shost->hostt->vendor_id)) {
+ ret = -ESRCH;
+ goto fail_host_msg;
+ }
+ break;
+ default:
+ ret = -EBADR;
+ goto fail_host_msg;
+ }
+
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
+ ret = i->iscsi_transport->bsg_request(job);
+ if (!ret)
+ return 0;
+
+fail_host_msg:
+ /* return the errno failure code as the only status */
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ reply->reply_payload_rcv_len = 0;
+ reply->result = ret;
+ job->reply_len = sizeof(uint32_t);
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+/**
+ * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests
+ * @shost: shost for iscsi_host
+ * @ihost: iscsi_cls_host to add the structures to
+ */
+static int
+iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
+{
+ struct device *dev = &shost->shost_gendev;
+ struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
+ struct request_queue *q;
+ char bsg_name[20];
+ int ret;
+
+ if (!i->iscsi_transport->bsg_request)
+ return -ENOTSUPP;
+
+ snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
+
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
+ if (!q)
+ return -ENOMEM;
+
+ ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0);
+ if (ret) {
+ shost_printk(KERN_ERR, shost, "bsg interface failed to "
+ "initialize - no request queue\n");
+ blk_cleanup_queue(q);
+ return ret;
+ }
+
+ ihost->bsg_q = q;
+ return 0;
+}
+
static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
struct device *cdev)
{
@@ -279,13 +561,30 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
memset(ihost, 0, sizeof(*ihost));
atomic_set(&ihost->nr_scans, 0);
mutex_init(&ihost->mutex);
+
+ iscsi_bsg_host_add(shost, ihost);
+ /* ignore any bsg add error - we just can't do sgio */
+
+ return 0;
+}
+
+static int iscsi_remove_host(struct transport_container *tc,
+ struct device *dev, struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+
+ if (ihost->bsg_q) {
+ bsg_remove_queue(ihost->bsg_q);
+ blk_cleanup_queue(ihost->bsg_q);
+ }
return 0;
}
static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
"iscsi_host",
iscsi_setup_host,
- NULL,
+ iscsi_remove_host,
NULL);
static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
@@ -404,6 +703,19 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
}
EXPORT_SYMBOL_GPL(iscsi_session_chkready);
+int iscsi_is_session_online(struct iscsi_cls_session *session)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state == ISCSI_SESSION_LOGGED_IN)
+ ret = 1;
+ spin_unlock_irqrestore(&session->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iscsi_is_session_online);
+
static void iscsi_session_release(struct device *dev)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
@@ -680,6 +992,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
+ unsigned int target_id;
ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
@@ -691,10 +1004,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
mutex_unlock(&ihost->mutex);
return;
}
+
+ target_id = session->target_id;
session->target_id = ISCSI_MAX_TARGET;
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
+ if (session->ida_used)
+ ida_simple_remove(&iscsi_sess_ida, target_id);
+
scsi_remove_target(&session->dev);
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
@@ -735,59 +1053,36 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
}
EXPORT_SYMBOL_GPL(iscsi_alloc_session);
-static int iscsi_get_next_target_id(struct device *dev, void *data)
-{
- struct iscsi_cls_session *session;
- unsigned long flags;
- int err = 0;
-
- if (!iscsi_is_session_dev(dev))
- return 0;
-
- session = iscsi_dev_to_session(dev);
- spin_lock_irqsave(&session->lock, flags);
- if (*((unsigned int *) data) == session->target_id)
- err = -EEXIST;
- spin_unlock_irqrestore(&session->lock, flags);
- return err;
-}
-
int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_cls_host *ihost;
unsigned long flags;
- unsigned int id = target_id;
+ int id = 0;
int err;
ihost = shost->shost_data;
session->sid = atomic_add_return(1, &iscsi_session_nr);
- if (id == ISCSI_MAX_TARGET) {
- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
- err = device_for_each_child(&shost->shost_gendev, &id,
- iscsi_get_next_target_id);
- if (!err)
- break;
- }
+ if (target_id == ISCSI_MAX_TARGET) {
+ id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
- if (id == ISCSI_MAX_TARGET) {
+ if (id < 0) {
iscsi_cls_session_printk(KERN_ERR, session,
- "Too many iscsi targets. Max "
- "number of targets is %d.\n",
- ISCSI_MAX_TARGET - 1);
- err = -EOVERFLOW;
- goto release_host;
+ "Failure in Target ID Allocation\n");
+ return id;
}
- }
- session->target_id = id;
+ session->target_id = (unsigned int)id;
+ session->ida_used = true;
+ } else
+ session->target_id = target_id;
dev_set_name(&session->dev, "session%u", session->sid);
err = device_add(&session->dev);
if (err) {
iscsi_cls_session_printk(KERN_ERR, session,
"could not register session's dev\n");
- goto release_host;
+ goto release_ida;
}
transport_register_device(&session->dev);
@@ -799,8 +1094,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
return 0;
-release_host:
- scsi_host_put(shost);
+release_ida:
+ if (session->ida_used)
+ ida_simple_remove(&iscsi_sess_ida, session->target_id);
+
return err;
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
@@ -1144,6 +1441,40 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
}
EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
+void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
+ enum iscsi_conn_state state)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ struct iscsi_internal *priv;
+ int len = NLMSG_SPACE(sizeof(*ev));
+
+ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+ "conn login (%d)\n", state);
+ return;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+ ev->transport_handle = iscsi_handle(conn->transport);
+ ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
+ ev->r.conn_login.state = state;
+ ev->r.conn_login.cid = conn->cid;
+ ev->r.conn_login.sid = iscsi_conn_get_sid(conn);
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
+
+ iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n",
+ state);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_login_event);
+
static int
iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
void *payload, int size)
@@ -1558,6 +1889,29 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
}
static int
+iscsi_set_iface_params(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err;
+
+ if (!transport->set_iface_param)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_iface_params.host_no);
+ if (!shost) {
+ printk(KERN_ERR "set_iface_params could not find host no %u\n",
+ ev->u.set_iface_params.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_iface_param(shost, data, len);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
@@ -1696,6 +2050,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_PATH_UPDATE:
err = iscsi_set_path(transport, ev);
break;
+ case ISCSI_UEVENT_SET_IFACE_PARAMS:
+ err = iscsi_set_iface_params(transport, ev,
+ nlmsg_attrlen(nlh, sizeof(*ev)));
+ break;
default:
err = -ENOSYS;
break;
@@ -1824,6 +2182,70 @@ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \
iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
+static struct attribute *iscsi_conn_attrs[] = {
+ &dev_attr_conn_max_recv_dlength.attr,
+ &dev_attr_conn_max_xmit_dlength.attr,
+ &dev_attr_conn_header_digest.attr,
+ &dev_attr_conn_data_digest.attr,
+ &dev_attr_conn_ifmarker.attr,
+ &dev_attr_conn_ofmarker.attr,
+ &dev_attr_conn_address.attr,
+ &dev_attr_conn_port.attr,
+ &dev_attr_conn_exp_statsn.attr,
+ &dev_attr_conn_persistent_address.attr,
+ &dev_attr_conn_persistent_port.attr,
+ &dev_attr_conn_ping_tmo.attr,
+ &dev_attr_conn_recv_tmo.attr,
+ NULL,
+};
+
+static mode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct iscsi_cls_conn *conn = transport_class_to_conn(cdev);
+ struct iscsi_transport *t = conn->transport;
+ int param;
+
+ if (attr == &dev_attr_conn_max_recv_dlength.attr)
+ param = ISCSI_PARAM_MAX_RECV_DLENGTH;
+ else if (attr == &dev_attr_conn_max_xmit_dlength.attr)
+ param = ISCSI_PARAM_MAX_XMIT_DLENGTH;
+ else if (attr == &dev_attr_conn_header_digest.attr)
+ param = ISCSI_PARAM_HDRDGST_EN;
+ else if (attr == &dev_attr_conn_data_digest.attr)
+ param = ISCSI_PARAM_DATADGST_EN;
+ else if (attr == &dev_attr_conn_ifmarker.attr)
+ param = ISCSI_PARAM_IFMARKER_EN;
+ else if (attr == &dev_attr_conn_ofmarker.attr)
+ param = ISCSI_PARAM_OFMARKER_EN;
+ else if (attr == &dev_attr_conn_address.attr)
+ param = ISCSI_PARAM_CONN_ADDRESS;
+ else if (attr == &dev_attr_conn_port.attr)
+ param = ISCSI_PARAM_CONN_PORT;
+ else if (attr == &dev_attr_conn_exp_statsn.attr)
+ param = ISCSI_PARAM_EXP_STATSN;
+ else if (attr == &dev_attr_conn_persistent_address.attr)
+ param = ISCSI_PARAM_PERSISTENT_ADDRESS;
+ else if (attr == &dev_attr_conn_persistent_port.attr)
+ param = ISCSI_PARAM_PERSISTENT_PORT;
+ else if (attr == &dev_attr_conn_ping_tmo.attr)
+ param = ISCSI_PARAM_PING_TMO;
+ else if (attr == &dev_attr_conn_recv_tmo.attr)
+ param = ISCSI_PARAM_RECV_TMO;
+ else {
+ WARN_ONCE(1, "Invalid conn attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_PARAM, param);
+}
+
+static struct attribute_group iscsi_conn_group = {
+ .attrs = iscsi_conn_attrs,
+ .is_visible = iscsi_conn_attr_is_visible,
+};
+
/*
* iSCSI session attrs
*/
@@ -1845,7 +2267,6 @@ show_session_param_##param(struct device *dev, \
iscsi_session_attr_show(param, perm) \
static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
NULL);
-
iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
@@ -1922,6 +2343,100 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \
store_priv_session_##field)
iscsi_priv_session_rw_attr(recovery_tmo, "%d");
+static struct attribute *iscsi_session_attrs[] = {
+ &dev_attr_sess_initial_r2t.attr,
+ &dev_attr_sess_max_outstanding_r2t.attr,
+ &dev_attr_sess_immediate_data.attr,
+ &dev_attr_sess_first_burst_len.attr,
+ &dev_attr_sess_max_burst_len.attr,
+ &dev_attr_sess_data_pdu_in_order.attr,
+ &dev_attr_sess_data_seq_in_order.attr,
+ &dev_attr_sess_erl.attr,
+ &dev_attr_sess_targetname.attr,
+ &dev_attr_sess_tpgt.attr,
+ &dev_attr_sess_password.attr,
+ &dev_attr_sess_password_in.attr,
+ &dev_attr_sess_username.attr,
+ &dev_attr_sess_username_in.attr,
+ &dev_attr_sess_fast_abort.attr,
+ &dev_attr_sess_abort_tmo.attr,
+ &dev_attr_sess_lu_reset_tmo.attr,
+ &dev_attr_sess_tgt_reset_tmo.attr,
+ &dev_attr_sess_ifacename.attr,
+ &dev_attr_sess_initiatorname.attr,
+ &dev_attr_sess_targetalias.attr,
+ &dev_attr_priv_sess_recovery_tmo.attr,
+ &dev_attr_priv_sess_state.attr,
+ NULL,
+};
+
+static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct iscsi_cls_session *session = transport_class_to_session(cdev);
+ struct iscsi_transport *t = session->transport;
+ int param;
+
+ if (attr == &dev_attr_sess_initial_r2t.attr)
+ param = ISCSI_PARAM_INITIAL_R2T_EN;
+ else if (attr == &dev_attr_sess_max_outstanding_r2t.attr)
+ param = ISCSI_PARAM_MAX_R2T;
+ else if (attr == &dev_attr_sess_immediate_data.attr)
+ param = ISCSI_PARAM_IMM_DATA_EN;
+ else if (attr == &dev_attr_sess_first_burst_len.attr)
+ param = ISCSI_PARAM_FIRST_BURST;
+ else if (attr == &dev_attr_sess_max_burst_len.attr)
+ param = ISCSI_PARAM_MAX_BURST;
+ else if (attr == &dev_attr_sess_data_pdu_in_order.attr)
+ param = ISCSI_PARAM_PDU_INORDER_EN;
+ else if (attr == &dev_attr_sess_data_seq_in_order.attr)
+ param = ISCSI_PARAM_DATASEQ_INORDER_EN;
+ else if (attr == &dev_attr_sess_erl.attr)
+ param = ISCSI_PARAM_ERL;
+ else if (attr == &dev_attr_sess_targetname.attr)
+ param = ISCSI_PARAM_TARGET_NAME;
+ else if (attr == &dev_attr_sess_tpgt.attr)
+ param = ISCSI_PARAM_TPGT;
+ else if (attr == &dev_attr_sess_password.attr)
+ param = ISCSI_PARAM_USERNAME;
+ else if (attr == &dev_attr_sess_password_in.attr)
+ param = ISCSI_PARAM_USERNAME_IN;
+ else if (attr == &dev_attr_sess_username.attr)
+ param = ISCSI_PARAM_PASSWORD;
+ else if (attr == &dev_attr_sess_username_in.attr)
+ param = ISCSI_PARAM_PASSWORD_IN;
+ else if (attr == &dev_attr_sess_fast_abort.attr)
+ param = ISCSI_PARAM_FAST_ABORT;
+ else if (attr == &dev_attr_sess_abort_tmo.attr)
+ param = ISCSI_PARAM_ABORT_TMO;
+ else if (attr == &dev_attr_sess_lu_reset_tmo.attr)
+ param = ISCSI_PARAM_LU_RESET_TMO;
+ else if (attr == &dev_attr_sess_tgt_reset_tmo.attr)
+ param = ISCSI_PARAM_TGT_RESET_TMO;
+ else if (attr == &dev_attr_sess_ifacename.attr)
+ param = ISCSI_PARAM_IFACE_NAME;
+ else if (attr == &dev_attr_sess_initiatorname.attr)
+ param = ISCSI_PARAM_INITIATOR_NAME;
+ else if (attr == &dev_attr_sess_targetalias.attr)
+ param = ISCSI_PARAM_TARGET_ALIAS;
+ else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
+ return S_IRUGO | S_IWUSR;
+ else if (attr == &dev_attr_priv_sess_state.attr)
+ return S_IRUGO;
+ else {
+ WARN_ONCE(1, "Invalid session attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_PARAM, param);
+}
+
+static struct attribute_group iscsi_session_group = {
+ .attrs = iscsi_session_attrs,
+ .is_visible = iscsi_session_attr_is_visible,
+};
+
/*
* iSCSI host attrs
*/
@@ -1945,41 +2460,42 @@ iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
-#define SETUP_PRIV_SESSION_RD_ATTR(field) \
-do { \
- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
- count++; \
-} while (0)
-
-#define SETUP_PRIV_SESSION_RW_ATTR(field) \
-do { \
- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
- count++; \
-} while (0)
-
-#define SETUP_SESSION_RD_ATTR(field, param_flag) \
-do { \
- if (tt->param_mask & param_flag) { \
- priv->session_attrs[count] = &dev_attr_sess_##field; \
- count++; \
- } \
-} while (0)
+static struct attribute *iscsi_host_attrs[] = {
+ &dev_attr_host_netdev.attr,
+ &dev_attr_host_hwaddress.attr,
+ &dev_attr_host_ipaddress.attr,
+ &dev_attr_host_initiatorname.attr,
+ NULL,
+};
-#define SETUP_CONN_RD_ATTR(field, param_flag) \
-do { \
- if (tt->param_mask & param_flag) { \
- priv->conn_attrs[count] = &dev_attr_conn_##field; \
- count++; \
- } \
-} while (0)
+static mode_t iscsi_host_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt);
+ int param;
+
+ if (attr == &dev_attr_host_netdev.attr)
+ param = ISCSI_HOST_PARAM_NETDEV_NAME;
+ else if (attr == &dev_attr_host_hwaddress.attr)
+ param = ISCSI_HOST_PARAM_HWADDRESS;
+ else if (attr == &dev_attr_host_ipaddress.attr)
+ param = ISCSI_HOST_PARAM_IPADDRESS;
+ else if (attr == &dev_attr_host_initiatorname.attr)
+ param = ISCSI_HOST_PARAM_INITIATOR_NAME;
+ else {
+ WARN_ONCE(1, "Invalid host attr");
+ return 0;
+ }
-#define SETUP_HOST_RD_ATTR(field, param_flag) \
-do { \
- if (tt->host_param_mask & param_flag) { \
- priv->host_attrs[count] = &dev_attr_host_##field; \
- count++; \
- } \
-} while (0)
+ return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param);
+}
+
+static struct attribute_group iscsi_host_group = {
+ .attrs = iscsi_host_attrs,
+ .is_visible = iscsi_host_attr_is_visible,
+};
static int iscsi_session_match(struct attribute_container *cont,
struct device *dev)
@@ -2051,7 +2567,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
{
struct iscsi_internal *priv;
unsigned long flags;
- int count = 0, err;
+ int err;
BUG_ON(!tt);
@@ -2078,77 +2594,24 @@ iscsi_register_transport(struct iscsi_transport *tt)
goto unregister_dev;
/* host parameters */
- priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
priv->t.host_attrs.ac.class = &iscsi_host_class.class;
priv->t.host_attrs.ac.match = iscsi_host_match;
+ priv->t.host_attrs.ac.grp = &iscsi_host_group;
priv->t.host_size = sizeof(struct iscsi_cls_host);
transport_container_register(&priv->t.host_attrs);
- SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
- SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
- SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
- SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
- BUG_ON(count > ISCSI_HOST_ATTRS);
- priv->host_attrs[count] = NULL;
- count = 0;
-
/* connection parameters */
- priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
priv->conn_cont.ac.class = &iscsi_connection_class.class;
priv->conn_cont.ac.match = iscsi_conn_match;
+ priv->conn_cont.ac.grp = &iscsi_conn_group;
transport_container_register(&priv->conn_cont);
- SETUP_CONN_RD_ATTR(max_recv_dlength, ISCSI_MAX_RECV_DLENGTH);
- SETUP_CONN_RD_ATTR(max_xmit_dlength, ISCSI_MAX_XMIT_DLENGTH);
- SETUP_CONN_RD_ATTR(header_digest, ISCSI_HDRDGST_EN);
- SETUP_CONN_RD_ATTR(data_digest, ISCSI_DATADGST_EN);
- SETUP_CONN_RD_ATTR(ifmarker, ISCSI_IFMARKER_EN);
- SETUP_CONN_RD_ATTR(ofmarker, ISCSI_OFMARKER_EN);
- SETUP_CONN_RD_ATTR(address, ISCSI_CONN_ADDRESS);
- SETUP_CONN_RD_ATTR(port, ISCSI_CONN_PORT);
- SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
- SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
- SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
- SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
- SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
-
- BUG_ON(count > ISCSI_CONN_ATTRS);
- priv->conn_attrs[count] = NULL;
- count = 0;
-
/* session parameters */
- priv->session_cont.ac.attrs = &priv->session_attrs[0];
priv->session_cont.ac.class = &iscsi_session_class.class;
priv->session_cont.ac.match = iscsi_session_match;
+ priv->session_cont.ac.grp = &iscsi_session_group;
transport_container_register(&priv->session_cont);
- SETUP_SESSION_RD_ATTR(initial_r2t, ISCSI_INITIAL_R2T_EN);
- SETUP_SESSION_RD_ATTR(max_outstanding_r2t, ISCSI_MAX_R2T);
- SETUP_SESSION_RD_ATTR(immediate_data, ISCSI_IMM_DATA_EN);
- SETUP_SESSION_RD_ATTR(first_burst_len, ISCSI_FIRST_BURST);
- SETUP_SESSION_RD_ATTR(max_burst_len, ISCSI_MAX_BURST);
- SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PDU_INORDER_EN);
- SETUP_SESSION_RD_ATTR(data_seq_in_order, ISCSI_DATASEQ_INORDER_EN);
- SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
- SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
- SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
- SETUP_SESSION_RD_ATTR(password, ISCSI_USERNAME);
- SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
- SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
- SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
- SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
- SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
- SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
- SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO);
- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
- SETUP_SESSION_RD_ATTR(targetalias, ISCSI_TARGET_ALIAS);
- SETUP_PRIV_SESSION_RW_ATTR(recovery_tmo);
- SETUP_PRIV_SESSION_RD_ATTR(state);
-
- BUG_ON(count > ISCSI_SESSION_ATTRS);
- priv->session_attrs[count] = NULL;
-
spin_lock_irqsave(&iscsi_transport_lock, flags);
list_add(&priv->list, &iscsi_transports);
spin_unlock_irqrestore(&iscsi_transport_lock, flags);
@@ -2210,10 +2673,14 @@ static __init int iscsi_transport_init(void)
if (err)
goto unregister_transport_class;
- err = transport_class_register(&iscsi_host_class);
+ err = class_register(&iscsi_iface_class);
if (err)
goto unregister_endpoint_class;
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+ goto unregister_iface_class;
+
err = transport_class_register(&iscsi_connection_class);
if (err)
goto unregister_host_class;
@@ -2243,6 +2710,8 @@ unregister_conn_class:
transport_class_unregister(&iscsi_connection_class);
unregister_host_class:
transport_class_unregister(&iscsi_host_class);
+unregister_iface_class:
+ class_unregister(&iscsi_iface_class);
unregister_endpoint_class:
class_unregister(&iscsi_endpoint_class);
unregister_transport_class:
@@ -2258,6 +2727,7 @@ static void __exit iscsi_transport_exit(void)
transport_class_unregister(&iscsi_session_class);
transport_class_unregister(&iscsi_host_class);
class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_iface_class);
class_unregister(&iscsi_transport_class);
}
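
For reference, the scsi_transport_iscsi.c changes above follow the generic sysfs
visibility pattern: an attribute_group lists every attribute that could exist,
and its .is_visible() callback returns the mode for the ones that apply on a
given device (returning 0 hides an attribute). A minimal sketch of that
pattern, with hypothetical example_* names that are not part of this patch:

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t example_param_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(example_param, S_IRUGO, example_param_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example_param.attr,
	NULL,
};

static mode_t example_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int i)
{
	/* a real callback would query the transport, as in the patch */
	if (attr == &dev_attr_example_param.attr)
		return S_IRUGO;		/* expose as read-only */
	return 0;			/* 0 hides the attribute */
}

static struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_attr_is_visible,
};

sysfs_create_group(&dev->kobj, &example_group) then creates only the entries
the callback marked visible.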
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index c6fcf76cade..9d9330ae421 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1545,8 +1545,14 @@ int sas_rphy_add(struct sas_rphy *rphy)
if (identify->device_type == SAS_END_DEVICE &&
rphy->scsi_target_id != -1) {
- scsi_scan_target(&rphy->dev, 0,
- rphy->scsi_target_id, SCAN_WILD_CARD, 0);
+ int lun;
+
+ if (identify->target_port_protocols & SAS_PROTOCOL_SSP)
+ lun = SCAN_WILD_CARD;
+ else
+ lun = 0;
+
+ scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0);
}
return 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 953773cb26d..fa3a5918009 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1066,12 +1066,13 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
- struct scsi_device *sdp = scsi_disk(disk)->device;
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdp = sdkp->device;
void __user *p = (void __user *)arg;
int error;
- SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
- disk->disk_name, cmd));
+ SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
+ "cmd=0x%x\n", disk->disk_name, cmd));
/*
* If we are in the middle of error recovery, don't let anyone
@@ -2589,18 +2590,16 @@ static int sd_probe(struct device *dev)
spin_unlock(&sd_index_lock);
} while (error == -EAGAIN);
- if (error)
+ if (error) {
+ sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
goto out_put;
-
- if (index >= SD_MAX_DISKS) {
- error = -ENODEV;
- sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
- goto out_free_index;
}
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
- if (error)
+ if (error) {
+ sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
goto out_free_index;
+ }
sdkp->device = sdp;
sdkp->driver = &sd_template;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 6ad798bfd52..4163f2910e3 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -9,12 +9,6 @@
#define SD_MAJORS 16
/*
- * This is limited by the naming scheme enforced in sd_probe,
- * add another character to it if you really need more disks.
- */
-#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26)
-
-/*
* Time out in seconds for disks and Magneto-opticals (which are slower).
*/
#define SD_TIMEOUT (30 * HZ)
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 8be30554119..a3911c39ea5 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -4,6 +4,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/blkdev.h>
+#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h>
#include <linux/delay.h>
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 1871b8ae83a..9b28f39bac2 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -462,14 +462,16 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
{
struct st_request *SRpnt = req->end_io_data;
struct scsi_tape *STp = SRpnt->stp;
+ struct bio *tmp;
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
STp->buffer->cmdstat.residual = req->resid_len;
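+ /* save the bio: the waiter may free SRpnt once complete() wakes it */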
+ tmp = SRpnt->bio;
if (SRpnt->waiting)
complete(SRpnt->waiting);
- blk_rq_unmap_user(SRpnt->bio);
+ blk_rq_unmap_user(tmp);
__blk_put_request(req->q, req);
}
diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h
index b7cf220d44e..1d5cfe854cf 100644
--- a/drivers/sfi/sfi_core.h
+++ b/drivers/sfi/sfi_core.h
@@ -55,6 +55,9 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
+#include <linux/sysfs.h>
+
struct sfi_table_key{
char *sig;
char *oem_id;
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 24e6cec0ae8..67e272ab162 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -7,3 +7,11 @@ obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_GENERIC_GPIO) += pfc.o
+
+#
+# For the moment we only use this framework for ARM-based SH/R-Mobile
+# platforms and generic SH. SH-based SH-Mobile platforms are still using
+# an older framework that is pending up-porting, at which point this
+# special casing can go away.
+#
+obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE) += pm_runtime.o
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index dc8d022c07a..db257a35e71 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -25,7 +25,6 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
@@ -173,6 +172,26 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
return clk_rate_round_helper(&div_range_round);
}
+static long clk_rate_mult_range_iter(unsigned int pos,
+ struct clk_rate_round_data *rounder)
+{
+ return clk_get_rate(rounder->arg) * pos;
+}
+
+long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
+ unsigned int mult_max, unsigned long rate)
+{
+ struct clk_rate_round_data mult_range_round = {
+ .min = mult_min,
+ .max = mult_max,
+ .func = clk_rate_mult_range_iter,
+ .arg = clk_get_parent(clk),
+ .rate = rate,
+ };
+
+ return clk_rate_round_helper(&mult_range_round);
+}
+
int clk_rate_table_find(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
unsigned long rate)
@@ -205,9 +224,6 @@ int clk_reparent(struct clk *child, struct clk *parent)
list_add(&child->sibling, &parent->children);
child->parent = parent;
- /* now do the debugfs renaming to reattach the child
- to the proper parent */
-
return 0;
}
@@ -665,89 +681,6 @@ static int __init clk_syscore_init(void)
subsys_initcall(clk_syscore_init);
#endif
-/*
- * debugfs support to trace clock tree hierarchy and attributes
- */
-static struct dentry *clk_debugfs_root;
-
-static int clk_debugfs_register_one(struct clk *c)
-{
- int err;
- struct dentry *d;
- struct clk *pa = c->parent;
- char s[255];
- char *p = s;
-
- p += sprintf(p, "%p", c);
- d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
- if (!d)
- return -ENOMEM;
- c->dentry = d;
-
- d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- return 0;
-
-err_out:
- debugfs_remove_recursive(c->dentry);
- return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
- int err;
- struct clk *pa = c->parent;
-
- if (pa && !pa->dentry) {
- err = clk_debugfs_register(pa);
- if (err)
- return err;
- }
-
- if (!c->dentry) {
- err = clk_debugfs_register_one(c);
- if (err)
- return err;
- }
- return 0;
-}
-
-static int __init clk_debugfs_init(void)
-{
- struct clk *c;
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("clock", NULL);
- if (!d)
- return -ENOMEM;
- clk_debugfs_root = d;
-
- list_for_each_entry(c, &clock_list, node) {
- err = clk_debugfs_register(c);
- if (err)
- goto err_out;
- }
- return 0;
-err_out:
- debugfs_remove_recursive(clk_debugfs_root);
- return err;
-}
-late_initcall(clk_debugfs_init);
-
static int __init clk_late_init(void)
{
unsigned long flags;
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 33b2ed451e0..7b246efa94e 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -186,7 +186,7 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
!defined(CONFIG_CPU_SUBTYPE_SH7709)
[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
-#if defined(CONFIG_ARCH_SH7372)
+#if defined(CONFIG_ARM) /* all recent SH-Mobile / R-Mobile ARM support this */
[IRQ_TYPE_EDGE_BOTH] = VALID(4),
#endif
};
@@ -202,11 +202,16 @@ static int intc_set_type(struct irq_data *data, unsigned int type)
if (!value)
return -EINVAL;
+ value &= ~SENSE_VALID_FLAG;
+
ihp = intc_find_irq(d->sense, d->nr_sense, irq);
if (ihp) {
+ /* PINT has 2-bit sense registers, should fail on EDGE_BOTH */
+ if (value >= (1 << _INTC_WIDTH(ihp->handle)))
+ return -EINVAL;
+
addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
- intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
- value & ~SENSE_VALID_FLAG);
+ intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
}
return 0;
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index c6ca115c71d..8b7a141ff35 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
@@ -29,6 +30,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
+#include <linux/export.h>
#include "internals.h"
LIST_HEAD(intc_list);
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
index a3677c9dfe3..5fea1ee8799 100644
--- a/drivers/sh/intc/dynamic.c
+++ b/drivers/sh/intc/dynamic.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/bitmap.h>
#include <linux/spinlock.h>
+#include <linux/module.h>
#include "internals.h" /* only for activate_irq() damage.. */
/*
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index e32304b66cf..56bf9336b92 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -13,6 +13,7 @@
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/stat.h>
#include <asm/sizes.h>
#include "internals.h"
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index 1e6e2d0353e..c7ec49ffd9f 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include "internals.h"
static struct intc_map_entry intc_irq_xlate[NR_IRQS];
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 1e20604257a..bec81c2404f 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -20,6 +20,7 @@
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index 75934e3ea34..e67fe170d8d 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -217,7 +217,7 @@ static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
if (!r_width)
break;
- for (n = 0; n < (r_width / f_width) * 1 << f_width; n++) {
+ for (n = 0; n < (r_width / f_width) * (1 << f_width); n++) {
if (config_reg->enum_ids[n] == enum_id) {
*crp = config_reg;
*indexp = n;
@@ -577,6 +577,32 @@ static void sh_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
sh_gpio_set_value(chip_to_pinmux(chip), offset, value);
}
+static int sh_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct pinmux_info *gpioc = chip_to_pinmux(chip);
+ pinmux_enum_t enum_id;
+ pinmux_enum_t *enum_ids;
+ int i, k, pos;
+
+ pos = 0;
+ enum_id = 0;
+ while (1) {
+ pos = get_gpio_enum_id(gpioc, offset, pos, &enum_id);
+ if (pos <= 0 || !enum_id)
+ break;
+
+ for (i = 0; i < gpioc->gpio_irq_size; i++) {
+ enum_ids = gpioc->gpio_irq[i].enum_ids;
+ for (k = 0; enum_ids[k]; k++) {
+ if (enum_ids[k] == enum_id)
+ return gpioc->gpio_irq[i].irq;
+ }
+ }
+ }
+
+ return -ENOSYS;
+}
+
int register_pinmux(struct pinmux_info *pip)
{
struct gpio_chip *chip = &pip->chip;
@@ -592,6 +618,7 @@ int register_pinmux(struct pinmux_info *pip)
chip->get = sh_gpio_get;
chip->direction_output = sh_gpio_direction_output;
chip->set = sh_gpio_set;
+ chip->to_irq = sh_gpio_to_irq;
WARN_ON(pip->first_gpio != 0); /* needs testing */
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
new file mode 100644
index 00000000000..afe9282629b
--- /dev/null
+++ b/drivers/sh/pm_runtime.c
@@ -0,0 +1,65 @@
+/*
+ * Runtime PM support code
+ *
+ * Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_clock.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int default_platform_runtime_idle(struct device *dev)
+{
+ /* suspend synchronously to disable clocks immediately */
+ return pm_runtime_suspend(dev);
+}
+
+static struct dev_pm_domain default_pm_domain = {
+ .ops = {
+ .runtime_suspend = pm_clk_suspend,
+ .runtime_resume = pm_clk_resume,
+ .runtime_idle = default_platform_runtime_idle,
+ USE_PLATFORM_PM_SLEEP_OPS
+ },
+};
+
+#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain)
+
+#else
+
+#define DEFAULT_PM_DOMAIN_PTR NULL
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct pm_clk_notifier_block platform_bus_notifier = {
+ .pm_domain = DEFAULT_PM_DOMAIN_PTR,
+ .con_ids = { NULL, },
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+ pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+ return 0;
+}
+core_initcall(sh_pm_runtime_init);
+
+static int __init sh_pm_runtime_late_init(void)
+{
+ pm_genpd_poweroff_unused();
+ return 0;
+}
+late_initcall(sh_pm_runtime_late_init);
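
For context, with the default PM domain registered above, a runtime-PM-aware
platform driver only needs to bracket hardware access with the standard calls;
the domain's pm_clk hooks then gate the clock. A minimal, hypothetical consumer
sketch (example_* names are invented, not part of this patch):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);		/* allow runtime PM for this device */

	pm_runtime_get_sync(&pdev->dev);	/* resume: pm_clk_resume() ungates the clock */
	/* ... access hardware registers ... */
	pm_runtime_put_sync(&pdev->dev);	/* idle -> suspend: clock gated again */

	return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}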
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 52e2900d9d8..a1fd73df541 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -88,7 +88,7 @@ config SPI_BFIN_SPORT
config SPI_AU1550
tristate "Au1550/Au12x0 SPI Controller"
- depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
+ depends on MIPS_ALCHEMY && EXPERIMENTAL
select SPI_BITBANG
help
If you say yes to this option, support will be included for the
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 4813a63ce6f..c00d00e96ee 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -320,18 +321,7 @@ static struct platform_driver altera_spi_driver = {
.of_match_table = altera_spi_match,
},
};
-
-static int __init altera_spi_init(void)
-{
- return platform_driver_register(&altera_spi_driver);
-}
-module_init(altera_spi_init);
-
-static void __exit altera_spi_exit(void)
-{
- platform_driver_unregister(&altera_spi_driver);
-}
-module_exit(altera_spi_exit);
+module_platform_driver(altera_spi_driver);
MODULE_DESCRIPTION("Altera SPI driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 03019bf5a5e..024b48aed5c 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -273,18 +273,7 @@ static struct platform_driver ath79_spi_driver = {
.owner = THIS_MODULE,
},
};
-
-static __init int ath79_spi_init(void)
-{
- return platform_driver_register(&ath79_spi_driver);
-}
-module_init(ath79_spi_init);
-
-static __exit void ath79_spi_exit(void)
-{
- platform_driver_unregister(&ath79_spi_driver);
-}
-module_exit(ath79_spi_exit);
+module_platform_driver(ath79_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 82dee9a6c0d..16d6a839c7f 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -22,7 +22,7 @@
#include <asm/io.h>
#include <mach/board.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <mach/cpu.h>
/* SPI register offsets */
@@ -907,7 +907,7 @@ static void atmel_spi_cleanup(struct spi_device *spi)
/*-------------------------------------------------------------------------*/
-static int __init atmel_spi_probe(struct platform_device *pdev)
+static int __devinit atmel_spi_probe(struct platform_device *pdev)
{
struct resource *regs;
int irq;
@@ -1003,7 +1003,7 @@ out_free:
return ret;
}
-static int __exit atmel_spi_remove(struct platform_device *pdev)
+static int __devexit atmel_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -1072,20 +1072,10 @@ static struct platform_driver atmel_spi_driver = {
},
.suspend = atmel_spi_suspend,
.resume = atmel_spi_resume,
+ .probe = atmel_spi_probe,
.remove = __exit_p(atmel_spi_remove),
};
-
-static int __init atmel_spi_init(void)
-{
- return platform_driver_probe(&atmel_spi_driver, atmel_spi_probe);
-}
-module_init(atmel_spi_init);
-
-static void __exit atmel_spi_exit(void)
-{
- platform_driver_unregister(&atmel_spi_driver);
-}
-module_exit(atmel_spi_exit);
+module_platform_driver(atmel_spi_driver);
MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index bddee5f516b..5784c879961 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index e557ff617b1..248a2cc671a 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -938,15 +938,4 @@ static struct platform_driver bfin_sport_spi_driver = {
.suspend = bfin_sport_spi_suspend,
.resume = bfin_sport_spi_resume,
};
-
-static int __init bfin_sport_spi_init(void)
-{
- return platform_driver_register(&bfin_sport_spi_driver);
-}
-module_init(bfin_sport_spi_init);
-
-static void __exit bfin_sport_spi_exit(void)
-{
- platform_driver_unregister(&bfin_sport_spi_driver);
-}
-module_exit(bfin_sport_spi_exit);
+module_platform_driver(bfin_sport_spi_driver);
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index b8d25f2b703..3b83ff8b1e2 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -1098,7 +1098,7 @@ static int bfin_spi_setup(struct spi_device *spi)
if (chip->pio_interrupt && !drv_data->irq_requested) {
ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler,
- IRQF_DISABLED, "BFIN_SPI", drv_data);
+ 0, "BFIN_SPI", drv_data);
if (ret) {
dev_err(&spi->dev, "Unable to register spi IRQ\n");
goto error;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 02d57fbba29..aef59b1a15f 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 9f907ec52de..5ed08e53743 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/device.h>
#include <linux/parport.h>
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index ae2cd1c1fda..6eee64a5d24 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -487,7 +487,7 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
goto fail2;
}
- status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED,
+ status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, 0,
pdev->name, mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "request_irq failed\n");
@@ -621,20 +621,10 @@ static struct platform_driver mcfqspi_driver = {
.driver.name = DRIVER_NAME,
.driver.owner = THIS_MODULE,
.driver.pm = MCFQSPI_DEV_PM_OPS,
+ .probe = mcfqspi_probe,
.remove = __devexit_p(mcfqspi_remove),
};
-
-static int __init mcfqspi_init(void)
-{
- return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe);
-}
-module_init(mcfqspi_init);
-
-static void __exit mcfqspi_exit(void)
-{
- platform_driver_unregister(&mcfqspi_driver);
-}
-module_exit(mcfqspi_exit);
+module_platform_driver(mcfqspi_driver);
MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
MODULE_DESCRIPTION("Coldfire QSPI Controller Driver");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 1f0ed8005c9..31bfba805cf 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -799,7 +799,7 @@ rx_dma_failed:
* It will invoke spi_bitbang_start to create work queue so that client driver
* can register transfer method to work queue.
*/
-static int davinci_spi_probe(struct platform_device *pdev)
+static int __devinit davinci_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct davinci_spi *dspi;
@@ -984,7 +984,7 @@ err:
* It will also call spi_bitbang_stop to destroy the work queue which was
* created by spi_bitbang_start.
*/
-static int __exit davinci_spi_remove(struct platform_device *pdev)
+static int __devexit davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
struct spi_master *master;
@@ -1011,20 +1011,10 @@ static struct platform_driver davinci_spi_driver = {
.name = "spi_davinci",
.owner = THIS_MODULE,
},
- .remove = __exit_p(davinci_spi_remove),
+ .probe = davinci_spi_probe,
+ .remove = __devexit_p(davinci_spi_remove),
};
-
-static int __init davinci_spi_init(void)
-{
- return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
-}
-module_init(davinci_spi_init);
-
-static void __exit davinci_spi_exit(void)
-{
- platform_driver_unregister(&davinci_spi_driver);
-}
-module_exit(davinci_spi_exit);
+module_platform_driver(davinci_spi_driver);
MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 130e55537db..e743a45ee92 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -116,13 +116,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
/* 1. setup DMA related registers */
if (cs_change) {
spi_enable_chip(dws, 0);
- dw_writew(dws, dmardlr, 0xf);
- dw_writew(dws, dmatdlr, 0x10);
+ dw_writew(dws, DW_SPI_DMARDLR, 0xf);
+ dw_writew(dws, DW_SPI_DMATDLR, 0x10);
if (dws->tx_dma)
dma_ctrl |= 0x2;
if (dws->rx_dma)
dma_ctrl |= 0x1;
- dw_writew(dws, dmacr, dma_ctrl);
+ dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
spi_enable_chip(dws, 1);
}
@@ -200,7 +200,8 @@ static struct dw_spi_dma_ops mid_dma_ops = {
int dw_spi_mid_init(struct dw_spi *dws)
{
- u32 *clk_reg, clk_cdiv;
+ void __iomem *clk_reg;
+ u32 clk_cdiv;
clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
if (!clk_reg)
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 34eb66501db..db2f1ba06ea 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
+#include <linux/module.h>
#include "spi-dw.h"
@@ -127,24 +128,14 @@ static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
}
static struct platform_driver dw_spi_mmio_driver = {
+ .probe = dw_spi_mmio_probe,
.remove = __devexit_p(dw_spi_mmio_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
-
-static int __init dw_spi_mmio_init(void)
-{
- return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe);
-}
-module_init(dw_spi_mmio_init);
-
-static void __exit dw_spi_mmio_exit(void)
-{
- platform_driver_unregister(&dw_spi_mmio_driver);
-}
-module_exit(dw_spi_mmio_exit);
+module_platform_driver(dw_spi_mmio_driver);
MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>");
MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index c5f37f03ac8..f64250ea161 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/module.h>
#include "spi-dw.h"
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 857cd30b44b..082458d73ce 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -88,35 +89,35 @@ static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
+ "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
+ "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
+ "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "SER: \t\t0x%08x\n", dw_readl(dws, ser));
+ "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
+ "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
+ "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
+ "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
+ "TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
+ "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "SR: \t\t0x%08x\n", dw_readl(dws, sr));
+ "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "IMR: \t\t0x%08x\n", dw_readl(dws, imr));
+ "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "ISR: \t\t0x%08x\n", dw_readl(dws, isr));
+ "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
+ "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
+ "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
+ "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
@@ -166,7 +167,7 @@ static inline u32 tx_max(struct dw_spi *dws)
u32 tx_left, tx_room, rxtx_gap;
tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
- tx_room = dws->fifo_len - dw_readw(dws, txflr);
+ tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR);
/*
* Another concern is about the tx/rx mismatch, we
@@ -187,7 +188,7 @@ static inline u32 rx_max(struct dw_spi *dws)
{
u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
- return min(rx_left, (u32)dw_readw(dws, rxflr));
+ return min(rx_left, (u32)dw_readw(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
@@ -203,7 +204,7 @@ static void dw_writer(struct dw_spi *dws)
else
txw = *(u16 *)(dws->tx);
}
- dw_writew(dws, dr, txw);
+ dw_writew(dws, DW_SPI_DR, txw);
dws->tx += dws->n_bytes;
}
}
@@ -214,7 +215,7 @@ static void dw_reader(struct dw_spi *dws)
u16 rxw;
while (max--) {
- rxw = dw_readw(dws, dr);
+ rxw = dw_readw(dws, DW_SPI_DR);
/* Care rx only if the transfer's original "rx" is not null */
if (dws->rx_end - dws->len) {
if (dws->n_bytes == 1)
@@ -322,13 +323,13 @@ EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
- u16 irq_status = dw_readw(dws, isr);
+ u16 irq_status = dw_readw(dws, DW_SPI_ISR);
/* Error handling */
if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
- dw_readw(dws, txoicr);
- dw_readw(dws, rxoicr);
- dw_readw(dws, rxuicr);
+ dw_readw(dws, DW_SPI_TXOICR);
+ dw_readw(dws, DW_SPI_RXOICR);
+ dw_readw(dws, DW_SPI_RXUICR);
int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
return IRQ_HANDLED;
}
@@ -352,7 +353,7 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct dw_spi *dws = dev_id;
- u16 irq_status = dw_readw(dws, isr) & 0x3f;
+ u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f;
if (!irq_status)
return IRQ_NONE;
@@ -520,11 +521,11 @@ static void pump_transfers(unsigned long data)
* 2. clk_div is changed
* 3. control value changes
*/
- if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) {
+ if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
spi_enable_chip(dws, 0);
- if (dw_readw(dws, ctrl0) != cr0)
- dw_writew(dws, ctrl0, cr0);
+ if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
+ dw_writew(dws, DW_SPI_CTRL0, cr0);
spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
spi_chip_sel(dws, spi->chip_select);
@@ -534,7 +535,7 @@ static void pump_transfers(unsigned long data)
if (imask)
spi_umask_intr(dws, imask);
if (txint_level)
- dw_writew(dws, txfltr, txint_level);
+ dw_writew(dws, DW_SPI_TXFLTR, txint_level);
spi_enable_chip(dws, 1);
if (cs_change)
@@ -790,13 +791,13 @@ static void spi_hw_init(struct dw_spi *dws)
if (!dws->fifo_len) {
u32 fifo;
for (fifo = 2; fifo <= 257; fifo++) {
- dw_writew(dws, txfltr, fifo);
- if (fifo != dw_readw(dws, txfltr))
+ dw_writew(dws, DW_SPI_TXFLTR, fifo);
+ if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
break;
}
dws->fifo_len = (fifo == 257) ? 0 : fifo;
- dw_writew(dws, txfltr, 0);
+ dw_writew(dws, DW_SPI_TXFLTR, 0);
}
}
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 8b7b07bf6c3..9c57c078031 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -4,6 +4,33 @@
#include <linux/io.h>
#include <linux/scatterlist.h>
+/* Register offsets */
+#define DW_SPI_CTRL0 0x00
+#define DW_SPI_CTRL1 0x04
+#define DW_SPI_SSIENR 0x08
+#define DW_SPI_MWCR 0x0c
+#define DW_SPI_SER 0x10
+#define DW_SPI_BAUDR 0x14
+#define DW_SPI_TXFLTR 0x18
+#define DW_SPI_RXFLTR 0x1c
+#define DW_SPI_TXFLR 0x20
+#define DW_SPI_RXFLR 0x24
+#define DW_SPI_SR 0x28
+#define DW_SPI_IMR 0x2c
+#define DW_SPI_ISR 0x30
+#define DW_SPI_RISR 0x34
+#define DW_SPI_TXOICR 0x38
+#define DW_SPI_RXOICR 0x3c
+#define DW_SPI_RXUICR 0x40
+#define DW_SPI_MSTICR 0x44
+#define DW_SPI_ICR 0x48
+#define DW_SPI_DMACR 0x4c
+#define DW_SPI_DMATDLR 0x50
+#define DW_SPI_DMARDLR 0x54
+#define DW_SPI_IDR 0x58
+#define DW_SPI_VERSION 0x5c
+#define DW_SPI_DR 0x60
+
/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 0
@@ -55,35 +82,6 @@ enum dw_ssi_type {
SSI_NS_MICROWIRE,
};
-struct dw_spi_reg {
- u32 ctrl0;
- u32 ctrl1;
- u32 ssienr;
- u32 mwcr;
- u32 ser;
- u32 baudr;
- u32 txfltr;
- u32 rxfltr;
- u32 txflr;
- u32 rxflr;
- u32 sr;
- u32 imr;
- u32 isr;
- u32 risr;
- u32 txoicr;
- u32 rxoicr;
- u32 rxuicr;
- u32 msticr;
- u32 icr;
- u32 dmacr;
- u32 dmatdlr;
- u32 dmardlr;
- u32 idr;
- u32 version;
- u32 dr; /* Currently oper as 32 bits,
- though only low 16 bits matters */
-} __packed;
-
struct dw_spi;
struct dw_spi_dma_ops {
int (*dma_init)(struct dw_spi *dws);
@@ -161,23 +159,34 @@ struct dw_spi {
#endif
};
-#define dw_readl(dw, name) \
- __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name))
-#define dw_writel(dw, name, val) \
- __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name))
-#define dw_readw(dw, name) \
- __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name))
-#define dw_writew(dw, name, val) \
- __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
+static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
+{
+ return __raw_readl(dws->regs + offset);
+}
+
+static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
+{
+ __raw_writel(val, dws->regs + offset);
+}
+
+static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
+{
+ return __raw_readw(dws->regs + offset);
+}
+
+static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
+{
+ __raw_writew(val, dws->regs + offset);
+}
static inline void spi_enable_chip(struct dw_spi *dws, int enable)
{
- dw_writel(dws, ssienr, (enable ? 1 : 0));
+ dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
}
static inline void spi_set_clk(struct dw_spi *dws, u16 div)
{
- dw_writel(dws, baudr, div);
+ dw_writel(dws, DW_SPI_BAUDR, div);
}
static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
@@ -188,7 +197,7 @@ static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
if (dws->cs_control)
dws->cs_control(1);
- dw_writel(dws, ser, 1 << cs);
+ dw_writel(dws, DW_SPI_SER, 1 << cs);
}
/* Disable IRQ bits */
@@ -196,8 +205,8 @@ static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
{
u32 new_mask;
- new_mask = dw_readl(dws, imr) & ~mask;
- dw_writel(dws, imr, new_mask);
+ new_mask = dw_readl(dws, DW_SPI_IMR) & ~mask;
+ dw_writel(dws, DW_SPI_IMR, new_mask);
}
/* Enable IRQ bits */
@@ -205,8 +214,8 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
{
u32 new_mask;
- new_mask = dw_readl(dws, imr) | mask;
- dw_writel(dws, imr, new_mask);
+ new_mask = dw_readl(dws, DW_SPI_IMR) | mask;
+ dw_writel(dws, DW_SPI_IMR, new_mask);
}
/*
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 1cf645479bf..0a282e5fcc9 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -24,6 +24,7 @@
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
@@ -1025,7 +1026,7 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
free_page((unsigned long)espi->zeropage);
}
-static int __init ep93xx_spi_probe(struct platform_device *pdev)
+static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ep93xx_spi_info *info;
@@ -1150,7 +1151,7 @@ fail_release_master:
return error;
}
-static int __exit ep93xx_spi_remove(struct platform_device *pdev)
+static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_master_get_devdata(master);
@@ -1196,20 +1197,10 @@ static struct platform_driver ep93xx_spi_driver = {
.name = "ep93xx-spi",
.owner = THIS_MODULE,
},
- .remove = __exit_p(ep93xx_spi_remove),
+ .probe = ep93xx_spi_probe,
+ .remove = __devexit_p(ep93xx_spi_remove),
};
-
-static int __init ep93xx_spi_init(void)
-{
- return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe);
-}
-module_init(ep93xx_spi_init);
-
-static void __exit ep93xx_spi_exit(void)
-{
- platform_driver_unregister(&ep93xx_spi_driver);
-}
-module_exit(ep93xx_spi_exit);
+module_platform_driver(ep93xx_spi_driver);
MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 54e499d5f92..d770f03705c 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -744,18 +744,7 @@ static struct platform_driver fsl_espi_driver = {
.probe = of_fsl_espi_probe,
.remove = __devexit_p(of_fsl_espi_remove),
};
-
-static int __init fsl_espi_init(void)
-{
- return platform_driver_register(&fsl_espi_driver);
-}
-module_init(fsl_espi_init);
-
-static void __exit fsl_espi_exit(void)
-{
- platform_driver_unregister(&fsl_espi_driver);
-}
-module_exit(fsl_espi_exit);
+module_platform_driver(fsl_espi_driver);
MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0e88ab74549..e093d3ec41b 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
@@ -311,7 +312,7 @@ done:
return value;
}
-static int __init spi_gpio_probe(struct platform_device *pdev)
+static int __devinit spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_master *master;
@@ -379,7 +380,7 @@ gpio_free:
return status;
}
-static int __exit spi_gpio_remove(struct platform_device *pdev)
+static int __devexit spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
@@ -408,21 +409,10 @@ MODULE_ALIAS("platform:" DRIVER_NAME);
static struct platform_driver spi_gpio_driver = {
.driver.name = DRIVER_NAME,
.driver.owner = THIS_MODULE,
- .remove = __exit_p(spi_gpio_remove),
+ .probe = spi_gpio_probe,
+ .remove = __devexit_p(spi_gpio_remove),
};
-
-static int __init spi_gpio_init(void)
-{
- return platform_driver_probe(&spi_gpio_driver, spi_gpio_probe);
-}
-module_init(spi_gpio_init);
-
-static void __exit spi_gpio_exit(void)
-{
- platform_driver_unregister(&spi_gpio_driver);
-}
-module_exit(spi_gpio_exit);
-
+module_platform_driver(spi_gpio_driver);
MODULE_DESCRIPTION("SPI master driver using generic bitbanged GPIO ");
MODULE_AUTHOR("David Brownell");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index fa594d604ac..c6e697f5e00 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -929,19 +929,7 @@ static struct platform_driver spi_imx_driver = {
.probe = spi_imx_probe,
.remove = __devexit_p(spi_imx_remove),
};
-
-static int __init spi_imx_init(void)
-{
- return platform_driver_register(&spi_imx_driver);
-}
-
-static void __exit spi_imx_exit(void)
-{
- platform_driver_unregister(&spi_imx_driver);
-}
-
-module_init(spi_imx_init);
-module_exit(spi_imx_exit);
+module_platform_driver(spi_imx_driver);
MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 6a5b4238fb6..4c63f772780 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -559,18 +559,7 @@ static struct platform_driver mpc512x_psc_spi_of_driver = {
.of_match_table = mpc512x_psc_spi_of_match,
},
};
-
-static int __init mpc512x_psc_spi_init(void)
-{
- return platform_driver_register(&mpc512x_psc_spi_of_driver);
-}
-module_init(mpc512x_psc_spi_init);
-
-static void __exit mpc512x_psc_spi_exit(void)
-{
- platform_driver_unregister(&mpc512x_psc_spi_of_driver);
-}
-module_exit(mpc512x_psc_spi_exit);
+module_platform_driver(mpc512x_psc_spi_of_driver);
MODULE_AUTHOR("John Rigby");
MODULE_DESCRIPTION("MPC512x PSC SPI Driver");
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index e30baf0852a..66047156d90 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -511,18 +511,7 @@ static struct platform_driver mpc52xx_psc_spi_of_driver = {
.of_match_table = mpc52xx_psc_spi_of_match,
},
};
-
-static int __init mpc52xx_psc_spi_init(void)
-{
- return platform_driver_register(&mpc52xx_psc_spi_of_driver);
-}
-module_init(mpc52xx_psc_spi_init);
-
-static void __exit mpc52xx_psc_spi_exit(void)
-{
- platform_driver_unregister(&mpc52xx_psc_spi_of_driver);
-}
-module_exit(mpc52xx_psc_spi_exit);
+module_platform_driver(mpc52xx_psc_spi_of_driver);
MODULE_AUTHOR("Dragos Carp");
MODULE_DESCRIPTION("MPC52xx PSC SPI Driver");
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 015a974bed7..57633d96345 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -564,16 +564,4 @@ static struct platform_driver mpc52xx_spi_of_driver = {
.probe = mpc52xx_spi_probe,
.remove = __devexit_p(mpc52xx_spi_remove),
};
-
-static int __init mpc52xx_spi_init(void)
-{
- return platform_driver_register(&mpc52xx_spi_of_driver);
-}
-module_init(mpc52xx_spi_init);
-
-static void __exit mpc52xx_spi_exit(void)
-{
- platform_driver_unregister(&mpc52xx_spi_of_driver);
-}
-module_exit(mpc52xx_spi_exit);
-
+module_platform_driver(mpc52xx_spi_of_driver);
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index c0a6ce81f9c..e763254741c 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -484,19 +484,7 @@ static struct platform_driver nuc900_spi_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init nuc900_spi_init(void)
-{
- return platform_driver_register(&nuc900_spi_driver);
-}
-
-static void __exit nuc900_spi_exit(void)
-{
- platform_driver_unregister(&nuc900_spi_driver);
-}
-
-module_init(nuc900_spi_init);
-module_exit(nuc900_spi_exit);
+module_platform_driver(nuc900_spi_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("nuc900 spi driver!");
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index f1bde66cea1..698018fd992 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -406,18 +407,7 @@ static struct platform_driver tiny_spi_driver = {
.of_match_table = tiny_spi_match,
},
};
-
-static int __init tiny_spi_init(void)
-{
- return platform_driver_register(&tiny_spi_driver);
-}
-module_init(tiny_spi_init);
-
-static void __exit tiny_spi_exit(void)
-{
- platform_driver_unregister(&tiny_spi_driver);
-}
-module_exit(tiny_spi_exit);
+module_platform_driver(tiny_spi_driver);
MODULE_DESCRIPTION("OpenCores tiny SPI driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 00a8e9d7dbe..610f7391456 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -45,6 +45,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#include <linux/module.h>
#include <asm/system.h>
#include <asm/irq.h>
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index fde3a2d4f12..322be7aea8b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1116,15 +1116,16 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
status = -ENODEV;
goto err1;
}
+
+ r->start += pdata->regs_offset;
+ r->end += pdata->regs_offset;
+ mcspi->phys = r->start;
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
goto err1;
}
- r->start += pdata->regs_offset;
- r->end += pdata->regs_offset;
- mcspi->phys = r->start;
mcspi->base = ioremap(r->start, resource_size(r));
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 9421a390a5e..13448c832c4 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/orion_spi.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 730b4a37b82..f103e470cb6 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -113,7 +113,6 @@
#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
#define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
-
/*
* SSP Control Register 1 - SSP_CR1
*/
@@ -283,7 +282,6 @@
#define SPI_POLLING_TIMEOUT 1000
-
/*
* The type of reading going on on this chip
*/
@@ -515,9 +513,6 @@ static void giveback(struct pl022 *pl022)
if (msg->complete)
msg->complete(msg->context);
/* This message is completed, so let's turn off the clocks & power */
- clk_disable(pl022->clk);
- amba_pclk_disable(pl022->adev);
- amba_vcore_disable(pl022->adev);
pm_runtime_put(&pl022->adev->dev);
}
@@ -752,7 +747,6 @@ static void readwriter(struct pl022 *pl022)
*/
}
-
/**
* next_transfer - Move to the Next transfer in the current spi message
* @pl022: SSP driver private data structure
@@ -1019,14 +1013,14 @@ static int configure_dma(struct pl022 *pl022)
dmaengine_slave_config(txchan, &tx_conf);
/* Create sglists for the transfers */
- pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
+ pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
- ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL);
+ ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
if (ret)
goto err_alloc_rx_sg;
- ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL);
+ ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
if (ret)
goto err_alloc_tx_sg;
@@ -1534,8 +1528,7 @@ static void pump_messages(struct work_struct *work)
/* Initial message state */
pl022->cur_msg->state = STATE_START;
pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
- struct spi_transfer,
- transfer_list);
+ struct spi_transfer, transfer_list);
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
@@ -1545,9 +1538,6 @@ static void pump_messages(struct work_struct *work)
* (poll/interrupt/DMA)
*/
pm_runtime_get_sync(&pl022->adev->dev);
- amba_vcore_enable(pl022->adev);
- amba_pclk_enable(pl022->adev);
- clk_enable(pl022->clk);
restore_state(pl022);
flush(pl022);
@@ -1557,7 +1547,6 @@ static void pump_messages(struct work_struct *work)
do_interrupt_dma_transfer(pl022);
}
-
static int __init init_queue(struct pl022 *pl022)
{
INIT_LIST_HEAD(&pl022->queue);
@@ -1566,8 +1555,8 @@ static int __init init_queue(struct pl022 *pl022)
pl022->running = false;
pl022->busy = false;
- tasklet_init(&pl022->pump_transfers,
- pump_transfers, (unsigned long)pl022);
+ tasklet_init(&pl022->pump_transfers, pump_transfers,
+ (unsigned long)pl022);
INIT_WORK(&pl022->pump_messages, pump_messages);
pl022->workqueue = create_singlethread_workqueue(
@@ -1578,7 +1567,6 @@ static int __init init_queue(struct pl022 *pl022)
return 0;
}
-
static int start_queue(struct pl022 *pl022)
{
unsigned long flags;
@@ -1601,7 +1589,6 @@ static int start_queue(struct pl022 *pl022)
return 0;
}
-
static int stop_queue(struct pl022 *pl022)
{
unsigned long flags;
@@ -1797,71 +1784,70 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
return 0;
}
-static int calculate_effective_freq(struct pl022 *pl022,
- int freq,
- struct ssp_clock_params *clk_freq)
+static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
+{
+ return rate / (cpsdvsr * (1 + scr));
+}
+
+static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
+ ssp_clock_params * clk_freq)
{
/* Lets calculate the frequency parameters */
- u16 cpsdvsr = 2;
- u16 scr = 0;
- bool freq_found = false;
- u32 rate;
- u32 max_tclk;
- u32 min_tclk;
+ u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
+ u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
+ best_scr = 0, tmp, found = 0;
rate = clk_get_rate(pl022->clk);
/* cpsdvscr = 2 & scr 0 */
- max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
+ max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
/* cpsdvsr = 254 & scr = 255 */
- min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
-
- if ((freq <= max_tclk) && (freq >= min_tclk)) {
- while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
- while (scr <= SCR_MAX && !freq_found) {
- if ((rate /
- (cpsdvsr * (1 + scr))) > freq)
- scr += 1;
- else {
- /*
- * This bool is made true when
- * effective frequency >=
- * target frequency is found
- */
- freq_found = true;
- if ((rate /
- (cpsdvsr * (1 + scr))) != freq) {
- if (scr == SCR_MIN) {
- cpsdvsr -= 2;
- scr = SCR_MAX;
- } else
- scr -= 1;
- }
- }
- }
- if (!freq_found) {
- cpsdvsr += 2;
- scr = SCR_MIN;
- }
- }
- if (cpsdvsr != 0) {
- dev_dbg(&pl022->adev->dev,
- "SSP Effective Frequency is %u\n",
- (rate / (cpsdvsr * (1 + scr))));
- clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
- clk_freq->scr = (u8) (scr & 0xFF);
- dev_dbg(&pl022->adev->dev,
- "SSP cpsdvsr = %d, scr = %d\n",
- clk_freq->cpsdvsr, clk_freq->scr);
- }
- } else {
+ min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
+
+ if (!((freq <= max_tclk) && (freq >= min_tclk))) {
dev_err(&pl022->adev->dev,
"controller data is incorrect: out of range frequency");
return -EINVAL;
}
+
+ /*
+ * best_freq will give closest possible available rate (<= requested
+ * freq) for all values of scr & cpsdvsr.
+ */
+ while ((cpsdvsr <= CPSDVR_MAX) && !found) {
+ while (scr <= SCR_MAX) {
+ tmp = spi_rate(rate, cpsdvsr, scr);
+
+ if (tmp > freq)
+ scr++;
+ /*
+ * If found exact value, update and break.
+ * If found more closer value, update and continue.
+ */
+ else if ((tmp == freq) || (tmp > best_freq)) {
+ best_freq = tmp;
+ best_cpsdvsr = cpsdvsr;
+ best_scr = scr;
+
+ if (tmp == freq)
+ break;
+ }
+ scr++;
+ }
+ cpsdvsr += 2;
+ scr = SCR_MIN;
+ }
+
+ clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
+ clk_freq->scr = (u8) (best_scr & 0xFF);
+ dev_dbg(&pl022->adev->dev,
+ "SSP Target Frequency is: %u, Effective Frequency is %u\n",
+ freq, best_freq);
+ dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
+ clk_freq->cpsdvsr, clk_freq->scr);
+
return 0;
}
-
/*
* A piece of default chip info unless the platform
* supplies it.
@@ -1879,7 +1865,6 @@ static const struct pl022_config_chip pl022_default_chip_info = {
.cs_control = null_cs_control,
};
-
/**
* pl022_setup - setup function registered to SPI master framework
* @spi: spi device which is requesting setup
@@ -1956,7 +1941,6 @@ static int pl022_setup(struct spi_device *spi)
goto err_config_params;
}
-
status = verify_controller_parameters(pl022, chip_info);
if (status) {
dev_err(&spi->dev, "controller data is incorrect");
@@ -2096,7 +2080,8 @@ static int pl022_setup(struct spi_device *spi)
}
SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
- SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
+ SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
+ 3);
/* Save controller_state */
spi_set_ctldata(spi, chip);
@@ -2122,7 +2107,6 @@ static void pl022_cleanup(struct spi_device *spi)
kfree(chip);
}
-
static int __devinit
pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2186,8 +2170,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
}
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
- pm_runtime_enable(dev);
- pm_runtime_resume(dev);
pl022->clk = clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
@@ -2196,6 +2178,12 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk;
}
+ status = clk_prepare(pl022->clk);
+ if (status) {
+ dev_err(&adev->dev, "could not prepare SSP/SPI bus clock\n");
+ goto err_clk_prep;
+ }
+
/* Disable SSP */
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
@@ -2235,22 +2223,22 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_spi_register;
}
dev_dbg(dev, "probe succeeded\n");
- /*
- * Disable the silicon block pclk and any voltage domain and just
- * power it up and clock it when it's needed
- */
- amba_pclk_disable(adev);
- amba_vcore_disable(adev);
+
+ /* let runtime pm put suspend */
+ pm_runtime_put(dev);
return 0;
err_spi_register:
err_start_queue:
err_init_queue:
destroy_queue(pl022);
- pl022_dma_remove(pl022);
+ if (platform_info->enable_dma)
+ pl022_dma_remove(pl022);
+
free_irq(adev->irq[0], pl022);
- pm_runtime_disable(&adev->dev);
err_no_irq:
+ clk_unprepare(pl022->clk);
+ err_clk_prep:
clk_put(pl022->clk);
err_no_clk:
iounmap(pl022->virtbase);
@@ -2271,13 +2259,22 @@ pl022_remove(struct amba_device *adev)
if (!pl022)
return 0;
+ /*
+ * undo pm_runtime_put() in probe. I assume that we're not
+ * accessing the primecell here.
+ */
+ pm_runtime_get_noresume(&adev->dev);
+
/* Remove the queue */
if (destroy_queue(pl022) != 0)
dev_err(&adev->dev, "queue remove failed\n");
load_ssp_default_config(pl022);
- pl022_dma_remove(pl022);
+ if (pl022->master_info->enable_dma)
+ pl022_dma_remove(pl022);
+
free_irq(adev->irq[0], pl022);
clk_disable(pl022->clk);
+ clk_unprepare(pl022->clk);
clk_put(pl022->clk);
iounmap(pl022->virtbase);
amba_release_regions(adev);
@@ -2288,46 +2285,70 @@ pl022_remove(struct amba_device *adev)
return 0;
}
-#ifdef CONFIG_PM
-static int pl022_suspend(struct amba_device *adev, pm_message_t state)
+#ifdef CONFIG_SUSPEND
+static int pl022_suspend(struct device *dev)
{
- struct pl022 *pl022 = amba_get_drvdata(adev);
+ struct pl022 *pl022 = dev_get_drvdata(dev);
int status = 0;
status = stop_queue(pl022);
if (status) {
- dev_warn(&adev->dev, "suspend cannot stop queue\n");
+ dev_warn(dev, "suspend cannot stop queue\n");
return status;
}
- amba_vcore_enable(adev);
- amba_pclk_enable(adev);
+ amba_vcore_enable(pl022->adev);
+ amba_pclk_enable(pl022->adev);
load_ssp_default_config(pl022);
- amba_pclk_disable(adev);
- amba_vcore_disable(adev);
- dev_dbg(&adev->dev, "suspended\n");
+ amba_pclk_disable(pl022->adev);
+ amba_vcore_disable(pl022->adev);
+ dev_dbg(dev, "suspended\n");
return 0;
}
-static int pl022_resume(struct amba_device *adev)
+static int pl022_resume(struct device *dev)
{
- struct pl022 *pl022 = amba_get_drvdata(adev);
+ struct pl022 *pl022 = dev_get_drvdata(dev);
int status = 0;
/* Start the queue running */
status = start_queue(pl022);
if (status)
- dev_err(&adev->dev, "problem starting queue (%d)\n", status);
+ dev_err(dev, "problem starting queue (%d)\n", status);
else
- dev_dbg(&adev->dev, "resumed\n");
+ dev_dbg(dev, "resumed\n");
return status;
}
-#else
-#define pl022_suspend NULL
-#define pl022_resume NULL
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_RUNTIME
+static int pl022_runtime_suspend(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+
+ clk_disable(pl022->clk);
+ amba_vcore_disable(pl022->adev);
+
+ return 0;
+}
+
+static int pl022_runtime_resume(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+
+ amba_vcore_enable(pl022->adev);
+ clk_enable(pl022->clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops pl022_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
+ SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
+};
+
static struct vendor_data vendor_arm = {
.fifodepth = 8,
.max_bpw = 16,
@@ -2337,7 +2358,6 @@ static struct vendor_data vendor_arm = {
.loopback = true,
};
-
static struct vendor_data vendor_st = {
.fifodepth = 32,
.max_bpw = 32,
@@ -2392,9 +2412,9 @@ static struct amba_id pl022_ids[] = {
* and 32 locations deep TX/RX FIFO but no extended
* CR0/CR1 register
*/
- .id = 0x00080023,
- .mask = 0xffffffff,
- .data = &vendor_st_pl023,
+ .id = 0x00080023,
+ .mask = 0xffffffff,
+ .data = &vendor_st_pl023,
},
{
.id = 0x10080023,
@@ -2407,27 +2427,23 @@ static struct amba_id pl022_ids[] = {
static struct amba_driver pl022_driver = {
.drv = {
.name = "ssp-pl022",
+ .pm = &pl022_dev_pm_ops,
},
.id_table = pl022_ids,
.probe = pl022_probe,
.remove = __devexit_p(pl022_remove),
- .suspend = pl022_suspend,
- .resume = pl022_resume,
};
-
static int __init pl022_init(void)
{
return amba_driver_register(&pl022_driver);
}
-
subsys_initcall(pl022_init);
static void __exit pl022_exit(void)
{
amba_driver_unregister(&pl022_driver);
}
-
module_exit(pl022_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index b267fd901e5..98ec53285fc 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -514,7 +514,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
/* Request IRQ */
hw->irqnum = irq_of_parse_and_map(np, 0);
ret = request_irq(hw->irqnum, spi_ppc4xx_int,
- IRQF_DISABLED, "spi_ppc4xx_of", (void *)hw);
+ 0, "spi_ppc4xx_of", (void *)hw);
if (ret) {
dev_err(dev, "unable to allocate interrupt\n");
goto free_gpios;
@@ -594,18 +594,7 @@ static struct platform_driver spi_ppc4xx_of_driver = {
.of_match_table = spi_ppc4xx_of_match,
},
};
-
-static int __init spi_ppc4xx_init(void)
-{
- return platform_driver_register(&spi_ppc4xx_of_driver);
-}
-module_init(spi_ppc4xx_init);
-
-static void __exit spi_ppc4xx_exit(void)
-{
- platform_driver_unregister(&spi_ppc4xx_of_driver);
-}
-module_exit(spi_ppc4xx_exit);
+module_platform_driver(spi_ppc4xx_of_driver);
MODULE_AUTHOR("Gary Jennejohn & Stefan Roese");
MODULE_DESCRIPTION("Simple PPC4xx SPI Driver");
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 378e504f89e..8caa07d58e6 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>
struct ce4100_info {
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 1996ac57ef9..fc064535f4f 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -24,6 +24,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#include <linux/module.h>
#include <plat/regs-spi.h>
#include <mach/spi.h>
@@ -505,7 +506,7 @@ static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
}
}
-static int __init s3c24xx_spi_probe(struct platform_device *pdev)
+static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
{
struct s3c2410_spi_info *pdata;
struct s3c24xx_spi *hw;
@@ -661,7 +662,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
return err;
}
-static int __exit s3c24xx_spi_remove(struct platform_device *dev)
+static int __devexit s3c24xx_spi_remove(struct platform_device *dev)
{
struct s3c24xx_spi *hw = platform_get_drvdata(dev);
@@ -719,26 +720,15 @@ static const struct dev_pm_ops s3c24xx_spi_pmops = {
MODULE_ALIAS("platform:s3c2410-spi");
static struct platform_driver s3c24xx_spi_driver = {
- .remove = __exit_p(s3c24xx_spi_remove),
+ .probe = s3c24xx_spi_probe,
+ .remove = __devexit_p(s3c24xx_spi_remove),
.driver = {
.name = "s3c2410-spi",
.owner = THIS_MODULE,
.pm = S3C24XX_SPI_PMOPS,
},
};
-
-static int __init s3c24xx_spi_init(void)
-{
- return platform_driver_probe(&s3c24xx_spi_driver, s3c24xx_spi_probe);
-}
-
-static void __exit s3c24xx_spi_exit(void)
-{
- platform_driver_unregister(&s3c24xx_spi_driver);
-}
-
-module_init(s3c24xx_spi_init);
-module_exit(s3c24xx_spi_exit);
+module_platform_driver(s3c24xx_spi_driver);
MODULE_DESCRIPTION("S3C24XX SPI Driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 595dacc7645..019a7163572 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -131,6 +131,12 @@
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
+struct s3c64xx_spi_dma_data {
+ unsigned ch;
+ enum dma_data_direction direction;
+ enum dma_ch dmach;
+};
+
/**
* struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
* @clk: Pointer to the spi clock.
@@ -164,13 +170,14 @@ struct s3c64xx_spi_driver_data {
struct work_struct work;
struct list_head queue;
spinlock_t lock;
- enum dma_ch rx_dmach;
- enum dma_ch tx_dmach;
unsigned long sfr_start;
struct completion xfer_completion;
unsigned state;
unsigned cur_mode, cur_bpw;
unsigned cur_speed;
+ struct s3c64xx_spi_dma_data rx_dma;
+ struct s3c64xx_spi_dma_data tx_dma;
+ struct samsung_dma_ops *ops;
};
static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -226,6 +233,78 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
writel(val, regs + S3C64XX_SPI_CH_CFG);
}
+static void s3c64xx_spi_dmacb(void *data)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_dma_data *dma = data;
+ unsigned long flags;
+
+ if (dma->direction == DMA_FROM_DEVICE)
+ sdd = container_of(data,
+ struct s3c64xx_spi_driver_data, rx_dma);
+ else
+ sdd = container_of(data,
+ struct s3c64xx_spi_driver_data, tx_dma);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (dma->direction == DMA_FROM_DEVICE) {
+ sdd->state &= ~RXBUSY;
+ if (!(sdd->state & TXBUSY))
+ complete(&sdd->xfer_completion);
+ } else {
+ sdd->state &= ~TXBUSY;
+ if (!(sdd->state & RXBUSY))
+ complete(&sdd->xfer_completion);
+ }
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ unsigned len, dma_addr_t buf)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ struct samsung_dma_prep_info info;
+
+ if (dma->direction == DMA_FROM_DEVICE)
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, rx_dma);
+ else
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, tx_dma);
+
+ info.cap = DMA_SLAVE;
+ info.len = len;
+ info.fp = s3c64xx_spi_dmacb;
+ info.fp_param = dma;
+ info.direction = dma->direction;
+ info.buf = buf;
+
+ sdd->ops->prepare(dma->ch, &info);
+ sdd->ops->trigger(dma->ch);
+}
+
+static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct samsung_dma_info info;
+
+ sdd->ops = samsung_dma_get_ops();
+
+ info.cap = DMA_SLAVE;
+ info.client = &s3c64xx_spi_dma_client;
+ info.width = sdd->cur_bpw / 8;
+
+ info.direction = sdd->rx_dma.direction;
+ info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+ sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
+ info.direction = sdd->tx_dma.direction;
+ info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+ sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
+
+ return 1;
+}
+
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
struct spi_transfer *xfer, int dma_mode)
@@ -258,10 +337,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
- s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
- xfer->tx_dma, xfer->len);
- s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+ prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -293,10 +369,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
- s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
- xfer->rx_dma, xfer->len);
- s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+ prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
}
}
@@ -482,46 +555,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
}
}
-static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
-{
- struct s3c64xx_spi_driver_data *sdd = buf_id;
- unsigned long flags;
-
- spin_lock_irqsave(&sdd->lock, flags);
-
- if (res == S3C2410_RES_OK)
- sdd->state &= ~RXBUSY;
- else
- dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
-
- /* If the other done */
- if (!(sdd->state & TXBUSY))
- complete(&sdd->xfer_completion);
-
- spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
-static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
-{
- struct s3c64xx_spi_driver_data *sdd = buf_id;
- unsigned long flags;
-
- spin_lock_irqsave(&sdd->lock, flags);
-
- if (res == S3C2410_RES_OK)
- sdd->state &= ~TXBUSY;
- else
- dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
-
- /* If the other done */
- if (!(sdd->state & RXBUSY))
- complete(&sdd->xfer_completion);
-
- spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
@@ -696,12 +729,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
if (use_dma) {
if (xfer->tx_buf != NULL
&& (sdd->state & TXBUSY))
- s3c2410_dma_ctrl(sdd->tx_dmach,
- S3C2410_DMAOP_FLUSH);
+ sdd->ops->stop(sdd->tx_dma.ch);
if (xfer->rx_buf != NULL
&& (sdd->state & RXBUSY))
- s3c2410_dma_ctrl(sdd->rx_dmach,
- S3C2410_DMAOP_FLUSH);
+ sdd->ops->stop(sdd->rx_dma.ch);
}
goto out;
@@ -739,30 +770,6 @@ out:
msg->complete(msg->context);
}
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
- if (s3c2410_dma_request(sdd->rx_dmach,
- &s3c64xx_spi_dma_client, NULL) < 0) {
- dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
- return 0;
- }
- s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
- s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
- sdd->sfr_start + S3C64XX_SPI_RX_DATA);
-
- if (s3c2410_dma_request(sdd->tx_dmach,
- &s3c64xx_spi_dma_client, NULL) < 0) {
- dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
- s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
- return 0;
- }
- s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
- s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
- sdd->sfr_start + S3C64XX_SPI_TX_DATA);
-
- return 1;
-}
-
static void s3c64xx_spi_work(struct work_struct *work)
{
struct s3c64xx_spi_driver_data *sdd = container_of(work,
@@ -799,8 +806,8 @@ static void s3c64xx_spi_work(struct work_struct *work)
spin_unlock_irqrestore(&sdd->lock, flags);
/* Free DMA channels */
- s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
- s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+ sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
+ sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
}
static int s3c64xx_spi_transfer(struct spi_device *spi,
@@ -1017,8 +1024,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
- sdd->tx_dmach = dmatx_res->start;
- sdd->rx_dmach = dmarx_res->start;
+ sdd->tx_dma.dmach = dmatx_res->start;
+ sdd->tx_dma.direction = DMA_TO_DEVICE;
+ sdd->rx_dma.dmach = dmarx_res->start;
+ sdd->rx_dma.direction = DMA_FROM_DEVICE;
sdd->cur_bpw = 8;
@@ -1106,7 +1115,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
pdev->id, master->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
mem_res->end, mem_res->start,
- sdd->rx_dmach, sdd->tx_dmach);
+ sdd->rx_dma.dmach, sdd->tx_dma.dmach);
return 0;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e00d94b2225..1f466bc66d9 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -635,7 +636,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
goto err2;
}
- ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED,
+ ret = request_irq(i, sh_msiof_spi_irq, 0,
dev_name(&pdev->dev), p);
if (ret) {
dev_err(&pdev->dev, "unable to request irq\n");
@@ -730,18 +731,7 @@ static struct platform_driver sh_msiof_spi_drv = {
.pm = &sh_msiof_spi_dev_pm_ops,
},
};
-
-static int __init sh_msiof_spi_init(void)
-{
- return platform_driver_register(&sh_msiof_spi_drv);
-}
-module_init(sh_msiof_spi_init);
-
-static void __exit sh_msiof_spi_exit(void)
-{
- platform_driver_unregister(&sh_msiof_spi_drv);
-}
-module_exit(sh_msiof_spi_exit);
+module_platform_driver(sh_msiof_spi_drv);
MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index e7779c09f6e..097e506042b 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -22,6 +22,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#include <linux/module.h>
#include <asm/spi.h>
#include <asm/io.h>
@@ -186,18 +187,7 @@ static struct platform_driver sh_sci_spi_drv = {
.owner = THIS_MODULE,
},
};
-
-static int __init sh_sci_spi_init(void)
-{
- return platform_driver_register(&sh_sci_spi_drv);
-}
-module_init(sh_sci_spi_init);
-
-static void __exit sh_sci_spi_exit(void)
-{
- platform_driver_unregister(&sh_sci_spi_drv);
-}
-module_exit(sh_sci_spi_exit);
+module_platform_driver(sh_sci_spi_drv);
MODULE_DESCRIPTION("SH SCI SPI Driver");
MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 9eedd71ad89..70c8af9f7cc 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -484,7 +484,7 @@ static int __devinit spi_sh_probe(struct platform_device *pdev)
goto error2;
}
- ret = request_irq(irq, spi_sh_irq, IRQF_DISABLED, "spi_sh", ss);
+ ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
goto error3;
@@ -524,18 +524,7 @@ static struct platform_driver spi_sh_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init spi_sh_init(void)
-{
- return platform_driver_register(&spi_sh_driver);
-}
-module_init(spi_sh_init);
-
-static void __exit spi_sh_exit(void)
-{
- platform_driver_unregister(&spi_sh_driver);
-}
-module_exit(spi_sh_exit);
+module_platform_driver(spi_sh_driver);
MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stmp.c b/drivers/spi/spi-stmp.c
index fadff76eb7e..58e38528532 100644
--- a/drivers/spi/spi-stmp.c
+++ b/drivers/spi/spi-stmp.c
@@ -659,19 +659,8 @@ static struct platform_driver stmp_spi_driver = {
.suspend = stmp_spi_suspend,
.resume = stmp_spi_resume,
};
+module_platform_driver(stmp_spi_driver);
-static int __init stmp_spi_init(void)
-{
- return platform_driver_register(&stmp_spi_driver);
-}
-
-static void __exit stmp_spi_exit(void)
-{
- platform_driver_unregister(&stmp_spi_driver);
-}
-
-module_init(stmp_spi_init);
-module_exit(stmp_spi_exit);
module_param(pio, int, S_IRUGO);
module_param(clock, int, S_IRUGO);
MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
index a5a6302dc8e..ae6d78a3e91 100644
--- a/drivers/spi/spi-tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -464,7 +465,7 @@ static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
-static int __init spi_tegra_probe(struct platform_device *pdev)
+static int __devinit spi_tegra_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct spi_tegra_data *tspi;
@@ -612,19 +613,9 @@ static struct platform_driver spi_tegra_driver = {
.owner = THIS_MODULE,
.of_match_table = spi_tegra_of_match_table,
},
+ .probe = spi_tegra_probe,
.remove = __devexit_p(spi_tegra_remove),
};
-
-static int __init spi_tegra_init(void)
-{
- return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
-}
-module_init(spi_tegra_init);
-
-static void __exit spi_tegra_exit(void)
-{
- platform_driver_unregister(&spi_tegra_driver);
-}
-module_exit(spi_tegra_exit);
+module_platform_driver(spi_tegra_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c
index ee22795c797..3f6f6e81c65 100644
--- a/drivers/spi/spi-ti-ssp.c
+++ b/drivers/spi/spi-ti-ssp.c
@@ -22,6 +22,7 @@
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/mfd/ti_ssp.h>
@@ -383,18 +384,7 @@ static struct platform_driver ti_ssp_spi_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init ti_ssp_spi_init(void)
-{
- return platform_driver_register(&ti_ssp_spi_driver);
-}
-module_init(ti_ssp_spi_init);
-
-static void __exit ti_ssp_spi_exit(void)
-{
- platform_driver_unregister(&ti_ssp_spi_driver);
-}
-module_exit(ti_ssp_spi_exit);
+module_platform_driver(ti_ssp_spi_driver);
MODULE_DESCRIPTION("SSP SPI Master");
MODULE_AUTHOR("Cyril Chemparathy");
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 940e73d1cf0..0ce5c12aab5 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index f0a2ab0428a..d5a3cbb646c 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <asm/gpio.h>
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 4d2c75df886..4c5a663b9fa 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -538,18 +538,7 @@ static struct platform_driver xilinx_spi_driver = {
.of_match_table = xilinx_spi_of_match,
},
};
-
-static int __init xilinx_spi_pltfm_init(void)
-{
- return platform_driver_register(&xilinx_spi_driver);
-}
-module_init(xilinx_spi_pltfm_init);
-
-static void __exit xilinx_spi_pltfm_exit(void)
-{
- platform_driver_unregister(&xilinx_spi_driver);
-}
-module_exit(xilinx_spi_pltfm_exit);
+module_platform_driver(xilinx_spi_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx SPI driver");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4d1b9f517ce..77eae99af11 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -29,6 +29,7 @@
#include <linux/spi/spi.h>
#include <linux/of_spi.h>
#include <linux/pm_runtime.h>
+#include <linux/export.h>
static void spidev_release(struct device *dev)
{
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index bf53e44c82a..bad7ba517a1 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -11,6 +11,7 @@
*/
#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/ssb/ssb.h>
#include "ssb_private.h"
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 5d9c97c2479..e9d2ca11283 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -10,6 +10,7 @@
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include "ssb_private.h"
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 52901c14c68..e5a2e0e9bc1 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -12,6 +12,7 @@
#include <linux/ssb/ssb_regs.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include "ssb_private.h"
diff --git a/drivers/ssb/driver_gige.c b/drivers/ssb/driver_gige.c
index 3adb98dad70..f30ea689933 100644
--- a/drivers/ssb/driver_gige.c
+++ b/drivers/ssb/driver_gige.c
@@ -10,6 +10,7 @@
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_gige.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/slab.h>
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index e6ac3177fbb..520e8286db2 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -10,6 +10,7 @@
#include <linux/ssb/ssb.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <linux/delay.h>
#include <linux/ssb/ssb_embedded.h>
@@ -516,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
- ssb_pcicore_fix_sprom_core_index(pc);
+ struct ssb_device *pdev = pc->dev;
+ struct ssb_bus *bus = pdev->bus;
+
+ if (bus->bustype == SSB_BUSTYPE_PCI)
+ ssb_pcicore_fix_sprom_core_index(pc);
/* Disable PCI interrupts. */
- ssb_write32(pc->dev, SSB_INTVEC, 0);
+ ssb_write32(pdev, SSB_INTVEC, 0);
/* Additional PCIe always once-executed workarounds */
if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index eec3e267be4..9ef124f9ee2 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -8,6 +8,7 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/export.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>
#include <linux/ssb/ssb_driver_pci.h>
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index d0cbdb0cf9d..bb6317fb925 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/ssb/ssb_driver_gige.h>
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 34c3bab90b9..973223f5de8 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -607,6 +607,29 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
sizeof(out->antenna_gain.ghz5));
+ /* Extract FEM info */
+ SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+ SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
sprom_extract_r458(out, in);
/* TODO - get remaining rev 8 stuff needed */
diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c
index 116a8116984..af5448f5e2d 100644
--- a/drivers/ssb/pcihost_wrapper.c
+++ b/drivers/ssb/pcihost_wrapper.c
@@ -12,6 +12,7 @@
*/
#include <linux/pci.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/ssb/ssb.h>
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 89e50398dba..25cdff36a78 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -30,14 +30,6 @@ source "drivers/staging/et131x/Kconfig"
source "drivers/staging/slicoss/Kconfig"
-source "drivers/staging/go7007/Kconfig"
-
-source "drivers/staging/cx25821/Kconfig"
-
-source "drivers/staging/tm6000/Kconfig"
-
-source "drivers/staging/cxd2099/Kconfig"
-
source "drivers/staging/usbip/Kconfig"
source "drivers/staging/winbond/Kconfig"
@@ -74,8 +66,6 @@ source "drivers/staging/phison/Kconfig"
source "drivers/staging/line6/Kconfig"
-source "drivers/gpu/drm/vmwgfx/Kconfig"
-
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/staging/octeon/Kconfig"
@@ -108,20 +98,12 @@ source "drivers/staging/wlags49_h25/Kconfig"
source "drivers/staging/sm7xx/Kconfig"
-source "drivers/staging/dt3155v4l/Kconfig"
-
source "drivers/staging/crystalhd/Kconfig"
source "drivers/staging/cxt1e1/Kconfig"
source "drivers/staging/xgifb/Kconfig"
-source "drivers/staging/lirc/Kconfig"
-
-source "drivers/staging/easycap/Kconfig"
-
-source "drivers/staging/solo6x10/Kconfig"
-
source "drivers/staging/tidspbridge/Kconfig"
source "drivers/staging/quickstart/Kconfig"
@@ -144,10 +126,10 @@ source "drivers/staging/ste_rmi4/Kconfig"
source "drivers/staging/gma500/Kconfig"
-source "drivers/staging/altera-stapl/Kconfig"
-
source "drivers/staging/mei/Kconfig"
source "drivers/staging/nvec/Kconfig"
+source "drivers/staging/media/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index d7a5a04d0a2..a25f3f26c7f 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,13 +4,9 @@
obj-$(CONFIG_STAGING) += staging.o
obj-y += serial/
+obj-y += media/
obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
-obj-$(CONFIG_VIDEO_GO7007) += go7007/
-obj-$(CONFIG_VIDEO_CX25821) += cx25821/
-obj-$(CONFIG_VIDEO_TM6000) += tm6000/
-obj-$(CONFIG_DVB_CXD2099) += cxd2099/
-obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_USBIP_CORE) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
@@ -45,12 +41,9 @@ obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
-obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
obj-$(CONFIG_CXT1E1) += cxt1e1/
obj-$(CONFIG_FB_XGI) += xgifb/
-obj-$(CONFIG_EASYCAP) += easycap/
-obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/
obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/
obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
@@ -59,7 +52,6 @@ obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
obj-$(CONFIG_SND_INTEL_SST) += intel_sst/
obj-$(CONFIG_SPEAKUP) += speakup/
-obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_DRM_PSB) += gma500/
diff --git a/drivers/staging/altera-stapl/Makefile b/drivers/staging/altera-stapl/Makefile
deleted file mode 100644
index ddeede3c4b9..00000000000
--- a/drivers/staging/altera-stapl/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-altera-stapl-y := altera-lpt.o altera-jtag.o altera-comp.o altera.o
-
-obj-$(CONFIG_ALTERA_STAPL) += altera-stapl.o
diff --git a/drivers/staging/altera-stapl/altera.h b/drivers/staging/altera-stapl/altera.h
deleted file mode 100644
index 94c0c6181da..00000000000
--- a/drivers/staging/altera-stapl/altera.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * altera.h
- *
- * altera FPGA driver
- *
- * Copyright (C) Altera Corporation 1998-2001
- * Copyright (C) 2010 NetUP Inc.
- * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _ALTERA_H_
-#define _ALTERA_H_
-
-struct altera_config {
- void *dev;
- u8 *action;
- int (*jtag_io) (void *dev, int tms, int tdi, int tdo);
-};
-
-#if defined(CONFIG_ALTERA_STAPL) || \
- (defined(CONFIG_ALTERA_STAPL_MODULE) && defined(MODULE))
-
-extern int altera_init(struct altera_config *config, const struct firmware *fw);
-#else
-
-static inline int altera_init(struct altera_config *config,
- const struct firmware *fw)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return 0;
-}
-#endif /* CONFIG_ALTERA_STAPL */
-
-#endif /* _ALTERA_H_ */
diff --git a/drivers/staging/cx25821/README b/drivers/staging/cx25821/README
deleted file mode 100644
index a9ba50b9888..00000000000
--- a/drivers/staging/cx25821/README
+++ /dev/null
@@ -1,6 +0,0 @@
-Todo:
- - checkpatch.pl cleanups
- - sparse cleanups
-
-Please send patches to linux-media@vger.kernel.org
-
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 24e009c0149..911c0e4375f 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/module.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/init.h>
diff --git a/drivers/staging/gma500/intel_i2c.c b/drivers/staging/gma500/intel_i2c.c
index e33432df510..51cbf65268e 100644
--- a/drivers/staging/gma500/intel_i2c.c
+++ b/drivers/staging/gma500/intel_i2c.c
@@ -20,6 +20,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/export.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
index 9050c0f78b1..3f979db2c3a 100644
--- a/drivers/staging/gma500/mdfld_dsi_output.c
+++ b/drivers/staging/gma500/mdfld_dsi_output.c
@@ -32,6 +32,7 @@
#include <asm/intel_scu_ipc.h>
#include "mdfld_dsi_pkg_sender.h"
#include <linux/pm_runtime.h>
+#include <linux/moduleparam.h>
#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
diff --git a/drivers/staging/gma500/mdfld_output.c b/drivers/staging/gma500/mdfld_output.c
index ee55f87ba1f..eabf53d58f9 100644
--- a/drivers/staging/gma500/mdfld_output.c
+++ b/drivers/staging/gma500/mdfld_output.c
@@ -26,6 +26,7 @@
*/
#include <linux/init.h>
+#include <linux/moduleparam.h>
#include "mdfld_dsi_dbi.h"
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_output.h"
@@ -167,4 +168,4 @@ void mdfld_output_setup(struct drm_device *dev)
else
mdfld_dbi_dsr_init(dev);
}
-}
\ No newline at end of file
+}
diff --git a/drivers/staging/gma500/mid_bios.c b/drivers/staging/gma500/mid_bios.c
index 8cfe301f8fb..ee3c0368e32 100644
--- a/drivers/staging/gma500/mid_bios.c
+++ b/drivers/staging/gma500/mid_bios.c
@@ -23,6 +23,7 @@
* - Check ioremap failures
*/
+#include <linux/moduleparam.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include "psb_drm.h"
diff --git a/drivers/staging/gma500/mrst_hdmi_i2c.c b/drivers/staging/gma500/mrst_hdmi_i2c.c
index 351b9d897b9..36e7edc4d14 100644
--- a/drivers/staging/gma500/mrst_hdmi_i2c.c
+++ b/drivers/staging/gma500/mrst_hdmi_i2c.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include "psb_drv.h"
#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index dc676c2ce81..986a04d16ba 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -35,6 +35,7 @@
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include <acpi/video.h>
static int drm_psb_trap_pagefaults;
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index e071c12c8f6..0f55ceee919 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_HYPERV) += hv_timesource.o
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
obj-$(CONFIG_HYPERV_MOUSE) += hv_mouse.o
diff --git a/drivers/staging/hv/hv_timesource.c b/drivers/staging/hv/hv_timesource.c
deleted file mode 100644
index 2b0f9aaf912..00000000000
--- a/drivers/staging/hv/hv_timesource.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * A clocksource for Linux running on HyperV.
- *
- *
- * Copyright (C) 2010, Novell, Inc.
- * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clocksource.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/dmi.h>
-#include <asm/hyperv.h>
-#include <asm/mshyperv.h>
-#include <asm/hypervisor.h>
-
-#define HV_CLOCK_SHIFT 22
-
-static cycle_t read_hv_clock(struct clocksource *arg)
-{
- cycle_t current_tick;
- /*
- * Read the partition counter to get the current tick count. This count
- * is set to 0 when the partition is created and is incremented in
- * 100 nanosecond units.
- */
- rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
- return current_tick;
-}
-
-static struct clocksource hyperv_cs = {
- .name = "hyperv_clocksource",
- .rating = 400, /* use this when running on Hyperv*/
- .read = read_hv_clock,
- .mask = CLOCKSOURCE_MASK(64),
- /*
- * The time ref counter in HyperV is in 100ns units.
- * The definition of mult is:
- * mult/2^shift = ns/cyc = 100
- * mult = (100 << shift)
- */
- .mult = (100 << HV_CLOCK_SHIFT),
- .shift = HV_CLOCK_SHIFT,
-};
-
-static const struct dmi_system_id __initconst
-hv_timesource_dmi_table[] __maybe_unused = {
- {
- .ident = "Hyper-V",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
- },
- },
- { },
-};
-MODULE_DEVICE_TABLE(dmi, hv_timesource_dmi_table);
-
-static const struct pci_device_id __initconst
-hv_timesource_pci_table[] __maybe_unused = {
- { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, hv_timesource_pci_table);
-
-
-static int __init init_hv_clocksource(void)
-{
- if ((x86_hyper != &x86_hyper_ms_hyperv) ||
- !(ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE))
- return -ENODEV;
-
- if (!dmi_check_system(hv_timesource_dmi_table))
- return -ENODEV;
-
- pr_info("Registering HyperV clock source\n");
- return clocksource_register(&hyperv_cs);
-}
-
-module_init(init_hv_clocksource);
-MODULE_DESCRIPTION("HyperV based clocksource");
-MODULE_AUTHOR("K. Y. Srinivasan <ksrinivasan@novell.com>");
-MODULE_LICENSE("GPL");
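
The mult/shift pair in the clocksource removed above encodes the conversion ns = (cycles * mult) >> shift, with mult chosen so that mult / 2^shift equals the 100 ns tick period. A minimal stand-alone sketch of that arithmetic (plain C, values taken from the code being removed; not kernel API) illustrates why mult is defined as (100 << HV_CLOCK_SHIFT):

	#include <stdint.h>
	#include <stdio.h>

	#define HV_CLOCK_SHIFT	22

	/* ns = (ticks * mult) >> shift, where mult = 100 << shift */
	static uint64_t ticks_to_ns(uint64_t ticks)
	{
		const uint64_t mult = 100ULL << HV_CLOCK_SHIFT;

		return (ticks * mult) >> HV_CLOCK_SHIFT;
	}

	int main(void)
	{
		/* 10,000 ticks of 100 ns each -> exactly 1,000,000 ns (1 ms) */
		printf("%llu ns\n", (unsigned long long)ticks_to_ns(10000));
		return 0;
	}

Once a clocksource is registered, the kernel performs this cycles-to-nanoseconds conversion generically from the registered mult/shift values; the snippet above only spells out the arithmetic.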
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index dbd883294d6..0016ed378e3 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -1,3 +1,4 @@
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/accel/adis16201_trigger.c b/drivers/staging/iio/accel/adis16201_trigger.c
index f448258884c..bce505e716d 100644
--- a/drivers/staging/iio/accel/adis16201_trigger.c
+++ b/drivers/staging/iio/accel/adis16201_trigger.c
@@ -1,6 +1,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 838d3012c87..1fdfe6f6ac6 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -1,3 +1,4 @@
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/accel/adis16203_trigger.c b/drivers/staging/iio/accel/adis16203_trigger.c
index 50165f9ddc5..24bcb8e15c5 100644
--- a/drivers/staging/iio/accel/adis16203_trigger.c
+++ b/drivers/staging/iio/accel/adis16203_trigger.c
@@ -1,6 +1,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 08551bb48f1..6fd3d8f51f2 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -1,3 +1,4 @@
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/accel/adis16204_trigger.c b/drivers/staging/iio/accel/adis16204_trigger.c
index 55b661c98d2..6e542af02c0 100644
--- a/drivers/staging/iio/accel/adis16204_trigger.c
+++ b/drivers/staging/iio/accel/adis16204_trigger.c
@@ -1,6 +1,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index bb66364bef0..d17e39d9545 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -1,3 +1,4 @@
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/accel/adis16209_trigger.c b/drivers/staging/iio/accel/adis16209_trigger.c
index 8df8a9791d5..c5d82c1a55d 100644
--- a/drivers/staging/iio/accel/adis16209_trigger.c
+++ b/drivers/staging/iio/accel/adis16209_trigger.c
@@ -1,6 +1,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index 34f1e7e6a56..b907ca3f4fd 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -1,3 +1,4 @@
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/accel/adis16240_trigger.c b/drivers/staging/iio/accel/adis16240_trigger.c
index 13f1d142eea..8e0ce568e64 100644
--- a/drivers/staging/iio/accel/adis16240_trigger.c
+++ b/drivers/staging/iio/accel/adis16240_trigger.c
@@ -1,6 +1,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 5c542dd0461..89527af8f4c 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../ring_sw.h"
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index a831b92cd08..999f8f746cf 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
index 48389e1c19f..974c6f5b60c 100644
--- a/drivers/staging/iio/dac/ad5686.c
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index 38cf3f4bf72..ff1b5a82b3d 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -6,6 +6,7 @@
* Licensed under the GPL-2 or later.
*/
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 679c1515571..52a9e784e7c 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -1,3 +1,4 @@
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/gyro/adis16260_trigger.c b/drivers/staging/iio/gyro/adis16260_trigger.c
index 2f2b2160f44..8299cd18d70 100644
--- a/drivers/staging/iio/gyro/adis16260_trigger.c
+++ b/drivers/staging/iio/gyro/adis16260_trigger.c
@@ -1,6 +1,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index f0b36d25414..edad0e7b4f4 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -12,6 +12,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index c3682458d78..fd886bf51a6 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -4,6 +4,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../ring_sw.h"
diff --git a/drivers/staging/iio/imu/adis16400_trigger.c b/drivers/staging/iio/imu/adis16400_trigger.c
index bf991531e0d..5bf00075752 100644
--- a/drivers/staging/iio/imu/adis16400_trigger.c
+++ b/drivers/staging/iio/imu/adis16400_trigger.c
@@ -1,6 +1,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index 6dd5d7d629a..9df0ce81dad 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -14,6 +14,7 @@
* - Alternative access techniques?
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 99ade658a2d..00fa2ac5c45 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -5,6 +5,7 @@
*
* Licensed under the GPL-2.
*/
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
index 392dfe30244..b6569c70665 100644
--- a/drivers/staging/iio/meter/ade7758_trigger.c
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/export.h>
#include "../iio.h"
#include "../trigger.h"
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
index 598fcb3599f..5cc42a655c8 100644
--- a/drivers/staging/iio/trigger.h
+++ b/drivers/staging/iio/trigger.h
@@ -115,8 +115,7 @@ void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
-struct iio_trigger *iio_allocate_trigger(const char *fmt, ...)
- __attribute__((format(printf, 1, 2)));
+__printf(1, 2) struct iio_trigger *iio_allocate_trigger(const char *fmt, ...);
void iio_free_trigger(struct iio_trigger *trig);
#endif /* _IIO_TRIGGER_H_ */
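
The hunk above replaces the open-coded attribute with the kernel's __printf(1, 2) shorthand, which the kernel defines as __attribute__((format(printf, a, b))) and which lets the compiler check the variadic arguments against the format string. A small user-space sketch (hypothetical log_msg() helper, not part of the IIO code) shows the effect:

	#include <stdarg.h>
	#include <stdio.h>

	/* Same expansion the kernel uses for __printf(a, b). */
	#define __printf(a, b) __attribute__((format(printf, a, b)))

	__printf(1, 2)
	static void log_msg(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vprintf(fmt, args);
		va_end(args);
	}

	int main(void)
	{
		log_msg("trigger %d allocated\n", 3);      /* checked: ok */
		/* log_msg("trigger %d allocated\n", "x");    -Wformat warning */
		return 0;
	}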
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
index c303d85011b..ff9aaec0557 100644
--- a/drivers/staging/intel_sst/intel_sst.c
+++ b/drivers/staging/intel_sst/intel_sst.c
@@ -37,6 +37,7 @@
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include <asm/mrst.h>
#include "intel_sst.h"
#include "intel_sst_ioctl.h"
diff --git a/drivers/staging/intel_sst/intel_sst_drv_interface.c b/drivers/staging/intel_sst/intel_sst_drv_interface.c
index 69daa1404b6..22bd29c0c43 100644
--- a/drivers/staging/intel_sst/intel_sst_drv_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_drv_interface.c
@@ -33,6 +33,7 @@
#include <linux/fs.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
+#include <linux/export.h>
#include "intel_sst.h"
#include "intel_sst_ioctl.h"
#include "intel_sst_fw_ipc.h"
diff --git a/drivers/staging/line6/audio.c b/drivers/staging/line6/audio.c
index 61db1f99b0c..8e7398393a5 100644
--- a/drivers/staging/line6/audio.c
+++ b/drivers/staging/line6/audio.c
@@ -11,6 +11,7 @@
#include <sound/core.h>
#include <sound/initval.h>
+#include <linux/export.h>
#include "driver.h"
#include "audio.h"
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
new file mode 100644
index 00000000000..7e5caa39ed3
--- /dev/null
+++ b/drivers/staging/media/Kconfig
@@ -0,0 +1,37 @@
+menuconfig STAGING_MEDIA
+ bool "Media staging drivers"
+ default n
+ ---help---
+ This option allows you to select a number of media drivers that
+ don't have the "normal" Linux kernel quality level.
+ Most of them don't properly follow the V4L, DVB and/or RC APIs,
+ so they are not likely to work correctly with existing applications.
+ That also means that, once fixed, their APIs will change to match
+ the existing ones.
+
+ If you wish to work on these drivers, to help improve them, or
+ to report problems you have with them, please use the
+ linux-media@vger.kernel.org mailing list.
+
+ If in doubt, say N here.
+
+
+if STAGING_MEDIA
+
+# Please keep them in alphabetic order
+source "drivers/staging/media/as102/Kconfig"
+
+source "drivers/staging/media/cxd2099/Kconfig"
+
+source "drivers/staging/media/dt3155v4l/Kconfig"
+
+source "drivers/staging/media/easycap/Kconfig"
+
+source "drivers/staging/media/go7007/Kconfig"
+
+source "drivers/staging/media/solo6x10/Kconfig"
+
+# Keep LIRC at the end, as it has sub-menus
+source "drivers/staging/media/lirc/Kconfig"
+
+endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
new file mode 100644
index 00000000000..c69124cdb0d
--- /dev/null
+++ b/drivers/staging/media/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_DVB_AS102) += as102/
+obj-$(CONFIG_DVB_CXD2099) += cxd2099/
+obj-$(CONFIG_EASYCAP) += easycap/
+obj-$(CONFIG_LIRC_STAGING) += lirc/
+obj-$(CONFIG_SOLO6X10) += solo6x10/
+obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
+obj-$(CONFIG_VIDEO_GO7007) += go7007/
diff --git a/drivers/staging/media/as102/Kconfig b/drivers/staging/media/as102/Kconfig
new file mode 100644
index 00000000000..5865029db0f
--- /dev/null
+++ b/drivers/staging/media/as102/Kconfig
@@ -0,0 +1,7 @@
+config DVB_AS102
+ tristate "Abilis AS102 DVB receiver"
+ depends on DVB_CORE && USB && I2C && INPUT
+ help
+ Choose Y or M here if you have a device containing an AS102.
+
+ To compile this driver as a module, choose M here; the module will be called dvb-as102.
diff --git a/drivers/staging/media/as102/Makefile b/drivers/staging/media/as102/Makefile
new file mode 100644
index 00000000000..e7dbb6f814d
--- /dev/null
+++ b/drivers/staging/media/as102/Makefile
@@ -0,0 +1,6 @@
+dvb-as102-objs := as102_drv.o as102_fw.o as10x_cmd.o as10x_cmd_stream.o \
+ as102_fe.o as102_usb_drv.o as10x_cmd_cfg.o
+
+obj-$(CONFIG_DVB_AS102) += dvb-as102.o
+
+EXTRA_CFLAGS += -DCONFIG_AS102_USB -Idrivers/media/dvb/dvb-core
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
new file mode 100644
index 00000000000..d335c7d6fa0
--- /dev/null
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -0,0 +1,351 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/kref.h>
+#include <asm/uaccess.h>
+#include <linux/usb.h>
+
+/* header file for USB device driver */
+#include "as102_drv.h"
+#include "as102_fw.h"
+#include "dvbdev.h"
+
+int debug;
+module_param_named(debug, debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");
+
+int dual_tuner;
+module_param_named(dual_tuner, dual_tuner, int, 0644);
+MODULE_PARM_DESC(dual_tuner, "Activate Dual-Tuner config (default: off)");
+
+static int fw_upload = 1;
+module_param_named(fw_upload, fw_upload, int, 0644);
+MODULE_PARM_DESC(fw_upload, "Turn on/off default FW upload (default: on)");
+
+static int pid_filtering;
+module_param_named(pid_filtering, pid_filtering, int, 0644);
+MODULE_PARM_DESC(pid_filtering, "Activate HW PID filtering (default: off)");
+
+static int ts_auto_disable;
+module_param_named(ts_auto_disable, ts_auto_disable, int, 0644);
+MODULE_PARM_DESC(ts_auto_disable, "Stream Auto Enable on FW (default: off)");
+
+int elna_enable = 1;
+module_param_named(elna_enable, elna_enable, int, 0644);
+MODULE_PARM_DESC(elna_enable, "Activate eLNA (default: on)");
+
+#ifdef DVB_DEFINE_MOD_OPT_ADAPTER_NR
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+#endif
+
+static void as102_stop_stream(struct as102_dev_t *dev)
+{
+ struct as102_bus_adapter_t *bus_adap;
+
+ if (dev != NULL)
+ bus_adap = &dev->bus_adap;
+ else
+ return;
+
+ if (bus_adap->ops->stop_stream != NULL)
+ bus_adap->ops->stop_stream(dev);
+
+ if (ts_auto_disable) {
+ if (mutex_lock_interruptible(&dev->bus_adap.lock))
+ return;
+
+ if (as10x_cmd_stop_streaming(bus_adap) < 0)
+ dprintk(debug, "as10x_cmd_stop_streaming failed\n");
+
+ mutex_unlock(&dev->bus_adap.lock);
+ }
+}
+
+static int as102_start_stream(struct as102_dev_t *dev)
+{
+ struct as102_bus_adapter_t *bus_adap;
+ int ret = -EFAULT;
+
+ if (dev != NULL)
+ bus_adap = &dev->bus_adap;
+ else
+ return ret;
+
+ if (bus_adap->ops->start_stream != NULL)
+ ret = bus_adap->ops->start_stream(dev);
+
+ if (ts_auto_disable) {
+ if (mutex_lock_interruptible(&dev->bus_adap.lock))
+ return -EFAULT;
+
+ ret = as10x_cmd_start_streaming(bus_adap);
+
+ mutex_unlock(&dev->bus_adap.lock);
+ }
+
+ return ret;
+}
+
+static int as10x_pid_filter(struct as102_dev_t *dev,
+ int index, u16 pid, int onoff) {
+
+ struct as102_bus_adapter_t *bus_adap = &dev->bus_adap;
+ int ret = -EFAULT;
+
+ ENTER();
+
+ if (mutex_lock_interruptible(&dev->bus_adap.lock)) {
+ dprintk(debug, "mutex_lock_interruptible(lock) failed !\n");
+ return -EBUSY;
+ }
+
+ switch (onoff) {
+ case 0:
+ ret = as10x_cmd_del_PID_filter(bus_adap, (uint16_t) pid);
+ dprintk(debug, "DEL_PID_FILTER([%02d] 0x%04x) ret = %d\n",
+ index, pid, ret);
+ break;
+ case 1:
+ {
+ struct as10x_ts_filter filter;
+
+ filter.type = TS_PID_TYPE_TS;
+ filter.idx = 0xFF;
+ filter.pid = pid;
+
+ ret = as10x_cmd_add_PID_filter(bus_adap, &filter);
+ dprintk(debug, "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
+ index, filter.idx, filter.pid, ret);
+ break;
+ }
+ }
+
+ mutex_unlock(&dev->bus_adap.lock);
+
+ LEAVE();
+ return ret;
+}
+
+static int as102_dvb_dmx_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ int ret = 0;
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct as102_dev_t *as102_dev = demux->priv;
+
+ ENTER();
+
+ if (mutex_lock_interruptible(&as102_dev->sem))
+ return -ERESTARTSYS;
+
+ if (pid_filtering) {
+ as10x_pid_filter(as102_dev,
+ dvbdmxfeed->index, dvbdmxfeed->pid, 1);
+ }
+
+ if (as102_dev->streaming++ == 0)
+ ret = as102_start_stream(as102_dev);
+
+ mutex_unlock(&as102_dev->sem);
+ LEAVE();
+ return ret;
+}
+
+static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct as102_dev_t *as102_dev = demux->priv;
+
+ ENTER();
+
+ if (mutex_lock_interruptible(&as102_dev->sem))
+ return -ERESTARTSYS;
+
+ if (--as102_dev->streaming == 0)
+ as102_stop_stream(as102_dev);
+
+ if (pid_filtering) {
+ as10x_pid_filter(as102_dev,
+ dvbdmxfeed->index, dvbdmxfeed->pid, 0);
+ }
+
+ mutex_unlock(&as102_dev->sem);
+ LEAVE();
+ return 0;
+}
+
+int as102_dvb_register(struct as102_dev_t *as102_dev)
+{
+ int ret = 0;
+ ENTER();
+
+ ret = dvb_register_adapter(&as102_dev->dvb_adap,
+ as102_dev->name,
+ THIS_MODULE,
+#if defined(CONFIG_AS102_USB)
+ &as102_dev->bus_adap.usb_dev->dev
+#elif defined(CONFIG_AS102_SPI)
+ &as102_dev->bus_adap.spi_dev->dev
+#else
+#error >>> dvb_register_adapter <<<
+#endif
+#ifdef DVB_DEFINE_MOD_OPT_ADAPTER_NR
+ , adapter_nr
+#endif
+ );
+ if (ret < 0) {
+ err("%s: dvb_register_adapter() failed (errno = %d)",
+ __func__, ret);
+ goto failed;
+ }
+
+ as102_dev->dvb_dmx.priv = as102_dev;
+ as102_dev->dvb_dmx.filternum = pid_filtering ? 16 : 256;
+ as102_dev->dvb_dmx.feednum = 256;
+ as102_dev->dvb_dmx.start_feed = as102_dvb_dmx_start_feed;
+ as102_dev->dvb_dmx.stop_feed = as102_dvb_dmx_stop_feed;
+
+ as102_dev->dvb_dmx.dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING;
+
+ as102_dev->dvb_dmxdev.filternum = as102_dev->dvb_dmx.filternum;
+ as102_dev->dvb_dmxdev.demux = &as102_dev->dvb_dmx.dmx;
+ as102_dev->dvb_dmxdev.capabilities = 0;
+
+ ret = dvb_dmx_init(&as102_dev->dvb_dmx);
+ if (ret < 0) {
+ err("%s: dvb_dmx_init() failed (errno = %d)", __func__, ret);
+ goto failed;
+ }
+
+ ret = dvb_dmxdev_init(&as102_dev->dvb_dmxdev, &as102_dev->dvb_adap);
+ if (ret < 0) {
+ err("%s: dvb_dmxdev_init() failed (errno = %d)", __func__,
+ ret);
+ goto failed;
+ }
+
+ ret = as102_dvb_register_fe(as102_dev, &as102_dev->dvb_fe);
+ if (ret < 0) {
+ err("%s: as102_dvb_register_frontend() failed (errno = %d)",
+ __func__, ret);
+ goto failed;
+ }
+
+ /* init bus mutex for token locking */
+ mutex_init(&as102_dev->bus_adap.lock);
+
+ /* init start / stop stream mutex */
+ mutex_init(&as102_dev->sem);
+
+#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
+ /*
+ * Try to load the as102 firmware. If the firmware upload fails, we
+ * will be able to upload it later.
+ */
+ if (fw_upload)
+ try_then_request_module(as102_fw_upload(&as102_dev->bus_adap),
+ "firmware_class");
+#endif
+
+failed:
+ LEAVE();
+ /* FIXME: free dvb_XXX */
+ return ret;
+}
+
+void as102_dvb_unregister(struct as102_dev_t *as102_dev)
+{
+ ENTER();
+
+ /* unregister as102 frontend */
+ as102_dvb_unregister_fe(&as102_dev->dvb_fe);
+
+ /* unregister demux device */
+ dvb_dmxdev_release(&as102_dev->dvb_dmxdev);
+ dvb_dmx_release(&as102_dev->dvb_dmx);
+
+ /* unregister dvb adapter */
+ dvb_unregister_adapter(&as102_dev->dvb_adap);
+
+ LEAVE();
+}
+
+static int __init as102_driver_init(void)
+{
+ int ret = 0;
+
+ ENTER();
+
+ /* register this driver with the low level subsystem */
+#if defined(CONFIG_AS102_USB)
+ ret = usb_register(&as102_usb_driver);
+ if (ret)
+ err("usb_register failed (ret = %d)", ret);
+#endif
+#if defined(CONFIG_AS102_SPI)
+ ret = spi_register_driver(&as102_spi_driver);
+ if (ret)
+ printk(KERN_ERR "spi_register failed (ret = %d)", ret);
+#endif
+
+ LEAVE();
+ return ret;
+}
+
+/*
+ * Mandatory function: adds a special section to the module indicating
+ * where the initialisation function is defined.
+ */
+module_init(as102_driver_init);
+
+/**
+ * as102_driver_exit - as102 driver exit point
+ *
+ * This function is called when device has to be removed.
+ */
+static void __exit as102_driver_exit(void)
+{
+ ENTER();
+ /* deregister this driver with the low level bus subsystem */
+#if defined(CONFIG_AS102_USB)
+ usb_deregister(&as102_usb_driver);
+#endif
+#if defined(CONFIG_AS102_SPI)
+ spi_unregister_driver(&as102_spi_driver);
+#endif
+ LEAVE();
+}
+
+/*
+ * required function for unload: Adds a special section to the module
+ * indicating where unload function is defined
+ */
+module_exit(as102_driver_exit);
+/* modinfo details */
+MODULE_DESCRIPTION(DRIVER_FULL_NAME);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pierrick Hascoet <pierrick.hascoet@abilis.com>");
+
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
new file mode 100644
index 00000000000..bcda635b5a9
--- /dev/null
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -0,0 +1,141 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#if defined(CONFIG_AS102_USB)
+#include <linux/usb.h>
+extern struct usb_driver as102_usb_driver;
+#endif
+
+#if defined(CONFIG_AS102_SPI)
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/cdev.h>
+
+extern struct spi_driver as102_spi_driver;
+#endif
+
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dmxdev.h"
+
+#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
+#define DRIVER_NAME "as10x_usb"
+
+extern int debug;
+
+#define dprintk(debug, args...) \
+ do { if (debug) { \
+ printk(KERN_DEBUG "%s: ",__FUNCTION__); \
+ printk(args); \
+ } } while (0)
+
+#ifdef TRACE
+#define ENTER() printk(">> enter %s\n", __FUNCTION__)
+#define LEAVE() printk("<< leave %s\n", __FUNCTION__)
+#else
+#define ENTER()
+#define LEAVE()
+#endif
+
+#define AS102_DEVICE_MAJOR 192
+
+#define AS102_USB_BUF_SIZE 512
+#define MAX_STREAM_URB 32
+
+#include "as10x_cmd.h"
+
+#if defined(CONFIG_AS102_USB)
+#include "as102_usb_drv.h"
+#endif
+
+#if defined(CONFIG_AS102_SPI)
+#include "as10x_spi_drv.h"
+#endif
+
+
+struct as102_bus_adapter_t {
+#if defined(CONFIG_AS102_USB)
+ struct usb_device *usb_dev;
+#elif defined(CONFIG_AS102_SPI)
+ struct spi_device *spi_dev;
+ struct cdev cdev; /* spidev raw device */
+
+ struct timer_list timer;
+ struct completion xfer_done;
+#endif
+ /* bus token lock */
+ struct mutex lock;
+ /* low level interface for bus adapter */
+ union as10x_bus_token_t {
+#if defined(CONFIG_AS102_USB)
+ /* usb token */
+ struct as10x_usb_token_cmd_t usb;
+#endif
+#if defined(CONFIG_AS102_SPI)
+ /* spi token */
+ struct as10x_spi_token_cmd_t spi;
+#endif
+ } token;
+
+ /* token cmd xfer id */
+ uint16_t cmd_xid;
+
+ /* as10x command and response for dvb interface */
+ struct as10x_cmd_t *cmd, *rsp;
+
+ /* bus adapter private ops callback */
+ struct as102_priv_ops_t *ops;
+};
+
+struct as102_dev_t {
+ const char *name;
+ struct as102_bus_adapter_t bus_adap;
+ struct list_head device_entry;
+ struct kref kref;
+ unsigned long minor;
+
+ struct dvb_adapter dvb_adap;
+ struct dvb_frontend dvb_fe;
+ struct dvb_demux dvb_dmx;
+ struct dmxdev dvb_dmxdev;
+
+ /* demodulator stats */
+ struct as10x_demod_stats demod_stats;
+ /* signal strength */
+ uint16_t signal_strength;
+ /* bit error rate */
+ uint32_t ber;
+
+ /* timer handle to trigger ts stream download */
+ struct timer_list timer_handle;
+
+ struct mutex sem;
+ dma_addr_t dma_addr;
+ void *stream;
+ int streaming;
+ struct urb *stream_urb[MAX_STREAM_URB];
+};
+
+int as102_dvb_register(struct as102_dev_t *dev);
+void as102_dvb_unregister(struct as102_dev_t *dev);
+
+int as102_dvb_register_fe(struct as102_dev_t *dev, struct dvb_frontend *fe);
+int as102_dvb_unregister_fe(struct dvb_frontend *dev);
+
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
diff --git a/drivers/staging/media/as102/as102_fe.c b/drivers/staging/media/as102/as102_fe.c
new file mode 100644
index 00000000000..3550f905367
--- /dev/null
+++ b/drivers/staging/media/as102/as102_fe.c
@@ -0,0 +1,603 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/version.h>
+
+#include "as102_drv.h"
+#include "as10x_types.h"
+#include "as10x_cmd.h"
+
+extern int elna_enable;
+
+static void as10x_fe_copy_tps_parameters(struct dvb_frontend_parameters *dst,
+ struct as10x_tps *src);
+
+static void as102_fe_copy_tune_parameters(struct as10x_tune_args *dst,
+ struct dvb_frontend_parameters *src);
+
+static int as102_fe_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ int ret = 0;
+ struct as102_dev_t *dev;
+ struct as10x_tune_args tune_args = { 0 };
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&dev->bus_adap.lock))
+ return -EBUSY;
+
+ as102_fe_copy_tune_parameters(&tune_args, params);
+
+ /* send abilis command: SET_TUNE */
+ ret = as10x_cmd_set_tune(&dev->bus_adap, &tune_args);
+ if (ret != 0)
+ dprintk(debug, "as10x_cmd_set_tune failed. (err = %d)\n", ret);
+
+ mutex_unlock(&dev->bus_adap.lock);
+
+ LEAVE();
+ return (ret < 0) ? -EINVAL : 0;
+}
+
+static int as102_fe_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p) {
+ int ret = 0;
+ struct as102_dev_t *dev;
+ struct as10x_tps tps = { 0 };
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&dev->bus_adap.lock))
+ return -EBUSY;
+
+ /* send abilis command: GET_TPS */
+ ret = as10x_cmd_get_tps(&dev->bus_adap, &tps);
+
+ if (ret == 0)
+ as10x_fe_copy_tps_parameters(p, &tps);
+
+ mutex_unlock(&dev->bus_adap.lock);
+
+ LEAVE();
+ return (ret < 0) ? -EINVAL : 0;
+}
+
+static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *settings) {
+ ENTER();
+
+#if 0
+ dprintk(debug, "step_size = %d\n", settings->step_size);
+ dprintk(debug, "max_drift = %d\n", settings->max_drift);
+ dprintk(debug, "min_delay_ms = %d -> %d\n", settings->min_delay_ms,
+ 1000);
+#endif
+
+ settings->min_delay_ms = 1000;
+
+ LEAVE();
+ return 0;
+}
+
+
+static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ int ret = 0;
+ struct as102_dev_t *dev;
+ struct as10x_tune_status tstate = { 0 };
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&dev->bus_adap.lock))
+ return -EBUSY;
+
+ /* send abilis command: GET_TUNE_STATUS */
+ ret = as10x_cmd_get_tune_status(&dev->bus_adap, &tstate);
+ if (ret < 0) {
+ dprintk(debug, "as10x_cmd_get_tune_status failed (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ dev->signal_strength = tstate.signal_strength;
+ dev->ber = tstate.BER;
+
+ switch (tstate.tune_state) {
+ case TUNE_STATUS_SIGNAL_DVB_OK:
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ break;
+ case TUNE_STATUS_STREAM_DETECTED:
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC;
+ break;
+ case TUNE_STATUS_STREAM_TUNED:
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ break;
+ default:
+ *status = TUNE_STATUS_NOT_TUNED;
+ }
+
+ dprintk(debug, "tuner status: 0x%02x, strength %d, per: %d, ber: %d\n",
+ tstate.tune_state, tstate.signal_strength,
+ tstate.PER, tstate.BER);
+
+ if (*status & FE_HAS_LOCK) {
+ if (as10x_cmd_get_demod_stats(&dev->bus_adap,
+ (struct as10x_demod_stats *) &dev->demod_stats) < 0) {
+ memset(&dev->demod_stats, 0, sizeof(dev->demod_stats));
+ dprintk(debug, "as10x_cmd_get_demod_stats failed "
+ "(probably not tuned)\n");
+ } else {
+ dprintk(debug,
+ "demod status: fc: 0x%08x, bad fc: 0x%08x, "
+ "bytes corrected: 0x%08x , MER: 0x%04x\n",
+ dev->demod_stats.frame_count,
+ dev->demod_stats.bad_frame_count,
+ dev->demod_stats.bytes_fixed_by_rs,
+ dev->demod_stats.mer);
+ }
+ } else {
+ memset(&dev->demod_stats, 0, sizeof(dev->demod_stats));
+ }
+
+out:
+ mutex_unlock(&dev->bus_adap.lock);
+ LEAVE();
+ return ret;
+}
+
+/*
+ * Note:
+ * - in AS102 SNR=MER
+ * - the SNR will be returned in linear terms, i.e. not in dB
+ * - the accuracy equals ±2dB for a SNR range from 4dB to 30dB
+ * - the accuracy is >2dB for SNR values outside this range
+ */
+static int as102_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct as102_dev_t *dev;
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -ENODEV;
+
+ *snr = dev->demod_stats.mer;
+
+ LEAVE();
+ return 0;
+}
+
+static int as102_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct as102_dev_t *dev;
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -ENODEV;
+
+ *ber = dev->ber;
+
+ LEAVE();
+ return 0;
+}
+
+static int as102_fe_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct as102_dev_t *dev;
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -ENODEV;
+
+ *strength = (((0xffff * 400) * dev->signal_strength + 41000) * 2);
+
+ LEAVE();
+ return 0;
+}
+
+static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct as102_dev_t *dev;
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (dev->demod_stats.has_started)
+ *ucblocks = dev->demod_stats.bad_frame_count;
+ else
+ *ucblocks = 0;
+
+ LEAVE();
+ return 0;
+}
+
+static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
+{
+ struct as102_dev_t *dev;
+ int ret;
+
+ ENTER();
+
+ dev = (struct as102_dev_t *) fe->tuner_priv;
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&dev->bus_adap.lock))
+ return -EBUSY;
+
+ if (acquire) {
+ if (elna_enable)
+ as10x_cmd_set_context(&dev->bus_adap, 1010, 0xC0);
+
+ ret = as10x_cmd_turn_on(&dev->bus_adap);
+ } else {
+ ret = as10x_cmd_turn_off(&dev->bus_adap);
+ }
+
+ mutex_unlock(&dev->bus_adap.lock);
+
+ LEAVE();
+ return ret;
+}
+
+static struct dvb_frontend_ops as102_fe_ops = {
+ .info = {
+ .name = "Unknown AS102 device",
+ .type = FE_OFDM,
+ .frequency_min = 174000000,
+ .frequency_max = 862000000,
+ .frequency_stepsize = 166667,
+ .caps = FE_CAN_INVERSION_AUTO
+ | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4
+ | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO
+ | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QPSK
+ | FE_CAN_QAM_AUTO
+ | FE_CAN_TRANSMISSION_MODE_AUTO
+ | FE_CAN_GUARD_INTERVAL_AUTO
+ | FE_CAN_HIERARCHY_AUTO
+ | FE_CAN_RECOVER
+ | FE_CAN_MUTE_TS
+ },
+
+ .set_frontend = as102_fe_set_frontend,
+ .get_frontend = as102_fe_get_frontend,
+ .get_tune_settings = as102_fe_get_tune_settings,
+
+ .read_status = as102_fe_read_status,
+ .read_snr = as102_fe_read_snr,
+ .read_ber = as102_fe_read_ber,
+ .read_signal_strength = as102_fe_read_signal_strength,
+ .read_ucblocks = as102_fe_read_ucblocks,
+ .ts_bus_ctrl = as102_fe_ts_bus_ctrl,
+};
+
+int as102_dvb_unregister_fe(struct dvb_frontend *fe)
+{
+ /* unregister frontend */
+ dvb_unregister_frontend(fe);
+
+ /* detach frontend */
+ dvb_frontend_detach(fe);
+
+ return 0;
+}
+
+int as102_dvb_register_fe(struct as102_dev_t *as102_dev,
+ struct dvb_frontend *dvb_fe)
+{
+ int errno;
+ struct dvb_adapter *dvb_adap;
+
+ if (as102_dev == NULL)
+ return -EINVAL;
+
+ /* extract dvb_adapter */
+ dvb_adap = &as102_dev->dvb_adap;
+
+ /* init frontend callback ops */
+ memcpy(&dvb_fe->ops, &as102_fe_ops, sizeof(struct dvb_frontend_ops));
+ strncpy(dvb_fe->ops.info.name, as102_dev->name,
+ sizeof(dvb_fe->ops.info.name));
+
+ /* register dvb frontend */
+ errno = dvb_register_frontend(dvb_adap, dvb_fe);
+ if (errno == 0)
+ dvb_fe->tuner_priv = as102_dev;
+
+ return errno;
+}
+
+static void as10x_fe_copy_tps_parameters(struct dvb_frontend_parameters *dst,
+ struct as10x_tps *as10x_tps)
+{
+
+ struct dvb_ofdm_parameters *fe_tps = &dst->u.ofdm;
+
+ /* extract constellation */
+ switch (as10x_tps->constellation) {
+ case CONST_QPSK:
+ fe_tps->constellation = QPSK;
+ break;
+ case CONST_QAM16:
+ fe_tps->constellation = QAM_16;
+ break;
+ case CONST_QAM64:
+ fe_tps->constellation = QAM_64;
+ break;
+ }
+
+ /* extract hierarchy */
+ switch (as10x_tps->hierarchy) {
+ case HIER_NONE:
+ fe_tps->hierarchy_information = HIERARCHY_NONE;
+ break;
+ case HIER_ALPHA_1:
+ fe_tps->hierarchy_information = HIERARCHY_1;
+ break;
+ case HIER_ALPHA_2:
+ fe_tps->hierarchy_information = HIERARCHY_2;
+ break;
+ case HIER_ALPHA_4:
+ fe_tps->hierarchy_information = HIERARCHY_4;
+ break;
+ }
+
+ /* extract code rate HP */
+ switch (as10x_tps->code_rate_HP) {
+ case CODE_RATE_1_2:
+ fe_tps->code_rate_HP = FEC_1_2;
+ break;
+ case CODE_RATE_2_3:
+ fe_tps->code_rate_HP = FEC_2_3;
+ break;
+ case CODE_RATE_3_4:
+ fe_tps->code_rate_HP = FEC_3_4;
+ break;
+ case CODE_RATE_5_6:
+ fe_tps->code_rate_HP = FEC_5_6;
+ break;
+ case CODE_RATE_7_8:
+ fe_tps->code_rate_HP = FEC_7_8;
+ break;
+ }
+
+ /* extract code rate LP */
+ switch (as10x_tps->code_rate_LP) {
+ case CODE_RATE_1_2:
+ fe_tps->code_rate_LP = FEC_1_2;
+ break;
+ case CODE_RATE_2_3:
+ fe_tps->code_rate_LP = FEC_2_3;
+ break;
+ case CODE_RATE_3_4:
+ fe_tps->code_rate_LP = FEC_3_4;
+ break;
+ case CODE_RATE_5_6:
+ fe_tps->code_rate_LP = FEC_5_6;
+ break;
+ case CODE_RATE_7_8:
+ fe_tps->code_rate_LP = FEC_7_8;
+ break;
+ }
+
+ /* extract guard interval */
+ switch (as10x_tps->guard_interval) {
+ case GUARD_INT_1_32:
+ fe_tps->guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case GUARD_INT_1_16:
+ fe_tps->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case GUARD_INT_1_8:
+ fe_tps->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case GUARD_INT_1_4:
+ fe_tps->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ }
+
+ /* extract transmission mode */
+ switch (as10x_tps->transmission_mode) {
+ case TRANS_MODE_2K:
+ fe_tps->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case TRANS_MODE_8K:
+ fe_tps->transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ }
+}
+
+static uint8_t as102_fe_get_code_rate(fe_code_rate_t arg)
+{
+ uint8_t c;
+
+ switch (arg) {
+ case FEC_1_2:
+ c = CODE_RATE_1_2;
+ break;
+ case FEC_2_3:
+ c = CODE_RATE_2_3;
+ break;
+ case FEC_3_4:
+ c = CODE_RATE_3_4;
+ break;
+ case FEC_5_6:
+ c = CODE_RATE_5_6;
+ break;
+ case FEC_7_8:
+ c = CODE_RATE_7_8;
+ break;
+ default:
+ c = CODE_RATE_UNKNOWN;
+ break;
+ }
+
+ return c;
+}
+
+static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
+ struct dvb_frontend_parameters *params)
+{
+
+ /* set frequency */
+ tune_args->freq = params->frequency / 1000;
+
+ /* fix interleaving_mode */
+ tune_args->interleaving_mode = INTLV_NATIVE;
+
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_8_MHZ:
+ tune_args->bandwidth = BW_8_MHZ;
+ break;
+ case BANDWIDTH_7_MHZ:
+ tune_args->bandwidth = BW_7_MHZ;
+ break;
+ case BANDWIDTH_6_MHZ:
+ tune_args->bandwidth = BW_6_MHZ;
+ break;
+ default:
+ tune_args->bandwidth = BW_8_MHZ;
+ }
+
+ switch (params->u.ofdm.guard_interval) {
+ case GUARD_INTERVAL_1_32:
+ tune_args->guard_interval = GUARD_INT_1_32;
+ break;
+ case GUARD_INTERVAL_1_16:
+ tune_args->guard_interval = GUARD_INT_1_16;
+ break;
+ case GUARD_INTERVAL_1_8:
+ tune_args->guard_interval = GUARD_INT_1_8;
+ break;
+ case GUARD_INTERVAL_1_4:
+ tune_args->guard_interval = GUARD_INT_1_4;
+ break;
+ case GUARD_INTERVAL_AUTO:
+ default:
+ tune_args->guard_interval = GUARD_UNKNOWN;
+ break;
+ }
+
+ switch (params->u.ofdm.constellation) {
+ case QPSK:
+ tune_args->constellation = CONST_QPSK;
+ break;
+ case QAM_16:
+ tune_args->constellation = CONST_QAM16;
+ break;
+ case QAM_64:
+ tune_args->constellation = CONST_QAM64;
+ break;
+ default:
+ tune_args->constellation = CONST_UNKNOWN;
+ break;
+ }
+
+ switch (params->u.ofdm.transmission_mode) {
+ case TRANSMISSION_MODE_2K:
+ tune_args->transmission_mode = TRANS_MODE_2K;
+ break;
+ case TRANSMISSION_MODE_8K:
+ tune_args->transmission_mode = TRANS_MODE_8K;
+ break;
+ default:
+ tune_args->transmission_mode = TRANS_MODE_UNKNOWN;
+ }
+
+ switch (params->u.ofdm.hierarchy_information) {
+ case HIERARCHY_NONE:
+ tune_args->hierarchy = HIER_NONE;
+ break;
+ case HIERARCHY_1:
+ tune_args->hierarchy = HIER_ALPHA_1;
+ break;
+ case HIERARCHY_2:
+ tune_args->hierarchy = HIER_ALPHA_2;
+ break;
+ case HIERARCHY_4:
+ tune_args->hierarchy = HIER_ALPHA_4;
+ break;
+ case HIERARCHY_AUTO:
+ tune_args->hierarchy = HIER_UNKNOWN;
+ break;
+ }
+
+ dprintk(debug, "tuner parameters: freq: %d bw: 0x%02x gi: 0x%02x\n",
+ params->frequency,
+ tune_args->bandwidth,
+ tune_args->guard_interval);
+
+ /*
+ * Detect a hierarchy selection:
+ * if HP/LP are both set to FEC_NONE, HP will be selected.
+ */
+ if ((tune_args->hierarchy != HIER_NONE) &&
+ ((params->u.ofdm.code_rate_LP == FEC_NONE) ||
+ (params->u.ofdm.code_rate_HP == FEC_NONE))) {
+
+ if (params->u.ofdm.code_rate_LP == FEC_NONE) {
+ tune_args->hier_select = HIER_HIGH_PRIORITY;
+ tune_args->code_rate =
+ as102_fe_get_code_rate(params->u.ofdm.code_rate_HP);
+ }
+
+ if (params->u.ofdm.code_rate_HP == FEC_NONE) {
+ tune_args->hier_select = HIER_LOW_PRIORITY;
+ tune_args->code_rate =
+ as102_fe_get_code_rate(params->u.ofdm.code_rate_LP);
+ }
+
+ dprintk(debug, "\thierarchy: 0x%02x "
+ "selected: %s code_rate_%s: 0x%02x\n",
+ tune_args->hierarchy,
+ tune_args->hier_select == HIER_HIGH_PRIORITY ?
+ "HP" : "LP",
+ tune_args->hier_select == HIER_HIGH_PRIORITY ?
+ "HP" : "LP",
+ tune_args->code_rate);
+ } else {
+ tune_args->code_rate =
+ as102_fe_get_code_rate(params->u.ofdm.code_rate_HP);
+ }
+}
+
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/staging/media/as102/as102_fw.c
new file mode 100644
index 00000000000..c019df933cc
--- /dev/null
+++ b/drivers/staging/media/as102/as102_fw.c
@@ -0,0 +1,251 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+
+#include "as102_drv.h"
+#include "as102_fw.h"
+
+#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
+char as102_st_fw1[] = "as102_data1_st.hex";
+char as102_st_fw2[] = "as102_data2_st.hex";
+char as102_dt_fw1[] = "as102_data1_dt.hex";
+char as102_dt_fw2[] = "as102_data2_dt.hex";
+
+static unsigned char atohx(unsigned char *dst, char *src)
+{
+ unsigned char value = 0;
+
+ char msb = tolower(*src) - '0';
+ char lsb = tolower(*(src + 1)) - '0';
+
+ if (msb > 9)
+ msb -= 7;
+ if (lsb > 9)
+ lsb -= 7;
+
+ *dst = value = ((msb & 0xF) << 4) | (lsb & 0xF);
+ return value;
+}
+
+/*
+ * Parse an Intel HEX firmware file to extract address and data.
+ */
+static int parse_hex_line(unsigned char *fw_data, unsigned char *addr,
+ unsigned char *data, int *dataLength,
+ unsigned char *addr_has_changed) {
+
+ int count = 0;
+ unsigned char *src, dst;
+
+ if (*fw_data++ != ':') {
+ printk(KERN_ERR "invalid firmware file\n");
+ return -EFAULT;
+ }
+
+ /* locate end of line */
+ for (src = fw_data; *src != '\n'; src += 2) {
+ atohx(&dst, src);
+ /* parse line to split addr / data */
+ switch (count) {
+ case 0:
+ *dataLength = dst;
+ break;
+ case 1:
+ addr[2] = dst;
+ break;
+ case 2:
+ addr[3] = dst;
+ break;
+ case 3:
+ /* check if data is an address */
+ if (dst == 0x04)
+ *addr_has_changed = 1;
+ else
+ *addr_has_changed = 0;
+ break;
+ case 4:
+ case 5:
+ if (*addr_has_changed)
+ addr[(count - 4)] = dst;
+ else
+ data[(count - 4)] = dst;
+ break;
+ default:
+ data[(count - 4)] = dst;
+ break;
+ }
+ count++;
+ }
+
+ /* return the number of characters consumed: hex pairs plus ':' and '\n' */
+ return (count * 2) + 2;
+}
+
+static int as102_firmware_upload(struct as102_bus_adapter_t *bus_adap,
+ unsigned char *cmd,
+ const struct firmware *firmware) {
+
+ struct as10x_fw_pkt_t fw_pkt;
+ int total_read_bytes = 0, errno = 0;
+ unsigned char addr_has_changed = 0;
+
+ ENTER();
+
+ for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
+ int read_bytes = 0, data_len = 0;
+
+ /* parse intel hex line */
+ read_bytes = parse_hex_line(
+ (u8 *) (firmware->data + total_read_bytes),
+ fw_pkt.raw.address,
+ fw_pkt.raw.data,
+ &data_len,
+ &addr_has_changed);
+
+ if (read_bytes <= 0)
+ goto error;
+
+ /* detect the end of file */
+ total_read_bytes += read_bytes;
+ if (total_read_bytes == firmware->size) {
+ fw_pkt.u.request[0] = 0x00;
+ fw_pkt.u.request[1] = 0x03;
+
+ /* send EOF command */
+ errno = bus_adap->ops->upload_fw_pkt(bus_adap,
+ (uint8_t *)
+ &fw_pkt, 2, 0);
+ if (errno < 0)
+ goto error;
+ } else {
+ if (!addr_has_changed) {
+ /* prepare command to send */
+ fw_pkt.u.request[0] = 0x00;
+ fw_pkt.u.request[1] = 0x01;
+
+ data_len += sizeof(fw_pkt.u.request);
+ data_len += sizeof(fw_pkt.raw.address);
+
+ /* send cmd to device */
+ errno = bus_adap->ops->upload_fw_pkt(bus_adap,
+ (uint8_t *)
+ &fw_pkt,
+ data_len,
+ 0);
+ if (errno < 0)
+ goto error;
+ }
+ }
+ }
+error:
+ LEAVE();
+ return (errno == 0) ? total_read_bytes : errno;
+}
+
+int as102_fw_upload(struct as102_bus_adapter_t *bus_adap)
+{
+ int errno = -EFAULT;
+ const struct firmware *firmware;
+ unsigned char *cmd_buf = NULL;
+ char *fw1, *fw2;
+
+#if defined(CONFIG_AS102_USB)
+ struct usb_device *dev = bus_adap->usb_dev;
+#endif
+#if defined(CONFIG_AS102_SPI)
+ struct spi_device *dev = bus_adap->spi_dev;
+#endif
+ ENTER();
+
+ /* select fw file to upload */
+ if (dual_tuner) {
+ fw1 = as102_dt_fw1;
+ fw2 = as102_dt_fw2;
+ } else {
+ fw1 = as102_st_fw1;
+ fw2 = as102_st_fw2;
+ }
+
+#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
+ /* allocate buffer to store firmware upload command and data */
+ cmd_buf = kzalloc(MAX_FW_PKT_SIZE, GFP_KERNEL);
+ if (cmd_buf == NULL) {
+ errno = -ENOMEM;
+ goto error;
+ }
+
+ /* request kernel to locate firmware file: part1 */
+ errno = request_firmware(&firmware, fw1, &dev->dev);
+ if (errno < 0) {
+ printk(KERN_ERR "%s: unable to locate firmware file: %s\n",
+ DRIVER_NAME, fw1);
+ goto error;
+ }
+
+ /* initiate firmware upload */
+ errno = as102_firmware_upload(bus_adap, cmd_buf, firmware);
+ if (errno < 0) {
+ printk(KERN_ERR "%s: error during firmware upload part1\n",
+ DRIVER_NAME);
+ goto error;
+ }
+
+ printk(KERN_INFO "%s: fimrware: %s loaded with success\n",
+ DRIVER_NAME, fw1);
+ release_firmware(firmware);
+
+ /* wait for boot to complete */
+ mdelay(100);
+
+ /* request kernel to locate firmware file: part2 */
+ errno = request_firmware(&firmware, fw2, &dev->dev);
+ if (errno < 0) {
+ printk(KERN_ERR "%s: unable to locate firmware file: %s\n",
+ DRIVER_NAME, fw2);
+ goto error;
+ }
+
+ /* initiate firmware upload */
+ errno = as102_firmware_upload(bus_adap, cmd_buf, firmware);
+ if (errno < 0) {
+ printk(KERN_ERR "%s: error during firmware upload part2\n",
+ DRIVER_NAME);
+ goto error;
+ }
+
+ printk(KERN_INFO "%s: fimrware: %s loaded with success\n",
+ DRIVER_NAME, fw2);
+error:
+ /* free data buffer */
+ kfree(cmd_buf);
+ /* release firmware if needed */
+ if (firmware != NULL)
+ release_firmware(firmware);
+#endif
+ LEAVE();
+ return errno;
+}
+#endif
+
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
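
parse_hex_line() above walks standard Intel HEX records, where each line is ':' followed by ASCII-hex fields: byte count, 16-bit address, record type, data, and checksum. A stand-alone sketch of decoding one such record (generic format only; the field handling does not mirror the driver's packet layout, and hex_byte() is an illustrative helper) could look like:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Decode one ASCII-hex byte (two characters) from an Intel HEX record. */
	static unsigned int hex_byte(const char *s)
	{
		char buf[3] = { s[0], s[1], '\0' };

		return (unsigned int)strtoul(buf, NULL, 16);
	}

	int main(void)
	{
		/* ":LLAAAATT<data>CC" - 16 data bytes loaded at address 0x0100 */
		const char *rec = ":10010000214601360121470136007EFE09D2190140";
		unsigned int len, addr, type, i;

		if (rec[0] != ':')
			return EXIT_FAILURE;

		len  = hex_byte(rec + 1);                            /* byte count   */
		addr = (hex_byte(rec + 3) << 8) | hex_byte(rec + 5); /* load address */
		type = hex_byte(rec + 7);                            /* record type  */

		printf("len=%u addr=0x%04x type=%u data:", len, addr, type);
		for (i = 0; i < len; i++)
			printf(" %02x", hex_byte(rec + 9 + 2 * i));
		printf("\n");

		return EXIT_SUCCESS;
	}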
diff --git a/drivers/staging/media/as102/as102_fw.h b/drivers/staging/media/as102/as102_fw.h
new file mode 100644
index 00000000000..27e5347e2e1
--- /dev/null
+++ b/drivers/staging/media/as102/as102_fw.h
@@ -0,0 +1,42 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#define MAX_FW_PKT_SIZE 64
+
+extern int dual_tuner;
+
+#pragma pack(1)
+struct as10x_raw_fw_pkt {
+ unsigned char address[4];
+ unsigned char data[MAX_FW_PKT_SIZE - 6];
+};
+
+struct as10x_fw_pkt_t {
+ union {
+ unsigned char request[2];
+ unsigned char length[2];
+ } u;
+ struct as10x_raw_fw_pkt raw;
+};
+#pragma pack()
+
+#ifdef __KERNEL__
+int as102_fw_upload(struct as102_bus_adapter_t *bus_adap);
+#endif
+
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
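
The #pragma pack(1) block above guarantees the firmware packet contains no compiler-inserted padding, so its in-memory layout matches what is written to the device byte for byte. A quick stand-alone check (struct names shortened for illustration) confirms the expected 64-byte size:

	#include <stdio.h>

	#define MAX_FW_PKT_SIZE 64

	#pragma pack(1)
	struct raw_pkt {
		unsigned char address[4];
		unsigned char data[MAX_FW_PKT_SIZE - 6];
	};

	struct fw_pkt {
		union {
			unsigned char request[2];
			unsigned char length[2];
		} u;
		struct raw_pkt raw;
	};
	#pragma pack()

	int main(void)
	{
		/* 2 (union) + 4 (address) + 58 (data) = 64 bytes, no padding */
		printf("sizeof(struct fw_pkt) = %zu\n", sizeof(struct fw_pkt));
		return 0;
	}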
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/staging/media/as102/as102_usb_drv.c
new file mode 100644
index 00000000000..264be2dbd2a
--- /dev/null
+++ b/drivers/staging/media/as102/as102_usb_drv.c
@@ -0,0 +1,478 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/usb.h>
+
+#include "as102_drv.h"
+#include "as102_usb_drv.h"
+#include "as102_fw.h"
+
+static void as102_usb_disconnect(struct usb_interface *interface);
+static int as102_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+
+static int as102_usb_start_stream(struct as102_dev_t *dev);
+static void as102_usb_stop_stream(struct as102_dev_t *dev);
+
+static int as102_open(struct inode *inode, struct file *file);
+static int as102_release(struct inode *inode, struct file *file);
+
+static struct usb_device_id as102_usb_id_table[] = {
+ { USB_DEVICE(AS102_USB_DEVICE_VENDOR_ID, AS102_USB_DEVICE_PID_0001) },
+ { USB_DEVICE(PCTV_74E_USB_VID, PCTV_74E_USB_PID) },
+ { USB_DEVICE(ELGATO_EYETV_DTT_USB_VID, ELGATO_EYETV_DTT_USB_PID) },
+ { USB_DEVICE(NBOX_DVBT_DONGLE_USB_VID, NBOX_DVBT_DONGLE_USB_PID) },
+ { } /* Terminating entry */
+};
+
+/* Note that this table must always have the same number of entries as the
+ as102_usb_id_table struct */
+static const char *as102_device_names[] = {
+ AS102_REFERENCE_DESIGN,
+ AS102_PCTV_74E,
+ AS102_ELGATO_EYETV_DTT_NAME,
+ AS102_NBOX_DVBT_DONGLE_NAME,
+ NULL /* Terminating entry */
+};
+
+struct usb_driver as102_usb_driver = {
+ .name = DRIVER_FULL_NAME,
+ .probe = as102_usb_probe,
+ .disconnect = as102_usb_disconnect,
+ .id_table = as102_usb_id_table
+};
+
+static const struct file_operations as102_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = as102_open,
+ .release = as102_release,
+};
+
+static struct usb_class_driver as102_usb_class_driver = {
+ .name = "aton2-%d",
+ .fops = &as102_dev_fops,
+ .minor_base = AS102_DEVICE_MAJOR,
+};
+
+static int as102_usb_xfer_cmd(struct as102_bus_adapter_t *bus_adap,
+ unsigned char *send_buf, int send_buf_len,
+ unsigned char *recv_buf, int recv_buf_len)
+{
+ int ret = 0;
+ ENTER();
+
+ if (send_buf != NULL) {
+ ret = usb_control_msg(bus_adap->usb_dev,
+ usb_sndctrlpipe(bus_adap->usb_dev, 0),
+ AS102_USB_DEVICE_TX_CTRL_CMD,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ bus_adap->cmd_xid, /* value */
+ 0, /* index */
+ send_buf, send_buf_len,
+ USB_CTRL_SET_TIMEOUT /* 200 */);
+ if (ret < 0) {
+ dprintk(debug, "usb_control_msg(send) failed, err %i\n",
+ ret);
+ return ret;
+ }
+
+ if (ret != send_buf_len) {
+ dprintk(debug, "only wrote %d of %d bytes\n",
+ ret, send_buf_len);
+ return -1;
+ }
+ }
+
+ if (recv_buf != NULL) {
+#ifdef TRACE
+ dprintk(debug, "want to read: %d bytes\n", recv_buf_len);
+#endif
+ ret = usb_control_msg(bus_adap->usb_dev,
+ usb_rcvctrlpipe(bus_adap->usb_dev, 0),
+ AS102_USB_DEVICE_RX_CTRL_CMD,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ bus_adap->cmd_xid, /* value */
+ 0, /* index */
+ recv_buf, recv_buf_len,
+ USB_CTRL_GET_TIMEOUT /* 200 */);
+ if (ret < 0) {
+ dprintk(debug, "usb_control_msg(recv) failed, err %i\n",
+ ret);
+ return ret;
+ }
+#ifdef TRACE
+ dprintk(debug, "read %d bytes\n", recv_buf_len);
+#endif
+ }
+
+ LEAVE();
+ return ret;
+}
+
+static int as102_send_ep1(struct as102_bus_adapter_t *bus_adap,
+ unsigned char *send_buf,
+ int send_buf_len,
+ int swap32)
+{
+ int ret = 0, actual_len;
+
+ ret = usb_bulk_msg(bus_adap->usb_dev,
+ usb_sndbulkpipe(bus_adap->usb_dev, 1),
+ send_buf, send_buf_len, &actual_len, 200);
+ if (ret) {
+ dprintk(debug, "usb_bulk_msg(send) failed, err %i\n", ret);
+ return ret;
+ }
+
+ if (actual_len != send_buf_len) {
+ dprintk(debug, "only wrote %d of %d bytes\n",
+ actual_len, send_buf_len);
+ return -1;
+ }
+ return ret ? ret : actual_len;
+}
+
+static int as102_read_ep2(struct as102_bus_adapter_t *bus_adap,
+ unsigned char *recv_buf, int recv_buf_len)
+{
+ int ret = 0, actual_len;
+
+ if (recv_buf == NULL)
+ return -EINVAL;
+
+ ret = usb_bulk_msg(bus_adap->usb_dev,
+ usb_rcvbulkpipe(bus_adap->usb_dev, 2),
+ recv_buf, recv_buf_len, &actual_len, 200);
+ if (ret) {
+ dprintk(debug, "usb_bulk_msg(recv) failed, err %i\n", ret);
+ return ret;
+ }
+
+ if (actual_len != recv_buf_len) {
+ dprintk(debug, "only read %d of %d bytes\n",
+ actual_len, recv_buf_len);
+ return -1;
+ }
+ return ret ? ret : actual_len;
+}
+
+struct as102_priv_ops_t as102_priv_ops = {
+ .upload_fw_pkt = as102_send_ep1,
+ .xfer_cmd = as102_usb_xfer_cmd,
+ .as102_read_ep2 = as102_read_ep2,
+ .start_stream = as102_usb_start_stream,
+ .stop_stream = as102_usb_stop_stream,
+};
+
+static int as102_submit_urb_stream(struct as102_dev_t *dev, struct urb *urb)
+{
+ int err;
+
+ usb_fill_bulk_urb(urb,
+ dev->bus_adap.usb_dev,
+ usb_rcvbulkpipe(dev->bus_adap.usb_dev, 0x2),
+ urb->transfer_buffer,
+ AS102_USB_BUF_SIZE,
+ as102_urb_stream_irq,
+ dev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err)
+ dprintk(debug, "%s: usb_submit_urb failed\n", __func__);
+
+ return err;
+}
+
+void as102_urb_stream_irq(struct urb *urb)
+{
+ struct as102_dev_t *as102_dev = urb->context;
+
+ if (urb->actual_length > 0) {
+ dvb_dmx_swfilter(&as102_dev->dvb_dmx,
+ urb->transfer_buffer,
+ urb->actual_length);
+ } else {
+ if (urb->actual_length == 0)
+ memset(urb->transfer_buffer, 0, AS102_USB_BUF_SIZE);
+ }
+
+ /* if streaming has not been stopped, re-submit the urb */
+ if (as102_dev->streaming)
+ as102_submit_urb_stream(as102_dev, urb);
+}
+
+static void as102_free_usb_stream_buffer(struct as102_dev_t *dev)
+{
+ int i;
+
+ ENTER();
+
+ for (i = 0; i < MAX_STREAM_URB; i++)
+ usb_free_urb(dev->stream_urb[i]);
+
+ usb_free_coherent(dev->bus_adap.usb_dev,
+ MAX_STREAM_URB * AS102_USB_BUF_SIZE,
+ dev->stream,
+ dev->dma_addr);
+ LEAVE();
+}
+
+static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
+{
+ int i, ret = 0;
+
+ ENTER();
+
+ dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev,
+ MAX_STREAM_URB * AS102_USB_BUF_SIZE,
+ GFP_KERNEL,
+ &dev->dma_addr);
+ if (!dev->stream) {
+ dprintk(debug, "%s: usb_buffer_alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ memset(dev->stream, 0, MAX_STREAM_URB * AS102_USB_BUF_SIZE);
+
+ /* init urb buffers */
+ for (i = 0; i < MAX_STREAM_URB; i++) {
+ struct urb *urb;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb == NULL) {
+ dprintk(debug, "%s: usb_alloc_urb failed\n", __func__);
+ as102_free_usb_stream_buffer(dev);
+ return -ENOMEM;
+ }
+
+ urb->transfer_buffer = dev->stream + (i * AS102_USB_BUF_SIZE);
+ urb->transfer_buffer_length = AS102_USB_BUF_SIZE;
+
+ dev->stream_urb[i] = urb;
+ }
+ LEAVE();
+ return ret;
+}
+
+static void as102_usb_stop_stream(struct as102_dev_t *dev)
+{
+ int i;
+
+ for (i = 0; i < MAX_STREAM_URB; i++)
+ usb_kill_urb(dev->stream_urb[i]);
+}
+
+static int as102_usb_start_stream(struct as102_dev_t *dev)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < MAX_STREAM_URB; i++) {
+ ret = as102_submit_urb_stream(dev, dev->stream_urb[i]);
+ if (ret) {
+ as102_usb_stop_stream(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void as102_usb_release(struct kref *kref)
+{
+ struct as102_dev_t *as102_dev;
+
+ ENTER();
+
+ as102_dev = container_of(kref, struct as102_dev_t, kref);
+ if (as102_dev != NULL) {
+ usb_put_dev(as102_dev->bus_adap.usb_dev);
+ kfree(as102_dev);
+ }
+
+ LEAVE();
+}
+
+static void as102_usb_disconnect(struct usb_interface *intf)
+{
+ struct as102_dev_t *as102_dev;
+
+ ENTER();
+
+ /* extract as102_dev_t from usb_device private data */
+ as102_dev = usb_get_intfdata(intf);
+
+ /* unregister dvb layer */
+ as102_dvb_unregister(as102_dev);
+
+ /* free usb buffers */
+ as102_free_usb_stream_buffer(as102_dev);
+
+ usb_set_intfdata(intf, NULL);
+
+ /* usb unregister device */
+ usb_deregister_dev(intf, &as102_usb_class_driver);
+
+ /* decrement usage counter */
+ kref_put(&as102_dev->kref, as102_usb_release);
+
+ printk(KERN_INFO "%s: device has been disconnected\n", DRIVER_NAME);
+
+ LEAVE();
+}
+
+static int as102_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret;
+ struct as102_dev_t *as102_dev;
+ int i;
+
+ ENTER();
+
+ as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL);
+ if (as102_dev == NULL) {
+ err("%s: kzalloc failed", __func__);
+ return -ENOMEM;
+ }
+
+ /* This should never actually happen */
+ if (ARRAY_SIZE(as102_usb_id_table) != ARRAY_SIZE(as102_device_names)) {
+ printk(KERN_ERR "Device names table invalid size\n");
+ kfree(as102_dev);
+ return -EINVAL;
+ }
+
+ /* Assign the user-friendly device name */
+ for (i = 0; i < ARRAY_SIZE(as102_usb_id_table); i++) {
+ if (id == &as102_usb_id_table[i])
+ as102_dev->name = as102_device_names[i];
+ }
+
+ if (as102_dev->name == NULL)
+ as102_dev->name = "Unknown AS102 device";
+
+ /* set private callback functions */
+ as102_dev->bus_adap.ops = &as102_priv_ops;
+
+ /* init cmd token for usb bus */
+ as102_dev->bus_adap.cmd = &as102_dev->bus_adap.token.usb.c;
+ as102_dev->bus_adap.rsp = &as102_dev->bus_adap.token.usb.r;
+
+ /* init kernel device reference */
+ kref_init(&as102_dev->kref);
+
+ /* store as102 device to usb_device private data */
+ usb_set_intfdata(intf, (void *) as102_dev);
+
+ /* store in as102 device the usb_device pointer */
+ as102_dev->bus_adap.usb_dev = usb_get_dev(interface_to_usbdev(intf));
+
+ /* we can register the device now, as it is ready */
+ ret = usb_register_dev(intf, &as102_usb_class_driver);
+ if (ret < 0) {
+ /* something prevented us from registering this driver */
+ err("%s: usb_register_dev() failed (errno = %d)",
+ __func__, ret);
+ goto failed;
+ }
+
+ printk(KERN_INFO "%s: device has been detected\n", DRIVER_NAME);
+
+ /* request buffer allocation for streaming */
+ ret = as102_alloc_usb_stream_buffer(as102_dev);
+ if (ret != 0)
+ goto failed;
+
+ /* register dvb layer */
+ ret = as102_dvb_register(as102_dev);
+
+ LEAVE();
+ return ret;
+
+failed:
+ usb_set_intfdata(intf, NULL);
+ /* drop the reference taken with usb_get_dev() above */
+ usb_put_dev(as102_dev->bus_adap.usb_dev);
+ kfree(as102_dev);
+ return ret;
+}
+
+static int as102_open(struct inode *inode, struct file *file)
+{
+ int ret = 0, minor = 0;
+ struct usb_interface *intf = NULL;
+ struct as102_dev_t *dev = NULL;
+
+ ENTER();
+
+ /* read minor from inode */
+ minor = iminor(inode);
+
+ /* fetch device from usb interface */
+ intf = usb_find_interface(&as102_usb_driver, minor);
+ if (intf == NULL) {
+ printk(KERN_ERR "%s: can't find device for minor %d\n",
+ __func__, minor);
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* get our device */
+ dev = usb_get_intfdata(intf);
+ if (dev == NULL) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* save our device object in the file's private structure */
+ file->private_data = dev;
+
+ /* increment our usage count for the device */
+ kref_get(&dev->kref);
+
+exit:
+ LEAVE();
+ return ret;
+}
+
+static int as102_release(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+ struct as102_dev_t *dev = NULL;
+
+ ENTER();
+
+ dev = file->private_data;
+ if (dev != NULL) {
+ /* decrement the count on our device */
+ kref_put(&dev->kref, as102_usb_release);
+ }
+
+ LEAVE();
+ return ret;
+}
+
+MODULE_DEVICE_TABLE(usb, as102_usb_id_table);
+
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
diff --git a/drivers/staging/media/as102/as102_usb_drv.h b/drivers/staging/media/as102/as102_usb_drv.h
new file mode 100644
index 00000000000..fb1fc41dcd7
--- /dev/null
+++ b/drivers/staging/media/as102/as102_usb_drv.h
@@ -0,0 +1,59 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/version.h>
+
+#ifndef _AS102_USB_DRV_H_
+#define _AS102_USB_DRV_H_
+
+#define AS102_USB_DEVICE_TX_CTRL_CMD 0xF1
+#define AS102_USB_DEVICE_RX_CTRL_CMD 0xF2
+
+/* define these values to match the supported devices */
+
+/* Abilis system: "TITAN" */
+#define AS102_REFERENCE_DESIGN "Abilis Systems DVB-Titan"
+#define AS102_USB_DEVICE_VENDOR_ID 0x1BA6
+#define AS102_USB_DEVICE_PID_0001 0x0001
+
+/* PCTV Systems: PCTV picoStick (74e) */
+#define AS102_PCTV_74E "PCTV Systems picoStick (74e)"
+#define PCTV_74E_USB_VID 0x2013
+#define PCTV_74E_USB_PID 0x0246
+
+/* Elgato: EyeTV DTT Deluxe */
+#define AS102_ELGATO_EYETV_DTT_NAME "Elgato EyeTV DTT Deluxe"
+#define ELGATO_EYETV_DTT_USB_VID 0x0fd9
+#define ELGATO_EYETV_DTT_USB_PID 0x002c
+
+/* nBox: nBox DVB-T Dongle */
+#define AS102_NBOX_DVBT_DONGLE_NAME "nBox DVB-T Dongle"
+#define NBOX_DVBT_DONGLE_USB_VID 0x0b89
+#define NBOX_DVBT_DONGLE_USB_PID 0x0007
+
+void as102_urb_stream_irq(struct urb *urb);
+
+struct as10x_usb_token_cmd_t {
+ /* token cmd */
+ struct as10x_cmd_t c;
+ /* token response */
+ struct as10x_cmd_t r;
+};
+#endif
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/staging/media/as102/as10x_cmd.c
new file mode 100644
index 00000000000..0dcba806578
--- /dev/null
+++ b/drivers/staging/media/as102/as10x_cmd.c
@@ -0,0 +1,452 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include "as102_drv.h"
+#include "as10x_types.h"
+#include "as10x_cmd.h"
+
+/**
+ * as10x_cmd_turn_on - send turn on command to AS10x
+ * @phandle: pointer to AS10x handle
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_turn_on(as10x_handle_t *phandle)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.turn_on.req));
+
+ /* fill command */
+ pcmd->body.turn_on.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNON);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.turn_on.req) +
+ HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.turn_on.rsp) +
+ HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNON_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_turn_off - send turn off command to AS10x
+ * @phandle: pointer to AS10x handle
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_turn_off(as10x_handle_t *phandle)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.turn_off.req));
+
+ /* fill command */
+ pcmd->body.turn_off.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNOFF);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(
+ phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.turn_off.req) + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.turn_off.rsp) + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNOFF_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_set_tune - send set tune command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @ptune: tune parameters
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_set_tune(as10x_handle_t *phandle, struct as10x_tune_args *ptune)
+{
+ int error;
+ struct as10x_cmd_t *preq, *prsp;
+
+ ENTER();
+
+ preq = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(preq, (++phandle->cmd_xid),
+ sizeof(preq->body.set_tune.req));
+
+ /* fill command */
+ preq->body.set_tune.req.proc_id = cpu_to_le16(CONTROL_PROC_SETTUNE);
+ preq->body.set_tune.req.args.freq = cpu_to_le32(ptune->freq);
+ preq->body.set_tune.req.args.bandwidth = ptune->bandwidth;
+ preq->body.set_tune.req.args.hier_select = ptune->hier_select;
+ preq->body.set_tune.req.args.constellation = ptune->constellation;
+ preq->body.set_tune.req.args.hierarchy = ptune->hierarchy;
+ preq->body.set_tune.req.args.interleaving_mode =
+ ptune->interleaving_mode;
+ preq->body.set_tune.req.args.code_rate = ptune->code_rate;
+ preq->body.set_tune.req.args.guard_interval = ptune->guard_interval;
+ preq->body.set_tune.req.args.transmission_mode =
+ ptune->transmission_mode;
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle,
+ (uint8_t *) preq,
+ sizeof(preq->body.set_tune.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.set_tune.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_SETTUNE_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_get_tune_status - send get tune status command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @pstatus: pointer to updated status structure of the current tune
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_get_tune_status(as10x_handle_t *phandle,
+ struct as10x_tune_status *pstatus)
+{
+ int error;
+ struct as10x_cmd_t *preq, *prsp;
+
+ ENTER();
+
+ preq = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(preq, (++phandle->cmd_xid),
+ sizeof(preq->body.get_tune_status.req));
+
+ /* fill command */
+ preq->body.get_tune_status.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_GETTUNESTAT);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(
+ phandle,
+ (uint8_t *) preq,
+ sizeof(preq->body.get_tune_status.req) + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.get_tune_status.rsp) + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_GETTUNESTAT_RSP);
+ if (error < 0)
+ goto out;
+
+ /* Response OK -> get response data */
+ pstatus->tune_state = prsp->body.get_tune_status.rsp.sts.tune_state;
+ pstatus->signal_strength =
+ le16_to_cpu(prsp->body.get_tune_status.rsp.sts.signal_strength);
+ pstatus->PER = le16_to_cpu(prsp->body.get_tune_status.rsp.sts.PER);
+ pstatus->BER = le16_to_cpu(prsp->body.get_tune_status.rsp.sts.BER);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_get_tps - send get TPS command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @ptps: pointer to TPS parameters structure
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_get_tps(as10x_handle_t *phandle, struct as10x_tps *ptps)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.get_tps.req));
+
+ /* fill command */
+ pcmd->body.get_tps.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_GETTPS);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.get_tps.req) +
+ HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.get_tps.rsp) +
+ HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_GETTPS_RSP);
+ if (error < 0)
+ goto out;
+
+ /* Response OK -> get response data */
+ ptps->constellation = prsp->body.get_tps.rsp.tps.constellation;
+ ptps->hierarchy = prsp->body.get_tps.rsp.tps.hierarchy;
+ ptps->interleaving_mode = prsp->body.get_tps.rsp.tps.interleaving_mode;
+ ptps->code_rate_HP = prsp->body.get_tps.rsp.tps.code_rate_HP;
+ ptps->code_rate_LP = prsp->body.get_tps.rsp.tps.code_rate_LP;
+ ptps->guard_interval = prsp->body.get_tps.rsp.tps.guard_interval;
+ ptps->transmission_mode = prsp->body.get_tps.rsp.tps.transmission_mode;
+ ptps->DVBH_mask_HP = prsp->body.get_tps.rsp.tps.DVBH_mask_HP;
+ ptps->DVBH_mask_LP = prsp->body.get_tps.rsp.tps.DVBH_mask_LP;
+ ptps->cell_ID = le16_to_cpu(prsp->body.get_tps.rsp.tps.cell_ID);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_get_demod_stats - send get demod stats command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @pdemod_stats: pointer to demod stats parameters structure
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_get_demod_stats(as10x_handle_t *phandle,
+ struct as10x_demod_stats *pdemod_stats)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.get_demod_stats.req));
+
+ /* fill command */
+ pcmd->body.get_demod_stats.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_GET_DEMOD_STATS);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.get_demod_stats.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.get_demod_stats.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_GET_DEMOD_STATS_RSP);
+ if (error < 0)
+ goto out;
+
+ /* Response OK -> get response data */
+ pdemod_stats->frame_count =
+ le32_to_cpu(prsp->body.get_demod_stats.rsp.stats.frame_count);
+ pdemod_stats->bad_frame_count =
+ le32_to_cpu(prsp->body.get_demod_stats.rsp.stats.bad_frame_count);
+ pdemod_stats->bytes_fixed_by_rs =
+ le32_to_cpu(prsp->body.get_demod_stats.rsp.stats.bytes_fixed_by_rs);
+ pdemod_stats->mer =
+ le16_to_cpu(prsp->body.get_demod_stats.rsp.stats.mer);
+ pdemod_stats->has_started =
+ prsp->body.get_demod_stats.rsp.stats.has_started;
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_get_impulse_resp - send get impulse response command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @is_ready: pointer to value indicating when impulse
+ * response data is ready
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_get_impulse_resp(as10x_handle_t *phandle,
+ uint8_t *is_ready)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.get_impulse_rsp.req));
+
+ /* fill command */
+ pcmd->body.get_impulse_rsp.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_GET_IMPULSE_RESP);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.get_impulse_rsp.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.get_impulse_rsp.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_GET_IMPULSE_RESP_RSP);
+ if (error < 0)
+ goto out;
+
+ /* Response OK -> get response data */
+ *is_ready = prsp->body.get_impulse_rsp.rsp.is_ready;
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_build - build AS10x command header
+ * @pcmd: pointer to AS10x command buffer
+ * @xid: sequence id of the command
+ * @cmd_len: length of the command
+ */
+void as10x_cmd_build(struct as10x_cmd_t *pcmd,
+ uint16_t xid, uint16_t cmd_len)
+{
+ pcmd->header.req_id = cpu_to_le16(xid);
+ pcmd->header.prog = cpu_to_le16(SERVICE_PROG_ID);
+ pcmd->header.version = cpu_to_le16(SERVICE_PROG_VERSION);
+ pcmd->header.data_len = cpu_to_le16(cmd_len);
+}
+
+/**
+ * as10x_rsp_parse - Parse command response
+ * @prsp: pointer to AS10x command buffer
+ * @proc_id: id of the command
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_rsp_parse(struct as10x_cmd_t *prsp, uint16_t proc_id)
+{
+ int error;
+
+ /* extract command error code */
+ error = prsp->body.common.rsp.error;
+
+ if ((error == 0) &&
+ (le16_to_cpu(prsp->body.common.rsp.proc_id) == proc_id)) {
+ return 0;
+ }
+
+ return AS10X_CMD_ERROR;
+}
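+
+/*
+ * Pattern sketch (illustrative only, not part of the driver): every command
+ * above follows the same build -> xfer_cmd -> parse sequence. A hypothetical
+ * command with an empty request body would look like this.
+ */
+static int __maybe_unused as10x_cmd_example(as10x_handle_t *phandle,
+ uint16_t req_id, uint16_t rsp_id)
+{
+ int error = AS10X_CMD_ERROR;
+ struct as10x_cmd_t *pcmd = phandle->cmd;
+ struct as10x_cmd_t *prsp = phandle->rsp;
+
+ /* the header carries a fresh transaction id and the body length */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.common.req));
+ pcmd->body.common.req.proc_id = cpu_to_le16(req_id);
+
+ /* hand both buffers to the bus-specific transport, if one is set */
+ if (phandle->ops->xfer_cmd)
+ error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.common.req) + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.common.rsp) + HEADER_SIZE);
+
+ if (error < 0)
+ return error;
+
+ /* check the error byte and that the response id matches */
+ return as10x_rsp_parse(prsp, rsp_id);
+}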
diff --git a/drivers/staging/media/as102/as10x_cmd.h b/drivers/staging/media/as102/as10x_cmd.h
new file mode 100644
index 00000000000..01a716380e0
--- /dev/null
+++ b/drivers/staging/media/as102/as10x_cmd.h
@@ -0,0 +1,540 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _AS10X_CMD_H_
+#define _AS10X_CMD_H_
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#endif
+
+#include "as10x_types.h"
+
+/*********************************/
+/* MACRO DEFINITIONS */
+/*********************************/
+#define AS10X_CMD_ERROR -1
+
+#define SERVICE_PROG_ID 0x0002
+#define SERVICE_PROG_VERSION 0x0001
+
+#define HIER_NONE 0x00
+#define HIER_LOW_PRIORITY 0x01
+
+#define HEADER_SIZE (sizeof(struct as10x_cmd_header_t))
+
+/* context request types */
+#define GET_CONTEXT_DATA 1
+#define SET_CONTEXT_DATA 2
+
+/* ODSP suspend modes */
+#define CFG_MODE_ODSP_RESUME 0
+#define CFG_MODE_ODSP_SUSPEND 1
+
+/* Dump memory size */
+#define DUMP_BLOCK_SIZE_MAX 0x20
+
+/*********************************/
+/* TYPE DEFINITION */
+/*********************************/
+typedef enum {
+ CONTROL_PROC_TURNON = 0x0001,
+ CONTROL_PROC_TURNON_RSP = 0x0100,
+ CONTROL_PROC_SET_REGISTER = 0x0002,
+ CONTROL_PROC_SET_REGISTER_RSP = 0x0200,
+ CONTROL_PROC_GET_REGISTER = 0x0003,
+ CONTROL_PROC_GET_REGISTER_RSP = 0x0300,
+ CONTROL_PROC_SETTUNE = 0x000A,
+ CONTROL_PROC_SETTUNE_RSP = 0x0A00,
+ CONTROL_PROC_GETTUNESTAT = 0x000B,
+ CONTROL_PROC_GETTUNESTAT_RSP = 0x0B00,
+ CONTROL_PROC_GETTPS = 0x000D,
+ CONTROL_PROC_GETTPS_RSP = 0x0D00,
+ CONTROL_PROC_SETFILTER = 0x000E,
+ CONTROL_PROC_SETFILTER_RSP = 0x0E00,
+ CONTROL_PROC_REMOVEFILTER = 0x000F,
+ CONTROL_PROC_REMOVEFILTER_RSP = 0x0F00,
+ CONTROL_PROC_GET_IMPULSE_RESP = 0x0012,
+ CONTROL_PROC_GET_IMPULSE_RESP_RSP = 0x1200,
+ CONTROL_PROC_START_STREAMING = 0x0013,
+ CONTROL_PROC_START_STREAMING_RSP = 0x1300,
+ CONTROL_PROC_STOP_STREAMING = 0x0014,
+ CONTROL_PROC_STOP_STREAMING_RSP = 0x1400,
+ CONTROL_PROC_GET_DEMOD_STATS = 0x0015,
+ CONTROL_PROC_GET_DEMOD_STATS_RSP = 0x1500,
+ CONTROL_PROC_ELNA_CHANGE_MODE = 0x0016,
+ CONTROL_PROC_ELNA_CHANGE_MODE_RSP = 0x1600,
+ CONTROL_PROC_ODSP_CHANGE_MODE = 0x0017,
+ CONTROL_PROC_ODSP_CHANGE_MODE_RSP = 0x1700,
+ CONTROL_PROC_AGC_CHANGE_MODE = 0x0018,
+ CONTROL_PROC_AGC_CHANGE_MODE_RSP = 0x1800,
+
+ CONTROL_PROC_CONTEXT = 0x00FC,
+ CONTROL_PROC_CONTEXT_RSP = 0xFC00,
+ CONTROL_PROC_DUMP_MEMORY = 0x00FD,
+ CONTROL_PROC_DUMP_MEMORY_RSP = 0xFD00,
+ CONTROL_PROC_DUMPLOG_MEMORY = 0x00FE,
+ CONTROL_PROC_DUMPLOG_MEMORY_RSP = 0xFE00,
+ CONTROL_PROC_TURNOFF = 0x00FF,
+ CONTROL_PROC_TURNOFF_RSP = 0xFF00
+} control_proc;
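+
+/*
+ * Note on the identifiers above: each *_RSP value is simply the request
+ * value shifted into the high byte (e.g. 0x000A <-> 0x0A00). The helper
+ * below only illustrates that convention; the driver always uses the
+ * explicit *_RSP constants when parsing responses.
+ */
+static inline uint16_t control_proc_rsp_id(uint16_t req_id)
+{
+ return (uint16_t)(req_id << 8);
+}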
+
+
+#pragma pack(1)
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} TURN_ON;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t err;
+ } rsp;
+} TURN_OFF;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* tune params */
+ struct as10x_tune_args args;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ } rsp;
+} SET_TUNE;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ /* tune status */
+ struct as10x_tune_status sts;
+ } rsp;
+} GET_TUNE_STATUS;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ /* tps details */
+ struct as10x_tps tps;
+ } rsp;
+} GET_TPS;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ } rsp;
+} COMMON;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* PID to filter */
+ uint16_t pid;
+ /* stream type (MPE, PSI/SI or PES )*/
+ uint8_t stream_type;
+ /* PID index in filter table */
+ uint8_t idx;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ /* Filter id */
+ uint8_t filter_id;
+ } rsp;
+} ADD_PID_FILTER;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* PID to remove */
+ uint16_t pid;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ } rsp;
+} DEL_PID_FILTER;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} START_STREAMING;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} STOP_STREAMING;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* demod stats */
+ struct as10x_demod_stats stats;
+ } rsp;
+} GET_DEMOD_STATS;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* impulse response ready */
+ uint8_t is_ready;
+ } rsp;
+} GET_IMPULSE_RESP;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* value to write (for set context)*/
+ struct as10x_register_value reg_val;
+ /* context tag */
+ uint16_t tag;
+ /* context request type */
+ uint16_t type;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* value read (for get context) */
+ struct as10x_register_value reg_val;
+ /* context request type */
+ uint16_t type;
+ /* error */
+ uint8_t error;
+ } rsp;
+} FW_CONTEXT;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* register description */
+ struct as10x_register_addr reg_addr;
+ /* register content */
+ struct as10x_register_value reg_val;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} SET_REGISTER;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* register description */
+ struct as10x_register_addr reg_addr;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* register content */
+ struct as10x_register_value reg_val;
+ } rsp;
+} GET_REGISTER;
+
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* mode */
+ uint8_t mode;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} CFG_CHANGE_MODE;
+
+struct as10x_cmd_header_t {
+ uint16_t req_id;
+ uint16_t prog;
+ uint16_t version;
+ uint16_t data_len;
+};
+
+#define DUMP_BLOCK_SIZE 16
+typedef union {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* dump memory type request */
+ uint8_t dump_req;
+ /* register description */
+ struct as10x_register_addr reg_addr;
+ /* nb blocks to read */
+ uint16_t num_blocks;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* dump response */
+ uint8_t dump_rsp;
+ /* data */
+ union {
+ uint8_t data8[DUMP_BLOCK_SIZE];
+ uint16_t data16[DUMP_BLOCK_SIZE / sizeof(uint16_t)];
+ uint32_t data32[DUMP_BLOCK_SIZE / sizeof(uint32_t)];
+ } u;
+ } rsp;
+} DUMP_MEMORY;
+
+typedef union {
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* dump memory type request */
+ uint8_t dump_req;
+ } req;
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* dump response */
+ uint8_t dump_rsp;
+ /* dump data */
+ uint8_t data[DUMP_BLOCK_SIZE];
+ } rsp;
+} DUMPLOG_MEMORY;
+
+typedef union {
+ /* request */
+ struct {
+ uint16_t proc_id;
+ uint8_t data[64 - sizeof(struct as10x_cmd_header_t) -2 /* proc_id */];
+ } req;
+ /* response */
+ struct {
+ uint16_t proc_id;
+ uint8_t error;
+ uint8_t data[64 - sizeof(struct as10x_cmd_header_t) /* header */
+ - 2 /* proc_id */ - 1 /* rc */];
+ } rsp;
+} RAW_DATA;
+
+struct as10x_cmd_t {
+ /* header */
+ struct as10x_cmd_header_t header;
+ /* body */
+ union {
+ TURN_ON turn_on;
+ TURN_OFF turn_off;
+ SET_TUNE set_tune;
+ GET_TUNE_STATUS get_tune_status;
+ GET_TPS get_tps;
+ COMMON common;
+ ADD_PID_FILTER add_pid_filter;
+ DEL_PID_FILTER del_pid_filter;
+ START_STREAMING start_streaming;
+ STOP_STREAMING stop_streaming;
+ GET_DEMOD_STATS get_demod_stats;
+ GET_IMPULSE_RESP get_impulse_rsp;
+ FW_CONTEXT context;
+ SET_REGISTER set_register;
+ GET_REGISTER get_register;
+ CFG_CHANGE_MODE cfg_change_mode;
+ DUMP_MEMORY dump_memory;
+ DUMPLOG_MEMORY dumplog_memory;
+ RAW_DATA raw_data;
+ } body;
+};
+
+struct as10x_token_cmd_t {
+ /* token cmd */
+ struct as10x_cmd_t c;
+ /* token response */
+ struct as10x_cmd_t r;
+};
+#pragma pack()
+
+
+/**************************/
+/* FUNCTION DECLARATION */
+/**************************/
+
+void as10x_cmd_build(struct as10x_cmd_t *pcmd, uint16_t xid,
+ uint16_t cmd_len);
+int as10x_rsp_parse(struct as10x_cmd_t *r, uint16_t proc_id);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* as10x cmd */
+int as10x_cmd_turn_on(as10x_handle_t *phandle);
+int as10x_cmd_turn_off(as10x_handle_t *phandle);
+
+int as10x_cmd_set_tune(as10x_handle_t *phandle,
+ struct as10x_tune_args *ptune);
+
+int as10x_cmd_get_tune_status(as10x_handle_t *phandle,
+ struct as10x_tune_status *pstatus);
+
+int as10x_cmd_get_tps(as10x_handle_t *phandle,
+ struct as10x_tps *ptps);
+
+int as10x_cmd_get_demod_stats(as10x_handle_t *phandle,
+ struct as10x_demod_stats *pdemod_stats);
+
+int as10x_cmd_get_impulse_resp(as10x_handle_t *phandle,
+ uint8_t *is_ready);
+
+/* as10x cmd stream */
+int as10x_cmd_add_PID_filter(as10x_handle_t *phandle,
+ struct as10x_ts_filter *filter);
+int as10x_cmd_del_PID_filter(as10x_handle_t *phandle,
+ uint16_t pid_value);
+
+int as10x_cmd_start_streaming(as10x_handle_t *phandle);
+int as10x_cmd_stop_streaming(as10x_handle_t *phandle);
+
+/* as10x cmd cfg */
+int as10x_cmd_set_context(as10x_handle_t *phandle,
+ uint16_t tag,
+ uint32_t value);
+int as10x_cmd_get_context(as10x_handle_t *phandle,
+ uint16_t tag,
+ uint32_t *pvalue);
+
+int as10x_cmd_eLNA_change_mode(as10x_handle_t *phandle, uint8_t mode);
+int as10x_context_rsp_parse(struct as10x_cmd_t *prsp, uint16_t proc_id);
+#ifdef __cplusplus
+}
+#endif
+#endif
+/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/staging/media/as102/as10x_cmd_cfg.c
new file mode 100644
index 00000000000..ec6f69fcf39
--- /dev/null
+++ b/drivers/staging/media/as102/as10x_cmd_cfg.c
@@ -0,0 +1,215 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include "as102_drv.h"
+#include "as10x_types.h"
+#include "as10x_cmd.h"
+
+/***************************/
+/* FUNCTION DEFINITION */
+/***************************/
+
+/**
+ * as10x_cmd_get_context - Send get context command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @tag: context tag
+ * @pvalue: pointer where to store context value read
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_get_context(as10x_handle_t *phandle, uint16_t tag,
+ uint32_t *pvalue)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.context.req));
+
+ /* fill command */
+ pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT);
+ pcmd->body.context.req.tag = cpu_to_le16(tag);
+ pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.context.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.context.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response: context commands do not follow the common */
+ /* response structure -> specific response parsing is required */
+ error = as10x_context_rsp_parse(prsp, CONTROL_PROC_CONTEXT_RSP);
+
+ if (error == 0) {
+ /* Response OK -> get response data */
+ *pvalue = le32_to_cpu(prsp->body.context.rsp.reg_val.u.value32);
+ /* value returned is always a 32-bit value */
+ }
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_set_context - send set context command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @tag: context tag
+ * @value: value to set in context
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_set_context(as10x_handle_t *phandle, uint16_t tag,
+ uint32_t value)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.context.req));
+
+ /* fill command */
+ pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT);
+ /* pcmd->body.context.req.reg_val.mode initialization is not required */
+ pcmd->body.context.req.reg_val.u.value32 = cpu_to_le32(value);
+ pcmd->body.context.req.tag = cpu_to_le16(tag);
+ pcmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.context.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.context.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response: context commands do not follow the common */
+ /* response structure -> specific response parsing is required */
+ error = as10x_context_rsp_parse(prsp, CONTROL_PROC_CONTEXT_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_eLNA_change_mode - send eLNA change mode command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @mode: mode selected:
+ * - ON : 0x0 => eLNA always ON
+ * - OFF : 0x1 => eLNA always OFF
+ * - AUTO : 0x2 => eLNA follow hysteresis parameters
+ * to be ON or OFF
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_eLNA_change_mode(as10x_handle_t *phandle, uint8_t mode)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.cfg_change_mode.req));
+
+ /* fill command */
+ pcmd->body.cfg_change_mode.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_ELNA_CHANGE_MODE);
+ pcmd->body.cfg_change_mode.req.mode = mode;
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.cfg_change_mode.req)
+ + HEADER_SIZE, (uint8_t *) prsp,
+ sizeof(prsp->body.cfg_change_mode.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_ELNA_CHANGE_MODE_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_context_rsp_parse - Parse context command response
+ * @prsp: pointer to AS10x command response buffer
+ * @proc_id: id of the command
+ *
+ * Since the context command response does not follow the common
+ * response structure, a specific parse function is required.
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_context_rsp_parse(struct as10x_cmd_t *prsp, uint16_t proc_id)
+{
+ int err;
+
+ err = prsp->body.context.rsp.error;
+
+ if ((err == 0) &&
+ (le16_to_cpu(prsp->body.context.rsp.proc_id) == proc_id)) {
+ return 0;
+ }
+ return AS10X_CMD_ERROR;
+}
diff --git a/drivers/staging/media/as102/as10x_cmd_stream.c b/drivers/staging/media/as102/as10x_cmd_stream.c
new file mode 100644
index 00000000000..045c7068319
--- /dev/null
+++ b/drivers/staging/media/as102/as10x_cmd_stream.c
@@ -0,0 +1,223 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include "as102_drv.h"
+#include "as10x_cmd.h"
+
+/**
+ * as10x_cmd_add_PID_filter - send add filter command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @filter: TS PID filter to add for DVB-T
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_add_PID_filter(as10x_handle_t *phandle,
+ struct as10x_ts_filter *filter)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.add_pid_filter.req));
+
+ /* fill command */
+ pcmd->body.add_pid_filter.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_SETFILTER);
+ pcmd->body.add_pid_filter.req.pid = cpu_to_le16(filter->pid);
+ pcmd->body.add_pid_filter.req.stream_type = filter->type;
+
+ if (filter->idx < 16)
+ pcmd->body.add_pid_filter.req.idx = filter->idx;
+ else
+ pcmd->body.add_pid_filter.req.idx = 0xFF;
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.add_pid_filter.req)
+ + HEADER_SIZE, (uint8_t *) prsp,
+ sizeof(prsp->body.add_pid_filter.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_SETFILTER_RSP);
+
+ if (error == 0) {
+ /* Response OK -> get response data */
+ filter->idx = prsp->body.add_pid_filter.rsp.filter_id;
+ }
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_del_PID_filter - Send delete filter command to AS10x
+ * @phandle: pointer to AS10x handle
+ * @pid_value: PID to delete
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_del_PID_filter(as10x_handle_t *phandle,
+ uint16_t pid_value)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.del_pid_filter.req));
+
+ /* fill command */
+ pcmd->body.del_pid_filter.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_REMOVEFILTER);
+ pcmd->body.del_pid_filter.req.pid = cpu_to_le16(pid_value);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.del_pid_filter.req)
+ + HEADER_SIZE, (uint8_t *) prsp,
+ sizeof(prsp->body.del_pid_filter.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_REMOVEFILTER_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_start_streaming - Send start streaming command to AS10x
+ * @phandle: pointer to AS10x handle
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_start_streaming(as10x_handle_t *phandle)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.start_streaming.req));
+
+ /* fill command */
+ pcmd->body.start_streaming.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_START_STREAMING);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.start_streaming.req)
+ + HEADER_SIZE, (uint8_t *) prsp,
+ sizeof(prsp->body.start_streaming.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_START_STREAMING_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
+
+/**
+ * as10x_cmd_stop_streaming - Send stop streaming command to AS10x
+ * @phandle: pointer to AS10x handle
+ *
+ * Return 0 on success or negative value in case of error.
+ */
+int as10x_cmd_stop_streaming(as10x_handle_t *phandle)
+{
+ int error;
+ struct as10x_cmd_t *pcmd, *prsp;
+
+ ENTER();
+
+ pcmd = phandle->cmd;
+ prsp = phandle->rsp;
+
+ /* prepare command */
+ as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ sizeof(pcmd->body.stop_streaming.req));
+
+ /* fill command */
+ pcmd->body.stop_streaming.req.proc_id =
+ cpu_to_le16(CONTROL_PROC_STOP_STREAMING);
+
+ /* send command */
+ if (phandle->ops->xfer_cmd) {
+ error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ sizeof(pcmd->body.stop_streaming.req)
+ + HEADER_SIZE, (uint8_t *) prsp,
+ sizeof(prsp->body.stop_streaming.rsp)
+ + HEADER_SIZE);
+ } else {
+ error = AS10X_CMD_ERROR;
+ }
+
+ if (error < 0)
+ goto out;
+
+ /* parse response */
+ error = as10x_rsp_parse(prsp, CONTROL_PROC_STOP_STREAMING_RSP);
+
+out:
+ LEAVE();
+ return error;
+}
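+
+/*
+ * Sequence sketch (illustrative only, not part of the driver): a
+ * hypothetical caller would combine the commands above as follows --
+ * install a PID filter, start streaming, then stop again. Error handling
+ * is reduced to early returns.
+ */
+static int __maybe_unused as10x_stream_example(as10x_handle_t *phandle,
+ uint16_t pid)
+{
+ int error;
+ struct as10x_ts_filter filter = {
+ .pid = pid,
+ .type = TS_PID_TYPE_TS,
+ .idx = 0xFF, /* no fixed slot requested (see add_PID_filter above) */
+ };
+
+ /* on success the chosen slot is written back into filter.idx */
+ error = as10x_cmd_add_PID_filter(phandle, &filter);
+ if (error < 0)
+ return error;
+
+ error = as10x_cmd_start_streaming(phandle);
+ if (error < 0)
+ return error;
+
+ return as10x_cmd_stop_streaming(phandle);
+}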
diff --git a/drivers/staging/media/as102/as10x_handle.h b/drivers/staging/media/as102/as10x_handle.h
new file mode 100644
index 00000000000..4f01a76e982
--- /dev/null
+++ b/drivers/staging/media/as102/as10x_handle.h
@@ -0,0 +1,58 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifdef __KERNEL__
+struct as102_bus_adapter_t;
+struct as102_dev_t;
+
+#define as10x_handle_t struct as102_bus_adapter_t
+#include "as10x_cmd.h"
+
+/* values for "mode" field */
+#define REGMODE8 8
+#define REGMODE16 16
+#define REGMODE32 32
+
+struct as102_priv_ops_t {
+ int (*upload_fw_pkt) (struct as102_bus_adapter_t *bus_adap,
+ unsigned char *buf, int buflen, int swap32);
+
+ int (*send_cmd) (struct as102_bus_adapter_t *bus_adap,
+ unsigned char *buf, int buflen);
+
+ int (*xfer_cmd) (struct as102_bus_adapter_t *bus_adap,
+ unsigned char *send_buf, int send_buf_len,
+ unsigned char *recv_buf, int recv_buf_len);
+/*
+ int (*pid_filter) (struct as102_bus_adapter_t *bus_adap,
+ int index, u16 pid, int onoff);
+*/
+ int (*start_stream) (struct as102_dev_t *dev);
+ void (*stop_stream) (struct as102_dev_t *dev);
+
+ int (*reset_target) (struct as102_bus_adapter_t *bus_adap);
+
+ int (*read_write)(struct as102_bus_adapter_t *bus_adap, uint8_t mode,
+ uint32_t rd_addr, uint16_t rd_len,
+ uint32_t wr_addr, uint16_t wr_len);
+
+ int (*as102_read_ep2) (struct as102_bus_adapter_t *bus_adap,
+ unsigned char *recv_buf,
+ int recv_buf_len);
+};
+#endif
diff --git a/drivers/staging/media/as102/as10x_types.h b/drivers/staging/media/as102/as10x_types.h
new file mode 100644
index 00000000000..3dedb3c1420
--- /dev/null
+++ b/drivers/staging/media/as102/as10x_types.h
@@ -0,0 +1,198 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _AS10X_TYPES_H_
+#define _AS10X_TYPES_H_
+
+#include "as10x_handle.h"
+
+/*********************************/
+/* MACRO DEFINITIONS */
+/*********************************/
+
+/* bandwidth constant values */
+#define BW_5_MHZ 0x00
+#define BW_6_MHZ 0x01
+#define BW_7_MHZ 0x02
+#define BW_8_MHZ 0x03
+
+/* hierarchy priority selection values */
+#define HIER_NO_PRIORITY 0x00
+#define HIER_LOW_PRIORITY 0x01
+#define HIER_HIGH_PRIORITY 0x02
+
+/* constellation available values */
+#define CONST_QPSK 0x00
+#define CONST_QAM16 0x01
+#define CONST_QAM64 0x02
+#define CONST_UNKNOWN 0xFF
+
+/* hierarchy available values */
+#define HIER_NONE 0x00
+#define HIER_ALPHA_1 0x01
+#define HIER_ALPHA_2 0x02
+#define HIER_ALPHA_4 0x03
+#define HIER_UNKNOWN 0xFF
+
+/* interleaving available values */
+#define INTLV_NATIVE 0x00
+#define INTLV_IN_DEPTH 0x01
+#define INTLV_UNKNOWN 0xFF
+
+/* code rate available values */
+#define CODE_RATE_1_2 0x00
+#define CODE_RATE_2_3 0x01
+#define CODE_RATE_3_4 0x02
+#define CODE_RATE_5_6 0x03
+#define CODE_RATE_7_8 0x04
+#define CODE_RATE_UNKNOWN 0xFF
+
+/* guard interval available values */
+#define GUARD_INT_1_32 0x00
+#define GUARD_INT_1_16 0x01
+#define GUARD_INT_1_8 0x02
+#define GUARD_INT_1_4 0x03
+#define GUARD_UNKNOWN 0xFF
+
+/* transmission mode available values */
+#define TRANS_MODE_2K 0x00
+#define TRANS_MODE_8K 0x01
+#define TRANS_MODE_4K 0x02
+#define TRANS_MODE_UNKNOWN 0xFF
+
+/* DVBH signalling available values */
+#define TIMESLICING_PRESENT 0x01
+#define MPE_FEC_PRESENT 0x02
+
+/* tune state available */
+#define TUNE_STATUS_NOT_TUNED 0x00
+#define TUNE_STATUS_IDLE 0x01
+#define TUNE_STATUS_LOCKING 0x02
+#define TUNE_STATUS_SIGNAL_DVB_OK 0x03
+#define TUNE_STATUS_STREAM_DETECTED 0x04
+#define TUNE_STATUS_STREAM_TUNED 0x05
+#define TUNE_STATUS_ERROR 0xFF
+
+/* available TS PID filter types */
+#define TS_PID_TYPE_TS 0
+#define TS_PID_TYPE_PSI_SI 1
+#define TS_PID_TYPE_MPE 2
+
+/* number of echoes available */
+#define MAX_ECHOS 15
+
+/* Context types */
+#define CONTEXT_LNA 1010
+#define CONTEXT_ELNA_HYSTERESIS 4003
+#define CONTEXT_ELNA_GAIN 4004
+#define CONTEXT_MER_THRESHOLD 5005
+#define CONTEXT_MER_OFFSET 5006
+#define CONTEXT_IR_STATE 7000
+#define CONTEXT_TSOUT_MSB_FIRST 7004
+#define CONTEXT_TSOUT_FALLING_EDGE 7005
+
+/* Configuration modes */
+#define CFG_MODE_ON 0
+#define CFG_MODE_OFF 1
+#define CFG_MODE_AUTO 2
+
+#pragma pack(1)
+struct as10x_tps {
+ uint8_t constellation;
+ uint8_t hierarchy;
+ uint8_t interleaving_mode;
+ uint8_t code_rate_HP;
+ uint8_t code_rate_LP;
+ uint8_t guard_interval;
+ uint8_t transmission_mode;
+ uint8_t DVBH_mask_HP;
+ uint8_t DVBH_mask_LP;
+ uint16_t cell_ID;
+};
+
+struct as10x_tune_args {
+ /* frequency */
+ uint32_t freq;
+ /* bandwidth */
+ uint8_t bandwidth;
+ /* hierarchy selection */
+ uint8_t hier_select;
+ /* constellation */
+ uint8_t constellation;
+ /* hierarchy */
+ uint8_t hierarchy;
+ /* interleaving mode */
+ uint8_t interleaving_mode;
+ /* code rate */
+ uint8_t code_rate;
+ /* guard interval */
+ uint8_t guard_interval;
+ /* transmission mode */
+ uint8_t transmission_mode;
+};
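+
+/*
+ * Example (illustrative values only, not used by the driver): a tune
+ * request is built from the constants defined at the top of this header.
+ * The frequency value below is only a placeholder; its unit is whatever
+ * the caller of as10x_cmd_set_tune() already uses.
+ */
+static inline void as10x_example_tune_args(struct as10x_tune_args *ptune)
+{
+ ptune->freq = 474000; /* placeholder centre frequency */
+ ptune->bandwidth = BW_8_MHZ;
+ ptune->hier_select = HIER_NO_PRIORITY;
+ ptune->constellation = CONST_QAM64;
+ ptune->hierarchy = HIER_NONE;
+ ptune->interleaving_mode = INTLV_NATIVE;
+ ptune->code_rate = CODE_RATE_2_3;
+ ptune->guard_interval = GUARD_INT_1_8;
+ ptune->transmission_mode = TRANS_MODE_8K;
+}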
+
+struct as10x_tune_status {
+ /* tune status */
+ uint8_t tune_state;
+ /* signal strength */
+ int16_t signal_strength;
+ /* packet error rate 10^-4 */
+ uint16_t PER;
+ /* bit error rate 10^-4 */
+ uint16_t BER;
+};
+
+struct as10x_demod_stats {
+ /* frame counter */
+ uint32_t frame_count;
+ /* Bad frame counter */
+ uint32_t bad_frame_count;
+ /* Number of wrong bytes fixed by Reed-Solomon */
+ uint32_t bytes_fixed_by_rs;
+ /* Averaged MER */
+ uint16_t mer;
+ /* statistics calculation state indicator (started or not) */
+ uint8_t has_started;
+};
+
+struct as10x_ts_filter {
+ uint16_t pid; /* valid PID range: 0x0000 - 0x2000 */
+ uint8_t type; /* see the TS_PID_TYPE_<N> values */
+ uint8_t idx; /* index in the filtering table */
+};
+
+struct as10x_register_value {
+ uint8_t mode;
+ union {
+ uint8_t value8; /* 8 bit value */
+ uint16_t value16; /* 16 bit value */
+ uint32_t value32; /* 32 bit value */
+ } u;
+};
+
+#pragma pack()
+
+struct as10x_register_addr {
+ /* register addr */
+ uint32_t addr;
+ /* register mode access */
+ uint8_t mode;
+};
+
+
+#endif
diff --git a/drivers/staging/cxd2099/Kconfig b/drivers/staging/media/cxd2099/Kconfig
index b48aefddc84..b48aefddc84 100644
--- a/drivers/staging/cxd2099/Kconfig
+++ b/drivers/staging/media/cxd2099/Kconfig
diff --git a/drivers/staging/cxd2099/Makefile b/drivers/staging/media/cxd2099/Makefile
index 64cfc77be35..64cfc77be35 100644
--- a/drivers/staging/cxd2099/Makefile
+++ b/drivers/staging/media/cxd2099/Makefile
diff --git a/drivers/staging/cxd2099/TODO b/drivers/staging/media/cxd2099/TODO
index 375bb6f8ee2..375bb6f8ee2 100644
--- a/drivers/staging/cxd2099/TODO
+++ b/drivers/staging/media/cxd2099/TODO
diff --git a/drivers/staging/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 1c04185bcfd..1c04185bcfd 100644
--- a/drivers/staging/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
diff --git a/drivers/staging/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h
index 19c588a5958..19c588a5958 100644
--- a/drivers/staging/cxd2099/cxd2099.h
+++ b/drivers/staging/media/cxd2099/cxd2099.h
diff --git a/drivers/staging/dt3155v4l/Kconfig b/drivers/staging/media/dt3155v4l/Kconfig
index 226a1ca90b3..226a1ca90b3 100644
--- a/drivers/staging/dt3155v4l/Kconfig
+++ b/drivers/staging/media/dt3155v4l/Kconfig
diff --git a/drivers/staging/dt3155v4l/Makefile b/drivers/staging/media/dt3155v4l/Makefile
index ce7a3ec2faf..ce7a3ec2faf 100644
--- a/drivers/staging/dt3155v4l/Makefile
+++ b/drivers/staging/media/dt3155v4l/Makefile
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 5b212dc725e..04e93c49f03 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -193,7 +193,7 @@ dt3155_start_acq(struct dt3155_priv *pd)
struct vb2_buffer *vb = pd->curr_buf;
dma_addr_t dma_addr;
- dma_addr = vb2_dma_contig_plane_paddr(vb, 0);
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
iowrite32(dma_addr, pd->regs + EVEN_DMA_START);
iowrite32(dma_addr + img_width, pd->regs + ODD_DMA_START);
iowrite32(img_width, pd->regs + EVEN_DMA_STRIDE);
@@ -357,7 +357,7 @@ dt3155_irq_handler_even(int irq, void *dev_id)
ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
list_del(&ivb->done_entry);
ipd->curr_buf = ivb;
- dma_addr = vb2_dma_contig_plane_paddr(ivb, 0);
+ dma_addr = vb2_dma_contig_plane_dma_addr(ivb, 0);
iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
iowrite32(dma_addr + img_width, ipd->regs + ODD_DMA_START);
iowrite32(img_width, ipd->regs + EVEN_DMA_STRIDE);
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.h b/drivers/staging/media/dt3155v4l/dt3155v4l.h
index 2e4f89d402e..2e4f89d402e 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.h
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.h
diff --git a/drivers/staging/easycap/Kconfig b/drivers/staging/media/easycap/Kconfig
index a425a6f9cdc..a425a6f9cdc 100644
--- a/drivers/staging/easycap/Kconfig
+++ b/drivers/staging/media/easycap/Kconfig
diff --git a/drivers/staging/easycap/Makefile b/drivers/staging/media/easycap/Makefile
index a34e75f59c1..a34e75f59c1 100644
--- a/drivers/staging/easycap/Makefile
+++ b/drivers/staging/media/easycap/Makefile
diff --git a/drivers/staging/easycap/README b/drivers/staging/media/easycap/README
index 796b032384b..796b032384b 100644
--- a/drivers/staging/easycap/README
+++ b/drivers/staging/media/easycap/README
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/media/easycap/easycap.h
index 7b256a948c2..7b256a948c2 100644
--- a/drivers/staging/easycap/easycap.h
+++ b/drivers/staging/media/easycap/easycap.h
diff --git a/drivers/staging/easycap/easycap_ioctl.c b/drivers/staging/media/easycap/easycap_ioctl.c
index c99addfb624..c99addfb624 100644
--- a/drivers/staging/easycap/easycap_ioctl.c
+++ b/drivers/staging/media/easycap/easycap_ioctl.c
diff --git a/drivers/staging/easycap/easycap_low.c b/drivers/staging/media/easycap/easycap_low.c
index 0385735ac6d..0385735ac6d 100644
--- a/drivers/staging/easycap/easycap_low.c
+++ b/drivers/staging/media/easycap/easycap_low.c
diff --git a/drivers/staging/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index a45c0b50706..a45c0b50706 100644
--- a/drivers/staging/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
diff --git a/drivers/staging/easycap/easycap_settings.c b/drivers/staging/media/easycap/easycap_settings.c
index 70f59b13c34..70f59b13c34 100644
--- a/drivers/staging/easycap/easycap_settings.c
+++ b/drivers/staging/media/easycap/easycap_settings.c
diff --git a/drivers/staging/easycap/easycap_sound.c b/drivers/staging/media/easycap/easycap_sound.c
index b22bb39b5f6..b22bb39b5f6 100644
--- a/drivers/staging/easycap/easycap_sound.c
+++ b/drivers/staging/media/easycap/easycap_sound.c
diff --git a/drivers/staging/easycap/easycap_testcard.c b/drivers/staging/media/easycap/easycap_testcard.c
index 0f71470ace3..0f71470ace3 100644
--- a/drivers/staging/easycap/easycap_testcard.c
+++ b/drivers/staging/media/easycap/easycap_testcard.c
diff --git a/drivers/staging/go7007/Kconfig b/drivers/staging/media/go7007/Kconfig
index 7dfb2815b9e..7dfb2815b9e 100644
--- a/drivers/staging/go7007/Kconfig
+++ b/drivers/staging/media/go7007/Kconfig
diff --git a/drivers/staging/go7007/Makefile b/drivers/staging/media/go7007/Makefile
index 6ee837c5670..6ee837c5670 100644
--- a/drivers/staging/go7007/Makefile
+++ b/drivers/staging/media/go7007/Makefile
diff --git a/drivers/staging/go7007/README b/drivers/staging/media/go7007/README
index 48f44763781..48f44763781 100644
--- a/drivers/staging/go7007/README
+++ b/drivers/staging/media/go7007/README
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/media/go7007/go7007-driver.c
index 6c9279a6d60..6c9279a6d60 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/media/go7007/go7007-driver.c
diff --git a/drivers/staging/go7007/go7007-fw.c b/drivers/staging/media/go7007/go7007-fw.c
index c9a6409edfe..c9a6409edfe 100644
--- a/drivers/staging/go7007/go7007-fw.c
+++ b/drivers/staging/media/go7007/go7007-fw.c
diff --git a/drivers/staging/go7007/go7007-i2c.c b/drivers/staging/media/go7007/go7007-i2c.c
index b8cfa1a6eae..b8cfa1a6eae 100644
--- a/drivers/staging/go7007/go7007-i2c.c
+++ b/drivers/staging/media/go7007/go7007-i2c.c
diff --git a/drivers/staging/go7007/go7007-priv.h b/drivers/staging/media/go7007/go7007-priv.h
index b58c394c655..b58c394c655 100644
--- a/drivers/staging/go7007/go7007-priv.h
+++ b/drivers/staging/media/go7007/go7007-priv.h
diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 3db3b0a91cc..3db3b0a91cc 100644
--- a/drivers/staging/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index 2b27d8da70a..2b27d8da70a 100644
--- a/drivers/staging/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
diff --git a/drivers/staging/go7007/go7007.h b/drivers/staging/media/go7007/go7007.h
index 7399c915a93..7399c915a93 100644
--- a/drivers/staging/go7007/go7007.h
+++ b/drivers/staging/media/go7007/go7007.h
diff --git a/drivers/staging/go7007/go7007.txt b/drivers/staging/media/go7007/go7007.txt
index 9db1f3952fd..9db1f3952fd 100644
--- a/drivers/staging/go7007/go7007.txt
+++ b/drivers/staging/media/go7007/go7007.txt
diff --git a/drivers/staging/go7007/s2250-board.c b/drivers/staging/media/go7007/s2250-board.c
index e7736a91553..e7736a91553 100644
--- a/drivers/staging/go7007/s2250-board.c
+++ b/drivers/staging/media/go7007/s2250-board.c
diff --git a/drivers/staging/go7007/s2250-loader.c b/drivers/staging/media/go7007/s2250-loader.c
index 4e132519e25..4e132519e25 100644
--- a/drivers/staging/go7007/s2250-loader.c
+++ b/drivers/staging/media/go7007/s2250-loader.c
diff --git a/drivers/staging/go7007/s2250-loader.h b/drivers/staging/media/go7007/s2250-loader.h
index b7c301af16c..b7c301af16c 100644
--- a/drivers/staging/go7007/s2250-loader.h
+++ b/drivers/staging/media/go7007/s2250-loader.h
diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/media/go7007/saa7134-go7007.c
index cf7c34a9945..cf7c34a9945 100644
--- a/drivers/staging/go7007/saa7134-go7007.c
+++ b/drivers/staging/media/go7007/saa7134-go7007.c
diff --git a/drivers/staging/go7007/snd-go7007.c b/drivers/staging/media/go7007/snd-go7007.c
index deac938d850..deac938d850 100644
--- a/drivers/staging/go7007/snd-go7007.c
+++ b/drivers/staging/media/go7007/snd-go7007.c
diff --git a/drivers/staging/go7007/wis-i2c.h b/drivers/staging/media/go7007/wis-i2c.h
index 3c2b9be455d..3c2b9be455d 100644
--- a/drivers/staging/go7007/wis-i2c.h
+++ b/drivers/staging/media/go7007/wis-i2c.h
diff --git a/drivers/staging/go7007/wis-ov7640.c b/drivers/staging/media/go7007/wis-ov7640.c
index 6bc9470fecb..6bc9470fecb 100644
--- a/drivers/staging/go7007/wis-ov7640.c
+++ b/drivers/staging/media/go7007/wis-ov7640.c
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/media/go7007/wis-saa7113.c
index 05e0e108386..05e0e108386 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/media/go7007/wis-saa7113.c
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/media/go7007/wis-saa7115.c
index 46cff59e28b..46cff59e28b 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/media/go7007/wis-saa7115.c
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/media/go7007/wis-sony-tuner.c
index 8f1b7d4f6a2..8f1b7d4f6a2 100644
--- a/drivers/staging/go7007/wis-sony-tuner.c
+++ b/drivers/staging/media/go7007/wis-sony-tuner.c
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/media/go7007/wis-tw2804.c
index 9134f03e3cf..9134f03e3cf 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/media/go7007/wis-tw2804.c
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/media/go7007/wis-tw9903.c
index 9230f4a8052..9230f4a8052 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/media/go7007/wis-tw9903.c
diff --git a/drivers/staging/go7007/wis-uda1342.c b/drivers/staging/media/go7007/wis-uda1342.c
index 0127be2f3be..0127be2f3be 100644
--- a/drivers/staging/go7007/wis-uda1342.c
+++ b/drivers/staging/media/go7007/wis-uda1342.c
diff --git a/drivers/staging/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index 526ec0fc2f0..526ec0fc2f0 100644
--- a/drivers/staging/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
diff --git a/drivers/staging/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index d76b0fa2af5..d76b0fa2af5 100644
--- a/drivers/staging/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
diff --git a/drivers/staging/lirc/TODO b/drivers/staging/media/lirc/TODO
index b6cb593f55c..b6cb593f55c 100644
--- a/drivers/staging/lirc/TODO
+++ b/drivers/staging/media/lirc/TODO
diff --git a/drivers/staging/lirc/TODO.lirc_zilog b/drivers/staging/media/lirc/TODO.lirc_zilog
index a97800a8e12..a97800a8e12 100644
--- a/drivers/staging/lirc/TODO.lirc_zilog
+++ b/drivers/staging/media/lirc/TODO.lirc_zilog
diff --git a/drivers/staging/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index c5a0d27a02d..c5a0d27a02d 100644
--- a/drivers/staging/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
diff --git a/drivers/staging/lirc/lirc_ene0100.h b/drivers/staging/media/lirc/lirc_ene0100.h
index 06bebd6acc4..06bebd6acc4 100644
--- a/drivers/staging/lirc/lirc_ene0100.h
+++ b/drivers/staging/media/lirc/lirc_ene0100.h
diff --git a/drivers/staging/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 0dc2c2b22c2..0dc2c2b22c2 100644
--- a/drivers/staging/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index f5308d5929c..f5308d5929c 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 792aac0a8e7..792aac0a8e7 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
diff --git a/drivers/staging/lirc/lirc_parallel.h b/drivers/staging/media/lirc/lirc_parallel.h
index 4bed6afe063..4bed6afe063 100644
--- a/drivers/staging/lirc/lirc_parallel.h
+++ b/drivers/staging/media/lirc/lirc_parallel.h
diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index a2d18b0aa04..a2d18b0aa04 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 8a060a8a722..8a060a8a722 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index 6903d3992ec..6903d3992ec 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
diff --git a/drivers/staging/lirc/lirc_ttusbir.c b/drivers/staging/media/lirc/lirc_ttusbir.c
index e4b329b8caf..e4b329b8caf 100644
--- a/drivers/staging/lirc/lirc_ttusbir.c
+++ b/drivers/staging/media/lirc/lirc_ttusbir.c
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 0302d82a12f..0302d82a12f 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
diff --git a/drivers/staging/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig
index 03dcac4ea4d..03dcac4ea4d 100644
--- a/drivers/staging/solo6x10/Kconfig
+++ b/drivers/staging/media/solo6x10/Kconfig
diff --git a/drivers/staging/solo6x10/Makefile b/drivers/staging/media/solo6x10/Makefile
index 72816cf1670..72816cf1670 100644
--- a/drivers/staging/solo6x10/Makefile
+++ b/drivers/staging/media/solo6x10/Makefile
diff --git a/drivers/staging/solo6x10/TODO b/drivers/staging/media/solo6x10/TODO
index 7e6c4fa130d..7e6c4fa130d 100644
--- a/drivers/staging/solo6x10/TODO
+++ b/drivers/staging/media/solo6x10/TODO
diff --git a/drivers/staging/solo6x10/core.c b/drivers/staging/media/solo6x10/core.c
index f974f6412ad..f974f6412ad 100644
--- a/drivers/staging/solo6x10/core.c
+++ b/drivers/staging/media/solo6x10/core.c
diff --git a/drivers/staging/solo6x10/disp.c b/drivers/staging/media/solo6x10/disp.c
index 884c0eb757c..884c0eb757c 100644
--- a/drivers/staging/solo6x10/disp.c
+++ b/drivers/staging/media/solo6x10/disp.c
diff --git a/drivers/staging/solo6x10/enc.c b/drivers/staging/media/solo6x10/enc.c
index de502599bb1..de502599bb1 100644
--- a/drivers/staging/solo6x10/enc.c
+++ b/drivers/staging/media/solo6x10/enc.c
diff --git a/drivers/staging/solo6x10/g723.c b/drivers/staging/media/solo6x10/g723.c
index 59274bfca95..2cd0de28a63 100644
--- a/drivers/staging/solo6x10/g723.c
+++ b/drivers/staging/media/solo6x10/g723.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/freezer.h>
+#include <linux/export.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
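This include, like the similar additions later in the series for rts5139, usbip, winbond, wlags49_h2 and the target code, follows the module.h header split: code that uses EXPORT_SYMBOL or the module macros must now pull in <linux/export.h> or <linux/module.h> explicitly. A minimal, hypothetical sketch of the requirement (not taken from this driver):

/* Sketch only: after the module.h split, EXPORT_SYMBOL needs
 * <linux/export.h>; the full module macros still need <linux/module.h>. */
#include <linux/export.h>
#include <linux/module.h>

int example_helper(int x)		/* hypothetical exported helper */
{
	return x + 1;
}
EXPORT_SYMBOL(example_helper);

MODULE_LICENSE("GPL");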
diff --git a/drivers/staging/solo6x10/gpio.c b/drivers/staging/media/solo6x10/gpio.c
index 0925e6f33a9..0925e6f33a9 100644
--- a/drivers/staging/solo6x10/gpio.c
+++ b/drivers/staging/media/solo6x10/gpio.c
diff --git a/drivers/staging/solo6x10/i2c.c b/drivers/staging/media/solo6x10/i2c.c
index ef95a500b4d..ef95a500b4d 100644
--- a/drivers/staging/solo6x10/i2c.c
+++ b/drivers/staging/media/solo6x10/i2c.c
diff --git a/drivers/staging/solo6x10/jpeg.h b/drivers/staging/media/solo6x10/jpeg.h
index 50defec318c..50defec318c 100644
--- a/drivers/staging/solo6x10/jpeg.h
+++ b/drivers/staging/media/solo6x10/jpeg.h
diff --git a/drivers/staging/solo6x10/offsets.h b/drivers/staging/media/solo6x10/offsets.h
index 3d7e569f1cf..3d7e569f1cf 100644
--- a/drivers/staging/solo6x10/offsets.h
+++ b/drivers/staging/media/solo6x10/offsets.h
diff --git a/drivers/staging/solo6x10/osd-font.h b/drivers/staging/media/solo6x10/osd-font.h
index 591e0e82e0e..591e0e82e0e 100644
--- a/drivers/staging/solo6x10/osd-font.h
+++ b/drivers/staging/media/solo6x10/osd-font.h
diff --git a/drivers/staging/solo6x10/p2m.c b/drivers/staging/media/solo6x10/p2m.c
index 56210f0fc5e..56210f0fc5e 100644
--- a/drivers/staging/solo6x10/p2m.c
+++ b/drivers/staging/media/solo6x10/p2m.c
diff --git a/drivers/staging/solo6x10/registers.h b/drivers/staging/media/solo6x10/registers.h
index aca544472c9..aca544472c9 100644
--- a/drivers/staging/solo6x10/registers.h
+++ b/drivers/staging/media/solo6x10/registers.h
diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
index abee7213202..abee7213202 100644
--- a/drivers/staging/solo6x10/solo6x10.h
+++ b/drivers/staging/media/solo6x10/solo6x10.h
diff --git a/drivers/staging/solo6x10/tw28.c b/drivers/staging/media/solo6x10/tw28.c
index db56b42c56c..db56b42c56c 100644
--- a/drivers/staging/solo6x10/tw28.c
+++ b/drivers/staging/media/solo6x10/tw28.c
diff --git a/drivers/staging/solo6x10/tw28.h b/drivers/staging/media/solo6x10/tw28.h
index a44a03afbd3..a44a03afbd3 100644
--- a/drivers/staging/solo6x10/tw28.h
+++ b/drivers/staging/media/solo6x10/tw28.h
diff --git a/drivers/staging/solo6x10/v4l2-enc.c b/drivers/staging/media/solo6x10/v4l2-enc.c
index bee7280bbed..bee7280bbed 100644
--- a/drivers/staging/solo6x10/v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/v4l2-enc.c
diff --git a/drivers/staging/solo6x10/v4l2.c b/drivers/staging/media/solo6x10/v4l2.c
index 571c3a348d3..571c3a348d3 100644
--- a/drivers/staging/solo6x10/v4l2.c
+++ b/drivers/staging/media/solo6x10/v4l2.c
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index f3c6060c96b..7a1955583b7 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -1197,7 +1197,7 @@ const struct inode_operations pohmelfs_file_inode_operations = {
void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
{
inode->i_mode = info->mode;
- inode->i_nlink = info->nlink;
+ set_nlink(inode, info->nlink);
inode->i_uid = info->uid;
inode->i_gid = info->gid;
inode->i_blocks = info->blocks;
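The pohmelfs hunk tracks the VFS change that made the link count writable only through helpers: filesystems now call set_nlink() (and adjust counts with inc_nlink()/drop_nlink()) instead of assigning i_nlink directly. A hedged sketch of the pattern with a hypothetical fill-inode helper, not pohmelfs code:

#include <linux/fs.h>

struct demo_inode_info {		/* hypothetical, stands in for netfs_inode_info */
	umode_t		mode;
	unsigned int	nlink;
	u64		size;
};

static void demo_fill_inode(struct inode *inode,
			    const struct demo_inode_info *info)
{
	inode->i_mode = info->mode;
	set_nlink(inode, info->nlink);	/* instead of inode->i_nlink = ... */
	i_size_write(inode, info->size);
}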
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
index 3b32f9e6e4f..87c9cdc8bd2 100644
--- a/drivers/staging/rts5139/rts51x_scsi.c
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
index 2bd34662beb..a9c309a167c 100644
--- a/drivers/staging/spectra/lld_mtd.c
+++ b/drivers/staging/spectra/lld_mtd.c
@@ -340,7 +340,7 @@ u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.datbuf = read_data;
ops.len = DeviceInfo.wPageDataSize;
ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
@@ -400,7 +400,7 @@ u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.datbuf = write_data;
ops.len = DeviceInfo.wPageDataSize;
ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
@@ -473,7 +473,7 @@ u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ops.datbuf = NULL;
ops.len = 0;
ops.oobbuf = read_data;
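The spectra hunks only rename constants: MTD_OOB_AUTO and its siblings became MTD_OPS_AUTO_OOB, MTD_OPS_PLACE_OOB and MTD_OPS_RAW. A hedged sketch of the read pattern the driver uses, calling through the mtd handle's read_oob hook as code of this era does (later kernels wrap the call as mtd_read_oob()):

#include <linux/mtd/mtd.h>
#include <linux/string.h>

/* Sketch only: read one page of main data plus auto-placed OOB bytes. */
static int demo_read_page(struct mtd_info *mtd, loff_t addr,
			  u8 *data, size_t data_len,
			  u8 *oob, size_t oob_len)
{
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.mode   = MTD_OPS_AUTO_OOB;	/* was MTD_OOB_AUTO */
	ops.datbuf = data;
	ops.len    = data_len;
	ops.oobbuf = oob;
	ops.ooblen = oob_len;

	return mtd->read_oob(mtd, addr, &ops);
}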
diff --git a/drivers/staging/tm6000/CARDLIST b/drivers/staging/tm6000/CARDLIST
deleted file mode 100644
index b5edce48799..00000000000
--- a/drivers/staging/tm6000/CARDLIST
+++ /dev/null
@@ -1,16 +0,0 @@
- 1 -> Generic tm5600 board (tm5600) [6000:0001]
- 2 -> Generic tm6000 board (tm6000) [6000:0001]
- 3 -> Generic tm6010 board (tm6010) [6000:0002]
- 4 -> 10Moons UT821 (tm5600) [6000:0001]
- 5 -> 10Moons UT330 (tm5600)
- 6 -> ADSTech Dual TV (tm6000) [06e1:f332]
- 7 -> FreeCom and similar (tm6000) [14aa:0620]
- 8 -> ADSTech Mini Dual TV (tm6000) [06e1:b339]
- 9 -> Hauppauge WinTV HVR-900H/USB2 Stick (tm6010) [2040:6600,2040:6601,2040:6610,2040:6611]
- 10 -> Beholder Wander (tm6010) [6000:dec0]
- 11 -> Beholder Voyager (tm6010) [6000:dec1]
- 12 -> TerraTec Cinergy Hybrid XE/Cinergy Hybrid Stick (tm6010) [0ccd:0086,0ccd:00a5]
- 13 -> TwinHan TU501 (tm6010) [13d3:3240,13d3:3241,13d3:3243,13d3:3264]
- 14 -> Beholder Wander Lite (tm6010) [6000:dec2]
- 15 -> Beholder Voyager Lite (tm6010) [6000:dec3]
-
diff --git a/drivers/staging/tm6000/README b/drivers/staging/tm6000/README
deleted file mode 100644
index c340ebc2ee9..00000000000
--- a/drivers/staging/tm6000/README
+++ /dev/null
@@ -1,22 +0,0 @@
-Todo:
- - Fix the loss of some blocks when receiving the video URB's
- - Add a lock at tm6000_read_write_usb() to prevent two simultaneous accesses to the
- URB control transfers
- - Properly add the locks at tm6000-video
- - Add audio support
- - Add vbi support
- - Add IR support
- - Do several cleanups
- - I think that frame1/frame0 are inverted. This causes a funny effect at the image.
- the fix is trivial, but require some tests
- - My tm6010 devices sometimes insist on stop working. I need to turn them off, removing
- from my machine and wait for a while for it to work again. I'm starting to think that
- it is an overheat issue - is there a workaround that we could do?
- - Sometimes, tm6010 doesn't read eeprom at the proper time (hardware bug). So, the device
- got miss-detected as a "generic" tm6000. This can be really bad if the tuner is the
- Low Power one, as it may result on loading the high power firmware, that could damage
- the device. Maybe we may read eeprom to double check, when the device is marked as "generic"
- - Coding Style fixes
- - sparse cleanups
-
-Please send patches to linux-media@vger.kernel.org
diff --git a/drivers/staging/tm6000/TODO b/drivers/staging/tm6000/TODO
deleted file mode 100644
index 135d0ea3ad7..00000000000
--- a/drivers/staging/tm6000/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-There a few things to do before putting this driver in production:
- - IR NEC with tm5600/6000 TV cards
- - IR RC5 with tm5600/6000/6010 TV cards
- - CodingStyle;
- - Fix audio;
- - Fix some panic/OOPS conditions.
-
-Please send patches to linux-media@vger.kernel.org
diff --git a/drivers/staging/tm6000/tm6000-stds.c b/drivers/staging/tm6000/tm6000-stds.c
deleted file mode 100644
index 8b29d732ddc..00000000000
--- a/drivers/staging/tm6000/tm6000-stds.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices
- *
- * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include "tm6000.h"
-#include "tm6000-regs.h"
-
-static unsigned int tm6010_a_mode = 0;
-module_param(tm6010_a_mode, int, 0644);
-MODULE_PARM_DESC(tm6010_a_mode, "set tm6010 sif audio mode");
-
-struct tm6000_reg_settings {
- unsigned char req;
- unsigned char reg;
- unsigned char value;
-};
-
-
-struct tm6000_std_settings {
- v4l2_std_id id;
- struct tm6000_reg_settings common[27];
-};
-
-static struct tm6000_std_settings composite_stds[] = {
- {
- .id = V4L2_STD_PAL_M,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x04},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x20},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_PAL_Nc,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x36},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_PAL,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x32},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_SECAM,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
-
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_NTSC,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- },
-};
-
-static struct tm6000_std_settings svideo_stds[] = {
- {
- .id = V4L2_STD_PAL_M,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x05},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_PAL_Nc,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x37},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_PAL,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x33},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_SECAM,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x39},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
-
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- }, {
- .id = V4L2_STD_NTSC,
- .common = {
- {TM6010_REQ07_R3F_RESET, 0x01},
- {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x01},
- {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
- {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
- {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03},
- {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30},
- {TM6010_REQ07_R17_HLOOP_MAXSTATE, 0x8b},
- {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
- {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
- {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
- {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
- {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
- {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
- {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
- {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
- {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
- {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
- {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
- {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
- {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
- {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
- {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
-
- {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
- {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
- {TM6010_REQ07_R3F_RESET, 0x00},
- {0, 0, 0},
- },
- },
-};
-
-
-static int tm6000_set_audio_std(struct tm6000_core *dev)
-{
- uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */
- uint8_t areg_05 = 0x01; /* Auto 4.5 = M Japan, Auto 6.5 = DK */
- uint8_t areg_06 = 0x02; /* Auto de-emphasis, manual channel mode */
- uint8_t nicam_flag = 0; /* No NICAM */
-
- if (dev->radio) {
- tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, 0x04);
- tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0x80);
- tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, 0x0c);
- /* set mono or stereo */
- if (dev->amode == V4L2_TUNER_MODE_MONO)
- tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00);
- else if (dev->amode == V4L2_TUNER_MODE_STEREO)
- tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x02);
- tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x18);
- tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x0a);
- tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x40);
- tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe);
- tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
- tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
- return 0;
- }
-
- switch (tm6010_a_mode) {
- /* auto */
- case 0:
- switch (dev->norm) {
- case V4L2_STD_NTSC_M_KR:
- areg_05 |= 0x00;
- break;
- case V4L2_STD_NTSC_M_JP:
- areg_05 |= 0x40;
- break;
- case V4L2_STD_NTSC_M:
- case V4L2_STD_PAL_M:
- case V4L2_STD_PAL_N:
- areg_05 |= 0x20;
- break;
- case V4L2_STD_PAL_Nc:
- areg_05 |= 0x60;
- break;
- case V4L2_STD_SECAM_L:
- areg_05 |= 0x00;
- break;
- case V4L2_STD_DK:
- areg_05 |= 0x10;
- break;
- }
- break;
- /* A2 */
- case 1:
- switch (dev->norm) {
- case V4L2_STD_B:
- case V4L2_STD_GH:
- areg_05 = 0x05;
- break;
- case V4L2_STD_DK:
- areg_05 = 0x09;
- break;
- }
- break;
- /* NICAM */
- case 2:
- switch (dev->norm) {
- case V4L2_STD_B:
- case V4L2_STD_GH:
- areg_05 = 0x07;
- break;
- case V4L2_STD_DK:
- areg_05 = 0x06;
- break;
- case V4L2_STD_PAL_I:
- areg_05 = 0x08;
- break;
- case V4L2_STD_SECAM_L:
- areg_05 = 0x0a;
- areg_02 = 0x02;
- break;
- }
- nicam_flag = 1;
- break;
- /* other */
- case 3:
- switch (dev->norm) {
- /* DK3_A2 */
- case V4L2_STD_DK:
- areg_05 = 0x0b;
- break;
- /* Korea */
- case V4L2_STD_NTSC_M_KR:
- areg_05 = 0x04;
- break;
- /* EIAJ */
- case V4L2_STD_NTSC_M_JP:
- areg_05 = 0x03;
- break;
- default:
- areg_05 = 0x02;
- break;
- }
- break;
- }
-
- tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, areg_02);
- tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0xa0);
- tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, areg_05);
- tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, areg_06);
- tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x08);
- tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91);
- tm6000_set_reg(dev, TM6010_REQ08_R0B_A_ASD_THRES1, 0x20);
- tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x12);
- tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x20);
- tm6000_set_reg(dev, TM6010_REQ08_R0E_A_MONO_THRES1, 0xf0);
- tm6000_set_reg(dev, TM6010_REQ08_R0F_A_MONO_THRES2, 0x80);
- tm6000_set_reg(dev, TM6010_REQ08_R10_A_MUTE_THRES1, 0xc0);
- tm6000_set_reg(dev, TM6010_REQ08_R11_A_MUTE_THRES2, 0x80);
- tm6000_set_reg(dev, TM6010_REQ08_R12_A_AGC_U, 0x12);
- tm6000_set_reg(dev, TM6010_REQ08_R13_A_AGC_ERR_T, 0xfe);
- tm6000_set_reg(dev, TM6010_REQ08_R14_A_AGC_GAIN_INIT, 0x20);
- tm6000_set_reg(dev, TM6010_REQ08_R15_A_AGC_STEP_THR, 0x14);
- tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe);
- tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01);
- tm6000_set_reg(dev, TM6010_REQ08_R18_A_TR_CTRL, 0xa0);
- tm6000_set_reg(dev, TM6010_REQ08_R19_A_FH_2FH_GAIN, 0x32);
- tm6000_set_reg(dev, TM6010_REQ08_R1A_A_NICAM_SER_MAX, 0x64);
- tm6000_set_reg(dev, TM6010_REQ08_R1B_A_NICAM_SER_MIN, 0x20);
- tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1c, 0x00);
- tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1d, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
- tm6000_set_reg(dev, TM6010_REQ08_R1F_A_TEST_INTF_SEL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R20_A_TEST_PIN_SEL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
-
- return 0;
-}
-
-void tm6000_get_std_res(struct tm6000_core *dev)
-{
- /* Currently, those are the only supported resolutions */
- if (dev->norm & V4L2_STD_525_60)
- dev->height = 480;
- else
- dev->height = 576;
-
- dev->width = 720;
-}
-
-static int tm6000_load_std(struct tm6000_core *dev,
- struct tm6000_reg_settings *set, int max_size)
-{
- int i, rc;
-
- /* Load board's initialization table */
- for (i = 0; max_size; i++) {
- if (!set[i].req)
- return 0;
-
- rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
- if (rc < 0) {
- printk(KERN_ERR "Error %i while setting "
- "req %d, reg %d to value %d\n",
- rc, set[i].req, set[i].reg, set[i].value);
- return rc;
- }
- }
-
- return 0;
-}
-
-int tm6000_set_standard(struct tm6000_core *dev)
-{
- int i, rc = 0;
- u8 reg_07_fe = 0x8a;
- u8 reg_08_f1 = 0xfc;
- u8 reg_08_e2 = 0xf0;
- u8 reg_08_e6 = 0x0f;
-
- tm6000_get_std_res(dev);
-
- if (dev->radio) {
- /* todo */
- }
-
- if (dev->dev_type == TM6010) {
- switch (dev->vinput[dev->input].vmux) {
- case TM6000_VMUX_VIDEO_A:
- tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4);
- tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1);
- tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0);
- tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2);
- tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe8);
- reg_07_fe |= 0x01;
- break;
- case TM6000_VMUX_VIDEO_B:
- tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8);
- tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1);
- tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0);
- tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2);
- tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe8);
- reg_07_fe |= 0x01;
- break;
- case TM6000_VMUX_VIDEO_AB:
- tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc);
- tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8);
- reg_08_e6 = 0x00;
- tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2);
- tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0);
- tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2);
- tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe0);
- break;
- default:
- break;
- }
- switch (dev->vinput[dev->input].amux) {
- case TM6000_AMUX_ADC1:
- tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
- 0x00, 0x0f);
- break;
- case TM6000_AMUX_ADC2:
- tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
- 0x08, 0x0f);
- break;
- case TM6000_AMUX_SIF1:
- reg_08_e2 |= 0x02;
- reg_08_e6 = 0x08;
- reg_07_fe |= 0x40;
- reg_08_f1 |= 0x02;
- tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3);
- tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
- 0x02, 0x0f);
- break;
- case TM6000_AMUX_SIF2:
- reg_08_e2 |= 0x02;
- reg_08_e6 = 0x08;
- reg_07_fe |= 0x40;
- reg_08_f1 |= 0x02;
- tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf7);
- tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
- 0x02, 0x0f);
- break;
- default:
- break;
- }
- tm6000_set_reg(dev, TM6010_REQ08_RE2_POWER_DOWN_CTRL1, reg_08_e2);
- tm6000_set_reg(dev, TM6010_REQ08_RE6_POWER_DOWN_CTRL2, reg_08_e6);
- tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, reg_08_f1);
- tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, reg_07_fe);
- } else {
- switch (dev->vinput[dev->input].vmux) {
- case TM6000_VMUX_VIDEO_A:
- tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x10);
- tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x00);
- tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x0f);
- tm6000_set_reg(dev,
- REQ_03_SET_GET_MCU_PIN, dev->vinput[dev->input].v_gpio, 0);
- break;
- case TM6000_VMUX_VIDEO_B:
- tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x00);
- tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x00);
- tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x0f);
- tm6000_set_reg(dev,
- REQ_03_SET_GET_MCU_PIN, dev->vinput[dev->input].v_gpio, 0);
- break;
- case TM6000_VMUX_VIDEO_AB:
- tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x10);
- tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x10);
- tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x00);
- tm6000_set_reg(dev,
- REQ_03_SET_GET_MCU_PIN, dev->vinput[dev->input].v_gpio, 1);
- break;
- default:
- break;
- }
- switch (dev->vinput[dev->input].amux) {
- case TM6000_AMUX_ADC1:
- tm6000_set_reg_mask(dev,
- TM6000_REQ07_REB_VADC_AADC_MODE, 0x00, 0x0f);
- break;
- case TM6000_AMUX_ADC2:
- tm6000_set_reg_mask(dev,
- TM6000_REQ07_REB_VADC_AADC_MODE, 0x04, 0x0f);
- break;
- default:
- break;
- }
- }
- if (dev->vinput[dev->input].type == TM6000_INPUT_SVIDEO) {
- for (i = 0; i < ARRAY_SIZE(svideo_stds); i++) {
- if (dev->norm & svideo_stds[i].id) {
- rc = tm6000_load_std(dev, svideo_stds[i].common,
- sizeof(svideo_stds[i].
- common));
- goto ret;
- }
- }
- return -EINVAL;
- } else {
- for (i = 0; i < ARRAY_SIZE(composite_stds); i++) {
- if (dev->norm & composite_stds[i].id) {
- rc = tm6000_load_std(dev,
- composite_stds[i].common,
- sizeof(composite_stds[i].
- common));
- goto ret;
- }
- }
- return -EINVAL;
- }
-
-ret:
- if (rc < 0)
- return rc;
-
- if ((dev->dev_type == TM6010) &&
- ((dev->vinput[dev->input].amux == TM6000_AMUX_SIF1) ||
- (dev->vinput[dev->input].amux == TM6000_AMUX_SIF2)))
- tm6000_set_audio_std(dev);
-
- msleep(40);
-
-
- return 0;
-}
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index f4b53d103c5..3b7a847f465 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/sock.h>
#include "usbip_common.h"
diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
index ecd1862539c..d332a34ddb6 100644
--- a/drivers/staging/usbip/usbip_event.c
+++ b/drivers/staging/usbip/usbip_event.c
@@ -18,6 +18,7 @@
*/
#include <linux/kthread.h>
+#include <linux/export.h>
#include "usbip_common.h"
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index a2e8bd452ed..f958eb4f0d8 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -11,6 +11,7 @@
*/
#include <net/mac80211.h>
#include <linux/usb.h>
+#include <linux/module.h>
#include "core.h"
#include "mds_f.h"
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 321580267fe..2faee2dd4bb 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -82,6 +82,7 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
+#include <linux/module.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 36db231cd80..277e408c39c 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1300,27 +1300,17 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
return 0;
}
-static int XGIfb_pan_var(struct xgifb_video_info *xgifb_info,
- struct fb_var_screeninfo *var)
+static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ struct xgifb_video_info *xgifb_info = info->par;
unsigned int base;
/* printk("Inside pan_var"); */
- if (var->xoffset > (var->xres_virtual - var->xres)) {
- /* printk("Pan: xo: %d xv %d xr %d\n",
- var->xoffset, var->xres_virtual, var->xres); */
- return -EINVAL;
- }
- if (var->yoffset > (var->yres_virtual - var->yres)) {
- /* printk("Pan: yo: %d yv %d yr %d\n",
- var->yoffset, var->yres_virtual, var->yres); */
- return -EINVAL;
- }
- base = var->yoffset * var->xres_virtual + var->xoffset;
+ base = var->yoffset * info->var.xres_virtual + var->xoffset;
/* calculate base bpp dep. */
- switch (var->bits_per_pixel) {
+ switch (info->var.bits_per_pixel) {
case 16:
base >>= 1;
break;
@@ -1615,13 +1605,12 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int err;
- struct xgifb_video_info *xgifb_info = info->par;
/* printk("\nInside pan_display:\n"); */
- if (var->xoffset > (var->xres_virtual - var->xres))
+ if (var->xoffset > (info->var.xres_virtual - info->var.xres))
return -EINVAL;
- if (var->yoffset > (var->yres_virtual - var->yres))
+ if (var->yoffset > (info->var.yres_virtual - info->var.yres))
return -EINVAL;
if (var->vmode & FB_VMODE_YWRAP) {
@@ -1634,7 +1623,7 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
> info->var.yres_virtual)
return -EINVAL;
}
- err = XGIfb_pan_var(xgifb_info, var);
+ err = XGIfb_pan_var(var, info);
if (err < 0)
return err;
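The xgifb rework validates panning against the mode that is actually programmed, info->var, rather than the caller-supplied var, which may describe a mode that has not been set yet. A hedged sketch of that validation step, with the hardware programming left out:

#include <linux/fb.h>
#include <linux/errno.h>

/* Sketch: check a pan request against the currently set mode in
 * info->var, as the reworked XGIfb_pan_var()/XGIfb_pan_display() do. */
static int demo_pan_display(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	u32 base;

	if (var->xoffset > info->var.xres_virtual - info->var.xres)
		return -EINVAL;
	if (var->yoffset > info->var.yres_virtual - info->var.yres)
		return -EINVAL;

	base = var->yoffset * info->var.xres_virtual + var->xoffset;
	if (info->var.bits_per_pixel == 16)
		base >>= 1;		/* scale for the pixel depth, as the driver does */

	/* ... program the display start address with 'base' ... */
	return 0;
}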
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index b9926ee0052..09de99fbb7e 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -556,7 +556,7 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
-static int zram_make_request(struct request_queue *queue, struct bio *bio)
+static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
@@ -575,13 +575,12 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
__zram_make_request(zram, bio, bio_data_dir(bio));
up_read(&zram->init_lock);
- return 0;
+ return;
error_unlock:
up_read(&zram->init_lock);
error:
bio_io_error(bio);
- return 0;
}
void __zram_reset_device(struct zram *zram)
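zram follows the block-layer change that made make_request_fn return void; the return value carried no information, so the driver now completes or errors the bio itself. A minimal sketch of a new-style hook for a hypothetical driver, not zram itself:

#include <linux/blkdev.h>
#include <linux/bio.h>

struct demo_dev { int ready; };		/* hypothetical private state */

/* Sketch: a new-style make_request_fn returns void and must finish the
 * bio itself, either completing it or erroring it out. */
static void demo_make_request(struct request_queue *q, struct bio *bio)
{
	struct demo_dev *dev = q->queuedata;

	if (!dev || !dev->ready) {
		bio_io_error(bio);	/* error path ends the bio */
		return;
	}

	/* ... queue or process the bio ... */
	bio_endio(bio, 0);		/* two-argument completion form of this era */
}

/* Registered with: blk_queue_make_request(queue, demo_make_request); */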
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4d01768fcd9..0fd96c10271 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/crypto.h>
#include <linux/completion.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
@@ -1079,7 +1080,9 @@ attach_cmd:
*/
if (!cmd->immediate_data) {
cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ return 0;
+ else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_PROTOCOL_ERROR,
1, 0, buf, cmd);
@@ -1819,17 +1822,16 @@ attach:
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
return 0;
- } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
+ else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_PROTOCOL_ERROR,
1, 0, buf, cmd);
- }
}
iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
- if (out_of_order_cmdsn)
+ if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
return 0;
/*
* Found the referenced task, send to transport for processing.
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index f1643dbf6a9..db327845e46 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -20,6 +20,7 @@
****************************************************************************/
#include <linux/configfs.h>
+#include <linux/export.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index bbdbe9301b2..f1db83077e0 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -20,6 +20,7 @@
******************************************************************************/
#include <linux/configfs.h>
+#include <linux/export.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index b15d8cbf630..3df1c9b8ae6 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -174,6 +174,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
sgl_bidi = sdb->table.sgl;
sgl_bidi_count = sdb->table.nents;
}
+ /*
+ * Because some userspace code via scsi-generic does not memset its
+ * associated read buffers, go ahead and do that here for type
+ * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
+ * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
+ * by target core in transport_generic_allocate_tasks() ->
+ * transport_generic_cmd_sequencer().
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+ se_cmd->data_direction == DMA_FROM_DEVICE) {
+ struct scatterlist *sg = scsi_sglist(sc);
+ unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
+
+ if (buf != NULL) {
+ memset(buf, 0, sg->length);
+ kunmap(sg_page(sg));
+ }
+ }
/* Tell the core about our preallocated memory */
ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
@@ -187,7 +205,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
/*
* Called from struct target_core_fabric_ops->check_stop_free()
*/
-static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
+static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
/*
* Do not release struct se_cmd's containing a valid TMR
@@ -195,12 +213,13 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* with transport_generic_free_cmd().
*/
if (se_cmd->se_tmr_req)
- return;
+ return 0;
/*
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
transport_generic_free_cmd(se_cmd, 0);
+ return 1;
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
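The new code in tcm_loop_new_cmd_map() zeroes the read buffer for SCF_SCSI_CONTROL_SG_IO_CDB commands because scsi-generic userspace may pass uninitialized memory, and the comment notes the target core guarantees a single SGL entry in that case. The same kmap/memset/kunmap pattern in isolation, as a hedged sketch:

#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: zero the data area described by a single scatterlist entry.
 * Assumes the SGL really is one entry, as the tcm_loop comment states. */
static void demo_zero_single_sg(struct scatterlist *sg)
{
	unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

	memset(buf, 0, sg->length);
	kunmap(sg_page(sg));
}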
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 8f4447749c7..88f2ad43ec8 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -58,8 +59,9 @@ struct t10_alua_lu_gp *default_lu_gp;
*
* See spc4r17 section 6.27
*/
-int core_emulate_report_target_port_groups(struct se_cmd *cmd)
+int target_emulate_report_target_port_groups(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -164,6 +166,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
transport_kunmap_first_data_page(cmd);
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
return 0;
}
@@ -172,8 +176,9 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
*
* See spc4r17 section 6.35
*/
-int core_emulate_set_target_port_groups(struct se_cmd *cmd)
+int target_emulate_set_target_port_groups(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_port *port, *l_port = cmd->se_lun->lun_sep;
@@ -341,7 +346,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_first_data_page(cmd);
-
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
return 0;
}
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index c86f97a081e..c5b4ecd3e74 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -66,8 +66,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
-extern int core_emulate_report_target_port_groups(struct se_cmd *);
-extern int core_emulate_set_target_port_groups(struct se_cmd *);
+extern int target_emulate_report_target_port_groups(struct se_task *);
+extern int target_emulate_set_target_port_groups(struct se_task *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 38535eb1392..683ba02b824 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -32,6 +32,7 @@
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_ua.h"
+#include "target_core_cdb.h"
static void
target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -679,16 +680,18 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static int
-target_emulate_inquiry(struct se_cmd *cmd)
+int target_emulate_inquiry(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned char *cdb = cmd->t_task_cdb;
int p, ret;
- if (!(cdb[1] & 0x1))
- return target_emulate_inquiry_std(cmd);
+ if (!(cdb[1] & 0x1)) {
+ ret = target_emulate_inquiry_std(cmd);
+ goto out;
+ }
/*
* Make sure we at least have 4 bytes of INQUIRY response
@@ -707,22 +710,30 @@ target_emulate_inquiry(struct se_cmd *cmd)
buf[0] = dev->transport->get_device_type(dev);
- for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+ for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
if (cdb[2] == evpd_handlers[p].page) {
buf[1] = cdb[2];
ret = evpd_handlers[p].emulate(cmd, buf);
- transport_kunmap_first_data_page(cmd);
- return ret;
+ goto out_unmap;
}
+ }
- transport_kunmap_first_data_page(cmd);
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
- return -EINVAL;
+ ret = -EINVAL;
+
+out_unmap:
+ transport_kunmap_first_data_page(cmd);
+out:
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ return ret;
}
-static int
-target_emulate_readcapacity(struct se_cmd *cmd)
+int target_emulate_readcapacity(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -751,12 +762,14 @@ target_emulate_readcapacity(struct se_cmd *cmd)
transport_kunmap_first_data_page(cmd);
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
return 0;
}
-static int
-target_emulate_readcapacity_16(struct se_cmd *cmd)
+int target_emulate_readcapacity_16(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -784,6 +797,8 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
transport_kunmap_first_data_page(cmd);
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
return 0;
}
@@ -922,14 +937,15 @@ target_modesense_dpofua(unsigned char *buf, int type)
}
}
-static int
-target_emulate_modesense(struct se_cmd *cmd, int ten)
+int target_emulate_modesense(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
unsigned char *rbuf;
int type = dev->transport->get_device_type(dev);
- int offset = (ten) ? 8 : 4;
+ int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
+ int offset = ten ? 8 : 4;
int length = 0;
unsigned char buf[SE_MODE_PAGE_BUF];
@@ -995,12 +1011,14 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
memcpy(rbuf, buf, offset);
transport_kunmap_first_data_page(cmd);
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
return 0;
}
-static int
-target_emulate_request_sense(struct se_cmd *cmd)
+int target_emulate_request_sense(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
@@ -1059,7 +1077,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
end:
transport_kunmap_first_data_page(cmd);
-
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
return 0;
}
@@ -1067,8 +1086,7 @@ end:
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
-static int
-target_emulate_unmap(struct se_task *task)
+int target_emulate_unmap(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
@@ -1079,6 +1097,12 @@ target_emulate_unmap(struct se_task *task)
int ret = 0, offset;
unsigned short dl, bd_dl;
+ if (!dev->transport->do_discard) {
+ pr_err("UNMAP emulation not supported for: %s\n",
+ dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+
/* First UNMAP block descriptor starts at 8 byte offset */
offset = 8;
size -= 8;
@@ -1110,7 +1134,10 @@ target_emulate_unmap(struct se_task *task)
err:
transport_kunmap_first_data_page(cmd);
-
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
return ret;
}
@@ -1118,14 +1145,28 @@ err:
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
-static int
-target_emulate_write_same(struct se_task *task, u32 num_blocks)
+int target_emulate_write_same(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
sector_t range;
sector_t lba = cmd->t_task_lba;
+ u32 num_blocks;
int ret;
+
+ if (!dev->transport->do_discard) {
+ pr_err("WRITE_SAME emulation not supported"
+ " for: %s\n", dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+
+ if (cmd->t_task_cdb[0] == WRITE_SAME)
+ num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
+ else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
+ else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+
/*
* Use the explicit range when non zero is supplied, otherwise calculate
* the remaining range based on ->get_blocks() - starting LBA.
@@ -1144,127 +1185,30 @@ target_emulate_write_same(struct se_task *task, u32 num_blocks)
return ret;
}
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
return 0;
}
-int
-transport_emulate_control_cdb(struct se_task *task)
+int target_emulate_synchronize_cache(struct se_task *task)
{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = cmd->se_dev;
- unsigned short service_action;
- int ret = 0;
+ struct se_device *dev = task->task_se_cmd->se_dev;
- switch (cmd->t_task_cdb[0]) {
- case INQUIRY:
- ret = target_emulate_inquiry(cmd);
- break;
- case READ_CAPACITY:
- ret = target_emulate_readcapacity(cmd);
- break;
- case MODE_SENSE:
- ret = target_emulate_modesense(cmd, 0);
- break;
- case MODE_SENSE_10:
- ret = target_emulate_modesense(cmd, 1);
- break;
- case SERVICE_ACTION_IN:
- switch (cmd->t_task_cdb[1] & 0x1f) {
- case SAI_READ_CAPACITY_16:
- ret = target_emulate_readcapacity_16(cmd);
- break;
- default:
- pr_err("Unsupported SA: 0x%02x\n",
- cmd->t_task_cdb[1] & 0x1f);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- }
- break;
- case REQUEST_SENSE:
- ret = target_emulate_request_sense(cmd);
- break;
- case UNMAP:
- if (!dev->transport->do_discard) {
- pr_err("UNMAP emulation not supported for: %s\n",
- dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- }
- ret = target_emulate_unmap(task);
- break;
- case WRITE_SAME:
- if (!dev->transport->do_discard) {
- pr_err("WRITE_SAME emulation not supported"
- " for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- }
- ret = target_emulate_write_same(task,
- get_unaligned_be16(&cmd->t_task_cdb[7]));
- break;
- case WRITE_SAME_16:
- if (!dev->transport->do_discard) {
- pr_err("WRITE_SAME_16 emulation not supported"
- " for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- }
- ret = target_emulate_write_same(task,
- get_unaligned_be32(&cmd->t_task_cdb[10]));
- break;
- case VARIABLE_LENGTH_CMD:
- service_action =
- get_unaligned_be16(&cmd->t_task_cdb[8]);
- switch (service_action) {
- case WRITE_SAME_32:
- if (!dev->transport->do_discard) {
- pr_err("WRITE_SAME_32 SA emulation not"
- " supported for: %s\n",
- dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- }
- ret = target_emulate_write_same(task,
- get_unaligned_be32(&cmd->t_task_cdb[28]));
- break;
- default:
- pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
- " 0x%02x\n", service_action);
- break;
- }
- break;
- case SYNCHRONIZE_CACHE:
- case 0x91: /* SYNCHRONIZE_CACHE_16: */
- if (!dev->transport->do_sync_cache) {
- pr_err("SYNCHRONIZE_CACHE emulation not supported"
- " for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- }
- dev->transport->do_sync_cache(task);
- break;
- case ALLOW_MEDIUM_REMOVAL:
- case ERASE:
- case REZERO_UNIT:
- case SEEK_10:
- case SPACE:
- case START_STOP:
- case TEST_UNIT_READY:
- case VERIFY:
- case WRITE_FILEMARKS:
- break;
- default:
- pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
- cmd->t_task_cdb[0], dev->transport->name);
+ if (!dev->transport->do_sync_cache) {
+ pr_err("SYNCHRONIZE_CACHE emulation not supported"
+ " for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- if (ret < 0)
- return ret;
- /*
- * Handle the successful completion here unless a caller
- * has explictly requested an asychronous completion.
- */
- if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
+ dev->transport->do_sync_cache(task);
+ return 0;
+}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+int target_emulate_noop(struct se_task *task)
+{
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ return 0;
}
/*
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
new file mode 100644
index 00000000000..ad6b1e39300
--- /dev/null
+++ b/drivers/target/target_core_cdb.h
@@ -0,0 +1,14 @@
+#ifndef TARGET_CORE_CDB_H
+#define TARGET_CORE_CDB_H
+
+int target_emulate_inquiry(struct se_task *task);
+int target_emulate_readcapacity(struct se_task *task);
+int target_emulate_readcapacity_16(struct se_task *task);
+int target_emulate_modesense(struct se_task *task);
+int target_emulate_request_sense(struct se_task *task);
+int target_emulate_unmap(struct se_task *task);
+int target_emulate_write_same(struct se_task *task);
+int target_emulate_synchronize_cache(struct se_task *task);
+int target_emulate_noop(struct se_task *task);
+
+#endif /* TARGET_CORE_CDB_H */
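The handlers declared above all follow the execute_task calling convention introduced by this series: each takes the struct se_task, returns 0 (or a PYX_TRANSPORT_* status) and, on success, completes the task itself. A minimal sketch of a handler written against that convention — hypothetical, for illustration only, not part of this patch — could look like this:

/*
 * Illustrative sketch only -- not part of this patch.  Shows the
 * execute_task-style convention used by the target_emulate_*()
 * handlers declared above: set GOOD status and complete the task
 * on success, or return a PYX_TRANSPORT_* code on failure.
 */
static int target_emulate_example(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;

	if (cmd->data_length < 4)
		return PYX_TRANSPORT_INVALID_CDB_FIELD;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
	return 0;
}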
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index f870c3bcfd8..ba5edec2c5f 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
@@ -651,23 +652,15 @@ void core_dev_unexport(
lun->lun_se_dev = NULL;
}
-int transport_core_report_lun_response(struct se_cmd *se_cmd)
+int target_report_luns(struct se_task *se_task)
{
+ struct se_cmd *se_cmd = se_task->task_se_cmd;
struct se_dev_entry *deve;
struct se_lun *se_lun;
struct se_session *se_sess = se_cmd->se_sess;
- struct se_task *se_task;
unsigned char *buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
- list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
- break;
-
- if (!se_task) {
- pr_err("Unable to locate struct se_task for struct se_cmd\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
buf = transport_kmap_first_data_page(se_cmd);
/*
@@ -713,6 +706,8 @@ done:
buf[2] = ((lun_count >> 8) & 0xff);
buf[3] = (lun_count & 0xff);
+ se_task->task_scsi_status = GOOD;
+ transport_complete_task(se_task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 39f021b855e..ec4249be617 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -29,6 +29,7 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 19a0be9c657..67cd6fe05bf 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -32,6 +32,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 0639b975d6f..c68019d6c40 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
+#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 41ad02b5fb8..7698efe2926 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -37,6 +37,7 @@
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0c4f783f924..5a4ebfc3a54 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -116,114 +116,21 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
return ret;
}
-static int core_scsi2_reservation_release(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- struct se_portal_group *tpg = sess->se_tpg;
-
- if (!sess || !tpg)
- return 0;
-
- spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_reserved_node_acl || !sess) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
-
- if (dev->dev_reserved_node_acl != sess->se_node_acl) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- dev->dev_reserved_node_acl = NULL;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
- if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
- dev->dev_res_bin_isid = 0;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
- }
- pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
- " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
- sess->se_node_acl->initiatorname);
- spin_unlock(&dev->dev_reservation_lock);
-
- return 0;
-}
-
-static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- struct se_portal_group *tpg = sess->se_tpg;
-
- if ((cmd->t_task_cdb[1] & 0x01) &&
- (cmd->t_task_cdb[1] & 0x02)) {
- pr_err("LongIO and Obselete Bits set, returning"
- " ILLEGAL_REQUEST\n");
- return PYX_TRANSPORT_ILLEGAL_REQUEST;
- }
- /*
- * This is currently the case for target_core_mod passthrough struct se_cmd
- * ops
- */
- if (!sess || !tpg)
- return 0;
-
- spin_lock(&dev->dev_reservation_lock);
- if (dev->dev_reserved_node_acl &&
- (dev->dev_reserved_node_acl != sess->se_node_acl)) {
- pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
- tpg->se_tpg_tfo->get_fabric_name());
- pr_err("Original reserver LUN: %u %s\n",
- cmd->se_lun->unpacked_lun,
- dev->dev_reserved_node_acl->initiatorname);
- pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
- " from %s \n", cmd->se_lun->unpacked_lun,
- cmd->se_deve->mapped_lun,
- sess->se_node_acl->initiatorname);
- spin_unlock(&dev->dev_reservation_lock);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- }
-
- dev->dev_reserved_node_acl = sess->se_node_acl;
- dev->dev_flags |= DF_SPC2_RESERVATIONS;
- if (sess->sess_bin_isid != 0) {
- dev->dev_res_bin_isid = sess->sess_bin_isid;
- dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
- }
- pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
- " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
- sess->se_node_acl->initiatorname);
- spin_unlock(&dev->dev_reservation_lock);
-
- return 0;
-}
-
static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
struct se_node_acl *, struct se_session *);
static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
-/*
- * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
- * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
- * thread context.
- */
-int core_scsi2_emulate_crh(struct se_cmd *cmd)
+static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
{
struct se_session *se_sess = cmd->se_sess;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
- unsigned char *cdb = &cmd->t_task_cdb[0];
int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
int conflict = 0;
- if (!se_sess)
- return 0;
-
if (!crh)
- goto after_crh;
+ return false;
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
@@ -251,14 +158,16 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
*/
if (pr_reg->pr_res_holder) {
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ *ret = 0;
+ return false;
}
if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ *ret = 0;
+ return true;
}
core_scsi3_put_pr_reg(pr_reg);
conflict = 1;
@@ -282,18 +191,118 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ return true;
}
-after_crh:
- if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
- return core_scsi2_reservation_reserve(cmd);
- else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
- return core_scsi2_reservation_release(cmd);
- else
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ return false;
+}
+
+int target_scsi2_reservation_release(struct se_task *task)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+ int ret = 0;
+
+ if (!sess || !tpg)
+ goto out;
+ if (target_check_scsi2_reservation_conflict(cmd, &ret))
+ goto out;
+
+ ret = 0;
+ spin_lock(&dev->dev_reservation_lock);
+ if (!dev->dev_reserved_node_acl || !sess)
+ goto out_unlock;
+
+ if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ goto out_unlock;
+
+ dev->dev_reserved_node_acl = NULL;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_res_bin_isid = 0;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+ pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
+ " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+
+out_unlock:
+ spin_unlock(&dev->dev_reservation_lock);
+out:
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ return ret;
+}
+
+int target_scsi2_reservation_reserve(struct se_task *task)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+ int ret = 0;
+
+ if ((cmd->t_task_cdb[1] & 0x01) &&
+ (cmd->t_task_cdb[1] & 0x02)) {
+ pr_err("LongIO and Obselete Bits set, returning"
+ " ILLEGAL_REQUEST\n");
+ ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ goto out;
+ }
+ /*
+ * This is currently the case for target_core_mod passthrough struct se_cmd
+ * ops
+ */
+ if (!sess || !tpg)
+ goto out;
+ if (target_check_scsi2_reservation_conflict(cmd, &ret))
+ goto out;
+
+ ret = 0;
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reserved_node_acl &&
+ (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+ pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+ tpg->se_tpg_tfo->get_fabric_name());
+ pr_err("Original reserver LUN: %u %s\n",
+ cmd->se_lun->unpacked_lun,
+ dev->dev_reserved_node_acl->initiatorname);
+ pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+ " from %s \n", cmd->se_lun->unpacked_lun,
+ cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out_unlock;
+ }
+
+ dev->dev_reserved_node_acl = sess->se_node_acl;
+ dev->dev_flags |= DF_SPC2_RESERVATIONS;
+ if (sess->sess_bin_isid != 0) {
+ dev->dev_res_bin_isid = sess->sess_bin_isid;
+ dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+ pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+ " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+
+out_unlock:
+ spin_unlock(&dev->dev_reservation_lock);
+out:
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ return ret;
}
+
/*
* Begin SPC-3/SPC-4 Persistent Reservations emulation support
*
@@ -418,12 +427,12 @@ static int core_scsi3_pr_seq_non_holder(
break;
case RELEASE:
case RELEASE_10:
- /* Handled by CRH=1 in core_scsi2_emulate_crh() */
+ /* Handled by CRH=1 in target_scsi2_reservation_release() */
ret = 0;
break;
case RESERVE:
case RESERVE_10:
- /* Handled by CRH=1 in core_scsi2_emulate_crh() */
+ /* Handled by CRH=1 in target_scsi2_reservation_reserve() */
ret = 0;
break;
case TEST_UNIT_READY:
@@ -3739,12 +3748,33 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
/*
* See spc4r17 section 6.14 Table 170
*/
-static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
+int target_scsi3_emulate_pr_out(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+ int ret;
+
+ /*
+ * Following spc2r20 5.5.1 Reservations overview:
+ *
+ * If a logical unit has been reserved by any RESERVE command and is
+ * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+ * PERSISTENT RESERVE OUT commands shall conflict regardless of
+ * initiator or service action and shall terminate with a RESERVATION
+ * CONFLICT status.
+ */
+ if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ pr_err("Received PERSISTENT_RESERVE CDB while legacy"
+ " SPC-2 reservation is held, returning"
+ " RESERVATION_CONFLICT\n");
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out;
+ }
+
/*
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
@@ -3755,7 +3785,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
}
/*
* From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
@@ -3788,8 +3819,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
- if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
/*
* From spc4r17 section 6.14:
*
@@ -3803,7 +3837,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
}
/*
* (core_scsi3_emulate_pro_* function parameters
@@ -3812,35 +3847,47 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
*/
switch (sa) {
case PRO_REGISTER:
- return core_scsi3_emulate_pro_register(cmd,
+ ret = core_scsi3_emulate_pro_register(cmd,
res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+ break;
case PRO_RESERVE:
- return core_scsi3_emulate_pro_reserve(cmd,
- type, scope, res_key);
+ ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
+ break;
case PRO_RELEASE:
- return core_scsi3_emulate_pro_release(cmd,
- type, scope, res_key);
+ ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key);
+ break;
case PRO_CLEAR:
- return core_scsi3_emulate_pro_clear(cmd, res_key);
+ ret = core_scsi3_emulate_pro_clear(cmd, res_key);
+ break;
case PRO_PREEMPT:
- return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
res_key, sa_res_key, 0);
+ break;
case PRO_PREEMPT_AND_ABORT:
- return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
res_key, sa_res_key, 1);
+ break;
case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
- return core_scsi3_emulate_pro_register(cmd,
+ ret = core_scsi3_emulate_pro_register(cmd,
0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+ break;
case PRO_REGISTER_AND_MOVE:
- return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+ ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
sa_res_key, aptpl, unreg);
+ break;
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ break;
}
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+out:
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ return ret;
}
/*
@@ -4190,29 +4237,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
-static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
+int target_scsi3_emulate_pr_in(struct se_task *task)
{
- switch (cdb[1] & 0x1f) {
- case PRI_READ_KEYS:
- return core_scsi3_pri_read_keys(cmd);
- case PRI_READ_RESERVATION:
- return core_scsi3_pri_read_reservation(cmd);
- case PRI_REPORT_CAPABILITIES:
- return core_scsi3_pri_report_capabilities(cmd);
- case PRI_READ_FULL_STATUS:
- return core_scsi3_pri_read_full_status(cmd);
- default:
- pr_err("Unknown PERSISTENT_RESERVE_IN service"
- " action: 0x%02x\n", cdb[1] & 0x1f);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
- }
-
-}
+ struct se_cmd *cmd = task->task_se_cmd;
+ int ret;
-int core_scsi3_emulate_pr(struct se_cmd *cmd)
-{
- unsigned char *cdb = &cmd->t_task_cdb[0];
- struct se_device *dev = cmd->se_dev;
/*
* Following spc2r20 5.5.1 Reservations overview:
*
@@ -4222,16 +4251,38 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
- if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
- return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
- core_scsi3_emulate_pr_out(cmd, cdb) :
- core_scsi3_emulate_pr_in(cmd, cdb);
+ switch (cmd->t_task_cdb[1] & 0x1f) {
+ case PRI_READ_KEYS:
+ ret = core_scsi3_pri_read_keys(cmd);
+ break;
+ case PRI_READ_RESERVATION:
+ ret = core_scsi3_pri_read_reservation(cmd);
+ break;
+ case PRI_REPORT_CAPABILITIES:
+ ret = core_scsi3_pri_report_capabilities(cmd);
+ break;
+ case PRI_READ_FULL_STATUS:
+ ret = core_scsi3_pri_read_full_status(cmd);
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_IN service"
+ " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
+ ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ break;
+ }
+
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ return ret;
}
static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
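The new static helper target_check_scsi2_reservation_conflict() returns true when the CRH=1 logic has already decided the command's outcome (with *ret set to 0 or PYX_TRANSPORT_RESERVATION_CONFLICT), and false when normal SPC-2 RESERVE/RELEASE processing should continue. A hedged sketch of the caller pattern both new entry points above use — illustration only, mirroring the code in this file:

/*
 * Illustration only -- mirrors the pattern used by
 * target_scsi2_reservation_reserve()/_release() above.
 */
static int example_scsi2_entry(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	int ret = 0;

	if (target_check_scsi2_reservation_conflict(cmd, &ret))
		goto out;	/* outcome already decided by CRH=1 check */

	/* ... normal SPC-2 reservation handling here ... */
out:
	if (!ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return ret;
}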
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index c8f47d06458..b97f6940dd0 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -47,7 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
-extern int core_scsi2_emulate_crh(struct se_cmd *);
+extern int target_scsi2_reservation_release(struct se_task *task);
+extern int target_scsi2_reservation_reserve(struct se_task *task);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
@@ -61,7 +62,9 @@ extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
struct se_cmd *);
-extern int core_scsi3_emulate_pr(struct se_cmd *);
+
+extern int target_scsi3_emulate_pr_in(struct se_task *task);
+extern int target_scsi3_emulate_pr_out(struct se_task *task);
extern int core_setup_reservations(struct se_device *, int);
#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index dad671dee9e..ed32e1efe42 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -36,6 +36,7 @@
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/file.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -1091,7 +1092,7 @@ static int pscsi_do_task(struct se_task *task)
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
- if (!req) {
+ if (IS_ERR(req)) {
pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 570b144a1ed..217e29df629 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -118,7 +119,7 @@ static void core_tmr_drain_tmr_list(
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
*/
- if (tmr && (tmr_p == tmr))
+ if (tmr_p == tmr)
continue;
cmd = tmr_p->task_cmd;
@@ -147,19 +148,18 @@ static void core_tmr_drain_tmr_list(
}
spin_unlock(&cmd->t_state_lock);
- list_move_tail(&tmr->tmr_list, &drain_tmr_list);
+ list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
- while (!list_empty(&drain_tmr_list)) {
- tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
- list_del(&tmr->tmr_list);
+ list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
+ list_del_init(&tmr_p->tmr_list);
cmd = tmr_p->task_cmd;
pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
- (preempt_and_abort_list) ? "Preempt" : "", tmr,
- tmr->function, tmr->response, cmd->t_state);
+ (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+ tmr_p->function, tmr_p->response, cmd->t_state);
transport_cmd_finish_abort(cmd, 1);
}
@@ -330,16 +330,6 @@ static void core_tmr_drain_cmd_list(
*/
if (prout_cmd == cmd)
continue;
- /*
- * Skip direct processing of TRANSPORT_FREE_CMD_INTR for
- * HW target mode fabrics.
- */
- spin_lock(&cmd->t_state_lock);
- if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
- spin_unlock(&cmd->t_state_lock);
- continue;
- }
- spin_unlock(&cmd->t_state_lock);
atomic_set(&cmd->t_transport_queue_active, 0);
atomic_dec(&qobj->queue_cnt);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 49fd0a9b0a5..8ddd133025b 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d7525580448..3400ae6e93f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -36,6 +36,7 @@
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -52,6 +53,7 @@
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
+#include "target_core_cdb.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@@ -268,6 +270,9 @@ struct se_session *transport_init_session(void)
}
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ INIT_LIST_HEAD(&se_sess->sess_wait_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
return se_sess;
}
@@ -514,13 +519,16 @@ static int transport_cmd_check_stop(
* Some fabric modules like tcm_loop can release
* their internally allocated I/O reference now and
* struct se_cmd now.
+ *
+ * Fabric modules are expected to return '1' here if the
+ * se_cmd being passed is released at this point,
+ * or zero if not being released.
*/
if (cmd->se_tfo->check_stop_free != NULL) {
spin_unlock_irqrestore(
&cmd->t_state_lock, flags);
- cmd->se_tfo->check_stop_free(cmd);
- return 1;
+ return cmd->se_tfo->check_stop_free(cmd);
}
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -730,6 +738,10 @@ void transport_complete_task(struct se_task *task, int success)
complete(&task->task_stop_comp);
return;
}
+
+ if (!success)
+ cmd->t_tasks_failed = 1;
+
/*
* Decrement the outstanding t_task_cdbs_left count. The last
* struct se_task from struct se_cmd will complete itself into the
@@ -740,7 +752,7 @@ void transport_complete_task(struct se_task *task, int success)
return;
}
- if (!success || cmd->t_tasks_failed) {
+ if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
@@ -908,7 +920,7 @@ void transport_remove_task_from_execute_queue(
}
/*
- * Handle QUEUE_FULL / -EAGAIN status
+ * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
*/
static void target_qf_do_work(struct work_struct *work)
@@ -1498,11 +1510,12 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
-
+ INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->t_task_list);
init_completion(&cmd->transport_lun_fe_stop_comp);
init_completion(&cmd->transport_lun_stop_comp);
init_completion(&cmd->t_transport_stop_comp);
+ init_completion(&cmd->cmd_wait_comp);
spin_lock_init(&cmd->t_state_lock);
atomic_set(&cmd->transport_dev_active, 1);
@@ -1645,9 +1658,7 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret == -EAGAIN)
- return 0;
- else if (ret < 0) {
+ if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, 0,
(cmd->data_direction != DMA_TO_DEVICE));
@@ -1717,13 +1728,6 @@ int transport_generic_handle_tmr(
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
-void transport_generic_free_cmd_intr(
- struct se_cmd *cmd)
-{
- transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);
-}
-EXPORT_SYMBOL(transport_generic_free_cmd_intr);
-
/*
* If the task is active, request it to be stopped and sleep until it
* has completed.
@@ -1886,7 +1890,7 @@ static void transport_generic_request_failure(
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
case PYX_TRANSPORT_USE_SENSE_REASON:
@@ -1913,7 +1917,7 @@ static void transport_generic_request_failure(
else {
ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
}
@@ -2153,62 +2157,20 @@ check_depth:
atomic_set(&cmd->t_transport_sent, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- /*
- * The struct se_cmd->transport_emulate_cdb() function pointer is used
- * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
- * struct se_subsystem_api->do_task() caller below.
- */
- if (cmd->transport_emulate_cdb) {
- error = cmd->transport_emulate_cdb(cmd);
- if (error != 0) {
- cmd->transport_error_status = error;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- task->task_flags &= ~TF_ACTIVE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- atomic_set(&cmd->t_transport_sent, 0);
- transport_stop_tasks_for_cmd(cmd);
- atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
- goto check_depth;
- }
- /*
- * Handle the successful completion for transport_emulate_cdb()
- * for synchronous operation, following SCF_EMULATE_CDB_ASYNC
- * Otherwise the caller is expected to complete the task with
- * proper status.
- */
- if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
- cmd->scsi_status = SAM_STAT_GOOD;
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
- } else {
- /*
- * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
- * RAMDISK we use the internal transport_emulate_control_cdb() logic
- * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
- * LUN emulation code.
- *
- * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
- * call ->do_task() directly and let the underlying TCM subsystem plugin
- * code handle the CDB emulation.
- */
- if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
- (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
- error = transport_emulate_control_cdb(task);
- else
- error = dev->transport->do_task(task);
- if (error != 0) {
- cmd->transport_error_status = error;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- task->task_flags &= ~TF_ACTIVE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- atomic_set(&cmd->t_transport_sent, 0);
- transport_stop_tasks_for_cmd(cmd);
- atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
- }
+ if (cmd->execute_task)
+ error = cmd->execute_task(task);
+ else
+ error = dev->transport->do_task(task);
+ if (error != 0) {
+ cmd->transport_error_status = error;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ task->task_flags &= ~TF_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_sent, 0);
+ transport_stop_tasks_for_cmd(cmd);
+ atomic_inc(&dev->depth_left);
+ transport_generic_request_failure(cmd, 0, 1);
}
goto check_depth;
@@ -2642,6 +2604,13 @@ static int transport_generic_cmd_sequencer(
*/
}
+ /*
+ * If we operate in passthrough mode we skip most CDB emulation and
+ * instead hand the commands down to the physical SCSI device.
+ */
+ passthrough =
+ (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
+
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
@@ -2721,9 +2690,12 @@ static int transport_generic_cmd_sequencer(
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- if (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV)
+ /*
+ * Do now allow BIDI commands for passthrough mode.
+ */
+ if (passthrough)
goto out_unsupported_cdb;
+
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
@@ -2732,13 +2704,6 @@ static int transport_generic_cmd_sequencer(
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
- /*
- * Determine if this is TCM/PSCSI device and we should disable
- * internal emulation for this CDB.
- */
- passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
-
switch (service_action) {
case XDWRITEREAD_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -2752,8 +2717,12 @@ static int transport_generic_cmd_sequencer(
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ /*
+ * Do not allow BIDI commands for passthrough mode.
+ */
if (passthrough)
goto out_unsupported_cdb;
+
/*
* Setup BIDI XOR callback to be run during after I/O
* completion.
@@ -2779,7 +2748,8 @@ static int transport_generic_cmd_sequencer(
if (target_check_write_same_discard(&cdb[10], dev) < 0)
goto out_invalid_cdb_field;
-
+ if (!passthrough)
+ cmd->execute_task = target_emulate_write_same;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
@@ -2793,12 +2763,10 @@ static int transport_generic_cmd_sequencer(
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
- if (cdb[1] == MI_REPORT_TARGET_PGS) {
- cmd->transport_emulate_cdb =
- (su_dev->t10_alua.alua_type ==
- SPC3_ALUA_EMULATED) ?
- core_emulate_report_target_port_groups :
- NULL;
+ if (cdb[1] == MI_REPORT_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_task =
+ target_emulate_report_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
(cdb[8] << 8) | cdb[9];
@@ -2819,8 +2787,15 @@ static int transport_generic_cmd_sequencer(
case MODE_SENSE:
size = cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_modesense;
break;
case MODE_SENSE_10:
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_modesense;
+ break;
case GPCMD_READ_BUFFER_CAPACITY:
case GPCMD_SEND_OPC:
case LOG_SELECT:
@@ -2840,11 +2815,14 @@ static int transport_generic_cmd_sequencer(
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case PERSISTENT_RESERVE_IN:
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_task = target_scsi3_emulate_pr_in;
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
case PERSISTENT_RESERVE_OUT:
- cmd->transport_emulate_cdb =
- (su_dev->t10_pr.res_type ==
- SPC3_PERSISTENT_RESERVATIONS) ?
- core_scsi3_emulate_pr : NULL;
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_task = target_scsi3_emulate_pr_out;
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
@@ -2863,12 +2841,10 @@ static int transport_generic_cmd_sequencer(
*
* Check for emulated MO_SET_TARGET_PGS.
*/
- if (cdb[1] == MO_SET_TARGET_PGS) {
- cmd->transport_emulate_cdb =
- (su_dev->t10_alua.alua_type ==
- SPC3_ALUA_EMULATED) ?
- core_emulate_set_target_port_groups :
- NULL;
+ if (cdb[1] == MO_SET_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_task =
+ target_emulate_set_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -2888,6 +2864,8 @@ static int transport_generic_cmd_sequencer(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_inquiry;
break;
case READ_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
@@ -2896,6 +2874,8 @@ static int transport_generic_cmd_sequencer(
case READ_CAPACITY:
size = READ_CAP_LEN;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_readcapacity;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SECURITY_PROTOCOL_IN:
@@ -2904,6 +2884,21 @@ static int transport_generic_cmd_sequencer(
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case SERVICE_ACTION_IN:
+ switch (cmd->t_task_cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ if (!passthrough)
+ cmd->execute_task =
+ target_emulate_readcapacity_16;
+ break;
+ default:
+ if (passthrough)
+ break;
+
+ pr_err("Unsupported SA: 0x%02x\n",
+ cmd->t_task_cdb[1] & 0x1f);
+ goto out_unsupported_cdb;
+ }
+ /*FALLTHROUGH*/
case ACCESS_CONTROL_IN:
case ACCESS_CONTROL_OUT:
case EXTENDED_COPY:
@@ -2934,6 +2929,8 @@ static int transport_generic_cmd_sequencer(
case REQUEST_SENSE:
size = cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_request_sense;
break;
case READ_ELEMENT_STATUS:
size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
@@ -2961,10 +2958,8 @@ static int transport_generic_cmd_sequencer(
* is running in SPC_PASSTHROUGH, and wants reservations
* emulation disabled.
*/
- cmd->transport_emulate_cdb =
- (su_dev->t10_pr.res_type !=
- SPC_PASSTHROUGH) ?
- core_scsi2_emulate_crh : NULL;
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_task = target_scsi2_reservation_reserve;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case RELEASE:
@@ -2978,10 +2973,8 @@ static int transport_generic_cmd_sequencer(
else
size = cmd->data_length;
- cmd->transport_emulate_cdb =
- (su_dev->t10_pr.res_type !=
- SPC_PASSTHROUGH) ?
- core_scsi2_emulate_crh : NULL;
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_task = target_scsi2_reservation_release;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case SYNCHRONIZE_CACHE:
@@ -3002,16 +2995,9 @@ static int transport_generic_cmd_sequencer(
size = transport_get_size(sectors, cdb, cmd);
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- /*
- * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
- */
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (passthrough)
break;
- /*
- * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
- * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
- */
- cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
+
/*
* Check to ensure that LBA + Range does not exceed past end of
* device for IBLOCK and FILEIO ->do_sync_cache() backend calls
@@ -3020,10 +3006,13 @@ static int transport_generic_cmd_sequencer(
if (transport_cmd_get_valid_sectors(cmd) < 0)
goto out_invalid_cdb_field;
}
+ cmd->execute_task = target_emulate_synchronize_cache;
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_unmap;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
@@ -3042,6 +3031,8 @@ static int transport_generic_cmd_sequencer(
if (target_check_write_same_discard(&cdb[1], dev) < 0)
goto out_invalid_cdb_field;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_write_same;
break;
case WRITE_SAME:
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
@@ -3063,26 +3054,31 @@ static int transport_generic_cmd_sequencer(
*/
if (target_check_write_same_discard(&cdb[1], dev) < 0)
goto out_invalid_cdb_field;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_write_same;
break;
case ALLOW_MEDIUM_REMOVAL:
- case GPCMD_CLOSE_TRACK:
case ERASE:
- case INITIALIZE_ELEMENT_STATUS:
- case GPCMD_LOAD_UNLOAD:
case REZERO_UNIT:
case SEEK_10:
- case GPCMD_SET_SPEED:
case SPACE:
case START_STOP:
case TEST_UNIT_READY:
case VERIFY:
case WRITE_FILEMARKS:
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_noop;
+ break;
+ case GPCMD_CLOSE_TRACK:
+ case INITIALIZE_ELEMENT_STATUS:
+ case GPCMD_LOAD_UNLOAD:
+ case GPCMD_SET_SPEED:
case MOVE_MEDIUM:
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case REPORT_LUNS:
- cmd->transport_emulate_cdb =
- transport_core_report_lun_response;
+ cmd->execute_task = target_report_luns;
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
* Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
@@ -3134,6 +3130,11 @@ static int transport_generic_cmd_sequencer(
cmd->data_length = size;
}
+ /* reject any command that we don't have a handler for */
+ if (!(passthrough || cmd->execute_task ||
+ (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+ goto out_unsupported_cdb;
+
/* Let's limit control cdbs to a page, for simplicity's sake. */
if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
size > PAGE_SIZE)
@@ -3308,7 +3309,7 @@ static void target_complete_ok_work(struct work_struct *work)
if (cmd->scsi_status) {
ret = transport_send_check_condition_and_sense(
cmd, reason, 1);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
transport_lun_remove_cmd(cmd);
@@ -3333,7 +3334,7 @@ static void target_complete_ok_work(struct work_struct *work)
spin_unlock(&cmd->se_lun->lun_sep_lock);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
break;
case DMA_TO_DEVICE:
@@ -3354,14 +3355,14 @@ static void target_complete_ok_work(struct work_struct *work)
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
break;
default:
@@ -3890,7 +3891,10 @@ EXPORT_SYMBOL(transport_generic_process_write);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
- if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) {
+ int ret;
+
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN || ret == -ENOMEM) {
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
cmd);
transport_handle_queue_full(cmd, cmd->se_dev);
@@ -3920,7 +3924,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
* frontend know that WRITE buffers are ready.
*/
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
else if (ret < 0)
return ret;
@@ -3931,7 +3935,7 @@ queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
transport_handle_queue_full(cmd, cmd->se_dev);
- return ret;
+ return 0;
}
/**
@@ -3949,6 +3953,14 @@ void transport_release_cmd(struct se_cmd *cmd)
core_tmr_release_req(cmd->se_tmr_req);
if (cmd->t_task_cdb != cmd->__t_task_cdb)
kfree(cmd->t_task_cdb);
+ /*
+ * Check if target_wait_for_sess_cmds() is expecting to
+ * release se_cmd directly here..
+ */
+ if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
+ if (cmd->se_tfo->check_release_cmd(cmd) != 0)
+ return;
+
cmd->se_tfo->release_cmd(cmd);
}
EXPORT_SYMBOL(transport_release_cmd);
@@ -3976,6 +3988,114 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
}
EXPORT_SYMBOL(transport_generic_free_cmd);
+/* target_get_sess_cmd - Add command to active ->sess_cmd_list
+ * @se_sess: session to reference
+ * @se_cmd: command descriptor to add
+ */
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+ se_cmd->check_release = 1;
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_get_sess_cmd);
+
+/* target_put_sess_cmd - Check for active I/O shutdown or list delete
+ * @se_sess: session to reference
+ * @se_cmd: command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (list_empty(&se_cmd->se_cmd_list)) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ WARN_ON(1);
+ return 0;
+ }
+
+ if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ complete(&se_cmd->cmd_wait_comp);
+ return 1;
+ }
+ list_del(&se_cmd->se_cmd_list);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(target_put_sess_cmd);
+
+/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
+ * @se_sess: session to split
+ */
+void target_splice_sess_cmd_list(struct se_session *se_sess)
+{
+ struct se_cmd *se_cmd;
+ unsigned long flags;
+
+ WARN_ON(!list_empty(&se_sess->sess_wait_list));
+ INIT_LIST_HEAD(&se_sess->sess_wait_list);
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ se_sess->sess_tearing_down = 1;
+
+ list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+ se_cmd->cmd_wait_set = 1;
+
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_splice_sess_cmd_list);
+
+/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * @se_sess: session to wait for active I/O
+ * @wait_for_tasks: Make extra transport_wait_for_tasks call
+ */
+void target_wait_for_sess_cmds(
+ struct se_session *se_sess,
+ int wait_for_tasks)
+{
+ struct se_cmd *se_cmd, *tmp_cmd;
+ bool rc = false;
+
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
+ list_del(&se_cmd->se_cmd_list);
+
+ pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ " %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+ if (wait_for_tasks) {
+ pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+ rc = transport_wait_for_tasks(se_cmd);
+
+ pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+ }
+
+ if (!rc) {
+ wait_for_completion(&se_cmd->cmd_wait_comp);
+ pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+ }
+
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ }
+}
+EXPORT_SYMBOL(target_wait_for_sess_cmds);
+
/* transport_lun_wait_for_tasks():
*
* Called from ConfigFS context to stop the passed struct se_cmd to allow
@@ -4152,14 +4272,14 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
* Called from frontend fabric context to wait for storage engine
* to pause and/or release frontend generated struct se_cmd.
*/
-void transport_wait_for_tasks(struct se_cmd *cmd)
+bool transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return false;
}
/*
* Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
@@ -4167,7 +4287,7 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
*/
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return false;
}
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
@@ -4210,7 +4330,7 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
if (!atomic_read(&cmd->t_transport_active) ||
atomic_read(&cmd->t_transport_aborted)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return false;
}
atomic_set(&cmd->t_transport_stop, 1);
@@ -4235,6 +4355,8 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
@@ -4583,9 +4705,7 @@ get_cmd:
break;
}
ret = transport_generic_new_cmd(cmd);
- if (ret == -EAGAIN)
- break;
- else if (ret < 0) {
+ if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
@@ -4595,9 +4715,6 @@ get_cmd:
case TRANSPORT_PROCESS_WRITE:
transport_generic_process_write(cmd);
break;
- case TRANSPORT_FREE_CMD_INTR:
- transport_generic_free_cmd(cmd, 0);
- break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
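The target_core_transport.c changes above also add a small per-session command tracking API (target_get_sess_cmd(), target_put_sess_cmd(), target_splice_sess_cmd_list(), target_wait_for_sess_cmds()). A hedged sketch of how a fabric module might wire it up during command submission and session shutdown — illustration only, with hypothetical example_fabric_*() wrappers, not code from this patch:

/*
 * Illustration only -- not code from this patch.  The example_fabric_*()
 * names are hypothetical; only the target_*_sess_cmd* calls come from
 * the API added above.
 */
static void example_fabric_submit(struct se_session *se_sess,
				  struct se_cmd *se_cmd)
{
	/* Track the descriptor on ->sess_cmd_list before handing it
	 * to the target core for processing. */
	target_get_sess_cmd(se_sess, se_cmd);
	/* ... submit se_cmd to the target core as usual ... */
}

static int example_fabric_check_release_cmd(struct se_cmd *se_cmd)
{
	/* Called back from transport_release_cmd(): drops the
	 * descriptor from ->sess_cmd_list, or returns 1 when session
	 * teardown will complete ->cmd_wait_comp and release it. */
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void example_fabric_close_session(struct se_session *se_sess)
{
	/* Move outstanding descriptors onto ->sess_wait_list and mark
	 * the session as tearing down ... */
	target_splice_sess_cmd_list(se_sess);
	/* ... then block until each one is released, calling
	 * transport_wait_for_tasks() on every command first. */
	target_wait_for_sess_cmds(se_sess, 1);
}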
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 3749d8b4b42..e05c55100ec 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -156,7 +156,7 @@ int ft_lport_notify(struct notifier_block *, unsigned long, void *);
/*
* IO methods.
*/
-void ft_check_stop_free(struct se_cmd *);
+int ft_check_stop_free(struct se_cmd *);
void ft_release_cmd(struct se_cmd *);
int ft_queue_status(struct se_cmd *);
int ft_queue_data_in(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 6195026cc7b..4fac37c4c61 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -112,9 +112,10 @@ void ft_release_cmd(struct se_cmd *se_cmd)
ft_free_cmd(cmd);
}
-void ft_check_stop_free(struct se_cmd *se_cmd)
+int ft_check_stop_free(struct se_cmd *se_cmd)
{
transport_generic_free_cmd(se_cmd, 0);
+ return 1;
}
/*
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 708f8e92771..dd9a5743fa9 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -678,10 +678,10 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
return;
if (delay > 1000)
- schedule_delayed_work(&(tz->poll_queue),
+ queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
round_jiffies(msecs_to_jiffies(delay)));
else
- schedule_delayed_work(&(tz->poll_queue),
+ queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
msecs_to_jiffies(delay));
}
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 8816f53e004..b3d17416d86 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -1,6 +1,6 @@
config VT
bool "Virtual terminal" if EXPERT
- depends on !S390
+ depends on !S390 && !UML
select INPUT
default y
---help---
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index e371753ba92..4222035acfb 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -34,6 +34,15 @@ config HVC_ISERIES
help
iSeries machines support a hypervisor virtual console.
+config HVC_OPAL
+ bool "OPAL Console support"
+ depends on PPC_POWERNV
+ select HVC_DRIVER
+ select HVC_IRQ
+ default y
+ help
+ PowerNV machines running under OPAL need this driver to get a console
+
config HVC_RTAS
bool "IBM RTAS Console support"
depends on PPC_RTAS
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
index e2920531637..89abf40bc73 100644
--- a/drivers/tty/hvc/Makefile
+++ b/drivers/tty/hvc/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi_lib.o
+obj-$(CONFIG_HVC_OPAL) += hvc_opal.o hvsi_lib.o
obj-$(CONFIG_HVC_OLD_HVSI) += hvsi.o
obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 7430bc3c8d5..b6b2d18fa38 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -852,7 +852,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
* find index to use:
* see if this vterm id matches one registered for console.
*/
- for (i = 0; i < MAX_NR_HVC_CONSOLES; i++)
+ for (i=0; i < MAX_NR_HVC_CONSOLES; i++)
if (vtermnos[i] == hp->vtermno &&
cons_ops[i] == hp->ops)
break;
@@ -862,13 +862,9 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
i = ++last_hvc;
hp->index = i;
- hvc_console.index = i;
- vtermnos[i] = vtermno;
- cons_ops[i] = ops;
list_add_tail(&(hp->next), &hvc_structs);
spin_unlock(&hvc_structs_lock);
- register_console(&hvc_console);
return hp;
}
@@ -879,7 +875,6 @@ int hvc_remove(struct hvc_struct *hp)
unsigned long flags;
struct tty_struct *tty;
- unregister_console(&hvc_console);
spin_lock_irqsave(&hp->lock, flags);
tty = tty_kref_get(hp->tty);
diff --git a/drivers/tty/hvc/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
index 21c54955084..3f4a897bf4d 100644
--- a/drivers/tty/hvc/hvc_iseries.c
+++ b/drivers/tty/hvc/hvc_iseries.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
new file mode 100644
index 00000000000..ced26c8ccd5
--- /dev/null
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -0,0 +1,425 @@
+/*
+ * opal driver interface to hvc_console.c
+ *
+ * Copyright 2011 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/console.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/export.h>
+
+#include <asm/hvconsole.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+#include <asm/hvsi.h>
+#include <asm/udbg.h>
+#include <asm/opal.h>
+
+#include "hvc_console.h"
+
+static const char hvc_opal_name[] = "hvc_opal";
+
+static struct of_device_id hvc_opal_match[] __devinitdata = {
+ { .name = "serial", .compatible = "ibm,opal-console-raw" },
+ { .name = "serial", .compatible = "ibm,opal-console-hvsi" },
+ { },
+};
+
+typedef enum hv_protocol {
+ HV_PROTOCOL_RAW,
+ HV_PROTOCOL_HVSI
+} hv_protocol_t;
+
+struct hvc_opal_priv {
+ hv_protocol_t proto; /* Raw data or HVSI packets */
+ struct hvsi_priv hvsi; /* HVSI specific data */
+};
+static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES];
+
+/* For early boot console */
+static struct hvc_opal_priv hvc_opal_boot_priv;
+static u32 hvc_opal_boot_termno;
+
+static const struct hv_ops hvc_opal_raw_ops = {
+ .get_chars = opal_get_chars,
+ .put_chars = opal_put_chars,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
+
+static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return -ENODEV;
+
+ return hvsilib_get_chars(&pv->hvsi, buf, count);
+}
+
+static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return -ENODEV;
+
+ return hvsilib_put_chars(&pv->hvsi, buf, count);
+}
+
+static int hvc_opal_hvsi_open(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+ int rc;
+
+ pr_devel("HVSI@%x: do open !\n", hp->vtermno);
+
+ rc = notifier_add_irq(hp, data);
+ if (rc)
+ return rc;
+
+ return hvsilib_open(&pv->hvsi, hp);
+}
+
+static void hvc_opal_hvsi_close(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do close !\n", hp->vtermno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_del_irq(hp, data);
+}
+
+void hvc_opal_hvsi_hangup(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do hangup !\n", hp->vtermno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_hangup_irq(hp, data);
+}
+
+static int hvc_opal_hvsi_tiocmget(struct hvc_struct *hp)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ if (!pv)
+ return -EINVAL;
+ return pv->hvsi.mctrl;
+}
+
+static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
+ unsigned int clear)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
+ hp->vtermno, set, clear);
+
+ if (set & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 1);
+ else if (clear & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 0);
+
+ return 0;
+}
+
+static const struct hv_ops hvc_opal_hvsi_ops = {
+ .get_chars = hvc_opal_hvsi_get_chars,
+ .put_chars = hvc_opal_hvsi_put_chars,
+ .notifier_add = hvc_opal_hvsi_open,
+ .notifier_del = hvc_opal_hvsi_close,
+ .notifier_hangup = hvc_opal_hvsi_hangup,
+ .tiocmget = hvc_opal_hvsi_tiocmget,
+ .tiocmset = hvc_opal_hvsi_tiocmset,
+};
+
+static int __devinit hvc_opal_probe(struct platform_device *dev)
+{
+ const struct hv_ops *ops;
+ struct hvc_struct *hp;
+ struct hvc_opal_priv *pv;
+ hv_protocol_t proto;
+ unsigned int termno, boot = 0;
+ const __be32 *reg;
+
+ if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
+ proto = HV_PROTOCOL_RAW;
+ ops = &hvc_opal_raw_ops;
+ } else if (of_device_is_compatible(dev->dev.of_node,
+ "ibm,opal-console-hvsi")) {
+ proto = HV_PROTOCOL_HVSI;
+ ops = &hvc_opal_hvsi_ops;
+ } else {
+ pr_err("hvc_opal: Unkown protocol for %s\n",
+ dev->dev.of_node->full_name);
+ return -ENXIO;
+ }
+
+ reg = of_get_property(dev->dev.of_node, "reg", NULL);
+ termno = reg ? be32_to_cpup(reg) : 0;
+
+ /* Is it our boot console? */
+ if (hvc_opal_privs[termno] == &hvc_opal_boot_priv) {
+ pv = hvc_opal_privs[termno];
+ boot = 1;
+ } else if (hvc_opal_privs[termno] == NULL) {
+ pv = kzalloc(sizeof(struct hvc_opal_priv), GFP_KERNEL);
+ if (!pv)
+ return -ENOMEM;
+ pv->proto = proto;
+ hvc_opal_privs[termno] = pv;
+ if (proto == HV_PROTOCOL_HVSI)
+ hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars,
+ termno, 0);
+
+ /* Instantiate now to establish a mapping index==vtermno */
+ hvc_instantiate(termno, termno, ops);
+ } else {
+ pr_err("hvc_opal: Device %s has duplicate terminal number #%d\n",
+ dev->dev.of_node->full_name, termno);
+ return -ENXIO;
+ }
+
+ pr_info("hvc%d: %s protocol on %s%s\n", termno,
+ proto == HV_PROTOCOL_RAW ? "raw" : "hvsi",
+ dev->dev.of_node->full_name,
+ boot ? " (boot console)" : "");
+
+ /* We don't do IRQ yet */
+ hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+ dev_set_drvdata(&dev->dev, hp);
+
+ return 0;
+}
+
+static int __devexit hvc_opal_remove(struct platform_device *dev)
+{
+ struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
+ int rc, termno;
+
+ termno = hp->vtermno;
+ rc = hvc_remove(hp);
+ if (rc == 0) {
+ if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
+ kfree(hvc_opal_privs[termno]);
+ hvc_opal_privs[termno] = NULL;
+ }
+ return rc;
+}
+
+static struct platform_driver hvc_opal_driver = {
+ .probe = hvc_opal_probe,
+ .remove = __devexit_p(hvc_opal_remove),
+ .driver = {
+ .name = hvc_opal_name,
+ .owner = THIS_MODULE,
+ .of_match_table = hvc_opal_match,
+ }
+};
+
+static int __init hvc_opal_init(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ /* Register as a vio device to receive callbacks */
+ return platform_driver_register(&hvc_opal_driver);
+}
+module_init(hvc_opal_init);
+
+static void __exit hvc_opal_exit(void)
+{
+ platform_driver_unregister(&hvc_opal_driver);
+}
+module_exit(hvc_opal_exit);
+
+static void udbg_opal_putc(char c)
+{
+ unsigned int termno = hvc_opal_boot_termno;
+ int count = -1;
+
+ if (c == '\n')
+ udbg_opal_putc('\r');
+
+ do {
+ switch (hvc_opal_boot_priv.proto) {
+ case HV_PROTOCOL_RAW:
+ count = opal_put_chars(termno, &c, 1);
+ break;
+ case HV_PROTOCOL_HVSI:
+ count = hvc_opal_hvsi_put_chars(termno, &c, 1);
+ break;
+ }
+ } while (count == 0 || count == -EAGAIN);
+}
+
+static int udbg_opal_getc_poll(void)
+{
+ unsigned int termno = hvc_opal_boot_termno;
+ int rc = 0;
+ char c;
+
+ switch (hvc_opal_boot_priv.proto) {
+ case HV_PROTOCOL_RAW:
+ rc = opal_get_chars(termno, &c, 1);
+ break;
+ case HV_PROTOCOL_HVSI:
+ rc = hvc_opal_hvsi_get_chars(termno, &c, 1);
+ break;
+ }
+ if (!rc)
+ return -1;
+ return c;
+}
+
+static int udbg_opal_getc(void)
+{
+ int ch;
+ for (;;) {
+ ch = udbg_opal_getc_poll();
+ if (ch == -1) {
+ /* This shouldn't be needed...but... */
+ volatile unsigned long delay;
+ for (delay = 0; delay < 2000000; delay++)
+ ;
+ } else {
+ return ch;
+ }
+ }
+}
+
+static void udbg_init_opal_common(void)
+{
+ udbg_putc = udbg_opal_putc;
+ udbg_getc = udbg_opal_getc;
+ udbg_getc_poll = udbg_opal_getc_poll;
+ tb_ticks_per_usec = 0x200; /* Make udelay not suck */
+}
+
+void __init hvc_opal_init_early(void)
+{
+ struct device_node *stdout_node = NULL;
+ const u32 *termno;
+ const char *name = NULL;
+ const struct hv_ops *ops;
+ u32 index;
+
+ /* find the boot console from /chosen/stdout */
+ if (of_chosen)
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name) {
+ stdout_node = of_find_node_by_path(name);
+ if (!stdout_node) {
+ pr_err("hvc_opal: Failed to locate default console!\n");
+ return;
+ }
+ } else {
+ struct device_node *opal, *np;
+
+ /* Current OPAL takeover doesn't provide the stdout
+ * path, so we hard-wire it
+ */
+ opal = of_find_node_by_path("/ibm,opal/consoles");
+ if (opal)
+ pr_devel("hvc_opal: Found consoles in new location\n");
+ if (!opal) {
+ opal = of_find_node_by_path("/ibm,opal");
+ if (opal)
+ pr_devel("hvc_opal: "
+ "Found consoles in old location\n");
+ }
+ if (!opal)
+ return;
+ for_each_child_of_node(opal, np) {
+ if (!strcmp(np->name, "serial")) {
+ stdout_node = np;
+ break;
+ }
+ }
+ of_node_put(opal);
+ }
+ if (!stdout_node)
+ return;
+ termno = of_get_property(stdout_node, "reg", NULL);
+ index = termno ? *termno : 0;
+ if (index >= MAX_NR_HVC_CONSOLES)
+ return;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+
+ /* Check the protocol */
+ if (of_device_is_compatible(stdout_node, "ibm,opal-console-raw")) {
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
+ ops = &hvc_opal_raw_ops;
+ pr_devel("hvc_opal: Found RAW console\n");
+ } else if (of_device_is_compatible(stdout_node, "ibm,opal-console-hvsi")) {
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI;
+ ops = &hvc_opal_hvsi_ops;
+ hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars,
+ opal_put_chars, index, 1);
+ /* HVSI, perform the handshake now */
+ hvsilib_establish(&hvc_opal_boot_priv.hvsi);
+ pr_devel("hvc_opal: Found HVSI console\n");
+ } else
+ goto out;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+ add_preferred_console("hvc", index, NULL);
+ hvc_instantiate(index, index, ops);
+out:
+ of_node_put(stdout_node);
+}
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
+void __init udbg_init_debug_opal(void)
+{
+ u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_RAW */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI
+void __init udbg_init_debug_opal_hvsi(void)
+{
+ u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+ hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars,
+ index, 1);
+ hvsilib_establish(&hvc_opal_boot_priv.hvsi);
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI */
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 130aace67f3..fc3c3ad6c07 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -41,6 +41,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
+#include <linux/module.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 55882b5930a..b9040bec36b 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1532,7 +1532,7 @@ static int __devinit hvcs_initialize(void)
goto register_fail;
}
- hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ hvcs_pi_buff = (unsigned long *) __get_free_page(GFP_KERNEL);
if (!hvcs_pi_buff) {
rc = -ENOMEM;
goto buff_alloc_fail;
@@ -1548,7 +1548,7 @@ static int __devinit hvcs_initialize(void)
return 0;
kthread_fail:
- kfree(hvcs_pi_buff);
+ free_page((unsigned long)hvcs_pi_buff);
buff_alloc_fail:
tty_unregister_driver(hvcs_tty_driver);
register_fail:
@@ -1597,7 +1597,7 @@ static void __exit hvcs_module_exit(void)
kthread_stop(hvcs_task);
spin_lock(&hvcs_pi_lock);
- kfree(hvcs_pi_buff);
+ free_page((unsigned long)hvcs_pi_buff);
hvcs_pi_buff = NULL;
spin_unlock(&hvcs_pi_lock);
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index bd9b09827b2..6f4dd83d869 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -183,7 +183,7 @@ int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
unsigned int tries, read = 0;
if (WARN_ON(!pv))
- return 0;
+ return -ENXIO;
/* If we aren't open, don't do anything in order to avoid races
* with connection establishment. The hvc core will call this
@@ -234,7 +234,7 @@ int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count)
int rc, adjcount = min(count, HVSI_MAX_OUTGOING_DATA);
if (WARN_ON(!pv))
- return 0;
+ return -ENODEV;
dp.hdr.type = VS_DATA_PACKET_HEADER;
dp.hdr.len = adjcount + sizeof(struct hvsi_header);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 4cb0d0a3e57..fc7bbba585c 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -66,14 +66,16 @@
static int debug;
module_param(debug, int, 0600);
-#define T1 (HZ/10)
-#define T2 (HZ/3)
-#define N2 3
+/* Defaults: these are from the specification */
+
+#define T1 10 /* 100 ms */
+#define T2 34 /* 333 ms */
+#define N2 3 /* Retry 3 times */
/* Use long timers for testing at low speed with debug on */
#ifdef DEBUG_TIMING
-#define T1 HZ
-#define T2 (2 * HZ)
+#define T1 100
+#define T2 200
#endif
/*
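Note on the timer constants above: T1/T2 (and the DEBUG_TIMING overrides) are now plain numbers in hundredths of a second rather than jiffies-based expressions, so the conversion to jiffies has to happen where the timers are actually armed. A minimal sketch of that conversion; the helper name is illustrative and not part of the patch:

#include <linux/jiffies.h>

/* T1/T2 are stored in units of 1/100th of a second; convert to
 * jiffies only at the point where a kernel timer is armed. */
static inline unsigned long gsm_hundredths_to_jiffies(unsigned int t)
{
	return msecs_to_jiffies(t * 10);
}

/* e.g. mod_timer(&timer, jiffies + gsm_hundredths_to_jiffies(gsm->t1)); */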
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index a87a56cb541..eeadf1b8e09 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -450,24 +450,6 @@ static void au_serial_out(struct uart_port *p, int offset, int value)
__raw_writel(value, p->membase + offset);
}
-static unsigned int tsi_serial_in(struct uart_port *p, int offset)
-{
- unsigned int tmp;
- offset = map_8250_in_reg(p, offset) << p->regshift;
- if (offset == UART_IIR) {
- tmp = readl(p->membase + (UART_IIR & ~3));
- return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */
- } else
- return readb(p->membase + offset);
-}
-
-static void tsi_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = map_8250_out_reg(p, offset) << p->regshift;
- if (!((offset == UART_IER) && (value & UART_IER_UUE)))
- writeb(value, p->membase + offset);
-}
-
static unsigned int io_serial_in(struct uart_port *p, int offset)
{
offset = map_8250_in_reg(p, offset) << p->regshift;
@@ -508,11 +490,6 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = au_serial_out;
break;
- case UPIO_TSI:
- p->serial_in = tsi_serial_in;
- p->serial_out = tsi_serial_out;
- break;
-
default:
p->serial_in = io_serial_in;
p->serial_out = io_serial_out;
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 00a73ecb2df..530181e49f6 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/console.h>
+#include <linux/of.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
@@ -472,8 +473,6 @@ static struct of_device_id altera_jtaguart_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, altera_jtaguart_match);
-#else
-#define altera_jtaguart_match NULL
#endif /* CONFIG_OF */
static struct platform_driver altera_jtaguart_platform_driver = {
@@ -482,7 +481,7 @@ static struct platform_driver altera_jtaguart_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
- .of_match_table = altera_jtaguart_match,
+ .of_match_table = of_match_ptr(altera_jtaguart_match),
},
};
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index d902558ccfd..1d04c5037f2 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -616,8 +616,6 @@ static struct of_device_id altera_uart_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, altera_uart_match);
-#else
-#define altera_uart_match NULL
#endif /* CONFIG_OF */
static struct platform_driver altera_uart_platform_driver = {
@@ -626,7 +624,7 @@ static struct platform_driver altera_uart_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
- .of_match_table = altera_uart_match,
+ .of_match_table = of_match_ptr(altera_uart_match),
},
};
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index c0d10c4ddb7..efdf92c3a35 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -312,12 +312,16 @@ static int pl010_startup(struct uart_port *port)
struct uart_amba_port *uap = (struct uart_amba_port *)port;
int retval;
+ retval = clk_prepare(uap->clk);
+ if (retval)
+ goto out;
+
/*
* Try to enable the clock producer.
*/
retval = clk_enable(uap->clk);
if (retval)
- goto out;
+ goto clk_unprep;
uap->port.uartclk = clk_get_rate(uap->clk);
@@ -343,6 +347,8 @@ static int pl010_startup(struct uart_port *port)
clk_dis:
clk_disable(uap->clk);
+ clk_unprep:
+ clk_unprepare(uap->clk);
out:
return retval;
}
@@ -370,6 +376,7 @@ static void pl010_shutdown(struct uart_port *port)
* Shut down the clock producer
*/
clk_disable(uap->clk);
+ clk_unprepare(uap->clk);
}
static void
@@ -626,6 +633,7 @@ static int __init pl010_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ int ret;
/*
* Check whether an invalid uart number has been specified, and
@@ -638,6 +646,10 @@ static int __init pl010_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ ret = clk_prepare(uap->clk);
+ if (ret)
+ return ret;
+
uap->port.uartclk = clk_get_rate(uap->clk);
if (options)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index f5f6831b0a6..00233af1acc 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1367,12 +1367,16 @@ static int pl011_startup(struct uart_port *port)
unsigned int cr;
int retval;
+ retval = clk_prepare(uap->clk);
+ if (retval)
+ goto out;
+
/*
* Try to enable the clock producer.
*/
retval = clk_enable(uap->clk);
if (retval)
- goto out;
+ goto clk_unprep;
uap->port.uartclk = clk_get_rate(uap->clk);
@@ -1446,6 +1450,8 @@ static int pl011_startup(struct uart_port *port)
clk_dis:
clk_disable(uap->clk);
+ clk_unprep:
+ clk_unprepare(uap->clk);
out:
return retval;
}
@@ -1497,6 +1503,7 @@ static void pl011_shutdown(struct uart_port *port)
* Shut down the clock producer
*/
clk_disable(uap->clk);
+ clk_unprepare(uap->clk);
if (uap->port.dev->platform_data) {
struct amba_pl011_data *plat;
@@ -1800,6 +1807,7 @@ static int __init pl011_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ int ret;
/*
* Check whether an invalid uart number has been specified, and
@@ -1812,6 +1820,10 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ ret = clk_prepare(uap->clk);
+ if (ret)
+ return ret;
+
if (uap->port.dev->platform_data) {
struct amba_pl011_data *plat;
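The amba-pl010/pl011 hunks above wrap the existing clk_enable()/clk_disable() calls with clk_prepare()/clk_unprepare(). With the common clock framework the prepare step may sleep while enable/disable must remain callable from atomic context, so startup needs both calls and the error and shutdown paths must undo them in reverse order. A condensed sketch of the pairing; the example_port_clk_* helpers are illustrative, not part of the patch:

#include <linux/clk.h>

static int example_port_clk_up(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* must stay atomic-safe */
	if (ret)
		clk_unprepare(clk);
	return ret;
}

static void example_port_clk_down(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}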
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 9988c0c305c..4a0f86fa1e9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -48,7 +48,7 @@
#ifdef CONFIG_ARM
#include <mach/cpu.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#endif
#define PDC_BUFFER_SIZE 512
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 54ffdc6243f..163fc9021f5 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1290,17 +1290,20 @@ static int serial_imx_resume(struct platform_device *dev)
static int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
- static int portnum = 0;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(imx_uart_dt_ids, &pdev->dev);
+ int ret;
if (!np)
return -ENODEV;
- sport->port.line = portnum++;
- if (sport->port.line >= UART_NR)
- return -EINVAL;
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return -ENODEV;
+ }
+ sport->port.line = ret;
if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
sport->have_rtscts = 1;
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 648b6a3efa3..7c867a046c9 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -24,7 +24,7 @@
*
*
***********************************************************************/
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 87e7e6c876d..2b42a01a81c 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -19,6 +19,7 @@
#include <linux/console.h>
#include <linux/vt_kern.h>
#include <linux/input.h>
+#include <linux/module.h>
#define MAX_CONFIG_LEN 40
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 2af5aa5f3a8..8a6cc8c30b5 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -43,6 +43,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spi/spi.h>
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index d73aadd7a9a..90c40f22ec7 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -36,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/sfi.h>
+#include <linux/module.h>
#include <asm/mrst.h>
#include "max3107.h"
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index db00b595cab..7827000db4f 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -36,6 +36,7 @@
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/freezer.h>
+#include <linux/module.h>
#include "max3107.h"
static const struct baud_table brg26_ext[] = {
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 29cbfd8c4e7..8131e2c2801 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -19,6 +19,7 @@
# define SUPPORT_SYSRQ
#endif
+#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -33,6 +34,8 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "msm_serial.h"
@@ -589,9 +592,8 @@ static void msm_release_port(struct uart_port *port)
iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base +
GSBI_CONTROL);
- gsbi_resource = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- "gsbi_resource");
+ gsbi_resource = platform_get_resource(pdev,
+ IORESOURCE_MEM, 1);
if (unlikely(!gsbi_resource))
return;
@@ -612,8 +614,7 @@ static int msm_request_port(struct uart_port *port)
resource_size_t size;
int ret;
- uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "uart_resource");
+ uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!uart_resource))
return -ENXIO;
@@ -628,8 +629,7 @@ static int msm_request_port(struct uart_port *port)
goto fail_release_port;
}
- gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "gsbi_resource");
+ gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
/* Is this a GSBI-based port? */
if (gsbi_resource) {
size = resource_size(gsbi_resource);
@@ -857,6 +857,8 @@ static struct uart_driver msm_uart_driver = {
.cons = MSM_CONSOLE,
};
+static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
+
static int __init msm_serial_probe(struct platform_device *pdev)
{
struct msm_port *msm_port;
@@ -864,6 +866,9 @@ static int __init msm_serial_probe(struct platform_device *pdev)
struct uart_port *port;
int irq;
+ if (pdev->id == -1)
+ pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
+
if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
return -ENXIO;
@@ -873,7 +878,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
- if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource"))
+ if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
msm_port->is_uartdm = 1;
else
msm_port->is_uartdm = 0;
@@ -897,8 +902,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
printk(KERN_INFO "uartclk = %d\n", port->uartclk);
- resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "uart_resource");
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!resource))
return -ENXIO;
port->mapbase = resource->start;
@@ -922,11 +926,17 @@ static int __devexit msm_serial_remove(struct platform_device *pdev)
return 0;
}
+static struct of_device_id msm_match_table[] = {
+ { .compatible = "qcom,msm-uart" },
+ {}
+};
+
static struct platform_driver msm_platform_driver = {
.remove = msm_serial_remove,
.driver = {
.name = "msm_serial",
.owner = THIS_MODULE,
+ .of_match_table = msm_match_table,
},
};
diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index 9beaff1cec2..dd4c31d1aee 100644
--- a/drivers/tty/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
@@ -10,6 +10,7 @@
*
*/
#include <linux/init.h>
+#include <linux/export.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index e58cece6f44..e8c9cee07d0 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -200,17 +200,7 @@ static struct platform_driver of_platform_serial_driver = {
.remove = of_platform_serial_remove,
};
-static int __init of_platform_serial_init(void)
-{
- return platform_driver_register(&of_platform_serial_driver);
-}
-module_init(of_platform_serial_init);
-
-static void __exit of_platform_serial_exit(void)
-{
- return platform_driver_unregister(&of_platform_serial_driver);
-};
-module_exit(of_platform_serial_exit);
+module_platform_driver(of_platform_serial_driver);
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9871c57b348..aff9d612dff 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -207,6 +207,25 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
},
/*
+ * Common SH-2(A) SCIF definitions for ports with FIFO data
+ * count registers.
+ */
+ [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = { 0x20, 16 },
+ [SCLSR] = { 0x24, 16 },
+ },
+
+ /*
* Common SH-3 SCIF definitions.
*/
[SCIx_SH3_SCIF_REGTYPE] = {
@@ -1446,12 +1465,8 @@ static bool filter(struct dma_chan *chan, void *slave)
dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
param->slave_id);
- if (param->dma_dev == chan->device->dev) {
- chan->private = param;
- return true;
- } else {
- return false;
- }
+ chan->private = param;
+ return true;
}
static void rx_timer_fn(unsigned long arg)
@@ -1477,10 +1492,10 @@ static void sci_request_dma(struct uart_port *port)
dma_cap_mask_t mask;
int nent;
- dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
- port->line, s->cfg->dma_dev);
+ dev_dbg(port->dev, "%s: port %d\n", __func__,
+ port->line);
- if (!s->cfg->dma_dev)
+ if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
return;
dma_cap_zero(mask);
@@ -1490,7 +1505,6 @@ static void sci_request_dma(struct uart_port *port)
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
param->slave_id = s->cfg->dma_slave_tx;
- param->dma_dev = s->cfg->dma_dev;
s->cookie_tx = -EINVAL;
chan = dma_request_channel(mask, filter, param);
@@ -1519,7 +1533,6 @@ static void sci_request_dma(struct uart_port *port)
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
param->slave_id = s->cfg->dma_slave_rx;
- param->dma_dev = s->cfg->dma_dev;
chan = dma_request_channel(mask, filter, param);
dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
@@ -1564,9 +1577,6 @@ static void sci_free_dma(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
- if (!s->cfg->dma_dev)
- return;
-
if (s->chan_tx)
sci_tx_dma_release(s, false);
if (s->chan_rx)
@@ -1981,9 +1991,9 @@ static int __devinit sci_init_single(struct platform_device *dev,
port->serial_in = sci_serial_in;
port->serial_out = sci_serial_out;
- if (p->dma_dev)
- dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
- p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
+ if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0)
+ dev_dbg(port->dev, "DMA tx %d, rx %d\n",
+ p->dma_slave_tx, p->dma_slave_rx);
return 0;
}
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index a4b63bfeaa2..e76c8b747fb 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "timbuart.h"
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index b908615ccaa..6cd414341d5 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -569,8 +569,6 @@ static struct of_device_id ulite_of_match[] __devinitdata = {
{}
};
MODULE_DEVICE_TABLE(of, ulite_of_match);
-#else /* CONFIG_OF */
-#define ulite_of_match NULL
#endif /* CONFIG_OF */
static int __devinit ulite_probe(struct platform_device *pdev)
@@ -610,7 +608,7 @@ static struct platform_driver ulite_platform_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "uartlite",
- .of_match_table = ulite_of_match,
+ .of_match_table = of_match_ptr(ulite_of_match),
},
};
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 8c03b127fd0..b627363352e 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/module.h>
#define XUARTPS_TTY_NAME "ttyPS"
#define XUARTPS_NAME "xuartps"
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 66825c9f516..7a367ff5122 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/tty.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c
index bdc3db94612..ff505951735 100644
--- a/drivers/uio/uio_pdrv.c
+++ b/drivers/uio/uio_pdrv.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/stringify.h>
+#include <linux/module.h>
#include <linux/slab.h>
#define DRIVER_NAME "uio_pdrv"
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 0b2ed71e3bf..25de302009a 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -18,6 +18,7 @@
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4ac2750491d..791f11bed60 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -62,7 +62,6 @@ config USB_ARCH_HAS_EHCI
boolean
default y if FSL_SOC
default y if PPC_MPC512x
- default y if SOC_AU1200
default y if ARCH_IXP4XX
default y if ARCH_W90X900
default y if ARCH_AT91SAM9G45
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 62050f7a4f9..57ae44cd0b8 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -38,6 +38,7 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/c67x00.h>
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 3b029a0a478..45887a0ff87 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h>
@@ -1667,6 +1668,11 @@ int usb_runtime_suspend(struct device *dev)
return -EAGAIN;
status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+
+ /* Allow a retry if autosuspend failed temporarily */
+ if (status == -EAGAIN || status == -EBUSY)
+ usb_mark_last_busy(udev);
+
/* The PM core reacts badly unless the return code is 0,
* -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
*/
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7542dce3f5a..7728c91dfa2 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/usb.h>
#include <linux/mutex.h>
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 672674c2fb3..725550f06fa 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/utsname.h>
+#include <linux/module.h>
#include "u_ether.h"
#include "u_serial.h"
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 8a5529d214f..f71b0787983 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -14,6 +14,7 @@
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/device.h>
#include <linux/utsname.h>
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index f855ecf7a63..6256420089f 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -9,6 +9,7 @@
/* verbose messages */
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 6b1c20b6c9b..acb38004eec 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/pagemap.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <linux/usb/composite.h>
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
index e3f74bf5da2..5f400f66aa9 100644
--- a/drivers/usb/gadget/f_obex.c
+++ b/drivers/usb/gadget/f_obex.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/module.h>
#include "u_serial.h"
#include "gadget_chips.h"
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index 168906d2b5d..7aa7ac82c02 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/module.h>
#include "g_zero.h"
#include "gadget_chips.h"
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 3ac4f51cd0b..f7e39b0365c 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -243,6 +243,7 @@
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
+#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index e593f2849fa..74da206c840 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 8fcde37aa6d..681bd038b1d 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/utsname.h>
+#include <linux/module.h>
#include <linux/device.h>
#include <sound/core.h>
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 3a4a664bab4..6597a6813e4 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -25,6 +25,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "u_serial.h"
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index ed48a5d79e1..7ca290fcb07 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -36,3 +36,4 @@ obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
+obj-$(CONFIG_MIPS_ALCHEMY) += alchemy-common.o
diff --git a/drivers/usb/host/alchemy-common.c b/drivers/usb/host/alchemy-common.c
new file mode 100644
index 00000000000..b4192c964d0
--- /dev/null
+++ b/drivers/usb/host/alchemy-common.c
@@ -0,0 +1,337 @@
+/*
+ * USB block power/access management abstraction.
+ *
+ * Au1000+: The OHCI block control register is at the far end of the OHCI memory
+ * area. Au1550 has OHCI at a different base address. No need to handle
+ * UDC here.
+ * Au1200: one register to control access and clocks to O/EHCI, UDC and OTG
+ * as well as the PHY for EHCI and UDC.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* control register offsets */
+#define AU1000_OHCICFG 0x7fffc
+#define AU1550_OHCICFG 0x07ffc
+#define AU1200_USBCFG 0x04
+
+/* Au1000 USB block config bits */
+#define USBHEN_RD (1 << 4) /* OHCI reset-done indicator */
+#define USBHEN_CE (1 << 3) /* OHCI block clock enable */
+#define USBHEN_E (1 << 2) /* OHCI block enable */
+#define USBHEN_C (1 << 1) /* OHCI block coherency bit */
+#define USBHEN_BE (1 << 0) /* OHCI Big-Endian */
+
+/* Au1200 USB config bits */
+#define USBCFG_PFEN (1 << 31) /* prefetch enable (undoc) */
+#define USBCFG_RDCOMB (1 << 30) /* read combining (undoc) */
+#define USBCFG_UNKNOWN (5 << 20) /* unknown, leave this way */
+#define USBCFG_SSD (1 << 23) /* serial short detect en */
+#define USBCFG_PPE (1 << 19) /* HS PHY PLL */
+#define USBCFG_UCE (1 << 18) /* UDC clock enable */
+#define USBCFG_ECE (1 << 17) /* EHCI clock enable */
+#define USBCFG_OCE (1 << 16) /* OHCI clock enable */
+#define USBCFG_FLA(x) (((x) & 0x3f) << 8)
+#define USBCFG_UCAM (1 << 7) /* coherent access (undoc) */
+#define USBCFG_GME (1 << 6) /* OTG mem access */
+#define USBCFG_DBE (1 << 5) /* UDC busmaster enable */
+#define USBCFG_DME (1 << 4) /* UDC mem enable */
+#define USBCFG_EBE (1 << 3) /* EHCI busmaster enable */
+#define USBCFG_EME (1 << 2) /* EHCI mem enable */
+#define USBCFG_OBE (1 << 1) /* OHCI busmaster enable */
+#define USBCFG_OME (1 << 0) /* OHCI mem enable */
+#define USBCFG_INIT_AU1200 (USBCFG_PFEN | USBCFG_RDCOMB | USBCFG_UNKNOWN |\
+ USBCFG_SSD | USBCFG_FLA(0x20) | USBCFG_UCAM | \
+ USBCFG_GME | USBCFG_DBE | USBCFG_DME | \
+ USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \
+ USBCFG_OME)
+
+
+static DEFINE_SPINLOCK(alchemy_usb_lock);
+
+
+static inline void __au1200_ohci_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_OCE, base + AU1200_USBCFG);
+ wmb();
+ udelay(2000);
+ } else {
+ __raw_writel(r & ~USBCFG_OCE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ }
+}
+
+static inline void __au1200_ehci_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_ECE | USBCFG_PPE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ } else {
+ if (!(r & USBCFG_UCE)) /* UDC also off? */
+ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
+ __raw_writel(r & ~USBCFG_ECE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ }
+}
+
+static inline void __au1200_udc_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_UCE | USBCFG_PPE, base + AU1200_USBCFG);
+ wmb();
+ } else {
+ if (!(r & USBCFG_ECE)) /* EHCI also off? */
+ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
+ __raw_writel(r & ~USBCFG_UCE, base + AU1200_USBCFG);
+ wmb();
+ }
+}
+
+static inline int au1200_coherency_bug(void)
+{
+#if defined(CONFIG_DMA_COHERENT)
+ /* Au1200 AB USB does not support coherent memory */
+ if (!(read_c0_prid() & 0xff)) {
+ printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n");
+ printk(KERN_INFO "Au1200 USB: update your board or re-configure"
+ " the kernel\n");
+ return -ENODEV;
+ }
+#endif
+ return 0;
+}
+
+static inline int au1200_usb_control(int block, int enable)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ ret = au1200_coherency_bug();
+ if (ret && enable)
+ goto out;
+ __au1200_ohci_control(base, enable);
+ break;
+ case ALCHEMY_USB_UDC0:
+ __au1200_udc_control(base, enable);
+ break;
+ case ALCHEMY_USB_EHCI0:
+ ret = au1200_coherency_bug();
+ if (ret && enable)
+ goto out;
+ __au1200_ehci_control(base, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+out:
+ return ret;
+}
+
+
+/* initialize USB block(s) to a known working state */
+static inline void au1200_usb_init(void)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
+ __raw_writel(USBCFG_INIT_AU1200, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+}
+
+static inline void au1000_usb_init(unsigned long rb, int reg)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg);
+ unsigned long r = __raw_readl(base);
+
+#if defined(__BIG_ENDIAN)
+ r |= USBHEN_BE;
+#endif
+ r |= USBHEN_C;
+
+ __raw_writel(r, base);
+ wmb();
+ udelay(1000);
+}
+
+
+static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(rb);
+ unsigned long r = __raw_readl(base + creg);
+
+ if (enable) {
+ __raw_writel(r | USBHEN_CE, base + creg);
+ wmb();
+ udelay(1000);
+ __raw_writel(r | USBHEN_CE | USBHEN_E, base + creg);
+ wmb();
+ udelay(1000);
+
+ /* wait for reset complete (read reg twice: au1500 erratum) */
+ while (__raw_readl(base + creg),
+ !(__raw_readl(base + creg) & USBHEN_RD))
+ udelay(1000);
+ } else {
+ __raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg);
+ wmb();
+ }
+}
+
+static inline int au1000_usb_control(int block, int enable, unsigned long rb,
+ int creg)
+{
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1xx0_ohci_control(enable, rb, creg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+/*
+ * alchemy_usb_control - control Alchemy on-chip USB blocks
+ * @block: USB block to target
+ * @enable: set 1 to enable a block, 0 to disable
+ */
+int alchemy_usb_control(int block, int enable)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&alchemy_usb_lock, flags);
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ ret = au1000_usb_control(block, enable,
+ AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ ret = au1000_usb_control(block, enable,
+ AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ ret = au1200_usb_control(block, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(&alchemy_usb_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(alchemy_usb_control);
+
+
+static unsigned long alchemy_usb_pmdata[2];
+
+static void au1000_usb_pm(unsigned long br, int creg, int susp)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(br);
+
+ if (susp) {
+ alchemy_usb_pmdata[0] = __raw_readl(base + creg);
+ /* There appears to be some undocumented reset register.... */
+ __raw_writel(0, base + 0x04);
+ wmb();
+ __raw_writel(0, base + creg);
+ wmb();
+ } else {
+ __raw_writel(alchemy_usb_pmdata[0], base + creg);
+ wmb();
+ }
+}
+
+static void au1200_usb_pm(int susp)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_OTG_PHYS_ADDR);
+ if (susp) {
+ /* save OTG_CAP/MUX registers which indicate port routing */
+ /* FIXME: write an OTG driver to do that */
+ alchemy_usb_pmdata[0] = __raw_readl(base + 0x00);
+ alchemy_usb_pmdata[1] = __raw_readl(base + 0x04);
+ } else {
+ /* restore access to all MMIO areas */
+ au1200_usb_init();
+
+ /* restore OTG_CAP/MUX registers */
+ __raw_writel(alchemy_usb_pmdata[0], base + 0x00);
+ __raw_writel(alchemy_usb_pmdata[1], base + 0x04);
+ wmb();
+ }
+}
+
+static void alchemy_usb_pm(int susp)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ au1000_usb_pm(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG, susp);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_usb_pm(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG, susp);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1200_usb_pm(susp);
+ break;
+ }
+}
+
+static int alchemy_usb_suspend(void)
+{
+ alchemy_usb_pm(1);
+ return 0;
+}
+
+static void alchemy_usb_resume(void)
+{
+ alchemy_usb_pm(0);
+}
+
+static struct syscore_ops alchemy_usb_pm_ops = {
+ .suspend = alchemy_usb_suspend,
+ .resume = alchemy_usb_resume,
+};
+
+static int __init alchemy_usb_init(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1200_usb_init();
+ break;
+ }
+
+ register_syscore_ops(&alchemy_usb_pm_ops);
+
+ return 0;
+}
+arch_initcall(alchemy_usb_init);
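The host-controller glue in the hunks that follow stops poking USB_HOST_CONFIG directly and instead brackets its MMIO use with alchemy_usb_control(). A condensed sketch of that calling pattern; the example_* names are illustrative, the real call sites are in ehci-au1xxx.c and ohci-au1xxx.c below:

#include <linux/platform_device.h>
#include <asm/mach-au1x00/au1000.h>

static int example_hcd_probe(struct platform_device *pdev)
{
	/* power/clock the block up; this fails on Au1200 rev AB with
	 * coherent DMA, see au1200_coherency_bug() above */
	if (alchemy_usb_control(ALCHEMY_USB_EHCI0, 1))
		return -ENODEV;
	/* ... ioremap registers, usb_add_hcd(), ... */
	return 0;
}

static int example_hcd_suspend(struct device *dev)
{
	/* gate the block again; resume calls alchemy_usb_control(..., 1) */
	alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
	return 0;
}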
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 65719e8d24e..18bafa99fe5 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -14,61 +14,9 @@
#include <linux/platform_device.h>
#include <asm/mach-au1x00/au1000.h>
-#define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG)
-#define USB_MCFG_PFEN (1<<31)
-#define USB_MCFG_RDCOMB (1<<30)
-#define USB_MCFG_SSDEN (1<<23)
-#define USB_MCFG_PHYPLLEN (1<<19)
-#define USB_MCFG_UCECLKEN (1<<18)
-#define USB_MCFG_EHCCLKEN (1<<17)
-#ifdef CONFIG_DMA_COHERENT
-#define USB_MCFG_UCAM (1<<7)
-#else
-#define USB_MCFG_UCAM (0)
-#endif
-#define USB_MCFG_EBMEN (1<<3)
-#define USB_MCFG_EMEMEN (1<<2)
-
-#define USBH_ENABLE_CE (USB_MCFG_PHYPLLEN | USB_MCFG_EHCCLKEN)
-#define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \
- USBH_ENABLE_CE | USB_MCFG_SSDEN | \
- USB_MCFG_UCAM | USB_MCFG_EBMEN | \
- USB_MCFG_EMEMEN)
-
-#define USBH_DISABLE (USB_MCFG_EBMEN | USB_MCFG_EMEMEN)
extern int usb_disabled(void);
-static void au1xxx_start_ehc(void)
-{
- /* enable clock to EHCI block and HS PHY PLL*/
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* enable EHCI mmio */
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-}
-
-static void au1xxx_stop_ehc(void)
-{
- unsigned long c;
-
- /* Disable mem */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* Disable EHC clock. If the HS PHY is unused disable it too. */
- c = au_readl(USB_HOST_CONFIG) & ~USB_MCFG_EHCCLKEN;
- if (!(c & USB_MCFG_UCECLKEN)) /* UDC disabled? */
- c &= ~USB_MCFG_PHYPLLEN; /* yes: disable HS PHY PLL */
- au_writel(c, USB_HOST_CONFIG);
- au_sync();
-}
-
static int au1xxx_ehci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -136,16 +84,6 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
if (usb_disabled())
return -ENODEV;
-#if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT)
- /* Au1200 AB USB does not support coherent memory */
- if (!(read_c0_prid() & 0xff)) {
- printk(KERN_INFO "%s: this is chip revision AB!\n", pdev->name);
- printk(KERN_INFO "%s: update your board or re-configure"
- " the kernel\n", pdev->name);
- return -ENODEV;
- }
-#endif
-
if (pdev->resource[1].flags != IORESOURCE_IRQ) {
pr_debug("resource[1] is not IORESOURCE_IRQ");
return -ENOMEM;
@@ -171,7 +109,11 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
goto err2;
}
- au1xxx_start_ehc();
+ if (alchemy_usb_control(ALCHEMY_USB_EHCI0, 1)) {
+ printk(KERN_INFO "%s: controller init failed!\n", pdev->name);
+ ret = -ENODEV;
+ goto err3;
+ }
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
@@ -187,7 +129,8 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
return ret;
}
- au1xxx_stop_ehc();
+ alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
+err3:
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
@@ -201,10 +144,10 @@ static int ehci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
+ alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- au1xxx_stop_ehc();
platform_set_drvdata(pdev, NULL);
return 0;
@@ -236,7 +179,7 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
// could save FLADJ in case of Vaux power loss
// ... we'd only use it to handle clock skew
- au1xxx_stop_ehc();
+ alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
return rc;
}
@@ -246,7 +189,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- au1xxx_start_ehc();
+ alchemy_usb_control(ALCHEMY_USB_EHCI0, 1);
// maybe restore FLADJ
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 59e81615e09..3ff9f82f726 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1224,7 +1224,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_hcd_sh_driver
#endif
-#ifdef CONFIG_SOC_AU1200
+#ifdef CONFIG_MIPS_ALCHEMY
#include "ehci-au1xxx.c"
#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
#endif
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 79a66d622f9..9037035ad1e 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
+#include <linux/module.h>
struct fsl_usb2_dev_data {
char *dr_mode; /* controller mode */
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 2c7fc830c9e..a7dc1e1d45f 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -11,6 +11,7 @@
#include <linux/usb.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1760.h>
#include <linux/usb/hcd.h>
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 944291e10f9..ba3a46b78b7 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -35,8 +35,7 @@ extern int usb_disabled(void);
static void at91_start_clock(void)
{
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_enable(hclk);
+ clk_enable(hclk);
clk_enable(iclk);
clk_enable(fclk);
clocked = 1;
@@ -46,8 +45,7 @@ static void at91_stop_clock(void)
{
clk_disable(fclk);
clk_disable(iclk);
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_disable(hclk);
+ clk_disable(hclk);
clocked = 0;
}
@@ -142,8 +140,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
iclk = clk_get(&pdev->dev, "ohci_clk");
fclk = clk_get(&pdev->dev, "uhpck");
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- hclk = clk_get(&pdev->dev, "hck0");
+ hclk = clk_get(&pdev->dev, "hclk");
at91_start_hc(pdev);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -155,8 +152,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
/* Error handling */
at91_stop_hc(pdev);
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_put(hclk);
+ clk_put(hclk);
clk_put(fclk);
clk_put(iclk);
@@ -192,8 +188,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_put(hclk);
+ clk_put(hclk);
clk_put(fclk);
clk_put(iclk);
fclk = iclk = hclk = NULL;
@@ -223,6 +218,156 @@ ohci_at91_start (struct usb_hcd *hcd)
return 0;
}
+static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
+{
+ if (port < 0 || port >= 2)
+ return;
+
+ gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
+}
+
+static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
+{
+ if (port < 0 || port >= 2)
+ return -EINVAL;
+
+ return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
+}
+
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+ int length = ohci_hub_status_data(hcd, buf);
+ int port;
+
+ for (port = 0; port < ARRAY_SIZE(pdata->overcurrent_pin); port++) {
+ if (pdata->overcurrent_changed[port]) {
+ if (!length)
+ length = 1;
+ buf[0] |= 1 << (port + 1);
+ }
+ }
+
+ return length;
+}
+
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+ struct usb_hub_descriptor *desc;
+ int ret = -EINVAL;
+ u32 *data = (u32 *)buf;
+
+ dev_dbg(hcd->self.controller,
+ "ohci_at91_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
+ hcd, typeReq, wValue, wIndex, buf, wLength);
+
+ switch (typeReq) {
+ case SetPortFeature:
+ if (wValue == USB_PORT_FEAT_POWER) {
+ dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
+ ohci_at91_usb_set_power(pdata, wIndex - 1, 1);
+ goto out;
+ }
+ break;
+
+ case ClearPortFeature:
+ switch (wValue) {
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: C_OVER_CURRENT\n");
+
+ if (wIndex == 1 || wIndex == 2) {
+ pdata->overcurrent_changed[wIndex-1] = 0;
+ pdata->overcurrent_status[wIndex-1] = 0;
+ }
+
+ goto out;
+
+ case USB_PORT_FEAT_OVER_CURRENT:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: OVER_CURRENT\n");
+
+ if (wIndex == 1 || wIndex == 2) {
+ pdata->overcurrent_status[wIndex-1] = 0;
+ }
+
+ goto out;
+
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: POWER\n");
+
+ if (wIndex == 1 || wIndex == 2) {
+ ohci_at91_usb_set_power(pdata, wIndex - 1, 0);
+ return 0;
+ }
+ }
+ break;
+ }
+
+ ret = ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ if (ret)
+ goto out;
+
+ switch (typeReq) {
+ case GetHubDescriptor:
+
+ /* update the hub's descriptor */
+
+ desc = (struct usb_hub_descriptor *)buf;
+
+ dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
+ desc->wHubCharacteristics);
+
+ /* remove the old configuration for power switching and
+ * over-current protection, and insert our new configuration
+ */
+
+ desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
+ desc->wHubCharacteristics |= cpu_to_le16(0x0001);
+
+ if (pdata->overcurrent_supported) {
+ desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
+ desc->wHubCharacteristics |= cpu_to_le16(0x0008|0x0001);
+ }
+
+ dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
+ desc->wHubCharacteristics);
+
+ return ret;
+
+ case GetPortStatus:
+ /* check port status */
+
+ dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
+
+ if (wIndex == 1 || wIndex == 2) {
+ if (!ohci_at91_usb_get_power(pdata, wIndex - 1)) {
+ *data &= ~cpu_to_le32(RH_PS_PPS);
+ }
+
+ if (pdata->overcurrent_changed[wIndex-1]) {
+ *data |= cpu_to_le32(RH_PS_OCIC);
+ }
+
+ if (pdata->overcurrent_status[wIndex-1]) {
+ *data |= cpu_to_le32(RH_PS_POCI);
+ }
+ }
+ }
+
+ out:
+ return ret;
+}
+
/*-------------------------------------------------------------------------*/
static const struct hc_driver ohci_at91_hc_driver = {
@@ -258,8 +403,8 @@ static const struct hc_driver ohci_at91_hc_driver = {
/*
* root hub support
*/
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
+ .hub_status_data = ohci_at91_hub_status_data,
+ .hub_control = ohci_at91_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
@@ -269,22 +414,71 @@ static const struct hc_driver ohci_at91_hc_driver = {
/*-------------------------------------------------------------------------*/
+static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ int val, gpio, port;
+
+ /* From the GPIO notifying the over-current situation, find
+ * out the corresponding port */
+ gpio = irq_to_gpio(irq);
+ for (port = 0; port < ARRAY_SIZE(pdata->overcurrent_pin); port++) {
+ if (pdata->overcurrent_pin[port] == gpio)
+ break;
+ }
+
+ if (port == ARRAY_SIZE(pdata->overcurrent_pin)) {
+ dev_err(&pdev->dev, "overcurrent interrupt from unknown GPIO\n");
+ return IRQ_HANDLED;
+ }
+
+ val = gpio_get_value(gpio);
+
+ /* When notified of an over-current situation, disable power
+ * on the corresponding port and mark it as being in an
+ * over-current state. */
+ if (!val) {
+ ohci_at91_usb_set_power(pdata, port, 0);
+ pdata->overcurrent_status[port] = 1;
+ pdata->overcurrent_changed[port] = 1;
+ }
+
+ dev_dbg(&pdev->dev, "overcurrent situation %s\n",
+ val ? "exited" : "notified");
+
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
{
struct at91_usbh_data *pdata = pdev->dev.platform_data;
int i;
if (pdata) {
- /* REVISIT make the driver support per-port power switching,
- * and also overcurrent detection. Here we assume the ports
- * are always powered while this driver is active, and use
- * active-low power switches.
- */
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
if (pdata->vbus_pin[i] <= 0)
continue;
gpio_request(pdata->vbus_pin[i], "ohci_vbus");
- gpio_direction_output(pdata->vbus_pin[i], 0);
+ ohci_at91_usb_set_power(pdata, i, 1);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
+ int ret;
+
+ if (pdata->overcurrent_pin[i] <= 0)
+ continue;
+ gpio_request(pdata->overcurrent_pin[i], "ohci_overcurrent");
+
+ ret = request_irq(gpio_to_irq(pdata->overcurrent_pin[i]),
+ ohci_hcd_at91_overcurrent_irq,
+ IRQF_SHARED, "ohci_overcurrent", pdev);
+ if (ret) {
+ gpio_free(pdata->overcurrent_pin[i]);
+ dev_warn(&pdev->dev, "cannot get GPIO IRQ for overcurrent\n");
+ }
}
}
@@ -301,9 +495,16 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
if (pdata->vbus_pin[i] <= 0)
continue;
- gpio_direction_output(pdata->vbus_pin[i], 1);
+ ohci_at91_usb_set_power(pdata, i, 0);
gpio_free(pdata->vbus_pin[i]);
}
+
+ for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
+ if (pdata->overcurrent_pin[i] <= 0)
+ continue;
+ free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
+ gpio_free(pdata->overcurrent_pin[i]);
+ }
}
device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 6b7bc50dfea..9b66df8278f 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -23,92 +23,9 @@
#include <asm/mach-au1x00/au1000.h>
-#ifndef CONFIG_SOC_AU1200
-
-#define USBH_ENABLE_BE (1<<0)
-#define USBH_ENABLE_C (1<<1)
-#define USBH_ENABLE_E (1<<2)
-#define USBH_ENABLE_CE (1<<3)
-#define USBH_ENABLE_RD (1<<4)
-
-#ifdef __LITTLE_ENDIAN
-#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
-#elif defined(__BIG_ENDIAN)
-#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
- USBH_ENABLE_BE)
-#else
-#error not byte order defined
-#endif
-
-#else /* Au1200 */
-
-#define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG)
-#define USB_MCFG_PFEN (1<<31)
-#define USB_MCFG_RDCOMB (1<<30)
-#define USB_MCFG_SSDEN (1<<23)
-#define USB_MCFG_OHCCLKEN (1<<16)
-#ifdef CONFIG_DMA_COHERENT
-#define USB_MCFG_UCAM (1<<7)
-#else
-#define USB_MCFG_UCAM (0)
-#endif
-#define USB_MCFG_OBMEN (1<<1)
-#define USB_MCFG_OMEMEN (1<<0)
-
-#define USBH_ENABLE_CE USB_MCFG_OHCCLKEN
-
-#define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \
- USBH_ENABLE_CE | USB_MCFG_SSDEN | \
- USB_MCFG_UCAM | \
- USB_MCFG_OBMEN | USB_MCFG_OMEMEN)
-
-#define USBH_DISABLE (USB_MCFG_OBMEN | USB_MCFG_OMEMEN)
-
-#endif /* Au1200 */
extern int usb_disabled(void);
-static void au1xxx_start_ohc(void)
-{
- /* enable host controller */
-#ifndef CONFIG_SOC_AU1200
- au_writel(USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* wait for reset complete (read register twice; see au1500 errata) */
- while (au_readl(USB_HOST_CONFIG),
- !(au_readl(USB_HOST_CONFIG) & USBH_ENABLE_RD))
- udelay(1000);
-
-#else /* Au1200 */
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(2000);
-#endif /* Au1200 */
-}
-
-static void au1xxx_stop_ohc(void)
-{
-#ifdef CONFIG_SOC_AU1200
- /* Disable mem */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-#endif
- /* Disable clock */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
-}
-
static int __devinit ohci_au1xxx_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
@@ -178,17 +95,6 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
if (usb_disabled())
return -ENODEV;
-#if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT)
- /* Au1200 AB USB does not support coherent memory */
- if (!(read_c0_prid() & 0xff)) {
- printk(KERN_INFO "%s: this is chip revision AB !!\n",
- pdev->name);
- printk(KERN_INFO "%s: update your board or re-configure "
- "the kernel\n", pdev->name);
- return -ENODEV;
- }
-#endif
-
if (pdev->resource[1].flags != IORESOURCE_IRQ) {
pr_debug("resource[1] is not IORESOURCE_IRQ\n");
return -ENOMEM;
@@ -214,7 +120,12 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
goto err2;
}
- au1xxx_start_ohc();
+ if (alchemy_usb_control(ALCHEMY_USB_OHCI0, 1)) {
+ printk(KERN_INFO "%s: controller init failed!\n", pdev->name);
+ ret = -ENODEV;
+ goto err3;
+ }
+
ohci_hcd_init(hcd_to_ohci(hcd));
ret = usb_add_hcd(hcd, pdev->resource[1].start,
@@ -224,7 +135,8 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
return ret;
}
- au1xxx_stop_ohc();
+ alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
+err3:
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
@@ -238,7 +150,7 @@ static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
- au1xxx_stop_ohc();
+ alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
@@ -275,7 +187,7 @@ static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- au1xxx_stop_ohc();
+ alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
bail:
spin_unlock_irqrestore(&ohci->lock, flags);
@@ -286,7 +198,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- au1xxx_start_ohc();
+ alchemy_usb_control(ALCHEMY_USB_OHCI0, 1);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
ohci_finish_controller_resume(hcd);
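The ohci-au1xxx hunks replace the driver's private USB_HOST_CONFIG register sequences with a single platform call, alchemy_usb_control(block, enable), presumably declared in the asm/mach-au1x00/au1000.h header the driver already includes. Its exact signature is inferred only from the call sites above; a minimal sketch of the consumer side, assuming it returns 0 on success:

	/* --- illustrative sketch, not part of the patch --- */
	#include <linux/platform_device.h>

	static int example_ohci_power(struct platform_device *pdev, int on)
	{
		int ret = alchemy_usb_control(ALCHEMY_USB_OHCI0, on);

		if (ret && on)
			dev_err(&pdev->dev, "OHCI block enable failed (%d)\n", ret);
		return ret;
	}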
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 9ad8bee22c1..0013db7bdf9 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -26,7 +26,7 @@
#include <mach/platform.h>
#include <mach/irqs.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#define USB_CTRL IO_ADDRESS(PNX4008_PWRMAN_BASE + 0x64)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 629a96813fd..27a3dec32fa 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "pci-quirks.h"
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 767af265e00..ba61dae9e4d 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include "../../wusbcore/wusbhc.h"
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 9546f6cd01f..1e141f755b2 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/uwb/umc.h>
#include "../../wusbcore/wusbhc.h"
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 431efe72b1f..430e88fd3f6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -20,6 +20,7 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/gfp.h>
#include <asm/unaligned.h>
#include "xhci.h"
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 9f51f88cc0f..ef98b38626f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "xhci.h"
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index a04b2ff9dd8..91cd85076a4 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
+#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index e5ce42bd316..ebd6189a501 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/usb.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 1c3afcc11bd..ad408251d95 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -9,6 +9,7 @@
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/time.h>
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 8bdf25a8b02..f9a3f62a83b 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -35,7 +35,7 @@
#include <mach/hardware.h>
#include <mach/memory.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <mach/cputype.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index b0176e4569e..61f4ee466df 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -41,12 +41,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#ifdef CONFIG_ARM
-#include <mach/hardware.h>
-#include <mach/memory.h>
-#include <asm/mach-types.h>
-#endif
-
#include <asm/uaccess.h>
#include "musb_core.h"
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
index 52733d9959b..fb644c107de 100644
--- a/drivers/usb/otg/gpio_vbus.c
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c
index fb7adeff9ff..307c27bc51e 100644
--- a/drivers/usb/otg/otg.c
+++ b/drivers/usb/otg/otg.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/device.h>
#include <linux/usb/otg.h>
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index 770d799d5af..0b0466728fd 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index aba201cb872..b43d07df4c4 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -47,6 +47,7 @@
#include <asm/unaligned.h>
#include <linux/tty.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index b9bb24729c9..aa9367f5b42 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -13,6 +13,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/slab.h>
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index 89460181d12..e0f76bb0591 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -22,6 +22,7 @@
#include <linux/usb.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "usb.h"
#include "transport.h"
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index fc310f75ead..93c1a4d86f5 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -43,6 +43,7 @@
*/
#include <linux/highmem.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 1deca07c826..37539c89e3b 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -3,6 +3,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/usb.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include "usb.h"
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index ff32390d61e..0e5c91c6187 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -46,6 +46,7 @@
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/usb/quirks.h>
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 23f0dd9c36d..1d10d5b8204 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -11,6 +11,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/storage.h>
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 7ec24e46b34..231009af65a 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -90,6 +90,7 @@
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/export.h>
#include "wusbhc.h"
static void wusbhc_devconnect_acked_work(struct work_struct *work);
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 0a57ff0a0b0..b8c72583c04 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -38,6 +38,7 @@
*/
#include <linux/usb/wusb.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "wusbhc.h"
/* Initialize the MMCIEs handling mechanism */
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index 39de3900ad2..59ff254dfb6 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -70,6 +70,7 @@
* wusbhc_rh_start_port_reset() ??? unimplemented
*/
#include <linux/slab.h>
+#include <linux/export.h>
#include "wusbhc.h"
/*
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index b60799b811c..371f61733f0 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/usb/ch9.h>
#include <linux/random.h>
+#include <linux/export.h>
#include "wusbhc.h"
static void wusbhc_set_gtk_callback(struct urb *urb);
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 0d1863c9edd..9e4a9246168 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -23,6 +23,7 @@
* FIXME: docs
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include "wusbhc.h"
#include "wa-hc.h"
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 2acc7f504c5..f0d546c5a08 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -61,6 +61,7 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "wusbhc.h"
#include "wa-hc.h"
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 419334568be..57c01ab09ad 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -84,6 +84,7 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
+#include <linux/export.h>
#include "wa-hc.h"
#include "wusbhc.h"
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
index a2eaa3c33b0..de81ebf5178 100644
--- a/drivers/uwb/est.c
+++ b/drivers/uwb/est.c
@@ -41,6 +41,7 @@
*/
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c
index da7b1d08003..b08d1c2ee3f 100644
--- a/drivers/uwb/i1480/dfu/dfu.c
+++ b/drivers/uwb/i1480/dfu/dfu.c
@@ -33,6 +33,7 @@
#include <linux/device.h>
#include <linux/uwb.h>
#include <linux/random.h>
+#include <linux/export.h>
/*
* i1480_rceb_check - Check RCEB for expected field values
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c
index 30acec74042..902b0f2f961 100644
--- a/drivers/uwb/ie.c
+++ b/drivers/uwb/ie.c
@@ -25,6 +25,7 @@
*/
#include <linux/slab.h>
+#include <linux/export.h>
#include "uwb-internal.h"
/**
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
index 90113bafefc..5241f1d0ef7 100644
--- a/drivers/uwb/lc-dev.c
+++ b/drivers/uwb/lc-dev.c
@@ -25,9 +25,11 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/random.h>
+#include <linux/stat.h>
#include "uwb-internal.h"
/* We initialize addresses to 0xff (invalid, as it is bcast) */
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index b4395f41a00..4d688c75080 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -36,6 +36,7 @@
#include <linux/etherdevice.h>
#include <linux/usb.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
index 697e56a5bcd..a269937be1b 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/uwb/neh.c
@@ -85,6 +85,7 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/export.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
index 99a19c19909..8ee7d90a8c6 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/uwb/pal.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/uwb.h>
+#include <linux/export.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/radio.c b/drivers/uwb/radio.c
index f0d55495f5e..d58dfecf9a7 100644
--- a/drivers/uwb/radio.c
+++ b/drivers/uwb/radio.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
#include <linux/uwb.h>
+#include <linux/export.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c
index 3de630b0f69..703228559e8 100644
--- a/drivers/uwb/reset.c
+++ b/drivers/uwb/reset.c
@@ -32,6 +32,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index 78c892233cf..0b0d8bce842 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -19,6 +19,7 @@
#include <linux/uwb.h>
#include <linux/slab.h>
#include <linux/random.h>
+#include <linux/export.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c
index 367aa12786b..cbb6a5e703d 100644
--- a/drivers/uwb/scan.c
+++ b/drivers/uwb/scan.c
@@ -36,6 +36,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include "uwb-internal.h"
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
index 5fad4e791b3..82a84d53120 100644
--- a/drivers/uwb/umc-bus.c
+++ b/drivers/uwb/umc-bus.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
+#include <linux/module.h>
#include <linux/uwb/umc.h>
#include <linux/pci.h>
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
index b2948ec5787..4613c13cd85 100644
--- a/drivers/uwb/umc-dev.c
+++ b/drivers/uwb/umc-dev.c
@@ -6,6 +6,7 @@
* This file is released under the GNU GPL v2.
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
diff --git a/drivers/uwb/umc-drv.c b/drivers/uwb/umc-drv.c
index 367b5eb85d6..26d0ae1816b 100644
--- a/drivers/uwb/umc-drv.c
+++ b/drivers/uwb/umc-drv.c
@@ -6,6 +6,7 @@
* This file is released under the GNU GPL v2.
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/uwb/umc.h>
int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c
index b221142446a..f48093e649e 100644
--- a/drivers/uwb/whci.c
+++ b/drivers/uwb/whci.c
@@ -7,6 +7,7 @@
*/
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index 75a39eab70c..a425d65d5ba 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -378,8 +378,8 @@ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var,
|| var->xoffset)
return -EINVAL;
} else {
- if (var->xoffset + var->xres > info->var.xres_virtual ||
- var->yoffset + var->yres > info->var.yres_virtual)
+ if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
}
info->var.xoffset = var->xoffset;
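This hunk, like the acornfb, arkfb, radeonfb and atmel_lcdfb hunks later in the patch, converges on the same rule for fb_pan_display(): take the offsets from the caller's var, but take the visible and virtual dimensions from the driver's already-validated info->var. A generic sketch of that bounds check, using only the fields the hunks themselves touch:

	/* --- illustrative sketch, not part of the patch --- */
	#include <linux/errno.h>
	#include <linux/fb.h>

	static int example_check_pan(const struct fb_var_screeninfo *var,
				     const struct fb_info *info)
	{
		if (var->xoffset + info->var.xres > info->var.xres_virtual ||
		    var->yoffset + info->var.yres > info->var.yres_virtual)
			return -EINVAL;
		return 0;
	}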
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 549b960667c..d83e967e4e1 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -259,6 +259,15 @@ config FB_TILEBLITTING
comment "Frame buffer hardware drivers"
depends on FB
+config FB_GRVGA
+ tristate "Aeroflex Gaisler framebuffer support"
+ depends on FB && SPARC
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
+
config FB_CIRRUS
tristate "Cirrus Logic support"
depends on FB && (ZORRO || PCI)
@@ -1744,7 +1753,7 @@ endchoice
config FB_AU1100
bool "Au1100 LCD Driver"
- depends on (FB = y) && MIPS && SOC_AU1100
+ depends on (FB = y) && MIPS_ALCHEMY
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1755,10 +1764,11 @@ config FB_AU1100
config FB_AU1200
bool "Au1200 LCD Driver"
- depends on (FB = y) && MIPS && SOC_AU1200
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ depends on (FB = y) && MIPS_ALCHEMY
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
help
This is the framebuffer driver for the AMD Au1200 SOC. It can drive
various panels and CRTs by passing in kernel cmd line option
@@ -2027,7 +2037,7 @@ config FB_TMIO_ACCELL
config FB_S3C
tristate "Samsung S3C framebuffer support"
- depends on FB && S3C_DEV_FB
+ depends on FB && (S3C_DEV_FB || S5P_DEV_FIMD0)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2110,6 +2120,22 @@ config FB_SM501
If unsure, say N.
+config FB_SMSCUFX
+ tristate "SMSC UFX6000/7000 USB Framebuffer support"
+ depends on FB && USB
+ select FB_MODE_HELPERS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+ ---help---
+ This is a kernel framebuffer driver for SMSC UFX USB devices.
+ Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
+ mplayer -vo fbdev. Supports both UFX6000 (USB 2.0) and UFX7000
+ (USB 3.0) devices.
+ To compile as a module, choose M here: the module name is smscufx.
+
config FB_UDL
tristate "Displaylink USB Framebuffer support"
depends on FB && USB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 8b83129e209..9b9d8fff773 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o
obj-$(CONFIG_FB_ARC) += arcfb.o
obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o
obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o
+obj-$(CONFIG_FB_GRVGA) += grvga.o
obj-$(CONFIG_FB_PM2) += pm2fb.o
obj-$(CONFIG_FB_PM3) += pm3fb.o
@@ -127,6 +128,7 @@ obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
obj-$(CONFIG_FB_PS3) += ps3fb.o
obj-$(CONFIG_FB_SM501) += sm501fb.o
obj-$(CONFIG_FB_UDL) += udlfb.o
+obj-$(CONFIG_FB_SMSCUFX) += smscufx.o
obj-$(CONFIG_FB_XILINX) += xilinxfb.o
obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6183a57eb69..b303f171506 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -850,9 +850,10 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
u_int y_bottom = var->yoffset;
if (!(var->vmode & FB_VMODE_YWRAP))
- y_bottom += var->yres;
+ y_bottom += info->var.yres;
- BUG_ON(y_bottom > var->yres_virtual);
+ if (y_bottom > info->var.yres_virtual)
+ return -EINVAL;
acornfb_update_dma(info, var);
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index cf03ad06714..2cda6ba0939 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,6 +447,10 @@ static int clcdfb_register(struct clcd_fb *fb)
goto out;
}
+ ret = clk_prepare(fb->clk);
+ if (ret)
+ goto free_clk;
+
fb->fb.device = &fb->dev->dev;
fb->fb.fix.mmio_start = fb->dev->res.start;
@@ -456,7 +460,7 @@ static int clcdfb_register(struct clcd_fb *fb)
if (!fb->regs) {
printk(KERN_ERR "CLCD: unable to remap registers\n");
ret = -ENOMEM;
- goto free_clk;
+ goto clk_unprep;
}
fb->fb.fbops = &clcdfb_ops;
@@ -530,6 +534,8 @@ static int clcdfb_register(struct clcd_fb *fb)
fb_dealloc_cmap(&fb->fb.cmap);
unmap:
iounmap(fb->regs);
+ clk_unprep:
+ clk_unprepare(fb->clk);
free_clk:
clk_put(fb->clk);
out:
@@ -595,6 +601,7 @@ static int clcdfb_remove(struct amba_device *dev)
if (fb->fb.cmap.len)
fb_dealloc_cmap(&fb->fb.cmap);
iounmap(fb->regs);
+ clk_unprepare(fb->clk);
clk_put(fb->clk);
fb->board->remove(fb);
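The amba-clcd hunk adds the clk_prepare()/clk_unprepare() step required by the common clock framework: a clock obtained with clk_get() must be prepared before it may be enabled, and the error path has to unwind in reverse order (hence the new clk_unprep label). A condensed sketch of the pairing; the function name is illustrative:

	/* --- illustrative sketch, not part of the patch --- */
	#include <linux/clk.h>

	static int example_clk_bringup(struct clk *clk)
	{
		int ret;

		ret = clk_prepare(clk);		/* may sleep */
		if (ret)
			return ret;

		ret = clk_enable(clk);		/* safe in atomic context */
		if (ret)
			clk_unprepare(clk);	/* unwind in reverse order */

		return ret;
	}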
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 8686429cbdf..555dd4c64f5 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -908,13 +908,14 @@ static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info
unsigned int offset;
/* Calculate the offset */
- if (var->bits_per_pixel == 0) {
- offset = (var->yoffset / 16) * (var->xres_virtual / 2) + (var->xoffset / 2);
+ if (info->var.bits_per_pixel == 0) {
+ offset = (var->yoffset / 16) * (info->var.xres_virtual / 2)
+ + (var->xoffset / 2);
offset = offset >> 2;
} else {
offset = (var->yoffset * info->fix.line_length) +
- (var->xoffset * var->bits_per_pixel / 8);
- offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 3);
+ (var->xoffset * info->var.bits_per_pixel / 8);
+ offset = offset >> ((info->var.bits_per_pixel == 4) ? 2 : 3);
}
/* Set the offset */
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 817ab60f753..63409c122ae 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -18,10 +18,11 @@
#include <linux/delay.h>
#include <linux/backlight.h>
#include <linux/gfp.h>
+#include <linux/module.h>
#include <mach/board.h>
#include <mach/cpu.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <video/atmel_lcdc.h>
@@ -39,7 +40,8 @@
| FBINFO_HWACCEL_YPAN)
static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
- struct fb_var_screeninfo *var)
+ struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
}
@@ -50,14 +52,16 @@ static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
| FBINFO_HWACCEL_YPAN)
static void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
- struct fb_var_screeninfo *var)
+ struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
u32 dma2dcfg;
u32 pixeloff;
- pixeloff = (var->xoffset * var->bits_per_pixel) & 0x1f;
+ pixeloff = (var->xoffset * info->var.bits_per_pixel) & 0x1f;
- dma2dcfg = ((var->xres_virtual - var->xres) * var->bits_per_pixel) / 8;
+ dma2dcfg = (info->var.xres_virtual - info->var.xres)
+ * info->var.bits_per_pixel / 8;
dma2dcfg |= pixeloff << ATMEL_LCDC_PIXELOFF_OFFSET;
lcdc_writel(sinfo, ATMEL_LCDC_DMA2DCFG, dma2dcfg);
@@ -249,14 +253,14 @@ static void atmel_lcdfb_update_dma(struct fb_info *info,
unsigned long dma_addr;
dma_addr = (fix->smem_start + var->yoffset * fix->line_length
- + var->xoffset * var->bits_per_pixel / 8);
+ + var->xoffset * info->var.bits_per_pixel / 8);
dma_addr &= ~3UL;
/* Set framebuffer DMA base address and pixel offset */
lcdc_writel(sinfo, ATMEL_LCDC_DMABADDR1, dma_addr);
- atmel_lcdfb_update_dma2d(sinfo, var);
+ atmel_lcdfb_update_dma2d(sinfo, var, info);
}
static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 32f8cf6200a..150684882ef 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -845,16 +845,16 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
{
struct radeonfb_info *rinfo = info->par;
- if ((var->xoffset + var->xres > var->xres_virtual)
- || (var->yoffset + var->yres > var->yres_virtual))
- return -EINVAL;
+ if ((var->xoffset + info->var.xres > info->var.xres_virtual)
+ || (var->yoffset + info->var.yres > info->var.yres_virtual))
+ return -EINVAL;
if (rinfo->asleep)
return 0;
radeon_fifo_wait(2);
- OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
- * var->bits_per_pixel / 8) & ~7);
+ OUTREG(CRTC_OFFSET, (var->yoffset * info->fix.line_length +
+ var->xoffset * info->var.bits_per_pixel / 8) & ~7);
return 0;
}
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 01a8fde67f2..649cb35de4e 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -110,12 +110,6 @@ static struct fb_var_screeninfo au1100fb_var __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct au1100fb_drv_info drv_info;
-
-static int nocursor = 0;
-module_param(nocursor, int, 0644);
-MODULE_PARM_DESC(nocursor, "cursor enable/disable");
-
/* fb_blank
* Blank the screen. Depending on the mode, the screen will be
* activated with the backlight color, or deactivated
@@ -132,7 +126,7 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
/* Turn on panel */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
#ifdef CONFIG_MIPS_PB1100
- if (drv_info.panel_idx == 1) {
+ if (fbdev->panel_idx == 1) {
au_writew(au_readw(PB1100_G_CONTROL)
| (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
PB1100_G_CONTROL);
@@ -147,7 +141,7 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
/* Turn off panel */
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
#ifdef CONFIG_MIPS_PB1100
- if (drv_info.panel_idx == 1) {
+ if (fbdev->panel_idx == 1) {
au_writew(au_readw(PB1100_G_CONTROL)
& ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
PB1100_G_CONTROL);
@@ -428,17 +422,6 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
return 0;
}
-/* fb_cursor
- * Used to disable cursor drawing...
- */
-int au1100fb_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
-{
- if (nocursor)
- return 0;
- else
- return -EINVAL; /* just to force soft_cursor() call */
-}
-
static struct fb_ops au1100fb_ops =
{
.owner = THIS_MODULE,
@@ -450,13 +433,53 @@ static struct fb_ops au1100fb_ops =
.fb_imageblit = cfb_imageblit,
.fb_rotate = au1100fb_fb_rotate,
.fb_mmap = au1100fb_fb_mmap,
- .fb_cursor = au1100fb_fb_cursor,
};
/*-------------------------------------------------------------------------*/
-/* AU1100 LCD controller device driver */
+static int au1100fb_setup(struct au1100fb_device *fbdev)
+{
+ char *this_opt, *options;
+ int num_panels = ARRAY_SIZE(known_lcd_panels);
+
+ if (num_panels <= 0) {
+ print_err("No LCD panels supported by driver!");
+ return -ENODEV;
+ }
+
+ if (fb_get_options(DRIVER_NAME, &options))
+ return -ENODEV;
+ if (!options)
+ return -ENODEV;
+
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ /* Panel option */
+ if (!strncmp(this_opt, "panel:", 6)) {
+ int i;
+ this_opt += 6;
+ for (i = 0; i < num_panels; i++) {
+ if (!strncmp(this_opt, known_lcd_panels[i].name,
+ strlen(this_opt))) {
+ fbdev->panel = &known_lcd_panels[i];
+ fbdev->panel_idx = i;
+ break;
+ }
+ }
+ if (i >= num_panels) {
+ print_warn("Panel '%s' not supported!", this_opt);
+ return -ENODEV;
+ }
+ }
+ /* Unsupported option */
+ else
+ print_warn("Unsupported option \"%s\"", this_opt);
+ }
+
+ print_info("Panel=%s", fbdev->panel->name);
+
+ return 0;
+}
static int __devinit au1100fb_drv_probe(struct platform_device *dev)
{
@@ -465,22 +488,21 @@ static int __devinit au1100fb_drv_probe(struct platform_device *dev)
unsigned long page;
u32 sys_clksrc;
- if (!dev)
- return -EINVAL;
-
/* Allocate new device private */
- if (!(fbdev = kzalloc(sizeof(struct au1100fb_device), GFP_KERNEL))) {
+ fbdev = kzalloc(sizeof(struct au1100fb_device), GFP_KERNEL);
+ if (!fbdev) {
print_err("fail to allocate device private record");
return -ENOMEM;
}
- fbdev->panel = &known_lcd_panels[drv_info.panel_idx];
+ if (au1100fb_setup(fbdev))
+ goto failed;
platform_set_drvdata(dev, (void *)fbdev);
/* Allocate region for our registers and map them */
- if (!(regs_res = platform_get_resource(to_platform_device(dev),
- IORESOURCE_MEM, 0))) {
+ regs_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!regs_res) {
print_err("fail to retrieve registers resource");
return -EFAULT;
}
@@ -500,13 +522,11 @@ static int __devinit au1100fb_drv_probe(struct platform_device *dev)
print_dbg("Register memory map at %p", fbdev->regs);
print_dbg("phys=0x%08x, size=%d", fbdev->regs_phys, fbdev->regs_len);
-
-
/* Allocate the framebuffer to the maximum screen size * nbr of video buffers */
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
- fbdev->fb_mem = dma_alloc_coherent(dev, PAGE_ALIGN(fbdev->fb_len),
+ fbdev->fb_mem = dma_alloc_coherent(&dev->dev, PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
@@ -525,7 +545,7 @@ static int __devinit au1100fb_drv_probe(struct platform_device *dev)
page < PAGE_ALIGN((unsigned long)fbdev->fb_mem + fbdev->fb_len);
page += PAGE_SIZE) {
#if CONFIG_DMA_NONCOHERENT
- SetPageReserved(virt_to_page(CAC_ADDR(page)));
+ SetPageReserved(virt_to_page(CAC_ADDR((void *)page)));
#else
SetPageReserved(virt_to_page(page));
#endif
@@ -578,7 +598,8 @@ failed:
release_mem_region(fbdev->regs_phys, fbdev->regs_len);
}
if (fbdev->fb_mem) {
- dma_free_noncoherent(dev, fbdev->fb_len, fbdev->fb_mem, fbdev->fb_phys);
+ dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem,
+ fbdev->fb_phys);
}
if (fbdev->info.cmap.len != 0) {
fb_dealloc_cmap(&fbdev->info.cmap);
@@ -608,7 +629,8 @@ int au1100fb_drv_remove(struct platform_device *dev)
release_mem_region(fbdev->regs_phys, fbdev->regs_len);
- dma_free_coherent(dev, PAGE_ALIGN(fbdev->fb_len), fbdev->fb_mem, fbdev->fb_phys);
+ dma_free_coherent(&dev->dev, PAGE_ALIGN(fbdev->fb_len), fbdev->fb_mem,
+ fbdev->fb_phys);
fb_dealloc_cmap(&fbdev->info.cmap);
kfree(fbdev->info.pseudo_palette);
@@ -675,101 +697,18 @@ static struct platform_driver au1100fb_driver = {
.resume = au1100fb_drv_resume,
};
-/*-------------------------------------------------------------------------*/
-
-/* Kernel driver */
-
-int au1100fb_setup(char *options)
-{
- char* this_opt;
- int num_panels = ARRAY_SIZE(known_lcd_panels);
- char* mode = NULL;
- int panel_idx = 0;
-
- if (num_panels <= 0) {
- print_err("No LCD panels supported by driver!");
- return -EFAULT;
- }
-
- if (options) {
- while ((this_opt = strsep(&options,",")) != NULL) {
- /* Panel option */
- if (!strncmp(this_opt, "panel:", 6)) {
- int i;
- this_opt += 6;
- for (i = 0; i < num_panels; i++) {
- if (!strncmp(this_opt,
- known_lcd_panels[i].name,
- strlen(this_opt))) {
- panel_idx = i;
- break;
- }
- }
- if (i >= num_panels) {
- print_warn("Panel %s not supported!", this_opt);
- }
- }
- if (!strncmp(this_opt, "nocursor", 8)) {
- this_opt += 8;
- nocursor = 1;
- print_info("Cursor disabled");
- }
- /* Mode option (only option that start with digit) */
- else if (isdigit(this_opt[0])) {
- mode = kstrdup(this_opt, GFP_KERNEL);
- if (!mode) {
- print_err("memory allocation failed");
- return -ENOMEM;
- }
- }
- /* Unsupported option */
- else {
- print_warn("Unsupported option \"%s\"", this_opt);
- }
- }
- }
-
- drv_info.panel_idx = panel_idx;
- drv_info.opt_mode = mode;
-
- print_info("Panel=%s Mode=%s",
- known_lcd_panels[drv_info.panel_idx].name,
- drv_info.opt_mode ? drv_info.opt_mode : "default");
-
- return 0;
-}
-
-int __init au1100fb_init(void)
+static int __init au1100fb_load(void)
{
- char* options;
- int ret;
-
- print_info("" DRIVER_DESC "");
-
- memset(&drv_info, 0, sizeof(drv_info));
-
- if (fb_get_options(DRIVER_NAME, &options))
- return -ENODEV;
-
- /* Setup driver with options */
- ret = au1100fb_setup(options);
- if (ret < 0) {
- print_err("Fail to setup driver");
- return ret;
- }
-
return platform_driver_register(&au1100fb_driver);
}
-void __exit au1100fb_cleanup(void)
+static void __exit au1100fb_unload(void)
{
platform_driver_unregister(&au1100fb_driver);
-
- kfree(drv_info.opt_mode);
}
-module_init(au1100fb_init);
-module_exit(au1100fb_cleanup);
+module_init(au1100fb_load);
+module_exit(au1100fb_unload);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/au1100fb.h b/drivers/video/au1100fb.h
index 164fe2f231e..12d9642d546 100644
--- a/drivers/video/au1100fb.h
+++ b/drivers/video/au1100fb.h
@@ -108,6 +108,7 @@ struct au1100fb_device {
unsigned char* fb_mem; /* FrameBuffer memory map */
size_t fb_len;
dma_addr_t fb_phys;
+ int panel_idx;
};
/********************************************************************/
@@ -364,11 +365,6 @@ static struct au1100fb_panel known_lcd_panels[] =
},
};
-struct au1100fb_drv_info {
- int panel_idx;
- char *opt_mode;
-};
-
/********************************************************************/
/* Inline helpers */
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 5dff32ac804..72005598040 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -46,18 +46,10 @@
#include <asm/mach-au1x00/au1000.h>
#include "au1200fb.h"
-#ifdef CONFIG_PM
-#include <asm/mach-au1x00/au1xxx_pm.h>
-#endif
-
-#ifndef CONFIG_FB_AU1200_DEVS
-#define CONFIG_FB_AU1200_DEVS 4
-#endif
-
#define DRIVER_NAME "au1200fb"
#define DRIVER_DESC "LCD controller driver for AU1200 processors"
-#define DEBUG 1
+#define DEBUG 0
#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg)
#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg)
@@ -150,7 +142,7 @@ struct au1200_lcd_iodata_t {
/* Private, per-framebuffer management information (independent of the panel itself) */
struct au1200fb_device {
- struct fb_info fb_info; /* FB driver info record */
+ struct fb_info *fb_info; /* FB driver info record */
int plane;
unsigned char* fb_mem; /* FrameBuffer memory map */
@@ -158,7 +150,6 @@ struct au1200fb_device {
dma_addr_t fb_phys;
};
-static struct au1200fb_device _au1200fb_devices[CONFIG_FB_AU1200_DEVS];
/********************************************************************/
/* LCD controller restrictions */
@@ -171,10 +162,18 @@ static struct au1200fb_device _au1200fb_devices[CONFIG_FB_AU1200_DEVS];
/* Default number of visible screen buffer to allocate */
#define AU1200FB_NBR_VIDEO_BUFFERS 1
+/* Default maximum number of fb devices to create */
+#define MAX_DEVICE_COUNT 4
+
+/* Default window configuration entry to use (see windows[]) */
+#define DEFAULT_WINDOW_INDEX 2
+
/********************************************************************/
+static struct fb_info *_au1200fb_infos[MAX_DEVICE_COUNT];
static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR;
-static int window_index = 2; /* default is zero */
+static int device_count = MAX_DEVICE_COUNT;
+static int window_index = DEFAULT_WINDOW_INDEX; /* default is zero */
static int panel_index = 2; /* default is zero */
static struct window_settings *win;
static struct panel_settings *panel;
@@ -205,12 +204,6 @@ struct window_settings {
extern int board_au1200fb_panel_init (void);
extern int board_au1200fb_panel_shutdown (void);
-#ifdef CONFIG_PM
-int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
- au1xxx_request_t request, void *data);
-au1xxx_power_dev_t *LCD_pm_dev;
-#endif
-
/*
* Default window configurations
*/
@@ -652,25 +645,6 @@ static struct panel_settings known_lcd_panels[] =
/********************************************************************/
-#ifdef CONFIG_PM
-static int set_brightness(unsigned int brightness)
-{
- unsigned int hi1, divider;
-
- /* limit brightness pwm duty to >= 30/1600 */
- if (brightness < 30) {
- brightness = 30;
- }
- divider = (lcd->pwmdiv & 0x3FFFF) + 1;
- hi1 = (lcd->pwmhi >> 16) + 1;
- hi1 = (((brightness & 0xFF) + 1) * divider >> 8);
- lcd->pwmhi &= 0xFFFF;
- lcd->pwmhi |= (hi1 << 16);
-
- return brightness;
-}
-#endif /* CONFIG_PM */
-
static int winbpp (unsigned int winctrl1)
{
int bits = 0;
@@ -712,8 +686,8 @@ static int fbinfo2index (struct fb_info *fb_info)
{
int i;
- for (i = 0; i < CONFIG_FB_AU1200_DEVS; ++i) {
- if (fb_info == (struct fb_info *)(&_au1200fb_devices[i].fb_info))
+ for (i = 0; i < device_count; ++i) {
+ if (fb_info == _au1200fb_infos[i])
return i;
}
printk("au1200fb: ERROR: fbinfo2index failed!\n");
@@ -962,7 +936,7 @@ static void au1200_setmode(struct au1200fb_device *fbdev)
lcd->window[plane].winctrl2 = ( 0
| LCD_WINCTRL2_CKMODE_00
| LCD_WINCTRL2_DBM
- | LCD_WINCTRL2_BX_N( fbdev->fb_info.fix.line_length)
+ | LCD_WINCTRL2_BX_N(fbdev->fb_info->fix.line_length)
| LCD_WINCTRL2_SCX_1
| LCD_WINCTRL2_SCY_1
) ;
@@ -1050,7 +1024,7 @@ static void au1200fb_update_fbinfo(struct fb_info *fbi)
static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
- struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi;
+ struct au1200fb_device *fbdev = fbi->par;
u32 pixclock;
int screen_size, plane;
@@ -1142,7 +1116,7 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
*/
static int au1200fb_fb_set_par(struct fb_info *fbi)
{
- struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi;
+ struct au1200fb_device *fbdev = fbi->par;
au1200fb_update_fbinfo(fbi);
au1200_setmode(fbdev);
@@ -1246,11 +1220,7 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int len;
unsigned long start=0, off;
- struct au1200fb_device *fbdev = (struct au1200fb_device *) info;
-
-#ifdef CONFIG_PM
- au1xxx_pm_access(LCD_pm_dev);
-#endif
+ struct au1200fb_device *fbdev = info->par;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
@@ -1461,10 +1431,6 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
int plane;
int val;
-#ifdef CONFIG_PM
- au1xxx_pm_access(LCD_pm_dev);
-#endif
-
plane = fbinfo2index(info);
print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane);
@@ -1536,9 +1502,11 @@ static struct fb_ops au1200fb_fb_ops = {
.fb_set_par = au1200fb_fb_set_par,
.fb_setcolreg = au1200fb_fb_setcolreg,
.fb_blank = au1200fb_fb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
.fb_sync = NULL,
.fb_ioctl = au1200fb_ioctl,
.fb_mmap = au1200fb_fb_mmap,
@@ -1561,10 +1529,9 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
{
- struct fb_info *fbi = &fbdev->fb_info;
+ struct fb_info *fbi = fbdev->fb_info;
int bpp;
- memset(fbi, 0, sizeof(struct fb_info));
fbi->fbops = &au1200fb_fb_ops;
bpp = winbpp(win->w[fbdev->plane].mode_winctrl1);
@@ -1623,24 +1590,36 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
/* AU1200 LCD controller device driver */
-static int au1200fb_drv_probe(struct platform_device *dev)
+static int __devinit au1200fb_drv_probe(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
+ struct fb_info *fbi = NULL;
unsigned long page;
- int bpp, plane, ret;
+ int bpp, plane, ret, irq;
- if (!dev)
- return -EINVAL;
+ /* shut gcc up */
+ ret = 0;
+ fbdev = NULL;
+
+ /* Kickstart the panel */
+ au1200_setpanel(panel);
- for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
+ for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
win->w[plane].xres = panel->Xres;
if (win->w[plane].yres == 0)
win->w[plane].yres = panel->Yres;
- fbdev = &_au1200fb_devices[plane];
- memset(fbdev, 0, sizeof(struct au1200fb_device));
+ fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
+ &dev->dev);
+ if (!fbi)
+ goto failed;
+
+ _au1200fb_infos[plane] = fbi;
+ fbdev = fbi->par;
+ fbdev->fb_info = fbi;
+
fbdev->plane = plane;
/* Allocate the framebuffer to the maximum screen size */
@@ -1673,30 +1652,31 @@ static int au1200fb_drv_probe(struct platform_device *dev)
goto failed;
/* Register new framebuffer */
- if ((ret = register_framebuffer(&fbdev->fb_info)) < 0) {
+ ret = register_framebuffer(fbi);
+ if (ret < 0) {
print_err("cannot register new framebuffer");
goto failed;
}
- au1200fb_fb_set_par(&fbdev->fb_info);
+ au1200fb_fb_set_par(fbi);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
if (plane == 0)
- if (fb_prepare_logo(&fbdev->fb_info, FB_ROTATE_UR)) {
+ if (fb_prepare_logo(fbi, FB_ROTATE_UR)) {
/* Start display and show logo on boot */
- fb_set_cmap(&fbdev->fb_info.cmap,
- &fbdev->fb_info);
-
- fb_show_logo(&fbdev->fb_info, FB_ROTATE_UR);
+ fb_set_cmap(&fbi->cmap, fbi);
+ fb_show_logo(fbi, FB_ROTATE_UR);
}
#endif
}
/* Now hook interrupt too */
- if ((ret = request_irq(AU1200_LCD_INT, au1200fb_handle_irq,
- IRQF_DISABLED | IRQF_SHARED, "lcd", (void *)dev)) < 0) {
+ irq = platform_get_irq(dev, 0);
+ ret = request_irq(irq, au1200fb_handle_irq,
+ IRQF_SHARED, "lcd", (void *)dev);
+ if (ret) {
print_err("fail to request interrupt line %d (err: %d)",
- AU1200_LCD_INT, ret);
+ irq, ret);
goto failed;
}
@@ -1705,84 +1685,108 @@ static int au1200fb_drv_probe(struct platform_device *dev)
failed:
/* NOTE: This only does the current plane/window that failed; others are still active */
if (fbdev->fb_mem)
- dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len),
+ dma_free_noncoherent(&dev->dev, PAGE_ALIGN(fbdev->fb_len),
fbdev->fb_mem, fbdev->fb_phys);
- if (fbdev->fb_info.cmap.len != 0)
- fb_dealloc_cmap(&fbdev->fb_info.cmap);
- if (fbdev->fb_info.pseudo_palette)
- kfree(fbdev->fb_info.pseudo_palette);
+ if (fbi) {
+ if (fbi->cmap.len != 0)
+ fb_dealloc_cmap(&fbi->cmap);
+ kfree(fbi->pseudo_palette);
+ }
if (plane == 0)
free_irq(AU1200_LCD_INT, (void*)dev);
return ret;
}
-static int au1200fb_drv_remove(struct platform_device *dev)
+static int __devexit au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
+ struct fb_info *fbi;
int plane;
- if (!dev)
- return -ENODEV;
-
/* Turn off the panel */
au1200_setpanel(NULL);
- for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane)
- {
- fbdev = &_au1200fb_devices[plane];
+ for (plane = 0; plane < device_count; ++plane) {
+ fbi = _au1200fb_infos[plane];
+ fbdev = fbi->par;
/* Clean up all probe data */
- unregister_framebuffer(&fbdev->fb_info);
+ unregister_framebuffer(fbi);
if (fbdev->fb_mem)
dma_free_noncoherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
fbdev->fb_mem, fbdev->fb_phys);
- if (fbdev->fb_info.cmap.len != 0)
- fb_dealloc_cmap(&fbdev->fb_info.cmap);
- if (fbdev->fb_info.pseudo_palette)
- kfree(fbdev->fb_info.pseudo_palette);
+ if (fbi->cmap.len != 0)
+ fb_dealloc_cmap(&fbi->cmap);
+ kfree(fbi->pseudo_palette);
+
+ framebuffer_release(fbi);
+ _au1200fb_infos[plane] = NULL;
}
- free_irq(AU1200_LCD_INT, (void *)dev);
+ free_irq(platform_get_irq(dev, 0), (void *)dev);
return 0;
}
#ifdef CONFIG_PM
-static int au1200fb_drv_suspend(struct platform_device *dev, u32 state)
+static int au1200fb_drv_suspend(struct device *dev)
{
- /* TODO */
+ au1200_setpanel(NULL);
+
+ lcd->outmask = 0;
+ au_sync();
+
return 0;
}
-static int au1200fb_drv_resume(struct platform_device *dev)
+static int au1200fb_drv_resume(struct device *dev)
{
- /* TODO */
+ struct fb_info *fbi;
+ int i;
+
+ /* Kickstart the panel */
+ au1200_setpanel(panel);
+
+ for (i = 0; i < device_count; i++) {
+ fbi = _au1200fb_infos[i];
+ au1200fb_fb_set_par(fbi);
+ }
+
return 0;
}
+
+static const struct dev_pm_ops au1200fb_pmops = {
+ .suspend = au1200fb_drv_suspend,
+ .resume = au1200fb_drv_resume,
+ .freeze = au1200fb_drv_suspend,
+ .thaw = au1200fb_drv_resume,
+};
+
+#define AU1200FB_PMOPS (&au1200fb_pmops)
+
+#else
+#define AU1200FB_PMOPS NULL
#endif /* CONFIG_PM */
static struct platform_driver au1200fb_driver = {
.driver = {
- .name = "au1200-lcd",
- .owner = THIS_MODULE,
+ .name = "au1200-lcd",
+ .owner = THIS_MODULE,
+ .pm = AU1200FB_PMOPS,
},
.probe = au1200fb_drv_probe,
- .remove = au1200fb_drv_remove,
-#ifdef CONFIG_PM
- .suspend = au1200fb_drv_suspend,
- .resume = au1200fb_drv_resume,
-#endif
+ .remove = __devexit_p(au1200fb_drv_remove),
};
/*-------------------------------------------------------------------------*/
/* Kernel driver */
-static void au1200fb_setup(void)
+static int au1200fb_setup(void)
{
- char* options = NULL;
- char* this_opt;
+ char *options = NULL;
+ char *this_opt, *endptr;
int num_panels = ARRAY_SIZE(known_lcd_panels);
int panel_idx = -1;
@@ -1827,70 +1831,42 @@ static void au1200fb_setup(void)
nohwcursor = 1;
}
- /* Unsupported option */
- else {
- print_warn("Unsupported option \"%s\"", this_opt);
+ else if (strncmp(this_opt, "devices:", 8) == 0) {
+ this_opt += 8;
+ device_count = simple_strtol(this_opt,
+ &endptr, 0);
+ if ((device_count < 0) ||
+ (device_count > MAX_DEVICE_COUNT))
+ device_count = MAX_DEVICE_COUNT;
}
- }
- }
-}
-#ifdef CONFIG_PM
-static int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
- au1xxx_request_t request, void *data) {
- int retval = -1;
- unsigned int d = 0;
- unsigned int brightness = 0;
-
- if (request == AU1XXX_PM_SLEEP) {
- board_au1200fb_panel_shutdown();
- }
- else if (request == AU1XXX_PM_WAKEUP) {
- if(dev->prev_state == SLEEP_STATE)
- {
- int plane;
- au1200_setpanel(panel);
- for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
- struct au1200fb_device *fbdev;
- fbdev = &_au1200fb_devices[plane];
- au1200fb_fb_set_par(&fbdev->fb_info);
+ else if (strncmp(this_opt, "wincfg:", 7) == 0) {
+ this_opt += 7;
+ window_index = simple_strtol(this_opt,
+ &endptr, 0);
+ if ((window_index < 0) ||
+ (window_index >= ARRAY_SIZE(windows)))
+ window_index = DEFAULT_WINDOW_INDEX;
}
- }
- d = *((unsigned int*)data);
- if(d <=10) brightness = 26;
- else if(d<=20) brightness = 51;
- else if(d<=30) brightness = 77;
- else if(d<=40) brightness = 102;
- else if(d<=50) brightness = 128;
- else if(d<=60) brightness = 153;
- else if(d<=70) brightness = 179;
- else if(d<=80) brightness = 204;
- else if(d<=90) brightness = 230;
- else brightness = 255;
- set_brightness(brightness);
- } else if (request == AU1XXX_PM_GETSTATUS) {
- return dev->cur_state;
- } else if (request == AU1XXX_PM_ACCESS) {
- if (dev->cur_state != SLEEP_STATE)
- return retval;
- else {
- au1200_setpanel(panel);
+ else if (strncmp(this_opt, "off", 3) == 0)
+ return 1;
+ /* Unsupported option */
+ else {
+ print_warn("Unsupported option \"%s\"", this_opt);
+ }
}
- } else if (request == AU1XXX_PM_IDLE) {
- } else if (request == AU1XXX_PM_CLEANUP) {
}
-
- return retval;
+ return 0;
}
-#endif
static int __init au1200fb_init(void)
{
print_info("" DRIVER_DESC "");
/* Setup driver with options */
- au1200fb_setup();
+ if (au1200fb_setup())
+ return -ENODEV;
/* Point to the panel selected */
panel = &known_lcd_panels[panel_index];
@@ -1899,17 +1875,6 @@ static int __init au1200fb_init(void)
printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
- /* Kickstart the panel, the framebuffers/windows come soon enough */
- au1200_setpanel(panel);
-
- #ifdef CONFIG_PM
- LCD_pm_dev = new_au1xxx_power_device("LCD", &au1200fb_pm_callback, NULL);
- if ( LCD_pm_dev == NULL)
- printk(KERN_INFO "Unable to create a power management device entry for the au1200fb.\n");
- else
- printk(KERN_INFO "Power management device entry for the au1200fb loaded.\n");
- #endif
-
return platform_driver_register(&au1200fb_driver);
}
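The au1200fb conversion above moves per-plane state out of a static array and into fb_info::par, allocated with framebuffer_alloc() and released with framebuffer_release(). A stripped-down sketch of that idiom; the struct and function names below are illustrative, not the driver's:

	/* --- illustrative sketch, not part of the patch --- */
	#include <linux/fb.h>

	struct example_fbdev {
		struct fb_info *fb_info;	/* back pointer */
		int plane;
	};

	static struct fb_info *example_fb_alloc(struct device *dev, int plane)
	{
		struct fb_info *fbi;
		struct example_fbdev *fbdev;

		fbi = framebuffer_alloc(sizeof(struct example_fbdev), dev);
		if (!fbi)
			return NULL;

		fbdev = fbi->par;	/* driver-private area follows fb_info */
		fbdev->fb_info = fbi;
		fbdev->plane = plane;
		return fbi;
	}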
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index c04b94da81f..1105fa1ed7f 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/backlight.h>
#include <linux/mfd/88pm860x.h>
+#include <linux/module.h>
#define MAX_BRIGHTNESS (0xFF)
#define MIN_BRIGHTNESS (0)
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index d1aee730d7d..dfb763e9147 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -13,6 +13,7 @@
#include <linux/backlight.h>
#include <linux/mfd/adp5520.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct adp5520_bl {
struct device *master;
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 183b6f63985..66bc74d9ce2 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pm.h>
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index d06886a2bfb..6c68a6899e8 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pm.h>
@@ -932,7 +931,6 @@ out:
out1:
backlight_device_unregister(bl);
out2:
- i2c_set_clientdata(client, NULL);
kfree(data);
return ret;
@@ -952,7 +950,6 @@ static int __devexit adp8870_remove(struct i2c_client *client)
&adp8870_bl_attr_group);
backlight_device_unregister(data->bl);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 9f0a491e2a0..7838a23fbdd 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -22,6 +22,7 @@
*/
#include <linux/wait.h>
+#include <linux/module.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/gpio.h>
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 62043f12a5a..d68f14bbb68 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -19,6 +19,7 @@
#include <linux/backlight.h>
#include <linux/mfd/da903x.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DA9030_WLED_CONTROL 0x25
#define DA9030_WLED_CP_EN (1 << 6)
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index b0582917f0c..c74a6f4baa1 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/fb.h>
#include <linux/backlight.h>
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 8c6befd65a3..adb191466d6 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -56,7 +56,7 @@ static int genericbl_get_intensity(struct backlight_device *bd)
* Called when the battery is low to limit the backlight intensity.
* If limit==0 clear any limit, otherwise limit the intensity
*/
-void corgibl_limit_intensity(int limit)
+void genericbl_limit_intensity(int limit)
{
struct backlight_device *bd = generic_backlight_device;
@@ -68,7 +68,7 @@ void corgibl_limit_intensity(int limit)
backlight_update_status(generic_backlight_device);
mutex_unlock(&bd->ops_lock);
}
-EXPORT_SYMBOL(corgibl_limit_intensity);
+EXPORT_SYMBOL(genericbl_limit_intensity);
static const struct backlight_ops genericbl_ops = {
.options = BL_CORE_SUSPENDRESUME,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 98ad3e5f7c8..4f5d1c4cb6a 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/slab.h>
@@ -52,15 +53,11 @@ static void l4f00242t03_lcd_init(struct spi_device *spi)
dev_dbg(&spi->dev, "initializing LCD\n");
- if (priv->io_reg) {
- regulator_set_voltage(priv->io_reg, 1800000, 1800000);
- regulator_enable(priv->io_reg);
- }
+ regulator_set_voltage(priv->io_reg, 1800000, 1800000);
+ regulator_enable(priv->io_reg);
- if (priv->core_reg) {
- regulator_set_voltage(priv->core_reg, 2800000, 2800000);
- regulator_enable(priv->core_reg);
- }
+ regulator_set_voltage(priv->core_reg, 2800000, 2800000);
+ regulator_enable(priv->core_reg);
l4f00242t03_reset(pdata->reset_gpio);
@@ -78,11 +75,8 @@ static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
gpio_set_value(pdata->data_enable_gpio, 0);
- if (priv->io_reg)
- regulator_disable(priv->io_reg);
-
- if (priv->core_reg)
- regulator_disable(priv->core_reg);
+ regulator_disable(priv->io_reg);
+ regulator_disable(priv->core_reg);
}
static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
@@ -178,47 +172,34 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
priv->spi = spi;
- ret = gpio_request(pdata->reset_gpio, "lcd l4f00242t03 reset");
+ ret = gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH,
+ "lcd l4f00242t03 reset");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
goto err;
}
- ret = gpio_direction_output(pdata->reset_gpio, 1);
- if (ret)
- goto err2;
-
- ret = gpio_request(pdata->data_enable_gpio,
- "lcd l4f00242t03 data enable");
+ ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
+ "lcd l4f00242t03 data enable");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
goto err2;
}
- ret = gpio_direction_output(pdata->data_enable_gpio, 0);
- if (ret)
+ priv->io_reg = regulator_get(&spi->dev, "vdd");
+ if (IS_ERR(priv->io_reg)) {
+ dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
+ __func__);
goto err3;
-
- if (pdata->io_supply) {
- priv->io_reg = regulator_get(NULL, pdata->io_supply);
-
- if (IS_ERR(priv->io_reg)) {
- pr_err("%s: Unable to get the IO regulator\n",
- __func__);
- goto err3;
- }
}
- if (pdata->core_supply) {
- priv->core_reg = regulator_get(NULL, pdata->core_supply);
-
- if (IS_ERR(priv->core_reg)) {
- pr_err("%s: Unable to get the core regulator\n",
- __func__);
- goto err4;
- }
+ priv->core_reg = regulator_get(&spi->dev, "vcore");
+ if (IS_ERR(priv->core_reg)) {
+ dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
+ __func__);
+ goto err4;
}
priv->ld = lcd_device_register("l4f00242t03",
@@ -238,11 +219,9 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return 0;
err5:
- if (priv->core_reg)
- regulator_put(priv->core_reg);
+ regulator_put(priv->core_reg);
err4:
- if (priv->io_reg)
- regulator_put(priv->io_reg);
+ regulator_put(priv->io_reg);
err3:
gpio_free(pdata->data_enable_gpio);
err2:
@@ -266,10 +245,8 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
gpio_free(pdata->data_enable_gpio);
gpio_free(pdata->reset_gpio);
- if (priv->io_reg)
- regulator_put(priv->io_reg);
- if (priv->core_reg)
- regulator_put(priv->core_reg);
+ regulator_put(priv->io_reg);
+ regulator_put(priv->core_reg);
kfree(priv);
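The l4f00242t03 hunks collapse each gpio_request() + gpio_direction_output() pair into gpio_request_one(), which claims the line and sets its initial direction and level in one call, so the error unwind only needs gpio_free(). A minimal usage sketch; the GPIO number, label, and the active-low-reset assumption are placeholders:

	/* --- illustrative sketch, not part of the patch --- */
	#include <linux/gpio.h>

	/* Claim a reset line and drive it high (deasserted, assuming active-low). */
	static int example_claim_reset(unsigned int reset_gpio)
	{
		return gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
					"example reset");
	}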
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index 5934655eb1f..da9a5ce0ccb 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/lcd.h>
#include <linux/backlight.h>
+#include <linux/module.h>
#include "ld9040_gamma.h"
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 5d3cf33953a..4ec78cfe26e 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -17,6 +17,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/lms283gf05.h>
+#include <linux/module.h>
struct lms283gf05_state {
struct spi_device *spi;
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 07e8e273ced..7bbc802560e 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -17,6 +17,7 @@
#include <linux/backlight.h>
#include <linux/mfd/max8925.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define MAX_BRIGHTNESS (0xff)
#define MIN_BRIGHTNESS (0)
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 694e5aab0d6..e132157d854 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/lcd.h>
#include <linux/backlight.h>
+#include <linux/module.h>
#include "s6e63m0_gamma.h"
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index d4c6eb248ff..fbe9e9316f3 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/slab.h>
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 2464b910b59..56720fb476b 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -633,7 +633,7 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
goto out7;
}
- if (request_irq(info->irq, bfin_bf54x_irq_error, IRQF_DISABLED,
+ if (request_irq(info->irq, bfin_bf54x_irq_error, 0,
"PPI ERROR", info) < 0) {
printk(KERN_ERR DRIVER_NAME
": unable to request PPI ERROR IRQ\n");
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 23b6c4b62c7..c633068372c 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -695,7 +695,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
goto out7;
}
- ret = request_irq(info->irq, bfin_lq035q1_irq_error, IRQF_DISABLED,
+ ret = request_irq(info->irq, bfin_lq035q1_irq_error, 0,
DRIVER_NAME" PPI ERROR", info);
if (ret < 0) {
dev_err(&pdev->dev, "unable to request PPI ERROR IRQ\n");
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index d8de29f0dd8..d5e12675961 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -529,7 +529,7 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
goto out7;
}
- ret = request_irq(info->irq, bfin_t350mcqb_irq_error, IRQF_DISABLED,
+ ret = request_irq(info->irq, bfin_t350mcqb_irq_error, 0,
"PPI ERROR", info);
if (ret < 0) {
printk(KERN_ERR DRIVER_NAME
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 8486f541156..811dd7f6aa4 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -481,7 +481,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
goto out_4;
}
- if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, IRQF_DISABLED,
+ if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
"PPI ERROR", fbdev) < 0) {
dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
ret = -EFAULT;
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index caaa27d4a46..2c76fdf23f2 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "carminefb.h"
#include "carminefb_regs.h"
@@ -32,11 +33,11 @@
#define CARMINEFB_DEFAULT_VIDEO_MODE 1
static unsigned int fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
-module_param(fb_mode, uint, 444);
+module_param(fb_mode, uint, 0444);
MODULE_PARM_DESC(fb_mode, "Initial video mode as integer.");
static char *fb_mode_str;
-module_param(fb_mode_str, charp, 444);
+module_param(fb_mode_str, charp, 0444);
MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
/*
@@ -46,7 +47,7 @@ MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
* 0b010 Display 1
*/
static int fb_displays = CARMINE_USE_DISPLAY0 | CARMINE_USE_DISPLAY1;
-module_param(fb_displays, int, 444);
+module_param(fb_displays, int, 0444);
MODULE_PARM_DESC(fb_displays, "Bit mode, which displays are used");
struct carmine_hw {
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index e02764319ff..f56699d8122 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -24,6 +24,7 @@
#include <linux/ioport.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
/*
* Cursor position address
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 9075bea5587..7b2c40abae1 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -550,7 +550,7 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
/*
- * Parse user speficied options (`video=controlfb:')
+ * Parse user specified options (`video=controlfb:')
*/
static void __init control_setup(char *options)
{
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index fcdac872522..55f91d9ab00 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -35,6 +35,9 @@
#define DRIVER_NAME "da8xx_lcdc"
+#define LCD_VERSION_1 1
+#define LCD_VERSION_2 2
+
/* LCD Status Register */
#define LCD_END_OF_FRAME1 BIT(9)
#define LCD_END_OF_FRAME0 BIT(8)
@@ -49,7 +52,9 @@
#define LCD_DMA_BURST_4 0x2
#define LCD_DMA_BURST_8 0x3
#define LCD_DMA_BURST_16 0x4
-#define LCD_END_OF_FRAME_INT_ENA BIT(2)
+#define LCD_V1_END_OF_FRAME_INT_ENA BIT(2)
+#define LCD_V2_END_OF_FRAME0_INT_ENA BIT(8)
+#define LCD_V2_END_OF_FRAME1_INT_ENA BIT(9)
#define LCD_DUAL_FRAME_BUFFER_ENABLE BIT(0)
/* LCD Control Register */
@@ -65,12 +70,18 @@
#define LCD_MONO_8BIT_MODE BIT(9)
#define LCD_RASTER_ORDER BIT(8)
#define LCD_TFT_MODE BIT(7)
-#define LCD_UNDERFLOW_INT_ENA BIT(6)
-#define LCD_PL_ENABLE BIT(4)
+#define LCD_V1_UNDERFLOW_INT_ENA BIT(6)
+#define LCD_V2_UNDERFLOW_INT_ENA BIT(5)
+#define LCD_V1_PL_INT_ENA BIT(4)
+#define LCD_V2_PL_INT_ENA BIT(6)
#define LCD_MONOCHROME_MODE BIT(1)
#define LCD_RASTER_ENABLE BIT(0)
#define LCD_TFT_ALT_ENABLE BIT(23)
#define LCD_STN_565_ENABLE BIT(24)
+#define LCD_V2_DMA_CLK_EN BIT(2)
+#define LCD_V2_LIDD_CLK_EN BIT(1)
+#define LCD_V2_CORE_CLK_EN BIT(0)
+#define LCD_V2_LPP_B10 26
/* LCD Raster Timing 2 Register */
#define LCD_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
@@ -82,6 +93,7 @@
#define LCD_INVERT_FRAME_CLOCK BIT(20)
/* LCD Block */
+#define LCD_PID_REG 0x0
#define LCD_CTRL_REG 0x4
#define LCD_STAT_REG 0x8
#define LCD_RASTER_CTRL_REG 0x28
@@ -94,6 +106,17 @@
#define LCD_DMA_FRM_BUF_BASE_ADDR_1_REG 0x4C
#define LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG 0x50
+/* Interrupt Registers available only in Version 2 */
+#define LCD_RAW_STAT_REG 0x58
+#define LCD_MASKED_STAT_REG 0x5c
+#define LCD_INT_ENABLE_SET_REG 0x60
+#define LCD_INT_ENABLE_CLR_REG 0x64
+#define LCD_END_OF_INT_IND_REG 0x68
+
+/* Clock registers available only on Version 2 */
+#define LCD_CLK_ENABLE_REG 0x6c
+#define LCD_CLK_RESET_REG 0x70
+
#define LCD_NUM_BUFFERS 2
#define WSI_TIMEOUT 50
@@ -105,6 +128,8 @@
static resource_size_t da8xx_fb_reg_base;
static struct resource *lcdc_regs;
+static unsigned int lcd_revision;
+static irq_handler_t lcdc_irq_handler;
static inline unsigned int lcdc_read(unsigned int addr)
{
@@ -240,6 +265,7 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
u32 end;
u32 reg_ras;
u32 reg_dma;
+ u32 reg_int;
/* init reg to clear PLM (loading mode) fields */
reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
@@ -252,7 +278,14 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
end = par->dma_end;
reg_ras |= LCD_PALETTE_LOAD_MODE(DATA_ONLY);
- reg_dma |= LCD_END_OF_FRAME_INT_ENA;
+ if (lcd_revision == LCD_VERSION_1) {
+ reg_dma |= LCD_V1_END_OF_FRAME_INT_ENA;
+ } else {
+ reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
+ LCD_V2_END_OF_FRAME0_INT_ENA |
+ LCD_V2_END_OF_FRAME1_INT_ENA;
+ lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
+ }
reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
@@ -264,7 +297,14 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
end = start + par->palette_sz - 1;
reg_ras |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
- reg_ras |= LCD_PL_ENABLE;
+
+ if (lcd_revision == LCD_VERSION_1) {
+ reg_ras |= LCD_V1_PL_INT_ENA;
+ } else {
+ reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
+ LCD_V2_PL_INT_ENA;
+ lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
+ }
lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
@@ -348,6 +388,7 @@ static void lcd_cfg_vertical_sync(int back_porch, int pulse_width,
static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
{
u32 reg;
+ u32 reg_int;
reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(LCD_TFT_MODE |
LCD_MONO_8BIT_MODE |
@@ -375,7 +416,13 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
}
/* enable additional interrupts here */
- reg |= LCD_UNDERFLOW_INT_ENA;
+ if (lcd_revision == LCD_VERSION_1) {
+ reg |= LCD_V1_UNDERFLOW_INT_ENA;
+ } else {
+ reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
+ LCD_V2_UNDERFLOW_INT_ENA;
+ lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
+ }
lcdc_write(reg, LCD_RASTER_CTRL_REG);
@@ -413,18 +460,43 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
/* Set the Panel Width */
/* Pixels per line = (PPL + 1)*16 */
- /*0x3F in bits 4..9 gives max horisontal resolution = 1024 pixels*/
- width &= 0x3f0;
+ if (lcd_revision == LCD_VERSION_1) {
+ /*
+ * 0x3F in bits 4..9 gives max horizontal resolution = 1024
+ * pixels.
+ */
+ width &= 0x3f0;
+ } else {
+ /*
+ * 0x7F in bits 4..10 gives max horizontal resolution = 2048
+ * pixels.
+ */
+ width &= 0x7f0;
+ }
+
reg = lcdc_read(LCD_RASTER_TIMING_0_REG);
reg &= 0xfffffc00;
- reg |= ((width >> 4) - 1) << 4;
+ if (lcd_revision == LCD_VERSION_1) {
+ reg |= ((width >> 4) - 1) << 4;
+ } else {
+ width = (width >> 4) - 1;
+ reg |= ((width & 0x3f) << 4) | ((width & 0x40) >> 3);
+ }
lcdc_write(reg, LCD_RASTER_TIMING_0_REG);
/* Set the Panel Height */
+ /* Set bits 9:0 of Lines Per Pixel */
reg = lcdc_read(LCD_RASTER_TIMING_1_REG);
reg = ((height - 1) & 0x3ff) | (reg & 0xfffffc00);
lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
+ /* Set bit 10 of Lines Per Pixel */
+ if (lcd_revision == LCD_VERSION_2) {
+ reg = lcdc_read(LCD_RASTER_TIMING_2_REG);
+ reg |= ((height - 1) & 0x400) << 16;
+ lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
+ }
+
/* Set the Raster Order of the Frame Buffer */
reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(1 << 8);
if (raster_order)
@@ -511,6 +583,9 @@ static void lcd_reset(struct da8xx_fb_par *par)
/* DMA has to be disabled */
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
+
+ if (lcd_revision == LCD_VERSION_2)
+ lcdc_write(0, LCD_INT_ENABLE_SET_REG);
}
static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
@@ -523,6 +598,11 @@ static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
/* Configure the LCD clock divisor. */
lcdc_write(LCD_CLK_DIVISOR(div) |
(LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
+
+ if (lcd_revision == LCD_VERSION_2)
+ lcdc_write(LCD_V2_DMA_CLK_EN | LCD_V2_LIDD_CLK_EN |
+ LCD_V2_CORE_CLK_EN, LCD_CLK_ENABLE_REG);
+
}
static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
@@ -583,7 +663,63 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
return 0;
}
-static irqreturn_t lcdc_irq_handler(int irq, void *arg)
+/* IRQ handler for version 2 of LCDC */
+static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
+{
+ struct da8xx_fb_par *par = arg;
+ u32 stat = lcdc_read(LCD_MASKED_STAT_REG);
+ u32 reg_int;
+
+ if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
+ lcd_disable_raster();
+ lcdc_write(stat, LCD_MASKED_STAT_REG);
+ lcd_enable_raster();
+ } else if (stat & LCD_PL_LOAD_DONE) {
+ /*
+ * Must disable raster before changing state of any control bit.
+ * It must also be disabled before clearing the PL loading
+ * interrupt via the following write to the status register;
+ * otherwise one gets multiple PL done interrupts.
+ */
+ lcd_disable_raster();
+
+ lcdc_write(stat, LCD_MASKED_STAT_REG);
+
+ /* Disable PL completion interrupt */
+ reg_int = lcdc_read(LCD_INT_ENABLE_CLR_REG) |
+ (LCD_V2_PL_INT_ENA);
+ lcdc_write(reg_int, LCD_INT_ENABLE_CLR_REG);
+
+ /* Setup and start data loading mode */
+ lcd_blit(LOAD_DATA, par);
+ } else {
+ lcdc_write(stat, LCD_MASKED_STAT_REG);
+
+ if (stat & LCD_END_OF_FRAME0) {
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ par->vsync_flag = 1;
+ wake_up_interruptible(&par->vsync_wait);
+ }
+
+ if (stat & LCD_END_OF_FRAME1) {
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+ par->vsync_flag = 1;
+ wake_up_interruptible(&par->vsync_wait);
+ }
+ }
+
+ lcdc_write(0, LCD_END_OF_INT_IND_REG);
+ return IRQ_HANDLED;
+}
+
+/* IRQ handler for version 1 LCDC */
+static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
{
struct da8xx_fb_par *par = arg;
u32 stat = lcdc_read(LCD_STAT_REG);
@@ -606,7 +742,7 @@ static irqreturn_t lcdc_irq_handler(int irq, void *arg)
/* Disable PL completion interrupt */
reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
- reg_ras &= ~LCD_PL_ENABLE;
+ reg_ras &= ~LCD_V1_PL_INT_ENA;
lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
/* Setup and start data loading mode */
@@ -877,8 +1013,8 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var,
start = fix->smem_start +
new_var.yoffset * fix->line_length +
- new_var.xoffset * var->bits_per_pixel / 8;
- end = start + var->yres * fix->line_length - 1;
+ new_var.xoffset * fbi->var.bits_per_pixel / 8;
+ end = start + fbi->var.yres * fix->line_length - 1;
par->dma_start = start;
par->dma_end = end;
}
@@ -945,6 +1081,22 @@ static int __devinit fb_probe(struct platform_device *device)
if (ret)
goto err_clk_put;
+ /* Determine LCD IP Version */
+ switch (lcdc_read(LCD_PID_REG)) {
+ case 0x4C100102:
+ lcd_revision = LCD_VERSION_1;
+ break;
+ case 0x4F200800:
+ lcd_revision = LCD_VERSION_2;
+ break;
+ default:
+ dev_warn(&device->dev, "Unknown PID Reg value 0x%x, "
+ "defaulting to LCD revision 1\n",
+ lcdc_read(LCD_PID_REG));
+ lcd_revision = LCD_VERSION_1;
+ break;
+ }
+
for (i = 0, lcdc_info = known_lcd_panels;
i < ARRAY_SIZE(known_lcd_panels);
i++, lcdc_info++) {
@@ -1085,7 +1237,13 @@ static int __devinit fb_probe(struct platform_device *device)
}
#endif
- ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par);
+ if (lcd_revision == LCD_VERSION_1)
+ lcdc_irq_handler = lcdc_irq_handler_rev01;
+ else
+ lcdc_irq_handler = lcdc_irq_handler_rev02;
+
+ ret = request_irq(par->irq, lcdc_irq_handler, 0,
+ DRIVER_NAME, par);
if (ret)
goto irq_freq;
return 0;
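A compact restatement of the rev-2 pixels-per-line encoding used above, since
the bit split is easy to misread: the 7-bit PPL value (width/16 - 1) is
scattered, with bits 5:0 landing in register bits 9:4 and bit 6 in bit 3. The
standalone helper below only illustrates the arithmetic from the patch and is
not part of the driver:

#include <linux/types.h>

/* Rev-2 LCDC pixels-per-line field: ppl = width/16 - 1, 7 bits wide. */
static u32 da8xx_v2_ppl_bits(u32 width)
{
	u32 ppl = (width >> 4) - 1;

	return ((ppl & 0x3f) << 4) |	/* ppl bits 5:0 -> reg bits 9:4 */
	       ((ppl & 0x40) >> 3);	/* ppl bit 6   -> reg bit 3   */
}

For rev 1 the value is at most 6 bits (width is masked to 0x3f0) and sits
entirely in bits 9:4, which is why the driver keeps the two paths separate.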
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 40e5f17d1e4..2e830ec52a5 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -18,6 +18,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/clk.h>
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
index 27f2c57e06e..60a787fa32c 100644
--- a/drivers/video/fb-puv3.c
+++ b/drivers/video/fb-puv3.c
@@ -624,8 +624,8 @@ static int unifb_pan_display(struct fb_var_screeninfo *var,
|| var->xoffset)
return -EINVAL;
} else {
- if (var->xoffset + var->xres > info->var.xres_virtual ||
- var->yoffset + var->yres > info->var.yres_virtual)
+ if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
}
info->var.xoffset = var->xoffset;
diff --git a/drivers/video/fb_ddc.c b/drivers/video/fb_ddc.c
index 4a874c8d039..2b106f046fd 100644
--- a/drivers/video/fb_ddc.c
+++ b/drivers/video/fb_ddc.c
@@ -1,5 +1,5 @@
/*
- * driver/vide/fb_ddc.c - DDC/EDID read support.
+ * drivers/video/fb_ddc.c - DDC/EDID read support.
*
* Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
*
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/fb.h>
#include <linux/i2c-algo-bit.h>
#include <linux/slab.h>
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 32814e8800e..c27e153d888 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -223,8 +223,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
int i;
BUG_ON(!fbdefio);
- cancel_delayed_work(&info->deferred_work);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&info->deferred_work);
/* clear out the mapping that we setup */
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
diff --git a/drivers/video/fb_notify.c b/drivers/video/fb_notify.c
index 8c020389e4f..74c2da52888 100644
--- a/drivers/video/fb_notify.c
+++ b/drivers/video/fb_notify.c
@@ -12,6 +12,7 @@
*/
#include <linux/fb.h>
#include <linux/notifier.h>
+#include <linux/export.h>
static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 5aac00eb183..ad936295d8f 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1738,8 +1738,6 @@ void fb_set_suspend(struct fb_info *info, int state)
{
struct fb_event event;
- if (!lock_fb_info(info))
- return;
event.info = info;
if (state) {
fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
@@ -1748,7 +1746,6 @@ void fb_set_suspend(struct fb_info *info, int state)
info->state = FBINFO_STATE_RUNNING;
fb_notifier_call_chain(FB_EVENT_RESUME, &event);
}
- unlock_fb_info(info);
}
/**
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 4f57485f8c5..cef65574db6 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -493,7 +493,8 @@ static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
return num;
}
-static int get_std_timing(unsigned char *block, struct fb_videomode *mode)
+static int get_std_timing(unsigned char *block, struct fb_videomode *mode,
+ int ver, int rev)
{
int xres, yres = 0, refresh, ratio, i;
@@ -504,7 +505,11 @@ static int get_std_timing(unsigned char *block, struct fb_videomode *mode)
ratio = (block[1] & 0xc0) >> 6;
switch (ratio) {
case 0:
- yres = xres;
+ /* in EDID 1.3 the meaning of 0 changed to 16:10 (prior 1:1) */
+ if (ver < 1 || (ver == 1 && rev < 3))
+ yres = xres;
+ else
+ yres = (xres * 10)/16;
break;
case 1:
yres = (xres * 3)/4;
@@ -533,12 +538,12 @@ static int get_std_timing(unsigned char *block, struct fb_videomode *mode)
}
static int get_dst_timing(unsigned char *block,
- struct fb_videomode *mode)
+ struct fb_videomode *mode, int ver, int rev)
{
int j, num = 0;
for (j = 0; j < 6; j++, block += STD_TIMING_DESCRIPTION_SIZE)
- num += get_std_timing(block, &mode[num]);
+ num += get_std_timing(block, &mode[num], ver, rev);
return num;
}
@@ -599,6 +604,10 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
struct fb_videomode *mode, *m;
unsigned char *block;
int num = 0, i, first = 1;
+ int ver, rev;
+
+ ver = edid[EDID_STRUCT_VERSION];
+ rev = edid[EDID_STRUCT_REVISION];
mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
if (mode == NULL)
@@ -632,12 +641,12 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
DPRINTK(" Standard Timings\n");
block = edid + STD_TIMING_DESCRIPTIONS_START;
for (i = 0; i < STD_TIMING; i++, block += STD_TIMING_DESCRIPTION_SIZE)
- num += get_std_timing(block, &mode[num]);
+ num += get_std_timing(block, &mode[num], ver, rev);
block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
for (i = 0; i < 4; i++, block+= DETAILED_TIMING_DESCRIPTION_SIZE) {
if (block[0] == 0x00 && block[1] == 0x00 && block[3] == 0xfa)
- num += get_dst_timing(block + 5, &mode[num]);
+ num += get_dst_timing(block + 5, &mode[num], ver, rev);
}
/* Yikes, EDID data is totally useless */
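For clarity on the change above: EDID 1.3 redefined aspect-ratio code 0 from
1:1 to 16:10, so the vertical resolution derived from a standard timing
descriptor now depends on the EDID version and revision bytes. A small worked
example of the lookup (the helper name is illustrative; ratio codes 2 and 3
follow the EDID spec's 5:4 and 16:9):

/* yres from xres and the 2-bit aspect-ratio code of a standard timing. */
static int std_timing_yres(int xres, int ratio, int ver, int rev)
{
	switch (ratio) {
	case 0:
		/* 1:1 before EDID 1.3, 16:10 from 1.3 onwards */
		if (ver < 1 || (ver == 1 && rev < 3))
			return xres;
		return (xres * 10) / 16;
	case 1:
		return (xres * 3) / 4;		/* 4:3 */
	case 2:
		return (xres * 4) / 5;		/* 5:4 */
	default:
		return (xres * 9) / 16;		/* 16:9 */
	}
}

/* e.g. a 1280-wide descriptor: 1280 (pre-1.3), 800 (1.3+, code 0), 720 (16:9) */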
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 04251ce8918..67afa9c2289 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -399,9 +399,12 @@ static ssize_t store_fbstate(struct device *device,
state = simple_strtoul(buf, &last, 0);
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
console_lock();
fb_set_suspend(fb_info, (int)state);
console_unlock();
+ unlock_fb_info(fb_info);
return count;
}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 0acc7d65aea..a16beeb5f54 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -30,37 +30,40 @@
#include <linux/clk.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-
-#include <linux/of_platform.h>
+#include <linux/spinlock.h>
#include <sysdev/fsl_soc.h>
#include <linux/fsl-diu-fb.h>
#include "edid.h"
-/*
- * These parameters give default parameters
- * for video output 1024x768,
- * FIXME - change timing to proper amounts
- * hsync 31.5kHz, vsync 60Hz
- */
-static struct fb_videomode __devinitdata fsl_diu_default_mode = {
- .refresh = 60,
- .xres = 1024,
- .yres = 768,
- .pixclock = 15385,
- .left_margin = 160,
- .right_margin = 24,
- .upper_margin = 29,
- .lower_margin = 3,
- .hsync_len = 136,
- .vsync_len = 6,
- .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED
+#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */
+ /* 1 for plane 0, 2 for plane 1&2 each */
+
+/* HW cursor parameters */
+#define MAX_CURS 32
+
+/* INT_STATUS/INT_MASK field descriptions */
+#define INT_VSYNC 0x01 /* Vsync interrupt */
+#define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */
+#define INT_UNDRUN 0x04 /* Under run exception interrupt */
+#define INT_PARERR 0x08 /* Display parameters error interrupt */
+#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */
+
+struct diu_addr {
+ void *vaddr; /* Virtual address */
+ dma_addr_t paddr; /* Physical address */
+ __u32 offset;
};
+/*
+ * List of supported video modes
+ *
+ * The first entry is the default video mode. The remaining entries are
+ * in order of increasing resolution and frequency. The 320x240-60 mode is
+ * the initial AOI for the second and third planes.
+ */
static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
{
- .name = "1024x768-60",
.refresh = 60,
.xres = 1024,
.yres = 768,
@@ -75,7 +78,132 @@ static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
.vmode = FB_VMODE_NONINTERLACED
},
{
- .name = "1024x768-70",
+ .refresh = 60,
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 79440,
+ .left_margin = 16,
+ .right_margin = 16,
+ .upper_margin = 16,
+ .lower_margin = 5,
+ .hsync_len = 48,
+ .vsync_len = 1,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 60,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 39722,
+ .left_margin = 48,
+ .right_margin = 16,
+ .upper_margin = 33,
+ .lower_margin = 10,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 72,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 32052,
+ .left_margin = 128,
+ .right_margin = 24,
+ .upper_margin = 28,
+ .lower_margin = 9,
+ .hsync_len = 40,
+ .vsync_len = 3,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 75,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 31747,
+ .left_margin = 120,
+ .right_margin = 16,
+ .upper_margin = 16,
+ .lower_margin = 1,
+ .hsync_len = 64,
+ .vsync_len = 3,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 90,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 25057,
+ .left_margin = 120,
+ .right_margin = 32,
+ .upper_margin = 14,
+ .lower_margin = 25,
+ .hsync_len = 40,
+ .vsync_len = 14,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 100,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 22272,
+ .left_margin = 48,
+ .right_margin = 32,
+ .upper_margin = 17,
+ .lower_margin = 22,
+ .hsync_len = 128,
+ .vsync_len = 12,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 60,
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 33805,
+ .left_margin = 96,
+ .right_margin = 24,
+ .upper_margin = 10,
+ .lower_margin = 3,
+ .hsync_len = 72,
+ .vsync_len = 7,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 60,
+ .xres = 800,
+ .yres = 600,
+ .pixclock = 25000,
+ .left_margin = 88,
+ .right_margin = 40,
+ .upper_margin = 23,
+ .lower_margin = 1,
+ .hsync_len = 128,
+ .vsync_len = 4,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 60,
+ .xres = 854,
+ .yres = 480,
+ .pixclock = 31518,
+ .left_margin = 104,
+ .right_margin = 16,
+ .upper_margin = 13,
+ .lower_margin = 1,
+ .hsync_len = 88,
+ .vsync_len = 3,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
.refresh = 70,
.xres = 1024,
.yres = 768,
@@ -90,7 +218,6 @@ static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
.vmode = FB_VMODE_NONINTERLACED
},
{
- .name = "1024x768-75",
.refresh = 75,
.xres = 1024,
.yres = 768,
@@ -105,7 +232,34 @@ static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
.vmode = FB_VMODE_NONINTERLACED
},
{
- .name = "1280x1024-60",
+ .refresh = 60,
+ .xres = 1280,
+ .yres = 480,
+ .pixclock = 18939,
+ .left_margin = 353,
+ .right_margin = 47,
+ .upper_margin = 39,
+ .lower_margin = 4,
+ .hsync_len = 8,
+ .vsync_len = 2,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .refresh = 60,
+ .xres = 1280,
+ .yres = 720,
+ .pixclock = 13426,
+ .left_margin = 192,
+ .right_margin = 64,
+ .upper_margin = 22,
+ .lower_margin = 1,
+ .hsync_len = 136,
+ .vsync_len = 3,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
.refresh = 60,
.xres = 1280,
.yres = 1024,
@@ -120,7 +274,6 @@ static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
.vmode = FB_VMODE_NONINTERLACED
},
{
- .name = "1280x1024-70",
.refresh = 70,
.xres = 1280,
.yres = 1024,
@@ -135,7 +288,6 @@ static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
.vmode = FB_VMODE_NONINTERLACED
},
{
- .name = "1280x1024-75",
.refresh = 75,
.xres = 1280,
.yres = 1024,
@@ -150,40 +302,25 @@ static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
.vmode = FB_VMODE_NONINTERLACED
},
{
- .name = "320x240", /* for AOI only */
.refresh = 60,
- .xres = 320,
- .yres = 240,
- .pixclock = 15385,
- .left_margin = 0,
- .right_margin = 0,
- .upper_margin = 0,
- .lower_margin = 0,
- .hsync_len = 0,
- .vsync_len = 0,
- .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED
- },
- {
- .name = "1280x480-60",
- .refresh = 60,
- .xres = 1280,
- .yres = 480,
- .pixclock = 18939,
- .left_margin = 353,
- .right_margin = 47,
- .upper_margin = 39,
- .lower_margin = 4,
- .hsync_len = 8,
- .vsync_len = 2,
+ .xres = 1920,
+ .yres = 1080,
+ .pixclock = 5787,
+ .left_margin = 328,
+ .right_margin = 120,
+ .upper_margin = 34,
+ .lower_margin = 1,
+ .hsync_len = 208,
+ .vsync_len = 3,
.sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.vmode = FB_VMODE_NONINTERLACED
},
};
-static char *fb_mode = "1024x768-32@60";
+static char *fb_mode;
static unsigned long default_bpp = 32;
-static int monitor_port;
+static enum fsl_diu_monitor_port monitor_port;
+static char *monitor_string;
#if defined(CONFIG_NOT_COHERENT_CACHE)
static u8 *coherence_data;
@@ -201,15 +338,27 @@ struct fsl_diu_data {
void *dummy_aoi_virt;
unsigned int irq;
int fb_enabled;
- int monitor_port;
+ enum fsl_diu_monitor_port monitor_port;
+ struct diu __iomem *diu_reg;
+ spinlock_t reg_lock;
+ struct diu_addr ad;
+ struct diu_addr gamma;
+ struct diu_addr pallete;
+ struct diu_addr cursor;
+};
+
+enum mfb_index {
+ PLANE0 = 0, /* Plane 0, only one AOI that fills the screen */
+ PLANE1_AOI0, /* Plane 1, first AOI */
+ PLANE1_AOI1, /* Plane 1, second AOI */
+ PLANE2_AOI0, /* Plane 2, first AOI */
+ PLANE2_AOI1, /* Plane 2, second AOI */
};
struct mfb_info {
- int index;
- int type;
+ enum mfb_index index;
char *id;
int registered;
- int blank;
unsigned long pseudo_palette[16];
struct diu_ad *ad;
int cursor_reset;
@@ -223,63 +372,82 @@ struct mfb_info {
static struct mfb_info mfb_template[] = {
- { /* AOI 0 for plane 0 */
- .index = 0,
- .type = MFB_TYPE_OUTPUT,
- .id = "Panel0",
- .registered = 0,
- .count = 0,
- .x_aoi_d = 0,
- .y_aoi_d = 0,
+ {
+ .index = PLANE0,
+ .id = "Panel0",
+ .registered = 0,
+ .count = 0,
+ .x_aoi_d = 0,
+ .y_aoi_d = 0,
},
- { /* AOI 0 for plane 1 */
- .index = 1,
- .type = MFB_TYPE_OUTPUT,
- .id = "Panel1 AOI0",
- .registered = 0,
- .g_alpha = 0xff,
- .count = 0,
- .x_aoi_d = 0,
- .y_aoi_d = 0,
+ {
+ .index = PLANE1_AOI0,
+ .id = "Panel1 AOI0",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 0,
+ .y_aoi_d = 0,
},
- { /* AOI 1 for plane 1 */
- .index = 2,
- .type = MFB_TYPE_OUTPUT,
- .id = "Panel1 AOI1",
- .registered = 0,
- .g_alpha = 0xff,
- .count = 0,
- .x_aoi_d = 0,
- .y_aoi_d = 480,
+ {
+ .index = PLANE1_AOI1,
+ .id = "Panel1 AOI1",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 0,
+ .y_aoi_d = 480,
},
- { /* AOI 0 for plane 2 */
- .index = 3,
- .type = MFB_TYPE_OUTPUT,
- .id = "Panel2 AOI0",
- .registered = 0,
- .g_alpha = 0xff,
- .count = 0,
- .x_aoi_d = 640,
- .y_aoi_d = 0,
+ {
+ .index = PLANE2_AOI0,
+ .id = "Panel2 AOI0",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 640,
+ .y_aoi_d = 0,
},
- { /* AOI 1 for plane 2 */
- .index = 4,
- .type = MFB_TYPE_OUTPUT,
- .id = "Panel2 AOI1",
- .registered = 0,
- .g_alpha = 0xff,
- .count = 0,
- .x_aoi_d = 640,
- .y_aoi_d = 480,
+ {
+ .index = PLANE2_AOI1,
+ .id = "Panel2 AOI1",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 640,
+ .y_aoi_d = 480,
},
};
-static struct diu_hw dr = {
- .mode = MFB_MODE1,
- .reg_lock = __SPIN_LOCK_UNLOCKED(diu_hw.reg_lock),
-};
+/**
+ * fsl_diu_name_to_port - convert a port name to a monitor port enum
+ *
+ * Takes the name of a monitor port ("dvi", "lvds", or "dlvds") and returns
+ * the enum fsl_diu_monitor_port that corresponds to that string.
+ *
+ * For compatibility with older versions, a number ("0", "1", or "2") is also
+ * supported.
+ *
+ * If the string is unknown, DVI is assumed.
+ *
+ * If the particular port is not supported by the platform, another port
+ * (platform-specific) is chosen instead.
+ */
+static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
+{
+ enum fsl_diu_monitor_port port = FSL_DIU_PORT_DVI;
+ unsigned long val;
-static struct diu_pool pool;
+ if (s) {
+ if (!strict_strtoul(s, 10, &val) && (val <= 2))
+ port = (enum fsl_diu_monitor_port) val;
+ else if (strncmp(s, "lvds", 4) == 0)
+ port = FSL_DIU_PORT_LVDS;
+ else if (strncmp(s, "dlvds", 5) == 0)
+ port = FSL_DIU_PORT_DLVDS;
+ }
+
+ return diu_ops.valid_monitor_port(port);
+}
/**
* fsl_diu_alloc - allocate memory for the DIU
@@ -292,14 +460,9 @@ static void *fsl_diu_alloc(size_t size, phys_addr_t *phys)
{
void *virt;
- pr_debug("size=%zu\n", size);
-
virt = alloc_pages_exact(size, GFP_DMA | __GFP_ZERO);
- if (virt) {
+ if (virt)
*phys = virt_to_phys(virt);
- pr_debug("virt=%p phys=%llx\n", virt,
- (unsigned long long)*phys);
- }
return virt;
}
@@ -313,8 +476,6 @@ static void *fsl_diu_alloc(size_t size, phys_addr_t *phys)
*/
static void fsl_diu_free(void *virt, size_t size)
{
- pr_debug("virt=%p size=%zu\n", virt, size);
-
if (virt && size)
free_pages_exact(virt, size);
}
@@ -330,82 +491,72 @@ void wr_reg_wa(u32 *reg, u32 val)
} while (in_be32(reg) != val);
}
-static int fsl_diu_enable_panel(struct fb_info *info)
+static void fsl_diu_enable_panel(struct fb_info *info)
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
- struct diu *hw = dr.diu_reg;
struct diu_ad *ad = mfbi->ad;
struct fsl_diu_data *machine_data = mfbi->parent;
- int res = 0;
+ struct diu __iomem *hw = machine_data->diu_reg;
- pr_debug("enable_panel index %d\n", mfbi->index);
- if (mfbi->type != MFB_TYPE_OFF) {
- switch (mfbi->index) {
- case 0: /* plane 0 */
- if (hw->desc[0] != ad->paddr)
- wr_reg_wa(&hw->desc[0], ad->paddr);
- break;
- case 1: /* plane 1 AOI 0 */
- cmfbi = machine_data->fsl_diu_info[2]->par;
- if (hw->desc[1] != ad->paddr) { /* AOI0 closed */
- if (cmfbi->count > 0) /* AOI1 open */
- ad->next_ad =
- cpu_to_le32(cmfbi->ad->paddr);
- else
- ad->next_ad = 0;
- wr_reg_wa(&hw->desc[1], ad->paddr);
- }
- break;
- case 3: /* plane 2 AOI 0 */
- cmfbi = machine_data->fsl_diu_info[4]->par;
- if (hw->desc[2] != ad->paddr) { /* AOI0 closed */
- if (cmfbi->count > 0) /* AOI1 open */
- ad->next_ad =
- cpu_to_le32(cmfbi->ad->paddr);
- else
- ad->next_ad = 0;
- wr_reg_wa(&hw->desc[2], ad->paddr);
- }
- break;
- case 2: /* plane 1 AOI 1 */
- pmfbi = machine_data->fsl_diu_info[1]->par;
- ad->next_ad = 0;
- if (hw->desc[1] == machine_data->dummy_ad->paddr)
- wr_reg_wa(&hw->desc[1], ad->paddr);
- else /* AOI0 open */
- pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
- break;
- case 4: /* plane 2 AOI 1 */
- pmfbi = machine_data->fsl_diu_info[3]->par;
- ad->next_ad = 0;
- if (hw->desc[2] == machine_data->dummy_ad->paddr)
- wr_reg_wa(&hw->desc[2], ad->paddr);
- else /* AOI0 was open */
- pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
- break;
- default:
- res = -EINVAL;
- break;
+ switch (mfbi->index) {
+ case PLANE0:
+ if (hw->desc[0] != ad->paddr)
+ wr_reg_wa(&hw->desc[0], ad->paddr);
+ break;
+ case PLANE1_AOI0:
+ cmfbi = machine_data->fsl_diu_info[2]->par;
+ if (hw->desc[1] != ad->paddr) { /* AOI0 closed */
+ if (cmfbi->count > 0) /* AOI1 open */
+ ad->next_ad =
+ cpu_to_le32(cmfbi->ad->paddr);
+ else
+ ad->next_ad = 0;
+ wr_reg_wa(&hw->desc[1], ad->paddr);
}
- } else
- res = -EINVAL;
- return res;
+ break;
+ case PLANE2_AOI0:
+ cmfbi = machine_data->fsl_diu_info[4]->par;
+ if (hw->desc[2] != ad->paddr) { /* AOI0 closed */
+ if (cmfbi->count > 0) /* AOI1 open */
+ ad->next_ad =
+ cpu_to_le32(cmfbi->ad->paddr);
+ else
+ ad->next_ad = 0;
+ wr_reg_wa(&hw->desc[2], ad->paddr);
+ }
+ break;
+ case PLANE1_AOI1:
+ pmfbi = machine_data->fsl_diu_info[1]->par;
+ ad->next_ad = 0;
+ if (hw->desc[1] == machine_data->dummy_ad->paddr)
+ wr_reg_wa(&hw->desc[1], ad->paddr);
+ else /* AOI0 open */
+ pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
+ break;
+ case PLANE2_AOI1:
+ pmfbi = machine_data->fsl_diu_info[3]->par;
+ ad->next_ad = 0;
+ if (hw->desc[2] == machine_data->dummy_ad->paddr)
+ wr_reg_wa(&hw->desc[2], ad->paddr);
+ else /* AOI0 was open */
+ pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
+ break;
+ }
}
-static int fsl_diu_disable_panel(struct fb_info *info)
+static void fsl_diu_disable_panel(struct fb_info *info)
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
- struct diu *hw = dr.diu_reg;
struct diu_ad *ad = mfbi->ad;
struct fsl_diu_data *machine_data = mfbi->parent;
- int res = 0;
+ struct diu __iomem *hw = machine_data->diu_reg;
switch (mfbi->index) {
- case 0: /* plane 0 */
+ case PLANE0:
if (hw->desc[0] != machine_data->dummy_ad->paddr)
wr_reg_wa(&hw->desc[0], machine_data->dummy_ad->paddr);
break;
- case 1: /* plane 1 AOI 0 */
+ case PLANE1_AOI0:
cmfbi = machine_data->fsl_diu_info[2]->par;
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr);
@@ -414,7 +565,7 @@ static int fsl_diu_disable_panel(struct fb_info *info)
wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
/* close AOI 0 */
break;
- case 3: /* plane 2 AOI 0 */
+ case PLANE2_AOI0:
cmfbi = machine_data->fsl_diu_info[4]->par;
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr);
@@ -423,7 +574,7 @@ static int fsl_diu_disable_panel(struct fb_info *info)
wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
/* close AOI 0 */
break;
- case 2: /* plane 1 AOI 1 */
+ case PLANE1_AOI1:
pmfbi = machine_data->fsl_diu_info[1]->par;
if (hw->desc[1] != ad->paddr) {
/* AOI1 is not the first in the chain */
@@ -434,7 +585,7 @@ static int fsl_diu_disable_panel(struct fb_info *info)
wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
/* close AOI 1 */
break;
- case 4: /* plane 2 AOI 1 */
+ case PLANE2_AOI1:
pmfbi = machine_data->fsl_diu_info[3]->par;
if (hw->desc[2] != ad->paddr) {
/* AOI1 is not the first in the chain */
@@ -445,31 +596,26 @@ static int fsl_diu_disable_panel(struct fb_info *info)
wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
/* close AOI 1 */
break;
- default:
- res = -EINVAL;
- break;
}
-
- return res;
}
static void enable_lcdc(struct fb_info *info)
{
- struct diu *hw = dr.diu_reg;
struct mfb_info *mfbi = info->par;
struct fsl_diu_data *machine_data = mfbi->parent;
+ struct diu __iomem *hw = machine_data->diu_reg;
if (!machine_data->fb_enabled) {
- out_be32(&hw->diu_mode, dr.mode);
+ out_be32(&hw->diu_mode, MFB_MODE1);
machine_data->fb_enabled++;
}
}
static void disable_lcdc(struct fb_info *info)
{
- struct diu *hw = dr.diu_reg;
struct mfb_info *mfbi = info->par;
struct fsl_diu_data *machine_data = mfbi->parent;
+ struct diu __iomem *hw = machine_data->diu_reg;
if (machine_data->fb_enabled) {
out_be32(&hw->diu_mode, 0);
@@ -482,7 +628,8 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
{
struct mfb_info *lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
struct fsl_diu_data *machine_data = mfbi->parent;
- int available_height, upper_aoi_bottom, index = mfbi->index;
+ int available_height, upper_aoi_bottom;
+ enum mfb_index index = mfbi->index;
int lower_aoi_is_open, upper_aoi_is_open;
__u32 base_plane_width, base_plane_height, upper_aoi_height;
@@ -494,14 +641,14 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
if (mfbi->y_aoi_d < 0)
mfbi->y_aoi_d = 0;
switch (index) {
- case 0:
+ case PLANE0:
if (mfbi->x_aoi_d != 0)
mfbi->x_aoi_d = 0;
if (mfbi->y_aoi_d != 0)
mfbi->y_aoi_d = 0;
break;
- case 1: /* AOI 0 */
- case 3:
+ case PLANE1_AOI0:
+ case PLANE2_AOI0:
lower_aoi_mfbi = machine_data->fsl_diu_info[index+1]->par;
lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
if (var->xres > base_plane_width)
@@ -518,8 +665,8 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
if ((mfbi->y_aoi_d + var->yres) > available_height)
mfbi->y_aoi_d = available_height - var->yres;
break;
- case 2: /* AOI 1 */
- case 4:
+ case PLANE1_AOI1:
+ case PLANE2_AOI1:
upper_aoi_mfbi = machine_data->fsl_diu_info[index-1]->par;
upper_aoi_height =
machine_data->fsl_diu_info[index-1]->var.yres;
@@ -555,9 +702,6 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
static int fsl_diu_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- pr_debug("check_var xres: %d\n", var->xres);
- pr_debug("check_var yres: %d\n", var->yres);
-
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
if (var->yres_virtual < var->yres)
@@ -652,7 +796,7 @@ static void set_fix(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
- strncpy(fix->id, mfbi->id, strlen(mfbi->id));
+ strncpy(fix->id, mfbi->id, sizeof(fix->id));
fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
fix->type = FB_TYPE_PACKED_PIXELS;
fix->accel = FB_ACCEL_NONE;
@@ -666,45 +810,37 @@ static void update_lcdc(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu *hw;
+ struct diu __iomem *hw;
int i, j;
char __iomem *cursor_base, *gamma_table_base;
u32 temp;
- hw = dr.diu_reg;
-
- if (mfbi->type == MFB_TYPE_OFF) {
- fsl_diu_disable_panel(info);
- return;
- }
+ hw = machine_data->diu_reg;
diu_ops.set_monitor_port(machine_data->monitor_port);
- gamma_table_base = pool.gamma.vaddr;
- cursor_base = pool.cursor.vaddr;
+ gamma_table_base = machine_data->gamma.vaddr;
+ cursor_base = machine_data->cursor.vaddr;
/* Prep for DIU init - gamma table, cursor table */
for (i = 0; i <= 2; i++)
- for (j = 0; j <= 255; j++)
- *gamma_table_base++ = j;
+ for (j = 0; j <= 255; j++)
+ *gamma_table_base++ = j;
- diu_ops.set_gamma_table(machine_data->monitor_port, pool.gamma.vaddr);
+ diu_ops.set_gamma_table(machine_data->monitor_port,
+ machine_data->gamma.vaddr);
- pr_debug("update-lcdc: HW - %p\n Disabling DIU\n", hw);
disable_lcdc(info);
/* Program DIU registers */
- out_be32(&hw->gamma, pool.gamma.paddr);
- out_be32(&hw->cursor, pool.cursor.paddr);
+ out_be32(&hw->gamma, machine_data->gamma.paddr);
+ out_be32(&hw->cursor, machine_data->cursor.paddr);
out_be32(&hw->bgnd, 0x007F7F7F); /* BGND */
out_be32(&hw->bgnd_wb, 0); /* BGND_WB */
out_be32(&hw->disp_size, (var->yres << 16 | var->xres));
/* DISP SIZE */
- pr_debug("DIU xres: %d\n", var->xres);
- pr_debug("DIU yres: %d\n", var->yres);
-
out_be32(&hw->wb_size, 0); /* WB SIZE */
out_be32(&hw->wb_mem_addr, 0); /* WB MEM ADDR */
@@ -721,15 +857,6 @@ static void update_lcdc(struct fb_info *info)
out_be32(&hw->vsyn_para, temp);
- pr_debug("DIU right_margin - %d\n", var->right_margin);
- pr_debug("DIU left_margin - %d\n", var->left_margin);
- pr_debug("DIU hsync_len - %d\n", var->hsync_len);
- pr_debug("DIU upper_margin - %d\n", var->upper_margin);
- pr_debug("DIU lower_margin - %d\n", var->lower_margin);
- pr_debug("DIU vsync_len - %d\n", var->vsync_len);
- pr_debug("DIU HSYNC - 0x%08x\n", hw->hsyn_para);
- pr_debug("DIU VSYNC - 0x%08x\n", hw->vsyn_para);
-
diu_ops.set_pixel_clock(var->pixclock);
out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
@@ -746,14 +873,9 @@ static int map_video_memory(struct fb_info *info)
phys_addr_t phys;
u32 smem_len = info->fix.line_length * info->var.yres_virtual;
- pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
- pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
- pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
- pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
-
info->screen_base = fsl_diu_alloc(smem_len, &phys);
if (info->screen_base == NULL) {
- printk(KERN_ERR "Unable to allocate fb memory\n");
+ dev_err(info->dev, "unable to allocate fb memory\n");
return -ENOMEM;
}
mutex_lock(&info->mm_lock);
@@ -762,10 +884,6 @@ static int map_video_memory(struct fb_info *info)
mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
- pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
- info->fix.smem_start, info->fix.smem_len);
- pr_debug("screen base %p\n", info->screen_base);
-
return 0;
}
@@ -810,9 +928,9 @@ static int fsl_diu_set_par(struct fb_info *info)
struct mfb_info *mfbi = info->par;
struct fsl_diu_data *machine_data = mfbi->parent;
struct diu_ad *ad = mfbi->ad;
- struct diu *hw;
+ struct diu __iomem *hw;
- hw = dr.diu_reg;
+ hw = machine_data->diu_reg;
set_fix(info);
mfbi->cursor_reset = 1;
@@ -822,18 +940,16 @@ static int fsl_diu_set_par(struct fb_info *info)
if (len != info->fix.smem_len) {
if (info->fix.smem_start)
unmap_video_memory(info);
- pr_debug("SET PAR: smem_len = %d\n", info->fix.smem_len);
/* Memory allocation for framebuffer */
if (map_video_memory(info)) {
- printk(KERN_ERR "Unable to allocate fb memory 1\n");
+ dev_err(info->dev, "unable to allocate fb memory 1\n");
return -ENOMEM;
}
}
- ad->pix_fmt =
- diu_ops.get_pixel_format(var->bits_per_pixel,
- machine_data->monitor_port);
+ ad->pix_fmt = diu_ops.get_pixel_format(machine_data->monitor_port,
+ var->bits_per_pixel);
ad->addr = cpu_to_le32(info->fix.smem_start);
ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
var->xres_virtual) | mfbi->g_alpha;
@@ -851,14 +967,14 @@ static int fsl_diu_set_par(struct fb_info *info)
ad->ckmin_g = 255;
ad->ckmin_b = 255;
- if (mfbi->index == 0)
+ if (mfbi->index == PLANE0)
update_lcdc(info);
return 0;
}
static inline __u32 CNVT_TOHW(__u32 val, __u32 width)
{
- return ((val<<width) + 0x7FFF - val)>>16;
+ return ((val << width) + 0x7FFF - val) >> 16;
}
/*
@@ -870,8 +986,9 @@ static inline __u32 CNVT_TOHW(__u32 val, __u32 width)
* pseudo_palette in struct fb_info. For pseudocolor mode we have a limited
* color palette.
*/
-static int fsl_diu_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp, struct fb_info *info)
+static int fsl_diu_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int transp, struct fb_info *info)
{
int ret = 1;
@@ -906,9 +1023,6 @@ static int fsl_diu_setcolreg(unsigned regno, unsigned red, unsigned green,
ret = 0;
}
break;
- case FB_VISUAL_STATIC_PSEUDOCOLOR:
- case FB_VISUAL_PSEUDOCOLOR:
- break;
}
return ret;
@@ -944,37 +1058,6 @@ static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
return 0;
}
-/*
- * Blank the screen if blank_mode != 0, else unblank. Return 0 if blanking
- * succeeded, != 0 if un-/blanking failed.
- * blank_mode == 2: suspend vsync
- * blank_mode == 3: suspend hsync
- * blank_mode == 4: powerdown
- */
-static int fsl_diu_blank(int blank_mode, struct fb_info *info)
-{
- struct mfb_info *mfbi = info->par;
-
- mfbi->blank = blank_mode;
-
- switch (blank_mode) {
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- /* FIXME: fixes to enable_panel and enable lcdc needed */
- case FB_BLANK_NORMAL:
- /* fsl_diu_disable_panel(info);*/
- break;
- case FB_BLANK_POWERDOWN:
- /* disable_lcdc(info); */
- break;
- case FB_BLANK_UNBLANK:
- /* fsl_diu_enable_panel(info);*/
- break;
- }
-
- return 0;
-}
-
static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
@@ -989,25 +1072,29 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
if (!arg)
return -EINVAL;
switch (cmd) {
+ case MFB_SET_PIXFMT_OLD:
+ dev_warn(info->dev,
+ "MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
+ MFB_SET_PIXFMT_OLD);
case MFB_SET_PIXFMT:
if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
return -EFAULT;
ad->pix_fmt = pix_fmt;
- pr_debug("Set pixel format to 0x%08x\n", ad->pix_fmt);
break;
+ case MFB_GET_PIXFMT_OLD:
+ dev_warn(info->dev,
+ "MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
+ MFB_GET_PIXFMT_OLD);
case MFB_GET_PIXFMT:
pix_fmt = ad->pix_fmt;
if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
return -EFAULT;
- pr_debug("get pixel format 0x%08x\n", ad->pix_fmt);
break;
case MFB_SET_AOID:
if (copy_from_user(&aoi_d, buf, sizeof(aoi_d)))
return -EFAULT;
mfbi->x_aoi_d = aoi_d.x_aoi_d;
mfbi->y_aoi_d = aoi_d.y_aoi_d;
- pr_debug("set AOI display offset of index %d to (%d,%d)\n",
- mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
fsl_diu_check_var(&info->var, info);
fsl_diu_set_aoi(info);
break;
@@ -1016,14 +1103,11 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
aoi_d.y_aoi_d = mfbi->y_aoi_d;
if (copy_to_user(buf, &aoi_d, sizeof(aoi_d)))
return -EFAULT;
- pr_debug("get AOI display offset of index %d (%d,%d)\n",
- mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
break;
case MFB_GET_ALPHA:
global_alpha = mfbi->g_alpha;
if (copy_to_user(buf, &global_alpha, sizeof(global_alpha)))
return -EFAULT;
- pr_debug("get global alpha of index %d\n", mfbi->index);
break;
case MFB_SET_ALPHA:
/* set panel information */
@@ -1032,7 +1116,6 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
ad->src_size_g_alpha = (ad->src_size_g_alpha & (~0xff)) |
(global_alpha & 0xff);
mfbi->g_alpha = global_alpha;
- pr_debug("set global alpha for index %d\n", mfbi->index);
break;
case MFB_SET_CHROMA_KEY:
/* set panel winformation */
@@ -1060,27 +1143,9 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
ad->ckmin_g = ck.green_min;
ad->ckmin_b = ck.blue_min;
}
- pr_debug("set chroma key\n");
break;
- case FBIOGET_GWINFO:
- if (mfbi->type == MFB_TYPE_OFF)
- return -ENODEV;
- /* get graphic window information */
- if (copy_to_user(buf, ad, sizeof(*ad)))
- return -EFAULT;
- break;
- case FBIOGET_HWCINFO:
- pr_debug("FBIOGET_HWCINFO:0x%08x\n", FBIOGET_HWCINFO);
- break;
- case FBIOPUT_MODEINFO:
- pr_debug("FBIOPUT_MODEINFO:0x%08x\n", FBIOPUT_MODEINFO);
- break;
- case FBIOGET_DISPINFO:
- pr_debug("FBIOGET_DISPINFO:0x%08x\n", FBIOGET_DISPINFO);
- break;
-
default:
- printk(KERN_ERR "Unknown ioctl command (0x%08X)\n", cmd);
+ dev_err(info->dev, "unknown ioctl command (0x%08X)\n", cmd);
return -ENOIOCTLCMD;
}
@@ -1095,22 +1160,18 @@ static int fsl_diu_open(struct fb_info *info, int user)
int res = 0;
/* free boot splash memory on first /dev/fb0 open */
- if (!mfbi->index && diu_ops.release_bootmem)
+ if ((mfbi->index == PLANE0) && diu_ops.release_bootmem)
diu_ops.release_bootmem();
spin_lock(&diu_lock);
mfbi->count++;
if (mfbi->count == 1) {
- pr_debug("open plane index %d\n", mfbi->index);
fsl_diu_check_var(&info->var, info);
res = fsl_diu_set_par(info);
if (res < 0)
mfbi->count--;
- else {
- res = fsl_diu_enable_panel(info);
- if (res < 0)
- mfbi->count--;
- }
+ else
+ fsl_diu_enable_panel(info);
}
spin_unlock(&diu_lock);
@@ -1126,12 +1187,9 @@ static int fsl_diu_release(struct fb_info *info, int user)
spin_lock(&diu_lock);
mfbi->count--;
- if (mfbi->count == 0) {
- pr_debug("release plane index %d\n", mfbi->index);
- res = fsl_diu_disable_panel(info);
- if (res < 0)
- mfbi->count++;
- }
+ if (mfbi->count == 0)
+ fsl_diu_disable_panel(info);
+
spin_unlock(&diu_lock);
return res;
}
@@ -1141,7 +1199,6 @@ static struct fb_ops fsl_diu_ops = {
.fb_check_var = fsl_diu_check_var,
.fb_set_par = fsl_diu_set_par,
.fb_setcolreg = fsl_diu_setcolreg,
- .fb_blank = fsl_diu_blank,
.fb_pan_display = fsl_diu_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -1178,7 +1235,7 @@ static int __devinit install_fb(struct fb_info *info)
if (init_fbinfo(info))
return -EINVAL;
- if (mfbi->index == 0) { /* plane 0 */
+ if (mfbi->index == PLANE0) {
if (mfbi->edid_data) {
/* Now build modedb from EDID */
fb_edid_to_monspecs(mfbi->edid_data, &info->monspecs);
@@ -1192,43 +1249,23 @@ static int __devinit install_fb(struct fb_info *info)
} else {
aoi_mode = init_aoi_mode;
}
- pr_debug("mode used = %s\n", aoi_mode);
- rc = fb_find_mode(&info->var, info, aoi_mode, db, dbsize,
- &fsl_diu_default_mode, default_bpp);
- switch (rc) {
- case 1:
- pr_debug("using mode specified in @mode\n");
- break;
- case 2:
- pr_debug("using mode specified in @mode "
- "with ignored refresh rate\n");
- break;
- case 3:
- pr_debug("using mode default mode\n");
- break;
- case 4:
- pr_debug("using mode from list\n");
- break;
- default:
- pr_debug("rc = %d\n", rc);
- pr_debug("failed to find mode\n");
+ rc = fb_find_mode(&info->var, info, aoi_mode, db, dbsize, NULL,
+ default_bpp);
+ if (!rc) {
/*
* For plane 0 we continue and look into
* driver's internal modedb.
*/
- if (mfbi->index == 0 && mfbi->edid_data)
+ if ((mfbi->index == PLANE0) && mfbi->edid_data)
has_default_mode = 0;
else
return -EINVAL;
- break;
}
if (!has_default_mode) {
rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db,
- ARRAY_SIZE(fsl_diu_mode_db),
- &fsl_diu_default_mode,
- default_bpp);
- if (rc > 0 && rc < 5)
+ ARRAY_SIZE(fsl_diu_mode_db), NULL, default_bpp);
+ if (rc)
has_default_mode = 1;
}
@@ -1256,33 +1293,22 @@ static int __devinit install_fb(struct fb_info *info)
fb_videomode_to_var(&info->var, modedb);
}
- pr_debug("xres_virtual %d\n", info->var.xres_virtual);
- pr_debug("bits_per_pixel %d\n", info->var.bits_per_pixel);
-
- pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
- pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
-
- if (mfbi->type == MFB_TYPE_OFF)
- mfbi->blank = FB_BLANK_NORMAL;
- else
- mfbi->blank = FB_BLANK_UNBLANK;
-
if (fsl_diu_check_var(&info->var, info)) {
- printk(KERN_ERR "fb_check_var failed");
+ dev_err(info->dev, "fsl_diu_check_var failed\n");
+ unmap_video_memory(info);
fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
if (register_framebuffer(info) < 0) {
- printk(KERN_ERR "register_framebuffer failed");
+ dev_err(info->dev, "register_framebuffer failed\n");
unmap_video_memory(info);
fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
mfbi->registered = 1;
- printk(KERN_INFO "fb%d: %s fb device registered successfully.\n",
- info->node, info->fix.id);
+ dev_info(info->dev, "%s registered successfully\n", mfbi->id);
return 0;
}
@@ -1294,7 +1320,7 @@ static void uninstall_fb(struct fb_info *info)
if (!mfbi->registered)
return;
- if (mfbi->index == 0)
+ if (mfbi->index == PLANE0)
kfree(mfbi->edid_data);
unregister_framebuffer(info);
@@ -1307,20 +1333,20 @@ static void uninstall_fb(struct fb_info *info)
static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
{
- struct diu *hw = dr.diu_reg;
+ struct diu __iomem *hw = dev_id;
unsigned int status = in_be32(&hw->int_status);
if (status) {
/* This is the workaround for underrun */
if (status & INT_UNDRUN) {
out_be32(&hw->diu_mode, 0);
- pr_debug("Err: DIU occurs underrun!\n");
udelay(1);
out_be32(&hw->diu_mode, 1);
}
#if defined(CONFIG_NOT_COHERENT_CACHE)
else if (status & INT_VSYNC) {
unsigned int i;
+
for (i = 0; i < coherence_data_size;
i += d_cache_line_size)
__asm__ __volatile__ (
@@ -1333,43 +1359,38 @@ static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
return IRQ_NONE;
}
-static int request_irq_local(int irq)
+static int request_irq_local(struct fsl_diu_data *machine_data)
{
- unsigned long status, ints;
- struct diu *hw;
+ struct diu __iomem *hw = machine_data->diu_reg;
+ u32 ints;
int ret;
- hw = dr.diu_reg;
-
/* Read to clear the status */
- status = in_be32(&hw->int_status);
+ in_be32(&hw->int_status);
- ret = request_irq(irq, fsl_diu_isr, 0, "diu", NULL);
- if (ret)
- pr_info("Request diu IRQ failed.\n");
- else {
+ ret = request_irq(machine_data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
+ if (!ret) {
ints = INT_PARERR | INT_LS_BF_VS;
#if !defined(CONFIG_NOT_COHERENT_CACHE)
ints |= INT_VSYNC;
#endif
- if (dr.mode == MFB_MODE2 || dr.mode == MFB_MODE3)
- ints |= INT_VSYNC_WB;
/* Read to clear the status */
- status = in_be32(&hw->int_status);
+ in_be32(&hw->int_status);
out_be32(&hw->int_mask, ints);
}
+
return ret;
}
-static void free_irq_local(int irq)
+static void free_irq_local(struct fsl_diu_data *machine_data)
{
- struct diu *hw = dr.diu_reg;
+ struct diu __iomem *hw = machine_data->diu_reg;
/* Disable all LCDC interrupt */
out_be32(&hw->int_mask, 0x1f);
- free_irq(irq, NULL);
+ free_irq(machine_data->irq, NULL);
}
#ifdef CONFIG_PM
@@ -1406,49 +1427,42 @@ static int fsl_diu_resume(struct platform_device *ofdev)
static int allocate_buf(struct device *dev, struct diu_addr *buf, u32 size,
u32 bytes_align)
{
- u32 offset, ssize;
- u32 mask;
- dma_addr_t paddr = 0;
+ u32 offset;
+ dma_addr_t mask;
- ssize = size + bytes_align;
- buf->vaddr = dma_alloc_coherent(dev, ssize, &paddr, GFP_DMA |
- __GFP_ZERO);
+ buf->vaddr =
+ dma_alloc_coherent(dev, size + bytes_align, &buf->paddr,
+ GFP_DMA | __GFP_ZERO);
if (!buf->vaddr)
return -ENOMEM;
- buf->paddr = (__u32) paddr;
-
mask = bytes_align - 1;
- offset = (u32)buf->paddr & mask;
+ offset = buf->paddr & mask;
if (offset) {
buf->offset = bytes_align - offset;
- buf->paddr = (u32)buf->paddr + offset;
+ buf->paddr = buf->paddr + offset;
} else
buf->offset = 0;
+
return 0;
}
static void free_buf(struct device *dev, struct diu_addr *buf, u32 size,
u32 bytes_align)
{
- dma_free_coherent(dev, size + bytes_align,
- buf->vaddr, (buf->paddr - buf->offset));
- return;
+ dma_free_coherent(dev, size + bytes_align, buf->vaddr,
+ buf->paddr - buf->offset);
}
static ssize_t store_monitor(struct device *device,
struct device_attribute *attr, const char *buf, size_t count)
{
- int old_monitor_port;
- unsigned long val;
+ enum fsl_diu_monitor_port old_monitor_port;
struct fsl_diu_data *machine_data =
container_of(attr, struct fsl_diu_data, dev_attr);
- if (strict_strtoul(buf, 10, &val))
- return 0;
-
old_monitor_port = machine_data->monitor_port;
- machine_data->monitor_port = diu_ops.set_sysfs_monitor_port(val);
+ machine_data->monitor_port = fsl_diu_name_to_port(buf);
if (old_monitor_port != machine_data->monitor_port) {
/* All AOIs need adjust pixel format
@@ -1468,16 +1482,25 @@ static ssize_t show_monitor(struct device *device,
{
struct fsl_diu_data *machine_data =
container_of(attr, struct fsl_diu_data, dev_attr);
- return diu_ops.show_monitor_port(machine_data->monitor_port, buf);
+
+ switch (machine_data->monitor_port) {
+ case FSL_DIU_PORT_DVI:
+ return sprintf(buf, "DVI\n");
+ case FSL_DIU_PORT_LVDS:
+ return sprintf(buf, "Single-link LVDS\n");
+ case FSL_DIU_PORT_DLVDS:
+ return sprintf(buf, "Dual-link LVDS\n");
+ }
+
+ return 0;
}
-static int __devinit fsl_diu_probe(struct platform_device *ofdev)
+static int __devinit fsl_diu_probe(struct platform_device *pdev)
{
- struct device_node *np = ofdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
struct mfb_info *mfbi;
- phys_addr_t dummy_ad_addr;
+ phys_addr_t dummy_ad_addr = 0;
int ret, i, error = 0;
- struct resource res;
struct fsl_diu_data *machine_data;
int diu_mode;
@@ -1485,11 +1508,13 @@ static int __devinit fsl_diu_probe(struct platform_device *ofdev)
if (!machine_data)
return -ENOMEM;
+ spin_lock_init(&machine_data->reg_lock);
+
for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
machine_data->fsl_diu_info[i] =
- framebuffer_alloc(sizeof(struct mfb_info), &ofdev->dev);
+ framebuffer_alloc(sizeof(struct mfb_info), &pdev->dev);
if (!machine_data->fsl_diu_info[i]) {
- dev_err(&ofdev->dev, "cannot allocate memory\n");
+ dev_err(&pdev->dev, "cannot allocate memory\n");
ret = -ENOMEM;
goto error2;
}
@@ -1497,7 +1522,7 @@ static int __devinit fsl_diu_probe(struct platform_device *ofdev)
memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
mfbi->parent = machine_data;
- if (mfbi->index == 0) {
+ if (mfbi->index == PLANE0) {
const u8 *prop;
int len;
@@ -1509,60 +1534,49 @@ static int __devinit fsl_diu_probe(struct platform_device *ofdev)
}
}
- ret = of_address_to_resource(np, 0, &res);
- if (ret) {
- dev_err(&ofdev->dev, "could not obtain DIU address\n");
- goto error;
- }
- if (!res.start) {
- dev_err(&ofdev->dev, "invalid DIU address\n");
- goto error;
- }
- dev_dbg(&ofdev->dev, "%s, res.start: 0x%08x\n", __func__, res.start);
-
- dr.diu_reg = ioremap(res.start, sizeof(struct diu));
- if (!dr.diu_reg) {
- dev_err(&ofdev->dev, "Err: can't map DIU registers!\n");
+ machine_data->diu_reg = of_iomap(np, 0);
+ if (!machine_data->diu_reg) {
+ dev_err(&pdev->dev, "cannot map DIU registers\n");
ret = -EFAULT;
goto error2;
}
- diu_mode = in_be32(&dr.diu_reg->diu_mode);
- if (diu_mode != MFB_MODE1)
- out_be32(&dr.diu_reg->diu_mode, 0); /* disable DIU */
+ diu_mode = in_be32(&machine_data->diu_reg->diu_mode);
+ if (diu_mode == MFB_MODE0)
+ out_be32(&machine_data->diu_reg->diu_mode, 0); /* disable DIU */
/* Get the IRQ of the DIU */
machine_data->irq = irq_of_parse_and_map(np, 0);
if (!machine_data->irq) {
- dev_err(&ofdev->dev, "could not get DIU IRQ\n");
+ dev_err(&pdev->dev, "could not get DIU IRQ\n");
ret = -EINVAL;
goto error;
}
machine_data->monitor_port = monitor_port;
/* Area descriptor memory pool aligns to 64-bit boundary */
- if (allocate_buf(&ofdev->dev, &pool.ad,
+ if (allocate_buf(&pdev->dev, &machine_data->ad,
sizeof(struct diu_ad) * FSL_AOI_NUM, 8))
return -ENOMEM;
/* Get memory for Gamma Table - 32-byte aligned memory */
- if (allocate_buf(&ofdev->dev, &pool.gamma, 768, 32)) {
+ if (allocate_buf(&pdev->dev, &machine_data->gamma, 768, 32)) {
ret = -ENOMEM;
goto error;
}
/* For performance, cursor bitmap buffer aligns to 32-byte boundary */
- if (allocate_buf(&ofdev->dev, &pool.cursor, MAX_CURS * MAX_CURS * 2,
- 32)) {
+ if (allocate_buf(&pdev->dev, &machine_data->cursor,
+ MAX_CURS * MAX_CURS * 2, 32)) {
ret = -ENOMEM;
goto error;
}
i = ARRAY_SIZE(machine_data->fsl_diu_info);
- machine_data->dummy_ad = (struct diu_ad *)
- ((u32)pool.ad.vaddr + pool.ad.offset) + i;
- machine_data->dummy_ad->paddr = pool.ad.paddr +
+ machine_data->dummy_ad = (struct diu_ad *)((u32)machine_data->ad.vaddr +
+ machine_data->ad.offset) + i;
+ machine_data->dummy_ad->paddr = machine_data->ad.paddr +
i * sizeof(struct diu_ad);
machine_data->dummy_aoi_virt = fsl_diu_alloc(64, &dummy_ad_addr);
if (!machine_data->dummy_aoi_virt) {
@@ -1581,30 +1595,29 @@ static int __devinit fsl_diu_probe(struct platform_device *ofdev)
* Let DIU display splash screen if it was pre-initialized
* by the bootloader, set dummy area descriptor otherwise.
*/
- if (diu_mode != MFB_MODE1)
- out_be32(&dr.diu_reg->desc[0], machine_data->dummy_ad->paddr);
+ if (diu_mode == MFB_MODE0)
+ out_be32(&machine_data->diu_reg->desc[0],
+ machine_data->dummy_ad->paddr);
- out_be32(&dr.diu_reg->desc[1], machine_data->dummy_ad->paddr);
- out_be32(&dr.diu_reg->desc[2], machine_data->dummy_ad->paddr);
+ out_be32(&machine_data->diu_reg->desc[1], machine_data->dummy_ad->paddr);
+ out_be32(&machine_data->diu_reg->desc[2], machine_data->dummy_ad->paddr);
for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
machine_data->fsl_diu_info[i]->fix.smem_start = 0;
mfbi = machine_data->fsl_diu_info[i]->par;
- mfbi->ad = (struct diu_ad *)((u32)pool.ad.vaddr
- + pool.ad.offset) + i;
- mfbi->ad->paddr = pool.ad.paddr + i * sizeof(struct diu_ad);
+ mfbi->ad = (struct diu_ad *)((u32)machine_data->ad.vaddr
+ + machine_data->ad.offset) + i;
+ mfbi->ad->paddr =
+ machine_data->ad.paddr + i * sizeof(struct diu_ad);
ret = install_fb(machine_data->fsl_diu_info[i]);
if (ret) {
- dev_err(&ofdev->dev,
- "Failed to register framebuffer %d\n",
- i);
+ dev_err(&pdev->dev, "could not register fb %d\n", i);
goto error;
}
}
- if (request_irq_local(machine_data->irq)) {
- dev_err(machine_data->fsl_diu_info[0]->dev,
- "could not request irq for diu.");
+ if (request_irq_local(machine_data)) {
+ dev_err(&pdev->dev, "could not claim irq\n");
goto error;
}
@@ -1616,29 +1629,28 @@ static int __devinit fsl_diu_probe(struct platform_device *ofdev)
error = device_create_file(machine_data->fsl_diu_info[0]->dev,
&machine_data->dev_attr);
if (error) {
- dev_err(machine_data->fsl_diu_info[0]->dev,
- "could not create sysfs %s file\n",
+ dev_err(&pdev->dev, "could not create sysfs file %s\n",
machine_data->dev_attr.attr.name);
}
- dev_set_drvdata(&ofdev->dev, machine_data);
+ dev_set_drvdata(&pdev->dev, machine_data);
return 0;
error:
- for (i = ARRAY_SIZE(machine_data->fsl_diu_info);
- i > 0; i--)
- uninstall_fb(machine_data->fsl_diu_info[i - 1]);
- if (pool.ad.vaddr)
- free_buf(&ofdev->dev, &pool.ad,
+ for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
+ uninstall_fb(machine_data->fsl_diu_info[i]);
+
+ if (machine_data->ad.vaddr)
+ free_buf(&pdev->dev, &machine_data->ad,
sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (pool.gamma.vaddr)
- free_buf(&ofdev->dev, &pool.gamma, 768, 32);
- if (pool.cursor.vaddr)
- free_buf(&ofdev->dev, &pool.cursor, MAX_CURS * MAX_CURS * 2,
- 32);
+ if (machine_data->gamma.vaddr)
+ free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
+ if (machine_data->cursor.vaddr)
+ free_buf(&pdev->dev, &machine_data->cursor,
+ MAX_CURS * MAX_CURS * 2, 32);
if (machine_data->dummy_aoi_virt)
fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(dr.diu_reg);
+ iounmap(machine_data->diu_reg);
error2:
for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
@@ -1649,28 +1661,27 @@ error2:
return ret;
}
-
-static int fsl_diu_remove(struct platform_device *ofdev)
+static int fsl_diu_remove(struct platform_device *pdev)
{
struct fsl_diu_data *machine_data;
int i;
- machine_data = dev_get_drvdata(&ofdev->dev);
+ machine_data = dev_get_drvdata(&pdev->dev);
disable_lcdc(machine_data->fsl_diu_info[0]);
- free_irq_local(machine_data->irq);
- for (i = ARRAY_SIZE(machine_data->fsl_diu_info); i > 0; i--)
- uninstall_fb(machine_data->fsl_diu_info[i - 1]);
- if (pool.ad.vaddr)
- free_buf(&ofdev->dev, &pool.ad,
+ free_irq_local(machine_data);
+ for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
+ uninstall_fb(machine_data->fsl_diu_info[i]);
+ if (machine_data->ad.vaddr)
+ free_buf(&pdev->dev, &machine_data->ad,
sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (pool.gamma.vaddr)
- free_buf(&ofdev->dev, &pool.gamma, 768, 32);
- if (pool.cursor.vaddr)
- free_buf(&ofdev->dev, &pool.cursor, MAX_CURS * MAX_CURS * 2,
- 32);
+ if (machine_data->gamma.vaddr)
+ free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
+ if (machine_data->cursor.vaddr)
+ free_buf(&pdev->dev, &machine_data->cursor,
+ MAX_CURS * MAX_CURS * 2, 32);
if (machine_data->dummy_aoi_virt)
fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(dr.diu_reg);
+ iounmap(machine_data->diu_reg);
for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
if (machine_data->fsl_diu_info[i])
framebuffer_release(machine_data->fsl_diu_info[i]);
@@ -1692,8 +1703,7 @@ static int __init fsl_diu_setup(char *options)
if (!*opt)
continue;
if (!strncmp(opt, "monitor=", 8)) {
- if (!strict_strtoul(opt + 8, 10, &val) && (val <= 2))
- monitor_port = val;
+ monitor_port = fsl_diu_name_to_port(opt + 8);
} else if (!strncmp(opt, "bpp=", 4)) {
if (!strict_strtoul(opt + 4, 10, &val))
default_bpp = val;
@@ -1720,7 +1730,7 @@ MODULE_DEVICE_TABLE(of, fsl_diu_match);
static struct platform_driver fsl_diu_driver = {
.driver = {
- .name = "fsl_diu",
+ .name = "fsl-diu-fb",
.owner = THIS_MODULE,
.of_match_table = fsl_diu_match,
},
@@ -1746,48 +1756,54 @@ static int __init fsl_diu_init(void)
if (fb_get_options("fslfb", &option))
return -ENODEV;
fsl_diu_setup(option);
+#else
+ monitor_port = fsl_diu_name_to_port(monitor_string);
#endif
- printk(KERN_INFO "Freescale DIU driver\n");
+ pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
#ifdef CONFIG_NOT_COHERENT_CACHE
np = of_find_node_by_type(NULL, "cpu");
if (!np) {
- printk(KERN_ERR "Err: can't find device node 'cpu'\n");
+ pr_err("fsl-diu-fb: can't find 'cpu' device node\n");
return -ENODEV;
}
prop = of_get_property(np, "d-cache-size", NULL);
if (prop == NULL) {
+ pr_err("fsl-diu-fb: missing 'd-cache-size' property "
+ "in 'cpu' node\n");
of_node_put(np);
return -ENODEV;
}
- /* Freescale PLRU requires 13/8 times the cache size to do a proper
- displacement flush
+ /*
+ * Freescale PLRU requires 13/8 times the cache size to do a proper
+ * displacement flush
*/
- coherence_data_size = *prop * 13;
+ coherence_data_size = be32_to_cpup(prop) * 13;
coherence_data_size /= 8;
prop = of_get_property(np, "d-cache-line-size", NULL);
if (prop == NULL) {
+ pr_err("fsl-diu-fb: missing 'd-cache-line-size' property "
+ "in 'cpu' node\n");
of_node_put(np);
return -ENODEV;
}
- d_cache_line_size = *prop;
+ d_cache_line_size = be32_to_cpup(prop);
of_node_put(np);
coherence_data = vmalloc(coherence_data_size);
if (!coherence_data)
return -ENOMEM;
#endif
+
ret = platform_driver_register(&fsl_diu_driver);
if (ret) {
- printk(KERN_ERR
- "fsl-diu: failed to register platform driver\n");
+ pr_err("fsl-diu-fb: failed to register platform driver\n");
#if defined(CONFIG_NOT_COHERENT_CACHE)
vfree(coherence_data);
#endif
- iounmap(dr.diu_reg);
}
return ret;
}
@@ -1811,8 +1827,8 @@ module_param_named(mode, fb_mode, charp, 0);
MODULE_PARM_DESC(mode,
"Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
module_param_named(bpp, default_bpp, ulong, 0);
-MODULE_PARM_DESC(bpp, "Specify bit-per-pixel if not specified mode");
-module_param_named(monitor, monitor_port, int, 0);
-MODULE_PARM_DESC(monitor,
- "Specify the monitor port (0, 1 or 2) if supported by the platform");
+MODULE_PARM_DESC(bpp, "Specify bits-per-pixel if not specified in 'mode'");
+module_param_named(monitor, monitor_string, charp, 0);
+MODULE_PARM_DESC(monitor, "Specify the monitor port "
+ "(\"dvi\", \"lvds\", or \"dlvds\") if supported by the platform");
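
The fsl-diu-fb hunks above replace the numeric 'monitor' module parameter with a string ("dvi", "lvds" or "dlvds") that fsl_diu_name_to_port() maps to an enum fsl_diu_monitor_port; the body of that helper is not part of this hunk. A minimal sketch of such a lookup, assuming the port values used by show_monitor() above and assuming DVI as the fallback for unrecognized input:

    /*
     * Illustrative sketch only -- not the actual fsl-diu-fb helper.
     * Assumes <linux/string.h> and the enum fsl_diu_monitor_port values
     * referenced in show_monitor(); the DVI fallback is an assumption.
     */
    static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
    {
    	enum fsl_diu_monitor_port port = FSL_DIU_PORT_DVI;	/* assumed default */

    	if (s) {
    		if (!strncasecmp(s, "lvds", 4))
    			port = FSL_DIU_PORT_LVDS;
    		else if (!strncasecmp(s, "dlvds", 5))
    			port = FSL_DIU_PORT_DLVDS;
    	}
    	return port;
    }

With this, both the monitor= module parameter and the fslfb=monitor= boot option take port names instead of platform-specific numbers.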
diff --git a/drivers/video/g364fb.c b/drivers/video/g364fb.c
index d662317d85e..223896cc5f7 100644
--- a/drivers/video/g364fb.c
+++ b/drivers/video/g364fb.c
@@ -149,10 +149,11 @@ int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
static int g364fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- if (var->xoffset || var->yoffset + var->yres > var->yres_virtual)
+ if (var->xoffset ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
- *(unsigned int *) TOP_REG = var->yoffset * var->xres;
+ *(unsigned int *) TOP_REG = var->yoffset * info->var.xres;
return 0;
}
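
Several pan_display fixes in this series (g364fb above; gxt4500, hgafb, intelfb, neofb and mx3fb below) apply the same pattern: bounds-check the requested panning offsets against the driver's current mode in info->var rather than against the caller-supplied var. A generic sketch of that check -- my_pan_display is a hypothetical handler name, not code from any of these drivers:

    /* Reject offsets that would pan past the virtual resolution of the current mode. */
    static int my_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
    {
    	if (var->xoffset + info->var.xres > info->var.xres_virtual ||
    	    var->yoffset + info->var.yres > info->var.yres_virtual)
    		return -EINVAL;

    	/* ...program the hardware display start from var->xoffset / var->yoffset... */
    	return 0;
    }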
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
new file mode 100644
index 00000000000..f37e0253820
--- /dev/null
+++ b/drivers/video/grvga.c
@@ -0,0 +1,579 @@
+/*
+ * Driver for Aeroflex Gaisler SVGACTRL framebuffer device.
+ *
+ * 2011 (c) Aeroflex Gaisler AB
+ *
+ * Full documentation of the core can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Kristoffer Glembo <kristoffer@gaisler.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+
+struct grvga_regs {
+ u32 status; /* 0x00 */
+ u32 video_length; /* 0x04 */
+ u32 front_porch; /* 0x08 */
+ u32 sync_length; /* 0x0C */
+ u32 line_length; /* 0x10 */
+ u32 fb_pos; /* 0x14 */
+ u32 clk_vector[4]; /* 0x18 */
+ u32 clut; /* 0x20 */
+};
+
+struct grvga_par {
+ struct grvga_regs *regs;
+ u32 color_palette[16]; /* 16 entry pseudo palette used by fbcon in true color mode */
+ int clk_sel;
+ int fb_alloced; /* = 1 if framebuffer is allocated in main memory */
+};
+
+
+static const struct fb_videomode grvga_modedb[] = {
+ {
+ /* 640x480 @ 60 Hz */
+ NULL, 60, 640, 480, 40000, 48, 16, 39, 11, 96, 2,
+ 0, FB_VMODE_NONINTERLACED
+ }, {
+ /* 800x600 @ 60 Hz */
+ NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
+ 0, FB_VMODE_NONINTERLACED
+ }, {
+ /* 800x600 @ 72 Hz */
+ NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
+ 0, FB_VMODE_NONINTERLACED
+ }, {
+ /* 1024x768 @ 60 Hz */
+ NULL, 60, 1024, 768, 15385, 160, 24, 29, 3, 136, 6,
+ 0, FB_VMODE_NONINTERLACED
+ }
+ };
+
+static struct fb_fix_screeninfo grvga_fix __initdata = {
+ .id = "AG SVGACTRL",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_PSEUDOCOLOR,
+ .xpanstep = 0,
+ .ypanstep = 1,
+ .ywrapstep = 0,
+ .accel = FB_ACCEL_NONE,
+};
+
+static int grvga_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct grvga_par *par = info->par;
+ int i;
+
+ if (!var->xres)
+ var->xres = 1;
+ if (!var->yres)
+ var->yres = 1;
+ if (var->bits_per_pixel <= 8)
+ var->bits_per_pixel = 8;
+ else if (var->bits_per_pixel <= 16)
+ var->bits_per_pixel = 16;
+ else if (var->bits_per_pixel <= 24)
+ var->bits_per_pixel = 24;
+ else if (var->bits_per_pixel <= 32)
+ var->bits_per_pixel = 32;
+ else
+ return -EINVAL;
+
+ var->xres_virtual = var->xres;
+ var->yres_virtual = 2*var->yres;
+
+ if (info->fix.smem_len) {
+ if ((var->yres_virtual*var->xres_virtual*var->bits_per_pixel/8) > info->fix.smem_len)
+ return -ENOMEM;
+ }
+
+ /* The available pixel clocks can be read from these registers */
+ for (i = 0; i <= 3 ; i++) {
+ if (var->pixclock == par->regs->clk_vector[i])
+ break;
+ }
+ if (i <= 3)
+ par->clk_sel = i;
+ else
+ return -EINVAL;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ var->red = (struct fb_bitfield) {0, 8, 0}; /* offset, length, msb-right */
+ var->green = (struct fb_bitfield) {0, 8, 0};
+ var->blue = (struct fb_bitfield) {0, 8, 0};
+ var->transp = (struct fb_bitfield) {0, 0, 0};
+ break;
+ case 16:
+ var->red = (struct fb_bitfield) {11, 5, 0};
+ var->green = (struct fb_bitfield) {5, 6, 0};
+ var->blue = (struct fb_bitfield) {0, 5, 0};
+ var->transp = (struct fb_bitfield) {0, 0, 0};
+ break;
+ case 24:
+ case 32:
+ var->red = (struct fb_bitfield) {16, 8, 0};
+ var->green = (struct fb_bitfield) {8, 8, 0};
+ var->blue = (struct fb_bitfield) {0, 8, 0};
+ var->transp = (struct fb_bitfield) {24, 8, 0};
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int grvga_set_par(struct fb_info *info)
+{
+
+ u32 func = 0;
+ struct grvga_par *par = info->par;
+
+ __raw_writel(((info->var.yres - 1) << 16) | (info->var.xres - 1),
+ &par->regs->video_length);
+
+ __raw_writel((info->var.lower_margin << 16) | (info->var.right_margin),
+ &par->regs->front_porch);
+
+ __raw_writel((info->var.vsync_len << 16) | (info->var.hsync_len),
+ &par->regs->sync_length);
+
+ __raw_writel(((info->var.yres + info->var.lower_margin + info->var.upper_margin + info->var.vsync_len - 1) << 16) |
+ (info->var.xres + info->var.right_margin + info->var.left_margin + info->var.hsync_len - 1),
+ &par->regs->line_length);
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ func = 1;
+ break;
+ case 16:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ func = 2;
+ break;
+ case 24:
+ case 32:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ func = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __raw_writel((par->clk_sel << 6) | (func << 4) | 1,
+ &par->regs->status);
+
+ info->fix.line_length = (info->var.xres_virtual*info->var.bits_per_pixel)/8;
+ return 0;
+}
+
+static int grvga_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info)
+{
+ struct grvga_par *par;
+ par = info->par;
+
+ if (regno >= 256) /* Size of CLUT */
+ return -EINVAL;
+
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
+ }
+
+
+
+#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
+
+ red = CNVT_TOHW(red, info->var.red.length);
+ green = CNVT_TOHW(green, info->var.green.length);
+ blue = CNVT_TOHW(blue, info->var.blue.length);
+ transp = CNVT_TOHW(transp, info->var.transp.length);
+
+#undef CNVT_TOHW
+
+ /* In PSEUDOCOLOR we use the hardware CLUT */
+ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR)
+ __raw_writel((regno << 24) | (red << 16) | (green << 8) | blue,
+ &par->regs->clut);
+
+ /* Truecolor uses the pseudo palette */
+ else if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 v;
+ if (regno >= 16)
+ return -EINVAL;
+
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
+ ((u32 *) (info->pseudo_palette))[regno] = v;
+ }
+ return 0;
+}
+
+static int grvga_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct grvga_par *par = info->par;
+ struct fb_fix_screeninfo *fix = &info->fix;
+ u32 base_addr;
+
+ if (var->xoffset != 0)
+ return -EINVAL;
+
+ base_addr = fix->smem_start + (var->yoffset * fix->line_length);
+ base_addr &= ~3UL;
+
+ /* Set framebuffer base address */
+ __raw_writel(base_addr,
+ &par->regs->fb_pos);
+
+ return 0;
+}
+
+static struct fb_ops grvga_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = grvga_check_var,
+ .fb_set_par = grvga_set_par,
+ .fb_setcolreg = grvga_setcolreg,
+ .fb_pan_display = grvga_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit
+};
+
+static int __init grvga_parse_custom(char *options,
+ struct fb_var_screeninfo *screendata)
+{
+ char *this_opt;
+ int count = 0;
+ if (!options || !*options)
+ return -1;
+
+ while ((this_opt = strsep(&options, " ")) != NULL) {
+ if (!*this_opt)
+ continue;
+
+ switch (count) {
+ case 0:
+ screendata->pixclock = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 1:
+ screendata->xres = screendata->xres_virtual = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 2:
+ screendata->right_margin = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 3:
+ screendata->hsync_len = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 4:
+ screendata->left_margin = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 5:
+ screendata->yres = screendata->yres_virtual = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 6:
+ screendata->lower_margin = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 7:
+ screendata->vsync_len = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 8:
+ screendata->upper_margin = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ case 9:
+ screendata->bits_per_pixel = simple_strtoul(this_opt, NULL, 0);
+ count++;
+ break;
+ default:
+ return -1;
+ }
+ }
+ screendata->activate = FB_ACTIVATE_NOW;
+ screendata->vmode = FB_VMODE_NONINTERLACED;
+ return 0;
+}
+
+static int __devinit grvga_probe(struct platform_device *dev)
+{
+ struct fb_info *info;
+ int retval = -ENOMEM;
+ unsigned long virtual_start;
+ unsigned long grvga_fix_addr = 0;
+ unsigned long physical_start = 0;
+ unsigned long grvga_mem_size = 0;
+ struct grvga_par *par = NULL;
+ char *options = NULL, *mode_opt = NULL;
+
+ info = framebuffer_alloc(sizeof(struct grvga_par), &dev->dev);
+ if (!info) {
+ dev_err(&dev->dev, "framebuffer_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ /* Expecting: "grvga: <modestring>[,addr:<framebuffer physical address>][,size:<framebuffer size>]"
+ *
+ * If the modestring is custom:<custom mode string>, we parse the string,
+ * which then contains all video parameters.
+ * If the address is left out, we allocate memory;
+ * if the size is left out, we only allocate enough to support the given mode.
+ */
+ if (fb_get_options("grvga", &options)) {
+ retval = -ENODEV;
+ goto err;
+ }
+
+ if (!options || !*options)
+ options = "640x480-8@60";
+
+ while (1) {
+ char *this_opt = strsep(&options, ",");
+
+ if (!this_opt)
+ break;
+
+ if (!strncmp(this_opt, "custom", 6)) {
+ if (grvga_parse_custom(this_opt, &info->var) < 0) {
+ dev_err(&dev->dev, "Failed to parse custom mode (%s).\n", this_opt);
+ retval = -EINVAL;
+ goto err1;
+ }
+ } else if (!strncmp(this_opt, "addr", 4))
+ grvga_fix_addr = simple_strtoul(this_opt + 5, NULL, 16);
+ else if (!strncmp(this_opt, "size", 4))
+ grvga_mem_size = simple_strtoul(this_opt + 5, NULL, 0);
+ else
+ mode_opt = this_opt;
+ }
+
+ par = info->par;
+ info->fbops = &grvga_ops;
+ info->fix = grvga_fix;
+ info->pseudo_palette = par->color_palette;
+ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
+ info->fix.smem_len = grvga_mem_size;
+
+ if (!request_mem_region(dev->resource[0].start, resource_size(&dev->resource[0]), "grlib-svgactrl regs")) {
+ dev_err(&dev->dev, "registers already mapped\n");
+ retval = -EBUSY;
+ goto err;
+ }
+
+ par->regs = of_ioremap(&dev->resource[0], 0,
+ resource_size(&dev->resource[0]),
+ "grlib-svgactrl regs");
+
+ if (!par->regs) {
+ dev_err(&dev->dev, "failed to map registers\n");
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ retval = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (retval < 0) {
+ dev_err(&dev->dev, "failed to allocate mem with fb_alloc_cmap\n");
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ if (mode_opt) {
+ retval = fb_find_mode(&info->var, info, mode_opt,
+ grvga_modedb, sizeof(grvga_modedb), &grvga_modedb[0], 8);
+ if (!retval || retval == 4) {
+ retval = -EINVAL;
+ goto err3;
+ }
+ }
+
+ if (!grvga_mem_size)
+ grvga_mem_size = info->var.xres_virtual * info->var.yres_virtual * info->var.bits_per_pixel/8;
+
+ if (grvga_fix_addr) {
+ /* Got framebuffer base address from argument list */
+
+ physical_start = grvga_fix_addr;
+
+ if (!request_mem_region(physical_start, grvga_mem_size, dev->name)) {
+ dev_err(&dev->dev, "failed to request memory region\n");
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ virtual_start = (unsigned long) ioremap(physical_start, grvga_mem_size);
+
+ if (!virtual_start) {
+ dev_err(&dev->dev, "error mapping framebuffer memory\n");
+ retval = -ENOMEM;
+ goto err4;
+ }
+ } else { /* Allocate framebuffer memory */
+
+ unsigned long page;
+
+ virtual_start = (unsigned long) __get_free_pages(GFP_DMA,
+ get_order(grvga_mem_size));
+ if (!virtual_start) {
+ dev_err(&dev->dev,
+ "unable to allocate framebuffer memory (%lu bytes)\n",
+ grvga_mem_size);
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ physical_start = dma_map_single(&dev->dev, (void *)virtual_start, grvga_mem_size, DMA_TO_DEVICE);
+
+ /* Set page reserved so that mmap will work. This is necessary
+ * since we'll be remapping normal memory.
+ */
+ for (page = virtual_start;
+ page < PAGE_ALIGN(virtual_start + grvga_mem_size);
+ page += PAGE_SIZE) {
+ SetPageReserved(virt_to_page(page));
+ }
+
+ par->fb_alloced = 1;
+ }
+
+ memset((unsigned long *) virtual_start, 0, grvga_mem_size);
+
+ info->screen_base = (char __iomem *) virtual_start;
+ info->fix.smem_start = physical_start;
+ info->fix.smem_len = grvga_mem_size;
+
+ dev_set_drvdata(&dev->dev, info);
+
+ dev_info(&dev->dev,
+ "Aeroflex Gaisler framebuffer device (fb%d), %dx%d-%d, using %luK of video memory @ %p\n",
+ info->node, info->var.xres, info->var.yres, info->var.bits_per_pixel,
+ grvga_mem_size >> 10, info->screen_base);
+
+ retval = register_framebuffer(info);
+ if (retval < 0) {
+ dev_err(&dev->dev, "failed to register framebuffer\n");
+ goto err4;
+ }
+
+ __raw_writel(physical_start, &par->regs->fb_pos);
+ __raw_writel(__raw_readl(&par->regs->status) | 1, /* Enable framebuffer */
+ &par->regs->status);
+
+ return 0;
+
+err4:
+ dev_set_drvdata(&dev->dev, NULL);
+ if (grvga_fix_addr) {
+ release_mem_region(physical_start, grvga_mem_size);
+ iounmap((void *)virtual_start);
+ } else
+ kfree((void *)virtual_start);
+err3:
+ fb_dealloc_cmap(&info->cmap);
+err2:
+ of_iounmap(&dev->resource[0], par->regs,
+ resource_size(&dev->resource[0]));
+err1:
+ release_mem_region(dev->resource[0].start, resource_size(&dev->resource[0]));
+err:
+ framebuffer_release(info);
+
+ return retval;
+}
+
+static int __devexit grvga_remove(struct platform_device *device)
+{
+ struct fb_info *info = dev_get_drvdata(&device->dev);
+ struct grvga_par *par = info->par;
+
+ if (info) {
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+
+ of_iounmap(&device->resource[0], par->regs,
+ resource_size(&device->resource[0]));
+ release_mem_region(device->resource[0].start, resource_size(&device->resource[0]));
+
+ if (!par->fb_alloced) {
+ release_mem_region(info->fix.smem_start, info->fix.smem_len);
+ iounmap(info->screen_base);
+ } else
+ kfree((void *)info->screen_base);
+
+ framebuffer_release(info);
+ dev_set_drvdata(&device->dev, NULL);
+ }
+
+ return 0;
+}
+
+static struct of_device_id svgactrl_of_match[] = {
+ {
+ .name = "GAISLER_SVGACTRL",
+ },
+ {
+ .name = "01_063",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, svgactrl_of_match);
+
+static struct platform_driver grvga_driver = {
+ .driver = {
+ .name = "grlib-svgactrl",
+ .owner = THIS_MODULE,
+ .of_match_table = svgactrl_of_match,
+ },
+ .probe = grvga_probe,
+ .remove = __devexit_p(grvga_remove),
+};
+
+
+static int __init grvga_init(void)
+{
+ return platform_driver_register(&grvga_driver);
+}
+
+static void __exit grvga_exit(void)
+{
+ platform_driver_unregister(&grvga_driver);
+}
+
+module_init(grvga_init);
+module_exit(grvga_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Aeroflex Gaisler");
+MODULE_DESCRIPTION("Aeroflex Gaisler framebuffer device driver");
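
For reference, grvga_probe() above takes its configuration from the standard fb boot option for the name "grvga" (via fb_get_options) as comma-separated options: a mode string passed to fb_find_mode(), addr:<hex physical address>, size:<framebuffer size>, or a custom:... mode whose ten space-separated fields follow the order parsed in grvga_parse_custom(). Two illustrative command lines, with made-up addresses and sizes:

    # Let the driver allocate the framebuffer for a modedb mode:
    video=grvga:800x600-16@60

    # Use a pre-existing framebuffer at a fixed physical address (hex) with an explicit size:
    video=grvga:800x600-16@60,addr:40000000,size:0x200000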
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
index 896e53dea90..0fad23f810a 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/gxt4500.c
@@ -543,8 +543,8 @@ static int gxt4500_pan_display(struct fb_var_screeninfo *var,
if (var->xoffset & 7)
return -EINVAL;
- if (var->xoffset + var->xres > var->xres_virtual ||
- var->yoffset + var->yres > var->yres_virtual)
+ if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
writereg(par, REFRESH_START, (var->xoffset << 16) | var->yoffset);
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 4052718eefa..4394389caf6 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -422,8 +422,8 @@ static int hgafb_pan_display(struct fb_var_screeninfo *var,
var->xoffset)
return -EINVAL;
} else {
- if (var->xoffset + var->xres > info->var.xres_virtual
- || var->yoffset + var->yres > info->var.yres_virtual
+ if (var->xoffset + info->var.xres > info->var.xres_virtual
+ || var->yoffset + info->var.yres > info->var.yres_virtual
|| var->yoffset % 8)
return -EINVAL;
}
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index efb2c10656b..8149356471e 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -749,7 +749,7 @@ set_offset (struct fb_var_screeninfo *var, struct fb_info *info)
{
struct imstt_par *par = info->par;
__u32 off = var->yoffset * (info->fix.line_length >> 3)
- + ((var->xoffset * (var->bits_per_pixel >> 3)) >> 3);
+ + ((var->xoffset * (info->var.bits_per_pixel >> 3)) >> 3);
write_reg_le32(par->dc_regs, SSR, off);
}
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
index 38065cf94ac..fbad61da359 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/intelfb/intelfbhw.c
@@ -390,12 +390,12 @@ int intelfbhw_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
xoffset = ROUND_DOWN_TO(var->xoffset, 8);
yoffset = var->yoffset;
- if ((xoffset + var->xres > var->xres_virtual) ||
- (yoffset + var->yres > var->yres_virtual))
+ if ((xoffset + info->var.xres > info->var.xres_virtual) ||
+ (yoffset + info->var.yres > info->var.yres_virtual))
return -EINVAL;
offset = (yoffset * dinfo->pitch) +
- (xoffset * var->bits_per_pixel) / 8;
+ (xoffset * info->var.bits_per_pixel) / 8;
offset += dinfo->fb.offset << 12;
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
index b953099edd8..273769bb8de 100644
--- a/drivers/video/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include "mb862xxfb.h"
#include "mb862xx_reg.h"
@@ -23,7 +24,7 @@ static int mb862xx_i2c_wait_event(struct i2c_adapter *adap)
u32 reg;
do {
- udelay(1);
+ udelay(10);
reg = inreg(i2c, GC_I2C_BCR);
if (reg & (I2C_INT | I2C_BER))
break;
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index ee1de3e26de..11a7a333701 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -17,6 +17,7 @@
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -278,7 +279,7 @@ static int mb862xxfb_pan(struct fb_var_screeninfo *var,
reg = pack(var->yoffset, var->xoffset);
outreg(disp, GC_L0WY_L0WX, reg);
- reg = pack(var->yres_virtual, var->xres_virtual);
+ reg = pack(info->var.yres_virtual, info->var.xres_virtual);
outreg(disp, GC_L0WH_L0WW, reg);
return 0;
}
@@ -737,7 +738,7 @@ static int __devinit of_platform_mb862xx_probe(struct platform_device *ofdev)
if (mb862xx_gdc_init(par))
goto io_unmap;
- if (request_irq(par->irq, mb862xx_intr, IRQF_DISABLED,
+ if (request_irq(par->irq, mb862xx_intr, 0,
DRV_NAME, (void *)par)) {
dev_err(dev, "Cannot request irq\n");
goto io_unmap;
@@ -1073,7 +1074,7 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
if (mb862xx_pci_gdc_init(par))
goto io_unmap;
- if (request_irq(par->irq, mb862xx_intr, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(par->irq, mb862xx_intr, IRQF_SHARED,
DRV_NAME, (void *)par)) {
dev_err(dev, "Cannot request irq\n");
goto io_unmap;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index afea9abbd67..6ce34160da7 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -34,7 +34,7 @@
#include "regs.h"
#include "reg_bits.h"
-static unsigned long virt_base_2700;
+static void __iomem *virt_base_2700;
#define write_reg(val, reg) do { writel((val), (reg)); } while(0)
@@ -850,7 +850,7 @@ static int mbxfb_suspend(struct platform_device *dev, pm_message_t state)
{
/* make frame buffer memory enter self-refresh mode */
write_reg_dly(LMPWR_MC_PWR_SRM, LMPWR);
- while (LMPWRSTAT != LMPWRSTAT_MC_PWR_SRM)
+ while (readl(LMPWRSTAT) != LMPWRSTAT_MC_PWR_SRM)
; /* empty statement */
/* reset the device, since it's initial state is 'mostly sleeping' */
@@ -946,7 +946,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
ret = -EINVAL;
goto err3;
}
- virt_base_2700 = (unsigned long)mfbi->reg_virt_addr;
+ virt_base_2700 = mfbi->reg_virt_addr;
mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
res_size(mfbi->fb_req));
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index cb175fe7abc..a9a907c440d 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -491,55 +491,56 @@ EXPORT_SYMBOL(vesa_modes);
static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info,
const struct fb_videomode *mode, unsigned int bpp)
{
- int err = 0;
-
- DPRINTK("Trying mode %s %dx%d-%d@%d\n", mode->name ? mode->name : "noname",
- mode->xres, mode->yres, bpp, mode->refresh);
- var->xres = mode->xres;
- var->yres = mode->yres;
- var->xres_virtual = mode->xres;
- var->yres_virtual = mode->yres;
- var->xoffset = 0;
- var->yoffset = 0;
- var->bits_per_pixel = bpp;
- var->activate |= FB_ACTIVATE_TEST;
- var->pixclock = mode->pixclock;
- var->left_margin = mode->left_margin;
- var->right_margin = mode->right_margin;
- var->upper_margin = mode->upper_margin;
- var->lower_margin = mode->lower_margin;
- var->hsync_len = mode->hsync_len;
- var->vsync_len = mode->vsync_len;
- var->sync = mode->sync;
- var->vmode = mode->vmode;
- if (info->fbops->fb_check_var)
- err = info->fbops->fb_check_var(var, info);
- var->activate &= ~FB_ACTIVATE_TEST;
- return err;
+ int err = 0;
+
+ DPRINTK("Trying mode %s %dx%d-%d@%d\n",
+ mode->name ? mode->name : "noname",
+ mode->xres, mode->yres, bpp, mode->refresh);
+ var->xres = mode->xres;
+ var->yres = mode->yres;
+ var->xres_virtual = mode->xres;
+ var->yres_virtual = mode->yres;
+ var->xoffset = 0;
+ var->yoffset = 0;
+ var->bits_per_pixel = bpp;
+ var->activate |= FB_ACTIVATE_TEST;
+ var->pixclock = mode->pixclock;
+ var->left_margin = mode->left_margin;
+ var->right_margin = mode->right_margin;
+ var->upper_margin = mode->upper_margin;
+ var->lower_margin = mode->lower_margin;
+ var->hsync_len = mode->hsync_len;
+ var->vsync_len = mode->vsync_len;
+ var->sync = mode->sync;
+ var->vmode = mode->vmode;
+ if (info->fbops->fb_check_var)
+ err = info->fbops->fb_check_var(var, info);
+ var->activate &= ~FB_ACTIVATE_TEST;
+ return err;
}
/**
- * fb_find_mode - finds a valid video mode
- * @var: frame buffer user defined part of display
- * @info: frame buffer info structure
- * @mode_option: string video mode to find
- * @db: video mode database
- * @dbsize: size of @db
- * @default_mode: default video mode to fall back to
- * @default_bpp: default color depth in bits per pixel
+ * fb_find_mode - finds a valid video mode
+ * @var: frame buffer user defined part of display
+ * @info: frame buffer info structure
+ * @mode_option: string video mode to find
+ * @db: video mode database
+ * @dbsize: size of @db
+ * @default_mode: default video mode to fall back to
+ * @default_bpp: default color depth in bits per pixel
*
- * Finds a suitable video mode, starting with the specified mode
- * in @mode_option with fallback to @default_mode. If
- * @default_mode fails, all modes in the video mode database will
- * be tried.
+ * Finds a suitable video mode, starting with the specified mode
+ * in @mode_option with fallback to @default_mode. If
+ * @default_mode fails, all modes in the video mode database will
+ * be tried.
*
- * Valid mode specifiers for @mode_option:
+ * Valid mode specifiers for @mode_option:
*
- * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m] or
- * <name>[-<bpp>][@<refresh>]
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m] or
+ * <name>[-<bpp>][@<refresh>]
*
- * with <xres>, <yres>, <bpp> and <refresh> decimal numbers and
- * <name> a string.
+ * with <xres>, <yres>, <bpp> and <refresh> decimal numbers and
+ * <name> a string.
*
* If 'M' is present after yres (and before refresh/bpp if present),
* the function will compute the timings using VESA(tm) Coordinated
@@ -551,12 +552,12 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info,
*
* 1024x768MR-8@60m - Reduced blank with margins at 60Hz.
*
- * NOTE: The passed struct @var is _not_ cleared! This allows you
- * to supply values for e.g. the grayscale and accel_flags fields.
+ * NOTE: The passed struct @var is _not_ cleared! This allows you
+ * to supply values for e.g. the grayscale and accel_flags fields.
*
- * Returns zero for failure, 1 if using specified @mode_option,
- * 2 if using specified @mode_option with an ignored refresh rate,
- * 3 if default mode is used, 4 if fall back to any valid mode.
+ * Returns zero for failure, 1 if using specified @mode_option,
+ * 2 if using specified @mode_option with an ignored refresh rate,
+ * 3 if default mode is used, 4 if fall back to any valid mode.
*
*/
@@ -566,198 +567,203 @@ int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp)
{
- int i;
-
- /* Set up defaults */
- if (!db) {
- db = modedb;
- dbsize = ARRAY_SIZE(modedb);
- }
-
- if (!default_mode)
- default_mode = &db[0];
-
- if (!default_bpp)
- default_bpp = 8;
-
- /* Did the user specify a video mode? */
- if (!mode_option)
- mode_option = fb_mode_option;
- if (mode_option) {
- const char *name = mode_option;
- unsigned int namelen = strlen(name);
- int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
- unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0;
- int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
- u32 best, diff, tdiff;
-
- for (i = namelen-1; i >= 0; i--) {
- switch (name[i]) {
- case '@':
- namelen = i;
- if (!refresh_specified && !bpp_specified &&
- !yres_specified) {
- refresh = simple_strtol(&name[i+1], NULL, 10);
- refresh_specified = 1;
- if (cvt || rb)
- cvt = 0;
- } else
- goto done;
- break;
- case '-':
- namelen = i;
- if (!bpp_specified && !yres_specified) {
- bpp = simple_strtol(&name[i+1], NULL, 10);
- bpp_specified = 1;
- if (cvt || rb)
- cvt = 0;
- } else
- goto done;
- break;
- case 'x':
- if (!yres_specified) {
- yres = simple_strtol(&name[i+1], NULL, 10);
- yres_specified = 1;
- } else
- goto done;
- break;
- case '0' ... '9':
- break;
- case 'M':
- if (!yres_specified)
- cvt = 1;
- break;
- case 'R':
- if (!cvt)
- rb = 1;
- break;
- case 'm':
- if (!cvt)
- margins = 1;
- break;
- case 'i':
- if (!cvt)
- interlace = 1;
- break;
- default:
- goto done;
- }
- }
- if (i < 0 && yres_specified) {
- xres = simple_strtol(name, NULL, 10);
- res_specified = 1;
- }
-done:
- if (cvt) {
- struct fb_videomode cvt_mode;
- int ret;
-
- DPRINTK("CVT mode %dx%d@%dHz%s%s%s\n", xres, yres,
- (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
- "", (margins) ? " with margins" : "", (interlace) ?
- " interlaced" : "");
-
- memset(&cvt_mode, 0, sizeof(cvt_mode));
- cvt_mode.xres = xres;
- cvt_mode.yres = yres;
- cvt_mode.refresh = (refresh) ? refresh : 60;
+ int i;
- if (interlace)
- cvt_mode.vmode |= FB_VMODE_INTERLACED;
- else
- cvt_mode.vmode &= ~FB_VMODE_INTERLACED;
+ /* Set up defaults */
+ if (!db) {
+ db = modedb;
+ dbsize = ARRAY_SIZE(modedb);
+ }
- ret = fb_find_mode_cvt(&cvt_mode, margins, rb);
+ if (!default_mode)
+ default_mode = &db[0];
+
+ if (!default_bpp)
+ default_bpp = 8;
+
+ /* Did the user specify a video mode? */
+ if (!mode_option)
+ mode_option = fb_mode_option;
+ if (mode_option) {
+ const char *name = mode_option;
+ unsigned int namelen = strlen(name);
+ int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+ unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0;
+ int yres_specified = 0, cvt = 0, rb = 0, interlace = 0;
+ int margins = 0;
+ u32 best, diff, tdiff;
+
+ for (i = namelen-1; i >= 0; i--) {
+ switch (name[i]) {
+ case '@':
+ namelen = i;
+ if (!refresh_specified && !bpp_specified &&
+ !yres_specified) {
+ refresh = simple_strtol(&name[i+1], NULL,
+ 10);
+ refresh_specified = 1;
+ if (cvt || rb)
+ cvt = 0;
+ } else
+ goto done;
+ break;
+ case '-':
+ namelen = i;
+ if (!bpp_specified && !yres_specified) {
+ bpp = simple_strtol(&name[i+1], NULL,
+ 10);
+ bpp_specified = 1;
+ if (cvt || rb)
+ cvt = 0;
+ } else
+ goto done;
+ break;
+ case 'x':
+ if (!yres_specified) {
+ yres = simple_strtol(&name[i+1], NULL,
+ 10);
+ yres_specified = 1;
+ } else
+ goto done;
+ break;
+ case '0' ... '9':
+ break;
+ case 'M':
+ if (!yres_specified)
+ cvt = 1;
+ break;
+ case 'R':
+ if (!cvt)
+ rb = 1;
+ break;
+ case 'm':
+ if (!cvt)
+ margins = 1;
+ break;
+ case 'i':
+ if (!cvt)
+ interlace = 1;
+ break;
+ default:
+ goto done;
+ }
+ }
+ if (i < 0 && yres_specified) {
+ xres = simple_strtol(name, NULL, 10);
+ res_specified = 1;
+ }
+done:
+ if (cvt) {
+ struct fb_videomode cvt_mode;
+ int ret;
+
+ DPRINTK("CVT mode %dx%d@%dHz%s%s%s\n", xres, yres,
+ (refresh) ? refresh : 60,
+ (rb) ? " reduced blanking" : "",
+ (margins) ? " with margins" : "",
+ (interlace) ? " interlaced" : "");
+
+ memset(&cvt_mode, 0, sizeof(cvt_mode));
+ cvt_mode.xres = xres;
+ cvt_mode.yres = yres;
+ cvt_mode.refresh = (refresh) ? refresh : 60;
+
+ if (interlace)
+ cvt_mode.vmode |= FB_VMODE_INTERLACED;
+ else
+ cvt_mode.vmode &= ~FB_VMODE_INTERLACED;
+
+ ret = fb_find_mode_cvt(&cvt_mode, margins, rb);
+
+ if (!ret && !fb_try_mode(var, info, &cvt_mode, bpp)) {
+ DPRINTK("modedb CVT: CVT mode ok\n");
+ return 1;
+ }
- if (!ret && !fb_try_mode(var, info, &cvt_mode, bpp)) {
- DPRINTK("modedb CVT: CVT mode ok\n");
- return 1;
- }
+ DPRINTK("CVT mode invalid, getting mode from database\n");
+ }
- DPRINTK("CVT mode invalid, getting mode from database\n");
- }
+ DPRINTK("Trying specified video mode%s %ix%i\n",
+ refresh_specified ? "" : " (ignoring refresh rate)",
+ xres, yres);
- DPRINTK("Trying specified video mode%s %ix%i\n",
- refresh_specified ? "" : " (ignoring refresh rate)", xres, yres);
-
- if (!refresh_specified) {
- /*
- * If the caller has provided a custom mode database and a
- * valid monspecs structure, we look for the mode with the
- * highest refresh rate. Otherwise we play it safe it and
- * try to find a mode with a refresh rate closest to the
- * standard 60 Hz.
- */
- if (db != modedb &&
- info->monspecs.vfmin && info->monspecs.vfmax &&
- info->monspecs.hfmin && info->monspecs.hfmax &&
- info->monspecs.dclkmax) {
- refresh = 1000;
- } else {
- refresh = 60;
+ if (!refresh_specified) {
+ /*
+ * If the caller has provided a custom mode database and
+ * a valid monspecs structure, we look for the mode with
+ * the highest refresh rate. Otherwise we play it safe
+ * and try to find a mode with a refresh rate closest
+ * to the standard 60 Hz.
+ */
+ if (db != modedb &&
+ info->monspecs.vfmin && info->monspecs.vfmax &&
+ info->monspecs.hfmin && info->monspecs.hfmax &&
+ info->monspecs.dclkmax) {
+ refresh = 1000;
+ } else {
+ refresh = 60;
+ }
}
- }
- diff = -1;
- best = -1;
- for (i = 0; i < dbsize; i++) {
- if ((name_matches(db[i], name, namelen) ||
- (res_specified && res_matches(db[i], xres, yres))) &&
- !fb_try_mode(var, info, &db[i], bpp)) {
- if (refresh_specified && db[i].refresh == refresh) {
- return 1;
- } else {
+ diff = -1;
+ best = -1;
+ for (i = 0; i < dbsize; i++) {
+ if ((name_matches(db[i], name, namelen) ||
+ (res_specified && res_matches(db[i], xres, yres))) &&
+ !fb_try_mode(var, info, &db[i], bpp)) {
+ if (refresh_specified && db[i].refresh == refresh)
+ return 1;
+
if (abs(db[i].refresh - refresh) < diff) {
diff = abs(db[i].refresh - refresh);
best = i;
}
}
}
- }
- if (best != -1) {
- fb_try_mode(var, info, &db[best], bpp);
- return (refresh_specified) ? 2 : 1;
- }
-
- diff = 2 * (xres + yres);
- best = -1;
- DPRINTK("Trying best-fit modes\n");
- for (i = 0; i < dbsize; i++) {
- DPRINTK("Trying %ix%i\n", db[i].xres, db[i].yres);
- if (!fb_try_mode(var, info, &db[i], bpp)) {
- tdiff = abs(db[i].xres - xres) +
- abs(db[i].yres - yres);
-
- /*
- * Penalize modes with resolutions smaller
- * than requested.
- */
- if (xres > db[i].xres || yres > db[i].yres)
- tdiff += xres + yres;
+ if (best != -1) {
+ fb_try_mode(var, info, &db[best], bpp);
+ return (refresh_specified) ? 2 : 1;
+ }
- if (diff > tdiff) {
- diff = tdiff;
- best = i;
+ diff = 2 * (xres + yres);
+ best = -1;
+ DPRINTK("Trying best-fit modes\n");
+ for (i = 0; i < dbsize; i++) {
+ DPRINTK("Trying %ix%i\n", db[i].xres, db[i].yres);
+ if (!fb_try_mode(var, info, &db[i], bpp)) {
+ tdiff = abs(db[i].xres - xres) +
+ abs(db[i].yres - yres);
+
+ /*
+ * Penalize modes with resolutions smaller
+ * than requested.
+ */
+ if (xres > db[i].xres || yres > db[i].yres)
+ tdiff += xres + yres;
+
+ if (diff > tdiff) {
+ diff = tdiff;
+ best = i;
+ }
}
}
+ if (best != -1) {
+ fb_try_mode(var, info, &db[best], bpp);
+ return 5;
+ }
}
- if (best != -1) {
- fb_try_mode(var, info, &db[best], bpp);
- return 5;
- }
- }
- DPRINTK("Trying default video mode\n");
- if (!fb_try_mode(var, info, default_mode, default_bpp))
- return 3;
+ DPRINTK("Trying default video mode\n");
+ if (!fb_try_mode(var, info, default_mode, default_bpp))
+ return 3;
- DPRINTK("Trying all modes\n");
- for (i = 0; i < dbsize; i++)
- if (!fb_try_mode(var, info, &db[i], default_bpp))
- return 4;
+ DPRINTK("Trying all modes\n");
+ for (i = 0; i < dbsize; i++)
+ if (!fb_try_mode(var, info, &db[i], default_bpp))
+ return 4;
- DPRINTK("No valid mode found\n");
- return 0;
+ DPRINTK("No valid mode found\n");
+ return 0;
}
/**
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index 178b0720bd7..4527cbf0a4e 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -715,7 +715,7 @@ static int __devinit mddi_probe(struct platform_device *pdev)
mddi->int_enable = 0;
mddi_writel(mddi->int_enable, INTEN);
- ret = request_irq(mddi->irq, mddi_isr, IRQF_DISABLED, "mddi",
+ ret = request_irq(mddi->irq, mddi_isr, 0, "mddi",
&mddi->client_data);
if (ret) {
printk(KERN_ERR "mddi: failed to request enable irq!\n");
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 243d16f09b8..cb2ddf164c9 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -28,6 +28,7 @@
#include <mach/msm_iomap.h>
#include <mach/msm_fb.h>
#include <linux/platform_device.h>
+#include <linux/export.h>
#include "mdp_hw.h"
@@ -421,10 +422,11 @@ int mdp_probe(struct platform_device *pdev)
clk = clk_get(&pdev->dev, "mdp_clk");
if (IS_ERR(clk)) {
printk(KERN_INFO "mdp: failed to get mdp clk");
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto error_get_clk;
}
- ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
+ ret = request_irq(mdp->irq, mdp_isr, 0, "msm_mdp", mdp);
if (ret)
goto error_request_irq;
disable_irq(mdp->irq);
@@ -495,6 +497,7 @@ int mdp_probe(struct platform_device *pdev)
error_device_register:
free_irq(mdp->irq, mdp);
error_request_irq:
+error_get_clk:
iounmap(mdp->base);
error_get_irq:
error_ioremap:
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 7e3a490e8d7..e3406ab3130 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -382,6 +382,9 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
uint32_t enabled;
unsigned long flags;
+ if (mx3_fbi->txd == NULL)
+ return;
+
spin_lock_irqsave(&mx3fb->lock, flags);
enabled = sdc_fb_uninit(mx3_fbi);
@@ -986,9 +989,19 @@ static void __blank(int blank, struct fb_info *fbi)
{
struct mx3fb_info *mx3_fbi = fbi->par;
struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
+ int was_blank = mx3_fbi->blank;
mx3_fbi->blank = blank;
+ /* Attention!
+ * Do not call sdc_disable_channel() for a channel that is disabled
+ * already! This will result in a kernel NULL pointer dereference
+ * (mx3_fbi->txd is NULL). Hide the fact that all blank modes are
+ * handled equally by this driver.
+ */
+ if (blank > FB_BLANK_UNBLANK && was_blank > FB_BLANK_UNBLANK)
+ return;
+
switch (blank) {
case FB_BLANK_POWERDOWN:
case FB_BLANK_VSYNC_SUSPEND:
@@ -1062,15 +1075,15 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
y_bottom = var->yoffset;
if (!(var->vmode & FB_VMODE_YWRAP))
- y_bottom += var->yres;
+ y_bottom += fbi->var.yres;
if (y_bottom > fbi->var.yres_virtual)
return -EINVAL;
mutex_lock(&mx3_fbi->mutex);
- offset = (var->yoffset * var->xres_virtual + var->xoffset) *
- (var->bits_per_pixel / 8);
+ offset = var->yoffset * fbi->fix.line_length
+ + var->xoffset * (fbi->var.bits_per_pixel / 8);
base = fbi->fix.smem_start + offset;
dev_dbg(fbi->device, "Updating SDC BG buf %d address=0x%08lX\n",
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 0b2f2dd4141..d837d63c456 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -39,6 +39,7 @@
* the required value in the imx_fb_videomode structure.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 588527a254c..feea7b1dc38 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -1185,8 +1185,8 @@ static int neofb_pan_display(struct fb_var_screeninfo *var,
DBG("neofb_update_start");
- Base = (var->yoffset * var->xres_virtual + var->xoffset) >> 2;
- Base *= (var->bits_per_pixel + 7) / 8;
+ Base = (var->yoffset * info->var.xres_virtual + var->xoffset) >> 2;
+ Base *= (info->var.bits_per_pixel + 7) / 8;
neoUnlock();
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 0fff59782e4..d1fbbd888cf 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -39,7 +39,6 @@
#include <mach/regs-clock.h>
#include <mach/regs-ldm.h>
#include <mach/fb.h>
-#include <mach/clkdev.h>
#include "nuc900fb.h"
@@ -588,7 +587,7 @@ static int __devinit nuc900fb_probe(struct platform_device *pdev)
fbinfo->flags = FBINFO_FLAG_DEFAULT;
fbinfo->pseudo_palette = &fbi->pseudo_pal;
- ret = request_irq(irq, nuc900fb_irqhandler, IRQF_DISABLED,
+ ret = request_irq(irq, nuc900fb_irqhandler, 0,
pdev->name, fbinfo);
if (ret) {
dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n",
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 196fa2e7f43..84ff23208c2 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -9,35 +9,6 @@ config FB_OMAP
help
Frame buffer driver for OMAP based boards.
-config FB_OMAP_LCD_VGA
- bool "Use LCD in VGA mode"
- depends on MACH_OMAP_3430SDP || MACH_OMAP_LDP
- help
- Set LCD resolution as VGA (640 X 480).
- Default resolution without this option is QVGA(320 X 240).
- Please take a look at drivers/video/omap/lcd_ldp.c file
- for lcd driver code.
-choice
- depends on FB_OMAP && MACH_OVERO
- prompt "Screen resolution"
- default FB_OMAP_079M3R
- help
- Selected desired screen resolution
-
-config FB_OMAP_031M3R
- boolean "640 x 480 @ 60 Hz Reduced blanking"
-
-config FB_OMAP_048M3R
- boolean "800 x 600 @ 60 Hz Reduced blanking"
-
-config FB_OMAP_079M3R
- boolean "1024 x 768 @ 60 Hz Reduced blanking"
-
-config FB_OMAP_092M9R
- boolean "1280 x 720 @ 60 Hz Reduced blanking"
-
-endchoice
-
config FB_OMAP_LCDC_EXTERNAL
bool "External LCD controller support"
depends on FB_OMAP
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
index 25db55696e1..ef78550917f 100644
--- a/drivers/video/omap/Makefile
+++ b/drivers/video/omap/Makefile
@@ -17,7 +17,6 @@ objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
objs-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o
-objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
@@ -26,14 +25,7 @@ objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
-objs-y$(CONFIG_MACH_OMAP_APOLLON) += lcd_apollon.o
-objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o
-objs-y$(CONFIG_MACH_OMAP_3430SDP) += lcd_2430sdp.o
-objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp.o
-objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o
-objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o
objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
-objs-y$(CONFIG_MACH_OVERO) += lcd_overo.o
objs-y$(CONFIG_MACH_HERALD) += lcd_htcherald.o
omapfb-objs := $(objs-yy)
diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c
deleted file mode 100644
index e3eccc9af78..00000000000
--- a/drivers/video/omap/lcd_2430sdp.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * LCD panel support for the TI 2430SDP board
- *
- * Copyright (C) 2007 MontaVista
- * Author: Hunyue Yau <hyau@mvista.com>
- *
- * Derived from drivers/video/omap/lcd-apollon.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/i2c/twl.h>
-
-#include <plat/mux.h>
-#include <asm/mach-types.h>
-
-#include "omapfb.h"
-
-#define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91
-#define SDP2430_LCD_PANEL_ENABLE_GPIO 154
-#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24
-#define SDP3430_LCD_PANEL_ENABLE_GPIO 28
-
-static unsigned backlight_gpio;
-static unsigned enable_gpio;
-
-#define LCD_PIXCLOCK_MAX 5400 /* freq 5.4 MHz */
-#define PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
-#define ENABLE_VAUX2_DEDICATED 0x09
-#define ENABLE_VAUX2_DEV_GRP 0x20
-#define ENABLE_VAUX3_DEDICATED 0x03
-#define ENABLE_VAUX3_DEV_GRP 0x20
-
-#define ENABLE_VPLL2_DEDICATED 0x05
-#define ENABLE_VPLL2_DEV_GRP 0xE0
-#define TWL4030_VPLL2_DEV_GRP 0x33
-#define TWL4030_VPLL2_DEDICATED 0x36
-
-#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
-
-
-static int sdp2430_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- if (machine_is_omap_3430sdp()) {
- enable_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO;
- backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO;
- } else {
- enable_gpio = SDP2430_LCD_PANEL_ENABLE_GPIO;
- backlight_gpio = SDP2430_LCD_PANEL_BACKLIGHT_GPIO;
- }
-
- gpio_request(enable_gpio, "LCD enable"); /* LCD panel */
- gpio_request(backlight_gpio, "LCD bl"); /* LCD backlight */
- gpio_direction_output(enable_gpio, 0);
- gpio_direction_output(backlight_gpio, 0);
-
- return 0;
-}
-
-static void sdp2430_panel_cleanup(struct lcd_panel *panel)
-{
- gpio_free(backlight_gpio);
- gpio_free(enable_gpio);
-}
-
-static int sdp2430_panel_enable(struct lcd_panel *panel)
-{
- u8 ded_val, ded_reg;
- u8 grp_val, grp_reg;
-
- if (machine_is_omap_3430sdp()) {
- ded_reg = TWL4030_VAUX3_DEDICATED;
- ded_val = ENABLE_VAUX3_DEDICATED;
- grp_reg = TWL4030_VAUX3_DEV_GRP;
- grp_val = ENABLE_VAUX3_DEV_GRP;
-
- if (omap_rev() > OMAP3430_REV_ES1_0) {
- t2_out(PM_RECEIVER, ENABLE_VPLL2_DEDICATED,
- TWL4030_VPLL2_DEDICATED);
- t2_out(PM_RECEIVER, ENABLE_VPLL2_DEV_GRP,
- TWL4030_VPLL2_DEV_GRP);
- }
- } else {
- ded_reg = TWL4030_VAUX2_DEDICATED;
- ded_val = ENABLE_VAUX2_DEDICATED;
- grp_reg = TWL4030_VAUX2_DEV_GRP;
- grp_val = ENABLE_VAUX2_DEV_GRP;
- }
-
- gpio_set_value(enable_gpio, 1);
- gpio_set_value(backlight_gpio, 1);
-
- if (0 != t2_out(PM_RECEIVER, ded_val, ded_reg))
- return -EIO;
- if (0 != t2_out(PM_RECEIVER, grp_val, grp_reg))
- return -EIO;
-
- return 0;
-}
-
-static void sdp2430_panel_disable(struct lcd_panel *panel)
-{
- gpio_set_value(enable_gpio, 0);
- gpio_set_value(backlight_gpio, 0);
- if (omap_rev() > OMAP3430_REV_ES1_0) {
- t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEDICATED);
- t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEV_GRP);
- msleep(4);
- }
-}
-
-static unsigned long sdp2430_panel_get_caps(struct lcd_panel *panel)
-{
- return 0;
-}
-
-struct lcd_panel sdp2430_panel = {
- .name = "sdp2430",
- .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
- OMAP_LCDC_INV_HSYNC,
-
- .bpp = 16,
- .data_lines = 16,
- .x_res = 240,
- .y_res = 320,
- .hsw = 3, /* hsync_len (4) - 1 */
- .hfp = 3, /* right_margin (4) - 1 */
- .hbp = 39, /* left_margin (40) - 1 */
- .vsw = 1, /* vsync_len (2) - 1 */
- .vfp = 2, /* lower_margin */
- .vbp = 7, /* upper_margin (8) - 1 */
-
- .pixel_clock = LCD_PIXCLOCK_MAX,
-
- .init = sdp2430_panel_init,
- .cleanup = sdp2430_panel_cleanup,
- .enable = sdp2430_panel_enable,
- .disable = sdp2430_panel_disable,
- .get_caps = sdp2430_panel_get_caps,
-};
-
-static int sdp2430_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&sdp2430_panel);
- return 0;
-}
-
-static int sdp2430_panel_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int sdp2430_panel_suspend(struct platform_device *pdev,
- pm_message_t mesg)
-{
- return 0;
-}
-
-static int sdp2430_panel_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-struct platform_driver sdp2430_panel_driver = {
- .probe = sdp2430_panel_probe,
- .remove = sdp2430_panel_remove,
- .suspend = sdp2430_panel_suspend,
- .resume = sdp2430_panel_resume,
- .driver = {
- .name = "sdp2430_lcd",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init sdp2430_panel_drv_init(void)
-{
- return platform_driver_register(&sdp2430_panel_driver);
-}
-
-static void __exit sdp2430_panel_drv_exit(void)
-{
- platform_driver_unregister(&sdp2430_panel_driver);
-}
-
-module_init(sdp2430_panel_drv_init);
-module_exit(sdp2430_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c
deleted file mode 100644
index 10459d8bd9a..00000000000
--- a/drivers/video/omap/lcd_apollon.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * LCD panel support for the Samsung OMAP2 Apollon board
- *
- * Copyright (C) 2005,2006 Samsung Electronics
- * Author: Kyungmin Park <kyungmin.park@samsung.com>
- *
- * Derived from drivers/video/omap/lcd-h4.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <mach/gpio.h>
-
-#include "omapfb.h"
-
-/* #define USE_35INCH_LCD 1 */
-
-static int apollon_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- return 0;
-}
-
-static void apollon_panel_cleanup(struct lcd_panel *panel)
-{
-}
-
-static int apollon_panel_enable(struct lcd_panel *panel)
-{
- return 0;
-}
-
-static void apollon_panel_disable(struct lcd_panel *panel)
-{
-}
-
-static unsigned long apollon_panel_get_caps(struct lcd_panel *panel)
-{
- return 0;
-}
-
-struct lcd_panel apollon_panel = {
- .name = "apollon",
- .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
- OMAP_LCDC_INV_HSYNC,
-
- .bpp = 16,
- .data_lines = 18,
-#ifdef USE_35INCH_LCD
- .x_res = 240,
- .y_res = 320,
- .hsw = 2,
- .hfp = 3,
- .hbp = 9,
- .vsw = 4,
- .vfp = 3,
- .vbp = 5,
-#else
- .x_res = 480,
- .y_res = 272,
- .hsw = 41,
- .hfp = 2,
- .hbp = 2,
- .vsw = 10,
- .vfp = 2,
- .vbp = 2,
-#endif
- .pixel_clock = 6250,
-
- .init = apollon_panel_init,
- .cleanup = apollon_panel_cleanup,
- .enable = apollon_panel_enable,
- .disable = apollon_panel_disable,
- .get_caps = apollon_panel_get_caps,
-};
-
-static int apollon_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&apollon_panel);
- return 0;
-}
-
-static int apollon_panel_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int apollon_panel_suspend(struct platform_device *pdev,
- pm_message_t mesg)
-{
- return 0;
-}
-
-static int apollon_panel_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-struct platform_driver apollon_panel_driver = {
- .probe = apollon_panel_probe,
- .remove = apollon_panel_remove,
- .suspend = apollon_panel_suspend,
- .resume = apollon_panel_resume,
- .driver = {
- .name = "apollon_lcd",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init apollon_panel_drv_init(void)
-{
- return platform_driver_register(&apollon_panel_driver);
-}
-
-static void __exit apollon_panel_drv_exit(void)
-{
- platform_driver_unregister(&apollon_panel_driver);
-}
-
-module_init(apollon_panel_drv_init);
-module_exit(apollon_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 8df688748b5..622ad839fd9 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -23,7 +23,7 @@
#include <linux/platform_device.h>
#include <linux/i2c/tps65010.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include "omapfb.h"
#define MODULE_NAME "omapfb-lcd_h3"
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
deleted file mode 100644
index 03a06a98275..00000000000
--- a/drivers/video/omap/lcd_h4.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * LCD panel support for the TI OMAP H4 board
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "omapfb.h"
-
-static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
-{
- return 0;
-}
-
-static void h4_panel_cleanup(struct lcd_panel *panel)
-{
-}
-
-static int h4_panel_enable(struct lcd_panel *panel)
-{
- return 0;
-}
-
-static void h4_panel_disable(struct lcd_panel *panel)
-{
-}
-
-static unsigned long h4_panel_get_caps(struct lcd_panel *panel)
-{
- return 0;
-}
-
-static struct lcd_panel h4_panel = {
- .name = "h4",
- .config = OMAP_LCDC_PANEL_TFT,
-
- .bpp = 16,
- .data_lines = 16,
- .x_res = 240,
- .y_res = 320,
- .pixel_clock = 6250,
- .hsw = 15,
- .hfp = 15,
- .hbp = 60,
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .init = h4_panel_init,
- .cleanup = h4_panel_cleanup,
- .enable = h4_panel_enable,
- .disable = h4_panel_disable,
- .get_caps = h4_panel_get_caps,
-};
-
-static int h4_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&h4_panel);
- return 0;
-}
-
-static int h4_panel_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int h4_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- return 0;
-}
-
-static int h4_panel_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct platform_driver h4_panel_driver = {
- .probe = h4_panel_probe,
- .remove = h4_panel_remove,
- .suspend = h4_panel_suspend,
- .resume = h4_panel_resume,
- .driver = {
- .name = "lcd_h4",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init h4_panel_drv_init(void)
-{
- return platform_driver_register(&h4_panel_driver);
-}
-
-static void __exit h4_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&h4_panel_driver);
-}
-
-module_init(h4_panel_drv_init);
-module_exit(h4_panel_drv_cleanup);
-
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 9fff86f67bd..12cc52a70f9 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include "omapfb.h"
#define MODULE_NAME "omapfb-lcd_h3"
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
deleted file mode 100644
index 0f5952cae85..00000000000
--- a/drivers/video/omap/lcd_ldp.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * LCD panel support for the TI LDP board
- *
- * Copyright (C) 2007 WindRiver
- * Author: Stanley Miao <stanley.miao@windriver.com>
- *
- * Derived from drivers/video/omap/lcd-2430sdp.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/i2c/twl.h>
-
-#include <mach/gpio.h>
-#include <plat/mux.h>
-#include <asm/mach-types.h>
-
-#include "omapfb.h"
-
-#define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES)
-#define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES)
-
-#define LCD_PANEL_RESET_GPIO 55
-#define LCD_PANEL_QVGA_GPIO 56
-
-#ifdef CONFIG_FB_OMAP_LCD_VGA
-#define LCD_XRES 480
-#define LCD_YRES 640
-#define LCD_PIXCLOCK_MAX 41700
-#else
-#define LCD_XRES 240
-#define LCD_YRES 320
-#define LCD_PIXCLOCK_MAX 185186
-#endif
-
-#define PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
-#define ENABLE_VAUX2_DEDICATED 0x09
-#define ENABLE_VAUX2_DEV_GRP 0x20
-#define ENABLE_VAUX3_DEDICATED 0x03
-#define ENABLE_VAUX3_DEV_GRP 0x20
-
-#define ENABLE_VPLL2_DEDICATED 0x05
-#define ENABLE_VPLL2_DEV_GRP 0xE0
-#define TWL4030_VPLL2_DEV_GRP 0x33
-#define TWL4030_VPLL2_DEDICATED 0x36
-
-#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
-
-
-static int ldp_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- gpio_request(LCD_PANEL_RESET_GPIO, "lcd reset");
- gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga");
- gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd panel");
- gpio_request(LCD_PANEL_BACKLIGHT_GPIO, "lcd backlight");
-
- gpio_direction_output(LCD_PANEL_QVGA_GPIO, 0);
- gpio_direction_output(LCD_PANEL_RESET_GPIO, 0);
- gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
- gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 0);
-
-#ifdef CONFIG_FB_OMAP_LCD_VGA
- gpio_set_value(LCD_PANEL_QVGA_GPIO, 0);
-#else
- gpio_set_value(LCD_PANEL_QVGA_GPIO, 1);
-#endif
- gpio_set_value(LCD_PANEL_RESET_GPIO, 1);
-
- return 0;
-}
-
-static void ldp_panel_cleanup(struct lcd_panel *panel)
-{
- gpio_free(LCD_PANEL_BACKLIGHT_GPIO);
- gpio_free(LCD_PANEL_ENABLE_GPIO);
- gpio_free(LCD_PANEL_QVGA_GPIO);
- gpio_free(LCD_PANEL_RESET_GPIO);
-}
-
-static int ldp_panel_enable(struct lcd_panel *panel)
-{
- if (0 != t2_out(PM_RECEIVER, ENABLE_VPLL2_DEDICATED,
- TWL4030_VPLL2_DEDICATED))
- return -EIO;
- if (0 != t2_out(PM_RECEIVER, ENABLE_VPLL2_DEV_GRP,
- TWL4030_VPLL2_DEV_GRP))
- return -EIO;
-
- gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 1);
- gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 1);
-
- if (0 != t2_out(PM_RECEIVER, ENABLE_VAUX3_DEDICATED,
- TWL4030_VAUX3_DEDICATED))
- return -EIO;
- if (0 != t2_out(PM_RECEIVER, ENABLE_VAUX3_DEV_GRP,
- TWL4030_VAUX3_DEV_GRP))
- return -EIO;
-
- return 0;
-}
-
-static void ldp_panel_disable(struct lcd_panel *panel)
-{
- gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
- gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 0);
-
- t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEDICATED);
- t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEV_GRP);
- msleep(4);
-}
-
-static unsigned long ldp_panel_get_caps(struct lcd_panel *panel)
-{
- return 0;
-}
-
-struct lcd_panel ldp_panel = {
- .name = "ldp",
- .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
- OMAP_LCDC_INV_HSYNC,
-
- .bpp = 16,
- .data_lines = 18,
- .x_res = LCD_XRES,
- .y_res = LCD_YRES,
- .hsw = 3, /* hsync_len (4) - 1 */
- .hfp = 3, /* right_margin (4) - 1 */
- .hbp = 39, /* left_margin (40) - 1 */
- .vsw = 1, /* vsync_len (2) - 1 */
- .vfp = 2, /* lower_margin */
- .vbp = 7, /* upper_margin (8) - 1 */
-
- .pixel_clock = LCD_PIXCLOCK_MAX,
-
- .init = ldp_panel_init,
- .cleanup = ldp_panel_cleanup,
- .enable = ldp_panel_enable,
- .disable = ldp_panel_disable,
- .get_caps = ldp_panel_get_caps,
-};
-
-static int ldp_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&ldp_panel);
- return 0;
-}
-
-static int ldp_panel_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int ldp_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- return 0;
-}
-
-static int ldp_panel_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-struct platform_driver ldp_panel_driver = {
- .probe = ldp_panel_probe,
- .remove = ldp_panel_remove,
- .suspend = ldp_panel_suspend,
- .resume = ldp_panel_resume,
- .driver = {
- .name = "ldp_lcd",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init ldp_panel_drv_init(void)
-{
- return platform_driver_register(&ldp_panel_driver);
-}
-
-static void __exit ldp_panel_drv_exit(void)
-{
- platform_driver_unregister(&ldp_panel_driver);
-}
-
-module_init(ldp_panel_drv_init);
-module_exit(ldp_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index 90e3bdd1b7a..eb381db7fe5 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
+#include <linux/module.h>
#include <plat/lcd_mipid.h>
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
deleted file mode 100644
index d7c6c3e0afc..00000000000
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * LCD panel support for the TI OMAP3 Beagle board
- *
- * Author: Koen Kooi <koen@openembedded.org>
- *
- * Derived from drivers/video/omap/lcd-omap3evm.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/i2c/twl.h>
-
-#include <asm/mach-types.h>
-
-#include "omapfb.h"
-
-#define LCD_PANEL_ENABLE_GPIO 170
-
-static int omap3beagle_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- gpio_request(LCD_PANEL_ENABLE_GPIO, "LCD enable");
- return 0;
-}
-
-static void omap3beagle_panel_cleanup(struct lcd_panel *panel)
-{
- gpio_free(LCD_PANEL_ENABLE_GPIO);
-}
-
-static int omap3beagle_panel_enable(struct lcd_panel *panel)
-{
- gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1);
- return 0;
-}
-
-static void omap3beagle_panel_disable(struct lcd_panel *panel)
-{
- gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0);
-}
-
-static unsigned long omap3beagle_panel_get_caps(struct lcd_panel *panel)
-{
- return 0;
-}
-
-struct lcd_panel omap3beagle_panel = {
- .name = "omap3beagle",
- .config = OMAP_LCDC_PANEL_TFT,
-
- .bpp = 16,
- .data_lines = 24,
- .x_res = 1024,
- .y_res = 768,
- .hsw = 3, /* hsync_len (4) - 1 */
- .hfp = 3, /* right_margin (4) - 1 */
- .hbp = 39, /* left_margin (40) - 1 */
- .vsw = 1, /* vsync_len (2) - 1 */
- .vfp = 2, /* lower_margin */
- .vbp = 7, /* upper_margin (8) - 1 */
-
- .pixel_clock = 64000,
-
- .init = omap3beagle_panel_init,
- .cleanup = omap3beagle_panel_cleanup,
- .enable = omap3beagle_panel_enable,
- .disable = omap3beagle_panel_disable,
- .get_caps = omap3beagle_panel_get_caps,
-};
-
-static int omap3beagle_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&omap3beagle_panel);
- return 0;
-}
-
-static int omap3beagle_panel_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int omap3beagle_panel_suspend(struct platform_device *pdev,
- pm_message_t mesg)
-{
- return 0;
-}
-
-static int omap3beagle_panel_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-struct platform_driver omap3beagle_panel_driver = {
- .probe = omap3beagle_panel_probe,
- .remove = omap3beagle_panel_remove,
- .suspend = omap3beagle_panel_suspend,
- .resume = omap3beagle_panel_resume,
- .driver = {
- .name = "omap3beagle_lcd",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init omap3beagle_panel_drv_init(void)
-{
- return platform_driver_register(&omap3beagle_panel_driver);
-}
-
-static void __exit omap3beagle_panel_drv_exit(void)
-{
- platform_driver_unregister(&omap3beagle_panel_driver);
-}
-
-module_init(omap3beagle_panel_drv_init);
-module_exit(omap3beagle_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
deleted file mode 100644
index 06840da0b09..00000000000
--- a/drivers/video/omap/lcd_omap3evm.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * LCD panel support for the TI OMAP3 EVM board
- *
- * Author: Steve Sakoman <steve@sakoman.com>
- *
- * Derived from drivers/video/omap/lcd-apollon.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/i2c/twl.h>
-
-#include <plat/mux.h>
-#include <asm/mach-types.h>
-
-#include "omapfb.h"
-
-#define LCD_PANEL_ENABLE_GPIO 153
-#define LCD_PANEL_LR 2
-#define LCD_PANEL_UD 3
-#define LCD_PANEL_INI 152
-#define LCD_PANEL_QVGA 154
-#define LCD_PANEL_RESB 155
-
-#define ENABLE_VDAC_DEDICATED 0x03
-#define ENABLE_VDAC_DEV_GRP 0x20
-#define ENABLE_VPLL2_DEDICATED 0x05
-#define ENABLE_VPLL2_DEV_GRP 0xE0
-
-#define TWL_LED_LEDEN 0x00
-#define TWL_PWMA_PWMAON 0x00
-#define TWL_PWMA_PWMAOFF 0x01
-
-static unsigned int bklight_level;
-
-static int omap3evm_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- gpio_request(LCD_PANEL_LR, "LCD lr");
- gpio_request(LCD_PANEL_UD, "LCD ud");
- gpio_request(LCD_PANEL_INI, "LCD ini");
- gpio_request(LCD_PANEL_RESB, "LCD resb");
- gpio_request(LCD_PANEL_QVGA, "LCD qvga");
-
- gpio_direction_output(LCD_PANEL_RESB, 1);
- gpio_direction_output(LCD_PANEL_INI, 1);
- gpio_direction_output(LCD_PANEL_QVGA, 0);
- gpio_direction_output(LCD_PANEL_LR, 1);
- gpio_direction_output(LCD_PANEL_UD, 1);
-
- twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
- twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
- twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
- bklight_level = 100;
-
- return 0;
-}
-
-static void omap3evm_panel_cleanup(struct lcd_panel *panel)
-{
- gpio_free(LCD_PANEL_QVGA);
- gpio_free(LCD_PANEL_RESB);
- gpio_free(LCD_PANEL_INI);
- gpio_free(LCD_PANEL_UD);
- gpio_free(LCD_PANEL_LR);
-}
-
-static int omap3evm_panel_enable(struct lcd_panel *panel)
-{
- gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0);
- return 0;
-}
-
-static void omap3evm_panel_disable(struct lcd_panel *panel)
-{
- gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1);
-}
-
-static unsigned long omap3evm_panel_get_caps(struct lcd_panel *panel)
-{
- return 0;
-}
-
-static int omap3evm_bklight_setlevel(struct lcd_panel *panel,
- unsigned int level)
-{
- u8 c;
- if ((level >= 0) && (level <= 100)) {
- c = (125 * (100 - level)) / 100 + 2;
- twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
- bklight_level = level;
- }
- return 0;
-}
-
-static unsigned int omap3evm_bklight_getlevel(struct lcd_panel *panel)
-{
- return bklight_level;
-}
-
-static unsigned int omap3evm_bklight_getmaxlevel(struct lcd_panel *panel)
-{
- return 100;
-}
-
-struct lcd_panel omap3evm_panel = {
- .name = "omap3evm",
- .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
- OMAP_LCDC_INV_HSYNC,
-
- .bpp = 16,
- .data_lines = 18,
- .x_res = 480,
- .y_res = 640,
- .hsw = 3, /* hsync_len (4) - 1 */
- .hfp = 3, /* right_margin (4) - 1 */
- .hbp = 39, /* left_margin (40) - 1 */
- .vsw = 1, /* vsync_len (2) - 1 */
- .vfp = 2, /* lower_margin */
- .vbp = 7, /* upper_margin (8) - 1 */
-
- .pixel_clock = 26000,
-
- .init = omap3evm_panel_init,
- .cleanup = omap3evm_panel_cleanup,
- .enable = omap3evm_panel_enable,
- .disable = omap3evm_panel_disable,
- .get_caps = omap3evm_panel_get_caps,
- .set_bklight_level = omap3evm_bklight_setlevel,
- .get_bklight_level = omap3evm_bklight_getlevel,
- .get_bklight_max = omap3evm_bklight_getmaxlevel,
-};
-
-static int omap3evm_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&omap3evm_panel);
- return 0;
-}
-
-static int omap3evm_panel_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int omap3evm_panel_suspend(struct platform_device *pdev,
- pm_message_t mesg)
-{
- return 0;
-}
-
-static int omap3evm_panel_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-struct platform_driver omap3evm_panel_driver = {
- .probe = omap3evm_panel_probe,
- .remove = omap3evm_panel_remove,
- .suspend = omap3evm_panel_suspend,
- .resume = omap3evm_panel_resume,
- .driver = {
- .name = "omap3evm_lcd",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init omap3evm_panel_drv_init(void)
-{
- return platform_driver_register(&omap3evm_panel_driver);
-}
-
-static void __exit omap3evm_panel_drv_exit(void)
-{
- platform_driver_unregister(&omap3evm_panel_driver);
-}
-
-module_init(omap3evm_panel_drv_init);
-module_exit(omap3evm_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index b87e8b83f29..6f8d13c4120 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <plat/mux.h>
#include "omapfb.h"
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
deleted file mode 100644
index 564933ffac6..00000000000
--- a/drivers/video/omap/lcd_overo.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * LCD panel support for the Gumstix Overo
- *
- * Author: Steve Sakoman <steve@sakoman.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/i2c/twl.h>
-
-#include <mach/gpio.h>
-#include <plat/mux.h>
-#include <asm/mach-types.h>
-
-#include "omapfb.h"
-
-#define LCD_ENABLE 144
-
-static int overo_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- if ((gpio_request(LCD_ENABLE, "LCD_ENABLE") == 0) &&
- (gpio_direction_output(LCD_ENABLE, 1) == 0))
- gpio_export(LCD_ENABLE, 0);
- else
- printk(KERN_ERR "could not obtain gpio for LCD_ENABLE\n");
-
- return 0;
-}
-
-static void overo_panel_cleanup(struct lcd_panel *panel)
-{
- gpio_free(LCD_ENABLE);
-}
-
-static int overo_panel_enable(struct lcd_panel *panel)
-{
- gpio_set_value(LCD_ENABLE, 1);
- return 0;
-}
-
-static void overo_panel_disable(struct lcd_panel *panel)
-{
- gpio_set_value(LCD_ENABLE, 0);
-}
-
-static unsigned long overo_panel_get_caps(struct lcd_panel *panel)
-{
- return 0;
-}
-
-struct lcd_panel overo_panel = {
- .name = "overo",
- .config = OMAP_LCDC_PANEL_TFT,
- .bpp = 16,
- .data_lines = 24,
-
-#if defined CONFIG_FB_OMAP_031M3R
-
- /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
- .x_res = 640,
- .y_res = 480,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
- .pixel_clock = 23500,
-
-#elif defined CONFIG_FB_OMAP_048M3R
-
- /* 800 x 600 @ 60 Hz Reduced blanking VESA CVT 0.48M3-R */
- .x_res = 800,
- .y_res = 600,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 11,
- .pixel_clock = 35500,
-
-#elif defined CONFIG_FB_OMAP_079M3R
-
- /* 1024 x 768 @ 60 Hz Reduced blanking VESA CVT 0.79M3-R */
- .x_res = 1024,
- .y_res = 768,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 15,
- .pixel_clock = 56000,
-
-#elif defined CONFIG_FB_OMAP_092M9R
-
- /* 1280 x 720 @ 60 Hz Reduced blanking VESA CVT 0.92M9-R */
- .x_res = 1280,
- .y_res = 720,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
- .vfp = 3,
- .vsw = 5,
- .vbp = 13,
- .pixel_clock = 64000,
-
-#else
-
- /* use 640 x 480 if no config option */
- /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
- .x_res = 640,
- .y_res = 480,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
- .pixel_clock = 23500,
-
-#endif
-
- .init = overo_panel_init,
- .cleanup = overo_panel_cleanup,
- .enable = overo_panel_enable,
- .disable = overo_panel_disable,
- .get_caps = overo_panel_get_caps,
-};
-
-static int overo_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&overo_panel);
- return 0;
-}
-
-static int overo_panel_remove(struct platform_device *pdev)
-{
- /* omapfb does not have unregister_panel */
- return 0;
-}
-
-static struct platform_driver overo_panel_driver = {
- .probe = overo_panel_probe,
- .remove = overo_panel_remove,
- .driver = {
- .name = "overo_lcd",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init overo_panel_drv_init(void)
-{
- return platform_driver_register(&overo_panel_driver);
-}
-
-static void __exit overo_panel_drv_exit(void)
-{
- platform_driver_unregister(&overo_panel_driver);
-}
-
-module_init(overo_panel_drv_init);
-module_exit(overo_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index ff0e6d7ab3a..b51b332e5a2 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -29,7 +29,7 @@ GPIO13 - screen blanking
#include <linux/module.h>
#include <linux/io.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include "omapfb.h"
static int palmtt_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index b3ddd743d8a..25d8e510319 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <plat/dma.h>
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 609a2807317..8d8e1fe1901 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -10,6 +10,13 @@ config PANEL_GENERIC_DPI
Supports LCD Panel used in TI SDP3430 and EVM boards,
OMAP3517 EVM boards and CM-T35.
+config PANEL_DVI
+ tristate "DVI output"
+ depends on OMAP2_DSS_DPI
+ help
+	  Driver for external monitors connected via DVI. The driver uses I2C
+	  to read EDID information from the monitor.
+
config PANEL_LGPHILIPS_LB035Q02
tristate "LG.Philips LB035Q02 LCD Panel"
depends on OMAP2_DSS_DPI && SPI
@@ -19,20 +26,30 @@ config PANEL_LGPHILIPS_LB035Q02
config PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
depends on OMAP2_DSS_DPI
- select BACKLIGHT_CLASS_DEVICE
+ depends on BACKLIGHT_CLASS_DEVICE
help
LCD Panel used in TI's SDP3430 and EVM boards
config PANEL_NEC_NL8048HL11_01B
tristate "NEC NL8048HL11-01B Panel"
depends on OMAP2_DSS_DPI
+ depends on SPI
+ depends on BACKLIGHT_CLASS_DEVICE
help
This NEC NL8048HL11-01B panel is TFT LCD
used in the Zoom2/3/3630 sdp boards.
+config PANEL_PICODLP
+ tristate "TI PICO DLP mini-projector"
+ depends on OMAP2_DSS && I2C
+ help
+	  A mini-projector used in TI's SDP4430 and EVM boards.
+	  For more info, please visit http://www.dlp.com/projector/
+
config PANEL_TAAL
tristate "Taal DSI Panel"
depends on OMAP2_DSS_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
help
Taal DSI command mode panel from TPO.
@@ -45,7 +62,14 @@ config PANEL_TPO_TD043MTEA1
config PANEL_ACX565AKM
tristate "ACX565AKM Panel"
depends on OMAP2_DSS_SDI && SPI
- select BACKLIGHT_CLASS_DEVICE
+ depends on BACKLIGHT_CLASS_DEVICE
help
This is the LCD panel used on Nokia N900
+
+config PANEL_N8X0
+ tristate "N8X0 Panel"
+ depends on OMAP2_DSS_RFBI && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  This is the LCD panel used on Nokia N8x0.
endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index 0f601ab3abf..fbfafc6eebb 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -1,8 +1,11 @@
obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o
+obj-$(CONFIG_PANEL_DVI) += panel-dvi.o
obj-$(CONFIG_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o
obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
+obj-$(CONFIG_PANEL_PICODLP) += panel-picodlp.o
obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
+obj-$(CONFIG_PANEL_N8X0) += panel-n8x0.o
diff --git a/drivers/video/omap2/displays/panel-dvi.c b/drivers/video/omap2/displays/panel-dvi.c
new file mode 100644
index 00000000000..03eb14af33e
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-dvi.c
@@ -0,0 +1,363 @@
+/*
+ * DVI output support
+ *
+ * Copyright (C) 2011 Texas Instruments Inc
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <video/omapdss.h>
+#include <linux/i2c.h>
+#include <drm/drm_edid.h>
+
+#include <video/omap-panel-dvi.h>
+
+static const struct omap_video_timings panel_dvi_default_timings = {
+ .x_res = 640,
+ .y_res = 480,
+
+ .pixel_clock = 23500,
+
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 7,
+};
+
+struct panel_drv_data {
+ struct omap_dss_device *dssdev;
+
+ struct mutex lock;
+};
+
+static inline struct panel_dvi_platform_data
+*get_pdata(const struct omap_dss_device *dssdev)
+{
+ return dssdev->data;
+}
+
+static int panel_dvi_power_on(struct omap_dss_device *dssdev)
+{
+ struct panel_dvi_platform_data *pdata = get_pdata(dssdev);
+ int r;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ if (pdata->platform_enable) {
+ r = pdata->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void panel_dvi_power_off(struct omap_dss_device *dssdev)
+{
+ struct panel_dvi_platform_data *pdata = get_pdata(dssdev);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
+ if (pdata->platform_disable)
+ pdata->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int panel_dvi_probe(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata;
+
+ ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ dssdev->panel.timings = panel_dvi_default_timings;
+ dssdev->panel.config = OMAP_DSS_LCD_TFT;
+
+ ddata->dssdev = dssdev;
+ mutex_init(&ddata->lock);
+
+ dev_set_drvdata(&dssdev->dev, ddata);
+
+ return 0;
+}
+
+static void __exit panel_dvi_remove(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ddata->lock);
+
+ dev_set_drvdata(&dssdev->dev, NULL);
+
+ mutex_unlock(&ddata->lock);
+
+ kfree(ddata);
+}
+
+static int panel_dvi_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ r = panel_dvi_power_on(dssdev);
+ if (r == 0)
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static void panel_dvi_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ddata->lock);
+
+ panel_dvi_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&ddata->lock);
+}
+
+static int panel_dvi_suspend(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ddata->lock);
+
+ panel_dvi_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static int panel_dvi_resume(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ r = panel_dvi_power_on(dssdev);
+ if (r == 0)
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static void panel_dvi_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ddata->lock);
+ dpi_set_timings(dssdev, timings);
+ mutex_unlock(&ddata->lock);
+}
+
+static void panel_dvi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ddata->lock);
+ *timings = dssdev->panel.timings;
+ mutex_unlock(&ddata->lock);
+}
+
+static int panel_dvi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&ddata->lock);
+ r = dpi_check_timings(dssdev, timings);
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+
+static int panel_dvi_ddc_read(struct i2c_adapter *adapter,
+ unsigned char *buf, u16 count, u8 offset)
+{
+ int r, retries;
+
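+	/*
+	 * Standard DDC read: the first message writes the one-byte offset,
+	 * the second reads 'count' bytes back from DDC_ADDR. Retry a few
+	 * times while the transfer returns -EAGAIN.
+	 */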
+ for (retries = 3; retries > 0; retries--) {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = count,
+ .buf = buf,
+ }
+ };
+
+ r = i2c_transfer(adapter, msgs, 2);
+ if (r == 2)
+ return 0;
+
+ if (r != -EAGAIN)
+ break;
+ }
+
+ return r < 0 ? r : -EIO;
+}
+
+static int panel_dvi_read_edid(struct omap_dss_device *dssdev,
+ u8 *edid, int len)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_dvi_platform_data *pdata = get_pdata(dssdev);
+ struct i2c_adapter *adapter;
+ int r, l, bytes_read;
+
+ mutex_lock(&ddata->lock);
+
+ if (pdata->i2c_bus_num == 0) {
+ r = -ENODEV;
+ goto err;
+ }
+
+ adapter = i2c_get_adapter(pdata->i2c_bus_num);
+ if (!adapter) {
+ dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
+ pdata->i2c_bus_num);
+ r = -EINVAL;
+ goto err;
+ }
+
+ l = min(EDID_LENGTH, len);
+ r = panel_dvi_ddc_read(adapter, edid, l, 0);
+ if (r)
+ goto err;
+
+ bytes_read = l;
+
+ /* if there are extensions, read second block */
+ if (len > EDID_LENGTH && edid[0x7e] > 0) {
+ l = min(EDID_LENGTH, len - EDID_LENGTH);
+
+ r = panel_dvi_ddc_read(adapter, edid + EDID_LENGTH,
+ l, EDID_LENGTH);
+ if (r)
+ goto err;
+
+ bytes_read += l;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return bytes_read;
+
+err:
+ mutex_unlock(&ddata->lock);
+ return r;
+}
+
+static bool panel_dvi_detect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
+ struct panel_dvi_platform_data *pdata = get_pdata(dssdev);
+ struct i2c_adapter *adapter;
+ unsigned char out;
+ int r;
+
+ mutex_lock(&ddata->lock);
+
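+	/*
+	 * No DDC bus configured: we cannot probe the monitor, so report it
+	 * as connected.
+	 */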
+ if (pdata->i2c_bus_num == 0)
+ goto out;
+
+ adapter = i2c_get_adapter(pdata->i2c_bus_num);
+ if (!adapter)
+ goto out;
+
+ r = panel_dvi_ddc_read(adapter, &out, 1, 0);
+
+ mutex_unlock(&ddata->lock);
+
+ return r == 0;
+
+out:
+ mutex_unlock(&ddata->lock);
+ return true;
+}
+
+static struct omap_dss_driver panel_dvi_driver = {
+ .probe = panel_dvi_probe,
+ .remove = __exit_p(panel_dvi_remove),
+
+ .enable = panel_dvi_enable,
+ .disable = panel_dvi_disable,
+ .suspend = panel_dvi_suspend,
+ .resume = panel_dvi_resume,
+
+ .set_timings = panel_dvi_set_timings,
+ .get_timings = panel_dvi_get_timings,
+ .check_timings = panel_dvi_check_timings,
+
+ .read_edid = panel_dvi_read_edid,
+ .detect = panel_dvi_detect,
+
+ .driver = {
+ .name = "dvi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init panel_dvi_init(void)
+{
+ return omap_dss_register_driver(&panel_dvi_driver);
+}
+
+static void __exit panel_dvi_exit(void)
+{
+ omap_dss_unregister_driver(&panel_dvi_driver);
+}
+
+module_init(panel_dvi_init);
+module_exit(panel_dvi_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 9c90f75653f..519c47d2057 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -58,30 +58,6 @@ struct panel_config {
/* Panel configurations */
static struct panel_config generic_dpi_panels[] = {
- /* Generic Panel */
- {
- {
- .x_res = 640,
- .y_res = 480,
-
- .pixel_clock = 23500,
-
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
-
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
- },
- .acbi = 0x0,
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT,
- .power_on_delay = 0,
- .power_off_delay = 0,
- .name = "generic",
- },
-
/* Sharp LQ043T1DG01 */
{
{
@@ -232,6 +208,95 @@ static struct panel_config generic_dpi_panels[] = {
.power_off_delay = 0,
.name = "powertip_ph480272t",
},
+
+ /* Innolux AT070TN83 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 40000,
+
+ .hsw = 48,
+ .hfp = 1,
+ .hbp = 1,
+
+ .vsw = 3,
+ .vfp = 12,
+ .vbp = 25,
+ },
+ .acbi = 0x0,
+ .acb = 0x28,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "innolux_at070tn83",
+ },
+
+ /* NEC NL2432DR22-11B */
+ {
+ {
+ .x_res = 240,
+ .y_res = 320,
+
+ .pixel_clock = 5400,
+
+ .hsw = 3,
+ .hfp = 3,
+ .hbp = 39,
+
+ .vsw = 1,
+ .vfp = 2,
+ .vbp = 7,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .name = "nec_nl2432dr22-11b",
+ },
+
+ /* Unknown panel used in OMAP H4 */
+ {
+ {
+ .x_res = 240,
+ .y_res = 320,
+
+ .pixel_clock = 6250,
+
+ .hsw = 15,
+ .hfp = 15,
+ .hbp = 60,
+
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 1,
+ },
+ .config = OMAP_DSS_LCD_TFT,
+
+ .name = "h4",
+ },
+
+ /* Unknown panel used in Samsung OMAP2 Apollon */
+ {
+ {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 6250,
+
+ .hsw = 41,
+ .hfp = 2,
+ .hbp = 2,
+
+ .vsw = 10,
+ .vfp = 2,
+ .vbp = 2,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+
+ .name = "apollon",
+ },
};
struct panel_drv_data {
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
new file mode 100644
index 00000000000..150e8bae35a
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -0,0 +1,747 @@
+/* #define DEBUG */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-n8x0.h>
+
+#define BLIZZARD_REV_CODE 0x00
+#define BLIZZARD_CONFIG 0x02
+#define BLIZZARD_PLL_DIV 0x04
+#define BLIZZARD_PLL_LOCK_RANGE 0x06
+#define BLIZZARD_PLL_CLOCK_SYNTH_0 0x08
+#define BLIZZARD_PLL_CLOCK_SYNTH_1 0x0a
+#define BLIZZARD_PLL_MODE 0x0c
+#define BLIZZARD_CLK_SRC 0x0e
+#define BLIZZARD_MEM_BANK0_ACTIVATE 0x10
+#define BLIZZARD_MEM_BANK0_STATUS 0x14
+#define BLIZZARD_PANEL_CONFIGURATION 0x28
+#define BLIZZARD_HDISP 0x2a
+#define BLIZZARD_HNDP 0x2c
+#define BLIZZARD_VDISP0 0x2e
+#define BLIZZARD_VDISP1 0x30
+#define BLIZZARD_VNDP 0x32
+#define BLIZZARD_HSW 0x34
+#define BLIZZARD_VSW 0x38
+#define BLIZZARD_DISPLAY_MODE 0x68
+#define BLIZZARD_INPUT_WIN_X_START_0 0x6c
+#define BLIZZARD_DATA_SOURCE_SELECT 0x8e
+#define BLIZZARD_DISP_MEM_DATA_PORT 0x90
+#define BLIZZARD_DISP_MEM_READ_ADDR0 0x92
+#define BLIZZARD_POWER_SAVE 0xE6
+#define BLIZZARD_NDISP_CTRL_STATUS 0xE8
+
+/* Data source select */
+/* For S1D13745 */
+#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND 0x00
+#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE 0x01
+#define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE 0x04
+#define BLIZZARD_SRC_DISABLE_OVERLAY 0x05
+/* For S1D13744 */
+#define BLIZZARD_SRC_WRITE_LCD 0x00
+#define BLIZZARD_SRC_BLT_LCD 0x06
+
+#define BLIZZARD_COLOR_RGB565 0x01
+#define BLIZZARD_COLOR_YUV420 0x09
+
+#define BLIZZARD_VERSION_S1D13745 0x01 /* Hailstorm */
+#define BLIZZARD_VERSION_S1D13744 0x02 /* Blizzard */
+
+#define MIPID_CMD_READ_DISP_ID 0x04
+#define MIPID_CMD_READ_RED 0x06
+#define MIPID_CMD_READ_GREEN 0x07
+#define MIPID_CMD_READ_BLUE 0x08
+#define MIPID_CMD_READ_DISP_STATUS 0x09
+#define MIPID_CMD_RDDSDR 0x0F
+#define MIPID_CMD_SLEEP_IN 0x10
+#define MIPID_CMD_SLEEP_OUT 0x11
+#define MIPID_CMD_DISP_OFF 0x28
+#define MIPID_CMD_DISP_ON 0x29
+
+static struct panel_drv_data {
+ struct mutex lock;
+
+ struct omap_dss_device *dssdev;
+ struct spi_device *spidev;
+ struct backlight_device *bldev;
+
+ int blizzard_ver;
+} s_drv_data;
+
+
+static inline
+struct panel_n8x0_data *get_board_data(const struct omap_dss_device *dssdev)
+{
+ return dssdev->data;
+}
+
+static inline
+struct panel_drv_data *get_drv_data(const struct omap_dss_device *dssdev)
+{
+ return &s_drv_data;
+}
+
+
+static inline void blizzard_cmd(u8 cmd)
+{
+ omap_rfbi_write_command(&cmd, 1);
+}
+
+static inline void blizzard_write(u8 cmd, const u8 *buf, int len)
+{
+ omap_rfbi_write_command(&cmd, 1);
+ omap_rfbi_write_data(buf, len);
+}
+
+static inline void blizzard_read(u8 cmd, u8 *buf, int len)
+{
+ omap_rfbi_write_command(&cmd, 1);
+ omap_rfbi_read_data(buf, len);
+}
+
+static u8 blizzard_read_reg(u8 cmd)
+{
+ u8 data;
+ blizzard_read(cmd, &data, 1);
+ return data;
+}
+
+static void blizzard_ctrl_setup_update(struct omap_dss_device *dssdev,
+ int x, int y, int w, int h)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+ u8 tmp[18];
+ int x_end, y_end;
+
+ x_end = x + w - 1;
+ y_end = y + h - 1;
+
+ tmp[0] = x;
+ tmp[1] = x >> 8;
+ tmp[2] = y;
+ tmp[3] = y >> 8;
+ tmp[4] = x_end;
+ tmp[5] = x_end >> 8;
+ tmp[6] = y_end;
+ tmp[7] = y_end >> 8;
+
+ /* scaling? */
+ tmp[8] = x;
+ tmp[9] = x >> 8;
+ tmp[10] = y;
+ tmp[11] = y >> 8;
+ tmp[12] = x_end;
+ tmp[13] = x_end >> 8;
+ tmp[14] = y_end;
+ tmp[15] = y_end >> 8;
+
+ tmp[16] = BLIZZARD_COLOR_RGB565;
+
+ if (ddata->blizzard_ver == BLIZZARD_VERSION_S1D13745)
+ tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
+ else
+ tmp[17] = ddata->blizzard_ver == BLIZZARD_VERSION_S1D13744 ?
+ BLIZZARD_SRC_WRITE_LCD :
+ BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
+
+ omap_rfbi_configure(dssdev, 16, 8);
+
+ blizzard_write(BLIZZARD_INPUT_WIN_X_START_0, tmp, 18);
+
+ omap_rfbi_configure(dssdev, 16, 16);
+}
+
+static void mipid_transfer(struct spi_device *spi, int cmd, const u8 *wbuf,
+ int wlen, u8 *rbuf, int rlen)
+{
+ struct spi_message m;
+ struct spi_transfer *x, xfer[4];
+ u16 w;
+ int r;
+
+ spi_message_init(&m);
+
+ memset(xfer, 0, sizeof(xfer));
+ x = &xfer[0];
+
+ cmd &= 0xff;
+ x->tx_buf = &cmd;
+ x->bits_per_word = 9;
+ x->len = 2;
+ spi_message_add_tail(x, &m);
+
+ if (wlen) {
+ x++;
+ x->tx_buf = wbuf;
+ x->len = wlen;
+ x->bits_per_word = 9;
+ spi_message_add_tail(x, &m);
+ }
+
+ if (rlen) {
+ x++;
+ x->rx_buf = &w;
+ x->len = 1;
+ spi_message_add_tail(x, &m);
+
+ if (rlen > 1) {
+ /* Arrange for the extra clock before the first
+ * data bit.
+ */
+ x->bits_per_word = 9;
+ x->len = 2;
+
+ x++;
+ x->rx_buf = &rbuf[1];
+ x->len = rlen - 1;
+ spi_message_add_tail(x, &m);
+ }
+ }
+
+ r = spi_sync(spi, &m);
+ if (r < 0)
+ dev_dbg(&spi->dev, "spi_sync %d\n", r);
+
+ if (rlen)
+ rbuf[0] = w & 0xff;
+}
+
+static inline void mipid_cmd(struct spi_device *spi, int cmd)
+{
+ mipid_transfer(spi, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void mipid_write(struct spi_device *spi,
+ int reg, const u8 *buf, int len)
+{
+ mipid_transfer(spi, reg, buf, len, NULL, 0);
+}
+
+static inline void mipid_read(struct spi_device *spi,
+ int reg, u8 *buf, int len)
+{
+ mipid_transfer(spi, reg, NULL, 0, buf, len);
+}
+
+static void set_data_lines(struct spi_device *spi, int data_lines)
+{
+ u16 par;
+
+ switch (data_lines) {
+ case 16:
+ par = 0x150;
+ break;
+ case 18:
+ par = 0x160;
+ break;
+ case 24:
+ par = 0x170;
+ break;
+ }
+
+ mipid_write(spi, 0x3a, (u8 *)&par, 2);
+}
+
+static void send_init_string(struct spi_device *spi)
+{
+ u16 initpar[] = { 0x0102, 0x0100, 0x0100 };
+ mipid_write(spi, 0xc2, (u8 *)initpar, sizeof(initpar));
+}
+
+static void send_display_on(struct spi_device *spi)
+{
+ mipid_cmd(spi, MIPID_CMD_DISP_ON);
+}
+
+static void send_display_off(struct spi_device *spi)
+{
+ mipid_cmd(spi, MIPID_CMD_DISP_OFF);
+}
+
+static void send_sleep_out(struct spi_device *spi)
+{
+ mipid_cmd(spi, MIPID_CMD_SLEEP_OUT);
+ msleep(120);
+}
+
+static void send_sleep_in(struct spi_device *spi)
+{
+ mipid_cmd(spi, MIPID_CMD_SLEEP_IN);
+ msleep(50);
+}
+
+static int n8x0_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct panel_n8x0_data *bdata = get_board_data(dssdev);
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+ struct spi_device *spi = ddata->spidev;
+ u8 rev, conf;
+ u8 display_id[3];
+ const char *panel_name;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ gpio_direction_output(bdata->ctrl_pwrdown, 1);
+
+ if (bdata->platform_enable) {
+ r = bdata->platform_enable(dssdev);
+ if (r)
+ goto err_plat_en;
+ }
+
+ r = omapdss_rfbi_display_enable(dssdev);
+ if (r)
+ goto err_rfbi_en;
+
+ rev = blizzard_read_reg(BLIZZARD_REV_CODE);
+ conf = blizzard_read_reg(BLIZZARD_CONFIG);
+
+ switch (rev & 0xfc) {
+ case 0x9c:
+ ddata->blizzard_ver = BLIZZARD_VERSION_S1D13744;
+ dev_info(&dssdev->dev, "s1d13744 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+ break;
+ case 0xa4:
+ ddata->blizzard_ver = BLIZZARD_VERSION_S1D13745;
+ dev_info(&dssdev->dev, "s1d13745 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+ break;
+ default:
+ dev_err(&dssdev->dev, "invalid s1d1374x revision %02x\n", rev);
+ r = -ENODEV;
+ goto err_inv_chip;
+ }
+
+ /* panel */
+
+ gpio_direction_output(bdata->panel_reset, 1);
+
+ mipid_read(spi, MIPID_CMD_READ_DISP_ID, display_id, 3);
+ dev_dbg(&spi->dev, "MIPI display ID: %02x%02x%02x\n",
+ display_id[0], display_id[1], display_id[2]);
+
+ switch (display_id[0]) {
+ case 0x45:
+ panel_name = "lph8923";
+ break;
+ case 0x83:
+ panel_name = "ls041y3";
+ break;
+ default:
+ dev_err(&dssdev->dev, "invalid display ID 0x%x\n",
+ display_id[0]);
+ r = -ENODEV;
+ goto err_inv_panel;
+ }
+
+ dev_info(&dssdev->dev, "%s rev %02x LCD detected\n",
+ panel_name, display_id[1]);
+
+ send_sleep_out(spi);
+ send_init_string(spi);
+ set_data_lines(spi, 24);
+ send_display_on(spi);
+
+ return 0;
+
+err_inv_panel:
+ /*
+ * HACK: we should turn off the panel here, but there is some problem
+ * with the initialization sequence, and we fail to init the panel if we
+ * have turned it off
+ */
+ /* gpio_direction_output(bdata->panel_reset, 0); */
+err_inv_chip:
+ omapdss_rfbi_display_disable(dssdev);
+err_rfbi_en:
+ if (bdata->platform_disable)
+ bdata->platform_disable(dssdev);
+err_plat_en:
+ gpio_direction_output(bdata->ctrl_pwrdown, 0);
+ return r;
+}
+
+static void n8x0_panel_power_off(struct omap_dss_device *dssdev)
+{
+ struct panel_n8x0_data *bdata = get_board_data(dssdev);
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+ struct spi_device *spi = ddata->spidev;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
+ send_display_off(spi);
+ send_sleep_in(spi);
+
+ if (bdata->platform_disable)
+ bdata->platform_disable(dssdev);
+
+ /*
+ * HACK: we should turn off the panel here, but there is some problem
+ * with the initialization sequence, and we fail to init the panel if we
+ * have turned it off
+ */
+ /* gpio_direction_output(bdata->panel_reset, 0); */
+ gpio_direction_output(bdata->ctrl_pwrdown, 0);
+ omapdss_rfbi_display_disable(dssdev);
+}
+
+static const struct rfbi_timings n8x0_panel_timings = {
+ .cs_on_time = 0,
+
+ .we_on_time = 9000,
+ .we_off_time = 18000,
+ .we_cycle_time = 36000,
+
+ .re_on_time = 9000,
+ .re_off_time = 27000,
+ .re_cycle_time = 36000,
+
+ .access_time = 27000,
+ .cs_off_time = 36000,
+
+ .cs_pulse_width = 0,
+};
+
+static int n8x0_bl_update_status(struct backlight_device *dev)
+{
+ struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
+ struct panel_n8x0_data *bdata = get_board_data(dssdev);
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+ int r;
+ int level;
+
+ mutex_lock(&ddata->lock);
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
+
+ if (!bdata->set_backlight)
+ r = -EINVAL;
+ else
+ r = bdata->set_backlight(dssdev, level);
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int n8x0_bl_get_intensity(struct backlight_device *dev)
+{
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ return dev->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops n8x0_bl_ops = {
+ .get_brightness = n8x0_bl_get_intensity,
+ .update_status = n8x0_bl_update_status,
+};
+
+static int n8x0_panel_probe(struct omap_dss_device *dssdev)
+{
+ struct panel_n8x0_data *bdata = get_board_data(dssdev);
+ struct panel_drv_data *ddata;
+ struct backlight_device *bldev;
+ struct backlight_properties props;
+ int r;
+
+ dev_dbg(&dssdev->dev, "probe\n");
+
+ if (!bdata)
+ return -EINVAL;
+
+ s_drv_data.dssdev = dssdev;
+
+ ddata = &s_drv_data;
+
+ mutex_init(&ddata->lock);
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT;
+ dssdev->panel.timings.x_res = 800;
+ dssdev->panel.timings.y_res = 480;
+ dssdev->ctrl.pixel_size = 16;
+ dssdev->ctrl.rfbi_timings = n8x0_panel_timings;
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = 127;
+ props.type = BACKLIGHT_PLATFORM;
+ bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev,
+ dssdev, &n8x0_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ dev_err(&dssdev->dev, "register backlight failed\n");
+ return r;
+ }
+
+ ddata->bldev = bldev;
+
+ bldev->props.fb_blank = FB_BLANK_UNBLANK;
+ bldev->props.power = FB_BLANK_UNBLANK;
+ bldev->props.brightness = 127;
+
+ n8x0_bl_update_status(bldev);
+
+ return 0;
+}
+
+static void n8x0_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+ struct backlight_device *bldev;
+
+ dev_dbg(&dssdev->dev, "remove\n");
+
+ bldev = ddata->bldev;
+ bldev->props.power = FB_BLANK_POWERDOWN;
+ n8x0_bl_update_status(bldev);
+ backlight_device_unregister(bldev);
+
+ dev_set_drvdata(&dssdev->dev, NULL);
+}
+
+static int n8x0_panel_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+ int r;
+
+ dev_dbg(&dssdev->dev, "enable\n");
+
+ mutex_lock(&ddata->lock);
+
+ rfbi_bus_lock();
+
+ r = n8x0_panel_power_on(dssdev);
+
+ rfbi_bus_unlock();
+
+ if (r) {
+ mutex_unlock(&ddata->lock);
+ return r;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static void n8x0_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+
+ dev_dbg(&dssdev->dev, "disable\n");
+
+ mutex_lock(&ddata->lock);
+
+ rfbi_bus_lock();
+
+ n8x0_panel_power_off(dssdev);
+
+ rfbi_bus_unlock();
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&ddata->lock);
+}
+
+static int n8x0_panel_suspend(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+
+ dev_dbg(&dssdev->dev, "suspend\n");
+
+ mutex_lock(&ddata->lock);
+
+ rfbi_bus_lock();
+
+ n8x0_panel_power_off(dssdev);
+
+ rfbi_bus_unlock();
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static int n8x0_panel_resume(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+ int r;
+
+ dev_dbg(&dssdev->dev, "resume\n");
+
+ mutex_lock(&ddata->lock);
+
+ rfbi_bus_lock();
+
+ r = n8x0_panel_power_on(dssdev);
+
+ rfbi_bus_unlock();
+
+ if (r) {
+ mutex_unlock(&ddata->lock);
+ return r;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres)
+{
+ *xres = dssdev->panel.timings.x_res;
+ *yres = dssdev->panel.timings.y_res;
+}
+
+static void update_done(void *data)
+{
+ rfbi_bus_unlock();
+}
+
+static int n8x0_panel_update(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+
+ dev_dbg(&dssdev->dev, "update\n");
+
+ mutex_lock(&ddata->lock);
+ rfbi_bus_lock();
+
+ omap_rfbi_prepare_update(dssdev, &x, &y, &w, &h);
+
+ blizzard_ctrl_setup_update(dssdev, x, y, w, h);
+
+ omap_rfbi_update(dssdev, x, y, w, h, update_done, NULL);
+
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static int n8x0_panel_sync(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = get_drv_data(dssdev);
+
+ dev_dbg(&dssdev->dev, "sync\n");
+
+ mutex_lock(&ddata->lock);
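+	/*
+	 * Taking and releasing the bus lock waits for a pending update to
+	 * finish, since update_done() releases the lock.
+	 */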
+ rfbi_bus_lock();
+ rfbi_bus_unlock();
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static struct omap_dss_driver n8x0_panel_driver = {
+ .probe = n8x0_panel_probe,
+ .remove = n8x0_panel_remove,
+
+ .enable = n8x0_panel_enable,
+ .disable = n8x0_panel_disable,
+ .suspend = n8x0_panel_suspend,
+ .resume = n8x0_panel_resume,
+
+ .update = n8x0_panel_update,
+ .sync = n8x0_panel_sync,
+
+ .get_resolution = n8x0_panel_get_resolution,
+ .get_recommended_bpp = omapdss_default_get_recommended_bpp,
+
+ .get_timings = n8x0_panel_get_timings,
+
+ .driver = {
+ .name = "n8x0_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* PANEL */
+
+static int mipid_spi_probe(struct spi_device *spi)
+{
+ dev_dbg(&spi->dev, "mipid_spi_probe\n");
+
+ spi->mode = SPI_MODE_0;
+
+ s_drv_data.spidev = spi;
+
+ return 0;
+}
+
+static int mipid_spi_remove(struct spi_device *spi)
+{
+ dev_dbg(&spi->dev, "mipid_spi_remove\n");
+ return 0;
+}
+
+static struct spi_driver mipid_spi_driver = {
+ .driver = {
+ .name = "lcd_mipid",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = mipid_spi_probe,
+ .remove = __devexit_p(mipid_spi_remove),
+};
+
+static int __init n8x0_panel_drv_init(void)
+{
+ int r;
+
+ r = spi_register_driver(&mipid_spi_driver);
+ if (r) {
+ pr_err("n8x0_panel: spi driver registration failed\n");
+ return r;
+ }
+
+ r = omap_dss_register_driver(&n8x0_panel_driver);
+ if (r) {
+ pr_err("n8x0_panel: dss driver registration failed\n");
+ spi_unregister_driver(&mipid_spi_driver);
+ return r;
+ }
+
+ return 0;
+}
+
+static void __exit n8x0_panel_drv_exit(void)
+{
+ spi_unregister_driver(&mipid_spi_driver);
+
+ omap_dss_unregister_driver(&n8x0_panel_driver);
+}
+
+module_init(n8x0_panel_drv_init);
+module_exit(n8x0_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
new file mode 100644
index 00000000000..98ebdaddab5
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-picodlp.c
@@ -0,0 +1,594 @@
+/*
+ * picodlp panel driver
+ * picodlp_i2c_driver: i2c_client driver
+ *
+ * Copyright (C) 2009-2011 Texas Instruments
+ * Author: Mythri P K <mythripk@ti.com>
+ * Mayuresh Janorkar <mayur@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <video/omapdss.h>
+#include <video/omap-panel-picodlp.h>
+
+#include "panel-picodlp.h"
+
+struct picodlp_data {
+ struct mutex lock;
+ struct i2c_client *picodlp_i2c_client;
+};
+
+static struct i2c_board_info picodlp_i2c_board_info = {
+ I2C_BOARD_INFO("picodlp_i2c_driver", 0x1b),
+};
+
+struct picodlp_i2c_data {
+ struct mutex xfer_lock;
+};
+
+static const struct i2c_device_id picodlp_i2c_id[] = {
+ { "picodlp_i2c_driver", 0 },
+ { },
+};
+
+struct picodlp_i2c_command {
+ u8 reg;
+ u32 value;
+};
+
+static struct omap_video_timings pico_ls_timings = {
+ .x_res = 864,
+ .y_res = 480,
+ .hsw = 7,
+ .hfp = 11,
+ .hbp = 7,
+
+ .pixel_clock = 19200,
+
+ .vsw = 2,
+ .vfp = 3,
+ .vbp = 14,
+};
+
+static inline struct picodlp_panel_data
+ *get_panel_data(const struct omap_dss_device *dssdev)
+{
+ return (struct picodlp_panel_data *) dssdev->data;
+}
+
+static u32 picodlp_i2c_read(struct i2c_client *client, u8 reg)
+{
+ u8 read_cmd[] = {READ_REG_SELECT, reg}, data[4];
+ struct picodlp_i2c_data *picodlp_i2c_data = i2c_get_clientdata(client);
+ struct i2c_msg msg[2];
+
+ mutex_lock(&picodlp_i2c_data->xfer_lock);
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = read_cmd;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 4;
+ msg[1].buf = data;
+
+ i2c_transfer(client->adapter, msg, 2);
+ mutex_unlock(&picodlp_i2c_data->xfer_lock);
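+ /* the register value is returned MSB first; assemble the 32-bit word */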
+ return (data[3] | (data[2] << 8) | (data[1] << 16) | (data[0] << 24));
+}
+
+static int picodlp_i2c_write_block(struct i2c_client *client,
+ u8 *data, int len)
+{
+ struct i2c_msg msg;
+ int i, r, msg_count = 1;
+
+ struct picodlp_i2c_data *picodlp_i2c_data = i2c_get_clientdata(client);
+
+ if (len < 1 || len > 32) {
+ dev_err(&client->dev,
+ "invalid picodlp_i2c_write_block len %d\n", len);
+ return -EIO;
+ }
+ mutex_lock(&picodlp_i2c_data->xfer_lock);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ r = i2c_transfer(client->adapter, &msg, msg_count);
+ mutex_unlock(&picodlp_i2c_data->xfer_lock);
+
+ /*
+ * i2c_transfer returns:
+ * number of messages sent in case of success
+ * a negative error number in case of failure
+ */
+ if (r != msg_count)
+ goto err;
+
+ /* In case of success */
+ for (i = 0; i < len; i++)
+ dev_dbg(&client->dev,
+ "addr %x bw 0x%02x[%d]: 0x%02x\n",
+ client->addr, data[0] + i, i, data[i]);
+
+ return 0;
+err:
+ dev_err(&client->dev, "picodlp_i2c_write error\n");
+ return r;
+}
+
+static int picodlp_i2c_write(struct i2c_client *client, u8 reg, u32 value)
+{
+ u8 data[5];
+ int i;
+
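+ /* send the register address followed by the 32-bit value, MSB first */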
+ data[0] = reg;
+ for (i = 1; i < 5; i++)
+ data[i] = (value >> (32 - (i) * 8)) & 0xFF;
+
+ return picodlp_i2c_write_block(client, data, 5);
+}
+
+static int picodlp_i2c_write_array(struct i2c_client *client,
+ const struct picodlp_i2c_command commands[],
+ int count)
+{
+ int i, r = 0;
+ for (i = 0; i < count; i++) {
+ r = picodlp_i2c_write(client, commands[i].reg,
+ commands[i].value);
+ if (r)
+ return r;
+ }
+ return r;
+}
+
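+/* poll MAIN_STATUS until the DMA_STATUS bit clears, giving up after 100 tries */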
+static int picodlp_wait_for_dma_done(struct i2c_client *client)
+{
+ u8 trial = 100;
+
+ do {
+ msleep(1);
+ if (!trial--)
+ return -ETIMEDOUT;
+ } while (picodlp_i2c_read(client, MAIN_STATUS) & DMA_STATUS);
+
+ return 0;
+}
+
+/**
+ * picodlp_i2c_init - initialization sequence for the picodlp controller
+ * @client: i2c client used for communication
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int picodlp_i2c_init(struct i2c_client *client)
+{
+ int r;
+ static const struct picodlp_i2c_command init_cmd_set1[] = {
+ {SOFT_RESET, 1},
+ {DMD_PARK_TRIGGER, 1},
+ {MISC_REG, 5},
+ {SEQ_CONTROL, 0},
+ {SEQ_VECTOR, 0x100},
+ {DMD_BLOCK_COUNT, 7},
+ {DMD_VCC_CONTROL, 0x109},
+ {DMD_PARK_PULSE_COUNT, 0xA},
+ {DMD_PARK_PULSE_WIDTH, 0xB},
+ {DMD_PARK_DELAY, 0x2ED},
+ {DMD_SHADOW_ENABLE, 0},
+ {FLASH_OPCODE, 0xB},
+ {FLASH_DUMMY_BYTES, 1},
+ {FLASH_ADDR_BYTES, 3},
+ {PBC_CONTROL, 0},
+ {FLASH_START_ADDR, CMT_LUT_0_START_ADDR},
+ {FLASH_READ_BYTES, CMT_LUT_0_SIZE},
+ {CMT_SPLASH_LUT_START_ADDR, 0},
+ {CMT_SPLASH_LUT_DEST_SELECT, CMT_LUT_ALL},
+ {PBC_CONTROL, 1},
+ };
+
+ static const struct picodlp_i2c_command init_cmd_set2[] = {
+ {PBC_CONTROL, 0},
+ {CMT_SPLASH_LUT_DEST_SELECT, 0},
+ {PBC_CONTROL, 0},
+ {FLASH_START_ADDR, SEQUENCE_0_START_ADDR},
+ {FLASH_READ_BYTES, SEQUENCE_0_SIZE},
+ {SEQ_RESET_LUT_START_ADDR, 0},
+ {SEQ_RESET_LUT_DEST_SELECT, SEQ_SEQ_LUT},
+ {PBC_CONTROL, 1},
+ };
+
+ static const struct picodlp_i2c_command init_cmd_set3[] = {
+ {PBC_CONTROL, 0},
+ {SEQ_RESET_LUT_DEST_SELECT, 0},
+ {PBC_CONTROL, 0},
+ {FLASH_START_ADDR, DRC_TABLE_0_START_ADDR},
+ {FLASH_READ_BYTES, DRC_TABLE_0_SIZE},
+ {SEQ_RESET_LUT_START_ADDR, 0},
+ {SEQ_RESET_LUT_DEST_SELECT, SEQ_DRC_LUT_ALL},
+ {PBC_CONTROL, 1},
+ };
+
+ static const struct picodlp_i2c_command init_cmd_set4[] = {
+ {PBC_CONTROL, 0},
+ {SEQ_RESET_LUT_DEST_SELECT, 0},
+ {SDC_ENABLE, 1},
+ {AGC_CTRL, 7},
+ {CCA_C1A, 0x100},
+ {CCA_C1B, 0x0},
+ {CCA_C1C, 0x0},
+ {CCA_C2A, 0x0},
+ {CCA_C2B, 0x100},
+ {CCA_C2C, 0x0},
+ {CCA_C3A, 0x0},
+ {CCA_C3B, 0x0},
+ {CCA_C3C, 0x100},
+ {CCA_C7A, 0x100},
+ {CCA_C7B, 0x100},
+ {CCA_C7C, 0x100},
+ {CCA_ENABLE, 1},
+ {CPU_IF_MODE, 1},
+ {SHORT_FLIP, 1},
+ {CURTAIN_CONTROL, 0},
+ {DMD_PARK_TRIGGER, 0},
+ {R_DRIVE_CURRENT, 0x298},
+ {G_DRIVE_CURRENT, 0x298},
+ {B_DRIVE_CURRENT, 0x298},
+ {RGB_DRIVER_ENABLE, 7},
+ {SEQ_CONTROL, 0},
+ {ACTGEN_CONTROL, 0x10},
+ {SEQUENCE_MODE, SEQ_LOCK},
+ {DATA_FORMAT, RGB888},
+ {INPUT_RESOLUTION, WVGA_864_LANDSCAPE},
+ {INPUT_SOURCE, PARALLEL_RGB},
+ {CPU_IF_SYNC_METHOD, 1},
+ {SEQ_CONTROL, 1}
+ };
+
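+ /*
+ * The first three command sets load tables from flash (CMT LUT,
+ * sequence LUT, DRC table); each DMA load must finish before the
+ * next set is sent. The last set configures the display path.
+ */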
+ r = picodlp_i2c_write_array(client, init_cmd_set1,
+ ARRAY_SIZE(init_cmd_set1));
+ if (r)
+ return r;
+
+ r = picodlp_wait_for_dma_done(client);
+ if (r)
+ return r;
+
+ r = picodlp_i2c_write_array(client, init_cmd_set2,
+ ARRAY_SIZE(init_cmd_set2));
+ if (r)
+ return r;
+
+ r = picodlp_wait_for_dma_done(client);
+ if (r)
+ return r;
+
+ r = picodlp_i2c_write_array(client, init_cmd_set3,
+ ARRAY_SIZE(init_cmd_set3));
+ if (r)
+ return r;
+
+ r = picodlp_wait_for_dma_done(client);
+ if (r)
+ return r;
+
+ r = picodlp_i2c_write_array(client, init_cmd_set4,
+ ARRAY_SIZE(init_cmd_set4));
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int picodlp_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct picodlp_i2c_data *picodlp_i2c_data;
+
+ picodlp_i2c_data = kzalloc(sizeof(struct picodlp_i2c_data), GFP_KERNEL);
+
+ if (!picodlp_i2c_data)
+ return -ENOMEM;
+
+ mutex_init(&picodlp_i2c_data->xfer_lock);
+ i2c_set_clientdata(client, picodlp_i2c_data);
+
+ return 0;
+}
+
+static int picodlp_i2c_remove(struct i2c_client *client)
+{
+ struct picodlp_i2c_data *picodlp_i2c_data =
+ i2c_get_clientdata(client);
+ kfree(picodlp_i2c_data);
+ return 0;
+}
+
+static struct i2c_driver picodlp_i2c_driver = {
+ .driver = {
+ .name = "picodlp_i2c_driver",
+ },
+ .probe = picodlp_i2c_probe,
+ .remove = picodlp_i2c_remove,
+ .id_table = picodlp_i2c_id,
+};
+
+static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r, trial = 100;
+ struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+ struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ return r;
+ }
+
+ gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
+ msleep(1);
+ gpio_set_value(picodlp_pdata->pwrgood_gpio, 1);
+
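+ /* wait for the controller to drive emu_done high, polling for up to ~500 ms */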
+ while (!gpio_get_value(picodlp_pdata->emu_done_gpio)) {
+ if (!trial--) {
+ dev_err(&dssdev->dev, "emu_done signal not"
+ " going high\n");
+ return -ETIMEDOUT;
+ }
+ msleep(5);
+ }
+ /*
+ * As per dpp2600 programming guide,
+ * it is required to sleep for 1000ms after emu_done signal goes high
+ * then only i2c commands can be successfully sent to dpp2600
+ */
+ msleep(1000);
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to enable DPI\n");
+ goto err1;
+ }
+
+ r = picodlp_i2c_init(picod->picodlp_i2c_client);
+ if (r)
+ goto err;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return r;
+err:
+ omapdss_dpi_display_disable(dssdev);
+err1:
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ return r;
+}
+
+static void picodlp_panel_power_off(struct omap_dss_device *dssdev)
+{
+ struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+
+ gpio_set_value(picodlp_pdata->emu_done_gpio, 0);
+ gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+}
+
+static int picodlp_panel_probe(struct omap_dss_device *dssdev)
+{
+ struct picodlp_data *picod;
+ struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
+ struct i2c_adapter *adapter;
+ struct i2c_client *picodlp_i2c_client;
+ int r = 0, picodlp_adapter_id;
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_ONOFF |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IVS;
+ dssdev->panel.acb = 0x0;
+ dssdev->panel.timings = pico_ls_timings;
+
+ picod = kzalloc(sizeof(struct picodlp_data), GFP_KERNEL);
+ if (!picod)
+ return -ENOMEM;
+
+ mutex_init(&picod->lock);
+
+ picodlp_adapter_id = picodlp_pdata->picodlp_adapter_id;
+
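+ /* the DLP controller sits at address 0x1b on the board-specified bus */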
+ adapter = i2c_get_adapter(picodlp_adapter_id);
+ if (!adapter) {
+ dev_err(&dssdev->dev, "can't get i2c adapter\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ picodlp_i2c_client = i2c_new_device(adapter, &picodlp_i2c_board_info);
+ if (!picodlp_i2c_client) {
+ dev_err(&dssdev->dev, "can't add i2c device::"
+ " picodlp_i2c_client is NULL\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ picod->picodlp_i2c_client = picodlp_i2c_client;
+
+ dev_set_drvdata(&dssdev->dev, picod);
+ return r;
+err:
+ kfree(picod);
+ return r;
+}
+
+static void picodlp_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+
+ i2c_unregister_device(picod->picodlp_i2c_client);
+ dev_set_drvdata(&dssdev->dev, NULL);
+ dev_dbg(&dssdev->dev, "removing picodlp panel\n");
+
+ kfree(picod);
+}
+
+static int picodlp_panel_enable(struct omap_dss_device *dssdev)
+{
+ struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ dev_dbg(&dssdev->dev, "enabling picodlp panel\n");
+
+ mutex_lock(&picod->lock);
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ mutex_unlock(&picod->lock);
+ return -EINVAL;
+ }
+
+ r = picodlp_panel_power_on(dssdev);
+ mutex_unlock(&picod->lock);
+
+ return r;
+}
+
+static void picodlp_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&picod->lock);
+ /* Turn off DLP Power */
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ picodlp_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ mutex_unlock(&picod->lock);
+
+ dev_dbg(&dssdev->dev, "disabling picodlp panel\n");
+}
+
+static int picodlp_panel_suspend(struct omap_dss_device *dssdev)
+{
+ struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&picod->lock);
+ /* Turn off DLP Power */
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ mutex_unlock(&picod->lock);
+ dev_err(&dssdev->dev, "unable to suspend picodlp panel,"
+ " panel is not ACTIVE\n");
+ return -EINVAL;
+ }
+
+ picodlp_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ mutex_unlock(&picod->lock);
+
+ dev_dbg(&dssdev->dev, "suspending picodlp panel\n");
+ return 0;
+}
+
+static int picodlp_panel_resume(struct omap_dss_device *dssdev)
+{
+ struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&picod->lock);
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ mutex_unlock(&picod->lock);
+ dev_err(&dssdev->dev, "unable to resume picodlp panel,"
+ " panel is not ACTIVE\n");
+ return -EINVAL;
+ }
+
+ r = picodlp_panel_power_on(dssdev);
+ mutex_unlock(&picod->lock);
+ dev_dbg(&dssdev->dev, "resuming picodlp panel\n");
+ return r;
+}
+
+static void picodlp_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres)
+{
+ *xres = dssdev->panel.timings.x_res;
+ *yres = dssdev->panel.timings.y_res;
+}
+
+static struct omap_dss_driver picodlp_driver = {
+ .probe = picodlp_panel_probe,
+ .remove = picodlp_panel_remove,
+
+ .enable = picodlp_panel_enable,
+ .disable = picodlp_panel_disable,
+
+ .get_resolution = picodlp_get_resolution,
+
+ .suspend = picodlp_panel_suspend,
+ .resume = picodlp_panel_resume,
+
+ .driver = {
+ .name = "picodlp_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init picodlp_init(void)
+{
+ int r = 0;
+
+ r = i2c_add_driver(&picodlp_i2c_driver);
+ if (r) {
+ printk(KERN_WARNING
+ "picodlp_i2c_driver registration failed\n");
+ return r;
+ }
+
+ r = omap_dss_register_driver(&picodlp_driver);
+ if (r)
+ i2c_del_driver(&picodlp_i2c_driver);
+
+ return r;
+}
+
+static void __exit picodlp_exit(void)
+{
+ omap_dss_unregister_driver(&picodlp_driver);
+ i2c_del_driver(&picodlp_i2c_driver);
+}
+
+module_init(picodlp_init);
+module_exit(picodlp_exit);
+
+MODULE_AUTHOR("Mythri P K <mythripk@ti.com>");
+MODULE_DESCRIPTION("picodlp driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-picodlp.h b/drivers/video/omap2/displays/panel-picodlp.h
new file mode 100644
index 00000000000..a34b431a726
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-picodlp.h
@@ -0,0 +1,288 @@
+/*
+ * Header file required by picodlp panel driver
+ *
+ * Copyright (C) 2009-2011 Texas Instruments
+ * Author: Mythri P K <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP2_DISPLAY_PANEL_PICODLP_H
+#define __OMAP2_DISPLAY_PANEL_PICODLP_H
+
+/* Commands used for configuring picodlp panel */
+
+#define MAIN_STATUS 0x03
+#define PBC_CONTROL 0x08
+#define INPUT_SOURCE 0x0B
+#define INPUT_RESOLUTION 0x0C
+#define DATA_FORMAT 0x0D
+#define IMG_ROTATION 0x0E
+#define LONG_FLIP 0x0F
+#define SHORT_FLIP 0x10
+#define TEST_PAT_SELECT 0x11
+#define R_DRIVE_CURRENT 0x12
+#define G_DRIVE_CURRENT 0x13
+#define B_DRIVE_CURRENT 0x14
+#define READ_REG_SELECT 0x15
+#define RGB_DRIVER_ENABLE 0x16
+
+#define CPU_IF_MODE 0x18
+#define FRAME_RATE 0x19
+#define CPU_IF_SYNC_METHOD 0x1A
+#define CPU_IF_SOF 0x1B
+#define CPU_IF_EOF 0x1C
+#define CPU_IF_SLEEP 0x1D
+
+#define SEQUENCE_MODE 0x1E
+#define SOFT_RESET 0x1F
+#define FRONT_END_RESET 0x21
+#define AUTO_PWR_ENABLE 0x22
+
+#define VSYNC_LINE_DELAY 0x23
+#define CPU_PI_HORIZ_START 0x24
+#define CPU_PI_VERT_START 0x25
+#define CPU_PI_HORIZ_WIDTH 0x26
+#define CPU_PI_VERT_HEIGHT 0x27
+
+#define PIXEL_MASK_CROP 0x28
+#define CROP_FIRST_LINE 0x29
+#define CROP_LAST_LINE 0x2A
+#define CROP_FIRST_PIXEL 0x2B
+#define CROP_LAST_PIXEL 0x2C
+#define DMD_PARK_TRIGGER 0x2D
+
+#define MISC_REG 0x30
+
+/* AGC registers */
+#define AGC_CTRL 0x50
+#define AGC_CLIPPED_PIXS 0x55
+#define AGC_BRIGHT_PIXS 0x56
+#define AGC_BG_PIXS 0x57
+#define AGC_SAFETY_MARGIN 0x17
+
+/* Color Coordinate Adjustment registers */
+#define CCA_ENABLE 0x5E
+#define CCA_C1A 0x5F
+#define CCA_C1B 0x60
+#define CCA_C1C 0x61
+#define CCA_C2A 0x62
+#define CCA_C2B 0x63
+#define CCA_C2C 0x64
+#define CCA_C3A 0x65
+#define CCA_C3B 0x66
+#define CCA_C3C 0x67
+#define CCA_C7A 0x71
+#define CCA_C7B 0x72
+#define CCA_C7C 0x73
+
+/*
+ * The DLP Pico Processor 2600 has on-board flash; Look Up Tables are
+ * loaded from it via DMA.
+ */
+#define DMA_STATUS 0x100
+#define FLASH_ADDR_BYTES 0x74
+#define FLASH_DUMMY_BYTES 0x75
+#define FLASH_WRITE_BYTES 0x76
+#define FLASH_READ_BYTES 0x77
+#define FLASH_OPCODE 0x78
+#define FLASH_START_ADDR 0x79
+#define FLASH_DUMMY2 0x7A
+#define FLASH_WRITE_DATA 0x7B
+
+#define TEMPORAL_DITH_DISABLE 0x7E
+#define SEQ_CONTROL 0x82
+#define SEQ_VECTOR 0x83
+
+/* DMD is Digital Micromirror Device */
+#define DMD_BLOCK_COUNT 0x84
+#define DMD_VCC_CONTROL 0x86
+#define DMD_PARK_PULSE_COUNT 0x87
+#define DMD_PARK_PULSE_WIDTH 0x88
+#define DMD_PARK_DELAY 0x89
+#define DMD_SHADOW_ENABLE 0x8E
+#define SEQ_STATUS 0x8F
+#define FLASH_CLOCK_CONTROL 0x98
+#define DMD_PARK 0x2D
+
+#define SDRAM_BIST_ENABLE 0x46
+#define DDR_DRIVER_STRENGTH 0x9A
+#define SDC_ENABLE 0x9D
+#define SDC_BUFF_SWAP_DISABLE 0xA3
+#define CURTAIN_CONTROL 0xA6
+#define DDR_BUS_SWAP_ENABLE 0xA7
+#define DMD_TRC_ENABLE 0xA8
+#define DMD_BUS_SWAP_ENABLE 0xA9
+
+#define ACTGEN_ENABLE 0xAE
+#define ACTGEN_CONTROL 0xAF
+#define ACTGEN_HORIZ_BP 0xB0
+#define ACTGEN_VERT_BP 0xB1
+
+/* Look Up Table access */
+#define CMT_SPLASH_LUT_START_ADDR 0xFA
+#define CMT_SPLASH_LUT_DEST_SELECT 0xFB
+#define CMT_SPLASH_LUT_DATA 0xFC
+#define SEQ_RESET_LUT_START_ADDR 0xFD
+#define SEQ_RESET_LUT_DEST_SELECT 0xFE
+#define SEQ_RESET_LUT_DATA 0xFF
+
+/* Input source definitions */
+#define PARALLEL_RGB 0
+#define INT_TEST_PATTERN 1
+#define SPLASH_SCREEN 2
+#define CPU_INTF 3
+#define BT656 4
+
+/* Standard input resolution definitions */
+#define QWVGA_LANDSCAPE 3 /* (427h*240v) */
+#define WVGA_864_LANDSCAPE 21 /* (864h*480v) */
+#define WVGA_DMD_OPTICAL_TEST 35 /* (608h*684v) */
+
+/* Standard data format definitions */
+#define RGB565 0
+#define RGB666 1
+#define RGB888 2
+
+/* Test Pattern definitions */
+#define TPG_CHECKERBOARD 0
+#define TPG_BLACK 1
+#define TPG_WHITE 2
+#define TPG_RED 3
+#define TPG_BLUE 4
+#define TPG_GREEN 5
+#define TPG_VLINES_BLACK 6
+#define TPG_HLINES_BLACK 7
+#define TPG_VLINES_ALT 8
+#define TPG_HLINES_ALT 9
+#define TPG_DIAG_LINES 10
+#define TPG_GREYRAMP_VERT 11
+#define TPG_GREYRAMP_HORIZ 12
+#define TPG_ANSI_CHECKERBOARD 13
+
+/* sequence mode definitions */
+#define SEQ_FREE_RUN 0
+#define SEQ_LOCK 1
+
+/* curtain color definitions */
+#define CURTAIN_BLACK 0
+#define CURTAIN_RED 1
+#define CURTAIN_GREEN 2
+#define CURTAIN_BLUE 3
+#define CURTAIN_YELLOW 4
+#define CURTAIN_MAGENTA 5
+#define CURTAIN_CYAN 6
+#define CURTAIN_WHITE 7
+
+/* LUT definitions */
+#define CMT_LUT_NONE 0
+#define CMT_LUT_GREEN 1
+#define CMT_LUT_RED 2
+#define CMT_LUT_BLUE 3
+#define CMT_LUT_ALL 4
+#define SPLASH_LUT 5
+
+#define SEQ_LUT_NONE 0
+#define SEQ_DRC_LUT_0 1
+#define SEQ_DRC_LUT_1 2
+#define SEQ_DRC_LUT_2 3
+#define SEQ_DRC_LUT_3 4
+#define SEQ_SEQ_LUT 5
+#define SEQ_DRC_LUT_ALL 6
+#define WPC_PROGRAM_LUT 7
+
+#define BITSTREAM_START_ADDR 0x00000000
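+/*
+ * Flash layout: start addresses and sizes of the firmware, sequence,
+ * LUT and splash blobs stored on the dpp2600 flash
+ */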
+#define BITSTREAM_SIZE 0x00040000
+
+#define WPC_FW_0_START_ADDR 0x00040000
+#define WPC_FW_0_SIZE 0x00000ce8
+
+#define SEQUENCE_0_START_ADDR 0x00044000
+#define SEQUENCE_0_SIZE 0x00001000
+
+#define SEQUENCE_1_START_ADDR 0x00045000
+#define SEQUENCE_1_SIZE 0x00000d10
+
+#define SEQUENCE_2_START_ADDR 0x00046000
+#define SEQUENCE_2_SIZE 0x00000d10
+
+#define SEQUENCE_3_START_ADDR 0x00047000
+#define SEQUENCE_3_SIZE 0x00000d10
+
+#define SEQUENCE_4_START_ADDR 0x00048000
+#define SEQUENCE_4_SIZE 0x00000d10
+
+#define SEQUENCE_5_START_ADDR 0x00049000
+#define SEQUENCE_5_SIZE 0x00000d10
+
+#define SEQUENCE_6_START_ADDR 0x0004a000
+#define SEQUENCE_6_SIZE 0x00000d10
+
+#define CMT_LUT_0_START_ADDR 0x0004b200
+#define CMT_LUT_0_SIZE 0x00000600
+
+#define CMT_LUT_1_START_ADDR 0x0004b800
+#define CMT_LUT_1_SIZE 0x00000600
+
+#define CMT_LUT_2_START_ADDR 0x0004be00
+#define CMT_LUT_2_SIZE 0x00000600
+
+#define CMT_LUT_3_START_ADDR 0x0004c400
+#define CMT_LUT_3_SIZE 0x00000600
+
+#define CMT_LUT_4_START_ADDR 0x0004ca00
+#define CMT_LUT_4_SIZE 0x00000600
+
+#define CMT_LUT_5_START_ADDR 0x0004d000
+#define CMT_LUT_5_SIZE 0x00000600
+
+#define CMT_LUT_6_START_ADDR 0x0004d600
+#define CMT_LUT_6_SIZE 0x00000600
+
+#define DRC_TABLE_0_START_ADDR 0x0004dc00
+#define DRC_TABLE_0_SIZE 0x00000100
+
+#define SPLASH_0_START_ADDR 0x0004dd00
+#define SPLASH_0_SIZE 0x00032280
+
+#define SEQUENCE_7_START_ADDR 0x00080000
+#define SEQUENCE_7_SIZE 0x00000d10
+
+#define SEQUENCE_8_START_ADDR 0x00081800
+#define SEQUENCE_8_SIZE 0x00000d10
+
+#define SEQUENCE_9_START_ADDR 0x00083000
+#define SEQUENCE_9_SIZE 0x00000d10
+
+#define CMT_LUT_7_START_ADDR 0x0008e000
+#define CMT_LUT_7_SIZE 0x00000600
+
+#define CMT_LUT_8_START_ADDR 0x0008e800
+#define CMT_LUT_8_SIZE 0x00000600
+
+#define CMT_LUT_9_START_ADDR 0x0008f000
+#define CMT_LUT_9_SIZE 0x00000600
+
+#define SPLASH_1_START_ADDR 0x0009a000
+#define SPLASH_1_SIZE 0x00032280
+
+#define SPLASH_2_START_ADDR 0x000cd000
+#define SPLASH_2_SIZE 0x00032280
+
+#define SPLASH_3_START_ADDR 0x00100000
+#define SPLASH_3_SIZE 0x00032280
+
+#define OPT_SPLASH_0_START_ADDR 0x00134000
+#define OPT_SPLASH_0_SIZE 0x000cb100
+
+#endif
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 4e888ac09b3..80c3f6ab1a9 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -35,26 +35,12 @@
#include <video/omapdss.h>
#include <video/omap-panel-nokia-dsi.h>
+#include <video/mipi_display.h>
/* DSI Virtual channel. Hardcoded for now. */
#define TCH 0
#define DCS_READ_NUM_ERRORS 0x05
-#define DCS_READ_POWER_MODE 0x0a
-#define DCS_READ_MADCTL 0x0b
-#define DCS_READ_PIXEL_FORMAT 0x0c
-#define DCS_RDDSDR 0x0f
-#define DCS_SLEEP_IN 0x10
-#define DCS_SLEEP_OUT 0x11
-#define DCS_DISPLAY_OFF 0x28
-#define DCS_DISPLAY_ON 0x29
-#define DCS_COLUMN_ADDR 0x2a
-#define DCS_PAGE_ADDR 0x2b
-#define DCS_MEMORY_WRITE 0x2c
-#define DCS_TEAR_OFF 0x34
-#define DCS_TEAR_ON 0x35
-#define DCS_MEM_ACC_CTRL 0x36
-#define DCS_PIXEL_FORMAT 0x3a
#define DCS_BRIGHTNESS 0x51
#define DCS_CTRL_DISPLAY 0x53
#define DCS_WRITE_CABC 0x55
@@ -222,8 +208,6 @@ struct taal_data {
struct delayed_work te_timeout_work;
- bool use_dsi_bl;
-
bool cabc_broken;
unsigned cabc_mode;
@@ -302,7 +286,7 @@ static int taal_sleep_in(struct taal_data *td)
hw_guard_wait(td);
- cmd = DCS_SLEEP_IN;
+ cmd = MIPI_DCS_ENTER_SLEEP_MODE;
r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1);
if (r)
return r;
@@ -321,7 +305,7 @@ static int taal_sleep_out(struct taal_data *td)
hw_guard_wait(td);
- r = taal_dcs_write_0(td, DCS_SLEEP_OUT);
+ r = taal_dcs_write_0(td, MIPI_DCS_EXIT_SLEEP_MODE);
if (r)
return r;
@@ -356,7 +340,7 @@ static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror)
u8 mode;
int b5, b6, b7;
- r = taal_dcs_read_1(td, DCS_READ_MADCTL, &mode);
+ r = taal_dcs_read_1(td, MIPI_DCS_GET_ADDRESS_MODE, &mode);
if (r)
return r;
@@ -390,7 +374,7 @@ static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror)
mode &= ~((1<<7) | (1<<6) | (1<<5));
mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
- return taal_dcs_write_1(td, DCS_MEM_ACC_CTRL, mode);
+ return taal_dcs_write_1(td, MIPI_DCS_SET_ADDRESS_MODE, mode);
}
static int taal_set_update_window(struct taal_data *td,
@@ -403,7 +387,7 @@ static int taal_set_update_window(struct taal_data *td,
u16 y2 = y + h - 1;
u8 buf[5];
- buf[0] = DCS_COLUMN_ADDR;
+ buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
buf[1] = (x1 >> 8) & 0xff;
buf[2] = (x1 >> 0) & 0xff;
buf[3] = (x2 >> 8) & 0xff;
@@ -413,7 +397,7 @@ static int taal_set_update_window(struct taal_data *td,
if (r)
return r;
- buf[0] = DCS_PAGE_ADDR;
+ buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
buf[1] = (y1 >> 8) & 0xff;
buf[2] = (y1 >> 0) & 0xff;
buf[3] = (y2 >> 8) & 0xff;
@@ -555,7 +539,6 @@ static int taal_bl_update_status(struct backlight_device *dev)
{
struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
int r;
int level;
@@ -569,23 +552,16 @@ static int taal_bl_update_status(struct backlight_device *dev)
mutex_lock(&td->lock);
- if (td->use_dsi_bl) {
- if (td->enabled) {
- dsi_bus_lock(dssdev);
+ if (td->enabled) {
+ dsi_bus_lock(dssdev);
- r = taal_wake_up(dssdev);
- if (!r)
- r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level);
+ r = taal_wake_up(dssdev);
+ if (!r)
+ r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level);
- dsi_bus_unlock(dssdev);
- } else {
- r = 0;
- }
+ dsi_bus_unlock(dssdev);
} else {
- if (!panel_data->set_backlight)
- r = -EINVAL;
- else
- r = panel_data->set_backlight(dssdev, level);
+ r = 0;
}
mutex_unlock(&td->lock);
@@ -964,7 +940,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
{
struct backlight_properties props;
struct taal_data *td;
- struct backlight_device *bldev;
+ struct backlight_device *bldev = NULL;
struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
struct panel_config *panel_config = NULL;
int r, i;
@@ -990,7 +966,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
dssdev->panel.config = OMAP_DSS_LCD_TFT;
dssdev->panel.timings = panel_config->timings;
- dssdev->ctrl.pixel_size = 24;
+ dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
td = kzalloc(sizeof(*td), GFP_KERNEL);
if (!td) {
@@ -1025,35 +1001,26 @@ static int taal_probe(struct omap_dss_device *dssdev)
taal_hw_reset(dssdev);
- /* if no platform set_backlight() defined, presume DSI backlight
- * control */
- memset(&props, 0, sizeof(struct backlight_properties));
- if (!panel_data->set_backlight)
- td->use_dsi_bl = true;
-
- if (td->use_dsi_bl)
+ if (panel_data->use_dsi_backlight) {
+ memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = 255;
- else
- props.max_brightness = 127;
-
- props.type = BACKLIGHT_RAW;
- bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev,
- dssdev, &taal_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- goto err_bl;
- }
- td->bldev = bldev;
+ props.type = BACKLIGHT_RAW;
+ bldev = backlight_device_register(dev_name(&dssdev->dev),
+ &dssdev->dev, dssdev, &taal_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err_bl;
+ }
+
+ td->bldev = bldev;
- bldev->props.fb_blank = FB_BLANK_UNBLANK;
- bldev->props.power = FB_BLANK_UNBLANK;
- if (td->use_dsi_bl)
+ bldev->props.fb_blank = FB_BLANK_UNBLANK;
+ bldev->props.power = FB_BLANK_UNBLANK;
bldev->props.brightness = 255;
- else
- bldev->props.brightness = 127;
- taal_bl_update_status(bldev);
+ taal_bl_update_status(bldev);
+ }
if (panel_data->use_ext_te) {
int gpio = panel_data->ext_te_gpio;
@@ -1067,7 +1034,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
gpio_direction_input(gpio);
r = request_irq(gpio_to_irq(gpio), taal_te_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING,
"taal vsync", dssdev);
if (r) {
@@ -1111,7 +1078,8 @@ err_irq:
if (panel_data->use_ext_te)
gpio_free(panel_data->ext_te_gpio);
err_gpio:
- backlight_device_unregister(bldev);
+ if (bldev != NULL)
+ backlight_device_unregister(bldev);
err_bl:
destroy_workqueue(td->workqueue);
err_wq:
@@ -1140,9 +1108,11 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
}
bldev = td->bldev;
- bldev->props.power = FB_BLANK_POWERDOWN;
- taal_bl_update_status(bldev);
- backlight_device_unregister(bldev);
+ if (bldev != NULL) {
+ bldev->props.power = FB_BLANK_POWERDOWN;
+ taal_bl_update_status(bldev);
+ backlight_device_unregister(bldev);
+ }
taal_cancel_ulps_work(dssdev);
taal_cancel_esd_work(dssdev);
@@ -1195,7 +1165,8 @@ static int taal_power_on(struct omap_dss_device *dssdev)
if (r)
goto err;
- r = taal_dcs_write_1(td, DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
+ r = taal_dcs_write_1(td, MIPI_DCS_SET_PIXEL_FORMAT,
+ MIPI_DCS_PIXEL_FMT_24BIT);
if (r)
goto err;
@@ -1209,7 +1180,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
goto err;
}
- r = taal_dcs_write_0(td, DCS_DISPLAY_ON);
+ r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_ON);
if (r)
goto err;
@@ -1246,7 +1217,7 @@ static void taal_power_off(struct omap_dss_device *dssdev)
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
- r = taal_dcs_write_0(td, DCS_DISPLAY_OFF);
+ r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_OFF);
if (!r)
r = taal_sleep_in(td);
@@ -1529,9 +1500,9 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
int r;
if (enable)
- r = taal_dcs_write_1(td, DCS_TEAR_ON, 0);
+ r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0);
else
- r = taal_dcs_write_0(td, DCS_TEAR_OFF);
+ r = taal_dcs_write_0(td, MIPI_DCS_SET_TEAR_OFF);
if (!panel_data->use_ext_te)
omapdss_dsi_enable_te(dssdev, enable);
@@ -1851,7 +1822,7 @@ static void taal_esd_work(struct work_struct *work)
goto err;
}
- r = taal_dcs_read_1(td, DCS_RDDSDR, &state1);
+ r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state1);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
@@ -1864,7 +1835,7 @@ static void taal_esd_work(struct work_struct *work)
goto err;
}
- r = taal_dcs_read_1(td, DCS_RDDSDR, &state2);
+ r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state2);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
@@ -1880,7 +1851,7 @@ static void taal_esd_work(struct work_struct *work)
/* Self-diagnostics result is also shown on TE GPIO line. We need
* to re-enable TE after self diagnostics */
if (td->te_enabled && panel_data->use_ext_te) {
- r = taal_dcs_write_1(td, DCS_TEAR_ON, 0);
+ r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0);
if (r)
goto err;
}
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 0d12524db14..7be7c06a249 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -1,5 +1,5 @@
menuconfig OMAP2_DSS
- tristate "OMAP2+ Display Subsystem support (EXPERIMENTAL)"
+ tristate "OMAP2+ Display Subsystem support"
depends on ARCH_OMAP2PLUS
help
OMAP2+ Display Subsystem support.
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 10d9d3bb3e2..bd34ac5b202 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -6,4 +6,4 @@ omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
- hdmi_omap4_panel.o
+ hdmi_panel.o ti_hdmi_4xxx_ip.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 76821fefce9..86ec12e16c7 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -145,6 +145,10 @@ static int dss_initialize_debugfs(void)
debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
&venc_dump_regs, &dss_debug_fops);
#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
+ &hdmi_dump_regs, &dss_debug_fops);
+#endif
return 0;
}
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 0f3961a1ce2..3532782551c 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/jiffies.h>
@@ -106,7 +107,7 @@ static struct {
int irq;
struct clk *dss_clk;
- u32 fifo_size[3];
+ u32 fifo_size[MAX_DSS_OVERLAYS];
spinlock_t irq_lock;
u32 irq_error_mask;
@@ -171,172 +172,98 @@ static int dispc_get_ctx_loss_count(void)
static void dispc_save_context(void)
{
- int i;
+ int i, j;
DSSDBG("dispc_save_context\n");
SR(IRQENABLE);
SR(CONTROL);
SR(CONFIG);
- SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
- SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
- SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
- SR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
SR(LINE_NUMBER);
- SR(TIMING_H(OMAP_DSS_CHANNEL_LCD));
- SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
- SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
- SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
SR(GLOBAL_ALPHA);
- SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
- SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
SR(CONTROL2);
- SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
- SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
- SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
- SR(TIMING_H(OMAP_DSS_CHANNEL_LCD2));
- SR(TIMING_V(OMAP_DSS_CHANNEL_LCD2));
- SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
- SR(DIVISORo(OMAP_DSS_CHANNEL_LCD2));
SR(CONFIG2);
}
- SR(OVL_BA0(OMAP_DSS_GFX));
- SR(OVL_BA1(OMAP_DSS_GFX));
- SR(OVL_POSITION(OMAP_DSS_GFX));
- SR(OVL_SIZE(OMAP_DSS_GFX));
- SR(OVL_ATTRIBUTES(OMAP_DSS_GFX));
- SR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
- SR(OVL_ROW_INC(OMAP_DSS_GFX));
- SR(OVL_PIXEL_INC(OMAP_DSS_GFX));
- SR(OVL_WINDOW_SKIP(OMAP_DSS_GFX));
- SR(OVL_TABLE_BA(OMAP_DSS_GFX));
+ for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ SR(DEFAULT_COLOR(i));
+ SR(TRANS_COLOR(i));
+ SR(SIZE_MGR(i));
+ if (i == OMAP_DSS_CHANNEL_DIGIT)
+ continue;
+ SR(TIMING_H(i));
+ SR(TIMING_V(i));
+ SR(POL_FREQ(i));
+ SR(DIVISORo(i));
- SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
- SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
- SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
+ SR(DATA_CYCLE1(i));
+ SR(DATA_CYCLE2(i));
+ SR(DATA_CYCLE3(i));
- if (dss_has_feature(FEAT_CPR)) {
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
- }
- if (dss_has_feature(FEAT_MGR_LCD2)) {
if (dss_has_feature(FEAT_CPR)) {
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_R(i));
+ SR(CPR_COEF_G(i));
+ SR(CPR_COEF_B(i));
}
-
- SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
- SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
- SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
}
- if (dss_has_feature(FEAT_PRELOAD))
- SR(OVL_PRELOAD(OMAP_DSS_GFX));
-
- /* VID1 */
- SR(OVL_BA0(OMAP_DSS_VIDEO1));
- SR(OVL_BA1(OMAP_DSS_VIDEO1));
- SR(OVL_POSITION(OMAP_DSS_VIDEO1));
- SR(OVL_SIZE(OMAP_DSS_VIDEO1));
- SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
- SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
- SR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
- SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
- SR(OVL_FIR(OMAP_DSS_VIDEO1));
- SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU0(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU1(OMAP_DSS_VIDEO1));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 5; i++)
- SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
-
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
- }
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
- SR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
- SR(OVL_FIR2(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
- }
- if (dss_has_feature(FEAT_ATTR2))
- SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
-
- if (dss_has_feature(FEAT_PRELOAD))
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
-
- /* VID2 */
- SR(OVL_BA0(OMAP_DSS_VIDEO2));
- SR(OVL_BA1(OMAP_DSS_VIDEO2));
- SR(OVL_POSITION(OMAP_DSS_VIDEO2));
- SR(OVL_SIZE(OMAP_DSS_VIDEO2));
- SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
- SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
- SR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
- SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
- SR(OVL_FIR(OMAP_DSS_VIDEO2));
- SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU0(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU1(OMAP_DSS_VIDEO2));
+ for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ SR(OVL_BA0(i));
+ SR(OVL_BA1(i));
+ SR(OVL_POSITION(i));
+ SR(OVL_SIZE(i));
+ SR(OVL_ATTRIBUTES(i));
+ SR(OVL_FIFO_THRESHOLD(i));
+ SR(OVL_ROW_INC(i));
+ SR(OVL_PIXEL_INC(i));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(i));
+ if (i == OMAP_DSS_GFX) {
+ SR(OVL_WINDOW_SKIP(i));
+ SR(OVL_TABLE_BA(i));
+ continue;
+ }
+ SR(OVL_FIR(i));
+ SR(OVL_PICTURE_SIZE(i));
+ SR(OVL_ACCU0(i));
+ SR(OVL_ACCU1(i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ SR(OVL_FIR_COEF_H(i, j));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ SR(OVL_FIR_COEF_HV(i, j));
- for (i = 0; i < 5; i++)
- SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 5; j++)
+ SR(OVL_CONV_COEF(i, j));
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
- }
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (j = 0; j < 8; j++)
+ SR(OVL_FIR_COEF_V(i, j));
+ }
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
- SR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
- SR(OVL_FIR2(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ SR(OVL_BA0_UV(i));
+ SR(OVL_BA1_UV(i));
+ SR(OVL_FIR2(i));
+ SR(OVL_ACCU2_0(i));
+ SR(OVL_ACCU2_1(i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ SR(OVL_FIR_COEF_H2(i, j));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ SR(OVL_FIR_COEF_HV2(i, j));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ SR(OVL_FIR_COEF_V2(i, j));
+ }
+ if (dss_has_feature(FEAT_ATTR2))
+ SR(OVL_ATTRIBUTES2(i));
}
- if (dss_has_feature(FEAT_ATTR2))
- SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
-
- if (dss_has_feature(FEAT_PRELOAD))
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
@@ -349,7 +276,7 @@ static void dispc_save_context(void)
static void dispc_restore_context(void)
{
- int i, ctx;
+ int i, j, ctx;
DSSDBG("dispc_restore_context\n");
@@ -367,165 +294,89 @@ static void dispc_restore_context(void)
/*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
- RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
- RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
- RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
- RR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
RR(LINE_NUMBER);
- RR(TIMING_H(OMAP_DSS_CHANNEL_LCD));
- RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
- RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
- RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
RR(GLOBAL_ALPHA);
- RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
- RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
- if (dss_has_feature(FEAT_MGR_LCD2)) {
- RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
- RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
- RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
- RR(TIMING_H(OMAP_DSS_CHANNEL_LCD2));
- RR(TIMING_V(OMAP_DSS_CHANNEL_LCD2));
- RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
- RR(DIVISORo(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_MGR_LCD2))
RR(CONFIG2);
- }
-
- RR(OVL_BA0(OMAP_DSS_GFX));
- RR(OVL_BA1(OMAP_DSS_GFX));
- RR(OVL_POSITION(OMAP_DSS_GFX));
- RR(OVL_SIZE(OMAP_DSS_GFX));
- RR(OVL_ATTRIBUTES(OMAP_DSS_GFX));
- RR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
- RR(OVL_ROW_INC(OMAP_DSS_GFX));
- RR(OVL_PIXEL_INC(OMAP_DSS_GFX));
- RR(OVL_WINDOW_SKIP(OMAP_DSS_GFX));
- RR(OVL_TABLE_BA(OMAP_DSS_GFX));
-
- RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
- RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
- RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
+ for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ RR(DEFAULT_COLOR(i));
+ RR(TRANS_COLOR(i));
+ RR(SIZE_MGR(i));
+ if (i == OMAP_DSS_CHANNEL_DIGIT)
+ continue;
+ RR(TIMING_H(i));
+ RR(TIMING_V(i));
+ RR(POL_FREQ(i));
+ RR(DIVISORo(i));
- if (dss_has_feature(FEAT_CPR)) {
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
- }
- if (dss_has_feature(FEAT_MGR_LCD2)) {
- RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
- RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
- RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
+ RR(DATA_CYCLE1(i));
+ RR(DATA_CYCLE2(i));
+ RR(DATA_CYCLE3(i));
if (dss_has_feature(FEAT_CPR)) {
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_R(i));
+ RR(CPR_COEF_G(i));
+ RR(CPR_COEF_B(i));
}
}
- if (dss_has_feature(FEAT_PRELOAD))
- RR(OVL_PRELOAD(OMAP_DSS_GFX));
-
- /* VID1 */
- RR(OVL_BA0(OMAP_DSS_VIDEO1));
- RR(OVL_BA1(OMAP_DSS_VIDEO1));
- RR(OVL_POSITION(OMAP_DSS_VIDEO1));
- RR(OVL_SIZE(OMAP_DSS_VIDEO1));
- RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
- RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
- RR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
- RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
- RR(OVL_FIR(OMAP_DSS_VIDEO1));
- RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU0(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU1(OMAP_DSS_VIDEO1));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 5; i++)
- RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
-
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
- }
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
- RR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
- RR(OVL_FIR2(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
- }
- if (dss_has_feature(FEAT_ATTR2))
- RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
-
- if (dss_has_feature(FEAT_PRELOAD))
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
-
- /* VID2 */
- RR(OVL_BA0(OMAP_DSS_VIDEO2));
- RR(OVL_BA1(OMAP_DSS_VIDEO2));
- RR(OVL_POSITION(OMAP_DSS_VIDEO2));
- RR(OVL_SIZE(OMAP_DSS_VIDEO2));
- RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
- RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
- RR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
- RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
- RR(OVL_FIR(OMAP_DSS_VIDEO2));
- RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU0(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU1(OMAP_DSS_VIDEO2));
+ for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ RR(OVL_BA0(i));
+ RR(OVL_BA1(i));
+ RR(OVL_POSITION(i));
+ RR(OVL_SIZE(i));
+ RR(OVL_ATTRIBUTES(i));
+ RR(OVL_FIFO_THRESHOLD(i));
+ RR(OVL_ROW_INC(i));
+ RR(OVL_PIXEL_INC(i));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(i));
+ if (i == OMAP_DSS_GFX) {
+ RR(OVL_WINDOW_SKIP(i));
+ RR(OVL_TABLE_BA(i));
+ continue;
+ }
+ RR(OVL_FIR(i));
+ RR(OVL_PICTURE_SIZE(i));
+ RR(OVL_ACCU0(i));
+ RR(OVL_ACCU1(i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ RR(OVL_FIR_COEF_H(i, j));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ RR(OVL_FIR_COEF_HV(i, j));
- for (i = 0; i < 5; i++)
- RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 5; j++)
+ RR(OVL_CONV_COEF(i, j));
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
- }
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (j = 0; j < 8; j++)
+ RR(OVL_FIR_COEF_V(i, j));
+ }
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
- RR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
- RR(OVL_FIR2(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ RR(OVL_BA0_UV(i));
+ RR(OVL_BA1_UV(i));
+ RR(OVL_FIR2(i));
+ RR(OVL_ACCU2_0(i));
+ RR(OVL_ACCU2_1(i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ RR(OVL_FIR_COEF_H2(i, j));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ RR(OVL_FIR_COEF_HV2(i, j));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
+ for (j = 0; j < 8; j++)
+ RR(OVL_FIR_COEF_V2(i, j));
+ }
+ if (dss_has_feature(FEAT_ATTR2))
+ RR(OVL_ATTRIBUTES2(i));
}
- if (dss_has_feature(FEAT_ATTR2))
- RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
-
- if (dss_has_feature(FEAT_PRELOAD))
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
RR(DIVISOR);
@@ -570,13 +421,28 @@ void dispc_runtime_put(void)
WARN_ON(r < 0);
}
+static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
+{
+ if (channel == OMAP_DSS_CHANNEL_LCD ||
+ channel == OMAP_DSS_CHANNEL_LCD2)
+ return true;
+ else
+ return false;
+}
+
+static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
+{
+ struct omap_overlay_manager *mgr =
+ omap_dss_get_overlay_manager(channel);
-bool dispc_go_busy(enum omap_channel channel)
+ return mgr ? mgr->device : NULL;
+}
+
+bool dispc_mgr_go_busy(enum omap_channel channel)
{
int bit;
- if (channel == OMAP_DSS_CHANNEL_LCD ||
- channel == OMAP_DSS_CHANNEL_LCD2)
+ if (dispc_mgr_is_lcd(channel))
bit = 5; /* GOLCD */
else
bit = 6; /* GODIGIT */
@@ -587,13 +453,12 @@ bool dispc_go_busy(enum omap_channel channel)
return REG_GET(DISPC_CONTROL, bit, bit) == 1;
}
-void dispc_go(enum omap_channel channel)
+void dispc_mgr_go(enum omap_channel channel)
{
int bit;
bool enable_bit, go_bit;
- if (channel == OMAP_DSS_CHANNEL_LCD ||
- channel == OMAP_DSS_CHANNEL_LCD2)
+ if (dispc_mgr_is_lcd(channel))
bit = 0; /* LCDENABLE */
else
bit = 1; /* DIGITALENABLE */
@@ -607,8 +472,7 @@ void dispc_go(enum omap_channel channel)
if (!enable_bit)
return;
- if (channel == OMAP_DSS_CHANNEL_LCD ||
- channel == OMAP_DSS_CHANNEL_LCD2)
+ if (dispc_mgr_is_lcd(channel))
bit = 5; /* GOLCD */
else
bit = 6; /* GODIGIT */
@@ -632,43 +496,44 @@ void dispc_go(enum omap_channel channel)
REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
}
-static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
+static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value)
{
dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
}
-static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
+static void dispc_ovl_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
{
dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value);
}
-static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value)
+static void dispc_ovl_write_firv_reg(enum omap_plane plane, int reg, u32 value)
{
dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value);
}
-static void _dispc_write_firh2_reg(enum omap_plane plane, int reg, u32 value)
+static void dispc_ovl_write_firh2_reg(enum omap_plane plane, int reg, u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value);
}
-static void _dispc_write_firhv2_reg(enum omap_plane plane, int reg, u32 value)
+static void dispc_ovl_write_firhv2_reg(enum omap_plane plane, int reg,
+ u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
}
-static void _dispc_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
+static void dispc_ovl_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
-static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
+static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
int vscaleup, int five_taps,
enum omap_color_component color_comp)
{
@@ -769,11 +634,11 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
| FLD_VAL(v_coef[i].vc2, 31, 24);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
- _dispc_write_firh_reg(plane, i, h);
- _dispc_write_firhv_reg(plane, i, hv);
+ dispc_ovl_write_firh_reg(plane, i, h);
+ dispc_ovl_write_firhv_reg(plane, i, hv);
} else {
- _dispc_write_firh2_reg(plane, i, h);
- _dispc_write_firhv2_reg(plane, i, hv);
+ dispc_ovl_write_firh2_reg(plane, i, h);
+ dispc_ovl_write_firhv2_reg(plane, i, hv);
}
}
@@ -784,15 +649,16 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
v = FLD_VAL(v_coef[i].vc00, 7, 0)
| FLD_VAL(v_coef[i].vc22, 15, 8);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
- _dispc_write_firv_reg(plane, i, v);
+ dispc_ovl_write_firv_reg(plane, i, v);
else
- _dispc_write_firv2_reg(plane, i, v);
+ dispc_ovl_write_firv2_reg(plane, i, v);
}
}
}
static void _dispc_setup_color_conv_coef(void)
{
+ int i;
const struct color_conv_coef {
int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
int full_range;
@@ -806,65 +672,54 @@ static void _dispc_setup_color_conv_coef(void)
ct = &ctbl_bt601_5;
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0),
- CVAL(ct->rcr, ct->ry));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1),
- CVAL(ct->gy, ct->rcb));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2),
- CVAL(ct->gcb, ct->gcr));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3),
- CVAL(ct->bcr, ct->by));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4),
- CVAL(0, ct->bcb));
-
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0),
- CVAL(ct->rcr, ct->ry));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1),
- CVAL(ct->gy, ct->rcb));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2),
- CVAL(ct->gcb, ct->gcr));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3),
- CVAL(ct->bcr, ct->by));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4),
- CVAL(0, ct->bcb));
+ for (i = 1; i < dss_feat_get_num_ovls(); i++) {
+ dispc_write_reg(DISPC_OVL_CONV_COEF(i, 0),
+ CVAL(ct->rcr, ct->ry));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(i, 1),
+ CVAL(ct->gy, ct->rcb));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(i, 2),
+ CVAL(ct->gcb, ct->gcr));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(i, 3),
+ CVAL(ct->bcr, ct->by));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(i, 4),
+ CVAL(0, ct->bcb));
-#undef CVAL
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), ct->full_range,
+ 11, 11);
+ }
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1),
- ct->full_range, 11, 11);
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2),
- ct->full_range, 11, 11);
+#undef CVAL
}
-static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr)
+static void dispc_ovl_set_ba0(enum omap_plane plane, u32 paddr)
{
dispc_write_reg(DISPC_OVL_BA0(plane), paddr);
}
-static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr)
+static void dispc_ovl_set_ba1(enum omap_plane plane, u32 paddr)
{
dispc_write_reg(DISPC_OVL_BA1(plane), paddr);
}
-static void _dispc_set_plane_ba0_uv(enum omap_plane plane, u32 paddr)
+static void dispc_ovl_set_ba0_uv(enum omap_plane plane, u32 paddr)
{
dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr);
}
-static void _dispc_set_plane_ba1_uv(enum omap_plane plane, u32 paddr)
+static void dispc_ovl_set_ba1_uv(enum omap_plane plane, u32 paddr)
{
dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr);
}
-static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y)
+static void dispc_ovl_set_pos(enum omap_plane plane, int x, int y)
{
u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
dispc_write_reg(DISPC_OVL_POSITION(plane), val);
}
-static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
+static void dispc_ovl_set_pic_size(enum omap_plane plane, int width, int height)
{
u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
@@ -874,7 +729,7 @@ static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
}
-static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
+static void dispc_ovl_set_vid_size(enum omap_plane plane, int width, int height)
{
u32 val;
@@ -885,44 +740,61 @@ static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
dispc_write_reg(DISPC_OVL_SIZE(plane), val);
}
-static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
+static void dispc_ovl_set_zorder(enum omap_plane plane, u8 zorder)
{
- if (!dss_has_feature(FEAT_PRE_MULT_ALPHA))
+ struct omap_overlay *ovl = omap_dss_get_overlay(plane);
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
return;
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
- plane == OMAP_DSS_VIDEO1)
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
+}
+
+static void dispc_ovl_enable_zorder_planes(void)
+{
+ int i;
+
+ if (!dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
return;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
+ for (i = 0; i < dss_feat_get_num_ovls(); i++)
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
}
-static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
+static void dispc_ovl_set_pre_mult_alpha(enum omap_plane plane, bool enable)
{
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ struct omap_overlay *ovl = omap_dss_get_overlay(plane);
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
return;
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
- plane == OMAP_DSS_VIDEO1)
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
+}
+
+static void dispc_ovl_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
+{
+ static const unsigned shifts[] = { 0, 8, 16, 24, };
+ int shift;
+ struct omap_overlay *ovl = omap_dss_get_overlay(plane);
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
return;
- if (plane == OMAP_DSS_GFX)
- REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
- else if (plane == OMAP_DSS_VIDEO2)
- REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 23, 16);
+ shift = shifts[plane];
+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift);
}
-static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
+static void dispc_ovl_set_pix_inc(enum omap_plane plane, s32 inc)
{
dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc);
}
-static void _dispc_set_row_inc(enum omap_plane plane, s32 inc)
+static void dispc_ovl_set_row_inc(enum omap_plane plane, s32 inc)
{
dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc);
}
-static void _dispc_set_color_mode(enum omap_plane plane,
+static void dispc_ovl_set_color_mode(enum omap_plane plane,
enum omap_color_mode color_mode)
{
u32 m = 0;
@@ -1003,7 +875,7 @@ static void _dispc_set_color_mode(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-void dispc_set_channel_out(enum omap_plane plane,
+static void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel)
{
int shift;
@@ -1016,6 +888,7 @@ void dispc_set_channel_out(enum omap_plane plane,
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
shift = 16;
break;
default:
@@ -1050,24 +923,13 @@ void dispc_set_channel_out(enum omap_plane plane,
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
-static void dispc_set_burst_size(enum omap_plane plane,
+static void dispc_ovl_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size)
{
+ static const unsigned shifts[] = { 6, 14, 14, 14, };
int shift;
- switch (plane) {
- case OMAP_DSS_GFX:
- shift = 6;
- break;
- case OMAP_DSS_VIDEO1:
- case OMAP_DSS_VIDEO2:
- shift = 14;
- break;
- default:
- BUG();
- return;
- }
-
+ shift = shifts[plane];
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
}
@@ -1078,10 +940,10 @@ static void dispc_configure_burst_sizes(void)
/* Configure burst size always to maximum size */
for (i = 0; i < omap_dss_get_num_overlays(); ++i)
- dispc_set_burst_size(i, burst_size);
+ dispc_ovl_set_burst_size(i, burst_size);
}
-u32 dispc_get_burst_size(enum omap_plane plane)
+u32 dispc_ovl_get_burst_size(enum omap_plane plane)
{
unsigned unit = dss_feat_get_burst_size_unit();
/* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
@@ -1102,7 +964,7 @@ void dispc_enable_gamma_table(bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
-void dispc_enable_cpr(enum omap_channel channel, bool enable)
+void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
{
u16 reg;
@@ -1116,12 +978,12 @@ void dispc_enable_cpr(enum omap_channel channel, bool enable)
REG_FLD_MOD(reg, enable, 15, 15);
}
-void dispc_set_cpr_coef(enum omap_channel channel,
+void dispc_mgr_set_cpr_coef(enum omap_channel channel,
struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
- if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2)
+ if (!dispc_mgr_is_lcd(channel))
return;
coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
@@ -1136,7 +998,7 @@ void dispc_set_cpr_coef(enum omap_channel channel,
dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
}
-static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
+static void dispc_ovl_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
@@ -1147,19 +1009,16 @@ static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}

-void dispc_enable_replication(enum omap_plane plane, bool enable)
+static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
{
- int bit;
-
- if (plane == OMAP_DSS_GFX)
- bit = 5;
- else
- bit = 10;
+ static const unsigned shifts[] = { 5, 10, 10, 10 };
+ int shift;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
+ shift = shifts[plane];
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
}

-void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
+void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
{
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
@@ -1186,19 +1045,20 @@ static void dispc_read_plane_fifo_sizes(void)
dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
- for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
+ for (plane = 0; plane < dss_feat_get_num_ovls(); ++plane) {
size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end);
size *= unit;
dispc.fifo_size[plane] = size;
}
}
-u32 dispc_get_plane_fifo_size(enum omap_plane plane)
+u32 dispc_ovl_get_fifo_size(enum omap_plane plane)
{
return dispc.fifo_size[plane];
}
-void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
+static void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low,
+ u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
@@ -1233,7 +1093,7 @@ void dispc_enable_fifomerge(bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
}
-static void _dispc_set_fir(enum omap_plane plane,
+static void dispc_ovl_set_fir(enum omap_plane plane,
int hinc, int vinc,
enum omap_color_component color_comp)
{
@@ -1256,7 +1116,7 @@ static void _dispc_set_fir(enum omap_plane plane,
}
}
-static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
+static void dispc_ovl_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
@@ -1270,7 +1130,7 @@ static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
dispc_write_reg(DISPC_OVL_ACCU0(plane), val);
}
-static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
+static void dispc_ovl_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
@@ -1284,7 +1144,8 @@ static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
dispc_write_reg(DISPC_OVL_ACCU1(plane), val);
}
-static void _dispc_set_vid_accu2_0(enum omap_plane plane, int haccu, int vaccu)
+static void dispc_ovl_set_vid_accu2_0(enum omap_plane plane, int haccu,
+ int vaccu)
{
u32 val;
@@ -1292,7 +1153,8 @@ static void _dispc_set_vid_accu2_0(enum omap_plane plane, int haccu, int vaccu)
dispc_write_reg(DISPC_OVL_ACCU2_0(plane), val);
}
-static void _dispc_set_vid_accu2_1(enum omap_plane plane, int haccu, int vaccu)
+static void dispc_ovl_set_vid_accu2_1(enum omap_plane plane, int haccu,
+ int vaccu)
{
u32 val;
@@ -1300,7 +1162,7 @@ static void _dispc_set_vid_accu2_1(enum omap_plane plane, int haccu, int vaccu)
dispc_write_reg(DISPC_OVL_ACCU2_1(plane), val);
}
-static void _dispc_set_scale_param(enum omap_plane plane,
+static void dispc_ovl_set_scale_param(enum omap_plane plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool five_taps, u8 rotation,
@@ -1312,15 +1174,16 @@ static void _dispc_set_scale_param(enum omap_plane plane,
hscaleup = orig_width <= out_width;
vscaleup = orig_height <= out_height;
- _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps, color_comp);
+ dispc_ovl_set_scale_coef(plane, hscaleup, vscaleup, five_taps,
+ color_comp);
fir_hinc = 1024 * orig_width / out_width;
fir_vinc = 1024 * orig_height / out_height;
- _dispc_set_fir(plane, fir_hinc, fir_vinc, color_comp);
+ dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
}
-static void _dispc_set_scaling_common(enum omap_plane plane,
+static void dispc_ovl_set_scaling_common(enum omap_plane plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
@@ -1331,7 +1194,7 @@ static void _dispc_set_scaling_common(enum omap_plane plane,
int accu1 = 0;
u32 l;
- _dispc_set_scale_param(plane, orig_width, orig_height,
+ dispc_ovl_set_scale_param(plane, orig_width, orig_height,
out_width, out_height, five_taps,
rotation, DISPC_COLOR_COMPONENT_RGB_Y);
l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -1370,11 +1233,11 @@ static void _dispc_set_scaling_common(enum omap_plane plane,
}
}
- _dispc_set_vid_accu0(plane, 0, accu0);
- _dispc_set_vid_accu1(plane, 0, accu1);
+ dispc_ovl_set_vid_accu0(plane, 0, accu0);
+ dispc_ovl_set_vid_accu1(plane, 0, accu1);
}
-static void _dispc_set_scaling_uv(enum omap_plane plane,
+static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
@@ -1422,7 +1285,7 @@ static void _dispc_set_scaling_uv(enum omap_plane plane,
if (out_height != orig_height)
scale_y = true;
- _dispc_set_scale_param(plane, orig_width, orig_height,
+ dispc_ovl_set_scale_param(plane, orig_width, orig_height,
out_width, out_height, five_taps,
rotation, DISPC_COLOR_COMPONENT_UV);
@@ -1433,11 +1296,11 @@ static void _dispc_set_scaling_uv(enum omap_plane plane,
/* set V scaling */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
- _dispc_set_vid_accu2_0(plane, 0x80, 0);
- _dispc_set_vid_accu2_1(plane, 0x80, 0);
+ dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
+ dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
}
-static void _dispc_set_scaling(enum omap_plane plane,
+static void dispc_ovl_set_scaling(enum omap_plane plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
@@ -1446,14 +1309,14 @@ static void _dispc_set_scaling(enum omap_plane plane,
{
BUG_ON(plane == OMAP_DSS_GFX);
- _dispc_set_scaling_common(plane,
+ dispc_ovl_set_scaling_common(plane,
orig_width, orig_height,
out_width, out_height,
ilace, five_taps,
fieldmode, color_mode,
rotation);
- _dispc_set_scaling_uv(plane,
+ dispc_ovl_set_scaling_uv(plane,
orig_width, orig_height,
out_width, out_height,
ilace, five_taps,
@@ -1461,7 +1324,7 @@ static void _dispc_set_scaling(enum omap_plane plane,
rotation);
}
-static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
+static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
bool mirroring, enum omap_color_mode color_mode)
{
bool row_repeat = false;
@@ -1789,12 +1652,11 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
enum omap_color_mode color_mode)
{
u32 fclk = 0;
- /* FIXME venc pclk? */
- u64 tmp, pclk = dispc_pclk_rate(channel);
+ u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
if (height > out_height) {
- /* FIXME get real display PPL */
- unsigned int ppl = 800;
+ struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
+ unsigned int ppl = dssdev->panel.timings.x_res;
tmp = pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
@@ -1846,114 +1708,120 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
else
vf = 1;
- /* FIXME venc pclk? */
- return dispc_pclk_rate(channel) * vf * hf;
+ return dispc_mgr_pclk_rate(channel) * vf * hf;
}
-int dispc_setup_plane(enum omap_plane plane,
- u32 paddr, u16 screen_width,
- u16 pos_x, u16 pos_y,
- u16 width, u16 height,
+static int dispc_ovl_calc_scaling(enum omap_plane plane,
+ enum omap_channel channel, u16 width, u16 height,
u16 out_width, u16 out_height,
- enum omap_color_mode color_mode,
- bool ilace,
- enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror,
- u8 global_alpha, u8 pre_mult_alpha,
- enum omap_channel channel, u32 puv_addr)
-{
- const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
- bool five_taps = 0;
- bool fieldmode = 0;
- int cconv = 0;
- unsigned offset0, offset1;
- s32 row_inc;
- s32 pix_inc;
- u16 frame_height = height;
- unsigned int field_offset = 0;
+ enum omap_color_mode color_mode, bool *five_taps)
+{
+ struct omap_overlay *ovl = omap_dss_get_overlay(plane);
+ const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
+ unsigned long fclk = 0;
- DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
- "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
- plane, paddr, screen_width, pos_x, pos_y,
- width, height,
- out_width, out_height,
- ilace, color_mode,
- rotation, mirror, channel);
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ if (width != out_width || height != out_height)
+ return -EINVAL;
+ else
+ return 0;
+ }
- if (paddr == 0)
+ if (out_width < width / maxdownscale ||
+ out_width > width * 8)
return -EINVAL;
- if (ilace && height == out_height)
- fieldmode = 1;
+ if (out_height < height / maxdownscale ||
+ out_height > height * 8)
+ return -EINVAL;
- if (ilace) {
- if (fieldmode)
- height /= 2;
- pos_y /= 2;
- out_height /= 2;
+ /* Must use 5-tap filter? */
+ *five_taps = height > out_height * 2;
- DSSDBG("adjusting for ilace: height %d, pos_y %d, "
- "out_height %d\n",
- height, pos_y, out_height);
+ if (!*five_taps) {
+ fclk = calc_fclk(channel, width, height, out_width,
+ out_height);
+
+ /* Try 5-tap filter if 3-tap fclk is too high */
+ if (cpu_is_omap34xx() && height > out_height &&
+ fclk > dispc_fclk_rate())
+ *five_taps = true;
}
- if (!dss_feat_color_mode_supported(plane, color_mode))
+ if (width > (2048 >> *five_taps)) {
+ DSSERR("failed to set up scaling, fclk too low\n");
return -EINVAL;
+ }
- if (plane == OMAP_DSS_GFX) {
- if (width != out_width || height != out_height)
- return -EINVAL;
- } else {
- /* video plane */
+ if (*five_taps)
+ fclk = calc_fclk_five_taps(channel, width, height,
+ out_width, out_height, color_mode);
- unsigned long fclk = 0;
+ DSSDBG("required fclk rate = %lu Hz\n", fclk);
+ DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
- if (out_width < width / maxdownscale ||
- out_width > width * 8)
- return -EINVAL;
+ if (!fclk || fclk > dispc_fclk_rate()) {
+ DSSERR("failed to set up scaling, "
+ "required fclk rate = %lu Hz, "
+ "current fclk rate = %lu Hz\n",
+ fclk, dispc_fclk_rate());
+ return -EINVAL;
+ }
- if (out_height < height / maxdownscale ||
- out_height > height * 8)
- return -EINVAL;
+ return 0;
+}
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY ||
- color_mode == OMAP_DSS_COLOR_NV12)
- cconv = 1;
+int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
+ bool ilace, enum omap_channel channel, bool replication,
+ u32 fifo_low, u32 fifo_high)
+{
+ struct omap_overlay *ovl = omap_dss_get_overlay(plane);
+ bool five_taps = false;
+ bool fieldmode = 0;
+ int r, cconv = 0;
+ unsigned offset0, offset1;
+ s32 row_inc;
+ s32 pix_inc;
+ u16 frame_height = oi->height;
+ unsigned int field_offset = 0;
- /* Must use 5-tap filter? */
- five_taps = height > out_height * 2;
+ DSSDBG("dispc_ovl_setup %d, pa %x, pa_uv %x, sw %d, %d,%d, %dx%d -> "
+ "%dx%d, cmode %x, rot %d, mir %d, ilace %d chan %d repl %d "
+ "fifo_low %d fifo high %d\n", plane, oi->paddr, oi->p_uv_addr,
+ oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
+ oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
+ oi->mirror, ilace, channel, replication, fifo_low, fifo_high);
- if (!five_taps) {
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
+ if (oi->paddr == 0)
+ return -EINVAL;
- /* Try 5-tap filter if 3-tap fclk is too high */
- if (cpu_is_omap34xx() && height > out_height &&
- fclk > dispc_fclk_rate())
- five_taps = true;
- }
+ if (ilace && oi->height == oi->out_height)
+ fieldmode = 1;
- if (width > (2048 >> five_taps)) {
- DSSERR("failed to set up scaling, fclk too low\n");
- return -EINVAL;
- }
+ if (ilace) {
+ if (fieldmode)
+ oi->height /= 2;
+ oi->pos_y /= 2;
+ oi->out_height /= 2;
- if (five_taps)
- fclk = calc_fclk_five_taps(channel, width, height,
- out_width, out_height, color_mode);
+ DSSDBG("adjusting for ilace: height %d, pos_y %d, "
+ "out_height %d\n",
+ oi->height, oi->pos_y, oi->out_height);
+ }
- DSSDBG("required fclk rate = %lu Hz\n", fclk);
- DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+ if (!dss_feat_color_mode_supported(plane, oi->color_mode))
+ return -EINVAL;
- if (!fclk || fclk > dispc_fclk_rate()) {
- DSSERR("failed to set up scaling, "
- "required fclk rate = %lu Hz, "
- "current fclk rate = %lu Hz\n",
- fclk, dispc_fclk_rate());
- return -EINVAL;
- }
- }
+ r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
+ oi->out_width, oi->out_height, oi->color_mode,
+ &five_taps);
+ if (r)
+ return r;
+
+ if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
+ oi->color_mode == OMAP_DSS_COLOR_UYVY ||
+ oi->color_mode == OMAP_DSS_COLOR_NV12)
+ cconv = 1;
if (ilace && !fieldmode) {
/*
@@ -1963,69 +1831,76 @@ int dispc_setup_plane(enum omap_plane plane,
* so the integer part must be added to the base address of the
* bottom field.
*/
- if (!height || height == out_height)
+ if (!oi->height || oi->height == oi->out_height)
field_offset = 0;
else
- field_offset = height / out_height / 2;
+ field_offset = oi->height / oi->out_height / 2;
}
/* Fields are independent but interleaved in memory. */
if (fieldmode)
field_offset = 1;
- if (rotation_type == OMAP_DSS_ROT_DMA)
- calc_dma_rotation_offset(rotation, mirror,
- screen_width, width, frame_height, color_mode,
- fieldmode, field_offset,
+ if (oi->rotation_type == OMAP_DSS_ROT_DMA)
+ calc_dma_rotation_offset(oi->rotation, oi->mirror,
+ oi->screen_width, oi->width, frame_height,
+ oi->color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc);
else
- calc_vrfb_rotation_offset(rotation, mirror,
- screen_width, width, frame_height, color_mode,
- fieldmode, field_offset,
+ calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
+ oi->screen_width, oi->width, frame_height,
+ oi->color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc);
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
- _dispc_set_color_mode(plane, color_mode);
+ dispc_ovl_set_color_mode(plane, oi->color_mode);
- _dispc_set_plane_ba0(plane, paddr + offset0);
- _dispc_set_plane_ba1(plane, paddr + offset1);
+ dispc_ovl_set_ba0(plane, oi->paddr + offset0);
+ dispc_ovl_set_ba1(plane, oi->paddr + offset1);
- if (OMAP_DSS_COLOR_NV12 == color_mode) {
- _dispc_set_plane_ba0_uv(plane, puv_addr + offset0);
- _dispc_set_plane_ba1_uv(plane, puv_addr + offset1);
+ if (OMAP_DSS_COLOR_NV12 == oi->color_mode) {
+ dispc_ovl_set_ba0_uv(plane, oi->p_uv_addr + offset0);
+ dispc_ovl_set_ba1_uv(plane, oi->p_uv_addr + offset1);
}
- _dispc_set_row_inc(plane, row_inc);
- _dispc_set_pix_inc(plane, pix_inc);
+ dispc_ovl_set_row_inc(plane, row_inc);
+ dispc_ovl_set_pix_inc(plane, pix_inc);
- DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, width, height,
- out_width, out_height);
+ DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
+ oi->height, oi->out_width, oi->out_height);
- _dispc_set_plane_pos(plane, pos_x, pos_y);
+ dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
- _dispc_set_pic_size(plane, width, height);
+ dispc_ovl_set_pic_size(plane, oi->width, oi->height);
- if (plane != OMAP_DSS_GFX) {
- _dispc_set_scaling(plane, width, height,
- out_width, out_height,
+ if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
+ dispc_ovl_set_scaling(plane, oi->width, oi->height,
+ oi->out_width, oi->out_height,
ilace, five_taps, fieldmode,
- color_mode, rotation);
- _dispc_set_vid_size(plane, out_width, out_height);
- _dispc_set_vid_color_conv(plane, cconv);
+ oi->color_mode, oi->rotation);
+ dispc_ovl_set_vid_size(plane, oi->out_width, oi->out_height);
+ dispc_ovl_set_vid_color_conv(plane, cconv);
}
- _dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
+ dispc_ovl_set_rotation_attrs(plane, oi->rotation, oi->mirror,
+ oi->color_mode);
+
+ dispc_ovl_set_zorder(plane, oi->zorder);
+ dispc_ovl_set_pre_mult_alpha(plane, oi->pre_mult_alpha);
+ dispc_ovl_setup_global_alpha(plane, oi->global_alpha);
- _dispc_set_pre_mult_alpha(plane, pre_mult_alpha);
- _dispc_setup_global_alpha(plane, global_alpha);
+ dispc_ovl_set_channel_out(plane, channel);
+
+ dispc_ovl_enable_replication(plane, replication);
+ dispc_ovl_set_fifo_threshold(plane, fifo_low, fifo_high);
return 0;
}
-int dispc_enable_plane(enum omap_plane plane, bool enable)
+int dispc_ovl_enable(enum omap_plane plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2048,7 +1923,7 @@ static void _enable_lcd_out(enum omap_channel channel, bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
}
-static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
{
struct completion frame_done_completion;
bool is_on;
@@ -2095,14 +1970,19 @@ static void _enable_digit_out(bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
}
-static void dispc_enable_digit_out(bool enable)
+static void dispc_mgr_enable_digit_out(bool enable)
{
struct completion frame_done_completion;
- int r;
+ enum dss_hdmi_venc_clk_source_select src;
+ int r, i;
+ u32 irq_mask;
+ int num_irqs;
if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
return;
+ src = dss_get_hdmi_venc_clk_source();
+
if (enable) {
unsigned long flags;
/* When we enable digit output, we'll get an extra digit
@@ -2119,43 +1999,47 @@ static void dispc_enable_digit_out(bool enable)
* wait for the extra sync losts */
init_completion(&frame_done_completion);
+ if (src == DSS_HDMI_M_PCLK && enable == false) {
+ irq_mask = DISPC_IRQ_FRAMEDONETV;
+ num_irqs = 1;
+ } else {
+ irq_mask = DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+ /* XXX I understand from TRM that we should only wait for the
+ * current field to complete. But it seems we have to wait for
+ * both fields */
+ num_irqs = 2;
+ }
+
r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
- DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+ irq_mask);
if (r)
- DSSERR("failed to register EVSYNC isr\n");
+ DSSERR("failed to register %x isr\n", irq_mask);
_enable_digit_out(enable);
- /* XXX I understand from TRM that we should only wait for the
- * current field to complete. But it seems we have to wait
- * for both fields */
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for EVSYNC\n");
-
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for EVSYNC\n");
+ for (i = 0; i < num_irqs; ++i) {
+ if (!wait_for_completion_timeout(&frame_done_completion,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for digit out to %s\n",
+ enable ? "start" : "stop");
+ }
- r = omap_dispc_unregister_isr(dispc_disable_isr,
- &frame_done_completion,
- DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+ r = omap_dispc_unregister_isr(dispc_disable_isr, &frame_done_completion,
+ irq_mask);
if (r)
- DSSERR("failed to unregister EVSYNC isr\n");
+ DSSERR("failed to unregister %x isr\n", irq_mask);
if (enable) {
unsigned long flags;
spin_lock_irqsave(&dispc.irq_lock, flags);
- dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
- if (dss_has_feature(FEAT_MGR_LCD2))
- dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
+ dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST_DIGIT;
dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
}
}
-bool dispc_is_channel_enabled(enum omap_channel channel)
+bool dispc_mgr_is_enabled(enum omap_channel channel)
{
if (channel == OMAP_DSS_CHANNEL_LCD)
return !!REG_GET(DISPC_CONTROL, 0, 0);
@@ -2167,13 +2051,12 @@ bool dispc_is_channel_enabled(enum omap_channel channel)
BUG();
}
-void dispc_enable_channel(enum omap_channel channel, bool enable)
+void dispc_mgr_enable(enum omap_channel channel, bool enable)
{
- if (channel == OMAP_DSS_CHANNEL_LCD ||
- channel == OMAP_DSS_CHANNEL_LCD2)
- dispc_enable_lcd_out(channel, enable);
+ if (dispc_mgr_is_lcd(channel))
+ dispc_mgr_enable_lcd_out(channel, enable);
else if (channel == OMAP_DSS_CHANNEL_DIGIT)
- dispc_enable_digit_out(enable);
+ dispc_mgr_enable_digit_out(enable);
else
BUG();
}
@@ -2202,7 +2085,7 @@ void dispc_pck_free_enable(bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
-void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
+void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
@@ -2211,7 +2094,7 @@ void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
}
-void dispc_set_lcd_display_type(enum omap_channel channel,
+void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
enum omap_lcd_display_type type)
{
int mode;
@@ -2242,12 +2125,12 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode)
}
-void dispc_set_default_color(enum omap_channel channel, u32 color)
+void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
{
dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
}
-u32 dispc_get_default_color(enum omap_channel channel)
+u32 dispc_mgr_get_default_color(enum omap_channel channel)
{
u32 l;
@@ -2260,7 +2143,7 @@ u32 dispc_get_default_color(enum omap_channel channel)
return l;
}
-void dispc_set_trans_key(enum omap_channel ch,
+void dispc_mgr_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
@@ -2274,7 +2157,7 @@ void dispc_set_trans_key(enum omap_channel ch,
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
}
-void dispc_get_trans_key(enum omap_channel ch,
+void dispc_mgr_get_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type *type,
u32 *trans_key)
{
@@ -2293,7 +2176,7 @@ void dispc_get_trans_key(enum omap_channel ch,
*trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
}
-void dispc_enable_trans_key(enum omap_channel ch, bool enable)
+void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
{
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
@@ -2302,39 +2185,36 @@ void dispc_enable_trans_key(enum omap_channel ch, bool enable)
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
}
-void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
+
+void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable)
{
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return;
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
- else /* OMAP_DSS_CHANNEL_LCD2 */
- REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
}
-bool dispc_alpha_blending_enabled(enum omap_channel ch)
+
+bool dispc_mgr_alpha_fixed_zorder_enabled(enum omap_channel ch)
{
bool enabled;
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return false;
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
enabled = REG_GET(DISPC_CONFIG, 19, 19);
- else if (ch == OMAP_DSS_CHANNEL_LCD2)
- enabled = REG_GET(DISPC_CONFIG2, 18, 18);
else
BUG();
return enabled;
}
-
-bool dispc_trans_key_enabled(enum omap_channel ch)
+bool dispc_mgr_trans_key_enabled(enum omap_channel ch)
{
bool enabled;
@@ -2351,7 +2231,7 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
}
-void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
+void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
int code;
@@ -2379,46 +2259,41 @@ void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
}
-void dispc_set_parallel_interface_mode(enum omap_channel channel,
- enum omap_parallel_interface_mode mode)
+void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
{
u32 l;
- int stallmode;
- int gpout0 = 1;
- int gpout1;
+ int gpout0, gpout1;
switch (mode) {
- case OMAP_DSS_PARALLELMODE_BYPASS:
- stallmode = 0;
- gpout1 = 1;
+ case DSS_IO_PAD_MODE_RESET:
+ gpout0 = 0;
+ gpout1 = 0;
break;
-
- case OMAP_DSS_PARALLELMODE_RFBI:
- stallmode = 1;
+ case DSS_IO_PAD_MODE_RFBI:
+ gpout0 = 1;
gpout1 = 0;
break;
-
- case OMAP_DSS_PARALLELMODE_DSI:
- stallmode = 1;
+ case DSS_IO_PAD_MODE_BYPASS:
+ gpout0 = 1;
gpout1 = 1;
break;
-
default:
BUG();
return;
}
- if (channel == OMAP_DSS_CHANNEL_LCD2) {
- l = dispc_read_reg(DISPC_CONTROL2);
- l = FLD_MOD(l, stallmode, 11, 11);
- dispc_write_reg(DISPC_CONTROL2, l);
- } else {
- l = dispc_read_reg(DISPC_CONTROL);
- l = FLD_MOD(l, stallmode, 11, 11);
- l = FLD_MOD(l, gpout0, 15, 15);
- l = FLD_MOD(l, gpout1, 16, 16);
- dispc_write_reg(DISPC_CONTROL, l);
- }
+ l = dispc_read_reg(DISPC_CONTROL);
+ l = FLD_MOD(l, gpout0, 15, 15);
+ l = FLD_MOD(l, gpout1, 16, 16);
+ dispc_write_reg(DISPC_CONTROL, l);
+}
+
+void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
+{
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONTROL2, enable, 11, 11);
+ else
+ REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -2452,7 +2327,7 @@ bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
timings->vfp, timings->vbp);
}
-static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
+static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
int hfp, int hbp, int vsw, int vfp, int vbp)
{
u32 timing_h, timing_v;
@@ -2476,7 +2351,7 @@ static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
}
/* change name to mode? */
-void dispc_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_lcd_timings(enum omap_channel channel,
struct omap_video_timings *timings)
{
unsigned xtot, ytot;
@@ -2487,11 +2362,11 @@ void dispc_set_lcd_timings(enum omap_channel channel,
timings->vfp, timings->vbp))
BUG();
- _dispc_set_lcd_timings(channel, timings->hsw, timings->hfp,
+ _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp,
timings->hbp, timings->vsw, timings->vfp,
timings->vbp);
- dispc_set_lcd_size(channel, timings->x_res, timings->y_res);
+ dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res);
xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
@@ -2509,17 +2384,17 @@ void dispc_set_lcd_timings(enum omap_channel channel,
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
}
-static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
+static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
u16 pck_div)
{
BUG_ON(lck_div < 1);
- BUG_ON(pck_div < 2);
+ BUG_ON(pck_div < 1);
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
}
-static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
+static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
int *pck_div)
{
u32 l;
@@ -2552,7 +2427,7 @@ unsigned long dispc_fclk_rate(void)
return r;
}
-unsigned long dispc_lclk_rate(enum omap_channel channel)
+unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
{
struct platform_device *dsidev;
int lcd;
@@ -2582,19 +2457,34 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
return r / lcd;
}
-unsigned long dispc_pclk_rate(enum omap_channel channel)
+unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
{
- int pcd;
unsigned long r;
- u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ if (dispc_mgr_is_lcd(channel)) {
+ int pcd;
+ u32 l;
- pcd = FLD_GET(l, 7, 0);
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
- r = dispc_lclk_rate(channel);
+ pcd = FLD_GET(l, 7, 0);
- return r / pcd;
+ r = dispc_mgr_lclk_rate(channel);
+
+ return r / pcd;
+ } else {
+ struct omap_dss_device *dssdev =
+ dispc_mgr_get_device(channel);
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_VENC:
+ return venc_get_pixel_clock();
+ case OMAP_DISPLAY_TYPE_HDMI:
+ return hdmi_get_pixel_clock();
+ default:
+ BUG();
+ }
+ }
}
void dispc_dump_clocks(struct seq_file *s)
@@ -2631,12 +2521,12 @@ void dispc_dump_clocks(struct seq_file *s)
dss_get_generic_clk_source_name(lcd_clk_src),
dss_feat_get_clk_source_name(lcd_clk_src));
- dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
+ dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
+ dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
- dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);
+ dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);
if (dss_has_feature(FEAT_MGR_LCD2)) {
seq_printf(s, "- LCD2 -\n");
@@ -2646,12 +2536,12 @@ void dispc_dump_clocks(struct seq_file *s)
dss_get_generic_clk_source_name(lcd_clk_src),
dss_feat_get_clk_source_name(lcd_clk_src));
- dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
+ dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd);
+ dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd);
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
- dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
+ dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
}
dispc_runtime_put();
@@ -2692,6 +2582,10 @@ void dispc_dump_irqs(struct seq_file *s)
PIS(VID1_END_WIN);
PIS(VID2_FIFO_UNDERFLOW);
PIS(VID2_END_WIN);
+ if (dss_feat_get_num_ovls() > 3) {
+ PIS(VID3_FIFO_UNDERFLOW);
+ PIS(VID3_END_WIN);
+ }
PIS(SYNC_LOST);
PIS(SYNC_LOST_DIGIT);
PIS(WAKEUP);
@@ -2707,11 +2601,26 @@ void dispc_dump_irqs(struct seq_file *s)
void dispc_dump_regs(struct seq_file *s)
{
+ int i, j;
+ const char *mgr_names[] = {
+ [OMAP_DSS_CHANNEL_LCD] = "LCD",
+ [OMAP_DSS_CHANNEL_DIGIT] = "TV",
+ [OMAP_DSS_CHANNEL_LCD2] = "LCD2",
+ };
+ const char *ovl_names[] = {
+ [OMAP_DSS_GFX] = "GFX",
+ [OMAP_DSS_VIDEO1] = "VID1",
+ [OMAP_DSS_VIDEO2] = "VID2",
+ [OMAP_DSS_VIDEO3] = "VID3",
+ };
+ const char **p_names;
+
#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
if (dispc_runtime_get())
return;
+ /* DISPC common registers */
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
DUMPREG(DISPC_SYSSTATUS);
@@ -2720,247 +2629,139 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_CONTROL);
DUMPREG(DISPC_CONFIG);
DUMPREG(DISPC_CAPABLE);
- DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
- DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
DUMPREG(DISPC_LINE_STATUS);
DUMPREG(DISPC_LINE_NUMBER);
- DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
- if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
DUMPREG(DISPC_GLOBAL_ALPHA);
- DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
- DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
DUMPREG(DISPC_CONTROL2);
DUMPREG(DISPC_CONFIG2);
- DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
- }
-
- DUMPREG(DISPC_OVL_BA0(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_BA1(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_WINDOW_SKIP(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_TABLE_BA(OMAP_DSS_GFX));
-
- DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
-
- if (dss_has_feature(FEAT_CPR)) {
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
}
- if (dss_has_feature(FEAT_MGR_LCD2)) {
- DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
+
+#undef DUMPREG
+
+#define DISPC_REG(i, name) name(i)
+#define DUMPREG(i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
+ 48 - strlen(#r) - strlen(p_names[i]), " ", \
+ dispc_read_reg(DISPC_REG(i, r)))
+
+ p_names = mgr_names;
+
+ /* DISPC channel specific registers */
+ for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ DUMPREG(i, DISPC_DEFAULT_COLOR);
+ DUMPREG(i, DISPC_TRANS_COLOR);
+ DUMPREG(i, DISPC_SIZE_MGR);
+
+ if (i == OMAP_DSS_CHANNEL_DIGIT)
+ continue;
+
+ DUMPREG(i, DISPC_DEFAULT_COLOR);
+ DUMPREG(i, DISPC_TRANS_COLOR);
+ DUMPREG(i, DISPC_TIMING_H);
+ DUMPREG(i, DISPC_TIMING_V);
+ DUMPREG(i, DISPC_POL_FREQ);
+ DUMPREG(i, DISPC_DIVISORo);
+ DUMPREG(i, DISPC_SIZE_MGR);
+
+ DUMPREG(i, DISPC_DATA_CYCLE1);
+ DUMPREG(i, DISPC_DATA_CYCLE2);
+ DUMPREG(i, DISPC_DATA_CYCLE3);
if (dss_has_feature(FEAT_CPR)) {
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(i, DISPC_CPR_COEF_R);
+ DUMPREG(i, DISPC_CPR_COEF_G);
+ DUMPREG(i, DISPC_CPR_COEF_B);
+ }
+ }
+
+ p_names = ovl_names;
+
+ for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ DUMPREG(i, DISPC_OVL_BA0);
+ DUMPREG(i, DISPC_OVL_BA1);
+ DUMPREG(i, DISPC_OVL_POSITION);
+ DUMPREG(i, DISPC_OVL_SIZE);
+ DUMPREG(i, DISPC_OVL_ATTRIBUTES);
+ DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD);
+ DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
+ DUMPREG(i, DISPC_OVL_ROW_INC);
+ DUMPREG(i, DISPC_OVL_PIXEL_INC);
+ if (dss_has_feature(FEAT_PRELOAD))
+ DUMPREG(i, DISPC_OVL_PRELOAD);
+
+ if (i == OMAP_DSS_GFX) {
+ DUMPREG(i, DISPC_OVL_WINDOW_SKIP);
+ DUMPREG(i, DISPC_OVL_TABLE_BA);
+ continue;
+ }
+
+ DUMPREG(i, DISPC_OVL_FIR);
+ DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
+ DUMPREG(i, DISPC_OVL_ACCU0);
+ DUMPREG(i, DISPC_OVL_ACCU1);
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ DUMPREG(i, DISPC_OVL_BA0_UV);
+ DUMPREG(i, DISPC_OVL_BA1_UV);
+ DUMPREG(i, DISPC_OVL_FIR2);
+ DUMPREG(i, DISPC_OVL_ACCU2_0);
+ DUMPREG(i, DISPC_OVL_ACCU2_1);
}
+ if (dss_has_feature(FEAT_ATTR2))
+ DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
+ if (dss_has_feature(FEAT_PRELOAD))
+ DUMPREG(i, DISPC_OVL_PRELOAD);
}
- if (dss_has_feature(FEAT_PRELOAD))
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
-
- DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO1));
-
- DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO2));
-
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 7));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 7));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
- }
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO1));
-
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 7));
-
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 7));
-
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 7));
- }
- if (dss_has_feature(FEAT_ATTR2))
- DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
-
-
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 7));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 7));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
-
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
- }
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO2));
-
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 7));
-
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 7));
-
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 7));
- }
- if (dss_has_feature(FEAT_ATTR2))
- DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
-
- if (dss_has_feature(FEAT_PRELOAD)) {
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
+#undef DISPC_REG
+#undef DUMPREG
+
+#define DISPC_REG(plane, name, i) name(plane, i)
+#define DUMPREG(plane, name, i) \
+ seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \
+ 46 - strlen(#name) - strlen(p_names[plane]), " ", \
+ dispc_read_reg(DISPC_REG(plane, name, i)))
+
+ /* Video pipeline coefficient registers */
+
+ /* start from OMAP_DSS_VIDEO1 */
+ for (i = 1; i < dss_feat_get_num_ovls(); i++) {
+ for (j = 0; j < 8; j++)
+ DUMPREG(i, DISPC_OVL_FIR_COEF_H, j);
+
+ for (j = 0; j < 8; j++)
+ DUMPREG(i, DISPC_OVL_FIR_COEF_HV, j);
+
+ for (j = 0; j < 5; j++)
+ DUMPREG(i, DISPC_OVL_CONV_COEF, j);
+
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (j = 0; j < 8; j++)
+ DUMPREG(i, DISPC_OVL_FIR_COEF_V, j);
+ }
+
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ for (j = 0; j < 8; j++)
+ DUMPREG(i, DISPC_OVL_FIR_COEF_H2, j);
+
+ for (j = 0; j < 8; j++)
+ DUMPREG(i, DISPC_OVL_FIR_COEF_HV2, j);
+
+ for (j = 0; j < 8; j++)
+ DUMPREG(i, DISPC_OVL_FIR_COEF_V2, j);
+ }
}
dispc_runtime_put();
+
+#undef DISPC_REG
#undef DUMPREG
}
-static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
- bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, u8 acb)
+static void _dispc_mgr_set_pol_freq(enum omap_channel channel, bool onoff,
+ bool rf, bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi,
+ u8 acb)
{
u32 l = 0;
@@ -2979,10 +2780,10 @@ static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
dispc_write_reg(DISPC_POL_FREQ(channel), l);
}
-void dispc_set_pol_freq(enum omap_channel channel,
+void dispc_mgr_set_pol_freq(enum omap_channel channel,
enum omap_panel_config config, u8 acbi, u8 acb)
{
- _dispc_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0,
+ _dispc_mgr_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0,
(config & OMAP_DSS_LCD_RF) != 0,
(config & OMAP_DSS_LCD_IEO) != 0,
(config & OMAP_DSS_LCD_IPC) != 0,
@@ -2995,11 +2796,17 @@ void dispc_set_pol_freq(enum omap_channel channel,
void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo)
{
- u16 pcd_min = is_tft ? 2 : 3;
+ u16 pcd_min, pcd_max;
unsigned long best_pck;
u16 best_ld, cur_ld;
u16 best_pd, cur_pd;
+ pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
+ pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
+
+ if (!is_tft)
+ pcd_min = 3;
+
best_pck = 0;
best_ld = 0;
best_pd = 0;
@@ -3007,7 +2814,7 @@ void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
unsigned long lck = fck / cur_ld;
- for (cur_pd = pcd_min; cur_pd <= 255; ++cur_pd) {
+ for (cur_pd = pcd_min; cur_pd <= pcd_max; ++cur_pd) {
unsigned long pck = lck / cur_pd;
long old_delta = abs(best_pck - req_pck);
long new_delta = abs(pck - req_pck);
@@ -3042,7 +2849,7 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
{
if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
return -EINVAL;
- if (cinfo->pck_div < 2 || cinfo->pck_div > 255)
+ if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
return -EINVAL;
cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
@@ -3051,18 +2858,18 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-int dispc_set_clock_div(enum omap_channel channel,
+int dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
- dispc_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
+ dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
return 0;
}
-int dispc_get_clock_div(enum omap_channel channel,
+int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo)
{
unsigned long fck;
@@ -3207,6 +3014,8 @@ static void print_irq_status(u32 status)
PIS(OCP_ERR);
PIS(VID1_FIFO_UNDERFLOW);
PIS(VID2_FIFO_UNDERFLOW);
+ if (dss_feat_get_num_ovls() > 3)
+ PIS(VID3_FIFO_UNDERFLOW);
PIS(SYNC_LOST);
PIS(SYNC_LOST_DIGIT);
if (dss_has_feature(FEAT_MGR_LCD2))
@@ -3300,178 +3109,72 @@ static void dispc_error_worker(struct work_struct *work)
int i;
u32 errors;
unsigned long flags;
+ static const unsigned fifo_underflow_bits[] = {
+ DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+ };
+
+ static const unsigned sync_lost_bits[] = {
+ DISPC_IRQ_SYNC_LOST,
+ DISPC_IRQ_SYNC_LOST_DIGIT,
+ DISPC_IRQ_SYNC_LOST2,
+ };
spin_lock_irqsave(&dispc.irq_lock, flags);
errors = dispc.error_irqs;
dispc.error_irqs = 0;
spin_unlock_irqrestore(&dispc.irq_lock, flags);
- if (errors & DISPC_IRQ_GFX_FIFO_UNDERFLOW) {
- DSSERR("GFX_FIFO_UNDERFLOW, disabling GFX\n");
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
-
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
-
- if (ovl->id == 0) {
- dispc_enable_plane(ovl->id, 0);
- dispc_go(ovl->manager->id);
- mdelay(50);
- break;
- }
- }
- }
-
- if (errors & DISPC_IRQ_VID1_FIFO_UNDERFLOW) {
- DSSERR("VID1_FIFO_UNDERFLOW, disabling VID1\n");
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
-
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
-
- if (ovl->id == 1) {
- dispc_enable_plane(ovl->id, 0);
- dispc_go(ovl->manager->id);
- mdelay(50);
- break;
- }
- }
- }
-
- if (errors & DISPC_IRQ_VID2_FIFO_UNDERFLOW) {
- DSSERR("VID2_FIFO_UNDERFLOW, disabling VID2\n");
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
-
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
+ dispc_runtime_get();
- if (ovl->id == 2) {
- dispc_enable_plane(ovl->id, 0);
- dispc_go(ovl->manager->id);
- mdelay(50);
- break;
- }
- }
- }
-
- if (errors & DISPC_IRQ_SYNC_LOST) {
- struct omap_overlay_manager *manager = NULL;
- bool enable = false;
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ unsigned bit;
- DSSERR("SYNC_LOST, disabling LCD\n");
+ ovl = omap_dss_get_overlay(i);
+ bit = fifo_underflow_bits[i];
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- mgr = omap_dss_get_overlay_manager(i);
-
- if (mgr->id == OMAP_DSS_CHANNEL_LCD) {
- manager = mgr;
- enable = mgr->device->state ==
- OMAP_DSS_DISPLAY_ACTIVE;
- mgr->device->driver->disable(mgr->device);
- break;
- }
- }
-
- if (manager) {
- struct omap_dss_device *dssdev = manager->device;
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
-
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
-
- if (ovl->id != 0 && ovl->manager == manager)
- dispc_enable_plane(ovl->id, 0);
- }
-
- dispc_go(manager->id);
+ if (bit & errors) {
+ DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
+ ovl->name);
+ dispc_ovl_enable(ovl->id, false);
+ dispc_mgr_go(ovl->manager->id);
mdelay(50);
- if (enable)
- dssdev->driver->enable(dssdev);
}
}
- if (errors & DISPC_IRQ_SYNC_LOST_DIGIT) {
- struct omap_overlay_manager *manager = NULL;
- bool enable = false;
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ unsigned bit;
- DSSERR("SYNC_LOST_DIGIT, disabling TV\n");
+ mgr = omap_dss_get_overlay_manager(i);
+ bit = sync_lost_bits[i];
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- mgr = omap_dss_get_overlay_manager(i);
+ if (bit & errors) {
+ struct omap_dss_device *dssdev = mgr->device;
+ bool enable;
- if (mgr->id == OMAP_DSS_CHANNEL_DIGIT) {
- manager = mgr;
- enable = mgr->device->state ==
- OMAP_DSS_DISPLAY_ACTIVE;
- mgr->device->driver->disable(mgr->device);
- break;
- }
- }
+ DSSERR("SYNC_LOST on channel %s, restarting the output "
+ "with video overlays disabled\n",
+ mgr->name);
- if (manager) {
- struct omap_dss_device *dssdev = manager->device;
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
-
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
-
- if (ovl->id != 0 && ovl->manager == manager)
- dispc_enable_plane(ovl->id, 0);
- }
+ enable = dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
+ dssdev->driver->disable(dssdev);
- dispc_go(manager->id);
- mdelay(50);
- if (enable)
- dssdev->driver->enable(dssdev);
- }
- }
-
- if (errors & DISPC_IRQ_SYNC_LOST2) {
- struct omap_overlay_manager *manager = NULL;
- bool enable = false;
-
- DSSERR("SYNC_LOST for LCD2, disabling LCD2\n");
-
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- mgr = omap_dss_get_overlay_manager(i);
-
- if (mgr->id == OMAP_DSS_CHANNEL_LCD2) {
- manager = mgr;
- enable = mgr->device->state ==
- OMAP_DSS_DISPLAY_ACTIVE;
- mgr->device->driver->disable(mgr->device);
- break;
- }
- }
-
- if (manager) {
- struct omap_dss_device *dssdev = manager->device;
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
-
- if (ovl->id != 0 && ovl->manager == manager)
- dispc_enable_plane(ovl->id, 0);
+ if (ovl->id != OMAP_DSS_GFX &&
+ ovl->manager == mgr)
+ dispc_ovl_enable(ovl->id, false);
}
- dispc_go(manager->id);
+ dispc_mgr_go(mgr->id);
mdelay(50);
+
if (enable)
dssdev->driver->enable(dssdev);
}
@@ -3482,9 +3185,7 @@ static void dispc_error_worker(struct work_struct *work)
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
struct omap_overlay_manager *mgr;
mgr = omap_dss_get_overlay_manager(i);
-
- if (mgr->caps & OMAP_DSS_OVL_CAP_DISPC)
- mgr->device->driver->disable(mgr->device);
+ mgr->device->driver->disable(mgr->device);
}
}
@@ -3492,6 +3193,8 @@ static void dispc_error_worker(struct work_struct *work)
dispc.irq_error_mask |= errors;
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+ dispc_runtime_put();
}
int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
@@ -3586,6 +3289,8 @@ static void _omap_dispc_initialize_irq(void)
dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
if (dss_has_feature(FEAT_MGR_LCD2))
dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
+ if (dss_feat_get_num_ovls() > 3)
+ dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
/* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
* so clear it */
@@ -3635,6 +3340,8 @@ static void _omap_dispc_initial_config(void)
dispc_read_plane_fifo_sizes();
dispc_configure_burst_sizes();
+
+ dispc_ovl_enable_zorder_planes();
}
/* DISPC HW IP initialisation */
@@ -3734,7 +3441,6 @@ static int omap_dispchw_remove(struct platform_device *pdev)
static int dispc_runtime_suspend(struct device *dev)
{
dispc_save_context();
- clk_disable(dispc.dss_clk);
dss_runtime_put();
return 0;
@@ -3748,7 +3454,6 @@ static int dispc_runtime_resume(struct device *dev)
if (r < 0)
return r;
- clk_enable(dispc.dss_clk);
dispc_restore_context();
return 0;
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 6c9ee0a0efb..c06efc38983 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -291,6 +291,8 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
return 0x00BC;
case OMAP_DSS_VIDEO2:
return 0x014C;
+ case OMAP_DSS_VIDEO3:
+ return 0x0300;
default:
BUG();
}
@@ -304,6 +306,8 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0000;
+ case OMAP_DSS_VIDEO3:
+ return 0x0008;
default:
BUG();
}
@@ -316,6 +320,8 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0004;
+ case OMAP_DSS_VIDEO3:
+ return 0x000C;
default:
BUG();
}
@@ -330,6 +336,8 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
return 0x0544;
case OMAP_DSS_VIDEO2:
return 0x04BC;
+ case OMAP_DSS_VIDEO3:
+ return 0x0310;
default:
BUG();
}
@@ -344,6 +352,8 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
return 0x0548;
case OMAP_DSS_VIDEO2:
return 0x04C0;
+ case OMAP_DSS_VIDEO3:
+ return 0x0314;
default:
BUG();
}
@@ -356,6 +366,8 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0008;
+ case OMAP_DSS_VIDEO3:
+ return 0x009C;
default:
BUG();
}
@@ -368,6 +380,8 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x000C;
+ case OMAP_DSS_VIDEO3:
+ return 0x00A8;
default:
BUG();
}
@@ -381,6 +395,8 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0010;
+ case OMAP_DSS_VIDEO3:
+ return 0x0070;
default:
BUG();
}
@@ -395,6 +411,8 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
return 0x0568;
case OMAP_DSS_VIDEO2:
return 0x04DC;
+ case OMAP_DSS_VIDEO3:
+ return 0x032C;
default:
BUG();
}
@@ -408,6 +426,8 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0014;
+ case OMAP_DSS_VIDEO3:
+ return 0x008C;
default:
BUG();
}
@@ -421,6 +441,8 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0018;
+ case OMAP_DSS_VIDEO3:
+ return 0x0088;
default:
BUG();
}
@@ -434,6 +456,8 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x001C;
+ case OMAP_DSS_VIDEO3:
+ return 0x00A4;
default:
BUG();
}
@@ -447,6 +471,8 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0020;
+ case OMAP_DSS_VIDEO3:
+ return 0x0098;
default:
BUG();
}
@@ -459,6 +485,7 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
return 0x0034;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
BUG();
default:
BUG();
@@ -472,6 +499,7 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
return 0x0038;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
BUG();
default:
BUG();
@@ -486,6 +514,8 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0024;
+ case OMAP_DSS_VIDEO3:
+ return 0x0090;
default:
BUG();
}
@@ -500,6 +530,8 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
return 0x0580;
case OMAP_DSS_VIDEO2:
return 0x055C;
+ case OMAP_DSS_VIDEO3:
+ return 0x0424;
default:
BUG();
}
@@ -513,6 +545,8 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0028;
+ case OMAP_DSS_VIDEO3:
+ return 0x0094;
default:
BUG();
}
@@ -527,6 +561,8 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x002C;
+ case OMAP_DSS_VIDEO3:
+ return 0x0000;
default:
BUG();
}
@@ -541,6 +577,8 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
return 0x0584;
case OMAP_DSS_VIDEO2:
return 0x0560;
+ case OMAP_DSS_VIDEO3:
+ return 0x0428;
default:
BUG();
}
@@ -554,6 +592,8 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0030;
+ case OMAP_DSS_VIDEO3:
+ return 0x0004;
default:
BUG();
}
@@ -568,6 +608,8 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
return 0x0588;
case OMAP_DSS_VIDEO2:
return 0x0564;
+ case OMAP_DSS_VIDEO3:
+ return 0x042C;
default:
BUG();
}
@@ -582,6 +624,8 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0034 + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0010 + i * 0x8;
default:
BUG();
}
@@ -597,6 +641,8 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
return 0x058C + i * 0x8;
case OMAP_DSS_VIDEO2:
return 0x0568 + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0430 + i * 0x8;
default:
BUG();
}
@@ -611,6 +657,8 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0038 + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0014 + i * 0x8;
default:
BUG();
}
@@ -626,6 +674,8 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
return 0x0590 + i * 8;
case OMAP_DSS_VIDEO2:
return 0x056C + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0434 + i * 0x8;
default:
BUG();
}
@@ -639,6 +689,7 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
BUG();
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
return 0x0074 + i * 0x4;
default:
BUG();
@@ -655,6 +706,8 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
return 0x0124 + i * 0x4;
case OMAP_DSS_VIDEO2:
return 0x00B4 + i * 0x4;
+ case OMAP_DSS_VIDEO3:
+ return 0x0050 + i * 0x4;
default:
BUG();
}
@@ -670,6 +723,8 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
return 0x05CC + i * 0x4;
case OMAP_DSS_VIDEO2:
return 0x05A8 + i * 0x4;
+ case OMAP_DSS_VIDEO3:
+ return 0x0470 + i * 0x4;
default:
BUG();
}
@@ -684,6 +739,8 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
return 0x0174;
case OMAP_DSS_VIDEO2:
return 0x00E8;
+ case OMAP_DSS_VIDEO3:
+ return 0x00A0;
default:
BUG();
}
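
The OMAP_DSS_VIDEO3 cases added above only supply per-register offsets; how they combine with DISPC_OVL_BASE() into a full register address is assumed below to be a plain base + offset sum (the composition itself is not shown in these hunks). A minimal standalone C sketch, using only values visible above:

/*
 * Standalone sketch (plain C, not kernel code). Only the VIDEO1/2/3 base
 * and POS offset values from the hunks above are used; the base + offset
 * composition is an assumption about how dispc.h consumes these helpers.
 */
#include <stdio.h>

enum omap_plane { OMAP_DSS_VIDEO1, OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3 };

static unsigned ovl_base(enum omap_plane p)
{
	switch (p) {
	case OMAP_DSS_VIDEO1: return 0x00BC;
	case OMAP_DSS_VIDEO2: return 0x014C;
	case OMAP_DSS_VIDEO3: return 0x0300;	/* added by this patch */
	}
	return 0;
}

static unsigned pos_offset(enum omap_plane p)
{
	switch (p) {
	case OMAP_DSS_VIDEO1:
	case OMAP_DSS_VIDEO2: return 0x0008;
	case OMAP_DSS_VIDEO3: return 0x009C;	/* added by this patch */
	}
	return 0;
}

int main(void)
{
	printf("VIDEO2 POS register: 0x%04X\n",
	       ovl_base(OMAP_DSS_VIDEO2) + pos_offset(OMAP_DSS_VIDEO2));
	printf("VIDEO3 POS register: 0x%04X\n",
	       ovl_base(OMAP_DSS_VIDEO3) + pos_offset(OMAP_DSS_VIDEO3));
	return 0;
}
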
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 94495e45ec5..be331dc5a61 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -45,14 +45,13 @@ static ssize_t display_enabled_store(struct device *dev,
const char *buf, size_t size)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- int r, enabled;
+ int r;
+ bool enabled;
- r = kstrtoint(buf, 0, &enabled);
+ r = strtobool(buf, &enabled);
if (r)
return r;
- enabled = !!enabled;
-
if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
if (enabled) {
r = dssdev->driver->enable(dssdev);
@@ -79,17 +78,16 @@ static ssize_t display_tear_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- int te, r;
+ int r;
+ bool te;
if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
return -ENOENT;
- r = kstrtoint(buf, 0, &te);
+ r = strtobool(buf, &te);
if (r)
return r;
- te = !!te;
-
r = dssdev->driver->enable_te(dssdev, te);
if (r)
return r;
@@ -195,17 +193,16 @@ static ssize_t display_mirror_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- int mirror, r;
+ int r;
+ bool mirror;
if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
return -ENOENT;
- r = kstrtoint(buf, 0, &mirror);
+ r = strtobool(buf, &mirror);
if (r)
return r;
- mirror = !!mirror;
-
r = dssdev->driver->set_mirror(dssdev, mirror);
if (r)
return r;
@@ -302,11 +299,15 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
return 16;
case OMAP_DISPLAY_TYPE_DBI:
- case OMAP_DISPLAY_TYPE_DSI:
if (dssdev->ctrl.pixel_size == 24)
return 24;
else
return 16;
+ case OMAP_DISPLAY_TYPE_DSI:
+ if (dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) > 16)
+ return 24;
+ else
+ return 16;
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
case OMAP_DISPLAY_TYPE_HDMI:
@@ -342,9 +343,11 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
bpp = 24;
break;
case OMAP_DISPLAY_TYPE_DBI:
- case OMAP_DISPLAY_TYPE_DSI:
bpp = dssdev->ctrl.pixel_size;
break;
+ case OMAP_DISPLAY_TYPE_DSI:
+ bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+ break;
default:
BUG();
}
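
The three sysfs store handlers above move from kstrtoint() plus a "!!" normalisation to strtobool() with a bool. A userspace approximation of strtobool()'s behaviour in this era (first character decides: 'y'/'Y'/'1' is true, 'n'/'N'/'0' is false, anything else -EINVAL) shows why the explicit normalisation step can be dropped; the helper name parse_bool is made up for the demo.

/* Standalone userspace sketch approximating kernel strtobool() semantics. */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static int parse_bool(const char *s, bool *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	const char *inputs[] = { "1", "y", "0", "N", "2" };
	unsigned i;

	for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		bool v;
		int r = parse_bool(inputs[i], &v);

		if (r)
			printf("\"%s\" -> error %d\n", inputs[i], r);
		else
			printf("\"%s\" -> %s\n", inputs[i], v ? "true" : "false");
	}
	return 0;
}
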
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index f053b180ecd..976ac23dcd0 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
@@ -82,9 +83,11 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
- r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
- if (r)
+ r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
+ if (r) {
+ dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
return r;
+ }
*fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
*lck_div = dispc_cinfo.lck_div;
@@ -109,7 +112,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
if (r)
return r;
- r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
+ r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
return r;
@@ -129,7 +132,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
bool is_tft;
int r = 0;
- dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+ dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
@@ -153,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
t->pixel_clock = pck;
}
- dispc_set_lcd_timings(dssdev->manager->id, t);
+ dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
return 0;
}
@@ -164,11 +167,12 @@ static void dpi_basic_init(struct omap_dss_device *dssdev)
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
- dispc_set_parallel_interface_mode(dssdev->manager->id,
- OMAP_DSS_PARALLELMODE_BYPASS);
- dispc_set_lcd_display_type(dssdev->manager->id, is_tft ?
+ dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS);
+ dispc_mgr_enable_stallmode(dssdev->manager->id, false);
+
+ dispc_mgr_set_lcd_display_type(dssdev->manager->id, is_tft ?
OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
- dispc_set_tft_data_lines(dssdev->manager->id,
+ dispc_mgr_set_tft_data_lines(dssdev->manager->id,
dssdev->phy.dpi.data_lines);
}
@@ -176,6 +180,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
{
int r;
+ if (dssdev->manager == NULL) {
+ DSSERR("failed to enable display: no manager\n");
+ return -ENODEV;
+ }
+
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
@@ -277,7 +286,7 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
}
dpi_set_mode(dssdev);
- dispc_go(dssdev->manager->id);
+ dispc_mgr_go(dssdev->manager->id);
dispc_runtime_put();
dss_runtime_put();
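
Beyond the dispc_mgr_* renames, the dpi.c hunks above add one behavioural change: if dispc_mgr_set_clock_div() fails after the DISPC clock source was already switched to the DSI PLL output, the source is switched back to the plain functional clock. A small standalone sketch of that rollback-on-error shape; the helper names and the -EINVAL trigger below are stand-ins, not omapdss API.

#include <stdio.h>
#include <errno.h>

enum clk_src { CLK_SRC_FCK, CLK_SRC_DSI_PLL_HSDIV_DISPC };

static enum clk_src cur_src = CLK_SRC_FCK;

static void select_dispc_clk_source(enum clk_src src)
{
	cur_src = src;
}

static int set_clock_div(int lck_div, int pck_div)
{
	/* stand-in: reject obviously invalid dividers */
	return (lck_div < 1 || pck_div < 1) ? -EINVAL : 0;
}

static int set_dsi_clk(int lck_div, int pck_div)
{
	int r;

	select_dispc_clk_source(CLK_SRC_DSI_PLL_HSDIV_DISPC);

	r = set_clock_div(lck_div, pck_div);
	if (r) {
		/* roll back so a failed enable leaves the FCK source selected */
		select_dispc_clk_source(CLK_SRC_FCK);
		return r;
	}

	return 0;
}

int main(void)
{
	printf("good divs: r=%d, src=%d\n", set_dsi_clk(1, 4), cur_src);
	printf("bad divs:  r=%d, src=%d\n", set_dsi_clk(0, 0), cur_src);
	return 0;
}
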
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 7adbbeb8433..5abf8e7e745 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
@@ -39,6 +40,7 @@
#include <linux/pm_runtime.h>
#include <video/omapdss.h>
+#include <video/mipi_display.h>
#include <plat/clock.h>
#include "dss.h"
@@ -131,7 +133,7 @@ struct dsi_reg { u16 idx; };
#define DSI_IRQ_TA_TIMEOUT (1 << 20)
#define DSI_IRQ_ERROR_MASK \
(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
- DSI_IRQ_TA_TIMEOUT)
+ DSI_IRQ_TA_TIMEOUT | DSI_IRQ_SYNC_LOST)
#define DSI_IRQ_CHANNEL_MASK 0xf
/* Virtual channel interrupts */
@@ -198,18 +200,6 @@ struct dsi_reg { u16 idx; };
DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
-#define DSI_DT_DCS_SHORT_WRITE_0 0x05
-#define DSI_DT_DCS_SHORT_WRITE_1 0x15
-#define DSI_DT_DCS_READ 0x06
-#define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
-#define DSI_DT_NULL_PACKET 0x09
-#define DSI_DT_DCS_LONG_WRITE 0x39
-
-#define DSI_DT_RX_ACK_WITH_ERR 0x02
-#define DSI_DT_RX_DCS_LONG_READ 0x1c
-#define DSI_DT_RX_SHORT_READ_1 0x21
-#define DSI_DT_RX_SHORT_READ_2 0x22
-
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
#define DSI_MAX_NR_ISRS 2
@@ -228,9 +218,9 @@ enum fifo_size {
DSI_FIFO_SIZE_128 = 4,
};
-enum dsi_vc_mode {
- DSI_VC_MODE_L4 = 0,
- DSI_VC_MODE_VP,
+enum dsi_vc_source {
+ DSI_VC_SOURCE_L4 = 0,
+ DSI_VC_SOURCE_VP,
};
enum dsi_lane {
@@ -274,7 +264,8 @@ struct dsi_data {
struct clk *dss_clk;
struct clk *sys_clk;
- void (*dsi_mux_pads)(bool enable);
+ int (*enable_pads)(int dsi_id, unsigned lane_mask);
+ void (*disable_pads)(int dsi_id, unsigned lane_mask);
struct dsi_clock_info current_cinfo;
@@ -282,7 +273,7 @@ struct dsi_data {
struct regulator *vdds_dsi_reg;
struct {
- enum dsi_vc_mode mode;
+ enum dsi_vc_source source;
struct omap_dss_device *dssdev;
enum fifo_size fifo_size;
int vc_id;
@@ -368,14 +359,9 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
return dsi_pdev_map[module];
}
-static int dsi_get_dsidev_id(struct platform_device *dsidev)
+static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
{
- /* TEMP: Pass 0 as the dsi module index till the time the dsi platform
- * device names aren't changed to the form "omapdss_dsi.0",
- * "omapdss_dsi.1" and so on */
- BUG_ON(dsidev->id != -1);
-
- return 0;
+ return dsidev->id;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
@@ -437,6 +423,21 @@ static inline int wait_for_bit_change(struct platform_device *dsidev,
return value;
}
+u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case OMAP_DSS_DSI_FMT_RGB888:
+ case OMAP_DSS_DSI_FMT_RGB666:
+ return 24;
+ case OMAP_DSS_DSI_FMT_RGB666_PACKED:
+ return 18;
+ case OMAP_DSS_DSI_FMT_RGB565:
+ return 16;
+ default:
+ BUG();
+ }
+}
+
#ifdef DEBUG
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
@@ -453,6 +454,7 @@ static void dsi_perf_mark_start(struct platform_device *dsidev)
static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct omap_dss_device *dssdev = dsi->update_region.device;
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
@@ -476,7 +478,7 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_bytes = dsi->update_region.w *
dsi->update_region.h *
- dsi->update_region.device->ctrl.pixel_size / 8;
+ dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
"%u bytes, %u kbytes/sec\n",
@@ -1287,7 +1289,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
* with DSS_SYS_CLK source also */
cinfo->highfreq = 0;
} else {
- cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
+ cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
if (cinfo->clkin < 32000000)
cinfo->highfreq = 0;
@@ -2360,6 +2362,24 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
return 0;
}
+static unsigned dsi_get_lane_mask(struct omap_dss_device *dssdev)
+{
+ unsigned lanes = 0;
+
+ if (dssdev->phy.dsi.clk_lane != 0)
+ lanes |= 1 << (dssdev->phy.dsi.clk_lane - 1);
+ if (dssdev->phy.dsi.data1_lane != 0)
+ lanes |= 1 << (dssdev->phy.dsi.data1_lane - 1);
+ if (dssdev->phy.dsi.data2_lane != 0)
+ lanes |= 1 << (dssdev->phy.dsi.data2_lane - 1);
+ if (dssdev->phy.dsi.data3_lane != 0)
+ lanes |= 1 << (dssdev->phy.dsi.data3_lane - 1);
+ if (dssdev->phy.dsi.data4_lane != 0)
+ lanes |= 1 << (dssdev->phy.dsi.data4_lane - 1);
+
+ return lanes;
+}
+
static int dsi_cio_init(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
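
dsi_get_lane_mask() above converts 1-based lane positions into the bitmask handed to the board's enable_pads()/disable_pads() callbacks. A standalone sketch with hypothetical lane numbers (clk on lane 1, data1 on 2, data2 on 3; a given board may wire them differently):

#include <stdio.h>

int main(void)
{
	/* hypothetical example wiring, not taken from the patch */
	unsigned clk_lane = 1, data1_lane = 2, data2_lane = 3;
	unsigned lanes = 0;

	if (clk_lane != 0)
		lanes |= 1u << (clk_lane - 1);
	if (data1_lane != 0)
		lanes |= 1u << (data1_lane - 1);
	if (data2_lane != 0)
		lanes |= 1u << (data2_lane - 1);

	printf("lane mask passed to enable_pads(): 0x%x\n", lanes);
	return 0;
}
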
@@ -2370,8 +2390,9 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
DSSDBGF();
- if (dsi->dsi_mux_pads)
- dsi->dsi_mux_pads(true);
+ r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+ if (r)
+ return r;
dsi_enable_scp_clk(dsidev);
@@ -2452,6 +2473,12 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
dsi_cio_timings(dsidev);
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ /* DDR_CLK_ALWAYS_ON */
+ REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
+ dssdev->panel.dsi_vm_data.ddr_clk_always_on, 13, 13);
+ }
+
dsi->ulps_enabled = false;
DSSDBG("CIO init done\n");
@@ -2467,19 +2494,21 @@ err_cio_pwr:
dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
dsi_disable_scp_clk(dsidev);
- if (dsi->dsi_mux_pads)
- dsi->dsi_mux_pads(false);
+ dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
return r;
}
-static void dsi_cio_uninit(struct platform_device *dsidev)
+static void dsi_cio_uninit(struct omap_dss_device *dssdev)
{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ /* DDR_CLK_ALWAYS_ON */
+ REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
+
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsidev);
- if (dsi->dsi_mux_pads)
- dsi->dsi_mux_pads(false);
+ dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
}
static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2669,10 +2698,10 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
if (!dsi_vc_is_enabled(dsidev, channel))
return 0;
- switch (dsi->vc[channel].mode) {
- case DSI_VC_MODE_VP:
+ switch (dsi->vc[channel].source) {
+ case DSI_VC_SOURCE_VP:
return dsi_sync_vc_vp(dsidev, channel);
- case DSI_VC_MODE_L4:
+ case DSI_VC_SOURCE_L4:
return dsi_sync_vc_l4(dsidev, channel);
default:
BUG();
@@ -2726,43 +2755,12 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
}
-static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
+static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
+ enum dsi_vc_source source)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- if (dsi->vc[channel].mode == DSI_VC_MODE_L4)
- return 0;
-
- DSSDBGF("%d", channel);
-
- dsi_sync_vc(dsidev, channel);
-
- dsi_vc_enable(dsidev, channel, 0);
-
- /* VC_BUSY */
- if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
- DSSERR("vc(%d) busy when trying to config for L4\n", channel);
- return -EIO;
- }
-
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
-
- /* DCS_CMD_ENABLE */
- if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30);
-
- dsi_vc_enable(dsidev, channel, 1);
-
- dsi->vc[channel].mode = DSI_VC_MODE_L4;
-
- return 0;
-}
-
-static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- if (dsi->vc[channel].mode == DSI_VC_MODE_VP)
+ if (dsi->vc[channel].source == source)
return 0;
DSSDBGF("%d", channel);
@@ -2777,21 +2775,22 @@ static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
return -EIO;
}
- /* SOURCE, 1 = video port */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1);
+ /* SOURCE, 0 = L4, 1 = video port */
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
/* DCS_CMD_ENABLE */
- if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30);
+ if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
+ bool enable = source == DSI_VC_SOURCE_VP;
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
+ }
dsi_vc_enable(dsidev, channel, 1);
- dsi->vc[channel].mode = DSI_VC_MODE_VP;
+ dsi->vc[channel].source = source;
return 0;
}
-
void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
bool enable)
{
@@ -2810,6 +2809,10 @@ void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
dsi_if_enable(dsidev, 1);
dsi_force_tx_stop_mode_io(dsidev);
+
+ /* start the DDR clock by sending a NULL packet */
+ if (dssdev->panel.dsi_vm_data.ddr_clk_always_on && enable)
+ dsi_vc_send_null(dssdev, channel);
}
EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
@@ -2873,16 +2876,16 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
- if (dt == DSI_DT_RX_ACK_WITH_ERR) {
+ if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
u16 err = FLD_GET(val, 23, 8);
dsi_show_rx_ack_with_err(err);
- } else if (dt == DSI_DT_RX_SHORT_READ_1) {
+ } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
DSSERR("\tDCS short response, 1 byte: %#x\n",
FLD_GET(val, 23, 8));
- } else if (dt == DSI_DT_RX_SHORT_READ_2) {
+ } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
DSSERR("\tDCS short response, 2 byte: %#x\n",
FLD_GET(val, 23, 8));
- } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
+ } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
dsi_vc_flush_long_data(dsidev, channel);
@@ -3007,7 +3010,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
return -EINVAL;
}
- dsi_vc_config_l4(dsidev, channel);
+ dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
@@ -3066,7 +3069,7 @@ static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
channel,
data_type, data & 0xff, (data >> 8) & 0xff);
- dsi_vc_config_l4(dsidev, channel);
+ dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
@@ -3085,44 +3088,66 @@ static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- u8 nullpkg[] = {0, 0, 0, 0};
- return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg,
- 4, 0);
+ return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
+ 0, 0);
}
EXPORT_SYMBOL(dsi_vc_send_null);
-int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len)
+static int dsi_vc_write_nosync_common(struct omap_dss_device *dssdev,
+ int channel, u8 *data, int len, enum dss_dsi_content_type type)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
int r;
- BUG_ON(len == 0);
-
- if (len == 1) {
- r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0,
- data[0], 0);
+ if (len == 0) {
+ BUG_ON(type == DSS_DSI_CONTENT_DCS);
+ r = dsi_vc_send_short(dsidev, channel,
+ MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
+ } else if (len == 1) {
+ r = dsi_vc_send_short(dsidev, channel,
+ type == DSS_DSI_CONTENT_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
} else if (len == 2) {
- r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1,
+ r = dsi_vc_send_short(dsidev, channel,
+ type == DSS_DSI_CONTENT_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE_PARAM,
data[0] | (data[1] << 8), 0);
} else {
- /* 0x39 = DCS Long Write */
- r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
- data, len, 0);
+ r = dsi_vc_send_long(dsidev, channel,
+ type == DSS_DSI_CONTENT_GENERIC ?
+ MIPI_DSI_GENERIC_LONG_WRITE :
+ MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
}
return r;
}
+
+int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len)
+{
+ return dsi_vc_write_nosync_common(dssdev, channel, data, len,
+ DSS_DSI_CONTENT_DCS);
+}
EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
-int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len)
+int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len)
+{
+ return dsi_vc_write_nosync_common(dssdev, channel, data, len,
+ DSS_DSI_CONTENT_GENERIC);
+}
+EXPORT_SYMBOL(dsi_vc_generic_write_nosync);
+
+static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len, enum dss_dsi_content_type type)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
int r;
- r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len);
+ r = dsi_vc_write_nosync_common(dssdev, channel, data, len, type);
if (r)
goto err;
@@ -3140,18 +3165,39 @@ int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
return 0;
err:
- DSSERR("dsi_vc_dcs_write(ch %d, cmd 0x%02x, len %d) failed\n",
+ DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
channel, data[0], len);
return r;
}
+
+int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
+ int len)
+{
+ return dsi_vc_write_common(dssdev, channel, data, len,
+ DSS_DSI_CONTENT_DCS);
+}
EXPORT_SYMBOL(dsi_vc_dcs_write);
+int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
+ int len)
+{
+ return dsi_vc_write_common(dssdev, channel, data, len,
+ DSS_DSI_CONTENT_GENERIC);
+}
+EXPORT_SYMBOL(dsi_vc_generic_write);
+
int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
{
return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
}
EXPORT_SYMBOL(dsi_vc_dcs_write_0);
+int dsi_vc_generic_write_0(struct omap_dss_device *dssdev, int channel)
+{
+ return dsi_vc_generic_write(dssdev, channel, NULL, 0);
+}
+EXPORT_SYMBOL(dsi_vc_generic_write_0);
+
int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
u8 param)
{
@@ -3162,25 +3208,87 @@ int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
}
EXPORT_SYMBOL(dsi_vc_dcs_write_1);
-int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *buf, int buflen)
+int dsi_vc_generic_write_1(struct omap_dss_device *dssdev, int channel,
+ u8 param)
+{
+ return dsi_vc_generic_write(dssdev, channel, &param, 1);
+}
+EXPORT_SYMBOL(dsi_vc_generic_write_1);
+
+int dsi_vc_generic_write_2(struct omap_dss_device *dssdev, int channel,
+ u8 param1, u8 param2)
+{
+ u8 buf[2];
+ buf[0] = param1;
+ buf[1] = param2;
+ return dsi_vc_generic_write(dssdev, channel, buf, 2);
+}
+EXPORT_SYMBOL(dsi_vc_generic_write_2);
+
+static int dsi_vc_dcs_send_read_request(struct omap_dss_device *dssdev,
+ int channel, u8 dcs_cmd)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- u32 val;
- u8 dt;
int r;
if (dsi->debug_read)
- DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
+ DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
+ channel, dcs_cmd);
- r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
- if (r)
- goto err;
+ r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
+ if (r) {
+ DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
+ " failed\n", channel, dcs_cmd);
+ return r;
+ }
- r = dsi_vc_send_bta_sync(dssdev, channel);
- if (r)
- goto err;
+ return 0;
+}
+
+static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
+ int channel, u8 *reqdata, int reqlen)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ u16 data;
+ u8 data_type;
+ int r;
+
+ if (dsi->debug_read)
+ DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
+ channel, reqlen);
+
+ if (reqlen == 0) {
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+ data = 0;
+ } else if (reqlen == 1) {
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+ data = reqdata[0];
+ } else if (reqlen == 2) {
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+ data = reqdata[0] | (reqdata[1] << 8);
+ } else {
+ BUG();
+ }
+
+ r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
+ if (r) {
+ DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
+ " failed\n", channel, reqlen);
+ return r;
+ }
+
+ return 0;
+}
+
+static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
+ u8 *buf, int buflen, enum dss_dsi_content_type type)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ u32 val;
+ u8 dt;
+ int r;
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
@@ -3193,16 +3301,20 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
- if (dt == DSI_DT_RX_ACK_WITH_ERR) {
+ if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
u16 err = FLD_GET(val, 23, 8);
dsi_show_rx_ack_with_err(err);
r = -EIO;
goto err;
- } else if (dt == DSI_DT_RX_SHORT_READ_1) {
+ } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
+ MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
+ MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
u8 data = FLD_GET(val, 15, 8);
if (dsi->debug_read)
- DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
+ DSSDBG("\t%s short response, 1 byte: %02x\n",
+ type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
+ "DCS", data);
if (buflen < 1) {
r = -EIO;
@@ -3212,10 +3324,14 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
buf[0] = data;
return 1;
- } else if (dt == DSI_DT_RX_SHORT_READ_2) {
+ } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
+ MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
+ MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
u16 data = FLD_GET(val, 23, 8);
if (dsi->debug_read)
- DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
+ DSSDBG("\t%s short response, 2 byte: %04x\n",
+ type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
+ "DCS", data);
if (buflen < 2) {
r = -EIO;
@@ -3226,11 +3342,15 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
buf[1] = (data >> 8) & 0xff;
return 2;
- } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
+ } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
+ MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
+ MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
int w;
int len = FLD_GET(val, 23, 8);
if (dsi->debug_read)
- DSSDBG("\tDCS long response, len %d\n", len);
+ DSSDBG("\t%s long response, len %d\n",
+ type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
+ "DCS", len);
if (len > buflen) {
r = -EIO;
@@ -3266,58 +3386,126 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
BUG();
err:
- DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n",
- channel, dcs_cmd);
+ DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
+ type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
+
return r;
+}
+int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ int r;
+
+ r = dsi_vc_dcs_send_read_request(dssdev, channel, dcs_cmd);
+ if (r)
+ goto err;
+
+ r = dsi_vc_send_bta_sync(dssdev, channel);
+ if (r)
+ goto err;
+
+ r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
+ DSS_DSI_CONTENT_DCS);
+ if (r < 0)
+ goto err;
+
+ if (r != buflen) {
+ r = -EIO;
+ goto err;
+ }
+
+ return 0;
+err:
+ DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
+ return r;
}
EXPORT_SYMBOL(dsi_vc_dcs_read);
-int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *data)
+static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
+ u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
int r;
- r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, data, 1);
+ r = dsi_vc_generic_send_read_request(dssdev, channel, reqdata, reqlen);
+ if (r)
+ return r;
+
+ r = dsi_vc_send_bta_sync(dssdev, channel);
+ if (r)
+ return r;
+ r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
+ DSS_DSI_CONTENT_GENERIC);
if (r < 0)
return r;
- if (r != 1)
- return -EIO;
+ if (r != buflen) {
+ r = -EIO;
+ return r;
+ }
return 0;
}
-EXPORT_SYMBOL(dsi_vc_dcs_read_1);
-int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *data1, u8 *data2)
+int dsi_vc_generic_read_0(struct omap_dss_device *dssdev, int channel, u8 *buf,
+ int buflen)
{
- u8 buf[2];
int r;
- r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, buf, 2);
+ r = dsi_vc_generic_read(dssdev, channel, NULL, 0, buf, buflen);
+ if (r) {
+ DSSERR("dsi_vc_generic_read_0(ch %d) failed\n", channel);
+ return r;
+ }
- if (r < 0)
+ return 0;
+}
+EXPORT_SYMBOL(dsi_vc_generic_read_0);
+
+int dsi_vc_generic_read_1(struct omap_dss_device *dssdev, int channel, u8 param,
+ u8 *buf, int buflen)
+{
+ int r;
+
+ r = dsi_vc_generic_read(dssdev, channel, &param, 1, buf, buflen);
+ if (r) {
+ DSSERR("dsi_vc_generic_read_1(ch %d) failed\n", channel);
return r;
+ }
- if (r != 2)
- return -EIO;
+ return 0;
+}
+EXPORT_SYMBOL(dsi_vc_generic_read_1);
- *data1 = buf[0];
- *data2 = buf[1];
+int dsi_vc_generic_read_2(struct omap_dss_device *dssdev, int channel,
+ u8 param1, u8 param2, u8 *buf, int buflen)
+{
+ int r;
+ u8 reqdata[2];
+
+ reqdata[0] = param1;
+ reqdata[1] = param2;
+
+ r = dsi_vc_generic_read(dssdev, channel, reqdata, 2, buf, buflen);
+ if (r) {
+ DSSERR("dsi_vc_generic_read_2(ch %d) failed\n", channel);
+ return r;
+ }
return 0;
}
-EXPORT_SYMBOL(dsi_vc_dcs_read_2);
+EXPORT_SYMBOL(dsi_vc_generic_read_2);
int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
u16 len)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
- len, 0);
+ return dsi_vc_send_short(dsidev, channel,
+ MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
}
EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
@@ -3508,6 +3696,75 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
ticks, x4 ? " x4" : "", x16 ? " x16" : "",
(total_ticks * 1000) / (fck / 1000 / 1000));
}
+
+static void dsi_config_vp_num_line_buffers(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ int num_line_buffers;
+
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+ unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
+ struct omap_video_timings *timings = &dssdev->panel.timings;
+ /*
+ * Don't use line buffers if width is greater than the video
+ * port's line buffer size
+ */
+ if (line_buf_size <= timings->x_res * bpp / 8)
+ num_line_buffers = 0;
+ else
+ num_line_buffers = 2;
+ } else {
+ /* Use maximum number of line buffers in command mode */
+ num_line_buffers = 2;
+ }
+
+ /* LINE_BUFFER */
+ REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
+}
+
+static void dsi_config_vp_sync_events(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ int de_pol = dssdev->panel.dsi_vm_data.vp_de_pol;
+ int hsync_pol = dssdev->panel.dsi_vm_data.vp_hsync_pol;
+ int vsync_pol = dssdev->panel.dsi_vm_data.vp_vsync_pol;
+ bool vsync_end = dssdev->panel.dsi_vm_data.vp_vsync_end;
+ bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end;
+ u32 r;
+
+ r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = FLD_MOD(r, de_pol, 9, 9); /* VP_DE_POL */
+ r = FLD_MOD(r, hsync_pol, 10, 10); /* VP_HSYNC_POL */
+ r = FLD_MOD(r, vsync_pol, 11, 11); /* VP_VSYNC_POL */
+ r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */
+ r = FLD_MOD(r, vsync_end, 16, 16); /* VP_VSYNC_END */
+ r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
+ r = FLD_MOD(r, hsync_end, 18, 18); /* VP_HSYNC_END */
+ dsi_write_reg(dsidev, DSI_CTRL, r);
+}
+
+static void dsi_config_blanking_modes(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ int blanking_mode = dssdev->panel.dsi_vm_data.blanking_mode;
+ int hfp_blanking_mode = dssdev->panel.dsi_vm_data.hfp_blanking_mode;
+ int hbp_blanking_mode = dssdev->panel.dsi_vm_data.hbp_blanking_mode;
+ int hsa_blanking_mode = dssdev->panel.dsi_vm_data.hsa_blanking_mode;
+ u32 r;
+
+ /*
+ * 0 = TX FIFO packets sent or LPS in corresponding blanking periods
+ * 1 = Long blanking packets are sent in corresponding blanking periods
+ */
+ r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */
+ r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
+ r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
+ r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
+ dsi_write_reg(dsidev, DSI_CTRL, r);
+}
+
static int dsi_proto_config(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
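
The three new helpers above all follow the same read-modify-write shape on DSI_CTRL via FLD_MOD(). A standalone sketch using the usual OMAP DSS definitions of FLD_MASK/FLD_VAL (assumed here; only FLD_MOD itself appears later in dss.h) and bit positions taken from dsi_config_vp_num_line_buffers() and dsi_config_vp_sync_events():

#include <stdio.h>

#define FLD_MASK(start, end)	(((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	unsigned int r = 0;		/* pretend DSI_CTRL read back as 0 */

	r = FLD_MOD(r, 2, 13, 12);	/* LINE_BUFFER = 2 line buffers */
	r = FLD_MOD(r, 1, 15, 15);	/* VP_VSYNC_START */
	r = FLD_MOD(r, 1, 17, 17);	/* VP_HSYNC_START */

	printf("DSI_CTRL would be written as 0x%08x\n", r);
	return 0;
}
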
@@ -3530,7 +3787,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
- switch (dssdev->ctrl.pixel_size) {
+ switch (dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt)) {
case 16:
buswidth = 0;
break;
@@ -3551,7 +3808,6 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
- r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
@@ -3562,6 +3818,13 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
dsi_write_reg(dsidev, DSI_CTRL, r);
+ dsi_config_vp_num_line_buffers(dssdev);
+
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ dsi_config_vp_sync_events(dssdev);
+ dsi_config_blanking_modes(dssdev);
+ }
+
dsi_vc_initial_config(dsidev, 0);
dsi_vc_initial_config(dsidev, 1);
dsi_vc_initial_config(dsidev, 2);
@@ -3580,6 +3843,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
unsigned ddr_clk_pre, ddr_clk_post;
unsigned enter_hs_mode_lat, exit_hs_mode_lat;
unsigned ths_eot;
+ int ndl = dsi_get_num_data_lanes_dssdev(dssdev);
u32 r;
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
@@ -3602,7 +3866,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
/* min 60ns + 52*UI */
tclk_post = ns2ddr(dsidev, 60) + 26;
- ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev));
+ ths_eot = DIV_ROUND_UP(4, ndl);
ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
4);
@@ -3632,162 +3896,114 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
enter_hs_mode_lat, exit_hs_mode_lat);
-}
-
-
-#define DSI_DECL_VARS \
- int __dsi_cb = 0; u32 __dsi_cv = 0;
-#define DSI_FLUSH(dsidev, ch) \
- if (__dsi_cb > 0) { \
- /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
- dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
- __dsi_cb = __dsi_cv = 0; \
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ /* TODO: Implement a video mode check_timings function */
+ int hsa = dssdev->panel.dsi_vm_data.hsa;
+ int hfp = dssdev->panel.dsi_vm_data.hfp;
+ int hbp = dssdev->panel.dsi_vm_data.hbp;
+ int vsa = dssdev->panel.dsi_vm_data.vsa;
+ int vfp = dssdev->panel.dsi_vm_data.vfp;
+ int vbp = dssdev->panel.dsi_vm_data.vbp;
+ int window_sync = dssdev->panel.dsi_vm_data.window_sync;
+ bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end;
+ struct omap_video_timings *timings = &dssdev->panel.timings;
+ int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+ int tl, t_he, width_bytes;
+
+ t_he = hsync_end ?
+ ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
+
+ width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+
+ /* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
+ tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
+ DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;
+
+ DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
+ hfp, hsync_end ? hsa : 0, tl);
+ DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
+ vsa, timings->y_res);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ r = FLD_MOD(r, hbp, 11, 0); /* HBP */
+ r = FLD_MOD(r, hfp, 23, 12); /* HFP */
+ r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */
+ dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
+ r = FLD_MOD(r, vbp, 7, 0); /* VBP */
+ r = FLD_MOD(r, vfp, 15, 8); /* VFP */
+ r = FLD_MOD(r, vsa, 23, 16); /* VSA */
+ r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */
+ dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
+ r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
+ r = FLD_MOD(r, tl, 31, 16); /* TL */
+ dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
}
+}
-#define DSI_PUSH(dsidev, ch, data) \
- do { \
- __dsi_cv |= (data) << (__dsi_cb * 8); \
- /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
- if (++__dsi_cb > 3) \
- DSI_FLUSH(dsidev, ch); \
- } while (0)
-
-static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
- int x, int y, int w, int h)
+int dsi_video_mode_enable(struct omap_dss_device *dssdev, int channel)
{
- /* Note: supports only 24bit colors in 32bit container */
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int first = 1;
- int fifo_stalls = 0;
- int max_dsi_packet_size;
- int max_data_per_packet;
- int max_pixels_per_packet;
- int pixels_left;
- int bytespp = dssdev->ctrl.pixel_size / 8;
- int scr_width;
- u32 __iomem *data;
- int start_offset;
- int horiz_inc;
- int current_x;
- struct omap_overlay *ovl;
-
- debug_irq = 0;
-
- DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
- x, y, w, h);
+ int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+ u8 data_type;
+ u16 word_count;
- ovl = dssdev->manager->overlays[0];
-
- if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
- return -EINVAL;
-
- if (dssdev->ctrl.pixel_size != 24)
- return -EINVAL;
-
- scr_width = ovl->info.screen_width;
- data = ovl->info.vaddr;
-
- start_offset = scr_width * y + x;
- horiz_inc = scr_width - w;
- current_x = x;
-
- /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
- * in fifo */
-
- /* When using CPU, max long packet size is TX buffer size */
- max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4;
-
- /* we seem to get better perf if we divide the tx fifo to half,
- and while the other half is being sent, we fill the other half
- max_dsi_packet_size /= 2; */
-
- max_data_per_packet = max_dsi_packet_size - 4 - 1;
-
- max_pixels_per_packet = max_data_per_packet / bytespp;
-
- DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
-
- pixels_left = w * h;
+ switch (dssdev->panel.dsi_pix_fmt) {
+ case OMAP_DSS_DSI_FMT_RGB888:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB666:
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB666_PACKED:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB565:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ BUG();
+ };
- DSSDBG("total pixels %d\n", pixels_left);
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
- data += start_offset;
+ /* MODE, 1 = video mode */
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- while (pixels_left > 0) {
- /* 0x2c = write_memory_start */
- /* 0x3c = write_memory_continue */
- u8 dcs_cmd = first ? 0x2c : 0x3c;
- int pixels;
- DSI_DECL_VARS;
- first = 0;
+ word_count = DIV_ROUND_UP(dssdev->panel.timings.x_res * bpp, 8);
-#if 1
- /* using fifo not empty */
- /* TX_FIFO_NOT_EMPTY */
- while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) {
- fifo_stalls++;
- if (fifo_stalls > 0xfffff) {
- DSSERR("fifo stalls overflow, pixels left %d\n",
- pixels_left);
- dsi_if_enable(dsidev, 0);
- return -EIO;
- }
- udelay(1);
- }
-#elif 1
- /* using fifo emptiness */
- while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
- max_dsi_packet_size) {
- fifo_stalls++;
- if (fifo_stalls > 0xfffff) {
- DSSERR("fifo stalls overflow, pixels left %d\n",
- pixels_left);
- dsi_if_enable(dsidev, 0);
- return -EIO;
- }
- }
-#else
- while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS,
- 7, 0) + 1) * 4 == 0) {
- fifo_stalls++;
- if (fifo_stalls > 0xfffff) {
- DSSERR("fifo stalls overflow, pixels left %d\n",
- pixels_left);
- dsi_if_enable(dsidev, 0);
- return -EIO;
- }
- }
-#endif
- pixels = min(max_pixels_per_packet, pixels_left);
+ dsi_vc_write_long_header(dsidev, channel, data_type, word_count, 0);
- pixels_left -= pixels;
+ dsi_vc_enable(dsidev, channel, true);
+ dsi_if_enable(dsidev, true);
- dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE,
- 1 + pixels * bytespp, 0);
+ dssdev->manager->enable(dssdev->manager);
- DSI_PUSH(dsidev, 0, dcs_cmd);
+ return 0;
+}
+EXPORT_SYMBOL(dsi_video_mode_enable);
- while (pixels-- > 0) {
- u32 pix = __raw_readl(data++);
+void dsi_video_mode_disable(struct omap_dss_device *dssdev, int channel)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff);
- DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff);
- DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff);
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
- current_x++;
- if (current_x == x+w) {
- current_x = x;
- data += horiz_inc;
- }
- }
+ /* MODE, 0 = command mode */
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
- DSI_FLUSH(dsidev, 0);
- }
+ dsi_vc_enable(dsidev, channel, true);
+ dsi_if_enable(dsidev, true);
- return 0;
+ dssdev->manager->disable(dssdev->manager);
}
+EXPORT_SYMBOL(dsi_video_mode_disable);
static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
@@ -3808,9 +4024,9 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
x, y, w, h);
- dsi_vc_config_vp(dsidev, channel);
+ dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
- bytespp = dssdev->ctrl.pixel_size / 8;
+ bytespp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
bytespl = w * bytespp;
bytespf = bytespl * h;
@@ -3831,7 +4047,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
- dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
+ dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
@@ -3956,11 +4172,9 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
dsi_perf_mark_setup(dsidev);
- if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- dss_setup_partial_planes(dssdev, x, y, w, h,
- enlarge_update_area);
- dispc_set_lcd_size(dssdev->manager->id, *w, *h);
- }
+ dss_setup_partial_planes(dssdev, x, y, w, h,
+ enlarge_update_area);
+ dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
return 0;
}
@@ -3982,27 +4196,16 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
* see rather obscure HW error happening, as DSS halts. */
BUG_ON(x % 2 == 1);
- if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- dsi->framedone_callback = callback;
- dsi->framedone_data = data;
-
- dsi->update_region.x = x;
- dsi->update_region.y = y;
- dsi->update_region.w = w;
- dsi->update_region.h = h;
- dsi->update_region.device = dssdev;
-
- dsi_update_screen_dispc(dssdev, x, y, w, h);
- } else {
- int r;
+ dsi->framedone_callback = callback;
+ dsi->framedone_data = data;
- r = dsi_update_screen_l4(dssdev, x, y, w, h);
- if (r)
- return r;
+ dsi->update_region.x = x;
+ dsi->update_region.y = y;
+ dsi->update_region.w = w;
+ dsi->update_region.h = h;
+ dsi->update_region.device = dssdev;
- dsi_perf_show(dsidev, "L4");
- callback(0, data);
- }
+ dsi_update_screen_dispc(dssdev, x, y, w, h);
return 0;
}
@@ -4013,28 +4216,9 @@ EXPORT_SYMBOL(omap_dsi_update);
static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
{
int r;
- u32 irq;
-
- irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
- r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
- irq);
- if (r) {
- DSSERR("can't get FRAMEDONE irq\n");
- return r;
- }
-
- dispc_set_lcd_display_type(dssdev->manager->id,
- OMAP_DSS_LCD_DISPLAY_TFT);
-
- dispc_set_parallel_interface_mode(dssdev->manager->id,
- OMAP_DSS_PARALLELMODE_DSI);
- dispc_enable_fifohandcheck(dssdev->manager->id, 1);
-
- dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
-
- {
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
+ u32 irq;
struct omap_video_timings timings = {
.hsw = 1,
.hfp = 1,
@@ -4044,21 +4228,46 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
.vbp = 0,
};
- dispc_set_lcd_timings(dssdev->manager->id, &timings);
+ irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
+ DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
+
+ r = omap_dispc_register_isr(dsi_framedone_irq_callback,
+ (void *) dssdev, irq);
+ if (r) {
+ DSSERR("can't get FRAMEDONE irq\n");
+ return r;
+ }
+
+ dispc_mgr_enable_stallmode(dssdev->manager->id, true);
+ dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
+
+ dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings);
+ } else {
+ dispc_mgr_enable_stallmode(dssdev->manager->id, false);
+ dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
+
+ dispc_mgr_set_lcd_timings(dssdev->manager->id,
+ &dssdev->panel.timings);
}
+ dispc_mgr_set_lcd_display_type(dssdev->manager->id,
+ OMAP_DSS_LCD_DISPLAY_TFT);
+ dispc_mgr_set_tft_data_lines(dssdev->manager->id,
+ dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt));
return 0;
}
static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
{
- u32 irq;
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
+ u32 irq;
- irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
+ irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
+ DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
- omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
- irq);
+ omap_dispc_unregister_isr(dsi_framedone_irq_callback,
+ (void *) dssdev, irq);
+ }
}
static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
@@ -4106,7 +4315,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
return r;
}
- r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
+ r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r) {
DSSERR("Failed to set dispc clocks\n");
return r;
@@ -4166,10 +4375,12 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
return 0;
err3:
- dsi_cio_uninit(dsidev);
+ dsi_cio_uninit(dssdev);
err2:
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
+
err1:
dsi_pll_uninit(dsidev, true);
err0:
@@ -4195,7 +4406,8 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
- dsi_cio_uninit(dsidev);
+ dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
+ dsi_cio_uninit(dssdev);
dsi_pll_uninit(dsidev, disconnect_lanes);
}
@@ -4211,6 +4423,12 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&dsi->lock);
+ if (dssdev->manager == NULL) {
+ DSSERR("failed to enable display: no manager\n");
+ r = -ENODEV;
+ goto err_start_dev;
+ }
+
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
@@ -4307,9 +4525,10 @@ int dsi_init_display(struct omap_dss_device *dssdev)
DSSDBG("DSI init\n");
- /* XXX these should be figured out dynamically */
- dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
+ dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
+ OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
+ }
if (dsi->vdds_dsi_reg == NULL) {
struct regulator *vdds_dsi;
@@ -4435,10 +4654,7 @@ static int dsi_get_clocks(struct platform_device *dsidev)
dsi->dss_clk = clk;
- if (cpu_is_omap34xx() || cpu_is_omap3630())
- clk = clk_get(&dsidev->dev, "dss2_alwon_fck");
- else
- clk = clk_get(&dsidev->dev, "sys_clk");
+ clk = clk_get(&dsidev->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
clk_put(dsi->dss_clk);
@@ -4462,7 +4678,7 @@ static void dsi_put_clocks(struct platform_device *dsidev)
}
/* DSI1 HW IP initialisation */
-static int omap_dsi1hw_probe(struct platform_device *dsidev)
+static int omap_dsihw_probe(struct platform_device *dsidev)
{
struct omap_display_platform_data *dss_plat_data;
struct omap_dss_board_info *board_info;
@@ -4483,7 +4699,8 @@ static int omap_dsi1hw_probe(struct platform_device *dsidev)
dss_plat_data = dsidev->dev.platform_data;
board_info = dss_plat_data->board_data;
- dsi->dsi_mux_pads = board_info->dsi_mux_pads;
+ dsi->enable_pads = board_info->dsi_enable_pads;
+ dsi->disable_pads = board_info->dsi_disable_pads;
spin_lock_init(&dsi->irq_lock);
spin_lock_init(&dsi->errors_lock);
@@ -4539,7 +4756,7 @@ static int omap_dsi1hw_probe(struct platform_device *dsidev)
/* DSI VCs initialization */
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
- dsi->vc[i].mode = DSI_VC_MODE_L4;
+ dsi->vc[i].source = DSI_VC_SOURCE_L4;
dsi->vc[i].dssdev = NULL;
dsi->vc[i].vc_id = 0;
}
@@ -4572,7 +4789,7 @@ err_alloc:
return r;
}
-static int omap_dsi1hw_remove(struct platform_device *dsidev)
+static int omap_dsihw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4602,10 +4819,6 @@ static int omap_dsi1hw_remove(struct platform_device *dsidev)
static int dsi_runtime_suspend(struct device *dev)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
-
- clk_disable(dsi->dss_clk);
-
dispc_runtime_put();
dss_runtime_put();
@@ -4614,7 +4827,6 @@ static int dsi_runtime_suspend(struct device *dev)
static int dsi_runtime_resume(struct device *dev)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
int r;
r = dss_runtime_get();
@@ -4625,8 +4837,6 @@ static int dsi_runtime_resume(struct device *dev)
if (r)
goto err_get_dispc;
- clk_enable(dsi->dss_clk);
-
return 0;
err_get_dispc:
@@ -4640,11 +4850,11 @@ static const struct dev_pm_ops dsi_pm_ops = {
.runtime_resume = dsi_runtime_resume,
};
-static struct platform_driver omap_dsi1hw_driver = {
- .probe = omap_dsi1hw_probe,
- .remove = omap_dsi1hw_remove,
+static struct platform_driver omap_dsihw_driver = {
+ .probe = omap_dsihw_probe,
+ .remove = omap_dsihw_remove,
.driver = {
- .name = "omapdss_dsi1",
+ .name = "omapdss_dsi",
.owner = THIS_MODULE,
.pm = &dsi_pm_ops,
},
@@ -4652,10 +4862,10 @@ static struct platform_driver omap_dsi1hw_driver = {
int dsi_init_platform_driver(void)
{
- return platform_driver_register(&omap_dsi1hw_driver);
+ return platform_driver_register(&omap_dsihw_driver);
}
void dsi_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_dsi1hw_driver);
+ return platform_driver_unregister(&omap_dsihw_driver);
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 0f9c3a6457a..17033457ee8 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/export.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
@@ -639,6 +640,17 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
}
+enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
+{
+ enum omap_display_type displays;
+
+ displays = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT);
+ if ((displays & OMAP_DISPLAY_TYPE_HDMI) == 0)
+ return DSS_VENC_TV_CLK;
+
+ return REG_GET(DSS_CONTROL, 15, 15);
+}
+
static int dss_get_clocks(void)
{
struct clk *clk;
@@ -691,11 +703,6 @@ static void dss_put_clocks(void)
clk_put(dss.dss_clk);
}
-struct clk *dss_get_ick(void)
-{
- return clk_get(&dss.pdev->dev, "ick");
-}
-
int dss_runtime_get(void)
{
int r;
@@ -824,13 +831,11 @@ static int omap_dsshw_remove(struct platform_device *pdev)
static int dss_runtime_suspend(struct device *dev)
{
dss_save_context();
- clk_disable(dss.dss_clk);
return 0;
}
static int dss_runtime_resume(struct device *dev)
{
- clk_enable(dss.dss_clk);
dss_restore_context();
return 0;
}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 9c94b1152c2..6308fc59fc9 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -97,10 +97,10 @@ extern unsigned int dss_debug;
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-enum omap_parallel_interface_mode {
- OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */
- OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */
- OMAP_DSS_PARALLELMODE_DSI,
+enum dss_io_pad_mode {
+ DSS_IO_PAD_MODE_RESET,
+ DSS_IO_PAD_MODE_RFBI,
+ DSS_IO_PAD_MODE_BYPASS,
};
enum dss_hdmi_venc_clk_source_select {
@@ -108,6 +108,11 @@ enum dss_hdmi_venc_clk_source_select {
DSS_HDMI_M_PCLK = 1,
};
+enum dss_dsi_content_type {
+ DSS_DSI_CONTENT_DCS,
+ DSS_DSI_CONTENT_GENERIC,
+};
+
struct dss_clock_info {
/* rates that we get with dividers below */
unsigned long fck;
@@ -150,16 +155,6 @@ struct dsi_clock_info {
bool use_sys_clk;
};
-/* HDMI PLL structure */
-struct hdmi_pll_info {
- u16 regn;
- u16 regm;
- u32 regmf;
- u16 regm2;
- u16 regsd;
- u16 dcofreq;
-};
-
struct seq_file;
struct platform_device;
@@ -209,9 +204,8 @@ void dss_uninit_platform_driver(void);
int dss_runtime_get(void);
void dss_runtime_put(void);
-struct clk *dss_get_ick(void);
-
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
+enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
@@ -279,6 +273,8 @@ void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
+u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
+
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
int dsi_pll_set_clock_div(struct platform_device *dsidev,
struct dsi_clock_info *cinfo);
@@ -309,6 +305,11 @@ static inline int dsi_runtime_get(struct platform_device *dsidev)
static inline void dsi_runtime_put(struct platform_device *dsidev)
{
}
+static inline u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
+{
+ WARN("%s: DSI not compiled in, returning pixel_size as 0\n", __func__);
+ return 0;
+}
static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
@@ -385,90 +386,71 @@ void dispc_disable_sidle(void);
void dispc_lcd_enable_signal_polarity(bool act_high);
void dispc_lcd_enable_signal(bool enable);
void dispc_pck_free_enable(bool enable);
-void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
-
-void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
void dispc_set_digit_size(u16 width, u16 height);
-u32 dispc_get_plane_fifo_size(enum omap_plane plane);
-void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_enable_fifomerge(bool enable);
-u32 dispc_get_burst_size(enum omap_plane plane);
-void dispc_enable_cpr(enum omap_channel channel, bool enable);
-void dispc_set_cpr_coef(enum omap_channel channel,
- struct omap_dss_cpr_coefs *coefs);
-
-void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
-void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
-void dispc_set_plane_pos(enum omap_plane plane, u16 x, u16 y);
-void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height);
-void dispc_set_channel_out(enum omap_plane plane,
- enum omap_channel channel_out);
-
void dispc_enable_gamma_table(bool enable);
-int dispc_setup_plane(enum omap_plane plane,
- u32 paddr, u16 screen_width,
- u16 pos_x, u16 pos_y,
- u16 width, u16 height,
- u16 out_width, u16 out_height,
- enum omap_color_mode color_mode,
- bool ilace,
- enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror,
- u8 global_alpha, u8 pre_mult_alpha,
- enum omap_channel channel,
- u32 puv_addr);
-
-bool dispc_go_busy(enum omap_channel channel);
-void dispc_go(enum omap_channel channel);
-void dispc_enable_channel(enum omap_channel channel, bool enable);
-bool dispc_is_channel_enabled(enum omap_channel channel);
-int dispc_enable_plane(enum omap_plane plane, bool enable);
-void dispc_enable_replication(enum omap_plane plane, bool enable);
-
-void dispc_set_parallel_interface_mode(enum omap_channel channel,
- enum omap_parallel_interface_mode mode);
-void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
-void dispc_set_lcd_display_type(enum omap_channel channel,
- enum omap_lcd_display_type type);
void dispc_set_loadmode(enum omap_dss_load_mode mode);
-void dispc_set_default_color(enum omap_channel channel, u32 color);
-u32 dispc_get_default_color(enum omap_channel channel);
-void dispc_set_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type type,
- u32 trans_key);
-void dispc_get_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type *type,
- u32 *trans_key);
-void dispc_enable_trans_key(enum omap_channel ch, bool enable);
-void dispc_enable_alpha_blending(enum omap_channel ch, bool enable);
-bool dispc_trans_key_enabled(enum omap_channel ch);
-bool dispc_alpha_blending_enabled(enum omap_channel ch);
-
bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
-void dispc_set_lcd_timings(enum omap_channel channel,
- struct omap_video_timings *timings);
unsigned long dispc_fclk_rate(void);
-unsigned long dispc_lclk_rate(enum omap_channel channel);
-unsigned long dispc_pclk_rate(enum omap_channel channel);
-void dispc_set_pol_freq(enum omap_channel channel,
- enum omap_panel_config config, u8 acbi, u8 acb);
void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
-int dispc_set_clock_div(enum omap_channel channel,
+
+
+u32 dispc_ovl_get_fifo_size(enum omap_plane plane);
+u32 dispc_ovl_get_burst_size(enum omap_plane plane);
+int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
+ bool ilace, enum omap_channel channel, bool replication,
+ u32 fifo_low, u32 fifo_high);
+int dispc_ovl_enable(enum omap_plane plane, bool enable);
+
+
+void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
+void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
+void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable);
+void dispc_mgr_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs);
+bool dispc_mgr_go_busy(enum omap_channel channel);
+void dispc_mgr_go(enum omap_channel channel);
+void dispc_mgr_enable(enum omap_channel channel, bool enable);
+bool dispc_mgr_is_channel_enabled(enum omap_channel channel);
+void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode);
+void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
+void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
+void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
+ enum omap_lcd_display_type type);
+void dispc_mgr_set_default_color(enum omap_channel channel, u32 color);
+u32 dispc_mgr_get_default_color(enum omap_channel channel);
+void dispc_mgr_set_trans_key(enum omap_channel ch,
+ enum omap_dss_trans_key_type type,
+ u32 trans_key);
+void dispc_mgr_get_trans_key(enum omap_channel ch,
+ enum omap_dss_trans_key_type *type,
+ u32 *trans_key);
+void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable);
+void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable);
+bool dispc_mgr_trans_key_enabled(enum omap_channel ch);
+bool dispc_mgr_alpha_fixed_zorder_enabled(enum omap_channel ch);
+void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+ struct omap_video_timings *timings);
+void dispc_mgr_set_pol_freq(enum omap_channel channel,
+ enum omap_panel_config config, u8 acbi, u8 acb);
+unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
+unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+int dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
-int dispc_get_clock_div(enum omap_channel channel,
+int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
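The dispc_ovl_setup() prototype above folds the long dispc_setup_plane() argument list into a single struct omap_overlay_info plus a few per-apply parameters; a minimal sketch of a caller follows, assuming the overlay-info field names carry over from the old argument names, and that fifo_low/fifo_high take over from the dropped dispc_set_fifo_threshold() call:

	struct omap_overlay_info oi = {
		.paddr		= paddr,	/* was a bare argument before */
		.screen_width	= screen_width,
		.width		= width,
		.height		= height,
		.out_width	= out_width,
		.out_height	= out_height,
		.color_mode	= color_mode,
		.global_alpha	= global_alpha,
	};

	r = dispc_ovl_setup(plane, &oi, ilace, channel, replication,
			    fifo_low, fifo_high);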
-
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
int venc_init_platform_driver(void);
void venc_uninit_platform_driver(void);
void venc_dump_regs(struct seq_file *s);
int venc_init_display(struct omap_dss_device *display);
+unsigned long venc_get_pixel_clock(void);
#else
static inline int venc_init_platform_driver(void)
{
@@ -477,6 +459,11 @@ static inline int venc_init_platform_driver(void)
static inline void venc_uninit_platform_driver(void)
{
}
+static inline unsigned long venc_get_pixel_clock(void)
+{
+ WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
+ return 0;
+}
#endif
/* HDMI */
@@ -484,6 +471,8 @@ static inline void venc_uninit_platform_driver(void)
int hdmi_init_platform_driver(void);
void hdmi_uninit_platform_driver(void);
int hdmi_init_display(struct omap_dss_device *dssdev);
+unsigned long hdmi_get_pixel_clock(void);
+void hdmi_dump_regs(struct seq_file *s);
#else
static inline int hdmi_init_display(struct omap_dss_device *dssdev)
{
@@ -496,12 +485,19 @@ static inline int hdmi_init_platform_driver(void)
static inline void hdmi_uninit_platform_driver(void)
{
}
+static inline unsigned long hdmi_get_pixel_clock(void)
+{
+ WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
+ return 0;
+}
#endif
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev);
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings);
+int omapdss_hdmi_read_edid(u8 *buf, int len);
+bool omapdss_hdmi_detect(void);
int hdmi_panel_init(void);
void hdmi_panel_exit(void);
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index b415c4ee621..b402699168a 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -47,6 +47,7 @@ struct omap_dss_features {
const int num_ovls;
const enum omap_display_type *supported_displays;
const enum omap_color_mode *supported_color_modes;
+ const enum omap_overlay_caps *overlay_caps;
const char * const *clksrc_names;
const struct dss_param_range *dss_params;
@@ -209,6 +210,68 @@ static const enum omap_color_mode omap4_dss_supported_color_modes[] = {
OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
OMAP_DSS_COLOR_RGBX32,
+
+ /* OMAP_DSS_VIDEO3 */
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
+ OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
+ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
+ OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
+ OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
+ OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
+ OMAP_DSS_COLOR_RGBX32,
+};
+
+static const enum omap_overlay_caps omap2_dss_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ 0,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE,
+};
+
+static const enum omap_overlay_caps omap3430_dss_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA,
+};
+
+static const enum omap_overlay_caps omap3630_dss_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA,
+};
+
+static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
+ OMAP_DSS_OVL_CAP_ZORDER,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER,
+
+ /* OMAP_DSS_VIDEO3 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER,
};
static const char * const omap2_dss_clk_source_names[] = {
@@ -233,32 +296,38 @@ static const char * const omap4_dss_clk_source_names[] = {
static const struct dss_param_range omap2_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
+ [FEAT_PARAM_DSS_PCD] = { 2, 255 },
[FEAT_PARAM_DSIPLL_REGN] = { 0, 0 },
[FEAT_PARAM_DSIPLL_REGM] = { 0, 0 },
[FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, 0 },
[FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, 0 },
[FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
+ [FEAT_PARAM_DOWNSCALE] = { 1, 2 },
};
static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
+ [FEAT_PARAM_DSS_PCD] = { 1, 255 },
[FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 7) - 1 },
[FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 11) - 1 },
[FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 4) - 1 },
[FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 },
[FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
+ [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
};
static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
+ [FEAT_PARAM_DSS_PCD] = { 1, 255 },
[FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
[FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
[FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
[FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
[FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
+ [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
};
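The new FEAT_PARAM_DSS_PCD and FEAT_PARAM_DOWNSCALE entries are meant to be queried through dss_feat_get_param_min()/dss_feat_get_param_max() (declared in dss_features.h below); a minimal sketch of such a range check, with the decimation factor d standing in as a hypothetical local:

	if (d < dss_feat_get_param_min(FEAT_PARAM_DOWNSCALE) ||
	    d > dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE))
		return -EINVAL;	/* e.g. more than 4x downscale on OMAP3/4 */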
/* OMAP2 DSS Features */
@@ -275,6 +344,7 @@ static const struct omap_dss_features omap2_dss_features = {
.num_ovls = 3,
.supported_displays = omap2_dss_supported_displays,
.supported_color_modes = omap2_dss_supported_color_modes,
+ .overlay_caps = omap2_dss_overlay_caps,
.clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
.buffer_size_unit = 1,
@@ -287,18 +357,19 @@ static const struct omap_dss_features omap3430_dss_features = {
.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
.has_feature =
- FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+ FEAT_LCDENABLEPOL |
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC |
FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD |
- FEAT_FIR_COEF_V,
+ FEAT_FIR_COEF_V | FEAT_ALPHA_FIXED_ZORDER,
.num_mgrs = 2,
.num_ovls = 3,
.supported_displays = omap3430_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .overlay_caps = omap3430_dss_overlay_caps,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.buffer_size_unit = 1,
@@ -310,18 +381,19 @@ static const struct omap_dss_features omap3630_dss_features = {
.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
.has_feature =
- FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+ FEAT_LCDENABLEPOL |
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
- FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
+ FEAT_FUNCGATED |
FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD |
- FEAT_FIR_COEF_V,
+ FEAT_FIR_COEF_V | FEAT_ALPHA_FIXED_ZORDER,
.num_mgrs = 2,
.num_ovls = 3,
.supported_displays = omap3630_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .overlay_caps = omap3630_dss_overlay_caps,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.buffer_size_unit = 1,
@@ -335,17 +407,18 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
.has_feature =
- FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
- FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
+ FEAT_MGR_LCD2 |
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 |
- FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V,
+ FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V |
+ FEAT_ALPHA_FREE_ZORDER,
.num_mgrs = 3,
- .num_ovls = 3,
+ .num_ovls = 4,
.supported_displays = omap4_dss_supported_displays,
.supported_color_modes = omap4_dss_supported_color_modes,
+ .overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.buffer_size_unit = 16,
@@ -358,24 +431,50 @@ static const struct omap_dss_features omap4_dss_features = {
.num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
.has_feature =
- FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
- FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
+ FEAT_MGR_LCD2 |
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR |
- FEAT_PRELOAD | FEAT_FIR_COEF_V,
+ FEAT_PRELOAD | FEAT_FIR_COEF_V | FEAT_ALPHA_FREE_ZORDER,
.num_mgrs = 3,
- .num_ovls = 3,
+ .num_ovls = 4,
.supported_displays = omap4_dss_supported_displays,
.supported_color_modes = omap4_dss_supported_color_modes,
+ .overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
+#if defined(CONFIG_OMAP4_DSS_HDMI)
+/* HDMI OMAP4 functions */
+static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
+
+ .video_configure = ti_hdmi_4xxx_basic_configure,
+ .phy_enable = ti_hdmi_4xxx_phy_enable,
+ .phy_disable = ti_hdmi_4xxx_phy_disable,
+ .read_edid = ti_hdmi_4xxx_read_edid,
+ .detect = ti_hdmi_4xxx_detect,
+ .pll_enable = ti_hdmi_4xxx_pll_enable,
+ .pll_disable = ti_hdmi_4xxx_pll_disable,
+ .video_enable = ti_hdmi_4xxx_wp_video_start,
+ .dump_wrapper = ti_hdmi_4xxx_wp_dump,
+ .dump_core = ti_hdmi_4xxx_core_dump,
+ .dump_pll = ti_hdmi_4xxx_pll_dump,
+ .dump_phy = ti_hdmi_4xxx_phy_dump,
+
+};
+
+void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data)
+{
+ if (cpu_is_omap44xx())
+ ip_data->ops = &omap4_hdmi_functions;
+}
+#endif
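dss_init_hdmi_ip_ops() binds the OMAP4 function table above to the wrapper-level hdmi_ip_data; purely as a hedged illustration of this design choice, a later IP revision could slot into the same selector (the 5xxx names and the cpu check below are invented for this sketch and are not part of the patch):

	static const struct ti_hdmi_ip_ops omap5_hdmi_functions = {
		.video_configure	= ti_hdmi_5xxx_basic_configure,
		.phy_enable		= ti_hdmi_5xxx_phy_enable,
		.phy_disable		= ti_hdmi_5xxx_phy_disable,
		/* remaining hooks filled in the same way */
	};

	void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data)
	{
		if (cpu_is_omap44xx())
			ip_data->ops = &omap4_hdmi_functions;
		else
			ip_data->ops = &omap5_hdmi_functions;	/* hypothetical */
	}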
+
/* Functions returning values related to a DSS feature */
int dss_feat_get_num_mgrs(void)
{
@@ -407,6 +506,11 @@ enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane)
return omap_current_dss_features->supported_color_modes[plane];
}
+enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane)
+{
+ return omap_current_dss_features->overlay_caps[plane];
+}
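With the per-plane capability table above, callers can stop keying behaviour off FEAT_GLOBAL_ALPHA-style chip-wide flags; a hedged sketch of how overlay code could gate scaling on the caps (the field names on info are assumptions for illustration only):

	if (!(dss_feat_get_overlay_caps(plane) & OMAP_DSS_OVL_CAP_SCALE) &&
	    (info->out_width != info->width ||
	     info->out_height != info->height))
		return -EINVAL;	/* this plane cannot scale */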
+
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index b7398cbcda5..6a6c05dd45c 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -20,16 +20,17 @@
#ifndef __OMAP2_DSS_FEATURES_H
#define __OMAP2_DSS_FEATURES_H
+#if defined(CONFIG_OMAP4_DSS_HDMI)
+#include "ti_hdmi.h"
+#endif
+
#define MAX_DSS_MANAGERS 3
-#define MAX_DSS_OVERLAYS 3
+#define MAX_DSS_OVERLAYS 4
#define MAX_DSS_LCD_MANAGERS 2
#define MAX_NUM_DSI 2
/* DSS has feature id */
enum dss_feat_id {
- FEAT_GLOBAL_ALPHA = 1 << 0,
- FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
- FEAT_PRE_MULT_ALPHA = 1 << 2,
FEAT_LCDENABLEPOL = 1 << 3,
FEAT_LCDENABLESIGNAL = 1 << 4,
FEAT_PCKFREEENABLE = 1 << 5,
@@ -55,6 +56,8 @@ enum dss_feat_id {
FEAT_CPR = 1 << 23,
FEAT_PRELOAD = 1 << 24,
FEAT_FIR_COEF_V = 1 << 25,
+ FEAT_ALPHA_FIXED_ZORDER = 1 << 26,
+ FEAT_ALPHA_FREE_ZORDER = 1 << 27,
};
/* DSS register field id */
@@ -75,12 +78,14 @@ enum dss_feat_reg_field {
enum dss_range_param {
FEAT_PARAM_DSS_FCK,
+ FEAT_PARAM_DSS_PCD,
FEAT_PARAM_DSIPLL_REGN,
FEAT_PARAM_DSIPLL_REGM,
FEAT_PARAM_DSIPLL_REGM_DISPC,
FEAT_PARAM_DSIPLL_REGM_DSI,
FEAT_PARAM_DSIPLL_FINT,
FEAT_PARAM_DSIPLL_LPDIV,
+ FEAT_PARAM_DOWNSCALE,
};
/* DSS Feature Functions */
@@ -90,6 +95,7 @@ unsigned long dss_feat_get_param_min(enum dss_range_param param);
unsigned long dss_feat_get_param_max(enum dss_range_param param);
enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
@@ -100,4 +106,7 @@ u32 dss_feat_get_burst_size_unit(void); /* in bytes */
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(void);
+#if defined(CONFIG_OMAP4_DSS_HDMI)
+void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data);
+#endif
#endif
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 256f27a9064..3262f0f1fa3 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -37,26 +37,41 @@
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
#include <sound/soc.h>
#include <sound/pcm_params.h>
+#include "ti_hdmi_4xxx_ip.h"
#endif
+#include "ti_hdmi.h"
#include "dss.h"
-#include "hdmi.h"
#include "dss_features.h"
+#define HDMI_WP 0x0
+#define HDMI_CORE_SYS 0x400
+#define HDMI_CORE_AV 0x900
+#define HDMI_PLLCTRL 0x200
+#define HDMI_PHY 0x300
+
+/* HDMI EDID length; TODO: move this definition elsewhere */
+#define HDMI_EDID_MAX_LENGTH 256
+#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
+#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
+#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
+#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
+#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
+
+#define OMAP_HDMI_TIMINGS_NB 34
+
+#define HDMI_DEFAULT_REGN 16
+#define HDMI_DEFAULT_REGM2 1
+
static struct {
struct mutex lock;
struct omap_display_platform_data *pdata;
struct platform_device *pdev;
- void __iomem *base_wp; /* HDMI wrapper */
+ struct hdmi_ip_data ip_data;
int code;
int mode;
- u8 edid[HDMI_EDID_MAX_LENGTH];
- u8 edid_set;
- bool custom_set;
- struct hdmi_config cfg;
struct clk *sys_clk;
- struct clk *hdmi_clk;
} hdmi;
/*
@@ -144,30 +159,6 @@ static const int code_vesa[85] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 27, 28, -1, 33};
-static const u8 edid_header[8] = {0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0};
-
-static inline void hdmi_write_reg(const struct hdmi_reg idx, u32 val)
-{
- __raw_writel(val, hdmi.base_wp + idx.idx);
-}
-
-static inline u32 hdmi_read_reg(const struct hdmi_reg idx)
-{
- return __raw_readl(hdmi.base_wp + idx.idx);
-}
-
-static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
- int b2, int b1, u32 val)
-{
- u32 t = 0;
- while (val != REG_GET(idx, b2, b1)) {
- udelay(1);
- if (t++ > 10000)
- return !val;
- }
- return val;
-}
-
static int hdmi_runtime_get(void)
{
int r;
@@ -193,304 +184,7 @@ int hdmi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
- return 0;
-}
-
-static int hdmi_pll_init(enum hdmi_clk_refsel refsel, int dcofreq,
- struct hdmi_pll_info *fmt, u16 sd)
-{
- u32 r;
-
- /* PLL start always use manual mode */
- REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
-
- r = hdmi_read_reg(PLLCTRL_CFG1);
- r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
- r = FLD_MOD(r, fmt->regn, 8, 1); /* CFG1_PLL_REGN */
-
- hdmi_write_reg(PLLCTRL_CFG1, r);
-
- r = hdmi_read_reg(PLLCTRL_CFG2);
-
- r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
- r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
- r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
-
- if (dcofreq) {
- /* divider programming for frequency beyond 1000Mhz */
- REG_FLD_MOD(PLLCTRL_CFG3, sd, 17, 10);
- r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
- } else {
- r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
- }
-
- hdmi_write_reg(PLLCTRL_CFG2, r);
-
- r = hdmi_read_reg(PLLCTRL_CFG4);
- r = FLD_MOD(r, fmt->regm2, 24, 18);
- r = FLD_MOD(r, fmt->regmf, 17, 0);
-
- hdmi_write_reg(PLLCTRL_CFG4, r);
-
- /* go now */
- REG_FLD_MOD(PLLCTRL_PLL_GO, 0x1, 0, 0);
-
- /* wait for bit change */
- if (hdmi_wait_for_bit_change(PLLCTRL_PLL_GO, 0, 0, 1) != 1) {
- DSSERR("PLL GO bit not set\n");
- return -ETIMEDOUT;
- }
-
- /* Wait till the lock bit is set in PLL status */
- if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
- DSSWARN("cannot lock PLL\n");
- DSSWARN("CFG1 0x%x\n",
- hdmi_read_reg(PLLCTRL_CFG1));
- DSSWARN("CFG2 0x%x\n",
- hdmi_read_reg(PLLCTRL_CFG2));
- DSSWARN("CFG4 0x%x\n",
- hdmi_read_reg(PLLCTRL_CFG4));
- return -ETIMEDOUT;
- }
-
- DSSDBG("PLL locked!\n");
-
- return 0;
-}
-
-/* PHY_PWR_CMD */
-static int hdmi_set_phy_pwr(enum hdmi_phy_pwr val)
-{
- /* Command for power control of HDMI PHY */
- REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 7, 6);
-
- /* Status of the power control of HDMI PHY */
- if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
- DSSERR("Failed to set PHY power mode to %d\n", val);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/* PLL_PWR_CMD */
-static int hdmi_set_pll_pwr(enum hdmi_pll_pwr val)
-{
- /* Command for power control of HDMI PLL */
- REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 3, 2);
-
- /* wait till PHY_PWR_STATUS is set */
- if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 1, 0, val) != val) {
- DSSERR("Failed to set PHY_PWR_STATUS\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int hdmi_pll_reset(void)
-{
- /* SYSRESET controlled by power FSM */
- REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
-
- /* READ 0x0 reset is in progress */
- if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
- DSSERR("Failed to sysreset PLL\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int hdmi_phy_init(void)
-{
- u16 r = 0;
-
- r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_LDOON);
- if (r)
- return r;
-
- r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_TXON);
- if (r)
- return r;
-
- /*
- * Read address 0 in order to get the SCP reset done completed
- * Dummy access performed to make sure reset is done
- */
- hdmi_read_reg(HDMI_TXPHY_TX_CTRL);
-
- /*
- * Write to phy address 0 to configure the clock
- * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field
- */
- REG_FLD_MOD(HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
-
- /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
- hdmi_write_reg(HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
-
- /* Setup max LDO voltage */
- REG_FLD_MOD(HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
-
- /* Write to phy address 3 to change the polarity control */
- REG_FLD_MOD(HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
-
- return 0;
-}
-
-static int hdmi_pll_program(struct hdmi_pll_info *fmt)
-{
- u16 r = 0;
- enum hdmi_clk_refsel refsel;
-
- r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
- if (r)
- return r;
-
- r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
- if (r)
- return r;
-
- r = hdmi_pll_reset();
- if (r)
- return r;
-
- refsel = HDMI_REFSEL_SYSCLK;
-
- r = hdmi_pll_init(refsel, fmt->dcofreq, fmt, fmt->regsd);
- if (r)
- return r;
-
- return 0;
-}
-
-static void hdmi_phy_off(void)
-{
- hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
-}
-
-static int hdmi_core_ddc_edid(u8 *pedid, int ext)
-{
- u32 i, j;
- char checksum = 0;
- u32 offset = 0;
-
- /* Turn on CLK for DDC */
- REG_FLD_MOD(HDMI_CORE_AV_DPD, 0x7, 2, 0);
-
- /*
- * SW HACK : Without the Delay DDC(i2c bus) reads 0 values /
- * right shifted values( The behavior is not consistent and seen only
- * with some TV's)
- */
- usleep_range(800, 1000);
-
- if (!ext) {
- /* Clk SCL Devices */
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0xA, 3, 0);
-
- /* HDMI_CORE_DDC_STATUS_IN_PROG */
- if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
- 4, 4, 0) != 0) {
- DSSERR("Failed to program DDC\n");
- return -ETIMEDOUT;
- }
-
- /* Clear FIFO */
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x9, 3, 0);
-
- /* HDMI_CORE_DDC_STATUS_IN_PROG */
- if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
- 4, 4, 0) != 0) {
- DSSERR("Failed to program DDC\n");
- return -ETIMEDOUT;
- }
-
- } else {
- if (ext % 2 != 0)
- offset = 0x80;
- }
-
- /* Load Segment Address Register */
- REG_FLD_MOD(HDMI_CORE_DDC_SEGM, ext/2, 7, 0);
-
- /* Load Slave Address Register */
- REG_FLD_MOD(HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
-
- /* Load Offset Address Register */
- REG_FLD_MOD(HDMI_CORE_DDC_OFFSET, offset, 7, 0);
-
- /* Load Byte Count */
- REG_FLD_MOD(HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
- REG_FLD_MOD(HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
-
- /* Set DDC_CMD */
- if (ext)
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x4, 3, 0);
- else
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x2, 3, 0);
-
- /* HDMI_CORE_DDC_STATUS_BUS_LOW */
- if (REG_GET(HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
- DSSWARN("I2C Bus Low?\n");
- return -EIO;
- }
- /* HDMI_CORE_DDC_STATUS_NO_ACK */
- if (REG_GET(HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
- DSSWARN("I2C No Ack\n");
- return -EIO;
- }
-
- i = ext * 128;
- j = 0;
- while (((REG_GET(HDMI_CORE_DDC_STATUS, 4, 4) == 1) ||
- (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0)) &&
- j < 128) {
-
- if (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0) {
- /* FIFO not empty */
- pedid[i++] = REG_GET(HDMI_CORE_DDC_DATA, 7, 0);
- j++;
- }
- }
-
- for (j = 0; j < 128; j++)
- checksum += pedid[j];
-
- if (checksum != 0) {
- DSSERR("E-EDID checksum failed!!\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int read_edid(u8 *pedid, u16 max_length)
-{
- int r = 0, n = 0, i = 0;
- int max_ext_blocks = (max_length / 128) - 1;
-
- r = hdmi_core_ddc_edid(pedid, 0);
- if (r) {
- return r;
- } else {
- n = pedid[0x7e];
-
- /*
- * README: need to comply with max_length set by the caller.
- * Better implementation should be to allocate necessary
- * memory to store EDID according to nb_block field found
- * in first block
- */
- if (n > max_ext_blocks)
- n = max_ext_blocks;
-
- for (i = 1; i <= n; i++) {
- r = hdmi_core_ddc_edid(pedid, i);
- if (r)
- return r;
- }
- }
+ dss_init_hdmi_ip_ops(&hdmi.ip_data);
return 0;
}
@@ -518,7 +212,7 @@ static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
{
int i = 0, code = -1, temp_vsync = 0, temp_hsync = 0;
int timing_vsync = 0, timing_hsync = 0;
- struct omap_video_timings temp;
+ struct hdmi_video_timings temp;
struct hdmi_cm cm = {-1};
DSSDBG("hdmi_get_code\n");
@@ -556,500 +250,6 @@ static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
return cm;
}
-static void get_horz_vert_timing_info(int current_descriptor_addrs, u8 *edid ,
- struct omap_video_timings *timings)
-{
- /* X and Y resolution */
- timings->x_res = (((edid[current_descriptor_addrs + 4] & 0xF0) << 4) |
- edid[current_descriptor_addrs + 2]);
- timings->y_res = (((edid[current_descriptor_addrs + 7] & 0xF0) << 4) |
- edid[current_descriptor_addrs + 5]);
-
- timings->pixel_clock = ((edid[current_descriptor_addrs + 1] << 8) |
- edid[current_descriptor_addrs]);
-
- timings->pixel_clock = 10 * timings->pixel_clock;
-
- /* HORIZONTAL FRONT PORCH */
- timings->hfp = edid[current_descriptor_addrs + 8] |
- ((edid[current_descriptor_addrs + 11] & 0xc0) << 2);
- /* HORIZONTAL SYNC WIDTH */
- timings->hsw = edid[current_descriptor_addrs + 9] |
- ((edid[current_descriptor_addrs + 11] & 0x30) << 4);
- /* HORIZONTAL BACK PORCH */
- timings->hbp = (((edid[current_descriptor_addrs + 4] & 0x0F) << 8) |
- edid[current_descriptor_addrs + 3]) -
- (timings->hfp + timings->hsw);
- /* VERTICAL FRONT PORCH */
- timings->vfp = ((edid[current_descriptor_addrs + 10] & 0xF0) >> 4) |
- ((edid[current_descriptor_addrs + 11] & 0x0f) << 2);
- /* VERTICAL SYNC WIDTH */
- timings->vsw = (edid[current_descriptor_addrs + 10] & 0x0F) |
- ((edid[current_descriptor_addrs + 11] & 0x03) << 4);
- /* VERTICAL BACK PORCH */
- timings->vbp = (((edid[current_descriptor_addrs + 7] & 0x0F) << 8) |
- edid[current_descriptor_addrs + 6]) -
- (timings->vfp + timings->vsw);
-
-}
-
-/* Description : This function gets the resolution information from EDID */
-static void get_edid_timing_data(u8 *edid)
-{
- u8 count;
- u16 current_descriptor_addrs;
- struct hdmi_cm cm;
- struct omap_video_timings edid_timings;
-
- /* search block 0, there are 4 DTDs arranged in priority order */
- for (count = 0; count < EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR; count++) {
- current_descriptor_addrs =
- EDID_DESCRIPTOR_BLOCK0_ADDRESS +
- count * EDID_TIMING_DESCRIPTOR_SIZE;
- get_horz_vert_timing_info(current_descriptor_addrs,
- edid, &edid_timings);
- cm = hdmi_get_code(&edid_timings);
- DSSDBG("Block0[%d] value matches code = %d , mode = %d\n",
- count, cm.code, cm.mode);
- if (cm.code == -1) {
- continue;
- } else {
- hdmi.code = cm.code;
- hdmi.mode = cm.mode;
- DSSDBG("code = %d , mode = %d\n",
- hdmi.code, hdmi.mode);
- return;
- }
- }
- if (edid[0x7e] != 0x00) {
- for (count = 0; count < EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR;
- count++) {
- current_descriptor_addrs =
- EDID_DESCRIPTOR_BLOCK1_ADDRESS +
- count * EDID_TIMING_DESCRIPTOR_SIZE;
- get_horz_vert_timing_info(current_descriptor_addrs,
- edid, &edid_timings);
- cm = hdmi_get_code(&edid_timings);
- DSSDBG("Block1[%d] value matches code = %d, mode = %d",
- count, cm.code, cm.mode);
- if (cm.code == -1) {
- continue;
- } else {
- hdmi.code = cm.code;
- hdmi.mode = cm.mode;
- DSSDBG("code = %d , mode = %d\n",
- hdmi.code, hdmi.mode);
- return;
- }
- }
- }
-
- DSSINFO("no valid timing found , falling back to VGA\n");
- hdmi.code = 4; /* setting default value of 640 480 VGA */
- hdmi.mode = HDMI_DVI;
-}
-
-static void hdmi_read_edid(struct omap_video_timings *dp)
-{
- int ret = 0, code;
-
- memset(hdmi.edid, 0, HDMI_EDID_MAX_LENGTH);
-
- if (!hdmi.edid_set)
- ret = read_edid(hdmi.edid, HDMI_EDID_MAX_LENGTH);
-
- if (!ret) {
- if (!memcmp(hdmi.edid, edid_header, sizeof(edid_header))) {
- /* search for timings of default resolution */
- get_edid_timing_data(hdmi.edid);
- hdmi.edid_set = true;
- }
- } else {
- DSSWARN("failed to read E-EDID\n");
- }
-
- if (!hdmi.edid_set) {
- DSSINFO("fallback to VGA\n");
- hdmi.code = 4; /* setting default value of 640 480 VGA */
- hdmi.mode = HDMI_DVI;
- }
-
- code = get_timings_index();
-
- *dp = cea_vesa_timings[code].timings;
-}
-
-static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
- struct hdmi_core_infoframe_avi *avi_cfg,
- struct hdmi_core_packet_enable_repeat *repeat_cfg)
-{
- DSSDBG("Enter hdmi_core_init\n");
-
- /* video core */
- video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
- video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
- video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
- video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
- video_cfg->hdmi_dvi = HDMI_DVI;
- video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
-
- /* info frame */
- avi_cfg->db1_format = 0;
- avi_cfg->db1_active_info = 0;
- avi_cfg->db1_bar_info_dv = 0;
- avi_cfg->db1_scan_info = 0;
- avi_cfg->db2_colorimetry = 0;
- avi_cfg->db2_aspect_ratio = 0;
- avi_cfg->db2_active_fmt_ar = 0;
- avi_cfg->db3_itc = 0;
- avi_cfg->db3_ec = 0;
- avi_cfg->db3_q_range = 0;
- avi_cfg->db3_nup_scaling = 0;
- avi_cfg->db4_videocode = 0;
- avi_cfg->db5_pixel_repeat = 0;
- avi_cfg->db6_7_line_eoftop = 0 ;
- avi_cfg->db8_9_line_sofbottom = 0;
- avi_cfg->db10_11_pixel_eofleft = 0;
- avi_cfg->db12_13_pixel_sofright = 0;
-
- /* packet enable and repeat */
- repeat_cfg->audio_pkt = 0;
- repeat_cfg->audio_pkt_repeat = 0;
- repeat_cfg->avi_infoframe = 0;
- repeat_cfg->avi_infoframe_repeat = 0;
- repeat_cfg->gen_cntrl_pkt = 0;
- repeat_cfg->gen_cntrl_pkt_repeat = 0;
- repeat_cfg->generic_pkt = 0;
- repeat_cfg->generic_pkt_repeat = 0;
-}
-
-static void hdmi_core_powerdown_disable(void)
-{
- DSSDBG("Enter hdmi_core_powerdown_disable\n");
- REG_FLD_MOD(HDMI_CORE_CTRL1, 0x0, 0, 0);
-}
-
-static void hdmi_core_swreset_release(void)
-{
- DSSDBG("Enter hdmi_core_swreset_release\n");
- REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x0, 0, 0);
-}
-
-static void hdmi_core_swreset_assert(void)
-{
- DSSDBG("Enter hdmi_core_swreset_assert\n");
- REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x1, 0, 0);
-}
-
-/* DSS_HDMI_CORE_VIDEO_CONFIG */
-static void hdmi_core_video_config(struct hdmi_core_video_config *cfg)
-{
- u32 r = 0;
-
- /* sys_ctrl1 default configuration not tunable */
- r = hdmi_read_reg(HDMI_CORE_CTRL1);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
- hdmi_write_reg(HDMI_CORE_CTRL1, r);
-
- REG_FLD_MOD(HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
-
- /* Vid_Mode */
- r = hdmi_read_reg(HDMI_CORE_SYS_VID_MODE);
-
- /* dither truncation configuration */
- if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
- r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
- r = FLD_MOD(r, 1, 5, 5);
- } else {
- r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
- r = FLD_MOD(r, 0, 5, 5);
- }
- hdmi_write_reg(HDMI_CORE_SYS_VID_MODE, r);
-
- /* HDMI_Ctrl */
- r = hdmi_read_reg(HDMI_CORE_AV_HDMI_CTRL);
- r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
- r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
- r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_HDMI_CTRL, r);
-
- /* TMDS_CTRL */
- REG_FLD_MOD(HDMI_CORE_SYS_TMDS_CTRL,
- cfg->tclk_sel_clkmult, 6, 5);
-}
-
-static void hdmi_core_aux_infoframe_avi_config(
- struct hdmi_core_infoframe_avi info_avi)
-{
- u32 val;
- char sum = 0, checksum = 0;
-
- sum += 0x82 + 0x002 + 0x00D;
- hdmi_write_reg(HDMI_CORE_AV_AVI_TYPE, 0x082);
- hdmi_write_reg(HDMI_CORE_AV_AVI_VERS, 0x002);
- hdmi_write_reg(HDMI_CORE_AV_AVI_LEN, 0x00D);
-
- val = (info_avi.db1_format << 5) |
- (info_avi.db1_active_info << 4) |
- (info_avi.db1_bar_info_dv << 2) |
- (info_avi.db1_scan_info);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(0), val);
- sum += val;
-
- val = (info_avi.db2_colorimetry << 6) |
- (info_avi.db2_aspect_ratio << 4) |
- (info_avi.db2_active_fmt_ar);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(1), val);
- sum += val;
-
- val = (info_avi.db3_itc << 7) |
- (info_avi.db3_ec << 4) |
- (info_avi.db3_q_range << 2) |
- (info_avi.db3_nup_scaling);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(2), val);
- sum += val;
-
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(3), info_avi.db4_videocode);
- sum += info_avi.db4_videocode;
-
- val = info_avi.db5_pixel_repeat;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(4), val);
- sum += val;
-
- val = info_avi.db6_7_line_eoftop & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(5), val);
- sum += val;
-
- val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(6), val);
- sum += val;
-
- val = info_avi.db8_9_line_sofbottom & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(7), val);
- sum += val;
-
- val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(8), val);
- sum += val;
-
- val = info_avi.db10_11_pixel_eofleft & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(9), val);
- sum += val;
-
- val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(10), val);
- sum += val;
-
- val = info_avi.db12_13_pixel_sofright & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(11), val);
- sum += val;
-
- val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(12), val);
- sum += val;
-
- checksum = 0x100 - sum;
- hdmi_write_reg(HDMI_CORE_AV_AVI_CHSUM, checksum);
-}
-
-static void hdmi_core_av_packet_config(
- struct hdmi_core_packet_enable_repeat repeat_cfg)
-{
- /* enable/repeat the infoframe */
- hdmi_write_reg(HDMI_CORE_AV_PB_CTRL1,
- (repeat_cfg.audio_pkt << 5) |
- (repeat_cfg.audio_pkt_repeat << 4) |
- (repeat_cfg.avi_infoframe << 1) |
- (repeat_cfg.avi_infoframe_repeat));
-
- /* enable/repeat the packet */
- hdmi_write_reg(HDMI_CORE_AV_PB_CTRL2,
- (repeat_cfg.gen_cntrl_pkt << 3) |
- (repeat_cfg.gen_cntrl_pkt_repeat << 2) |
- (repeat_cfg.generic_pkt << 1) |
- (repeat_cfg.generic_pkt_repeat));
-}
-
-static void hdmi_wp_init(struct omap_video_timings *timings,
- struct hdmi_video_format *video_fmt,
- struct hdmi_video_interface *video_int)
-{
- DSSDBG("Enter hdmi_wp_init\n");
-
- timings->hbp = 0;
- timings->hfp = 0;
- timings->hsw = 0;
- timings->vbp = 0;
- timings->vfp = 0;
- timings->vsw = 0;
-
- video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = 0;
- video_fmt->x_res = 0;
-
- video_int->vsp = 0;
- video_int->hsp = 0;
-
- video_int->interlacing = 0;
- video_int->tm = 0; /* HDMI_TIMING_SLAVE */
-
-}
-
-static void hdmi_wp_video_start(bool start)
-{
- REG_FLD_MOD(HDMI_WP_VIDEO_CFG, start, 31, 31);
-}
-
-static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
-{
- DSSDBG("Enter hdmi_wp_video_init_format\n");
-
- video_fmt->y_res = param->timings.timings.y_res;
- video_fmt->x_res = param->timings.timings.x_res;
-
- timings->hbp = param->timings.timings.hbp;
- timings->hfp = param->timings.timings.hfp;
- timings->hsw = param->timings.timings.hsw;
- timings->vbp = param->timings.timings.vbp;
- timings->vfp = param->timings.timings.vfp;
- timings->vsw = param->timings.timings.vsw;
-}
-
-static void hdmi_wp_video_config_format(
- struct hdmi_video_format *video_fmt)
-{
- u32 l = 0;
-
- REG_FLD_MOD(HDMI_WP_VIDEO_CFG, video_fmt->packing_mode, 10, 8);
-
- l |= FLD_VAL(video_fmt->y_res, 31, 16);
- l |= FLD_VAL(video_fmt->x_res, 15, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_SIZE, l);
-}
-
-static void hdmi_wp_video_config_interface(
- struct hdmi_video_interface *video_int)
-{
- u32 r;
- DSSDBG("Enter hdmi_wp_video_config_interface\n");
-
- r = hdmi_read_reg(HDMI_WP_VIDEO_CFG);
- r = FLD_MOD(r, video_int->vsp, 7, 7);
- r = FLD_MOD(r, video_int->hsp, 6, 6);
- r = FLD_MOD(r, video_int->interlacing, 3, 3);
- r = FLD_MOD(r, video_int->tm, 1, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_CFG, r);
-}
-
-static void hdmi_wp_video_config_timing(
- struct omap_video_timings *timings)
-{
- u32 timing_h = 0;
- u32 timing_v = 0;
-
- DSSDBG("Enter hdmi_wp_video_config_timing\n");
-
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw, 7, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_TIMING_H, timing_h);
-
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_TIMING_V, timing_v);
-}
-
-static void hdmi_basic_configure(struct hdmi_config *cfg)
-{
- /* HDMI */
- struct omap_video_timings video_timing;
- struct hdmi_video_format video_format;
- struct hdmi_video_interface video_interface;
- /* HDMI core */
- struct hdmi_core_infoframe_avi avi_cfg;
- struct hdmi_core_video_config v_core_cfg;
- struct hdmi_core_packet_enable_repeat repeat_cfg;
-
- hdmi_wp_init(&video_timing, &video_format,
- &video_interface);
-
- hdmi_core_init(&v_core_cfg,
- &avi_cfg,
- &repeat_cfg);
-
- hdmi_wp_video_init_format(&video_format,
- &video_timing, cfg);
-
- hdmi_wp_video_config_timing(&video_timing);
-
- /* video config */
- video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
-
- hdmi_wp_video_config_format(&video_format);
-
- video_interface.vsp = cfg->timings.vsync_pol;
- video_interface.hsp = cfg->timings.hsync_pol;
- video_interface.interlacing = cfg->interlace;
- video_interface.tm = 1 ; /* HDMI_TIMING_MASTER_24BIT */
-
- hdmi_wp_video_config_interface(&video_interface);
-
- /*
- * configure core video part
- * set software reset in the core
- */
- hdmi_core_swreset_assert();
-
- /* power down off */
- hdmi_core_powerdown_disable();
-
- v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
- v_core_cfg.hdmi_dvi = cfg->cm.mode;
-
- hdmi_core_video_config(&v_core_cfg);
-
- /* release software reset in the core */
- hdmi_core_swreset_release();
-
- /*
- * configure packet
- * info frame video see doc CEA861-D page 65
- */
- avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
- avi_cfg.db1_active_info =
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
- avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
- avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
- avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
- avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
- avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
- avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
- avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
- avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
- avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
- avi_cfg.db4_videocode = cfg->cm.code;
- avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
- avi_cfg.db6_7_line_eoftop = 0;
- avi_cfg.db8_9_line_sofbottom = 0;
- avi_cfg.db10_11_pixel_eofleft = 0;
- avi_cfg.db12_13_pixel_sofright = 0;
-
- hdmi_core_aux_infoframe_avi_config(avi_cfg);
-
- /* enable/repeat the infoframe */
- repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
- repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
- /* wakeup */
- repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
- repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
- hdmi_core_av_packet_config(repeat_cfg);
-}
-
static void update_hdmi_timings(struct hdmi_config *cfg,
struct omap_video_timings *timings, int code)
{
@@ -1066,6 +266,12 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol;
}
+unsigned long hdmi_get_pixel_clock(void)
+{
+ /* HDMI pixel clock in MHz */
+ return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000;
+}
+
static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
struct hdmi_pll_info *pi)
{
@@ -1077,15 +283,23 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
* Input clock is predivided by N + 1
* output of which is the reference clock
*/
- pi->regn = dssdev->clocks.hdmi.regn;
- refclk = clkin / (pi->regn + 1);
+ if (dssdev->clocks.hdmi.regn == 0)
+ pi->regn = HDMI_DEFAULT_REGN;
+ else
+ pi->regn = dssdev->clocks.hdmi.regn;
+
+ refclk = clkin / pi->regn;
/*
* multiplier is pixel_clk/ref_clk
* Multiplying by 100 to avoid fractional part removal
*/
pi->regm = (phy * 100 / (refclk)) / 100;
- pi->regm2 = dssdev->clocks.hdmi.regm2;
+
+ if (dssdev->clocks.hdmi.regm2 == 0)
+ pi->regm2 = HDMI_DEFAULT_REGM2;
+ else
+ pi->regm2 = dssdev->clocks.hdmi.regm2;
/*
* fractional multiplier is remainder of the difference between
@@ -1100,7 +314,10 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
* is greater than 1000MHz
*/
pi->dcofreq = phy > 1000 * 100;
- pi->regsd = ((pi->regm * clkin / 10) / ((pi->regn + 1) * 250) + 5) / 10;
+ pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
+
+ /* Set the reference clock to sysclk reference */
+ pi->refsel = HDMI_REFSEL_SYSCLK;
DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
@@ -1109,7 +326,6 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
static int hdmi_power_on(struct omap_dss_device *dssdev)
{
int r, code = 0;
- struct hdmi_pll_info pll_data;
struct omap_video_timings *p;
unsigned long phy;
@@ -1117,7 +333,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
if (r)
return r;
- dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 0);
p = &dssdev->panel.timings;
@@ -1125,36 +341,31 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
dssdev->panel.timings.x_res,
dssdev->panel.timings.y_res);
- if (!hdmi.custom_set) {
- DSSDBG("Read EDID as no EDID is not set on poweron\n");
- hdmi_read_edid(p);
- }
code = get_timings_index();
- dssdev->panel.timings = cea_vesa_timings[code].timings;
- update_hdmi_timings(&hdmi.cfg, p, code);
+ update_hdmi_timings(&hdmi.ip_data.cfg, p, code);
phy = p->pixel_clock;
- hdmi_compute_pll(dssdev, phy, &pll_data);
+ hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
- hdmi_wp_video_start(0);
+ hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
- /* config the PLL and PHY first */
- r = hdmi_pll_program(&pll_data);
+ /* configure the PLL and PHY first */
+ r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
if (r) {
DSSDBG("Failed to lock PLL\n");
goto err;
}
- r = hdmi_phy_init();
+ r = hdmi.ip_data.ops->phy_enable(&hdmi.ip_data);
if (r) {
DSSDBG("Failed to start PHY\n");
goto err;
}
- hdmi.cfg.cm.mode = hdmi.mode;
- hdmi.cfg.cm.code = hdmi.code;
- hdmi_basic_configure(&hdmi.cfg);
+ hdmi.ip_data.cfg.cm.mode = hdmi.mode;
+ hdmi.ip_data.cfg.cm.code = hdmi.code;
+ hdmi.ip_data.ops->video_configure(&hdmi.ip_data);
/* Make selection of HDMI in DSS */
dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
@@ -1174,9 +385,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
dispc_set_digit_size(dssdev->panel.timings.x_res,
dssdev->panel.timings.y_res);
- dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 1);
+ hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
- hdmi_wp_video_start(1);
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 1);
return 0;
err:
@@ -1186,14 +397,12 @@ err:
static void hdmi_power_off(struct omap_dss_device *dssdev)
{
- dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 0);
- hdmi_wp_video_start(0);
- hdmi_phy_off();
- hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
+ hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
+ hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
hdmi_runtime_put();
-
- hdmi.edid_set = 0;
}
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
@@ -1203,7 +412,6 @@ int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
cm = hdmi_get_code(timings);
if (cm.code == -1) {
- DSSERR("Invalid timing entered\n");
return -EINVAL;
}
@@ -1215,12 +423,69 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
{
struct hdmi_cm cm;
- hdmi.custom_set = 1;
cm = hdmi_get_code(&dssdev->panel.timings);
hdmi.code = cm.code;
hdmi.mode = cm.mode;
- omapdss_hdmi_display_enable(dssdev);
- hdmi.custom_set = 0;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ int r;
+
+ hdmi_power_off(dssdev);
+
+ r = hdmi_power_on(dssdev);
+ if (r)
+ DSSERR("failed to power on device\n");
+ }
+}
+
+void hdmi_dump_regs(struct seq_file *s)
+{
+ mutex_lock(&hdmi.lock);
+
+ if (hdmi_runtime_get()) {
+ mutex_unlock(&hdmi.lock);
+ return;
+ }
+
+ hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
+ hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
+ hdmi.ip_data.ops->dump_phy(&hdmi.ip_data, s);
+ hdmi.ip_data.ops->dump_core(&hdmi.ip_data, s);
+
+ hdmi_runtime_put();
+ mutex_unlock(&hdmi.lock);
+}
+
+int omapdss_hdmi_read_edid(u8 *buf, int len)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_runtime_get();
+ BUG_ON(r);
+
+ r = hdmi.ip_data.ops->read_edid(&hdmi.ip_data, buf, len);
+
+ hdmi_runtime_put();
+ mutex_unlock(&hdmi.lock);
+
+ return r;
+}
+
+bool omapdss_hdmi_detect(void)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_runtime_get();
+ BUG_ON(r);
+
+ r = hdmi.ip_data.ops->detect(&hdmi.ip_data);
+
+ hdmi_runtime_put();
+ mutex_unlock(&hdmi.lock);
+
+ return r == 1;
}
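omapdss_hdmi_read_edid() and omapdss_hdmi_detect() give upper layers (the panel driver, or a sysfs attribute) raw access to the attached sink; a minimal sketch of a hypothetical consumer, not part of this patch:

	static int hdmi_panel_read_edid(struct omap_dss_device *dssdev,
					u8 *buf, int len)
	{
		if (!omapdss_hdmi_detect())
			return -ENODEV;		/* nothing plugged in */

		return omapdss_hdmi_read_edid(buf, len);
	}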
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
@@ -1231,6 +496,12 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
+ if (dssdev->manager == NULL) {
+ DSSERR("failed to enable display: no manager\n");
+ r = -ENODEV;
+ goto err0;
+ }
+
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
@@ -1282,219 +553,9 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-static void hdmi_wp_audio_config_format(
- struct hdmi_audio_format *aud_fmt)
-{
- u32 r;
-
- DSSDBG("Enter hdmi_wp_audio_config_format\n");
-
- r = hdmi_read_reg(HDMI_WP_AUDIO_CFG);
- r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
- r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
- r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
- r = FLD_MOD(r, aud_fmt->type, 4, 4);
- r = FLD_MOD(r, aud_fmt->justification, 3, 3);
- r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
- r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
- r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
- hdmi_write_reg(HDMI_WP_AUDIO_CFG, r);
-}
-
-static void hdmi_wp_audio_config_dma(struct hdmi_audio_dma *aud_dma)
-{
- u32 r;
-
- DSSDBG("Enter hdmi_wp_audio_config_dma\n");
-
- r = hdmi_read_reg(HDMI_WP_AUDIO_CFG2);
- r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
- r = FLD_MOD(r, aud_dma->block_size, 7, 0);
- hdmi_write_reg(HDMI_WP_AUDIO_CFG2, r);
-
- r = hdmi_read_reg(HDMI_WP_AUDIO_CTRL);
- r = FLD_MOD(r, aud_dma->mode, 9, 9);
- r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
- hdmi_write_reg(HDMI_WP_AUDIO_CTRL, r);
-}
-
-static void hdmi_core_audio_config(struct hdmi_core_audio_config *cfg)
-{
- u32 r;
-
- /* audio clock recovery parameters */
- r = hdmi_read_reg(HDMI_CORE_AV_ACR_CTRL);
- r = FLD_MOD(r, cfg->use_mclk, 2, 2);
- r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
- r = FLD_MOD(r, cfg->cts_mode, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_ACR_CTRL, r);
-
- REG_FLD_MOD(HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
-
- if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
- REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
- } else {
- /*
- * HDMI IP uses this configuration to divide the MCLK to
- * update CTS value.
- */
- REG_FLD_MOD(HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
-
- /* Configure clock for audio packets */
- REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
- cfg->aud_par_busclk, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
- (cfg->aud_par_busclk >> 8), 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
- (cfg->aud_par_busclk >> 16), 7, 0);
- }
-
- /* Override of SPDIF sample frequency with value in I2S_CHST4 */
- REG_FLD_MOD(HDMI_CORE_AV_SPDIF_CTRL, cfg->fs_override, 1, 1);
-
- /* I2S parameters */
- REG_FLD_MOD(HDMI_CORE_AV_I2S_CHST4, cfg->freq_sample, 3, 0);
-
- r = hdmi_read_reg(HDMI_CORE_AV_I2S_IN_CTRL);
- r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
- r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
- r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
- r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
- r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
- r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
- r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_I2S_IN_CTRL, r);
-
- r = hdmi_read_reg(HDMI_CORE_AV_I2S_CHST5);
- r = FLD_MOD(r, cfg->freq_sample, 7, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
- r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_I2S_CHST5, r);
-
- REG_FLD_MOD(HDMI_CORE_AV_I2S_IN_LEN, cfg->i2s_cfg.in_length_bits, 3, 0);
-
- /* Audio channels and mode parameters */
- REG_FLD_MOD(HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
- r = hdmi_read_reg(HDMI_CORE_AV_AUD_MODE);
- r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
- r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
- r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
- r = FLD_MOD(r, cfg->en_spdif, 1, 1);
- hdmi_write_reg(HDMI_CORE_AV_AUD_MODE, r);
-}
-
-static void hdmi_core_audio_infoframe_config(
- struct hdmi_core_infoframe_audio *info_aud)
-{
- u8 val;
- u8 sum = 0, checksum = 0;
-
- /*
- * Set audio info frame type, version and length as
- * described in HDMI 1.4a Section 8.2.2 specification.
- * Checksum calculation is defined in Section 5.3.5.
- */
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_TYPE, 0x84);
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_VERS, 0x01);
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_LEN, 0x0a);
- sum += 0x84 + 0x001 + 0x00a;
-
- val = (info_aud->db1_coding_type << 4)
- | (info_aud->db1_channel_count - 1);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(0), val);
- sum += val;
-
- val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(1), val);
- sum += val;
-
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
-
- val = info_aud->db4_channel_alloc;
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(3), val);
- sum += val;
-
- val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(4), val);
- sum += val;
-
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
- checksum = 0x100 - sum;
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_CHSUM, checksum);
-
- /*
- * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
- * is available.
- */
-}
-
-static int hdmi_config_audio_acr(u32 sample_freq, u32 *n, u32 *cts)
-{
- u32 r;
- u32 deep_color = 0;
- u32 pclk = hdmi.cfg.timings.timings.pixel_clock;
-
- if (n == NULL || cts == NULL)
- return -EINVAL;
- /*
- * Obtain current deep color configuration. This needed
- * to calculate the TMDS clock based on the pixel clock.
- */
- r = REG_GET(HDMI_WP_VIDEO_CFG, 1, 0);
- switch (r) {
- case 1: /* No deep color selected */
- deep_color = 100;
- break;
- case 2: /* 10-bit deep color selected */
- deep_color = 125;
- break;
- case 3: /* 12-bit deep color selected */
- deep_color = 150;
- break;
- default:
- return -EINVAL;
- }
-
- switch (sample_freq) {
- case 32000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 4096;
- break;
- case 44100:
- *n = 6272;
- break;
- case 48000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 6144;
- break;
- default:
- *n = 0;
- return -EINVAL;
- }
-
- /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
- *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
-
- return 0;
-}
-
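The ACR helper removed here survives behind the new hdmi_config_audio_acr(ip_data, ...) call further down; as a worked example of the CTS formula it implements, for 48 kHz audio at a 74.25 MHz pixel clock (720p60) without deep color:

	u32 pclk = 74250;	/* pixel clock in kHz */
	u32 n = 6144;		/* HDMI spec value for 48 kHz */
	u32 deep_color = 100;	/* no deep color */
	u32 cts = pclk * (n / 128) * deep_color / (48000 / 10);
	/* cts == 74250, i.e. CTS tracks the pixel clock at 48 kHz */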
-static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
+static int hdmi_audio_hw_params(struct hdmi_ip_data *ip_data,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
@@ -1548,7 +609,7 @@ static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- err = hdmi_config_audio_acr(params_rate(params), &n, &cts);
+ err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts);
if (err < 0)
return err;
@@ -1564,8 +625,8 @@ static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
audio_dma.fifo_threshold = 0x20; /* in number of samples */
- hdmi_wp_audio_config_dma(&audio_dma);
- hdmi_wp_audio_config_format(&audio_format);
+ hdmi_wp_audio_config_dma(ip_data, &audio_dma);
+ hdmi_wp_audio_config_format(ip_data, &audio_format);
/*
* I2S config
@@ -1609,7 +670,7 @@ static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
/* Use parallel audio interface */
core_cfg.en_parallel_aud_input = true;
- hdmi_core_audio_config(&core_cfg);
+ hdmi_core_audio_config(ip_data, &core_cfg);
/*
* Configure packet
@@ -1623,36 +684,10 @@ static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
aud_if_cfg.db5_downmix_inh = false;
aud_if_cfg.db5_lsv = 0;
- hdmi_core_audio_infoframe_config(&aud_if_cfg);
+ hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
return 0;
}
-static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- int err = 0;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 1, 0, 0);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 31, 31);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 30, 30);
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 0, 0, 0);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 30, 30);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 31, 31);
- break;
- default:
- err = -EINVAL;
- }
- return err;
-}
-
static int hdmi_audio_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -1698,15 +733,6 @@ static int hdmi_get_clocks(struct platform_device *pdev)
hdmi.sys_clk = clk;
- clk = clk_get(&pdev->dev, "dss_48mhz_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get hdmi_clk\n");
- clk_put(hdmi.sys_clk);
- return PTR_ERR(clk);
- }
-
- hdmi.hdmi_clk = clk;
-
return 0;
}
@@ -1714,8 +740,6 @@ static void hdmi_put_clocks(void)
{
if (hdmi.sys_clk)
clk_put(hdmi.sys_clk);
- if (hdmi.hdmi_clk)
- clk_put(hdmi.hdmi_clk);
}
/* HDMI HW IP initialisation */
@@ -1736,20 +760,26 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
}
/* Base address taken from platform */
- hdmi.base_wp = ioremap(hdmi_mem->start, resource_size(hdmi_mem));
- if (!hdmi.base_wp) {
+ hdmi.ip_data.base_wp = ioremap(hdmi_mem->start,
+ resource_size(hdmi_mem));
+ if (!hdmi.ip_data.base_wp) {
DSSERR("can't ioremap WP\n");
return -ENOMEM;
}
r = hdmi_get_clocks(pdev);
if (r) {
- iounmap(hdmi.base_wp);
+ iounmap(hdmi.ip_data.base_wp);
return r;
}
pm_runtime_enable(&pdev->dev);
+ hdmi.ip_data.core_sys_offset = HDMI_CORE_SYS;
+ hdmi.ip_data.core_av_offset = HDMI_CORE_AV;
+ hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
+ hdmi.ip_data.phy_offset = HDMI_PHY;
+
hdmi_panel_init();
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
@@ -1779,14 +809,13 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev)
hdmi_put_clocks();
- iounmap(hdmi.base_wp);
+ iounmap(hdmi.ip_data.base_wp);
return 0;
}
static int hdmi_runtime_suspend(struct device *dev)
{
- clk_disable(hdmi.hdmi_clk);
clk_disable(hdmi.sys_clk);
dispc_runtime_put();
@@ -1809,7 +838,6 @@ static int hdmi_runtime_resume(struct device *dev)
clk_enable(hdmi.sys_clk);
- clk_enable(hdmi.hdmi_clk);
return 0;
diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 7d4f2bd7c50..533d5dc634d 100644
--- a/drivers/video/omap2/dss/hdmi_omap4_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -1,5 +1,5 @@
/*
- * hdmi_omap4_panel.c
+ * hdmi_panel.c
*
* HDMI library support functions for TI OMAP4 processors.
*
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <video/omapdss.h>
+#include <linux/slab.h>
#include "dss.h"
@@ -40,13 +41,7 @@ static int hdmi_panel_probe(struct omap_dss_device *dssdev)
dssdev->panel.config = OMAP_DSS_LCD_TFT |
OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;
- /*
- * Initialize the timings to 640 * 480
- * This is only for framebuffer update not for TV timing setting
- * Setting TV timing will be done only on enable
- */
- dssdev->panel.timings.x_res = 640;
- dssdev->panel.timings.y_res = 480;
+ dssdev->panel.timings = (struct omap_video_timings){640, 480, 25175, 96, 16, 48, 2, 11, 31};
DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
dssdev->panel.timings.x_res,
@@ -161,12 +156,7 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&hdmi.hdmi_lock);
dssdev->panel.timings = *timings;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- /* turn the hdmi off and on to get new timings to use */
- omapdss_hdmi_display_disable(dssdev);
- omapdss_hdmi_display_set_timing(dssdev);
- }
+ omapdss_hdmi_display_set_timing(dssdev);
mutex_unlock(&hdmi.hdmi_lock);
}
@@ -181,12 +171,54 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
mutex_lock(&hdmi.hdmi_lock);
r = omapdss_hdmi_display_check_timing(dssdev, timings);
- if (r) {
- DSSERR("Timing cannot be applied\n");
- goto err;
+
+ mutex_unlock(&hdmi.hdmi_lock);
+ return r;
+}
+
+static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
+{
+ int r;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = omapdss_hdmi_display_enable(dssdev);
+ if (r)
+ goto err;
+ }
+
+ r = omapdss_hdmi_read_edid(buf, len);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
+ dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+ omapdss_hdmi_display_disable(dssdev);
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static bool hdmi_detect(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = omapdss_hdmi_display_enable(dssdev);
+ if (r)
+ goto err;
}
+
+ r = omapdss_hdmi_detect();
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
+ dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+ omapdss_hdmi_display_disable(dssdev);
err:
mutex_unlock(&hdmi.hdmi_lock);
+
return r;
}
@@ -200,6 +232,8 @@ static struct omap_dss_driver hdmi_driver = {
.get_timings = hdmi_get_timings,
.set_timings = hdmi_set_timings,
.check_timings = hdmi_check_timings,
+ .read_edid = hdmi_read_edid,
+ .detect = hdmi_detect,
.driver = {
.name = "hdmi_panel",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 13d72d5c714..6e63845cc7d 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -106,7 +106,7 @@ put_device:
static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.default_color);
+ return snprintf(buf, PAGE_SIZE, "%#x\n", mgr->info.default_color);
}
static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
@@ -116,8 +116,9 @@ static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
u32 color;
int r;
- if (sscanf(buf, "%d", &color) != 1)
- return -EINVAL;
+ r = kstrtouint(buf, 0, &color);
+ if (r)
+ return r;
mgr->get_manager_info(mgr, &info);
@@ -184,7 +185,7 @@ static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_key);
+ return snprintf(buf, PAGE_SIZE, "%#x\n", mgr->info.trans_key);
}
static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
@@ -194,8 +195,9 @@ static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
u32 key_value;
int r;
- if (sscanf(buf, "%d", &key_value) != 1)
- return -EINVAL;
+ r = kstrtouint(buf, 0, &key_value);
+ if (r)
+ return r;
mgr->get_manager_info(mgr, &info);
@@ -222,15 +224,16 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
const char *buf, size_t size)
{
struct omap_overlay_manager_info info;
- int enable;
+ bool enable;
int r;
- if (sscanf(buf, "%d", &enable) != 1)
- return -EINVAL;
+ r = strtobool(buf, &enable);
+ if (r)
+ return r;
mgr->get_manager_info(mgr, &info);
- info.trans_enabled = enable ? true : false;
+ info.trans_enabled = enable;
r = mgr->set_manager_info(mgr, &info);
if (r)
@@ -246,7 +249,10 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
static ssize_t manager_alpha_blending_enabled_show(
struct omap_overlay_manager *mgr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.alpha_enabled);
+ WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ mgr->info.partial_alpha_enabled);
}
static ssize_t manager_alpha_blending_enabled_store(
@@ -254,15 +260,18 @@ static ssize_t manager_alpha_blending_enabled_store(
const char *buf, size_t size)
{
struct omap_overlay_manager_info info;
- int enable;
+ bool enable;
int r;
- if (sscanf(buf, "%d", &enable) != 1)
- return -EINVAL;
+ WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
+
+ r = strtobool(buf, &enable);
+ if (r)
+ return r;
mgr->get_manager_info(mgr, &info);
- info.alpha_enabled = enable ? true : false;
+ info.partial_alpha_enabled = enable;
r = mgr->set_manager_info(mgr, &info);
if (r)
@@ -285,19 +294,16 @@ static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
const char *buf, size_t size)
{
struct omap_overlay_manager_info info;
- int v;
int r;
bool enable;
if (!dss_has_feature(FEAT_CPR))
return -ENODEV;
- r = kstrtoint(buf, 0, &v);
+ r = strtobool(buf, &enable);
if (r)
return r;
- enable = !!v;
-
mgr->get_manager_info(mgr, &info);
if (info.cpr_enable == enable)
@@ -586,6 +592,13 @@ static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
return -EINVAL;
}
+ /*
+ * Don't allow currently enabled displays to have the overlay manager
+ * pulled out from underneath them
+ */
+ if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED)
+ return -EINVAL;
+
mgr->device->manager = NULL;
mgr->device = NULL;
mgr->device_changed = true;
@@ -801,7 +814,7 @@ static int configure_overlay(enum omap_plane plane)
{
struct overlay_cache_data *c;
struct manager_cache_data *mc;
- struct omap_overlay_info *oi;
+ struct omap_overlay_info *oi, new_oi;
struct omap_overlay_manager_info *mi;
u16 outw, outh;
u16 x, y, w, h;
@@ -815,7 +828,7 @@ static int configure_overlay(enum omap_plane plane)
oi = &c->info;
if (!c->enabled) {
- dispc_enable_plane(plane, 0);
+ dispc_ovl_enable(plane, 0);
return 0;
}
@@ -843,7 +856,7 @@ static int configure_overlay(enum omap_plane plane)
/* If the overlay is outside the update region, disable it */
if (!rectangle_intersects(mc->x, mc->y, mc->w, mc->h,
x, y, outw, outh)) {
- dispc_enable_plane(plane, 0);
+ dispc_ovl_enable(plane, 0);
return 0;
}
@@ -921,34 +934,27 @@ static int configure_overlay(enum omap_plane plane)
}
}
- r = dispc_setup_plane(plane,
- paddr,
- oi->screen_width,
- x, y,
- w, h,
- outw, outh,
- oi->color_mode,
- c->ilace,
- oi->rotation_type,
- oi->rotation,
- oi->mirror,
- oi->global_alpha,
- oi->pre_mult_alpha,
- c->channel,
- oi->p_uv_addr);
+ new_oi = *oi;
+
+ /* update new_oi members which could have been possibly updated */
+ new_oi.pos_x = x;
+ new_oi.pos_y = y;
+ new_oi.width = w;
+ new_oi.height = h;
+ new_oi.out_width = outw;
+ new_oi.out_height = outh;
+ new_oi.paddr = paddr;
+ r = dispc_ovl_setup(plane, &new_oi, c->ilace, c->channel,
+ c->replication, c->fifo_low, c->fifo_high);
if (r) {
/* this shouldn't happen */
- DSSERR("dispc_setup_plane failed for ovl %d\n", plane);
- dispc_enable_plane(plane, 0);
+ DSSERR("dispc_ovl_setup failed for ovl %d\n", plane);
+ dispc_ovl_enable(plane, 0);
return r;
}
- dispc_enable_replication(plane, c->replication);
-
- dispc_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);
-
- dispc_enable_plane(plane, 1);
+ dispc_ovl_enable(plane, 1);
return 0;
}
@@ -962,13 +968,13 @@ static void configure_manager(enum omap_channel channel)
/* picking info from the cache */
mi = &dss_cache.manager_cache[channel].info;
- dispc_set_default_color(channel, mi->default_color);
- dispc_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
- dispc_enable_trans_key(channel, mi->trans_enabled);
- dispc_enable_alpha_blending(channel, mi->alpha_enabled);
+ dispc_mgr_set_default_color(channel, mi->default_color);
+ dispc_mgr_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
+ dispc_mgr_enable_trans_key(channel, mi->trans_enabled);
+ dispc_mgr_enable_alpha_fixed_zorder(channel, mi->partial_alpha_enabled);
if (dss_has_feature(FEAT_CPR)) {
- dispc_enable_cpr(channel, mi->cpr_enable);
- dispc_set_cpr_coef(channel, &mi->cpr_coefs);
+ dispc_mgr_enable_cpr(channel, mi->cpr_enable);
+ dispc_mgr_set_cpr_coef(channel, &mi->cpr_coefs);
}
}
@@ -992,7 +998,7 @@ static int configure_dispc(void)
busy = false;
for (i = 0; i < num_mgrs; i++) {
- mgr_busy[i] = dispc_go_busy(i);
+ mgr_busy[i] = dispc_mgr_go_busy(i);
mgr_go[i] = false;
}
@@ -1053,7 +1059,7 @@ static int configure_dispc(void)
* always be turned off after frame, and new settings will be
* taken in to use at next update */
if (!mc->manual_update)
- dispc_go(i);
+ dispc_mgr_go(i);
}
if (busy)
@@ -1258,7 +1264,7 @@ static void dss_apply_irq_handler(void *data, u32 mask)
u32 irq_mask;
for (i = 0; i < num_mgrs; i++)
- mgr_busy[i] = dispc_go_busy(i);
+ mgr_busy[i] = dispc_mgr_go_busy(i);
spin_lock(&dss_cache.lock);
@@ -1280,7 +1286,7 @@ static void dss_apply_irq_handler(void *data, u32 mask)
/* re-read busy flags */
for (i = 0; i < num_mgrs; i++)
- mgr_busy[i] = dispc_go_busy(i);
+ mgr_busy[i] = dispc_mgr_go_busy(i);
/* keep running as long as there are busy managers, so that
* we can collect overlay-applied information */
@@ -1326,11 +1332,13 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
ovl = omap_dss_get_overlay(i);
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
-
oc = &dss_cache.overlay_cache[ovl->id];
+ if (ovl->manager_changed) {
+ ovl->manager_changed = false;
+ ovl->info_dirty = true;
+ }
+
if (!overlay_enabled(ovl)) {
if (oc->enabled) {
oc->enabled = false;
@@ -1375,9 +1383,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
list_for_each_entry(mgr, &manager_list, list) {
struct omap_dss_device *dssdev;
- if (!(mgr->caps & OMAP_DSS_OVL_MGR_CAP_DISPC))
- continue;
-
mc = &dss_cache.manager_cache[mgr->id];
if (mgr->device_changed) {
@@ -1423,9 +1428,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
ovl = omap_dss_get_overlay(i);
- if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
- continue;
-
oc = &dss_cache.overlay_cache[ovl->id];
if (!oc->enabled)
@@ -1433,11 +1435,11 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
dssdev = ovl->manager->device;
- size = dispc_get_plane_fifo_size(ovl->id);
+ size = dispc_ovl_get_fifo_size(ovl->id);
if (use_fifomerge)
size *= 3;
- burst_size = dispc_get_burst_size(ovl->id);
+ burst_size = dispc_ovl_get_burst_size(ovl->id);
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_DPI:
@@ -1484,12 +1486,17 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
static int dss_check_manager(struct omap_overlay_manager *mgr)
{
- /* OMAP supports only graphics source transparency color key and alpha
- * blending simultaneously. See TRM 15.4.2.4.2.2 Alpha Mode */
-
- if (mgr->info.alpha_enabled && mgr->info.trans_enabled &&
- mgr->info.trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST)
- return -EINVAL;
+ if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) {
+ /*
+ * OMAP3 supports only graphics source transparency color key
+ * and alpha blending simultaneously. See TRM 15.4.2.4.2.2
+ * Alpha Mode
+ */
+ if (mgr->info.partial_alpha_enabled && mgr->info.trans_enabled
+ && mgr->info.trans_key_type !=
+ OMAP_DSS_COLOR_KEY_GFX_DST)
+ return -EINVAL;
+ }
return 0;
}
@@ -1522,13 +1529,13 @@ static void omap_dss_mgr_get_info(struct omap_overlay_manager *mgr,
static int dss_mgr_enable(struct omap_overlay_manager *mgr)
{
- dispc_enable_channel(mgr->id, 1);
+ dispc_mgr_enable(mgr->id, 1);
return 0;
}
static int dss_mgr_disable(struct omap_overlay_manager *mgr)
{
- dispc_enable_channel(mgr->id, 0);
+ dispc_mgr_enable(mgr->id, 0);
return 0;
}
@@ -1580,7 +1587,7 @@ int dss_init_overlay_managers(struct platform_device *pdev)
mgr->enable = &dss_mgr_enable;
mgr->disable = &dss_mgr_disable;
- mgr->caps = OMAP_DSS_OVL_MGR_CAP_DISPC;
+ mgr->caps = 0;
mgr->supported_displays =
dss_feat_get_supported_displays(mgr->id);
@@ -1597,42 +1604,6 @@ int dss_init_overlay_managers(struct platform_device *pdev)
}
}
-#ifdef L4_EXAMPLE
- {
- int omap_dss_mgr_apply_l4(struct omap_overlay_manager *mgr)
- {
- DSSDBG("omap_dss_mgr_apply_l4(%s)\n", mgr->name);
-
- return 0;
- }
-
- struct omap_overlay_manager *mgr;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-
- BUG_ON(mgr == NULL);
-
- mgr->name = "l4";
- mgr->supported_displays =
- OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI;
-
- mgr->set_device = &omap_dss_set_device;
- mgr->unset_device = &omap_dss_unset_device;
- mgr->apply = &omap_dss_mgr_apply_l4;
- mgr->set_manager_info = &omap_dss_mgr_set_info;
- mgr->get_manager_info = &omap_dss_mgr_get_info;
-
- dss_overlay_setup_l4_manager(mgr);
-
- omap_dss_add_overlay_manager(mgr);
-
- r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
- &pdev->dev.kobj, "managerl4");
-
- if (r)
- DSSERR("failed to create sysfs file\n");
- }
-#endif
-
return 0;
}
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index c84380c53c3..ab8e40e4875 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -211,16 +211,17 @@ static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
size_t size)
{
- int r, enable;
+ int r;
+ bool enable;
struct omap_overlay_info info;
ovl->get_overlay_info(ovl, &info);
- r = kstrtoint(buf, 0, &enable);
+ r = strtobool(buf, &enable);
if (r)
return r;
- info.enabled = !!enable;
+ info.enabled = enable;
r = ovl->set_overlay_info(ovl, &info);
if (r)
@@ -248,7 +249,7 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
u8 alpha;
struct omap_overlay_info info;
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
return -ENODEV;
r = kstrtou8(buf, 0, &alpha);
@@ -257,14 +258,7 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
ovl->get_overlay_info(ovl, &info);
- /* Video1 plane does not support global alpha
- * to always make it 255 completely opaque
- */
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
- ovl->id == OMAP_DSS_VIDEO1)
- info.global_alpha = 255;
- else
- info.global_alpha = alpha;
+ info.global_alpha = alpha;
r = ovl->set_overlay_info(ovl, &info);
if (r)
@@ -293,20 +287,52 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
u8 alpha;
struct omap_overlay_info info;
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
+ return -ENODEV;
+
r = kstrtou8(buf, 0, &alpha);
if (r)
return r;
ovl->get_overlay_info(ovl, &info);
- /* only GFX and Video2 plane support pre alpha multiplied
- * set zero for Video1 plane
- */
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
- ovl->id == OMAP_DSS_VIDEO1)
- info.pre_mult_alpha = 0;
- else
- info.pre_mult_alpha = alpha;
+ info.pre_mult_alpha = alpha;
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.zorder);
+}
+
+static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ u8 zorder;
+ struct omap_overlay_info info;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
+ return -ENODEV;
+
+ r = kstrtou8(buf, 0, &zorder);
+ if (r)
+ return r;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.zorder = zorder;
r = ovl->set_overlay_info(ovl, &info);
if (r)
@@ -347,6 +373,8 @@ static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
overlay_pre_mult_alpha_show,
overlay_pre_mult_alpha_store);
+static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
+ overlay_zorder_show, overlay_zorder_store);
static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_name.attr,
@@ -358,6 +386,7 @@ static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_enabled.attr,
&overlay_attr_global_alpha.attr,
&overlay_attr_pre_mult_alpha.attr,
+ &overlay_attr_zorder.attr,
NULL
};
@@ -407,6 +436,7 @@ int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
struct omap_overlay_info *info;
u16 outw, outh;
u16 dw, dh;
+ int i;
if (!dssdev)
return 0;
@@ -462,6 +492,31 @@ int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
return -EINVAL;
}
+ if (ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) {
+ if (info->zorder < 0 || info->zorder > 3) {
+ DSSERR("zorder out of range: %d\n",
+ info->zorder);
+ return -EINVAL;
+ }
+ /*
+ * Check that zorder doesn't match with zorder of any other
+ * overlay which is enabled and is also connected to the same
+ * manager
+ */
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *tmp_ovl = omap_dss_get_overlay(i);
+
+ if (tmp_ovl->id != ovl->id &&
+ tmp_ovl->manager == ovl->manager &&
+ tmp_ovl->info.enabled == true &&
+ tmp_ovl->info.zorder == info->zorder) {
+ DSSERR("%s and %s have same zorder: %d\n",
+ ovl->name, tmp_ovl->name, info->zorder);
+ return -EINVAL;
+ }
+ }
+ }
+
return 0;
}
@@ -516,6 +571,7 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
}
ovl->manager = mgr;
+ ovl->manager_changed = true;
/* XXX: When there is an overlay on a DSI manual update display, and
* the overlay is first disabled, then moved to tv, and enabled, we
@@ -529,15 +585,12 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
* Userspace workaround for this is to update the LCD after disabling
* the overlay, but before moving the overlay to TV.
*/
- dispc_set_channel_out(ovl->id, mgr->id);
return 0;
}
static int omap_dss_unset_manager(struct omap_overlay *ovl)
{
- int r;
-
if (!ovl->manager) {
DSSERR("failed to detach overlay: manager not set\n");
return -EINVAL;
@@ -548,11 +601,8 @@ static int omap_dss_unset_manager(struct omap_overlay *ovl)
return -EINVAL;
}
- r = ovl->wait_for_go(ovl);
- if (r)
- return r;
-
ovl->manager = NULL;
+ ovl->manager_changed = true;
return 0;
}
@@ -618,22 +668,29 @@ void dss_init_overlays(struct platform_device *pdev)
case 0:
ovl->name = "gfx";
ovl->id = OMAP_DSS_GFX;
- ovl->caps = OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+ ovl->info.zorder = 0;
break;
case 1:
ovl->name = "vid1";
ovl->id = OMAP_DSS_VIDEO1;
- ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
- OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+ ovl->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
break;
case 2:
ovl->name = "vid2";
ovl->id = OMAP_DSS_VIDEO2;
- ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
- OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+ ovl->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
+ break;
+ case 3:
+ ovl->name = "vid3";
+ ovl->id = OMAP_DSS_VIDEO3;
+ ovl->info.global_alpha = 255;
+ ovl->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
break;
}
@@ -643,6 +700,7 @@ void dss_init_overlays(struct platform_device *pdev)
ovl->get_overlay_info = &dss_ovl_get_overlay_info;
ovl->wait_for_go = &dss_ovl_wait_for_go;
+ ovl->caps = dss_feat_get_overlay_caps(ovl->id);
ovl->supported_modes =
dss_feat_get_supported_color_modes(ovl->id);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 39f4c597026..1130c608a56 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -309,9 +310,9 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dispc_set_lcd_size(dssdev->manager->id, width, height);
+ dispc_mgr_set_lcd_size(dssdev->manager->id, width, height);
- dispc_enable_channel(dssdev->manager->id, true);
+ dispc_mgr_enable(dssdev->manager->id, true);
rfbi.framedone_callback = callback;
rfbi.framedone_callback_data = data;
@@ -783,10 +784,8 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
if (*w == 0 || *h == 0)
return -EINVAL;
- if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- dss_setup_partial_planes(dssdev, x, y, w, h, true);
- dispc_set_lcd_size(dssdev->manager->id, *w, *h);
- }
+ dss_setup_partial_planes(dssdev, x, y, w, h, true);
+ dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
return 0;
}
@@ -796,22 +795,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h,
void (*callback)(void *), void *data)
{
- if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- rfbi_transfer_area(dssdev, w, h, callback, data);
- } else {
- struct omap_overlay *ovl;
- void __iomem *addr;
- int scr_width;
-
- ovl = dssdev->manager->overlays[0];
- scr_width = ovl->info.screen_width;
- addr = ovl->info.vaddr;
-
- omap_rfbi_write_pixels(addr, scr_width, x, y, w, h);
-
- callback(data);
- }
-
+ rfbi_transfer_area(dssdev, w, h, callback, data);
return 0;
}
EXPORT_SYMBOL(omap_rfbi_update);
@@ -860,6 +844,11 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
+ if (dssdev->manager == NULL) {
+ DSSERR("failed to enable display: no manager\n");
+ return -ENODEV;
+ }
+
r = rfbi_runtime_get();
if (r)
return r;
@@ -877,13 +866,13 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dispc_set_lcd_display_type(dssdev->manager->id,
+ dispc_mgr_set_lcd_display_type(dssdev->manager->id,
OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_parallel_interface_mode(dssdev->manager->id,
- OMAP_DSS_PARALLELMODE_RFBI);
+ dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_RFBI);
+ dispc_mgr_enable_stallmode(dssdev->manager->id, true);
- dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
+ dispc_mgr_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
rfbi_configure(dssdev->phy.rfbi.channel,
dssdev->ctrl.pixel_size,
@@ -952,10 +941,7 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
msleep(10);
- if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630())
- clk = dss_get_ick();
- else
- clk = clk_get(&pdev->dev, "ick");
+ clk = clk_get(&pdev->dev, "ick");
if (IS_ERR(clk)) {
DSSERR("can't get ick\n");
r = PTR_ERR(clk);
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 3a688c871a4..40305ad7841 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
+#include <linux/export.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -35,13 +36,13 @@ static struct {
static void sdi_basic_init(struct omap_dss_device *dssdev)
{
- dispc_set_parallel_interface_mode(dssdev->manager->id,
- OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS);
+ dispc_mgr_enable_stallmode(dssdev->manager->id, false);
- dispc_set_lcd_display_type(dssdev->manager->id,
+ dispc_mgr_set_lcd_display_type(dssdev->manager->id,
OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_tft_data_lines(dssdev->manager->id, 24);
+ dispc_mgr_set_tft_data_lines(dssdev->manager->id, 24);
dispc_lcd_enable_signal_polarity(1);
}
@@ -55,6 +56,11 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
unsigned long pck;
int r;
+ if (dssdev->manager == NULL) {
+ DSSERR("failed to enable display: no manager\n");
+ return -ENODEV;
+ }
+
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
@@ -78,7 +84,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
/* 15.5.9.1.2 */
dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
- dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+ dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
r = dss_calc_clock_div(1, t->pixel_clock * 1000,
@@ -101,13 +107,13 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
}
- dispc_set_lcd_timings(dssdev->manager->id, t);
+ dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
r = dss_set_clock_div(&dss_cinfo);
if (r)
goto err_set_dss_clock_div;
- r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
+ r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
goto err_set_dispc_clock_div;
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
new file mode 100644
index 00000000000..2c3443dabb1
--- /dev/null
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -0,0 +1,138 @@
+/*
+ * ti_hdmi.h
+ *
+ * HDMI driver definition for TI OMAP4, DM81xx, DM38xx Processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TI_HDMI_H
+#define _TI_HDMI_H
+
+struct hdmi_ip_data;
+
+enum hdmi_pll_pwr {
+ HDMI_PLLPWRCMD_ALLOFF = 0,
+ HDMI_PLLPWRCMD_PLLONLY = 1,
+ HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+ HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+};
+
+enum hdmi_core_hdmi_dvi {
+ HDMI_DVI = 0,
+ HDMI_HDMI = 1
+};
+
+enum hdmi_clk_refsel {
+ HDMI_REFSEL_PCLK = 0,
+ HDMI_REFSEL_REF1 = 1,
+ HDMI_REFSEL_REF2 = 2,
+ HDMI_REFSEL_SYSCLK = 3
+};
+
+struct hdmi_video_timings {
+ u16 x_res;
+ u16 y_res;
+ /* Unit: KHz */
+ u32 pixel_clock;
+ u16 hsw;
+ u16 hfp;
+ u16 hbp;
+ u16 vsw;
+ u16 vfp;
+ u16 vbp;
+};
+
+/* HDMI timing structure */
+struct hdmi_timings {
+ struct hdmi_video_timings timings;
+ int vsync_pol;
+ int hsync_pol;
+};
+
+struct hdmi_cm {
+ int code;
+ int mode;
+};
+
+struct hdmi_config {
+ struct hdmi_timings timings;
+ u16 interlace;
+ struct hdmi_cm cm;
+};
+
+/* HDMI PLL structure */
+struct hdmi_pll_info {
+ u16 regn;
+ u16 regm;
+ u32 regmf;
+ u16 regm2;
+ u16 regsd;
+ u16 dcofreq;
+ enum hdmi_clk_refsel refsel;
+};
+
+struct ti_hdmi_ip_ops {
+
+ void (*video_configure)(struct hdmi_ip_data *ip_data);
+
+ int (*phy_enable)(struct hdmi_ip_data *ip_data);
+
+ void (*phy_disable)(struct hdmi_ip_data *ip_data);
+
+ int (*read_edid)(struct hdmi_ip_data *ip_data, u8 *edid, int len);
+
+ bool (*detect)(struct hdmi_ip_data *ip_data);
+
+ int (*pll_enable)(struct hdmi_ip_data *ip_data);
+
+ void (*pll_disable)(struct hdmi_ip_data *ip_data);
+
+ void (*video_enable)(struct hdmi_ip_data *ip_data, bool start);
+
+ void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
+
+ void (*dump_core)(struct hdmi_ip_data *ip_data, struct seq_file *s);
+
+ void (*dump_pll)(struct hdmi_ip_data *ip_data, struct seq_file *s);
+
+ void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
+
+};
+
+struct hdmi_ip_data {
+ void __iomem *base_wp; /* HDMI wrapper */
+ unsigned long core_sys_offset;
+ unsigned long core_av_offset;
+ unsigned long pll_offset;
+ unsigned long phy_offset;
+ const struct ti_hdmi_ip_ops *ops;
+ struct hdmi_config cfg;
+ struct hdmi_pll_info pll_data;
+};
+int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
+bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start);
+int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
+void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
+void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
+void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
+
+#endif
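The ops table above is what lets the generic HDMI code drive different TI IP blocks through the same hooks. A minimal sketch of how the OMAP4 functions declared at the end of this header could be plugged in (the variable name omap4_hdmi_ip_ops is illustrative, not taken from this diff):

	static const struct ti_hdmi_ip_ops omap4_hdmi_ip_ops = {
		.video_configure	= ti_hdmi_4xxx_basic_configure,
		.phy_enable		= ti_hdmi_4xxx_phy_enable,
		.phy_disable		= ti_hdmi_4xxx_phy_disable,
		.read_edid		= ti_hdmi_4xxx_read_edid,
		.detect			= ti_hdmi_4xxx_detect,
		.pll_enable		= ti_hdmi_4xxx_pll_enable,
		.pll_disable		= ti_hdmi_4xxx_pll_disable,
		.video_enable		= ti_hdmi_4xxx_wp_video_start,
		.dump_wrapper		= ti_hdmi_4xxx_wp_dump,
		.dump_core		= ti_hdmi_4xxx_core_dump,
		.dump_pll		= ti_hdmi_4xxx_pll_dump,
		.dump_phy		= ti_hdmi_4xxx_phy_dump,
	};

With ip_data->ops pointing at such a table, callers stay IP-agnostic, e.g. ip_data->ops->read_edid(ip_data, buf, len).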
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
new file mode 100644
index 00000000000..e1a6ce518af
--- /dev/null
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -0,0 +1,1239 @@
+/*
+ * ti_hdmi_4xxx_ip.c
+ *
+ * HDMI TI81xx, TI38xx, TI OMAP4 etc. IP driver library
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+
+#include "ti_hdmi_4xxx_ip.h"
+#include "dss.h"
+
+static inline void hdmi_write_reg(void __iomem *base_addr,
+ const u16 idx, u32 val)
+{
+ __raw_writel(val, base_addr + idx);
+}
+
+static inline u32 hdmi_read_reg(void __iomem *base_addr,
+ const u16 idx)
+{
+ return __raw_readl(base_addr + idx);
+}
+
+static inline void __iomem *hdmi_wp_base(struct hdmi_ip_data *ip_data)
+{
+ return ip_data->base_wp;
+}
+
+static inline void __iomem *hdmi_phy_base(struct hdmi_ip_data *ip_data)
+{
+ return ip_data->base_wp + ip_data->phy_offset;
+}
+
+static inline void __iomem *hdmi_pll_base(struct hdmi_ip_data *ip_data)
+{
+ return ip_data->base_wp + ip_data->pll_offset;
+}
+
+static inline void __iomem *hdmi_av_base(struct hdmi_ip_data *ip_data)
+{
+ return ip_data->base_wp + ip_data->core_av_offset;
+}
+
+static inline void __iomem *hdmi_core_sys_base(struct hdmi_ip_data *ip_data)
+{
+ return ip_data->base_wp + ip_data->core_sys_offset;
+}
+
+static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
+ const u16 idx,
+ int b2, int b1, u32 val)
+{
+ u32 t = 0;
+ while (val != REG_GET(base_addr, idx, b2, b1)) {
+ udelay(1);
+ if (t++ > 10000)
+ return !val;
+ }
+ return val;
+}
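A note on the helper above: it busy-waits (roughly 10 ms worst case at 1 us per poll) for the field in bits b2..b1 to reach val, and returns val on success or !val on timeout. That is why the callers below compare the result against the value they waited for, for example:

	if (hdmi_wait_for_bit_change(pll_base, PLLCTRL_PLL_GO, 0, 0, 1) != 1)
		return -ETIMEDOUT;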
+
+static int hdmi_pll_init(struct hdmi_ip_data *ip_data)
+{
+ u32 r;
+ void __iomem *pll_base = hdmi_pll_base(ip_data);
+ struct hdmi_pll_info *fmt = &ip_data->pll_data;
+
+ /* PLL start always use manual mode */
+ REG_FLD_MOD(pll_base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+ r = hdmi_read_reg(pll_base, PLLCTRL_CFG1);
+ r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
+ r = FLD_MOD(r, fmt->regn - 1, 8, 1); /* CFG1_PLL_REGN */
+
+ hdmi_write_reg(pll_base, PLLCTRL_CFG1, r);
+
+ r = hdmi_read_reg(pll_base, PLLCTRL_CFG2);
+
+ r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
+ r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
+
+ if (fmt->dcofreq) {
+ /* divider programming for frequencies beyond 1000 MHz */
+ REG_FLD_MOD(pll_base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
+ r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ } else {
+ r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
+ }
+
+ hdmi_write_reg(pll_base, PLLCTRL_CFG2, r);
+
+ r = hdmi_read_reg(pll_base, PLLCTRL_CFG4);
+ r = FLD_MOD(r, fmt->regm2, 24, 18);
+ r = FLD_MOD(r, fmt->regmf, 17, 0);
+
+ hdmi_write_reg(pll_base, PLLCTRL_CFG4, r);
+
+ /* go now */
+ REG_FLD_MOD(pll_base, PLLCTRL_PLL_GO, 0x1, 0, 0);
+
+ /* wait for bit change */
+ if (hdmi_wait_for_bit_change(pll_base, PLLCTRL_PLL_GO,
+ 0, 0, 1) != 1) {
+ pr_err("PLL GO bit not set\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait till the lock bit is set in PLL status */
+ if (hdmi_wait_for_bit_change(pll_base,
+ PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
+ pr_err("cannot lock PLL\n");
+ pr_err("CFG1 0x%x\n",
+ hdmi_read_reg(pll_base, PLLCTRL_CFG1));
+ pr_err("CFG2 0x%x\n",
+ hdmi_read_reg(pll_base, PLLCTRL_CFG2));
+ pr_err("CFG4 0x%x\n",
+ hdmi_read_reg(pll_base, PLLCTRL_CFG4));
+ return -ETIMEDOUT;
+ }
+
+ pr_debug("PLL locked!\n");
+
+ return 0;
+}
+
+/* PHY_PWR_CMD */
+static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val)
+{
+ /* Command for power control of HDMI PHY */
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6);
+
+ /* Status of the power control of HDMI PHY */
+ if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data),
+ HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
+ pr_err("Failed to set PHY power mode to %d\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* PLL_PWR_CMD */
+static int hdmi_set_pll_pwr(struct hdmi_ip_data *ip_data, enum hdmi_pll_pwr val)
+{
+ /* Command for power control of HDMI PLL */
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 3, 2);
+
+ /* wait till PLL_PWR_STATUS is set */
+ if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL,
+ 1, 0, val) != val) {
+ pr_err("Failed to set PLL_PWR_STATUS\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int hdmi_pll_reset(struct hdmi_ip_data *ip_data)
+{
+ /* SYSRESET controlled by power FSM */
+ REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+ /* READ 0x0 reset is in progress */
+ if (hdmi_wait_for_bit_change(hdmi_pll_base(ip_data),
+ PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
+ pr_err("Failed to sysreset PLL\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data)
+{
+ u16 r = 0;
+
+ r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
+ if (r)
+ return r;
+
+ r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
+ if (r)
+ return r;
+
+ r = hdmi_pll_reset(ip_data);
+ if (r)
+ return r;
+
+ r = hdmi_pll_init(ip_data);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
+{
+ hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
+}
+
+int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
+{
+ u16 r = 0;
+ void __iomem *phy_base = hdmi_phy_base(ip_data);
+
+ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
+ if (r)
+ return r;
+
+ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
+ if (r)
+ return r;
+
+ /*
+ * Read address 0 in order to make sure the SCP reset has completed;
+ * this dummy access ensures the reset is done
+ */
+ hdmi_read_reg(phy_base, HDMI_TXPHY_TX_CTRL);
+
+ /*
+ * Write to phy address 0 to configure the clock
+ * use HFBITCLK; write the HDMI_TXPHY_TX_CONTROL_FREQOUT field
+ */
+ REG_FLD_MOD(phy_base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+ /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+ hdmi_write_reg(phy_base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
+
+ /* Setup max LDO voltage */
+ REG_FLD_MOD(phy_base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
+
+ /* Write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+ return 0;
+}
+
+void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
+{
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
+}
+
+static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
+{
+ void __iomem *base = hdmi_core_sys_base(ip_data);
+
+ /* Turn on CLK for DDC */
+ REG_FLD_MOD(base, HDMI_CORE_AV_DPD, 0x7, 2, 0);
+
+ /* IN_PROG */
+ if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 1) {
+ /* Abort transaction */
+ REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xf, 3, 0);
+ /* IN_PROG */
+ if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Timeout aborting DDC transaction\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Clk SCL Devices */
+ REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xA, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Timeout starting SCL clock\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Clear FIFO */
+ REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x9, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Timeout clearing DDC fifo\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int hdmi_core_ddc_edid(struct hdmi_ip_data *ip_data,
+ u8 *pedid, int ext)
+{
+ void __iomem *base = hdmi_core_sys_base(ip_data);
+ u32 i;
+ char checksum;
+ u32 offset = 0;
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Timeout waiting DDC to be ready\n");
+ return -ETIMEDOUT;
+ }
+
+ if (ext % 2 != 0)
+ offset = 0x80;
+
+ /* Load Segment Address Register */
+ REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
+
+ /* Load Slave Address Register */
+ REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
+
+ /* Load Offset Address Register */
+ REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+
+ /* Load Byte Count */
+ REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
+
+ /* Set DDC_CMD */
+ if (ext)
+ REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
+ else
+ REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_BUS_LOW */
+ if (REG_GET(base, HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
+ pr_err("I2C Bus Low?\n");
+ return -EIO;
+ }
+ /* HDMI_CORE_DDC_STATUS_NO_ACK */
+ if (REG_GET(base, HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
+ pr_err("I2C No Ack\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < 0x80; ++i) {
+ int t;
+
+ /* IN_PROG */
+ if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 0) {
+ DSSERR("operation stopped when reading edid\n");
+ return -EIO;
+ }
+
+ t = 0;
+ /* FIFO_EMPTY */
+ while (REG_GET(base, HDMI_CORE_DDC_STATUS, 2, 2) == 1) {
+ if (t++ > 10000) {
+ DSSERR("timeout reading edid\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
+ }
+
+ checksum = 0;
+ for (i = 0; i < 0x80; ++i)
+ checksum += pedid[i];
+
+ if (checksum != 0) {
+ pr_err("E-EDID checksum failed!!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
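The SEGM/OFFSET programming above follows VESA E-DDC addressing: the segment pointer selects a 256-byte EDID segment (ext / 2) and odd-numbered blocks start at offset 0x80 within that segment. For example:

	ext = 1:  segment = 0, offset = 0x80  ->  EDID bytes 128..255
	ext = 3:  segment = 1, offset = 0x80  ->  EDID bytes 384..511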
+
+int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data,
+ u8 *edid, int len)
+{
+ int r, l;
+
+ if (len < 128)
+ return -EINVAL;
+
+ r = hdmi_core_ddc_init(ip_data);
+ if (r)
+ return r;
+
+ r = hdmi_core_ddc_edid(ip_data, edid, 0);
+ if (r)
+ return r;
+
+ l = 128;
+
+ if (len >= 128 * 2 && edid[0x7e] > 0) {
+ r = hdmi_core_ddc_edid(ip_data, edid + 0x80, 1);
+ if (r)
+ return r;
+ l += 128;
+ }
+
+ return l;
+}
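ti_hdmi_4xxx_read_edid() always fetches the 128-byte base block and, when byte 0x7e (the extension flag) is non-zero and the buffer allows it, one extension block as well; it returns the number of bytes read. A hypothetical caller therefore supplies at least 256 bytes:

	u8 edid[256];
	int len = ti_hdmi_4xxx_read_edid(ip_data, edid, sizeof(edid));
	if (len < 0)
		return len;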
+
+bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data)
+{
+ int r;
+
+ void __iomem *base = hdmi_core_sys_base(ip_data);
+
+ /* HPD */
+ r = REG_GET(base, HDMI_CORE_SYS_SYS_STAT, 1, 1);
+
+ return r == 1;
+}
+
+static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
+ struct hdmi_core_infoframe_avi *avi_cfg,
+ struct hdmi_core_packet_enable_repeat *repeat_cfg)
+{
+ pr_debug("Enter hdmi_core_init\n");
+
+ /* video core */
+ video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
+ video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
+ video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
+ video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
+ video_cfg->hdmi_dvi = HDMI_DVI;
+ video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
+
+ /* info frame */
+ avi_cfg->db1_format = 0;
+ avi_cfg->db1_active_info = 0;
+ avi_cfg->db1_bar_info_dv = 0;
+ avi_cfg->db1_scan_info = 0;
+ avi_cfg->db2_colorimetry = 0;
+ avi_cfg->db2_aspect_ratio = 0;
+ avi_cfg->db2_active_fmt_ar = 0;
+ avi_cfg->db3_itc = 0;
+ avi_cfg->db3_ec = 0;
+ avi_cfg->db3_q_range = 0;
+ avi_cfg->db3_nup_scaling = 0;
+ avi_cfg->db4_videocode = 0;
+ avi_cfg->db5_pixel_repeat = 0;
+ avi_cfg->db6_7_line_eoftop = 0;
+ avi_cfg->db8_9_line_sofbottom = 0;
+ avi_cfg->db10_11_pixel_eofleft = 0;
+ avi_cfg->db12_13_pixel_sofright = 0;
+
+ /* packet enable and repeat */
+ repeat_cfg->audio_pkt = 0;
+ repeat_cfg->audio_pkt_repeat = 0;
+ repeat_cfg->avi_infoframe = 0;
+ repeat_cfg->avi_infoframe_repeat = 0;
+ repeat_cfg->gen_cntrl_pkt = 0;
+ repeat_cfg->gen_cntrl_pkt_repeat = 0;
+ repeat_cfg->generic_pkt = 0;
+ repeat_cfg->generic_pkt_repeat = 0;
+}
+
+static void hdmi_core_powerdown_disable(struct hdmi_ip_data *ip_data)
+{
+ pr_debug("Enter hdmi_core_powerdown_disable\n");
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_CTRL1, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_release(struct hdmi_ip_data *ip_data)
+{
+ pr_debug("Enter hdmi_core_swreset_release\n");
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_assert(struct hdmi_ip_data *ip_data)
+{
+ pr_debug("Enter hdmi_core_swreset_assert\n");
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x1, 0, 0);
+}
+
+/* HDMI_CORE_VIDEO_CONFIG */
+static void hdmi_core_video_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_video_config *cfg)
+{
+ u32 r = 0;
+ void __iomem *core_sys_base = hdmi_core_sys_base(ip_data);
+
+ /* sys_ctrl1 default configuration not tunable */
+ r = hdmi_read_reg(core_sys_base, HDMI_CORE_CTRL1);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
+ hdmi_write_reg(core_sys_base, HDMI_CORE_CTRL1, r);
+
+ REG_FLD_MOD(core_sys_base,
+ HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
+
+ /* Vid_Mode */
+ r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE);
+
+ /* dither truncation configuration */
+ if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
+ r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
+ r = FLD_MOD(r, 1, 5, 5);
+ } else {
+ r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
+ r = FLD_MOD(r, 0, 5, 5);
+ }
+ hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE, r);
+
+ /* HDMI_Ctrl */
+ r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL);
+ r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
+ r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
+ r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL, r);
+
+ /* TMDS_CTRL */
+ REG_FLD_MOD(core_sys_base,
+ HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
+}
+
+static void hdmi_core_aux_infoframe_avi_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_infoframe_avi info_avi)
+{
+ u32 val;
+ char sum = 0, checksum = 0;
+ void __iomem *av_base = hdmi_av_base(ip_data);
+
+ sum += 0x82 + 0x002 + 0x00D;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_TYPE, 0x082);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_VERS, 0x002);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_LEN, 0x00D);
+
+ val = (info_avi.db1_format << 5) |
+ (info_avi.db1_active_info << 4) |
+ (info_avi.db1_bar_info_dv << 2) |
+ (info_avi.db1_scan_info);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(0), val);
+ sum += val;
+
+ val = (info_avi.db2_colorimetry << 6) |
+ (info_avi.db2_aspect_ratio << 4) |
+ (info_avi.db2_active_fmt_ar);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(1), val);
+ sum += val;
+
+ val = (info_avi.db3_itc << 7) |
+ (info_avi.db3_ec << 4) |
+ (info_avi.db3_q_range << 2) |
+ (info_avi.db3_nup_scaling);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(2), val);
+ sum += val;
+
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(3),
+ info_avi.db4_videocode);
+ sum += info_avi.db4_videocode;
+
+ val = info_avi.db5_pixel_repeat;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(4), val);
+ sum += val;
+
+ val = info_avi.db6_7_line_eoftop & 0x00FF;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(5), val);
+ sum += val;
+
+ val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(6), val);
+ sum += val;
+
+ val = info_avi.db8_9_line_sofbottom & 0x00FF;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(7), val);
+ sum += val;
+
+ val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(8), val);
+ sum += val;
+
+ val = info_avi.db10_11_pixel_eofleft & 0x00FF;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(9), val);
+ sum += val;
+
+ val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(10), val);
+ sum += val;
+
+ val = info_avi.db12_13_pixel_sofright & 0x00FF;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(11), val);
+ sum += val;
+
+ val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(12), val);
+ sum += val;
+
+ checksum = 0x100 - sum;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_CHSUM, checksum);
+}
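The checksum written above follows the CEA-861-D infoframe rule: the three header bytes (0x82, 0x02, 0x0D), the thirteen data bytes and the checksum itself must sum to zero modulo 256, hence checksum = 0x100 - sum, with the 8-bit variables providing the modulo for free:

	(0x82 + 0x02 + 0x0D + db[1] + ... + db[13] + checksum) % 256 == 0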
+
+static void hdmi_core_av_packet_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_packet_enable_repeat repeat_cfg)
+{
+ /* enable/repeat the infoframe */
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL1,
+ (repeat_cfg.audio_pkt << 5) |
+ (repeat_cfg.audio_pkt_repeat << 4) |
+ (repeat_cfg.avi_infoframe << 1) |
+ (repeat_cfg.avi_infoframe_repeat));
+
+ /* enable/repeat the packet */
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL2,
+ (repeat_cfg.gen_cntrl_pkt << 3) |
+ (repeat_cfg.gen_cntrl_pkt_repeat << 2) |
+ (repeat_cfg.generic_pkt << 1) |
+ (repeat_cfg.generic_pkt_repeat));
+}
+
+static void hdmi_wp_init(struct omap_video_timings *timings,
+ struct hdmi_video_format *video_fmt,
+ struct hdmi_video_interface *video_int)
+{
+ pr_debug("Enter hdmi_wp_init\n");
+
+ timings->hbp = 0;
+ timings->hfp = 0;
+ timings->hsw = 0;
+ timings->vbp = 0;
+ timings->vfp = 0;
+ timings->vsw = 0;
+
+ video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
+ video_fmt->y_res = 0;
+ video_fmt->x_res = 0;
+
+ video_int->vsp = 0;
+ video_int->hsp = 0;
+
+ video_int->interlacing = 0;
+ video_int->tm = 0; /* HDMI_TIMING_SLAVE */
+
+}
+
+void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31);
+}
+
+static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param)
+{
+ pr_debug("Enter hdmi_wp_video_init_format\n");
+
+ video_fmt->y_res = param->timings.timings.y_res;
+ video_fmt->x_res = param->timings.timings.x_res;
+
+ timings->hbp = param->timings.timings.hbp;
+ timings->hfp = param->timings.timings.hfp;
+ timings->hsw = param->timings.timings.hsw;
+ timings->vbp = param->timings.timings.vbp;
+ timings->vfp = param->timings.timings.vfp;
+ timings->vsw = param->timings.timings.vsw;
+}
+
+static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data,
+ struct hdmi_video_format *video_fmt)
+{
+ u32 l = 0;
+
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG,
+ video_fmt->packing_mode, 10, 8);
+
+ l |= FLD_VAL(video_fmt->y_res, 31, 16);
+ l |= FLD_VAL(video_fmt->x_res, 15, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_SIZE, l);
+}
+
+static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data,
+ struct hdmi_video_interface *video_int)
+{
+ u32 r;
+ pr_debug("Enter hdmi_wp_video_config_interface\n");
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG);
+ r = FLD_MOD(r, video_int->vsp, 7, 7);
+ r = FLD_MOD(r, video_int->hsp, 6, 6);
+ r = FLD_MOD(r, video_int->interlacing, 3, 3);
+ r = FLD_MOD(r, video_int->tm, 1, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r);
+}
+
+static void hdmi_wp_video_config_timing(struct hdmi_ip_data *ip_data,
+ struct omap_video_timings *timings)
+{
+ u32 timing_h = 0;
+ u32 timing_v = 0;
+
+ pr_debug("Enter hdmi_wp_video_config_timing\n");
+
+ timing_h |= FLD_VAL(timings->hbp, 31, 20);
+ timing_h |= FLD_VAL(timings->hfp, 19, 8);
+ timing_h |= FLD_VAL(timings->hsw, 7, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+ timing_v |= FLD_VAL(timings->vbp, 31, 20);
+ timing_v |= FLD_VAL(timings->vfp, 19, 8);
+ timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
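For instance, with typical 1080p horizontal blanking values hbp = 148, hfp = 88, hsw = 44 (illustrative numbers, not taken from this patch), the wrapper packs:

	timing_h = (148 << 20) | (88 << 8) | 44 = 0x0940582c

that is, back porch in bits 31:20, front porch in bits 19:8 and sync width in bits 7:0, matching the FLD_VAL() calls above.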
+
+void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
+{
+ /* HDMI */
+ struct omap_video_timings video_timing;
+ struct hdmi_video_format video_format;
+ struct hdmi_video_interface video_interface;
+ /* HDMI core */
+ struct hdmi_core_infoframe_avi avi_cfg;
+ struct hdmi_core_video_config v_core_cfg;
+ struct hdmi_core_packet_enable_repeat repeat_cfg;
+ struct hdmi_config *cfg = &ip_data->cfg;
+
+ hdmi_wp_init(&video_timing, &video_format,
+ &video_interface);
+
+ hdmi_core_init(&v_core_cfg,
+ &avi_cfg,
+ &repeat_cfg);
+
+ hdmi_wp_video_init_format(&video_format, &video_timing, cfg);
+
+ hdmi_wp_video_config_timing(ip_data, &video_timing);
+
+ /* video config */
+ video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
+
+ hdmi_wp_video_config_format(ip_data, &video_format);
+
+ video_interface.vsp = cfg->timings.vsync_pol;
+ video_interface.hsp = cfg->timings.hsync_pol;
+ video_interface.interlacing = cfg->interlace;
+ video_interface.tm = 1; /* HDMI_TIMING_MASTER_24BIT */
+
+ hdmi_wp_video_config_interface(ip_data, &video_interface);
+
+ /*
+ * configure core video part
+ * set software reset in the core
+ */
+ hdmi_core_swreset_assert(ip_data);
+
+ /* power down off */
+ hdmi_core_powerdown_disable(ip_data);
+
+ v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
+ v_core_cfg.hdmi_dvi = cfg->cm.mode;
+
+ hdmi_core_video_config(ip_data, &v_core_cfg);
+
+ /* release software reset in the core */
+ hdmi_core_swreset_release(ip_data);
+
+ /*
+ * configure packet
+ * info frame video see doc CEA861-D page 65
+ */
+ avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
+ avi_cfg.db1_active_info =
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+ avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
+ avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
+ avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
+ avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
+ avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
+ avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
+ avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
+ avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
+ avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
+ avi_cfg.db4_videocode = cfg->cm.code;
+ avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
+ avi_cfg.db6_7_line_eoftop = 0;
+ avi_cfg.db8_9_line_sofbottom = 0;
+ avi_cfg.db10_11_pixel_eofleft = 0;
+ avi_cfg.db12_13_pixel_sofright = 0;
+
+ hdmi_core_aux_infoframe_avi_config(ip_data, avi_cfg);
+
+ /* enable/repeat the infoframe */
+ repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
+ repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
+ /* wakeup */
+ repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
+ repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
+ hdmi_core_av_packet_config(ip_data, repeat_cfg);
+}
+
+void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(hdmi_wp_base(ip_data), r))
+
+ DUMPREG(HDMI_WP_REVISION);
+ DUMPREG(HDMI_WP_SYSCONFIG);
+ DUMPREG(HDMI_WP_IRQSTATUS_RAW);
+ DUMPREG(HDMI_WP_IRQSTATUS);
+ DUMPREG(HDMI_WP_PWR_CTRL);
+ DUMPREG(HDMI_WP_IRQENABLE_SET);
+ DUMPREG(HDMI_WP_VIDEO_CFG);
+ DUMPREG(HDMI_WP_VIDEO_SIZE);
+ DUMPREG(HDMI_WP_VIDEO_TIMING_H);
+ DUMPREG(HDMI_WP_VIDEO_TIMING_V);
+ DUMPREG(HDMI_WP_WP_CLK);
+ DUMPREG(HDMI_WP_AUDIO_CFG);
+ DUMPREG(HDMI_WP_AUDIO_CFG2);
+ DUMPREG(HDMI_WP_AUDIO_CTRL);
+ DUMPREG(HDMI_WP_AUDIO_DATA);
+}
+
+void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
+{
+#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(hdmi_pll_base(ip_data), r))
+
+ DUMPPLL(PLLCTRL_PLL_CONTROL);
+ DUMPPLL(PLLCTRL_PLL_STATUS);
+ DUMPPLL(PLLCTRL_PLL_GO);
+ DUMPPLL(PLLCTRL_CFG1);
+ DUMPPLL(PLLCTRL_CFG2);
+ DUMPPLL(PLLCTRL_CFG3);
+ DUMPPLL(PLLCTRL_CFG4);
+}
+
+void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
+{
+ int i;
+
+#define CORE_REG(i, name) name(i)
+#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(hdmi_pll_base(ip_data), r))
+#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
+ (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
+ hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r)))
+
+ DUMPCORE(HDMI_CORE_SYS_VND_IDL);
+ DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
+ DUMPCORE(HDMI_CORE_SYS_DEV_IDH);
+ DUMPCORE(HDMI_CORE_SYS_DEV_REV);
+ DUMPCORE(HDMI_CORE_SYS_SRST);
+ DUMPCORE(HDMI_CORE_CTRL1);
+ DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+ DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
+ DUMPCORE(HDMI_CORE_SYS_VID_MODE);
+ DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
+ DUMPCORE(HDMI_CORE_SYS_INTR1);
+ DUMPCORE(HDMI_CORE_SYS_INTR2);
+ DUMPCORE(HDMI_CORE_SYS_INTR3);
+ DUMPCORE(HDMI_CORE_SYS_INTR4);
+ DUMPCORE(HDMI_CORE_SYS_UMASK1);
+ DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
+ DUMPCORE(HDMI_CORE_SYS_DE_DLY);
+ DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
+ DUMPCORE(HDMI_CORE_SYS_DE_TOP);
+ DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
+ DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
+ DUMPCORE(HDMI_CORE_SYS_DE_LINL);
+ DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
+
+ DUMPCORE(HDMI_CORE_DDC_CMD);
+ DUMPCORE(HDMI_CORE_DDC_STATUS);
+ DUMPCORE(HDMI_CORE_DDC_ADDR);
+ DUMPCORE(HDMI_CORE_DDC_OFFSET);
+ DUMPCORE(HDMI_CORE_DDC_COUNT1);
+ DUMPCORE(HDMI_CORE_DDC_COUNT2);
+ DUMPCORE(HDMI_CORE_DDC_DATA);
+ DUMPCORE(HDMI_CORE_DDC_SEGM);
+
+ DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
+ DUMPCORE(HDMI_CORE_AV_DPD);
+ DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
+ DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
+ DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
+ DUMPCORE(HDMI_CORE_AV_AVI_VERS);
+ DUMPCORE(HDMI_CORE_AV_AVI_LEN);
+ DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
+
+ for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
+ DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE);
+
+ for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
+ DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE);
+
+ for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
+ DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE);
+
+ for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
+ DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE);
+
+ for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
+ DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE);
+
+ for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
+ DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE);
+
+ DUMPCORE(HDMI_CORE_AV_ACR_CTRL);
+ DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
+ DUMPCORE(HDMI_CORE_AV_N_SVAL1);
+ DUMPCORE(HDMI_CORE_AV_N_SVAL2);
+ DUMPCORE(HDMI_CORE_AV_N_SVAL3);
+ DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
+ DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
+ DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
+ DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
+ DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
+ DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
+ DUMPCORE(HDMI_CORE_AV_AUD_MODE);
+ DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
+ DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
+ DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
+ DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
+ DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
+ DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
+ DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
+ DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
+ DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
+ DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
+ DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
+ DUMPCORE(HDMI_CORE_AV_ASRC);
+ DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
+ DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
+ DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
+ DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+ DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+ DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+ DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
+ DUMPCORE(HDMI_CORE_AV_DPD);
+ DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
+ DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
+ DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
+ DUMPCORE(HDMI_CORE_AV_AVI_VERS);
+ DUMPCORE(HDMI_CORE_AV_AVI_LEN);
+ DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
+ DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
+ DUMPCORE(HDMI_CORE_AV_SPD_VERS);
+ DUMPCORE(HDMI_CORE_AV_SPD_LEN);
+ DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
+ DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
+ DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
+ DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
+ DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
+ DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
+ DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
+ DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
+ DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
+ DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
+ DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
+}
+
+void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
+{
+#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(hdmi_phy_base(ip_data), r))
+
+ DUMPPHY(HDMI_TXPHY_TX_CTRL);
+ DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
+ DUMPPHY(HDMI_TXPHY_POWER_CTRL);
+ DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
+}
+
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_format *aud_fmt)
+{
+ u32 r;
+
+ DSSDBG("Enter hdmi_wp_audio_config_format\n");
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG);
+ r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
+ r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
+ r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
+ r = FLD_MOD(r, aud_fmt->type, 4, 4);
+ r = FLD_MOD(r, aud_fmt->justification, 3, 3);
+ r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
+ r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
+ r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
+}
+
+void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_dma *aud_dma)
+{
+ u32 r;
+
+ DSSDBG("Enter hdmi_wp_audio_config_dma\n");
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2);
+ r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
+ r = FLD_MOD(r, aud_dma->block_size, 7, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2, r);
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL);
+ r = FLD_MOD(r, aud_dma->mode, 9, 9);
+ r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
+}
+
+void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_audio_config *cfg)
+{
+ u32 r;
+ void __iomem *av_base = hdmi_av_base(ip_data);
+
+ /* audio clock recovery parameters */
+ r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
+ r = FLD_MOD(r, cfg->use_mclk, 2, 2);
+ r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
+ r = FLD_MOD(r, cfg->cts_mode, 0, 0);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_ACR_CTRL, r);
+
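+	/*
+	 * N and CTS are 20-bit values; they are written to the core
+	 * registers eight bits at a time, least significant byte first.
+	 */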
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
+
+ if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
+ REG_FLD_MOD(av_base,
+ HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
+ REG_FLD_MOD(av_base,
+ HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
+ } else {
+		/*
+		 * The HDMI IP uses this configuration to divide MCLK and
+		 * update the CTS value.
+		 */
+ REG_FLD_MOD(av_base,
+ HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+
+ /* Configure clock for audio packets */
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
+ cfg->aud_par_busclk, 7, 0);
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
+ (cfg->aud_par_busclk >> 8), 7, 0);
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
+ (cfg->aud_par_busclk >> 16), 7, 0);
+ }
+
+ /* Override of SPDIF sample frequency with value in I2S_CHST4 */
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
+ cfg->fs_override, 1, 1);
+
+ /* I2S parameters */
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4,
+ cfg->freq_sample, 3, 0);
+
+ r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
+ r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
+ r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
+ r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
+ r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
+ r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
+ r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
+ r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
+ r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
+
+ r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
+ r = FLD_MOD(r, cfg->freq_sample, 7, 4);
+ r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
+ r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
+
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
+ cfg->i2s_cfg.in_length_bits, 3, 0);
+
+ /* Audio channels and mode parameters */
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
+ r = hdmi_read_reg(av_base, HDMI_CORE_AV_AUD_MODE);
+ r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
+ r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
+ r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
+ r = FLD_MOD(r, cfg->en_spdif, 1, 1);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
+}
+
+void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_infoframe_audio *info_aud)
+{
+ u8 val;
+ u8 sum = 0, checksum = 0;
+ void __iomem *av_base = hdmi_av_base(ip_data);
+
+	/*
+	 * Set the audio infoframe type, version and length as
+	 * described in Section 8.2.2 of the HDMI 1.4a specification.
+	 * The checksum calculation is defined in Section 5.3.5.
+	 */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_TYPE, 0x84);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_VERS, 0x01);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
+ sum += 0x84 + 0x001 + 0x00a;
+
+ val = (info_aud->db1_coding_type << 4)
+ | (info_aud->db1_channel_count - 1);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val);
+ sum += val;
+
+ val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val);
+ sum += val;
+
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
+
+ val = info_aud->db4_channel_alloc;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val);
+ sum += val;
+
+ val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val);
+ sum += val;
+
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
+
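+	/*
+	 * The infoframe checksum is chosen so that the byte sum of the
+	 * header (type, version, length), the data bytes and the
+	 * checksum itself is zero modulo 256.
+	 */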
+ checksum = 0x100 - sum;
+ hdmi_write_reg(av_base,
+ HDMI_CORE_AV_AUDIO_CHSUM, checksum);
+
+ /*
+ * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
+ * is available.
+ */
+}
+
+int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
+ u32 sample_freq, u32 *n, u32 *cts)
+{
+ u32 r;
+ u32 deep_color = 0;
+ u32 pclk = ip_data->cfg.timings.timings.pixel_clock;
+
+ if (n == NULL || cts == NULL)
+ return -EINVAL;
+	/*
+	 * Obtain the current deep color configuration. This is needed
+	 * to calculate the TMDS clock based on the pixel clock.
+	 */
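+	/* deep_color is the TMDS/pixel clock ratio scaled by 100 */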
+ r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0);
+ switch (r) {
+ case 1: /* No deep color selected */
+ deep_color = 100;
+ break;
+ case 2: /* 10-bit deep color selected */
+ deep_color = 125;
+ break;
+ case 3: /* 12-bit deep color selected */
+ deep_color = 150;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (sample_freq) {
+ case 32000:
+ if ((deep_color == 125) && ((pclk == 54054)
+ || (pclk == 74250)))
+ *n = 8192;
+ else
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ if ((deep_color == 125) && ((pclk == 54054)
+ || (pclk == 74250)))
+ *n = 8192;
+ else
+ *n = 6144;
+ break;
+ default:
+ *n = 0;
+ return -EINVAL;
+ }
+
+ /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+ *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
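+	/*
+	 * For example, with no deep color (deep_color = 100), a 74250 kHz
+	 * pixel clock and a 48000 Hz sample rate (n = 6144):
+	 * cts = 74250 * (6144 / 128) * 100 / (48000 / 10) = 74250,
+	 * which matches the CTS value recommended by the HDMI specification.
+	 */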
+
+ return 0;
+}
+
+int hdmi_audio_trigger(struct hdmi_ip_data *ip_data,
+ struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ int err = 0;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_MODE, 1, 0, 0);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, 1, 31, 31);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, 1, 30, 30);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_MODE, 0, 0, 0);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, 0, 30, 30);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, 0, 31, 31);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+#endif
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index c885f9cb065..204095632d2 100644
--- a/drivers/video/omap2/dss/hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -1,7 +1,7 @@
/*
- * hdmi.h
+ * ti_hdmi_4xxx_ip.h
*
- * HDMI driver definition for TI OMAP4 processors.
+ * HDMI header definition for TI DM81xx, DM38xx, OMAP4 and similar processors.
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
*
@@ -18,202 +18,177 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _OMAP4_DSS_HDMI_H_
-#define _OMAP4_DSS_HDMI_H_
+#ifndef _HDMI_TI_4xxx_H_
+#define _HDMI_TI_4xxx_H_
#include <linux/string.h>
#include <video/omapdss.h>
-
-#define HDMI_WP 0x0
-#define HDMI_CORE_SYS 0x400
-#define HDMI_CORE_AV 0x900
-#define HDMI_PLLCTRL 0x200
-#define HDMI_PHY 0x300
-
-struct hdmi_reg { u16 idx; };
-
-#define HDMI_REG(idx) ((const struct hdmi_reg) { idx })
+#include "ti_hdmi.h"
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#endif
/* HDMI Wrapper */
-#define HDMI_WP_REG(idx) HDMI_REG(HDMI_WP + idx)
-
-#define HDMI_WP_REVISION HDMI_WP_REG(0x0)
-#define HDMI_WP_SYSCONFIG HDMI_WP_REG(0x10)
-#define HDMI_WP_IRQSTATUS_RAW HDMI_WP_REG(0x24)
-#define HDMI_WP_IRQSTATUS HDMI_WP_REG(0x28)
-#define HDMI_WP_PWR_CTRL HDMI_WP_REG(0x40)
-#define HDMI_WP_IRQENABLE_SET HDMI_WP_REG(0x2C)
-#define HDMI_WP_VIDEO_CFG HDMI_WP_REG(0x50)
-#define HDMI_WP_VIDEO_SIZE HDMI_WP_REG(0x60)
-#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68)
-#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C)
-#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70)
-#define HDMI_WP_AUDIO_CFG HDMI_WP_REG(0x80)
-#define HDMI_WP_AUDIO_CFG2 HDMI_WP_REG(0x84)
-#define HDMI_WP_AUDIO_CTRL HDMI_WP_REG(0x88)
-#define HDMI_WP_AUDIO_DATA HDMI_WP_REG(0x8C)
+
+#define HDMI_WP_REVISION 0x0
+#define HDMI_WP_SYSCONFIG 0x10
+#define HDMI_WP_IRQSTATUS_RAW 0x24
+#define HDMI_WP_IRQSTATUS 0x28
+#define HDMI_WP_PWR_CTRL 0x40
+#define HDMI_WP_IRQENABLE_SET 0x2C
+#define HDMI_WP_VIDEO_CFG 0x50
+#define HDMI_WP_VIDEO_SIZE 0x60
+#define HDMI_WP_VIDEO_TIMING_H 0x68
+#define HDMI_WP_VIDEO_TIMING_V 0x6C
+#define HDMI_WP_WP_CLK 0x70
+#define HDMI_WP_AUDIO_CFG 0x80
+#define HDMI_WP_AUDIO_CFG2 0x84
+#define HDMI_WP_AUDIO_CTRL 0x88
+#define HDMI_WP_AUDIO_DATA 0x8C
/* HDMI IP Core System */
-#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx)
-
-#define HDMI_CORE_SYS_VND_IDL HDMI_CORE_SYS_REG(0x0)
-#define HDMI_CORE_SYS_DEV_IDL HDMI_CORE_SYS_REG(0x8)
-#define HDMI_CORE_SYS_DEV_IDH HDMI_CORE_SYS_REG(0xC)
-#define HDMI_CORE_SYS_DEV_REV HDMI_CORE_SYS_REG(0x10)
-#define HDMI_CORE_SYS_SRST HDMI_CORE_SYS_REG(0x14)
-#define HDMI_CORE_CTRL1 HDMI_CORE_SYS_REG(0x20)
-#define HDMI_CORE_SYS_SYS_STAT HDMI_CORE_SYS_REG(0x24)
-#define HDMI_CORE_SYS_VID_ACEN HDMI_CORE_SYS_REG(0x124)
-#define HDMI_CORE_SYS_VID_MODE HDMI_CORE_SYS_REG(0x128)
-#define HDMI_CORE_SYS_INTR_STATE HDMI_CORE_SYS_REG(0x1C0)
-#define HDMI_CORE_SYS_INTR1 HDMI_CORE_SYS_REG(0x1C4)
-#define HDMI_CORE_SYS_INTR2 HDMI_CORE_SYS_REG(0x1C8)
-#define HDMI_CORE_SYS_INTR3 HDMI_CORE_SYS_REG(0x1CC)
-#define HDMI_CORE_SYS_INTR4 HDMI_CORE_SYS_REG(0x1D0)
-#define HDMI_CORE_SYS_UMASK1 HDMI_CORE_SYS_REG(0x1D4)
-#define HDMI_CORE_SYS_TMDS_CTRL HDMI_CORE_SYS_REG(0x208)
-#define HDMI_CORE_SYS_DE_DLY HDMI_CORE_SYS_REG(0xC8)
-#define HDMI_CORE_SYS_DE_CTRL HDMI_CORE_SYS_REG(0xCC)
-#define HDMI_CORE_SYS_DE_TOP HDMI_CORE_SYS_REG(0xD0)
-#define HDMI_CORE_SYS_DE_CNTL HDMI_CORE_SYS_REG(0xD8)
-#define HDMI_CORE_SYS_DE_CNTH HDMI_CORE_SYS_REG(0xDC)
-#define HDMI_CORE_SYS_DE_LINL HDMI_CORE_SYS_REG(0xE0)
-#define HDMI_CORE_SYS_DE_LINH_1 HDMI_CORE_SYS_REG(0xE4)
+
+#define HDMI_CORE_SYS_VND_IDL 0x0
+#define HDMI_CORE_SYS_DEV_IDL 0x8
+#define HDMI_CORE_SYS_DEV_IDH 0xC
+#define HDMI_CORE_SYS_DEV_REV 0x10
+#define HDMI_CORE_SYS_SRST 0x14
+#define HDMI_CORE_CTRL1 0x20
+#define HDMI_CORE_SYS_SYS_STAT 0x24
+#define HDMI_CORE_SYS_VID_ACEN 0x124
+#define HDMI_CORE_SYS_VID_MODE 0x128
+#define HDMI_CORE_SYS_INTR_STATE 0x1C0
+#define HDMI_CORE_SYS_INTR1 0x1C4
+#define HDMI_CORE_SYS_INTR2 0x1C8
+#define HDMI_CORE_SYS_INTR3 0x1CC
+#define HDMI_CORE_SYS_INTR4 0x1D0
+#define HDMI_CORE_SYS_UMASK1 0x1D4
+#define HDMI_CORE_SYS_TMDS_CTRL 0x208
+#define HDMI_CORE_SYS_DE_DLY 0xC8
+#define HDMI_CORE_SYS_DE_CTRL 0xCC
+#define HDMI_CORE_SYS_DE_TOP 0xD0
+#define HDMI_CORE_SYS_DE_CNTL 0xD8
+#define HDMI_CORE_SYS_DE_CNTH 0xDC
+#define HDMI_CORE_SYS_DE_LINL 0xE0
+#define HDMI_CORE_SYS_DE_LINH_1 0xE4
#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
/* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_CMD HDMI_CORE_SYS_REG(0x3CC)
-#define HDMI_CORE_DDC_STATUS HDMI_CORE_SYS_REG(0x3C8)
-#define HDMI_CORE_DDC_ADDR HDMI_CORE_SYS_REG(0x3B4)
-#define HDMI_CORE_DDC_OFFSET HDMI_CORE_SYS_REG(0x3BC)
-#define HDMI_CORE_DDC_COUNT1 HDMI_CORE_SYS_REG(0x3C0)
-#define HDMI_CORE_DDC_COUNT2 HDMI_CORE_SYS_REG(0x3C4)
-#define HDMI_CORE_DDC_DATA HDMI_CORE_SYS_REG(0x3D0)
-#define HDMI_CORE_DDC_SEGM HDMI_CORE_SYS_REG(0x3B8)
+#define HDMI_CORE_DDC_CMD 0x3CC
+#define HDMI_CORE_DDC_STATUS 0x3C8
+#define HDMI_CORE_DDC_ADDR 0x3B4
+#define HDMI_CORE_DDC_OFFSET 0x3BC
+#define HDMI_CORE_DDC_COUNT1 0x3C0
+#define HDMI_CORE_DDC_COUNT2 0x3C4
+#define HDMI_CORE_DDC_DATA 0x3D0
+#define HDMI_CORE_DDC_SEGM 0x3B8
/* HDMI IP Core Audio Video */
-#define HDMI_CORE_AV_REG(idx) HDMI_REG(HDMI_CORE_AV + idx)
-
-#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
-#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
-#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
-#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
-#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
-#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
-#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
-#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
-#define HDMI_CORE_AV_AVI_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x110)
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15)
-#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190)
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
-#define HDMI_CORE_AV_AUD_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x210)
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS HDMI_CORE_AV_REG(10)
-#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290)
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
-#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300)
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
-#define HDMI_CORE_AV_GEN2_DBYTE HDMI_CORE_AV_REG(0x380)
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
-#define HDMI_CORE_AV_ACR_CTRL HDMI_CORE_AV_REG(0x4)
-#define HDMI_CORE_AV_FREQ_SVAL HDMI_CORE_AV_REG(0x8)
-#define HDMI_CORE_AV_N_SVAL1 HDMI_CORE_AV_REG(0xC)
-#define HDMI_CORE_AV_N_SVAL2 HDMI_CORE_AV_REG(0x10)
-#define HDMI_CORE_AV_N_SVAL3 HDMI_CORE_AV_REG(0x14)
-#define HDMI_CORE_AV_CTS_SVAL1 HDMI_CORE_AV_REG(0x18)
-#define HDMI_CORE_AV_CTS_SVAL2 HDMI_CORE_AV_REG(0x1C)
-#define HDMI_CORE_AV_CTS_SVAL3 HDMI_CORE_AV_REG(0x20)
-#define HDMI_CORE_AV_CTS_HVAL1 HDMI_CORE_AV_REG(0x24)
-#define HDMI_CORE_AV_CTS_HVAL2 HDMI_CORE_AV_REG(0x28)
-#define HDMI_CORE_AV_CTS_HVAL3 HDMI_CORE_AV_REG(0x2C)
-#define HDMI_CORE_AV_AUD_MODE HDMI_CORE_AV_REG(0x50)
-#define HDMI_CORE_AV_SPDIF_CTRL HDMI_CORE_AV_REG(0x54)
-#define HDMI_CORE_AV_HW_SPDIF_FS HDMI_CORE_AV_REG(0x60)
-#define HDMI_CORE_AV_SWAP_I2S HDMI_CORE_AV_REG(0x64)
-#define HDMI_CORE_AV_SPDIF_ERTH HDMI_CORE_AV_REG(0x6C)
-#define HDMI_CORE_AV_I2S_IN_MAP HDMI_CORE_AV_REG(0x70)
-#define HDMI_CORE_AV_I2S_IN_CTRL HDMI_CORE_AV_REG(0x74)
-#define HDMI_CORE_AV_I2S_CHST0 HDMI_CORE_AV_REG(0x78)
-#define HDMI_CORE_AV_I2S_CHST1 HDMI_CORE_AV_REG(0x7C)
-#define HDMI_CORE_AV_I2S_CHST2 HDMI_CORE_AV_REG(0x80)
-#define HDMI_CORE_AV_I2S_CHST4 HDMI_CORE_AV_REG(0x84)
-#define HDMI_CORE_AV_I2S_CHST5 HDMI_CORE_AV_REG(0x88)
-#define HDMI_CORE_AV_ASRC HDMI_CORE_AV_REG(0x8C)
-#define HDMI_CORE_AV_I2S_IN_LEN HDMI_CORE_AV_REG(0x90)
-#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
-#define HDMI_CORE_AV_AUDO_TXSTAT HDMI_CORE_AV_REG(0xC0)
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1 HDMI_CORE_AV_REG(0xCC)
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2 HDMI_CORE_AV_REG(0xD0)
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3 HDMI_CORE_AV_REG(0xD4)
-#define HDMI_CORE_AV_TEST_TXCTRL HDMI_CORE_AV_REG(0xF0)
-#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
-#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
-#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
-#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
-#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
-#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
-#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
-#define HDMI_CORE_AV_SPD_TYPE HDMI_CORE_AV_REG(0x180)
-#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184)
-#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188)
-#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C)
-#define HDMI_CORE_AV_AUDIO_TYPE HDMI_CORE_AV_REG(0x200)
-#define HDMI_CORE_AV_AUDIO_VERS HDMI_CORE_AV_REG(0x204)
-#define HDMI_CORE_AV_AUDIO_LEN HDMI_CORE_AV_REG(0x208)
-#define HDMI_CORE_AV_AUDIO_CHSUM HDMI_CORE_AV_REG(0x20C)
-#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280)
-#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284)
-#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288)
-#define HDMI_CORE_AV_MPEG_CHSUM HDMI_CORE_AV_REG(0x28C)
-#define HDMI_CORE_AV_CP_BYTE1 HDMI_CORE_AV_REG(0x37C)
-#define HDMI_CORE_AV_CEC_ADDR_ID HDMI_CORE_AV_REG(0x3FC)
+
+#define HDMI_CORE_AV_HDMI_CTRL 0xBC
+#define HDMI_CORE_AV_DPD 0xF4
+#define HDMI_CORE_AV_PB_CTRL1 0xF8
+#define HDMI_CORE_AV_PB_CTRL2 0xFC
+#define HDMI_CORE_AV_AVI_TYPE 0x100
+#define HDMI_CORE_AV_AVI_VERS 0x104
+#define HDMI_CORE_AV_AVI_LEN 0x108
+#define HDMI_CORE_AV_AVI_CHSUM 0x10C
+#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
+#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
+#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
+#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
+#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
+#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
+#define HDMI_CORE_AV_ACR_CTRL 0x4
+#define HDMI_CORE_AV_FREQ_SVAL 0x8
+#define HDMI_CORE_AV_N_SVAL1 0xC
+#define HDMI_CORE_AV_N_SVAL2 0x10
+#define HDMI_CORE_AV_N_SVAL3 0x14
+#define HDMI_CORE_AV_CTS_SVAL1 0x18
+#define HDMI_CORE_AV_CTS_SVAL2 0x1C
+#define HDMI_CORE_AV_CTS_SVAL3 0x20
+#define HDMI_CORE_AV_CTS_HVAL1 0x24
+#define HDMI_CORE_AV_CTS_HVAL2 0x28
+#define HDMI_CORE_AV_CTS_HVAL3 0x2C
+#define HDMI_CORE_AV_AUD_MODE 0x50
+#define HDMI_CORE_AV_SPDIF_CTRL 0x54
+#define HDMI_CORE_AV_HW_SPDIF_FS 0x60
+#define HDMI_CORE_AV_SWAP_I2S 0x64
+#define HDMI_CORE_AV_SPDIF_ERTH 0x6C
+#define HDMI_CORE_AV_I2S_IN_MAP 0x70
+#define HDMI_CORE_AV_I2S_IN_CTRL 0x74
+#define HDMI_CORE_AV_I2S_CHST0 0x78
+#define HDMI_CORE_AV_I2S_CHST1 0x7C
+#define HDMI_CORE_AV_I2S_CHST2 0x80
+#define HDMI_CORE_AV_I2S_CHST4 0x84
+#define HDMI_CORE_AV_I2S_CHST5 0x88
+#define HDMI_CORE_AV_ASRC 0x8C
+#define HDMI_CORE_AV_I2S_IN_LEN 0x90
+#define HDMI_CORE_AV_HDMI_CTRL 0xBC
+#define HDMI_CORE_AV_AUDO_TXSTAT 0xC0
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1 0xCC
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2 0xD0
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3 0xD4
+#define HDMI_CORE_AV_TEST_TXCTRL 0xF0
+#define HDMI_CORE_AV_DPD 0xF4
+#define HDMI_CORE_AV_PB_CTRL1 0xF8
+#define HDMI_CORE_AV_PB_CTRL2 0xFC
+#define HDMI_CORE_AV_AVI_TYPE 0x100
+#define HDMI_CORE_AV_AVI_VERS 0x104
+#define HDMI_CORE_AV_AVI_LEN 0x108
+#define HDMI_CORE_AV_AVI_CHSUM 0x10C
+#define HDMI_CORE_AV_SPD_TYPE 0x180
+#define HDMI_CORE_AV_SPD_VERS 0x184
+#define HDMI_CORE_AV_SPD_LEN 0x188
+#define HDMI_CORE_AV_SPD_CHSUM 0x18C
+#define HDMI_CORE_AV_AUDIO_TYPE 0x200
+#define HDMI_CORE_AV_AUDIO_VERS 0x204
+#define HDMI_CORE_AV_AUDIO_LEN 0x208
+#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C
+#define HDMI_CORE_AV_MPEG_TYPE 0x280
+#define HDMI_CORE_AV_MPEG_VERS 0x284
+#define HDMI_CORE_AV_MPEG_LEN 0x288
+#define HDMI_CORE_AV_MPEG_CHSUM 0x28C
+#define HDMI_CORE_AV_CP_BYTE1 0x37C
+#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC
#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
/* PLL */
-#define HDMI_PLL_REG(idx) HDMI_REG(HDMI_PLLCTRL + idx)
-#define PLLCTRL_PLL_CONTROL HDMI_PLL_REG(0x0)
-#define PLLCTRL_PLL_STATUS HDMI_PLL_REG(0x4)
-#define PLLCTRL_PLL_GO HDMI_PLL_REG(0x8)
-#define PLLCTRL_CFG1 HDMI_PLL_REG(0xC)
-#define PLLCTRL_CFG2 HDMI_PLL_REG(0x10)
-#define PLLCTRL_CFG3 HDMI_PLL_REG(0x14)
-#define PLLCTRL_CFG4 HDMI_PLL_REG(0x20)
+#define PLLCTRL_PLL_CONTROL 0x0
+#define PLLCTRL_PLL_STATUS 0x4
+#define PLLCTRL_PLL_GO 0x8
+#define PLLCTRL_CFG1 0xC
+#define PLLCTRL_CFG2 0x10
+#define PLLCTRL_CFG3 0x14
+#define PLLCTRL_CFG4 0x20
/* HDMI PHY */
-#define HDMI_PHY_REG(idx) HDMI_REG(HDMI_PHY + idx)
-
-#define HDMI_TXPHY_TX_CTRL HDMI_PHY_REG(0x0)
-#define HDMI_TXPHY_DIGITAL_CTRL HDMI_PHY_REG(0x4)
-#define HDMI_TXPHY_POWER_CTRL HDMI_PHY_REG(0x8)
-#define HDMI_TXPHY_PAD_CFG_CTRL HDMI_PHY_REG(0xC)
-
-/* HDMI EDID Length */
-#define HDMI_EDID_MAX_LENGTH 256
-#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
-#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
-#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
-#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
-#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
-#define OMAP_HDMI_TIMINGS_NB 34
+#define HDMI_TXPHY_TX_CTRL 0x0
+#define HDMI_TXPHY_DIGITAL_CTRL 0x4
+#define HDMI_TXPHY_POWER_CTRL 0x8
+#define HDMI_TXPHY_PAD_CFG_CTRL 0xC
-#define REG_FLD_MOD(idx, val, start, end) \
- hdmi_write_reg(idx, FLD_MOD(hdmi_read_reg(idx), val, start, end))
-#define REG_GET(idx, start, end) \
- FLD_GET(hdmi_read_reg(idx), start, end)
-
-/* HDMI timing structure */
-struct hdmi_timings {
- struct omap_video_timings timings;
- int vsync_pol;
- int hsync_pol;
-};
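+/*
+ * Register access helpers: 'idx' is an offset relative to a mapped 'base',
+ * e.g. REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, 1, 31, 31)
+ * sets bit 31 of HDMI_WP_AUDIO_CTRL.
+ */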
+#define REG_FLD_MOD(base, idx, val, start, end) \
+ hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
+ val, start, end))
+#define REG_GET(base, idx, start, end) \
+ FLD_GET(hdmi_read_reg(base, idx), start, end)
enum hdmi_phy_pwr {
HDMI_PHYPWRCMD_OFF = 0,
@@ -221,20 +196,6 @@ enum hdmi_phy_pwr {
HDMI_PHYPWRCMD_TXON = 2
};
-enum hdmi_pll_pwr {
- HDMI_PLLPWRCMD_ALLOFF = 0,
- HDMI_PLLPWRCMD_PLLONLY = 1,
- HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
- HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
-};
-
-enum hdmi_clk_refsel {
- HDMI_REFSEL_PCLK = 0,
- HDMI_REFSEL_REF1 = 1,
- HDMI_REFSEL_REF2 = 2,
- HDMI_REFSEL_SYSCLK = 3
-};
-
enum hdmi_core_inputbus_width {
HDMI_INPUT_8BIT = 0,
HDMI_INPUT_10BIT = 1,
@@ -263,11 +224,6 @@ enum hdmi_core_packet_mode {
HDMI_PACKETMODE48BITPERPIXEL = 7
};
-enum hdmi_core_hdmi_dvi {
- HDMI_DVI = 0,
- HDMI_HDMI = 1
-};
-
enum hdmi_core_tclkselclkmult {
HDMI_FPLL05IDCK = 0,
HDMI_FPLL10IDCK = 1,
@@ -495,40 +451,40 @@ struct hdmi_core_video_config {
* details about infoframe databytes
*/
struct hdmi_core_infoframe_avi {
+ /* Y0, Y1 rgb,yCbCr */
u8 db1_format;
- /* Y0, Y1 rgb,yCbCr */
+ /* A0 Active information Present */
u8 db1_active_info;
- /* A0 Active information Present */
+ /* B0, B1 Bar info data valid */
u8 db1_bar_info_dv;
- /* B0, B1 Bar info data valid */
+ /* S0, S1 scan information */
u8 db1_scan_info;
- /* S0, S1 scan information */
+ /* C0, C1 colorimetry */
u8 db2_colorimetry;
- /* C0, C1 colorimetry */
+ /* M0, M1 Aspect ratio (4:3, 16:9) */
u8 db2_aspect_ratio;
- /* M0, M1 Aspect ratio (4:3, 16:9) */
+ /* R0...R3 Active format aspect ratio */
u8 db2_active_fmt_ar;
- /* R0...R3 Active format aspect ratio */
+ /* ITC IT content. */
u8 db3_itc;
- /* ITC IT content. */
+ /* EC0, EC1, EC2 Extended colorimetry */
u8 db3_ec;
- /* EC0, EC1, EC2 Extended colorimetry */
+ /* Q1, Q0 Quantization range */
u8 db3_q_range;
- /* Q1, Q0 Quantization range */
+ /* SC1, SC0 Non-uniform picture scaling */
u8 db3_nup_scaling;
- /* SC1, SC0 Non-uniform picture scaling */
+ /* VIC0..6 Video format identification */
u8 db4_videocode;
- /* VIC0..6 Video format identification */
+ /* PR0..PR3 Pixel repetition factor */
u8 db5_pixel_repeat;
- /* PR0..PR3 Pixel repetition factor */
+ /* Line number end of top bar */
u16 db6_7_line_eoftop;
- /* Line number end of top bar */
+ /* Line number start of bottom bar */
u16 db8_9_line_sofbottom;
- /* Line number start of bottom bar */
+ /* Pixel number end of left bar */
u16 db10_11_pixel_eofleft;
- /* Pixel number end of left bar */
+ /* Pixel number start of right bar */
u16 db12_13_pixel_sofright;
- /* Pixel number start of right bar */
};
/*
* Refer to section 8.2 in HDMI 1.3 specification for
@@ -568,17 +524,6 @@ struct hdmi_video_interface {
int tm; /* Timing mode */
};
-struct hdmi_cm {
- int code;
- int mode;
-};
-
-struct hdmi_config {
- struct hdmi_timings timings;
- u16 interlace;
- struct hdmi_cm cm;
-};
-
struct hdmi_audio_format {
enum hdmi_stereo_channels stereo_channels;
u8 active_chnnls_msk;
@@ -628,4 +573,21 @@ struct hdmi_core_audio_config {
bool en_parallel_aud_input;
bool en_spdif;
};
+
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+int hdmi_audio_trigger(struct hdmi_ip_data *ip_data,
+ struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai);
+int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
+ u32 sample_freq, u32 *n, u32 *cts);
+void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_infoframe_audio *info_aud);
+void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_audio_config *cfg);
+void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_dma *aud_dma);
+void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_format *aud_fmt);
+#endif
#endif
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 173c66430da..7533458ba4d 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -295,7 +295,6 @@ static struct {
u32 wss_data;
struct regulator *vdda_dac_reg;
- struct clk *tv_clk;
struct clk *tv_dac_clk;
} venc;
@@ -464,9 +463,11 @@ static void venc_power_off(struct omap_dss_device *dssdev)
regulator_disable(venc.vdda_dac_reg);
}
-
-
-
+unsigned long venc_get_pixel_clock(void)
+{
+	/* VENC pixel clock is 13.5 MHz, returned in Hz */
+ return 13500000;
+}
/* driver */
static int venc_panel_probe(struct omap_dss_device *dssdev)
@@ -732,22 +733,10 @@ static int venc_get_clocks(struct platform_device *pdev)
{
struct clk *clk;
- clk = clk_get(&pdev->dev, "fck");
- if (IS_ERR(clk)) {
- DSSERR("can't get fck\n");
- return PTR_ERR(clk);
- }
-
- venc.tv_clk = clk;
-
if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
- if (cpu_is_omap34xx() || cpu_is_omap3630())
- clk = clk_get(&pdev->dev, "dss_96m_fck");
- else
- clk = clk_get(&pdev->dev, "tv_dac_clk");
+ clk = clk_get(&pdev->dev, "tv_dac_clk");
if (IS_ERR(clk)) {
DSSERR("can't get tv_dac_clk\n");
- clk_put(venc.tv_clk);
return PTR_ERR(clk);
}
} else {
@@ -761,8 +750,6 @@ static int venc_get_clocks(struct platform_device *pdev)
static void venc_put_clocks(void)
{
- if (venc.tv_clk)
- clk_put(venc.tv_clk);
if (venc.tv_dac_clk)
clk_put(venc.tv_dac_clk);
}
@@ -838,7 +825,6 @@ static int venc_runtime_suspend(struct device *dev)
{
if (venc.tv_dac_clk)
clk_disable(venc.tv_dac_clk);
- clk_disable(venc.tv_clk);
dispc_runtime_put();
dss_runtime_put();
@@ -858,7 +844,6 @@ static int venc_runtime_resume(struct device *dev)
if (r < 0)
goto err_get_dispc;
- clk_enable(venc.tv_clk);
if (venc.tv_dac_clk)
clk_enable(venc.tv_dac_clk);
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index aa33386c81f..83d3fe7ec9a 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -1,5 +1,5 @@
menuconfig FB_OMAP2
- tristate "OMAP2+ frame buffer support (EXPERIMENTAL)"
+ tristate "OMAP2+ frame buffer support"
depends on FB && OMAP2_DSS
select OMAP2_VRAM
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6b1ac23dbbd..df7bcce5b10 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -27,6 +27,7 @@
#include <linux/mm.h>
#include <linux/omapfb.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <video/omapdss.h>
#include <plat/vrfb.h>
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 602b71a92d3..70aa47de714 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -808,19 +808,15 @@ static unsigned calc_rotation_offset_vrfb(const struct fb_var_screeninfo *var,
static void omapfb_calc_addr(const struct omapfb_info *ofbi,
const struct fb_var_screeninfo *var,
const struct fb_fix_screeninfo *fix,
- int rotation, u32 *paddr, void __iomem **vaddr)
+ int rotation, u32 *paddr)
{
u32 data_start_p;
- void __iomem *data_start_v;
int offset;
- if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation);
- data_start_v = NULL;
- } else {
+ else
data_start_p = omapfb_get_region_paddr(ofbi);
- data_start_v = omapfb_get_region_vaddr(ofbi);
- }
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
offset = calc_rotation_offset_vrfb(var, fix, rotation);
@@ -828,16 +824,14 @@ static void omapfb_calc_addr(const struct omapfb_info *ofbi,
offset = calc_rotation_offset_dma(var, fix, rotation);
data_start_p += offset;
- data_start_v += offset;
if (offset)
DBG("offset %d, %d = %d\n",
var->xoffset, var->yoffset, offset);
- DBG("paddr %x, vaddr %p\n", data_start_p, data_start_v);
+ DBG("paddr %x\n", data_start_p);
*paddr = data_start_p;
- *vaddr = data_start_v;
}
/* setup overlay according to the fb */
@@ -850,7 +844,6 @@ int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
struct fb_fix_screeninfo *fix = &fbi->fix;
enum omap_color_mode mode = 0;
u32 data_start_p = 0;
- void __iomem *data_start_v = NULL;
struct omap_overlay_info info;
int xres, yres;
int screen_width;
@@ -880,8 +873,7 @@ int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
}
if (ofbi->region->size)
- omapfb_calc_addr(ofbi, var, fix, rotation,
- &data_start_p, &data_start_v);
+ omapfb_calc_addr(ofbi, var, fix, rotation, &data_start_p);
r = fb_mode_to_dss_mode(var, &mode);
if (r) {
@@ -910,7 +902,6 @@ int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
mirror = ofbi->mirror;
info.paddr = data_start_p;
- info.vaddr = data_start_v;
info.screen_width = screen_width;
info.width = xres;
info.height = yres;
@@ -2276,6 +2267,87 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
return r;
}
+static void fb_videomode_to_omap_timings(struct fb_videomode *m,
+ struct omap_video_timings *t)
+{
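+	/*
+	 * fbdev margins map to DSS porches: right/left margin -> hfp/hbp,
+	 * lower/upper margin -> vfp/vbp. pixclock is converted from
+	 * picoseconds to kHz.
+	 */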
+ t->x_res = m->xres;
+ t->y_res = m->yres;
+ t->pixel_clock = PICOS2KHZ(m->pixclock);
+ t->hsw = m->hsync_len;
+ t->hfp = m->right_margin;
+ t->hbp = m->left_margin;
+ t->vsw = m->vsync_len;
+ t->vfp = m->lower_margin;
+ t->vbp = m->upper_margin;
+}
+
+static int omapfb_find_best_mode(struct omap_dss_device *display,
+ struct omap_video_timings *timings)
+{
+ struct fb_monspecs *specs;
+ u8 *edid;
+ int r, i, best_xres, best_idx, len;
+
+ if (!display->driver->read_edid)
+ return -ENODEV;
+
+ len = 0x80 * 2;
+ edid = kmalloc(len, GFP_KERNEL);
+
+ r = display->driver->read_edid(display, edid, len);
+ if (r < 0)
+ goto err1;
+
+ specs = kzalloc(sizeof(*specs), GFP_KERNEL);
+
+ fb_edid_to_monspecs(edid, specs);
+
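+	/* byte 126 of the base EDID block holds the extension block count */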
+ if (edid[126] > 0)
+ fb_edid_add_monspecs(edid + 0x80, specs);
+
+ best_xres = 0;
+ best_idx = -1;
+
+ for (i = 0; i < specs->modedb_len; ++i) {
+ struct fb_videomode *m;
+ struct omap_video_timings t;
+
+ m = &specs->modedb[i];
+
+ if (m->pixclock == 0)
+ continue;
+
+ /* skip repeated pixel modes */
+ if (m->xres == 2880 || m->xres == 1440)
+ continue;
+
+ fb_videomode_to_omap_timings(m, &t);
+
+ r = display->driver->check_timings(display, &t);
+ if (r == 0 && best_xres < m->xres) {
+ best_xres = m->xres;
+ best_idx = i;
+ }
+ }
+
+ if (best_xres == 0) {
+ r = -ENOENT;
+ goto err2;
+ }
+
+ fb_videomode_to_omap_timings(&specs->modedb[best_idx], timings);
+
+ r = 0;
+
+err2:
+ fb_destroy_modedb(specs->modedb);
+ kfree(specs);
+err1:
+ kfree(edid);
+
+ return r;
+}
+
static int omapfb_init_display(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev)
{
@@ -2373,8 +2445,10 @@ static int omapfb_probe(struct platform_device *pdev)
omap_dss_get_device(dssdev);
if (!dssdev->driver) {
- dev_err(&pdev->dev, "no driver for display\n");
- r = -ENODEV;
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
}
d = &fbdev->displays[fbdev->num_displays++];
@@ -2402,9 +2476,27 @@ static int omapfb_probe(struct platform_device *pdev)
for (i = 0; i < fbdev->num_managers; i++)
fbdev->managers[i] = omap_dss_get_overlay_manager(i);
+	/* The gfx overlay should be the default one. Find a display
+	 * connected to it, and use it as the default display */
+ ovl = omap_dss_get_overlay(0);
+ if (ovl->manager && ovl->manager->device) {
+ def_display = ovl->manager->device;
+ } else {
+ dev_warn(&pdev->dev, "cannot find default display\n");
+ def_display = NULL;
+ }
+
if (def_mode && strlen(def_mode) > 0) {
if (omapfb_parse_def_modes(fbdev))
dev_warn(&pdev->dev, "cannot parse default modes\n");
+ } else if (def_display && def_display->driver->set_timings &&
+ def_display->driver->check_timings) {
+ struct omap_video_timings t;
+
+ r = omapfb_find_best_mode(def_display, &t);
+
+ if (r == 0)
+ def_display->driver->set_timings(def_display, &t);
}
r = omapfb_create_framebuffers(fbdev);
@@ -2421,16 +2513,6 @@ static int omapfb_probe(struct platform_device *pdev)
DBG("mgr->apply'ed\n");
- /* gfx overlay should be the default one. find a display
- * connected to that, and use it as default display */
- ovl = omap_dss_get_overlay(0);
- if (ovl->manager && ovl->manager->device) {
- def_display = ovl->manager->device;
- } else {
- dev_warn(&pdev->dev, "cannot find default display\n");
- def_display = NULL;
- }
-
if (def_display) {
r = omapfb_init_display(fbdev, def_display);
if (r) {
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 153bf1aceeb..1694d5148f3 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -104,16 +104,14 @@ static ssize_t store_mirror(struct device *dev,
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- int mirror;
+ bool mirror;
int r;
struct fb_var_screeninfo new_var;
- r = kstrtoint(buf, 0, &mirror);
+ r = strtobool(buf, &mirror);
if (r)
return r;
- mirror = !!mirror;
-
if (!lock_fb_info(fbi))
return -ENODEV;
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index f27ae16ead2..ae3caa6755c 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -490,7 +490,7 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
/*
- * Parse user speficied options (`video=platinumfb:')
+ * Parse user specified options (`video=platinumfb:')
*/
static int __init platinumfb_setup(char *options)
{
@@ -683,7 +683,7 @@ static struct platform_driver platinum_driver =
.of_match_table = platinumfb_match,
},
.probe = platinumfb_probe,
- .remove = platinumfb_remove,
+ .remove = __devexit_p(platinumfb_remove),
};
static int __init platinumfb_init(void)
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 27f93aab6dd..dc7bfa91e57 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -973,8 +973,8 @@ static int pm2fb_pan_display(struct fb_var_screeninfo *var,
{
struct pm2fb_par *p = info->par;
u32 base;
- u32 depth = (var->bits_per_pixel + 7) & ~7;
- u32 xres = (var->xres + 31) & ~31;
+ u32 depth = (info->var.bits_per_pixel + 7) & ~7;
+ u32 xres = (info->var.xres + 31) & ~31;
depth = (depth > 32) ? 32 : depth;
base = to3264(var->yoffset * xres + var->xoffset, depth, 1);
@@ -1773,7 +1773,7 @@ MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
#ifndef MODULE
/**
- * Parse user speficied options.
+ * Parse user specified options.
*
* This is, comma-separated options following `video=pm2fb:'.
*/
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 6666f45a2f8..6632ee5ecb7 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -1147,9 +1147,9 @@ static int pm3fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct pm3_par *par = info->par;
- const u32 xres = (var->xres + 31) & ~31;
+ const u32 xres = (info->var.xres + 31) & ~31;
- par->base = pm3fb_shift_bpp(var->bits_per_pixel,
+ par->base = pm3fb_shift_bpp(info->var.bits_per_pixel,
(var->yoffset * xres)
+ var->xoffset);
PM3_WAIT(par, 1);
@@ -1525,7 +1525,7 @@ static int __init pm3fb_setup(char *options)
{
char *this_opt;
- /* Parse user speficied options (`video=pm3fb:') */
+ /* Parse user specified options (`video=pm3fb:') */
if (!options || !*options)
return 0;
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c
index 5ec4f2d439c..50e00395240 100644
--- a/drivers/video/pnx4008/sdum.c
+++ b/drivers/video/pnx4008/sdum.c
@@ -30,7 +30,7 @@
#include <linux/clk.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include "sdum.h"
#include "fbcommon.h"
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 65560a1a043..213fbbcf613 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -1082,7 +1082,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
}
retval = request_irq(ps3fb.irq_no, ps3fb_vsync_interrupt,
- IRQF_DISABLED, DEVICE_NAME, &dev->core);
+ 0, DEVICE_NAME, &dev->core);
if (retval) {
dev_err(&dev->core, "%s: request_irq failed %d\n", __func__,
retval);
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 0283c702109..1ed8b366618 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -31,8 +31,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
-
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
@@ -678,7 +676,7 @@ pxa3xx_gcu_probe(struct platform_device *dev)
}
ret = request_irq(irq, pxa3xx_gcu_handle_irq,
- IRQF_DISABLED, DRV_NAME, priv);
+ 0, DRV_NAME, priv);
if (ret) {
dev_err(&dev->dev, "request_irq failed\n");
ret = -EBUSY;
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0f4e8c942f9..1d1e4f175e7 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1309,16 +1309,6 @@ static int pxafb_smart_init(struct pxafb_info *fbi)
return 0;
}
#else
-int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds)
-{
- return 0;
-}
-
-int pxafb_smart_flush(struct fb_info *info)
-{
- return 0;
-}
-
static inline int pxafb_smart_init(struct pxafb_info *fbi) { return 0; }
#endif /* CONFIG_FB_PXA_SMARTPANEL */
@@ -2191,7 +2181,7 @@ static int __devinit pxafb_probe(struct platform_device *dev)
goto failed_free_mem;
}
- ret = request_irq(irq, pxafb_handle_irq, IRQF_DISABLED, "LCD", fbi);
+ ret = request_irq(irq, pxafb_handle_irq, 0, "LCD", fbi);
if (ret) {
dev_err(&dev->dev, "request_irq failed: %d\n", ret);
ret = -EBUSY;
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 4aecf213c9b..0753b1cfcb8 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -81,6 +81,7 @@ struct s3c_fb;
* @palette: Address of palette memory, or 0 if none.
* @has_prtcon: Set if has PRTCON register.
* @has_shadowcon: Set if has SHADOWCON register.
+ * @has_clksel: Set if VIDCON0 register has CLKSEL bit.
*/
struct s3c_fb_variant {
unsigned int is_2443:1;
@@ -98,6 +99,7 @@ struct s3c_fb_variant {
unsigned int has_prtcon:1;
unsigned int has_shadowcon:1;
+ unsigned int has_clksel:1;
};
/**
@@ -186,6 +188,7 @@ struct s3c_fb_vsync {
* @dev: The device that we bound to, for printing, etc.
* @regs_res: The resource we claimed for the IO registers.
* @bus_clk: The clk (hclk) feeding our interface and possibly pixclk.
+ * @lcd_clk: The clk (sclk) feeding pixclk.
* @regs: The mapped hardware registers.
* @variant: Variant information for this hardware.
* @enabled: A bitmask of enabled hardware windows.
@@ -200,6 +203,7 @@ struct s3c_fb {
struct device *dev;
struct resource *regs_res;
struct clk *bus_clk;
+ struct clk *lcd_clk;
void __iomem *regs;
struct s3c_fb_variant variant;
@@ -336,10 +340,15 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
*/
static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
{
- unsigned long clk = clk_get_rate(sfb->bus_clk);
+ unsigned long clk;
unsigned long long tmp;
unsigned int result;
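+
+	/*
+	 * The pixel clock is derived from the bus clock (hclk) when the
+	 * variant has a VIDCON0 CLKSEL bit, otherwise from the dedicated
+	 * lcd clock (sclk).
+	 */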
+ if (sfb->variant.has_clksel)
+ clk = clk_get_rate(sfb->bus_clk);
+ else
+ clk = clk_get_rate(sfb->lcd_clk);
+
tmp = (unsigned long long)clk;
tmp *= pixclk;
@@ -883,7 +892,7 @@ static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
}
}
/* Offset in bytes to the end of the displayed area */
- end_boff = start_boff + var->yres * info->fix.line_length;
+ end_boff = start_boff + info->var.yres * info->fix.line_length;
/* Temporarily turn off per-vsync update from shadow registers until
* both start and end addresses are updated to prevent corruption */
@@ -1354,13 +1363,24 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
clk_enable(sfb->bus_clk);
+ if (!sfb->variant.has_clksel) {
+ sfb->lcd_clk = clk_get(dev, "sclk_fimd");
+ if (IS_ERR(sfb->lcd_clk)) {
+ dev_err(dev, "failed to get lcd clock\n");
+ ret = PTR_ERR(sfb->lcd_clk);
+ goto err_bus_clk;
+ }
+
+ clk_enable(sfb->lcd_clk);
+ }
+
pm_runtime_enable(sfb->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "failed to find registers\n");
ret = -ENOENT;
- goto err_clk;
+ goto err_lcd_clk;
}
sfb->regs_res = request_mem_region(res->start, resource_size(res),
@@ -1368,7 +1388,7 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
if (!sfb->regs_res) {
dev_err(dev, "failed to claim register region\n");
ret = -ENOENT;
- goto err_clk;
+ goto err_lcd_clk;
}
sfb->regs = ioremap(res->start, resource_size(res));
@@ -1450,7 +1470,13 @@ err_ioremap:
err_req_region:
release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
-err_clk:
+err_lcd_clk:
+ if (!sfb->variant.has_clksel) {
+ clk_disable(sfb->lcd_clk);
+ clk_put(sfb->lcd_clk);
+ }
+
+err_bus_clk:
clk_disable(sfb->bus_clk);
clk_put(sfb->bus_clk);
@@ -1481,6 +1507,11 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
iounmap(sfb->regs);
+ if (!sfb->variant.has_clksel) {
+ clk_disable(sfb->lcd_clk);
+ clk_put(sfb->lcd_clk);
+ }
+
clk_disable(sfb->bus_clk);
clk_put(sfb->bus_clk);
@@ -1510,6 +1541,9 @@ static int s3c_fb_suspend(struct device *dev)
s3c_fb_blank(FB_BLANK_POWERDOWN, win->fbinfo);
}
+ if (!sfb->variant.has_clksel)
+ clk_disable(sfb->lcd_clk);
+
clk_disable(sfb->bus_clk);
return 0;
}
@@ -1524,6 +1558,9 @@ static int s3c_fb_resume(struct device *dev)
clk_enable(sfb->bus_clk);
+ if (!sfb->variant.has_clksel)
+ clk_enable(sfb->lcd_clk);
+
/* setup gpio and output polarity controls */
pd->setup_gpio();
writel(pd->vidcon1, sfb->regs + VIDCON1);
@@ -1569,6 +1606,9 @@ static int s3c_fb_runtime_suspend(struct device *dev)
s3c_fb_blank(FB_BLANK_POWERDOWN, win->fbinfo);
}
+ if (!sfb->variant.has_clksel)
+ clk_disable(sfb->lcd_clk);
+
clk_disable(sfb->bus_clk);
return 0;
}
@@ -1583,6 +1623,9 @@ static int s3c_fb_runtime_resume(struct device *dev)
clk_enable(sfb->bus_clk);
+ if (!sfb->variant.has_clksel)
+ clk_enable(sfb->lcd_clk);
+
/* setup gpio and output polarity controls */
pd->setup_gpio();
writel(pd->vidcon1, sfb->regs + VIDCON1);
@@ -1755,6 +1798,7 @@ static struct s3c_fb_driverdata s3c_fb_data_64xx = {
},
.has_prtcon = 1,
+ .has_clksel = 1,
},
.win[0] = &s3c_fb_data_64xx_wins[0],
.win[1] = &s3c_fb_data_64xx_wins[1],
@@ -1785,6 +1829,7 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = {
},
.has_prtcon = 1,
+ .has_clksel = 1,
},
.win[0] = &s3c_fb_data_s5p_wins[0],
.win[1] = &s3c_fb_data_s5p_wins[1],
@@ -1815,6 +1860,37 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
},
.has_shadowcon = 1,
+ .has_clksel = 1,
+ },
+ .win[0] = &s3c_fb_data_s5p_wins[0],
+ .win[1] = &s3c_fb_data_s5p_wins[1],
+ .win[2] = &s3c_fb_data_s5p_wins[2],
+ .win[3] = &s3c_fb_data_s5p_wins[3],
+ .win[4] = &s3c_fb_data_s5p_wins[4],
+};
+
+static struct s3c_fb_driverdata s3c_fb_data_exynos4 = {
+ .variant = {
+ .nr_windows = 5,
+ .vidtcon = VIDTCON0,
+ .wincon = WINCON(0),
+ .winmap = WINxMAP(0),
+ .keycon = WKEYCON,
+ .osd = VIDOSD_BASE,
+ .osd_stride = 16,
+ .buf_start = VIDW_BUF_START(0),
+ .buf_size = VIDW_BUF_SIZE(0),
+ .buf_end = VIDW_BUF_END(0),
+
+ .palette = {
+ [0] = 0x2400,
+ [1] = 0x2800,
+ [2] = 0x2c00,
+ [3] = 0x3000,
+ [4] = 0x3400,
+ },
+
+ .has_shadowcon = 1,
},
.win[0] = &s3c_fb_data_s5p_wins[0],
.win[1] = &s3c_fb_data_s5p_wins[1],
@@ -1843,6 +1919,7 @@ static struct s3c_fb_driverdata s3c_fb_data_s3c2443 = {
[0] = 0x400,
[1] = 0x800,
},
+ .has_clksel = 1,
},
.win[0] = &(struct s3c_fb_win_variant) {
.palette_sz = 256,
@@ -1859,6 +1936,30 @@ static struct s3c_fb_driverdata s3c_fb_data_s3c2443 = {
},
};
+static struct s3c_fb_driverdata s3c_fb_data_s5p64x0 = {
+ .variant = {
+ .nr_windows = 3,
+ .vidtcon = VIDTCON0,
+ .wincon = WINCON(0),
+ .winmap = WINxMAP(0),
+ .keycon = WKEYCON,
+ .osd = VIDOSD_BASE,
+ .osd_stride = 16,
+ .buf_start = VIDW_BUF_START(0),
+ .buf_size = VIDW_BUF_SIZE(0),
+ .buf_end = VIDW_BUF_END(0),
+
+ .palette = {
+ [0] = 0x2400,
+ [1] = 0x2800,
+ [2] = 0x2c00,
+ },
+ },
+ .win[0] = &s3c_fb_data_s5p_wins[0],
+ .win[1] = &s3c_fb_data_s5p_wins[1],
+ .win[2] = &s3c_fb_data_s5p_wins[2],
+};
+
static struct platform_device_id s3c_fb_driver_ids[] = {
{
.name = "s3c-fb",
@@ -1870,8 +1971,14 @@ static struct platform_device_id s3c_fb_driver_ids[] = {
.name = "s5pv210-fb",
.driver_data = (unsigned long)&s3c_fb_data_s5pv210,
}, {
+ .name = "exynos4-fb",
+ .driver_data = (unsigned long)&s3c_fb_data_exynos4,
+ }, {
.name = "s3c2443-fb",
.driver_data = (unsigned long)&s3c_fb_data_s3c2443,
+ }, {
+ .name = "s5p64x0-fb",
+ .driver_data = (unsigned long)&s3c_fb_data_s5p64x0,
},
{},
};
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 0aa13761de6..ee4c0df217f 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -767,7 +767,6 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
- struct cpufreq_freqs *freqs = data;
struct s3c2410fb_info *info;
struct fb_info *fbinfo;
long delta_f;
@@ -911,7 +910,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
for (i = 0; i < 256; i++)
info->palette_buffer[i] = PALETTE_BUFF_CLEAR;
- ret = request_irq(irq, s3c2410fb_irq, IRQF_DISABLED, pdev->name, info);
+ ret = request_irq(irq, s3c2410fb_irq, 0, pdev->name, info);
if (ret) {
dev_err(&pdev->dev, "cannot get irq %d - err %d\n", irq, ret);
ret = -EBUSY;
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 4ca5d0c8fe8..946a949f4c7 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1019,12 +1019,13 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
unsigned int offset;
/* Calculate the offset */
- if (var->bits_per_pixel == 0) {
- offset = (var->yoffset / 16) * (var->xres_virtual / 2) + (var->xoffset / 2);
+ if (info->var.bits_per_pixel == 0) {
+ offset = (var->yoffset / 16) * (info->var.xres_virtual / 2)
+ + (var->xoffset / 2);
offset = offset >> 2;
} else {
offset = (var->yoffset * info->fix.line_length) +
- (var->xoffset * var->bits_per_pixel / 8);
+ (var->xoffset * info->var.bits_per_pixel / 8);
offset = offset >> 2;
}
@@ -1504,7 +1505,7 @@ static struct pci_driver s3fb_pci_driver = {
.resume = s3_pci_resume,
};
-/* Parse user speficied options */
+/* Parse user specified options */
#ifndef MODULE
static int __init s3fb_setup(char *options)
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index e8b76d65a07..98d55d0e2da 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -1457,8 +1457,7 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
if (ret)
goto failed;
- ret = request_irq(irq, sa1100fb_handle_irq, IRQF_DISABLED,
- "LCD", fbi);
+ ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
if (ret) {
printk(KERN_ERR "sa1100fb: request_irq failed: %d\n", ret);
goto failed;
diff --git a/drivers/video/savage/savagefb_accel.c b/drivers/video/savage/savagefb_accel.c
index bbcc055d3bb..bfefa6234cf 100644
--- a/drivers/video/savage/savagefb_accel.c
+++ b/drivers/video/savage/savagefb_accel.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/module.h>
#include "savagefb.h"
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 4de541ca9c5..beb495044b2 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1477,15 +1477,9 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r
vgaHWProtect(par, 0);
}
-static void savagefb_update_start(struct savagefb_par *par,
- struct fb_var_screeninfo *var)
+static void savagefb_update_start(struct savagefb_par *par, int base)
{
- int base;
-
- base = ((var->yoffset * var->xres_virtual + (var->xoffset & ~1))
- * ((var->bits_per_pixel+7) / 8)) >> 2;
-
- /* now program the start address registers */
+ /* program the start address registers */
vga_out16(0x3d4, (base & 0x00ff00) | 0x0c, par);
vga_out16(0x3d4, ((base & 0x00ff) << 8) | 0x0d, par);
vga_out8(0x3d4, 0x69, par);
@@ -1550,8 +1544,12 @@ static int savagefb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct savagefb_par *par = info->par;
+ int base;
+
+ base = (var->yoffset * info->fix.line_length
+ + (var->xoffset & ~1) * ((info->var.bits_per_pixel+7) / 8)) >> 2;
- savagefb_update_start(par, var);
+ savagefb_update_start(par, base);
return 0;
}
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c
index 24640c8458a..72ee96bc6b3 100644
--- a/drivers/video/sh_mipi_dsi.c
+++ b/drivers/video/sh_mipi_dsi.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/module.h>
#include <video/mipi_display.h>
#include <video/sh_mipi_dsi.h>
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 7d54e2c612f..647ba984f00 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1111,6 +1111,7 @@ static long sh_hdmi_clk_configure(struct sh_hdmi *hdmi, unsigned long hdmi_rate,
static void sh_hdmi_edid_work_fn(struct work_struct *work)
{
struct sh_hdmi *hdmi = container_of(work, struct sh_hdmi, edid_work.work);
+ struct fb_info *info;
struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
struct sh_mobile_lcdc_chan *ch;
int ret;
@@ -1123,8 +1124,9 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
mutex_lock(&hdmi->mutex);
+ info = hdmi->info;
+
if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) {
- struct fb_info *info = hdmi->info;
unsigned long parent_rate = 0, hdmi_rate;
ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
@@ -1148,42 +1150,45 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
ch = info->par;
- console_lock();
+ if (lock_fb_info(info)) {
+ console_lock();
- /* HDMI plug in */
- if (!sh_hdmi_must_reconfigure(hdmi) &&
- info->state == FBINFO_STATE_RUNNING) {
- /*
- * First activation with the default monitor - just turn
- * on, if we run a resume here, the logo disappears
- */
- if (lock_fb_info(info)) {
+ /* HDMI plug in */
+ if (!sh_hdmi_must_reconfigure(hdmi) &&
+ info->state == FBINFO_STATE_RUNNING) {
+			/*
+			 * First activation with the default monitor - just
+			 * turn it on; if we ran a resume here, the logo
+			 * would disappear
+			 */
info->var.width = hdmi->var.width;
info->var.height = hdmi->var.height;
sh_hdmi_display_on(hdmi, info);
- unlock_fb_info(info);
+ } else {
+ /* New monitor or have to wake up */
+ fb_set_suspend(info, 0);
}
- } else {
- /* New monitor or have to wake up */
- fb_set_suspend(info, 0);
- }
- console_unlock();
+ console_unlock();
+ unlock_fb_info(info);
+ }
} else {
ret = 0;
- if (!hdmi->info)
+ if (!info)
goto out;
hdmi->monspec.modedb_len = 0;
fb_destroy_modedb(hdmi->monspec.modedb);
hdmi->monspec.modedb = NULL;
- console_lock();
+ if (lock_fb_info(info)) {
+ console_lock();
- /* HDMI disconnect */
- fb_set_suspend(hdmi->info, 1);
+ /* HDMI disconnect */
+ fb_set_suspend(info, 1);
- console_unlock();
+ console_unlock();
+ unlock_fb_info(info);
+ }
}
out:
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index b048417247e..facffc25497 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -23,40 +23,16 @@
#include <linux/console.h>
#include <linux/backlight.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <video/sh_mobile_lcdc.h>
+#include <video/sh_mobile_meram.h>
#include <linux/atomic.h>
#include "sh_mobile_lcdcfb.h"
-#include "sh_mobile_meram.h"
#define SIDE_B_OFFSET 0x1000
#define MIRROR_OFFSET 0x2000
-/* shared registers */
-#define _LDDCKR 0x410
-#define _LDDCKSTPR 0x414
-#define _LDINTR 0x468
-#define _LDSR 0x46c
-#define _LDCNT1R 0x470
-#define _LDCNT2R 0x474
-#define _LDRCNTR 0x478
-#define _LDDDSR 0x47c
-#define _LDDWD0R 0x800
-#define _LDDRDR 0x840
-#define _LDDWAR 0x900
-#define _LDDRAR 0x904
-
-/* shared registers and their order for context save/restore */
-static int lcdc_shared_regs[] = {
- _LDDCKR,
- _LDDCKSTPR,
- _LDINTR,
- _LDDDSR,
- _LDCNT1R,
- _LDCNT2R,
-};
-#define NR_SHARED_REGS ARRAY_SIZE(lcdc_shared_regs)
-
#define MAX_XRES 1920
#define MAX_YRES 1080
@@ -98,22 +74,6 @@ static unsigned long lcdc_offs_sublcd[NR_CH_REGS] = {
[LDPMR] = 0x63c,
};
-#define START_LCDC 0x00000001
-#define LCDC_RESET 0x00000100
-#define DISPLAY_BEU 0x00000008
-#define LCDC_ENABLE 0x00000001
-#define LDINTR_FE 0x00000400
-#define LDINTR_VSE 0x00000200
-#define LDINTR_VEE 0x00000100
-#define LDINTR_FS 0x00000004
-#define LDINTR_VSS 0x00000002
-#define LDINTR_VES 0x00000001
-#define LDRCNTR_SRS 0x00020000
-#define LDRCNTR_SRC 0x00010000
-#define LDRCNTR_MRS 0x00000002
-#define LDRCNTR_MRC 0x00000001
-#define LDSR_MRS 0x00000100
-
static const struct fb_videomode default_720p = {
.name = "HDMI 720p",
.xres = 1280,
@@ -141,7 +101,6 @@ struct sh_mobile_lcdc_priv {
unsigned long lddckr;
struct sh_mobile_lcdc_chan ch[2];
struct notifier_block notifier;
- unsigned long saved_shared_regs[NR_SHARED_REGS];
int started;
int forced_bpp; /* 2 channel LCDC must share bpp setting */
struct sh_mobile_meram_info *meram_dev;
@@ -218,33 +177,36 @@ static void lcdc_sys_write_index(void *handle, unsigned long data)
{
struct sh_mobile_lcdc_chan *ch = handle;
- lcdc_write(ch->lcdc, _LDDWD0R, data | 0x10000000);
- lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
- lcdc_write(ch->lcdc, _LDDWAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
- lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
+ lcdc_write(ch->lcdc, _LDDWD0R, data | LDDWDxR_WDACT);
+ lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0);
+ lcdc_write(ch->lcdc, _LDDWAR, LDDWAR_WA |
+ (lcdc_chan_is_sublcd(ch) ? 2 : 0));
+ lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0);
}
static void lcdc_sys_write_data(void *handle, unsigned long data)
{
struct sh_mobile_lcdc_chan *ch = handle;
- lcdc_write(ch->lcdc, _LDDWD0R, data | 0x11000000);
- lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
- lcdc_write(ch->lcdc, _LDDWAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
- lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
+ lcdc_write(ch->lcdc, _LDDWD0R, data | LDDWDxR_WDACT | LDDWDxR_RSW);
+ lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0);
+ lcdc_write(ch->lcdc, _LDDWAR, LDDWAR_WA |
+ (lcdc_chan_is_sublcd(ch) ? 2 : 0));
+ lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0);
}
static unsigned long lcdc_sys_read_data(void *handle)
{
struct sh_mobile_lcdc_chan *ch = handle;
- lcdc_write(ch->lcdc, _LDDRDR, 0x01000000);
- lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
- lcdc_write(ch->lcdc, _LDDRAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
+ lcdc_write(ch->lcdc, _LDDRDR, LDDRDR_RSR);
+ lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0);
+ lcdc_write(ch->lcdc, _LDDRAR, LDDRAR_RA |
+ (lcdc_chan_is_sublcd(ch) ? 2 : 0));
udelay(1);
- lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
+ lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0);
- return lcdc_read(ch->lcdc, _LDDRDR) & 0x3ffff;
+ return lcdc_read(ch->lcdc, _LDDRDR) & LDDRDR_DRD_MASK;
}
struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
@@ -256,18 +218,22 @@ struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_inc_and_test(&priv->hw_usecnt)) {
- pm_runtime_get_sync(priv->dev);
if (priv->dot_clk)
clk_enable(priv->dot_clk);
+ pm_runtime_get_sync(priv->dev);
+ if (priv->meram_dev && priv->meram_dev->pdev)
+ pm_runtime_get_sync(&priv->meram_dev->pdev->dev);
}
}
static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_sub_return(1, &priv->hw_usecnt) == -1) {
+ if (priv->meram_dev && priv->meram_dev->pdev)
+ pm_runtime_put_sync(&priv->meram_dev->pdev->dev);
+ pm_runtime_put(priv->dev);
if (priv->dot_clk)
clk_disable(priv->dot_clk);
- pm_runtime_put(priv->dev);
}
}
@@ -319,13 +285,13 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
if (bcfg->start_transfer)
bcfg->start_transfer(bcfg->board_data, ch,
&sh_mobile_lcdc_sys_bus_ops);
- lcdc_write_chan(ch, LDSM2R, 1);
+ lcdc_write_chan(ch, LDSM2R, LDSM2R_OSTRG);
dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
} else {
if (bcfg->start_transfer)
bcfg->start_transfer(bcfg->board_data, ch,
&sh_mobile_lcdc_sys_bus_ops);
- lcdc_write_chan(ch, LDSM2R, 1);
+ lcdc_write_chan(ch, LDSM2R, LDSM2R_OSTRG);
}
}
@@ -341,22 +307,16 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data)
{
struct sh_mobile_lcdc_priv *priv = data;
struct sh_mobile_lcdc_chan *ch;
- unsigned long tmp;
unsigned long ldintr;
int is_sub;
int k;
- /* acknowledge interrupt */
- ldintr = tmp = lcdc_read(priv, _LDINTR);
- /*
- * disable further VSYNC End IRQs, preserve all other enabled IRQs,
- * write 0 to bits 0-6 to ack all triggered IRQs.
- */
- tmp &= 0xffffff00 & ~LDINTR_VEE;
- lcdc_write(priv, _LDINTR, tmp);
+ /* Acknowledge interrupts and disable further VSYNC End IRQs. */
+ ldintr = lcdc_read(priv, _LDINTR);
+ lcdc_write(priv, _LDINTR, (ldintr ^ LDINTR_STATUS_MASK) & ~LDINTR_VEE);
/* figure out if this interrupt is for main or sub lcd */
- is_sub = (lcdc_read(priv, _LDSR) & (1 << 10)) ? 1 : 0;
+ is_sub = (lcdc_read(priv, _LDSR) & LDSR_MSS) ? 1 : 0;
/* wake up channel and disable clocks */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -365,7 +325,7 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data)
if (!ch->enabled)
continue;
- /* Frame Start */
+ /* Frame End */
if (ldintr & LDINTR_FS) {
if (is_sub == lcdc_chan_is_sublcd(ch)) {
ch->frame_end = 1;
@@ -391,16 +351,17 @@ static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv,
/* start or stop the lcdc */
if (start)
- lcdc_write(priv, _LDCNT2R, tmp | START_LCDC);
+ lcdc_write(priv, _LDCNT2R, tmp | LDCNT2R_DO);
else
- lcdc_write(priv, _LDCNT2R, tmp & ~START_LCDC);
+ lcdc_write(priv, _LDCNT2R, tmp & ~LDCNT2R_DO);
/* wait until power is applied/stopped on all channels */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++)
if (lcdc_read(priv, _LDCNT2R) & priv->ch[k].enabled)
while (1) {
- tmp = lcdc_read_chan(&priv->ch[k], LDPMR) & 3;
- if (start && tmp == 3)
+ tmp = lcdc_read_chan(&priv->ch[k], LDPMR)
+ & LDPMR_LPS;
+ if (start && tmp == LDPMR_LPS)
break;
if (!start && tmp == 0)
break;
@@ -418,13 +379,13 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
u32 tmp;
tmp = ch->ldmt1r_value;
- tmp |= (var->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28;
- tmp |= (var->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DWPOL) ? 1 << 26 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DIPOL) ? 1 << 25 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DAPOL) ? 1 << 24 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_HSCNT) ? 1 << 17 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DWCNT) ? 1 << 16 : 0;
+ tmp |= (var->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : LDMT1R_VPOL;
+ tmp |= (var->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : LDMT1R_HPOL;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DWPOL) ? LDMT1R_DWPOL : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DIPOL) ? LDMT1R_DIPOL : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DAPOL) ? LDMT1R_DAPOL : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_HSCNT) ? LDMT1R_HSCNT : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DWCNT) ? LDMT1R_DWCNT : 0;
lcdc_write_chan(ch, LDMT1R, tmp);
/* setup SYS bus */
@@ -463,242 +424,239 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
lcdc_write_chan(ch, LDHAJR, tmp);
}
-static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
+/*
+ * __sh_mobile_lcdc_start - Configure and start the LCDC
+ * @priv: LCDC device
+ *
+ * Configure all enabled channels and start the LCDC device. External
+ * devices (clocks, MERAM, panels, ...) are not touched by this function.
+ */
+static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
{
struct sh_mobile_lcdc_chan *ch;
- struct sh_mobile_lcdc_board_cfg *board_cfg;
unsigned long tmp;
int bpp = 0;
- unsigned long ldddsr;
- int k, m, ret;
+ int k, m;
- /* enable clocks before accessing the hardware */
- for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
- if (priv->ch[k].enabled) {
- sh_mobile_lcdc_clk_on(priv);
- if (!bpp)
- bpp = priv->ch[k].info->var.bits_per_pixel;
- }
- }
-
- /* reset */
- lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LCDC_RESET);
- lcdc_wait_bit(priv, _LDCNT2R, LCDC_RESET, 0);
-
- /* enable LCDC channels */
- tmp = lcdc_read(priv, _LDCNT2R);
- tmp |= priv->ch[0].enabled;
- tmp |= priv->ch[1].enabled;
- lcdc_write(priv, _LDCNT2R, tmp);
-
- /* read data from external memory, avoid using the BEU for now */
- lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) & ~DISPLAY_BEU);
+ /* Enable LCDC channels. Read data from external memory and avoid using
+ * the BEU for now.
+ */
+ lcdc_write(priv, _LDCNT2R, priv->ch[0].enabled | priv->ch[1].enabled);
- /* stop the lcdc first */
+ /* Stop the LCDC first and disable all interrupts. */
sh_mobile_lcdc_start_stop(priv, 0);
+ lcdc_write(priv, _LDINTR, 0);
- /* configure clocks */
+ /* Configure power supply, dot clocks and start them. */
tmp = priv->lddckr;
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
ch = &priv->ch[k];
-
- if (!priv->ch[k].enabled)
+ if (!ch->enabled)
continue;
+ if (!bpp)
+ bpp = ch->info->var.bits_per_pixel;
+
+ /* Power supply */
+ lcdc_write_chan(ch, LDPMR, 0);
+
m = ch->cfg.clock_divider;
if (!m)
continue;
- if (m == 1)
- m = 1 << 6;
- tmp |= m << (lcdc_chan_is_sublcd(ch) ? 8 : 0);
-
- /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider denominator */
+ /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
+ * denominator.
+ */
lcdc_write_chan(ch, LDDCKPAT1R, 0);
lcdc_write_chan(ch, LDDCKPAT2R, (1 << (m/2)) - 1);
+
+ if (m == 1)
+ m = LDDCKR_MOSEL;
+ tmp |= m << (lcdc_chan_is_sublcd(ch) ? 8 : 0);
}
lcdc_write(priv, _LDDCKR, tmp);
-
- /* start dotclock again */
lcdc_write(priv, _LDDCKSTPR, 0);
lcdc_wait_bit(priv, _LDDCKSTPR, ~0, 0);
- /* interrupts are disabled to begin with */
- lcdc_write(priv, _LDINTR, 0);
-
+ /* Setup geometry, format, frame buffer memory and operation mode. */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
ch = &priv->ch[k];
-
if (!ch->enabled)
continue;
sh_mobile_lcdc_geometry(ch);
- /* power supply */
- lcdc_write_chan(ch, LDPMR, 0);
-
- board_cfg = &ch->cfg.board_cfg;
- if (board_cfg->setup_sys) {
- ret = board_cfg->setup_sys(board_cfg->board_data,
- ch, &sh_mobile_lcdc_sys_bus_ops);
- if (ret)
- return ret;
- }
- }
-
- /* word and long word swap */
- ldddsr = lcdc_read(priv, _LDDDSR);
- if (priv->ch[0].info->var.nonstd)
- lcdc_write(priv, _LDDDSR, ldddsr | 7);
- else {
- switch (bpp) {
- case 16:
- lcdc_write(priv, _LDDDSR, ldddsr | 6);
- break;
- case 24:
- lcdc_write(priv, _LDDDSR, ldddsr | 7);
- break;
- case 32:
- lcdc_write(priv, _LDDDSR, ldddsr | 4);
- break;
- }
- }
-
- for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
- unsigned long base_addr_y;
- unsigned long base_addr_c = 0;
- int pitch;
- ch = &priv->ch[k];
-
- if (!priv->ch[k].enabled)
- continue;
-
- /* set bpp format in PKF[4:0] */
- tmp = lcdc_read_chan(ch, LDDFR);
- tmp &= ~0x0003031f;
if (ch->info->var.nonstd) {
- tmp |= (ch->info->var.nonstd << 16);
+ tmp = (ch->info->var.nonstd << 16);
switch (ch->info->var.bits_per_pixel) {
case 12:
+ tmp |= LDDFR_YF_420;
break;
case 16:
- tmp |= (0x1 << 8);
+ tmp |= LDDFR_YF_422;
break;
case 24:
- tmp |= (0x2 << 8);
+ default:
+ tmp |= LDDFR_YF_444;
break;
}
} else {
switch (ch->info->var.bits_per_pixel) {
case 16:
- tmp |= 0x03;
+ tmp = LDDFR_PKF_RGB16;
break;
case 24:
- tmp |= 0x0b;
+ tmp = LDDFR_PKF_RGB24;
break;
case 32:
+ default:
+ tmp = LDDFR_PKF_ARGB32;
break;
}
}
+
lcdc_write_chan(ch, LDDFR, tmp);
+ lcdc_write_chan(ch, LDMLSR, ch->pitch);
+ lcdc_write_chan(ch, LDSA1R, ch->base_addr_y);
+ if (ch->info->var.nonstd)
+ lcdc_write_chan(ch, LDSA2R, ch->base_addr_c);
- base_addr_y = ch->info->fix.smem_start;
- base_addr_c = base_addr_y +
- ch->info->var.xres *
- ch->info->var.yres_virtual;
- pitch = ch->info->fix.line_length;
+ /* When using deferred I/O mode, configure the LCDC for one-shot
+ * operation and enable the frame end interrupt. Otherwise use
+ * continuous read mode.
+ */
+ if (ch->ldmt1r_value & LDMT1R_IFM &&
+ ch->cfg.sys_bus_cfg.deferred_io_msec) {
+ lcdc_write_chan(ch, LDSM1R, LDSM1R_OS);
+ lcdc_write(priv, _LDINTR, LDINTR_FE);
+ } else {
+ lcdc_write_chan(ch, LDSM1R, 0);
+ }
+ }
- /* test if we can enable meram */
- if (ch->cfg.meram_cfg && priv->meram_dev &&
- priv->meram_dev->ops) {
- struct sh_mobile_meram_cfg *cfg;
- struct sh_mobile_meram_info *mdev;
- unsigned long icb_addr_y, icb_addr_c;
- int icb_pitch;
- int pf;
+ /* Word and long word swap. */
+ if (priv->ch[0].info->var.nonstd)
+ tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
+ else {
+ switch (bpp) {
+ case 16:
+ tmp = LDDDSR_LS | LDDDSR_WS;
+ break;
+ case 24:
+ tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
+ break;
+ case 32:
+ default:
+ tmp = LDDDSR_LS;
+ break;
+ }
+ }
+ lcdc_write(priv, _LDDDSR, tmp);
- cfg = ch->cfg.meram_cfg;
- mdev = priv->meram_dev;
- /* we need to de-init configured ICBs before we
- * we can re-initialize them.
- */
- if (ch->meram_enabled)
- mdev->ops->meram_unregister(mdev, cfg);
+ /* Enable the display output. */
+ lcdc_write(priv, _LDCNT1R, LDCNT1R_DE);
+ sh_mobile_lcdc_start_stop(priv, 1);
+ priv->started = 1;
+}
- ch->meram_enabled = 0;
+static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
+{
+ struct sh_mobile_meram_info *mdev = priv->meram_dev;
+ struct sh_mobile_lcdc_board_cfg *board_cfg;
+ struct sh_mobile_lcdc_chan *ch;
+ unsigned long tmp;
+ int ret;
+ int k;
- if (ch->info->var.nonstd) {
- if (ch->info->var.bits_per_pixel == 24)
- pf = SH_MOBILE_MERAM_PF_NV24;
- else
- pf = SH_MOBILE_MERAM_PF_NV;
- } else {
- pf = SH_MOBILE_MERAM_PF_RGB;
- }
+ /* enable clocks before accessing the hardware */
+ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
+ if (priv->ch[k].enabled)
+ sh_mobile_lcdc_clk_on(priv);
+ }
- ret = mdev->ops->meram_register(mdev, cfg, pitch,
- ch->info->var.yres,
- pf,
- base_addr_y,
- base_addr_c,
- &icb_addr_y,
- &icb_addr_c,
- &icb_pitch);
- if (!ret) {
- /* set LDSA1R value */
- base_addr_y = icb_addr_y;
- pitch = icb_pitch;
-
- /* set LDSA2R value if required */
- if (base_addr_c)
- base_addr_c = icb_addr_c;
-
- ch->meram_enabled = 1;
- }
- }
+ /* reset */
+ lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LDCNT2R_BR);
+ lcdc_wait_bit(priv, _LDCNT2R, LDCNT2R_BR, 0);
- /* point out our frame buffer */
- lcdc_write_chan(ch, LDSA1R, base_addr_y);
- if (ch->info->var.nonstd)
- lcdc_write_chan(ch, LDSA2R, base_addr_c);
+ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
+ ch = &priv->ch[k];
- /* set line size */
- lcdc_write_chan(ch, LDMLSR, pitch);
+ if (!ch->enabled)
+ continue;
- /* setup deferred io if SYS bus */
- tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
- if (ch->ldmt1r_value & (1 << 12) && tmp) {
- ch->defio.deferred_io = sh_mobile_lcdc_deferred_io;
- ch->defio.delay = msecs_to_jiffies(tmp);
- ch->info->fbdefio = &ch->defio;
- fb_deferred_io_init(ch->info);
+ board_cfg = &ch->cfg.board_cfg;
+ if (board_cfg->setup_sys) {
+ ret = board_cfg->setup_sys(board_cfg->board_data, ch,
+ &sh_mobile_lcdc_sys_bus_ops);
+ if (ret)
+ return ret;
+ }
+ }
- /* one-shot mode */
- lcdc_write_chan(ch, LDSM1R, 1);
+ /* Compute frame buffer base address and pitch for each channel. */
+ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
+ struct sh_mobile_meram_cfg *cfg;
+ int pixelformat;
- /* enable "Frame End Interrupt Enable" bit */
- lcdc_write(priv, _LDINTR, LDINTR_FE);
+ ch = &priv->ch[k];
+ if (!ch->enabled)
+ continue;
- } else {
- /* continuous read mode */
- lcdc_write_chan(ch, LDSM1R, 0);
+ ch->base_addr_y = ch->info->fix.smem_start;
+ ch->base_addr_c = ch->base_addr_y
+ + ch->info->var.xres
+ * ch->info->var.yres_virtual;
+ ch->pitch = ch->info->fix.line_length;
+
+ /* Enable MERAM if possible. */
+ cfg = ch->cfg.meram_cfg;
+ if (mdev == NULL || mdev->ops == NULL || cfg == NULL)
+ continue;
+
+ /* we need to de-init configured ICBs before we can
+ * re-initialize them.
+ */
+ if (ch->meram_enabled) {
+ mdev->ops->meram_unregister(mdev, cfg);
+ ch->meram_enabled = 0;
}
+
+ if (!ch->info->var.nonstd)
+ pixelformat = SH_MOBILE_MERAM_PF_RGB;
+ else if (ch->info->var.bits_per_pixel == 24)
+ pixelformat = SH_MOBILE_MERAM_PF_NV24;
+ else
+ pixelformat = SH_MOBILE_MERAM_PF_NV;
+
+ ret = mdev->ops->meram_register(mdev, cfg, ch->pitch,
+ ch->info->var.yres, pixelformat,
+ ch->base_addr_y, ch->base_addr_c,
+ &ch->base_addr_y, &ch->base_addr_c,
+ &ch->pitch);
+ if (!ret)
+ ch->meram_enabled = 1;
}
- /* display output */
- lcdc_write(priv, _LDCNT1R, LCDC_ENABLE);
+ /* Start the LCDC. */
+ __sh_mobile_lcdc_start(priv);
- /* start the lcdc */
- sh_mobile_lcdc_start_stop(priv, 1);
- priv->started = 1;
-
- /* tell the board code to enable the panel */
+ /* Setup deferred I/O, tell the board code to enable the panels, and
+ * turn backlight on.
+ */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
ch = &priv->ch[k];
if (!ch->enabled)
continue;
+ tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
+ if (ch->ldmt1r_value & LDMT1R_IFM && tmp) {
+ ch->defio.deferred_io = sh_mobile_lcdc_deferred_io;
+ ch->defio.delay = msecs_to_jiffies(tmp);
+ ch->info->fbdefio = &ch->defio;
+ fb_deferred_io_init(ch->info);
+ }
+
board_cfg = &ch->cfg.board_cfg;
if (board_cfg->display_on && try_module_get(board_cfg->owner)) {
board_cfg->display_on(board_cfg->board_data, ch->info);
@@ -776,42 +734,42 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch)
{
- int ifm, miftyp;
-
- switch (ch->cfg.interface_type) {
- case RGB8: ifm = 0; miftyp = 0; break;
- case RGB9: ifm = 0; miftyp = 4; break;
- case RGB12A: ifm = 0; miftyp = 5; break;
- case RGB12B: ifm = 0; miftyp = 6; break;
- case RGB16: ifm = 0; miftyp = 7; break;
- case RGB18: ifm = 0; miftyp = 10; break;
- case RGB24: ifm = 0; miftyp = 11; break;
- case SYS8A: ifm = 1; miftyp = 0; break;
- case SYS8B: ifm = 1; miftyp = 1; break;
- case SYS8C: ifm = 1; miftyp = 2; break;
- case SYS8D: ifm = 1; miftyp = 3; break;
- case SYS9: ifm = 1; miftyp = 4; break;
- case SYS12: ifm = 1; miftyp = 5; break;
- case SYS16A: ifm = 1; miftyp = 7; break;
- case SYS16B: ifm = 1; miftyp = 8; break;
- case SYS16C: ifm = 1; miftyp = 9; break;
- case SYS18: ifm = 1; miftyp = 10; break;
- case SYS24: ifm = 1; miftyp = 11; break;
- default: goto bad;
+ int interface_type = ch->cfg.interface_type;
+
+ switch (interface_type) {
+ case RGB8:
+ case RGB9:
+ case RGB12A:
+ case RGB12B:
+ case RGB16:
+ case RGB18:
+ case RGB24:
+ case SYS8A:
+ case SYS8B:
+ case SYS8C:
+ case SYS8D:
+ case SYS9:
+ case SYS12:
+ case SYS16A:
+ case SYS16B:
+ case SYS16C:
+ case SYS18:
+ case SYS24:
+ break;
+ default:
+ return -EINVAL;
}
/* SUBLCD only supports SYS interface */
if (lcdc_chan_is_sublcd(ch)) {
- if (ifm == 0)
- goto bad;
- else
- ifm = 0;
+ if (!(interface_type & LDMT1R_IFM))
+ return -EINVAL;
+
+ interface_type &= ~LDMT1R_IFM;
}
- ch->ldmt1r_value = (ifm << 12) | miftyp;
+ ch->ldmt1r_value = interface_type;
return 0;
- bad:
- return -EINVAL;
}
static int sh_mobile_lcdc_setup_clocks(struct platform_device *pdev,
@@ -819,18 +777,24 @@ static int sh_mobile_lcdc_setup_clocks(struct platform_device *pdev,
struct sh_mobile_lcdc_priv *priv)
{
char *str;
- int icksel;
switch (clock_source) {
- case LCDC_CLK_BUS: str = "bus_clk"; icksel = 0; break;
- case LCDC_CLK_PERIPHERAL: str = "peripheral_clk"; icksel = 1; break;
- case LCDC_CLK_EXTERNAL: str = NULL; icksel = 2; break;
+ case LCDC_CLK_BUS:
+ str = "bus_clk";
+ priv->lddckr = LDDCKR_ICKSEL_BUS;
+ break;
+ case LCDC_CLK_PERIPHERAL:
+ str = "peripheral_clk";
+ priv->lddckr = LDDCKR_ICKSEL_MIPI;
+ break;
+ case LCDC_CLK_EXTERNAL:
+ str = NULL;
+ priv->lddckr = LDDCKR_ICKSEL_HDMI;
+ break;
default:
return -EINVAL;
}
- priv->lddckr = icksel << 16;
-
if (str) {
priv->dot_clk = clk_get(&pdev->dev, str);
if (IS_ERR(priv->dot_clk)) {
@@ -914,12 +878,12 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
unsigned long base_addr_y, base_addr_c;
unsigned long c_offset;
- if (!var->nonstd)
- new_pan_offset = (var->yoffset * info->fix.line_length) +
- (var->xoffset * (info->var.bits_per_pixel / 8));
+ if (!info->var.nonstd)
+ new_pan_offset = var->yoffset * info->fix.line_length
+ + var->xoffset * (info->var.bits_per_pixel / 8);
else
- new_pan_offset = (var->yoffset * info->fix.line_length) +
- (var->xoffset);
+ new_pan_offset = var->yoffset * info->fix.line_length
+ + var->xoffset;
if (new_pan_offset == ch->pan_offset)
return 0; /* No change, do nothing */
@@ -928,44 +892,40 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
/* Set the source address for the next refresh */
base_addr_y = ch->dma_handle + new_pan_offset;
- if (var->nonstd) {
+ if (info->var.nonstd) {
/* Set y offset */
- c_offset = (var->yoffset *
- info->fix.line_length *
- (info->var.bits_per_pixel - 8)) / 8;
- base_addr_c = ch->dma_handle + var->xres * var->yres_virtual +
- c_offset;
+ c_offset = var->yoffset * info->fix.line_length
+ * (info->var.bits_per_pixel - 8) / 8;
+ base_addr_c = ch->dma_handle
+ + info->var.xres * info->var.yres_virtual
+ + c_offset;
/* Set x offset */
if (info->var.bits_per_pixel == 24)
base_addr_c += 2 * var->xoffset;
else
base_addr_c += var->xoffset;
- } else
- base_addr_c = 0;
+ }
- if (!ch->meram_enabled) {
- lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
- if (base_addr_c)
- lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
- } else {
+ if (ch->meram_enabled) {
struct sh_mobile_meram_cfg *cfg;
struct sh_mobile_meram_info *mdev;
- unsigned long icb_addr_y, icb_addr_c;
int ret;
cfg = ch->cfg.meram_cfg;
mdev = priv->meram_dev;
ret = mdev->ops->meram_update(mdev, cfg,
base_addr_y, base_addr_c,
- &icb_addr_y, &icb_addr_c);
+ &base_addr_y, &base_addr_c);
if (ret)
return ret;
+ }
- lcdc_write_chan_mirror(ch, LDSA1R, icb_addr_y);
- if (icb_addr_c)
- lcdc_write_chan_mirror(ch, LDSA2R, icb_addr_c);
+ ch->base_addr_y = base_addr_y;
+ ch->base_addr_c = base_addr_c;
- }
+ lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
+ if (info->var.nonstd)
+ lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
if (lcdc_chan_is_sublcd(ch))
lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
@@ -985,9 +945,11 @@ static int sh_mobile_wait_for_vsync(struct fb_info *info)
unsigned long ldintr;
int ret;
- /* Enable VSync End interrupt */
+ /* Enable VSync End interrupt and be careful not to acknowledge any
+ * pending interrupt.
+ */
ldintr = lcdc_read(ch->lcdc, _LDINTR);
- ldintr |= LDINTR_VEE;
+ ldintr |= LDINTR_VEE | LDINTR_STATUS_MASK;
lcdc_write(ch->lcdc, _LDINTR, ldintr);
ret = wait_for_completion_interruptible_timeout(&ch->vsync_completion,
@@ -1037,11 +999,6 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
/* Couldn't reconfigure, hopefully, can continue as before */
return;
- if (info->var.nonstd)
- info->fix.line_length = mode1.xres;
- else
- info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8);
-
/*
* fb_set_var() calls the notifier change internally, only if
* FBINFO_MISC_USEREVENT flag is set. Since we do not want to fake a
@@ -1094,30 +1051,126 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in
{
struct sh_mobile_lcdc_chan *ch = info->par;
struct sh_mobile_lcdc_priv *p = ch->lcdc;
+ unsigned int best_dist = (unsigned int)-1;
+ unsigned int best_xres = 0;
+ unsigned int best_yres = 0;
+ unsigned int i;
- if (var->xres > MAX_XRES || var->yres > MAX_YRES ||
- var->xres * var->yres * (ch->cfg.bpp / 8) * 2 > info->fix.smem_len) {
- dev_warn(info->dev, "Invalid info: %u-%u-%u-%u x %u-%u-%u-%u @ %lukHz!\n",
- var->left_margin, var->xres, var->right_margin, var->hsync_len,
- var->upper_margin, var->yres, var->lower_margin, var->vsync_len,
- PICOS2KHZ(var->pixclock));
+ if (var->xres > MAX_XRES || var->yres > MAX_YRES)
return -EINVAL;
+
+ /* If board code provides us with a list of available modes, make sure
+ * we use one of them. Find the mode closest to the requested one. The
+ * distance between two modes is defined as the size of the
+ * non-overlapping parts of the two rectangles.
+ */
+ for (i = 0; i < ch->cfg.num_cfg; ++i) {
+ const struct fb_videomode *mode = &ch->cfg.lcd_cfg[i];
+ unsigned int dist;
+
+ /* We can only round up. */
+ if (var->xres > mode->xres || var->yres > mode->yres)
+ continue;
+
+ dist = var->xres * var->yres + mode->xres * mode->yres
+ - 2 * min(var->xres, mode->xres)
+ * min(var->yres, mode->yres);
+
+ if (dist < best_dist) {
+ best_xres = mode->xres;
+ best_yres = mode->yres;
+ best_dist = dist;
+ }
+ }
+
+ /* If no available mode can be used, return an error. */
+ if (ch->cfg.num_cfg != 0) {
+ if (best_dist == (unsigned int)-1)
+ return -EINVAL;
+
+ var->xres = best_xres;
+ var->yres = best_yres;
}
+ /* Make sure the virtual resolution is at least as big as the visible
+ * resolution.
+ */
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
+
+ if (var->bits_per_pixel <= 16) { /* RGB 565 */
+ var->bits_per_pixel = 16;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
+ var->bits_per_pixel = 24;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
+ var->bits_per_pixel = 32;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ } else
+ return -EINVAL;
+
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+
+ /* Make sure we don't exceed our allocated memory. */
+ if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 >
+ info->fix.smem_len)
+ return -EINVAL;
+
/* only accept the forced_bpp for dual channel configurations */
if (p->forced_bpp && p->forced_bpp != var->bits_per_pixel)
return -EINVAL;
- switch (var->bits_per_pixel) {
- case 16: /* PKF[4:0] = 00011 - RGB 565 */
- case 24: /* PKF[4:0] = 01011 - RGB 888 */
- case 32: /* PKF[4:0] = 00000 - RGBA 888 */
- break;
- default:
- return -EINVAL;
+ return 0;
+}
+
+static int sh_mobile_set_par(struct fb_info *info)
+{
+ struct sh_mobile_lcdc_chan *ch = info->par;
+ u32 line_length = info->fix.line_length;
+ int ret;
+
+ sh_mobile_lcdc_stop(ch->lcdc);
+
+ if (info->var.nonstd)
+ info->fix.line_length = info->var.xres;
+ else
+ info->fix.line_length = info->var.xres
+ * info->var.bits_per_pixel / 8;
+
+ ret = sh_mobile_lcdc_start(ch->lcdc);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: unable to restart LCDC\n", __func__);
+ info->fix.line_length = line_length;
}
- return 0;
+ return ret;
}
/*
@@ -1177,6 +1230,7 @@ static struct fb_ops sh_mobile_lcdc_ops = {
.fb_open = sh_mobile_open,
.fb_release = sh_mobile_release,
.fb_check_var = sh_mobile_check_var,
+ .fb_set_par = sh_mobile_set_par,
};
static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
@@ -1238,66 +1292,6 @@ static void sh_mobile_lcdc_bl_remove(struct backlight_device *bdev)
backlight_device_unregister(bdev);
}
-static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp,
- int nonstd)
-{
- if (nonstd) {
- switch (bpp) {
- case 12:
- case 16:
- case 24:
- var->bits_per_pixel = bpp;
- var->nonstd = nonstd;
- return 0;
- default:
- return -EINVAL;
- }
- }
-
- switch (bpp) {
- case 16: /* PKF[4:0] = 00011 - RGB 565 */
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
-
- case 24: /* PKF[4:0] = 01011 - RGB 888 */
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
-
- case 32: /* PKF[4:0] = 00000 - RGBA 888 */
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- break;
- default:
- return -EINVAL;
- }
- var->bits_per_pixel = bpp;
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
- return 0;
-}
-
static int sh_mobile_lcdc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1316,47 +1310,20 @@ static int sh_mobile_lcdc_resume(struct device *dev)
static int sh_mobile_lcdc_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- struct sh_mobile_lcdc_priv *p = platform_get_drvdata(pdev);
- struct sh_mobile_lcdc_chan *ch;
- int k, n;
-
- /* save per-channel registers */
- for (k = 0; k < ARRAY_SIZE(p->ch); k++) {
- ch = &p->ch[k];
- if (!ch->enabled)
- continue;
- for (n = 0; n < NR_CH_REGS; n++)
- ch->saved_ch_regs[n] = lcdc_read_chan(ch, n);
- }
-
- /* save shared registers */
- for (n = 0; n < NR_SHARED_REGS; n++)
- p->saved_shared_regs[n] = lcdc_read(p, lcdc_shared_regs[n]);
+ struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
/* turn off LCDC hardware */
- lcdc_write(p, _LDCNT1R, 0);
+ lcdc_write(priv, _LDCNT1R, 0);
+
return 0;
}
static int sh_mobile_lcdc_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- struct sh_mobile_lcdc_priv *p = platform_get_drvdata(pdev);
- struct sh_mobile_lcdc_chan *ch;
- int k, n;
-
- /* restore per-channel registers */
- for (k = 0; k < ARRAY_SIZE(p->ch); k++) {
- ch = &p->ch[k];
- if (!ch->enabled)
- continue;
- for (n = 0; n < NR_CH_REGS; n++)
- lcdc_write_chan(ch, n, ch->saved_ch_regs[n]);
- }
+ struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
- /* restore shared registers */
- for (n = 0; n < NR_SHARED_REGS; n++)
- lcdc_write(p, lcdc_shared_regs[n], p->saved_shared_regs[n]);
+ __sh_mobile_lcdc_start(priv);
return 0;
}
@@ -1408,17 +1375,187 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int sh_mobile_lcdc_remove(struct platform_device *pdev);
+static int sh_mobile_lcdc_remove(struct platform_device *pdev)
+{
+ struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
+ struct fb_info *info;
+ int i;
-static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
+ fb_unregister_client(&priv->notifier);
+
+ for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
+ if (priv->ch[i].info && priv->ch[i].info->dev)
+ unregister_framebuffer(priv->ch[i].info);
+
+ sh_mobile_lcdc_stop(priv);
+
+ for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
+ info = priv->ch[i].info;
+
+ if (!info || !info->device)
+ continue;
+
+ if (priv->ch[i].sglist)
+ vfree(priv->ch[i].sglist);
+
+ if (info->screen_base)
+ dma_free_coherent(&pdev->dev, info->fix.smem_len,
+ info->screen_base,
+ priv->ch[i].dma_handle);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
+ if (priv->ch[i].bl)
+ sh_mobile_lcdc_bl_remove(priv->ch[i].bl);
+ }
+
+ if (priv->dot_clk)
+ clk_put(priv->dot_clk);
+
+ if (priv->dev)
+ pm_runtime_disable(priv->dev);
+
+ if (priv->base)
+ iounmap(priv->base);
+
+ if (priv->irq)
+ free_irq(priv->irq, priv);
+ kfree(priv);
+ return 0;
+}
+
+static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
+ struct device *dev)
{
+ struct sh_mobile_lcdc_chan_cfg *cfg = &ch->cfg;
+ const struct fb_videomode *max_mode;
+ const struct fb_videomode *mode;
+ struct fb_var_screeninfo *var;
struct fb_info *info;
- struct sh_mobile_lcdc_priv *priv;
+ unsigned int max_size;
+ int num_cfg;
+ void *buf;
+ int ret;
+ int i;
+
+ mutex_init(&ch->open_lock);
+
+ /* Allocate the frame buffer device. */
+ ch->info = framebuffer_alloc(0, dev);
+ if (!ch->info) {
+ dev_err(dev, "unable to allocate fb_info\n");
+ return -ENOMEM;
+ }
+
+ info = ch->info;
+ info->fbops = &sh_mobile_lcdc_ops;
+ info->par = ch;
+ info->pseudo_palette = &ch->pseudo_palette;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ /* Iterate through the modes to validate them and find the highest
+ * resolution.
+ */
+ max_mode = NULL;
+ max_size = 0;
+
+ for (i = 0, mode = cfg->lcd_cfg; i < cfg->num_cfg; i++, mode++) {
+ unsigned int size = mode->yres * mode->xres;
+
+ /* NV12 buffers must have even number of lines */
+ if ((cfg->nonstd) && cfg->bpp == 12 &&
+ (mode->yres & 0x1)) {
+ dev_err(dev, "yres must be a multiple of 2 for YCbCr420 "
+ "mode.\n");
+ return -EINVAL;
+ }
+
+ if (size > max_size) {
+ max_mode = mode;
+ max_size = size;
+ }
+ }
+
+ if (!max_size)
+ max_size = MAX_XRES * MAX_YRES;
+ else
+ dev_dbg(dev, "Found largest videomode %ux%u\n",
+ max_mode->xres, max_mode->yres);
+
+ /* Initialize fixed screen information. Restrict pan to 2-line steps
+ * for NV12.
+ */
+ info->fix = sh_mobile_lcdc_fix;
+ info->fix.smem_len = max_size * 2 * cfg->bpp / 8;
+ if (cfg->nonstd && cfg->bpp == 12)
+ info->fix.ypanstep = 2;
+
+ /* Create the mode list. */
+ if (cfg->lcd_cfg == NULL) {
+ mode = &default_720p;
+ num_cfg = 1;
+ } else {
+ mode = cfg->lcd_cfg;
+ num_cfg = cfg->num_cfg;
+ }
+
+ fb_videomode_to_modelist(mode, num_cfg, &info->modelist);
+
+ /* Initialize variable screen information using the first mode as
+ * default. The default Y virtual resolution is twice the panel size to
+ * allow for double-buffering.
+ */
+ var = &info->var;
+ fb_videomode_to_var(var, mode);
+ var->bits_per_pixel = cfg->bpp;
+ var->width = cfg->lcd_size_cfg.width;
+ var->height = cfg->lcd_size_cfg.height;
+ var->yres_virtual = var->yres * 2;
+ var->activate = FB_ACTIVATE_NOW;
+
+ ret = sh_mobile_check_var(var, info);
+ if (ret)
+ return ret;
+
+ /* Allocate frame buffer memory and color map. */
+ buf = dma_alloc_coherent(dev, info->fix.smem_len, &ch->dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ dev_err(dev, "unable to allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
+ if (ret < 0) {
+ dev_err(dev, "unable to allocate cmap\n");
+ dma_free_coherent(dev, info->fix.smem_len,
+ buf, ch->dma_handle);
+ return ret;
+ }
+
+ info->fix.smem_start = ch->dma_handle;
+ if (var->nonstd)
+ info->fix.line_length = var->xres;
+ else
+ info->fix.line_length = var->xres * (cfg->bpp / 8);
+
+ info->screen_base = buf;
+ info->device = dev;
+ ch->display_var = *var;
+
+ return 0;
+}
+
+static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
+{
struct sh_mobile_lcdc_info *pdata = pdev->dev.platform_data;
+ struct sh_mobile_lcdc_priv *priv;
struct resource *res;
+ int num_channels;
int error;
- void *buf;
- int i, j;
+ int i;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
@@ -1440,7 +1577,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED,
+ error = request_irq(i, sh_mobile_lcdc_irq, 0,
dev_name(&pdev->dev), priv);
if (error) {
dev_err(&pdev->dev, "unable to request irq\n");
@@ -1450,9 +1587,8 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
priv->irq = i;
atomic_set(&priv->hw_usecnt, -1);
- j = 0;
- for (i = 0; i < ARRAY_SIZE(pdata->ch); i++) {
- struct sh_mobile_lcdc_chan *ch = priv->ch + j;
+ for (i = 0, num_channels = 0; i < ARRAY_SIZE(pdata->ch); i++) {
+ struct sh_mobile_lcdc_chan *ch = priv->ch + num_channels;
ch->lcdc = priv;
memcpy(&ch->cfg, &pdata->ch[i], sizeof(pdata->ch[i]));
@@ -1472,26 +1608,26 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
switch (pdata->ch[i].chan) {
case LCDC_CHAN_MAINLCD:
- ch->enabled = 1 << 1;
+ ch->enabled = LDCNT2R_ME;
ch->reg_offs = lcdc_offs_mainlcd;
- j++;
+ num_channels++;
break;
case LCDC_CHAN_SUBLCD:
- ch->enabled = 1 << 2;
+ ch->enabled = LDCNT2R_SE;
ch->reg_offs = lcdc_offs_sublcd;
- j++;
+ num_channels++;
break;
}
}
- if (!j) {
+ if (!num_channels) {
dev_err(&pdev->dev, "no channels defined\n");
error = -EINVAL;
goto err1;
}
/* for dual channel LCDC (MAIN + SUB) force shared bpp setting */
- if (j == 2)
+ if (num_channels == 2)
priv->forced_bpp = pdata->ch[0].bpp;
priv->base = ioremap_nocache(res->start, resource_size(res));
@@ -1506,125 +1642,23 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
priv->meram_dev = pdata->meram_dev;
- for (i = 0; i < j; i++) {
- struct fb_var_screeninfo *var;
- const struct fb_videomode *lcd_cfg, *max_cfg = NULL;
+ for (i = 0; i < num_channels; i++) {
struct sh_mobile_lcdc_chan *ch = priv->ch + i;
- struct sh_mobile_lcdc_chan_cfg *cfg = &ch->cfg;
- const struct fb_videomode *mode = cfg->lcd_cfg;
- unsigned long max_size = 0;
- int k;
- int num_cfg;
-
- ch->info = framebuffer_alloc(0, &pdev->dev);
- if (!ch->info) {
- dev_err(&pdev->dev, "unable to allocate fb_info\n");
- error = -ENOMEM;
- break;
- }
-
- info = ch->info;
- var = &info->var;
- info->fbops = &sh_mobile_lcdc_ops;
- info->par = ch;
-
- mutex_init(&ch->open_lock);
-
- for (k = 0, lcd_cfg = mode;
- k < cfg->num_cfg && lcd_cfg;
- k++, lcd_cfg++) {
- unsigned long size = lcd_cfg->yres * lcd_cfg->xres;
- /* NV12 buffers must have even number of lines */
- if ((cfg->nonstd) && cfg->bpp == 12 &&
- (lcd_cfg->yres & 0x1)) {
- dev_err(&pdev->dev, "yres must be multiple of 2"
- " for YCbCr420 mode.\n");
- error = -EINVAL;
- goto err1;
- }
-
- if (size > max_size) {
- max_cfg = lcd_cfg;
- max_size = size;
- }
- }
-
- if (!mode)
- max_size = MAX_XRES * MAX_YRES;
- else if (max_cfg)
- dev_dbg(&pdev->dev, "Found largest videomode %ux%u\n",
- max_cfg->xres, max_cfg->yres);
- info->fix = sh_mobile_lcdc_fix;
- info->fix.smem_len = max_size * 2 * cfg->bpp / 8;
-
- /* Only pan in 2 line steps for NV12 */
- if (cfg->nonstd && cfg->bpp == 12)
- info->fix.ypanstep = 2;
-
- if (!mode) {
- mode = &default_720p;
- num_cfg = 1;
- } else {
- num_cfg = cfg->num_cfg;
- }
-
- fb_videomode_to_modelist(mode, num_cfg, &info->modelist);
-
- fb_videomode_to_var(var, mode);
- var->width = cfg->lcd_size_cfg.width;
- var->height = cfg->lcd_size_cfg.height;
- /* Default Y virtual resolution is 2x panel size */
- var->yres_virtual = var->yres * 2;
- var->activate = FB_ACTIVATE_NOW;
-
- error = sh_mobile_lcdc_set_bpp(var, cfg->bpp, cfg->nonstd);
+ error = sh_mobile_lcdc_channel_init(ch, &pdev->dev);
if (error)
- break;
-
- buf = dma_alloc_coherent(&pdev->dev, info->fix.smem_len,
- &ch->dma_handle, GFP_KERNEL);
- if (!buf) {
- dev_err(&pdev->dev, "unable to allocate buffer\n");
- error = -ENOMEM;
- break;
- }
-
- info->pseudo_palette = &ch->pseudo_palette;
- info->flags = FBINFO_FLAG_DEFAULT;
-
- error = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
- if (error < 0) {
- dev_err(&pdev->dev, "unable to allocate cmap\n");
- dma_free_coherent(&pdev->dev, info->fix.smem_len,
- buf, ch->dma_handle);
- break;
- }
-
- info->fix.smem_start = ch->dma_handle;
- if (var->nonstd)
- info->fix.line_length = var->xres;
- else
- info->fix.line_length = var->xres * (cfg->bpp / 8);
-
- info->screen_base = buf;
- info->device = &pdev->dev;
- ch->display_var = *var;
+ goto err1;
}
- if (error)
- goto err1;
-
error = sh_mobile_lcdc_start(priv);
if (error) {
dev_err(&pdev->dev, "unable to start hardware\n");
goto err1;
}
- for (i = 0; i < j; i++) {
+ for (i = 0; i < num_channels; i++) {
struct sh_mobile_lcdc_chan *ch = priv->ch + i;
-
- info = ch->info;
+ struct fb_info *info = ch->info;
if (info->fbdefio) {
ch->sglist = vmalloc(sizeof(struct scatterlist) *
@@ -1665,57 +1699,6 @@ err1:
return error;
}
-static int sh_mobile_lcdc_remove(struct platform_device *pdev)
-{
- struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
- struct fb_info *info;
- int i;
-
- fb_unregister_client(&priv->notifier);
-
- for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
- if (priv->ch[i].info && priv->ch[i].info->dev)
- unregister_framebuffer(priv->ch[i].info);
-
- sh_mobile_lcdc_stop(priv);
-
- for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
- info = priv->ch[i].info;
-
- if (!info || !info->device)
- continue;
-
- if (priv->ch[i].sglist)
- vfree(priv->ch[i].sglist);
-
- if (info->screen_base)
- dma_free_coherent(&pdev->dev, info->fix.smem_len,
- info->screen_base,
- priv->ch[i].dma_handle);
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
-
- for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
- if (priv->ch[i].bl)
- sh_mobile_lcdc_bl_remove(priv->ch[i].bl);
- }
-
- if (priv->dot_clk)
- clk_put(priv->dot_clk);
-
- if (priv->dev)
- pm_runtime_disable(priv->dev);
-
- if (priv->base)
- iounmap(priv->base);
-
- if (priv->irq)
- free_irq(priv->irq, priv);
- kfree(priv);
- return 0;
-}
-
static struct platform_driver sh_mobile_lcdc_driver = {
.driver = {
.name = "sh_mobile_lcdc_fb",
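
The new sh_mobile_check_var() above picks the closest board-provided mode by minimizing the area of the non-overlapping parts of the requested and candidate rectangles, after rejecting candidates smaller than the request. A small standalone illustration of that metric; the names and values below are ours, not the driver's:

#include <stdio.h>

static unsigned int mode_dist(unsigned int vx, unsigned int vy,
			      unsigned int mx, unsigned int my)
{
	unsigned int ox = vx < mx ? vx : mx;	/* overlap width */
	unsigned int oy = vy < my ? vy : my;	/* overlap height */

	/* size of the non-overlapping parts of the two rectangles */
	return vx * vy + mx * my - 2 * ox * oy;
}

int main(void)
{
	/* Requested 800x480 against two candidate modes. */
	printf("%u\n", mode_dist(800, 480, 800, 600));	/* 96000  */
	printf("%u\n", mode_dist(800, 480, 1280, 720));	/* 537600 */
	return 0;
}

The smaller distance wins, so the 800x600 candidate would be chosen here.
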
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index aeed6687e6a..a58a0f38848 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -18,6 +18,13 @@ struct sh_mobile_lcdc_priv;
struct fb_info;
struct backlight_device;
+/*
+ * struct sh_mobile_lcdc_chan - LCDC display channel
+ *
+ * @base_addr_y: Frame buffer viewport base address (luma component)
+ * @base_addr_c: Frame buffer viewport base address (chroma component)
+ * @pitch: Frame buffer line pitch
+ */
struct sh_mobile_lcdc_chan {
struct sh_mobile_lcdc_priv *lcdc;
unsigned long *reg_offs;
@@ -25,7 +32,6 @@ struct sh_mobile_lcdc_chan {
unsigned long enabled; /* ME and SE in LDCNT2R */
struct sh_mobile_lcdc_chan_cfg cfg;
u32 pseudo_palette[PALETTE_NR];
- unsigned long saved_ch_regs[NR_CH_REGS];
struct fb_info *info;
struct backlight_device *bl;
dma_addr_t dma_handle;
@@ -40,6 +46,10 @@ struct sh_mobile_lcdc_chan {
int blank_status;
struct mutex open_lock; /* protects the use counter */
int meram_enabled;
+
+ unsigned long base_addr_y;
+ unsigned long base_addr_c;
+ unsigned int pitch;
};
#endif
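
The base_addr_y, base_addr_c and pitch fields documented above are recomputed by sh_mobile_fb_pan_display() on every pan. A standalone illustration of the Y/C base address arithmetic for an NV12 (nonstd, 12 bpp) surface, using made-up values:

#include <stdio.h>

int main(void)
{
	/* NV12: 8-bit luma plane followed by an interleaved CbCr plane. */
	unsigned long dma_handle = 0x40000000UL;
	unsigned int xres = 1280, yres_virtual = 1440;	/* double buffered */
	unsigned int bits_per_pixel = 12;
	unsigned int xoffset = 0, yoffset = 720;	/* pan to 2nd buffer */
	unsigned int line_length = xres;		/* 1 byte/pixel luma */

	unsigned long base_addr_y = dma_handle
				  + yoffset * line_length + xoffset;
	unsigned long c_offset = yoffset * line_length
			       * (bits_per_pixel - 8) / 8;
	unsigned long base_addr_c = dma_handle + xres * yres_virtual
				  + c_offset + xoffset;

	printf("Y: 0x%lx  C: 0x%lx\n", base_addr_y, base_addr_c);
	return 0;
}
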
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index cc7d7329dc1..4d63490209c 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -12,29 +12,103 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-
-#include "sh_mobile_meram.h"
+#include <video/sh_mobile_meram.h>
/* meram registers */
-#define MExxCTL 0x0
-#define MExxBSIZE 0x4
-#define MExxMNCF 0x8
-#define MExxSARA 0x10
-#define MExxSARB 0x14
-#define MExxSBSIZE 0x18
-
-#define MERAM_MExxCTL_VAL(ctl, next_icb, addr) \
- ((ctl) | (((next_icb) & 0x1f) << 11) | (((addr) & 0x7ff) << 16))
-#define MERAM_MExxBSIZE_VAL(a, b, c) \
- (((a) << 28) | ((b) << 16) | (c))
-
-#define MEVCR1 0x4
-#define MEACTS 0x10
-#define MEQSEL1 0x40
-#define MEQSEL2 0x44
+#define MEVCR1 0x4
+#define MEVCR1_RST (1 << 31)
+#define MEVCR1_WD (1 << 30)
+#define MEVCR1_AMD1 (1 << 29)
+#define MEVCR1_AMD0 (1 << 28)
+#define MEQSEL1 0x40
+#define MEQSEL2 0x44
+
+#define MExxCTL 0x400
+#define MExxCTL_BV (1 << 31)
+#define MExxCTL_BSZ_SHIFT 28
+#define MExxCTL_MSAR_MASK (0x7ff << MExxCTL_MSAR_SHIFT)
+#define MExxCTL_MSAR_SHIFT 16
+#define MExxCTL_NXT_MASK (0x1f << MExxCTL_NXT_SHIFT)
+#define MExxCTL_NXT_SHIFT 11
+#define MExxCTL_WD1 (1 << 10)
+#define MExxCTL_WD0 (1 << 9)
+#define MExxCTL_WS (1 << 8)
+#define MExxCTL_CB (1 << 7)
+#define MExxCTL_WBF (1 << 6)
+#define MExxCTL_WF (1 << 5)
+#define MExxCTL_RF (1 << 4)
+#define MExxCTL_CM (1 << 3)
+#define MExxCTL_MD_READ (1 << 0)
+#define MExxCTL_MD_WRITE (2 << 0)
+#define MExxCTL_MD_ICB_WB (3 << 0)
+#define MExxCTL_MD_ICB (4 << 0)
+#define MExxCTL_MD_FB (7 << 0)
+#define MExxCTL_MD_MASK (7 << 0)
+#define MExxBSIZE 0x404
+#define MExxBSIZE_RCNT_SHIFT 28
+#define MExxBSIZE_YSZM1_SHIFT 16
+#define MExxBSIZE_XSZM1_SHIFT 0
+#define MExxMNCF 0x408
+#define MExxMNCF_KWBNM_SHIFT 28
+#define MExxMNCF_KRBNM_SHIFT 24
+#define MExxMNCF_BNM_SHIFT 16
+#define MExxMNCF_XBV (1 << 15)
+#define MExxMNCF_CPL_YCBCR444 (1 << 12)
+#define MExxMNCF_CPL_YCBCR420 (2 << 12)
+#define MExxMNCF_CPL_YCBCR422 (3 << 12)
+#define MExxMNCF_CPL_MSK (3 << 12)
+#define MExxMNCF_BL (1 << 2)
+#define MExxMNCF_LNM_SHIFT 0
+#define MExxSARA 0x410
+#define MExxSARB 0x414
+#define MExxSBSIZE 0x418
+#define MExxSBSIZE_HDV (1 << 31)
+#define MExxSBSIZE_HSZ16 (0 << 28)
+#define MExxSBSIZE_HSZ32 (1 << 28)
+#define MExxSBSIZE_HSZ64 (2 << 28)
+#define MExxSBSIZE_HSZ128 (3 << 28)
+#define MExxSBSIZE_SBSIZZ_SHIFT 0
+
+#define MERAM_MExxCTL_VAL(next, addr) \
+ ((((next) << MExxCTL_NXT_SHIFT) & MExxCTL_NXT_MASK) | \
+ (((addr) << MExxCTL_MSAR_SHIFT) & MExxCTL_MSAR_MASK))
+#define MERAM_MExxBSIZE_VAL(rcnt, yszm1, xszm1) \
+ (((rcnt) << MExxBSIZE_RCNT_SHIFT) | \
+ ((yszm1) << MExxBSIZE_YSZM1_SHIFT) | \
+ ((xszm1) << MExxBSIZE_XSZM1_SHIFT))
+
+#define SH_MOBILE_MERAM_ICB_NUM 32
+
+static unsigned long common_regs[] = {
+ MEVCR1,
+ MEQSEL1,
+ MEQSEL2,
+};
+#define CMN_REGS_SIZE ARRAY_SIZE(common_regs)
+
+static unsigned long icb_regs[] = {
+ MExxCTL,
+ MExxBSIZE,
+ MExxMNCF,
+ MExxSARA,
+ MExxSARB,
+ MExxSBSIZE,
+};
+#define ICB_REGS_SIZE ARRAY_SIZE(icb_regs)
+
+struct sh_mobile_meram_priv {
+ void __iomem *base;
+ struct mutex lock;
+ unsigned long used_icb;
+ int used_meram_cache_regions;
+ unsigned long used_meram_cache[SH_MOBILE_MERAM_ICB_NUM];
+ unsigned long cmn_saved_regs[CMN_REGS_SIZE];
+ unsigned long icb_saved_regs[ICB_REGS_SIZE * SH_MOBILE_MERAM_ICB_NUM];
+};
/* settings */
#define MERAM_SEC_LINE 15
@@ -44,8 +118,7 @@
* MERAM/ICB access functions
*/
-#define MERAM_ICB_OFFSET(base, idx, off) \
- ((base) + (0x400 + ((idx) * 0x20) + (off)))
+#define MERAM_ICB_OFFSET(base, idx, off) ((base) + (off) + (idx) * 0x20)
static inline void meram_write_icb(void __iomem *base, int idx, int off,
unsigned long val)
@@ -280,17 +353,18 @@ static int meram_init(struct sh_mobile_meram_priv *priv,
/*
* Set MERAM for framebuffer
*
- * 0x70f: WD = 0x3, WS=0x1, CM=0x1, MD=FB mode
* we also chain the cache_icb and the marker_icb.
* we also split the allocated MERAM buffer between two ICBs.
*/
meram_write_icb(priv->base, icb->cache_icb, MExxCTL,
- MERAM_MExxCTL_VAL(0x70f, icb->marker_icb,
- icb->meram_offset));
+ MERAM_MExxCTL_VAL(icb->marker_icb, icb->meram_offset) |
+ MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM |
+ MExxCTL_MD_FB);
meram_write_icb(priv->base, icb->marker_icb, MExxCTL,
- MERAM_MExxCTL_VAL(0x70f, icb->cache_icb,
- icb->meram_offset +
- icb->meram_size / 2));
+ MERAM_MExxCTL_VAL(icb->cache_icb, icb->meram_offset +
+ icb->meram_size / 2) |
+ MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM |
+ MExxCTL_MD_FB);
return 0;
}
@@ -299,8 +373,10 @@ static void meram_deinit(struct sh_mobile_meram_priv *priv,
struct sh_mobile_meram_icb *icb)
{
/* disable ICB */
- meram_write_icb(priv->base, icb->cache_icb, MExxCTL, 0);
- meram_write_icb(priv->base, icb->marker_icb, MExxCTL, 0);
+ meram_write_icb(priv->base, icb->cache_icb, MExxCTL,
+ MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF);
+ meram_write_icb(priv->base, icb->marker_icb, MExxCTL,
+ MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF);
icb->cache_unit = 0;
}
@@ -337,24 +413,22 @@ static int sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
xres, yres, (!pixelformat) ? "yuv" : "rgb",
base_addr_y, base_addr_c);
- mutex_lock(&priv->lock);
-
/* we can't handle wider than 8192px */
if (xres > 8192) {
dev_err(&pdev->dev, "width exceeding the limit (> 8192).");
- error = -EINVAL;
- goto err;
- }
-
- if (priv->used_meram_cache_regions + 2 > SH_MOBILE_MERAM_ICB_NUM) {
- dev_err(&pdev->dev, "no more ICB available.");
- error = -EINVAL;
- goto err;
+ return -EINVAL;
}
/* do we have at least one ICB config? */
if (cfg->icb[0].marker_icb < 0 || cfg->icb[0].cache_icb < 0) {
dev_err(&pdev->dev, "at least one ICB is required.");
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (priv->used_meram_cache_regions + 2 > SH_MOBILE_MERAM_ICB_NUM) {
+ dev_err(&pdev->dev, "no more ICB available.");
error = -EINVAL;
goto err;
}
@@ -460,6 +534,57 @@ static int sh_mobile_meram_update(struct sh_mobile_meram_info *pdata,
return 0;
}
+static int sh_mobile_meram_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
+ int k, j;
+
+ for (k = 0; k < CMN_REGS_SIZE; k++)
+ priv->cmn_saved_regs[k] = meram_read_reg(priv->base,
+ common_regs[k]);
+
+ for (j = 0; j < 32; j++) {
+ if (!test_bit(j, &priv->used_icb))
+ continue;
+ for (k = 0; k < ICB_REGS_SIZE; k++) {
+ priv->icb_saved_regs[j * ICB_REGS_SIZE + k] =
+ meram_read_icb(priv->base, j, icb_regs[k]);
+ /* Reset ICB on resume */
+ if (icb_regs[k] == MExxCTL)
+ priv->icb_saved_regs[j * ICB_REGS_SIZE + k] |=
+ MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF;
+ }
+ }
+ return 0;
+}
+
+static int sh_mobile_meram_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
+ int k, j;
+
+ for (j = 0; j < 32; j++) {
+ if (!test_bit(j, &priv->used_icb))
+ continue;
+ for (k = 0; k < ICB_REGS_SIZE; k++) {
+ meram_write_icb(priv->base, j, icb_regs[k],
+ priv->icb_saved_regs[j * ICB_REGS_SIZE + k]);
+ }
+ }
+
+ for (k = 0; k < CMN_REGS_SIZE; k++)
+ meram_write_reg(priv->base, common_regs[k],
+ priv->cmn_saved_regs[k]);
+ return 0;
+}
+
+static const struct dev_pm_ops sh_mobile_meram_dev_pm_ops = {
+ .runtime_suspend = sh_mobile_meram_runtime_suspend,
+ .runtime_resume = sh_mobile_meram_runtime_resume,
+};
+
static struct sh_mobile_meram_ops sh_mobile_meram_ops = {
.module = THIS_MODULE,
.meram_register = sh_mobile_meram_register,
@@ -513,7 +638,9 @@ static int __devinit sh_mobile_meram_probe(struct platform_device *pdev)
/* initialize ICB addressing mode */
if (pdata->addr_mode == SH_MOBILE_MERAM_MODE1)
- meram_write_reg(priv->base, MEVCR1, 1 << 29);
+ meram_write_reg(priv->base, MEVCR1, MEVCR1_AMD1);
+
+ pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "sh_mobile_meram initialized.");
@@ -530,6 +657,8 @@ static int sh_mobile_meram_remove(struct platform_device *pdev)
{
struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+
if (priv->base)
iounmap(priv->base);
@@ -544,6 +673,7 @@ static struct platform_driver sh_mobile_meram_driver = {
.driver = {
.name = "sh_mobile_meram",
.owner = THIS_MODULE,
+ .pm = &sh_mobile_meram_dev_pm_ops,
},
.probe = sh_mobile_meram_probe,
.remove = sh_mobile_meram_remove,
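
The runtime PM hooks added to sh_mobile_meram.c save and restore the hardware context with tables of register offsets (common_regs and icb_regs), patching the saved MExxCTL value so each ICB is flushed and reset when it is written back. A condensed, self-contained sketch of the same table-driven save/restore idea, with a pretend register file standing in for the MMIO region and made-up names throughout:

#include <stdio.h>

#define NREGS 3

static const unsigned int reg_offs[NREGS] = { 0x04, 0x40, 0x44 };
static unsigned int hw[0x100 / 4];		/* pretend register file */
static unsigned int saved[NREGS];

static unsigned int reg_read(unsigned int off)  { return hw[off / 4]; }
static void reg_write(unsigned int off, unsigned int v) { hw[off / 4] = v; }

static void ctx_save(void)
{
	for (int i = 0; i < NREGS; i++)
		saved[i] = reg_read(reg_offs[i]);
}

static void ctx_restore(void)
{
	for (int i = 0; i < NREGS; i++)
		reg_write(reg_offs[i], saved[i]);
}

int main(void)
{
	reg_write(0x40, 0x1234);
	ctx_save();
	reg_write(0x40, 0);			/* power-down clobbers state */
	ctx_restore();
	printf("0x%x\n", reg_read(0x40));	/* back to 0x1234 */
	return 0;
}
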
diff --git a/drivers/video/sh_mobile_meram.h b/drivers/video/sh_mobile_meram.h
deleted file mode 100644
index 82c54fbce8b..00000000000
--- a/drivers/video/sh_mobile_meram.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __sh_mobile_meram_h__
-#define __sh_mobile_meram_h__
-
-#include <linux/mutex.h>
-#include <video/sh_mobile_meram.h>
-
-/*
- * MERAM private
- */
-
-#define MERAM_ICB_Y 0x1
-#define MERAM_ICB_C 0x2
-
-/* MERAM cache size */
-#define SH_MOBILE_MERAM_ICB_NUM 32
-
-#define SH_MOBILE_MERAM_CACHE_OFFSET(p) ((p) >> 16)
-#define SH_MOBILE_MERAM_CACHE_SIZE(p) ((p) & 0xffff)
-
-struct sh_mobile_meram_priv {
- void __iomem *base;
- struct mutex lock;
- unsigned long used_icb;
- int used_meram_cache_regions;
- unsigned long used_meram_cache[SH_MOBILE_MERAM_ICB_NUM];
-};
-
-int sh_mobile_meram_alloc_icb(const struct sh_mobile_meram_cfg *cfg,
- int xres,
- int yres,
- unsigned int base_addr,
- int yuv_mode,
- int *marker_icb,
- int *out_pitch);
-
-void sh_mobile_meram_free_icb(int marker_icb);
-
-#define SH_MOBILE_MERAM_START(ind, ab) \
- (0xC0000000 | ((ab & 0x1) << 23) | ((ind & 0x1F) << 24))
-
-#endif /* !__sh_mobile_meram_h__ */
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 75259845933..078ca2167d6 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1333,19 +1333,14 @@ sisfb_set_base_CRT2(struct sis_video_info *ivideo, unsigned int base)
}
static int
-sisfb_pan_var(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
+sisfb_pan_var(struct sis_video_info *ivideo, struct fb_info *info,
+ struct fb_var_screeninfo *var)
{
- if(var->xoffset > (var->xres_virtual - var->xres)) {
- return -EINVAL;
- }
- if(var->yoffset > (var->yres_virtual - var->yres)) {
- return -EINVAL;
- }
-
- ivideo->current_base = (var->yoffset * var->xres_virtual) + var->xoffset;
+ ivideo->current_base = var->yoffset * info->var.xres_virtual
+ + var->xoffset;
/* calculate base bpp dep. */
- switch(var->bits_per_pixel) {
+ switch (info->var.bits_per_pixel) {
case 32:
break;
case 16:
@@ -1635,20 +1630,15 @@ sisfb_pan_display(struct fb_var_screeninfo *var, struct fb_info* info)
struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
int err;
- if(var->xoffset > (var->xres_virtual - var->xres))
- return -EINVAL;
-
- if(var->yoffset > (var->yres_virtual - var->yres))
- return -EINVAL;
-
- if(var->vmode & FB_VMODE_YWRAP)
+ if (var->vmode & FB_VMODE_YWRAP)
return -EINVAL;
- if(var->xoffset + info->var.xres > info->var.xres_virtual ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
+ if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
- if((err = sisfb_pan_var(ivideo, var)) < 0)
+ err = sisfb_pan_var(ivideo, info, var);
+ if (err < 0)
return err;
info->var.xoffset = var->xoffset;
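
The sisfb change above derives the panning base from the frame buffer's own virtual geometry (info->var) rather than the caller-supplied var, whose offsets sisfb_pan_display() has already validated against info->var. A tiny standalone example of the base arithmetic, first as a pixel offset and then as a byte offset for an assumed 16 bpp mode; the driver's own depth-dependent scaling is not reproduced here:

#include <stdio.h>

int main(void)
{
	unsigned int xres_virtual = 1024, bits_per_pixel = 16;
	unsigned int xoffset = 0, yoffset = 300;

	unsigned int base_pixels = yoffset * xres_virtual + xoffset;
	unsigned int base_bytes  = base_pixels * (bits_per_pixel / 8);

	printf("base: %u pixels, %u bytes\n", base_pixels, base_bytes);
	return 0;
}
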
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 89158bc71da..30f7a815a62 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -989,7 +989,7 @@ static struct platform_device *xxxfb_device;
*/
int __init xxxfb_setup(char *options)
{
- /* Parse user speficied options (`video=xxxfb:') */
+ /* Parse user specified options (`video=xxxfb:') */
}
#endif /* MODULE */
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 6294dca9550..a78254cf8e8 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -582,7 +582,7 @@ static int sm501fb_pan_crt(struct fb_var_screeninfo *var,
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
- unsigned int bytes_pixel = var->bits_per_pixel / 8;
+ unsigned int bytes_pixel = info->var.bits_per_pixel / 8;
unsigned long reg;
unsigned long xoffs;
@@ -614,10 +614,10 @@ static int sm501fb_pan_pnl(struct fb_var_screeninfo *var,
struct sm501fb_info *fbi = par->info;
unsigned long reg;
- reg = var->xoffset | (var->xres_virtual << 16);
+ reg = var->xoffset | (info->var.xres_virtual << 16);
smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_WIDTH);
- reg = var->yoffset | (var->yres_virtual << 16);
+ reg = var->yoffset | (info->var.yres_virtual << 16);
smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_HEIGHT);
sm501fb_sync_regs(fbi);
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
new file mode 100644
index 00000000000..aaccffac67a
--- /dev/null
+++ b/drivers/video/smscufx.c
@@ -0,0 +1,1994 @@
+/*
+ * smscufx.c -- Framebuffer driver for SMSC UFX USB controller
+ *
+ * Copyright (C) 2011 Steve Glendinning <steve.glendinning@smsc.com>
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Based on udlfb, with work from Florian Echtler, Henrik Bjerregaard Pedersen,
+ * and others.
+ *
+ * Works well with Bernie Thompson's X DAMAGE patch to xf86-video-fbdev
+ * available from http://git.plugable.com
+ *
+ * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
+ * usb-skeleton by GregKH.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "edid.h"
+
+#define check_warn(status, fmt, args...) \
+ ({ if (status < 0) pr_warn(fmt, ##args); })
+
+#define check_warn_return(status, fmt, args...) \
+ ({ if (status < 0) { pr_warn(fmt, ##args); return status; } })
+
+#define check_warn_goto_error(status, fmt, args...) \
+ ({ if (status < 0) { pr_warn(fmt, ##args); goto error; } })
+
+#define all_bits_set(x, bits) (((x) & (bits)) == (bits))
+
+#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1
+
+/*
+ * TODO: Propose standard fb.h ioctl for reporting damage,
+ * using _IOWR() and one of the existing area structs from fb.h
+ * Consider these ioctls deprecated, but they're still used by the
+ * DisplayLink X server as yet - need both to be modified in tandem
+ * when new ioctl(s) are ready.
+ */
+#define UFX_IOCTL_RETURN_EDID (0xAD)
+#define UFX_IOCTL_REPORT_DAMAGE (0xAA)
+
+/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
+#define BULK_SIZE (512)
+#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
+#define WRITES_IN_FLIGHT (4)
+
+#define GET_URB_TIMEOUT (HZ)
+#define FREE_URB_TIMEOUT (HZ*2)
+
+#define BPP 2
+
+#define UFX_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define UFX_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
+
+struct dloarea {
+ int x, y;
+ int w, h;
+};
+
+struct urb_node {
+ struct list_head entry;
+ struct ufx_data *dev;
+ struct delayed_work release_urb_work;
+ struct urb *urb;
+};
+
+struct urb_list {
+ struct list_head list;
+ spinlock_t lock;
+ struct semaphore limit_sem;
+ int available;
+ int count;
+ size_t size;
+};
+
+struct ufx_data {
+ struct usb_device *udev;
+ struct device *gdev; /* &udev->dev */
+ struct fb_info *info;
+ struct urb_list urbs;
+ struct kref kref;
+ int fb_count;
+ bool virtualized; /* true when physical usb device not present */
+ struct delayed_work free_framebuffer_work;
+ atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
+ atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
+ u8 *edid; /* null until we read edid from hw or get from sysfs */
+ size_t edid_size;
+ u32 pseudo_palette[256];
+};
+
+static struct fb_fix_screeninfo ufx_fix = {
+ .id = "smscufx",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const u32 smscufx_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
+ FBINFO_VIRTFB | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
+
+static struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x0424, 0x9d00),},
+ {USB_DEVICE(0x0424, 0x9d01),},
+ {},
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+/* module options */
+static int console; /* Optionally allow fbcon to consume first framebuffer */
+static int fb_defio = true; /* Optionally enable fb_defio mmap support */
+
+/* ufx keeps a list of urbs for efficient bulk transfers */
+static void ufx_urb_completion(struct urb *urb);
+static struct urb *ufx_get_urb(struct ufx_data *dev);
+static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len);
+static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size);
+static void ufx_free_urb_list(struct ufx_data *dev);
+
+/* reads a control register */
+static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data)
+{
+ u32 *buf = kmalloc(4, GFP_KERNEL);
+ int ret;
+
+ BUG_ON(!dev);
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_READ_REGISTER,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+
+ le32_to_cpus(buf);
+ *data = *buf;
+ kfree(buf);
+
+ if (unlikely(ret < 0))
+ pr_warn("Failed to read register index 0x%08x\n", index);
+
+ return ret;
+}
+
+/* writes a control register */
+static int ufx_reg_write(struct ufx_data *dev, u32 index, u32 data)
+{
+ u32 *buf = kmalloc(4, GFP_KERNEL);
+ int ret;
+
+ BUG_ON(!dev);
+
+ if (!buf)
+ return -ENOMEM;
+
+ *buf = data;
+ cpu_to_le32s(buf);
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_WRITE_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+
+ kfree(buf);
+
+ if (unlikely(ret < 0))
+ pr_warn("Failed to write register index 0x%08x with value "
+ "0x%08x\n", index, data);
+
+ return ret;
+}
+
+static int ufx_reg_clear_and_set_bits(struct ufx_data *dev, u32 index,
+ u32 bits_to_clear, u32 bits_to_set)
+{
+ u32 data;
+ int status = ufx_reg_read(dev, index, &data);
+ check_warn_return(status, "ufx_reg_clear_and_set_bits error reading "
+ "0x%x", index);
+
+ data &= (~bits_to_clear);
+ data |= bits_to_set;
+
+ status = ufx_reg_write(dev, index, data);
+ check_warn_return(status, "ufx_reg_clear_and_set_bits error writing "
+ "0x%x", index);
+
+ return 0;
+}
+
+static int ufx_reg_set_bits(struct ufx_data *dev, u32 index, u32 bits)
+{
+ return ufx_reg_clear_and_set_bits(dev, index, 0, bits);
+}
+
+static int ufx_reg_clear_bits(struct ufx_data *dev, u32 index, u32 bits)
+{
+ return ufx_reg_clear_and_set_bits(dev, index, bits, 0);
+}
+
+static int ufx_lite_reset(struct ufx_data *dev)
+{
+ int status;
+ u32 value;
+
+ status = ufx_reg_write(dev, 0x3008, 0x00000001);
+ check_warn_return(status, "ufx_lite_reset error writing 0x3008");
+
+ status = ufx_reg_read(dev, 0x3008, &value);
+ check_warn_return(status, "ufx_lite_reset error reading 0x3008");
+
+ return (value == 0) ? 0 : -EIO;
+}
+
+/* If display is unblanked, then blank it */
+static int ufx_blank(struct ufx_data *dev, bool wait)
+{
+ u32 dc_ctrl, dc_sts;
+ int i;
+
+ int status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_blank error reading 0x2004");
+
+ status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
+ check_warn_return(status, "ufx_blank error reading 0x2000");
+
+ /* return success if display is already blanked */
+ if ((dc_sts & 0x00000100) || (dc_ctrl & 0x00000100))
+ return 0;
+
+ /* request the DC to blank the display */
+ dc_ctrl |= 0x00000100;
+ status = ufx_reg_write(dev, 0x2000, dc_ctrl);
+ check_warn_return(status, "ufx_blank error writing 0x2000");
+
+ /* return success immediately if we don't have to wait */
+ if (!wait)
+ return 0;
+
+ for (i = 0; i < 250; i++) {
+ status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_blank error reading 0x2004");
+
+ if (dc_sts & 0x00000100)
+ return 0;
+ }
+
+ /* timed out waiting for display to blank */
+ return -EIO;
+}
+
+/* If display is blanked, then unblank it */
+static int ufx_unblank(struct ufx_data *dev, bool wait)
+{
+ u32 dc_ctrl, dc_sts;
+ int i;
+
+ int status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_unblank error reading 0x2004");
+
+ status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
+ check_warn_return(status, "ufx_unblank error reading 0x2000");
+
+ /* return success if display is already unblanked */
+ if (((dc_sts & 0x00000100) == 0) || ((dc_ctrl & 0x00000100) == 0))
+ return 0;
+
+ /* request the DC to unblank the display */
+ dc_ctrl &= ~0x00000100;
+ status = ufx_reg_write(dev, 0x2000, dc_ctrl);
+ check_warn_return(status, "ufx_unblank error writing 0x2000");
+
+ /* return success immediately if we don't have to wait */
+ if (!wait)
+ return 0;
+
+ for (i = 0; i < 250; i++) {
+ status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_unblank error reading 0x2004");
+
+ if ((dc_sts & 0x00000100) == 0)
+ return 0;
+ }
+
+ /* timed out waiting for display to unblank */
+ return -EIO;
+}
+
+/* If display is enabled, then disable it */
+static int ufx_disable(struct ufx_data *dev, bool wait)
+{
+ u32 dc_ctrl, dc_sts;
+ int i;
+
+ int status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_disable error reading 0x2004");
+
+ status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
+ check_warn_return(status, "ufx_disable error reading 0x2000");
+
+ /* return success if display is already disabled */
+ if (((dc_sts & 0x00000001) == 0) || ((dc_ctrl & 0x00000001) == 0))
+ return 0;
+
+ /* request the DC to disable the display */
+ dc_ctrl &= ~(0x00000001);
+ status = ufx_reg_write(dev, 0x2000, dc_ctrl);
+ check_warn_return(status, "ufx_disable error writing 0x2000");
+
+ /* return success immediately if we don't have to wait */
+ if (!wait)
+ return 0;
+
+ for (i = 0; i < 250; i++) {
+ status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_disable error reading 0x2004");
+
+ if ((dc_sts & 0x00000001) == 0)
+ return 0;
+ }
+
+ /* timed out waiting for display to disable */
+ return -EIO;
+}
+
+/* If display is disabled, then enable it */
+static int ufx_enable(struct ufx_data *dev, bool wait)
+{
+ u32 dc_ctrl, dc_sts;
+ int i;
+
+ int status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_enable error reading 0x2004");
+
+ status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
+ check_warn_return(status, "ufx_enable error reading 0x2000");
+
+ /* return success if display is already enabled */
+ if ((dc_sts & 0x00000001) || (dc_ctrl & 0x00000001))
+ return 0;
+
+ /* request the DC to enable the display */
+ dc_ctrl |= 0x00000001;
+ status = ufx_reg_write(dev, 0x2000, dc_ctrl);
+ check_warn_return(status, "ufx_enable error writing 0x2000");
+
+ /* return success immediately if we don't have to wait */
+ if (!wait)
+ return 0;
+
+ for (i = 0; i < 250; i++) {
+ status = ufx_reg_read(dev, 0x2004, &dc_sts);
+ check_warn_return(status, "ufx_enable error reading 0x2004");
+
+ if (dc_sts & 0x00000001)
+ return 0;
+ }
+
+ /* timed out waiting for display to enable */
+ return -EIO;
+}
+
+static int ufx_config_sys_clk(struct ufx_data *dev)
+{
+ int status = ufx_reg_write(dev, 0x700C, 0x8000000F);
+ check_warn_return(status, "error writing 0x700C");
+
+ status = ufx_reg_write(dev, 0x7014, 0x0010024F);
+ check_warn_return(status, "error writing 0x7014");
+
+ status = ufx_reg_write(dev, 0x7010, 0x00000000);
+ check_warn_return(status, "error writing 0x7010");
+
+ status = ufx_reg_clear_bits(dev, 0x700C, 0x0000000A);
+ check_warn_return(status, "error clearing PLL1 bypass in 0x700C");
+ msleep(1);
+
+ status = ufx_reg_clear_bits(dev, 0x700C, 0x80000000);
+ check_warn_return(status, "error clearing output gate in 0x700C");
+
+ return 0;
+}
+
+static int ufx_config_ddr2(struct ufx_data *dev)
+{
+ int status, i = 0;
+ u32 tmp;
+
+ status = ufx_reg_write(dev, 0x0004, 0x001F0F77);
+ check_warn_return(status, "error writing 0x0004");
+
+ status = ufx_reg_write(dev, 0x0008, 0xFFF00000);
+ check_warn_return(status, "error writing 0x0008");
+
+ status = ufx_reg_write(dev, 0x000C, 0x0FFF2222);
+ check_warn_return(status, "error writing 0x000C");
+
+ status = ufx_reg_write(dev, 0x0010, 0x00030814);
+ check_warn_return(status, "error writing 0x0010");
+
+ status = ufx_reg_write(dev, 0x0014, 0x00500019);
+ check_warn_return(status, "error writing 0x0014");
+
+ status = ufx_reg_write(dev, 0x0018, 0x020D0F15);
+ check_warn_return(status, "error writing 0x0018");
+
+ status = ufx_reg_write(dev, 0x001C, 0x02532305);
+ check_warn_return(status, "error writing 0x001C");
+
+ status = ufx_reg_write(dev, 0x0020, 0x0B030905);
+ check_warn_return(status, "error writing 0x0020");
+
+ status = ufx_reg_write(dev, 0x0024, 0x00000827);
+ check_warn_return(status, "error writing 0x0024");
+
+ status = ufx_reg_write(dev, 0x0028, 0x00000000);
+ check_warn_return(status, "error writing 0x0028");
+
+ status = ufx_reg_write(dev, 0x002C, 0x00000042);
+ check_warn_return(status, "error writing 0x002C");
+
+ status = ufx_reg_write(dev, 0x0030, 0x09520000);
+ check_warn_return(status, "error writing 0x0030");
+
+ status = ufx_reg_write(dev, 0x0034, 0x02223314);
+ check_warn_return(status, "error writing 0x0034");
+
+ status = ufx_reg_write(dev, 0x0038, 0x00430043);
+ check_warn_return(status, "error writing 0x0038");
+
+ status = ufx_reg_write(dev, 0x003C, 0xF00F000F);
+ check_warn_return(status, "error writing 0x003C");
+
+ status = ufx_reg_write(dev, 0x0040, 0xF380F00F);
+ check_warn_return(status, "error writing 0x0040");
+
+ status = ufx_reg_write(dev, 0x0044, 0xF00F0496);
+ check_warn_return(status, "error writing 0x0044");
+
+ status = ufx_reg_write(dev, 0x0048, 0x03080406);
+ check_warn_return(status, "error writing 0x0048");
+
+ status = ufx_reg_write(dev, 0x004C, 0x00001000);
+ check_warn_return(status, "error writing 0x004C");
+
+ status = ufx_reg_write(dev, 0x005C, 0x00000007);
+ check_warn_return(status, "error writing 0x005C");
+
+ status = ufx_reg_write(dev, 0x0100, 0x54F00012);
+ check_warn_return(status, "error writing 0x0100");
+
+ status = ufx_reg_write(dev, 0x0104, 0x00004012);
+ check_warn_return(status, "error writing 0x0104");
+
+ status = ufx_reg_write(dev, 0x0118, 0x40404040);
+ check_warn_return(status, "error writing 0x0118");
+
+ status = ufx_reg_write(dev, 0x0000, 0x00000001);
+ check_warn_return(status, "error writing 0x0000");
+
+ while (i++ < 500) {
+ status = ufx_reg_read(dev, 0x0000, &tmp);
+ check_warn_return(status, "error reading 0x0000");
+
+ if (all_bits_set(tmp, 0xC0000000))
+ return 0;
+ }
+
+ pr_err("DDR2 initialisation timed out, reg 0x0000=0x%08x", tmp);
+ return -ETIMEDOUT;
+}
+
+struct pll_values {
+ u32 div_r0;
+ u32 div_f0;
+ u32 div_q0;
+ u32 range0;
+ u32 div_r1;
+ u32 div_f1;
+ u32 div_q1;
+ u32 range1;
+};
+
+static u32 ufx_calc_range(u32 ref_freq)
+{
+ if (ref_freq >= 88000000)
+ return 7;
+
+ if (ref_freq >= 54000000)
+ return 6;
+
+ if (ref_freq >= 34000000)
+ return 5;
+
+ if (ref_freq >= 21000000)
+ return 4;
+
+ if (ref_freq >= 13000000)
+ return 3;
+
+ if (ref_freq >= 8000000)
+ return 2;
+
+ return 1;
+}
+
+/* calculates PLL divider settings for a desired target frequency */
+static void ufx_calc_pll_values(const u32 clk_pixel_pll, struct pll_values *asic_pll)
+{
+ const u32 ref_clk = 25000000;
+ u32 div_r0, div_f0, div_q0, div_r1, div_f1, div_q1;
+ u32 min_error = clk_pixel_pll;
+
+ for (div_r0 = 1; div_r0 <= 32; div_r0++) {
+ u32 ref_freq0 = ref_clk / div_r0;
+ if (ref_freq0 < 5000000)
+ break;
+
+ if (ref_freq0 > 200000000)
+ continue;
+
+ for (div_f0 = 1; div_f0 <= 256; div_f0++) {
+ u32 vco_freq0 = ref_freq0 * div_f0;
+
+ if (vco_freq0 < 350000000)
+ continue;
+
+ if (vco_freq0 > 700000000)
+ break;
+
+ for (div_q0 = 0; div_q0 < 7; div_q0++) {
+ u32 pllout_freq0 = vco_freq0 / (1 << div_q0);
+
+ if (pllout_freq0 < 5000000)
+ break;
+
+ if (pllout_freq0 > 200000000)
+ continue;
+
+ for (div_r1 = 1; div_r1 <= 32; div_r1++) {
+ u32 ref_freq1 = pllout_freq0 / div_r1;
+
+ if (ref_freq1 < 5000000)
+ break;
+
+ for (div_f1 = 1; div_f1 <= 256; div_f1++) {
+ u32 vco_freq1 = ref_freq1 * div_f1;
+
+ if (vco_freq1 < 350000000)
+ continue;
+
+ if (vco_freq1 > 700000000)
+ break;
+
+ for (div_q1 = 0; div_q1 < 7; div_q1++) {
+ u32 pllout_freq1 = vco_freq1 / (1 << div_q1);
+ int error = abs(pllout_freq1 - clk_pixel_pll);
+
+ if (pllout_freq1 < 5000000)
+ break;
+
+ if (pllout_freq1 > 700000000)
+ continue;
+
+ if (error < min_error) {
+ min_error = error;
+
+ /* final returned value is equal to calculated value - 1
+ * because a value of 0 = divide by 1 */
+ asic_pll->div_r0 = div_r0 - 1;
+ asic_pll->div_f0 = div_f0 - 1;
+ asic_pll->div_q0 = div_q0;
+ asic_pll->div_r1 = div_r1 - 1;
+ asic_pll->div_f1 = div_f1 - 1;
+ asic_pll->div_q1 = div_q1;
+
+ asic_pll->range0 = ufx_calc_range(ref_freq0);
+ asic_pll->range1 = ufx_calc_range(ref_freq1);
+
+ if (min_error == 0)
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/* sets analog bit PLL configuration values */
+static int ufx_config_pix_clk(struct ufx_data *dev, u32 pixclock)
+{
+ struct pll_values asic_pll = {0};
+ u32 value, clk_pixel, clk_pixel_pll;
+ int status;
+
+ /* convert pixclock (in ps) to frequency (in Hz) */
+ clk_pixel = PICOS2KHZ(pixclock) * 1000;
+ pr_debug("pixclock %d ps = clk_pixel %d Hz", pixclock, clk_pixel);
+
+ /* clk_pixel = 1/2 clk_pixel_pll */
+ clk_pixel_pll = clk_pixel * 2;
+
+ ufx_calc_pll_values(clk_pixel_pll, &asic_pll);
+
+ /* Keep BYPASS and RESET signals asserted until configured */
+ status = ufx_reg_write(dev, 0x7000, 0x8000000F);
+ check_warn_return(status, "error writing 0x7000");
+
+ value = (asic_pll.div_f1 | (asic_pll.div_r1 << 8) |
+ (asic_pll.div_q1 << 16) | (asic_pll.range1 << 20));
+ status = ufx_reg_write(dev, 0x7008, value);
+ check_warn_return(status, "error writing 0x7008");
+
+ value = (asic_pll.div_f0 | (asic_pll.div_r0 << 8) |
+ (asic_pll.div_q0 << 16) | (asic_pll.range0 << 20));
+ status = ufx_reg_write(dev, 0x7004, value);
+ check_warn_return(status, "error writing 0x7004");
+
+ status = ufx_reg_clear_bits(dev, 0x7000, 0x00000005);
+ check_warn_return(status,
+ "error clearing PLL0 bypass bits in 0x7000");
+ msleep(1);
+
+ status = ufx_reg_clear_bits(dev, 0x7000, 0x0000000A);
+ check_warn_return(status,
+ "error clearing PLL1 bypass bits in 0x7000");
+ msleep(1);
+
+ status = ufx_reg_clear_bits(dev, 0x7000, 0x80000000);
+ check_warn_return(status, "error clearing gate bits in 0x7000");
+
+ return 0;
+}
+
+static int ufx_set_vid_mode(struct ufx_data *dev, struct fb_var_screeninfo *var)
+{
+ u32 temp;
+ u16 h_total, h_active, h_blank_start, h_blank_end, h_sync_start, h_sync_end;
+ u16 v_total, v_active, v_blank_start, v_blank_end, v_sync_start, v_sync_end;
+
+ int status = ufx_reg_write(dev, 0x8028, 0);
+ check_warn_return(status, "ufx_set_vid_mode error disabling RGB pad");
+
+ status = ufx_reg_write(dev, 0x8024, 0);
+ check_warn_return(status, "ufx_set_vid_mode error disabling VDAC");
+
+ /* shut everything down before changing timing */
+ status = ufx_blank(dev, true);
+ check_warn_return(status, "ufx_set_vid_mode error blanking display");
+
+ status = ufx_disable(dev, true);
+ check_warn_return(status, "ufx_set_vid_mode error disabling display");
+
+ status = ufx_config_pix_clk(dev, var->pixclock);
+ check_warn_return(status, "ufx_set_vid_mode error configuring pixclock");
+
+ status = ufx_reg_write(dev, 0x2000, 0x00000104);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2000");
+
+ /* set horizontal timings */
+ h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
+ h_active = var->xres;
+ h_blank_start = var->xres + var->right_margin;
+ h_blank_end = var->xres + var->right_margin + var->hsync_len;
+ h_sync_start = var->xres + var->right_margin;
+ h_sync_end = var->xres + var->right_margin + var->hsync_len;
+
+ temp = ((h_total - 1) << 16) | (h_active - 1);
+ status = ufx_reg_write(dev, 0x2008, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2008");
+
+ temp = ((h_blank_start - 1) << 16) | (h_blank_end - 1);
+ status = ufx_reg_write(dev, 0x200C, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x200C");
+
+ temp = ((h_sync_start - 1) << 16) | (h_sync_end - 1);
+ status = ufx_reg_write(dev, 0x2010, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2010");
+
+ /* set vertical timings */
+ v_total = var->upper_margin + var->yres + var->lower_margin + var->vsync_len;
+ v_active = var->yres;
+ v_blank_start = var->yres + var->lower_margin;
+ v_blank_end = var->yres + var->lower_margin + var->vsync_len;
+ v_sync_start = var->yres + var->lower_margin;
+ v_sync_end = var->yres + var->lower_margin + var->vsync_len;
+
+ temp = ((v_total - 1) << 16) | (v_active - 1);
+ status = ufx_reg_write(dev, 0x2014, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2014");
+
+ temp = ((v_blank_start - 1) << 16) | (v_blank_end - 1);
+ status = ufx_reg_write(dev, 0x2018, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2018");
+
+ temp = ((v_sync_start - 1) << 16) | (v_sync_end - 1);
+ status = ufx_reg_write(dev, 0x201C, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x201C");
+
+ status = ufx_reg_write(dev, 0x2020, 0x00000000);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2020");
+
+ status = ufx_reg_write(dev, 0x2024, 0x00000000);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2024");
+
+ /* Set the frame length register (#pix * 2 bytes/pixel) */
+ temp = var->xres * var->yres * 2;
+ temp = (temp + 7) & (~0x7);
+ status = ufx_reg_write(dev, 0x2028, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2028");
+
+ /* enable desired output interface & disable others */
+ status = ufx_reg_write(dev, 0x2040, 0);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2040");
+
+ status = ufx_reg_write(dev, 0x2044, 0);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2044");
+
+ status = ufx_reg_write(dev, 0x2048, 0);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2048");
+
+ /* set the sync polarities & enable bit */
+ temp = 0x00000001;
+ if (var->sync & FB_SYNC_HOR_HIGH_ACT)
+ temp |= 0x00000010;
+
+ if (var->sync & FB_SYNC_VERT_HIGH_ACT)
+ temp |= 0x00000008;
+
+ status = ufx_reg_write(dev, 0x2040, temp);
+ check_warn_return(status, "ufx_set_vid_mode error writing 0x2040");
+
+ /* start everything back up */
+ status = ufx_enable(dev, true);
+ check_warn_return(status, "ufx_set_vid_mode error enabling display");
+
+ /* Unblank the display */
+ status = ufx_unblank(dev, true);
+ check_warn_return(status, "ufx_set_vid_mode error unblanking display");
+
+ /* enable RGB pad */
+ status = ufx_reg_write(dev, 0x8028, 0x00000003);
+ check_warn_return(status, "ufx_set_vid_mode error enabling RGB pad");
+
+ /* enable VDAC */
+ status = ufx_reg_write(dev, 0x8024, 0x00000007);
+ check_warn_return(status, "ufx_set_vid_mode error enabling VDAC");
+
+ return 0;
+}
+
+static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ unsigned long start = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long page, pos;
+
+ if (offset + size > info->fix.smem_len)
+ return -EINVAL;
+
+ pos = (unsigned long)info->fix.smem_start + offset;
+
+ pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
+ pos, size);
+
+ while (size > 0) {
+ page = vmalloc_to_pfn((void *)pos);
+ if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ if (size > PAGE_SIZE)
+ size -= PAGE_SIZE;
+ else
+ size = 0;
+ }
+
+ vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */
+ return 0;
+}
+
+static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
+ int width, int height)
+{
+ size_t packed_line_len = ALIGN((width * 2), 4);
+ size_t packed_rect_len = packed_line_len * height;
+ int line;
+
+ BUG_ON(!dev);
+ BUG_ON(!dev->info);
+
+ /* command word */
+ *((u32 *)&cmd[0]) = cpu_to_le32(0x01);
+
+ /* length word */
+ *((u32 *)&cmd[2]) = cpu_to_le32(packed_rect_len + 16);
+
+ cmd[4] = cpu_to_le16(x);
+ cmd[5] = cpu_to_le16(y);
+ cmd[6] = cpu_to_le16(width);
+ cmd[7] = cpu_to_le16(height);
+
+ /* frame base address */
+ *((u32 *)&cmd[8]) = cpu_to_le32(0);
+
+ /* color mode and horizontal resolution */
+ cmd[10] = cpu_to_le16(0x4000 | dev->info->var.xres);
+
+ /* vertical resolution */
+ cmd[11] = cpu_to_le16(dev->info->var.yres);
+
+ /* packed data */
+ for (line = 0; line < height; line++) {
+ const int line_offset = dev->info->fix.line_length * (y + line);
+ const int byte_offset = line_offset + (x * BPP);
+ memcpy(&cmd[(24 + (packed_line_len * line)) / 2],
+ (char *)dev->info->fix.smem_start + byte_offset, width * BPP);
+ }
+}
+
+int ufx_handle_damage(struct ufx_data *dev, int x, int y,
+ int width, int height)
+{
+ size_t packed_line_len = ALIGN((width * 2), 4);
+ int len, status, urb_lines, start_line = 0;
+
+ if ((width <= 0) || (height <= 0) ||
+ (x + width > dev->info->var.xres) ||
+ (y + height > dev->info->var.yres))
+ return -EINVAL;
+
+ if (!atomic_read(&dev->usb_active))
+ return 0;
+
+ while (start_line < height) {
+ struct urb *urb = ufx_get_urb(dev);
+ if (!urb) {
+ pr_warn("ufx_handle_damage unable to get urb");
+ return 0;
+ }
+
+ /* assume we have enough space to transfer at least one line */
+ BUG_ON(urb->transfer_buffer_length < (24 + (width * 2)));
+
+ /* calculate the maximum number of lines we could fit in */
+ urb_lines = (urb->transfer_buffer_length - 24) / packed_line_len;
+
+ /* but we might not need this many */
+ urb_lines = min(urb_lines, (height - start_line));
+
+ memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
+
+ ufx_raw_rect(dev, urb->transfer_buffer, x, (y + start_line), width, urb_lines);
+ len = 24 + (packed_line_len * urb_lines);
+
+ status = ufx_submit_urb(dev, urb, len);
+ check_warn_return(status, "Error submitting URB");
+
+ start_line += urb_lines;
+ }
+
+ return 0;
+}
+
+/* Path triggered by usermode clients who write to filesystem
+ * e.g. cat filename > /dev/fb1
+ * Not used by X Windows or text-mode console. But useful for testing.
+ * Slow because of extra copy and we must assume all pixels dirty. */
+static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t result;
+ struct ufx_data *dev = info->par;
+ u32 offset = (u32) *ppos;
+
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+ int start = max((int)(offset / info->fix.line_length) - 1, 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+ ufx_handle_damage(dev, 0, start, info->var.xres, lines);
+ }
+
+ return result;
+}
+
+static void ufx_ops_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+
+ struct ufx_data *dev = info->par;
+
+ sys_copyarea(info, area);
+
+ ufx_handle_damage(dev, area->dx, area->dy,
+ area->width, area->height);
+}
+
+static void ufx_ops_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct ufx_data *dev = info->par;
+
+ sys_imageblit(info, image);
+
+ ufx_handle_damage(dev, image->dx, image->dy,
+ image->width, image->height);
+}
+
+static void ufx_ops_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct ufx_data *dev = info->par;
+
+ sys_fillrect(info, rect);
+
+ ufx_handle_damage(dev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+/* NOTE: fb_defio.c is holding info->fbdefio.mutex
+ * Touching ANY framebuffer memory that triggers a page fault
+ * in fb_defio will cause a deadlock, when it also tries to
+ * grab the same mutex. */
+static void ufx_dpy_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct page *cur;
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct ufx_data *dev = info->par;
+
+ if (!fb_defio)
+ return;
+
+ if (!atomic_read(&dev->usb_active))
+ return;
+
+ /* walk the written page list and render each to device */
+ list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+ /* create a rectangle of full screen width that encloses the
+ * entire dirty framebuffer page */
+ const int x = 0;
+ const int width = dev->info->var.xres;
+ const int y = (cur->index << PAGE_SHIFT) / (width * 2);
+ int height = (PAGE_SIZE / (width * 2)) + 1;
+ height = min(height, (int)(dev->info->var.yres - y));
+
+ BUG_ON(y >= dev->info->var.yres);
+ BUG_ON((y + height) > dev->info->var.yres);
+
+ ufx_handle_damage(dev, x, y, width, height);
+ }
+}
+
+static int ufx_ops_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ufx_data *dev = info->par;
+ struct dloarea *area = NULL;
+
+ if (!atomic_read(&dev->usb_active))
+ return 0;
+
+ /* TODO: Update X server to get this from sysfs instead */
+ if (cmd == UFX_IOCTL_RETURN_EDID) {
+ u8 __user *edid = (u8 __user *)arg;
+ if (copy_to_user(edid, dev->edid, dev->edid_size))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* TODO: Help propose a standard fb.h ioctl to report mmap damage */
+ if (cmd == UFX_IOCTL_REPORT_DAMAGE) {
+ /* If we have a damage-aware client, turn fb_defio "off"
+ * to avoid the perf impact of unnecessary page fault handling.
+ * Done by resetting the delay for this fb_info to a very
+ * long period. Pages will become writable and stay that way.
+ * Reset to normal value when all clients have closed this fb.
+ */
+ if (info->fbdefio)
+ info->fbdefio->delay = UFX_DEFIO_WRITE_DISABLE;
+
+ area = (struct dloarea *)arg;
+
+ if (area->x < 0)
+ area->x = 0;
+
+ if (area->x > info->var.xres)
+ area->x = info->var.xres;
+
+ if (area->y < 0)
+ area->y = 0;
+
+ if (area->y > info->var.yres)
+ area->y = info->var.yres;
+
+ ufx_handle_damage(dev, area->x, area->y, area->w, area->h);
+ }
+
+ return 0;
+}
+
+/* taken from vesafb */
+static int
+ufx_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp, struct fb_info *info)
+{
+ int err = 0;
+
+ if (regno >= info->cmap.len)
+ return 1;
+
+ if (regno < 16) {
+ if (info->var.red.offset == 10) {
+ /* 1:5:5:5 */
+ ((u32 *) (info->pseudo_palette))[regno] =
+ ((red & 0xf800) >> 1) |
+ ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
+ } else {
+ /* 0:5:6:5 */
+ ((u32 *) (info->pseudo_palette))[regno] =
+ ((red & 0xf800)) |
+ ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
+ }
+ }
+
+ return err;
+}
+
+/* It's common for several clients to have framebuffer open simultaneously.
+ * e.g. both fbcon and X. Makes things interesting.
+ * Assumes caller is holding info->lock (for open and release at least) */
+static int ufx_ops_open(struct fb_info *info, int user)
+{
+ struct ufx_data *dev = info->par;
+
+ /* fbcon aggressively connects to first framebuffer it finds,
+ * preventing other clients (X) from working properly. Usually
+ * not what the user wants. Fail by default with option to enable. */
+ if (user == 0 && !console)
+ return -EBUSY;
+
+ /* If the USB device is gone, we don't accept new opens */
+ if (dev->virtualized)
+ return -ENODEV;
+
+ dev->fb_count++;
+
+ kref_get(&dev->kref);
+
+ if (fb_defio && (info->fbdefio == NULL)) {
+ /* enable defio at last moment if not disabled by client */
+
+ struct fb_deferred_io *fbdefio;
+
+ fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+
+ if (fbdefio) {
+ fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
+ fbdefio->deferred_io = ufx_dpy_deferred_io;
+ }
+
+ info->fbdefio = fbdefio;
+ fb_deferred_io_init(info);
+ }
+
+ pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d",
+ info->node, user, info, dev->fb_count);
+
+ return 0;
+}
+
+/*
+ * Called when all client interfaces to start transactions have been disabled,
+ * and all references to our device instance (ufx_data) are released.
+ * Every transaction must have a reference, so we know we are fully spun down
+ */
+static void ufx_free(struct kref *kref)
+{
+ struct ufx_data *dev = container_of(kref, struct ufx_data, kref);
+
+ /* this function will wait for all in-flight urbs to complete */
+ if (dev->urbs.count > 0)
+ ufx_free_urb_list(dev);
+
+ pr_debug("freeing ufx_data %p", dev);
+
+ kfree(dev);
+}
+
+static void ufx_release_urb_work(struct work_struct *work)
+{
+ struct urb_node *unode = container_of(work, struct urb_node,
+ release_urb_work.work);
+
+ up(&unode->dev->urbs.limit_sem);
+}
+
+static void ufx_free_framebuffer_work(struct work_struct *work)
+{
+ struct ufx_data *dev = container_of(work, struct ufx_data,
+ free_framebuffer_work.work);
+ struct fb_info *info = dev->info;
+ int node = info->node;
+
+ unregister_framebuffer(info);
+
+ if (info->cmap.len != 0)
+ fb_dealloc_cmap(&info->cmap);
+ if (info->monspecs.modedb)
+ fb_destroy_modedb(info->monspecs.modedb);
+ if (info->screen_base)
+ vfree(info->screen_base);
+
+ fb_destroy_modelist(&info->modelist);
+
+ dev->info = 0;
+
+ /* Assume info structure is freed after this point */
+ framebuffer_release(info);
+
+ pr_debug("fb_info for /dev/fb%d has been freed", node);
+
+ /* ref taken in probe() as part of registering framebuffer */
+ kref_put(&dev->kref, ufx_free);
+}
+
+/*
+ * Assumes caller is holding info->lock mutex (for open and release at least)
+ */
+static int ufx_ops_release(struct fb_info *info, int user)
+{
+ struct ufx_data *dev = info->par;
+
+ dev->fb_count--;
+
+ /* We can't free fb_info here - fbmem will touch it when we return */
+ if (dev->virtualized && (dev->fb_count == 0))
+ schedule_delayed_work(&dev->free_framebuffer_work, HZ);
+
+ if ((dev->fb_count == 0) && (info->fbdefio)) {
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+ info->fbdefio = NULL;
+ info->fbops->fb_mmap = ufx_ops_mmap;
+ }
+
+ pr_debug("released /dev/fb%d user=%d count=%d",
+ info->node, user, dev->fb_count);
+
+ kref_put(&dev->kref, ufx_free);
+
+ return 0;
+}
+
+/* Check whether a video mode is supported by the chip
+ * We start from monitor's modes, so don't need to filter that here */
+static int ufx_is_valid_mode(struct fb_videomode *mode,
+ struct fb_info *info)
+{
+ if ((mode->xres * mode->yres) > (2048 * 1152)) {
+ pr_debug("%dx%d too many pixels",
+ mode->xres, mode->yres);
+ return 0;
+ }
+
+ if (mode->pixclock < 5000) {
+ pr_debug("%dx%d %dps pixel clock too fast",
+ mode->xres, mode->yres, mode->pixclock);
+ return 0;
+ }
+
+ pr_debug("%dx%d (pixclk %dps %dMHz) valid mode", mode->xres, mode->yres,
+ mode->pixclock, (1000000 / mode->pixclock));
+ return 1;
+}
+
+static void ufx_var_color_format(struct fb_var_screeninfo *var)
+{
+ const struct fb_bitfield red = { 11, 5, 0 };
+ const struct fb_bitfield green = { 5, 6, 0 };
+ const struct fb_bitfield blue = { 0, 5, 0 };
+
+ var->bits_per_pixel = 16;
+ var->red = red;
+ var->green = green;
+ var->blue = blue;
+}
+
+static int ufx_ops_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct fb_videomode mode;
+
+ /* TODO: support dynamically changing framebuffer size */
+ if ((var->xres * var->yres * 2) > info->fix.smem_len)
+ return -EINVAL;
+
+ /* set device-specific elements of var unrelated to mode */
+ ufx_var_color_format(var);
+
+ fb_var_to_videomode(&mode, var);
+
+ if (!ufx_is_valid_mode(&mode, info))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ufx_ops_set_par(struct fb_info *info)
+{
+ struct ufx_data *dev = info->par;
+ int result;
+ u16 *pix_framebuffer;
+ int i;
+
+ pr_debug("set_par mode %dx%d", info->var.xres, info->var.yres);
+ result = ufx_set_vid_mode(dev, &info->var);
+
+ if ((result == 0) && (dev->fb_count == 0)) {
+ /* paint greenscreen */
+ pix_framebuffer = (u16 *) info->screen_base;
+ for (i = 0; i < info->fix.smem_len / 2; i++)
+ pix_framebuffer[i] = 0x37e6;
+
+ ufx_handle_damage(dev, 0, 0, info->var.xres, info->var.yres);
+ }
+
+ /* re-enable defio if previously disabled by damage tracking */
+ if (info->fbdefio)
+ info->fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
+
+ return result;
+}
+
+/* In order to come back from full DPMS off, we need to set the mode again */
+static int ufx_ops_blank(int blank_mode, struct fb_info *info)
+{
+ struct ufx_data *dev = info->par;
+ ufx_set_vid_mode(dev, &info->var);
+ return 0;
+}
+
+static struct fb_ops ufx_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = ufx_ops_write,
+ .fb_setcolreg = ufx_ops_setcolreg,
+ .fb_fillrect = ufx_ops_fillrect,
+ .fb_copyarea = ufx_ops_copyarea,
+ .fb_imageblit = ufx_ops_imageblit,
+ .fb_mmap = ufx_ops_mmap,
+ .fb_ioctl = ufx_ops_ioctl,
+ .fb_open = ufx_ops_open,
+ .fb_release = ufx_ops_release,
+ .fb_blank = ufx_ops_blank,
+ .fb_check_var = ufx_ops_check_var,
+ .fb_set_par = ufx_ops_set_par,
+};
+
+/* Assumes &info->lock held by caller
+ * Assumes no active clients have framebuffer open */
+static int ufx_realloc_framebuffer(struct ufx_data *dev, struct fb_info *info)
+{
+ int retval = -ENOMEM;
+ int old_len = info->fix.smem_len;
+ int new_len;
+ unsigned char *old_fb = info->screen_base;
+ unsigned char *new_fb;
+
+ pr_debug("Reallocating framebuffer. Addresses will change!");
+
+ new_len = info->fix.line_length * info->var.yres;
+
+ if (PAGE_ALIGN(new_len) > old_len) {
+ /*
+ * Alloc system memory for virtual framebuffer
+ */
+ new_fb = vmalloc(new_len);
+ if (!new_fb) {
+ pr_err("Virtual framebuffer alloc failed");
+ goto error;
+ }
+
+ if (info->screen_base) {
+ memcpy(new_fb, old_fb, old_len);
+ vfree(info->screen_base);
+ }
+
+ info->screen_base = new_fb;
+ info->fix.smem_len = PAGE_ALIGN(new_len);
+ info->fix.smem_start = (unsigned long) new_fb;
+ info->flags = smscufx_info_flags;
+ }
+
+ retval = 0;
+
+error:
+ return retval;
+}
+
+/* sets up I2C Controller for 100 Kbps, std. speed, 7-bit addr, master,
+ * restart enabled, but no start byte, enable controller */
+static int ufx_i2c_init(struct ufx_data *dev)
+{
+ u32 tmp;
+
+ /* disable the controller before it can be reprogrammed */
+ int status = ufx_reg_write(dev, 0x106C, 0x00);
+ check_warn_return(status, "failed to disable I2C");
+
+ /* Setup the clock count registers
+ * (12+1) = 13 clks @ 2.5 MHz = 5.2 uS */
+ status = ufx_reg_write(dev, 0x1018, 12);
+ check_warn_return(status, "error writing 0x1018");
+
+ /* (6+8) = 14 clks @ 2.5 MHz = 5.6 uS */
+ status = ufx_reg_write(dev, 0x1014, 6);
+ check_warn_return(status, "error writing 0x1014");
+
+ status = ufx_reg_read(dev, 0x1000, &tmp);
+ check_warn_return(status, "error reading 0x1000");
+
+ /* set speed to std mode */
+ tmp &= ~(0x06);
+ tmp |= 0x02;
+
+ /* 7-bit (not 10-bit) addressing */
+ tmp &= ~(0x10);
+
+ /* enable restart conditions and master mode */
+ tmp |= 0x21;
+
+ status = ufx_reg_write(dev, 0x1000, tmp);
+ check_warn_return(status, "error writing 0x1000");
+
+ /* Set normal tx using target address 0 */
+ status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0xC00, 0x000);
+ check_warn_return(status, "error setting TX mode bits in 0x1004");
+
+ /* Enable the controller */
+ status = ufx_reg_write(dev, 0x106C, 0x01);
+ check_warn_return(status, "failed to enable I2C");
+
+ return 0;
+}
+
+/* sets the I2C port mux and target address */
+static int ufx_i2c_configure(struct ufx_data *dev)
+{
+ int status = ufx_reg_write(dev, 0x106C, 0x00);
+ check_warn_return(status, "failed to disable I2C");
+
+ status = ufx_reg_write(dev, 0x3010, 0x00000000);
+ check_warn_return(status, "failed to write 0x3010");
+
+ /* A0h is std for any EDID, right shifted by one */
+ status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0x3FF, (0xA0 >> 1));
+ check_warn_return(status, "failed to set TAR bits in 0x1004");
+
+ status = ufx_reg_write(dev, 0x106C, 0x01);
+ check_warn_return(status, "failed to enable I2C");
+
+ return 0;
+}
+
+/* wait for BUSY to clear, with a timeout of 50ms (10ms sleeps). If no
+ * monitor is connected, there is no error except for timeout */
+static int ufx_i2c_wait_busy(struct ufx_data *dev)
+{
+ u32 tmp;
+ int i, status;
+
+ for (i = 0; i < 15; i++) {
+ status = ufx_reg_read(dev, 0x1100, &tmp);
+ check_warn_return(status, "0x1100 read failed");
+
+ /* if BUSY is clear, check for error */
+ if ((tmp & 0x80000000) == 0) {
+ if (tmp & 0x20000000) {
+ pr_warn("I2C read failed, 0x1100=0x%08x", tmp);
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ /* perform the first 10 retries without delay */
+ if (i >= 10)
+ msleep(10);
+ }
+
+ pr_warn("I2C access timed out, resetting I2C hardware");
+ status = ufx_reg_write(dev, 0x1100, 0x40000000);
+ check_warn_return(status, "0x1100 write failed");
+
+ return -ETIMEDOUT;
+}
+
+/* reads a 128-byte EDID block from the currently selected port and TAR */
+static int ufx_read_edid(struct ufx_data *dev, u8 *edid, int edid_len)
+{
+ int i, j, status;
+ u32 *edid_u32 = (u32 *)edid;
+
+ BUG_ON(edid_len != EDID_LENGTH);
+
+ status = ufx_i2c_configure(dev);
+ if (status < 0) {
+ pr_err("ufx_i2c_configure failed");
+ return status;
+ }
+
+ memset(edid, 0xff, EDID_LENGTH);
+
+ /* Read the 128-byte EDID as 2 bursts of 64 bytes */
+ for (i = 0; i < 2; i++) {
+ u32 temp = 0x28070000 | (63 << 20) | (((u32)(i * 64)) << 8);
+ status = ufx_reg_write(dev, 0x1100, temp);
+ check_warn_return(status, "Failed to write 0x1100");
+
+ temp |= 0x80000000;
+ status = ufx_reg_write(dev, 0x1100, temp);
+ check_warn_return(status, "Failed to write 0x1100");
+
+ status = ufx_i2c_wait_busy(dev);
+ check_warn_return(status, "Timeout waiting for I2C BUSY to clear");
+
+ for (j = 0; j < 16; j++) {
+ u32 data_reg_addr = 0x1110 + (j * 4);
+ status = ufx_reg_read(dev, data_reg_addr, edid_u32++);
+ check_warn_return(status, "Error reading i2c data");
+ }
+ }
+
+ /* all FF's in the first 16 bytes indicates nothing is connected */
+ for (i = 0; i < 16; i++) {
+ if (edid[i] != 0xFF) {
+ pr_debug("edid data read succesfully");
+ return EDID_LENGTH;
+ }
+ }
+
+ pr_warn("edid data contains all 0xff");
+ return -ETIMEDOUT;
+}
+
+/* 1) Get EDID from hw, or use sw default
+ * 2) Parse into various fb_info structs
+ * 3) Allocate virtual framebuffer memory to back highest res mode
+ *
+ * Parses EDID into three places used by various parts of fbdev:
+ * fb_var_screeninfo contains the timing of the monitor's preferred mode
+ * fb_info.monspecs is full parsed EDID info, including monspecs.modedb
+ * fb_info.modelist is a linked list of all monitor & VESA modes which work
+ *
+ * If EDID is not readable/valid, then modelist is all VESA modes,
+ * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
+ * Returns 0 if successful */
+static int ufx_setup_modes(struct ufx_data *dev, struct fb_info *info,
+ char *default_edid, size_t default_edid_size)
+{
+ const struct fb_videomode *default_vmode = NULL;
+ u8 *edid;
+ int i, result = 0, tries = 3;
+
+ if (info->dev) /* only use mutex if info has been registered */
+ mutex_lock(&info->lock);
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid) {
+ result = -ENOMEM;
+ goto error;
+ }
+
+ fb_destroy_modelist(&info->modelist);
+ memset(&info->monspecs, 0, sizeof(info->monspecs));
+
+ /* Try to (re)read EDID from hardware first
+ * EDID data may return, but not parse as valid
+ * Try again a few times, in case of e.g. analog cable noise */
+ while (tries--) {
+ i = ufx_read_edid(dev, edid, EDID_LENGTH);
+
+ if (i >= EDID_LENGTH)
+ fb_edid_to_monspecs(edid, &info->monspecs);
+
+ if (info->monspecs.modedb_len > 0) {
+ dev->edid = edid;
+ dev->edid_size = i;
+ break;
+ }
+ }
+
+ /* If that fails, use a previously returned EDID if available */
+ if (info->monspecs.modedb_len == 0) {
+ pr_err("Unable to get valid EDID from device/display\n");
+
+ if (dev->edid) {
+ fb_edid_to_monspecs(dev->edid, &info->monspecs);
+ if (info->monspecs.modedb_len > 0)
+ pr_err("Using previously queried EDID\n");
+ }
+ }
+
+ /* If that fails, use the default EDID we were handed */
+ if (info->monspecs.modedb_len == 0) {
+ if (default_edid_size >= EDID_LENGTH) {
+ fb_edid_to_monspecs(default_edid, &info->monspecs);
+ if (info->monspecs.modedb_len > 0) {
+ memcpy(edid, default_edid, default_edid_size);
+ dev->edid = edid;
+ dev->edid_size = default_edid_size;
+ pr_err("Using default/backup EDID\n");
+ }
+ }
+ }
+
+ /* If we've got modes, let's pick a best default mode */
+ if (info->monspecs.modedb_len > 0) {
+
+ for (i = 0; i < info->monspecs.modedb_len; i++) {
+ if (ufx_is_valid_mode(&info->monspecs.modedb[i], info))
+ fb_add_videomode(&info->monspecs.modedb[i],
+ &info->modelist);
+ else /* if we've removed top/best mode */
+ info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
+ }
+
+ default_vmode = fb_find_best_display(&info->monspecs,
+ &info->modelist);
+ }
+
+ /* If everything else has failed, fall back to safe default mode */
+ if (default_vmode == NULL) {
+
+ struct fb_videomode fb_vmode = {0};
+
+ /* Add the standard VESA modes to our modelist
+ * Since we don't have EDID, there may be modes that
+ * overspec monitor and/or are incorrect aspect ratio, etc.
+ * But at least the user has a chance to choose
+ */
+ for (i = 0; i < VESA_MODEDB_SIZE; i++) {
+ if (ufx_is_valid_mode((struct fb_videomode *)
+ &vesa_modes[i], info))
+ fb_add_videomode(&vesa_modes[i],
+ &info->modelist);
+ }
+
+ /* default to resolution safe for projectors
+ * (since they are most common case without EDID)
+ */
+ fb_vmode.xres = 800;
+ fb_vmode.yres = 600;
+ fb_vmode.refresh = 60;
+ default_vmode = fb_find_nearest_mode(&fb_vmode,
+ &info->modelist);
+ }
+
+ /* If we have good mode and no active clients */
+ if ((default_vmode != NULL) && (dev->fb_count == 0)) {
+
+ fb_videomode_to_var(&info->var, default_vmode);
+ ufx_var_color_format(&info->var);
+
+ /* with mode size info, we can now alloc our framebuffer */
+ memcpy(&info->fix, &ufx_fix, sizeof(ufx_fix));
+ info->fix.line_length = info->var.xres *
+ (info->var.bits_per_pixel / 8);
+
+ result = ufx_realloc_framebuffer(dev, info);
+
+ } else
+ result = -EINVAL;
+
+error:
+ if (edid && (dev->edid != edid))
+ kfree(edid);
+
+ if (info->dev)
+ mutex_unlock(&info->lock);
+
+ return result;
+}
+
+static int ufx_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev;
+ struct ufx_data *dev;
+ struct fb_info *info = 0;
+ int retval = -ENOMEM;
+ u32 id_rev, fpga_rev;
+
+ /* usb initialization */
+ usbdev = interface_to_usbdev(interface);
+ BUG_ON(!usbdev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&usbdev->dev, "ufx_usb_probe: failed alloc of dev struct\n");
+ goto error;
+ }
+
+ /* we need to wait for both usb and fbdev to spin down on disconnect */
+ kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */
+ kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */
+
+ dev->udev = usbdev;
+ dev->gdev = &usbdev->dev; /* our generic struct device * */
+ usb_set_intfdata(interface, dev);
+
+ dev_dbg(dev->gdev, "%s %s - serial #%s\n",
+ usbdev->manufacturer, usbdev->product, usbdev->serial);
+ dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
+ usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
+ usbdev->descriptor.bcdDevice, dev);
+ dev_dbg(dev->gdev, "console enable=%d\n", console);
+ dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
+
+ if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ retval = -ENOMEM;
+ dev_err(dev->gdev, "ufx_alloc_urb_list failed\n");
+ goto error;
+ }
+
+ /* We don't register a new USB class. Our client interface is fbdev */
+
+ /* allocates framebuffer driver structure, not framebuffer memory */
+ info = framebuffer_alloc(0, &usbdev->dev);
+ if (!info) {
+ retval = -ENOMEM;
+ dev_err(dev->gdev, "framebuffer_alloc failed\n");
+ goto error;
+ }
+
+ dev->info = info;
+ info->par = dev;
+ info->pseudo_palette = dev->pseudo_palette;
+ info->fbops = &ufx_ops;
+
+ retval = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (retval < 0) {
+ dev_err(dev->gdev, "fb_alloc_cmap failed %x\n", retval);
+ goto error;
+ }
+
+ INIT_DELAYED_WORK(&dev->free_framebuffer_work,
+ ufx_free_framebuffer_work);
+
+ INIT_LIST_HEAD(&info->modelist);
+
+ retval = ufx_reg_read(dev, 0x3000, &id_rev);
+ check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
+ dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
+
+ retval = ufx_reg_read(dev, 0x3004, &fpga_rev);
+ check_warn_goto_error(retval, "error %d reading 0x3004 register from device", retval);
+ dev_dbg(dev->gdev, "FPGA_REV register value 0x%08x", fpga_rev);
+
+ dev_dbg(dev->gdev, "resetting device");
+ retval = ufx_lite_reset(dev);
+ check_warn_goto_error(retval, "error %d resetting device", retval);
+
+ dev_dbg(dev->gdev, "configuring system clock");
+ retval = ufx_config_sys_clk(dev);
+ check_warn_goto_error(retval, "error %d configuring system clock", retval);
+
+ dev_dbg(dev->gdev, "configuring DDR2 controller");
+ retval = ufx_config_ddr2(dev);
+ check_warn_goto_error(retval, "error %d initialising DDR2 controller", retval);
+
+ dev_dbg(dev->gdev, "configuring I2C controller");
+ retval = ufx_i2c_init(dev);
+ check_warn_goto_error(retval, "error %d initialising I2C controller", retval);
+
+ dev_dbg(dev->gdev, "selecting display mode");
+ retval = ufx_setup_modes(dev, info, NULL, 0);
+ check_warn_goto_error(retval, "unable to find common mode for display and adapter");
+
+ retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001);
+ check_warn_goto_error(retval, "error %d enabling graphics engine", retval);
+
+ /* ready to begin using device */
+ atomic_set(&dev->usb_active, 1);
+
+ dev_dbg(dev->gdev, "checking var");
+ retval = ufx_ops_check_var(&info->var, info);
+ check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval);
+
+ dev_dbg(dev->gdev, "setting par");
+ retval = ufx_ops_set_par(info);
+ check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval);
+
+ dev_dbg(dev->gdev, "registering framebuffer");
+ retval = register_framebuffer(info);
+ check_warn_goto_error(retval, "error %d register_framebuffer", retval);
+
+ dev_info(dev->gdev, "SMSC UDX USB device /dev/fb%d attached. %dx%d resolution."
+ " Using %dK framebuffer memory\n", info->node,
+ info->var.xres, info->var.yres, info->fix.smem_len >> 10);
+
+ return 0;
+
+error:
+ if (dev) {
+ if (info) {
+ if (info->cmap.len != 0)
+ fb_dealloc_cmap(&info->cmap);
+ if (info->monspecs.modedb)
+ fb_destroy_modedb(info->monspecs.modedb);
+ if (info->screen_base)
+ vfree(info->screen_base);
+
+ fb_destroy_modelist(&info->modelist);
+
+ framebuffer_release(info);
+ }
+
+ kref_put(&dev->kref, ufx_free); /* ref for framebuffer */
+ kref_put(&dev->kref, ufx_free); /* last ref from kref_init */
+
+ /* dev has been deallocated. Do not dereference */
+ }
+
+ return retval;
+}
+
+static void ufx_usb_disconnect(struct usb_interface *interface)
+{
+ struct ufx_data *dev;
+ struct fb_info *info;
+
+ dev = usb_get_intfdata(interface);
+ info = dev->info;
+
+ pr_debug("USB disconnect starting\n");
+
+ /* we virtualize until all fb clients release. Then we free */
+ dev->virtualized = true;
+
+ /* When non-active we'll update virtual framebuffer, but no new urbs */
+ atomic_set(&dev->usb_active, 0);
+
+ usb_set_intfdata(interface, NULL);
+
+ /* if clients still have us open, will be freed on last close */
+ if (dev->fb_count == 0)
+ schedule_delayed_work(&dev->free_framebuffer_work, 0);
+
+ /* release reference taken by kref_init in probe() */
+ kref_put(&dev->kref, ufx_free);
+
+ /* consider ufx_data freed */
+}
+
+static struct usb_driver ufx_driver = {
+ .name = "smscufx",
+ .probe = ufx_usb_probe,
+ .disconnect = ufx_usb_disconnect,
+ .id_table = id_table,
+};
+
+static int __init ufx_module_init(void)
+{
+ int res;
+
+ res = usb_register(&ufx_driver);
+ if (res)
+ err("usb_register failed. Error number %d", res);
+
+ return res;
+}
+
+static void __exit ufx_module_exit(void)
+{
+ usb_deregister(&ufx_driver);
+}
+
+module_init(ufx_module_init);
+module_exit(ufx_module_exit);
+
+static void ufx_urb_completion(struct urb *urb)
+{
+ struct urb_node *unode = urb->context;
+ struct ufx_data *dev = unode->dev;
+ unsigned long flags;
+
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN)) {
+ pr_err("%s - nonzero write bulk status received: %d\n",
+ __func__, urb->status);
+ atomic_set(&dev->lost_pixels, 1);
+ }
+ }
+
+ urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */
+
+ spin_lock_irqsave(&dev->urbs.lock, flags);
+ list_add_tail(&unode->entry, &dev->urbs.list);
+ dev->urbs.available++;
+ spin_unlock_irqrestore(&dev->urbs.lock, flags);
+
+ /* When using fb_defio, we deadlock if up() is called
+ * while another is waiting. So queue to another process */
+ if (fb_defio)
+ schedule_delayed_work(&unode->release_urb_work, 0);
+ else
+ up(&dev->urbs.limit_sem);
+}
+
+static void ufx_free_urb_list(struct ufx_data *dev)
+{
+ int count = dev->urbs.count;
+ struct list_head *node;
+ struct urb_node *unode;
+ struct urb *urb;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("Waiting for completes and freeing all render urbs\n");
+
+ /* keep waiting and freeing, until we've got 'em all */
+ while (count--) {
+ /* Getting interrupted means a leak, but ok at shutdown */
+ ret = down_interruptible(&dev->urbs.limit_sem);
+ if (ret)
+ break;
+
+ spin_lock_irqsave(&dev->urbs.lock, flags);
+
+ node = dev->urbs.list.next; /* have reserved one with sem */
+ list_del_init(node);
+
+ spin_unlock_irqrestore(&dev->urbs.lock, flags);
+
+ unode = list_entry(node, struct urb_node, entry);
+ urb = unode->urb;
+
+ /* Free each separately allocated piece */
+ usb_free_coherent(urb->dev, dev->urbs.size,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+ kfree(node);
+ }
+}
+
+static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size)
+{
+ int i = 0;
+ struct urb *urb;
+ struct urb_node *unode;
+ char *buf;
+
+ spin_lock_init(&dev->urbs.lock);
+
+ dev->urbs.size = size;
+ INIT_LIST_HEAD(&dev->urbs.list);
+
+ while (i < count) {
+ unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
+ if (!unode)
+ break;
+ unode->dev = dev;
+
+ INIT_DELAYED_WORK(&unode->release_urb_work,
+ ufx_release_urb_work);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(unode);
+ break;
+ }
+ unode->urb = urb;
+
+ buf = usb_alloc_coherent(dev->udev, size, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ kfree(unode);
+ usb_free_urb(urb);
+ break;
+ }
+
+ /* urb->transfer_buffer_length set to actual before submit */
+ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
+ buf, size, ufx_urb_completion, unode);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ list_add_tail(&unode->entry, &dev->urbs.list);
+
+ i++;
+ }
+
+ sema_init(&dev->urbs.limit_sem, i);
+ dev->urbs.count = i;
+ dev->urbs.available = i;
+
+ pr_debug("allocated %d %d byte urbs\n", i, (int) size);
+
+ return i;
+}
+
+static struct urb *ufx_get_urb(struct ufx_data *dev)
+{
+ int ret = 0;
+ struct list_head *entry;
+ struct urb_node *unode;
+ struct urb *urb = NULL;
+ unsigned long flags;
+
+ /* Wait for an in-flight buffer to complete and get re-queued */
+ ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
+ if (ret) {
+ atomic_set(&dev->lost_pixels, 1);
+ pr_warn("wait for urb interrupted: %x available: %d\n",
+ ret, dev->urbs.available);
+ goto error;
+ }
+
+ spin_lock_irqsave(&dev->urbs.lock, flags);
+
+ BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
+ entry = dev->urbs.list.next;
+ list_del_init(entry);
+ dev->urbs.available--;
+
+ spin_unlock_irqrestore(&dev->urbs.lock, flags);
+
+ unode = list_entry(entry, struct urb_node, entry);
+ urb = unode->urb;
+
+error:
+ return urb;
+}
+
+static int ufx_submit_urb(struct ufx_data *dev, struct urb *urb, size_t len)
+{
+ int ret;
+
+ BUG_ON(len > dev->urbs.size);
+
+ urb->transfer_buffer_length = len; /* set to actual payload len */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ ufx_urb_completion(urb); /* because no one else will */
+ atomic_set(&dev->lost_pixels, 1);
+ pr_err("usb_submit_urb error %x\n", ret);
+ }
+ return ret;
+}
+
+module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+MODULE_PARM_DESC(console, "Allow fbcon to be used on this display");
+
+module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support");
+
+MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
+MODULE_DESCRIPTION("SMSC UFX kernel framebuffer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index cd1c4dcef8f..8e4a446b5ed 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -744,7 +744,7 @@ static int __devinit tmiofb_probe(struct platform_device *dev)
goto err_ioremap_vram;
}
- retval = request_irq(irq, &tmiofb_irq, IRQF_DISABLED,
+ retval = request_irq(irq, &tmiofb_irq, 0,
dev_name(&dev->dev), info);
if (retval)
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index c6c77562839..34cf019bba4 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -987,8 +987,8 @@ static int tridentfb_pan_display(struct fb_var_screeninfo *var,
unsigned int offset;
debug("enter\n");
- offset = (var->xoffset + (var->yoffset * var->xres_virtual))
- * var->bits_per_pixel / 32;
+ offset = (var->xoffset + (var->yoffset * info->var.xres_virtual))
+ * info->var.bits_per_pixel / 32;
set_screen_start(par, offset);
debug("exit\n");
return 0;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 087fc9960bb..3473e75ce78 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -48,20 +48,30 @@ static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
/*
- * There are many DisplayLink-based products, all with unique PIDs. We are able
- * to support all volume ones (circa 2009) with a single driver, so we match
- * globally on VID. TODO: Probe() needs to detect when we might be running
- * "future" chips, and bail on those, so a compatible driver can match.
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips
*/
static struct usb_device_id id_table[] = {
- {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+ {.idVendor = 0x17e9,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0x00,
+ .bInterfaceProtocol = 0x00,
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL,
+ },
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
/* module options */
-static int console; /* Optionally allow fbcon to consume first framebuffer */
-static int fb_defio; /* Optionally enable experimental fb_defio mmap support */
+static int console = 1; /* Allow fbcon to open framebuffer */
+static int fb_defio = 1; /* Detect mmap writes using page faults */
+static int shadow = 1; /* Optionally disable shadow framebuffer */
/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
@@ -94,17 +104,39 @@ static char *dlfb_vidreg_unlock(char *buf)
}
/*
- * On/Off for driving the DisplayLink framebuffer to the display
- * 0x00 H and V sync on
- * 0x01 H and V sync off (screen blank but powered)
- * 0x07 DPMS powerdown (requires modeset to come back)
+ * Map FB_BLANK_* to DisplayLink register
+ * DLReg FB_BLANK_*
+ * ----- -----------------------------
+ * 0x00 FB_BLANK_UNBLANK (0)
+ * 0x01 FB_BLANK_NORMAL (1)
+ * 0x03 FB_BLANK_VSYNC_SUSPEND (2)
+ * 0x05 FB_BLANK_HSYNC_SUSPEND (3)
+ * 0x07 FB_BLANK_POWERDOWN (4) Note: requires modeset to come back
*/
-static char *dlfb_enable_hvsync(char *buf, bool enable)
+static char *dlfb_blanking(char *buf, int fb_blank)
{
- if (enable)
- return dlfb_set_register(buf, 0x1F, 0x00);
- else
- return dlfb_set_register(buf, 0x1F, 0x07);
+ u8 reg;
+
+ switch (fb_blank) {
+ case FB_BLANK_POWERDOWN:
+ reg = 0x07;
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ reg = 0x05;
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ reg = 0x03;
+ break;
+ case FB_BLANK_NORMAL:
+ reg = 0x01;
+ break;
+ default:
+ reg = 0x00;
+ }
+
+ buf = dlfb_set_register(buf, 0x1F, reg);
+
+ return buf;
}
static char *dlfb_set_color_depth(char *buf, u8 selection)
@@ -272,13 +304,15 @@ static int dlfb_set_video_mode(struct dlfb_data *dev,
wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len);
wrptr = dlfb_set_vid_cmds(wrptr, var);
- wrptr = dlfb_enable_hvsync(wrptr, true);
+ wrptr = dlfb_blanking(wrptr, FB_BLANK_UNBLANK);
wrptr = dlfb_vidreg_unlock(wrptr);
writesize = wrptr - buf;
retval = dlfb_submit_urb(dev, urb, writesize);
+ dev->blank_mode = FB_BLANK_UNBLANK;
+
return retval;
}
@@ -752,14 +786,13 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
{
struct dlfb_data *dev = info->par;
- struct dloarea *area = NULL;
if (!atomic_read(&dev->usb_active))
return 0;
/* TODO: Update X server to get this from sysfs instead */
if (cmd == DLFB_IOCTL_RETURN_EDID) {
- char *edid = (char *)arg;
+ void __user *edid = (void __user *)arg;
if (copy_to_user(edid, dev->edid, dev->edid_size))
return -EFAULT;
return 0;
@@ -767,6 +800,11 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
+ struct dloarea area;
+
+ if (copy_from_user(&area, (void __user *)arg,
+ sizeof(struct dloarea)))
+ return -EFAULT;
/*
* If we have a damage-aware client, turn fb_defio "off"
@@ -778,21 +816,19 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
if (info->fbdefio)
info->fbdefio->delay = DL_DEFIO_WRITE_DISABLE;
- area = (struct dloarea *)arg;
+ if (area.x < 0)
+ area.x = 0;
- if (area->x < 0)
- area->x = 0;
+ if (area.x > info->var.xres)
+ area.x = info->var.xres;
- if (area->x > info->var.xres)
- area->x = info->var.xres;
+ if (area.y < 0)
+ area.y = 0;
- if (area->y < 0)
- area->y = 0;
+ if (area.y > info->var.yres)
+ area.y = info->var.yres;
- if (area->y > info->var.yres)
- area->y = info->var.yres;
-
- dlfb_handle_damage(dev, area->x, area->y, area->w, area->h,
+ dlfb_handle_damage(dev, area.x, area.y, area.w, area.h,
info->screen_base);
}
@@ -840,7 +876,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
* preventing other clients (X) from working properly. Usually
* not what the user wants. Fail by default with option to enable.
*/
- if ((user == 0) & (!console))
+ if ((user == 0) && (!console))
return -EBUSY;
/* If the USB device is gone, we don't accept new opens */
@@ -1039,32 +1075,57 @@ static int dlfb_ops_set_par(struct fb_info *info)
return result;
}
+/* To fonzi the jukebox (e.g. make blanking changes take effect) */
+static char *dlfb_dummy_render(char *buf)
+{
+ *buf++ = 0xAF;
+ *buf++ = 0x6A; /* copy */
+ *buf++ = 0x00; /* from address */
+ *buf++ = 0x00;
+ *buf++ = 0x00;
+ *buf++ = 0x01; /* one pixel */
+ *buf++ = 0x00; /* to address */
+ *buf++ = 0x00;
+ *buf++ = 0x00;
+ return buf;
+}
+
/*
* In order to come back from full DPMS off, we need to set the mode again
*/
static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
{
struct dlfb_data *dev = info->par;
+ char *bufptr;
+ struct urb *urb;
- if (blank_mode != FB_BLANK_UNBLANK) {
- char *bufptr;
- struct urb *urb;
-
- urb = dlfb_get_urb(dev);
- if (!urb)
- return 0;
+ pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n",
+ info->node, dev->blank_mode, blank_mode);
- bufptr = (char *) urb->transfer_buffer;
- bufptr = dlfb_vidreg_lock(bufptr);
- bufptr = dlfb_enable_hvsync(bufptr, false);
- bufptr = dlfb_vidreg_unlock(bufptr);
+ if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
+ (blank_mode != FB_BLANK_POWERDOWN)) {
- dlfb_submit_urb(dev, urb, bufptr -
- (char *) urb->transfer_buffer);
- } else {
+ /* returning from powerdown requires a fresh modeset */
dlfb_set_video_mode(dev, &info->var);
}
+ urb = dlfb_get_urb(dev);
+ if (!urb)
+ return 0;
+
+ bufptr = (char *) urb->transfer_buffer;
+ bufptr = dlfb_vidreg_lock(bufptr);
+ bufptr = dlfb_blanking(bufptr, blank_mode);
+ bufptr = dlfb_vidreg_unlock(bufptr);
+
+ /* seems like a render op is needed to have blank change take effect */
+ bufptr = dlfb_dummy_render(bufptr);
+
+ dlfb_submit_urb(dev, urb, bufptr -
+ (char *) urb->transfer_buffer);
+
+ dev->blank_mode = blank_mode;
+
return 0;
}
@@ -1097,7 +1158,7 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
int new_len;
unsigned char *old_fb = info->screen_base;
unsigned char *new_fb;
- unsigned char *new_back;
+ unsigned char *new_back = NULL;
pr_warn("Reallocating framebuffer. Addresses will change!\n");
@@ -1129,7 +1190,8 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
* But with imperfect damage info we may send pixels over USB
* that were, in fact, unchanged - wasting limited USB bandwidth
*/
- new_back = vzalloc(new_len);
+ if (shadow)
+ new_back = vzalloc(new_len);
if (!new_back)
pr_info("No shadow/backing buffer allocated\n");
else {
@@ -1430,21 +1492,30 @@ static int dlfb_select_std_channel(struct dlfb_data *dev)
}
static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev,
- struct usb_device *usbdev)
+ struct usb_interface *interface)
{
char *desc;
char *buf;
char *desc_end;
- u8 total_len = 0;
+ int total_len = 0;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
return false;
desc = buf;
- total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
- 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
+ total_len = usb_get_descriptor(interface_to_usbdev(interface),
+ 0x5f, /* vendor specific */
+ 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
+
+ /* if not found, look in configuration descriptor */
+ if (total_len < 0) {
+ if (0 == usb_get_extra_descriptor(interface->cur_altsetting,
+ 0x5f, &desc))
+ total_len = (int) desc[0];
+ }
+
if (total_len > 5) {
pr_info("vendor descriptor length:%x data:%02x %02x %02x %02x" \
"%02x %02x %02x %02x %02x %02x %02x\n",
@@ -1485,6 +1556,8 @@ static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev,
}
desc += length;
}
+ } else {
+ pr_info("vendor descriptor not available (%d)\n", total_len);
}
goto success;
@@ -1531,10 +1604,11 @@ static int dlfb_usb_probe(struct usb_interface *interface,
usbdev->descriptor.bcdDevice, dev);
pr_info("console enable=%d\n", console);
pr_info("fb_defio enable=%d\n", fb_defio);
+ pr_info("shadow enable=%d\n", shadow);
dev->sku_pixel_limit = 2048 * 1152; /* default to maximum */
- if (!dlfb_parse_vendor_descriptor(dev, usbdev)) {
+ if (!dlfb_parse_vendor_descriptor(dev, interface)) {
pr_err("firmware not recognized. Assume incompatible device\n");
goto error;
}
@@ -1548,7 +1622,7 @@ static int dlfb_usb_probe(struct usb_interface *interface,
/* We don't register a new USB class. Our client interface is fbdev */
/* allocates framebuffer driver structure, not framebuffer memory */
- info = framebuffer_alloc(0, &usbdev->dev);
+ info = framebuffer_alloc(0, &interface->dev);
if (!info) {
retval = -ENOMEM;
pr_err("framebuffer_alloc failed\n");
@@ -1883,10 +1957,13 @@ static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len)
}
module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-MODULE_PARM_DESC(console, "Allow fbcon to consume first framebuffer found");
+MODULE_PARM_DESC(console, "Allow fbcon to open framebuffer");
module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support. *Experimental*");
+MODULE_PARM_DESC(fb_defio, "Page fault detection of mmap writes");
+
+module_param(shadow, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+MODULE_PARM_DESC(shadow, "Shadow vid mem. Disable to save mem but lose perf");
MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
"Jaya Kumar <jayakumar.lkml@gmail.com>, "
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index 6b52bf65f0b..3f5a041601d 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -555,7 +555,7 @@ static int __init valkyrie_init_info(struct fb_info *info,
/*
- * Parse user speficied options (`video=valkyriefb:')
+ * Parse user specified options (`video=valkyriefb:')
*/
int __init valkyriefb_setup(char *options)
{
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index bc67251f1a2..bf2f78065cf 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -395,8 +395,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
|| var->xoffset)
return -EINVAL;
} else {
- if (var->xoffset + var->xres > info->var.xres_virtual ||
- var->yoffset + var->yres > info->var.yres_virtual)
+ if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
}
info->var.xoffset = var->xoffset;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 305c975b178..0267acd8dc8 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -207,7 +207,7 @@ static void vga16fb_pan_var(struct fb_info *info,
* granularity if someone supports xoffset in bit resolution */
vga_io_r(VGA_IS1_RC); /* reset flip-flop */
vga_io_w(VGA_ATT_IW, VGA_ATC_PEL);
- if (var->bits_per_pixel == 8)
+ if (info->var.bits_per_pixel == 8)
vga_io_w(VGA_ATT_IW, (xoffset & 3) << 1);
else
vga_io_w(VGA_ATT_IW, xoffset & 7);
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index b1f364745ca..9138e517267 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -172,30 +172,20 @@ static int tmds_register_read_bytes(int index, u8 *buff, int buff_len)
}
/* DVI Set Mode */
-void viafb_dvi_set_mode(struct VideoModeTable *mode, int mode_bpp,
- int set_iga)
+void viafb_dvi_set_mode(const struct fb_var_screeninfo *var, int iga)
{
- struct VideoModeTable *rb_mode;
- struct crt_mode_table *pDviTiming;
- unsigned long desirePixelClock, maxPixelClock;
- pDviTiming = mode->crtc;
- desirePixelClock = pDviTiming->refresh_rate
- * pDviTiming->crtc.hor_total * pDviTiming->crtc.ver_total
- / 1000000;
- maxPixelClock = (unsigned long)viaparinfo->
- tmds_setting_info->max_pixel_clock;
-
- DEBUG_MSG(KERN_INFO "\nDVI_set_mode!!\n");
-
- if ((maxPixelClock != 0) && (desirePixelClock > maxPixelClock)) {
- rb_mode = viafb_get_rb_mode(mode->crtc[0].crtc.hor_addr,
- mode->crtc[0].crtc.ver_addr);
- if (rb_mode) {
- mode = rb_mode;
- pDviTiming = rb_mode->crtc;
- }
+ struct fb_var_screeninfo dvi_var = *var;
+ struct crt_mode_table *rb_mode;
+ int maxPixelClock;
+
+ maxPixelClock = viaparinfo->shared->tmds_setting_info.max_pixel_clock;
+ if (maxPixelClock && PICOS2KHZ(var->pixclock) / 1000 > maxPixelClock) {
+ rb_mode = viafb_get_best_rb_mode(var->xres, var->yres, 60);
+ if (rb_mode)
+ viafb_fill_var_timing_info(&dvi_var, rb_mode);
}
- viafb_fill_crtc_timing(pDviTiming, mode, mode_bpp / 8, set_iga);
+
+ viafb_fill_crtc_timing(&dvi_var, iga);
}
/* Sense DVI Connector */
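
The max-pixel-clock comparison above works in MHz: var->pixclock holds the pixel period in picoseconds, PICOS2KHZ() from <linux/fb.h> converts it to kHz, and the division by 1000 brings it to MHz to match tmds_setting_info.max_pixel_clock. A quick illustrative check, using the common 640x480@60 period rather than anything from this patch:

	/* illustrative only: units in the reduced-blanking fallback test */
	u32 pixclock_ps = 39722;		/* ~25.175 MHz dot clock */
	u32 khz = PICOS2KHZ(pixclock_ps);	/* 25174 kHz */
	u32 mhz = khz / 1000;			/* 25 MHz, compared against
						 * max_pixel_clock (in MHz) */
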
diff --git a/drivers/video/via/dvi.h b/drivers/video/via/dvi.h
index f473dd01097..e2116aaf797 100644
--- a/drivers/video/via/dvi.h
+++ b/drivers/video/via/dvi.h
@@ -59,7 +59,6 @@ void viafb_dvi_enable(void);
bool __devinit viafb_tmds_trasmitter_identify(void);
void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
struct tmds_setting_information *tmds_setting);
-void viafb_dvi_set_mode(struct VideoModeTable *videoMode, int mode_bpp,
- int set_iga);
+void viafb_dvi_set_mode(const struct fb_var_screeninfo *var, int iga);
#endif /* __DVI_H__ */
diff --git a/drivers/video/via/global.c b/drivers/video/via/global.c
index e10d8249534..3102171c167 100644
--- a/drivers/video/via/global.c
+++ b/drivers/video/via/global.c
@@ -35,6 +35,8 @@ int viafb_LCD_ON ;
int viafb_LCD2_ON;
int viafb_SAMM_ON;
int viafb_dual_fb;
+unsigned int viafb_second_xres = 640;
+unsigned int viafb_second_yres = 480;
int viafb_hotplug_Xres = 640;
int viafb_hotplug_Yres = 480;
int viafb_hotplug_bpp = 32;
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index ff969dc3459..275dbbbd6b8 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -67,6 +67,8 @@ extern int viafb_lcd_dsp_method;
extern int viafb_lcd_mode;
extern int viafb_CRT_ON;
+extern unsigned int viafb_second_xres;
+extern unsigned int viafb_second_yres;
extern int viafb_hotplug_Xres;
extern int viafb_hotplug_Yres;
extern int viafb_hotplug_bpp;
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 47b13535ed2..d5aaca9cfa7 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -191,67 +191,6 @@ static struct fetch_count fetch_count_reg = {
{IGA2_FETCH_COUNT_REG_NUM, {{CR65, 0, 7}, {CR67, 2, 3} } }
};
-static struct iga1_crtc_timing iga1_crtc_reg = {
- /* IGA1 Horizontal Total */
- {IGA1_HOR_TOTAL_REG_NUM, {{CR00, 0, 7}, {CR36, 3, 3} } },
- /* IGA1 Horizontal Addressable Video */
- {IGA1_HOR_ADDR_REG_NUM, {{CR01, 0, 7} } },
- /* IGA1 Horizontal Blank Start */
- {IGA1_HOR_BLANK_START_REG_NUM, {{CR02, 0, 7} } },
- /* IGA1 Horizontal Blank End */
- {IGA1_HOR_BLANK_END_REG_NUM,
- {{CR03, 0, 4}, {CR05, 7, 7}, {CR33, 5, 5} } },
- /* IGA1 Horizontal Sync Start */
- {IGA1_HOR_SYNC_START_REG_NUM, {{CR04, 0, 7}, {CR33, 4, 4} } },
- /* IGA1 Horizontal Sync End */
- {IGA1_HOR_SYNC_END_REG_NUM, {{CR05, 0, 4} } },
- /* IGA1 Vertical Total */
- {IGA1_VER_TOTAL_REG_NUM,
- {{CR06, 0, 7}, {CR07, 0, 0}, {CR07, 5, 5}, {CR35, 0, 0} } },
- /* IGA1 Vertical Addressable Video */
- {IGA1_VER_ADDR_REG_NUM,
- {{CR12, 0, 7}, {CR07, 1, 1}, {CR07, 6, 6}, {CR35, 2, 2} } },
- /* IGA1 Vertical Blank Start */
- {IGA1_VER_BLANK_START_REG_NUM,
- {{CR15, 0, 7}, {CR07, 3, 3}, {CR09, 5, 5}, {CR35, 3, 3} } },
- /* IGA1 Vertical Blank End */
- {IGA1_VER_BLANK_END_REG_NUM, {{CR16, 0, 7} } },
- /* IGA1 Vertical Sync Start */
- {IGA1_VER_SYNC_START_REG_NUM,
- {{CR10, 0, 7}, {CR07, 2, 2}, {CR07, 7, 7}, {CR35, 1, 1} } },
- /* IGA1 Vertical Sync End */
- {IGA1_VER_SYNC_END_REG_NUM, {{CR11, 0, 3} } }
-};
-
-static struct iga2_crtc_timing iga2_crtc_reg = {
- /* IGA2 Horizontal Total */
- {IGA2_HOR_TOTAL_REG_NUM, {{CR50, 0, 7}, {CR55, 0, 3} } },
- /* IGA2 Horizontal Addressable Video */
- {IGA2_HOR_ADDR_REG_NUM, {{CR51, 0, 7}, {CR55, 4, 6} } },
- /* IGA2 Horizontal Blank Start */
- {IGA2_HOR_BLANK_START_REG_NUM, {{CR52, 0, 7}, {CR54, 0, 2} } },
- /* IGA2 Horizontal Blank End */
- {IGA2_HOR_BLANK_END_REG_NUM,
- {{CR53, 0, 7}, {CR54, 3, 5}, {CR5D, 6, 6} } },
- /* IGA2 Horizontal Sync Start */
- {IGA2_HOR_SYNC_START_REG_NUM,
- {{CR56, 0, 7}, {CR54, 6, 7}, {CR5C, 7, 7}, {CR5D, 7, 7} } },
- /* IGA2 Horizontal Sync End */
- {IGA2_HOR_SYNC_END_REG_NUM, {{CR57, 0, 7}, {CR5C, 6, 6} } },
- /* IGA2 Vertical Total */
- {IGA2_VER_TOTAL_REG_NUM, {{CR58, 0, 7}, {CR5D, 0, 2} } },
- /* IGA2 Vertical Addressable Video */
- {IGA2_VER_ADDR_REG_NUM, {{CR59, 0, 7}, {CR5D, 3, 5} } },
- /* IGA2 Vertical Blank Start */
- {IGA2_VER_BLANK_START_REG_NUM, {{CR5A, 0, 7}, {CR5C, 0, 2} } },
- /* IGA2 Vertical Blank End */
- {IGA2_VER_BLANK_END_REG_NUM, {{CR5B, 0, 7}, {CR5C, 3, 5} } },
- /* IGA2 Vertical Sync Start */
- {IGA2_VER_SYNC_START_REG_NUM, {{CR5E, 0, 7}, {CR5F, 5, 7} } },
- /* IGA2 Vertical Sync End */
- {IGA2_VER_SYNC_END_REG_NUM, {{CR5F, 0, 4} } }
-};
-
static struct rgbLUT palLUT_table[] = {
/* {R,G,B} */
/* Index 0x00~0x03 */
@@ -1528,302 +1467,40 @@ void viafb_set_vclock(u32 clk, int set_iga)
via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
}
-void viafb_load_crtc_timing(struct display_timing device_timing,
- int set_iga)
+static struct display_timing var_to_timing(const struct fb_var_screeninfo *var)
{
- int i;
- int viafb_load_reg_num = 0;
- int reg_value = 0;
- struct io_register *reg = NULL;
-
- viafb_unlock_crt();
-
- for (i = 0; i < 12; i++) {
- if (set_iga == IGA1) {
- switch (i) {
- case H_TOTAL_INDEX:
- reg_value =
- IGA1_HOR_TOTAL_FORMULA(device_timing.
- hor_total);
- viafb_load_reg_num =
- iga1_crtc_reg.hor_total.reg_num;
- reg = iga1_crtc_reg.hor_total.reg;
- break;
- case H_ADDR_INDEX:
- reg_value =
- IGA1_HOR_ADDR_FORMULA(device_timing.
- hor_addr);
- viafb_load_reg_num =
- iga1_crtc_reg.hor_addr.reg_num;
- reg = iga1_crtc_reg.hor_addr.reg;
- break;
- case H_BLANK_START_INDEX:
- reg_value =
- IGA1_HOR_BLANK_START_FORMULA
- (device_timing.hor_blank_start);
- viafb_load_reg_num =
- iga1_crtc_reg.hor_blank_start.reg_num;
- reg = iga1_crtc_reg.hor_blank_start.reg;
- break;
- case H_BLANK_END_INDEX:
- reg_value =
- IGA1_HOR_BLANK_END_FORMULA
- (device_timing.hor_blank_start,
- device_timing.hor_blank_end);
- viafb_load_reg_num =
- iga1_crtc_reg.hor_blank_end.reg_num;
- reg = iga1_crtc_reg.hor_blank_end.reg;
- break;
- case H_SYNC_START_INDEX:
- reg_value =
- IGA1_HOR_SYNC_START_FORMULA
- (device_timing.hor_sync_start);
- viafb_load_reg_num =
- iga1_crtc_reg.hor_sync_start.reg_num;
- reg = iga1_crtc_reg.hor_sync_start.reg;
- break;
- case H_SYNC_END_INDEX:
- reg_value =
- IGA1_HOR_SYNC_END_FORMULA
- (device_timing.hor_sync_start,
- device_timing.hor_sync_end);
- viafb_load_reg_num =
- iga1_crtc_reg.hor_sync_end.reg_num;
- reg = iga1_crtc_reg.hor_sync_end.reg;
- break;
- case V_TOTAL_INDEX:
- reg_value =
- IGA1_VER_TOTAL_FORMULA(device_timing.
- ver_total);
- viafb_load_reg_num =
- iga1_crtc_reg.ver_total.reg_num;
- reg = iga1_crtc_reg.ver_total.reg;
- break;
- case V_ADDR_INDEX:
- reg_value =
- IGA1_VER_ADDR_FORMULA(device_timing.
- ver_addr);
- viafb_load_reg_num =
- iga1_crtc_reg.ver_addr.reg_num;
- reg = iga1_crtc_reg.ver_addr.reg;
- break;
- case V_BLANK_START_INDEX:
- reg_value =
- IGA1_VER_BLANK_START_FORMULA
- (device_timing.ver_blank_start);
- viafb_load_reg_num =
- iga1_crtc_reg.ver_blank_start.reg_num;
- reg = iga1_crtc_reg.ver_blank_start.reg;
- break;
- case V_BLANK_END_INDEX:
- reg_value =
- IGA1_VER_BLANK_END_FORMULA
- (device_timing.ver_blank_start,
- device_timing.ver_blank_end);
- viafb_load_reg_num =
- iga1_crtc_reg.ver_blank_end.reg_num;
- reg = iga1_crtc_reg.ver_blank_end.reg;
- break;
- case V_SYNC_START_INDEX:
- reg_value =
- IGA1_VER_SYNC_START_FORMULA
- (device_timing.ver_sync_start);
- viafb_load_reg_num =
- iga1_crtc_reg.ver_sync_start.reg_num;
- reg = iga1_crtc_reg.ver_sync_start.reg;
- break;
- case V_SYNC_END_INDEX:
- reg_value =
- IGA1_VER_SYNC_END_FORMULA
- (device_timing.ver_sync_start,
- device_timing.ver_sync_end);
- viafb_load_reg_num =
- iga1_crtc_reg.ver_sync_end.reg_num;
- reg = iga1_crtc_reg.ver_sync_end.reg;
- break;
-
- }
- }
-
- if (set_iga == IGA2) {
- switch (i) {
- case H_TOTAL_INDEX:
- reg_value =
- IGA2_HOR_TOTAL_FORMULA(device_timing.
- hor_total);
- viafb_load_reg_num =
- iga2_crtc_reg.hor_total.reg_num;
- reg = iga2_crtc_reg.hor_total.reg;
- break;
- case H_ADDR_INDEX:
- reg_value =
- IGA2_HOR_ADDR_FORMULA(device_timing.
- hor_addr);
- viafb_load_reg_num =
- iga2_crtc_reg.hor_addr.reg_num;
- reg = iga2_crtc_reg.hor_addr.reg;
- break;
- case H_BLANK_START_INDEX:
- reg_value =
- IGA2_HOR_BLANK_START_FORMULA
- (device_timing.hor_blank_start);
- viafb_load_reg_num =
- iga2_crtc_reg.hor_blank_start.reg_num;
- reg = iga2_crtc_reg.hor_blank_start.reg;
- break;
- case H_BLANK_END_INDEX:
- reg_value =
- IGA2_HOR_BLANK_END_FORMULA
- (device_timing.hor_blank_start,
- device_timing.hor_blank_end);
- viafb_load_reg_num =
- iga2_crtc_reg.hor_blank_end.reg_num;
- reg = iga2_crtc_reg.hor_blank_end.reg;
- break;
- case H_SYNC_START_INDEX:
- reg_value =
- IGA2_HOR_SYNC_START_FORMULA
- (device_timing.hor_sync_start);
- if (UNICHROME_CN700 <=
- viaparinfo->chip_info->gfx_chip_name)
- viafb_load_reg_num =
- iga2_crtc_reg.hor_sync_start.
- reg_num;
- else
- viafb_load_reg_num = 3;
- reg = iga2_crtc_reg.hor_sync_start.reg;
- break;
- case H_SYNC_END_INDEX:
- reg_value =
- IGA2_HOR_SYNC_END_FORMULA
- (device_timing.hor_sync_start,
- device_timing.hor_sync_end);
- viafb_load_reg_num =
- iga2_crtc_reg.hor_sync_end.reg_num;
- reg = iga2_crtc_reg.hor_sync_end.reg;
- break;
- case V_TOTAL_INDEX:
- reg_value =
- IGA2_VER_TOTAL_FORMULA(device_timing.
- ver_total);
- viafb_load_reg_num =
- iga2_crtc_reg.ver_total.reg_num;
- reg = iga2_crtc_reg.ver_total.reg;
- break;
- case V_ADDR_INDEX:
- reg_value =
- IGA2_VER_ADDR_FORMULA(device_timing.
- ver_addr);
- viafb_load_reg_num =
- iga2_crtc_reg.ver_addr.reg_num;
- reg = iga2_crtc_reg.ver_addr.reg;
- break;
- case V_BLANK_START_INDEX:
- reg_value =
- IGA2_VER_BLANK_START_FORMULA
- (device_timing.ver_blank_start);
- viafb_load_reg_num =
- iga2_crtc_reg.ver_blank_start.reg_num;
- reg = iga2_crtc_reg.ver_blank_start.reg;
- break;
- case V_BLANK_END_INDEX:
- reg_value =
- IGA2_VER_BLANK_END_FORMULA
- (device_timing.ver_blank_start,
- device_timing.ver_blank_end);
- viafb_load_reg_num =
- iga2_crtc_reg.ver_blank_end.reg_num;
- reg = iga2_crtc_reg.ver_blank_end.reg;
- break;
- case V_SYNC_START_INDEX:
- reg_value =
- IGA2_VER_SYNC_START_FORMULA
- (device_timing.ver_sync_start);
- viafb_load_reg_num =
- iga2_crtc_reg.ver_sync_start.reg_num;
- reg = iga2_crtc_reg.ver_sync_start.reg;
- break;
- case V_SYNC_END_INDEX:
- reg_value =
- IGA2_VER_SYNC_END_FORMULA
- (device_timing.ver_sync_start,
- device_timing.ver_sync_end);
- viafb_load_reg_num =
- iga2_crtc_reg.ver_sync_end.reg_num;
- reg = iga2_crtc_reg.ver_sync_end.reg;
- break;
-
- }
- }
- viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR);
- }
-
- viafb_lock_crt();
+ struct display_timing timing;
+
+ timing.hor_addr = var->xres;
+ timing.hor_sync_start = timing.hor_addr + var->right_margin;
+ timing.hor_sync_end = timing.hor_sync_start + var->hsync_len;
+ timing.hor_total = timing.hor_sync_end + var->left_margin;
+ timing.hor_blank_start = timing.hor_addr;
+ timing.hor_blank_end = timing.hor_total;
+ timing.ver_addr = var->yres;
+ timing.ver_sync_start = timing.ver_addr + var->lower_margin;
+ timing.ver_sync_end = timing.ver_sync_start + var->vsync_len;
+ timing.ver_total = timing.ver_sync_end + var->upper_margin;
+ timing.ver_blank_start = timing.ver_addr;
+ timing.ver_blank_end = timing.ver_total;
+ return timing;
}
-void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
- struct VideoModeTable *video_mode, int bpp_byte, int set_iga)
+void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var, int iga)
{
- struct display_timing crt_reg;
- int i;
- int index = 0;
- int h_addr, v_addr;
- u32 clock, refresh = viafb_refresh;
-
- if (viafb_SAMM_ON && set_iga == IGA2)
- refresh = viafb_refresh1;
-
- for (i = 0; i < video_mode->mode_array; i++) {
- index = i;
-
- if (crt_table[i].refresh_rate == refresh)
- break;
- }
+ struct display_timing crt_reg = var_to_timing(var);
- crt_reg = crt_table[index].crtc;
+ if (iga == IGA1)
+ via_set_primary_timing(&crt_reg);
+ else if (iga == IGA2)
+ via_set_secondary_timing(&crt_reg);
- /* Mode 640x480 has border, but LCD/DFP didn't have border. */
- /* So we would delete border. */
- if ((viafb_LCD_ON | viafb_DVI_ON)
- && video_mode->crtc[0].crtc.hor_addr == 640
- && video_mode->crtc[0].crtc.ver_addr == 480
- && refresh == 60) {
- /* The border is 8 pixels. */
- crt_reg.hor_blank_start = crt_reg.hor_blank_start - 8;
-
- /* Blanking time should add left and right borders. */
- crt_reg.hor_blank_end = crt_reg.hor_blank_end + 16;
- }
-
- h_addr = crt_reg.hor_addr;
- v_addr = crt_reg.ver_addr;
- if (set_iga == IGA1) {
- viafb_unlock_crt();
- viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);
- }
-
- switch (set_iga) {
- case IGA1:
- viafb_load_crtc_timing(crt_reg, IGA1);
- break;
- case IGA2:
- viafb_load_crtc_timing(crt_reg, IGA2);
- break;
- }
-
- viafb_lock_crt();
- viafb_write_reg_mask(CR17, VIACR, 0x80, BIT7);
- viafb_load_fetch_count_reg(h_addr, bpp_byte, set_iga);
-
- /* load FIFO */
- if ((viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266)
- && (viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400))
- viafb_load_FIFO_reg(set_iga, h_addr, v_addr);
-
- clock = crt_reg.hor_total * crt_reg.ver_total
- * crt_table[index].refresh_rate;
- viafb_set_vclock(clock, set_iga);
+ viafb_load_fetch_count_reg(var->xres, var->bits_per_pixel / 8, iga);
+ if (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266
+ && viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400)
+ viafb_load_FIFO_reg(iga, var->xres, var->yres);
+ viafb_set_vclock(PICOS2KHZ(var->pixclock) * 1000, iga);
}
void __devinit viafb_init_chip_info(int chip_type)
@@ -2092,23 +1769,9 @@ static u8 get_sync(struct fb_info *info)
return polarity;
}
-int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
- struct VideoModeTable *vmode_tbl1, int video_bpp1)
+static void hw_init(void)
{
- int i, j;
- int port;
- u32 devices = viaparinfo->shared->iga1_devices
- | viaparinfo->shared->iga2_devices;
- u8 value, index, mask;
- struct crt_mode_table *crt_timing;
- struct crt_mode_table *crt_timing1 = NULL;
-
- device_screen_off();
- crt_timing = vmode_tbl->crtc;
-
- if (viafb_SAMM_ON == 1) {
- crt_timing1 = vmode_tbl1->crtc;
- }
+ int i;
inb(VIAStatus);
outb(0x00, VIAAR);
@@ -2147,9 +1810,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
break;
}
+ /* probably this should go to the scaling code one day */
viafb_write_regx(scaling_parameters, ARRAY_SIZE(scaling_parameters));
- device_off();
- via_set_state(devices, VIA_STATE_OFF);
/* Fill VPIT Parameters */
/* Write Misc Register */
@@ -2175,12 +1837,29 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
inb(VIAStatus);
outb(0x20, VIAAR);
+ load_fix_bit_crtc_reg();
+}
+
+int viafb_setmode(int video_bpp, int video_bpp1)
+{
+ int j;
+ int port;
+ u32 devices = viaparinfo->shared->iga1_devices
+ | viaparinfo->shared->iga2_devices;
+ u8 value, index, mask;
+ struct fb_var_screeninfo var2;
+
+ device_screen_off();
+ device_off();
+ via_set_state(devices, VIA_STATE_OFF);
+
+ hw_init();
+
/* Update Patch Register */
if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266
- || viaparinfo->chip_info->gfx_chip_name == UNICHROME_K400)
- && vmode_tbl->crtc[0].crtc.hor_addr == 1024
- && vmode_tbl->crtc[0].crtc.ver_addr == 768) {
+ || viaparinfo->chip_info->gfx_chip_name == UNICHROME_K400)
+ && viafbinfo->var.xres == 1024 && viafbinfo->var.yres == 768) {
for (j = 0; j < res_patch_table[0].table_length; j++) {
index = res_patch_table[0].io_reg_table[j].index;
port = res_patch_table[0].io_reg_table[j].port;
@@ -2190,7 +1869,6 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
}
}
- load_fix_bit_crtc_reg();
via_set_primary_pitch(viafbinfo->fix.line_length);
via_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
: viafbinfo->fix.line_length);
@@ -2208,23 +1886,28 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
/* Clear On Screen */
+ if (viafb_dual_fb) {
+ var2 = viafbinfo1->var;
+ } else if (viafb_SAMM_ON) {
+ viafb_fill_var_timing_info(&var2, viafb_get_best_mode(
+ viafb_second_xres, viafb_second_yres, viafb_refresh1));
+ var2.bits_per_pixel = viafbinfo->var.bits_per_pixel;
+ }
+
/* CRT set mode */
if (viafb_CRT_ON) {
- if (viafb_SAMM_ON &&
- viaparinfo->shared->iga2_devices & VIA_CRT) {
- viafb_fill_crtc_timing(crt_timing1, vmode_tbl1,
- video_bpp1 / 8, IGA2);
- } else {
- viafb_fill_crtc_timing(crt_timing, vmode_tbl,
- video_bpp / 8,
+ if (viaparinfo->shared->iga2_devices & VIA_CRT
+ && viafb_SAMM_ON)
+ viafb_fill_crtc_timing(&var2, IGA2);
+ else
+ viafb_fill_crtc_timing(&viafbinfo->var,
(viaparinfo->shared->iga1_devices & VIA_CRT)
? IGA1 : IGA2);
- }
/* Patch if set_hres is not 8 alignment (1366) to viafb_setmode
to 8 alignment (1368),there is several pixels (2 pixels)
on right side of screen. */
- if (vmode_tbl->crtc[0].crtc.hor_addr % 8) {
+ if (viafbinfo->var.xres % 8) {
viafb_unlock_crt();
viafb_write_reg(CR02, VIACR,
viafb_read_reg(VIACR, CR02) - 1);
@@ -2233,31 +1916,20 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
}
if (viafb_DVI_ON) {
- if (viafb_SAMM_ON &&
- (viaparinfo->tmds_setting_info->iga_path == IGA2)) {
- viafb_dvi_set_mode(viafb_get_mode
- (viaparinfo->tmds_setting_info->h_active,
- viaparinfo->tmds_setting_info->
- v_active),
- video_bpp1, viaparinfo->
- tmds_setting_info->iga_path);
- } else {
- viafb_dvi_set_mode(viafb_get_mode
- (viaparinfo->tmds_setting_info->h_active,
- viaparinfo->
- tmds_setting_info->v_active),
- video_bpp, viaparinfo->
- tmds_setting_info->iga_path);
- }
+ if (viaparinfo->shared->tmds_setting_info.iga_path == IGA2
+ && viafb_SAMM_ON)
+ viafb_dvi_set_mode(&var2, IGA2);
+ else
+ viafb_dvi_set_mode(&viafbinfo->var,
+ viaparinfo->tmds_setting_info->iga_path);
}
if (viafb_LCD_ON) {
if (viafb_SAMM_ON &&
(viaparinfo->lvds_setting_info->iga_path == IGA2)) {
viaparinfo->lvds_setting_info->bpp = video_bpp1;
- viafb_lcd_set_mode(crt_timing1, viaparinfo->
- lvds_setting_info,
- &viaparinfo->chip_info->lvds_chip_info);
+ viafb_lcd_set_mode(viaparinfo->lvds_setting_info,
+ &viaparinfo->chip_info->lvds_chip_info);
} else {
/* IGA1 doesn't have LCD scaling, so set it center. */
if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
@@ -2265,18 +1937,16 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
LCD_CENTERING;
}
viaparinfo->lvds_setting_info->bpp = video_bpp;
- viafb_lcd_set_mode(crt_timing, viaparinfo->
- lvds_setting_info,
- &viaparinfo->chip_info->lvds_chip_info);
+ viafb_lcd_set_mode(viaparinfo->lvds_setting_info,
+ &viaparinfo->chip_info->lvds_chip_info);
}
}
if (viafb_LCD2_ON) {
if (viafb_SAMM_ON &&
(viaparinfo->lvds_setting_info2->iga_path == IGA2)) {
viaparinfo->lvds_setting_info2->bpp = video_bpp1;
- viafb_lcd_set_mode(crt_timing1, viaparinfo->
- lvds_setting_info2,
- &viaparinfo->chip_info->lvds_chip_info2);
+ viafb_lcd_set_mode(viaparinfo->lvds_setting_info2,
+ &viaparinfo->chip_info->lvds_chip_info2);
} else {
/* IGA1 doesn't have LCD scaling, so set it center. */
if (viaparinfo->lvds_setting_info2->iga_path == IGA1) {
@@ -2284,9 +1954,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
LCD_CENTERING;
}
viaparinfo->lvds_setting_info2->bpp = video_bpp;
- viafb_lcd_set_mode(crt_timing, viaparinfo->
- lvds_setting_info2,
- &viaparinfo->chip_info->lvds_chip_info2);
+ viafb_lcd_set_mode(viaparinfo->lvds_setting_info2,
+ &viaparinfo->chip_info->lvds_chip_info2);
}
}
@@ -2296,8 +1965,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
/* If set mode normally, save resolution information for hot-plug . */
if (!viafb_hotplug) {
- viafb_hotplug_Xres = vmode_tbl->crtc[0].crtc.hor_addr;
- viafb_hotplug_Yres = vmode_tbl->crtc[0].crtc.ver_addr;
+ viafb_hotplug_Xres = viafbinfo->var.xres;
+ viafb_hotplug_Yres = viafbinfo->var.yres;
viafb_hotplug_bpp = video_bpp;
viafb_hotplug_refresh = viafb_refresh;
@@ -2348,42 +2017,14 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
return 1;
}
-int viafb_get_pixclock(int hres, int vres, int vmode_refresh)
-{
- int i;
- struct crt_mode_table *best;
- struct VideoModeTable *vmode = viafb_get_mode(hres, vres);
-
- if (!vmode)
- return RES_640X480_60HZ_PIXCLOCK;
-
- best = &vmode->crtc[0];
- for (i = 1; i < vmode->mode_array; i++) {
- if (abs(vmode->crtc[i].refresh_rate - vmode_refresh)
- < abs(best->refresh_rate - vmode_refresh))
- best = &vmode->crtc[i];
- }
-
- return 1000000000 / (best->crtc.hor_total * best->crtc.ver_total)
- * 1000 / best->refresh_rate;
-}
-
int viafb_get_refresh(int hres, int vres, u32 long_refresh)
{
- int i;
struct crt_mode_table *best;
- struct VideoModeTable *vmode = viafb_get_mode(hres, vres);
- if (!vmode)
+ best = viafb_get_best_mode(hres, vres, long_refresh);
+ if (!best)
return 60;
- best = &vmode->crtc[0];
- for (i = 1; i < vmode->mode_array; i++) {
- if (abs(vmode->crtc[i].refresh_rate - long_refresh)
- < abs(best->refresh_rate - long_refresh))
- best = &vmode->crtc[i];
- }
-
if (abs(best->refresh_rate - long_refresh) > 3) {
if (hres == 1200 && vres == 900)
return 49; /* OLPC DCON only supports 50 Hz */
@@ -2485,21 +2126,14 @@ void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
}
/*According var's xres, yres fill var's other timing information*/
-void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
- struct VideoModeTable *vmode_tbl)
+void viafb_fill_var_timing_info(struct fb_var_screeninfo *var,
+ struct crt_mode_table *mode)
{
- struct crt_mode_table *crt_timing = NULL;
struct display_timing crt_reg;
- int i = 0, index = 0;
- crt_timing = vmode_tbl->crtc;
- for (i = 0; i < vmode_tbl->mode_array; i++) {
- index = i;
- if (crt_timing[i].refresh_rate == refresh)
- break;
- }
- crt_reg = crt_timing[index].crtc;
- var->pixclock = viafb_get_pixclock(var->xres, var->yres, refresh);
+ crt_reg = mode->crtc;
+ var->pixclock = 1000000000 / (crt_reg.hor_total * crt_reg.ver_total)
+ * 1000 / mode->refresh_rate;
var->left_margin =
crt_reg.hor_total - (crt_reg.hor_sync_start + crt_reg.hor_sync_end);
var->right_margin = crt_reg.hor_sync_start - crt_reg.hor_addr;
@@ -2509,8 +2143,8 @@ void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
var->lower_margin = crt_reg.ver_sync_start - crt_reg.ver_addr;
var->vsync_len = crt_reg.ver_sync_end;
var->sync = 0;
- if (crt_timing[index].h_sync_polarity == POSITIVE)
+ if (mode->h_sync_polarity == POSITIVE)
var->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (crt_timing[index].v_sync_polarity == POSITIVE)
+ if (mode->v_sync_polarity == POSITIVE)
var->sync |= FB_SYNC_VERT_HIGH_ACT;
}
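
As a sanity check on var_to_timing() and the open-coded pixclock arithmetic above, plugging in the usual VESA 640x480@60 margins lands within a fraction of a percent of the legacy constant this series removes from share.h. The numbers below are textbook VESA values, not something taken from the patch:

	/* illustrative only */
	static unsigned int vesa_640x480_pixclock(void)
	{
		unsigned int hor_total = 640 + 16 + 96 + 48;	/* 800 */
		unsigned int ver_total = 480 + 10 + 2 + 33;	/* 525 */

		/* same integer arithmetic as viafb_fill_var_timing_info() */
		return 1000000000 / (hor_total * ver_total) * 1000 / 60;
		/* ~39.7 ns per pixel (~25 MHz), in line with the removed
		 * RES_640X480_60HZ_PIXCLOCK value of 39722 ps */
	}
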
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index c7239eb83ba..4db5b6e8d8d 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -51,40 +51,6 @@
#define VIA_HSYNC_NEGATIVE 0x01
#define VIA_VSYNC_NEGATIVE 0x02
-/***************************************************
-* Definition IGA1 Design Method of CRTC Registers *
-****************************************************/
-#define IGA1_HOR_TOTAL_FORMULA(x) (((x)/8)-5)
-#define IGA1_HOR_ADDR_FORMULA(x) (((x)/8)-1)
-#define IGA1_HOR_BLANK_START_FORMULA(x) (((x)/8)-1)
-#define IGA1_HOR_BLANK_END_FORMULA(x, y) (((x+y)/8)-1)
-#define IGA1_HOR_SYNC_START_FORMULA(x) ((x)/8)
-#define IGA1_HOR_SYNC_END_FORMULA(x, y) ((x+y)/8)
-
-#define IGA1_VER_TOTAL_FORMULA(x) ((x)-2)
-#define IGA1_VER_ADDR_FORMULA(x) ((x)-1)
-#define IGA1_VER_BLANK_START_FORMULA(x) ((x)-1)
-#define IGA1_VER_BLANK_END_FORMULA(x, y) ((x+y)-1)
-#define IGA1_VER_SYNC_START_FORMULA(x) ((x)-1)
-#define IGA1_VER_SYNC_END_FORMULA(x, y) ((x+y)-1)
-
-/***************************************************
-** Definition IGA2 Design Method of CRTC Registers *
-****************************************************/
-#define IGA2_HOR_TOTAL_FORMULA(x) ((x)-1)
-#define IGA2_HOR_ADDR_FORMULA(x) ((x)-1)
-#define IGA2_HOR_BLANK_START_FORMULA(x) ((x)-1)
-#define IGA2_HOR_BLANK_END_FORMULA(x, y) ((x+y)-1)
-#define IGA2_HOR_SYNC_START_FORMULA(x) ((x)-1)
-#define IGA2_HOR_SYNC_END_FORMULA(x, y) ((x+y)-1)
-
-#define IGA2_VER_TOTAL_FORMULA(x) ((x)-1)
-#define IGA2_VER_ADDR_FORMULA(x) ((x)-1)
-#define IGA2_VER_BLANK_START_FORMULA(x) ((x)-1)
-#define IGA2_VER_BLANK_END_FORMULA(x, y) ((x+y)-1)
-#define IGA2_VER_SYNC_START_FORMULA(x) ((x)-1)
-#define IGA2_VER_SYNC_END_FORMULA(x, y) ((x+y)-1)
-
/**********************************************************/
/* Definition IGA2 Design Method of CRTC Shadow Registers */
/**********************************************************/
@@ -97,33 +63,6 @@
#define IGA2_VER_SYNC_START_SHADOW_FORMULA(x) (x)
#define IGA2_VER_SYNC_END_SHADOW_FORMULA(x, y) (x+y)
-/* Define Register Number for IGA1 CRTC Timing */
-
-/* location: {CR00,0,7},{CR36,3,3} */
-#define IGA1_HOR_TOTAL_REG_NUM 2
-/* location: {CR01,0,7} */
-#define IGA1_HOR_ADDR_REG_NUM 1
-/* location: {CR02,0,7} */
-#define IGA1_HOR_BLANK_START_REG_NUM 1
-/* location: {CR03,0,4},{CR05,7,7},{CR33,5,5} */
-#define IGA1_HOR_BLANK_END_REG_NUM 3
-/* location: {CR04,0,7},{CR33,4,4} */
-#define IGA1_HOR_SYNC_START_REG_NUM 2
-/* location: {CR05,0,4} */
-#define IGA1_HOR_SYNC_END_REG_NUM 1
-/* location: {CR06,0,7},{CR07,0,0},{CR07,5,5},{CR35,0,0} */
-#define IGA1_VER_TOTAL_REG_NUM 4
-/* location: {CR12,0,7},{CR07,1,1},{CR07,6,6},{CR35,2,2} */
-#define IGA1_VER_ADDR_REG_NUM 4
-/* location: {CR15,0,7},{CR07,3,3},{CR09,5,5},{CR35,3,3} */
-#define IGA1_VER_BLANK_START_REG_NUM 4
-/* location: {CR16,0,7} */
-#define IGA1_VER_BLANK_END_REG_NUM 1
-/* location: {CR10,0,7},{CR07,2,2},{CR07,7,7},{CR35,1,1} */
-#define IGA1_VER_SYNC_START_REG_NUM 4
-/* location: {CR11,0,3} */
-#define IGA1_VER_SYNC_END_REG_NUM 1
-
/* Define Register Number for IGA2 Shadow CRTC Timing */
/* location: {CR6D,0,7},{CR71,3,3} */
@@ -143,37 +82,6 @@
/* location: {CR76,0,3} */
#define IGA2_SHADOW_VER_SYNC_END_REG_NUM 1
-/* Define Register Number for IGA2 CRTC Timing */
-
-/* location: {CR50,0,7},{CR55,0,3} */
-#define IGA2_HOR_TOTAL_REG_NUM 2
-/* location: {CR51,0,7},{CR55,4,6} */
-#define IGA2_HOR_ADDR_REG_NUM 2
-/* location: {CR52,0,7},{CR54,0,2} */
-#define IGA2_HOR_BLANK_START_REG_NUM 2
-/* location: CLE266: {CR53,0,7},{CR54,3,5} => CLE266's CR5D[6]
-is reserved, so it may have problem to set 1600x1200 on IGA2. */
-/* Others: {CR53,0,7},{CR54,3,5},{CR5D,6,6} */
-#define IGA2_HOR_BLANK_END_REG_NUM 3
-/* location: {CR56,0,7},{CR54,6,7},{CR5C,7,7} */
-/* VT3314 and Later: {CR56,0,7},{CR54,6,7},{CR5C,7,7}, {CR5D,7,7} */
-#define IGA2_HOR_SYNC_START_REG_NUM 4
-
-/* location: {CR57,0,7},{CR5C,6,6} */
-#define IGA2_HOR_SYNC_END_REG_NUM 2
-/* location: {CR58,0,7},{CR5D,0,2} */
-#define IGA2_VER_TOTAL_REG_NUM 2
-/* location: {CR59,0,7},{CR5D,3,5} */
-#define IGA2_VER_ADDR_REG_NUM 2
-/* location: {CR5A,0,7},{CR5C,0,2} */
-#define IGA2_VER_BLANK_START_REG_NUM 2
-/* location: {CR5E,0,7},{CR5C,3,5} */
-#define IGA2_VER_BLANK_END_REG_NUM 2
-/* location: {CR5E,0,7},{CR5F,5,7} */
-#define IGA2_VER_SYNC_START_REG_NUM 2
-/* location: {CR5F,0,4} */
-#define IGA2_VER_SYNC_END_REG_NUM 1
-
/* Define Fetch Count Register*/
/* location: {SR1C,0,7},{SR1D,0,1} */
@@ -446,87 +354,12 @@ is reserved, so it may have problem to set 1600x1200 on IGA2. */
/* location: {CR78,0,7},{CR79,6,7} */
#define LCD_VER_SCALING_FACTOR_REG_NUM_CLE 2
-/************************************************
- ***** Define IGA1 Display Timing *****
- ************************************************/
struct io_register {
u8 io_addr;
u8 start_bit;
u8 end_bit;
};
-/* IGA1 Horizontal Total */
-struct iga1_hor_total {
- int reg_num;
- struct io_register reg[IGA1_HOR_TOTAL_REG_NUM];
-};
-
-/* IGA1 Horizontal Addressable Video */
-struct iga1_hor_addr {
- int reg_num;
- struct io_register reg[IGA1_HOR_ADDR_REG_NUM];
-};
-
-/* IGA1 Horizontal Blank Start */
-struct iga1_hor_blank_start {
- int reg_num;
- struct io_register reg[IGA1_HOR_BLANK_START_REG_NUM];
-};
-
-/* IGA1 Horizontal Blank End */
-struct iga1_hor_blank_end {
- int reg_num;
- struct io_register reg[IGA1_HOR_BLANK_END_REG_NUM];
-};
-
-/* IGA1 Horizontal Sync Start */
-struct iga1_hor_sync_start {
- int reg_num;
- struct io_register reg[IGA1_HOR_SYNC_START_REG_NUM];
-};
-
-/* IGA1 Horizontal Sync End */
-struct iga1_hor_sync_end {
- int reg_num;
- struct io_register reg[IGA1_HOR_SYNC_END_REG_NUM];
-};
-
-/* IGA1 Vertical Total */
-struct iga1_ver_total {
- int reg_num;
- struct io_register reg[IGA1_VER_TOTAL_REG_NUM];
-};
-
-/* IGA1 Vertical Addressable Video */
-struct iga1_ver_addr {
- int reg_num;
- struct io_register reg[IGA1_VER_ADDR_REG_NUM];
-};
-
-/* IGA1 Vertical Blank Start */
-struct iga1_ver_blank_start {
- int reg_num;
- struct io_register reg[IGA1_VER_BLANK_START_REG_NUM];
-};
-
-/* IGA1 Vertical Blank End */
-struct iga1_ver_blank_end {
- int reg_num;
- struct io_register reg[IGA1_VER_BLANK_END_REG_NUM];
-};
-
-/* IGA1 Vertical Sync Start */
-struct iga1_ver_sync_start {
- int reg_num;
- struct io_register reg[IGA1_VER_SYNC_START_REG_NUM];
-};
-
-/* IGA1 Vertical Sync End */
-struct iga1_ver_sync_end {
- int reg_num;
- struct io_register reg[IGA1_VER_SYNC_END_REG_NUM];
-};
-
/*****************************************************
** Define IGA2 Shadow Display Timing ****
*****************************************************/
@@ -579,82 +412,6 @@ struct iga2_shadow_ver_sync_end {
struct io_register reg[IGA2_SHADOW_VER_SYNC_END_REG_NUM];
};
-/*****************************************************
-** Define IGA2 Display Timing ****
-******************************************************/
-
-/* IGA2 Horizontal Total */
-struct iga2_hor_total {
- int reg_num;
- struct io_register reg[IGA2_HOR_TOTAL_REG_NUM];
-};
-
-/* IGA2 Horizontal Addressable Video */
-struct iga2_hor_addr {
- int reg_num;
- struct io_register reg[IGA2_HOR_ADDR_REG_NUM];
-};
-
-/* IGA2 Horizontal Blank Start */
-struct iga2_hor_blank_start {
- int reg_num;
- struct io_register reg[IGA2_HOR_BLANK_START_REG_NUM];
-};
-
-/* IGA2 Horizontal Blank End */
-struct iga2_hor_blank_end {
- int reg_num;
- struct io_register reg[IGA2_HOR_BLANK_END_REG_NUM];
-};
-
-/* IGA2 Horizontal Sync Start */
-struct iga2_hor_sync_start {
- int reg_num;
- struct io_register reg[IGA2_HOR_SYNC_START_REG_NUM];
-};
-
-/* IGA2 Horizontal Sync End */
-struct iga2_hor_sync_end {
- int reg_num;
- struct io_register reg[IGA2_HOR_SYNC_END_REG_NUM];
-};
-
-/* IGA2 Vertical Total */
-struct iga2_ver_total {
- int reg_num;
- struct io_register reg[IGA2_VER_TOTAL_REG_NUM];
-};
-
-/* IGA2 Vertical Addressable Video */
-struct iga2_ver_addr {
- int reg_num;
- struct io_register reg[IGA2_VER_ADDR_REG_NUM];
-};
-
-/* IGA2 Vertical Blank Start */
-struct iga2_ver_blank_start {
- int reg_num;
- struct io_register reg[IGA2_VER_BLANK_START_REG_NUM];
-};
-
-/* IGA2 Vertical Blank End */
-struct iga2_ver_blank_end {
- int reg_num;
- struct io_register reg[IGA2_VER_BLANK_END_REG_NUM];
-};
-
-/* IGA2 Vertical Sync Start */
-struct iga2_ver_sync_start {
- int reg_num;
- struct io_register reg[IGA2_VER_SYNC_START_REG_NUM];
-};
-
-/* IGA2 Vertical Sync End */
-struct iga2_ver_sync_end {
- int reg_num;
- struct io_register reg[IGA2_VER_SYNC_END_REG_NUM];
-};
-
/* IGA1 Fetch Count Register */
struct iga1_fetch_count {
int reg_num;
@@ -817,21 +574,6 @@ struct display_queue_expire_num {
iga2_display_queue_expire_num_reg;
};
-struct iga1_crtc_timing {
- struct iga1_hor_total hor_total;
- struct iga1_hor_addr hor_addr;
- struct iga1_hor_blank_start hor_blank_start;
- struct iga1_hor_blank_end hor_blank_end;
- struct iga1_hor_sync_start hor_sync_start;
- struct iga1_hor_sync_end hor_sync_end;
- struct iga1_ver_total ver_total;
- struct iga1_ver_addr ver_addr;
- struct iga1_ver_blank_start ver_blank_start;
- struct iga1_ver_blank_end ver_blank_end;
- struct iga1_ver_sync_start ver_sync_start;
- struct iga1_ver_sync_end ver_sync_end;
-};
-
struct iga2_shadow_crtc_timing {
struct iga2_shadow_hor_total hor_total_shadow;
struct iga2_shadow_hor_blank_end hor_blank_end_shadow;
@@ -843,21 +585,6 @@ struct iga2_shadow_crtc_timing {
struct iga2_shadow_ver_sync_end ver_sync_end_shadow;
};
-struct iga2_crtc_timing {
- struct iga2_hor_total hor_total;
- struct iga2_hor_addr hor_addr;
- struct iga2_hor_blank_start hor_blank_start;
- struct iga2_hor_blank_end hor_blank_end;
- struct iga2_hor_sync_start hor_sync_start;
- struct iga2_hor_sync_end hor_sync_end;
- struct iga2_ver_total ver_total;
- struct iga2_ver_addr ver_addr;
- struct iga2_ver_blank_start ver_blank_start;
- struct iga2_ver_blank_end ver_blank_end;
- struct iga2_ver_sync_start ver_sync_start;
- struct iga2_ver_sync_end ver_sync_end;
-};
-
/* device ID */
#define CLE266_FUNCTION3 0x3123
#define KM400_FUNCTION3 0x3205
@@ -910,9 +637,7 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
- struct VideoModeTable *video_mode, int bpp_byte, int set_iga);
-
+void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var, int iga);
void viafb_set_vclock(u32 CLK, int set_iga);
void viafb_load_reg(int timing_value, int viafb_load_reg_num,
struct io_register *reg,
@@ -932,13 +657,11 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active);
void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
*p_gfx_dpa_setting);
-int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
- struct VideoModeTable *vmode_tbl1, int video_bpp1);
-void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
- struct VideoModeTable *vmode_tbl);
+int viafb_setmode(int video_bpp, int video_bpp1);
+void viafb_fill_var_timing_info(struct fb_var_screeninfo *var,
+ struct crt_mode_table *mode);
void __devinit viafb_init_chip_info(int chip_type);
void __devinit viafb_init_dac(int set_iga);
-int viafb_get_pixclock(int hres, int vres, int vmode_refresh);
int viafb_get_refresh(int hres, int vres, u32 float_refresh);
void viafb_update_device_setting(int hres, int vres, int bpp, int flag);
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 6e06981d638..5f3b4e394e8 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -548,9 +548,8 @@ static void lcd_patch_skew(struct lvds_setting_information
}
/* LCD Set Mode */
-void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
- struct lvds_setting_information *plvds_setting_info,
- struct lvds_chip_information *plvds_chip_info)
+void viafb_lcd_set_mode(struct lvds_setting_information *plvds_setting_info,
+ struct lvds_chip_information *plvds_chip_info)
{
int set_iga = plvds_setting_info->iga_path;
int mode_bpp = plvds_setting_info->bpp;
@@ -559,16 +558,15 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
int panel_hres = plvds_setting_info->lcd_panel_hres;
int panel_vres = plvds_setting_info->lcd_panel_vres;
u32 clock;
- struct display_timing mode_crt_reg, panel_crt_reg;
- struct crt_mode_table *panel_crt_table = NULL;
- struct VideoModeTable *vmode_tbl = viafb_get_mode(panel_hres,
- panel_vres);
+ struct display_timing mode_crt_reg, panel_crt_reg, timing;
+ struct crt_mode_table *mode_crt_table, *panel_crt_table;
DEBUG_MSG(KERN_INFO "viafb_lcd_set_mode!!\n");
/* Get mode table */
+ mode_crt_table = viafb_get_best_mode(set_hres, set_vres, 60);
mode_crt_reg = mode_crt_table->crtc;
/* Get panel table Pointer */
- panel_crt_table = vmode_tbl->crtc;
+ panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60);
panel_crt_reg = panel_crt_table->crtc;
DEBUG_MSG(KERN_INFO "bellow viafb_lcd_set_mode!!\n");
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name)
@@ -576,31 +574,28 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
clock = panel_crt_reg.hor_total * panel_crt_reg.ver_total
* panel_crt_table->refresh_rate;
plvds_setting_info->vclk = clock;
- if (set_iga == IGA1) {
- /* IGA1 doesn't have LCD scaling, so set it as centering. */
- viafb_load_crtc_timing(lcd_centering_timging
- (mode_crt_reg, panel_crt_reg), IGA1);
+
+ if (set_iga == IGA2 && (set_hres < panel_hres || set_vres < panel_vres)
+ && plvds_setting_info->display_method == LCD_EXPANDSION) {
+ timing = panel_crt_reg;
+ load_lcd_scaling(set_hres, set_vres, panel_hres, panel_vres);
} else {
- /* Expansion */
- if (plvds_setting_info->display_method == LCD_EXPANDSION
- && (set_hres < panel_hres || set_vres < panel_vres)) {
- /* expansion timing IGA2 loaded panel set timing*/
- viafb_load_crtc_timing(panel_crt_reg, IGA2);
- DEBUG_MSG(KERN_INFO "viafb_load_crtc_timing!!\n");
- load_lcd_scaling(set_hres, set_vres, panel_hres,
- panel_vres);
- DEBUG_MSG(KERN_INFO "load_lcd_scaling!!\n");
- } else { /* Centering */
- /* centering timing IGA2 always loaded panel
- and mode releative timing */
- viafb_load_crtc_timing(lcd_centering_timging
- (mode_crt_reg, panel_crt_reg), IGA2);
- viafb_write_reg_mask(CR79, VIACR, 0x00,
+ timing = lcd_centering_timging(mode_crt_reg, panel_crt_reg);
+ if (set_iga == IGA2)
+ /* disable scaling */
+ via_write_reg_mask(VIACR, 0x79, 0x00,
BIT0 + BIT1 + BIT2);
- /* LCD scaling disabled */
- }
}
+ timing.hor_blank_end += timing.hor_blank_start;
+ timing.hor_sync_end += timing.hor_sync_start;
+ timing.ver_blank_end += timing.ver_blank_start;
+ timing.ver_sync_end += timing.ver_sync_start;
+ if (set_iga == IGA1)
+ via_set_primary_timing(&timing);
+ else if (set_iga == IGA2)
+ via_set_secondary_timing(&timing);
+
/* Fetch count for IGA2 only */
viafb_load_fetch_count_reg(set_hres, mode_bpp / 8, set_iga);
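
The four "timing.*_end += timing.*_start" assignments above convert the _end fields from the legacy width-style encoding (length of the blank/sync interval, which is what lcd_centering_timging() and the crt_mode_table entries store) into the absolute end positions that via_set_primary_timing()/via_set_secondary_timing() expect. A minimal sketch with made-up numbers:

	/* illustrative only: width-style fields -> absolute end positions */
	struct display_timing t = {
		.hor_blank_start = 640, .hor_blank_end = 160,	/* width */
		.hor_sync_start  = 656, .hor_sync_end  = 96,	/* width */
	};
	t.hor_blank_end += t.hor_blank_start;	/* 800: absolute position */
	t.hor_sync_end  += t.hor_sync_start;	/* 752: absolute position */
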
diff --git a/drivers/video/via/lcd.h b/drivers/video/via/lcd.h
index 75f60a655b0..77ca7b862e6 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/via/lcd.h
@@ -76,16 +76,13 @@ void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info);
-void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
- struct lvds_setting_information *plvds_setting_info,
- struct lvds_chip_information *plvds_chip_info);
+void viafb_lcd_set_mode(struct lvds_setting_information *plvds_setting_info,
+ struct lvds_chip_information *plvds_chip_info);
bool __devinit viafb_lvds_trasmitter_identify(void);
void viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info);
bool viafb_lcd_get_mobile_state(bool *mobile);
-void viafb_load_crtc_timing(struct display_timing device_timing,
- int set_iga);
#endif /* __LCD_H__ */
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 61b0bd596b8..69d882cbe70 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -22,6 +22,8 @@
#ifndef __SHARE_H__
#define __SHARE_H__
+#include "via_modesetting.h"
+
/* Define Bit Field */
#define BIT0 0x01
#define BIT1 0x02
@@ -634,10 +636,6 @@
#define V_SYNC_SATRT_SHADOW_INDEX 18
#define V_SYNC_END_SHADOW_INDEX 19
-/* Definition Video Mode Pixel Clock (picoseconds)
-*/
-#define RES_640X480_60HZ_PIXCLOCK 39722
-
/* LCD display method
*/
#define LCD_EXPANDSION 0x00
@@ -648,23 +646,6 @@
#define LCD_OPENLDI 0x00
#define LCD_SPWG 0x01
-/* Define display timing
-*/
-struct display_timing {
- u16 hor_total;
- u16 hor_addr;
- u16 hor_blank_start;
- u16 hor_blank_end;
- u16 hor_sync_start;
- u16 hor_sync_end;
- u16 ver_total;
- u16 ver_addr;
- u16 ver_blank_start;
- u16 ver_blank_end;
- u16 ver_sync_start;
- u16 ver_sync_end;
-};
-
struct crt_mode_table {
int refresh_rate;
int h_sync_polarity;
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
index eb112b62173..dd58b530c0d 100644
--- a/drivers/video/via/via-core.c
+++ b/drivers/video/via/via-core.c
@@ -35,7 +35,7 @@ static struct via_port_cfg adap_configs[] = {
* The OLPC XO-1.5 puts the camera power and reset lines onto
* GPIO 2C.
*/
-static const struct via_port_cfg olpc_adap_configs[] = {
+static struct via_port_cfg olpc_adap_configs[] = {
[VIA_PORT_26] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x26 },
[VIA_PORT_31] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x31 },
[VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c
index ab5341814c7..d69cfef7c33 100644
--- a/drivers/video/via/via-gpio.c
+++ b/drivers/video/via/via-gpio.c
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <linux/via-core.h>
#include <linux/via-gpio.h>
+#include <linux/export.h>
/*
* The ports we know about. Note that the port-25 gpios are not
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
index 3cddcff88ab..0e431aee17b 100644
--- a/drivers/video/via/via_modesetting.c
+++ b/drivers/video/via/via_modesetting.c
@@ -29,6 +29,110 @@
#include "share.h"
#include "debug.h"
+
+void via_set_primary_timing(const struct display_timing *timing)
+{
+ struct display_timing raw;
+
+ raw.hor_total = timing->hor_total / 8 - 5;
+ raw.hor_addr = timing->hor_addr / 8 - 1;
+ raw.hor_blank_start = timing->hor_blank_start / 8 - 1;
+ raw.hor_blank_end = timing->hor_blank_end / 8 - 1;
+ raw.hor_sync_start = timing->hor_sync_start / 8;
+ raw.hor_sync_end = timing->hor_sync_end / 8;
+ raw.ver_total = timing->ver_total - 2;
+ raw.ver_addr = timing->ver_addr - 1;
+ raw.ver_blank_start = timing->ver_blank_start - 1;
+ raw.ver_blank_end = timing->ver_blank_end - 1;
+ raw.ver_sync_start = timing->ver_sync_start - 1;
+ raw.ver_sync_end = timing->ver_sync_end - 1;
+
+ /* unlock timing registers */
+ via_write_reg_mask(VIACR, 0x11, 0x00, 0x80);
+
+ via_write_reg(VIACR, 0x00, raw.hor_total & 0xFF);
+ via_write_reg(VIACR, 0x01, raw.hor_addr & 0xFF);
+ via_write_reg(VIACR, 0x02, raw.hor_blank_start & 0xFF);
+ via_write_reg_mask(VIACR, 0x03, raw.hor_blank_end & 0x1F, 0x1F);
+ via_write_reg(VIACR, 0x04, raw.hor_sync_start & 0xFF);
+ via_write_reg_mask(VIACR, 0x05, (raw.hor_sync_end & 0x1F)
+ | (raw.hor_blank_end << (7 - 5) & 0x80), 0x9F);
+ via_write_reg(VIACR, 0x06, raw.ver_total & 0xFF);
+ via_write_reg_mask(VIACR, 0x07, (raw.ver_total >> 8 & 0x01)
+ | (raw.ver_addr >> (8 - 1) & 0x02)
+ | (raw.ver_sync_start >> (8 - 2) & 0x04)
+ | (raw.ver_blank_start >> (8 - 3) & 0x08)
+ | (raw.ver_total >> (9 - 5) & 0x20)
+ | (raw.ver_addr >> (9 - 6) & 0x40)
+ | (raw.ver_sync_start >> (9 - 7) & 0x80), 0xEF);
+ via_write_reg_mask(VIACR, 0x09, raw.ver_blank_start >> (9 - 5) & 0x20,
+ 0x20);
+ via_write_reg(VIACR, 0x10, raw.ver_sync_start & 0xFF);
+ via_write_reg_mask(VIACR, 0x11, raw.ver_sync_end & 0x0F, 0x0F);
+ via_write_reg(VIACR, 0x12, raw.ver_addr & 0xFF);
+ via_write_reg(VIACR, 0x15, raw.ver_blank_start & 0xFF);
+ via_write_reg(VIACR, 0x16, raw.ver_blank_end & 0xFF);
+ via_write_reg_mask(VIACR, 0x33, (raw.hor_sync_start >> (8 - 4) & 0x10)
+ | (raw.hor_blank_end >> (6 - 5) & 0x20), 0x30);
+ via_write_reg_mask(VIACR, 0x35, (raw.ver_total >> 10 & 0x01)
+ | (raw.ver_sync_start >> (10 - 1) & 0x02)
+ | (raw.ver_addr >> (10 - 2) & 0x04)
+ | (raw.ver_blank_start >> (10 - 3) & 0x08), 0x0F);
+ via_write_reg_mask(VIACR, 0x36, raw.hor_total >> (8 - 3) & 0x08, 0x08);
+
+ /* lock timing registers */
+ via_write_reg_mask(VIACR, 0x11, 0x80, 0x80);
+
+ /* reset timing control */
+ via_write_reg_mask(VIACR, 0x17, 0x00, 0x80);
+ via_write_reg_mask(VIACR, 0x17, 0x80, 0x80);
+}
+
+void via_set_secondary_timing(const struct display_timing *timing)
+{
+ struct display_timing raw;
+
+ raw.hor_total = timing->hor_total - 1;
+ raw.hor_addr = timing->hor_addr - 1;
+ raw.hor_blank_start = timing->hor_blank_start - 1;
+ raw.hor_blank_end = timing->hor_blank_end - 1;
+ raw.hor_sync_start = timing->hor_sync_start - 1;
+ raw.hor_sync_end = timing->hor_sync_end - 1;
+ raw.ver_total = timing->ver_total - 1;
+ raw.ver_addr = timing->ver_addr - 1;
+ raw.ver_blank_start = timing->ver_blank_start - 1;
+ raw.ver_blank_end = timing->ver_blank_end - 1;
+ raw.ver_sync_start = timing->ver_sync_start - 1;
+ raw.ver_sync_end = timing->ver_sync_end - 1;
+
+ via_write_reg(VIACR, 0x50, raw.hor_total & 0xFF);
+ via_write_reg(VIACR, 0x51, raw.hor_addr & 0xFF);
+ via_write_reg(VIACR, 0x52, raw.hor_blank_start & 0xFF);
+ via_write_reg(VIACR, 0x53, raw.hor_blank_end & 0xFF);
+ via_write_reg(VIACR, 0x54, (raw.hor_blank_start >> 8 & 0x07)
+ | (raw.hor_blank_end >> (8 - 3) & 0x38)
+ | (raw.hor_sync_start >> (8 - 6) & 0xC0));
+ via_write_reg_mask(VIACR, 0x55, (raw.hor_total >> 8 & 0x0F)
+ | (raw.hor_addr >> (8 - 4) & 0x70), 0x7F);
+ via_write_reg(VIACR, 0x56, raw.hor_sync_start & 0xFF);
+ via_write_reg(VIACR, 0x57, raw.hor_sync_end & 0xFF);
+ via_write_reg(VIACR, 0x58, raw.ver_total & 0xFF);
+ via_write_reg(VIACR, 0x59, raw.ver_addr & 0xFF);
+ via_write_reg(VIACR, 0x5A, raw.ver_blank_start & 0xFF);
+ via_write_reg(VIACR, 0x5B, raw.ver_blank_end & 0xFF);
+ via_write_reg(VIACR, 0x5C, (raw.ver_blank_start >> 8 & 0x07)
+ | (raw.ver_blank_end >> (8 - 3) & 0x38)
+ | (raw.hor_sync_end >> (8 - 6) & 0x40)
+ | (raw.hor_sync_start >> (10 - 7) & 0x80));
+ via_write_reg(VIACR, 0x5D, (raw.ver_total >> 8 & 0x07)
+ | (raw.ver_addr >> (8 - 3) & 0x38)
+ | (raw.hor_blank_end >> (11 - 6) & 0x40)
+ | (raw.hor_sync_start >> (11 - 7) & 0x80));
+ via_write_reg(VIACR, 0x5E, raw.ver_sync_start & 0xFF);
+ via_write_reg(VIACR, 0x5F, (raw.ver_sync_end & 0x1F)
+ | (raw.ver_sync_start >> (8 - 5) & 0xE0));
+}
+
void via_set_primary_address(u32 addr)
{
DEBUG_MSG(KERN_DEBUG "via_set_primary_address(0x%08X)\n", addr);
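
The shift-and-mask expressions in via_set_primary_timing() scatter the high-order bits of each raw value across the classic VGA CRTC overflow registers. A worked illustration for a 525-line vertical total (raw.ver_total = 525 - 2 = 523 = 0x20B), following the masks above:

	/* illustrative only: where the bits of raw.ver_total = 0x20B land */
	/* CR06       = 0x0B   bits 7:0                  */
	/* CR07 bit 0 = 0      bit 8  (raw >> 8  & 0x01) */
	/* CR07 bit 5 = 1      bit 9  (raw >> 4  & 0x20) */
	/* CR35 bit 0 = 0      bit 10 (raw >> 10 & 0x01) */
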
diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/via/via_modesetting.h
index ae35cfdeb37..06e09fe351a 100644
--- a/drivers/video/via/via_modesetting.h
+++ b/drivers/video/via/via_modesetting.h
@@ -28,6 +28,29 @@
#include <linux/types.h>
+
+#define VIA_PITCH_SIZE (1<<3)
+#define VIA_PITCH_MAX 0x3FF8
+
+
+struct display_timing {
+ u16 hor_total;
+ u16 hor_addr;
+ u16 hor_blank_start;
+ u16 hor_blank_end;
+ u16 hor_sync_start;
+ u16 hor_sync_end;
+ u16 ver_total;
+ u16 ver_addr;
+ u16 ver_blank_start;
+ u16 ver_blank_end;
+ u16 ver_sync_start;
+ u16 ver_sync_end;
+};
+
+
+void via_set_primary_timing(const struct display_timing *timing);
+void via_set_secondary_timing(const struct display_timing *timing);
void via_set_primary_address(u32 addr);
void via_set_secondary_address(u32 addr);
void via_set_primary_pitch(u32 pitch);
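For illustration only, a hypothetical caller of the new timing interface introduced above could fill a struct display_timing by hand and hand it to via_set_primary_timing(); the numbers below are made-up example values, and the real conversion from the driver's mode tables is assumed to live elsewhere in viafb:

#include "via_modesetting.h"

/* Sketch: program IGA1 with a hand-filled 640x480-style timing.
 * All values are illustrative, not taken from the mode tables. */
static void example_set_640x480(void)
{
	struct display_timing t = {
		.hor_total	 = 800,
		.hor_addr	 = 640,
		.hor_blank_start = 640,
		.hor_blank_end	 = 800,
		.hor_sync_start	 = 656,
		.hor_sync_end	 = 752,
		.ver_total	 = 525,
		.ver_addr	 = 480,
		.ver_blank_start = 480,
		.ver_blank_end	 = 525,
		.ver_sync_start	 = 490,
		.ver_sync_end	 = 492,
	};

	via_set_primary_timing(&t);	/* declared in via_modesetting.h above */
}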
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 53aa4430d86..a13c258bd32 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -38,8 +38,6 @@ static char *viafb_mode1;
static int viafb_bpp = 32;
static int viafb_bpp1 = 32;
-static unsigned int viafb_second_xres = 640;
-static unsigned int viafb_second_yres = 480;
static unsigned int viafb_second_offset;
static int viafb_second_size;
@@ -151,7 +149,8 @@ static void viafb_update_fix(struct fb_info *info)
info->fix.visual =
bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
- info->fix.line_length = (info->var.xres_virtual * bpp / 8 + 7) & ~7;
+ info->fix.line_length = ALIGN(info->var.xres_virtual * bpp / 8,
+ VIA_PITCH_SIZE);
}
static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix,
@@ -200,7 +199,6 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int depth, refresh;
- struct VideoModeTable *vmode_entry;
struct viafb_par *ppar = info->par;
u32 line;
@@ -210,8 +208,10 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE)
return -EINVAL;
- vmode_entry = viafb_get_mode(var->xres, var->yres);
- if (!vmode_entry) {
+ /* the refresh rate is not important here, as we only want to know
+ * whether the resolution exists
+ */
+ if (!viafb_get_best_mode(var->xres, var->yres, 60)) {
DEBUG_MSG(KERN_INFO
"viafb: Mode %dx%dx%d not supported!!\n",
var->xres, var->yres, var->bits_per_pixel);
@@ -238,8 +238,12 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
depth = 24;
viafb_fill_var_color_info(var, depth);
- line = (var->xres_virtual * var->bits_per_pixel / 8 + 7) & ~7;
- if (line * var->yres_virtual > ppar->memsize)
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+
+ line = ALIGN(var->xres_virtual * var->bits_per_pixel / 8,
+ VIA_PITCH_SIZE);
+ if (line > VIA_PITCH_MAX || line * var->yres_virtual > ppar->memsize)
return -EINVAL;
/* Based on var passed in to calculate the refresh,
@@ -249,7 +253,8 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
get_var_refresh(var));
/* Adjust var according to our driver's own table */
- viafb_fill_var_timing_info(var, refresh, vmode_entry);
+ viafb_fill_var_timing_info(var,
+ viafb_get_best_mode(var->xres, var->yres, refresh));
if (var->accel_flags & FB_ACCELF_TEXT &&
!ppar->shared->vdev->engine_mmio)
var->accel_flags = 0;
@@ -260,7 +265,6 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
static int viafb_set_par(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
- struct VideoModeTable *vmode_entry, *vmode_entry1 = NULL;
int refresh;
DEBUG_MSG(KERN_INFO "viafb_set_par!\n");
@@ -269,10 +273,7 @@ static int viafb_set_par(struct fb_info *info)
viafb_update_device_setting(viafbinfo->var.xres, viafbinfo->var.yres,
viafbinfo->var.bits_per_pixel, 0);
- vmode_entry = viafb_get_mode(viafbinfo->var.xres, viafbinfo->var.yres);
if (viafb_dual_fb) {
- vmode_entry1 = viafb_get_mode(viafbinfo1->var.xres,
- viafbinfo1->var.yres);
viafb_update_device_setting(viafbinfo1->var.xres,
viafbinfo1->var.yres, viafbinfo1->var.bits_per_pixel,
1);
@@ -280,8 +281,6 @@ static int viafb_set_par(struct fb_info *info)
DEBUG_MSG(KERN_INFO
"viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n",
viafb_second_xres, viafb_second_yres, viafb_bpp1);
- vmode_entry1 = viafb_get_mode(viafb_second_xres,
- viafb_second_yres);
viafb_update_device_setting(viafb_second_xres,
viafb_second_yres, viafb_bpp1, 1);
@@ -289,7 +288,8 @@ static int viafb_set_par(struct fb_info *info)
refresh = viafb_get_refresh(info->var.xres, info->var.yres,
get_var_refresh(&info->var));
- if (vmode_entry) {
+ if (viafb_get_best_mode(viafbinfo->var.xres, viafbinfo->var.yres,
+ refresh)) {
if (viafb_dual_fb && viapar->iga_path == IGA2) {
viafb_bpp1 = info->var.bits_per_pixel;
viafb_refresh1 = refresh;
@@ -302,8 +302,7 @@ static int viafb_set_par(struct fb_info *info)
info->flags &= ~FBINFO_HWACCEL_DISABLED;
else
info->flags |= FBINFO_HWACCEL_DISABLED;
- viafb_setmode(vmode_entry, info->var.bits_per_pixel,
- vmode_entry1, viafb_bpp1);
+ viafb_setmode(info->var.bits_per_pixel, viafb_bpp1);
viafb_pan_display(&info->var, info);
}
@@ -348,8 +347,9 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct viafb_par *viapar = info->par;
- u32 vram_addr = (var->yoffset * var->xres_virtual + var->xoffset)
- * (var->bits_per_pixel / 8) + viapar->vram_addr;
+ u32 vram_addr = viapar->vram_addr
+ + var->yoffset * info->fix.line_length
+ + var->xoffset * info->var.bits_per_pixel / 8;
DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr);
if (!viafb_dual_fb) {
@@ -1158,7 +1158,8 @@ static ssize_t viafb_dvp0_proc_write(struct file *file,
for (i = 0; i < 3; i++) {
value = strsep(&pbuf, " ");
if (value != NULL) {
- strict_strtoul(value, 0, (unsigned long *)&reg_val);
+ if (kstrtou8(value, 0, &reg_val) < 0)
+ return -EINVAL;
DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i,
reg_val);
switch (i) {
@@ -1228,7 +1229,8 @@ static ssize_t viafb_dvp1_proc_write(struct file *file,
for (i = 0; i < 3; i++) {
value = strsep(&pbuf, " ");
if (value != NULL) {
- strict_strtoul(value, 0, (unsigned long *)&reg_val);
+ if (kstrtou8(value, 0, &reg_val) < 0)
+ return -EINVAL;
switch (i) {
case 0:
viafb_write_reg_mask(CR9B, VIACR,
@@ -1286,7 +1288,8 @@ static ssize_t viafb_dfph_proc_write(struct file *file,
if (copy_from_user(&buf[0], buffer, length))
return -EFAULT;
buf[length - 1] = '\0'; /*Ensure end string */
- strict_strtoul(&buf[0], 0, (unsigned long *)&reg_val);
+ if (kstrtou8(buf, 0, &reg_val) < 0)
+ return -EINVAL;
viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
return count;
}
@@ -1325,7 +1328,8 @@ static ssize_t viafb_dfpl_proc_write(struct file *file,
if (copy_from_user(&buf[0], buffer, length))
return -EFAULT;
buf[length - 1] = '\0'; /*Ensure end string */
- strict_strtoul(&buf[0], 0, (unsigned long *)&reg_val);
+ if (kstrtou8(buf, 0, &reg_val) < 0)
+ return -EINVAL;
viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
return count;
}
@@ -1394,8 +1398,8 @@ static ssize_t viafb_vt1636_proc_write(struct file *file,
for (i = 0; i < 2; i++) {
value = strsep(&pbuf, " ");
if (value != NULL) {
- strict_strtoul(value, 0,
- (unsigned long *)&reg_val.Data);
+ if (kstrtou8(value, 0, &reg_val.Data) < 0)
+ return -EINVAL;
switch (i) {
case 0:
reg_val.Index = 0x08;
@@ -1431,8 +1435,8 @@ static ssize_t viafb_vt1636_proc_write(struct file *file,
for (i = 0; i < 2; i++) {
value = strsep(&pbuf, " ");
if (value != NULL) {
- strict_strtoul(value, 0,
- (unsigned long *)&reg_val.Data);
+ if (kstrtou8(value, 0, &reg_val.Data) < 0)
+ return -EINVAL;
switch (i) {
case 0:
reg_val.Index = 0x08;
@@ -1729,7 +1733,6 @@ static struct viafb_pm_hooks viafb_fb_pm_hooks = {
int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
{
u32 default_xres, default_yres;
- struct VideoModeTable *vmode_entry;
struct fb_var_screeninfo default_var;
int rc;
u32 viafb_par_length;
@@ -1802,7 +1805,6 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
}
parse_mode(viafb_mode, &default_xres, &default_yres);
- vmode_entry = viafb_get_mode(default_xres, default_yres);
if (viafb_SAMM_ON == 1)
parse_mode(viafb_mode1, &viafb_second_xres,
&viafb_second_yres);
@@ -1812,9 +1814,8 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
default_var.xres_virtual = default_xres;
default_var.yres_virtual = default_yres;
default_var.bits_per_pixel = viafb_bpp;
- viafb_fill_var_timing_info(&default_var, viafb_get_refresh(
- default_var.xres, default_var.yres, viafb_refresh),
- viafb_get_mode(default_var.xres, default_var.yres));
+ viafb_fill_var_timing_info(&default_var, viafb_get_best_mode(
+ default_var.xres, default_var.yres, viafb_refresh));
viafb_setup_fixinfo(&viafbinfo->fix, viaparinfo);
viafbinfo->var = default_var;
@@ -1853,9 +1854,8 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
default_var.xres_virtual = viafb_second_xres;
default_var.yres_virtual = viafb_second_yres;
default_var.bits_per_pixel = viafb_bpp1;
- viafb_fill_var_timing_info(&default_var, viafb_get_refresh(
- default_var.xres, default_var.yres, viafb_refresh1),
- viafb_get_mode(default_var.xres, default_var.yres));
+ viafb_fill_var_timing_info(&default_var, viafb_get_best_mode(
+ default_var.xres, default_var.yres, viafb_refresh1));
viafb_setup_fixinfo(&viafbinfo1->fix, viaparinfo1);
viafb_check_var(&default_var, viafbinfo1);
@@ -1950,61 +1950,67 @@ static int __init viafb_setup(void)
if (!*this_opt)
continue;
- if (!strncmp(this_opt, "viafb_mode1=", 12))
+ if (!strncmp(this_opt, "viafb_mode1=", 12)) {
viafb_mode1 = kstrdup(this_opt + 12, GFP_KERNEL);
- else if (!strncmp(this_opt, "viafb_mode=", 11))
+ } else if (!strncmp(this_opt, "viafb_mode=", 11)) {
viafb_mode = kstrdup(this_opt + 11, GFP_KERNEL);
- else if (!strncmp(this_opt, "viafb_bpp1=", 11))
- strict_strtoul(this_opt + 11, 0,
- (unsigned long *)&viafb_bpp1);
- else if (!strncmp(this_opt, "viafb_bpp=", 10))
- strict_strtoul(this_opt + 10, 0,
- (unsigned long *)&viafb_bpp);
- else if (!strncmp(this_opt, "viafb_refresh1=", 15))
- strict_strtoul(this_opt + 15, 0,
- (unsigned long *)&viafb_refresh1);
- else if (!strncmp(this_opt, "viafb_refresh=", 14))
- strict_strtoul(this_opt + 14, 0,
- (unsigned long *)&viafb_refresh);
- else if (!strncmp(this_opt, "viafb_lcd_dsp_method=", 21))
- strict_strtoul(this_opt + 21, 0,
- (unsigned long *)&viafb_lcd_dsp_method);
- else if (!strncmp(this_opt, "viafb_lcd_panel_id=", 19))
- strict_strtoul(this_opt + 19, 0,
- (unsigned long *)&viafb_lcd_panel_id);
- else if (!strncmp(this_opt, "viafb_accel=", 12))
- strict_strtoul(this_opt + 12, 0,
- (unsigned long *)&viafb_accel);
- else if (!strncmp(this_opt, "viafb_SAMM_ON=", 14))
- strict_strtoul(this_opt + 14, 0,
- (unsigned long *)&viafb_SAMM_ON);
- else if (!strncmp(this_opt, "viafb_active_dev=", 17))
+ } else if (!strncmp(this_opt, "viafb_bpp1=", 11)) {
+ if (kstrtouint(this_opt + 11, 0, &viafb_bpp1) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_bpp=", 10)) {
+ if (kstrtouint(this_opt + 10, 0, &viafb_bpp) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_refresh1=", 15)) {
+ if (kstrtoint(this_opt + 15, 0, &viafb_refresh1) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_refresh=", 14)) {
+ if (kstrtoint(this_opt + 14, 0, &viafb_refresh) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_lcd_dsp_method=", 21)) {
+ if (kstrtoint(this_opt + 21, 0,
+ &viafb_lcd_dsp_method) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_lcd_panel_id=", 19)) {
+ if (kstrtoint(this_opt + 19, 0,
+ &viafb_lcd_panel_id) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_accel=", 12)) {
+ if (kstrtoint(this_opt + 12, 0, &viafb_accel) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_SAMM_ON=", 14)) {
+ if (kstrtoint(this_opt + 14, 0, &viafb_SAMM_ON) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_active_dev=", 17)) {
viafb_active_dev = kstrdup(this_opt + 17, GFP_KERNEL);
- else if (!strncmp(this_opt,
- "viafb_display_hardware_layout=", 30))
- strict_strtoul(this_opt + 30, 0,
- (unsigned long *)&viafb_display_hardware_layout);
- else if (!strncmp(this_opt, "viafb_second_size=", 18))
- strict_strtoul(this_opt + 18, 0,
- (unsigned long *)&viafb_second_size);
- else if (!strncmp(this_opt,
- "viafb_platform_epia_dvi=", 24))
- strict_strtoul(this_opt + 24, 0,
- (unsigned long *)&viafb_platform_epia_dvi);
- else if (!strncmp(this_opt,
- "viafb_device_lcd_dualedge=", 26))
- strict_strtoul(this_opt + 26, 0,
- (unsigned long *)&viafb_device_lcd_dualedge);
- else if (!strncmp(this_opt, "viafb_bus_width=", 16))
- strict_strtoul(this_opt + 16, 0,
- (unsigned long *)&viafb_bus_width);
- else if (!strncmp(this_opt, "viafb_lcd_mode=", 15))
- strict_strtoul(this_opt + 15, 0,
- (unsigned long *)&viafb_lcd_mode);
- else if (!strncmp(this_opt, "viafb_lcd_port=", 15))
+ } else if (!strncmp(this_opt,
+ "viafb_display_hardware_layout=", 30)) {
+ if (kstrtoint(this_opt + 30, 0,
+ &viafb_display_hardware_layout) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_second_size=", 18)) {
+ if (kstrtoint(this_opt + 18, 0, &viafb_second_size) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt,
+ "viafb_platform_epia_dvi=", 24)) {
+ if (kstrtoint(this_opt + 24, 0,
+ &viafb_platform_epia_dvi) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt,
+ "viafb_device_lcd_dualedge=", 26)) {
+ if (kstrtoint(this_opt + 26, 0,
+ &viafb_device_lcd_dualedge) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_bus_width=", 16)) {
+ if (kstrtoint(this_opt + 16, 0, &viafb_bus_width) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_lcd_mode=", 15)) {
+ if (kstrtoint(this_opt + 15, 0, &viafb_lcd_mode) < 0)
+ return -EINVAL;
+ } else if (!strncmp(this_opt, "viafb_lcd_port=", 15)) {
viafb_lcd_port = kstrdup(this_opt + 15, GFP_KERNEL);
- else if (!strncmp(this_opt, "viafb_dvi_port=", 15))
+ } else if (!strncmp(this_opt, "viafb_dvi_port=", 15)) {
viafb_dvi_port = kstrdup(this_opt + 15, GFP_KERNEL);
+ }
}
return 0;
}
@@ -2028,9 +2034,9 @@ int __init viafb_init(void)
return r;
#endif
if (parse_mode(viafb_mode, &dummy_x, &dummy_y)
- || !viafb_get_mode(dummy_x, dummy_y)
+ || !viafb_get_best_mode(dummy_x, dummy_y, viafb_refresh)
|| parse_mode(viafb_mode1, &dummy_x, &dummy_y)
- || !viafb_get_mode(dummy_x, dummy_y)
+ || !viafb_get_best_mode(dummy_x, dummy_y, viafb_refresh1)
|| viafb_bpp < 0 || viafb_bpp > 32
|| viafb_bpp1 < 0 || viafb_bpp1 > 32
|| parse_active_dev())
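The kstrtoint()/kstrtouint() helpers used throughout viafb_setup() return 0 on success and a negative errno (typically -EINVAL for bad digits or -ERANGE for overflow), which is what makes the early -EINVAL returns above possible; stripped down, the idiom is just:

#include <linux/kernel.h>

/* Sketch of the conversion idiom: "opt" stands in for one of the option
 * strings, "val" for the integer being filled. */
static int parse_int_option(const char *opt, int *val)
{
	int err = kstrtoint(opt, 0, val);	/* base 0: accepts 10, 0x10, 010 */

	if (err < 0)				/* -EINVAL or -ERANGE */
		return err;
	return 0;
}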
diff --git a/drivers/video/via/viamode.c b/drivers/video/via/viamode.c
index 58df74e1417..0911cac1b2f 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/via/viamode.c
@@ -281,7 +281,7 @@ static struct crt_mode_table CRTM640x480[] = {
/*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
{REFRESH_60, M640X480_R60_HSP, M640X480_R60_VSP,
- {800, 640, 648, 144, 656, 96, 525, 480, 480, 45, 490, 2} },
+ {800, 640, 640, 160, 656, 96, 525, 480, 480, 45, 490, 2} },
{REFRESH_75, M640X480_R75_HSP, M640X480_R75_VSP,
{840, 640, 640, 200, 656, 64, 500, 480, 480, 20, 481, 3} },
{REFRESH_85, M640X480_R85_HSP, M640X480_R85_VSP,
@@ -863,26 +863,56 @@ int NUM_TOTAL_CLE266_ModeXregs = ARRAY_SIZE(CLE266_ModeXregs);
int NUM_TOTAL_PATCH_MODE = ARRAY_SIZE(res_patch_table);
-struct VideoModeTable *viafb_get_mode(int hres, int vres)
+static struct VideoModeTable *get_modes(struct VideoModeTable *vmt, int n,
+ int hres, int vres)
{
- u32 i;
- for (i = 0; i < ARRAY_SIZE(viafb_modes); i++)
- if (viafb_modes[i].mode_array &&
- viafb_modes[i].crtc[0].crtc.hor_addr == hres &&
- viafb_modes[i].crtc[0].crtc.ver_addr == vres)
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (vmt[i].mode_array &&
+ vmt[i].crtc[0].crtc.hor_addr == hres &&
+ vmt[i].crtc[0].crtc.ver_addr == vres)
return &viafb_modes[i];
return NULL;
}
-struct VideoModeTable *viafb_get_rb_mode(int hres, int vres)
+static struct crt_mode_table *get_best_mode(struct VideoModeTable *vmt,
+ int refresh)
{
- u32 i;
- for (i = 0; i < ARRAY_SIZE(viafb_rb_modes); i++)
- if (viafb_rb_modes[i].mode_array &&
- viafb_rb_modes[i].crtc[0].crtc.hor_addr == hres &&
- viafb_rb_modes[i].crtc[0].crtc.ver_addr == vres)
- return &viafb_rb_modes[i];
+ struct crt_mode_table *best;
+ int i;
- return NULL;
+ if (!vmt)
+ return NULL;
+
+ best = &vmt->crtc[0];
+ for (i = 1; i < vmt->mode_array; i++) {
+ if (abs(vmt->crtc[i].refresh_rate - refresh)
+ < abs(best->refresh_rate - refresh))
+ best = &vmt->crtc[i];
+ }
+
+ return best;
+}
+
+static struct VideoModeTable *viafb_get_mode(int hres, int vres)
+{
+ return get_modes(viafb_modes, ARRAY_SIZE(viafb_modes), hres, vres);
+}
+
+struct crt_mode_table *viafb_get_best_mode(int hres, int vres, int refresh)
+{
+ return get_best_mode(viafb_get_mode(hres, vres), refresh);
+}
+
+static struct VideoModeTable *viafb_get_rb_mode(int hres, int vres)
+{
+ return get_modes(viafb_rb_modes, ARRAY_SIZE(viafb_rb_modes), hres,
+ vres);
+}
+
+struct crt_mode_table *viafb_get_best_rb_mode(int hres, int vres, int refresh)
+{
+ return get_best_mode(viafb_get_rb_mode(hres, vres), refresh);
}
diff --git a/drivers/video/via/viamode.h b/drivers/video/via/viamode.h
index 3751289eb45..5917a2b00e1 100644
--- a/drivers/video/via/viamode.h
+++ b/drivers/video/via/viamode.h
@@ -60,7 +60,7 @@ extern struct io_reg PM1024x768[];
extern struct patch_table res_patch_table[];
extern struct VPITTable VPIT;
-struct VideoModeTable *viafb_get_mode(int hres, int vres);
-struct VideoModeTable *viafb_get_rb_mode(int hres, int vres);
+struct crt_mode_table *viafb_get_best_mode(int hres, int vres, int refresh);
+struct crt_mode_table *viafb_get_best_rb_mode(int hres, int vres, int refresh);
#endif /* __VIAMODE_H__ */
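get_best_mode() above picks the crtc entry whose refresh_rate is numerically closest to the requested rate, so callers no longer need a separate viafb_get_refresh() lookup before filling timings; a hypothetical usage sketch:

#include <linux/kernel.h>
#include "viamode.h"

/* Illustrative only: request 1024x768 at 72 Hz. If the table carries
 * 60/75/85 Hz entries for that resolution, the 75 Hz entry is returned,
 * because |75 - 72| is the smallest difference. */
static void example_lookup(void)
{
	struct crt_mode_table *best = viafb_get_best_mode(1024, 768, 72);

	if (!best)
		pr_info("viafb: 1024x768 not in the mode table\n");
}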
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 0e120d67eb6..777c21dd7a6 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -210,8 +210,8 @@ static int vt8500lcd_pan_display(struct fb_var_screeninfo *var,
struct vt8500lcd_info *fbi = to_vt8500lcd_info(info);
writel((1 << 31)
- | (((var->xres_virtual - var->xres) * pixlen / 4) << 20)
- | (off >> 2), fbi->regbase + 0x20);
+ | (((info->var.xres_virtual - info->var.xres) * pixlen / 4) << 20)
+ | (off >> 2), fbi->regbase + 0x20);
return 0;
}
@@ -355,7 +355,7 @@ static int __devinit vt8500lcd_probe(struct platform_device *pdev)
goto failed_free_palette;
}
- ret = request_irq(irq, vt8500lcd_handle_irq, IRQF_DISABLED, "LCD", fbi);
+ ret = request_irq(irq, vt8500lcd_handle_irq, 0, "LCD", fbi);
if (ret) {
dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
ret = -EBUSY;
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index f9b3e3dc242..4e74d262cf3 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -620,13 +620,14 @@ static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *i
unsigned int offset;
/* Calculate the offset */
- if (var->bits_per_pixel == 0) {
- offset = (var->yoffset / 16) * var->xres_virtual + var->xoffset;
+ if (info->var.bits_per_pixel == 0) {
+ offset = (var->yoffset / 16) * info->var.xres_virtual
+ + var->xoffset;
offset = offset >> 3;
} else {
offset = (var->yoffset * info->fix.line_length) +
- (var->xoffset * var->bits_per_pixel / 8);
- offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 1);
+ (var->xoffset * info->var.bits_per_pixel / 8);
+ offset = offset >> ((info->var.bits_per_pixel == 4) ? 2 : 1);
}
/* Set the offset */
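As a worked example of the offset arithmetic above (illustrative values, not from the patch): with 16 bpp, line_length = 2048 bytes and a pan to (xoffset = 4, yoffset = 8), the byte offset is 8 * 2048 + 4 * 2 = 16392, which the final shift then scales before it is written to the start-address register:

/* Worked example only; mirrors the computation in vt8623fb_pan_display(). */
static unsigned int example_pan_offset(void)
{
	unsigned int yoffset = 8, xoffset = 4, bpp = 16, line_length = 2048;
	unsigned int offset = yoffset * line_length + xoffset * bpp / 8;

	/* 16392 bytes; shift by 1 (or by 2 for 4 bpp) as the driver does. */
	return offset >> ((bpp == 4) ? 2 : 1);	/* 8196 */
}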
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index c8be8af0cc6..2375e5bbf57 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <video/w100fb.h>
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 77dea015ff6..fcb6cd90f64 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -23,7 +23,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 3d9162151fd..4939e0ccc4e 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -706,6 +706,7 @@ static const struct file_operations fsl_hv_fops = {
.poll = fsl_hv_poll,
.read = fsl_hv_read,
.unlocked_ioctl = fsl_hv_ioctl,
+ .compat_ioctl = fsl_hv_ioctl,
};
static struct miscdevice fsl_hv_misc_dev = {
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 57e493b1bd2..816ed08e7cf 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -35,4 +35,15 @@ config VIRTIO_BALLOON
If unsure, say M.
+ config VIRTIO_MMIO
+ tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ select VIRTIO
+ select VIRTIO_RING
+ ---help---
+ This driver provides support for memory mapped virtio
+ platform devices.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 6738c446c19..5a4c63cfd38 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_VIRTIO) += virtio.o
obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o
+obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index efb35aa8309..984c501c258 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -1,6 +1,7 @@
#include <linux/virtio.h>
#include <linux/spinlock.h>
#include <linux/virtio_config.h>
+#include <linux/module.h>
/* Unique numbering for virtio devices. */
static unsigned int dev_index;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index e058ace2a4a..94fd738a774 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -25,6 +25,7 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct virtio_balloon
{
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
new file mode 100644
index 00000000000..acc5e43c373
--- /dev/null
+++ b/drivers/virtio/virtio_mmio.c
@@ -0,0 +1,479 @@
+/*
+ * Virtio memory mapped device driver
+ *
+ * Copyright 2011, ARM Ltd.
+ *
+ * This module allows virtio devices to be used over a virtual, memory mapped
+ * platform device.
+ *
+ * Register layout (all registers are 32 bits wide):
+ *
+ * offset d. name description
+ * ------ -- ---------------- -----------------
+ *
+ * 0x000 R MagicValue Magic value "virt"
+ * 0x004 R Version Device version (current max. 1)
+ * 0x008 R DeviceID Virtio device ID
+ * 0x00c R VendorID Virtio vendor ID
+ *
+ * 0x010 R HostFeatures Features supported by the host
+ * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures
+ *
+ * 0x020 W GuestFeatures Features activated by the guest
+ * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures
+ * 0x028 W GuestPageSize Size of guest's memory page in bytes
+ *
+ * 0x030 W QueueSel Queue selector
+ * 0x034 R QueueNumMax Maximum size of the currently selected queue
+ * 0x038 W QueueNum Queue size for the currently selected queue
+ * 0x03c W QueueAlign Used Ring alignment for the current queue
+ * 0x040 RW QueuePFN PFN for the currently selected queue
+ *
+ * 0x050 W QueueNotify Queue notifier
+ * 0x060 R InterruptStatus Interrupt status register
+ * 0x060 W InterruptACK Interrupt acknowledge register
+ * 0x070 RW Status Device status register
+ *
+ * 0x100+ RW Device-specific configuration space
+ *
+ * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_mmio.h>
+#include <linux/virtio_ring.h>
+
+
+
+/* The alignment to use between consumer and producer parts of vring.
+ * Currently hardcoded to the page size. */
+#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
+
+
+
+#define to_virtio_mmio_device(_plat_dev) \
+ container_of(_plat_dev, struct virtio_mmio_device, vdev)
+
+struct virtio_mmio_device {
+ struct virtio_device vdev;
+ struct platform_device *pdev;
+
+ void __iomem *base;
+ unsigned long version;
+
+ /* a list of queues so we can dispatch IRQs */
+ spinlock_t lock;
+ struct list_head virtqueues;
+};
+
+struct virtio_mmio_vq_info {
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
+ /* the number of entries in the queue */
+ unsigned int num;
+
+ /* the index of the queue */
+ int queue_index;
+
+ /* the virtual address of the ring queue */
+ void *queue;
+
+ /* the list node for the virtqueues list */
+ struct list_head node;
+};
+
+
+
+/* Configuration interface */
+
+static u32 vm_get_features(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+ /* TODO: Features > 32 bits */
+ writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL);
+
+ return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
+}
+
+static void vm_finalize_features(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ int i;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
+ writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
+ writel(vdev->features[i],
+ vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
+ }
+}
+
+static void vm_get(struct virtio_device *vdev, unsigned offset,
+ void *buf, unsigned len)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+}
+
+static void vm_set(struct virtio_device *vdev, unsigned offset,
+ const void *buf, unsigned len)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ const u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+}
+
+static u8 vm_get_status(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+ return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
+}
+
+static void vm_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+ /* We should never be setting status to 0. */
+ BUG_ON(status == 0);
+
+ writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
+}
+
+static void vm_reset(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+ /* 0 status means a reset. */
+ writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
+}
+
+
+
+/* Transport interface */
+
+/* the notify function used when creating a virt queue */
+static void vm_notify(struct virtqueue *vq)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+ struct virtio_mmio_vq_info *info = vq->priv;
+
+ /* We write the queue's selector into the notification register to
+ * signal the other end */
+ writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+}
+
+/* Notify all virtqueues on an interrupt. */
+static irqreturn_t vm_interrupt(int irq, void *opaque)
+{
+ struct virtio_mmio_device *vm_dev = opaque;
+ struct virtio_mmio_vq_info *info;
+ struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+ unsigned long status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* Read and acknowledge interrupts */
+ status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
+ writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
+
+ if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)
+ && vdrv && vdrv->config_changed) {
+ vdrv->config_changed(&vm_dev->vdev);
+ ret = IRQ_HANDLED;
+ }
+
+ if (likely(status & VIRTIO_MMIO_INT_VRING)) {
+ spin_lock_irqsave(&vm_dev->lock, flags);
+ list_for_each_entry(info, &vm_dev->virtqueues, node)
+ ret |= vring_interrupt(irq, info->vq);
+ spin_unlock_irqrestore(&vm_dev->lock, flags);
+ }
+
+ return ret;
+}
+
+
+
+static void vm_del_vq(struct virtqueue *vq)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+ struct virtio_mmio_vq_info *info = vq->priv;
+ unsigned long flags, size;
+
+ spin_lock_irqsave(&vm_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vm_dev->lock, flags);
+
+ vring_del_virtqueue(vq);
+
+ /* Select and deactivate the queue */
+ writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+ writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+
+ size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
+ free_pages_exact(info->queue, size);
+ kfree(info);
+}
+
+static void vm_del_vqs(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ vm_del_vq(vq);
+
+ free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
+}
+
+
+
+static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ struct virtio_mmio_vq_info *info;
+ struct virtqueue *vq;
+ unsigned long flags, size;
+ int err;
+
+ /* Select the queue we're interested in */
+ writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+
+ /* Queue shouldn't already be set up. */
+ if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
+ err = -ENOENT;
+ goto error_available;
+ }
+
+ /* Allocate and fill out our active queue description */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto error_kmalloc;
+ }
+ info->queue_index = index;
+
+ /* Allocate pages for the queue - start with a queue as big as
+ * possible (limited by maximum size allowed by device), drop down
+ * to a minimal size, just big enough to fit descriptor table
+ * and two rings (which makes it "alignment_size * 2")
+ */
+ info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+ while (1) {
+ size = PAGE_ALIGN(vring_size(info->num,
+ VIRTIO_MMIO_VRING_ALIGN));
+ /* Already smallest possible allocation? */
+ if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
+ err = -ENOMEM;
+ goto error_alloc_pages;
+ }
+
+ info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+ if (info->queue)
+ break;
+
+ info->num /= 2;
+ }
+
+ /* Activate the queue */
+ writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+ writel(VIRTIO_MMIO_VRING_ALIGN,
+ vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
+ writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
+ vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+
+ /* Create the vring */
+ vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
+ vdev, info->queue, vm_notify, callback, name);
+ if (!vq) {
+ err = -ENOMEM;
+ goto error_new_virtqueue;
+ }
+
+ vq->priv = info;
+ info->vq = vq;
+
+ spin_lock_irqsave(&vm_dev->lock, flags);
+ list_add(&info->node, &vm_dev->virtqueues);
+ spin_unlock_irqrestore(&vm_dev->lock, flags);
+
+ return vq;
+
+error_new_virtqueue:
+ writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+ free_pages_exact(info->queue, size);
+error_alloc_pages:
+ kfree(info);
+error_kmalloc:
+error_available:
+ return ERR_PTR(err);
+}
+
+static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
+ int i, err;
+
+ err = request_irq(irq, vm_interrupt, IRQF_SHARED,
+ dev_name(&vdev->dev), vm_dev);
+ if (err)
+ return err;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ vm_del_vqs(vdev);
+ return PTR_ERR(vqs[i]);
+ }
+ }
+
+ return 0;
+}
+
+
+
+static struct virtio_config_ops virtio_mmio_config_ops = {
+ .get = vm_get,
+ .set = vm_set,
+ .get_status = vm_get_status,
+ .set_status = vm_set_status,
+ .reset = vm_reset,
+ .find_vqs = vm_find_vqs,
+ .del_vqs = vm_del_vqs,
+ .get_features = vm_get_features,
+ .finalize_features = vm_finalize_features,
+};
+
+
+
+/* Platform device */
+
+static int __devinit virtio_mmio_probe(struct platform_device *pdev)
+{
+ struct virtio_mmio_device *vm_dev;
+ struct resource *mem;
+ unsigned long magic;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start,
+ resource_size(mem), pdev->name))
+ return -EBUSY;
+
+ vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
+ if (!vm_dev)
+ return -ENOMEM;
+
+ vm_dev->vdev.dev.parent = &pdev->dev;
+ vm_dev->vdev.config = &virtio_mmio_config_ops;
+ vm_dev->pdev = pdev;
+ INIT_LIST_HEAD(&vm_dev->virtqueues);
+ spin_lock_init(&vm_dev->lock);
+
+ vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (vm_dev->base == NULL)
+ return -EFAULT;
+
+ /* Check magic value */
+ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
+ if (memcmp(&magic, "virt", 4) != 0) {
+ dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
+ return -ENODEV;
+ }
+
+ /* Check device version */
+ vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
+ if (vm_dev->version != 1) {
+ dev_err(&pdev->dev, "Version %ld not supported!\n",
+ vm_dev->version);
+ return -ENXIO;
+ }
+
+ vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
+ vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+
+ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+ platform_set_drvdata(pdev, vm_dev);
+
+ return register_virtio_device(&vm_dev->vdev);
+}
+
+static int __devexit virtio_mmio_remove(struct platform_device *pdev)
+{
+ struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
+
+ unregister_virtio_device(&vm_dev->vdev);
+
+ return 0;
+}
+
+
+
+/* Platform driver */
+
+static struct of_device_id virtio_mmio_match[] = {
+ { .compatible = "virtio,mmio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+
+static struct platform_driver virtio_mmio_driver = {
+ .probe = virtio_mmio_probe,
+ .remove = __devexit_p(virtio_mmio_remove),
+ .driver = {
+ .name = "virtio-mmio",
+ .owner = THIS_MODULE,
+ .of_match_table = virtio_mmio_match,
+ },
+};
+
+static int __init virtio_mmio_init(void)
+{
+ return platform_driver_register(&virtio_mmio_driver);
+}
+
+static void __exit virtio_mmio_exit(void)
+{
+ platform_driver_unregister(&virtio_mmio_driver);
+}
+
+module_init(virtio_mmio_init);
+module_exit(virtio_mmio_exit);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 4bcc8b82640..3d1bf41e889 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -415,9 +415,13 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
}
}
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_add(&info->node, &vp_dev->virtqueues);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+ if (callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
return vq;
@@ -590,11 +594,11 @@ static struct virtio_config_ops virtio_pci_config_ops = {
static void virtio_pci_release_dev(struct device *_d)
{
- struct virtio_device *dev = container_of(_d, struct virtio_device,
- dev);
- struct virtio_pci_device *vp_dev = to_vp_device(dev);
-
- kfree(vp_dev);
+ /*
+ * No need for a release method as we allocate/free
+ * all devices together with the pci devices.
+ * Provide an empty one to avoid getting a warning from core.
+ */
}
/* the PCI probing function */
@@ -682,6 +686,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
+ kfree(vp_dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 4acf88884f9..c7a2c208f6e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -21,6 +21,7 @@
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/module.h>
/* virtio guest is communicating with a virtual "device" that actually runs on
* a host processor. Memory barriers are used to control SMP effects. */
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 483d4518091..5754c9a4f58 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -114,43 +114,7 @@ static struct bin_attribute w1_ds2760_bin_attr = {
.read = w1_ds2760_read_bin,
};
-static DEFINE_IDR(bat_idr);
-static DEFINE_MUTEX(bat_idr_lock);
-
-static int new_bat_id(void)
-{
- int ret;
-
- while (1) {
- int id;
-
- ret = idr_pre_get(&bat_idr, GFP_KERNEL);
- if (ret == 0)
- return -ENOMEM;
-
- mutex_lock(&bat_idr_lock);
- ret = idr_get_new(&bat_idr, NULL, &id);
- mutex_unlock(&bat_idr_lock);
-
- if (ret == 0) {
- ret = id & MAX_ID_MASK;
- break;
- } else if (ret == -EAGAIN) {
- continue;
- } else {
- break;
- }
- }
-
- return ret;
-}
-
-static void release_bat_id(int id)
-{
- mutex_lock(&bat_idr_lock);
- idr_remove(&bat_idr, id);
- mutex_unlock(&bat_idr_lock);
-}
+static DEFINE_IDA(bat_ida);
static int w1_ds2760_add_slave(struct w1_slave *sl)
{
@@ -158,7 +122,7 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
int id;
struct platform_device *pdev;
- id = new_bat_id();
+ id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
ret = id;
goto noid;
@@ -187,7 +151,7 @@ bin_attr_failed:
pdev_add_failed:
platform_device_unregister(pdev);
pdev_alloc_failed:
- release_bat_id(id);
+ ida_simple_remove(&bat_ida, id);
noid:
success:
return ret;
@@ -199,7 +163,7 @@ static void w1_ds2760_remove_slave(struct w1_slave *sl)
int id = pdev->id;
platform_device_unregister(pdev);
- release_bat_id(id);
+ ida_simple_remove(&bat_ida, id);
sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr);
}
@@ -217,14 +181,14 @@ static int __init w1_ds2760_init(void)
{
printk(KERN_INFO "1-Wire driver for the DS2760 battery monitor "
" chip - (c) 2004-2005, Szabolcs Gyurko\n");
- idr_init(&bat_idr);
+ ida_init(&bat_ida);
return w1_register_family(&w1_ds2760_family);
}
static void __exit w1_ds2760_exit(void)
{
w1_unregister_family(&w1_ds2760_family);
- idr_destroy(&bat_idr);
+ ida_destroy(&bat_ida);
}
EXPORT_SYMBOL(w1_ds2760_read);
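The conversion above replaces the open-coded idr allocator with the IDA helpers; the whole pattern reduces to the sketch below (names are hypothetical, the calls are the same ones the patch uses):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* smallest free id >= 0; an upper bound of 0 means "no limit" */
	return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

static void example_put_id(int id)
{
	ida_simple_remove(&example_ida, id);
}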
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index 274c8f38303..39f78c0b143 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -26,20 +26,14 @@
#include "../w1_family.h"
#include "w1_ds2780.h"
-int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
- int io)
+static int w1_ds2780_do_io(struct device *dev, char *buf, int addr,
+ size_t count, int io)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
- if (!dev)
- return -ENODEV;
+ if (addr > DS2780_DATA_SIZE || addr < 0)
+ return 0;
- mutex_lock(&sl->master->mutex);
-
- if (addr > DS2780_DATA_SIZE || addr < 0) {
- count = 0;
- goto out;
- }
count = min_t(int, count, DS2780_DATA_SIZE - addr);
if (w1_reset_select_slave(sl) == 0) {
@@ -47,7 +41,6 @@ int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
w1_write_8(sl->master, addr);
w1_write_block(sl->master, buf, count);
- /* XXX w1_write_block returns void, not n_written */
} else {
w1_write_8(sl->master, W1_DS2780_READ_DATA);
w1_write_8(sl->master, addr);
@@ -55,13 +48,42 @@ int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
}
}
-out:
+ return count;
+}
+
+int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
+ int io)
+{
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&sl->master->mutex);
+
+ ret = w1_ds2780_do_io(dev, buf, addr, count, io);
+
mutex_unlock(&sl->master->mutex);
- return count;
+ return ret;
}
EXPORT_SYMBOL(w1_ds2780_io);
+int w1_ds2780_io_nolock(struct device *dev, char *buf, int addr, size_t count,
+ int io)
+{
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ ret = w1_ds2780_do_io(dev, buf, addr, count, io);
+
+ return ret;
+}
+EXPORT_SYMBOL(w1_ds2780_io_nolock);
+
int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
@@ -99,43 +121,7 @@ static struct bin_attribute w1_ds2780_bin_attr = {
.read = w1_ds2780_read_bin,
};
-static DEFINE_IDR(bat_idr);
-static DEFINE_MUTEX(bat_idr_lock);
-
-static int new_bat_id(void)
-{
- int ret;
-
- while (1) {
- int id;
-
- ret = idr_pre_get(&bat_idr, GFP_KERNEL);
- if (ret == 0)
- return -ENOMEM;
-
- mutex_lock(&bat_idr_lock);
- ret = idr_get_new(&bat_idr, NULL, &id);
- mutex_unlock(&bat_idr_lock);
-
- if (ret == 0) {
- ret = id & MAX_ID_MASK;
- break;
- } else if (ret == -EAGAIN) {
- continue;
- } else {
- break;
- }
- }
-
- return ret;
-}
-
-static void release_bat_id(int id)
-{
- mutex_lock(&bat_idr_lock);
- idr_remove(&bat_idr, id);
- mutex_unlock(&bat_idr_lock);
-}
+static DEFINE_IDA(bat_ida);
static int w1_ds2780_add_slave(struct w1_slave *sl)
{
@@ -143,7 +129,7 @@ static int w1_ds2780_add_slave(struct w1_slave *sl)
int id;
struct platform_device *pdev;
- id = new_bat_id();
+ id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
ret = id;
goto noid;
@@ -172,7 +158,7 @@ bin_attr_failed:
pdev_add_failed:
platform_device_unregister(pdev);
pdev_alloc_failed:
- release_bat_id(id);
+ ida_simple_remove(&bat_ida, id);
noid:
return ret;
}
@@ -183,7 +169,7 @@ static void w1_ds2780_remove_slave(struct w1_slave *sl)
int id = pdev->id;
platform_device_unregister(pdev);
- release_bat_id(id);
+ ida_simple_remove(&bat_ida, id);
sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
}
@@ -199,14 +185,14 @@ static struct w1_family w1_ds2780_family = {
static int __init w1_ds2780_init(void)
{
- idr_init(&bat_idr);
+ ida_init(&bat_ida);
return w1_register_family(&w1_ds2780_family);
}
static void __exit w1_ds2780_exit(void)
{
w1_unregister_family(&w1_ds2780_family);
- idr_destroy(&bat_idr);
+ ida_destroy(&bat_ida);
}
module_init(w1_ds2780_init);
diff --git a/drivers/w1/slaves/w1_ds2780.h b/drivers/w1/slaves/w1_ds2780.h
index a1fba79eb1b..73737936502 100644
--- a/drivers/w1/slaves/w1_ds2780.h
+++ b/drivers/w1/slaves/w1_ds2780.h
@@ -124,6 +124,8 @@
extern int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
int io);
+extern int w1_ds2780_io_nolock(struct device *dev, char *buf, int addr,
+ size_t count, int io);
extern int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd);
#endif /* !_W1_DS2780_H */
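The new w1_ds2780_io_nolock() is meant for callers that already hold sl->master->mutex and therefore must not take it again; a hypothetical caller could look like this:

#include <linux/device.h>
#include "w1_ds2780.h"

/* Sketch only: "dev" is the DS2780 slave device and the caller is assumed
 * to already hold sl->master->mutex as part of a larger locked sequence. */
static int example_read_locked(struct device *dev, char *buf, int addr,
			       size_t count)
{
	return w1_ds2780_io_nolock(dev, buf, addr, count, 0 /* read */);
}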
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 63359797c8b..e9309778ee7 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/sched.h> /* schedule_timeout() */
#include <linux/delay.h>
+#include <linux/export.h>
#include "w1_family.h"
#include "w1.h"
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index d220bce2cee..68288355727 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -24,6 +24,8 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
#include "w1.h"
#include "w1_log.h"
@@ -78,6 +80,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
memcpy(&dev->dev, device, sizeof(struct device));
dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
snprintf(dev->name, sizeof(dev->name), "w1_bus_master%u", dev->id);
+ dev->dev.init_name = dev->name;
dev->driver = driver;
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 765b37b62a4..3135b2c6399 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -158,13 +158,18 @@ EXPORT_SYMBOL_GPL(w1_write_8);
static u8 w1_read_bit(struct w1_master *dev)
{
int result;
+ unsigned long flags;
+ /* sample timing is critical here */
+ local_irq_save(flags);
dev->bus_master->write_bit(dev->bus_master->data, 0);
w1_delay(6);
dev->bus_master->write_bit(dev->bus_master->data, 1);
w1_delay(9);
result = dev->bus_master->read_bit(dev->bus_master->data);
+ local_irq_restore(flags);
+
w1_delay(55);
return result & 0x1;
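The local_irq_save()/local_irq_restore() pair added above is the usual way to keep a short, delay-calibrated sequence from being stretched by an interrupt on the local CPU; in isolation the idiom is simply:

#include <linux/irqflags.h>

static void example_timing_critical(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* no local interrupts from here on */
	/* ... a few microseconds of timing-critical bus wiggling ... */
	local_irq_restore(flags);
}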
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 86b0735e6aa..6285867a935 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -66,6 +66,7 @@ config SOFT_WATCHDOG
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
+ select WATCHDOG_CORE
help
Support for the watchdog in the WM831x AudioPlus PMICs. When
the watchdog triggers the system will be reset.
@@ -170,6 +171,7 @@ config HAVE_S3C2410_WATCHDOG
config S3C2410_WATCHDOG
tristate "S3C2410 Watchdog"
depends on ARCH_S3C2410 || HAVE_S3C2410_WATCHDOG
+ select WATCHDOG_CORE
help
Watchdog timer block in the Samsung SoCs. This will reboot
the system when the timer expires with the watchdog enabled.
@@ -726,7 +728,7 @@ config SBC8360_WDT
config SBC7240_WDT
tristate "SBC Nano 7240 Watchdog Timer"
- depends on X86_32
+ depends on X86_32 && !UML
---help---
This is the driver for the hardware watchdog found on the IEI
single board computers EPIC Nano 7240 (and likely others). This
@@ -1174,6 +1176,10 @@ config XEN_WDT
by Xen 4.0 and newer. The watchdog timeout period is normally one
minute but can be changed with a boot-time parameter.
+config UML_WATCHDOG
+ tristate "UML watchdog"
+ depends on UML
+
#
# ISA-based Watchdog Cards
#
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 9291506b8b2..03f449a430d 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -429,7 +429,7 @@ static int __init coh901327_probe(struct platform_device *pdev)
writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);
irq = platform_get_irq(pdev, 0);
- if (request_irq(irq, coh901327_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, coh901327_interrupt, 0,
DRV_NAME " Bark", pdev)) {
ret = -EIO;
goto out_no_irq;
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index f1d1da662fb..41018d429ab 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -427,7 +427,7 @@ static int __init eurwdt_init(void)
{
int ret;
- ret = request_irq(irq, eurwdt_interrupt, IRQF_DISABLED, "eurwdt", NULL);
+ ret = request_irq(irq, eurwdt_interrupt, 0, "eurwdt", NULL);
if (ret) {
printk(KERN_ERR "eurwdt: IRQ %d is not free.\n", irq);
goto out;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 751a591684d..ba6ad662635 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
/*
* intel TCO Watchdog Driver
*
- * (c) Copyright 2006-2010 Wim Van Sebroeck <wim@iguana.be>.
+ * (c) Copyright 2006-2011 Wim Van Sebroeck <wim@iguana.be>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -44,7 +44,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
-#define DRV_VERSION "1.06"
+#define DRV_VERSION "1.07"
#define PFX DRV_NAME ": "
/* Includes */
@@ -384,6 +384,11 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static int turn_SMI_watchdog_clear_off = 0;
+module_param(turn_SMI_watchdog_clear_off, int, 0);
+MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
+ "Turn off SMI clearing watchdog (default=0)");
+
/*
* Some TCO specific functions
*/
@@ -808,10 +813,12 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
ret = -EIO;
goto out_unmap;
}
- /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
- val32 = inl(SMI_EN);
- val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
- outl(val32, SMI_EN);
+ if (turn_SMI_watchdog_clear_off) {
+ /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+ val32 = inl(SMI_EN);
+ val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
+ outl(val32, SMI_EN);
+ }
/* The TCO I/O registers reside in a 32-byte range pointed to
by the TCOBASE value */
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 4dc31024d26..82ccd36e2c9 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -367,8 +367,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
goto err_misc;
}
- ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED,
- "mpcore_wdt", wdt);
+ ret = request_irq(wdt->irq, mpcore_wdt_fire, 0, "mpcore_wdt", wdt);
if (ret) {
dev_printk(KERN_ERR, wdt->dev,
"cannot register IRQ%d for watchdog\n", wdt->irq);
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 945ee830030..7c0d8630e64 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -402,7 +402,7 @@ static void octeon_wdt_setup_interrupt(int cpu)
irq = OCTEON_IRQ_WDOG0 + core;
if (request_irq(irq, octeon_wdt_poke_irq,
- IRQF_DISABLED, "octeon_wdt", octeon_wdt_poke_irq))
+ IRQF_NO_THREAD, "octeon_wdt", octeon_wdt_poke_irq))
panic("octeon_wdt: Couldn't obtain irq %d", irq);
cpumask_set_cpu(cpu, &irq_enabled_cpus);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 30da88f47cd..5de7e4fa5b8 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -27,9 +27,8 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/miscdevice.h>
+#include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */
#include <linux/watchdog.h>
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -38,6 +37,7 @@
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/err.h>
#include <mach/map.h>
@@ -74,14 +74,12 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
"0 to reboot (default 0)");
MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
-static unsigned long open_lock;
static struct device *wdt_dev; /* platform device attached to */
static struct resource *wdt_mem;
static struct resource *wdt_irq;
static struct clk *wdt_clock;
static void __iomem *wdt_base;
static unsigned int wdt_count;
-static char expect_close;
static DEFINE_SPINLOCK(wdt_lock);
/* watchdog control routines */
@@ -93,11 +91,13 @@ static DEFINE_SPINLOCK(wdt_lock);
/* functions */
-static void s3c2410wdt_keepalive(void)
+static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
spin_lock(&wdt_lock);
writel(wdt_count, wdt_base + S3C2410_WTCNT);
spin_unlock(&wdt_lock);
+
+ return 0;
}
static void __s3c2410wdt_stop(void)
@@ -109,14 +109,16 @@ static void __s3c2410wdt_stop(void)
writel(wtcon, wdt_base + S3C2410_WTCON);
}
-static void s3c2410wdt_stop(void)
+static int s3c2410wdt_stop(struct watchdog_device *wdd)
{
spin_lock(&wdt_lock);
__s3c2410wdt_stop();
spin_unlock(&wdt_lock);
+
+ return 0;
}
-static void s3c2410wdt_start(void)
+static int s3c2410wdt_start(struct watchdog_device *wdd)
{
unsigned long wtcon;
@@ -142,6 +144,8 @@ static void s3c2410wdt_start(void)
writel(wdt_count, wdt_base + S3C2410_WTCNT);
writel(wtcon, wdt_base + S3C2410_WTCON);
spin_unlock(&wdt_lock);
+
+ return 0;
}
static inline int s3c2410wdt_is_running(void)
@@ -149,7 +153,7 @@ static inline int s3c2410wdt_is_running(void)
return readl(wdt_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE;
}
-static int s3c2410wdt_set_heartbeat(int timeout)
+static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeout)
{
unsigned long freq = clk_get_rate(wdt_clock);
unsigned int count;
@@ -182,8 +186,6 @@ static int s3c2410wdt_set_heartbeat(int timeout)
}
}
- tmr_margin = timeout;
-
DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
__func__, timeout, divisor, count, count/divisor);
@@ -201,70 +203,6 @@ static int s3c2410wdt_set_heartbeat(int timeout)
return 0;
}
-/*
- * /dev/watchdog handling
- */
-
-static int s3c2410wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &open_lock))
- return -EBUSY;
-
- if (nowayout)
- __module_get(THIS_MODULE);
-
- expect_close = 0;
-
- /* start the timer */
- s3c2410wdt_start();
- return nonseekable_open(inode, file);
-}
-
-static int s3c2410wdt_release(struct inode *inode, struct file *file)
-{
- /*
- * Shut off the timer.
- * Lock it in if it's a module and we set nowayout
- */
-
- if (expect_close == 42)
- s3c2410wdt_stop();
- else {
- dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
- s3c2410wdt_keepalive();
- }
- expect_close = 0;
- clear_bit(0, &open_lock);
- return 0;
-}
-
-static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- /*
- * Refresh the timer.
- */
- if (len) {
- if (!nowayout) {
- size_t i;
-
- /* In case it was set long ago */
- expect_close = 0;
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- s3c2410wdt_keepalive();
- }
- return len;
-}
-
#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
static const struct watchdog_info s3c2410_wdt_ident = {
@@ -273,53 +211,17 @@ static const struct watchdog_info s3c2410_wdt_ident = {
.identity = "S3C2410 Watchdog",
};
-
-static long s3c2410wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_margin;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &s3c2410_wdt_ident,
- sizeof(s3c2410_wdt_ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_KEEPALIVE:
- s3c2410wdt_keepalive();
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, p))
- return -EFAULT;
- if (s3c2410wdt_set_heartbeat(new_margin))
- return -EINVAL;
- s3c2410wdt_keepalive();
- return put_user(tmr_margin, p);
- case WDIOC_GETTIMEOUT:
- return put_user(tmr_margin, p);
- default:
- return -ENOTTY;
- }
-}
-
-/* kernel interface */
-
-static const struct file_operations s3c2410wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = s3c2410wdt_write,
- .unlocked_ioctl = s3c2410wdt_ioctl,
- .open = s3c2410wdt_open,
- .release = s3c2410wdt_release,
+static struct watchdog_ops s3c2410wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = s3c2410wdt_start,
+ .stop = s3c2410wdt_stop,
+ .ping = s3c2410wdt_keepalive,
+ .set_timeout = s3c2410wdt_set_heartbeat,
};
-static struct miscdevice s3c2410wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &s3c2410wdt_fops,
+static struct watchdog_device s3c2410_wdd = {
+ .info = &s3c2410_wdt_ident,
+ .ops = &s3c2410wdt_ops,
};
/* interrupt handler code */
@@ -328,7 +230,7 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
{
dev_info(wdt_dev, "watchdog timer expired (irq)\n");
- s3c2410wdt_keepalive();
+ s3c2410wdt_keepalive(&s3c2410_wdd);
return IRQ_HANDLED;
}
@@ -349,14 +251,14 @@ static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
* the watchdog is running.
*/
- s3c2410wdt_keepalive();
+ s3c2410wdt_keepalive(&s3c2410_wdd);
} else if (val == CPUFREQ_POSTCHANGE) {
- s3c2410wdt_stop();
+ s3c2410wdt_stop(&s3c2410_wdd);
- ret = s3c2410wdt_set_heartbeat(tmr_margin);
+ ret = s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout);
if (ret >= 0)
- s3c2410wdt_start();
+ s3c2410wdt_start(&s3c2410_wdd);
else
goto err;
}
@@ -365,7 +267,8 @@ done:
return 0;
err:
- dev_err(wdt_dev, "cannot set new value for timeout %d\n", tmr_margin);
+ dev_err(wdt_dev, "cannot set new value for timeout %d\n",
+ s3c2410_wdd.timeout);
return ret;
}
@@ -396,10 +299,6 @@ static inline void s3c2410wdt_cpufreq_deregister(void)
}
#endif
-
-
-/* device interface */
-
static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev;
@@ -466,8 +365,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
/* see if we can actually set the requested timer margin, and if
* not, try the default value */
- if (s3c2410wdt_set_heartbeat(tmr_margin)) {
- started = s3c2410wdt_set_heartbeat(
+ if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, tmr_margin)) {
+ started = s3c2410wdt_set_heartbeat(&s3c2410_wdd,
CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
if (started == 0)
@@ -479,22 +378,21 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
"cannot start\n");
}
- ret = misc_register(&s3c2410wdt_miscdev);
+ ret = watchdog_register_device(&s3c2410_wdd);
if (ret) {
- dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
- WATCHDOG_MINOR, ret);
+ dev_err(dev, "cannot register watchdog (%d)\n", ret);
goto err_cpufreq;
}
if (tmr_atboot && started == 0) {
dev_info(dev, "starting watchdog timer\n");
- s3c2410wdt_start();
+ s3c2410wdt_start(&s3c2410_wdd);
} else if (!tmr_atboot) {
/* if we're not enabling the watchdog, then ensure it is
* disabled if it has been left running from the bootloader
* or other source */
- s3c2410wdt_stop();
+ s3c2410wdt_stop(&s3c2410_wdd);
}
/* print out a statement of readiness */
@@ -530,7 +428,7 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
static int __devexit s3c2410wdt_remove(struct platform_device *dev)
{
- misc_deregister(&s3c2410wdt_miscdev);
+ watchdog_unregister_device(&s3c2410_wdd);
s3c2410wdt_cpufreq_deregister();
@@ -550,7 +448,7 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev)
static void s3c2410wdt_shutdown(struct platform_device *dev)
{
- s3c2410wdt_stop();
+ s3c2410wdt_stop(&s3c2410_wdd);
}
#ifdef CONFIG_PM
@@ -565,7 +463,7 @@ static int s3c2410wdt_suspend(struct platform_device *dev, pm_message_t state)
wtdat_save = readl(wdt_base + S3C2410_WTDAT);
/* Note that WTCNT doesn't need to be saved. */
- s3c2410wdt_stop();
+ s3c2410wdt_stop(&s3c2410_wdd);
return 0;
}
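
The s3c2410 hunks above replace the driver's private miscdevice and ioctl plumbing with the generic watchdog core: a watchdog_ops table supplies the start/stop/ping/set_timeout callbacks and watchdog_register_device() creates /dev/watchdog on the driver's behalf, dispatching the old WDIOC_* ioctls to those callbacks. A minimal, self-contained sketch of that registration pattern follows; the demo_* names are illustrative only and are not part of the patch.

#include <linux/module.h>
#include <linux/watchdog.h>

static int demo_wdt_start(struct watchdog_device *wdd)
{
	/* enable the hardware timer here */
	return 0;
}

static int demo_wdt_stop(struct watchdog_device *wdd)
{
	/* disable the hardware timer here */
	return 0;
}

static int demo_wdt_ping(struct watchdog_device *wdd)
{
	/* reload the counter here */
	return 0;
}

static int demo_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
	/* reprogram the hardware, then record the new value */
	wdd->timeout = t;
	return 0;
}

static const struct watchdog_info demo_wdt_info = {
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "Demo Watchdog",
};

static const struct watchdog_ops demo_wdt_ops = {
	.owner       = THIS_MODULE,
	.start       = demo_wdt_start,
	.stop        = demo_wdt_stop,
	.ping        = demo_wdt_ping,
	.set_timeout = demo_wdt_set_timeout,
};

static struct watchdog_device demo_wdd = {
	.info = &demo_wdt_info,
	.ops  = &demo_wdt_ops,
};

/* In probe: the core creates /dev/watchdog and routes WDIOC_KEEPALIVE,
 * WDIOC_SETTIMEOUT and friends to the callbacks above. */
static int demo_probe(void)
{
	return watchdog_register_device(&demo_wdd);
}

/* In remove: */
static void demo_remove(void)
{
	watchdog_unregister_device(&demo_wdd);
}

The wm831x hunks further down perform the same conversion, but keep per-device state instead of a single static watchdog_device; see the sketch after that diff.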
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index f31493e65b3..b01a30e5a66 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -300,7 +300,7 @@ static int __init sbwdog_init(void)
* get the resources
*/
- ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED,
ident.identity, (void *)user_dog);
if (ret) {
printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
@@ -350,7 +350,7 @@ void platform_wd_setup(void)
{
int ret;
- ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED,
"Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0));
if (ret) {
printk(KERN_CRIT
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 52b63f2f0da..b2840409ebc 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -398,7 +398,7 @@ static int __init sc520_wdt_init(void)
WATCHDOG_TIMEOUT);
}
- wdtmrctl = ioremap((unsigned long)(MMCR_BASE + OFFS_WDTMRCTL), 2);
+ wdtmrctl = ioremap(MMCR_BASE + OFFS_WDTMRCTL, 2);
if (!wdtmrctl) {
printk(KERN_ERR PFX "Unable to remap memory\n");
rc = -ENOMEM;
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
index b3421fd2cda..ac2346a452e 100644
--- a/drivers/watchdog/stmp3xxx_wdt.c
+++ b/drivers/watchdog/stmp3xxx_wdt.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <mach/platform.h>
#include <mach/regs-rtc.h>
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index e5c91d4404e..dd5d6754875 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -142,7 +142,7 @@ static void w83627hf_init(void)
w83627hf_unselect_wd_register();
}
-static void wdt_ctrl(int timeout)
+static void wdt_set_time(int timeout)
{
spin_lock(&io_lock);
@@ -158,13 +158,13 @@ static void wdt_ctrl(int timeout)
static int wdt_ping(void)
{
- wdt_ctrl(timeout);
+ wdt_set_time(timeout);
return 0;
}
static int wdt_disable(void)
{
- wdt_ctrl(0);
+ wdt_set_time(0);
return 0;
}
@@ -176,6 +176,24 @@ static int wdt_set_heartbeat(int t)
return 0;
}
+static int wdt_get_time(void)
+{
+ int timeleft;
+
+ spin_lock(&io_lock);
+
+ w83627hf_select_wd_register();
+
+ outb_p(0xF6, WDT_EFER); /* Select CRF6 */
+ timeleft = inb_p(WDT_EFDR); /* Read Timeout counter to CRF6 */
+
+ w83627hf_unselect_wd_register();
+
+ spin_unlock(&io_lock);
+
+ return timeleft;
+}
+
static ssize_t wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -202,7 +220,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
- int new_timeout;
+ int timeval;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
@@ -238,14 +256,17 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
wdt_ping();
break;
case WDIOC_SETTIMEOUT:
- if (get_user(new_timeout, p))
+ if (get_user(timeval, p))
return -EFAULT;
- if (wdt_set_heartbeat(new_timeout))
+ if (wdt_set_heartbeat(timeval))
return -EINVAL;
wdt_ping();
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
+ case WDIOC_GETTIMELEFT:
+ timeval = wdt_get_time();
+ return put_user(timeval, p);
default:
return -ENOTTY;
}
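
The w83627hf hunk above adds a WDIOC_GETTIMELEFT case backed by the new wdt_get_time() helper, so userspace can query how many seconds remain before the watchdog fires. A small illustrative userspace reader, assuming the standard /dev/watchdog node (not taken from the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeleft;
	int fd = open("/dev/watchdog", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, WDIOC_GETTIMELEFT, &timeleft) == 0)
		printf("seconds until reset: %d\n", timeleft);
	write(fd, "V", 1);	/* magic close: disarm on close (WDIOF_MAGICCLOSE) */
	close(fd);
	return 0;
}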
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index bb03e151a1d..d2ef002be96 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -612,7 +612,7 @@ static int __init wdt_init(void)
goto out;
}
- ret = request_irq(irq, wdt_interrupt, IRQF_DISABLED, "wdt501p", NULL);
+ ret = request_irq(irq, wdt_interrupt, 0, "wdt501p", NULL);
if (ret) {
printk(KERN_ERR "wdt: IRQ %d is not free.\n", irq);
goto outreg;
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 172dad6c769..e0fc3baa919 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -643,7 +643,7 @@ static int __devinit wdtpci_init_one(struct pci_dev *dev,
irq = dev->irq;
io = pci_resource_start(dev, 2);
- if (request_irq(irq, wdtpci_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(irq, wdtpci_interrupt, IRQF_SHARED,
"wdt_pci", &wdtpci_miscdev)) {
printk(KERN_ERR PFX "IRQ %d is not free\n", irq);
goto out_reg;
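
The sb_wdog, wdt and wdt_pci hunks drop IRQF_DISABLED from their request_irq() calls; by this point the flag had effectively become a no-op (handlers already run with local interrupts disabled) and was being removed tree-wide, so only the meaningful flags, or 0, remain. A minimal sketch of the resulting call, with a hypothetical handler and cookie:

#include <linux/interrupt.h>

static irqreturn_t demo_wdt_interrupt(int irq, void *dev_id)
{
	/* acknowledge the watchdog pre-timeout interrupt here */
	return IRQ_HANDLED;
}

static int demo_request(int irq, void *cookie)
{
	/* IRQF_DISABLED is gone; pass IRQF_SHARED (or 0) only.
	 * A shared IRQ needs a non-NULL dev_id, hence the cookie. */
	return request_irq(irq, demo_wdt_interrupt, IRQF_SHARED,
			   "demo_wdt", cookie);
}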
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 871caea4e1c..7be38556aed 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -12,8 +12,7 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
+#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
@@ -29,19 +28,19 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static unsigned long wm831x_wdt_users;
-static struct miscdevice wm831x_wdt_miscdev;
-static int wm831x_wdt_expect_close;
-static DEFINE_MUTEX(wdt_mutex);
-static struct wm831x *wm831x;
-static unsigned int update_gpio;
-static unsigned int update_state;
+struct wm831x_wdt_drvdata {
+ struct watchdog_device wdt;
+ struct wm831x *wm831x;
+ struct mutex lock;
+ int update_gpio;
+ int update_state;
+};
/* We can't use the sub-second values here but they're included
* for completeness. */
static struct {
- int time; /* Seconds */
- u16 val; /* WDOG_TO value */
+ unsigned int time; /* Seconds */
+ u16 val; /* WDOG_TO value */
} wm831x_wdt_cfgs[] = {
{ 1, 2 },
{ 2, 3 },
@@ -52,32 +51,13 @@ static struct {
{ 33, 7 }, /* Actually 32.768s so include both, others round down */
};
-static int wm831x_wdt_set_timeout(struct wm831x *wm831x, u16 value)
-{
- int ret;
-
- mutex_lock(&wdt_mutex);
-
- ret = wm831x_reg_unlock(wm831x);
- if (ret == 0) {
- ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
- WM831X_WDOG_TO_MASK, value);
- wm831x_reg_lock(wm831x);
- } else {
- dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
- ret);
- }
-
- mutex_unlock(&wdt_mutex);
-
- return ret;
-}
-
-static int wm831x_wdt_start(struct wm831x *wm831x)
+static int wm831x_wdt_start(struct watchdog_device *wdt_dev)
{
+ struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct wm831x *wm831x = driver_data->wm831x;
int ret;
- mutex_lock(&wdt_mutex);
+ mutex_lock(&driver_data->lock);
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
@@ -89,16 +69,18 @@ static int wm831x_wdt_start(struct wm831x *wm831x)
ret);
}
- mutex_unlock(&wdt_mutex);
+ mutex_unlock(&driver_data->lock);
return ret;
}
-static int wm831x_wdt_stop(struct wm831x *wm831x)
+static int wm831x_wdt_stop(struct watchdog_device *wdt_dev)
{
+ struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct wm831x *wm831x = driver_data->wm831x;
int ret;
- mutex_lock(&wdt_mutex);
+ mutex_lock(&driver_data->lock);
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
@@ -110,26 +92,28 @@ static int wm831x_wdt_stop(struct wm831x *wm831x)
ret);
}
- mutex_unlock(&wdt_mutex);
+ mutex_unlock(&driver_data->lock);
return ret;
}
-static int wm831x_wdt_kick(struct wm831x *wm831x)
+static int wm831x_wdt_ping(struct watchdog_device *wdt_dev)
{
+ struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct wm831x *wm831x = driver_data->wm831x;
int ret;
u16 reg;
- mutex_lock(&wdt_mutex);
+ mutex_lock(&driver_data->lock);
- if (update_gpio) {
- gpio_set_value_cansleep(update_gpio, update_state);
- update_state = !update_state;
+ if (driver_data->update_gpio) {
+ gpio_set_value_cansleep(driver_data->update_gpio,
+ driver_data->update_state);
+ driver_data->update_state = !driver_data->update_state;
ret = 0;
goto out;
}
-
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (!(reg & WM831X_WDOG_RST_SRC)) {
@@ -150,182 +134,59 @@ static int wm831x_wdt_kick(struct wm831x *wm831x)
}
out:
- mutex_unlock(&wdt_mutex);
+ mutex_unlock(&driver_data->lock);
return ret;
}
-static int wm831x_wdt_open(struct inode *inode, struct file *file)
+static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
{
- int ret;
-
- if (!wm831x)
- return -ENODEV;
-
- if (test_and_set_bit(0, &wm831x_wdt_users))
- return -EBUSY;
+ struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct wm831x *wm831x = driver_data->wm831x;
+ int ret, i;
- ret = wm831x_wdt_start(wm831x);
- if (ret != 0)
- return ret;
-
- return nonseekable_open(inode, file);
-}
+ for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
+ if (wm831x_wdt_cfgs[i].time == timeout)
+ break;
+ if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
+ ret = -EINVAL;
-static int wm831x_wdt_release(struct inode *inode, struct file *file)
-{
- if (wm831x_wdt_expect_close)
- wm831x_wdt_stop(wm831x);
- else {
- dev_warn(wm831x->dev, "Watchdog device closed uncleanly\n");
- wm831x_wdt_kick(wm831x);
+ ret = wm831x_reg_unlock(wm831x);
+ if (ret == 0) {
+ ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
+ WM831X_WDOG_TO_MASK,
+ wm831x_wdt_cfgs[i].val);
+ wm831x_reg_lock(wm831x);
+ } else {
+ dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
+ ret);
}
- clear_bit(0, &wm831x_wdt_users);
-
- return 0;
-}
-
-static ssize_t wm831x_wdt_write(struct file *file,
- const char __user *data, size_t count,
- loff_t *ppos)
-{
- size_t i;
-
- if (count) {
- wm831x_wdt_kick(wm831x);
-
- if (!nowayout) {
- /* In case it was set long ago */
- wm831x_wdt_expect_close = 0;
-
- /* scan to see whether or not we got the magic
- character */
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- wm831x_wdt_expect_close = 42;
- }
- }
- }
- return count;
+ return ret;
}
-static const struct watchdog_info ident = {
+static const struct watchdog_info wm831x_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "WM831x Watchdog",
};
-static long wm831x_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret = -ENOTTY, time, i;
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- u16 reg;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, p);
- break;
-
- case WDIOC_SETOPTIONS:
- {
- int options;
-
- if (get_user(options, p))
- return -EFAULT;
-
- ret = -EINVAL;
-
- /* Setting both simultaneously means at least one must fail */
- if (options == WDIOS_DISABLECARD)
- ret = wm831x_wdt_start(wm831x);
-
- if (options == WDIOS_ENABLECARD)
- ret = wm831x_wdt_stop(wm831x);
- break;
- }
-
- case WDIOC_KEEPALIVE:
- ret = wm831x_wdt_kick(wm831x);
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(time, p);
- if (ret)
- break;
-
- if (time == 0) {
- if (nowayout)
- ret = -EINVAL;
- else
- wm831x_wdt_stop(wm831x);
- break;
- }
-
- for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
- if (wm831x_wdt_cfgs[i].time == time)
- break;
- if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
- ret = -EINVAL;
- else
- ret = wm831x_wdt_set_timeout(wm831x,
- wm831x_wdt_cfgs[i].val);
- break;
-
- case WDIOC_GETTIMEOUT:
- reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
- reg &= WM831X_WDOG_TO_MASK;
- for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
- if (wm831x_wdt_cfgs[i].val == reg)
- break;
- if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) {
- dev_warn(wm831x->dev,
- "Unknown watchdog configuration: %x\n", reg);
- ret = -EINVAL;
- } else
- ret = put_user(wm831x_wdt_cfgs[i].time, p);
-
- }
-
- return ret;
-}
-
-static const struct file_operations wm831x_wdt_fops = {
+static const struct watchdog_ops wm831x_wdt_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = wm831x_wdt_write,
- .unlocked_ioctl = wm831x_wdt_ioctl,
- .open = wm831x_wdt_open,
- .release = wm831x_wdt_release,
-};
-
-static struct miscdevice wm831x_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &wm831x_wdt_fops,
+ .start = wm831x_wdt_start,
+ .stop = wm831x_wdt_stop,
+ .ping = wm831x_wdt_ping,
+ .set_timeout = wm831x_wdt_set_timeout,
};
static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
{
+ struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *chip_pdata;
struct wm831x_watchdog_pdata *pdata;
- int reg, ret;
-
- if (wm831x) {
- dev_err(&pdev->dev, "wm831x watchdog already registered\n");
- return -EBUSY;
- }
-
- wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_wdt_drvdata *driver_data;
+ struct watchdog_device *wm831x_wdt;
+ int reg, ret, i;
ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (ret < 0) {
@@ -338,6 +199,36 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
+ driver_data = kzalloc(sizeof(*driver_data), GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_init(&driver_data->lock);
+ driver_data->wm831x = wm831x;
+
+ wm831x_wdt = &driver_data->wdt;
+
+ wm831x_wdt->info = &wm831x_wdt_info;
+ wm831x_wdt->ops = &wm831x_wdt_ops;
+ watchdog_set_drvdata(wm831x_wdt, driver_data);
+
+ if (nowayout)
+ wm831x_wdt->status |= WDOG_NO_WAY_OUT;
+
+ reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
+ reg &= WM831X_WDOG_TO_MASK;
+ for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
+ if (wm831x_wdt_cfgs[i].val == reg)
+ break;
+ if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
+ dev_warn(wm831x->dev,
+ "Unknown watchdog timeout: %x\n", reg);
+ else
+ wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time;
+
/* Apply any configuration */
if (pdev->dev.parent->platform_data) {
chip_pdata = pdev->dev.parent->platform_data;
@@ -361,7 +252,7 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
ret);
- goto err;
+ goto err_alloc;
}
ret = gpio_direction_output(pdata->update_gpio, 0);
@@ -372,7 +263,7 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
goto err_gpio;
}
- update_gpio = pdata->update_gpio;
+ driver_data->update_gpio = pdata->update_gpio;
/* Make sure the watchdog takes hardware updates */
reg |= WM831X_WDOG_RST_SRC;
@@ -389,33 +280,34 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
}
}
- wm831x_wdt_miscdev.parent = &pdev->dev;
-
- ret = misc_register(&wm831x_wdt_miscdev);
+ ret = watchdog_register_device(&driver_data->wdt);
if (ret != 0) {
- dev_err(wm831x->dev, "Failed to register miscdev: %d\n", ret);
+ dev_err(wm831x->dev, "watchdog_register_device() failed: %d\n",
+ ret);
goto err_gpio;
}
+ dev_set_drvdata(&pdev->dev, driver_data);
+
return 0;
err_gpio:
- if (update_gpio) {
- gpio_free(update_gpio);
- update_gpio = 0;
- }
+ if (driver_data->update_gpio)
+ gpio_free(driver_data->update_gpio);
+err_alloc:
+ kfree(driver_data);
err:
return ret;
}
static int __devexit wm831x_wdt_remove(struct platform_device *pdev)
{
- if (update_gpio) {
- gpio_free(update_gpio);
- update_gpio = 0;
- }
+ struct wm831x_wdt_drvdata *driver_data = dev_get_drvdata(&pdev->dev);
+
+ watchdog_unregister_device(&driver_data->wdt);
- misc_deregister(&wm831x_wdt_miscdev);
+ if (driver_data->update_gpio)
+ gpio_free(driver_data->update_gpio);
return 0;
}
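
The wm831x conversion above additionally moves the driver's former file-scope globals into a per-device structure reached through watchdog_set_drvdata()/watchdog_get_drvdata(), so each callback recovers its context from the watchdog_device it is handed. A minimal sketch of that pattern (demo_* names are illustrative, not the driver's):

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/watchdog.h>

struct demo_drvdata {
	struct watchdog_device wdt;
	struct mutex lock;
	/* chip handle, GPIO numbers, ... */
};

static int demo_ping(struct watchdog_device *wdd)
{
	struct demo_drvdata *data = watchdog_get_drvdata(wdd);

	mutex_lock(&data->lock);
	/* kick the hardware through data->... here */
	mutex_unlock(&data->lock);
	return 0;
}

static const struct watchdog_ops demo_ops = {
	.owner = THIS_MODULE,
	.ping  = demo_ping,
};

static int demo_probe_state(struct watchdog_device **out)
{
	struct demo_drvdata *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	mutex_init(&data->lock);
	data->wdt.ops = &demo_ops;
	watchdog_set_drvdata(&data->wdt, data);	/* read back in demo_ping() */
	*out = &data->wdt;	/* caller registers it with watchdog_register_device() */
	return 0;
}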
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5876e1ae6c2..a767884a6c7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -39,6 +39,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
@@ -93,8 +94,8 @@ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
-#define inc_totalhigh_pages() do {} while(0)
-#define dec_totalhigh_pages() do {} while(0)
+#define inc_totalhigh_pages() do {} while (0)
+#define dec_totalhigh_pages() do {} while (0)
#endif
/* List of ballooned pages, threaded through the mem_map array. */
@@ -154,8 +155,7 @@ static struct page *balloon_retrieve(bool prefer_highmem)
if (PageHighMem(page)) {
balloon_stats.balloon_high--;
inc_totalhigh_pages();
- }
- else
+ } else
balloon_stats.balloon_low--;
totalram_pages++;
@@ -422,7 +422,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
BUG_ON(ret);
- }
+ }
}
@@ -507,7 +507,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
int pgno = 0;
- struct page* page;
+ struct page *page;
mutex_lock(&balloon_mutex);
while (pgno < nr_pages) {
page = balloon_retrieve(highmem);
@@ -540,7 +540,7 @@ EXPORT_SYMBOL(alloc_xenballooned_pages);
* @nr_pages: Number of pages
* @pages: pages to return
*/
-void free_xenballooned_pages(int nr_pages, struct page** pages)
+void free_xenballooned_pages(int nr_pages, struct page **pages)
{
int i;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0eb8a57cc80..6e075cdd0c6 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -85,8 +85,7 @@ enum xen_irq_type {
* IPI - IPI vector
* EVTCHN -
*/
-struct irq_info
-{
+struct irq_info {
struct list_head list;
enum xen_irq_type type; /* type */
unsigned irq;
@@ -282,9 +281,9 @@ static inline unsigned long active_evtchns(unsigned int cpu,
struct shared_info *sh,
unsigned int idx)
{
- return (sh->evtchn_pending[idx] &
+ return sh->evtchn_pending[idx] &
per_cpu(cpu_evtchn_mask, cpu)[idx] &
- ~sh->evtchn_mask[idx]);
+ ~sh->evtchn_mask[idx];
}
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
@@ -1180,7 +1179,7 @@ static void __xen_evtchn_do_upcall(void)
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
- unsigned count;
+ unsigned count;
do {
unsigned long pending_words;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 880798aae2f..39871326afa 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -193,9 +193,8 @@ static void gntdev_put_map(struct grant_map *map)
atomic_sub(map->count, &pages_mapped);
- if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
notify_remote_via_evtchn(map->notify.event);
- }
if (map->pages) {
if (!use_ptemod)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8c71ab80175..bf1c094f4eb 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -193,7 +193,7 @@ int gnttab_query_foreign_access(grant_ref_t ref)
nflags = shared[ref].flags;
- return (nflags & (GTF_reading|GTF_writing));
+ return nflags & (GTF_reading|GTF_writing);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 0b5366b5be2..ce4fa083186 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -9,6 +9,7 @@
#include <linux/stop_machine.h>
#include <linux/freezer.h>
#include <linux/syscore_ops.h>
+#include <linux/export.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 66057075d6e..b84bf0b6cc3 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -116,7 +116,7 @@ static int xen_add_device(struct device *dev)
&manage_pci_ext);
} else {
struct physdev_manage_pci manage_pci = {
- .bus = pci_dev->bus->number,
+ .bus = pci_dev->bus->number,
.devfn = pci_dev->devfn,
};
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index c984768d98c..8e964b91c44 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -35,6 +35,7 @@
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 5c9dc43c1e9..9cc2259c999 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -50,11 +50,6 @@ static struct sys_device balloon_sysdev;
static int register_balloon(struct sys_device *sysdev);
-static struct xenbus_watch target_watch =
-{
- .node = "memory/target"
-};
-
/* React to a change in the target key */
static void watch_target(struct xenbus_watch *watch,
const char **vec, unsigned int len)
@@ -73,6 +68,11 @@ static void watch_target(struct xenbus_watch *watch,
*/
balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
}
+static struct xenbus_watch target_watch = {
+ .node = "memory/target",
+ .callback = watch_target,
+};
+
static int balloon_init_watcher(struct notifier_block *notifier,
unsigned long event,
@@ -87,7 +87,9 @@ static int balloon_init_watcher(struct notifier_block *notifier,
return NOTIFY_DONE;
}
-static struct notifier_block xenstore_notifier;
+static struct notifier_block xenstore_notifier = {
+ .notifier_call = balloon_init_watcher,
+};
static int __init balloon_init(void)
{
@@ -100,9 +102,6 @@ static int __init balloon_init(void)
register_xen_selfballooning(&balloon_sysdev);
- target_watch.callback = watch_target;
- xenstore_notifier.notifier_call = balloon_init_watcher;
-
register_xenstore_notifier(&xenstore_notifier);
return 0;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 444345afbd5..52fed16d870 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index cdacf923e07..81c3ce6b8bb 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
@@ -443,7 +444,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
*vaddr = NULL;
- area = xen_alloc_vm_area(PAGE_SIZE);
+ area = alloc_vm_area(PAGE_SIZE);
if (!area)
return -ENOMEM;
@@ -453,7 +454,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
BUG();
if (op.status != GNTST_okay) {
- xen_free_vm_area(area);
+ free_vm_area(area);
xenbus_dev_fatal(dev, op.status,
"mapping in shared page %d from domain %d",
gnt_ref, dev->otherend_id);
@@ -552,7 +553,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
BUG();
if (op.status == GNTST_okay)
- xen_free_vm_area(area);
+ free_vm_area(area);
else
xenbus_dev_error(dev, op.status,
"unmapping page at handle %d error %d",
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index cef9b0bf63d..1b178c6e893 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -46,6 +46,7 @@
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -309,8 +310,7 @@ void xenbus_unregister_driver(struct xenbus_driver *drv)
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
-struct xb_find_info
-{
+struct xb_find_info {
struct xenbus_device *dev;
const char *nodename;
};
@@ -639,7 +639,7 @@ int xenbus_dev_cancel(struct device *dev)
EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
-int xenstored_ready = 0;
+int xenstored_ready;
int register_xenstore_notifier(struct notifier_block *nb)
@@ -770,7 +770,7 @@ static int __init xenbus_init(void)
proc_mkdir("xen", NULL);
#endif
- out_error:
+out_error:
return err;
}
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index b814935378c..9b1de4e34c6 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -36,8 +36,7 @@
#define XEN_BUS_ID_SIZE 20
-struct xen_bus_type
-{
+struct xen_bus_type {
char *root;
unsigned int levels;
int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 32417b5064f..c3c7cd195c1 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -42,6 +42,7 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/notifier.h>
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 540587e18a9..2f73195512b 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -13,6 +13,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index e0c84725d3e..988880dcee7 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -13,6 +13,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <asm/uaccess.h>
#include <asm/amigahw.h>
#include <asm/setup.h>